seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt
|---|---|---|---|---|---|---|---|---|---|---|---|---|
20484377775 | from collections import Counter
# initializing string
test_str = "aabbbccde"
# using collections.Counter() to get
# count of each element in string
res = Counter(test_str)
# valuesList = list(res.values())
# # printing result
# str1 = str(res)
print(res)
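# e.g. Counter({'b': 3, 'a': 2, 'c': 2, 'd': 1, 'e': 1})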
keysList = list(res.keys())
print(keysList)
# print(valuesList)
for item in sorted(res):
    print("{} {}".format(item, res[item]))
| prathammodi333/python-programs | pr1.py | pr1.py | py | 407 | python | en | code | 0 | github-code | 36 |
34987945802 | import sqlite3
import re
from gcpTalent import create_company
def sanitize_company_name(input_string):
# Replace spaces with underscores
sanitized_string = input_string.replace(' ', '_')
# Remove special characters using regular expression
sanitized_string = re.sub(r'[^a-zA-Z0-9_]', '', sanitized_string)
# Convert to lowercase
sanitized_string = sanitized_string.lower()
return sanitized_string
# Connect to the SQLite database
conn = sqlite3.connect('../jobs.db')
cursor = conn.cursor()
# Replace 'your_table' with the actual table name and 'your_column' with the actual column name
query = 'SELECT DISTINCT company FROM jobs'
# Execute the query
cursor.execute(query)
# Fetch all the unique values from the column
companies = cursor.fetchall()
# Close the connection
conn.close()
for company in companies:
project_id = 'laborup'
tenant_id = '065a2ef4-6bf2-4341-a621-29abad6031d8'
display_name = company[0]
external_id = sanitize_company_name(company[0])
create_company(project_id, tenant_id, display_name, external_id) | LoganOneal/job-scraper | gcp-talent/createCompanies.py | createCompanies.py | py | 1,099 | python | en | code | 0 | github-code | 36 |
22840825846 | import cv2
from skimage.measure import ransac
from skimage.transform import ProjectiveTransform, AffineTransform
import numpy as np
class FeatureExtractor(object):
def __init__(self, orbParam):
self.kpData = []
self.orb = cv2.ORB_create(orbParam)
def computeKpData(self, img):
kp, des = self.orb.detectAndCompute(img,None)
return [kp,des]
    def getMatchingPoints(self, kpDataIdx01, kpDataIdx02):
        return self.ExtractMatchingInliers(self.kpData[kpDataIdx01], self.kpData[kpDataIdx02])
def ExtractMatchingInliers(self, srcImgKpData, dstImgKpData):
#Matching
prevImg = srcImgKpData
curImg = dstImgKpData
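        # ORB descriptors are binary strings, so the brute-force matcher uses Hamming distance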
bf = cv2.BFMatcher(cv2.NORM_HAMMING)
matches = bf.knnMatch(curImg[1],prevImg[1], k=2)
#Filtering
# Lowe's Ratio test
good = []
for m, n in matches:
if m.distance < 0.75*n.distance:
good.append(m)
src_pts = np.float32([ prevImg[0][m.trainIdx].pt for m in good ]).reshape(-1, 2)
dst_pts = np.float32([ curImg[0][m.queryIdx].pt for m in good ]).reshape(-1, 2)
# Ransac
model, inliers = ransac(
(src_pts, dst_pts),
AffineTransform, min_samples=4,
residual_threshold=8, max_trials=100
)
#Format Output
matchingInliers = []
src_pts_inliers = []
dst_pts_inliers = []
index = 0
for i in inliers:
if(i):
matchingInliers.append([src_pts[index],dst_pts[index]])
src_pts_inliers.append(src_pts[index])
dst_pts_inliers.append(dst_pts[index])
index+=1
src_pts_inliers = np.array(src_pts_inliers)
dst_pts_inliers = np.array(dst_pts_inliers)
return matchingInliers,src_pts_inliers,dst_pts_inliers
| naurunnahansa/SLAM_implementation | featureExtractor.py | featureExtractor.py | py | 1,856 | python | en | code | 1 | github-code | 36 |
5728382872 | import os
import asyncio
from actions._base import ActionBase
class ExploitAction(ActionBase):
async def __call__(self, *args, **kwargs):
# await asyncio.sleep(0)
rpc = await self.connect()
for exploit in kwargs["service_info"]["exploits"]:
result = 0
if self.read("ping", args[0], args[1]) == 1:
exploit_rounds = kwargs["service_info"]["exploits"][exploit]["rounds"]
if kwargs["round"] in exploit_rounds:
request = {
"id": args[0],
"srv": args[1],
"script": kwargs["service_info"]["script"],
"args": ["exploit", args[2], exploit],
}
answer = await rpc.rpc_send("runner", request)
print("exploit return", answer, type(answer))
if answer["answer"] == "1":
result = 1
self.write("exploit", args[0], args[1], exploit, result=result)
| PoteeDev/scenario-manager | manager/actions/exploit/main.py | main.py | py | 1,103 | python | en | code | 0 | github-code | 36 |
70807032423 | import sys
from collections import deque
sys.stdin = open('input.txt')
def bfs(start):
global answer
q = deque(start)
while q:
node = q.popleft()
answer += visited[node[0]][node[1]]
for k in range(4):
y = node[0] + dr[k]
x = node[1] + dc[k]
if 0 <= y < N and 0 <= x < M and not visited[y][x]:
visited[y][x] = visited[node[0]][node[1]] + 1
q.append((y, x))
dr = [-1, 0, 1, 0]
dc = [0, 1, 0, -1]
T = int(input())
for tc in range(1, T+1):
N, M = map(int, input().split())
beach = [input() for _ in range(N)]
visited = [[0] * M for _ in range(N)]
start = []
answer = 0
for i in range(N):
for j in range(M):
if beach[i][j] == 'W':
visited[i][j] = -1
for k in range(4):
y = i + dr[k]
x = j + dc[k]
if 0 <= y < N and 0 <= x < M and beach[y][x] == 'L' and not visited[y][x]:
visited[y][x] = 1
start.append((y, x))
bfs(start)
print('#{} {}'.format(tc, answer)) | unho-lee/TIL | CodeTest/Python/SWEA/10966.py | 10966.py | py | 1,161 | python | en | code | 0 | github-code | 36 |
20715607582 | # HAPPY NEW YEAR... or something.
import re
from collections import defaultdict
from itertools import repeat
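# Hexagonal grid encoded as (x, y) offsets: east/west steps shift a full column,
# while the four diagonal steps shift half a column and one row.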
DIRECTIONS = {
'se': (.5, 1),
'sw': (-.5, 1),
'nw': (-.5, -1),
'ne': (.5, -1),
'e': (1, 0),
'w': (-1, 0),
}
def find_tile(reference):
reference = re.findall('se|sw|nw|ne|e|w', reference)
pos = (0, 0)
for step in reference:
pos = tuple(map(sum, zip(pos, DIRECTIONS[step])))
return pos
def get_neighbours(tile):
return map(lambda p: tuple(map(sum, zip(*p))), zip(DIRECTIONS.values(), repeat(tile)))
def game_of_life(black_tiles, days=100):
for _ in range(days):
new_tiles = set()
neighbour_count = defaultdict(int)
for tile in black_tiles:
neighbours = set(get_neighbours(tile))
black_neighbours = len(black_tiles & neighbours)
for neighbour in neighbours:
if neighbour not in black_tiles:
neighbour_count[neighbour] += 1
if black_neighbours in [1, 2]:
new_tiles.add(tile)
for white_tile, black_neighbours in neighbour_count.items():
if black_neighbours == 2:
new_tiles.add(white_tile)
black_tiles = new_tiles
return black_tiles
def solve(references):
black_tiles = set()
for reference in references:
tile = find_tile(reference)
black_tiles ^= { tile }
return len(black_tiles), len(game_of_life(black_tiles))
references = open('input', 'r').read().split('\n')
part_1, part_2 = solve(references)
print(f"{part_1} tiles are left with the black side up.")
print(f"{part_2} tiles are black after 100 days.")
| jonassjoh/AdventOfCode | 2020/24/day24.py | day24.py | py | 1,682 | python | en | code | 0 | github-code | 36 |
32001984091 | from typing import List
class Solution:
def __init__(self) -> None:
self.memo={}
def allPathsSourceTarget(self, graph: List[List[int]]) -> List[List[int]]:
self.memo = {len(graph)-1:[[len(graph)-1]]}
def calc(N):
if N in self.memo:
return self.memo[N]
a = []
for n in graph[N]:
for path in calc(n):
a.append([N]+path)
self.memo[N]=a
return a
return calc(0) | plan-bug/LeetCode-Challenge | microcephalus7/medium/797.1.py | 797.1.py | py | 484 | python | en | code | 2 | github-code | 36 |
20112154408 | import argparse
import tflearn
import numpy as np
from processsing import Processing
from training import Training
class Prediction():
def __init__(self):
# Construct the Neural Network classifier and start the learning phase
training = Training()
net = training.buildNN()
self.model = tflearn.DNN(net, tensorboard_verbose=0)
self.LABEL = ['focus', 'distract']
def predict(self, data):
self.model.load('./DNN.tflearn', weights_only=True)
X = np.array(data)[:, :-2]
predictions = self.model.predict(X)
return predictions
def getMostProbableLabel(self, prediction):
result = np.where(prediction == np.amax(prediction))
return self.LABEL[result[0][0]]
if __name__ == '__main__':
# Get argument parser
parser = argparse.ArgumentParser(description='Chain of focus detection using human pose detection')
parser.add_argument('--path', type=str, default='../openPoseDatasetPredict/', help='Path to input json dataset')
args = parser.parse_args()
## Start detection chain for predictions
# Concat all the positions data into a single array and save it as pickle file
process = Processing()
data = process.createInputMatrix(args.path)
data = process.standardise(data)
# Prediction
prediction = Prediction()
predictions = prediction.predict(data)
for index, pred in enumerate(predictions):
        print('Person #' + str(index) + ' is ' + prediction.getMostProbableLabel(pred))
| Pierre-Assemat/DeepPoseIdentification | predictions/WorkInProgress/prediction_tflearn.py | prediction_tflearn.py | py | 1,534 | python | en | code | 0 | github-code | 36 |
6163296593 | '''
wordCloudGenerator class: builds a word cloud from a set of tokens
Arguments:
    text: list of tokens (preferably produced by the pdfReader class) (REQUIRED)
    max_font_size: maximum font size of the words in the cloud
    max_words: maximum number of words in the cloud
    background_color: background color of the cloud
'''
import numpy as np
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
class wordCloud(object):
    def __init__(self, text, max_font_size=50, max_words=100, background_color="white"):
        # Join the tokens into a single string
        self.text = ' '.join(text)
        self.wordcloud = WordCloud(max_font_size=max_font_size, max_words=max_words, background_color=background_color).generate(self.text)
    def generator(self):
        # Generate and plot the word cloud
        plt.imshow(self.wordcloud, interpolation='bilinear')
        plt.axis("off")
        plt.show()
    def save(self, file_name):
        # Generate and save the word cloud in the current directory
        self.wordcloud.to_file(file_name)
if __name__ == '__main__':
    # Example usage of the class
    import pdfReader as pdf
    # the pdfReader class builds the token set from the pdf, with extra stopwords passed as a parameter
    reader = pdf.PDFReader('example.pdf', add_stopwords=['et', 'al'])
    # the wordCloud class takes the generated tokens as a parameter
    wc = wordCloud(reader.getTokens())
    # Plot and save
    wc.generator()
    #wc.save('wc.png') | InfoEduc/Automatizando-Pesquisas-Bibliometricas | wordCloudGenerator.py | wordCloudGenerator.py | py | 1,515 | python | pt | code | 1 | github-code | 36 |
21818093229 | import pickle
from os import path
import os
import sys
# get the directory two levels above this file
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# make parent_dir importable
sys.path.append(parent_dir)
def historic_process(id_user, mensagem_from_bot):
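    # Append the bot's message to this user's pickled history and return the
    # previous last entry (or '' if no history file exists yet).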
historic = []
file_name = f"{parent_dir}/historic/{id_user}.pkl"
if path.exists(file_name):
open_file = open(file_name, "rb")
historic = pickle.load(open_file)
open_file.close()
last_item = historic[-1]
else:
last_item = ''
historic.append(mensagem_from_bot)
open_file = open(file_name, "wb")
pickle.dump(historic, open_file)
open_file.close()
return last_item
def query(id_user):
historic = []
file_name = f"{parent_dir}/historic/{id_user}.pkl"
if path.exists(file_name):
open_file = open(file_name, "rb")
historic = pickle.load(open_file)
open_file.close()
return historic[-1]
else:
return None
| lucashahnndev/Assistant-OS | src/historic.py | historic.py | py | 990 | python | en | code | 2 | github-code | 36 |
20233976437 | class Solution:
def findMedianSortedArrays(self, nums1, nums2):
nums = []
len1 = len(nums1)
len2 = len(nums2)
i = 0
j = 0
while i < len1 and j < len2:
if nums1[i] < nums2[j]:
nums.append(nums1[i])
i += 1
else:
nums.append(nums2[j])
j += 1
if i < len1:
nums += nums1[i: len1]
elif j < len2:
nums += nums2[j: len2]
nums_size = len(nums)
middle_index = int(nums_size/2)
if nums_size == 0:
return []
elif nums_size % 2 == 0:
return (nums[middle_index - 1] + nums[middle_index]) / 2
else:
return nums[middle_index]
if __name__ == "__main__":
nums1 = [1, 2]
nums2 = [3, 4]
middle = Solution().findMedianSortedArrays(nums1, nums2)
print(middle)
| geroge-gao/Algorithm | LeetCode/python/4_寻找两个正序数中的中位数.py | 4_寻找两个正序数中的中位数.py | py | 914 | python | en | code | 26 | github-code | 36 |
483065631 |
def headerForRequests():
header = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:96.0) Gecko/20100101 Firefox/96.0",
"Accept": "application/json, text/plain, */*",
"Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
"Accept-Encoding": "gzip, deflate, br",
"Content-Type": "application/x-www-form-urlencoded",
}
return header
def url_001():
url=''
return url
def dataForGet():
data={'draw': '5',
'start': '0',
'length': '13',
'deptTypes': '[]',
'defunctInd': 'N',
'searchStr': 'FSK_01',
          '_': '1597112917355'}
    return data
def dataForPost():
data= {"ipEntityMstrId": 7233101,
"entityCode": "BLL1",
"entityDesc": "BLL2019",
"entityDescLang1": "BLL2019#",
"shortCode": "BLL",
"seqNo": "1",
"entityNameAlias": "BL",
"entityNameAlias1": "BL#",
"addressDetail": "详细地址111"} | wengyuanpei/pandaInterfaceTest | parameter/locustParameter.py | locustParameter.py | py | 1,076 | python | en | code | 0 | github-code | 36 |
19288984909 | import tensorflow as tf
import numpy as np
from dataset import get_dataset, get_rotation_augmentor, get_translation_augmentor
from model import build_model
AUTOTUNE = tf.data.experimental.AUTOTUNE
dataset, num_classes = get_dataset()
model = build_model(num_classes)
model.load_weights('./saved_weights/weights')
rng = np.random.RandomState()
test_dataset = dataset.concatenate(
dataset.repeat(500)
.map(get_rotation_augmentor(rng), num_parallel_calls=AUTOTUNE)
.map(get_translation_augmentor(rng), num_parallel_calls=AUTOTUNE))
model.evaluate(test_dataset.batch(8))
# print(model.predict(test_dataset.batch(8).take(1))) | wdecay/ShapeClassification | test_model.py | test_model.py | py | 640 | python | en | code | 0 | github-code | 36 |
42245347977 | #!/usr/bin/python
# noinspection PyUnresolvedReferences
import json
import requests
from yoctopuce.yocto_api import *
from yoctopuce.yocto_display import *
from yoctopuce.yocto_anbutton import *
display_list = []
class SimpleXMBC(object):
def __init__(self, host, port, user, password):
self._password = password
self._user = user
self._port = port
self._host = host
self._id = 1
self._url = 'http://%s:%d/jsonrpc' % (self._host, self._port)
def json_rpc_request(self, method, params):
headers = {'content-type': 'application/json'}
# Example echo method
payload = {
"method": method,
"params": params,
"jsonrpc": "2.0",
"id": self._id,
}
response = requests.post(
self._url, data=json.dumps(payload), headers=headers).json()
self._id += 1
if 'error' in response:
print(response['error'])
return response
def get_info_to_display(self):
res = self.json_rpc_request('Player.GetActivePlayers', {})
if 'result' in res and len(res['result']) > 0:
player_id = res['result'][0]['playerid']
player_type = res['result'][0]['type']
else:
return 0, "not playing anything"
params = {"playerid": player_id, "properties": ["percentage"]}
res = self.json_rpc_request('Player.GetProperties', params)
if 'result' in res:
percentage = res['result']['percentage']
else:
percentage = 0
params = {"playerid": player_id,
"properties": ["title", "album", "artist", "season", "episode", "duration", "showtitle", "tvshowid",
"thumbnail", "file", "fanart", "streamdetails"]}
res = self.json_rpc_request('Player.GetItem', params)
if 'result' in res:
if player_type == "audio":
info = res['result']['item']['title'] + " (" + res['result']['item']['artist'][0] + ")"
else:
info = res['result']['item']['label']
else:
info = "not playing anything"
return percentage, info
def up(self):
self.json_rpc_request('Input.Up', {})
print("up)")
def down(self):
self.json_rpc_request('Input.down', {})
print('down')
def left(self):
self.json_rpc_request('Input.Left', json.loads('{}'))
print('left')
def right(self):
self.json_rpc_request('Input.Right', json.loads('{}'))
print('right')
def ok(self):
print('ok')
self.json_rpc_request('Input.Select', json.loads('{}'))
def back(self):
self.json_rpc_request('Input.Back', json.loads('{}'))
print('back')
xbmc_interface = SimpleXMBC('localhost', 8080, 'xbmc', '')
def init_screen(ydisplay):
"""
:type ydisplay: YDisplay
"""
ydisplay.resetAll()
w = ydisplay.get_displayWidth()
h = ydisplay.get_displayHeight()
layer1 = ydisplay.get_displayLayer(1)
layer1.selectGrayPen(0)
layer1.drawBar(0, 0, w - 1, h - 1)
layer1.selectGrayPen(255)
layer1.drawText(w / 2, h / 2, YDisplayLayer.ALIGN.CENTER, "detected!")
def an_button_callback(anbutton, value):
"""
:type value: str
:type anbutton: YAnButton
"""
if (anbutton.get_isPressed() == YAnButton.ISPRESSED_TRUE):
last = anbutton.get_userData()
if last == YAnButton.ISPRESSED_FALSE:
print("send command for " + anbutton.get_friendlyName())
funcid = anbutton.get_functionId()
if funcid == 'anButton1':
xbmc_interface.left()
elif funcid == 'anButton2':
xbmc_interface.up()
elif funcid == 'anButton3':
xbmc_interface.right()
elif funcid == 'anButton4':
xbmc_interface.down()
elif funcid == 'anButton5':
xbmc_interface.ok()
elif funcid == 'anButton6':
xbmc_interface.back()
anbutton.set_userData(anbutton.get_isPressed())
def device_arrival(module):
"""
:type module: YModule
"""
serial_number = module.get_serialNumber()
print("plug of " + serial_number)
product = module.get_productName()
if (product == "Yocto-MaxiDisplay") or product == "Yocto-Display":
display = YDisplay.FindDisplay(serial_number + ".display")
init_screen(display)
display_list.append(display)
for i in range(1, 7):
button = YAnButton.FindAnButton("%s.anButton%d" % (serial_number, i))
button.set_userData(button.get_isPressed())
button.registerValueCallback(an_button_callback)
def device_removal(module):
print("unplug of " + module.get_serialNumber())
def main():
errmsg = YRefParam()
YAPI.InitAPI(0, errmsg)
YAPI.RegisterDeviceArrivalCallback(device_arrival)
YAPI.RegisterDeviceRemovalCallback(device_removal)
if YAPI.RegisterHub("usb", errmsg) < 0:
print("Unable register usb :" + str(errmsg))
return -1
try:
last_title = ''
last_progress = 0
plug_unplug_delay = 0
while True:
progress, title = xbmc_interface.get_info_to_display()
if (progress != last_progress) or (last_title != title):
last_progress = progress
last_title = title
for display in display_list:
w = display.get_displayWidth()
h = display.get_displayHeight()
layer0 = display.get_displayLayer(0)
layer0.selectGrayPen(0)
layer0.drawBar(0, 0, w - 1, h - 1)
layer0.selectGrayPen(255)
layer0.drawText(w / 2, h / 2, YDisplayLayer.ALIGN.CENTER, title)
if progress > 0:
layer0.drawBar(0, h - 1, int(progress * w / 100), h - 1)
display.swapLayerContent(0, 1)
plug_unplug_delay -= 1
if plug_unplug_delay < 0:
YAPI.UpdateDeviceList()
plug_unplug_delay = 5
YAPI.Sleep(1000)
except KeyboardInterrupt:
print("exit with Ctrl-C")
return -1
if __name__ == '__main__':
main() | yoctopuce-examples/xbmc_remote | xbmc_remote.py | xbmc_remote.py | py | 6,386 | python | en | code | 0 | github-code | 36 |
30722703381 | # Python regular expression practice
import re
def RepresentObject(obj):
    if obj:
        print("Match found : ", obj.group()); # What is group()? -> A regex can be split into several groups; group(number) returns only the part matched by that group. For example, group(1) returns only the text captured by the first group.
    else:
        print("Not match");
# match - checks whether the regex matches from the beginning of the string; returns a match object or None
p = re.compile('[a-z]+') # at least one lowercase letter
m = p.match("pythoN");
print(m); # returns a match object
_m = p.match("3 python");
print(_m); # returns None
p = re.compile('[a-z]+\d') # at least one lowercase letter followed by one digit
m = p.match("python");
RepresentObject(m); # Not match
p = re.compile('([a-z]+)(\d)') # at least one lowercase letter + one digit
m = p.match("python3");
RepresentObject(m); # match
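# group(0) is the whole match, while group(1)/group(2) return the captured sub-groups
print(m.group(0), m.group(1), m.group(2)); # python3 python 3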
#search - scans through the whole string, not just the beginning, and returns a match object if one is found.
p = re.compile('[a-z]+') # at least one lowercase letter
m = p.search("pythoN");
RepresentObject(m);
m = p.search("pythoN123pyth334dvs");
RepresentObject(m);
#findall - effectively runs search repeatedly and returns the results as a list.
p = re.compile('[a-z]+') # at least one lowercase letter
m = p.findall("pythoN");
print(m);
m = p.findall("pythoN123pyth334dvs");
print(m);
#finditer - returns the findall results as an iterable of match objects.
p = re.compile('[a-z]+') # at least one lowercase letter
m = p.finditer("pythoN123pyth334dvs");
for item in m:
    RepresentObject(item);
#start, end, span -> methods that return the start/end index of each match.
#Since match always checks from the beginning, its start() is always 0; search is different.
#They are match-object methods, so they cannot be used on findall results.
p = re.compile('[a-z]+') # at least one lowercase letter
m = p.match("pythoN123pyth334dvs");
print(m.span());
m = p.search("pythoN123pyth334dvs");
print(m.span());
# m = p.findall("pythoN123pyth334dvs");
# for item in m:
#     print(item.span());
# ==> raises an error because findall returns plain strings inside a list
m = p.finditer("pythoN123pyth334dvs");
for item in m:
    print(item.span());
#shorthand form => re.method(pattern, string)
m = re.match("[a-z]+", "pythoN123pyth334dvs");
RepresentObject(m);
#compile options - the abbreviated form in parentheses can be used as well.
#DOTALL(S) - lets '.' match '\n' too
p = re.compile("a.b", re.DOTALL);
m = p.match("a\nb");
print(m); # '\n' is matched.
#IGNORECASE(I) - case-insensitive matching
p = re.compile("[a-z]+", re.IGNORECASE);
m1 = p.match("python");
m2 = p.match("PYTHON");
print(m1);
print(m2);
#MULTILINE(M) - multi-line matching: '^' matches at the start of every line
p = re.compile("^python \w+", re.M);
data = """python one
life is too short
python two
you need python
python three"""
m = p.findall(data);
print(m);
#VERBOSE(X) - makes the regex easier to read.
p = re.compile(r'&[#](0[0-7]+|[0-9]+|x[0-9a-fA-F]+);');
charref = re.compile(r"""
 &[#] # Start of a numeric entity reference
 (
 0[0-7]+ # Octal form
 | [0-9]+ # Decimal form
 | x[0-9a-fA-F]+ # Hexadecimal form
 )
 ; # Trailing semicolon
 """, re.VERBOSE)
#the r in front of the pattern means the following string is a raw string.
p = re.compile('\\\\nclass')
p = re.compile(r'\\nclass') #both give the same result.
data = '\\nclass';
print(data);
print(p.findall(data)); | Hoony0321/Algorithm | 2022_02/11/정규표현식공부.py | 정규표현식공부.py | py | 3,814 | python | ko | code | 0 | github-code | 36 |
7034071052 | def equalStacks(h1, h2, h3):
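    # Greedy approach: heights[i] holds the remaining total of stack i; repeatedly
    # pop from the front of the tallest stack until all three totals are equal.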
heights = [sum(h1), sum(h2), sum(h3)]
while heights[0] != heights[1] or heights[1] != heights[2]:
max_height = max(heights)
max_index = heights.index(max_height)
if max_index == 0:
heights[0] -= h1.pop(0)
elif max_index == 1:
heights[1] -= h2.pop(0)
else:
heights[2] -= h3.pop(0)
return heights[0] | TheArchons/Leetcode | hackerrank/Datastructures/Stacks/EqualStacks.py | EqualStacks.py | py | 419 | python | en | code | 1 | github-code | 36 |
5511991110 |
import os
import requests
import json
import psycopg2
from flask import Flask, render_template, request, flash, redirect, session, g, jsonify,url_for,abort
from sqlalchemy.exc import IntegrityError
from forms import LoginForm, UserAddForm, PasswordResetForm
from models import db, connect_db, User,Addproduct, FavoriteProduct
from helpers import get_products_from_api_response
CURR_USER_KEY = "curr_user"
app = Flask(__name__)
app.jinja_env.filters['zip'] = zip
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'postgresql:///eCommerce')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ECHO'] = False
app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', "it's a secret")
API_BASE_URL = "https://fakestoreapi.com"
connect_db(app)
@app.before_request
def add_user_to_g():
"""If we're logged in, add curr user to Flask global."""
if CURR_USER_KEY in session:
g.user = User.query.get(session[CURR_USER_KEY])
else:
g.user = None
def do_login(user):
"""Log in user."""
session[CURR_USER_KEY] = user.id
def do_logout():
"""Logout user."""
if CURR_USER_KEY in session:
del session[CURR_USER_KEY]
@app.route('/signup', methods=["GET", "POST"])
def signup():
"""Handle user signup.
Create new user and add to DB. Redirect to home page.
If form not valid, present form.
If the there already is a user with that username: flash message
and re-present form.
"""
if CURR_USER_KEY in session:
del session[CURR_USER_KEY]
form = UserAddForm()
if form.validate_on_submit():
try:
user = User.signup(
username=form.username.data,
password=form.password.data,
email=form.email.data,
)
db.create_all()
db.session.commit()
except IntegrityError:
flash("Username already taken", 'danger')
return render_template('users/signup.html', form=form)
do_login(user)
return redirect("/")
else:
return render_template('users/signup.html', form=form)
@app.route('/login', methods=["GET", "POST"])
def login():
"""Handle user login."""
form = LoginForm()
if form.validate_on_submit():
user = User.authenticate(form.username.data,
form.password.data)
if user:
do_login(user)
flash(f"Hello, {user.username}!", "success")
return redirect("/")
flash("Invalid credentials.", 'danger')
return render_template('users/login.html', form=form)
@app.route('/logout')
def logout():
"""Handle logout of user."""
do_logout()
flash("You have successfully logged out.", 'success')
return redirect("/")
#######################################################
@app.route('/')
def show_products_form():
stores = most_popular_products()
return render_template("home.html", stores=stores)
@app.route('/index', methods=['POST', 'GET'])
def product_by_name():
    res = requests.get('https://fakestoreapi.com/products')
    data = res.json()
    # the /products endpoint returns a plain list of items
    if not data:
        flash("No item name found", "danger")
        return redirect('/')
    else:
        stores = get_products_from_api_response(data)
        return render_template('stores/index.html', stores=stores)
##############################################################################
# The navbar route Links
def most_popular_products():
    res = requests.get('https://fakestoreapi.com/products')
    data = res.json()
    return get_products_from_api_response(data)
##### This gives you a list of categories###
@app.route('/category')
def item_by_category():
    selected_category = request.args.get('selected_category')
    stores = []
    if selected_category:
        res = requests.get('https://fakestoreapi.com/products/category/' + selected_category)
        data = res.json()
        stores = get_products_from_api_response(data)
    return render_template('stores/category.html', stores=stores)
#############################################################################
# Get all the details of the product
@app.route('/product_detail')
def details_by_id():
    product_id = request.args.get('product_id')
    res = requests.get('https://fakestoreapi.com/products/' + product_id)
    # the single-product endpoint returns one object, not a list
    item = res.json()
    stores = [{
        'id': item['id'],
        'title': item['title'],
        'image': item['image'],
        'description': item['description'],
        'price': item['price']
    }]
    return render_template('stores/product_detail.html', stores=stores)
##############################################################################
# User Homepage- Needs Fixing
@app.route('/users/favorite')
def user_favorite():
user_id = g.user.id
user = User.query.get_or_404(user_id)
if user:
all_items = FavoriteProduct.query.filter_by(
user_id=user_id).order_by(FavoriteProduct.id.desc()).all()
shops = []
for item in all_items:
shop = {'name': item.item_name,
'id': item.item_id, 'thumb': item.item_thum}
shops.append(shop)
return render_template("users/favorite.html", user=user, shops=shops, show_delete=True)
else:
return render_template("users/favorite.html")
@app.route('/users/favorite/<int:item_id>', methods=["GET", "POST"])
def add_favorite(item_id):
"""Add Item id to user favorite."""
user_id = g.user.id
user = User.query.get_or_404(user_id)
item_object = FavoriteProduct.query.filter_by(
item_id=str(item_id),
user_id=str(user_id)
).all()
    if not item_object:
        # the single-product endpoint returns the item object directly
        res = requests.get(f"{API_BASE_URL}/products/{item_id}")
        item = res.json()
        item_id = item['id']
        item_name = item['title']
        item_thum = item['image']
        new_item = FavoriteProduct(item_id=item_id,
                                   item_name=item_name, item_thum=item_thum, user_id=user_id)
        db.session.add(new_item)
        db.session.commit()
        return redirect(url_for('user_favorite'))
    else:
        flash("Item already in favorites!", "danger")
        return redirect(url_for('show_products_form'))
# -------------------- Remove the favorite product --------------------------->
@app.route('/users/delete/<int:item_id>', methods=["GET", "POST"])
def delete_item(item_id):
"""Have currently-logged-in-user delete product."""
if not g.user:
flash("Access unauthorized.", "danger")
return redirect("/")
    user_favorite_product = FavoriteProduct.query.filter_by(
        item_id=str(item_id),
        user_id=str(g.user.id)
    ).first()
    db.session.delete(user_favorite_product)
    db.session.commit()
    return redirect("/users/favorite")
##############################################################################
@app.errorhandler(404)
def page_not_found(e):
"""404 NOT FOUND page."""
return render_template('404/404.html'), 404
##############################################################################
# Turn off all caching in Flask
# (useful for dev; in production, this kind of stuff is typically
# handled elsewhere)
#
# https://stackoverflow.com/questions/34066804/disabling-caching-in-flask
@ app.after_request
def add_header(req):
"""Add non-caching headers on every request."""
req.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
req.headers["Pragma"] = "no-cache"
req.headers["Expires"] = "0"
req.headers['Cache-Control'] = 'public, max-age=0'
return req | MITHIRI1/Capstone-Project-1 | app.py | app.py | py | 8,123 | python | en | code | 0 | github-code | 36 |
24300694321 | from pyspark import SparkContext, SparkConf
from pyspark.sql import SQLContext, DataFrame
from pyspark.sql.functions import lit
from pyspark.sql.functions import split, explode, monotonically_increasing_id
import numpy as np
from numpy import linalg as LA
from scipy.sparse import csr_matrix
import json
import datetime
from tqdm.notebook import trange, tqdm
## TODO:
# - Implement es_recommendation:
#   - Replace the local item_indexes and user_indexes variables with the index in es_recommendation
# - Add update_item_viewers to elastic.py so it updates the corresponding items index in elasticsearch
from sar.models.elastic import ElasticHelper
from sar.models.hilfe import Utils
class SAR():
    # Initialize the model
    def __init__(self, es_recommendation, es_metadata, TIME_SCORING=(17/3)*10**8, W_SCORING=1):
        # elastic search helpers for the recommendation index and the metadata
        self.es_helper = ElasticHelper(es_recommendation, es_metadata)
        self.es_metadata = es_metadata
        # time-decay parameters used by scoring_function
        self.T = TIME_SCORING
        self.W = W_SCORING
        # Model matrices
        self.A = None
        self.C = None
        self.S = None
    def load_new_users(self, dataset, verb=False):
        # Given a dataset, load the new users into the model indexes
        # get the unique users in the dataset
        users_in_ds:set = set([u.user_id for u in dataset.select("user_id").collect()])
        users_in_ds:list = list(users_in_ds)
        # number of ignored user_ids
        ignored:int = 0
        for i in trange(len(users_in_ds)):
            userid = users_in_ds[i]
            if userid not in self.user_indexes.keys():
                index = len(self.user_indexes)
                # add the user to the es recommendation index
                self.es_helper.add_user(userid, index)
            else:
                ignored += 1
        if verb:
            print("* (pre) number of unique users: ", len(self.user_indexes.keys()))
            print("* (post) number of unique users: ", len(self.user_indexes.keys()))
            print("* user difference: ", len(self.user_indexes) - ignored)
        return ignored
    def load_new_items(self, dataset, verb=False):
        # Given a dataset, load the new items into the model indexes
        # number of items before the loop starts
        n_items_pre = self.es_helper.n_items_in_recommendation()
        # info about skipped items
        info:dict = { "missing_metadata": [] }
        # get the unique items in the dataset
        items_in_ds = set([i.group_id for i in dataset.select("group_id").collect()])
        items_in_ds = list(items_in_ds)
        for j in trange(len(items_in_ds)):
            itemid = items_in_ds[j]
            if itemid not in self.item_indexes.keys():
                # the index is assigned by order of appearance
                index = len(self.item_indexes)
                # read the metadata for this group_id
                metadata = self.es_metadata.get_item_metadata(itemid)
                # only add items that have metadata
                if metadata == dict():
                    info["missing_metadata"].append(itemid)
                else:
                    self.es_helper.add_item(itemid, index)
        if verb:
            print("* (pre) number of unique items: ", len(self.item_indexes.keys()))
            print("* (post) number of unique items: ", len(self.item_indexes.keys()))
            print("* item difference: ", len(self.item_indexes.keys()) - n_items_pre)
            print("* skipped items: ", len(info["missing_metadata"]))
        return info
    def build_coocurrence_matrix(self) -> csr_matrix:
        M:int = self.es_helper.n_items_in_recommendation() # number of items in es_recommendation
        C:csr_matrix = csr_matrix((M,M)).tolil()
        for i, item_i in enumerate(M):
            index_i:int = self.es_metadata.get_item_index(item_i) # index of item i
            item_i_viewers:set = self.es_metadata.get_item_viewers(item_i) # users who viewed item i
            for j, item_j in enumerate(M):
                index_j:int = self.es_metadata.get_item_index(item_j) # index of item j
                item_j_viewers:set = self.es_metadata.get_item_viewers(item_j) # users who viewed item j
                C[index_j, index_i] = len(item_j_viewers.intersection(item_i_viewers))
        return C
def build_similarity_matrix(self) -> csr_matrix:
return self.C
def scoring_function(self, event_time) -> float:
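        # Exponential time decay: score = W * 2**(-(t_0 - t_k) / T),
        # so more recent interactions contribute more affinity.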
t_k = event_time.timestamp()
t_0 = datetime.datetime.now().timestamp()
exp = - (t_0 - t_k) / self.T
return self.W * 2 ** exp
def build_affinity_matrix(self, dataset) -> csr_matrix:
        # Given a dataset, update the affinity matrix A
M = self.es_helper.n_items_in_recommendation()
N = self.es_helper.n_users_in_recommendation()
self.A:csr_matrix = csr_matrix((N, M)).tolil()
ignored:int = 0
for interaction in dataset.collect():
user_id, group_id, event_time = Utils.decode_interaction(interaction)
if self.es_helper.is_valid_group_id(group_id) and self.es_helper.is_valid_user_id(user_id):
index_item = self.es_helper.get_item_index(group_id)
index_user = self.es_helper.get_user_index(user_id)
self.A[index_user, index_item] += self.scoring_function(event_time)
else:
ignored += 1
return ignored
    def fit(self, dataset):
        # Given a dataset, update the matrices C, S and A
        # update the index with the per-item viewers
        self.es_metadata.update_items_viewers(dataset)
        # update the matrices
        self.C = self.build_coocurrence_matrix()
        self.S = self.build_similarity_matrix()
        # build_affinity_matrix fills self.A and returns the number of ignored interactions
        self.build_affinity_matrix(dataset)
        # build the prediction matrix
        self.Y = self.A @ self.S
    def recommend_similar_k_items(self,
                                  group_id,
                                  k:int,
                                  include=[],
                                  exclude=[],
                                  enhance=[]):
        # Given a group_id, recommend the k most similar items
        itemIndex = self.es_helper.get_item_index(group_id)
        item2item:list = [
            (similarity, i) for i, similarity in enumerate(list(self.S[itemIndex].toarray()[0]))
        ]
        # drop the item whose neighbours we are looking for
        item2item.pop(itemIndex)
        ## filter and boost
        BIAS = 10
        for pos, (similarity, index) in enumerate(item2item):
            item_metadata = self.es_metadata.get_item_metadata(index, index=True)
            # filter out items whose category is not included
            if item_metadata["filtros"] not in include:
                item2item[pos] = (-1, index)
            # boost items in the enhance list
            if item_metadata["filtros"] in enhance:
                item2item[pos] = (item2item[pos][0] + BIAS, index)
        # sort the items by score
        ordered_items = sorted(item2item, key=lambda x: x[0], reverse=True)
        recommendations:list = ordered_items[:k]
        # are there items with a score of 0?
        l = sum([1 for scoring, index in recommendations if scoring == 0])
        if l > 0:
            # add l popular items to the recommendations
            recommendations = recommendations[:k-l]
            # recommendations.extend(getTopLMovies(self.Y, l, exclude=exclude))
        # convert to the expected output format
        top_k:list = []
        for score, index in recommendations:
            rec = (self.es_helper.get_group_id(index), score)
            top_k.append(rec)
        return top_k
    def recommend_k_items_to_user(self, user_id):
        # Given a user_id, recommend the k items with the highest affinity
        pass
| SebasAndres/Recomendadores | src/sar/models/sar.py | sar.py | py | 8,299 | python | en | code | 0 | github-code | 36 |
6301592120 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/5/12 21:11
# @Author : chineseluo
# @Email : 848257135@qq.com
# @File : run.py
# @Software: PyCharm
import os
from Common.publicMethod import PubMethod
import logging
from selenium.webdriver.common.by import By
from Base.baseBy import BaseBy
pub_api = PubMethod()
root_dir = os.path.dirname(os.path.dirname(__file__))
config_path = os.path.join(root_dir, 'ActivityObject')
config_path = os.path.abspath(config_path)
class ElemParams:
def __init__(self, dir_name, file_name, root_dir_name=config_path):
self.elem_name = []
self.desc = []
self.data = []
self.info = []
self.__run(dir_name, root_dir_name, file_name)
def __run(self, dir_name, root_dir_name, file_name):
config_dir_name = os.path.join(root_dir_name, dir_name)
file_path = os.path.abspath(os.path.join(config_dir_name, file_name))
try:
self.info = PubMethod().read_yaml(file_path)['parameters']
for i in self.info:
self.elem_name.append(i['elem_name'])
self.desc.append(i['desc'])
self.data.append(i['data'])
except Exception as e:
logging.error("文件解析失败!{},文件路径:{}".format(e, file_path))
    def get_locator(self, elem_name):
        """
        @param elem_name: the custom element name to look up
        @return: a (locator_type, value) tuple, or None if the element has no valid locator
        """
        method_map = {
            "ID": By.ID,
            "XPATH": By.XPATH,
            "LINK_TEXT": By.LINK_TEXT,
            "PARTIAL_LINK_TEXT": By.PARTIAL_LINK_TEXT,
            "NAME": By.NAME,
            "TAG_NAME": By.TAG_NAME,
            "CLASS_NAME": By.CLASS_NAME,
            "CSS_SELECTOR": By.CSS_SELECTOR,
            "IOS_UIAUTOMATION": BaseBy.IOS_UIAUTOMATION,
            "IOS_PREDICATE": BaseBy.IOS_PREDICATE,
            "IOS_CLASS_CHAIN": BaseBy.IOS_CLASS_CHAIN,
            "ANDROID_UIAUTOMATOR": BaseBy.ANDROID_UIAUTOMATOR,
            "ANDROID_VIEWTAG": BaseBy.ANDROID_VIEWTAG,
            "WINDOWS_UI_AUTOMATION": BaseBy.WINDOWS_UI_AUTOMATION,
            "ACCESSIBILITY_ID": BaseBy.ACCESSIBILITY_ID,
            "IMAGE": BaseBy.IMAGE,
            "CUSTOM": BaseBy.CUSTOM,
        }
        for item in self.info:
            if item["elem_name"] == elem_name:
                method = item["data"]["method"]
                value = item["data"]["value"]
                logging.info("Element name: {}, locator method: {}, locator value: {}".format(elem_name, method, value))
                if method in method_map and value is not None:
                    return (method_map[method], value)
                logging.error("Element name: {}: invalid locator method or value, please check!".format(elem_name))
# Register the yaml file objects
class LoginActivityElem(ElemParams):
def __init__(self):
super(LoginActivityElem, self).__init__('Login_activity', 'Login_activity.yaml')
if __name__ == '__main__':
login_activity = LoginActivityElem()
print(login_activity.get_locator("phone_number"))
| chineseluo/app_auto_frame_v1 | ActivityObject/elemParams.py | elemParams.py | py | 5,248 | python | en | code | 11 | github-code | 36 |
39585567433 | #0<xt<1
#0<r<4
r1 = float(input("Enter an r value between 0 and 4."))
xö = 0.1
for i in range(0, 100):
    print(i, xö)
    a = 1 - xö
    xs = (r1 * xö) * a
    xö = xs
| Yasemnerdogan/Kodluyoruz-71.AnkaraFullStack-Python-Angular | ödevler/3)Python-hafta2-bireysel/döngüler/d.5)Lojistik fonksiyon ve kaos.py.py | d.5)Lojistik fonksiyon ve kaos.py.py | py | 174 | python | tr | code | 0 | github-code | 36 |
18518883640 | import random
from multiprocessing import Pool
from ai_player import Ai_player
from deck import Deck
class Population:
"""
blackjack Ai player population
"""
POPULATION_SIZE = 400
BJ_ROUNDS = 50000
PARENT_SIZE = 5
MAX_THREADS = 40 # most efficient
def __init__(self):
def __init_players():
return [Ai_player() for _ in range(self.POPULATION_SIZE)]
self.generation = 0
self.best_player = None
self.players = __init_players()
self.__decks = []
def create_new_generation(self):
self.__create_new_gen_players()
self.generation += 1
def __create_new_gen_players(self):
parents = self.__get_best_players(self.PARENT_SIZE)
# an array of ai players that will make the next generation
players_parents = random.choices(parents,
weights=(55, 20, 15, 8, 2),
k=(self.POPULATION_SIZE -
self.PARENT_SIZE))
self.players = [Ai_player(player) for player in players_parents]
# for i in range(self.PARENT_SIZE):
for i in range(1):
self.players.append(parents[i])
def __get_best_players(self, num) -> list[Ai_player]:
"""
gets the top num highest ranked players
"""
return self.players[:num]
def play_generation(self):
"""
runs the current generation of players
"""
# generate the decks
self.__decks = [Deck(6) for _ in range(self.BJ_ROUNDS)]
# shuffle the decks
for deck in self.__decks:
deck.shuffle()
with Pool(self.MAX_THREADS) as pool:
players = pool.map(self.thread_worker, self.players)
self.players = players
# set the best player
self.players.sort(key=lambda x: x.get_fitness(), reverse=True)
self.best_player = self.players[0]
def thread_worker(self, player: Ai_player):
player.play_rounds(self.__decks)
return player
| BenPVandenberg/blackjack-ai | Dustin_Marks/population.py | population.py | py | 2,112 | python | en | code | 1 | github-code | 36 |
20087427863 | from argparse import ArgumentParser
from copy import deepcopy
from pathlib import Path
def build_parser():
parser = ArgumentParser()
parser.add_argument(
'-i', '--input-filename', type=Path,
required=True
)
return parser
def string_to_integer_list(string):
return list(map(int, string))
def get_scenic_score(sequence):
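    # Viewing distance in one direction = number of steps to the first tree at least
    # as tall (or to the edge); each position's score within this 1-D sequence is
    # left_distance * right_distance, and edge positions keep a score of 0.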
scenic_score = [0] * len(sequence)
for index, element in enumerate(sequence):
left, right = sequence[:index], sequence[index + 1:]
left = left[::-1]
edge = not left or not right
if edge:
continue
score_left = next(
(
index for index, comparison_value in enumerate(left, 1)
if comparison_value >= element
), len(left)
)
score_right = next(
(
index for index, comparison_value in enumerate(right, 1)
if comparison_value >= element
), len(right)
)
scenic_score[index] = score_left * score_right
return scenic_score
def transpose(matrix):
return list(zip(*matrix))
def main():
args = build_parser().parse_args()
with open(args.input_filename) as fd:
data = fd.read()
lines = data.splitlines()
grid = [
string_to_integer_list(line)
for line in lines
]
row_scenic_score = [
get_scenic_score(row)
for row in grid
]
column_scenic_score = transpose([
get_scenic_score(column)
for column in transpose(grid)
])
total_scenic_score = deepcopy(row_scenic_score)
for row_index in range(len(column_scenic_score)):
for col_index in range(len(column_scenic_score)):
total_scenic_score[row_index][col_index] = (
row_scenic_score[row_index][col_index] *
column_scenic_score[row_index][col_index]
)
print(max(max(row) for row in total_scenic_score))
if __name__ == '__main__':
main()
| reynoldscem/aoc2022 | day_08/part2.py | part2.py | py | 2,023 | python | en | code | 0 | github-code | 36 |
36808206999 | import os
import copy
from typing import Dict, Union
from torch.utils.data import Dataset
import torchio as tio
from .subject_loaders import SubjectLoader
from .subject_filters import SubjectFilter, ComposeFilters
class SubjectFolder(Dataset):
""" A PyTorch Dataset for 3D medical data.
Args:
root: Path to the root of the subject folder dataset.
subject_path: Path to folder containing subjects, relative to the root.
Each subject must have their own folder within the subject_path.
subject_loader: A SubjectLoader pipeline that loads subject data from the subject folders.
cohorts: An optional dictionary that defines different subject cohorts in this dataset.
The dictionary keys are cohort names, and the values are ``SubjectFilter``s.
The active cohort can be set with the ``set_cohort(cohort_name)`` method.
A special cohort name ``'all'`` may be provided to define a filter that is applied
to all subjects.
transforms: Optional ``tio.Transform``s that are applied to each subject.
This can be a single transformation pipeline, or a dictionary that defines
a number of pipelines.
The key ``"default"`` can be used to set a default transformation pipeline
when no cohort is active.
If a matching key is in `cohorts`, then that transformation will become
active when ``set_cohort(cohort_name)`` is called.
A transformation can also be explicitly set with ``set_transform(transform_name)``.
"""
def __init__(
self,
root: str,
subject_path: str,
subject_loader: SubjectLoader,
cohorts: Dict[str, SubjectFilter] = None,
transforms: Union[tio.Transform, Dict[str, tio.Transform]] = None,
ref_img = None
):
self.root = root
self.subject_path = os.path.join(self.root, subject_path)
self.subject_loader = subject_loader
self.cohorts = {} if cohorts is None else cohorts
self.transforms = transforms
self.ref_img = ref_img
self._preloaded = False
self._pretransformed = False
# Loops through all subjects in the directory
subjects = []
subject_names = os.listdir(self.subject_path)
for subject_name in subject_names:
# The subject_data dictionary will be used to initialize the tio.Subject
subject_folder = os.path.join(self.subject_path, subject_name)
subject_data = dict(name=subject_name, folder=subject_folder)
# Apply subject loaders
self.subject_loader(subject_data)
# torchio doesn't like to load a subject with no images
if not any(isinstance(v, tio.Image) for v in subject_data.values()):
continue
subject = tio.Subject(**subject_data)
if self.ref_img:
transform = tio.CopyAffine(self.ref_img)
subject = transform(subject)
subjects.append(subject)
if "all" in self.cohorts:
all_filter = self.cohorts['all']
subjects = all_filter(subjects)
self.active_cohort = 'all'
self.all_subjects = None
self.all_subjects_map = None
self.subjects = None
self.subjects_map = None
self.excluded_subjects = None
self.transform = None
self.set_all_subjects(subjects)
def set_all_subjects(self, subjects):
subjects.sort(key=lambda subject: subject['name'])
self.all_subjects = subjects
self.all_subjects_map = {subject['name']: subject for subject in subjects}
self.set_cohort(self.active_cohort)
def set_subjects(self, subjects):
self.subjects = subjects
self.subjects_map = {subject['name']: subject for subject in subjects}
self.excluded_subjects = [subject for subject in self.all_subjects
if subject not in self.subjects]
def set_cohort(self, cohort: Union[str, SubjectFilter]):
self.active_cohort = cohort
if isinstance(cohort, str):
self.set_transform(cohort)
if cohort == "all" or cohort is None:
self.set_subjects(self.all_subjects)
elif cohort in self.cohorts:
subject_filter = self.cohorts[cohort]
self.set_subjects(subject_filter(self.all_subjects))
else:
raise ValueError(f"Cohort name {cohort} is not defined in dataset cohorts: {self.cohorts}.")
if isinstance(cohort, SubjectFilter):
self.set_transform('default')
subject_filter = cohort
self.set_subjects(subject_filter(self.all_subjects))
def set_transform(self, transform: Union[str, tio.Transform]):
if isinstance(transform, str):
transform_name = transform
if self.transforms is None:
self.transform = None
elif isinstance(self.transforms, tio.Transform):
self.transform = self.transforms
elif isinstance(self.transforms, dict):
if transform_name in self.transforms:
self.transform = self.transforms[transform_name]
elif 'default' in self.transforms:
self.transform = self.transforms['default']
else:
self.transform = None
elif isinstance(transform, tio.Transform):
self.transform = transform
else:
raise ValueError()
def get_cohort_dataset(self, cohort: Union[str, SubjectFilter]) -> 'SubjectFolder':
transforms = self.transforms
if isinstance(cohort, str):
subject_filter = self.cohorts[cohort]
if isinstance(transforms, dict):
transforms = transforms.copy()
if cohort in transforms:
transforms['default'] = transforms[cohort]
del transforms[cohort]
elif isinstance(cohort, SubjectFilter):
subject_filter = cohort
else:
raise ValueError()
cohorts = self.cohorts.copy()
if 'all' in cohorts:
cohorts['all'] = ComposeFilters(cohorts['all'], subject_filter)
else:
cohorts['all'] = subject_filter
return SubjectFolder(self.root, self.subject_path, self.subject_loader, cohorts, transforms, ref_img=self.ref_img)
def __len__(self):
return len(self.subjects)
def __getitem__(self, idx):
# Get subjects by an integer ID in 0..N, or by the subject's folder name
if isinstance(idx, int):
subject = self.subjects[idx]
elif isinstance(idx, str):
subject = self.subjects_map[idx]
else:
raise ValueError(f"Subject index must be an int or a string, not {idx} of type {type(idx)}")
# Load subject and apply transform
subject = copy.deepcopy(subject)
if not self._preloaded:
subject.load()
if not self._pretransformed and self.transform is not None:
subject = self.transform(subject)
return subject
def __contains__(self, item):
if isinstance(item, int):
return item < len(self)
if isinstance(item, str):
return item in self.subjects_map
if isinstance(item, tio.Subject):
return item in self.subjects
return False
# Preloads the images for all subjects. Typically they are lazy-loaded in __getitem__.
def preload_subjects(self):
if self._preloaded:
return
self._preloaded = True
loaded_subjects = []
for subject in self.all_subjects:
subject = copy.deepcopy(subject)
subject.load()
loaded_subjects.append(subject)
self.set_all_subjects(loaded_subjects)
self.set_cohort(self.active_cohort)
def preload_and_transform_subjects(self):
if self._pretransformed:
return
self.preload_subjects()
if self.transform is not None:
self._pretransformed = True
self.set_all_subjects([self.transform(subject) for subject in self.subjects])
# TODO: Do this better.
def load_additional_data(self, path: str, subject_loader: SubjectLoader):
subject_names = os.listdir(path)
for subject_name in subject_names:
subject_folder = os.path.join(path, subject_name)
subject_data = dict(name=subject_name, folder=subject_folder)
subject_loader(subject_data)
del subject_data['name']
del subject_data['folder']
# find the first subject with matching name, else return None
matched_subject = next((subject for subject in self.subjects if subject['name'] == subject_name), None)
if matched_subject is not None:
# update the primary object so other references such as subject_map are updated
matched_subject.update(subject_data)
| efirdc/Segmentation-Pipeline | segmentation_pipeline/data_processing/subject_folder.py | subject_folder.py | py | 9,208 | python | en | code | 1 | github-code | 36 |
14391422801 | import json
import os
from collections import OrderedDict
TRAIN_CATEGORY_TYPE_SPECS = {"optim": True, "batch_size": True}
TRAIN_RANGE_SPECS = {
"batch_size": {
"categories": [512, 1024]
},
# TODO: make learning rate sample range dependent on optimizer type.
"learning_rate": {
"low": -4.5,
"high": -1.5,
"scale": "log10",
},
"grad_clip": {
"low": 1,
"high": 100,
},
"weight_decay": {
"low": -9,
"high": -4,
"scale": "log10",
},
"momentum": {
"low": 0.85,
"high": 0.99,
},
"optim": {
"categories": ["RMSprop", "Adam"],
},
}
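# Note: the "scale" fields are presumably interpreted by the sampler elsewhere in the
# codebase, e.g. "log10" meaning the value is drawn as 10**u with u uniform in [low, high].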
LAYER_TYPES = [
"conv2d", "fc", "pool2d", "bn1d", "bn2d", "relu", "lrelu", "drop",
"surv_ode", "rnn", "nnet_surv", "nnet_surv_cox", "deephit", "deepsurv",
"cox_time", "rdeephit"
]
LAYER_CATEGORY_TYPE_SPECS = {}
LAYER_RANGE_SPECS = {}
# Define LAYER_RANGE_SPECS and LAYER_CATEGORY_TYPE_SPECS for each layer type
for layer_type in LAYER_TYPES:
if layer_type == "conv2d":
is_category_types = {}
sample_specs = {
"out_channels": {
"low": 3,
"high": 8,
"scale": "log2"
},
"kernel_size": {
"low": 1,
"high": 7
},
"stride": {
"low": 0,
"high": 4
},
"padding": {
"low": 0,
"high": 4
},
}
for var_name in sample_specs:
if var_name not in is_category_types:
sample_specs[var_name]["is_int"] = True
elif layer_type == "fc":
is_category_types = {}
sample_specs = {
"out_features": {
"low": 3,
"high": 12,
"scale": "log2",
"is_int": True,
},
}
elif layer_type == "pool2d":
is_category_types = {}
sample_specs = {
"kernel_size": {
"low": 2,
"high": 3
},
"stride": {
"low": 0,
"high": 4
},
"padding": {
"low": 0,
"high": 4
}
}
for var_name in sample_specs:
sample_specs[var_name]["is_int"] = True
elif layer_type == "bn1d" or layer_type == "bn2d":
is_category_types = {"affine": True}
sample_specs = {
"momentum": {
"low": 0.05,
"high": 0.2,
},
"affine": {
"categories": [True, False]
},
}
elif layer_type == "drop":
is_category_types = {}
sample_specs = {
"p": {
"low": 0.01,
"high": 0.5,
},
}
elif layer_type == "surv_ode":
is_category_types = {
"num_layers": True,
"batch_norm": True,
"func_type": True,
"has_feature": True
}
sample_specs = {
"hidden_size": {
"low": 2,
"high": 7,
"scale": "log2",
"is_int": True,
},
"num_layers": {
"categories": [1, 2, 4]
},
"batch_norm": {
"categories": [True, False]
},
"func_type": {
"categories": [
"mlp", "exponential", "weibull", "log_logistic",
"cox_mlp_exp", "cox_mlp_mlp"
]
},
"has_feature": {
"categories": [True, False]
},
}
elif layer_type == "rnn":
is_category_types = {"rnn_type": True}
sample_specs = {
"hidden_size": {
"low": 3,
"high": 8,
"scale": "log2",
"is_int": True,
},
"num_layers": {
"low": 1,
"high": 3,
"is_int": True,
},
"rnn_type": {
"categories": ["LSTM", "GRU"]
},
}
else:
is_category_types = {}
sample_specs = {}
LAYER_CATEGORY_TYPE_SPECS[layer_type] = is_category_types
LAYER_RANGE_SPECS[layer_type] = sample_specs
if __name__ == "__main__":
pass
| jiaqima/SODEN | range_specs.py | range_specs.py | py | 4,483 | python | en | code | 13 | github-code | 36 |
38608858024 | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 11 22:34:18 2018
@author: Roshan Zameer Syed
ID : 99999-2920
Description : Multivariate linear regression and backward elimination
"""
# Reading the dataset
import pandas as pd
data = pd.read_csv('Advertising.csv')
# Feature and response matrix
X = data.iloc[:,[1,2,3]].values
y = data.iloc[:,-1].values
# Splitting the dataset into Training and Test sets
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.2,random_state=0)
# Linear regresssion algorithm
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train,y_train)
ypred = regressor.predict(X_test)
import statsmodels.api as sm
import numpy as np
# Adding new column of one's to X
X = np.append(arr = np.ones((200,1)), values = X, axis = 1)
# Running Backward elimination algorithm
X_opt = X[:,[0,1,2,3]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
X_opt = X[:,[0,1,2]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
import matplotlib.pyplot as plt
# plot the TV feature (column 1 after the intercept column) against the sales response
plt.scatter(X[:, 1], y)
plt.show()
"""
• How many observations are in this data set ?
Ans : 200
• How many features are in this data set ?
Ans : 3 features
• What is the response for this data set ?
Ans : The last column sales is the response
• Which predictors are the most significant for this dataset? Please explain why.
Ans : Column 1 (TV) and column 2 (Radio) are the most significant predictors, because
their p-values are below the chosen significance threshold (commonly 0.05), while
Newspaper's is not, which is why backward elimination removed it.
"""
| syedroshanzameer/Machine-Learning | Multi-variate Linear Regression/multiRegression.py | multiRegression.py | py | 1,600 | python | en | code | 0 | github-code | 36 |
2360857851 | """
[Problem] A millionaire meets a stranger who proposes the following deal: "Every day I give you 100,000 yuan, and in return you give me 1 yuan on the first day, 2 yuan on the second day, 4 yuan on the third day... each day doubling the previous day's amount, for n days (0 <= n <= 30)." The millionaire happily accepts. Write a program that computes how much the stranger gave the millionaire and how much the millionaire gave the stranger over these n days.
[Input] The number of days n (0 <= n <= 30)
[Output] Print to the console, on separate lines, the total paid by the stranger and the total paid by the millionaire. Discard any fractional part.
[Sample input] 30
[Sample output] 3000000
1073741823
[Explanation] After 30 days of trading, the stranger has given the millionaire 3,000,000 yuan, and the millionaire has given the stranger 1,073,741,823 yuan.
"""
n = int(input())
poor = 0
rich = 1
richSum = 0
while n >= 1:
poor += 100000
richSum += rich
rich = rich * 2
n -= 1
print(poor)
print(richSum)
| xzl995/Python | CourseGrading/4.2.7换钱的交易.py | 4.2.7换钱的交易.py | py | 1,028 | python | zh | code | 3 | github-code | 36 |
238708718 | from scipy.misc import imsave, imresize
import numpy as np
from tqdm import tqdm
from LoadLightField import *
from LightFieldFunctions import *
from SaveLightField import *
from DepthFunctions import *
from scipy.interpolate import RectBivariateSpline
from tqdm import tqdm, trange
from time import time
from LightFieldStyleTransferMethods import *
from LightFieldStyleTransferPreprocessor import preprocess
import os
def ensure_dir(file_path):
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
os.makedirs(directory)
############################### User Parameters ##################################
loaddir = "lightfields/"
processdir = "preprocessed_data/"
savedir = "results/"
name = "Swans"
epsilon = 1.4
num_views = 9
modeldir = "StyleNet/"
preprocessing_view = 1 # Put 0 to skip preprocessing, 1 to calibrate based on adjacent view
tuned_k = False
vectorized = True
crop = 5
analysis_only = False # For an already generated result, regenerate only the visuals and loss values
# Select method parameters
method = "BP"
fuse_features = True
fuse_images = False
perceptual_loss = False
gibbs_loss = False
##################################################################################
LFdir = processdir + name + "LF/"
ensure_dir(LFdir)
if preprocessing_view:
preprocess(loaddir, LFdir, name, modeldir, num_views, crop, epsilon, preprocessing_view, tuned_k, vectorized)
# Define save location
folder_name = ""
if fuse_features:
folder_name += "FuseFeats"
if fuse_images:
folder_name += "FuseImages"
folder_name += method
if perceptual_loss:
folder_name += "PerceptualLoss"
if gibbs_loss:
folder_name += "GibbsLoss"
# Complete selected stylization method
start = time()
if method == "BP":
LFStyleTransferBP(name, LFdir, modeldir, savedir+name+"/"+folder_name+"/", num_views, fuse_features, fuse_images, perceptual_loss, gibbs_loss, analysis_only=analysis_only)
if method == "PostOpt":
LFStyleTransferPostOpt(name, LFdir, modeldir, savedir+name+"/"+folder_name+"/", num_views, fuse_features, fuse_images, perceptual_loss, gibbs_loss, analysis_only=analysis_only)
if method == "NoOpt":
LFStyleTransferNoOpt(name, LFdir, modeldir, savedir+name+"/"+folder_name+"/", num_views, fuse_features, fuse_images, analysis_only=analysis_only)
if method == "Gatys":
LFStyleTransferGatys(name, LFdir, modeldir, savedir+name+"/"+folder_name+"/", num_views, gibbs_loss = gibbs_loss, analysis_only = analysis_only)
end = time()
print("Done with style transfer " + folder_name + ". Total time: ", end-start)
| davidmhart/LightFieldStyleTransfer | LightFieldStyleTransfer.py | LightFieldStyleTransfer.py | py | 2,551 | python | en | code | 4 | github-code | 36 |
40264918799 | num = (float(input()))
number = round(100 * num)  # round(), not int(): 1.23 * 100 is 122.99999... in binary floating point
total = 0
while number != 0:
if number >= 200:
number -= 200
elif number >= 100:
number -= 100
elif number >= 50:
number -= 50
elif number >= 20:
number -= 20
elif number >= 10:
number -= 10
elif number >= 5:
number -= 5
elif number >= 2:
number -= 2
elif number >= 1:
number -= 1
total += 1
print(f"{total}")
# ------------------------------------- Problem to solve ------------------------------
#
# Vending machine manufacturers wanted their machines to return change using as few
# coins as possible. Write a program that reads an amount - the change to be
# returned - and computes the minimum number of coins that can make it up.
# Input      Output
# 1.23       4
# -------------------------
# 2          1
# -------------------------
# 0.56       3
# -------------------------
# 2.73       5
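# Why round() matters above (illustrative): 1.23 has no exact binary-float
# representation, so 1.23 * 100 evaluates to 122.99999999999999 and int() would
# truncate it to 122, producing a wrong coin count:
# >>> int(1.23 * 100), round(1.23 * 100)
# (122, 123)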
| ivoivanov0830006/1.1.Python_BASIC | 5.While_loops/*05.Vending_coins.py | *05.Vending_coins.py | py | 1,197 | python | bg | code | 1 | github-code | 36 |
10115576337 | # coding: utf-8
import pickle
if __name__ == '__main__':
with open('unsp_target_path_id.dump', 'rb') as f:
target_path_id = pickle.load(f)
with open('work/glove_index.dump', 'rb') as f:
glove_index = pickle.load(f)
with open('corpus/id_to_term.dump', 'rb') as f:
id_to_term = pickle.load(f)
w1_ids = []
w2_ids = []
path_ids = []
with open('corpus/id_triples', 'r') as f:
for line in f:
w1, w2, path = line.strip().split('\t')
w1 = id_to_term[int(w1)]
w2 = id_to_term[int(w2)]
            if int(path) in target_path_id and w1 in glove_index and w2 in glove_index:
w1_ids.append(glove_index[w1])
w2_ids.append(glove_index[w2])
path_ids.append(int(path))
unsp_data = (w1_ids, w2_ids, path_ids)
with open('unsp_data.dump', 'wb') as f:
pickle.dump(unsp_data, f)
| kwashio/filling_missing_path | unsp_data_making.py | unsp_data_making.py | py | 958 | python | en | code | 0 | github-code | 36 |
5339917183 | import numpy as np
from SMP.motion_planner.node import PriorityNode
from SMP.motion_planner.plot_config import DefaultPlotConfig
from SMP.motion_planner.search_algorithms.best_first_search import GreedyBestFirstSearch
from commonroad_route_planner.route_planner import RoutePlanner
class StudentMotionPlanner(GreedyBestFirstSearch):
"""
Motion planner implementation by students.
Note that you may inherit from any given motion planner as you wish, or come up with your own planner.
Here as an example, the planner is inherited from the GreedyBestFirstSearch planner.
"""
def __init__(self, scenario, planningProblem, automata, plot_config=DefaultPlotConfig):
super().__init__(scenario=scenario, planningProblem=planningProblem, automaton=automata,
plot_config=plot_config)
#use route_planner to get the reference route
self.route_planner = RoutePlanner(scenario = scenario,
planning_problem=planningProblem,
)
self.route_ref = self.route_planner.plan_routes().retrieve_best_route_by_orientation().reference_path
def evaluation_function(self, node_current: PriorityNode) -> float:
########################################################################
# todo: Implement your own evaluation function here. #
########################################################################
node_current.priority = self.heuristic_function(node_current)
return node_current.priority
def heuristic_function(self, node_current: PriorityNode) -> float:
########################################################################
# todo: Implement your own heuristic cost calculation here. #
# Hint: #
# Use the State of the current node and the information from the #
# planning problem, as well as from the scenario. #
# Some helper functions for your convenience can be found in #
# ./search_algorithms/base_class.py #
########################################################################
# 01 get the current state
path_last = node_current.list_paths[-1] #get the path
pos_current = path_last[-1].position #get the current position
time_step_current = path_last[-1].time_step #get the current timestep
vel_current = path_last[-1].velocity #get the current velocity
ori_current = path_last[-1].orientation #get the current orientation
if time_step_current > self.time_desired.end: #timeout
return np.inf
# 02 the distance from car to goal
if self.position_desired is not None:
dis_goal = self.calc_euclidean_distance(node_current)
else:
return self.myheuristic(node_current)
        # 03 the distance from car to the reference path
len_route_ref = len(self.route_ref)
index_route_ref = int((time_step_current / self.time_desired.end) * len_route_ref) - 1
dis_route_ref = self.myeuclidean(pos_current, self.route_ref[index_route_ref])
# 04 velocity
if self.position_desired is not None:
vel_ref = dis_goal / (self.time_desired.end - time_step_current + 0.1)
dif_vel = abs(vel_ref - vel_current)
else:
dif_vel = 0
return 1 * dis_goal + 1* dis_route_ref + 1 * dif_vel
    def myeuclidean(self, pos_1, pos_2) -> float:
        """
        Takes two positions.
        Returns the Euclidean distance between them.
        """
dis = np.sqrt(pow(pos_1[0] - pos_2[0],2) + pow(pos_1[1] - pos_2[1],2))
return dis
def myheuristic(self, node_current):
if self.reached_goal(node_current.list_paths[-1]):
return 0.0
if self.position_desired is None:
return self.time_desired.start - node_current.list_paths[-1][-1].time_step
else:
velocity = node_current.list_paths[-1][-1].velocity
if np.isclose(velocity, 0):
return np.inf
else:
return self.calc_euclidean_distance(current_node=node_current) / velocity
| HNYao/CR | student.py | student.py | py | 4,403 | python | en | code | 0 | github-code | 36 |
10167749049 | from socket_webserver import Socketserver
import json
# Creating server instance
server = Socketserver()
# Configuring host and port
server.host = '127.0.0.1'
server.port = 8080
""" Two example functions to return response. Upper one returns simple json response and lower one returns html response
You could create something like views.py for handler functions and urls.py for routes
'request' contains dictionary of all the request arguments currently supported """
def home(request):
html = '''
<h1>This text is big</h1>
<p>This is small</p>
'''
return html, 200, "text/html"
def demo(request):
data = {
'name': 'Custom api endpoint made with pywebserver',
'target': request['target'],
'data': [x * 2 for x in range(30)]
}
return json.dumps(data), 200, "application/json"
routes = {
'/': home,
'/home': demo
}
server.routes = routes # Apply routes for server
server.run()
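# Illustrative manual check once the server is running (assuming plain HTTP):
#   curl http://127.0.0.1:8080/      -> the HTML snippet returned by home()
#   curl http://127.0.0.1:8080/home  -> the JSON payload returned by demo()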
| miikalehtonen/pywebserver | main.py | main.py | py | 963 | python | en | code | 0 | github-code | 36 |
22460150491 | import zipper
import arcpy
try:
# Inputs
shapefile = arcpy.GetParameterAsText(0)
zipfile = arcpy.GetParameterAsText(1)
mode = arcpy.GetParameterAsText(2)
shape_zipper = zipper.ShapefileZipper() # Create Class Instance
result = shape_zipper.zip_shapefile(input_shapefile=shapefile, output_zipfile=zipfile,
zip_file_mode=mode)
if result:
arcpy.SetParameterAsText(3, result)
arcpy.AddMessage("!!!!!!!!!!!!!\n@@ SUCCESS @@\n!!!!!!!!!!!!!\nResult: " + result)
else:
arcpy.AddMessage("FILE NOT CREATED")
except Exception:
    arcpy.AddError("Oops...something done broke.") | igrasshoff/zip-shapefiles | ScriptToolZipSingleShapefile.py | ScriptToolZipSingleShapefile.py | py | 657 | python | en | code | 3 | github-code | 36 |
33513804346 | # -*- coding: utf-8 -*-
from collective.documentgenerator.helper.base import DisplayProxyObject
from collective.documentgenerator.helper.base import DocumentGenerationHelperView
from collective.eeafaceted.dashboard.testing import IntegrationTestCase
from DateTime import DateTime
from eea.facetednavigation.interfaces import ICriteria
from plone import api
class TestDocumentGeneration(IntegrationTestCase):
"""Test the document-generation that has been overrided from
collective.documentgenerator to be 'dashboard aware'."""
def setUp(self):
""" """
super(TestDocumentGeneration, self).setUp()
# create a folder2 that will be displayed in the dashboard
self.folder2 = api.content.create(id='folder2',
type='Folder',
title='Folder 2',
container=self.portal)
self.folder2.creation_date = self.folder2.created() - 1
self.folder2.reindexObject()
self.dashboardtemplate = api.content.create(
id='dashboardtemplate',
type='DashboardPODTemplate',
title='Dashboard template',
enabled=True,
context_variables=[{'name': 'details', 'value': '1'}],
container=self.folder2,
)
self.view = self.folder.restrictedTraverse('@@document-generation')
self.helper = self.view.get_generation_context_helper()
def test_get_generation_context(self):
"""
        Changes are about 'uids' and 'brains' that are added to the
        POD template generation context when possible.
        If nothing particular is done, every element of the displayed
        dashboard is added to the template generation context.
"""
# document-generator view is called outside dashboard from base viewlet
gen_context = self.view._get_generation_context(self.helper, self.dashboardtemplate)
self.assertIn('view', gen_context)
self.assertNotIn('facetedQuery', gen_context)
self.assertIn('details', gen_context)
# document-generator view is called from dashboard viewlet
self.request.form['facetedQuery'] = ''
# order is respected so sort_on created
# Date catalog queries are 1 minute sensitive...
# make sure self.folder created is really older than self.folder2
self.folder.creation_date = DateTime('2015/01/01 12:00')
self.folder.reindexObject()
self.assertEquals(ICriteria(self.folder).get('c0').widget,
u'sorting')
self.request.form['c0[]'] = 'created'
self.assertEqual(self.dashboardtemplate.max_objects, 500)
gen_context = self.view._get_generation_context(self.helper, self.dashboardtemplate)
self.assertTrue('uids' in gen_context)
self.assertEquals(len(gen_context['uids']), 3)
self.assertTrue('brains' in gen_context)
self.assertEquals(len(gen_context['brains']), 3)
self.dashboardtemplate.max_objects = 2
gen_context = self.view._get_generation_context(self.helper, self.dashboardtemplate)
self.assertEquals(len(gen_context['uids']), 2)
self.assertEquals(len(gen_context['brains']), 2)
self.dashboardtemplate.max_objects = 3
gen_context = self.view._get_generation_context(self.helper, self.dashboardtemplate)
self.assertEquals(len(gen_context['uids']), 3)
self.assertEquals(len(gen_context['brains']), 3)
self.assertEqual(gen_context['details'], '1')
# brains are sorted according to uids list
self.assertEquals(gen_context['uids'],
[brain.UID for brain in gen_context['brains']])
        # we have 3 elements in the dashboard: self.folder, self.folder2 and the template
self.assertListEqual(['Folder', 'Folder 2', 'Dashboard template'],
[brain.Title for brain in gen_context['brains']])
# order of query is kept in brains
self.request.form['reversed'] = 'on'
gen_context = self.view._get_generation_context(self.helper, self.dashboardtemplate)
self.assertListEqual(['Dashboard template', 'Folder 2', 'Folder'],
[brain.Title for brain in gen_context['brains']])
def test_get_generation_context_filtered_query(self):
"""
If a filter is used in the facetedQuery, elements displayed
in the dashboard are correctly given to the template.
"""
faceted_query = self.folder.restrictedTraverse('@@faceted_query')
# for now 3 elements
self.assertEquals(len(faceted_query.query()), 3)
# filter on text, 'Folder 2'
self.assertEquals(ICriteria(self.folder).get('c2').index,
u'SearchableText')
self.request.form['c2[]'] = 'Folder 2'
self.assertEquals(len(faceted_query.query()), 1)
# generation context respect query
self.request.form['facetedQuery'] = ''
gen_context = self.view._get_generation_context(self.helper, self.dashboardtemplate)
self.assertEquals(len(gen_context['uids']), 1)
# facetedQuery is passed to the generation context as json
# reset query, back to 3 elements found
self.request.form = {}
self.assertEquals(len(faceted_query.query()), 3)
self.request.form['facetedQuery'] = ''
gen_context = self.view._get_generation_context(self.helper, self.dashboardtemplate)
self.assertEquals(len(gen_context['uids']), 3)
# 'facetedQuery' is received as a serialized JSON of query criteria
self.request.form['facetedQuery'] = '{"c2":"Folder 2"}'
gen_context = self.view._get_generation_context(self.helper, self.dashboardtemplate)
self.assertEquals(len(gen_context['uids']), 1)
def test_get_generation_context_filtered_uids(self):
"""We may also filter 'uids' directly if set in the REQUEST."""
        # for now 3 elements
self.request.form['facetedQuery'] = ''
gen_context = self.view._get_generation_context(self.helper, self.dashboardtemplate)
self.assertEquals(len(gen_context['uids']), 3)
self.assertEquals(len(gen_context['brains']), 3)
self.request.form['uids'] = self.folder.UID()
gen_context = self.view._get_generation_context(self.helper, self.dashboardtemplate)
self.assertEquals(len(gen_context['uids']), 1)
self.assertEquals(len(gen_context['brains']), 1)
def test_generation_context_with_use_objects(self):
"""Activate the field 'use_object' on the dashboard POD template"""
self.request.form['facetedQuery'] = ''
gen_context = self.view._get_generation_context(self.helper, self.dashboardtemplate)
# so far, no objects in the generation context
self.assertEquals(gen_context.get('objects'), None)
# enable 'use_objects'
self.dashboardtemplate.use_objects = True
gen_context = self.view._get_generation_context(self.helper, self.dashboardtemplate)
self.assertEquals(len(gen_context['objects']), 3)
self.assertEquals(len(gen_context['all']), 3)
objs = [b.getObject() for b in gen_context['brains']]
for proxy_obj, helper in gen_context['objects']:
self.assertTrue(isinstance(proxy_obj, DisplayProxyObject))
self.assertTrue(isinstance(helper, DocumentGenerationHelperView))
self.assertTrue(proxy_obj.context in objs)
self.assertTrue(helper.real_context in objs)
for brain, proxy_obj, helper in gen_context['all']:
self.assertTrue(isinstance(proxy_obj, DisplayProxyObject))
self.assertTrue(isinstance(helper, DocumentGenerationHelperView))
self.assertTrue(proxy_obj.context == brain.getObject())
self.assertTrue(helper.real_context == brain.getObject())
| collective/collective.eeafaceted.dashboard | src/collective/eeafaceted/dashboard/tests/test_documentgeneration.py | test_documentgeneration.py | py | 7,963 | python | en | code | 2 | github-code | 36 |
70447240745 | import os
import shutil
import subprocess
import random
import string
from cdifflib import CSequenceMatcher
from pathlib import Path
from typing import Any
from urllib.request import urlopen
import numpy as np
from rich import print as print
from shapely.geometry import MultiPolygon
from sqlalchemy import text
from src.db.db import Database
from functools import wraps
from src.core.enums import IfExistsType
import polars as pl
import csv
from io import StringIO
import time
from src.core.enums import TableDumpFormat
from src.core.config import settings
def timing(f):
@wraps(f)
def wrap(*args, **kw):
ts = time.time()
result = f(*args, **kw)
te = time.time()
total_time = te - ts
if total_time > 1:
total_time = round(total_time, 2)
total_time_string = f"{total_time} seconds"
elif total_time > 0.001:
            time_milliseconds = int(total_time * 1000)
            total_time_string = f"{time_milliseconds} milliseconds"
else:
time_microseconds = int((total_time) * 1000000)
total_time_string = f"{time_microseconds} microseconds"
print(f"func: {f.__name__} took: {total_time_string}")
return result
return wrap
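# Illustrative usage of the decorator above:
# @timing
# def slow_step():
#     time.sleep(0.5)
# slow_step()  # prints something like: func: slow_step took: 0.5 seconds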
def make_dir(dir_path: str):
"""Creates a new directory if it doesn't already exist"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def delete_file(file_path: str) -> None:
"""Delete file from disk."""
try:
os.remove(file_path)
    except OSError:
        pass
def delete_dir(dir_path: str) -> None:
"""Delete file from disk."""
try:
shutil.rmtree(dir_path)
    except OSError:
        pass
def replace_dir(dir_path: str) -> None:
"""Delete folder from disk and recreate empty one with same path."""
delete_dir(dir_path)
os.mkdir(dir_path)
def print_hashtags():
print(
"#################################################################################################################"
)
def print_separator_message(message: str):
print_hashtags()
print_info(message)
print_hashtags()
def print_info(message: str):
print(f"[bold green]INFO[/bold green]: {message}")
def print_error(message: str):
print(f"[bold red]ERROR[/bold red]: {message}")
def print_warning(message: str):
print(f"[red magenta]WARNING[/red magenta]: {message}")
def download_link(directory: str, link: str, new_filename: str = None):
if new_filename is not None:
filename = new_filename
else:
filename = os.path.basename(link)
download_path = Path(directory) / filename
with urlopen(link) as image, download_path.open("wb") as f:
f.write(image.read())
print_info(f"Downloaded ended for {link}")
def check_string_similarity(
input_value: str, match_values: list[str], target_ratio: float
) -> bool:
"""Check if a string is similar to a list of strings.
Args:
input_value (str): Input value to check.
match_values (list[str]): List of strings to check against.
target_ratio (float): Target ratio to match.
Returns:
bool: True if the input value is similar to one of the match values.
"""
for match_value in match_values:
if input_value in match_value or match_value in input_value:
return True
elif CSequenceMatcher(None, input_value, match_value).ratio() >= target_ratio:
return True
return False
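# Illustrative call: the ratio test catches near-matches that plain containment misses.
# >>> check_string_similarity("super market", ["supermarket", "mall"], 0.8)
# True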
def check_string_similarity_bulk(
input_value: str, match_dict: dict, target_ratio: float
) -> bool:
"""Check if a string is similar to a dictionary with lists of strings.
Args:
input_value (str): Input value to check.
match_dict (dict): Dictionary with lists of strings to check against.
target_ratio (float): Target ratio to match.
Returns:
bool: True if the input value is similar to one of the match values.
"""
if input_value is None:
return False
for key, match_values in match_dict.items():
if check_string_similarity(
match_values=match_values,
input_value=input_value.lower(),
target_ratio=target_ratio,
):
return True
return False
vector_check_string_similarity_bulk = np.vectorize(check_string_similarity_bulk)
def create_pgpass(db_config):
"""Creates pgpass file for specified DB config
Args:
db_config: Database configuration.
"""
db_name = db_config.path[1:]
delete_file(f"""~/.pgpass_{db_name}""")
os.system(
"echo "
+ ":".join(
[
db_config.host,
str(db_config.port),
db_name,
db_config.user,
db_config.password,
]
)
+ f""" > ~/.pgpass_{db_name}"""
)
os.system(f"""chmod 0600 ~/.pgpass_{db_name}""")
def check_table_exists(db, table_name: str, schema: str) -> bool:
"""_summary_
Args:
db (_type_): _description_
table_name (str): _description_
schema (str): _description_
Returns:
bool: _description_
"""
check_if_exists = db.select(
f"""
SELECT EXISTS (
SELECT FROM information_schema.tables
WHERE table_schema = '{schema}'
AND table_name = '{table_name}'
);"""
)
return check_if_exists[0][0]
def create_table_dump(
db_config: dict, schema: str, table_name: str, data_only: bool = False
):
"""Create a dump from a table
Args:
        db_config (dict): Database configuration.
        table_name (str): Table name (without the schema prefix).
        schema (str): Schema name.
        data_only (bool, optional): Whether to dump data only. Defaults to False.
"""
try:
dir_output = os.path.join(settings.OUTPUT_DATA_DIR, table_name + ".dump")
# Delete the file if it already exists
delete_file(dir_output)
# Set the password to the environment variable
os.environ["PGPASSWORD"] = db_config.password
# Construct the pg_dump command
command = [
"pg_dump",
"-h",
db_config.host,
"-p",
            str(db_config.port),  # subprocess arguments must be strings
"-U",
db_config.user,
"-d",
db_config.path[1:],
"-t",
f"{schema}.{table_name}",
"-F",
"c",
"-f",
dir_output,
"--no-owner",
]
# Append to the end of the command if it is a data only dump
if data_only == True:
command.append("--data-only")
# Run the pg_dump command and capture the output
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
print_info(f"Successfully dumped {schema}.{table_name} to {dir_output}")
except Exception as e:
print_warning(f"The following exeption happened when dumping {table_name}: {e}")
def restore_table_dump(
db_config: dict, schema: str, table_name: str, data_only: bool = False
):
"""Restores the dump from a table
Args:
db_config (dict): Database configuration dictionary.
        table_name (str): Table name (without the schema prefix).
        data_only (bool, optional): Whether to restore data only. Defaults to False.
Raises:
ValueError: If the file is not found.
"""
# Define the output directory
dir_output = os.path.join(settings.OUTPUT_DATA_DIR, table_name + ".dump")
# Check if the file exists
if not os.path.isfile(dir_output):
raise ValueError(f"File {dir_output} does not exist")
try:
# Set the password to the environment variable
os.environ["PGPASSWORD"] = db_config.password
# Construct the pg_dump command
command = [
"pg_restore",
"-h",
db_config.host,
"-p",
            str(db_config.port),  # subprocess arguments must be strings
"-U",
db_config.user,
"-d",
db_config.path[1:],
"--no-owner",
"--no-privileges",
dir_output,
]
# Append to -2 position of the command if it is a data only dump
if data_only == True:
command.insert(-2, "--data-only")
# Run the command
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
print_info(f"Successfully restored {table_name}.dump from {dir_output}")
except Exception as e:
print_warning(
f"The following exeption happened when restoring {table_name}: {e}"
)
def create_table_schema(db: Database, table_full_name: str):
"""Function that creates a table schema from a database dump.
Args:
db (Database): Database connection class.
table_full_name (str): Name with the schema of the table (e.g. basic.poi).
"""
db_config = db.db_config
db.perform(query="CREATE SCHEMA IF NOT EXISTS basic;")
db.perform(query="CREATE SCHEMA IF NOT EXISTS extra;")
db.perform(query="DROP TABLE IF EXISTS %s" % table_full_name)
table_name = table_full_name.split(".")[1]
# Set the password to the environment variable
os.environ["PGPASSWORD"] = db_config.password
subprocess.run(
f'pg_restore -U {db_config.user} --schema-only -h {db_config.host} --no-owner -n basic -d {db_config.path[1:]} -t {table_name} {"/app/src/data/input/dump.tar"}',
shell=True,
check=True,
)
    # TODO: temporary fix to convert poi.id to a serial instead of an integer
db.perform(
f"""
ALTER TABLE {table_full_name} DROP COLUMN IF EXISTS id;
ALTER TABLE {table_full_name} ADD COLUMN id SERIAL;
"""
)
def create_standard_indices(db: Database, table_full_name: str):
"""Create standard indices for the database on the id and geometry column.
Args:
db (Database): Database connection class.
"""
db.perform(
f"""
ALTER TABLE {table_full_name} ADD PRIMARY KEY (id);
CREATE INDEX IF NOT EXISTS {table_full_name.replace('.', '_')}_geom_idx ON {table_full_name} USING GIST (geom);
"""
)
def download_dir(prefix, local, bucket, client):
"""Downloads data directory from AWS S3
Args:
prefix (str): Path to the directory in S3
local (str): Path to the local directory
bucket (str): Name of the S3 bucket
client (obj): S3 client object
"""
keys = []
dirs = []
next_token = ""
base_kwargs = {
"Bucket": bucket,
"Prefix": prefix,
}
while next_token is not None:
kwargs = base_kwargs.copy()
if next_token != "":
kwargs.update({"ContinuationToken": next_token})
results = client.list_objects_v2(**kwargs)
contents = results.get("Contents")
for i in contents:
k = i.get("Key")
if k[-1] != "/":
keys.append(k)
else:
dirs.append(k)
next_token = results.get("NextContinuationToken")
for d in dirs:
dest_pathname = os.path.join(local, d)
if not os.path.exists(os.path.dirname(dest_pathname)):
os.makedirs(os.path.dirname(dest_pathname))
for k in keys:
dest_pathname = os.path.join(local, k)
if not os.path.exists(os.path.dirname(dest_pathname)):
os.makedirs(os.path.dirname(dest_pathname))
client.download_file(bucket, k, dest_pathname)
def upload_dir(prefix, local, bucket, client):
"""Uploads data directory to AWS S3
Args:
prefix (str): Path to the directory in S3
local (str): Path to the local directory
bucket (str): Name of the S3 bucket
client (obj): S3 client object
"""
for root, dirs, files in os.walk(local):
        for filename in files:
            # construct the full local path
            local_path = os.path.join(root, filename)
            # build the S3 key from the path relative to 'local' and upload the file
            relative_path = os.path.relpath(local_path, local)
            s3_path = os.path.join(prefix, relative_path)
            client.upload_file(local_path, bucket, s3_path)
def parse_poly(dir):
"""Parse an Osmosis polygon filter file.
Based on: https://wiki.openstreetmap.org/wiki/Osmosis/Polygon_Filter_File_Python_Parsing
Args:
dir (str): Path to the polygon filter file.
Returns:
        (shapely.geometry.multipolygon): Returns the polygon in the poly format as a shapely multipolygon.
"""
in_ring = False
coords = []
with open(dir, "r") as polyfile:
for index, line in enumerate(polyfile):
if index == 0:
# first line is junk.
continue
elif index == 1:
# second line is the first polygon ring.
coords.append([[], []])
ring = coords[-1][0]
in_ring = True
elif in_ring and line.strip() == "END":
# we are at the end of a ring, perhaps with more to come.
in_ring = False
elif in_ring:
# we are in a ring and picking up new coordinates.
ring.append(list(map(float, line.split())))
elif not in_ring and line.strip() == "END":
# we are at the end of the whole polygon.
break
elif not in_ring and line.startswith("!"):
# we are at the start of a polygon part hole.
coords[-1][1].append([])
ring = coords[-1][1][-1]
in_ring = True
elif not in_ring:
# we are at the start of a polygon part.
coords.append([[], []])
ring = coords[-1][0]
in_ring = True
return MultiPolygon(coords)
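# For reference, parse_poly expects the Osmosis .poly layout (values illustrative):
#   some_region
#   polygon
#       13.35 52.45
#       13.55 52.45
#       13.45 52.55
#   END
#   END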
# Copied from https://pynative.com/python-generate-random-string/
def get_random_string(length):
# choose from all lowercase letter
letters = string.ascii_lowercase
result_str = "".join(random.choice(letters) for i in range(length))
return result_str
def psql_insert_copy(table, conn, keys, data_iter):
"""
Execute SQL statement inserting data
Parameters
----------
table : pandas.io.sql.SQLTable
conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
keys : list of str
Column names
data_iter : Iterable that iterates the values to be inserted
"""
# gets a DBAPI connection that can provide a cursor
dbapi_conn = conn.connection
with dbapi_conn.cursor() as cur:
s_buf = StringIO()
writer = csv.writer(s_buf)
writer.writerows(data_iter)
s_buf.seek(0)
columns = ", ".join(['"{}"'.format(k) for k in keys])
if table.schema:
table_name = "{}.{}".format(table.schema, table.name)
else:
table_name = table.name
if "this_is_the_geom_column" in keys:
columns.replace(
"this_is_the_geom_column", "ST_GEOMFROMTEXT(this_is_the_geom_column)"
)
if "this_is_the_jsonb_column" in keys:
columns.replace(
"this_is_the_jsonb_column", "this_is_the_jsonb_column::jsonb"
)
sql = "COPY {} ({}) FROM STDIN WITH CSV".format(table_name, columns)
cur.copy_expert(sql=sql, file=s_buf)
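# Illustrative pandas usage of the COPY-based insert above:
# df.to_sql("my_table", engine, schema="public", if_exists="append",
#           index=False, method=psql_insert_copy)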
# TODO: Finish docstring and add comments. Check error handling
def polars_df_to_postgis(
engine,
df: pl.DataFrame,
table_name: str,
schema: str = "public",
if_exists: IfExistsType = "replace",
geom_column: str = "geom",
srid: int = 4326,
create_geom_index: bool = True,
jsonb_column: str = False,
):
"""Blazing fast method to import a polars DataFrame into a PostGIS database with geometry and JSONB column.
Avoid using 'this_is_the_geom_column' and 'this_is_the_jsonb_column' as column names in the dataframe as they are reserved for the geometry and JSONB columns during the import.
Args:
engine (SQLAlchemy): SQLAlchemy engine
df (pl.DataFrame): Polars DataFrame
table_name (str): Name of the table to be created
schema (str, optional): Schema name. Defaults to "public".
if_exists (IfExistsType, optional): What should happen if table exist. There are the options: 'fail', 'append', 'replace'. Defaults to "replace".
geom_column (str, optional): What is the name of the geometry column in the dataframe. The geometry column should be a WKT string. The same name will also be used in the PostGIS table. Defaults to "geom".
srid (int, optional): What is the SRID of the geom. Defaults to 4326.
create_geom_index (bool, optional): Should a GIST index be created on the geometry. Defaults to True.
        jsonb_column (str, optional): Name of the column that should be added as JSONB. Defaults to False.
Raises:
ValueError: Name of the geometry column is not in the dataframe
ValueError: Name of the JSONB column is not in the dataframe
ValueError: If the if_exists parameter is 'fail'
"""
# make a connection
df_pd = df.to_pandas()
db = engine.connect()
    # Accept both enum members and raw strings for if_exists
    if isinstance(if_exists, IfExistsType):
        if_exists = if_exists.value
    # Check if table should be created or appended
    if if_exists == IfExistsType.replace.value:
df_pd.head(0).to_sql(
table_name,
engine,
method=psql_insert_copy,
index=False,
if_exists=IfExistsType.replace.value,
chunksize=1,
schema=schema,
)
print_info("Table {} will be created in schema {}.".format(table_name, schema))
columns_to_rename = {}
# Check if geom column exists and if it should be converted to geometry
if geom_column in df_pd.columns and geom_column is not None:
# Get a uuid column
random_column_name_geom = "this_is_the_geom_column"
db.execute(
text(
"ALTER TABLE {}.{} RENAME COLUMN {} TO {};".format(
schema, table_name, geom_column, random_column_name_geom
)
)
)
db.execute(
text(
"ALTER TABLE {}.{} ALTER COLUMN {} TYPE geometry;".format(
schema, table_name, random_column_name_geom
)
)
)
db.execute(
text(
"SELECT UpdateGeometrySRID('{}','{}','{}', {})".format(
schema, table_name, random_column_name_geom, srid
)
)
)
columns_to_rename[geom_column] = random_column_name_geom
elif geom_column not in df_pd.columns and geom_column is not None:
raise ValueError("Spefified column for Geometry not found in DataFrame")
if jsonb_column in df_pd.columns and jsonb_column is not None:
random_column_name_jsonb = "this_is_the_jsonb_column"
db.execute(
text(
"ALTER TABLE {}.{} RENAME COLUMN {} TO {};".format(
schema, table_name, jsonb_column, random_column_name_jsonb
)
)
)
db.execute(
text(
"ALTER TABLE {}.{} ALTER COLUMN {} TYPE JSONB USING {}::jsonb".format(
schema,
table_name,
random_column_name_jsonb,
random_column_name_jsonb,
)
)
)
columns_to_rename[jsonb_column] = random_column_name_jsonb
elif jsonb_column not in df_pd.columns and jsonb_column is not None:
raise ValueError("Spefified column for JSONB not found in DataFrame")
    elif if_exists == IfExistsType.append.value:
        print_info("Table {} in schema {} already exists".format(table_name, schema))
    elif if_exists == IfExistsType.fail.value:
raise ValueError(
"Table {} in schema {} already exists".format(table_name, schema)
)
df_pd = df_pd.rename(columns=columns_to_rename)
# Insert data into table
df_pd.to_sql(
table_name,
engine,
method=psql_insert_copy,
index=False,
if_exists="append",
chunksize=10000,
schema=schema,
)
# Rename columns back to original names
if "this_is_the_geom_column" in df_pd.columns:
db.execute(
text(
"ALTER TABLE {}.{} RENAME COLUMN this_is_the_geom_column TO {};".format(
schema, table_name, geom_column
)
)
)
if "this_is_the_jsonb_column" in df_pd.columns:
db.execute(
text(
"ALTER TABLE {}.{} RENAME COLUMN this_is_the_jsonb_column TO {};".format(
schema, table_name, jsonb_column
)
)
)
# Create index on geom column if it does not exist and is desired
if create_geom_index == True:
        rows = db.execute(
            text(
                "SELECT indexdef FROM pg_indexes WHERE tablename = '{}';".format(
                    table_name
                )
            )
        ).fetchall()
        # Concatenate the index definitions so they can be searched as plain text
        index_defs = " ".join(row[0] for row in rows)
        if "gist" not in index_defs.lower() or "(geom)" not in index_defs:
print_info("Creating index on geom column")
db.execute(
text(
"CREATE INDEX ON {}.{} USING GIST (geom);".format(
schema, table_name
)
)
)
else:
print_info("GIST-Index on geom column already exists")
# Close connection
db.close()
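# A minimal sketch of calling the importer above (connection string, table and
# column names are illustrative assumptions):
# engine = create_engine("postgresql://user:password@localhost:5432/mydb")
# df = pl.DataFrame({"name": ["a"], "geom": ["POINT(13.4 52.5)"]})
# polars_df_to_postgis(engine, df, "demo_points", schema="public",
#                      geom_column="geom", srid=4326)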
def osm_crop_to_polygon(orig_file_path: str, dest_file_path: str, poly_file_path: str):
"""
Crops OSM data as per polygon file
Args:
orig_file_path (str): Path to the input OSM data file
dest_file_path (str): Path to the output OSM data file (incl. filename with extension ".pbf") where OSM data is to be written
poly_file_path (str): Path to a polygon filter file (as per the format described here: https://wiki.openstreetmap.org/wiki/Osmosis/Polygon_Filter_File_Format)
"""
subprocess.run(
f"osmconvert {orig_file_path} -B={poly_file_path} --complete-ways -o={dest_file_path}",
shell=True,
check=True,
)
def osm_generate_polygon(db_rd, geom_query: str, dest_file_path: str):
"""
Generates a polygon filter file for cropping OSM data
Args:
db_rd (Database): A database connection object
geom_query (str): The query to be run for retrieving geometry data for a region (returned column must be named "geom")
dest_file_path (str): Path to the output file (incl. filename with extension ".poly") where polygon data is to be written
"""
coordinates = db_rd.select(f"""SELECT ST_x(coord.geom), ST_y(coord.geom)
FROM (
SELECT (ST_dumppoints(geom_data.geom)).geom
FROM (
{geom_query}
) geom_data
) coord;"""
)
with open(dest_file_path, "w") as file:
file.write("1\n")
file.write("polygon\n")
file.write("\n".join([f" {i[0]} {i[1]}" for i in coordinates]))
file.write("\nEND\nEND")
| goat-community/data_preparation | src/utils/utils.py | utils.py | py | 23,625 | python | en | code | 0 | github-code | 36 |
38406688388 | import pandas as pd
import matplotlib.pyplot as plt
import re # regular expression
df = pd.read_csv('./csv/Travel details dataset.csv')
# drop the rows with missing values
df = df.dropna()
# [OPTIONAL] pick country name only after the comma
df['Destination'] = df['Destination'].apply(lambda x: x.split(', ')[1] if ',' in x else x)
# Convert the date columns to datetime
df['Start date']=pd.to_datetime(df['Start date'])
df['End date']=pd.to_datetime(df['End date'])
# Modify column for cost, format it into numerical values
# define a regular expression pattern to match numeric values
pattern = re.compile(r'\d+(,\d+)*\.?\d*')
# apply the regular expression pattern to the column and convert the resulting strings to numeric data type
df['Accommodation cost'] = df['Accommodation cost'].apply(lambda x: float(pattern.search(x).group().replace(',', '')) if pattern.search(x) else None)
df['Transportation cost'] = df['Transportation cost'].apply(lambda x: float(pattern.search(x).group().replace(',', '')) if pattern.search(x) else None)
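# e.g. pattern.search("$1,200.50").group() -> "1,200.50"; stripping the comma and
# casting gives 1200.5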
def show_chart_1():
"""Number of trips per Destination Chart"""
# Count the number of trips per destination
trips_per_destination = df['Destination'].value_counts()
# create smaller figure
fig, ax = plt.subplots(figsize=(6, 3))
ax.bar(x=trips_per_destination.index, height=trips_per_destination.values)
fig.subplots_adjust(bottom=0.3)
# plt.bar(x=trips_per_destination.index, height=trips_per_destination.values)
plt.xlabel('Tujuan Wisata')
plt.ylabel('Jumlah')
plt.xticks(rotation=360-90)
plt.show()
# Accommodation type distribution
def show_chart_2():
    """Accommodation type distribution"""
trips_per_accommodation_type = df['Accommodation type'].value_counts()
plt.pie(x=trips_per_accommodation_type.values, labels=trips_per_accommodation_type.index, autopct='%1.1f%%')
plt.title('Tipe Akomodasi')
plt.show()
# Number of trips per month
def show_chart_3():
"""Number of trips per month"""
# Give choice to user to select month or year
print('Pilih periode')
print('1. Bulan (M)')
print('2. Tahun (Y)')
inp = ''
while inp.upper() != 'M' and inp.upper() != 'Y':
inp = input('Periode (M/Y) : ')
if inp.upper() == 'M' or inp.upper() == 'Y':
break
else:
print('Tolong masukkan huruf M atau Y')
# Convert start date to datetime
df['Start date'] = pd.to_datetime(df['Start date'])
# Group the trips by month
if inp.upper() == 'M':
trips_per_month = df.groupby(df['Start date'].dt.strftime('%Y-%m'))['Trip ID'].count()
else:
trips_per_month = df.groupby(df['Start date'].dt.strftime('%Y'))['Trip ID'].count()
fig, ax = plt.subplots(figsize=(10, 5))
# need more space because of the month labels longer than year labels
if inp.upper() == 'M':
fig.subplots_adjust(bottom=.15)
plt.plot(trips_per_month.index, trips_per_month.values)
plt.xticks(rotation=360-90)
plt.xlabel('Bulan')
if inp.upper() == 'Y':
plt.bar(x=trips_per_month.index, height=trips_per_month.values)
plt.xlabel('Tahun')
plt.ylabel('Jumlah')
plt.show()
# Traveler nationality distribution
def show_chart_4():
    """Traveler nationality distribution"""
nationalities = df['Traveler nationality'].value_counts()
fig, ax = plt.subplots(figsize=(10, 5))
fig.subplots_adjust(bottom=.25)
plt.bar(x=nationalities.index, height=nationalities.values)
plt.xlabel("Negara asal wisatawan")
plt.ylabel("Jumlah")
plt.xticks(rotation=360-90)
plt.show()
# Transportation cost by Accommodation type
def show_chart_5():
    """Transportation cost by Accommodation type"""
# box chart with x = accomodation_type y=accomodation_cost
labels = df['Accommodation type'].unique()
all_data = [df[df['Accommodation type'] == accomodation_type]['Transportation cost'] for accomodation_type in labels]
plt.boxplot(all_data, labels=labels)
plt.gca().yaxis.set_major_formatter(plt.FuncFormatter(lambda x, loc: "${:,}".format(int(x))))
plt.xlabel('Tipe Akomodasi')
plt.ylabel('Biaya Transportasi')
plt.title('Biaya Transportasi Berdasarkan Tipe Akomodasi')
plt.show()
def show_chart_6():
"""Number of Trips per Gender"""
gender_counts = df['Traveler gender'].value_counts()
fig, ax = plt.subplots(figsize=(6, 3))
labels_name = {'Male': 'Laki Laki', 'Female': 'Perempuan'} # translation purpose
labels_color = {'Male': '#3258a8', 'Female': '#f59dd0'} # blue and pink
label = gender_counts.index.map(lambda x: labels_name[x])
colors = gender_counts.index.map(lambda x: labels_color[x])
plt.pie(x=gender_counts.values, labels=label, autopct='%1.1f%%', colors=colors)
plt.title('Data Wisatawan Berdasarkan Jenis Kelamin')
plt.show()
| mbenkzz/pyt11kelompok13 | functions.py | functions.py | py | 4,955 | python | en | code | 0 | github-code | 36 |
19150508999 | import tensorflow as tf
from .util.datasetUtil import dataset , filelength
from tensorflow.keras.applications import VGG16,VGG19 ,InceptionV3
from .util.Callbacks import CustomCallback
import datetime
class inference_load():
def __init__(self,params,csvPath):
print(params)
self.csvPath = './dataset/dataset.csv'
self.evalPath = './dataset/Evaldataset.csv'
self.model = params['model']
self.inputShape = (int(params['inputShape']),int(params['inputShape']),3)
self.include_top = False
self.loss = params['loss']
self.optimizer = params['optimizer']
self.batch_size = int(params['batch_size'])
#self.metrics = [metric for metric in params['metrics']]
self.metrics = ['acc']
self.n_classes = params['n_classes']
self.learning_rate = float(params['learning_rate'])
self.epochs = int(params['epochs'])
self.modelOutputPath = 'server/results/{datetime}_{epochs}_saved_model.h5'.format(
datetime = str(datetime.datetime.now())[:10].replace('-','_'),
epochs = self.epochs
)
def load(self,x):
if(self.model.lower() == 'vgg16'):
model = VGG16(include_top=self.include_top,input_shape = self.inputShape,weights='imagenet',input_tensor=x)
return model
elif(self.model.lower() == 'vgg19'):
model = VGG19(include_top=self.include_top,input_shape = self.inputShape,weights='imagenet',input_tensor=x)
return model
elif(self.model.lower() == 'inception'):
model = InceptionV3(include_top=self.include_top,input_shape=self.inputShape,weights='imagenet',input_tensor=x)
return model
def lossParser(self):
if self.loss == 'Categorical Cross Entropy':
return 'categorical_crossentropy'
elif self.loss == 'Binary Cross Entropy':
return 'binary_crossentropy'
elif self.loss == 'Hinge':
return 'categorical_hinge'
elif self.loss == 'Mean Square Error':
return 'mean_squared_error'
def OptimizerSelector(self):
if self.optimizer == 'Adam':
return tf.keras.optimizers.Adam(learning_rate=self.learning_rate)
elif self.optimizer == 'SGD':
return tf.keras.optimizers.SGD(learning_rate=self.learning_rate)
elif self.optimizer == 'RMSProp':
return tf.keras.optimizers.RMSprop(learning_rate=self.learning_rate)
elif self.optimizer == 'Adadelta':
return tf.keras.optimizers.Adadelta(learning_rate=self.learning_rate)
elif self.optimizer == 'Adagrad':
return tf.keras.optimizers.Adagrad(learning_rate=self.learning_rate)
elif self.optimizer == 'Nadam':
return tf.keras.optimizers.Nadam(learning_rate=self.learning_rate)
        elif self.optimizer == 'AdaMax':
            return tf.keras.optimizers.Adamax(learning_rate=self.learning_rate)
    # For vanishing gradients: attach L2 weight regularization to every layer that supports it
    def add_regularization(self, model, regularizer=tf.keras.regularizers.l2(0.0001)):
if not isinstance(regularizer, tf.keras.regularizers.Regularizer):
return model
for layer in model.layers:
for attr in ['kernel_regularizer']:
if hasattr(layer, attr):
setattr(layer, attr, regularizer)
return model
def run(self):
trainDataset , testDataset = dataset(self.csvPath,self.evalPath,self.batch_size,self.inputShape[:2])
if len(trainDataset) == 0:
return -1
file_length = filelength(self.csvPath)
x = tf.keras.Input(shape=self.inputShape)
model = self.load(x)
model = self.add_regularization(model)
flat_layer = tf.keras.layers.Flatten()(model.layers[-1].output)
        classification = tf.keras.layers.Dense(int(self.n_classes), activation='softmax')(flat_layer)
        steps_per_epoch = int(file_length / self.batch_size)
        model = tf.keras.Model(inputs=x, outputs=classification)
model.compile(loss=self.lossParser(),metrics=self.metrics,optimizer=self.OptimizerSelector())
model.summary()
        model.fit(  # fit_generator is deprecated; fit accepts generators directly
trainDataset,
validation_data=testDataset,
validation_steps=5,
epochs=self.epochs,
steps_per_epoch=steps_per_epoch,
callbacks = [CustomCallback()]
)
model.save(self.modelOutputPath) | kococo-code/Tensorflow_Automatic_Training | server/api/inference/model.py | model.py | py | 4,512 | python | en | code | 1 | github-code | 36 |
36219196616 | class Interruptor:
'''
    Class representing a switch.
'''
def __init__(self,coords,tipoInterruptor,pon,quita):
'''
        Switch constructor.
'''
self.coords = coords
self.tipoInterruptor = tipoInterruptor
self.pon = pon
self.quita = quita
def getCoordenadas(self):
'''
        Returns the coordinates of the switch.
'''
return self.coords
def isSoft(self):
'''
        Returns True if the switch is soft.
        A switch is soft if it is triggered on touch.
'''
return self.tipoInterruptor
def getQuita(self):
'''
        Returns the id of the temporary cells deactivated when the switch is triggered.
'''
return self.quita
def getPon(self):
'''
        Returns the id of the temporary cells activated when the switch is triggered.
'''
return self.pon
def __str__(self):
return " "+str(self.coords)+" "+str(self.tipoInterruptor)+" pon "+self.pon+" quita "+self.quita
def __repr__(self):
return " "+str(self.coords)+" "+str(self.tipoInterruptor)+" pon "+self.pon+" quita "+self.quita
class Estado:
'''
    Class representing a Bloxorz state.
'''
def __init__(self,bloque,temporales):
'''
        State constructor.
'''
self.bloque = bloque
self.temporales = temporales
def getBloque(self):
'''
        Returns the block.
'''
return self.bloque
def setBloque(self,bloque):
'''
        Sets the block.
'''
self.bloque=bloque
def getTemporalesActivadas(self):
'''
        Returns the activated temporary cells.
'''
return self.temporales
def setTemporalesActivadas(self,temporales):
'''
        Sets the activated temporary cells.
'''
self.temporales=temporales
def __str__(self):
return " "+str(self.bloque)+" "+str(self.temporales)
def __eq__(self, other):
b1=self.getBloque()
b2=other.getBloque()
ta1 = frozenset(self.getTemporalesActivadas())
ta2 = frozenset(other.getTemporalesActivadas())
b1.sort()
b2.sort()
return b1 == b2 and ta1==ta2
def __hash__(self):
tmp = []
for i in self.getBloque():
tmp.append(tuple(i))
tmp.sort()
hashableBlock=tuple(tmp)
return hash((hashableBlock, frozenset(self.getTemporalesActivadas())))
class Nivel:
'''
    Class representing a Bloxorz level.
'''
def __init__(self,mapa,interruptores,celdasTemporales):
'''
        Level constructor.
'''
self.mapa = mapa
self.interruptores = interruptores
self.dictTemp=celdasTemporales
def getMapa(self):
'''
        Returns the map.
'''
return self.mapa
def getInterruptores(self):
'''
        Returns the list of switches.
'''
return self.interruptores
def getCeldasTemporales(self):
'''
        Returns the dictionary of temporary cells.
        The key is the id; the value is the list of cells associated with that id.
'''
return self.dictTemp
def __str__(self):
interruptoresStr=""
for i in self.interruptores:
interruptoresStr+=str(i)+"\n"
mapaStr=""
for i in self.mapa:
mapaStr+=str(i)+"\n"
return mapaStr+"\n"+interruptoresStr+str(self.dictTemp)
class Juego:
'''
    Class defining a Bloxorz game.
'''
def __init__(self,nivel,estado):
'''
        Game constructor.
'''
self.nivel=nivel
self.estado=estado
def getNivel(self):
'''
        Returns the level.
'''
return self.nivel
def getEstado(self):
'''
        Returns the state.
'''
return self.estado
def setEstado(self,estado):
'''
        Sets the state.
'''
self.estado=estado
def __str__(self):
return "Nivel \n"+str(self.nivel)+"\n\nEstado\n"+str(self.estado)
def getCoordenadasMeta(mapa):
'''
    Returns the coordinates of the goal.
'''
alto = len(mapa)
ancho = len(mapa[0])
for i in range(alto):
for j in range(ancho):
if mapa[i][j]==4:
return [i,j]
def mueve(estado,nivel,mov):
'''
    Returns the state that results from pushing the block.
'''
bloque = estado.getBloque()
tempAct = estado.getTemporalesActivadas()
posNueva=[]
temporales = nivel.getCeldasTemporales()
    # Compute the block's new positions after the move
rueda1=list(map(lambda x, y : x + y,bloque[0],mov))
if estaTumbado(bloque):
rueda2=list(map(lambda x, y : x + y,bloque[1],mov))
if rueda1==bloque[1]:
posNueva=[rueda2]
elif rueda2==bloque[0]:
posNueva=[rueda1]
else:
posNueva=[rueda1, rueda2]
else:
posNueva=[rueda1, list(map(lambda x, y : x + y,rueda1, mov))]
nuevoSet = set(tempAct)
nuevoEstado = Estado(posNueva,nuevoSet)
    # Check whether the new block is within bounds or on top of a temporary cell
if estaDentro(nuevoEstado, nivel):
indice = activoInterruptor(posNueva, nivel)
        # If it is within bounds, check whether it is triggering a switch
if indice != -1:
interruptor = nivel.getInterruptores()[indice]
temporales = activarTemporales(interruptor, estado)
nuevoEstado.setTemporalesActivadas(temporales)
return nuevoEstado
else:
return estado
def activarTemporales(interruptor, estado):
'''
    Returns the set of all activated temporary cells, plus or minus the ones being activated or deactivated now, respectively.
'''
temporales = estado.getTemporalesActivadas()
pon = interruptor.getPon()
quita = interruptor.getQuita()
if(pon in temporales or pon == ''):
temporales.discard(quita)
else:
temporales.add(pon)
return temporales
def activoInterruptor(bloque, nivel):
'''
    Returns the index (within the level's switch list) of the switch being triggered; works for both soft switches and hard switches.
    Returns -1 when no switch is being triggered.
'''
interruptores = nivel.getInterruptores()
#En el caso de que la lista de interruptores este vacia (no hay interruptores en el mapa)
if not interruptores:
return -1
rueda1=bloque[0]
if estaTumbado(bloque):
rueda2=bloque[1]
else:
        rueda2=[-1, -1]  # off-board sentinel that matches no switch
    for i in range(len(interruptores)):
if rueda1 == interruptores[i].getCoordenadas() or rueda2 == interruptores[i].getCoordenadas():
if (estaTumbado(bloque) and interruptores[i].isSoft()) or not estaTumbado(bloque):
return i
return -1
def estaDentro(estado, nivel):
'''
    Returns True if the block is within bounds or standing on an activated temporary cell; otherwise returns False.
'''
mapa = nivel.getMapa()
rueda1=estado.getBloque()[0]
temporales=estado.getTemporalesActivadas()
dentro1 = coordDentro(rueda1, nivel) and not esHueco(rueda1, nivel)
temp1 = hayTempActivada(rueda1, estado, nivel)
if estaTumbado(estado.getBloque()):
rueda2 = estado.getBloque()[1]
temp2 = hayTempActivada(rueda2, estado, nivel)
dentro2 = coordDentro(rueda2, nivel) and not esHueco(rueda2, nivel)
        # If both halves of the block are within bounds
if dentro1 and dentro2:
return True
        # If one half of the block is within bounds and the other is on a temporary cell
elif (dentro1 or dentro2) and (temp1 or temp2):
return True
        # If both halves of the block are on temporary cells
elif (temp1 and temp2):
return True
else:
            # If the block is within bounds or on a temporary cell
if dentro1 or temp1:
return True
return False
def coordDentro(coord, nivel):
'''
    Only checks whether a coordinate is within the map bounds.
'''
    mapa = nivel.getMapa()
    # If our coordinate lies inside the map
if(len(mapa) > coord[0] and len(mapa[0]) > coord[1] and enLimites(coord)):
return True
else:
return False
def esHueco(coord, nivel):
'''
    Only checks whether a coordinate holds a zero (a hole).
'''
    mapa = nivel.getMapa()
if (mapa[coord[0]][coord[1]] == 0):
return True
else:
return False
def hayTempActivada(coord, estado, nivel):
'''
    Only checks whether a coordinate holds an activated temporary cell.
'''
ta = estado.getTemporalesActivadas()
ct = nivel.getCeldasTemporales()
for tmp in ta:
        for index in range(len(ct[tmp])):
if (ct[tmp][index] == coord):
return True
return False
def estaTumbado(bloque):
'''
    Returns True if the block is lying down; returns False if it is standing up.
'''
if len(bloque)==1:
return False
else:
return True
def esMeta(estado,nivel):
'''
    Returns True if the state is a goal state.
'''
bloque = estado.getBloque()
mapa = nivel.getMapa()
if len(bloque)==1 and mapa[bloque[0][0]][bloque[0][1]]==4:
return True
else:
return False
def enLimites(coord):
'''
    Checks that a coordinate has no negative components.
'''
for i in [0, 1]:
if coord[i] < 0:
return False
return True
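# A minimal usage sketch of the model above (level layout and values are illustrative):
# mapa = [[1, 1, 1, 1],
#         [1, 1, 1, 4]]                  # 4 marks the goal cell
# nivel = Nivel(mapa, [], {})
# estado = Estado([[0, 0]], set())       # standing block at (0, 0)
# estado = mueve(estado, nivel, [0, 1])  # push right: the block now lies on (0,1)-(0,2)
# esMeta(estado, nivel)                  # False until it stands upright on the goal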
| SergioBarbero/bloxorz | ModeloMueve.py | ModeloMueve.py | py | 10,080 | python | es | code | 0 | github-code | 36 |
22439203560 | import ttkbootstrap as ttk
from ttkbootstrap.constants import *
from ttkbootstrap.dialogs import Dialog
from gui.realtime_graph import RealTimeGraph
import matplotlib.animation as animation
from gui.animation import Animation, network_traffic_in_filler, network_traffic_out_filler
from models.agents import Agent
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
from gui.form import create_entry_with_label, create_form
from models.alert import Alert
create_agent_form = [{
"host_ip": {"title": "IP", "default": "0.0.0.0"},
"snmp_version": {"title": "SNMP version", "default": "3"}
}, {
"security_username": {"title": "Security username", "default": ""},
"privacy_password": {"title": "Privacy password", "default": ""},
"privacy_protocol": {"title": "Privacy protocol", "default": ""},
}, {
"auth_password": {"title": "Auth password", "default": ""},
"auth_protocol": {"title": "Auth protocol", "default": ""},
}]
traffic_in_animation = Animation(
"Network In Traffic", ylabel="Traffic In Rate (MBps)")
traffic_in_refresher = traffic_in_animation.create_animation(
network_traffic_in_filler)
traffic_out_animation = Animation(
"Network Out Traffic", ylabel="Traffic Out Rate (MBps)")
traffic_out_refresher = traffic_out_animation.create_animation(
network_traffic_out_filler)
class CreateAgentDialog(Dialog):
def __init__(self, parent=None, title='', alert=False):
super().__init__(parent, title, alert)
self.entries = {}
def create_body(self, master):
frame = ttk.Frame(master=master)
frame.pack(fill=X, ipadx=10, ipady=10, side=TOP)
self.entries = create_form(frame, create_agent_form)
return frame
def create_buttonbox(self, master):
frame = ttk.Frame(master=master)
frame.pack(fill=X, pady=1, ipadx=10, ipady=10, side=BOTTOM)
def on_click_save_agent():
agent = Agent(
host_ip=self.entries["host_ip"].get(),
snmp_version=int(self.entries["snmp_version"].get()),
security_username=self.entries["security_username"].get(),
privacy_password=self.entries["privacy_password"].get(),
privacy_protocol=self.entries["privacy_protocol"].get(),
auth_password=self.entries["auth_password"].get(),
auth_protocol=self.entries["auth_protocol"].get(),
)
engine = create_engine('sqlite:///db.sqlite3')
db = Session(engine)
db.add(agent)
db.commit()
master.destroy()
btn = ttk.Button(
master=master, text='Add agent',
compound=LEFT,
command=on_click_save_agent
)
btn.pack(side=RIGHT, ipadx=5, ipady=5, padx=(0, 15), pady=1)
return btn
create_alert_form = [{
"metric": {"title": "Metric", "default": "ifInOctets"},
"increase_threshold": {"title": "Increase threshold", "default": "30000"}
}]
class CreateAlertDialog(Dialog):
def __init__(self, parent=None, title='', alert=False):
super().__init__(parent, title, alert)
self.entries = {}
def create_body(self, master):
frame = ttk.Frame(master=master)
frame.pack(fill=X, ipadx=10, ipady=10, side=TOP)
self.entries = create_form(frame, create_alert_form)
return frame
def create_buttonbox(self, master):
frame = ttk.Frame(master=master)
frame.pack(fill=X, pady=1, ipadx=10, ipady=10, side=BOTTOM)
def on_click_save_alert():
alert = Alert(
metric=self.entries["metric"].get(),
increase_threshold=self.entries["increase_threshold"].get(),
)
engine = create_engine('sqlite:///db.sqlite3')
db = Session(engine)
db.add(alert)
db.commit()
master.destroy()
btn = ttk.Button(
master=master, text='Add alert',
compound=LEFT,
command=on_click_save_alert
)
btn.pack(side=RIGHT, ipadx=5, ipady=5, padx=(0, 15), pady=1)
return btn
class MainScreen(ttk.Frame):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.pack(fill=BOTH, expand=YES)
# buttonbar
buttonbar = ttk.Frame(self, style='secondary.TFrame')
buttonbar.pack(fill=X, pady=1, side=TOP)
# add agent button
def createAgentDialog(): return CreateAgentDialog(
parent=self, title="Add new agent").show()
btn = ttk.Button(
master=buttonbar, text='Add agent',
compound=LEFT,
style='secondary',
command=createAgentDialog
)
btn.pack(side=LEFT, ipadx=5, ipady=5, padx=(1, 0), pady=1)
# add alert button
def createAlertDialog(): return CreateAlertDialog(
parent=self, title="Add new Alert").show()
btn = ttk.Button(
master=buttonbar, text='Add Alert',
compound=LEFT,
style='secondary',
command=createAlertDialog
)
btn.pack(side=LEFT, ipadx=5, ipady=5, padx=(1, 0), pady=1)
# graph
label = ttk.Label(self, text="Traffic Monitor",
bootstyle="default", font=("", 20, "bold"))
label.pack(pady=10, padx=10)
graph_in = RealTimeGraph(self, traffic_in_animation.fig)
graph_in.pack(fill=X, pady=1, side=TOP)
graph_out = RealTimeGraph(self, traffic_out_animation.fig)
graph_out.pack(fill=X, pady=1, side=TOP)
def start():
app = ttk.Window(title="TeutoMonitor",
themename="superhero", minsize=(1280, 720))
MainScreen(app)
anim_in = animation.FuncAnimation(
traffic_in_animation.fig, traffic_in_refresher, interval=1000)
anim_out = animation.FuncAnimation(
traffic_out_animation.fig, traffic_out_refresher, interval=1000)
app.mainloop()
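# Illustrative entry point, assuming the module is meant to be run directly:
# if __name__ == "__main__":
#     start()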
| MatheusWoeffel/TeutoMonitor | src/gui/window.py | window.py | py | 6,033 | python | en | code | 1 | github-code | 36 |
31137840419 | # Theory is available here: https://youtu.be/vMD6-jzgDvI?t=693
# - Run a loop from 11 to 20 using for and the range function
# - Print the numbers from 14 to 18 on the screen. Use if inside the loop
for i in range(11, 21):
    if i >= 14 and i <= 18:
        print(i)
# Code below
# - Print all the letters before 'y' in the string str. Use for, with if and break inside
#
# https://youtu.be/vMD6-jzgDvI?t=804 - theory is here
# Code below
str = "How are you!"
| vadimduzh/python-core | for-5.task.py | for-5.task.py | py | 633 | python | ru | code | 0 | github-code | 36 |
31141721992 | '''
Analyse observation basket
'''
import argparse
import joblib
import pandas as pd
import apriori
import helpers
from rules import RuleGenerator
parser = argparse.ArgumentParser(description='Convert Halias RDF dataset for data mining')
parser.add_argument('minsup', help='Minimum support', nargs='?', type=float, default=0.8)
args = parser.parse_args()
apriori.NUM_CORES = 1
MINSUP = args.minsup
itemsets = helpers.read_observation_basket(helpers.DATA_DIR + 'observation.basket')
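# Editor's note (hedged): the basket file format is not shown in this snippet;
# it is assumed that helpers.read_observation_basket yields one itemset
# (a list of items) per observation line.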
all_items = list(set([item for itemset in itemsets for item in itemset]))
print(len(itemsets))
print(len(all_items))
#print(itemsets[:1])
print('\nSupport {:.3f} frequent itemsets:\n'.format(MINSUP))
freq_items = apriori.apriori(itemsets, all_items, MINSUP, verbose=True)
print(freq_items[-1])
print(len(freq_items))
joblib.dump(freq_items, helpers.DATA_DIR + 'freq_items_{:.3f}.pkl'.format(MINSUP))
ruler = RuleGenerator(itemsets, freq_items)
rules = ruler.rule_generation(0.5) #, fixed_consequents=[('varis',)])
print(len(rules))
joblib.dump(rules, helpers.DATA_DIR + 'freq_rules_{:.3f}.pkl'.format(MINSUP))
#for (rule, conf) in rules:
#    print('{} -> {}\t conf: {:.2f}\t supp: {:.3f}'.format(rule[0], rule[1], conf, ruler.support(*rule))) | razz0/DataMiningProject | src/observation_basket_analysis.py | observation_basket_analysis.py | py | 1,225 | python | en | code | 0 | github-code | 36
5066258041 | import math
import numpy as np
from queue import Queue, PriorityQueue
import time
import networkx as nx
import pymysql
def read_file(edges, degree, g_dict, connected_fields):
w = []
edge = {}
visit = {}
cnt = 1
sum = 0
n = 0
m = 0
for item in edges:
a = item[0]
b = item[1]
if a not in connected_fields or b not in connected_fields:
continue
m += 1
g_dict[(a, b)] = 1
g_dict[(b, a)] = 1
sum += 1
if a not in degree.keys():
degree[a] = 1
edge[a] = [b]
else:
degree[a] += 1
edge[a].append(b)
if b not in degree.keys():
degree[b] = 1
edge[b] = [a]
else:
degree[b] += 1
edge[b].append(a)
n = len(degree)
return n, m, degree, g_dict, sum, edge
def dfs(pos,id2tag,id2size,id2child):
if len(id2child[pos])==1:
        return id2tag[pos],1,id2tag,id2size # return the leaf node's tag and size
size=0
tag=0
tag2num={}
mx=0
for child in id2child[pos]:
child_tag,child_size,id2tag,id2size=dfs(child,id2tag,id2size,id2child)
if child_tag not in tag2num.keys():
tag2num[child_tag]=1
else:
tag2num[child_tag]+=1
if tag2num[child_tag]>mx:
mx=tag2num[child_tag]
mx_tag=child_tag
tag+=child_tag*child_size
size+=child_size
id2size[pos]=size
#id2tag[pos]=int(tag/size)
id2tag[pos]=mx_tag
return mx_tag,size,id2tag,id2size
def update(index, id2child, id2deep):  # update the depths of all children of qu[index]
if len(id2child[index]) > 1:
for node in id2child[index]:
# qu[node]=(qu[node][0],qu[node][1],qu[node][2],qu[node][3],qu[node][4],qu[node][5],qu[node][6]+1)
id2deep[node] = id2deep[node] + 1
id2deep = update(node, id2child, id2deep)
return id2deep
def get_deep(index, id2child, id2deep):
deep = id2deep[index]
if len(id2child[index]) > 1:
for node in id2child[index]:
deep = max(deep, get_deep(node, id2child, id2deep))
return deep
def structual_entropy(edges, nodes, mx_deep,label,node_tags,dataset):
nodes = np.array(nodes)
edges = np.array(edges)
    id2index = {j: i+1 for i, j in enumerate(nodes)} # number nodes starting from 1
mapped_edge = np.array(list(map(id2index.get, edges.flatten())), dtype=np.int32).reshape(edges.shape)
nodes = [id2index[id] for id in nodes]
edges=list(mapped_edge)
degree = {}
g_dict = {}
n, m, degree, g_dict, sum, edge = read_file(edges, degree, g_dict, nodes)
#print("num of nodes:",n)
h1 = 0
#print(edges,nodes)
for i in range(1, n + 1):
h1 += (-degree[i] / (2.0 * sum) * math.log(degree[i] / (2.0 * sum), 2))
#print(h1)
nums = []
for i in range(1, n + 1):
nums.append(i)
qu = [(0, 2 * sum, [], [], 0)]
id2sister = {}
id2child = {0: nums}
id2deep = {0: 1}
id2fa = {0: -1}
I = {0:0}
for i in range(1, n + 1):
        qu.append((degree[i], degree[i]))  # (cut-edge count of the cluster, sum of degrees)
id2sister[i] = edge[i]
id2child[i] = [i]
id2deep[i] = 2
id2fa[i] = 0
I[i] = degree[i]
I[0]+=degree[i]
result = 0
cnt = n + 1
flag = True
#flag=False
flag2 = True
#flag2=False
delete_id = []
# print(id2sister)
iter = 1
while (flag or flag2):
# while(flag2):
flag2 = True
# while(flag2):
if flag2:
iter += 1
#print(iter)
mn = 1e9
mx = 1e-6
flag2 = False
for i in range(1, len(qu)):
if i in delete_id:
continue
item1 = qu[i]
g1 = item1[0]
for j in id2sister[i]:
item2 = qu[j]
if len(id2child[id2fa[i]]) <= 2 or j in delete_id:
# print("error")
continue
g2 = item2[0]
# new_edge=item1[3]+item2[3]
v = item1[1] + item2[1]
# new_node=item1[2]+item2[2]
v_fa = qu[id2fa[i]][1]
if (i, j) in g_dict.keys():
g = g1 + g2 - 2 * g_dict[(i, j)]
else:
g = g1 + g2
                    # combine the two clusters whose combination decreases the entropy the most
                    # the resulting depth must not exceed mx_deep
if (g1 + g2 - g) / (2 * sum) * math.log((v_fa) / v, 2) > mx and get_deep(i, id2child,
id2deep) + 1 <= mx_deep and get_deep(
j, id2child, id2deep) + 1 <= mx_deep:
mx = (g1 + g2 - g) / (2 * sum) * math.log((v_fa) / v, 2)
add = mx
ans = (g, v)
id1 = i
id2 = j
flag2 = True
if flag2:
# print(len(qu),index1,index2)
#print('combine', id1, id2, cnt)
            # update parent pointers
id2fa[cnt] = id2fa[id1]
id2fa[id1] = cnt
id2fa[id2] = cnt
            # update the child lists
id2child[cnt] = [id1, id2]
fa_id = id2fa[cnt]
# print('combine',fa_id,id1,id2)
id2child[fa_id].remove(id1)
id2child[fa_id].remove(id2)
id2child[fa_id].append(cnt)
# print(id2child)
            # update depths
# print(qu[index1][0],qu[index2][0],ans[0])
id2deep[cnt] = id2deep[id1]
id2deep[id1] = id2deep[cnt] + 1
id2deep[id2] = id2deep[cnt] + 1
id2deep = update(id1, id2child, id2deep)
id2deep = update(id2, id2child, id2deep)
# print(mn)
result += add
# print(result)
            # update g_dict
for i in range(0, len(qu)):
if id2deep[cnt] == id2deep[i] and id2fa[cnt] == id2fa[i] and i not in delete_id:
if (id1, i) in g_dict.keys():
c1 = g_dict[(id1, i)]
else:
c1 = 0
if (id2, i) in g_dict.keys():
c2 = g_dict[(id2, i)]
else:
c2 = 0
c = c1 + c2
if c > 0:
g_dict[(cnt, i)] = g_dict[(i, cnt)] = c
            # update id2sister:
id2sister[id2].remove(id1)
id2sister[id1].remove(id2)
id2sister[cnt] = list(set(id2sister[id1] + id2sister[id2]))
for id in id2sister[id1]:
id2sister[id].remove(id1)
id2sister[id].append(cnt)
for id in id2sister[id2]:
id2sister[id].remove(id2)
if cnt not in id2sister[id]:
id2sister[id].append(cnt)
id2sister[id1] = [id2]
id2sister[id2] = [id1]
# print(id1,id2sister[id1])
# print(id2,id2sister[id2])
# print(cnt,id2sister[cnt])
# print(id2sister)
'''
for i in id2sister[cnt]:
if (id1, i) in g_dict.keys():
c1 = g_dict[(id1, i)]
else:
c1 = 0
if (id2, i) in g_dict.keys():
c2 = g_dict[(id2, i)]
else:
c2 = 0
c = c1 + c2
if c > 0:
g_dict[(cnt, i)] = g_dict[(i, cnt)] = c
'''
            # update I
qu.append(ans)
I[cnt] = qu[id1][0] + qu[id2][0]
I[id2fa[cnt]] = I[id2fa[cnt]] - (qu[id1][0] + qu[id2][0] - qu[cnt][0])
#print(I)
cnt += 1
flag = True
while (flag):
iter += 1
#print(iter)
flag = False
mx = 1e-5
item1 = qu[cnt - 1]
if len(id2child[id2fa[cnt - 1]]) <= 2:
break
v1 = item1[1]
g1 = item1[0]
for j in id2sister[cnt - 1]:
            # compute the gain of merging cnt and j
item2 = qu[j]
if j in delete_id:
continue
v2 = item2[1]
g2 = item2[0]
# print(item1[2],item2[2],new_node)
v12 = item1[1] + item2[1]
if (cnt - 1, j) in g_dict.keys():
g12 = g1 + g2 - 2 * g_dict[(cnt - 1, j)]
else:
g12 = g1 + g2
v = item1[1] + item2[1]
# new_node=item1[2]+item2[2]
v_fa = qu[id2fa[cnt - 1]][1]
I1 = I[cnt - 1] - g1
I2 = I[j] - g2
# print(I1, I2)
# dif = (g1+g2-g12)/(2*sum)*math.log(v_fa/v,2) - (I1-g1)/(2*sum)*math.log(v/v1,2) - (I2 - g2)/(2*sum)*math.log(v/v2,2)
dif = (g1 + g2 - g12) / (2 * sum) * math.log(v_fa, 2) + (I1) / (2 * sum) * math.log(v1, 2) \
+ (I2) / (2 * sum) * math.log(v2, 2) - (I[cnt - 1] + I[j] - g12) / (2 * sum) * math.log(v, 2)
# new_node=item1[2]+item2[2]
            # compute the entropy after the merge
'''
after_merge = -g12 / (2 * sum) * math.log(v12 / v_fa, 2)
for node in id2child[cnt - 1] + id2child[j]:
after_merge += -qu[node][0] / (2 * sum) * math.log(qu[node][1] / v12, 2)
# print(after_merge)
before_merge = -g1 / (2 * sum) * math.log(v1 / v_fa, 2) - g2 / (2 * sum) * math.log(v2 / v_fa, 2)
for node in id2child[cnt - 1]:
before_merge += -qu[node][0] / (2 * sum) * math.log(qu[node][1] / v1, 2)
for node in id2child[j]:
before_merge += -qu[node][0] / (2 * sum) * math.log(qu[node][1] / v2, 2)
dif = before_merge - after_merge
'''
'''
print(dif, dif2)
if math.fabs(dif-dif2)>1e-3:
print("!!!!!!!!!!!!!!!!!!!!!")
'''
# print(before_merge,after_merge)
if dif > mx:
mx = dif
ans = (g12, v12)
add = dif
id2 = j
flag = True
if flag:
id1 = cnt - 1
if len(id2child[id1]) > 1:
delete_id.append(id1)
if len(id2child[id2]) > 1:
delete_id.append(id2)
#print('merge', id1, id2, cnt)
            # update parent pointers
id2fa[cnt] = id2fa[id1]
            # update the parent's child list
id2child[cnt] = id2child[id1] + id2child[id2]
fa_id = id2fa[cnt]
# print('merge',fa_id,id1,id2)
id2child[fa_id].remove(id1)
id2child[fa_id].remove(id2)
id2child[fa_id].append(cnt)
# print(id2child)
            # update depths and the children's parent pointers
id2deep[cnt] = id2deep[id1]
for node in id2child[cnt]:
id2deep[node] = id2deep[cnt] + 1
id2fa[node] = cnt
result += add
'''
for i in range(0, len(qu)):
if id2deep[cnt] == id2deep[i] and id2fa[cnt] == id2fa[i] and i not in delete_id:
if (id1, i) in g_dict.keys():
c1 = g_dict[(id1, i)]
else:
c1 = 0
if (id2, i) in g_dict.keys():
c2 = g_dict[(id2, i)]
else:
c2 = 0
c = c1 + c2
if c > 0:
g_dict[(cnt, i)] = g_dict[(i, cnt)] = c
'''
            # update id2sister
id2sister[id2].remove(id1)
id2sister[id1].remove(id2)
id2sister[cnt] = list(set(id2sister[id1] + id2sister[id2]))
# print(cnt,id2sister[cnt],id2sister[id1],id2sister[id2])
for id in id2sister[id1]:
id2sister[id].remove(id1)
id2sister[id].append(cnt)
for id in id2sister[id2]:
id2sister[id].remove(id2)
if cnt not in id2sister[id]:
id2sister[id].append(cnt)
for sub_id1 in id2child[id1] + id2child[id2]:
id2sister[sub_id1] = []
for sub_id2 in id2child[id1] + id2child[id2]:
if sub_id1 != sub_id2 and (sub_id1, sub_id2) in g_dict.keys():
id2sister[sub_id1].append(sub_id2)
for i in id2sister[cnt]:
if (id1, i) in g_dict.keys():
c1 = g_dict[(id1, i)]
else:
c1 = 0
if (id2, i) in g_dict.keys():
c2 = g_dict[(id2, i)]
else:
c2 = 0
c = c1 + c2
if c > 0:
g_dict[(cnt, i)] = g_dict[(i, cnt)] = c
            # update I
qu.append(ans)
I[cnt] = I[id1] + I[id2]
I[id2fa[cnt]] = I[id2fa[cnt]] - (qu[id1][0] + qu[id2][0] - qu[cnt][0])
#print(I)
cnt += 1
flag = True
while (flag):
iter += 1
#print(iter)
flag = False
mx = 1e-5
item1 = qu[cnt - 1]
if len(id2child[id2fa[cnt - 1]]) <= 2:
break
v1 = item1[1]
g1 = item1[0]
for j in id2sister[cnt - 1]:
            # compute the gain of merging cnt and j
item2 = qu[j]
if j in delete_id:
continue
v2 = item2[1]
g2 = item2[0]
# print(item1[2],item2[2],new_node)
v12 = item1[1] + item2[1]
v = item1[1] + item2[1]
if (cnt - 1, j) in g_dict.keys():
g12 = g1 + g2 - 2 * g_dict[(cnt - 1, j)]
else:
g12 = g1 + g2
v_fa = qu[id2fa[cnt - 1]][1]
I1 = I[cnt - 1] - g1
I2 = I[j] - g2
dif = (g1+g2-g12)/(2*sum)*math.log(v_fa/v,2) - (I1)/(2*sum)*math.log(v/v1,2) - (I2)/(2*sum)*math.log(v/v2,2)
#dif = (g1 + g2 - g12) / (2 * sum) * math.log(v_fa, 2) + (I1) / (2 * sum) * math.log(v1, 2) \
#+ (I2) / (2 * sum) * math.log(v2, 2) - (I[cnt - 1] + I[j] - g12) / (2 * sum) * math.log(v, 2)
# new_node=item1[2]+item2[2]
            # compute the entropy after the merge
'''
after_merge = -g12 / (2 * sum) * math.log(v12 / v_fa, 2)
for node in id2child[cnt - 1] + id2child[j]:
after_merge += -qu[node][0] / (2 * sum) * math.log(qu[node][1] / v12, 2)
# print(after_merge)
before_merge = -g1 / (2 * sum) * math.log(v1 / v_fa, 2) - g2 / (2 * sum) * math.log(v2 / v_fa, 2)
for node in id2child[cnt - 1]:
before_merge += -qu[node][0] / (2 * sum) * math.log(qu[node][1] / v1, 2)
for node in id2child[j]:
before_merge += -qu[node][0] / (2 * sum) * math.log(qu[node][1] / v2, 2)
dif2 = before_merge - after_merge
'''
#print("dif:",dif,dif2)
# print(before_merge,after_merge)
if dif >= mx:
mx = dif
ans = (g12, v12)
add = dif
id2 = j
flag = True
if flag:
id1 = cnt - 1
if len(id2child[id1]) > 1:
delete_id.append(id1)
if len(id2child[id2]) > 1:
delete_id.append(id2)
#print('merge', id1, id2, cnt)
            # update parent pointers
id2fa[cnt] = id2fa[id1]
            # update the parent's child list
id2child[cnt] = id2child[id1] + id2child[id2]
fa_id = id2fa[cnt]
# print('merge',fa_id,id1,id2)
id2child[fa_id].remove(id1)
id2child[fa_id].remove(id2)
id2child[fa_id].append(cnt)
# print(id2child)
            # update depths and the children's parent pointers
id2deep[cnt] = id2deep[id1]
for node in id2child[cnt]:
id2deep[node] = id2deep[cnt] + 1
id2fa[node] = cnt
result += add
'''
for i in range(0, len(qu)):
if id2deep[cnt] == id2deep[i] and id2fa[cnt] == id2fa[i] and i not in delete_id:
if (id1, i) in g_dict.keys():
c1 = g_dict[(id1, i)]
else:
c1 = 0
if (id2, i) in g_dict.keys():
c2 = g_dict[(id2, i)]
else:
c2 = 0
c = c1 + c2
if c > 0:
g_dict[(cnt, i)] = g_dict[(i, cnt)] = c
'''
            # update id2sister
id2sister[id2].remove(id1)
id2sister[id1].remove(id2)
id2sister[cnt] = list(set(id2sister[id1] + id2sister[id2]))
# print(cnt,id2sister[cnt],id2sister[id1],id2sister[id2])
for id in id2sister[id1]:
id2sister[id].remove(id1)
id2sister[id].append(cnt)
for id in id2sister[id2]:
id2sister[id].remove(id2)
if cnt not in id2sister[id]:
id2sister[id].append(cnt)
for sub_id1 in id2child[id1] + id2child[id2]:
id2sister[sub_id1] = []
for sub_id2 in id2child[id1] + id2child[id2]:
if sub_id1 != sub_id2 and (sub_id1, sub_id2) in g_dict.keys():
id2sister[sub_id1].append(sub_id2)
for i in id2sister[cnt]:
if (id1, i) in g_dict.keys():
c1 = g_dict[(id1, i)]
else:
c1 = 0
if (id2, i) in g_dict.keys():
c2 = g_dict[(id2, i)]
else:
c2 = 0
c = c1 + c2
if c > 0:
g_dict[(cnt, i)] = g_dict[(i, cnt)] = c
qu.append(ans)
I[cnt] = I[id1] + I[id2]
I[id2fa[cnt]] = I[id2fa[cnt]] - (qu[id1][0] + qu[id2][0]-qu[cnt][0])
cnt += 1
g = nx.Graph()
ids = []
edges = []
id2tag = {}
id2size = {}
id2adj=[]
    # output the info of every node in the tree
for i, item in enumerate(qu):
if i not in delete_id:
#print(i, id2fa[i], id2deep[i], id2child[i])
ids.append(i)
if len(id2child)>1:
if i==0:
tem=id2child[i]
#tem=[]
else:
tem=[id2fa[i]]+id2child[i]
#tem = [id2fa[i]]
else:
tem = [id2fa[i]]
id2adj.append(tem)
for child in id2child[i]:
#edges.append((i, child))
edges.append((child, i))
for i,tag in enumerate(node_tags):
id2tag[i+1]=tag
id2size[i+1]=1
_,_,id2tag,id2size=dfs(0,id2tag,id2size,id2child)
sort_tag=sorted(id2tag.items(), key=lambda x: x[0])
new_tag=[item[1] for item in sort_tag]
ids=np.array(ids)
edges=np.array(edges)
id2index = {j: i for i, j in enumerate(ids)}
for i,item in enumerate(id2adj):
id2adj[i]=[id2index[adj] for adj in id2adj[i]]
mapped_edge = np.array(list(map(id2index.get, edges.flatten())), dtype=np.int32).reshape(edges.shape)
ids=[id2index[id] for id in ids]
g.add_nodes_from(list(ids))
g.add_edges_from(list(mapped_edge))
g.label=label
g.node_tags=new_tag
'''
if dataset!='':
with open('../../data/' + dataset + '/' + dataset + '_aug_3layer.txt', 'a', encoding='utf8') as f1:
f1.write(str(len(ids)) +' '+str(label)+ '\n')
for i,adj in enumerate(id2adj):
num_adj=len(adj)
adj=[str(item) for item in adj]
adj_str=' '.join(adj)
f1.write(str(new_tag[i]) + ' ' + str(num_adj) +' '+adj_str+ '\n')
'''
#print(h1, h1-result)
return g,result
def graph_augment(g,dataset):
    # g has nodes, edges, node_tags and g.label
edges=list(g.edges())
nodes=list(g.nodes())
max_deep=4
label=g.label
return structual_entropy(edges, nodes, max_deep,label,g.node_tags,dataset)
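# Editor's demo (hedged): a minimal, self-contained run of graph_augment on a
# toy graph. The edges, label and node_tags below are illustrative assumptions,
# not part of the Hi-PART data; passing dataset='' skips any file output.
if __name__ == "__main__":
    demo = nx.Graph()
    demo.add_edges_from([(1, 2), (2, 3), (1, 3), (3, 4), (4, 5), (5, 6), (4, 6)])
    demo.label = 0                        # arbitrary graph label
    demo.node_tags = [0, 0, 0, 1, 1, 1]   # one tag per node, in node order
    coding_tree, entropy_gain = graph_augment(demo, '')
    print(coding_tree.number_of_nodes(), entropy_gain)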
| ryy980622/Hi-PART | src/graph_augmentation.py | graph_augmentation.py | py | 22,560 | python | en | code | 0 | github-code | 36 |
41270728697 | #!/usr/bin/env python
# encoding: utf-8
from __future__ import annotations

import collections
from typing import List
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
"""
    use queue to implement BFS, needs to record level (breadth-first traversal)
"""
def levelOrderBottom(self, root: TreeNode) -> List[List[int]]:
queue = collections.deque()
queue.append((root, 0))
res = []
if not root:
return res
while queue:
node, depth = queue.popleft()
if node:
if len(res) <= depth:
res.insert(0, [])
                res[-(depth + 1)].append(node.val)
                queue.append((node.left, depth + 1))
                queue.append((node.right, depth + 1))
        return res
class Solution:
"""
    use stack to implement DFS, record depth (depth-first traversal)
"""
def levelOrderBottom(self, root: TreeNode) -> List[List[int]]:
res = []
stack = [(root, 0)]
while len(stack) > 0:
node, depth = stack.pop()
if node:
if len(res) <= depth:
res.insert(0, [])
res[-(depth+1)].append(node.val)
stack.append((node.right, depth+1))
stack.append((node.left, depth+1))
return res
class Solution:
"""
    recursive DFS (recursive depth-first traversal)
"""
def levelOrderBottom(self, root: TreeNode) -> List[List[int]]:
res = []
self.dfs(root, 0, res)
return res
def dfs(self, root, depth, res):
if root:
if depth >= len(res):
res.insert(0, [])
res[-(depth+1)].append(root.val)
self.dfs(root.left, depth+1, res)
self.dfs(root.right, depth+1, res)
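# Editor's demo (hedged): a quick local check of the (last) Solution above.
# The TreeNode stub mirrors the commented-out LeetCode definition, and the
# sample tree [3,9,20,null,null,15,7] is the problem's standard example.
if __name__ == "__main__":
    class TreeNode:
        def __init__(self, val=0, left=None, right=None):
            self.val, self.left, self.right = val, left, right

    root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
    print(Solution().levelOrderBottom(root))  # expected: [[15, 7], [9, 20], [3]]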
| HugoNgai/Leetcode | Code/binary_tree_level_order_traversal_II.py | binary_tree_level_order_traversal_II.py | py | 1,890 | python | en | code | 0 | github-code | 36 |
7256329607 | import random
import sys
import inquirer
words = open("words.txt", "r")
words_two = words.read()
word_bank = words_two.split()
hell = []
WRONG = []
past_guesses = []
difficulty = [
inquirer.List('mode',
message = "Choose Your Difficulty",
choices = ['Hell','Hard','Medium','Soft']
)
]
play_again = [
inquirer.List('play',
message= "Wanna Go Again?",
choices = ['Yeah!','Nah...']
)
]
def choose_difficulty():
answers = inquirer.prompt(difficulty)
if 'Hard' in answers.values():
guess_me = random.choice(word_bank)
while len(guess_me) < 8:
guess_me = random.choice(word_bank)
print("Hard Mode Selected")
return board_maker(guess_me)
if 'Medium' in answers.values():
guess_me = random.choice(word_bank)
while len(guess_me) < 6 or len(guess_me) > 8:
guess_me = random.choice(word_bank)
print("Medium Mode Selected")
return board_maker(guess_me)
if 'Soft' in answers.values():
guess_me = random.choice(word_bank)
while len(guess_me) > 6:
guess_me = random.choice(word_bank)
print("Soft Mode Selected")
return board_maker(guess_me)
if 'Hell' in answers.values():
guess_me = random.choice(word_bank)
breaker = list(guess_me)
new_board = "_" * len(breaker)
scoreboard = list(new_board)
hell.append("hell")
print("Heaven or Hell Let's Rock")
return play_game(scoreboard, breaker)
def board_maker(guess_me):
breaker = list(guess_me)
new_board = "_" * len(breaker)
scoreboard = list(new_board)
return play_game(scoreboard, breaker)
def play_game(scoreboard, breaker):
if len(WRONG) == 8:
print(f"\nLOSER!\nThe Word Was: {''.join(breaker)}\n")
mulligan = inquirer.prompt(play_again)
if 'Nah...' in mulligan.values():
sys.exit(0)
else:
WRONG.clear()
hell.clear()
past_guesses.clear()
choose_difficulty()
if scoreboard == breaker:
print(f"'\n',{''.join(breaker)}\nYOU WIN!")
mulligan = inquirer.prompt(play_again)
if 'Nah...' in mulligan.values():
sys.exit(0)
else:
WRONG.clear()
hell.clear()
past_guesses.clear()
choose_difficulty()
else:
print(f"{str.join(scoreboard)} \n GUESS ME!")
guess = input("Pick A Letter: ").lower()
if not guess.isalpha():
print("Letters only!")
play_game(scoreboard, breaker)
elif len(guess) > 1:
print("One at a time buddy.")
play_game(scoreboard, breaker)
elif guess in past_guesses:
print("You Already Tried That...")
play_game(scoreboard, breaker)
elif guess in breaker:
print("good guess!")
past_guesses.append(guess)
for correct in range(len(breaker)):
if breaker[correct] == guess:
scoreboard[correct] = breaker[correct]
play_game(scoreboard, breaker)
elif guess not in breaker:
past_guesses.append(guess)
WRONG.append("X")
print("\nwrong, dumbass\n","X" * len(WRONG),"\n",f'{8 - len(WRONG)} {"guess" if 8 - len(WRONG) == 1 else "guesses"} left!')
if "hell" in hell and len(WRONG) != 8:
past_guesses.clear()
hell_game()
play_game(scoreboard, breaker)
def hell_game():
guess_me = random.choice(word_bank)
breaker = list(guess_me)
new_board = "_" * len(breaker)
scoreboard = list(new_board)
print("\nThe Hell Continues")
return play_game(scoreboard, breaker)
if __name__ == "__main__":
choose_difficulty()
| Momentum-Team-13/python-mystery-word-samrespass | mystery_word.py | mystery_word.py | py | 3,941 | python | en | code | 0 | github-code | 36 |
39877228092 | import datetime
import os
import win32com.client
###### The path must not contain accented characters or cedillas
path = ''
###### If you want to save into one of the logged-in user's folders such as Desktop, Documents, etc...
# path = os.path.expanduser('~/Desktop/arquivos')
################################
outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace("MAPI")
######## If you have more than one PST file ########
#email = outlook.Folders['guilherme@cairotecnologia.com.br']
#inbox = email.Folders[0]
############################################
inbox = outlook.GetDefaultFolder(6)
messages = inbox.Items
messages.Sort("[ReceivedTime]", True)
first = messages.GetFirst()
######## This block would create a folder inside the path to organize attachments by year ########
#if not os.path.exists(path+"/Certificado "+str(first.ReceivedTime.date().year)):
# os.mkdir(path+"/Certificado "+str(first.ReceivedTime.date().year))
#path = path+"/Certificado "+str(first.ReceivedTime.date().year)
############################################
#attachments = first.Attachments
#attachment = attachments.Item(1)
for attachment in first.Attachments:
attachment.SaveAsFile(os.path.join(path, str(attachment))) | guimedeiross/move-attachments-outlook | mover.py | mover.py | py | 1,220 | python | en | code | 0 | github-code | 36 |
9315048543 | import os
import sys
from math import exp, log, sqrt
from iris_validation import clipper
from iris_validation.utils import ATOMIC_NUMBERS, MC_ATOM_NAMES, norm_cdf
class ReflectionsHandler(object):
def __init__(self, f_reflections=None, xmap=None, minimol=None):
self.f_reflections = f_reflections
self.xmap = xmap
self.minimol = minimol
self.hkl = clipper.HKL_info()
self.grid = None
self.spacegroup = None
self.cell = None
self.resolution = None
self.resolution_limit = None
if f_reflections is None:
if xmap is None:
print('ERROR: either a reflections file path or an xmap object must be passed as an argument')
raise Exception('ArgumentError')
try:
self.grid = xmap.grid
except AttributeError:
self.grid = None
self.spacegroup = xmap.spacegroup
self.cell = xmap.cell
else:
extension = f_reflections.split('.')[-1].lower()
if extension != 'mtz':
if extension == 'cif':
print('ERROR: mmCIF format is not currently supported for reflections data.')
else:
print('ERROR: reflections file has unrecognised extension:' + extension)
raise Exception('ExtensionError')
self._load_hkl_data()
self._calculate_structure_factors()
self._generate_xmap()
self._calculate_map_stats()
def _load_hkl_data(self):
mtzin = clipper.CCP4MTZfile()
mtzin.open_read(self.f_reflections)
mtzin.import_hkl_info(self.hkl)
if clipper.mode == 0:
mtz_labels_and_types = [ tuple(str(line).strip().split(' ')) for line in mtzin.column_labels() ]
elif clipper.mode == 1:
mtz_labels_and_types = [ tuple(str(line).strip().split(' ')) for line in mtzin.column_labels ]
mtz_column_labels, mtz_column_types = zip(*mtz_labels_and_types)
mtz_column_label_suffixes = set([ label.split('/')[-1] for label in mtz_column_labels ])
# Need a better way to choose the right headers, but I'm not familiar enough with reflections data to know what labels are common
import_complete = False
for suffix_pair in ( ('F', 'SIGF'),
('FP', 'SIGFP'),
('FP_ALL', 'SIGFP_ALL') ):
if len(mtz_column_label_suffixes & set(suffix_pair)) == 2:
try:
self.f_sigf = clipper.HKL_data_F_sigF_float(self.hkl)
mtzin.import_hkl_data(self.f_sigf, '/*/*/[' + ','.join(suffix_pair) + ']')
import_complete = True
break
except Exception as e:
print('ERROR: failed to import HKL data from reflections file')
raise(e)
if not import_complete:
print('ERROR: reflections file does not contain the required columns')
raise Exception('ColumnError')
mtzin.close_read()
if clipper.mode == 0:
spacegroup = self.hkl.spacegroup()
cell = self.hkl.cell()
resolution = self.hkl.resolution()
elif clipper.mode == 1:
spacegroup = self.hkl.spacegroup
cell = self.hkl.cell
resolution = self.hkl.resolution
self.spacegroup = spacegroup
self.cell = cell
self.resolution = resolution
self.resolution_limit = self.resolution.limit()
def _calculate_structure_factors(self, bulk_solvent=True):
#self.crystal = clipper.MTZcrystal()
#self.f_phi = clipper.HKL_data_F_phi_float(self.hkl, self.crystal)
self.f_phi = clipper.HKL_data_F_phi_float(self.hkl)
atoms = self.minimol.atom_list()
sf_calc = clipper.SFcalc_obs_bulk_float if bulk_solvent else clipper.SFcalc_obs_base_float
sf_calc(self.f_phi, self.f_sigf, atoms)
def _generate_xmap(self):
self.grid = clipper.Grid_sampling(self.spacegroup, self.cell, self.resolution)
self.xmap = clipper.Xmap_float(self.spacegroup, self.cell, self.grid)
try:
self.xmap.fft_from(self.f_phi)
except AttributeError:
self.xmap.fft_from_float(self.f_phi)
def _calculate_map_stats(self):
map_stats = clipper.Map_stats(self.xmap)
self.map_mean = map_stats.mean()
self.map_std = map_stats.std_dev()
def get_density_at_point(self, xyz):
if clipper.mode == 0:
cell = self.xmap.cell()
grid = self.xmap.grid_sampling()
elif clipper.mode == 1:
cell = self.xmap.cell
grid = self.xmap.grid_sampling
co = clipper.Coord_orth(*xyz)
co_cf_cg = co.coord_frac(cell).coord_grid(grid)
density = self.xmap.get_data(co_cf_cg)
return density
def get_density_at_atom(self, mmol_atom):
if clipper.mode == 0:
co = mmol_atom.coord_orth()
xyz = (co.x(), co.y(), co.z())
elif clipper.mode == 1:
xyz = mmol_atom.coord
return self.get_density_at_point(xyz)
def get_density_scores_at_residue(self, metrics_residue):
all_atom_scores, mainchain_atom_scores, sidechain_atom_scores = [ ], [ ], [ ]
for atom_id, atom in enumerate(metrics_residue.minimol_residue):
is_mainchain = str(atom.name()).strip() in MC_ATOM_NAMES
if clipper.mode == 0:
element = str(atom.element()).strip()
elif clipper.mode == 1:
element = atom.element.strip()
atomic_number = ATOMIC_NUMBERS[element]
density = self.get_density_at_atom(atom)
atom_score = None
density_norm = density / atomic_number
atom_score = -log(norm_cdf((density_norm - self.map_mean) / self.map_std))
all_atom_scores.append(atom_score)
if is_mainchain:
mainchain_atom_scores.append(atom_score)
else:
sidechain_atom_scores.append(atom_score)
all_score, mainchain_score, sidechain_score = None, None, None
if len(all_atom_scores) > 0:
all_score = sum(all_atom_scores) / len(all_atom_scores)
if metrics_residue.is_aa:
if len(mainchain_atom_scores) > 0:
mainchain_score = sum(mainchain_atom_scores) / len(mainchain_atom_scores)
if len(sidechain_atom_scores) > 0:
sidechain_score = sum(sidechain_atom_scores) / len(sidechain_atom_scores)
return all_score, mainchain_score, sidechain_score
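# Editor's usage sketch (hedged; not runnable without CCP4/clipper data). The
# file name and objects below are assumptions for illustration only.
#
#   handler = ReflectionsHandler(f_reflections='refinement.mtz', minimol=mol)
#   all_s, mc_s, sc_s = handler.get_density_scores_at_residue(metrics_residue)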
| wrochira/iris-validation | iris_validation/metrics/reflections.py | reflections.py | py | 6,726 | python | en | code | 1 | github-code | 36 |
34228583172 | # -*- coding: utf-8 -*-
""" Application Factory
This is the entry point to the entire application.
"""
import os
import json
from flask import Flask, render_template, jsonify, flash, redirect, url_for
def create_app(test_config=None):
"""Create an instance of Wallowa Wildlife Checklists"""
app = Flask(__name__, instance_relative_config=True)
app.config.from_mapping(
# This secret will be overriden with the instance config.
SECRET_KEY='dev',
# Store the database in the instance folder.
DATABASE=os.path.join(app.instance_path, 'wallowawildlife.sqlite'),
# Read in the client_id for google login.
CLIENT_ID=json.loads(
open('wallowawildlife/client_secrets.json', 'r')
.read())['web']['client_id']
)
if test_config is None:
# Load the instance config.
app.config.from_pyfile('config.py', silent=True)
else:
# Otherwise, load the test config.
app.config.update(test_config)
# Make the instance folder if it doesn't exist.
try:
os.makedirs(app.instance_path)
except OSError:
pass
# Make the database available.
from wallowawildlife.db import get_db
@app.route('/')
def index():
"""Handle the index route"""
db = get_db()
types = db.execute('SELECT * FROM creature_type').fetchall()
return render_template('front_page.html', types=types)
@app.route('/wildlife/<int:creature_id>/JSON')
def wildlifeCreatureJSON(creature_id):
"""Create JSON endpoint"""
db = get_db()
c = db.execute('SELECT * FROM creature WHERE id = ?',
(creature_id,)).fetchone()
if c:
json_creature = {'id': c['id'],
'name_common': c['name_common'],
'name_latin': c['name_latin'],
'photo_url': c['photo_url'],
'photo_attr': c['photo_attr'],
'wiki_url': c['wiki_url'],
'type': c['type_id']}
return jsonify(json_creature)
else:
return redirect(url_for('index'))
@app.route('/wildlife/<url_text>/JSON')
def wildlifeTypeJSON(url_text):
"""Create JSON endpoint"""
db = get_db()
creatures = db.execute('SELECT * FROM creature \
WHERE type_id = ?',
(url_text,)).fetchall()
if creatures:
json_creatures = [{'id': c['id'],
'name_common': c['name_common'],
'name_latin': c['name_latin'],
'photo_url': c['photo_url'],
'photo_attr': c['photo_attr'],
'wiki_url': c['wiki_url'],
'type': c['type_id']} for c in creatures]
return jsonify(json_creatures)
else:
return redirect(url_for('index'))
@app.route('/wildlife/JSON')
def wildlifeJSON():
"""Create JSON endpoint"""
db = get_db()
creatures = db.execute('SELECT * FROM creature').fetchall()
json_creatures = [{'id': c['id'],
'name_common': c['name_common'],
'name_latin': c['name_latin'],
'photo_url': c['photo_url'],
'photo_attr': c['photo_attr'],
'wiki_url': c['wiki_url'],
'type': c['type_id']} for c in creatures]
return jsonify(json_creatures)
@app.errorhandler(404)
def page_not_found(e):
"""Redirect from all unhandled URLs to the index route"""
return redirect(url_for('index'))
# Register cli db commands.
from . import db
db.init_app(app)
# Apply blueprints.
from . import auth
app.register_blueprint(auth.bp)
from . import lists
app.register_blueprint(lists.bp)
app.add_url_rule('/', endpoint='index')
return app
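# Editor's usage note (hedged): with this application factory, the app is
# typically served via the Flask CLI; the commands below are an assumption
# based on standard Flask usage, not taken from this repo.
#
#   export FLASK_APP=wallowawildlife
#   flask run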
| wicker/Wallowa-Wildlife-Checklist-App | wallowawildlife/__init__.py | __init__.py | py | 4,136 | python | en | code | 1 | github-code | 36 |
16921327452 | #!/usr/bin/env python3
"""
Advent of Code 2022 - Elf Rucksack Reorganization
A given rucksack always has the same number of items in each of its two compartments
Lowercase item types a through z have priorities 1 through 26.
Uppercase item types A through Z have priorities 27 through 52.
Find the item type that appears in both compartments of each rucksack.
What is the sum of the priorities of those item types?
"""
import argparse
from typing import List
def get_priority(letter: str) -> int:
"""Assigns the priority to each item passed in
Args:
letter: single alphabetical letter in upper or lower case
Returns: conversion of character to expected int
"""
# ord "a" = 97
# ord "A" = 65
if letter.isupper() is False:
return ord(letter) - 96
return ord(letter) - 38
def compare_comparts(compartment1: str, compartment2: str) -> str:
"""Returns the priority of item in common between compartments
Args:
compartment1: string of letters representing items
compartment2: string of letters representing items
Returns: string of single letter shared between compartment[1|2]
"""
item_in_common = list(set(compartment1).intersection(compartment2))
return get_priority(item_in_common[0])
def elf_grouping(rucksacks: list, group_size: int) -> list:
"""Returns list of elf groups of specified size
Args:
rucksacks: list of all the rucksacks in camp
group_size: number of elves in a rucksack priority group
Returns: list of organised elf groups by group_size
"""
return [rucksacks[x:x+group_size] for x in range(0, len(rucksacks), group_size)]
def fetch_badge(group_of_bags: List[list]) -> int:
"""Returns the priority of item in common for an elf grouping
Args:
group_of_bags: A list containing the list of elf rucksacks in a group
Returns: the shared badge priority for the group of elves
"""
badge = list(set.intersection(*map(set, group_of_bags)))
return get_priority(badge[0])
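# Editor's examples (hedged), evaluated by hand from the helpers above:
#   get_priority('a') -> 1, get_priority('Z') -> 52
#   elf_grouping(['ab', 'bc', 'cd', 'de', 'ef', 'fg'], 3)
#       -> [['ab', 'bc', 'cd'], ['de', 'ef', 'fg']]
#   fetch_badge(['abc', 'bcd', 'cde']) -> 3  (shared badge 'c')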
# pylint: disable=R0914
def main() -> None:
"""Main function to generate total score of elven rock, paper, scissors
"""
    parser = argparse.ArgumentParser("Input list to work out the rucksack priorities")
    parser.add_argument("--input_list", help="Input rucksack list as txt file", type=str)
    args = parser.parse_args()
rucksacks = []
input_file = args.input_list
with open (input_file, encoding="utf-8") as file:
rucksack_in = file.read()
rucksacks = rucksack_in.splitlines()
item_dups = []
for rucksack in rucksacks:
rucksack_objs = list(rucksack)
rucksack_size = len(rucksack_objs)
compartment_size = int(rucksack_size / 2)
comp1 = slice(0, compartment_size)
comp2 = slice(compartment_size, int(len(rucksack_objs)))
item_dups.append(compare_comparts(rucksack_objs[comp1], rucksack_objs[comp2]))
sum_of_priorities = sum(item_dups)
print(f"Sum of priorities in duplicates: {sum_of_priorities}\n")
rucksack_list = [ list(x) for x in rucksacks ]
rucksack_groups = elf_grouping(rucksack_list, 3)
badge_priorities = [ fetch_badge(x) for x in rucksack_groups ]
sum_badge_priors = sum(badge_priorities)
print(f"Sum of badge priorities: {sum_badge_priors}\n")
if __name__ == "__main__":
main()
| CristiGuijarro/NN-aoc-2022 | scripts/elf_rucksack_priorities.py | elf_rucksack_priorities.py | py | 3,390 | python | en | code | 0 | github-code | 36 |
4822893898 | import base64
import sys
from github import Github
ACCESS_TOKEN = ''
REPO_NAME = ''
ACCESS_TOKEN = sys.argv[1]
REPO_NAME = sys.argv[2]
g = Github(ACCESS_TOKEN)
repo = g.get_repo(REPO_NAME)
contents = repo.get_contents("/README.md")
contents_bkp = repo.get_contents("/docs/README.md")
base = contents.content
base = base.replace('\n', '')
text = base64.b64decode(base).decode('utf-8')
base_bkp = contents_bkp.content
base_bkp = base_bkp.replace('\n', '')
text_bkp = base64.b64decode(base_bkp).decode('utf-8')
if text != text_bkp:
repo.update_file(contents_bkp.path,
"docs(README): synchronize README files", text,
contents_bkp.sha)
| BobAnkh/LinuxBeginner | scripts/sync.py | sync.py | py | 697 | python | en | code | 6 | github-code | 36 |
19852300497 | import sys
from faucetpay import faucetpay
sys.path.append("./dbfolder")
from mysqlfunc import mysqldb
faucet=faucetpay()
class payuser:
    @staticmethod
    def sendreward(campaignid, userid):
get_user_email=mysqldb().get_user_Wallet(userid)
ref=mysqldb().get_user_ref(userid)
get_user_referral=mysqldb().get_user_Wallet(ref)
info=mysqldb().campaignid_to_info(campaignid)
cpc=info[5]
adbudget=info[6]
if adbudget-cpc>0 and not mysqldb().is_completed_already(userid,campaignid):
mysqldb().add_ad_completion(userid,campaignid)
total_reward=cpc*100000000
mysqldb().decrease_ad_budget(campaignid)
user_reward=faucet.send_trx_to_user(get_user_email,total_reward/2)
aff_reward=faucet.send_trx_to_user(get_user_referral,total_reward/10)
mysqldb().add_transaction(userid,campaignid,0,total_reward/2,user_reward['payout_id'])
mysqldb().add_transaction(ref,userid,1,total_reward/10,aff_reward['payout_id'])
return True
else:
return False
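# Editor's note (hedged): total_reward is the CPC converted to satoshi
# (cpc * 1e8); half is paid to the completing user and a tenth to the
# referrer, so roughly 40% of each click's budget is retained.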
| developermano/clickbot | payment/pay.py | pay.py | py | 1,086 | python | en | code | 0 | github-code | 36 |
8403757246 | from math import sqrt
import sys
def Input(line):
is_error = True
while is_error:
is_error = False
try:
coeff = int(line)
except ValueError:
try:
coeff = float(line)
except ValueError:
is_error = True
line = input("Некорректный ввод, повторите попытку: ")
return coeff
def A_Input(arg):
try:
coeff = int(arg)
except ValueError:
try:
coeff = float(arg)
except ValueError:
coeff = "e"
return coeff
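# Editor's note: the program below solves the biquadratic equation
# A*x**4 + B*x**2 + C = 0 via the substitution y = x**2, i.e.
# y = (-B +- sqrt(B*B - 4*A*C)) / (2*A), and every root y >= 0 then gives
# the real roots x = +-sqrt(y).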
print("|Аникин Филипп Автандилович, ИУ5-53Б|\n")
A_incorrect = True
if len(sys.argv)>1:
    print("<Command-line argument mode>")
    if len(sys.argv) == 4:
        A = A_Input(sys.argv[1])
        B = A_Input(sys.argv[2])
        C = A_Input(sys.argv[3])
        A_incorrect = False
        if A == "e" or B == "e" or C == "e":
            print("*Invalid arguments, switching to manual input*")
            A_incorrect = True
    else:
        print("*Wrong number of arguments, switching to manual input*")
        A_incorrect = True
if A_incorrect == True:
print("<Введите коэффициенты биквадратного уравнения>")
line = input("A = ")
A = Input(line)
line = input("B = ")
B = Input(line)
line = input("C = ")
C = Input(line)
print("======================================================")
print("A = ", A, "; B = ", B, "; C = ", C, sep='')
D = B*B - 4*A*C
if D-int(D) == 0:
D = int(D)
print("Дискриминант =",D)
print("------------------------------------------------------")
if A != 0:
if D >= 0:
B = -B
A = A + A
D = sqrt(D)
Q1 = (B+D)/A
Q2 = (B-D)/A
if D == 0:
Q2 = -1
D = -1
if Q1 > 0:
D = 1
Q1 = sqrt(Q1)
if Q1-int(Q1) == 0:
Q1 = int(Q1)
print("X", D, " = ", Q1, ", X", D+1, " = ", -Q1, sep='')
D = D + 2
elif Q1 == 0:
D = 1
Q1 = int(Q1)
print("X", D, " = ", Q1, sep='')
D = D + 1
if Q2 >= 0:
Q2 = sqrt(Q2)
if Q2-int(Q2) == 0:
Q2 = int(Q2)
if D == -1:
D = 1
print("X", D, " = ", Q2, ", X", D+1, " = ", -Q2, sep='')
elif Q2 == 0:
if D == -1:
D = 1
Q2 = int(Q2)
print("X", D, " = ", Q2, sep='')
        if D == -1:
            print("No real roots")
    else:
        print("No real roots")
else:
if B!= 0:
Q = -C/B
if Q >= 0:
Q = sqrt(Q)
if Q-int(Q) == 0:
Q = int(Q)
print("X1 = ", -Q, ", X2 = ", Q, sep='')
        else:
            print("No real roots")
    else:
        if C != 0:
            print("No real roots")
        else:
            print("Any number is a solution")
print("======================================================") | MEHT9IPA/Labs_web | Lab_1/Lab_1.py | Lab_1.py | py | 3,397 | python | ru | code | 0 | github-code | 36 |
73604408105 | import sys
import os
import re
from dijkstra import calculate_path
# MAP:
'''
Consists of two sets <E,C>. E is a set of corners {e1,e2,e3, ...} and C the streets connecting those corners.
C is a set of ordered triples {<e1,e2,c>,<e3,e4,c>, <e2,e1,c>} expressing the direction and the length of the streets (c = distance between e1 and e2).
'''
# 1 - Represent corners and streets as a directed, weighted graph using adjacency lists.
class Vertex:
def __init__(self,key):
self.key = key
color = None
parent = None
distance = 'infinite'
f = None
class Graph:
# vertices_list = [v1,v2,v3,...,vn]
# edges_list = [(v1,v2),(v2,v3),...,(vi,vj)]
def __init__(self,vertices_list,edges_list):
# {'e1': memory_element_e1, 'e2': memory_element_e2....}
self.vertices_list = vertices_list
# [['e1','e2',100],['e3','e4',150],...]
self.edges_list = edges_list
self.adj_list = [[] for _ in range(len(self.vertices_list))]
for i in range(len(edges_list)):
vertex_u = self.vertices_list[edges_list[i][0]]
vertex_v = self.vertices_list[edges_list[i][1]]
weight_u_v = int(edges_list[i][2])
self.adj_list[vertex_u.key - 1].append((vertex_v,weight_u_v))
def draw_graph(self):
for i in range(len(self.adj_list)):
print('|',i+1,'|-->',end="")
for vertex in self.adj_list[i]:
print('|',vertex[0].key,'(w:',vertex[1],')|',end="")
print()
def create_map(local_path):
try:
with open(local_path) as file:
# Read the vertices line
vertices_line = file.readline().strip()
# Extract the vertices from the first line
vertices = vertices_line.split("=")[1].strip().strip("{}").split(",")
# Read the edges line
edges_line = file.readline().strip()
# Extract the vertices from the second line
edges_string = edges_line.split("=")[1].strip().strip("{}")
# Regex edges expression
edges_string_regexed = re.findall(r'<(.*?)>', edges_string)
edges_list = []
for edge in edges_string_regexed:
edges_list.append(edge.split(","))
            # Sort vertices
            sorted_vertices = sorted(vertices)
            # Create a dict of vertex objects
            vertices_objects_dict = instantiate_vertex_objects(sorted_vertices)
            # Create the map
            uber_map = Graph(vertices_objects_dict,edges_list)
            write_to_disk(uber_map,'map')
            # Create the dictionary for map elements
map_elements = {}
write_to_disk(map_elements,'map_elements')
path_matrix = calculate_path(uber_map)
write_to_disk(path_matrix,'path_matrix')
print("Map created successfully")
except FileNotFoundError:
print('Error: No such file or directory.')
def instantiate_vertex_objects(elements_list):
dict_vertices = {}
for i in range(len(elements_list)):
dict_vertices[elements_list[i]] = Vertex(int(elements_list[i][1:]))
return dict_vertices
# Functions for writing to and reading from disk
def write_to_disk(data, objectType):
import pickle
file_name = objectType + '_serialized.bin'
with open(file_name,'wb') as file:
pickle.dump(data,file)
def read_from_disk(local_path):
import pickle
with open(local_path,'br') as file:
data = pickle.load(file)
return data
# Fixed and movable elements are stored in a python dictionary
def load_fix_element(name,address):
    # Just like the map is read from disk, should the element dictionary also be stored on disk?
uber_map = read_from_disk('map_serialized.bin')
map_elements = read_from_disk('map_elements_serialized.bin')
    # Validate that the element does not already exist in the map
if (check_name_validity(name,'fixed')):
if (name not in map_elements):
parsed_address = parse_address_input(address)
if (check_element_address(uber_map,parsed_address)):
map_elements[name] = {'address': parsed_address}
write_to_disk(map_elements,'map_elements')
print(f"Fixed element {name} loaded with address: {address}")
else:
print('Not a valid address in map')
else:
print('The element already exists in map.')
else:
print('Not a valid name for a fixed map element')
def load_movil_element(name,address,amount):
uber_map = read_from_disk('map_serialized.bin')
map_elements = read_from_disk('map_elements_serialized.bin')
if (check_name_validity(name,'movil')):
if (name not in map_elements):
parsed_address = parse_address_input(address)
if (check_element_address(uber_map,parsed_address)):
if (amount >= 0):
map_elements[name] = {'address': parsed_address, 'amount': amount}
write_to_disk(map_elements,'map_elements')
print(f"Movil element {name} with amount {amount} loaded with address {address} ")
else:
print('The amount must be at least 0')
else:
print('Not a valid address in map')
else:
print('The element already exists in map.')
else:
print('Not a valid name for a movil map element')
def get_address(map,name):
address = map[name]['address']
return address
def at_same_location(map,name1,name2):
if get_address(map,name1) == get_address(map,name2):
return True
else:
return False
def check_name_validity(name,type):
if (type == 'fixed'):
pattern = r'[HATSEKI]\d+'
valid_name = re.match(pattern,name)
if (type == 'movil'):
pattern = r'[PC]\d+'
valid_name = re.match(pattern,name)
if valid_name:
return True
else:
return False
def parse_address_input(address_input):
pattern = r"<(\w+),(\d+)>"
matches = re.findall(pattern, address_input)
result = [(match[0], int(match[1])) for match in matches]
return result
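# Editor's example (hedged): the address string format implied by the regex:
#   parse_address_input("<e1,100>,<e2,50>") -> [('e1', 100), ('e2', 50)]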
def check_element_address(map,address):
# Address in form [('ex',d1),('ey',d2)]
vertex_u = map.vertices_list[address[0][0]]
vertex_v = map.vertices_list[address[1][0]]
for element in map.adj_list[vertex_u.key - 1]:
vertex = element[0]
if (vertex == vertex_v):
return True
for element in map.adj_list[vertex_v.key - 1]:
vertex = element[0]
if (vertex == vertex_u):
return True
print("There is no street connecting these corners.")
return False
# Trip creation
def create_trip(person,location):
uber_map = read_from_disk('map_serialized.bin')
map_elements = read_from_disk('map_elements_serialized.bin')
    #Determine whether the person is in the map
    if (person in map_elements):
        pair_vertex = (map_elements[person]['address'][0][0],map_elements[person]['address'][1][0])
        person_vertex = person_vertex_ref(map_elements,person,pair_vertex) #takes the vertex the person is referenced to
        #filter the vehicles
        map_cars = {clave: valor for clave, valor in map_elements.items() if clave.startswith('C')} #extract the car dictionary
        map_cars_key = list(map_cars.keys())
        #get the reference vertex of each remaining vehicle
        cars_ref = [] #will store (vehicle, vertex) tuples
for car in map_cars_key:
car_vertexpair = (map_elements[car]['address'][0][0],map_elements[car]['address'][1][0])
cars_ref.append([car,car_vertex_ref(map_elements,car,car_vertexpair)])
        path_matrix = read_from_disk('path_matrix_serialized.bin') #load the path matrix from disk
        #look at the path distances for each car's reference vertex
valueperson_to_ref = calcule_ref(map_elements[person],person_vertex)
for v in cars_ref:
car_vertexpair = (map_elements[v[0]]['address'][0][0],map_elements[v[0]]['address'][1][0])
valuecar_to_ref = calcule_ref(map_elements[v[0]], car_vertex_ref(map_elements,v[0],car_vertexpair))
v.append(path_matrix[person_vertex-1][v[1]-1][0] + valuecar_to_ref + valueperson_to_ref)
cars_ref = sorted(cars_ref, key=lambda x: x[2])
payment = False
        #Return the closest cars
        #Cost computation
for i in range(0,len(cars_ref)):
costo = (cars_ref[i][2] + map_elements[cars_ref[i][0]]['amount'])/4
if costo >= map_elements[person]['amount'] and i==0:
                print('Not enough money for a trip')
break
elif costo >= map_elements[person]['amount']:
break
elif i+1 == 4:
break
else:
payment = True
print(f"{i+1}.Auto {cars_ref[i][0]}: distancia {cars_ref[i][2]}, costo: {costo}")
if payment:
minpath = []
if len(location)>3:
destiny_address = parse_address_input(location)
if check_element_address(uber_map,destiny_address):
virtual_location = 'destiny'
map_elements[virtual_location] = {'address':destiny_address}
                    write_to_disk(map_elements,'map_elements')
location = virtual_location
pair_vertex = (map_elements[location]['address'][0][0],map_elements[location]['address'][1][0])
location_vertex = person_vertex_ref(map_elements,location,pair_vertex)
minpath.append(location_vertex)
nextVertex = path_matrix[person_vertex-1][location_vertex-1][1]
while nextVertex != None:
minpath.append(nextVertex.key)
nextVertex = path_matrix[person_vertex-1][nextVertex.key-1][1]
            print('Shortest route to reach the destination through the corners:')
            print(minpath)
            travel_accepted = input("Do you want to accept the trip? yes/no: ")
            if (travel_accepted == 'yes'):
                # Return the list of the 3 cars to choose from
cars_available = []
for i in range(len(cars_ref)):
if i < 3:
cars_available.append(cars_ref[i][0])
else:
break
                selected_car = input(f"Which car do you want to choose: {cars_available} ")
                if selected_car in cars_available:
                    print(f"You have selected car {selected_car}")
else:
return
else:
print(f"Error. Person {person} not in map.")
def calcule_ref(element,reference):
uber_map = read_from_disk('map_serialized.bin')
if uber_map.vertices_list[element['address'][0][0]].key == reference:
return element['address'][0][1]
elif uber_map.vertices_list[element['address'][1][0]].key == reference:
return element['address'][1][1]
def person_vertex_ref(map_elements,person,pair_vertex):
    #Load the map from disk
    uber_map = read_from_disk('map_serialized.bin')
    #uber_map.draw_graph()
    #Take the pair of vertices the person is referenced to
vertex1 = uber_map.vertices_list[pair_vertex[0]]
vertex2 = uber_map.vertices_list[pair_vertex[1]]
sense = ''
for v in uber_map.adj_list[vertex1.key-1]:
if v[0] == vertex2:
sense = '12'
for v in uber_map.adj_list[vertex2.key-1]:
if v[0] == vertex1 and sense == '':
sense = '21'
elif v[0] == vertex1 and sense == '12':
sense = 'd'
    #If the street is two-way, take the closer end
if sense == 'd':
if map_elements[person]['address'][0][1] <= map_elements[person]['address'][1][1]:
return vertex1.key
else:
return vertex2.key
    #If the direction is from 1 to 2, return vertex 1
    elif sense == '12':
        return vertex1.key
    else:
        #Otherwise return vertex 2
        return vertex2.key
#print(vertex1.key, vertex2.key)
#print(sense)
def car_vertex_ref(map_elements,car,pair_vertex):
    #Load the map from disk
    uber_map = read_from_disk('map_serialized.bin')
    #Take the pair of vertices the car is referenced to
vertex1 = uber_map.vertices_list[pair_vertex[0]]
vertex2 = uber_map.vertices_list[pair_vertex[1]]
sense = ''
for v in uber_map.adj_list[vertex1.key-1]:
if v[0] == vertex2:
sense = '12'
for v in uber_map.adj_list[vertex2.key-1]:
if v[0] == vertex1 and sense == '':
sense = '21'
elif v[0] == vertex1 and sense == '12':
sense = 'd'
    #If the street is two-way, take the closer end
if sense == 'd':
if map_elements[car]['address'][0][1] <= map_elements[car]['address'][1][1]:
return vertex1.key
else:
return vertex2.key
    #If the direction is from 1 to 2, return vertex 2
    elif sense == '12':
        return vertex2.key
    else:
        #Otherwise return vertex 1
return vertex1.key | gabriags/project_AyEDII | code-uber/service.py | service.py | py | 13,703 | python | en | code | 0 | github-code | 36 |
26409439119 | import heapq
from typing import List

def topKFrequent(self, words: List[str], k: int) -> List[str]:
dict = {}
res = []
lista = []
        #haha! I wrote this hash-table word-frequency counter myself -- pretty cool, right
        #but it performs worse than the version below, hehe
'''
for i in words:
dict[i]=len([x for x in words if x == i])
'''
        #use the Counter function from collections, made specifically for counting element frequencies
from collections import Counter
dict = (Counter(words))
        ''' About the Counter function:
from collections import Counter
listb = ["i","love","leetcode","i","love","coding"]
a = (Counter(listb))
print(a)
>>>Counter({'i': 2, 'love': 2, 'leetcode': 1, 'coding': 1})
print(dict(a))
>>>{'i': 2, 'love': 2, 'leetcode': 1, 'coding': 1}
'''
lst = list(dict.items())
        #returns a list of (key, value) tuples
for i in lst:
lista.append((i[1]*(-1),i[0]))
            #swap key and value in the tuple: put the frequency first (times -1) and the word second
heapq.heapify(lista)
        #heapify into a min-heap; because of the -1 factor, the top element has the largest frequency
for _ in range(k):
element = heapq.heappop(lista)
res.append(element[1])
return res
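# Editor's demo (hedged): LeetCode normally supplies the driver; a local check:
#   Solution().topKFrequent(["i","love","leetcode","i","love","coding"], 2)
#   -> ['i', 'love']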
| lpjjj1222/leetcode-notebook | 692. Top K Frequent Words.py | 692. Top K Frequent Words.py | py | 1,460 | python | zh | code | 0 | github-code | 36 |
41571056432 | import lightgbm as lgb
from sklearn.linear_model import LogisticRegression
import pandas as pd
import numpy as np
#load the data
file_dir='E:\\GDBT_LR\\loan\\'
train_data='gbdt_train.csv'
test_data='gdbt_test.csv'
train=pd.read_csv(file_dir+train_data)
test=pd.read_csv(file_dir+test_data)
#drop the unused index column
del train['Unnamed: 0']
del test['Unnamed: 0']
#select the dataset columns
#NOTE (editor, hedged): `data_list` was never defined in the original file;
#using all training columns is an assumption that keeps the script runnable.
data_list=list(train.columns)
data=train[data_list]
test_data=test[data_list]
#build the training and test sets
feature=[x for x in data_list if x!='loan_status']
X_train=data[feature]
y_train=data['loan_status']
X_test=test_data[feature]
y_test=test_data['loan_status']
# build the lgb classifier
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': {'binary_logloss'},
'num_leaves': 64,
'num_trees': 100,
'learning_rate': 0.01,
'feature_fraction': 0.9,
'bagging_fraction': 0.8,
'bagging_freq': 5,
'verbose': 0
}
# set the number of leaf nodes
num_leaf = 64
print('Start training...')
# train
gbm = lgb.train(params,
lgb_train,
num_boost_round=100,
valid_sets=lgb_train)
print('Save model...')
# save model to file
gbm.save_model('model.txt')
print('Start predicting...')
# predict and get data on leaves, training data
y_pred = gbm.predict(X_train, pred_leaf=True)
print(np.array(y_pred).shape)
print(y_pred[0])
#matrix with num-samples rows and num-trees * num-leaves columns
transformed_training_matrix = np.zeros([len(y_pred), len(y_pred[0]) * num_leaf],
dtype=np.int64) # N * num_tress * num_leafs
#split the transform matrix by tree: temp holds the flat index of each tree's predicted leaf, and the matching positions in the big matrix are incremented by one
for i in range(0, len(y_pred)):
temp = np.arange(len(y_pred[0])) * num_leaf + np.array(y_pred[i])
transformed_training_matrix[i][temp] += 1
#apply the same transform to the test set
y_pred = gbm.predict(X_test, pred_leaf=True)
print('Writing transformed testing data')
transformed_testing_matrix = np.zeros([len(y_pred), len(y_pred[0]) * num_leaf], dtype=np.int64)
for i in range(0, len(y_pred)):
temp = np.arange(len(y_pred[0])) * num_leaf + np.array(y_pred[i])
transformed_testing_matrix[i][temp] += 1
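# Editor's note (hedged): with num_trees=100 and num_leaf=64, each sample maps
# to a 6400-dim one-hot block vector -- if tree t routes the sample to leaf l,
# index t*64 + l is set to 1. The logistic regression below then learns one
# weight per (tree, leaf) indicator.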
lm = LogisticRegression(penalty='l2',C=0.05) # logestic model construction
lm.fit(transformed_training_matrix,y_train) # fitting the data
y_pred_test = lm.predict_proba(transformed_testing_matrix) # Give the probabilty on each label
print(y_pred_test)
NE = (-1) / len(y_pred_test) * sum(((1+y_test)/2 * np.log(y_pred_test[:,1]) + (1-y_test)/2 * np.log(1 - y_pred_test[:,1])))
print("Normalized Cross Entropy " + str(NE))
#check precision and recall at a 0.5 threshold
def get_pcr(y_tar,y_pre):
id_list=[]
for i in range(len(y_tar)):
if y_tar[i]==1:
id_list.append(i)
right_n=0
for i in id_list:
if y_pre[i][0]<y_pre[i][1]:
right_n+=1
pre_id=[]
for i in range(len(y_pre)):
if y_pre[i][0]<y_pre[i][1]:
pre_id.append(i)
good_pre=set(pre_id)&set(id_list)
    print('Precision: {}'.format(len(good_pre)/len(pre_id)))
    print('Recall: {}'.format(right_n/len(id_list)))
get_pcr(y_test,y_pred_test)#precision: 0.9205776173285198, recall: 0.6623376623376623
y_pred_train = lm.predict_proba(transformed_training_matrix) # Give the probabilty on each label
get_pcr(y_train,y_pred_train)#precision: 0.9971139971139971, recall: 0.9262734584450402
| hu-minghao/my_program | 贷款违约预测/LGB_LR.py | LGB_LR.py | py | 3,631 | python | en | code | 0 | github-code | 36 |
70806964903 | import sys
from math import log2, ceil
sys.stdin = open('input.txt')
def make_linked(node, left, right):  # record which segment-tree index stores each array position
    if left >= right:
        linked[left] = node  # leaf node: store its index in the array
        return
    make_linked(node*2, left, (left+right)//2)  # visit left children
    make_linked(node*2+1, (left+right)//2+1, right)  # visit right children
return
def query(node, left, right, start, end):  # range query
    if start <= left and right <= end:  # fully inside the query range: return this node's value
        return segment_tree[node]
    elif end < left or right < start:  # no overlap with the query range: return 0
        return 0
    left_nodes = query(node*2, left, (left+right)//2, start, end)  # search left children
    right_nodes = query(node*2+1, (left+right)//2+1, right, start, end)  # search right children
    return left_nodes + right_nodes  # partial overlap: combine the values found in both children
def modify(node):
    if node <= 0:  # stop once everything up to the root has been updated
        return
    segment_tree[node] = segment_tree[node*2] + segment_tree[node*2+1]  # sum of the child nodes
    modify(node//2)  # move up to the parent
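# Editor's note (hedged): for N = 4 the tree occupies indices 1..7; make_linked
# stores the leaf indices 4..7 in linked[1..4], and query(1, 1, N, l, r)
# returns the sum of positions l..r.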
N, M = map(int, sys.stdin.readline().split())  # number of values, number of commands
segment_tree = [0] * 2**(ceil(log2(N))+1)  # initialize the segment tree
linked = [0] * (N+1)  # segment-tree leaf index for each array position
make_linked(1, 1, N)  # record the positions
for _ in range(M):
command, a, b = map(int, sys.stdin.readline().split())
    if not command:  # sum query
        if a > b:  # swap so that a <= b
            a, b = b, a
        print(query(1, 1, N, a, b))  # run the query
    else:
        segment_tree[linked[a]] = b  # update the value at position a
        modify(linked[a]//2)  # update the parent nodes | unho-lee/TIL | CodeTest/Python/BaekJoon/2268.py | 2268.py | py | 2,569 | python | ko | code | 0 | github-code | 36
34627321771 | from pathlib import Path
import os
import datetime
import json
import h5py
import numpy as np
import pandas as pd
import click
import tensorflow as tf
from src.data.tf_data_hdf5 import get_tf_data, RandomStandardization
from src.models.models import unet_model, unetclassif_model
from src.models.losses import CustomLoss, MaskedDiceLoss
from src.models.callbacks import EarlyStopping
from src.models.evaluation import evaluate_pred_volume
DEBUG = False
project_dir = Path(__file__).resolve().parents[2]
splits_path = project_dir / "data/splits.json"
if DEBUG:
EPOCHS = 3
else:
EPOCHS = 400
plot_only_gtvl = False
@click.command()
@click.option("--config", type=click.Path(exists=True))
@click.option("--upsampling-kind", type=click.STRING, default="upsampling")
@click.option("--split", type=click.INT, default=0)
@click.option("--alpha", type=click.FLOAT, default=0.25)
@click.option("--w-gtvl", type=click.FLOAT, default=1.0)
@click.option("--w-gtvt", type=click.FLOAT, default=0.0)
@click.option("--w-lung", type=click.FLOAT, default=0.0)
@click.option("--gpu-id", type=click.STRING, default="0")
@click.option("--random-angle", type=click.FLOAT, default=None)
@click.option("--center-on", type=click.STRING, default="GTVl")
@click.option("--loss-type", type=click.STRING, default="sum_of_dice")
@click.option('--oversample/--no-oversample', default=False)
@click.option('--pretrained/--no-pretrained', default=True)
@click.option('--multitask/--no-multitask', default=False)
@click.option("--random-position/--no-random-position", default=True)
def main(config, upsampling_kind, split, alpha, w_gtvl, w_gtvt, w_lung, gpu_id,
random_angle, center_on, loss_type, oversample, pretrained, multitask,
random_position):
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
h5_file = h5py.File(
project_dir / "data/processed/hdf5_2d/data_selected_slices.hdf5", "r")
if not pretrained:
n_channels = 2
else:
n_channels = 3
if oversample:
steps_per_epoch = 40
else:
steps_per_epoch = None
clinical_df = pd.read_csv(
project_dir /
"data/clinical_info_with_lung_info.csv").set_index("patient_id")
with open(splits_path, "r") as f:
splits_list = json.load(f)
ids_train = splits_list[split]["train"]
ids_val = splits_list[split]["val"]
ids_test = splits_list[split]["test"]
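    # Editor's note (hedged): splits.json is assumed to be a list of dicts of
    # the form {"train": [...], "val": [...], "test": [...]}, one per CV split,
    # since the code indexes splits_list[split]["train"/"val"/"test"].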
preprocessor = RandomStandardization()
preprocessor_nrdm = RandomStandardization(p=0.0)
if multitask:
f = lambda x, y, plc_status, patient: (preprocessor(x),
(y, plc_status))
f_nrdm = lambda x, y, plc_status, patient: (preprocessor_nrdm(x),
(y, plc_status))
else:
f = lambda x, y, plc_status, patient: (preprocessor(x), y)
f_nrdm = lambda x, y, plc_status, patient: (preprocessor_nrdm(x), y)
ds_train = get_tf_data(h5_file,
clinical_df,
patient_list=ids_train,
shuffle=True,
oversample=oversample,
random_angle=random_angle,
random_position=random_position,
center_on=center_on,
n_channels=n_channels).map(f).batch(16)
ds_val = get_tf_data(h5_file,
clinical_df,
patient_list=ids_val,
center_on="GTVl",
random_slice=False,
n_channels=n_channels).map(f_nrdm).batch(4)
ids_val_pos = [p for p in ids_val if clinical_df.loc[p, "plc_status"] == 1]
ids_val_neg = [p for p in ids_val if clinical_df.loc[p, "plc_status"] == 0]
ds_sample = get_tf_data(h5_file,
clinical_df,
patient_list=ids_val_pos[:2] + ids_val_neg[:1],
center_on="GTVt",
random_slice=False,
n_channels=n_channels).map(f_nrdm).batch(3)
if multitask:
sample_images, sample_outputs = next(
ds_sample.take(1).as_numpy_iterator())
sample_seg = sample_outputs[0]
model = unetclassif_model(3,
upsampling_kind=upsampling_kind,
pretrained=pretrained)
else:
sample_images, sample_seg = next(ds_sample.take(1).as_numpy_iterator())
model = unet_model(3,
upsampling_kind=upsampling_kind,
pretrained=pretrained)
sample_seg = np.stack(
[sample_seg[..., 0], sample_seg[..., 1], sample_seg[..., -1]], axis=-1)
if multitask:
losses = [
MaskedDiceLoss(
w_lung=w_lung,
w_gtvt=w_gtvt,
w_gtvl=w_gtvl,
),
tf.keras.losses.BinaryCrossentropy()
]
else:
losses = MaskedDiceLoss(
w_lung=w_lung,
w_gtvt=w_gtvt,
w_gtvl=w_gtvl,
)
model.compile(
optimizer=tf.keras.optimizers.Adam(1e-3),
loss=losses,
run_eagerly=False,
)
    dir_name = (
        "unet__" +
        f"prtrnd_{pretrained}__a_{alpha}__wt_{w_gtvt}__wl_{w_lung}__wgtvl_{w_gtvl}__"
        f"upsmpl_{upsampling_kind}__" +
        f"split_{split}__ovrsmpl_{oversample}__" + f"con_{center_on}__" +
        f"ltyp_{loss_type}__mltsk_{multitask}__" +
        datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
callbacks = list()
if not DEBUG:
log_dir = str((project_dir / ("logs/fit/" + dir_name)).resolve())
callbacks.append(
tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1))
file_writer_image = tf.summary.create_file_writer(log_dir + '/images')
def log_prediction(epoch, logs):
# Use the model to predict the values from the validation dataset.
if multitask:
sample_pred, sample_pred_pstatus = model.predict(sample_images)
else:
sample_pred = model.predict(sample_images)
if plot_only_gtvl:
sample_pred[..., 0] = 0
sample_pred[..., 2] = 0
# Log the confusion matrix as an image summary.
with file_writer_image.as_default():
tf.summary.image("Validation images",
np.stack(
[
sample_images[..., 0],
sample_images[..., 1],
np.zeros_like(sample_images[..., 0]),
],
axis=-1,
),
step=epoch)
tf.summary.image("Predictions", sample_pred, step=epoch)
tf.summary.image("GTs", sample_seg, step=epoch)
callbacks.extend([
tf.keras.callbacks.LambdaCallback(on_epoch_end=log_prediction),
EarlyStopping(
minimal_num_of_epochs=350,
monitor='val_loss',
patience=20,
verbose=0,
mode='min',
restore_best_weights=True,
)
])
model.fit(
x=ds_train,
epochs=EPOCHS,
validation_data=ds_val,
callbacks=callbacks,
steps_per_epoch=steps_per_epoch,
)
if multitask:
model.trainable = True
callbacks.pop(-1)
callbacks.append(
EarlyStopping(
minimal_num_of_epochs=0,
monitor='val_loss',
patience=20,
verbose=0,
mode='min',
restore_best_weights=True,
))
model.compile(
optimizer=tf.keras.optimizers.Adam(1e-5),
loss=losses,
run_eagerly=False,
)
model.fit(
x=ds_train,
epochs=EPOCHS,
validation_data=ds_val,
callbacks=callbacks,
steps_per_epoch=steps_per_epoch,
)
model_dir = project_dir / ("models/" + dir_name)
model_dir.mkdir()
model.save(model_dir / "model_weight")
roc_test = evaluate_pred_volume(
model,
ids_test,
h5_file,
clinical_df,
n_channels=n_channels,
multitask=multitask,
preprocessor=preprocessor_nrdm,
)
roc_val = evaluate_pred_volume(
model,
ids_val,
h5_file,
clinical_df,
n_channels=n_channels,
multitask=multitask,
preprocessor=preprocessor_nrdm,
)
print(f"The ROC AUC for the val and "
f"test are {roc_val} and {roc_test} respectively.")
if __name__ == '__main__':
main() | voreille/plc_segmentation | src/models/train_model.py | train_model.py | py | 9,009 | python | en | code | 0 | github-code | 36 |
37039563232 | from django.contrib.gis.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.encoding import smart_str
from django.utils.translation import gettext_lazy as _
# Create your models here.
class Country(models.Model):
"""Class for country info"""
name = models.CharField(max_length=255)
shortname = models.CharField(max_length=3)
geom = models.MultiPolygonField(srid=4326)
class Meta:
ordering = ["name"]
db_table = "country"
def __unicode__(self):
return smart_str(self.name)
def __str__(self):
return self.__unicode__()
def natural_key(self):
return self.__unicode__()
class Project(models.Model):
""""""
name = models.CharField(max_length=100)
start_year = models.IntegerField()
pager_status = models.URLField(null=True, blank=True)
mailing_list = models.URLField(null=True, blank=True)
template_link = models.URLField(null=True, blank=True)
termsofuse_link = models.URLField(null=True, blank=True)
class Meta:
ordering = ["name"]
db_table = "project"
def __unicode__(self):
return smart_str(self.name)
def __str__(self):
return self.__unicode__()
def natural_key(self):
return self.__unicode__()
class Organization(models.Model):
"""This table is useful to save info about organizations """
name = models.CharField(max_length=255)
shortname = models.CharField(max_length=25)
address = models.CharField(max_length=250, null=True, blank=True)
city = models.CharField(max_length=250, null=True, blank=True)
country = models.ForeignKey(
Country, on_delete=models.PROTECT, null=True, blank=True
)
email = models.EmailField(null=True, blank=True)
website = models.URLField(max_length=150, null=True, blank=True)
image = models.ImageField(upload_to="logo/organizations/", null=True, blank=True)
geom = models.PointField(
srid=4326,
null=True,
blank=True,
help_text=_("The position of the organization"),
)
class Meta:
ordering = ["name"]
db_table = "organization"
def __unicode__(self):
if self.country:
return smart_str(
"{na} ({co})".format(na=self.name, co=self.country.shortname)
)
else:
return smart_str("{na}".format(na=self.name))
def __str__(self):
return self.__unicode__()
def natural_key(self):
return self.__unicode__()
class ResearchGroup(models.Model):
""""""
name = models.CharField(max_length=255)
shortname = models.CharField(max_length=25)
organization = models.ForeignKey(Organization, on_delete=models.PROTECT)
email = models.EmailField(null=True, blank=True)
website = models.URLField(max_length=150, null=True, blank=True)
image = models.ImageField(upload_to="logo/organizations/", null=True, blank=True)
projects = models.ManyToManyField(Project, through='ResearchGroupProject')
geom = models.PointField(
srid=4326,
null=True,
blank=True,
help_text=_("The position of the organization"),
)
class Meta:
ordering = ["name"]
db_table = "research_group"
def __unicode__(self):
if self.organization.shortname:
return smart_str(
"{na} ({co})".format(na=self.name, co=self.organization.shortname)
)
if self.organization.name:
return smart_str(
"{na} ({co})".format(na=self.name, co=self.organization.name)
)
else:
return smart_str("{na}".format(na=self.name))
def __str__(self):
return self.__unicode__()
def natural_key(self):
return self.__unicode__()
class ResearchGroupProject(models.Model):
researchgroup = models.ForeignKey(ResearchGroup, on_delete=models.PROTECT)
project = models.ForeignKey(Project, on_delete=models.PROTECT)
year = models.IntegerField()
contact_people = models.TextField()
class User(AbstractUser):
"""Extent the abstract user class"""
bio = models.TextField(max_length=500, null=True, blank=True)
image = models.ImageField(upload_to="users/", null=True, blank=True)
    # TODO: could a person be connected with more than one group?
research_group = models.ManyToManyField(ResearchGroup)
projects = models.ManyToManyField(Project)
euromammals_username = models.TextField(max_length=500, null=True, blank=True)
| EUROMAMMALS/website | core/models.py | models.py | py | 4,541 | python | en | code | 0 | github-code | 36 |
24454040368 | import time
from tqdm.auto import tqdm
def show_info_me():
""" Показывает инфо о коллеге """
about_me = {
'ФИО': 'Левченко Алексей',
'Должность': 'Ведущий исследователь данных',
'Блок': 'Технологии',
'Делаю': 'рекомендательные системы в HR',
}
for k, v in about_me.items():
print(f'{k}:{v}')
else:
print('_' * 30)
def show_info_patrik():
about_me = {
'ФИО': 'Патрикеев Михаил Алексеевич',
'Должность': 'Ведущий инженер по разработке',
'Блок': 'Розничный бизнес',
'Делаю': 'Тестирую социальные и зарплатные решения',
}
for k, v in about_me.items():
print(f'{k}:{v}')
else:
print('_' * 30)
def show_info_you_v():
about_me = {
'Имя': 'Валерий',
'Город': 'Самара',
'Должность': 'Главный специалист',
'Блок': 'Операционный центр',
'Занимаюсь': 'Анализом счетов по банковским картам'
}
for k, v in about_me.items():
print(f'{k}:{v}')
else:
print('_' * 30)
def show_info_zylkov():
about_me = {
'ФИО': 'Зыльков Павел',
'Должность': 'Инженер по сопровождению',
'Блок': 'Технологии',
'Делаю': 'Социальные и зарплатные решения'
}
for k, v in about_me.items():
print(f'{k}:{v}')
else:
print('_' * 30)
def show_info_me_():
about_me = {
'ФИО': 'Шахиев Азамат Рафикович',
'Должность': 'Старший специалист отдела безопасности',
'Блок': 'Сервисы',
'Делаю': 'Сопровождение технических средств безопасности'
}
for k, v in about_me.items():
print(f'{k}:{v}')
else:
print('_' * 30)
def info_pro_menya():
about_me = {
'ФИО': 'Нетяга Светлана',
'Должность': 'Главный аудитор',
'Подразделение': 'Управление внутреннего аудита',
'Делаю': 'анализ розничных некредитных операций'
}
for k, v in about_me.items():
print(f'{k}:{v}')
else:
print('_' * 30)
def show_info_me2():
about_me = {
'ФИО': 'Солодова Наталья',
'Должность': 'клиентский менеджер',
'Блок': 'ДомКлик'
}
for k, v in about_me.items():
print(f'{k}:{v}')
else:
print('_' * 30)
def info_pro_menya_ii():
about_me = {
'ФИО': 'Исайкина Ирина',
'Должность': 'Заместитель руководителя ВСП',
'Подразделение': 'ВСП',
'Делаю': 'занимаюсь обслуживанием клиентов и решением их проблем',
}
for k, v in about_me.items():
print(f'{k}:{v}')
else:
print('_' * 30)
def show_info_me_eb():
about_me = {
'ФИО': 'Евгений Бодягин',
'Должность': 'Эксперт Центра подбора в инновационные направления бизнеса',
'Блок': 'HR',
'Делаю': 'Методологию подбора, в т.ч. и подбор D-people и сбор статистики по подбору'
}
for k, v in about_me.items():
print(f'{k}:{v}')
else:
print('_' * 30)
def show_info_you_eg():
about_me = {
'ФИО': 'Евгений Головачев',
'Город': 'Самара',
'Должность': 'клиентский менеджер',
'Блок': 'ДомКлик',
'Занимаюсь': 'Помощь клиентам ипотечного кредитования'
}
for k, v in about_me.items():
print(f'{k}:{v}')
else:
print('_' * 30)
def info_pro_menya_av():
about_me = {
'ФИО': 'Аня Великобратова',
'Должность': 'КМ',
'Подразделение': 'КИБ СРБ'
}
for k, v in about_me.items():
print(f'{k}:{v}')
else:
print('_' * 30)
info_list = [
show_info_me,
info_pro_menya,
show_info_me_,
show_info_me2,
show_info_patrik,
show_info_you_eg,
info_pro_menya_ii,
show_info_you_v,
show_info_zylkov,
show_info_me_eb,
info_pro_menya_av
]
if __name__ == "__main__":
for show_info in info_list:
show_info()
for i in tqdm(range(30)):
time.sleep(1)
    print('Thanks for the info!')
print('_' * 30)
| kcundel/python_da_course | Lesson1/about.py | about.py | py | 5,203 | python | ru | code | 0 | github-code | 36 |
27006538329 | """Pytest fixtures for huesensors tests."""
from copy import deepcopy
from unittest.mock import MagicMock, patch
import pytest
from aiohue import Bridge
from aiohue.sensors import GenericSensor
from homeassistant.components.hue import DOMAIN as HUE_DOMAIN
from homeassistant.components.hue import HueBridge
from homeassistant.components.hue.sensor_base import SensorManager
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from homeassistant.util import slugify
from custom_components.huesensor.data_manager import (
BINARY_SENSOR_MODELS,
HueSensorBaseDevice,
HueSensorData,
)
from .sensor_samples import (
MOCK_GEOFENCE,
MOCK_ZLLLightlevel,
MOCK_ZLLPresence,
MOCK_ZLLTemperature,
)
DEV_ID_SENSOR_1 = "SML_00:17:88:01:02:00:af:28-02"
async def entity_test_added_to_hass(
data_manager: HueSensorData,
entity: HueSensorBaseDevice,
):
"""Test routine to mock the internals of async_added_to_hass."""
entity.hass = data_manager.hass
if entity.unique_id.startswith(BINARY_SENSOR_MODELS):
entity.entity_id = f"binary_sensor.test_{slugify(entity.name)}"
else:
entity.entity_id = f"remote.test_{slugify(entity.name)}"
await entity.async_added_to_hass()
assert data_manager.available
assert entity.unique_id in data_manager.sensors
class MockAsyncCounter:
"""
Call counter for the hue data coordinator.
Used to mock and count bridge updates done with
`await bridge.sensor_manager.coordinator.async_request_refresh()`.
"""
_counter: int = 0
def __await__(self):
"""Dumb await."""
yield
def __call__(self, *args, **kwargs):
"""Call just returns self, increasing counter."""
self._counter += 1
return self
@property
def call_count(self) -> int:
"""Return call counter."""
return self._counter
def add_sensor_data_to_bridge(bridge, sensor_key, raw_data):
"""Append a sensor raw data packed to the mocked bridge."""
bridge.sensors[sensor_key] = GenericSensor(
raw_data["uniqueid"], deepcopy(raw_data), None
)
def _make_mock_bridge(idx_bridge, *sensors):
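    # Build a mocked aiohue Bridge pre-populated with the given raw sensor payloads.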
bridge = MagicMock(spec=Bridge)
bridge.sensors = {}
for i, raw_data in enumerate(sensors):
add_sensor_data_to_bridge(
bridge, f"{raw_data['type']}_{idx_bridge}_{i}", raw_data
)
return bridge
def _mock_hue_bridges(bridges):
# mocking HueBridge at homeassistant.components.hue level
hue_bridges = {}
for i, bridge in enumerate(bridges):
coordinator = MagicMock(spec=DataUpdateCoordinator)
coordinator.async_request_refresh = MockAsyncCounter()
sensor_manager = MagicMock(spec=SensorManager)
sensor_manager.coordinator = coordinator
hue_bridge = MagicMock(spec=HueBridge)
hue_bridge.api = bridge
hue_bridge.sensor_manager = sensor_manager
hue_bridges[i] = hue_bridge
return hue_bridges
@pytest.fixture
def mock_hass():
"""Mock HA object for tests, including some sensors in hue integration."""
hass = MagicMock(spec=HomeAssistant)
hass.data = {
HUE_DOMAIN: _mock_hue_bridges(
[
_make_mock_bridge(
0,
MOCK_ZLLPresence,
MOCK_ZLLLightlevel,
MOCK_ZLLTemperature,
),
_make_mock_bridge(1, MOCK_GEOFENCE),
]
)
}
hass.config = MagicMock()
hass.states = MagicMock()
return hass
def patch_async_track_time_interval():
"""Mock hass.async_track_time_interval for tests."""
return patch(
"custom_components.huesensor.data_manager.async_track_time_interval",
autospec=True,
)
| robmarkcole/Hue-sensors-HASS | tests/conftest.py | conftest.py | py | 3,842 | python | en | code | 346 | github-code | 36 |
8540293674 | import sys
result = {'C':0,'H':0,'O':0}
eachCnt = []
chemical = sys.stdin.readline().rstrip()
M = chemical.replace('+', ' ').replace('=',' ').split(' ') # split the equation into its three molecules
def solve():
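    # Brute-force coefficients x, y, z in 1..10 until x*M1 + y*M2 == z*M3 holds for C, H and O.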
global eachCnt
word = ['C','H','O']
for i in range(1,11):
for j in range(1,11):
for k in range(1,11):
cnt = 0
for w in word:
if(eachCnt[0][w]*i + eachCnt[1][w]*j == eachCnt[2][w]*k):
cnt+=1
if(cnt == 3):
print(i,j,k,sep=' ')
return
for m in M:
    temp = {'C': 0, 'H': 0, 'O': 0}
    for i in range(len(m)):
        if m[i] in ('C', 'H', 'O'):
            if i != len(m)-1 and m[i+1].isdigit():
                temp[m[i]] += int(m[i+1])
            else:
                temp[m[i]] += 1
    eachCnt.append(temp)
solve()
| namhyo01/algo_python | 1907.py | 1907.py | py | 1,212 | python | en | code | 0 | github-code | 36 |
28919190196 | from PIL import Image
import glob
import random
import os
from collections import defaultdict
#################################
test_percentage = 0.20
def partitionRankings(rawRatings, testPercent):
# https://stackoverflow.com/questions/23299099/trying-to-split-list-by-percentage
howManyNumbers = int(round(testPercent*len(rawRatings)))
shuffled = rawRatings[:]
random.shuffle(shuffled)
return shuffled[howManyNumbers:], shuffled[:howManyNumbers]
#################################################################
#### Make Directories (if needed)
#################################################################
caboodle = defaultdict(list)
categories = [x[1] for x in os.walk('./DATA')][0]
if 'train' in categories:
categories.remove('train')
categories.remove('validate')
for category in categories:
val_dir = 'DATA/validate/'+str(category)
train_dir = 'DATA/train/'+str(category)
subdata = []
if not os.path.exists(val_dir):
os.makedirs(val_dir)
if not os.path.exists(train_dir):
os.makedirs(train_dir)
    # Read images (currently either png or jpg format)
    for pattern in ('*.jpg', '*.png'):
        for filename in glob.glob('DATA/'+str(category)+'/'+pattern):
            im = Image.open(filename)
            keep = im.copy()
            subdata.append(keep)
            im.close()
random.shuffle(subdata)
train_sample, test_sample = partitionRankings(subdata, test_percentage)
# Read images (save shuffled images to new train/validate folders in a jpg format)
for i in range(len(train_sample)):
train_sample[i].save(train_dir+'/'+str(category) + str(i) + '.jpg')
for i in range(len(test_sample)):
test_sample[i].save(val_dir+'/'+str(category) + str(i) + '.jpg')
#################################################################
#### Make labels.txt
#################################################################
f = open('DATA/labels.txt', 'w')
for category in categories:
f.write(category+'\n')
f.close()
| melissadale/YouTubeTutorials | TF-Records/DivideData.py | DivideData.py | py | 2,165 | python | en | code | 1 | github-code | 36 |
21120525047 | # coding: utf-8
import torch
import sys
from torch import nn
from TTS.utils.text.symbols import symbols
from TTS.layers.tacotron import Prenet, Encoder, Decoder, PostCBHG
class Tacotron(nn.Module):
def __init__(self,
embedding_dim=256,
linear_dim=1025,
mel_dim=80,
r=5,
padding_idx=None):
super(Tacotron, self).__init__()
self.r = r
self.mel_dim = mel_dim
self.linear_dim = linear_dim
self.embedding = nn.Embedding(
len(symbols), embedding_dim, padding_idx=padding_idx)
#print(" | > Number of characters : {}".format(len(symbols)))
self.embedding.weight.data.normal_(0, 0.3)
self.encoder = Encoder(embedding_dim)
self.decoder = Decoder(256, mel_dim, r)
self.postnet = PostCBHG(mel_dim)
self.last_linear = nn.Sequential(
nn.Linear(self.postnet.cbhg.gru_features * 2, linear_dim),
nn.Sigmoid())
def forward(self, characters, mel_specs=None, mask=None):
B = characters.size(0)
inputs = self.embedding(characters)
# batch x time x dim
encoder_outputs = self.encoder(inputs)
# batch x time x dim*r
mel_outputs, alignments, stop_tokens = self.decoder(
encoder_outputs, mel_specs, mask)
# Reshape
# batch x time x dim
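        # Unfold the reduction factor r: each decoder step emitted r frames at once.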
mel_outputs = mel_outputs.view(B, -1, self.mel_dim)
linear_outputs = self.postnet(mel_outputs)
linear_outputs = self.last_linear(linear_outputs)
return mel_outputs, linear_outputs, alignments, stop_tokens
| JRC1995/Chatbot | TTS/models/tacotron.py | tacotron.py | py | 1,644 | python | en | code | 79 | github-code | 36 |
26620703554 | import matplotlib.pyplot as plt
from matplotlib import style
import matplotlib.dates as mdates
import mpl_finance as mpl
from tkinter import *
from yahoo_fin.stock_info import get_data
import pandas as pd
import plotly.graph_objects as go
class AutoPlot:
def __init__(self):
master = Tk()
Label(master, text="Stock Ticker").grid(row=0)
Label(master, text="Range Start").grid(row=1)
Label(master, text="Range End").grid(row=2)
self.e1 = Entry(master)
self.e2 = Entry(master)
self.e3 = Entry(master)
self.e1.grid(row=0, column=1)
self.e2.grid(row=1, column=1)
self.e3.grid(row=2, column=1)
Button(master, text='Quit', command=master.destroy).grid(row=3, column=0, sticky=W, pady=4)
Button(master, text='Show', command=self.make_plot).grid(row=3, column=1, sticky=W, pady=4)
mainloop()
def make_plot(self):
        # Fetch OHLC data for the requested ticker and date range
df = get_data("{ticker}".format(ticker=self.e1.get()),
start_date = self.e2.get(), end_date = self.e3.get())
df.index = pd.to_datetime(df.index)
fig = go.Figure(data=[go.Candlestick(x=df.index,
open=df['open'],
high=df['high'],
low=df['low'],
close=df['close'])])
fig.show()
AutoPlot()
| MihaiGroza/Automated-Candlestick-Chart-Plot | CandleStick_Chart_Building.py | CandleStick_Chart_Building.py | py | 1,397 | python | en | code | 0 | github-code | 36 |
24842877273 | # -*- coding: utf-8 -*-
from os import path
import os
from wordcloud import WordCloud, STOPWORDS
import requests
import matplotlib.pyplot as plt
# from scipy.misc import imread
import numpy as np
from PIL import Image
import jieba
import jieba.posseg as pseg
import jieba.analyse
def makeCiyun(file_name):
d = path.dirname(__file__)
# Read the whole text.
text = open(path.join(d, file_name), encoding="utf8").read()
    jieba_info = " ".join(jieba.cut(text, cut_all=True))  # join tokens: WordCloud.generate expects a string
font = os.path.join(os.path.dirname(__file__), "ziti.otf")
imgmask = "255fk.jpg"
alice_mask = np.array(Image.open(path.join(d, imgmask)))
# lower max_font_size
wordcloud = WordCloud(
max_font_size=40, font_path=font, mask=alice_mask,
stopwords=STOPWORDS
).generate(jieba_info)
plt.figure()
plt.imshow(wordcloud)
plt.axis("off")
plt.show()
wordcloud.to_file(path.join(d, "xiaoguo.png"))
import json
def getInfo(productId, page):
url = "https://club.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98vv7667&productId=" + \
productId + "&score=0&sortType=5&page=" + str(page) + "&pageSize=10&isShadowSku=0&fold=1"
header = {
'Host': 'club.jd.com',
'Referer': "https://item.jd.com/" + productId + ".html"
}
content = requests.get(url, headers=header).content
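    # Strip the JSONP wrapper (callback name and trailing ");") to leave bare JSON.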
content = content[len("fetchJSON_comment98vv7667("):-2]
# print(type(content))
# open("li.txt", 'w').write(str(content))
# print(content)
content = json.loads((content).decode("GBK"))
comments = content['comments']
infos = ""
for item in comments:
# print(item['content'])
# files.write(item['content'] + "\n")
infos += item['content'] + "\n"
# break
return infos
# print(content)
# files.close()
def start(productId):
file_name = "jd_" + productId + ".txt"
try:
os.remove(file_name)
except Exception as ex:
pass
files = open(file_name, 'a', encoding="utf8")
for i in range(100):
infos = getInfo(productId, i)
files.write(infos)
print("finish", i)
files.write("//*\n")
files.close()
makeCiyun(file_name)
# start("4213316")
makeCiyun("jd_4213316.txt")
| Montage-LSM/ciyun | index_jieba.py | index_jieba.py | py | 2,267 | python | en | code | 0 | github-code | 36 |
5657068183 | import json
import os
import requests
from utils import is_snapshot_week, get_dependency_version, get_latest_tag, get_snapshot_branch, \
get_dependency_version_from_tags
github_token = os.getenv("GITHUB_TOKEN")
headers = {"Authorization": "Bearer " + github_token}
def build_message():
message = '@navigation-ios '
releases_url = "https://api.github.com/repos/mapbox/mapbox-navigation-ios/releases"
releases = requests.get(releases_url, headers=headers).json()
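    # An rc/GA release this week means no snapshot is due, so the short message is returned early.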
if is_snapshot_week(releases):
message += 'Navigation SDK snapshot must be released today (rc or GA release was not released this week).\n'
else:
message += 'Navigation SDK snapshot must not be released today (rc or GA release was released this week).\n'
return message
maps_releases = requests.get(
'https://api.github.com/repos/mapbox/mapbox-maps-ios/releases',
headers=headers
).json()
maps_version = get_dependency_version(maps_releases)
if maps_version:
message += ':white_check_mark: Maps ' + maps_version + ' is ready.\n'
else:
message += ':siren: Expected Maps release was not released.\n'
nav_native_tags = requests.get(
'https://api.github.com/repos/mapbox/mapbox-navigation-native-ios/tags',
headers=headers
).json()
nav_native_version = get_dependency_version_from_tags(nav_native_tags)
if nav_native_version:
message += ':white_check_mark: Nav Native ' + nav_native_version + ' is ready.\n'
else:
message += ':siren: Expected Nav Native release was not released.\n'
tags = requests.get('https://api.github.com/repos/mapbox/mapbox-navigation-ios/tags', headers=headers).json()
latest_tag = get_latest_tag(tags)
snapshot_branch = get_snapshot_branch(latest_tag)
message += 'Snapshot branch is *' + snapshot_branch + '*.\n'
message += '*Release time is today night.*\n'
return message
def send_message(message):
payload = {'text': message, 'link_names': 1}
slack_url = os.getenv("SLACK_WEBHOOK")
requests.post(slack_url, data=json.dumps(payload))
message = build_message()
send_message(message)
| mapbox/mapbox-navigation-ios | scripts/snapshot/pre-snapshot-check.py | pre-snapshot-check.py | py | 2,170 | python | en | code | 821 | github-code | 36 |
39127008451 | # the question link https://codingcompetitions.withgoogle.com/kickstart/round/000000000019ffc8/00000000002d82e6
T = int(input())#input of number of test cases
for x in range(1, T + 1):
n=int(input())#no of entries
s = str(input())#input of entries
c=0
tnop = list(s.split(" "))#conversion of entries to list
for i in range(1,len(tnop)-1):
if int(tnop[i])>int(tnop[i-1]) and int(tnop[i])>int(tnop[i+1]):#checking of the strictly greater than condition
c+=1
print("Case #{}: {}".format(x, c), flush=True)
| NIKHILDUGAR/googlekickstartpy | 2020bBikeTour.py | 2020bBikeTour.py | py | 548 | python | en | code | 4 | github-code | 36 |
12087063894 | import os
import json
import argparse
from multiprocessing import Pool
import string
import shutil
# external libraries
from numpy import argmax
from rouge import Rouge
from tqdm import tqdm
def ROUGE(hypsumm, refsumm):
rouge = Rouge()
rouge.metrics = ['rouge-2']
rouge.stats = ['r']
ref = '\n'.join(refsumm)
hyp = '\n'.join(hypsumm)
if len(hyp.strip()) < 10: return 0
if len(ref.strip()) < 10: return 0
scores = rouge.get_scores(hyp, ref, avg = True)
return scores['rouge-2']['r']
def AVGROUGE(hypsumm, refsumm):
rouge = Rouge()
rouge.stats = ['f']
ref = '\n'.join(refsumm)
hyp = '\n'.join(hypsumm)
if len(hyp.strip()) < 10: return 0
if len(ref.strip()) < 10: return 0
scores = rouge.get_scores(hyp, ref, avg = True)
val = scores['rouge-1']['f'] + scores['rouge-2']['f'] + scores['rouge-l']['f']
return val / 3
###########################################################################
# This class selects 'num_sents' sentences from the full text
# for each sentence in the summary wrt the highest average ROUGE scores.
# Ref: Narayan et.al. NAACL 2018. "Ranking sentences for extractive
# summarization with reinforcement learning"
###########################################################################
class AVGROUGEscorer:
def __init__(self, judgesents, summsents):
self.judgesents = judgesents
self.summsents = summsents
self.labels = [False for sent in judgesents]
def getLabels(self, num_sent = 3):
# [facets: [support groups: [sent indices]]]
for sent in self.summsents:
# get scores with all judgesents
scores = list(map(lambda x: AVGROUGE([sent], [x]), self.judgesents))
# mark top labels
for i in range(num_sent):
index = int(argmax(scores))
self.labels[index] = True
scores[index] = -1
return self.labels
###########################################################################
# This class selects greedily selects the maximal sentences from full text
# to maximize ROUGE scores wrt the summary.
# Ref: Nallapati et. al. AAAI 2017. "SummaRuNNer: A Recurrent Neural Network
# based Sequence Model for Extractive Summarization of Documents"
###########################################################################
class ROUGEscorer:
def __init__(self, judgesents, summsents):
self.judgesents = judgesents
self.summsents = summsents
self.currsents = []
self.labels = [False for sent in judgesents]
def score(self, i):
if self.labels[i]: return 0
t = self.judgesents[i]
if len(t.translate(t.maketrans('', '', string.punctuation + string.whitespace))) < 5: return 0
new = self.currsents + [self.judgesents[i]]
score = ROUGE(new, self.summsents)
return score
def getmaxscore(self):
# with Pool(N_PROCESS) as p:
# scores = p.map(self.score, range(len(self.judgesents)))
# p.close()
# p.terminate()
scores = list(map(self.score, range(len(self.judgesents))))
index = argmax(scores)
return index, scores[index]
def getLabels(self, min_labels = 10):
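        # Greedily add the sentence that most improves ROUGE-2 recall; stop once the score
        # plateaus and at least min_labels sentences have been selected.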
currscore = 0.0
while True:
# select sent index which maximises ROUGE
index, maxscore = self.getmaxscore()
if maxscore <= currscore and len(self.currsents) >= min_labels: break
currscore = maxscore
self.currsents.append(self.judgesents[index])
self.labels[index] = True
#print(len(self.currsents), len(self.judgesents))
return self.labels
def prepare(judgepath, summarypath):
with open(judgepath) as fp:
judgesents = fp.read().splitlines()
with open(summarypath) as fp:
summsents = fp.read().splitlines()
data = {}
# prepare doc
data['doc'] = '\n'.join(judgesents)
# prepare summ
data['summaries'] = '\n'.join(summsents)
scorer = MODEL(judgesents, summsents)
labels = scorer.getLabels()
# prepare labels
data['labels'] = '\n'.join(map(lambda x: str(int(x)), labels))
return data
def generateData(f):
#print(f)
try:
d = prepare(os.path.join(JUDGEPATH, f), os.path.join(SUMMPATH, f))
d['file'] = f
assert len(d['doc'].splitlines()) == len(d['labels'].splitlines()), "INCORRECT Number of sentences and labels"
with open(os.path.join(tmpdir, f), 'w') as fout:
json.dump(d, fout)
except Exception as args:
print("ERROR in", f)
print(args)
#%% MAIN
if __name__ == '__main__':
#PARAMS
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("base_dir", type=str, help="base directory where the other files and folders are present")
parser.add_argument("--method", type=str, choices=['avg_rg', 'm_rg'], default="avg_rg", help="method to use for generating labels.")
parser.add_argument("--separator", type=str, default="$$$", help="separator used in output docs, to separate between the text and labels.")
parser.add_argument("--n_process", type=int, default=1, help="number of subprocesses to use (for parallel computation).")
parser.add_argument("--judgement_dir", type=str, default="judgement", help="subdirectory containing the judgements.")
parser.add_argument("--summary_dir", type=str, default="summary", help="subdirectory containing the summaries.")
parser.add_argument("--tmp_dir", type=str, default="tmp", help="temporary directory where the files will be stored. This directory can be deleted after running.")
parser.add_argument("--out_dir", type=str, default="labelled", help="subdirectory where the output will be stored.")
parser.add_argument("--out_json", type=str, default="labelled.jsonl", help="json-line file where the output will be stored.")
parser.add_argument("--remove_tmp", action='store_true', help="if given any existing files inside tmp_dir will be deleted first. Else they will be reused (they won't be calculated again).")
args = parser.parse_args()
BASE = args.base_dir
JUDGEPATH = os.path.join(BASE, args.judgement_dir)
SUMMPATH = os.path.join(BASE, args.summary_dir)
OUTPATH_JSON = os.path.join(BASE, args.out_json)
OUTPATH = os.path.join(BASE, args.out_dir)
tmpdir = os.path.join(BASE, args.tmp_dir)
METHOD = args.method
if METHOD == 'avg_rg': MODEL = AVGROUGEscorer
elif METHOD == 'm_rg': MODEL = ROUGEscorer
SEP = args.separator
KEEP_TMP = not args.remove_tmp
N_PROCESS = args.n_process
###########################################################################
# CODE STARTS
if not KEEP_TMP:
shutil.rmtree(tmpdir)
if not os.path.exists(tmpdir): os.mkdir(tmpdir)
files = set(next(os.walk(JUDGEPATH))[2])
excludefiles = set(next(os.walk(tmpdir))[2])
files = [f for f in (files - excludefiles)]# if int(f.split('.')[0]) <= 3500]
if N_PROCESS > 1:
with Pool(N_PROCESS) as p:
list(tqdm(p.imap_unordered(generateData, files), total = len(files)))
else:
list(tqdm(map(generateData, files), total = len(files)))
files = next(os.walk(tmpdir))[2]
if not os.path.exists(OUTPATH): os.mkdir(OUTPATH)
with open(OUTPATH_JSON, 'w') as fout:
for f in tqdm(files):
with open(os.path.join(tmpdir, f)) as fp:
try: d = json.load(fp)
except:
os.system('rm ' + os.path.join(tmpdir, f))
continue
with open(os.path.join(OUTPATH, f), 'w') as fout2:
for line, label in zip(d['doc'].split('\n'), d['labels'].split('\n')):
print(line, SEP, label, sep = '', file = fout2)
print(json.dumps(d), file = fout)
| Law-AI/summarization | extractive/abs_to_ext/extractive_labels.py | extractive_labels.py | py | 9,688 | python | en | code | 139 | github-code | 36 |
43300686084 |
# XXX there is much grot here.
# some of this comes from trying to present a reasonably intuitive and
# useful interface, which implies a certain amount of DWIMmery.
# things surely still could be more transparent.
class FormException(Exception):
pass
class Instruction(object):
def __init__(self, fields):
self.fields = fields
self.lfields = [k for (k,v) in fields.iteritems()
if isinstance(v, str)]
#if not self.lfields:
# self.assemble() # for error checking only
def assemble(self):
r = 0
for field in self.fields:
r |= field.encode(self.fields[field])
return r
class IBoundDesc(object):
def __init__(self, desc, fieldmap, assembler):
self.fieldmap = fieldmap
self.desc = desc
self.assembler = assembler
def calc_fields(self, args, kw):
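        # Resolve field values in priority order: specializations, keyword args,
        # defaults, then the remaining positional args.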
fieldsleft = list(self.desc.fields)
fieldvalues = {}
for fname in kw:
kw[fname] = self.fieldmap[fname]
for d in (self.desc.specializations, kw):
for field in d:
fieldsleft.remove(field)
fieldvalues[field] = d[field]
for i in range(min(len(self.desc.defaults), len(fieldsleft) - len(args))):
f, v = self.desc.defaults[i]
fieldvalues[f] = v
fieldsleft.remove(f)
for a in args:
field = fieldsleft.pop(0)
fieldvalues[field] = a
return fieldvalues, fieldsleft
def __call__(self, *args, **kw):
fieldvalues, sparefields = self.calc_fields(args, kw)
if sparefields:
raise FormException('fields %s left'%sparefields)
self.assembler.insts.append(Instruction(fieldvalues))
class IBoundDupDesc(IBoundDesc):
def calc_fields(self, args, kw):
s = super(IBoundDupDesc, self)
fieldvalues, sparefields = s.calc_fields(args, kw)
for k, v in self.desc.dupfields.iteritems():
fieldvalues[k] = fieldvalues[v]
return fieldvalues, sparefields
class IDesc(object):
boundtype = IBoundDesc
def __init__(self, fieldmap, fields, specializations, boundtype=None):
self.fieldmap = fieldmap
self.fields = fields
self.specializations = specializations
self.defaults = ()
if boundtype is not None:
self.boundtype = boundtype
for field in specializations:
if field not in fields:
raise FormException(field)
def __get__(self, ob, cls=None):
if ob is None: return self
return self.boundtype(self, self.fieldmap, ob)
def default(self, **defs):
assert len(defs) == 1
f, v = defs.items()[0]
self.defaults = self.defaults + ((self.fieldmap[f], v),)
return self
def __call__(self, **more_specializatons):
s = self.specializations.copy()
ms = {}
ds = {}
for fname, v in more_specializatons.iteritems():
field = self.fieldmap[fname]
if field not in self.fields:
raise FormException("don't know about '%s' here" % field)
if isinstance(v, str):
ds[field] = self.fieldmap[v]
else:
ms[field] = v
s.update(ms)
if len(s) != len(self.specializations) + len(ms):
raise FormException("respecialization not currently allowed")
if ds:
fields = list(self.fields)
for field in ds:
fields.remove(field)
return IDupDesc(self.fieldmap, tuple(fields), s, ds)
else:
r = IDesc(self.fieldmap, self.fields, s, self.boundtype)
r.defaults = tuple([(f, d) for (f, d) in self.defaults if f not in s])
return r
def match(self, inst):
c = 0
for field in self.fields:
if field in self.specializations:
if field.decode(inst) != self.specializations[field]:
return 0
else:
c += 1
return c
def __repr__(self):
l = []
for field in self.fields:
if field in self.specializations:
l.append('%s=%r'%(field.name, self.specializations[field]))
else:
l.append(field.name)
r = '%s(%s)'%(self.__class__.__name__, ', '.join(l))
if self.boundtype is not self.__class__.boundtype:
r += ' => ' + self.boundtype.__name__
return r
def disassemble(self, name, inst, labels, pc):
kws = []
for field in self.fields:
if field not in self.specializations:
v = field.decode(inst)
for f, d in self.defaults:
if f is field:
if d == v:
break
else:
kws.append('%s=%s'%(field.name, field.r(inst, labels, pc)))
return "%-5s %s"%(name, ', '.join(kws))
class IDupDesc(IDesc):
boundtype = IBoundDupDesc
def __init__(self, fieldmap, fields, specializations, dupfields):
super(IDupDesc, self).__init__(fieldmap, fields, specializations)
self.dupfields = dupfields
def match(self, inst):
for field in self.dupfields:
df = self.dupfields[field]
if field.decode(inst) != df.decode(inst):
return 0
else:
return super(IDupDesc, self).match(inst)
class Form(object):
fieldmap = None
def __init__(self, *fnames):
self.fields = []
bits = {}
overlap = False
for fname in fnames:
if isinstance(fname, str):
field = self.fieldmap[fname]
else:
field = fname
if field.overlap:
overlap = True
for b in range(field.left, field.right+1):
if not overlap and b in bits:
raise FormException("'%s' and '%s' clash at bit '%s'"%(
bits[b], fname, b))
else:
bits[b] = fname
self.fields.append(field)
def __call__(self, **specializations):
s = {}
for fname in specializations:
field = self.fieldmap[fname]
if field not in self.fields:
raise FormException("no nothin bout '%s'"%fname)
s[field] = specializations[fname]
return IDesc(self.fieldmap, self.fields, s)
def __repr__(self):
return '%s(%r)'%(self.__class__.__name__, [f.name for f in self.fields])
| mozillazg/pypy | rpython/jit/backend/ppc/form.py | form.py | py | 6,650 | python | en | code | 430 | github-code | 36 |
74791355622 | import constant
from loguru import logger
from managers import AudioManager
from threading import Event, Thread
class Autonomous(object):
def __init__(self, audio_manager: AudioManager):
self.audio_manager = audio_manager
self.event: Event = Event()
self.event.set()
self.thread: Thread | None = None
def start(self):
if not self.event.is_set():
return
logger.info("Starting Automation")
self.event.clear()
Thread(target=self.run, name="Autonomous Thread").start()
def stop(self):
if self.event.is_set():
return
logger.info("Stopping Automation")
self.event.set()
def toggle(self):
if self.event.is_set():
self.start()
else:
self.stop()
def run(self):
self.audio_manager.play_random_sound(constant.AUTO_CATEGORY)
while not self.event.wait(constant.AUTO_INTERVAL):
self.audio_manager.play_random_sound(constant.AUTO_CATEGORY)
| dezil/R2 | autonomous.py | autonomous.py | py | 1,035 | python | en | code | 1 | github-code | 36 |
8522405005 | import unittest
from BaseTestCases.BaseTestCase import BaseTestCase, os
from Pages.Deployment_Group import DG_Create
from Pages.LoginPage import LoginPage
from DataSource.read_excel import read_excel
from time import sleep
from ddt import ddt,data,unpack
@ddt
class test_DG_Create (BaseTestCase):
@data(*read_excel.get_data_from_excel(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) + '\Data\Test_Data.xlsx','DG'))
@unpack
def test_Create_DG(self,DGname,DGdesc,DGDB):
self.driver.implicitly_wait(30)
LoginPage.login(self,'Administrator','P@ssw0rd')
sleep(3)
DG_Create.DG_screenlink(self)
sleep(3)
DG_Create.DG_createlink(self)
sleep(3)
DG_Create.DG_DetailsPopup(self,DGname,DGdesc,int(DGDB))
#Actual_Msg = DG_Create.DG_toast
DG_Create.save_close_btn(self)
sleep(3)
self.assertEqual(DG_Create.Toast(self),"Deployment Group " + DGname + " has been created.")
# self.assertTrue(DG_Create.DG_toast(DGname))
#print(DG_Create.DG_toast)
if __name__ == '__main__':
unittest.main()
| EFarag/ACE_Project | TestCases/test_DG_Valid_create.py | test_DG_Valid_create.py | py | 1,130 | python | en | code | 0 | github-code | 36 |
34341861666 |
#m7homework7b-SetsDictionaries_2
# Pickled Vegetables
import pickle
def pickled_vege():
pickvege = {'tomato' : '5.00', 'squash' : '2.34'}
print(pickvege)
c = pickvege['tomato'] # print value
print(c)
pickvege['lemon'] = '.25' # add
print(pickvege)
del pickvege['tomato'] # delete
print(pickvege)
pickvege.pop('squash') # remove
print(pickvege)
k = pickvege.keys()
print(k)
f = open('C://pickled.txt', 'wb') # open file for binary writing
pickle.dump(pickvege, f) # write data to new file
f.close() # close the file
d = open('C://pickled.txt', 'rb') # open file to read
read_data = pickle.load(d) # unpickle
print(read_data)
pickled_vege()
| chnldnh/CMPR114_Python | Module7/Module7_HW/m7hw7b_setsDictionaries_2.py | m7hw7b_setsDictionaries_2.py | py | 773 | python | en | code | 0 | github-code | 36 |
70774463784 | from artist_data import ArtistData
import numpy as np
import igraph
class Network:
def __init__(self, data):
self._data = data
self._graph = igraph.Graph()
def graph(self):
return self._graph
def init(self):
self._graph.add_vertices(list(self._data.artists.keys()))
edges = [(max(key, i), min(key, i)) for key, item in self._data.adjacency.items() for i in item]
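        # Normalize each edge to (larger id, smaller id) so duplicates collapse in the set below.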
edges = list(set(edges))
self._graph.add_edges(edges)
self._graph.vs['name'] = [item['name'] for item in self._data.artists.values()]
self._graph.vs['followers'] = [item['followers'] for item in self._data.artists.values()]
self._graph.vs['popularity'] = [item['popularity'] for item in self._data.artists.values()]
self._graph.es['popularity'] = [(self._data.artists[edge[0]]['popularity'] + self._data.artists[edge[1]]['popularity']) / 2 for edge in edges]
def draw(self, layout_name='large', file_format='pdf'):
        '''
        Draw the created artists graph, showing labels only for artists with popularity above the 0.95 quantile.

        :param layout_name: name of the layout algorithm to use. Available options: see igraph documentation
        :type layout_name: str
        :param file_format: file format to which the graph should be saved
        :type file_format: str
        '''
visual_style = {}
visual_style['edge_width'] = [item/50 for item in self._graph.es['popularity']]
visual_style['vertex_color'] = [[0, 1, 0, 0.9] if item['name'] == self._data.name else [1, 0, 0, 0.4] for item in self._data.artists.values()]
quantile = np.quantile([item['popularity'] for item in self._data.artists.values()], 0.95)
visual_style['vertex_label'] = [item['name'] if item['popularity'] > quantile or item['name'] == self._data.name else '' for item in self._data.artists.values()]
visual_style['vertex_label_size'] = [20 if item['name'] == self._data.name else 7 for item in self._data.artists.values()]
visual_style['vertex_label_color'] = [[0, 1, 0, 1] if item['name'] == self._data.name else [0, 0, 0, .8] for item in self._data.artists.values()]
visual_style['vertex_size'] = [item/5 for item in self._graph.vs['popularity']]
name = self._data.name.replace(' ', '')
igraph.plot(
self._graph,
f'data/{name}/net_l{self._data.depth}.{file_format}',
**visual_style,
order=list(self._data.artists.keys()).reverse(),
vertex_frame_width=.1,
layout=self._graph.layout(layout_name),
bbox=(1000,1000),
autocurve=True)
if __name__ == '__main__':
data = ArtistData('Giuseppe Verdi', depth=3)
data.load_adjacency()
data.load_artists()
n = Network(data)
n.init()
n.draw(file_format='png')
| jakubsob/SpotifyArtistsNetwork | network.py | network.py | py | 2,904 | python | en | code | 0 | github-code | 36 |
33646432106 | import asyncio
import threading
import time
import speech_recognition as sr
r = sr.Recognizer()
# def do(audio):
def srcVoice(n, audio):
for i in range(n, 0, -1):
print('sssssss')
# threading.Thread(target=r.recognize_google, args=(audio))
words = r.recognize_google(audio)
print(words, '$$$$$$$$$$$$')
break
def audioLis(source):
print('running')
try:
audio = r.listen(source, 3, 6)
try:
DN = threading.Thread(target=srcVoice, args=(1, audio))
DN.start()
        except Exception:
            pass
except Exception as e:
print(e, '<<<<<<<<<<<<<<<<<<<')
source = 'None'
with sr.Microphone() as source:
while True:
audioLis(source)
# async
# await
| giribabu22/assistant-Nikki-python | thread_voice_src/script.py | script.py | py | 770 | python | en | code | 4 | github-code | 36 |
10625869502 | from typing import List
from eth_vertigo.incremental.store import MutationRecord, IncrementalMutationStore
from eth_vertigo.core import Mutation
class IncrementalRecorder:
def record(self, mutations: List[Mutation]) -> IncrementalMutationStore:
store = IncrementalMutationStore()
store.known_mutations = list(
map(
self._mutation_to_record,
[m for m in mutations if m.crime_scenes]
)
)
return store
@staticmethod
def _mutation_to_record(mutation: Mutation) -> MutationRecord:
result = MutationRecord()
result.new_text = mutation.value
result.original_text = mutation.original_value
result.source_file_name = mutation.source_file_name
result.location = ":".join(map(str, mutation.location))
result.line_number = mutation.line_number
result.crime_scenes = mutation.crime_scenes
return result
| JoranHonig/vertigo | eth_vertigo/incremental/record.py | record.py | py | 958 | python | en | code | 180 | github-code | 36 |
2671254266 | from typing import TypeAlias, Union
from const import MAX_SLOT_NUM, DiffusionSVCInferenceType, EnumInferenceTypes, EmbedderType, VoiceChangerType
from dataclasses import dataclass, asdict, field
import os
import json
@dataclass
class ModelSlot:
slotIndex: int = -1
voiceChangerType: VoiceChangerType | None = None
name: str = ""
description: str = ""
credit: str = ""
termsOfUseUrl: str = ""
iconFile: str = ""
speakers: dict = field(default_factory=lambda: {})
@dataclass
class RVCModelSlot(ModelSlot):
voiceChangerType: VoiceChangerType = "RVC"
modelFile: str = ""
indexFile: str = ""
defaultTune: int = 0
defaultIndexRatio: int = 0
defaultProtect: float = 0.5
isONNX: bool = False
modelType: str = EnumInferenceTypes.pyTorchRVC.value
samplingRate: int = -1
f0: bool = True
embChannels: int = 256
embOutputLayer: int = 9
useFinalProj: bool = True
deprecated: bool = False
embedder: EmbedderType = "hubert_base"
sampleId: str = ""
speakers: dict = field(default_factory=lambda: {0: "target"})
@dataclass
class MMVCv13ModelSlot(ModelSlot):
voiceChangerType: VoiceChangerType = "MMVCv13"
modelFile: str = ""
configFile: str = ""
srcId: int = 107
dstId: int = 100
isONNX: bool = False
samplingRate: int = 24000
speakers: dict = field(default_factory=lambda: {107: "user", 100: "zundamon", 101: "sora", 102: "methane", 103: "tsumugi"})
@dataclass
class MMVCv15ModelSlot(ModelSlot):
voiceChangerType: VoiceChangerType = "MMVCv15"
modelFile: str = ""
configFile: str = ""
srcId: int = 0
dstId: int = 101
f0Factor: float = 1.0
isONNX: bool = False
samplingRate: int = 24000
speakers: dict = field(default_factory=lambda: {})
f0: dict = field(default_factory=lambda: {})
@dataclass
class SoVitsSvc40ModelSlot(ModelSlot):
voiceChangerType: VoiceChangerType = "so-vits-svc-40"
modelFile: str = ""
configFile: str = ""
clusterFile: str = ""
dstId: int = 0
isONNX: bool = False
sampleId: str = ""
defaultTune: int = 0
defaultClusterInferRatio: float = 0.0
noiseScale: float = 0.0
speakers: dict = field(default_factory=lambda: {1: "user"})
@dataclass
class DDSPSVCModelSlot(ModelSlot):
voiceChangerType: VoiceChangerType = "DDSP-SVC"
modelFile: str = ""
configFile: str = ""
diffModelFile: str = ""
diffConfigFile: str = ""
dstId: int = 0
isONNX: bool = False
sampleId: str = ""
defaultTune: int = 0
enhancer: bool = False
diffusion: bool = True
acc: int = 20
kstep: int = 100
speakers: dict = field(default_factory=lambda: {1: "user"})
@dataclass
class DiffusionSVCModelSlot(ModelSlot):
voiceChangerType: VoiceChangerType = "Diffusion-SVC"
modelFile: str = ""
isONNX: bool = False
modelType: DiffusionSVCInferenceType = "combo"
dstId: int = 1
sampleId: str = ""
defaultTune: int = 0
defaultKstep: int = 20
defaultSpeedup: int = 10
kStepMax: int = 100
nLayers: int = 20
nnLayers: int = 20
speakers: dict = field(default_factory=lambda: {1: "user"})
embedder: EmbedderType = "hubert_base"
samplingRate: int = 44100
embChannels: int = 768
@dataclass
class BeatriceModelSlot(ModelSlot):
voiceChangerType: VoiceChangerType = "Beatrice"
modelFile: str = ""
dstId: int = 1
speakers: dict = field(default_factory=lambda: {1: "user1", 2: "user2"})
ModelSlots: TypeAlias = Union[ModelSlot, RVCModelSlot, MMVCv13ModelSlot, MMVCv15ModelSlot, SoVitsSvc40ModelSlot, DDSPSVCModelSlot, DiffusionSVCModelSlot, BeatriceModelSlot]
def loadSlotInfo(model_dir: str, slotIndex: int) -> ModelSlots:
slotDir = os.path.join(model_dir, str(slotIndex))
jsonFile = os.path.join(slotDir, "params.json")
if not os.path.exists(jsonFile):
return ModelSlot()
jsonDict = json.load(open(os.path.join(slotDir, "params.json")))
slotInfoKey = list(ModelSlot.__annotations__.keys())
slotInfo = ModelSlot(**{k: v for k, v in jsonDict.items() if k in slotInfoKey})
if slotInfo.voiceChangerType == "RVC":
slotInfoKey.extend(list(RVCModelSlot.__annotations__.keys()))
return RVCModelSlot(**{k: v for k, v in jsonDict.items() if k in slotInfoKey})
elif slotInfo.voiceChangerType == "MMVCv13":
slotInfoKey.extend(list(MMVCv13ModelSlot.__annotations__.keys()))
return MMVCv13ModelSlot(**{k: v for k, v in jsonDict.items() if k in slotInfoKey})
elif slotInfo.voiceChangerType == "MMVCv15":
slotInfoKey.extend(list(MMVCv15ModelSlot.__annotations__.keys()))
return MMVCv15ModelSlot(**{k: v for k, v in jsonDict.items() if k in slotInfoKey})
elif slotInfo.voiceChangerType == "so-vits-svc-40":
slotInfoKey.extend(list(SoVitsSvc40ModelSlot.__annotations__.keys()))
return SoVitsSvc40ModelSlot(**{k: v for k, v in jsonDict.items() if k in slotInfoKey})
elif slotInfo.voiceChangerType == "DDSP-SVC":
slotInfoKey.extend(list(DDSPSVCModelSlot.__annotations__.keys()))
return DDSPSVCModelSlot(**{k: v for k, v in jsonDict.items() if k in slotInfoKey})
elif slotInfo.voiceChangerType == "Diffusion-SVC":
slotInfoKey.extend(list(DiffusionSVCModelSlot.__annotations__.keys()))
return DiffusionSVCModelSlot(**{k: v for k, v in jsonDict.items() if k in slotInfoKey})
elif slotInfo.voiceChangerType == "Beatrice":
slotInfoKey.extend(list(BeatriceModelSlot.__annotations__.keys()))
return BeatriceModelSlot(**{k: v for k, v in jsonDict.items() if k in slotInfoKey})
else:
return ModelSlot()
def loadAllSlotInfo(model_dir: str):
slotInfos: list[ModelSlots] = []
for slotIndex in range(MAX_SLOT_NUM):
slotInfo = loadSlotInfo(model_dir, slotIndex)
        slotInfo.slotIndex = slotIndex # the slot index is injected dynamically
slotInfos.append(slotInfo)
return slotInfos
def saveSlotInfo(model_dir: str, slotIndex: int, slotInfo: ModelSlots):
slotDir = os.path.join(model_dir, str(slotIndex))
print("SlotInfo:::", slotInfo)
    slotInfo.slotIndex = -1 # the slot index is injected dynamically at load time, so don't persist it
    slotInfoDict = asdict(slotInfo)
json.dump(slotInfoDict, open(os.path.join(slotDir, "params.json"), "w"), indent=4)
| w-okada/voice-changer | server/data/ModelSlot.py | ModelSlot.py | py | 6,366 | python | en | code | 12,673 | github-code | 36 |
24844526241 | from django.contrib import admin
from .models import PrivateChat, Message
# Register your models here.
@admin.register(PrivateChat)
class PrivateChatAdmin(admin.ModelAdmin):
"""Filters, displays and search for django admin"""
list_filter = ('user1', 'user2', )
list_display = ('user1', 'user2')
search_fields = ['user1', 'user2']
@admin.register(Message)
class MessageAdmin(admin.ModelAdmin):
"""Filters, displays and search for django admin"""
list_filter = ('chat', 'sender', )
list_display = ('chat', 'sender', 'message', 'date')
search_fields = ['chat', 'sender'] | lexach91/DateLoc | chat/admin.py | admin.py | py | 605 | python | en | code | 1 | github-code | 36 |
2280932575 | """
Homework.
Keyboard input. If the strings entered from the keyboard are numbers, divide the first by the second.
Handle the division-by-zero error.
If the second number is 0, the program asks for the numbers again.
Also handle the exception raised when letters are entered instead of digits.
"""
def input_number(): # Define the input_number function
    while True: # Start the loop
        num1 = input("Enter the first number: ") # Ask the user for the first number
        num2 = input("Enter the second number: ") # Ask the user for the second number
        try: # statements that may raise an exception
            result = int(num1) / int(num2)
            print(result)
        except ZeroDivisionError: # catch the division-by-zero exception
            result = 0
            print(f"Division by {result} occurred. Enter the numbers again.")
        except ValueError:
            # Catch the exception: the "str" values cannot be converted to "int" (letters were entered instead of digits)
            print("Conversion failed! You entered letters instead of numbers! Goodbye!")
            break # Stop the loop
input_number() # Call the function
| OlegPodg/Python_lesson | Podgornyj_104_lesson14.py | Podgornyj_104_lesson14.py | py | 1,827 | python | ru | code | 0 | github-code | 36 |
19868628701 | from ducktape.services.background_thread import BackgroundThreadService
from ducktape.utils.util import wait_until
import os
import subprocess
def is_int(msg):
"""Default method used to check whether text pulled from console consumer is a message.
return int or None
"""
try:
return int(msg)
except:
return None
"""
0.8.2.1 ConsoleConsumer options
The console consumer is a tool that reads data from Kafka and outputs it to standard output.
Option Description
------ -----------
--blacklist <blacklist> Blacklist of topics to exclude from
consumption.
--consumer.config <config file> Consumer config properties file.
--csv-reporter-enabled If set, the CSV metrics reporter will
be enabled
--delete-consumer-offsets If specified, the consumer path in
zookeeper is deleted when starting up
--formatter <class> The name of a class to use for
formatting kafka messages for
display. (default: kafka.tools.
DefaultMessageFormatter)
--from-beginning If the consumer does not already have
an established offset to consume
from, start with the earliest
message present in the log rather
than the latest message.
--max-messages <Integer: num_messages> The maximum number of messages to
consume before exiting. If not set,
consumption is continual.
--metrics-dir <metrics dictory> If csv-reporter-enable is set, and
this parameter isset, the csv
metrics will be outputed here
--property <prop>
--skip-message-on-error If there is an error when processing a
message, skip it instead of halt.
--topic <topic> The topic id to consume on.
--whitelist <whitelist> Whitelist of topics to include for
consumption.
--zookeeper <urls> REQUIRED: The connection string for
the zookeeper connection in the form
host:port. Multiple URLS can be
given to allow fail-over.
"""
class ConsoleConsumer(BackgroundThreadService):
# Root directory for persistent output
PERSISTENT_ROOT = "/mnt/console_consumer"
STDOUT_CAPTURE = os.path.join(PERSISTENT_ROOT, "console_consumer.stdout")
STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, "console_consumer.stderr")
LOG_DIR = os.path.join(PERSISTENT_ROOT, "logs")
LOG_FILE = os.path.join(LOG_DIR, "console_consumer.log")
LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "tools-log4j.properties")
CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "console_consumer.properties")
logs = {
"consumer_stdout": {
"path": STDOUT_CAPTURE,
"collect_default": False},
"consumer_stderr": {
"path": STDERR_CAPTURE,
"collect_default": False},
"consumer_log": {
"path": LOG_FILE,
"collect_default": True}
}
def __init__(self, context, num_nodes, kafka, topic, message_validator=None, from_beginning=True, consumer_timeout_ms=None):
"""
Args:
context: standard context
num_nodes: number of nodes to use (this should be 1)
kafka: kafka service
topic: consume from this topic
message_validator: function which returns message or None
from_beginning: consume from beginning if True, else from the end
consumer_timeout_ms: corresponds to consumer.timeout.ms. consumer process ends if time between
successively consumed messages exceeds this timeout. Setting this and
waiting for the consumer to stop is a pretty good way to consume all messages
in a topic.
"""
super(ConsoleConsumer, self).__init__(context, num_nodes)
self.kafka = kafka
self.args = {
'topic': topic,
}
self.consumer_timeout_ms = consumer_timeout_ms
self.from_beginning = from_beginning
self.message_validator = message_validator
self.messages_consumed = {idx: [] for idx in range(1, num_nodes + 1)}
@property
def start_cmd(self):
args = self.args.copy()
args['zk_connect'] = self.kafka.zk.connect_setting()
args['stdout'] = ConsoleConsumer.STDOUT_CAPTURE
args['stderr'] = ConsoleConsumer.STDERR_CAPTURE
args['config_file'] = ConsoleConsumer.CONFIG_FILE
cmd = "export LOG_DIR=%s;" % ConsoleConsumer.LOG_DIR
cmd += " export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\";" % ConsoleConsumer.LOG4J_CONFIG
cmd += " /opt/kafka/bin/kafka-console-consumer.sh --topic %(topic)s --zookeeper %(zk_connect)s" \
" --consumer.config %(config_file)s" % args
if self.from_beginning:
cmd += " --from-beginning"
cmd += " 2>> %(stderr)s | tee -a %(stdout)s &" % args
return cmd
def pids(self, node):
try:
cmd = "ps ax | grep -i console_consumer | grep java | grep -v grep | awk '{print $1}'"
pid_arr = [pid for pid in node.account.ssh_capture(cmd, allow_fail=True, callback=int)]
return pid_arr
except (subprocess.CalledProcessError, ValueError) as e:
return []
def alive(self, node):
return len(self.pids(node)) > 0
def _worker(self, idx, node):
node.account.ssh("mkdir -p %s" % ConsoleConsumer.PERSISTENT_ROOT, allow_fail=False)
# Create and upload config file
if self.consumer_timeout_ms is not None:
prop_file = self.render('console_consumer.properties', consumer_timeout_ms=self.consumer_timeout_ms)
else:
prop_file = self.render('console_consumer.properties')
self.logger.info("console_consumer.properties:")
self.logger.info(prop_file)
node.account.create_file(ConsoleConsumer.CONFIG_FILE, prop_file)
# Create and upload log properties
log_config = self.render('tools_log4j.properties', log_file=ConsoleConsumer.LOG_FILE)
node.account.create_file(ConsoleConsumer.LOG4J_CONFIG, log_config)
# Run and capture output
cmd = self.start_cmd
self.logger.debug("Console consumer %d command: %s", idx, cmd)
for line in node.account.ssh_capture(cmd, allow_fail=False):
msg = line.strip()
if self.message_validator is not None:
msg = self.message_validator(msg)
if msg is not None:
self.messages_consumed[idx].append(msg)
def start_node(self, node):
super(ConsoleConsumer, self).start_node(node)
def stop_node(self, node):
node.account.kill_process("java", allow_fail=True)
wait_until(lambda: not self.alive(node), timeout_sec=10, backoff_sec=.2,
err_msg="Timed out waiting for consumer to stop.")
def clean_node(self, node):
if self.alive(node):
self.logger.warn("%s %s was still alive at cleanup time. Killing forcefully..." %
(self.__class__.__name__, node.account))
node.account.kill_process("java", clean_shutdown=False, allow_fail=True)
node.account.ssh("rm -rf %s" % ConsoleConsumer.PERSISTENT_ROOT, allow_fail=False)
| sundapeng/kafka | tests/kafkatest/services/console_consumer.py | console_consumer.py | py | 8,250 | python | en | code | 0 | github-code | 36 |
27653577571 | from pages.courses.register_courses_page import Register_courses_page
import unittest
import pytest
from utilities.teststatus import StatusVerify
@pytest.mark.usefixtures("oneTimeSetUp", "setUp")
class Register_course_tests(unittest.TestCase):
@pytest.fixture(autouse=True)
def classSetup(self, oneTimeSetUp):
self.rcp = Register_courses_page(self.driver)
self.ts = StatusVerify(self.driver)
@pytest.mark.run(order=1)
def test_Invalid_Enrollment(self):
        self.rcp.enterCourseToEnroll("Javascript")
self.rcp.selectCourseToEnroll()
self.rcp.enterCreditCardinformation("4900000000000086", "1218", "123", "560102")
# self.rcp.enterCardNumber("4900000000000086")
# self.rcp.enterCardExp("1218")
# self.rcp.enterCardCvc("123")
# self.rcp.enterpostalcode("560102")
self.rcp.enrollInCourse()
result = self.rcp.captureErrorMsg()
#self.ts.markFinal("test_Invalid_Enrollment", result, "The card was declined.")
assert result == "The card was declined."
| akanksha2306/selenium_python_practice | tests/courses/test_register_courses.py | test_register_courses.py | py | 1,073 | python | en | code | 0 | github-code | 36 |
71591819304 | from django.shortcuts import render
from django.http.response import JsonResponse
from rest_framework.parsers import JSONParser
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import TokenAuthentication
from .models import Album, Track
from .serializer import AlbumSerializer
# Function based views
@api_view(['GET', 'POST'])
def album_list(request):
#Function to GET all albums
if request.method == 'GET':
albums = Album.objects.all()
title = request.GET.get('album_name', None)
if title is not None:
albums = albums.filter(title__icontains=title)
albums_serializer = AlbumSerializer(albums, many=True)
return JsonResponse(albums_serializer.data, safe=False)
# Function to POST new album
elif request.method == 'POST':
album_data = JSONParser().parse(request)
album_serializer = AlbumSerializer(data=album_data)
if album_serializer.is_valid():
album_serializer.save()
return JsonResponse(album_serializer.data, status=status.HTTP_201_CREATED)
return JsonResponse(album_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE'])
def album_detail(request, pk):
#Find album by id (pk)
try:
album = Album.objects.get(pk=pk)
except Album.DoesNotExist:
        return JsonResponse({'message': 'The album does not exist'}, status=status.HTTP_404_NOT_FOUND)
# Function to GET a single album
if request.method == 'GET':
        # NOTE: assigning authentication classes inside a function-based view is a
        # no-op; DRF expects the @authentication_classes decorator on the view.
        authentication_classes = (TokenAuthentication,)
album_serializer = AlbumSerializer(album)
return JsonResponse(album_serializer.data)
# Function to PUT a single album
elif request.method == 'PUT':
album_data = JSONParser().parse(request)
album_serializer = AlbumSerializer(album, data=album_data)
if album_serializer.is_valid():
album_serializer.save()
return JsonResponse(album_serializer.data)
return JsonResponse(album_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# Function to DELETE a single album
elif request.method == 'DELETE':
album.delete()
        return JsonResponse({'message':'The album has been deleted'}, status=status.HTTP_204_NO_CONTENT)
10204405519 | from flask import Flask,request,jsonify
from flask_mysqldb import MySQL
app = Flask(__name__)
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = ''
app.config['MYSQL_DB'] = 'library'
mysql = MySQL(app)
def getQuery(sql, params=None):
    # Parameterized execution keeps user input out of the SQL string (avoids injection).
    cursor = mysql.connection.cursor()
    cursor.execute(sql, params or ())
    data=cursor.fetchall()
    return data
def Query(sql, params=None):
    cursor = mysql.connection.cursor()
    cursor.execute(sql, params or ())
    mysql.connection.commit()
@app.route("/")
def index():
return {"code":200,"msg":"READY"}
@app.route("/get-books")
def getBooks():
try:
return {"code":200,"data":getQuery("select * from books")}
except Exception as e:
return {"code":400}
@app.route("/get-book/<book_id>")
def getBook(book_id):
try:
return {"code":200,"data":getQuery("select * from books where book_id={0}".format(book_id))}
except Exception as e:
return {"code":400}
@app.route("/new-book",methods=["POST"])
def addBook():
try:
name=request.form.get("name")
Query("insert into books(name) values('{0}')".format(name))
return {"code":200}
except Exception as e:
return {"code":400,"msg":str(e)}
@app.route("/update-book/<book_id>",methods=["PUT"])
def updateBook(book_id):
try:
name=request.form.get("name")
Query("update books set name='{0}' where book_id={1}".format(name,book_id))
return {"code":200}
except Exception as e:
return {"code":400}
@app.route("/delete-book/<book_id>",methods=["DELETE"])
def deleteBook(book_id):
try:
Query("delete from books where book_id={0}".format(book_id))
return {"code":200}
except Exception as e:
return {"code":400}
if __name__ == '__main__':
app.run(debug=True,use_reloader=True)
| EdgarPozas/APILibraryInFlask | app.py | app.py | py | 1,832 | python | en | code | 0 | github-code | 36 |
70913641705 | import cv2
import numpy as np
import os
import path
import face_recognition
import getopt, sys
def getOriginalData(file):
count_vertices = 0
count_faces = 0
original_coordinates = []
faces_indices = []
texture_coordinates = []
texture_indices = []
oc_file = open("Original_Vertices.txt", "w")
fi_file = open("Face_Indices.txt", "w")
tc_file = open("Texture_Coordinates.txt", "w")
ti_file = open("Texture_Indices.txt", "w")
for line in file.readlines():
content = line.split(" ")
        # Vertex data
if content[0] == "v":
count_vertices += 1
coordinate = []
for i in range(1, 4):
num = float(content[i].replace("\n", ""))
coordinate.append(num)
original_coordinates.append(coordinate)
oc_file.write(str(coordinate) + "\n")
        # Triangle face data
if content[0] == "f":
count_faces += 1
vertex_indices = []
face_texture = []
for i in range(1, 4):
a = int(content[i].split("/")[0])
b = int(content[i].split("/")[1])
vertex_indices.append(a)
face_texture.append(b)
faces_indices.append(vertex_indices)
texture_indices.append(face_texture)
fi_file.write(str(vertex_indices) + "\n")
ti_file.write(str(face_texture) + "\n")
        # Texture data
if content[0] == "vt":
coordinate = [float(content[1]), float(content[2].replace("\n", ""))]
tc_file.write(str(coordinate) + "\n")
texture_coordinates.append(coordinate)
print("共有三角网格顶点 " + str(count_vertices) + " 个")
print("共有三角网格面片 " + str(count_faces) + " 个")
oc_file.close()
fi_file.close()
tc_file.close()
ti_file.close()
return np.array(original_coordinates, dtype=np.float32),\
np.array(faces_indices, dtype=np.int32), \
np.array(texture_indices, dtype=np.int32), \
np.array(texture_coordinates, dtype=np.float32)
def getRoundingCoordinates(coordinates):
rc_file = open("Rounding_Vertices.txt", "w")
rounding_coordinates = np.zeros(coordinates.shape, dtype=np.int32)
for i in range(coordinates.shape[0]):
for j in range(coordinates.shape[1]):
rounding_coordinates[i][j] = int(round(coordinates[i][j], 4) * 10000)
for coordinate in rounding_coordinates:
rc_file.write(str(coordinate) + "\n")
rc_file.close()
return rounding_coordinates
def getAdjustedCoordinates(coordinates, x_min, y_min):
ac_file = open("Adjusted_Vertices.txt", "w")
adjusted_coordinates = np.zeros(coordinates.shape, dtype=np.int32)
print("偏移量 x : " + str(x_min) + "\ty : " + str(y_min))
for i in range(coordinates.shape[0]):
adjusted_coordinates[i][0] = coordinates[i][0] - x_min - 1
adjusted_coordinates[i][1] = coordinates[i][1] - y_min - 1
adjusted_coordinates[i][2] = coordinates[i][2]
for coordinate in adjusted_coordinates:
ac_file.write(str(coordinate) + "\n")
ac_file.close()
return adjusted_coordinates
def renderTexture(texture_coordinates, vertices_coordinates, vertices_indices,
texture_indices, texture_file, image):
    '''
    Shade the image: iterate over every triangle face, fetch its vertex and
    texture indices, and use them to look up the vertex and texture coordinates.
    Each triangle is split by its centroid and medians into three small
    quadrilaterals, and each quadrilateral is filled with its vertex's texture color.
    :param texture_coordinates: texture (UV) coordinates
    :param vertices_coordinates: vertex coordinates
    :param vertices_indices: vertex indices of each triangle face
    :param texture_indices: texture indices of each triangle face
    :param texture_file: texture image file
    :return:
    '''
texture = cv2.imread(texture_file, cv2.IMREAD_COLOR)
    # Get the texture image size
height, width, channels = texture.shape
print("纹理贴图尺寸: " + str(height) + " , " + str(width) + " , " + str(channels))
    # Iterate over the faces
for i in range(vertices_indices.shape[0] - 1, 0, -1):
        # Vertex indices of the current triangle face
index_va = vertices_indices[i][0] - 1
index_vb = vertices_indices[i][1] - 1
index_vc = vertices_indices[i][2] - 1
        # Texture indices of the current triangle face's vertices
index_ta = texture_indices[i][0] - 1
index_tb = texture_indices[i][1] - 1
index_tc = texture_indices[i][2] - 1
        # Vertex coordinates of the current triangle face
va = vertices_coordinates[index_va]
vb = vertices_coordinates[index_vb]
vc = vertices_coordinates[index_vc]
        # Texture coordinates of the current triangle face's vertices
ta = texture_coordinates[index_ta]
tb = texture_coordinates[index_tb]
tc = texture_coordinates[index_tc]
        # Fetch texture BGR values; note the texture is indexed by height (row) first, then width (column)
ca = texture[getTexturePosition(height, 1 - ta[1]), getTexturePosition(width, ta[0])]
cb = texture[getTexturePosition(height, 1 - tb[1]), getTexturePosition(width, tb[0])]
cc = texture[getTexturePosition(height, 1 - tc[1]), getTexturePosition(width, tc[0])]
        # Compute the triangle's centroid
gravity_centre = []
for j in range(3):
gravity_centre.append(int((va[j] + vb[j] + vc[j]) / 3))
        # Compute the midpoint of the edge opposite each vertex; the z coordinate is dropped here
ab = [int((va[0] + vb[0]) / 2), int((va[1] + vb[1]) / 2)]
ac = [int((va[0] + vc[0]) / 2), int((va[1] + vc[1]) / 2)]
bc = [int((vc[0] + vb[0]) / 2), int((vc[1] + vb[1]) / 2)]
cv2.fillConvexPoly(image, np.array([[va[0], va[1]], ab, [gravity_centre[0],
gravity_centre[1]], ac], dtype=np.int32), ca.tolist())
cv2.fillConvexPoly(image, np.array([[vb[0], vb[1]], ab, [gravity_centre[0],
gravity_centre[1]], bc], dtype=np.int32), cb.tolist())
cv2.fillConvexPoly(image, np.array([[vc[0], vc[1]], bc, [gravity_centre[0],
gravity_centre[1]], ac], dtype=np.int32), cc.tolist())
cv2.imwrite("Textured.jpg", color_image)
return
def renderDepth(vertices_coordinates, vertices_indices, depth_image, min_depth, max_depth):
    # Generate the depth image
    # Iterate over the faces
temp_depth_a = np.zeros(depth_image.shape, np.uint8)
temp_depth_b = np.zeros(depth_image.shape, np.uint8)
for i in range(vertices_indices.shape[0] - 1, 0, -1):
        # Vertex indices of the current triangle face
index_va = vertices_indices[i][0] - 1
index_vb = vertices_indices[i][1] - 1
index_vc = vertices_indices[i][2] - 1
        # Vertex coordinates of the current triangle face
va = vertices_coordinates[index_va]
vb = vertices_coordinates[index_vb]
vc = vertices_coordinates[index_vc]
        # Mean depth of the triangle face
mean_depth = (va[2] + vb[2] + vc[2]) / 3
        # Normalize depth to a 0-255 gray level
scale = int((mean_depth - min_depth) / (max_depth - min_depth) * 255)
grey_scale = [scale, scale, scale]
cv2.fillConvexPoly(temp_depth_a, np.array([[va[0], va[1]], [vb[0], vb[1]], [vc[0], vc[1]]], dtype=np.int32), grey_scale)
for i in range(vertices_indices.shape[0]):
        # Vertex indices of the current triangle face
index_va = vertices_indices[i][0] - 1
index_vb = vertices_indices[i][1] - 1
index_vc = vertices_indices[i][2] - 1
        # Vertex coordinates of the current triangle face
va = vertices_coordinates[index_va]
vb = vertices_coordinates[index_vb]
vc = vertices_coordinates[index_vc]
        # Mean depth of the triangle face
mean_depth = (va[2] + vb[2] + vc[2]) / 3
        # Normalize depth to a 0-255 gray level
scale = int((mean_depth - min_depth) / (max_depth - min_depth) * 255)
grey_scale = [scale, scale, scale]
cv2.fillConvexPoly(temp_depth_b, np.array([[va[0], va[1]], [vb[0], vb[1]], [vc[0], vc[1]]], dtype=np.int32), grey_scale)
for row in range(depth_image.shape[0]):
for col in range(depth_image.shape[1]):
front = 0
grey_a = temp_depth_a[row][col][0]
grey_b = temp_depth_b[row][col][0]
if grey_a <= grey_b:
front = grey_b
else:
front = grey_a
depth_image[row][col] = front
cv2.imwrite("Depth.jpg", depth_image)
return
def drawTriangularMesh(vertices_indices, coordinates, image, color):
for faces_index in vertices_indices:
        # Fetch coordinates from the indices
        # print("Triangle mesh index " + str(faces_index))
vertex_a = coordinates[faces_index[0] - 1]
vertex_b = coordinates[faces_index[1] - 1]
vertex_c = coordinates[faces_index[2] - 1]
# print("三角面片顶点坐标为 " + str(vertex_a) + "\t" + str(vertex_b) + "\t" + str(vertex_c))
cv2.line(image, (vertex_a[0], vertex_a[1]), (vertex_b[0], vertex_b[1]), color)
cv2.line(image, (vertex_c[0], vertex_c[1]), (vertex_b[0], vertex_b[1]), color)
cv2.line(image, (vertex_a[0], vertex_a[1]), (vertex_c[0], vertex_c[1]), color)
for coordinate in coordinates:
        # Note: image indexing is (height, width)
image[int(coordinate[1]), int(coordinate[0])] = black
return
def getTexturePosition(length, ratio):
p = int(np.floor(length * ratio))
if p >= length:
p = length - 1
return p
def getFaceFeatures():
    # Obtain facial landmark coordinates via FaceAlignment
image = face_recognition.load_image_file("./Textured.jpg")
face_landmarks_list = face_recognition.face_landmarks(image)
alignment_image = cv2.imread("./Textured.jpg", cv2.IMREAD_COLOR)
alignment_coordinates = open("Alignment.txt", "w")
if len(face_landmarks_list) >= 1:
print("成功检测面部")
else:
print("未检测到面部,请核查输入文件!")
return
for face_landmarks in face_landmarks_list:
        # Print the position of every facial feature in this image
facial_features = [
'chin',
'left_eyebrow',
'right_eyebrow',
'nose_bridge',
'nose_tip',
'left_eye',
'right_eye',
'top_lip',
'bottom_lip'
]
for facial_feature in facial_features:
print("The {} in this face has the following points: {}"
.format(facial_feature, face_landmarks[facial_feature]))
for facial_feature in facial_features:
#alignment_image[][] = face_landmarks[facial_feature]
alignment_coordinates.write(facial_feature + ": " + str(face_landmarks[facial_feature]) + "\n")
alignment_coordinates.close()
return face_landmarks_list[0]
def drawLandmarks(face_landmarks, image):
red = (0, 0, 255)
for face_landmark in face_landmarks.values():
for coordinate in face_landmark:
cv2.circle(image, coordinate, 5, red, -1)
cv2.imwrite("FaceLandMarked.jpg", image)
def landmarksDictToList(face_landmarks):
all_coordinates = []
for face_landmark in face_landmarks.values():
for coordinate in face_landmark:
all_coordinates.append(coordinate)
return all_coordinates
def getSurroundFaces(adjusted_coordinates, vertices_indices, all_coordinates):
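    # For each facial landmark, collect the mesh triangles whose 2D projection contains it (cv2.pointPolygonTest >= 0).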
landmarks_dict = {}
faces_dict = {}
landmark_triangles = open("Landmark_Triangles.txt", "w")
for coordinate in all_coordinates:
landmarks_dict.update({})
faces_dict.update({str(coordinate): []})
for vertices_index in vertices_indices:
index_a = vertices_index[0] - 1
index_b = vertices_index[1] - 1
index_c = vertices_index[2] - 1
va = adjusted_coordinates[index_a]
vb = adjusted_coordinates[index_b]
vc = adjusted_coordinates[index_c]
for coordinate in all_coordinates:
if cv2.pointPolygonTest(np.array([[va[0], va[1]], [vb[0], vb[1]], [vc[0], vc[1]]], dtype=np.int32),
coordinate, False) >= 0:
faces_dict[str(coordinate)].append([va, vb, vc])
for landmark, triangle in faces_dict.items():
landmark_triangles.write(str(landmark) + ":\t" + str(triangle) + "\n")
landmark_triangles.close()
refined = refineTriangleFaces(faces_dict)
return refined
def refineTriangleFaces(faces_dict):
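    # When several triangles contain a landmark, keep the one with the smallest summed z (assumed to be front-most).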
refined = {}
for landmark, triangles in faces_dict.items():
if len(triangles) == 1:
refined.update({str(landmark): triangles[0]})
elif len(triangles) == 0:
refined.update({str(landmark): []})
else:
depth = []
for triangle in triangles:
z = triangle[0][2] + triangle[1][2] + triangle[2][2]
depth.append(z)
index = np.argmin(depth)
refined.update({str(landmark): triangles[index]})
refined_file = open("refined.txt", "w")
for k, v in refined.items():
refined_file.write(str(k) + ":\t" + str(v) + "\n")
refined_file.close()
return refined
def getDistance(feature_a, index_a, feature_b, index_b, landmark_triangles, feature_landmarks, xy = True):
    # to be continued
distance = 0
return distance
def getGlassesDistanceInformation(face_landmarks):
information_file = open("manInformation.txt", "w")
distances = []
a = (face_landmarks['chin'][16][0] - face_landmarks['chin'][0][0])/10
b = (face_landmarks['right_eye'][3][0] - face_landmarks['left_eye'][0][0])/10
c = (face_landmarks['right_eye'][1][0] + face_landmarks['right_eye'][2][0] - face_landmarks['left_eye'][1][0] - face_landmarks['left_eye'][2][0]) / 20
d = (face_landmarks['right_eye'][1][0] - face_landmarks['left_eye'][3][0])/10
h = (face_landmarks['right_eye'][2][1] - face_landmarks['right_eyebrow'][2][1])/10
f = (face_landmarks['nose_bridge'][3][1] - face_landmarks['nose_bridge'][0][1])/10
g = round(0.7 * h, 1)
e = round(2.2 * f, 1)
distances.append(a)
distances.append(b)
distances.append(c)
distances.append(d)
distances.append(e)
distances.append(f)
distances.append(g)
distances.append(h)
print("配镜所需参数依次为...")
for distance in distances:
information_file.write(str(distance) + "\n")
print(str(distance) + " (mm)")
information_file.close()
return distances
def cutPointCloud(old_obj, threshold):
    # Apply a threshold on the z axis to remove duplicated z coordinates.
    # An obj file contains vertices (v), texture coords (vt) and triangle faces
    # (f, which stores indices into the vertex and texture lists).
    # Filtering vertices by z means faces that use a removed vertex must also be
    # removed, and texture entries belonging to removed faces dropped as well.
    # Steps: scan the original obj to build the vertex, texture and face lists;
    # find the indices of vertices to remove; drop faces containing those indices;
    # record the removed faces' texture coords and drop them from the texture list;
    # finally write out the new file.
cut_obj = open("cut_obj.obj", "w")
original_vertices = []
original_texture = []
original_faces = []
remove_vertices_indices = set()
remove_face_indices = set()
remove_texture_indices = set()
for line in old_obj.readlines():
content = line.split(" ")
        # Vertex data
if content[0] == "v":
original_vertices.append(line)
        # Triangle face data
if content[0] == "f":
original_faces.append(line)
        # Texture data
if content[0] == "vt":
original_texture.append(line)
old_obj.close()
print("未裁剪文件 顶点:\t" + str(len(original_vertices)) +
"\t纹理:\t" + str(len(original_texture)) + "\t三角面:\t" + str(len(original_faces)))
for index, line in enumerate(original_vertices):
content = line.split(" ")
if float(content[3]) > threshold:
remove_vertices_indices.add(index)
else:
continue
for index, line in enumerate(original_faces):
content = line.split(" ")
for i in range(1, 4):
v = int(content[i].split("/")[0])
vt = int(content[i].split("/")[1])
if v - 1 in remove_vertices_indices:
remove_face_indices.add(index)
remove_texture_indices.add(vt - 1)
print("需去除点:\t" + str(len(remove_vertices_indices)) + "\t去除纹理:\t" +
str(len(remove_texture_indices)) + "\t去除网格:\t" + str(len(remove_face_indices)))
    # Note: the indices stored in the f records are 1-based,
    # while all indices in the code above are 0-based.
for index, line in enumerate(original_vertices):
if index not in remove_vertices_indices:
cut_obj.write(line)
else:
            # This line only keeps the line numbering aligned; it carries no meaning
cut_obj.write("v 0.042966 -0.094774 0.43439\n")
for index, line in enumerate(original_texture):
if index not in remove_texture_indices:
cut_obj.write(line)
else:
cut_obj.write("vt 0.14193 0.20604\n")
for index, line in enumerate(original_faces):
if index not in remove_face_indices:
cut_obj.write(line)
cut_obj.close()
opts, args = getopt.getopt(sys.argv[1:], "o:t:")
obj_file_path = r"C:\Users\liyanxiang\Desktop\head\resUnWarpMesh.obj"
texture_file_path = r"C:\Users\liyanxiang\Desktop\head\clonedBlur.png"
for opt, value in opts:
print("输入文件 : " + value)
if opt == "-o":
print("obj 文件路径为 : " + value)
obj_file_path = value
if opt == "-t":
print("texture 文件路径为 : " + value)
texture_file_path = value
obj_file = open(obj_file_path, "r")
threshold = 0.45
cutPointCloud(obj_file, threshold)
cut_obj = open("cut_obj.obj", "r")
# original_coordinates  original vertex coordinates
# vertices_indices      triangle-face vertex indices
# texture_indices       triangle-face texture indices
# texture_coordinates   texture (UV) coordinates
original_coordinates, vertices_indices, texture_indices, texture_coordinates = getOriginalData(cut_obj)
cut_obj.close()
rounding_coordinates = getRoundingCoordinates(original_coordinates)
x_max = np.max(rounding_coordinates[:, 0])
x_min = np.min(rounding_coordinates[:, 0])
y_max = np.max(rounding_coordinates[:, 1])
y_min = np.min(rounding_coordinates[:, 1])
z_max = np.max(rounding_coordinates[:, 2])
z_min = np.min(rounding_coordinates[:, 2])
print("X max: " + str(x_max) + "\t\tX min: " + str(x_min) + "\nY max: " + str(y_max) +
"\t\tY min: " + str(y_min) + "\nZ max: " + str(z_max) + "\t\tZ min: " + str(z_min))
height = int(y_max - y_min)
width = int(x_max - x_min)
depth = int(z_max - z_min)
print("图片高度为: " + str(height))
print("图片宽度为: " + str(width))
adjusted_coordinates = getAdjustedCoordinates(rounding_coordinates, x_min, y_min)
color_image = np.zeros((height, width, 3), np.uint8)
depth_image = np.zeros((height, width, 3), np.uint8)
white = (255, 255, 255)
green = (0, 255, 0)
black = (0, 0, 0)
color_image[:, :] = white
depth_image[:, :] = white
faces_coordinates_file = open("Faces_Coordinates.txt", "w")
'''
for coordinate in rounding_coordinates:
blank_image[int(coordinate[1] - y_min - 1)][int(coordinate[0] - x_min - 1)] = black
'''
drawTriangularMesh(vertices_indices, adjusted_coordinates, color_image, green)
cv2.imwrite("Triangular.jpg", color_image)
renderTexture(texture_coordinates, adjusted_coordinates, vertices_indices, texture_indices, texture_file_path, color_image)
drawTriangularMesh(vertices_indices, adjusted_coordinates, color_image, green)
cv2.imwrite("TextureCombineTriangle.jpg", color_image)
renderDepth(adjusted_coordinates, vertices_indices, depth_image, z_max, z_min)
face_landmarks = getFaceFeatures()
drawLandmarks(face_landmarks, color_image)
all_coordinates = landmarksDictToList(face_landmarks)
landmarks_faces = getSurroundFaces(adjusted_coordinates, vertices_indices, all_coordinates)
distances = getGlassesDistanceInformation(face_landmarks)
faces_coordinates_file.close()
cv2.imshow("Created", color_image)
cv2.imwrite("Created.jpg", color_image)
print("image saved!")
#cv2.waitKey(0)
#cv2.destroyWindow()
| liyanxiangable/3DFaceAlignment | FaceAlignment.py | FaceAlignment.py | py | 20,504 | python | en | code | 3 | github-code | 36 |
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import *
def vox_res_module(x, prefix, is_train=True, reuse=False):
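    # Pre-activation residual unit: BN -> ReLU -> Conv applied twice, then added back to the input (identity shortcut).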
w_init = tf.truncated_normal_initializer(stddev=0.01)
bn1 = BatchNormLayer(x, act=tf.nn.relu, is_train=is_train, name=prefix + "bn1")
conv1 = Conv3dLayer(bn1, shape=[1, 3, 3, 64, 64], strides=[1, 1, 1, 1, 1], W_init=w_init, name=prefix + "conv1")
bn2 = BatchNormLayer(conv1, act=tf.nn.relu, is_train=is_train, name=prefix + "bn2")
conv2 = Conv3dLayer(bn2, shape=[3, 3, 3, 64, 64], strides=[1, 1, 1, 1, 1], W_init=w_init, name=prefix + "conv2")
out = ElementwiseLayer([x, conv2], combine_fn=tf.add, name=prefix + "out")
return out
def vox_res_net(x, is_train=True, reuse=False, n_out=3):
with tf.variable_scope("VoxResNet", reuse=reuse):
tl.layers.set_name_reuse(reuse)
w_init = tf.truncated_normal_initializer(stddev=0.01)
inputs = InputLayer(x, name="input")
conv1a = Conv3dLayer(inputs, shape=[3, 3, 3, 1, 32], strides=[1, 1, 1, 1, 1], W_init=w_init, name="conv1a")
bn1 = BatchNormLayer(conv1a, act=tf.nn.relu, is_train=is_train, name="bn1")
conv1b = Conv3dLayer(bn1, shape=[1, 3, 3, 32, 32], strides=[1, 1, 1, 1, 1], W_init=w_init, name="conv1b")
bn2 = BatchNormLayer(conv1b, act=tf.nn.relu, is_train=is_train, name="bn2")
conv1c = Conv3dLayer(bn2, shape=[3, 3, 3, 32, 64], strides=[1, 2, 2, 2, 1], W_init=w_init, name="conv1c")
res1a = vox_res_module(conv1c, "res1a-", is_train)
res2a = vox_res_module(res1a, "res2a-", is_train)
bn3 = BatchNormLayer(res2a, act=tf.nn.relu, is_train=is_train, name="bn3")
conv4 = Conv3dLayer(bn3, shape=[3, 3, 3, 64, 64], strides=[1, 2, 2, 2, 1], W_init=w_init, name="conv4")
res3a = vox_res_module(conv4, "res3a-", is_train)
res4a = vox_res_module(res3a, "res4a-", is_train)
bn4 = BatchNormLayer(res4a, act=tf.nn.relu, is_train=is_train, name="bn4")
conv7 = Conv3dLayer(bn4, shape=[3, 3, 3, 64, 64], strides=[1, 2, 2, 2, 1], W_init=w_init, name="conv7")
res5a = vox_res_module(conv7, "res5a-", is_train)
res6a = vox_res_module(res5a, "res6a-", is_train)
inputs_shape = tf.shape(x)
decon_output_shape = [inputs_shape[0], inputs_shape[1], inputs_shape[2], inputs_shape[3], 4]
decon0a = DeConv3dLayer(conv1b, shape=[3, 3, 3, 4, 32], output_shape=decon_output_shape,
strides=[1, 1, 1, 1, 1], W_init=w_init, name="decon0a")
decon1a = DeConv3dLayer(res2a, shape=[2, 2, 2, 4, 64], output_shape=decon_output_shape, strides=[1, 2, 2, 2, 1],
W_init=w_init, name="decon1a")
decon2a = DeConv3dLayer(res4a, shape=[4, 4, 4, 4, 64], output_shape=decon_output_shape, strides=[1, 4, 4, 4, 1],
W_init=w_init, name="decon2a")
decon3a = DeConv3dLayer(res6a, shape=[8, 8, 8, 4, 64], output_shape=decon_output_shape, strides=[1, 8, 8, 8, 1],
W_init=w_init, name="decon3a")
classifier0a = Conv3dLayer(decon0a, shape=[1, 1, 1, 4, n_out], strides=[1, 1, 1, 1, 1], W_init=w_init,
name="classifier0a")
classifier1a = Conv3dLayer(decon1a, shape=[1, 1, 1, 4, n_out], strides=[1, 1, 1, 1, 1], W_init=w_init,
name="classifier1a")
classifier2a = Conv3dLayer(decon2a, shape=[1, 1, 1, 4, n_out], strides=[1, 1, 1, 1, 1], W_init=w_init,
name="classifier2a")
classifier3a = Conv3dLayer(decon3a, shape=[1, 1, 1, 4, n_out], strides=[1, 1, 1, 1, 1], W_init=w_init,
name="classifier3a")
out = ElementwiseLayer([classifier0a, classifier1a, classifier2a, classifier3a], combine_fn=tf.add,
name="out")
if is_train:
return [classifier0a, classifier1a, classifier2a, classifier3a, out]
else:
return out
| txin96/VoxResNet | model.py | model.py | py | 4,063 | python | en | code | 16 | github-code | 36 |
37360525865 | import argparse
import glob
import json
import logging
import os
import platform
import re
import traceback
from pathlib import Path
import fitz
if platform.system() == "Windows":
logdir = Path(os.environ['USERPROFILE']) / ".pdf_guru"
else:
logdir = Path(os.environ['HOME']) / ".pdf_guru"
logdir.mkdir(parents=True, exist_ok=True)
logpath = str(logdir / "pdf.log")
cmd_output_path = str(logdir / "cmd_output.json")
def dump_json(path, obj):
with open(path, "w", encoding="utf-8") as f:
json.dump(obj, f, ensure_ascii=False)
def parse_range(page_range: str, page_count: int, is_multi_range: bool = False, is_reverse: bool = False, is_unique: bool = True):
# e.g.: "1-3,5-6,7-10", "1,4-5", "3-N", "even", "odd"
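    # A "!" prefix inverts the selection (e.g. "!2-3" keeps all pages except 2-3); mixing "!" and plain parts is rejected below.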
page_range = page_range.strip()
if page_range in ["all", ""]:
roi_indices = list(range(page_count))
return roi_indices
if page_range == "even":
roi_indices = list(range(0, page_count, 2))
return roi_indices
if page_range == "odd":
roi_indices = list(range(1, page_count, 2))
return roi_indices
roi_indices = []
parts = page_range.split(",")
neg_count = sum([p.startswith("!") for p in parts])
pos_count = len(parts) - neg_count
if neg_count > 0 and pos_count > 0:
raise ValueError("页码格式错误:不能同时使用正向选择和反向选择语法")
if pos_count > 0:
for part in parts:
part = part.strip()
if re.match("^!?(\d+|N)(\-(\d+|N))?$", part) is None:
raise ValueError("页码格式错误!")
out = part.split("-")
if len(out) == 1:
if out[0] == "N":
roi_indices.append([page_count-1])
else:
roi_indices.append([int(out[0])-1])
elif len(out) == 2:
if out[1] == "N":
roi_indices.append(list(range(int(out[0])-1, page_count)))
else:
roi_indices.append(list(range(int(out[0])-1, int(out[1]))))
if is_multi_range:
return roi_indices
roi_indices = [i for v in roi_indices for i in v]
if is_unique:
roi_indices = list(set(roi_indices))
roi_indices.sort()
if neg_count > 0:
for part in parts:
part = part.strip()
if re.match("^!?(\d+|N)(\-(\d+|N))?$", part) is None:
raise ValueError("页码格式错误!")
out = part[1:].split("-")
            if len(out) == 1:
                if out[0] == "N":
                    roi_indices.append([page_count-1])
                else:
                    roi_indices.append([int(out[0])-1])
elif len(out) == 2:
if out[1] == "N":
roi_indices.append(list(range(int(out[0])-1, page_count)))
else:
roi_indices.append(list(range(int(out[0])-1, int(out[1]))))
if is_multi_range:
return roi_indices
roi_indices = [i for v in roi_indices for i in v]
if is_unique:
roi_indices = list(set(range(page_count)) - set(roi_indices))
roi_indices.sort()
if is_reverse:
roi_indices = list(set(range(page_count)) - set(roi_indices))
roi_indices.sort()
return roi_indices
def batch_process(func):
def wrapper(*args, **kwargs):
print(f"args: {args}")
print(f"kwargs: {kwargs}")
doc_path = kwargs['doc_path']
if "*" in doc_path:
path_list = glob.glob(doc_path)
print.debug(f"path_list length: {len(path_list) if path_list else 0}")
if path_list:
del kwargs['doc_path']
for path in path_list:
func(*args, doc_path=path, **kwargs)
else:
func(*args, **kwargs)
func(*args, **kwargs)
return wrapper
@batch_process
def convert_docx2pdf(doc_path: str, output_path: str = None):
try:
from docx2pdf import convert
if output_path is None:
p = Path(doc_path)
output_path = str(p.parent / f"{p.stem}.pdf")
convert(doc_path, output_path)
dump_json(cmd_output_path, {"status": "success", "message": ""})
except:
logging.error(traceback.format_exc())
dump_json(cmd_output_path, {"status": "error", "message": traceback.format_exc()})
@batch_process
def convert_pdf2docx(doc_path: str, page_range: str = "all", output_path: str = None):
try:
from pdf2docx import Converter
doc = fitz.open(doc_path)
roi_indices = parse_range(page_range, doc.page_count)
cv = Converter(doc_path)
if output_path is None:
p = Path(doc_path)
output_path = str(p.parent / f"{p.stem}.docx")
cv.convert(output_path, pages=roi_indices)
cv.close()
dump_json(cmd_output_path, {"status": "success", "message": ""})
except:
logging.error(traceback.format_exc())
dump_json(cmd_output_path, {"status": "error", "message": traceback.format_exc()})
def main():
parser = argparse.ArgumentParser(description="Convert functions")
parser.add_argument("input_path", type=str, help="pdf文件路径")
parser.add_argument("--source-type", type=str, choices=["pdf", 'png', "jpg", "svg", "docx"], default="pdf", help="源类型")
parser.add_argument("--target-type", type=str, choices=['png', "svg", "docx"], default="png", help="目标类型")
parser.add_argument("--page_range", type=str, default="all", help="页码范围")
parser.add_argument("-o", "--output", type=str, help="输出文件路径")
args = parser.parse_args()
if args.source_type == "pdf":
if args.target_type == "docx":
convert_pdf2docx(doc_path=args.input_path, page_range=args.page_range, output_path=args.output)
if __name__ == '__main__':
main() | kevin2li/PDF-Guru | thirdparty/convert_external.py | convert_external.py | py | 5,838 | python | en | code | 941 | github-code | 36 |
8439340663 | # Given an array of numbers which is sorted in ascending order and is rotated ‘k’ times around a pivot, find ‘k’.
#
# You can assume that the array does not have any duplicates.
# Input: [10, 15, 1, 3, 8]
# Output: 2
# Explanation: The array has been rotated 2 times.
def count_rotations(arr):
    l, r = 0, len(arr) - 1
    while l < r:
        if arr[l] < arr[r]:
            # this stretch is already sorted, so l is the index of the minimum
            return l
        m = l + (r-l)//2
        if arr[m] > arr[r]:
            # the minimum lies to the right of m
            l = m + 1
        else:
            r = m
    return l  # index of the minimum element == number of rotations
def main():
print(count_rotations([10, 15, 1, 3, 8]))
print(count_rotations([4, 5, 7, 9, 10, -1, 2]))
print(count_rotations([1, 3, 8, 10]))
if __name__ == "__main__":
main()
| kashyapa/coding-problems | educative.io/easy-binary-search/10_rotation_count.py | 10_rotation_count.py | py | 788 | python | en | code | 0 | github-code | 36 |
22771811138 | # -*- coding: utf-8 -*-
# This file is part of CFVVDS.
#
# CFVVDS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# CFVVDS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CFVVDS; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import wx
#Simple printing function. Need more info about vanguard decks.
#TO DO:
class DeckPrinter(wx.Printout):
def __init__(self, deck):
wx.Printout.__init__(self)
self.Deck = deck
self.StartX = 40
self.StartY = 40
self.VSpacer = 60
self.CurrentX = self.StartX
self.CurrentY = self.StartY
self.FirstFont = wx.Font(pointSize=48, family=wx.FONTFAMILY_DEFAULT, style=wx.FONTSTYLE_NORMAL, weight=wx.FONTWEIGHT_BOLD, faceName='Arial')
self.SecondFont = wx.Font(pointSize=48, family=wx.FONTFAMILY_DEFAULT, style=wx.FONTSTYLE_NORMAL, weight=wx.FONTWEIGHT_NORMAL, faceName='Arial')
self.ThirdFont = wx.Font(pointSize=54, family=wx.FONTFAMILY_DEFAULT, style=wx.FONTSTYLE_NORMAL, weight=wx.FONTWEIGHT_BOLD, faceName='Arial')
def OnBeginDocument(self, start, end):
return super(DeckPrinter, self).OnBeginDocument(start, end)
def OnEndDocument(self):
super(DeckPrinter, self).OnEndDocument()
def OnBeginPrinting(self):
super(DeckPrinter, self).OnBeginPrinting()
def OnEndPrinting(self):
super(DeckPrinter, self).OnEndPrinting()
def OnPreparePrinting(self):
super(DeckPrinter, self).OnPreparePrinting()
def HasPage(self, page):
if page <= 1:
return True
else:
return False
def GetPageInfo(self):
return (1, 1, 1, 1)
#TO DO: Change structure to fit CFV decks
def OnPrintPage(self, page):
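        # Render the deck list: a main-deck header, then normal units, then trigger units.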
monsters = self.Deck.GetMonsters()
triggers = self.Deck.GetTrigger()
maindeckcount = len(monsters) + len(triggers)
dc = self.GetDC()
dc.SetFont(self.ThirdFont)
dc.DrawText('Main Deck: ' + str(maindeckcount), self.CurrentX, self.CurrentY)
self.NewLine()
self.NewLine()
dc.SetFont(self.FirstFont)
dc.DrawText('Normal Units: ' + str(len(monsters)), self.CurrentX, self.CurrentY)
self.NewLine()
dc.SetFont(self.SecondFont)
for c in monsters:
dc.DrawText(c.Name, self.CurrentX, self.CurrentY)
self.NewLine()
self.NewLine()
dc.SetFont(self.FirstFont)
dc.DrawText('Trigger Units: ' + str(len(triggers)), self.CurrentX, self.CurrentY)
self.NewLine()
dc.SetFont(self.SecondFont)
for c in triggers:
dc.DrawText(c.Name, self.CurrentX, self.CurrentY)
self.NewLine()
return True
def NewLine(self):
self.CurrentY += self.VSpacer
def AddVSpace(self, n):
self.CurrentY += n
def AddHSpace(self, n):
self.CurrentX += n | swak/cardfight-vanguard-vds | printer.py | printer.py | py | 3,420 | python | en | code | 0 | github-code | 36 |
35319837996 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: mcxiaoke
# @Date: 2015-08-18 20:14:05
from __future__ import unicode_literals, division, absolute_import, print_function
import requests
import shutil
import sys
import signal
import os
import traceback
import time
import logging
import bs4
from lxml import html
import lxml.html.clean  # html.clean is a submodule and must be imported explicitly
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Pool
from fake_useragent import UserAgent
from .const import USER_AGENT_WIN, DEFAULT_REQUEST_TIMEOUT
from .compat import urlparse, json, basestring
from .utils import url_to_filename
logger = logging.getLogger('commons')
############################################################
#
# Network Functions
#
############################################################
random_ua = UserAgent()
default_timeout = DEFAULT_REQUEST_TIMEOUT
def get_headers(url):
u = urlparse(url)
return {
'Referer': '{0}://{1}/'.format(u.scheme, u.netloc),
'User-Agent': '%s' % random_ua.chrome
# 'User-Agent': '%s %s' % (USER_AGENT_WIN, time.time())
}
def request(method, url, encoding=None, **kwargs):
r = requests.request(method, url, timeout=default_timeout,
headers=get_headers(url), **kwargs)
r.encoding = encoding or 'utf-8'
if r.status_code >= 400:
raise IOError("HTTP %s [%s]" % (r.status_code, r.url))
return r
def get(url, encoding=None, **kwargs):
return request('get', url, encoding=encoding, **kwargs)
def post(url, encoding=None, **kwargs):
return request('post', url, encoding=encoding, **kwargs)
def get_stream(url, encoding=None, **kwargs):
return request('get', url, encoding=encoding, stream=True, **kwargs)
def clean_html(text, **kwargs):
c = html.clean.Cleaner(page_structure=False, style=True, **kwargs)
return c.clean_html(html.fromstring(text))
def soup(url, encoding=None, clean=False):
r = get(url, encoding)
text = clean_html(r.text) if clean else r.text
return bs4.BeautifulSoup(text, 'html.parser')
def download_file(url, output=None, filename=None, **kwargs):
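    # Stream the URL to output/filename; files that already exist are skipped.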
assert isinstance(url, basestring), 'url must be basestring'
assert not filename or isinstance(filename, basestring), 'filename must be None or basestring'
assert not output or isinstance(output, basestring), 'output must be None or basestring'
filename = filename or url_to_filename(url)
output = output or 'output'
if not os.path.exists(output):
os.makedirs(output)
filepath = os.path.join(output, filename)
logger.debug('download_file from=%s, to=%s' % (url, filepath))
if not os.path.exists(filepath):
r = get_stream(url, **kwargs)
with open(filepath, 'wb') as f:
shutil.copyfileobj(r.raw, f)
logger.info('download_file saved %s' % url)
else:
logger.info('download_file skip %s' % url)
return filepath
############################################################
#
# Thread and Process Functions
#
############################################################
class ThreadPoolExecutorStackTraced(ThreadPoolExecutor):
#https://stackoverflow.com/questions/19309514
def submit(self, fn, *args, **kwargs):
"""Submits the wrapped function instead of `fn`"""
return super(ThreadPoolExecutorStackTraced, self).submit(
self._function_wrapper, fn, *args, **kwargs)
def _function_wrapper(self, fn, *args, **kwargs):
"""Wraps `fn` in order to preserve the traceback of any kind of
raised exception
"""
try:
return fn(*args, **kwargs)
except Exception:
# Creates an exception of the same type with the traceback as message
raise sys.exc_info()[0](traceback.format_exc())
def run_in_thread(func, *args, **kwargs):
"""Run function in thread, return a Thread object"""
from threading import Thread
thread = Thread(target=func, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
return thread
def run_in_subprocess(func, *args, **kwargs):
"""Run function in subprocess, return a Process object"""
from multiprocessing import Process
thread = Process(target=func, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
return thread
def run_in_pool(func, args, pool_size=4, retry_max=0, sleep=60):
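    # Map func over args in a process pool; on error the whole batch is retried up to retry_max times with a growing delay.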
def _initializer():
signal.signal(signal.SIGINT, signal.SIG_IGN)
r = None
retry = 0
while retry <= retry_max:
pool = Pool(pool_size, _initializer)
try:
r = pool.map_async(func, args)
r.get(999999)
pool.close()
logger.info('Task execution completely.')
break
        except KeyboardInterrupt:
            logger.info('Task terminated by user.')
pool.terminate()
break
except Exception as e:
pool.terminate()
retry += 1
traceback.print_exc()
if retry <= retry_max:
next_delay = sleep * (retry % 6 + 1)
logger.info('Task error: {0}, {1} retry in {2}s'.format(
e, retry_max - retry, next_delay))
                time.sleep(next_delay)
finally:
pool.join()
return r.get()
| mcxiaoke/python-labs | lib/commons.py | commons.py | py | 5,317 | python | en | code | 7 | github-code | 36 |
70858172905 | from functools import reduce, wraps
import tensorflow as tf
from tensorflow.keras.layers import Add, BatchNormalization, LeakyReLU, Conv2D, ZeroPadding2D, UpSampling2D
from tensorflow.keras.layers import Concatenate
from keras.layers.merge import add
from tensorflow.keras.regularizers import l2
L2_FACTOR = 1e-5
def compose(*funcs):
"""Compose arbitrarily many functions, evaluated left to right.
Reference: https://mathieularose.com/function-composition-in-python/
"""
# return lambda x: reduce(lambda v, f: f(v), funcs, x)
if funcs:
return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
else:
raise ValueError('Composition of empty sequence not supported.')
def DarknetConv2D_BN_Leaky(*args, **kwargs):
"""Darknet Convolution2D followed by CustomBatchNormalization and LeakyReLU."""
no_bias_kwargs = {'use_bias': False}
no_bias_kwargs.update(kwargs)
interim_model = compose(
DarknetConv2D(*args, **no_bias_kwargs),
BatchNormalization(epsilon=0.001, trainable=False),
LeakyReLU(alpha=0.1 ))
return interim_model
def DarknetConv2D(*args, **kwargs):
"""Wrapper to set Darknet parameters for YoloConv2D."""
#darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
#darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides')==(2,2) else 'same'
darknet_conv_kwargs = {'padding': 'valid' if kwargs.get('strides')==(2,2) else 'same'}
darknet_conv_kwargs.update(kwargs)
return YoloConv2D(*args, **darknet_conv_kwargs)
@wraps(Conv2D)
def YoloConv2D(*args, **kwargs):
"""Wrapper to set Yolo parameters for Conv2D."""
yolo_conv_kwargs = {'kernel_regularizer': l2(L2_FACTOR)}
yolo_conv_kwargs['bias_regularizer'] = l2(L2_FACTOR)
yolo_conv_kwargs.update(kwargs)
#yolo_conv_kwargs = kwargs
return Conv2D(*args, **yolo_conv_kwargs)
def CustomBatchNormalization(*args, **kwargs):
if tf.__version__ >= '2.2':
from tensorflow.keras.layers.experimental import SyncBatchNormalization
BatchNorm = SyncBatchNormalization
else:
BatchNorm = BatchNormalization
return BatchNorm(*args, **kwargs)
def yolo3_predictions(feature_maps, feature_channel_nums, num_classes):
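    # Build the three YOLOv3 detection heads, merging coarser features into finer ones top-down (FPN-style).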
f13, f26, f52 = feature_maps
f13_channels, f26_channels, f52_channels = feature_channel_nums
# feature map 1 head & output (13x13 for 416 input) - starting with 1024 filters
x, y1 = make_last_layers(f13, f13_channels, 3 * (num_classes + 5), predict_id='1')
# upsample fpn merge for feature maps 1 and 2
x = compose(DarknetConv2D_BN_Leaky(f26_channels//2, (1,1)),
UpSampling2D(2))(x)
x = Concatenate()([x,f26])
# feature map 2 head & output (26x26 for 416 input) - starting with 512 filters
    x, y2 = make_last_layers(x, f26_channels, 3 * (num_classes + 5), predict_id='2')  # use the upsample-merged features rather than raw f26
# upsample fpn merge for feature maps 2 and 3
x = compose(DarknetConv2D_BN_Leaky(f52_channels//2, (1, 1)),
UpSampling2D(2))(x)
x = Concatenate()([x, f52])
# feature map 3 head & output (52x52 for 416 input) - starting with 128 filters
    x, y3 = make_last_layers(x, f52_channels//2, 3 * (num_classes + 5), predict_id='3')  # use the merged features rather than raw f52
return y1, y2, y3
def make_last_layers(x, num_filters, out_filters, predict_filters=None, predict_id='1'):
'''
    Following pred_yolo1, pred_yolo2 and pred_yolo3 as in experiencor's code
https://github.com/experiencor/keras-yolo3
'''
# if predict_id == '1' or predict_id == '2':
# # Conv2D_BN_Leaky layers followed by a Conv2D_linear layer
# y = compose(
# DarknetConv2D_BN_Leaky(num_filters, (3, 3)),
# DarknetConv2D(out_filters, (1, 1), name='predict_conv_' + predict_id))(x)
#
# if predict_id == '3':
# 6 Conv2D_BN_Leaky layers followed by a Conv2D_linear layer
# num_filters here 128
x = compose(
DarknetConv2D_BN_Leaky(num_filters, (1, 1)),
DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)),
DarknetConv2D_BN_Leaky(num_filters, (1, 1)),
DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)),
DarknetConv2D_BN_Leaky(num_filters, (1, 1)))(x)
if predict_filters is None:
predict_filters = num_filters * 2
y = compose(
DarknetConv2D_BN_Leaky(predict_filters, (3, 3)),
DarknetConv2D(out_filters, (1, 1), name='predict_conv_' + predict_id))(x)
return x, y
def conv_block(inp, convs, do_skip=True):
x = inp
count = 0
for conv in convs:
if count == (len(convs) - 2) and do_skip:
skip_connection = x
count += 1
if conv['stride'] > 1: x = ZeroPadding2D(((1, 0), (1, 0)))(x) # unlike tensorflow darknet prefer left and top paddings
x = Conv2D(conv['filter'],
conv['kernel'],
strides=conv['stride'],
padding='valid' if conv['stride'] > 1 else 'same',
# unlike tensorflow darknet prefer left and top paddings
name='conv_' + str(conv['layer_idx']),
use_bias=False if conv['bnorm'] else True)(x)
if conv['bnorm']: x = BatchNormalization(epsilon=0.001, trainable=False,
name='bnorm_' + str(conv['layer_idx']))(x)
if conv['leaky']: x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)
return add([skip_connection, x]) if do_skip else x
##### | jmajumde/MyMScProj | jmod/onestage/yolov3/models/layers.py | layers.py | py | 6,268 | python | en | code | 2 | github-code | 36 |
7880901877 | import json
from enum import Enum
from typing import Union
from pyspark.sql import Column
import pyspark.sql.functions as F
class ModelType(Enum):
CLASSIFICATION = 1
REGRESSION = 2
class _Singleton(type):
""" A metaclass that creates a Singleton base class when called. """
_instances = {}
def __call__(cls, *args):
if cls not in cls._instances:
cls._instances[cls] = super(_Singleton, cls).__call__(*args)
return cls._instances[cls]
class Singleton(_Singleton('SingletonMeta', (object,), {})):
pass
class LossModelTypeMapper(Singleton):
"""
Mapper for losses -> model type
"""
def __init__(self):
loss_to_model_type = {}
loss_to_model_type.update(
{'mean_squared_error': ModelType.REGRESSION,
'mean_absolute_error': ModelType.REGRESSION,
'mse': ModelType.REGRESSION,
'mae': ModelType.REGRESSION,
'cosine_proximity': ModelType.REGRESSION,
'mean_absolute_percentage_error': ModelType.REGRESSION,
'mean_squared_logarithmic_error': ModelType.REGRESSION,
'logcosh': ModelType.REGRESSION,
'binary_crossentropy': ModelType.CLASSIFICATION,
'categorical_crossentropy': ModelType.CLASSIFICATION,
'sparse_categorical_crossentropy': ModelType.CLASSIFICATION})
self.__mapping = loss_to_model_type
def get_model_type(self, loss):
return self.__mapping.get(loss)
def register_loss(self, loss, model_type):
if callable(loss):
loss = loss.__name__
self.__mapping.update({loss: model_type})
class ModelTypeEncoder(json.JSONEncoder):
def default(self, obj):
if obj in [e for e in ModelType]:
return {"__enum__": str(obj)}
return json.JSONEncoder.default(self, obj)
def as_enum(d):
if "__enum__" in d:
name, member = d["__enum__"].split(".")
return getattr(ModelType, member)
else:
return d
def argmax(col: Union[str, Column]) -> Column:
"""
returns expression for finding the argmax in an array column
:param col: array column to find argmax of
:return: expression which can be used in `select` or `withColumn`
"""
return F.expr(f'array_position({col}, array_max({col})) - 1')
| maxpumperla/elephas | elephas/utils/model_utils.py | model_utils.py | py | 2,344 | python | en | code | 1,568 | github-code | 36 |
28408907957 | from source.hh_api.headhunter_api import HHApi
from source.jsonhandler.jsonhandler import JSONHandler
from source.sj_api.superjob_api import SJApi
from source.vacancies.vacancy import Vacancy
def search_vacancies():
    keywords = input('Enter a search query: \n')
hh = HHApi(keywords)
sj = SJApi(keywords)
json_handler = JSONHandler([hh, sj])
json_handler.save()
all_vacancies = [Vacancy(
id=vacancy['id'],
url=vacancy['url'],
name=vacancy['name'],
salary=vacancy['salary'],
description=vacancy['description'],
requirements=vacancy['requirements'],
area=vacancy['area'],
platform=vacancy['platform'],
) for vacancy in json_handler.get_all_vacancies()]
while True:
        top = int(input('Enter the number of vacancies to display \n'))
        sort_vacancies = input('Sort the results by salary? \n').lower()
        if sort_vacancies == 'yes':
all_vacancies.sort(
key=lambda vacancy: vacancy.salary['from'] if vacancy.salary and 'from' in vacancy.salary and
vacancy.salary['from'] is not None else float(
'-inf'), reverse=True)
for vacancy in all_vacancies[:top]:
print(vacancy)
break
        elif sort_vacancies == 'no':
for vacancy in all_vacancies[:top]:
if vacancy.salary and 'from' in vacancy.salary:
print(vacancy)
break
else:
            print('Please enter yes or no.')
if __name__ == '__main__':
    search_vacancies()
| Memorizu/Job_Parser_upd | main.py | main.py | py | 1,778 | python | en | code | 0 | github-code | 36 |
30111928056 | def ex1(n):
    # build n independent rows; using `* n` would alias one row object n times
    return [[el for el in range(1, n + 1)] for _ in range(n)]
def ex2(matrix):
return [el[::-1] for el in matrix]
def ex3(m1, m2):
try:
return [[min(m1[i][j], m2[i][j]) for j in range(max(len(m1[i]), len(m2[i])))] for i in range(max(len(m1), len(m2)))]
except IndexError:
raise IndexError("Matricile nu au aceleasi dimensiuni")
def ex4(n):
return [[0 if i == j else 1 if i < j else -1 for j in range(n)] for i in range(n)]
def ex5(l1, l2):
return [[0 if l1[i] % 2 == l2[j] % 2 else 1 for j in range(len(l2))] for i in range(len(l1))]
if __name__ == "__main__":
# ex1
n = 4
print("Ex1")
print(ex1(n))
# ex2
matrix = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
print("Ex2")
print(ex2(matrix))
# ex3
try:
m1 = [
["a", "b", "c"],
["d", "e", "f"],
["g", "h", "i"]
]
m2 = [
["b", "c", "d"],
["c", "d", "h"],
["a", "p", "i"]
]
print("Ex3")
print(ex3(m1, m2))
except IndexError as e:
print(e)
# ex4
n = 3
print("Ex4")
print(ex4(n))
# ex5
l1 = [3, 5]
l2 = [1, 2, 3, 4, 5]
print("Ex5")
print(ex5(l1, l2))
| daneel95/Master_Homework | FirstYear/NLP/Lab2/Tema/ex9.py | ex9.py | py | 1,268 | python | en | code | 0 | github-code | 36 |
27574816900 | n = int(input())
a = [int(i) for i in input().split()]
a.sort()
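# find the smallest positive integer missing from the sorted sequence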
res = 1
for i in a:
if i > res:
break
elif i == res:
res += 1
print(res)
| Kinhs/Python-PTIT | PY02018 - Số nhỏ nhất còn thiếu.py | PY02018 - Số nhỏ nhất còn thiếu.py | py | 161 | python | en | code | 0 | github-code | 36 |
37339853215 | from collections import deque
infinity = float("inf")
def make_graph():
# identical graph as the YouTube video: https://youtu.be/Tl90tNtKvxs
return [
[0, 10, 0, 10, 0, 0],
[0, 0, 4, 2, 8, 0],
[0, 0, 0, 0, 0, 10],
[0, 0, 0, 0, 9, 0],
[0, 0, 6, 0, 0, 10],
[0, 0, 0, 0, 0, 0],
]
# find paths from source to sink with breadth-first search
def bfs(G, source, sink, parent):
visited = [False] * len(G)
queue = deque()
queue.append(source)
visited[source] = True
while queue:
node = queue.popleft()
for i in range(len(G[node])):
if visited[i] is False and G[node][i] > 0:
queue.append(i)
visited[i] = True
parent[i] = node
return True if visited[sink] else False
def ford_fulkerson(G, source, sink):
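    # BFS path selection makes this the Edmonds-Karp variant, O(V * E^2).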
# This array is filled by breadth-first search (bfs) and stores path
parent = [-1] * (len(G))
max_flow = 0
while bfs(G, source, sink, parent):
path_flow = infinity
s = sink
while s != source:
# Find the minimum value in selected path
path_flow = min(path_flow, G[parent[s]][s])
s = parent[s]
max_flow += path_flow
v = sink
# add or subtract flow based on path
while v != source:
u = parent[v]
G[u][v] -= path_flow
G[v][u] += path_flow
v = parent[v]
return max_flow
def main():
G = make_graph()
source = 0
sink = 5
max_flow = ford_fulkerson(G, source, sink)
print(f'Maximum flow: {max_flow}')
main()
| msambol/dsa | maximum_flow/ford_fulkerson.py | ford_fulkerson.py | py | 1,683 | python | en | code | 211 | github-code | 36 |
10829429705 | #!/usr/bin/env python
import os, sys, pkg_resources
import json
from collections import namedtuple
from functools import partial
import html5lib
from ..vendor.pluginbase.pluginbase import PluginBase
Key = namedtuple("Key", ["name","version"])
__all__ = ['plugins_get_mgr', 'plugins_load',
'plugins_show', 'plugins_close']
class PluginManager(object):
"""
Manage the various plugins in the project
"""
def __init__(self, paths=[]):
self.order = ['backend', 'repomanager',
'metadata',
'validator', 'transformer',
'instrumentation',
'representation']
self.plugins = {
'backend': {},
'instrumentation': {},
'repomanager': {},
'metadata': {},
'validator': {},
'transformer': {},
'representation': {}
}
self.sources = {}
thisdir = os.path.abspath(os.path.dirname(__file__))
def get_path(p):
return os.path.abspath(os.path.join(thisdir,
"../contrib",
p))
allplugins = [
{
'package': 'backend',
'base': get_path('backends'),
},
{
'package': 'instrumentation',
'base': get_path('instrumentations'),
},
{
'package': 'repomanager',
'base': get_path('repomanagers'),
},
{
'package': 'metadata',
'base': get_path('metadata'),
},
{
'package': 'validator',
'base': get_path('validators'),
},
{
'package': 'transformer',
'base': get_path('transformers'),
},
{
'package': 'representation',
'base': get_path('representations'),
},
]
for p in allplugins:
plugin_base = PluginBase(package=p['package'],
searchpath=[p['base']])
source = plugin_base.make_plugin_source(
searchpath=[],
identifier="Plugin Manager")
for plugin_name in source.list_plugins():
# print("Loading plugin", p['base'], plugin_name)
plugin = source.load_plugin(plugin_name)
plugin.setup(self)
self.sources[p['package']] = source
self.discover_all_plugins()
def discover_all_plugins(self):
"""
Load all plugins from dgit extension
"""
for v in pkg_resources.iter_entry_points('dgit.plugins'):
m = v.load()
m.setup(self)
def register(self, what, obj):
"""
Registering a plugin
Params
------
what: Nature of the plugin (backend, instrumentation, repo)
obj: Instance of the plugin
"""
# print("Registering pattern", name, pattern)
name = obj.name
version = obj.version
enable = obj.enable
if enable == 'n':
return
key = Key(name, version)
self.plugins[what][key] = obj
def search(self, what, name=None, version=None):
"""
Search for a plugin
"""
filtered = {}
        # The search may be a full scan (what is None) or restricted to one plugin class
        if what is None:
            whats = list(self.plugins.keys())
        else:
            if what not in self.plugins:
                raise Exception("Unknown class of plugins")
            whats = [what]
for what in whats:
if what not in filtered:
filtered[what] = []
for key in self.plugins[what].keys():
(k_name, k_version) = key
if name is not None and k_name != name:
continue
if version is not None and k_version != version:
continue
if self.plugins[what][key].enable == 'n':
continue
filtered[what].append(key)
# print(filtered)
return filtered
def gather_configs(self):
"""
Gather configuration requirements of all plugins
"""
configs = []
for what in self.order:
for key in self.plugins[what]:
mgr = self.plugins[what][key]
c = mgr.config(what='get')
if c is not None:
c.update({
'description': mgr.description
})
# print("Gathering configuration from ", c)
configs.append(c)
return configs
def update_configs(self, config):
"""
Gather configuration requirements of all plugins
"""
for what in self.plugins: # backend, repo etc.
for key in self.plugins[what]: # s3, filesystem etc.
# print("Updating configuration of", what, key)
self.plugins[what][key].config(what='set', params=config)
return
def show(self, what, name, version, details):
filtered = self.search(what, name, version)
if len(filtered) > 0:
for what in self.order:
print("========")
print(what)
print("========")
if len(filtered[what]) == 0:
print("None\n")
continue
for k in filtered[what]:
obj = self.plugins[what][k]
print("%s (%s) :" % k,
obj.description)
if details:
print(" Supp:", obj.support)
print("")
else:
print("No backends found")
def get_by_key(self, what, key):
return self.plugins[what][key]
def get_by_repo(self, username, dataset):
keys = list(self.plugins['repomanager'].keys())
for k in keys:
try:
repomanager = self.plugins['repomanager'][k]
repokey = repomanager.find(username, dataset)
break
except:
repomanager = None
repokey = None
return (repomanager, repokey)
def get(self, what, name):
filtered = self.search(what, name)
filtered = filtered[what]
if len(filtered) > 0:
return self.plugins[what][filtered[0]]
else:
return None
def shutdown(self):
for what in self.sources:
self.sources[what].cleanup()
pluginmgr = None
def plugins_load():
"""
Load plugins from various sources:
- dgit/plugins
- dgit_extensions package
"""
global pluginmgr
    # Shut down any previously loaded plugin manager before reloading
if pluginmgr is not None:
plugins_close()
pluginmgr = PluginManager([])
# pluginmgr.show()
def plugins_close():
global pluginmgr
pluginmgr.shutdown()
pluginmgr = None
def plugins_show(what=None, name=None, version=None, details=False):
"""
Show details of available plugins
Parameters
----------
what: Class of plugins e.g., backend
name: Name of the plugin e.g., s3
version: Version of the plugin
    details: Should details be shown?
"""
global pluginmgr
return pluginmgr.show(what, name, version, details)
def plugins_get_mgr():
"""
Get the global plugin manager
"""
global pluginmgr
return pluginmgr
def plugins_get_config():
global pluginmgr
return pluginmgr.config()
if __name__ == '__main__':
plugins_load()
plugins_show()
plugins_close()
| pingali/dgit | dgitcore/plugins/common.py | common.py | py | 7,921 | python | en | code | 15 | github-code | 36 |
11712750390 | import pyautogui
import pyperclip
import time
import schedule
# Send a message in KakaoTalk; wrapped in the send_message function so it can be scheduled
def send_message():
    # Find the region matching KakaoPicture1.png on screen and print its coordinates
picPosition = pyautogui.locateOnScreen(r'11. PC_Kakao_Talk_Automation_Using_Automouse\KakaoPicture1.png')
print(picPosition)
    # If not found in the previous step, try KakaoPicture2.png and print its coordinates
if picPosition is None:
picPosition = pyautogui.locateOnScreen(r'11. PC_Kakao_Talk_Automation_Using_Automouse\KakaoPicture2.png')
print(picPosition)
    # If still not found, try KakaoPicture3.png and print its coordinates
if picPosition is None:
picPosition = pyautogui.locateOnScreen(r'11. PC_Kakao_Talk_Automation_Using_Automouse\KakaoPicture3.png')
print(picPosition)
    # Get the center coordinates of the matched image
    ClickPosition = pyautogui.center(picPosition)
    pyautogui.doubleClick(ClickPosition)  # double-click to open the chat window
    # Paste the automated-message text and wait one second
pyperclip.copy("이 메세지는 자동으로 보내는 메세지입니다")
pyautogui.hotkey("ctrl", "v")
time.sleep(1.0)
    # Press Enter to send, then wait one second
pyautogui.write(["enter"])
time.sleep(1.0)
    # Press Esc to close the chat window, then wait one second
pyautogui.write(["escape"])
time.sleep(1.0)
# Register a schedule that runs send_message every 10 seconds
schedule.every(10).seconds.do(send_message)
# schedule.run_pending() executes any due jobs; poll once a second forever
while True:
schedule.run_pending()
time.sleep(1) | WoojinJeonkr/Python-and-40-works-to-learn-while-making | 11. PC_Kakao_Talk_Automation_Using_Automouse/ScheduleRunAutomationKakaoTalk.py | ScheduleRunAutomationKakaoTalk.py | py | 1,883 | python | ko | code | 1 | github-code | 36 |
74579426342 | # general
from django.views.generic import ListView
from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
from django.db.models import Count
from django.contrib import messages
from django.contrib.auth import get_user_model
from django_email_verification import send_email
#blog
#tag engine
from taggit.models import Tag
# paginator for the blog
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
# models for managing blog posts
from .models import Post, Commenti, slider_1, slider_2, home_gallery, titoli_landing_page, content_chisiamo, content_mappa, candidato, link_footer, banner_lavora_con_noi,content_lavora_con_noi,Servizi,Certificati, img_hero
# authentication and registration
from django.contrib.auth import authenticate, login as auth_login, logout as auth_logout
from django.template.context_processors import csrf
from django.contrib.auth.forms import UserCreationForm
# miscellaneous forms
from .forms import nome_utente, password, EmailPostForm, FormCommenti, registrazione, lavoraconnoi
#chat
from django.core.mail import send_mail
import socket
# REST API
#framework
from rest_framework import generics
# serializer
from .serializers import post_serializer
# REST API classes
# Post list API
class post_list_api(generics.ListAPIView):
    # queryset to expose
queryset = Post.objects.all()
    # serializer for the queryset
serializer_class = post_serializer
# Post detail API
class post_detail_api(generics.RetrieveAPIView):
    # queryset to expose
queryset = Post.objects.all()
    # serializer for the queryset
serializer_class = post_serializer
# service list
def serv_list(request,nome=None):
hero = img_hero.objects.filter(current = True).first()
servizi = Servizi.objects.all()
titoli = titoli_landing_page.objects.filter(current=True).first()
footer = link_footer.objects.filter(current=True).first()
    # service detail
if nome:
nome_servizio = get_object_or_404(Servizi, slug=nome)
template = loader.get_template('landing_page/servizio.html')
        return render(request, 'landing_page/servizio.html', {'nome_servizio': nome_servizio, 'footer': footer, 'titoli': titoli, 'hero': hero})
template = loader.get_template('landing_page/serv_list.html')
    return render(request, 'landing_page/serv_list.html', {'servizi': servizi, 'footer': footer, 'titoli': titoli, 'hero': hero})
# certificate list
def cert_list(request,nome_certificato=None):
hero = img_hero.objects.filter(current = True).first()
certificati = Certificati.objects.all()[:5]
titoli = titoli_landing_page.objects.filter(current=True).first()
footer = link_footer.objects.filter(current=True).first()
    # certificate detail
if nome_certificato:
certificato = get_object_or_404(Certificati, slug=nome_certificato)
template = loader.get_template('landing_page/certificato.html')
return render(request, 'landing_page/certificato.html', {'certificato': certificato , 'footer':footer, 'titoli':titoli, 'hero': hero })
template = loader.get_template('landing_page/serv_list.html')
return render(request, 'landing_page/cert_list.html', {'certificati':certificati, 'footer':footer , 'titoli':titoli, 'hero': hero})
# home page and homepage links
def index(request):
hero = img_hero.objects.filter(current = True).first()
slide1 = slider_1.objects.filter(current=True).first()
slide2 = slider_2.objects.filter(current=True).first()
galleria = home_gallery.objects.filter(current=True).first()
titoli = titoli_landing_page.objects.filter(current=True).first()
footer = link_footer.objects.filter(current=True).first()
template = loader.get_template('landing_page/index.html')
return render(request, 'landing_page/index.html', {'slide1': slide1, 'slide2': slide2, 'galleria' : galleria, 'titoli':titoli, 'footer':footer, 'hero' : hero })
def chisiamo(request):
hero = img_hero.objects.filter(current = True).first()
galleria_chisiamo = Post.objects.all()
footer = link_footer.objects.filter(current=True).first()
titoli = titoli_landing_page.objects.filter(current=True).first()
contenuto = content_chisiamo.objects.filter(current=True).first()
template = loader.get_template('landing_page/chi_siamo.html')
return render(request, 'landing_page/chi_siamo.html', {'galleria_chisiamo' : galleria_chisiamo, 'contenuto' : contenuto, 'footer':footer , 'titoli':titoli, 'hero': hero})
def mappa(request):
hero = img_hero.objects.filter(current = True).first()
footer = link_footer.objects.filter(current=True).first()
titoli = titoli_landing_page.objects.filter(current=True).first()
contenuto = content_mappa.objects.filter(current=True).first()
template = loader.get_template('landing_page/mappa.html')
return render(request, 'landing_page/mappa.html', {'contenuto' : contenuto, 'footer':footer , 'titoli':titoli, 'hero': hero})
def registrati(request):
hero = img_hero.objects.filter(current = True).first()
footer = link_footer.objects.filter(current=True).first()
contenuto = content_mappa.objects.filter(current=True).first()
titoli = titoli_landing_page.objects.filter(current=True).first()
form = registrazione()
if request.method == "POST":
form = registrazione(request.POST)
if form.is_valid():
user = form.save()
username = form.cleaned_data.get("username")
            # Confirm account creation via email
            # TODO: to be completed
#-----------------------------------------------------------#
#email = form.cleaned_data.get("email")
#user = get_user_model().objects.create(username=usernameT, password=password, email=email)
#user.is_active = False # Example
#send_email(user)
#-----------------------------------------------------------#
messages.success(request, 'Account creato,benvenuto,' + username + '!')
return redirect('/landing_page/')
return render(request, 'landing_page/registrati.html', {'form' : form, 'contenuto' : contenuto, 'footer':footer , 'titoli':titoli, 'hero': hero})
def lavora_con_noi(request):
hero = img_hero.objects.filter(current = True).first()
titoli = titoli_landing_page.objects.filter(current=True).first()
footer = link_footer.objects.filter(current=True).first()
contenuto = content_lavora_con_noi.objects.filter(current=True).first()
banner = banner_lavora_con_noi.objects.filter(current=True).first()
if request.method == 'POST':
forml = lavoraconnoi(request.POST, request.FILES)
if forml.is_valid():
forml.save()
return redirect('/landing_page/')
else:
forml = lavoraconnoi()
return render(request, 'landing_page/lavora_con_noi.html', {'forml' : forml, 'footer':footer, 'contenuto':contenuto, 'banner':banner , 'titoli':titoli, 'hero': hero})
# login handling
def login(request):
c = {}
c.update(csrf(request))
return render(request, 'landing_page/index.html', c)
def logout(request):
    auth_logout(request)
    return redirect('/landing_page/')
# authenticate the credentials submitted via the "login" view
def authentication(request):
    username = request.POST.get('username', '')
    password = request.POST.get('password', '')
    user = authenticate(username=username, password=password)
    if user is not None:
        auth_login(request, user)
        return HttpResponseRedirect('/accounts/loggedin')
    else:
        return HttpResponseRedirect('/accounts/invalid')
# post handling
# post list
#-------------------------------------------------------------------------------------------------------------------------------------------------------#
def post_list(request, tag_slug=None):
hero = img_hero.objects.filter(current = True).first()
titoli = titoli_landing_page.objects.filter(current=True).first()
footer = link_footer.objects.filter(current=True).first()
object_list = Post.published.all()
tag = None
if tag_slug:
tag = get_object_or_404(Tag, slug=tag_slug)
object_list = object_list.filter(tags__in=[tag])
paginator = Paginator(object_list, 3)
page = request.GET.get('page')
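    # Fall back to the first page on a non-integer page value and to the
    # last page when the requested page is out of range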
try:
posts = paginator.page(page)
except PageNotAnInteger:
posts = paginator.page(1)
except EmptyPage:
posts = paginator.page(paginator.num_pages)
return render(request, 'landing_page/post/list.html', {'page': page, 'posts': posts, 'tag': tag, 'footer':footer , 'titoli':titoli, 'hero': hero})
#-------------------------------------------------------------------------------------------------------------------------------------------------------#
# single post detail view
def post_detail(request, year, month, day, post):
hero = img_hero.objects.filter(current = True).first()
titoli = titoli_landing_page.objects.filter(current=True).first()
footer = link_footer.objects.filter(current=True).first()
post = get_object_or_404(Post, slug=post, status='published', publish__year=year, publish__month=month, publish__day=day)
    # list of active comments on this post
commenti = post.commenti.filter(attivo=True)
    nuovo_commento = None
if request.method == 'POST':
        # a comment was posted
form_commento = FormCommenti(data=request.POST)
if form_commento.is_valid():
            # create a comment object without saving it to the db
nuovo_commento = form_commento.save(commit=False)
            # attach the current post to the comment
nuovo_commento.post = post
            # save the comment to the db
nuovo_commento.save()
            # list of similar posts
post_tags_ids = post.tags.values_list('id', flat=True)
post_simili = Post.published.filter(tags__in=post_tags_ids).exclude(id=post.id)
post_simili = post_simili.annotate(same_tags=Count('tags')).order_by('-same_tags','-publish')[:4]
return render(request, 'landing_page/post/detail.html', {'post' : post, 'commenti' : commenti, 'nuovo_commento' : nuovo_commento, 'form_commento' : form_commento, 'post_simili' : post_simili, 'footer':footer , 'titoli':titoli, 'hero': hero})
else:
form_commento = FormCommenti()
    # list of similar posts
post_tags_ids = post.tags.values_list('id', flat=True)
post_simili = Post.published.filter(tags__in=post_tags_ids).exclude(id=post.id)
post_simili = post_simili.annotate(same_tags=Count('tags')).order_by('-same_tags','-publish')[:4]
return render(request, 'landing_page/post/detail.html', {'post' : post, 'commenti' : commenti, 'form_commento' : form_commento, 'post_simili' : post_simili, 'footer':footer , 'titoli':titoli, 'hero': hero})
# share a post via email
def post_share(request, post_id):
hero = img_hero.objects.filter(current = True).first()
titoli = titoli_landing_page.objects.filter(current=True).first()
post = get_object_or_404(Post, id=post_id, status='published')
sent = False
if request.method == 'POST':
form = EmailPostForm(request.POST)
if form.is_valid():
            # validated form data
cd = form.cleaned_data
post_url = request.build_absolute_uri(post.get_absolute_url())
titolo = f"{cd['name']} vorrebbe farti leggere questo: {post.title}"
messaggio = f"{post_url}\n\n {cd['comments']}"
            # actually send the mail
#-----------------------------------------------------------------#
send_mail(titolo,messaggio,'espositogerardo94@gmail.com',[cd['to']])
#-----------------------------------------------------------------#
sent = True
else:
form = EmailPostForm()
return render(request, 'landing_page/post/share.html',{'post' : post, 'form' : form, 'sent' : sent, 'hero': hero })
| gitsh1t/vetrina_test | landing_page/views.py | views.py | py | 12,116 | python | en | code | 0 | github-code | 36 |
72000657063 | # -*- coding: utf-8 -*-
# French language sounds configuration
from tts import filename, NO_ALTERNATE, PROMPT_SYSTEM_BASE, PROMPT_CUSTOM_BASE
systemSounds = []
sounds = []
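# Digits 0-99 map onto consecutive system prompt slots starting at PROMPT_SYSTEM_BASE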
for i in range(100):
systemSounds.append((str(i), filename(PROMPT_SYSTEM_BASE + i)))
for i in range(10):
systemSounds.append((str(100 * (i + 1)), filename(PROMPT_SYSTEM_BASE + 100 + i)))
for i, s in enumerate(["une", "onze", "vingt et une", "trente et une", "quarante et une", "cinquante et une", "soixante et une", "soixante et onze", "quatre vingt une"]):
systemSounds.append((s, filename(PROMPT_SYSTEM_BASE + 110 + i)))
for i, s in enumerate(["virgule", "et", "moins", "minuit", "midi"]):
systemSounds.append((s, filename(PROMPT_SYSTEM_BASE + 119 + i)))
for i, s in enumerate(["volts", u"ampères", u"milli ampères", u"knots", u"mètres seconde", u"pieds par seconde", u"kilomètre heure", u"miles par heure", u"mètres", "pieds", u"degrés", u"degrés fahrenheit", "pourcents", u"milli ampères / heure", "watt", "db", "tours minute", "g", u"degrés", "millilitres", "onces", "heure", "minute", "seconde"]):
systemSounds.append((s, filename(PROMPT_SYSTEM_BASE + 125 + i)))
# for i, s in enumerate(["timer", "", "tension", "tension", u"émission", u"réception", "altitude", "moteur",
# "essence", u"température", u"température", "vitesse", "distance", "altitude", u"élément lipo",
# "total lipo", "tension", "courant", "consommation", "puissance", u"accelération X", u"accelération Y", u"accelération Z",
# "orientation", "vario"]):
# systemSounds.append((s, filename(PROMPT_SYSTEM_BASE+146+i)))
for i, s in enumerate(["virgule 0", "virgule 1", "virgule 2", "virgule 3", "virgule 4", "virgule 5", "virgule 6", "virgule 7", "virgule 8", "virgule 9"]):
systemSounds.append((s, filename(PROMPT_SYSTEM_BASE + 180 + i)))
for s, f, a in [(u"Trim centré", "midtrim", 495),
(u"Trim maximum atteint", "endtrim", NO_ALTERNATE),
(u"Batterie radio faible !", "lowbatt", 485),
(u"Radio inactive !", "inactiv", 486),
(u"Alerte manche des gaz", "thralert", 481),
(u"Alerte inters", "swalert", 482),
(u"éprome corrompue", "eebad", 483),
(u"Bienvenue sur Open TI X!", "tada", 480),
(u"vingt secondes", "timer20", 500),
(u"trente secondes", "timer30", 501),
(u"A1,faible", "a1_org", NO_ALTERNATE),
(u"A1,critique", "a1_red", NO_ALTERNATE),
(u"A2,faible", "a2_org", NO_ALTERNATE),
(u"A2,critique", "a2_red", NO_ALTERNATE),
(u"A3,faible", "a3_org", NO_ALTERNATE),
(u"A3,critique", "a3_red", NO_ALTERNATE),
(u"A4,faible", "a4_org", NO_ALTERNATE),
(u"A4,critique", "a4_red", NO_ALTERNATE),
(u"Signal RF, faible", "rssi_org", NO_ALTERNATE),
(u"Signal RF, critique", "rssi_red", NO_ALTERNATE),
(u"Antenne défectueuse", "swr_red", NO_ALTERNATE),
(u"Plus de télémétrie", "telemko", NO_ALTERNATE),
(u"Télémétrie retrouvée", "telemok", NO_ALTERNATE),
(u"Signal écolage perdu", "trainko", NO_ALTERNATE),
(u"Signal écolage retrouvé", "trainok", NO_ALTERNATE),
]:
systemSounds.append((s, filename(f, a)))
for i, (s, f) in enumerate([(u"altitude", "altitude"),
(u"température moteur", "moteur"),
(u"température contrôleur", "cntrleur"),
(u"train rentré", "gearup"),
(u"train sorti", "geardn"),
(u"volets rentrés", "flapup"),
(u"volets sortis", "flapdn"),
(u"atterrissage", "attero"),
(u"écolage", "trnon"),
(u"fin écolage", "trnoff"),
(u"moteur coupé", "engoff"),
]):
sounds.append((s, filename(f, PROMPT_CUSTOM_BASE + i)))
| Ingwie/NextStepRc-2.18 | radio/util/tts_fr.py | tts_fr.py | py | 4,215 | python | en | code | 14 | github-code | 36 |
497207117 | import abc
import six
from dagster_spark.configs_spark import spark_config
from dagster_spark.utils import flatten_dict
from pyspark.sql import SparkSession
from dagster import Field, check, resource
def spark_session_from_config(spark_conf=None):
spark_conf = check.opt_dict_param(spark_conf, 'spark_conf')
builder = SparkSession.builder
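    # Flatten the nested spark_conf dict into individual (key, value) settings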
flat = flatten_dict(spark_conf)
for key, value in flat:
builder = builder.config(key, value)
return builder.getOrCreate()
class PySparkResourceDefinition(six.with_metaclass(abc.ABCMeta)):
def __init__(self, spark_conf):
self._spark_session = spark_session_from_config(spark_conf)
@property
def spark_session(self):
return self._spark_session
@property
def spark_context(self):
return self.spark_session.sparkContext
def stop(self):
self._spark_session.stop()
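    # Subclasses decide how a solid's compute function is wrapped for execution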
@abc.abstractmethod
def get_compute_fn(self, fn, solid_name):
pass
class SystemPySparkResource(PySparkResourceDefinition):
def get_compute_fn(self, fn, solid_name):
return fn
@resource(
{
'spark_conf': spark_config(),
'stop_session': Field(
bool,
is_optional=True,
default_value=True,
description='Whether to stop the Spark session on pipeline completion. '
'Defaults to True.',
),
}
)
def pyspark_resource(init_context):
pyspark = SystemPySparkResource(init_context.resource_config['spark_conf'])
try:
yield pyspark
finally:
if init_context.resource_config['stop_session']:
pyspark.stop()
| helloworld/continuous-dagster | deploy/dagster_modules/libraries/dagster-pyspark/dagster_pyspark/resources.py | resources.py | py | 1,647 | python | en | code | 2 | github-code | 36 |
34447008316 | """
TITLE: Set.add()
INPUT:
7
UK
China
USA
France
New Zealand
UK
France
OUTPUT:
5
"""
n = int(input())
myset = set()
for _ in range(n):
myset.add(input())
print(len(myset))
| bakliwalvaibhav1/Python-HackerRank | 04) Sets/set_add.py | set_add.py | py | 182 | python | en | code | 1 | github-code | 36 |
4738433123 | from operator import attrgetter
from django.contrib.auth import get_user_model
from django.db.models import (
CASCADE, SET_NULL, BooleanField, CharField, CheckConstraint, DateTimeField,
F, ForeignKey, IntegerChoices, IntegerField, JSONField, ManyToManyField,
Model, Q, SlugField, TextField, UniqueConstraint)
from django.db.models.fields import URLField
from django.db.models.fields.related import OneToOneField
from django.urls import reverse
from django.utils.text import format_lazy
from django.utils.translation import gettext_lazy as _
from django.utils.translation import pgettext_lazy
from mdanchors import AnchorConverter
from drugcombinator.managers import DrugManager, InteractionManager
from drugcombinator.modelfields import ListField
from drugcombinator.tasks import ping_webarchive
from drugcombinator.utils import get_libravatar_url, markdown_allowed
class LastModifiedModel(Model):
last_modified = DateTimeField(
auto_now=True,
verbose_name=_("last modification")
)
class Meta:
abstract = True
class Drug(LastModifiedModel):
name = CharField(
max_length=128,
verbose_name=_("name")
)
slug = SlugField(
unique=True,
verbose_name=_("identifier")
)
description = TextField(
default='',
blank=True,
verbose_name=_("description"),
help_text=markdown_allowed()
)
risks = TextField(
default='',
blank=True,
verbose_name=_("general risks"),
help_text=format_lazy(
'{text}<br/>{notice}',
text=_(
"Risks specific to combinations involving this substance "
"that do not depend on a specific interaction."),
notice=markdown_allowed()
)
)
effects = TextField(
default='',
blank=True,
verbose_name=_("general effects"),
help_text=format_lazy(
'{text}<br/>{notice}',
text=_(
"Effects specific to combinations involving this "
"substance that do not depend on a specific "
"interaction."),
notice=markdown_allowed()
)
)
aliases = ListField(
verbose_name=_("aliases"),
help_text=_("One alias per line. No need to duplicate case.")
)
interactants = ManyToManyField(
'self',
symmetrical=True,
through='Interaction',
verbose_name=_("interactants")
)
category = ForeignKey(
'Category',
SET_NULL,
null=True,
blank=True,
related_name='drugs',
verbose_name=_("category")
)
common = BooleanField(
default=True,
verbose_name=_("common"),
help_text=_(
"Common substances are displayed as buttons in the app.")
)
# History manager will be added through simple_history's register
# function in translation.py, after the translated fields are
# added by modeltranslation
objects = DrugManager()
def __str__(self):
return self.name
@property
def interactions(self):
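        # Interactions where this drug appears on either side of the pair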
return Interaction.objects.filter(
Q(from_drug=self) | Q(to_drug=self)
)
# In Django 3.1.0, the Drug.interactants field accessor only returns
# Drug objects from the Interaction.to_drug field, but misses ones
# from the Interaction.from_drug field. This property is a
# workaround, as this limitation may be removed at framework level
# one day.
@property
def all_interactants(self):
return (
self.interactants.all()
| Drug.objects.filter(
interactions_from__in=self.interactions_to.all()
)
)
def get_absolute_url(self, namespace=None):
name = 'drug'
if namespace:
name = f"{namespace}:{name}"
return reverse(name, kwargs={'slug': self.slug})
class Meta:
verbose_name = _("substance")
ordering = ('slug',)
class Interaction(LastModifiedModel):
class Synergy(IntegerChoices):
UNKNOWN = (0, pgettext_lazy("synergy", "Unknown"))
NEUTRAL = (1, pgettext_lazy("synergy", "Neutral"))
ADDITIVE = (5, _("Additive"))
DECREASE = (2, _("Decrease"))
INCREASE = (3, _("Increase"))
MIXED = (4, _("Mixed"))
class Risk(IntegerChoices):
UNKNOWN = (0, pgettext_lazy("risk", "Unknown"))
NEUTRAL = (1, pgettext_lazy("risk", "Neutral"))
CAUTION = (2, _("Caution"))
UNSAFE = (3, _("Unsafe"))
DANGEROUS = (4, _("Dangerous"))
class Reliability(IntegerChoices):
UNKNOWN = (0, pgettext_lazy("reliability", "Unknown"))
HYPOTHETICAL = (1, _("Hypothetical"))
INFERRED = (2, _("Inferred"))
PROVEN = (3, _("Proven"))
from_drug = ForeignKey(
'Drug',
CASCADE,
related_name='interactions_from',
verbose_name=_("first interactant")
)
to_drug = ForeignKey(
'Drug',
CASCADE,
related_name='interactions_to',
verbose_name=_("second interactant")
)
names = ListField(
verbose_name=_("slang names"),
help_text=_(
"One name per line. The first one can be emphasized in the "
"app.")
)
risk = IntegerField(
choices=Risk.choices,
default=Risk.UNKNOWN,
verbose_name=_("risks")
)
synergy = IntegerField(
choices=Synergy.choices,
default=Synergy.UNKNOWN,
verbose_name=_("synergy")
)
risk_reliability = IntegerField(
choices=Reliability.choices,
default=Reliability.UNKNOWN,
verbose_name=_("risks reliability")
)
effects_reliability = IntegerField(
choices=Reliability.choices,
default=Reliability.UNKNOWN,
verbose_name=_("synergy and effects reliability")
)
risk_description = TextField(
default='',
blank=True,
verbose_name=_("risks description"),
help_text=markdown_allowed()
)
effect_description = TextField(
default='',
blank=True,
verbose_name=_("effects description"),
help_text=markdown_allowed()
)
notes = TextField(
default='',
blank=True,
verbose_name=_("notes"),
help_text=_(
"This field is only displayed on this admin site and is "
"shared between all users and languages.")
)
is_draft = BooleanField(
default=True,
verbose_name=_("draft"),
help_text=_(
"In case of work-in-progress, uncertain or incomplete "
"data.")
)
uris = JSONField(
default=dict,
editable=False,
verbose_name=_("URIs"),
help_text=_(
"URIs extracted from these interaction data texts, mapped "
"to their last Wayback Machine snapshot date.")
)
    # History manager will be added through simple_history's register
# function in translation.py, after the translated fields are
# added by modeltranslation
objects = InteractionManager()
def __str__(self):
return f"{self.from_drug.name} + {self.to_drug.name}"
def get_absolute_url(self, namespace=None):
name = 'combine'
if namespace:
name = f"{namespace}:{name}"
return reverse(name, kwargs={
'slugs': (self.from_drug.slug, self.to_drug.slug)
})
def other_interactant(self, drug):
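        # interactants is a 2-tuple, so `not index` flips 0 <-> 1 to
        # select the other member of the pair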
index = self.interactants.index(drug)
return self.interactants[not index]
@property
def slug(self):
return f"{self.from_drug.slug}_{self.to_drug.slug}"
@property
def interactants(self):
return (self.from_drug, self.to_drug)
@interactants.setter
def interactants(self, interactants):
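        # Keep a canonical order (by slug) so (A, B) and (B, A) resolve
        # to the same stored pair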
interactants = sorted(interactants, key=attrgetter('slug'))
self.from_drug, self.to_drug = interactants
def sort_interactants(self):
# The interactants property setter will handle interactants
# reordering
self.interactants = self.interactants
def extract_uris(self):
"""Extract URIs from this model `risk_description` and
`effect_description` text fields."""
return set().union(*map(
lambda field: AnchorConverter(field).uris,
(self.risk_description, self.effect_description)
))
def update_uris(self):
"""Update stored URIs according to this model text fields.
If a URI was already extracted, it will not be modified.
Unused URIs will be removed.
New URIs will be added with a `None` value.
"""
self.uris = {
            uri: self.uris.get(uri)
for uri in self.extract_uris()
}
def schedule_webarchive_ping(self):
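        # Build and immediately invoke the background task that pings the
        # Wayback Machine for this interaction's extracted URIs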
ping_webarchive(self.id, self.uris)()
def save(self, process_uris=True, *args, **kwargs):
self.sort_interactants()
if process_uris:
self.update_uris()
super().save(*args, **kwargs)
if process_uris:
self.schedule_webarchive_ping()
@classmethod
def get_dummy_risks(cls):
return [cls(risk=risk) for risk in cls.Risk.values]
@classmethod
def get_dummy_synergies(cls):
return [cls(synergy=synergy) for synergy in cls.Synergy.values]
class Meta:
constraints = (
CheckConstraint(
check=~Q(from_drug=F('to_drug')),
name='interactants_inequals'
),
UniqueConstraint(
fields=('from_drug', 'to_drug'),
name='interactants_unique_together'
)
)
verbose_name = _("interaction")
class Category(LastModifiedModel):
name = CharField(
max_length=128,
verbose_name=_("name")
)
slug = SlugField(
unique=True,
verbose_name=_("identifier")
)
description = TextField(
default='',
blank=True,
verbose_name=_("description")
)
def __str__(self):
return self.name
class Meta:
verbose_name = _("category")
verbose_name_plural = _("categories")
class Note(LastModifiedModel):
title = CharField(
max_length=128,
default=_("Untitled note"),
verbose_name=_("title")
)
content = TextField(
default='',
blank=True,
verbose_name=_("content"),
help_text=_(
"Notes are only displayed on this admin site and are shared "
"between all users and languages.")
)
related_drugs = ManyToManyField(
'Drug',
related_name='notes',
blank=True,
verbose_name=_("involved substances"),
help_text=_(
"If this note involves specific substances, you can "
"optionally set them here.")
)
def __str__(self):
return self.title
class Meta:
verbose_name = _("note")
class Contributor(Model):
user = OneToOneField(
get_user_model(),
CASCADE,
related_name='profile',
verbose_name=_("user")
)
page = URLField(
default='',
blank=True,
max_length=128,
verbose_name=_("personal page"),
help_text=_(
"This link may be used in public contributors lists.")
)
display = BooleanField(
default=False,
verbose_name=_("show publicly"),
help_text=_("Show this profile in public contributors lists.")
)
@property
def avatar_url(self):
return get_libravatar_url(
email=self.user.email,
https=True,
size=150,
default='identicon'
)
def __str__(self):
return self.user.username
class Meta:
verbose_name = _("contributor profile")
verbose_name_plural = _("contributor profiles")
| x-yzt/mixtures | drugcombinator/models.py | models.py | py | 12,000 | python | en | code | 7 | github-code | 36 |
20884673923 | #Start with an empty list and add in each new input (check if the data is valid)
my_list = []
total = 0
for i in range(7):
while True:
day_sales = input(f"Sales for day {i+1}: ")
if day_sales.isdigit():
days_sales = int(day_sales)
if days_sales >= 0:
my_list.append(days_sales)
total += days_sales
break
print("Sorry, that is not a valid number. Please try again.")
print(my_list)
average = total / 7
highest = max(my_list)
lowest = min(my_list)
print("")
print(f"Total sales: {total}")
print(f"Average sales per day: {average:.2f}")
print(f"Highest sales day: {highest} (day {my_list.index(highest) + 1})")
print(f"Lowest sales day: {lowest} (day {my_list.index(lowest) + 1})")
| Chenxinnnn/Chenxin-Undergrad-CS002 | HW 8/GuChenxin_assign8_part0.py | GuChenxin_assign8_part0.py | py | 803 | python | en | code | 0 | github-code | 36 |