seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
11540490270 | from DirectedGraphClass import *
#***********************************************************************#
# Bellman-Ford Algorithm #
#***********************************************************************#
# Number of vertices in the graph to build.
numberOfVertices = 5
# Index of the source vertex for the shortest-path computation.
source = 0
# Build the graph, run Bellman-Ford from the source, then print the result.
# NOTE(review): DirectedGraph/createDirectedGraph/BellmanFordFunction come from
# the star import of DirectedGraphClass — confirm their signatures there.
directedGraphVar = DirectedGraph(numberOfVertices)
createDirectedGraph(directedGraphVar, source)
BellmanFordFunction(directedGraphVar)
directedGraphVar.printShortestPath() | GauthamBT/Bellman-Ford-Algorithm | GraphMainProgram.py | GraphMainProgram.py | py | 527 | python | en | code | 0 | github-code | 13 |
74908647696 | import requests
from lxml import etree
import re
import asyncio
import aiohttp
import aiofiles
import os
from urllib.parse import urljoin
from Crypto.Cipher import AES
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36"
}
# merge the all movie clip
def merge_movie_clip():
    """Concatenate the decrypted .ts clips, in playlist order, into the final movie.

    Reads the clip order from ``second_m3u8.txt``, then merges the files found in
    the ``after_decryption`` directory in two passes with the shell ``cat``
    command: first in batches (to keep each command line short), then all
    intermediate parts into the final file.

    Side effect: changes the process working directory to ``after_decryption``.
    """
    # windows command: copy /b a.ts+b.ts c.ts "movie.mp4"
    # macOS/linux command: cat a.ts b.ts c.ts > movie.mp4
    # Collect clip file names in playlist order (skip m3u8 tag/comment lines).
    file_list = []
    with open("second_m3u8.txt", "r", encoding="utf-8") as f:
        for line in f:
            if line.startswith("#"):
                continue
            file_list.append(line.strip().split("/")[-1])
    # switch directory to after_decryption
    os.chdir("after_decryption")
    # First pass: merge clips in batches of ~20.
    n = 1
    temp = []
    for i, file_name in enumerate(file_list):
        temp.append(file_name)
        if i % 20 == 0 and i != 0:
            os.system(f"cat {' '.join(temp)} > {n}.ts")
            print(f"{n}.ts is finished.")
            temp = []
            n += 1
    # Merge the remaining clips. Guarding against an empty batch fixes the old
    # behaviour of running `cat  > n.ts`, which created a bogus empty part when
    # the clip count landed exactly on a batch boundary.
    if temp:
        os.system(f"cat {' '.join(temp)} > {n}.ts")
        print(f"{n}.ts is finished.")
        n += 1
    # Second pass: merge all intermediate parts into the final movie.
    parts = [f"{i}.ts" for i in range(1, n)]
    os.system(f"cat {' '.join(parts)} > 春夏秋冬又一春.mp4")
# decrypt the clip of movie
async def decrypt_clip(file_path, key):
    """Decrypt one downloaded clip and write it to after_decryption/.

    Args:
        file_path: path of the encrypted clip (e.g. "before_decryption/x.ts").
        key: AES key bytes fetched from the playlist's key URI.
    """
    file_name = file_path.split("/")[-1]
    new_file_path = f"after_decryption/{file_name}"
    async with aiofiles.open(file_path, "rb") as f, aiofiles.open(new_file_path, "wb") as f2:
        content = await f.read()
        # AES-CBC decryptor; assumes the IV is 16 zero bytes — the playlist's
        # EXT-X-KEY tag may specify an explicit IV, TODO confirm.
        decryptor = AES.new(key, AES.MODE_CBC, b'\x00' * 16)
        decrypted = decryptor.decrypt(content)
        await f2.write(decrypted)
        print("decrypt:", file_name, "is finished.")
# decrypt all movie
async def decrypt(key):
    """Decrypt every clip listed in second_m3u8.txt, all clips concurrently."""
    # Clip names appear in the playlist as non-comment lines; keep playlist order.
    with open("second_m3u8.txt", "r", encoding="utf-8") as playlist:
        clip_names = [
            entry.strip().split("/")[-1]
            for entry in playlist
            if not entry.startswith("#")
        ]
    # One decryption task per clip, all awaited together.
    pending = [
        asyncio.create_task(decrypt_clip(f"before_decryption/{name}", key))
        for name in clip_names
    ]
    await asyncio.gather(*pending)
def get_key():
    """Fetch the AES key referenced by the URI="..." attribute in second_m3u8.txt.

    Returns:
        bytes: raw key material downloaded from the key URL.
    """
    with open("second_m3u8.txt", "r", encoding="utf-8") as f:
        file_content = f.read()
    # Pull the key URL out of the playlist's EXT-X-KEY line.
    obj = re.compile(r'URI="(?P<key_url>.*?)"', re.S)
    key_url = obj.search(file_content).group('key_url')
    response = requests.get(key_url, headers=headers)
    return response.content
async def download_movie_clip(url, sem):
    """Download one clip into before_decryption/, retrying up to 10 times.

    Args:
        url: absolute URL of the clip.
        sem: asyncio.Semaphore bounding the number of concurrent downloads.

    URLs that still fail after all retries are appended to failed_list.txt.
    """
    async with sem:
        file_name = url.split("/")[-1]
        file_path = f"before_decryption/{file_name}"
        print("start download:", file_name)
        flag = True  # "still failed" flag; cleared after a successful write
        for i in range(10):
            try:
                async with aiohttp.ClientSession() as session:
                    # verify_ssl=False disables TLS certificate validation.
                    async with session.get(url, headers=headers, verify_ssl=False) as response:
                        content = await response.read()
                        async with aiofiles.open(file_path, "wb") as f:
                            await f.write(content)
                print("download:", file_name, "is finished.")
                flag = False
                break
            except Exception as e:
                print(file_name, "is failed.", e)
                continue
        if flag:
            # Record the URL so failed clips can be retried later.
            with open("failed_list.txt", "a", encoding="utf-8") as f:
                f.write(url + "\n")
# download all movie
async def download_movie():
    """Download every clip listed in second_m3u8.txt, at most 100 at a time."""
    # Semaphore caps the number of concurrent downloads.
    sem = asyncio.Semaphore(100)
    tasks = []
    with open("second_m3u8.txt", "r", encoding="utf-8") as f:
        for line in f:
            if line.startswith("#"):
                continue
            else:
                line = line.strip()
                # One download task per clip URL.
                task = asyncio.create_task(download_movie_clip(line, sem))
                tasks.append(task)
    await asyncio.gather(*tasks)
# download m3u8 file
def download_m3u8(url):
    """Download the master playlist, then its first variant (media) playlist.

    Writes the master playlist to first_m3u8.txt and the playlist referenced by
    its first non-comment entry to second_m3u8.txt.
    """
    response = requests.get(url, headers=headers)
    with open("first_m3u8.txt", "w", encoding="utf-8") as f:
        f.write(response.text)
    print("first m3u8 file is downloaded.")
    response.close()
    with open("first_m3u8.txt", "r", encoding="utf-8") as f:
        for line in f:
            if line.startswith("#"):
                continue
            else:
                line = line.strip()
                # Resolve the variant entry against the master URL and fetch it.
                line = urljoin(url, line)
                response = requests.get(line, headers=headers)
                # NOTE: inner `f` shadows the outer file handle; safe only
                # because we break out of the loop right after.
                with open("second_m3u8.txt", "w", encoding="utf-8") as f:
                    f.write(response.text)
                print("second m3u8 file is downloaded.")
                response.close()
                break
# get m3u8_url
def get_m3u8_url(url):
    """Extract the m3u8 playlist URL from the player page's inline `url: "..."` JS."""
    response = requests.get(url, headers=headers)
    obj = re.compile(r'url: "(?P<m3u8>.*?)"', re.S)
    m3u8 = obj.search(response.text).group('m3u8')
    return m3u8
# get the iframe url
def get_ifram_url(url):
    """Return the src of the first <iframe> on the page, retrying up to 10 times.

    NOTE(review): implicitly returns None when every attempt fails; the caller
    (main) passes the result straight to urljoin, which would then raise —
    confirm whether an explicit error is wanted here.
    """
    for i in range(10):
        try:
            response = requests.get(url, headers=headers)
            tree = etree.HTML(response.text)
            src = tree.xpath("//iframe/@src")[0]
            return src
        except Exception as e:
            print(e)
def main():
    """Full pipeline: locate playlist, download clips, decrypt them, merge."""
    url = "http://www.wbdy.tv/play/63690_1_1.html"
    # Find the embedded player page (iframe) and resolve it to an absolute URL.
    iframe_url = get_ifram_url(url)
    iframe_url = urljoin(url, iframe_url)
    # Get the url of the m3u8 playlist from the iframe page.
    m3u8 = get_m3u8_url(iframe_url)
    # Download master + variant playlists.
    download_m3u8(m3u8)
    # Download all movie clips listed in the variant playlist.
    asyncio.run(download_movie())
    # Fetch the AES decryption key.
    key = get_key()
    # Decrypt every clip.
    asyncio.run(decrypt(key))
    # Merge the decrypted clips into the final movie file.
    merge_movie_clip()
# Script entry point: run the whole download → decrypt → merge pipeline.
if __name__ == '__main__':
    main()
| TBSAAA/Web-crawler | case/async_movie.py | async_movie.py | py | 6,368 | python | en | code | 0 | github-code | 13 |
10520673424 | from database.db_wrapper import DBwrapper
from threading import Thread
from main import logger, stop_and_restart
def admin_method(func):
    """Decorator for marking handlers as admin-only, so that strangers can't use them.

    The wrapped handler only runs when the sending user's id is in the
    database's admin list; otherwise the user gets a refusal reply and the
    attempt is logged with a warning.
    """
    from functools import wraps  # local import keeps this change self-contained

    @wraps(func)  # preserve func.__name__/__doc__ on the wrapper for introspection
    def admin_check(bot, update):
        db = DBwrapper.get_instance()
        user = update.message.from_user
        if user.id in db.get_admins():
            return func(bot, update)
        else:
            update.message.reply_text('You have not the needed permissions to do that!')
            logger.warning(
                "User {} ({}, @{}) tried to use admin function '{}'!".format(
                    user.id, user.first_name, user.username, func.__name__))
    return admin_check
@admin_method
def restart(bot, update):
update.message.reply_text('Bot is restarting...')
Thread(target=stop_and_restart).start() | Deses/PG40x30 | pg40x30/commands/adminCommands.py | adminCommands.py | py | 927 | python | en | code | 0 | github-code | 13 |
9129757917 | from telegram import *
from telegram.ext import *
from requests import *
# Bot wiring: build the Updater/Dispatcher pair used to register handlers below.
# SECURITY NOTE(review): the bot token is hardcoded in source control — move it
# to an environment variable and revoke the exposed token.
updater = Updater(token="5369531550:AAFdpCUzqBJxcG0th98XQGddqZc3vSRBwKI")
dispatcher = updater.dispatcher
# Telegram user ids allowed to use the bot (whitelist).
allowedUsernames = [1241390756,1030952653]
print("Bot starting.....................\n")
def startCommand(update: Update, context: CallbackContext):
    """/start handler: reject non-whitelisted users, else show the semester keyboard."""
    u_id= update.message.from_user.id
    name = update.message.from_user.first_name
    if u_id not in allowedUsernames:
        context.bot.send_message(chat_id=update.effective_chat.id,
                                 text="you are banned by admin ")
    else:
        # Top-level menu: pick a semester group.
        buttons = [[KeyboardButton('sem1&sem2')], [KeyboardButton('sem3')]]
        context.bot.send_message(chat_id=update.effective_chat.id,
                                 text=f"{name} Welcome to my bot! ", reply_markup=ReplyKeyboardMarkup(buttons))
    # Debug logging of the calling user.
    print(name)
    print(u_id)
def check_user(update: Update, context: CallbackContext):
    """Return True when the sender's Telegram id is on the whitelist."""
    return update.message.from_user.id in allowedUsernames
def sem3(update: Update, context: CallbackContext):
    """Show the semester-3 subject keyboard."""
    update.message.reply_text('bhargavbhai')
    sem3_button = [[KeyboardButton('D.S.A.')], [KeyboardButton('D.B.M.S.')], [KeyboardButton('p&s')], [
        KeyboardButton('ETC')], [KeyboardButton('ic')], [KeyboardButton('d.f.')], [KeyboardButton('exit')]]
    context.bot.send_message(chat_id=update.effective_chat.id,
                             text="Welcome to my bot! ", reply_markup=ReplyKeyboardMarkup(sem3_button))
def messageHandler(update: Update, context: CallbackContext):
    """Dispatch free-text button presses to the matching semester/subject branch.

    Each branch tests whether its keyword appears *anywhere* in the message
    text. NOTE(review): substring matching causes overlaps — 'Physics' also
    contains 'ic', and 'B.E.E.' contains 'B.E.', so one press can fire several
    branches. Also the keyboard sends 'E.G.D' while the handler checks
    'E.D.G.', so that branch can never trigger. Confirm intended behaviour.
    """
    # 'exit' returns the user to the start keyboard.
    if 'exit' in update.message.text:
        startCommand(update, context)
    if 'sem3' in update.message.text:
        sem3(update, context)
    # Semester 1 & 2 subject keyboard.
    if 'sem1&sem2' in update.message.text:
        sem1_button = [[KeyboardButton('B.M.E.')], [KeyboardButton('MATHS1')], [KeyboardButton('MATHS2')], [KeyboardButton('ENGLISH')], [KeyboardButton('E.G.D')], [
            KeyboardButton('Environmental Science')], [KeyboardButton('P.P.S.')], [KeyboardButton('B.E.E.')], [KeyboardButton('Physics')], [KeyboardButton('B.E.')], [KeyboardButton('exit')]]
        context.bot.send_message(chat_id=update.effective_chat.id,
                                 text="Welcome to my bot! ", reply_markup=ReplyKeyboardMarkup(sem1_button))

    # Helper: forward an existing message to a user, swallowing API errors.
    def forward_to_user(chat_id, message_id, user_id, bot):
        try:
            bot.forward_message(
                chat_id=user_id, from_chat_id=chat_id, message_id=message_id)
        except Exception as e:
            print("Failed to forward message to user:", e)

    # Helper: send a stored document (by file id or URL) to a user.
    def forward_document_to_user(chat_id, document_id, user_id, bot):
        try:
            bot.send_document(chat_id=user_id, document=document_id)
        except Exception as e:
            print("Failed to forward document to user:", e)

    # ---- SEM3 subjects ----
    if 'send' in update.message.text:
        chat_id = update.message.chat_id
        file_url = "https://drive.google.com/uc?export=download&id=163zCrXnaqzzlch36DR8NnzouFBejnPB4"
        try:
            context.bot.send_document(
                chat_id=chat_id, document=file_url, caption='This is a my document')
        except Exception as e:
            print(e)
            print("Failed to send")
            context.bot.send_message(chat_id=update.effective_chat.id,text="Somethingwentrong")
    if 'D.S.A.' in update.message.text:
        message = update.message
        chat_id = update.message.chat_id
        update.message.reply_text('BOOKs')
        # DSA book hosted on Google Drive.
        file_url = "https://drive.google.com/uc?export=download&id=11j0xsgTVU7JjncJQZKirVBzhey-uKMk9"
        context.bot.send_document(chat_id=chat_id, document=file_url, caption='This is a my document')
        update.message.reply_text('PYSQs')
        update.message.reply_text('NOTEs')
    if 'D.B.M.S.' in update.message.text:
        update.message.reply_text('BOOKs')
        # NOTE(review): from_chat_id/message_id look like a bot id and a user
        # id, not a real chat/message pair — verify this forward ever succeeds.
        try:
            context.bot.forward_message(
                chat_id=update.effective_chat.id, from_chat_id=5369531550, message_id=1241390756)
        except Exception as e:
            print(e)
        update.message.reply_text('PYSQs')
        update.message.reply_text('NOTEs')
    if 'd.f.' in update.message.text:
        chat_id = update.message.chat_id
        message_id = update.message.message_id
        # Debug output of the triggering message's ids.
        print(message_id)
        print(chat_id)
        print(type(chat_id))
        # Forward the triggering message back to its sender.
        user_id = update.message.from_user.id
        forward_to_user(chat_id, message_id, user_id, context.bot)
        update.message.reply_text('BOOKs')
        update.message.reply_text('PYSQs')
        update.message.reply_text('NOTEs')
    if 'p&s' in update.message.text:
        if update.message.document is not None:
            chat_id = update.message.chat_id
            # Telegram file id of the stored P&S document.
            document_id = "BQACAgUAAxkBAAMqZG4Dz5OCgjoSCENlodJcKDvamAsAApUNAAIgI3FXWLpws2zwVXgvBA"
            user_id = update.message.from_user.id
            forward_document_to_user(chat_id, document_id, user_id, context.bot)
            update.message.reply_text('Document forwarded to the user.')
        else:
            update.message.reply_text('No document found in the message.')
        update.message.reply_text('BOOKs')
        update.message.reply_text('PYSQs')
        update.message.reply_text('NOTEs')
    if 'ETC' in update.message.text:
        update.message.reply_text('BOOKs')
        update.message.reply_text('PYSQs')
        update.message.reply_text('NOTEs')
    if 'ic' in update.message.text:
        update.message.reply_text('BOOKs')
        update.message.reply_text('PYSQs')
        update.message.reply_text('NOTEs')
    # ---- SEM1 and SEM2 subjects (placeholders: material not wired up yet) ----
    if 'B.M.E.' in update.message.text:
        update.message.reply_text('BOOKs')
        update.message.reply_text('PYSQs')
        update.message.reply_text('NOTEs')
    if 'MATHS1' in update.message.text:
        update.message.reply_text('BOOKs')
        update.message.reply_text('PYSQs')
        update.message.reply_text('NOTEs')
    if 'MATHS2' in update.message.text:
        update.message.reply_text('BOOKs')
        update.message.reply_text('PYSQs')
        update.message.reply_text('NOTEs')
    if 'ENGLISH' in update.message.text:
        update.message.reply_text('BOOKs')
        update.message.reply_text('PYSQs')
        update.message.reply_text('NOTEs')
    if 'E.D.G.' in update.message.text:
        # NOTE(review): keyboard button is labelled 'E.G.D' — this branch is unreachable.
        update.message.reply_text('BOOKs')
        update.message.reply_text('PYSQs')
        update.message.reply_text('NOTEs')
    if 'Environmental Science' in update.message.text:
        update.message.reply_text('BOOKs')
        update.message.reply_text('PYSQs')
        update.message.reply_text('NOTEs')
    if 'P.P.S.' in update.message.text:
        update.message.reply_text('BOOKs')
        update.message.reply_text('PYSQs')
        update.message.reply_text('NOTEs')
    if 'B.E.E.' in update.message.text:
        update.message.reply_text('BOOKs')
        update.message.reply_text('PYSQs')
        update.message.reply_text('NOTEs')
    if 'B.E.' in update.message.text:
        update.message.reply_text('BOOKs')
        update.message.reply_text('PYSQs')
        update.message.reply_text('NOTEs')
    if 'Physics' in update.message.text:
        update.message.reply_text('BOOKs')
        update.message.reply_text('PYSQs')
        update.message.reply_text('NOTEs')
# Register handlers and start long-polling for updates.
dispatcher.add_handler(CommandHandler("start", startCommand))
dispatcher.add_handler(MessageHandler(Filters.text, messageHandler))
updater.start_polling()
| Vbhargavj/python | Project/telegrambot/gtu.py | gtu.py | py | 11,153 | python | en | code | 1 | github-code | 13 |
70095272337 | import threading
"""
Semaphore 是用于控制进入数量的锁
文件,读,写,写一般只是用于一个线程写,读可以允许有多个,
做爬虫,控制并发数量
semaphore的acqiure和release方法有点不一样,减法原理,加法原理
semaphore内部的实现是用的condition
Queue也是内部实现是用的condition,看Queue源码
"""
import time
class HtmlSpider(threading.Thread):
    """Worker thread that simulates fetching one page, then frees a semaphore slot."""

    def __init__(self, url, sem):
        super().__init__()
        self.url = url  # page URL this worker is responsible for
        self.sem = sem  # shared semaphore, acquired by the producer before start()
    def run(self):
        # Simulate the latency of a network fetch.
        time.sleep(2)
        print("got html text success")
        # Release the slot the producer acquired, letting it spawn another worker.
        self.sem.release()
class UrlProducer(threading.Thread):
    """Spawns up to 20 HtmlSpider workers, throttled by the shared semaphore."""

    def __init__(self, sem):
        super().__init__()
        self.sem = sem  # bounds how many spiders run at once
    def run(self):
        for i in range(20):
            # Blocks once the semaphore's capacity worth of workers is in flight.
            self.sem.acquire()
            html_thread = HtmlSpider("https://baidu.com/{}".format(i), self.sem)
            html_thread.start()
if __name__ == "__main__":
    # Allow at most 3 concurrent spider threads.
    sem = threading.Semaphore(3)
    url_producer = UrlProducer(sem)
    url_producer.start()
| Zbiang/Python-IO | multi-threaded and multi-process/thread_semaphore.py | thread_semaphore.py | py | 1,068 | python | en | code | 0 | github-code | 13 |
31943868580 | from functools import cache
from typing import List
# @lc code=start
class Solution:
    def atMostNGivenDigitSet(self, digits: List[str], n: int) -> int:
        """Count positive integers <= n whose digits all come from `digits`.

        Digit-by-digit counting instead of memoized recursion: every valid
        number that is shorter than n contributes d**length combinations, and
        numbers of equal length are counted by walking n's digits left to right.
        """
        s = str(n)
        width = len(s)
        d = len(digits)
        # All valid numbers with fewer digits than n.
        total = sum(d ** length for length in range(1, width))
        # Valid numbers with exactly len(s) digits, matched prefix by prefix.
        for pos, ch in enumerate(s):
            # Choosing any strictly smaller digit here frees all later positions.
            total += sum(dig < ch for dig in digits) * d ** (width - pos - 1)
            if ch not in digits:
                # The prefix can no longer equal n's prefix; nothing more to add.
                break
        else:
            # Every digit of n is in the set, so n itself is a valid number.
            total += 1
        return total
# @lc code=end
| wylu/leetcodecn | src/python/p900to999/902.最大为-n-的数字组合.py | 902.最大为-n-的数字组合.py | py | 1,277 | python | zh | code | 3 | github-code | 13 |
22334892595 | #Leetcode 853. Car Fleet
class Solution1:
    def carFleet(self, target: int, position: List[int], speed: List[int]) -> int:
        """Return the number of car fleets that arrive at `target`.

        Monotonic-stack approach: process cars from the one closest to the
        target backwards. A car merges into the fleet ahead of it unless it
        needs strictly more time to reach the target, in which case it leads a
        new fleet. (Removed leftover debug prints and merged the duplicated
        append branches.)
        """
        stack = []  # arrival times of fleet leaders, most recent on top
        for pos, spd in sorted(zip(position, speed))[::-1]:
            time = (target - pos) / spd
            # A new fleet forms only if this car is slower than the fleet ahead.
            if not stack or time > stack[-1]:
                stack.append(time)
        return len(stack)
class Solution2:
    def carFleet(self, target: int, position: List[int], speed: List[int]) -> int:
        """Count car fleets by scanning arrival times from the car nearest the target."""
        fleets = 0
        slowest = 0
        # Iterate cars from closest-to-target to furthest. A strictly larger
        # arrival time starts a new fleet; otherwise the car catches up and merges.
        for pos, spd in sorted(zip(position, speed))[::-1]:
            arrival = (target - pos) / spd
            if arrival > slowest:
                fleets += 1
                slowest = arrival
        return fleets
32073838436 | # Evaluation on output images from conditional diffusion model
# Using DeepFace emotion prediction from https://github.com/serengil/deepface
import os
import wandb, torch
from ddpm_conditional import *
from fastcore.all import *
from modules import *
from fer_data import fer_dataset
from embedding_utils import prepare_cnn, cnn_embed, prepare_vae, vae_embed
from deepface import DeepFace
import cv2
import matplotlib.pyplot as plt
import json
from PIL import Image
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, precision_recall_fscore_support
## Classwise evaluation results
def calc_on_label(y_true, y_pred, model_name,
                  labels=("angry", "disgust", "fear", "happy", "sad", "surprise", "neutral")):
    """Print per-class precision/recall/F-score/support for one model's predictions.

    Args:
        y_true: ground-truth emotion names.
        y_pred: predicted emotion names.
        model_name: header printed above the report.
        labels: class order for the report. Previously this function read a
            global `emotion_label` that was only defined *locally* inside
            total_eval(), so calling it raised NameError; the defaulted
            parameter fixes that while keeping the 3-argument call sites.
    """
    labels = list(labels)
    print('-------------'+model_name+'------------------')
    precision, recall, fscore, support = precision_recall_fscore_support(y_true, y_pred, labels=labels)
    print(labels)
    print('precision: {}'.format(precision))
    print('recall: {}'.format(recall))
    print('fscore: {}'.format(fscore))
    print('support: {}'.format(support))
    print()
## Score Calculation
def calc_score(y_true, y_pred):
    """Print macro-averaged F1, precision and recall for the given label lists."""
    print("fscore:", f1_score(y_true, y_pred, average="macro"))
    print("precision:", precision_score(y_true, y_pred, average="macro"))
    print("recall:", recall_score(y_true, y_pred, average="macro"))
## Input file from the directory for complete evaluation pipeline
def total_eval(directory):
    """Run DeepFace emotion prediction over generated images and print scores.

    Expects `directory` to contain one sub-folder per true label, with files
    named "<model>_<sample>_<desiredlabel>.jpeg" where model is one of
    label/cnn/vae (ema/non files are skipped). Scores are reported separately
    per conditioning model.
    NOTE(review): calc_on_label (as originally written) reads a global
    `emotion_label` that is only defined locally here — verify it doesn't
    raise NameError. y_true_total/y_pred_total are collected nowhere (unused).
    """
    ## Correspondance from label index to emotion name.
    emotion_label = ["angry", "disgust", "fear", "happy", "sad", "surprise", "neutral"]
    y_true_total = []
    y_pred_total = []
    y_true_label = []
    y_pred_label = []
    y_true_cnn = []
    y_pred_cnn = []
    y_true_vae = []
    y_pred_vae = []
    ## Without face detection -> take the image as a whole for emotion detection.
    for orig_label in os.listdir(directory):
        f = os.path.join(directory, orig_label)
        if os.path.isdir(f):
            print(f)
            for img_path in os.listdir(f):
                if img_path[-5:] != '.jpeg':
                    continue
                # File name encodes "<model>_<sample#>_<desiredlabel>.jpeg".
                desired_label = img_path[:-5].split('_')[-1]
                model = img_path[:-5].split('_')[0]
                if (model == 'ema') or (model == 'non'):
                    continue
                # Only evaluate the first few samples per label.
                if int(img_path[:-5].split('_')[1]) > 5:
                    continue
                full_img_path = os.path.join(f, img_path)
                img = cv2.imread(full_img_path)
                ## Analyze emotion (no face-detection gate).
                demoraphy = DeepFace.analyze(img_path=full_img_path,
                                             actions = ['emotion'], enforce_detection = False)
                demo_json = json.loads(json.dumps(demoraphy))
                dominate_emotion = demo_json['dominant_emotion']
                if model == 'label':
                    y_true_label.append(emotion_label[int(desired_label)])
                    y_pred_label.append(dominate_emotion)
                elif model == 'cnn':
                    y_true_cnn.append(emotion_label[int(desired_label)])
                    y_pred_cnn.append(dominate_emotion)
                elif model == 'vae':
                    y_true_vae.append(emotion_label[int(desired_label)])
                    y_pred_vae.append(dominate_emotion)
    # Calculate score for label embedding
    calc_on_label(y_true_label, y_pred_label, 'LABEL')
    calc_score(y_true_label, y_pred_label)
    # Calculate score for CNN embedding
    calc_on_label(y_true_cnn, y_pred_cnn, 'CNN')
    calc_score(y_true_cnn, y_pred_cnn)
    # Calculate score for VAE embedding
    calc_on_label(y_true_vae, y_pred_vae, 'VAE')
    calc_score(y_true_vae, y_pred_vae)
def main():
    """Generate samples from three conditional diffusers, then evaluate them.

    Loads checkpoints for a label-conditioned, a VAE-embedding and a
    CNN-embedding diffusion model, generates 5 noised validation samples per
    true label denoised toward every one of the 7 target labels, saves all
    images under output_img/<true_label>/, and finally runs total_eval().
    NOTE(review): checkpoint/dataset paths are machine-specific absolute paths.
    """
    torch.cuda.empty_cache()
    # Number of emotion labels.
    n = 7
    device = "cuda"
    directory = 'output_img'
    # Load previous checkpoints for the plain label-conditioned model.
    diffuser = Diffusion(noise_steps=1000, img_size=64, num_classes=7, c_in=1, c_out=1, use_sem=None)
    ckpt = torch.load("/home/ubuntu/CS230/model_ckpt/DDPM_conditional_aug/ckpt.pt")
    ema_ckpt = torch.load("/home/ubuntu/CS230/model_ckpt/DDPM_conditional_aug/ema_ckpt.pt")
    diffuser.model.load_state_dict(ckpt)
    diffuser.ema_model.load_state_dict(ema_ckpt)
    # VAE-embedding-conditioned model.
    vae_diffuser = Diffusion(noise_steps=1000, img_size=64, num_classes=10, c_in=1, c_out=1, use_sem='vae')
    ckpt = torch.load("/home/ubuntu/CS230/models/vae_resume/ckpt.pt")
    ema_ckpt = torch.load("/home/ubuntu/CS230/models/vae_resume/ema_ckpt.pt")
    vae_diffuser.model.load_state_dict(ckpt)
    vae_diffuser.ema_model.load_state_dict(ema_ckpt)
    # CNN-embedding-conditioned model.
    cnn_diffuser = Diffusion(noise_steps=1000, img_size=64, num_classes=10, c_in=1, c_out=1, use_sem='cnn')
    ckpt = torch.load("/home/ubuntu/CS230/models/cnn_encode/ckpt.pt")
    ema_ckpt = torch.load("/home/ubuntu/CS230/models/cnn_encode/ema_ckpt.pt")
    cnn_diffuser.model.load_state_dict(ckpt)
    cnn_diffuser.ema_model.load_state_dict(ema_ckpt)
    # Load pre-serialized FER dataloaders.
    train_dataloader = torch.load('/home/ubuntu/CS230/dataset/fer_train_32.pt')
    val_dataloader = torch.load('/home/ubuntu/CS230/dataset/fer_val_32.pt')
    # Generate 5 samples for each true label.
    orig_sample_count = [0]*7
    device = "cuda"
    for idx, (img, label) in enumerate(val_dataloader):
        if orig_sample_count == [5]*7:
            break
        # Use the maximal timestep so the noised image is nearly pure noise.
        t = torch.randint(low=999, high=1000, size=(img.shape[0],))
        t = t.to(device)
        img = img.to(device).float()
        label = label.to(device)
        # Forward-diffuse the batch to timestep t.
        x_t, noise = vae_diffuser.noise_images(img, t)
        print(img.shape, x_t.shape)
        # Denoise each selected sample toward every one of the 7 labels.
        for i in range(2, x_t.shape[0]):
            if orig_sample_count == [5]*7:
                break
            orig_sample_count[label[i]] += 1
            print(orig_sample_count)
            z = x_t[i].unsqueeze(axis=0).expand(7, -1, -1, -1)
            x = img[i].unsqueeze(axis=0).expand(7, -1, -1, -1) # [7, 1, 48, 48]
            labels = torch.arange(7).long().to(device)
            sampled_images = diffuser.sample(use_ema=True, labels=labels, seed=0, init_noise=z)
            vae_sampled_images = vae_diffuser.sample(use_ema=True, labels=labels, seed=0, init_noise=z, ref_images=x)
            cnn_sampled_images = cnn_diffuser.sample(use_ema=True, labels=labels, seed=0, init_noise=z, ref_images=x)
            # Save images in the true-label folder, named <model>_<count>_<desired label>.
            for j in range(n):
                im = Image.fromarray(sampled_images[j].squeeze().cpu().numpy())
                im.save(f"./{directory}/{label[i]}/label_{orig_sample_count[label[i]]}_{j}.jpeg")
                im = Image.fromarray(vae_sampled_images[j].squeeze().cpu().numpy())
                im.save(f"./{directory}/{label[i]}/vae_{orig_sample_count[label[i]]}_{j}.jpeg")
                im = Image.fromarray(cnn_sampled_images[j].squeeze().cpu().numpy())
                im.save(f"./{directory}/{label[i]}/cnn_{orig_sample_count[label[i]]}_{j}.jpeg")
    # Go through the evaluation pipeline on the generated images.
    total_eval(directory)
if __name__ == '__main__':
main() | TangYihe/CS230 | eval.py | eval.py | py | 7,100 | python | en | code | 4 | github-code | 13 |
14463320442 | from abc import abstractmethod
from typing import Callable, Dict, Iterable, Mapping, Optional, Tuple, Union
import numpy as np
import torch
from torch import nn
from torch.functional import Tensor
from tqdm import tqdm
from neuroaiengines.utils.signals import create_decoding_fn
import pandas as pd
#pylint: disable=not-callable
from functools import partial
import warnings
class NanValueError(ValueError):
    """Raised when a hidden state produced during training contains NaN values."""
    pass
class EarlyTermination(Exception):
    # Signal for stopping a training run early.
    # NOTE(review): raised/handled outside this excerpt — confirm usage.
    pass
class TBTTModule(nn.Module):
    """Interface for models optimized with truncated backprop through time (TBPTT)."""

    @abstractmethod
    def update_parameterizations(self):
        """Refresh cached parameterizations after an optimizer step.

        TBPTT only steps the optimizer every k1 timesteps, so models can gain
        speed by recomputing derived parameterizations of those parameters
        only when the optimization actually happens.
        """
        pass
class TBPTT():
"""
Truncated backpropagation through time optimization
This class is mostly from https://discuss.pytorch.org/t/implementing-truncated-backpropagation-through-time/15500/4
"""
def __init__(self,
one_step_module :TBTTModule,
loss_module:nn.Module,
k1:int,
k2:int,
optimizer:torch.optim.Optimizer,
lr_schedulers:Optional[Iterable[nn.Module]]=None,
backprop_k1_states:bool=False,
epoch_callback:Optional[Callable]=None,
cumulative_loss:bool=False
):
"""
params:
-------
one_step_module:
Model to be optimized. Has the call signature
# output, new_state = one_step_module(input, state)
loss_module:
Loss function. Has the call signature
# loss = loss_module(output, target)
k1,k2:
every k1 steps, backprop loss k2 states.
good visualization below.
optim:
Optimizer for the model.
Should be initialized already with one_step_module.parameters()
lr_schedulers:
A learning rate scheduler or list of learning rate schedulers for the optimizer. They will be applied in the order given.
backprop_k1_states:
Uses the loss from the last k1 outputs/targets. Otherwise, the loss is computed at the most recent target/output and backpropped through the last k1 states.
epoch_callback:
after a training epoch, call this function with the epoch result dict
cumulative_loss:
Accumulate loss instead of using last value
"""
self.one_step_module = one_step_module
self.model = one_step_module
self.loss_module = loss_module
self.k1 = k1
self.k2 = k2
# TODO unsure about this line -- for my code, having this true breaks it.
# I'm still unclear on pytorch and retain graph.
self.retain_graph = k1 < k2
self.retain_graph = True
self.mk1k2 = max(k1,k2)
# self.retain_graph = False
# You can also remove all the optimizer code here, and the
# train function will just accumulate all the gradients in
# one_step_module parameters
self.optimizer = optimizer
self.requires_closure = isinstance(optimizer, torch.optim.LBFGS)
self.losses = []
self.parameters = None
self.states = None
self.backprop_k1_states = backprop_k1_states
self.epoch_callback = epoch_callback
self.cumulative_loss = cumulative_loss
# Assume LR schedulers initialized
if lr_schedulers is None:
self.lr_schedulers = []
else:
try:
iter(lr_schedulers)
self.lr_schedulers = lr_schedulers
except TypeError:
# Not iterable
self.lr_schedulers = [lr_schedulers]
def train(self, input_sequence: Iterable[Tuple[torch.Tensor,torch.Tensor]], init_state: torch.Tensor):
"""Trains on a single input sequence
Args:
input_sequence (Iterable[Tuple[torch.Tensor,torch.Tensor]]): Input sequence
init_state (torch.Tensor): initial state
Raises:
NanValueError: If state has a nan value in it
Returns:
dict: information about the training, including state, mean_loss, and final_loss
"""
states = [(None, init_state) ] # (prev_state, curr_state)
targets = []
outputs = []
inputs = []
stage_loss = None
total_loss = 0.
cum_loss = tensor(0.,requires_grad=True)
for i, (inp, target) in enumerate(input_sequence):
# Get the "current" state from the last timestep
state = states[-1][1].detach()
if torch.any(torch.isnan(state)):
raise NanValueError(f'State contains nan values : {state}')
state.requires_grad=True
output, new_state = self.one_step_module(inp, state)
states.append((state, new_state))
while len(states) > self.mk1k2:
# Delete stuff that is too old
del states[0]
if self.backprop_k1_states or self.requires_closure:
targets.append(target)
outputs.append(output)
inputs.append(inp)
while len(outputs) > self.mk1k2:
# Delete stuff that is too old
del outputs[0]
del targets[0]
del inputs[0]
# Calculate loss to track
stage_loss = self.loss_module(output, target)
if self.cumulative_loss:
cum_loss = cum_loss + stage_loss
total_loss += stage_loss.item()
# k1 steps have gone, time to backprop
if (i+1)%self.k1 == 0:
self.optimizer.zero_grad()
if (not self.backprop_k1_states) and (not self.cumulative_loss):
# backprop last module (keep graph only if they ever overlap)
stage_loss.backward(retain_graph=self.retain_graph)
elif self.cumulative_loss:
cum_loss.backward(retain_graph=self.retain_graph)
# Go back k2 states
for j in range(self.k2-1):
# if we get all the way back to the "init_state", stop
if states[-j-2][0] is None:
break
# If backpropping states, do it here
if ((j < self.k1) and self.backprop_k1_states) and (not self.cumulative_loss):
loss = self.loss_module(outputs[-j-1], targets[-j-1])
loss.backward(retain_graph=True)
curr_grad = states[-j-1][0].grad
states[-j-2][1].backward(curr_grad, retain_graph=self.retain_graph)
# curr_grad = states[-j-1][0].grad
# states[-j-2][1].backward(curr_grad, retain_graph=self.retain_graph)
if self.requires_closure:
self.optimizer.step(partial(self.closure, states, targets, inputs))
self.one_step_module.update_parameterizations()
else:
self.optimizer.step()
self.one_step_module.update_parameterizations()
# Reset cumulative loss
if self.cumulative_loss:
cum_loss = tensor(0., requires_grad=True)
return {'mean_loss' : total_loss/(i+1),
'final_loss' : stage_loss,
'states': np.array([s[1].data.numpy() for s in states])}
def closure(self, states, targets, inputs):
# self.optimizer.zero_grad()
# state = states[0][1].detach() # state to start from -- we are going forward!
# if self.backprop_k1_states:
# for i,(inp,target) in enumerate(zip(inputs, targets)):
# output, state = self.one_step_module(inp,state)
# loss = self.loss_module(output, target)
# loss.backward(retain_graph=self.retain_graph)
# else:
# output,_ = self.one_step_module(inputs[-1],states[-1][1].detach())
# loss = self.loss_module(output, targets[-1])
# loss.backward(retain_graph=self.retain_graph)
self.optimizer.zero_grad()
outputs = []
state = states[0][1]
new_states = [(None, state)]
for inp in inputs:
state = new_states[-1][1].detach()
state.requires_grad=True
output,new_state = self.one_step_module(inp,state)
new_states.append((state,new_state))
outputs.append(output)
if not self.backprop_k1_states:
loss = self.loss_module(outputs[-1], targets[-1])
# backprop last module (keep graph only if they ever overlap)
loss.backward(retain_graph=self.retain_graph)
# Go back k2 states
loss = tensor(0., requires_grad=True)
for j in range(self.k2-1):
# if we get all the way back to the "init_state", stop
if new_states[-j-2][0] is None:
break
# If backpropping states, do it here
if j < self.k1 and self.backprop_k1_states:
loss_ = self.loss_module(outputs[-j-1], targets[-j-1])
if self.cumulative_loss:
loss = loss + loss_
else:
loss.backward(retain_graph=True)
# curr_grad = new_states[-j-1][0].grad
# print(new_states[-j-2][1], new_states[-j-1][0])
# new_states[-j-2][1].backward(curr_grad, retain_graph=self.retain_graph)
if self.cumulative_loss:
loss.backward(retain_graph=True)
return loss
def batch_train(self,
                input_sequencer: Iterable[Iterable[Tuple[torch.Tensor, torch.Tensor]]],
                initial_conditioner: Union[Callable, Iterable],
                epoch_callback: Optional[Callable]=None,
                progress_bar=True):
    """
    Trains over a collection of sequences (one ``train`` call per epoch).

    params:
    -------
    input_sequencer:
        Iterable of iterables that contain inputs and targets
    initial_conditioner:
        function ``f(epoch_number, (inputs, targets))`` returning an initial
        state, OR a constant initial state used for every epoch
    epoch_callback:
        a callback that is called with form f(train_return_dict, epoch_number)
        train_return_dict includes mean_loss, final_loss, and states from training. See train().
    progress_bar:
        show a tqdm progress bar over epochs
    """
    # Record parameter snapshots per epoch (row 0 = pre-training values).
    if self.parameters is None:
        self.parameters = pd.DataFrame(columns=[n for n,v in self.one_step_module.named_parameters()])
    self.parameters.loc[0,:] = {n:v.detach().clone().numpy() for n,v in self.one_step_module.named_parameters()}
    nancount = 0
    if not callable(initial_conditioner):
        # BUG FIX: the original wrote
        #     initial_conditioner = lambda i, d: initial_conditioner
        # which rebinds the very name the lambda closes over, so the lambda
        # returned *itself* rather than the constant initial state. Capture
        # the constant under a separate name before rebinding.
        constant_state = initial_conditioner
        initial_conditioner = lambda i, d: constant_state
    for i,d in tqdm(enumerate(input_sequencer),"Epochs",total=len(input_sequencer), disable=not progress_bar):
        init_state = initial_conditioner(i,d)
        try:
            r = self.train(zip(*d), init_state)
            nancount=0
        except NanValueError:
            # Tolerate sporadic nan epochs; bail out after 10 in a row.
            warnings.warn(f'Received nan values in training epoch {i}, continuing...')
            nancount += 1
            if nancount > 10:
                warnings.warn('Received 10 nan values in a row, returning...')
                break
            continue
        self.parameters.loc[i+1,:] = {n:v.detach().clone().numpy() for n,v in self.one_step_module.named_parameters()}
        try:
            for lr_scheduler in self.lr_schedulers:
                lr_scheduler.step()
            # Check if all optimizer param groups LR are close to zero -- no need to continue training!
            lrs = [group['lr'] for group in self.optimizer.param_groups]
            if np.all(np.array(lrs) <= 1e-8):
                raise EarlyTermination
            if epoch_callback is not None:
                epoch_callback(r,i)
        except EarlyTermination:
            print(f'Learning rates reached {lrs}. Exiting early!')
            break
def test(self,
         input_sequence : Iterable[Tuple[torch.Tensor, torch.Tensor]],
         initial_conditioner : Callable,
         ):
    """
    Evaluates (no gradients) on a single input sequence.

    params:
    -------
    input_sequence:
        The input sequence composed of (input, target) pairs
    initial_conditioner:
        callable ``f(0, input_sequence)`` returning the initial state
        (note: the original docstring called this ``init_state``)

    Returns a dict with ``mean_loss``, ``final_loss`` and the array of
    visited ``states``.

    NOTE(review): if ``input_sequence`` is empty, ``j`` and ``loss`` are
    never bound and the return raises NameError — confirm callers always
    pass a non-empty sequence.
    """
    init_state = initial_conditioner(0,input_sequence)
    states = [(None, init_state)]
    total_loss = 0
    with torch.no_grad():
        for j, (inp, target) in enumerate(input_sequence):
            # Detach carried state; gradients are disabled anyway.
            state = states[-1][1].detach()
            output, new_state = self.one_step_module(inp, state)
            states.append((state, new_state))
            loss = self.loss_module(output, target).item()
            total_loss += loss
    return {'mean_loss' : total_loss/(j+1),
            'final_loss' : loss,
            'states': np.array([s[1].data.numpy() for s in states])}
def cosine_similarity(x1, x2):
    """Cosine distance: 1 - <x1, x2> / (||x1|| * ||x2||)."""
    dot = torch.matmul(x1, x2)
    norm_product = torch.linalg.norm(x1) * torch.linalg.norm(x2)
    return 1 - dot / norm_product
def mult_similarity(x1, x2):
    """Product loss: cosine distance multiplied by MSE."""
    cs_term = cosine_similarity(x1, x2)
    mse_term = nn.MSELoss()(x1, x2)
    return cs_term * mse_term
def MSELoss(x1, x2):
    """Mean squared error between x1 and x2."""
    criterion = nn.MSELoss()
    return criterion(x1, x2)
def add_similarity(x1, x2, a=1, b=1):
    """Weighted sum: a * cosine distance + b * MSE."""
    cs_term = cosine_similarity(x1, x2)
    mse_term = nn.MSELoss()(x1, x2)
    return a * cs_term + b * mse_term
def max_similarity(x1, x2):
    """Cosine distance plus the absolute gap between the two maxima."""
    peak_gap = torch.abs(torch.max(x1) - torch.max(x2))
    return cosine_similarity(x1, x2) + peak_gap
from neuroaiengines.utils.signals import create_pref_dirs
from torch import tensor
def create_hilbert_loss_fn(sz):
    """Factory for a phase-comparison loss over EPG population activity.

    Builds a pseudo-inverse decoder from preferred directions (project
    helper ``create_pref_dirs``) and compares the decoded sin/cos phases
    of the two inputs, weighted by their relative norm mismatch.
    """
    epg_pref_dirs = create_pref_dirs(sz,centered=True)
    # Stacked twice — presumably two EPG populations share one decoder;
    # TODO confirm against the encoder.
    epg_pref_dirs = np.tile(epg_pref_dirs, (2,1))
    epg_pref_dirs_inv = tensor(np.linalg.pinv(epg_pref_dirs))
    def decode_epgs(act):
        # Affine rescale before decoding; the constants 5 and -1 appear to
        # undo an encoding normalization — verify against the encoder.
        act = act*5 - 1
        sc = torch.matmul(epg_pref_dirs_inv,act)
        return sc
    def hilbert(x1,x2):
        s1,c1 = decode_epgs(x1)
        s2,c2 = decode_epgs(x2)
        n1,n2 = torch.linalg.norm(x1), torch.linalg.norm(x2)
        # Weight by relative norm mismatch (asymmetric: normalized by n1).
        w = torch.abs(n1-n2)/n1
        # Combines cos(a-b)-like and sin(a-b)-like terms into one scalar.
        cc = c1*c2 + s1*s2 + (s1*c2 - c1*s2)
        return torch.abs(w*cc)
    return hilbert
def create_angluar_cosine_difference(sz):
    """Factory: cosine difference between the decoded (sin, cos) headings
    of two activity vectors of size ``sz``.

    Relies on ``create_decoding_fn`` (project helper defined elsewhere).
    NOTE(review): "angluar" is a typo for "angular", but the name is kept
    because the other factories in this module call it by this name.
    """
    decode = create_decoding_fn(sz, sincos=True, backend=torch)
    def cosine_difference(x1,x2):
        sc1 = decode(x1)
        sc2 = decode(x2)
        return cosine_similarity(sc1,sc2)
    return cosine_difference
def create_angular_cosine_diff_mse(sz, cs_weight=1, mse_weight=1):
    """Loss factory: weighted angular cosine difference plus weighted MSE."""
    angle_term = create_angluar_cosine_difference(sz)
    mse_term = nn.MSELoss()
    def combined(x1, x2):
        return angle_term(x1, x2) * cs_weight + mse_term(x1, x2) * mse_weight
    return combined
def create_angular_cosine_diff_norm(sz, cs_weight=1, norm_weight=1):
    """Loss factory: weighted angular cosine difference plus the distance
    between the two vector norms."""
    angle_term = create_angluar_cosine_difference(sz)
    def combined(x1, x2):
        norm_gap = torch.dist(torch.linalg.norm(x1), torch.linalg.norm(x2))
        return angle_term(x1, x2) * cs_weight + norm_gap * norm_weight
    return combined
def create_angular_cosine_diff_variance(sz, cs_weight=1, var_weight=1):
    """Loss factory: weighted angular cosine difference plus the squared
    distance between the two variances."""
    angle_term = create_angluar_cosine_difference(sz)
    def combined(x1, x2):
        var_gap = torch.dist(torch.var(x1), torch.var(x2))
        return angle_term(x1, x2) * cs_weight + torch.square(var_gap) * var_weight
    return combined
def create_decoded_mse(sz: int, **kwargs) -> "Callable[[Tensor, Tensor], Tensor]":
    """
    Factory: MSE computed in decoded (sin, cos) heading space.

    The returned callable computes ``mse_loss(decode(x1), decode(x2))``
    where ``decode`` comes from the project helper ``create_decoding_fn``.

    Args:
        sz : Size of the expected input vector
        **kwargs : forwarded to ``create_decoding_fn``
    Returns:
        Callable: loss function ``f(x1, x2) -> Tensor``
        (the original annotation said ``Tensor``, but a callable is returned)
    """
    decode = create_decoding_fn(sz, sincos=True, backend=torch, **kwargs)
    mse = nn.MSELoss()
    def decoded_mse(x1,x2):
        sc1 = decode(x1)
        sc2 = decode(x2)
        return mse(sc1,sc2)
    return decoded_mse
def create_decoded_mse_var(sz, mse_lambda=1, var_lambda=1, **kwargs):
    """Loss factory: decoded-heading MSE plus the squared distance
    between the two variances."""
    decoded_term = create_decoded_mse(sz, **kwargs)
    def combined(x1, x2):
        var_gap = torch.dist(torch.var(x1), torch.var(x2))
        return decoded_term(x1, x2) * mse_lambda + torch.square(var_gap) * var_lambda
    return combined
def create_angular_cosine_diff_minmax(sz, cs_weight=1, min_weight=1, max_weight=1):
    """Loss factory: weighted angular cosine difference plus the distances
    between the two minima and between the two maxima.

    Fix: the original aliased ``min = torch.min`` / ``max = torch.max``
    inside the factory, shadowing the builtins; the torch functions are
    now called directly (behavior unchanged).
    """
    cs_fn = create_angluar_cosine_difference(sz)
    def combined(x1, x2):
        min_gap = torch.dist(torch.min(x1), torch.min(x2))
        max_gap = torch.dist(torch.max(x1), torch.max(x2))
        return cs_fn(x1, x2) * cs_weight + min_gap * min_weight + max_gap * max_weight
    return combined
def create_angular_cosine_diff_singular_variance(sz, cs_weight=1, var_weight=1):
    """Loss factory: weighted angular cosine difference plus a penalty
    pushing the variance of the first argument toward 1."""
    angle_term = create_angluar_cosine_difference(sz)
    def combined(x1, x2):
        return angle_term(x1, x2) * cs_weight + (1 - torch.var(x1)) * var_weight
    return combined
def create_angular_cosine_diff_minmean(sz, cs_weight=1, min_weight=1, mean_weight=1):
    """Loss factory: weighted angular cosine difference plus the distances
    between the two minima and between the two means.

    Fix: the original aliased ``min = torch.min`` inside the factory,
    shadowing the builtin; the torch functions are now called directly
    (behavior unchanged).
    """
    cs_fn = create_angluar_cosine_difference(sz)
    def combined(x1, x2):
        min_gap = torch.dist(torch.min(x1), torch.min(x2))
        mean_gap = torch.dist(torch.mean(x1), torch.mean(x2))
        return cs_fn(x1, x2) * cs_weight + min_gap * min_weight + mean_gap * mean_weight
    return combined
def create_angular_cosine_diff_varmean(sz, cs_weight=1, var_weight=1, mean_weight=1):
    """Loss factory: weighted angular cosine difference plus the distances
    between the two variances and between the two means."""
    angle_term = create_angluar_cosine_difference(sz)
    def combined(x1, x2):
        var_gap = torch.dist(torch.var(x1), torch.var(x2))
        mean_gap = torch.dist(torch.mean(x1), torch.mean(x2))
        return angle_term(x1, x2) * cs_weight + var_gap * var_weight + mean_gap * mean_weight
    return combined
def create_combined_loss(*loss_fns):
    """
    Creates a linear combination of loss functions.

    Each argument is either a loss callable ``fn(x1, x2)`` or a
    ``(fn, weight)`` tuple; bare callables get weight 1.
    """
    all_loss_fns = []
    all_weights = []
    for loss_fn in loss_fns:
        if isinstance(loss_fn, tuple):
            loss_fn, weight = loss_fn
        else:
            weight = 1
        all_loss_fns.append(loss_fn)
        all_weights.append(weight)
    def combined_loss(x1, x2):
        # BUG FIX: the original started from ``tensor(0)`` (int64) and used
        # ``loss += ...``; an in-place add of a float loss into an int tensor
        # raises "result type Float can't be cast to ... Long" in torch.
        # Accumulate out-of-place from a float zero instead.
        loss = tensor(0.)
        for loss_fn, weight in zip(all_loss_fns, all_weights):
            loss = loss + weight * loss_fn(x1, x2)
        return loss
    return combined_loss
| aplbrain/seismic | neuroaiengines/optimization/torch.py | torch.py | py | 19,212 | python | en | code | 0 | github-code | 13 |
30604684176 | import pygame
from pygame.locals import *
import sys
pygame.init()
WINDOW_TITLE = "Basic Controls"
MAX_FPS = 120
BG_COLOR = (255, 255, 255)
SCREEN_WIDTH = 1280
SCREEN_HEIGHT = 720
SCREEN = pygame.display.set_mode((1280, 720), flags=SRCALPHA)
PAPER = pygame.Surface(size=(SCREEN_WIDTH, SCREEN_HEIGHT), flags=SRCALPHA)
PAPER.fill(BG_COLOR)
SCREEN.blit(PAPER, (0, 0))
CLOCK = pygame.time.Clock()
FONT = pygame.font.SysFont("Arial", 12)
RUNNING = True
DELTA_TIME = 0
pygame.display.set_caption(WINDOW_TITLE)
def hud_debug(screen: pygame.Surface) -> None:
    """Render the current FPS in the top-left corner of *screen*."""
    fps_label = f"FPS: {round(CLOCK.get_fps(), 2)}"
    rendered = FONT.render(
        fps_label,
        True,
        (0, 0, 0),
        BG_COLOR,
    )
    screen.blit(rendered, (0, 0))
def draw_board(
    row=10,
    col=10,
    cell_width=20,
    cell_height=20,
    color: Color = (255, 255, 255, 255),
) -> pygame.Surface:
    """Return a fresh SRCALPHA surface holding a row x col grid of 1px-outlined cells."""
    surface = pygame.Surface(size=(col * cell_width, row * cell_height), flags=SRCALPHA)
    surface.fill(BG_COLOR)
    for y in range(row):
        for x in range(col):
            cell = pygame.Rect(x * cell_width, y * cell_height, cell_width, cell_height)
            pygame.draw.rect(surface, color, cell, 1)
    return surface
def main():
    """Event loop: draws a centered grid whose alpha and size react to the keyboard.

    Keypad -/+ step the grid alpha by 50; PageUp/PageDown grow/shrink the
    grid by one row and one column.
    """
    board_alpha = 255 / 2
    board_row = 40
    board_col = 50
    while RUNNING:
        PAPER.fill(BG_COLOR)
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_KP_MINUS:
                    # NOTE(review): decreasing past 0 wraps back to fully
                    # opaque (255) rather than clamping — confirm intended.
                    board_alpha = (board_alpha - 50) if board_alpha - 50 > 0 else 255
                if event.key == pygame.K_KP_PLUS:
                    board_alpha = (board_alpha + 50) if board_alpha + 50 < 255 else 255
                if event.key == pygame.K_PAGEUP:
                    board_row += 1
                    board_col += 1
                if event.key == pygame.K_PAGEDOWN:
                    # Shrink but never below the current size when at 1.
                    board_row -= 1 if board_row - 1 > 0 else 0
                    board_col -= 1 if board_col - 1 > 0 else 0
        hud_debug(PAPER)
        board = draw_board(
            board_row,
            board_col,
            cell_width=15,
            cell_height=15,
            color=(0, 0, 0, board_alpha),
        )
        # Center the board horizontally, 50px from the top.
        PAPER.blit(board, ((SCREEN_WIDTH / 2) - (board.get_width() / 2), 50))
        SCREEN.blit(PAPER, (0, 0))
        CLOCK.tick(MAX_FPS)
        pygame.display.update()
# Run the demo loop only when executed directly (not on import).
if __name__ == "__main__":
    main()
| metalvexis/PygameBasics | basic/basic_controls.py | basic_controls.py | py | 2,554 | python | en | code | 0 | github-code | 13 |
39740397717 | import os
import math
import operator
import datetime
import itertools
import random
from copy import deepcopy
from collections import defaultdict
from django.utils.safestring import mark_safe
from . import utils
from .models import *
ordinal = lambda n: "{}{}".format(n,"tsnrhtdd"[(n//10%10!=1)*(n%10<4)*n%10::4])
class EventResult( object ):
    """Lightweight per-rider, per-event result used to build series standings.

    Wraps one raw Result row plus the rank/starters/value computed for it.
    ``category`` may later be rewritten by ``adjust_for_upgrades``; the
    value at construction time is preserved in ``original_category``.
    """
    __slots__ = ('status', 'participant', 'license_holder', 'event', 'rank', 'starters', 'value_for_rank', 'category', 'original_category', 'ignored')
    def __init__( self, result, rank, starters, value_for_rank ):
        self.status = result.status
        self.participant = result.participant
        self.license_holder = result.participant.license_holder
        self.event = result.event
        self.rank = rank
        self.starters = starters
        self.value_for_rank = value_for_rank
        # Upgrades may overwrite ``category`` later; remember the original.
        self.original_category = self.category = result.participant.category
        # Set True when the result is dropped by "best N results" trimming.
        self.ignored = False
    @property
    def is_finisher( self ):
        # True for results with the "finished" status code.
        return self.status == Result.cFinisher
    @property
    def upgraded( self ):
        # True once adjust_for_upgrades has moved this result to a new category.
        return self.category != self.original_category
    @property
    def team_name( self ):
        team = self.participant.team
        return team.name if team else ''
    @property
    def status_rank( self ):
        # Non-finishers sort after any real rank.
        return self.rank if self.status == 0 else 99999 # 0 == Result.cFinisher
    @property
    def rank_text( self ):
        # Show the status name (DNF, DNS, ...) for non-finishers, else "1st" etc.
        if self.status != Result.cFinisher:
            return next(v for v in Result.STATUS_CODE_NAMES if v[0] == self.status)[1]
        return ordinal( self.rank )
    def __repr__( self ):
        return utils.removeDiacritic(
            '("{}",{}: event="{}",{}, rank={}, strs={}, vfr={}, oc={})'.format(
                self.license_holder.full_name(), self.license_holder.pk,
                self.event.name, self.event.pk,
                self.rank, self.starters, self.value_for_rank, self.original_category.code_gender
            )
        )
def extract_event_results( sce, filter_categories=None, filter_license_holders=None ):
    """Extract EventResult records for one series/competition/event link.

    ``filter_categories`` limits categories considered (default: all series
    categories); ``filter_license_holders`` optionally restricts riders.
    Results are ranked per wave or per category depending on each wave's
    ``rank_categories_together`` flag.
    """
    series = sce.series
    if not filter_categories:
        filter_categories = series.get_categories()
    if filter_license_holders and not isinstance(filter_license_holders, set):
        filter_license_holders = set( filter_license_holders )
    if not isinstance(filter_categories, set):
        filter_categories = set( filter_categories )
    get_value_for_rank = sce.get_value_for_rank_func()
    # Create a map between categories and waves.
    category_pk = [c.pk for c in filter_categories]
    category_wave = {}
    for w in sce.event.get_wave_set().all():
        for c in w.categories.filter( pk__in=category_pk ):
            category_wave[c] = w
    if not category_wave:
        return []
    # Organize the results by wave based on the event results.
    wave_results = defaultdict( list )
    event_results = (sce.event.get_results()
        .filter(participant__category__in=filter_categories)
    )
    if series.ranking_criteria != 0: # If not rank by points, compute lap counts in the query.
        event_results = sce.event.add_laps_to_results_query( event_results )
    event_results = (event_results
        .order_by('wave_rank')
        .select_related('participant', 'participant__license_holder', 'participant__category', 'participant__team')
    )
    for rr in event_results:
        wave_results[category_wave[rr.participant.category]].append( rr )
    # Report the results by wave.
    eventResults = []
    for w, results in wave_results.items():
        if w.rank_categories_together:
            get_rank = operator.attrgetter('wave_rank')
            get_starters = operator.attrgetter('wave_starters')
        else:
            get_rank = operator.attrgetter('category_rank')
            get_starters = operator.attrgetter('category_starters')
        rr_winner = None
        for rr in results:
            # Track the winner relative to which values are computed:
            # first overall in the wave, or first of each category.
            if w.rank_categories_together:
                if not rr_winner:
                    rr_winner = rr
            else:
                if not rr_winner or rr_winner.participant.category != rr.participant.category:
                    rr_winner = rr
            if filter_license_holders and rr.participant.license_holder not in filter_license_holders:
                continue
            rank = get_rank( rr )
            value_for_rank = get_value_for_rank(rr, rank, rr_winner)
            # Zero-valued results carry no series weight and are dropped.
            if value_for_rank:
                eventResults.append( EventResult(rr, rank, get_starters(rr), value_for_rank) )
    return eventResults
def extract_event_results_custom_category( sce, custom_category_name ):
    """Extract EventResult records for one named custom category of the event.

    Returns [] when the event has no custom category with that name.
    Riders are ranked 1..N in the order the custom category reports them.
    """
    custom_category = sce.event.get_custom_category_set().filter(name=custom_category_name).first()
    if not custom_category:
        return []
    # NOTE(review): ``series`` is assigned but unused below.
    series = sce.series
    get_value_for_rank = sce.get_value_for_rank_func()
    results = custom_category.get_results()
    eventResults = []
    rr_winner = None
    for rank, rr in enumerate(results, 1):
        # Winner is simply the first listed result.
        if not rr_winner:
            rr_winner = rr
        value_for_rank = get_value_for_rank(rr, rank, rr_winner)
        if value_for_rank:
            eventResults.append( EventResult(rr, rank, len(results), value_for_rank) )
    return eventResults
def adjust_for_upgrades( series, eventResults ):
    """Fold results earned in lower categories into a rider's highest
    category along each configured upgrade progression, scaling the points
    by ``factor ** steps``. Mutates ``eventResults`` in place.

    Only applies when the series ranks by points (ranking_criteria == 0).
    """
    if series.ranking_criteria != 0:
        return
    has_zero_factor = False
    upgradeCategoriesAll = set()
    factorPathPositions = []
    for sup in series.seriesupgradeprogression_set.all():
        if sup.factor == 0.0:
            has_zero_factor = True
        path = list( suc.category for suc in sup.seriesupgradecategory_set.all() )
        # Position of each category along the progression (higher = later).
        position = {cat:i for i, cat in enumerate(path)}
        path = set( path )
        upgradeCategoriesAll |= path
        factorPathPositions.append( [sup.factor, path, position] )
    if not factorPathPositions:
        return
    # Organize results by license holder, then by category and result.
    competitionCategories = defaultdict( lambda: defaultdict(list) )
    for rr in eventResults:
        if rr.category in upgradeCategoriesAll:
            competitionCategories[rr.license_holder][rr.category].append( rr )
    for lh_categories in competitionCategories.values():
        # Riders with results in a single category need no adjustment.
        if len(lh_categories) == 1:
            continue
        for factor, path, position in factorPathPositions:
            upgradeCategories = { cat: rrs for cat, rrs in lh_categories.items() if cat in path }
            if len(upgradeCategories) <= 1:
                continue
            # Find the most-upgraded category the rider raced in on this path.
            highestPos, highestCategory = -1, None
            for cat in upgradeCategories.keys():
                pos = position[cat]
                if pos > highestPos:
                    highestPos, highestCategory = pos, cat
            # Move lower-category results up, discounting by factor^distance.
            for cat, rrs in upgradeCategories.items():
                if cat == highestCategory:
                    continue
                power = highestPos - position[cat]
                for rr in rrs:
                    rr.category = highestCategory
                    rr.value_for_rank *= (factor ** power)
            # Only the first progression that matches this rider is applied.
            break
    # Remove any trace of previous results if the factor was zero.
    if has_zero_factor:
        eventResults[:] = [rr for rr in eventResults if rr.value_for_rank > 0.0]
def series_results( series, categories, eventResults ):
    """Compute final series standings from per-event results.

    Returns ``(categoryResult, events)`` where each categoryResult row is
    ``[license_holder, team_name, total_value, gap, per-event results]``
    and ``events`` is the event list in the display order.

    Scoring: ranking_criteria 0 = points (higher is better), 1 = time
    (lower is better). NOTE(review): ``scoreByPercent`` (criteria 2),
    ``lhFinishes``, ``percentFormat`` and ``floatFormat`` are computed but
    never used below — confirm whether percent scoring is handled elsewhere.
    """
    scoreByPoints = (series.ranking_criteria == 0)
    scoreByTime = (series.ranking_criteria == 1)
    scoreByPercent = (series.ranking_criteria == 2)
    bestResultsToConsider = series.best_results_to_consider
    mustHaveCompleted = series.must_have_completed
    showLastToFirst = series.show_last_to_first
    considerMostEventsCompleted = series.consider_most_events_completed
    numPlacesTieBreaker = series.tie_breaking_rule
    # Filter all results for this category.
    if categories is not None:
        categories = set( list(categories) )
        eventResults = [rr for rr in eventResults if rr.category in categories]
    # If not scoring by points, trim out all non-finisher status (DNF, DNS, etc.) as any finish time does not count.
    if not scoreByPoints:
        eventResults = [rr for rr in eventResults if rr.is_finisher]
    if not eventResults:
        return [], []
    eventResults.sort( key=operator.attrgetter('event.date_time', 'event.name', 'rank') )
    # Assign a sequence number to the events in increasing date_time order.
    events = sorted( set(rr.event for rr in eventResults), key=operator.attrgetter('date_time') )
    eventSequence = {e:i for i, e in enumerate(events)}
    lhEventsCompleted = defaultdict( int )
    lhPlaceCount = defaultdict( lambda : defaultdict(int) )
    lhTeam = defaultdict( lambda: '' )
    lhResults = defaultdict( lambda : [None] * len(events) )
    lhFinishes = defaultdict( lambda : [None] * len(events) )
    lhValue = defaultdict( float )
    percentFormat = '{:.2f}'
    floatFormat = '{:0.2f}'
    # Get the individual results for each lh, and the total value.
    for rr in eventResults:
        lh = rr.license_holder
        lhTeam[lh] = rr.participant.team.name if rr.participant.team else ''
        lhResults[lh][eventSequence[rr.event]] = rr
        lhValue[lh] += rr.value_for_rank
        lhPlaceCount[lh][rr.rank] += 1
        lhEventsCompleted[lh] += 1
    # Remove if minimum events not completed.
    lhOrder = [lh for lh, results in lhResults.items() if lhEventsCompleted[lh] >= mustHaveCompleted]
    # Adjust for the best results.
    if bestResultsToConsider > 0:
        for lh, rrs in lhResults.items():
            iResults = [(i, rr) for i, rr in enumerate(rrs) if rr is not None]
            if len(iResults) > bestResultsToConsider:
                # Keep the best N: lowest times, or highest point values.
                if scoreByTime:
                    iResults.sort( key=(lambda x: (x[1].value_for_rank, x[0])) )
                else: # scoreByPoints
                    iResults.sort( key=(lambda x: (-x[1].value_for_rank, x[0])) )
                # Deduct the ignored results from the total and flag them.
                for i, rr in iResults[bestResultsToConsider:]:
                    lhValue[lh] -= rr.value_for_rank
                    rrs[i].ignored = True
                lhEventsCompleted[lh] = bestResultsToConsider
    lhGap = {}
    if scoreByTime:
        def sort_key( r ):
            key = [-lhEventsCompleted[r], lhValue[r]] # Decreasing events completed, then increasing time.
            key.extend( -lhPlaceCount[r][k] for k in range(1, numPlacesTieBreaker+1) ) # Decreasing count for each finish place.
            key.extend( (rr.status_rank if rr else 9999999) for rr in reversed(lhResults[r]) ) # Reverse last rank. If didn't participate, rank at 9999999.
            return key
        lhOrder.sort( key=sort_key )
        # Compute the time gap.
        if lhOrder:
            leader = lhOrder[0]
            leaderValue = lhValue[leader]
            leaderEventsCompleted = lhEventsCompleted[leader]
            # Gap is only meaningful for riders with as many events as the leader.
            lhGap = { r : lhValue[r] - leaderValue if lhEventsCompleted[r] == leaderEventsCompleted else None for r in lhOrder }
    else: # Score by points.
        def sort_key( r ):
            key = [-lhValue[r]] # Decreasing points (adjusted for best events).
            if considerMostEventsCompleted:
                key.append( -lhEventsCompleted[r] ) # Events completed.
            key.extend( -lhPlaceCount[r][k] for k in range(1, numPlacesTieBreaker+1) ) # Decreasing count for each finish place.
            key.extend( (rr.status_rank if rr else 9999999) for rr in reversed(lhResults[r]) ) # Reverse last rank. If didn't participate, rank at 9999999.
            return key
        lhOrder.sort( key=sort_key )
        # Compute the gap.
        lhGap = {}
        if lhOrder:
            leader = lhOrder[0]
            leaderValue = lhValue[leader]
            lhGap = { r : leaderValue - lhValue[r] for r in lhOrder }
    # List of:
    # license_holder, team, totalValue, gap, [list of results for each event in series]
    categoryResult = [[lh, lhTeam[lh], lhValue[lh], lhGap[lh]] + [lhResults[lh]] for lh in lhOrder]
    if showLastToFirst:
        # Display events newest-first, including each rider's result list.
        events.reverse()
        for lh, team, value, gap, results in categoryResult:
            results.reverse()
    return categoryResult, events
def get_results_for_category(series, category):
    """Compute series standings for one category (and its related categories)."""
    related = series.get_related_categories(category)
    all_results = []
    for sce in series.seriescompetitionevent_set.all():
        all_results.extend(extract_event_results(sce, related))
    adjust_for_upgrades(series, all_results)
    group = series.get_group_related_categories(category)
    return series_results(series, group, all_results)
def get_results_for_custom_category_name(series, custom_category_name):
    """Compute series standings across all events for one named custom category."""
    all_results = []
    for sce in series.seriescompetitionevent_set.all():
        all_results.extend(
            extract_event_results_custom_category(sce, custom_category_name)
        )
    return series_results(series, None, all_results)
def get_callups_for_wave( series, wave, eventResultsAll=None ):
    """Compute start-line callup order for each category group in a wave.

    Standings are built from series events *before* this wave's event; every
    current participant also gets a fake 1.0-point finish so upgraded riders'
    prior points are folded in correctly. Returns a list of tuples
    ``(sorted categories, [[participant, points], ...])``.
    """
    event = wave.event
    competition = event.competition
    RC = event.get_result_class()
    randomize = series.randomize_if_no_results
    series_categories = set( series.get_categories() )
    # Only participants with an assigned bib can be called up.
    participants = set( wave.get_participants_unsorted().exclude(bib__isnull=True).select_related('license_holder') )
    license_holders = set( p.license_holder for p in participants )
    p_from_lh = {p.license_holder:p for p in participants}
    callups = []
    FakeFinishValue = 1.0
    categories_seen = set()
    for c in wave.categories.all():
        # Each group of related categories is processed only once.
        if c in categories_seen:
            continue
        group_categories = set( series.get_group_related_categories(c) )
        categories_seen |= group_categories
        related_categories = series.get_related_categories( c )
        if eventResultsAll:
            # Reuse precomputed results, restricted to this wave's riders.
            eventResults = [deepcopy(er) for er in eventResultsAll if er.license_holder in license_holders]
        else:
            eventResults = []
            for sce in series.seriescompetitionevent_set.all():
                # Only events that happened before this wave's event count.
                if sce.event.date_time < event.date_time:
                    eventResults.extend( extract_event_results(sce, related_categories, license_holders) )
        # Add "fake" Results for all participants in the current event with 1.0 as value_for_rank.
        fakeResult = RC( event=event, status=0 )
        for p in participants:
            if p.category in series_categories and p.category in group_categories:
                fakeResult.participant = p
                eventResults.append( EventResult(fakeResult, 1, 1, FakeFinishValue) )
        # The fake results ensure any upgraded athletes's points will be considered properly if this is their first upgraded race.
        adjust_for_upgrades( series, eventResults )
        # Compute the series standings.
        categoryResult, events = series_results( series, group_categories, eventResults )
        # Remove entries beyond the callup max.
        del categoryResult[series.callup_max:]
        # Get the values we need and subtract for the FakeFinishValue.
        p_values = [[p_from_lh[lh], value-FakeFinishValue] for lh, team, value, gap, results in categoryResult]
        # Return the participant and the points value.
        if randomize:
            # Randomize athletes with no results.
            # Seed deterministically so the shuffle is reproducible per wave.
            random.seed( hash((competition.id, series.id, c.id, wave.id)) )
            for p_start, (p, value) in enumerate(p_values):
                if value == 0.0:
                    # Entries are sorted, so everything from the first
                    # zero-point rider onward is shuffled.
                    r = p_values[p_start:]
                    random.shuffle( r )
                    p_values[p_start:] = r
                    break
        else:
            # Remove athletes with no results.
            p_values = [[p, value] for p, value in p_values if value]
        if p_values:
            callups.append( (
                sorted(group_categories, key=operator.attrgetter('sequence')),
                p_values,
            )
            )
    # Returns a list of tuples (list of categories, list of (participants, points))
    return callups
| esitarski/RaceDB | core/series_results.py | series_results.py | py | 14,230 | python | en | code | 12 | github-code | 13 |
41843057732 | """
Chapter 3
Zynab Ali
"""
def main():
    """Prompt for a number between 1 and 7 and print the matching weekday.

    Improvements over the original: the nine-branch if/elif ladder is a
    dict lookup, non-numeric input no longer crashes with ValueError, and
    the misspelled error message ("perameters") is corrected.
    """
    # Number range corresponds to day of week (1 = Monday).
    days = {
        1: 'Monday',
        2: 'Tuesday',
        3: 'Wednesday',
        4: 'Thursday',
        5: 'Friday',
        6: 'Saturday',
        7: 'Sunday',
    }
    try:
        day = int(input('\nEnter a number between 1 and 7:'))
    except ValueError:
        day = None  # non-numeric input falls through to the error message
    print(days.get(day, 'Error. Value is outside parameters.') + '\n')
# Run the prompt only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| xen0bia/college | csce160/lab3/c3e1.py | c3e1.py | py | 571 | python | en | code | 0 | github-code | 13 |
39556678789 | # coding: utf-8
"""Titanic survival prediction: preprocess train/test CSVs, fit a random
forest, and write predictions to preds.csv."""
import pandas as pd
import re
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler, Imputer
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score
from collections import defaultdict
# NOTE(review): Imputer, StratifiedKFold and accuracy_score are imported but
# never used below.
random_seed = 3398655969
df_train = pd.read_csv('train.csv')
# Dropping unneeded columns
df_train = df_train.drop(['PassengerId', 'Cabin', 'Ticket'], axis = 1)
# Processing the 'Name' column.
# Matches the title token ending in '.' plus trailing whitespace (e.g. "Mr. ").
p = re.compile(r"\b[a-z]*[.]\s+", re.IGNORECASE)
# Titles map to 3 social-rank buckets; unknown titles default to 0 via defaultdict.
name_map = defaultdict(int)
name_map.update({'mrs.': 0, 'mr.': 0, 'mlle.': 0,
                 'mme.': 0, 'ms.': 0, 'miss.': 0,
                 'master.': 0, 'dr.': 1, 'rev.': 1,
                 'major.': 2, 'don.': 1, 'dona.': 1,
                 'countess.': 2, 'lady.': 2, 'sir.': 2,
                 'col.': 2, 'capt.': 2, 'jonkheer.': 2})
df_train['Name'] = df_train['Name'].apply(lambda name: p.search(name).group(0).strip())
df_train['Name'] = df_train['Name'].apply(lambda name: name_map[name.lower()])
# Processing the 'Age' column
age_median = df_train['Age'].median(skipna = True)
df_train['Age'].fillna(value = age_median, inplace = True)
age_se = StandardScaler()
df_train['Age'] = age_se.fit_transform(df_train['Age'].values.reshape(-1,1))
# Processing the 'Fare' column
# NOTE(review): fare_median is only applied to the test set below; train
# 'Fare' NaNs (if any) are not filled before scaling — confirm train.csv.
fare_median = df_train['Fare'].median(skipna = True)
fare_se = StandardScaler()
df_train['Fare'] = fare_se.fit_transform(df_train['Fare'].values.reshape(-1,1))
# Processing the 'Parch' column
parch_se = StandardScaler()
df_train['Parch'] = parch_se.fit_transform(df_train['Parch'].values.reshape(-1,1))
# Processing the "SibSp' column
sibsp_se = StandardScaler()
df_train['SibSp'] = sibsp_se.fit_transform(df_train['SibSp'].values.reshape(-1,1))
# Processing the 'Sex' column
sex_encoder = LabelEncoder()
df_train['Sex'] = sex_encoder.fit_transform(df_train['Sex'])
# Processing the 'Embarked' column
embarked_encoder = LabelEncoder()
df_train['Embarked'] = df_train['Embarked'].fillna(df_train['Embarked'].mode()[0])
df_train['Embarked'] = embarked_encoder.fit_transform(df_train['Embarked'])
embarked_ohe = OneHotEncoder(sparse = False)
ohe_encoding = embarked_ohe.fit_transform(df_train['Embarked'].values.reshape(-1, 1))
df_train = df_train.drop(['Embarked'], axis = 1)
# Keep only the first two one-hot columns; the third is linearly redundant.
df_train = pd.concat([df_train, pd.DataFrame(ohe_encoding[:, :-1], columns = ['Embarked0', 'Embarked1'])], axis = 1)
# Recode Sex to {-1, +1} and append a Sex * Pclass interaction feature.
df_train['Sex'] = np.where(df_train['Sex'] == 0, -1, 1)
df_train = pd.concat([df_train, pd.Series(df_train['Sex'] * df_train['Pclass'])], axis = 1)
# Column 0 is the 'Survived' target; everything after it is a feature.
X = df_train.iloc[:, 1:].values
y = df_train['Survived'].values
rfc = RandomForestClassifier(n_jobs=-1, n_estimators=10,max_features=None,min_samples_split=8,max_depth = 18,random_state=random_seed)
rfc.fit(X, y)
# Apply the same preprocessing, with the fitted scalers/encoders, to the test set.
df_test = pd.read_csv('test.csv')
df_test.drop(['Cabin', 'Ticket'], axis = 1, inplace = True)
df_test['Name'] = df_test['Name'].apply(lambda name: p.search(name).group(0).strip())
df_test['Name'] = df_test['Name'].apply(lambda name: name_map[name.lower()])
df_test['Age'].fillna(value = age_median, inplace = True)
df_test['Age'] = age_se.transform(df_test['Age'].values.reshape(-1, 1))
df_test['Fare'].fillna(value = fare_median, inplace = True)
df_test['Fare'] = fare_se.transform(df_test['Fare'].values.reshape(-1, 1))
df_test['Parch'] = parch_se.transform(df_test['Parch'].values.reshape(-1,1))
df_test['SibSp'] = sibsp_se.transform(df_test['SibSp'].values.reshape(-1,1))
df_test['Sex'] = sex_encoder.transform(df_test['Sex'])
df_test['Sex'] = np.where(df_test['Sex'] == 0, -1, 1)
df_test['Embarked'] = df_test['Embarked'].fillna('S')
df_test['Embarked'] = embarked_encoder.transform(df_test['Embarked'])
ohe_encoding = embarked_ohe.transform(df_test['Embarked'].values.reshape(-1, 1))
df_test = df_test.drop(['Embarked'], axis = 1)
df_test = pd.concat([df_test, pd.DataFrame(ohe_encoding[:, :-1], columns = ['Embarked0', 'Embarked1'])], axis = 1)
df_test = pd.concat([df_test, pd.Series(df_test['Sex'] * df_test['Pclass'])], axis = 1)
# Column 0 of df_test is PassengerId, excluded from the feature matrix.
y_pred = rfc.predict(df_test.values[:, 1:])
out = pd.concat([df_test['PassengerId'], pd.DataFrame(y_pred, columns = ['Survived'])], axis = 1)
# NOTE(review): to_csv writes the pandas index as an extra first column;
# Kaggle submissions usually require index=False — confirm the expected format.
out.to_csv('preds.csv')
| MMAesawy/Kaggle-Titanic | console.py | console.py | py | 4,270 | python | en | code | 0 | github-code | 13 |
42286224562 | from dash import Dash, html, dcc
import dash
import os
external_stylesheets = ['https://bootswatch.com/5/flatly/bootstrap.min.css']
app = Dash(__name__,use_pages=True,external_stylesheets=external_stylesheets)
server = app.server
app.layout = html.Div([
html.H1('Quantibike - Analysis Dashboards'),
html.Div(
[
html.Div(
dcc.Link(
f"{page['name']} - {page['path']}", href=page["relative_path"]
)
)
for page in dash.page_registry.values()
]
),
dash.page_container
])
if __name__ == '__main__':
app.run_server(debug=True) | Nomandes/quantibike-dash | app/app.py | app.py | py | 649 | python | en | code | 0 | github-code | 13 |
73663006096 | import pandas as pd
import numpy as np
from alphaml.engine.components.data_preprocessing.imputer import impute_df, impute_dm
from alphaml.engine.components.data_manager import DataManager
def test_impute_df():
    """Smoke-test impute_df: reindexing introduces all-NaN rows to be imputed."""
    base = pd.DataFrame(
        np.random.randn(5, 3),
        index=['a', 'c', 'e', 'f', 'h'],
        columns=["one", "two", "three"],
    )
    base["four"] = "bar"
    expanded = base.reindex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])
    expanded = impute_df(expanded)
    print("original df:")
    print(base)
    print("preprocessed df:")
    print(expanded)
def test_impute_dm():
    """Smoke-test impute_dm on a DataManager with mixed feature types.

    Fix: ``np.object`` was deprecated in NumPy 1.20 and removed in 1.24;
    the builtin ``object`` is the drop-in replacement for the dtype.
    """
    train_x = np.array([["a", 1, "python", 4.5],
                        ["b", 2, "c++", 6.8],
                        ["c", 10, "java", 4.8]])
    valid_x = np.array([["a", 1, "scala", 4.5],
                        ["c", 2, "c++", 6.8],
                        ["d", 10, "python", 4.8]])
    test_x = np.array([["a", 1, "scala", 4.5]])
    # Inject missing values: "???" marks categorical NA, np.nan numeric NA.
    train_x[2][0] = "???"
    train_x[2][2] = "???"
    valid_x[0][1] = np.nan
    test_x[0][-1] = np.nan
    dm = DataManager()
    dm.feature_types = ["Categorical", "Discrete", "Categorical", "Float"]
    dm.train_X = train_x.astype(object)
    dm.val_X = valid_x.astype(object)
    dm.test_X = test_x.astype(object)
    dm = impute_dm(dm, "???")
    print(dm.feature_types)
    print(dm.train_X)
    print("----------------------------")
    print(dm.val_X)
    print("----------------------------")
    print(dm.test_X)
# Manual smoke-test entry point (only the DataManager variant is run).
if __name__ == '__main__':
    test_impute_dm()
| dingdian110/alpha-ml | test/data_preprocessing/test_imputer.py | test_imputer.py | py | 1,498 | python | en | code | 1 | github-code | 13 |
16508169573 | from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPainter, QPen, QBrush
from PyQt5.QtWidgets import QWidget
class GradientWindow(QWidget):
    """Full-screen window that fills one display with the controller's
    current color; any key press or mouse click closes all windows."""
    def __init__(self, screen, controller):
        super().__init__()
        self.controller = controller
        # Force a native window so windowHandle() is available immediately.
        self.setAttribute(Qt.WA_NativeWindow)
        self.screen = screen
        # Pin the window to the given screen and cover it entirely.
        self.windowHandle().setScreen(screen)
        self.setGeometry(screen.geometry())
    def paintEvent(self, event):
        # Repaint the whole window with the controller's current color.
        color = self.controller.get_current_color()
        painter = QPainter(self)
        painter.setBrush(QBrush(color, Qt.SolidPattern))
        painter.setPen(QPen(color))
        painter.drawRect(0, 0, self.width(), self.height())
    def keyPressEvent(self, event):
        # Any key dismisses the screensaver.
        self.controller.close_windows()
    def mousePressEvent(self, event):
        # Any click dismisses the screensaver.
        self.controller.close_windows()
| pmineev/GradientScreensaver | gradient_window.py | gradient_window.py | py | 847 | python | en | code | 0 | github-code | 13 |
32335979038 | from sympy import symbols, simplify, oo
from sympy.solvers import solve
Gf = symbols('G_f') # filler modulus
Gm = symbols('G_m') # matrix modulus
G = symbols('G') # composite modulus
[t, s] = symbols(['t', 's'], positive=True) # exponents in Kotula model
phif = symbols('phi_f', positive=True) # filler fraction
phifc = symbols('phi_f^c', positive=True) # critical filler fraction
Ap = (1-phifc)/phifc
# specify function to solve to get G
F = ((1-phif)*(Gm**(1/s)-G**(1/s))/(Gm**(1/s)+Ap*G**(1/s)) +
phif*(Gf**(1/t)-G**(1/t))/(Gf**(1/t)+Ap*G**(1/t)))
# now set Gm to zero
G2 = F.subs(Gm, 0)
G2solve = simplify(solve(G2, G)[0])
# set Gf to infinity
G1 = F.limit(Gf, oo)
G1solve = solve(G1, G)[0]
| msecore/python | 432/kotula_fit.py | kotula_fit.py | py | 710 | python | en | code | 0 | github-code | 13 |
20972566309 | import re
from copy import copy
from pathlib import Path
from bootstrapy.templates import get_templates
from bs4 import BeautifulSoup, Comment, Tag
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import PythonLexer
""" <ul>
<li>
<a class="text-decoration-none" href="/posts/altplotlib_example.html">
altplotlib example
</a>
</li>
<li>
<a class="text-decoration-none" href="/posts/latex_to_png.html">
latex to png
</a>
</li>
</ul>
"""
def post_list_index(names, hrefs):
    """Build a <ul id="post-list-index"> whose items link each post name to its href."""
    listing_soup = BeautifulSoup("", "html.parser")
    listing = listing_soup.new_tag("ul", id="post-list-index")
    listing_soup.append(listing)
    for title, target in zip(names, hrefs):
        entry = listing_soup.new_tag("li")
        anchor = listing_soup.new_tag(
            "a", attrs={"class": "text-decoration-none", "href": target}
        )
        anchor.string = title
        entry.append(anchor)
        listing.append(entry)
    return listing
def get_template_name(soup):
    """Pop the first `template: NAME` HTML comment from *soup* and return NAME.

    The matching comment is removed from the tree; defaults to "frame" when
    no such comment exists (or its name is empty).
    """
    pattern = re.compile(r"template:\s*(.*?)$")
    name = ""
    for comment in soup.find_all(text=lambda e: isinstance(e, Comment)):
        match = pattern.search(comment)
        if match is None:
            continue
        name = match.group(1)
        comment.extract()
        break
    return name or "frame"
def list_posts(root_path=""):
    """Index every post under <root>/posts into pages/posts.html.

    Side effects: rewrites each post file (ensuring it has a <title>) and
    rewrites pages/posts.html with a fresh <ul id="post-list-index">.
    """
    post_index = Path(root_path) / "pages" / "posts.html"
    index_soup = BeautifulSoup(post_index.read_text(), "html.parser")
    # The last element child of #content receives the generated index list.
    content_child = list(
        tag for tag in index_soup.find(id="content").children if isinstance(tag, Tag)
    )[-1]
    names = []
    hrefs = []
    posts = list((Path(root_path) / "posts").glob("*.html"))
    for post in posts:
        with post.open() as f:
            soup = BeautifulSoup(f.read(), "html.parser")
        title_comment = soup.find(
            string=lambda text: isinstance(text, Comment) and ("title:" in text)
        )
        if title_comment is None:
            # title = get_config(root_path)["name"]
            # Fall back to a title derived from the file name.
            title = str(post.name).replace(".html", "").replace("_", " ")
        else:
            # BUG FIX: use search() instead of match().  The comment may have
            # text (e.g. whitespace) before "title:", in which case match()
            # returned None and .group(1) raised AttributeError.  This also
            # mirrors the pattern usage in get_template_name().
            title = re.search(r"title:\s*(.*?)$", title_comment).group(1)
        title_tag = soup.find("title")
        if title_tag is None:
            title_tag = soup.new_tag("title")
            soup.head.append(title_tag)
        title_tag.string = title
        with post.open("w") as f:
            f.write(soup.prettify())
        names.append(title)
        hrefs.append(f"../posts/{post.name}")
    # Drop any previously generated index before appending the new one.
    previous_list = index_soup.find(id="post-list-index")
    if previous_list is not None:
        previous_list.extract()
    index = post_list_index(names, hrefs)
    content_child.append(index)
    print("Indexing posts...")
    post_index.write_text(index_soup.prettify())
def get_pages(root_path=""):
    """Render every HTML file in _pages/ into pages/."""
    fill_templates(root_path, "pages")
def get_posts(root_path=""):
    """Render every HTML file in _posts/ into posts/."""
    fill_templates(root_path, "posts")
def fill_templates(root_path, directory):
    """Render each HTML file in _<directory>/ into <directory>/ via its template.

    For every input page: pick the template named by its `template:` comment
    (default "frame"), splice the page into the template's #content element,
    inline any Vega plots, Pygments-highlight python code blocks, and write
    the prettified result.
    """
    page_inputs = list((Path(root_path) / f"_{directory}").glob("*.html"))
    for page_input in page_inputs:
        page_output = Path(root_path) / f"{directory}" / page_input.name
        print(f"Constructing {page_input} ---> {page_output}")
        with page_input.open() as f:
            page_data = BeautifulSoup(f.read(), "html.parser")
        template_name = get_template_name(page_data)
        # copy() so repeated renders never mutate the shared template object.
        working_copy = copy(get_templates()[template_name](site_name=root_path))
        working_copy.find(id="content").append(page_data)
        add_vega_plots(working_copy, root_path)
        highlight_html(working_copy)
        with page_output.open("w") as f:
            f.write(working_copy.prettify())
def get_all(root_path=""):
    """Render posts, then pages."""
    get_posts(root_path)
    get_pages(root_path)
def add_vega_head(soup):
    """Inject the vega/vega-lite/vega-embed script tags and error CSS into <head>."""
    error_style = soup.new_tag("style")
    error_style.string = ".error {color: red;}"
    soup.head.append(error_style)
    for src in (
        "https://cdn.jsdelivr.net/npm//vega@5",
        "https://cdn.jsdelivr.net/npm//vega-lite@4.8.1",
        "https://cdn.jsdelivr.net/npm//vega-embed@6",
    ):
        script_tag = soup.new_tag("script", attrs={"type": "text/javascript", "src": src})
        soup.head.append(script_tag)
def add_vega_plots(soup, root_path=""):
    """Replace each `vega: NAME` comment with the plot body from assets/NAME.html.

    The vega script tags are injected into <head> once, on the first plot.
    """
    asset_directory = Path(root_path) / "assets"
    vega_comments = soup.find_all(
        string=lambda text: isinstance(text, Comment) and ("vega:" in text)
    )
    vega_added = False
    vega_pattern = re.compile("vega: (.*?)$")
    for comment in vega_comments:
        if not vega_added:
            add_vega_head(soup)
            vega_added = True
        # NOTE(review): this pattern requires "vega: " with a space, while the
        # filter above only requires "vega:".  A comment matching the filter
        # but not the pattern makes search() return None and .group(1) raise
        # -- confirm the expected comment format.
        vega_file_name = vega_pattern.search(comment.string).group(1)
        vega_file = (asset_directory / vega_file_name).with_suffix(".html")
        print(f"Including Vega: {vega_file}")
        with open(vega_file) as f:
            vega_soup = BeautifulSoup(f.read(), "html.parser")
        # Re-tag the fetched <body> as a <div> and splice it in place of the comment.
        vega_body = vega_soup.body
        vega_body.name = "div"
        comment.replace_with(vega_body)
def add_pygments_css(soup):
    """Append the Pygments stylesheet <link> to the document head."""
    tag = soup.new_tag(
        "link", attrs={"href": "../css/pygments.css", "rel": "stylesheet"},
    )
    soup.head.append(tag)
def highlight_html(soup):
    """Replace each <code lang="python"> block with Pygments-highlighted HTML.

    The Pygments stylesheet link is added once, on the first code block found.
    """
    css_added = False
    for item in soup.find_all("code", lang="python"):
        if not css_added:
            add_pygments_css(soup)
            # BUG FIX: this flag was never set, so the stylesheet <link> was
            # appended once per code block (cf. the vega_added pattern in
            # add_vega_plots).
            css_added = True
        code_text = highlight(item.text, PythonLexer(), HtmlFormatter(wrapcode=True))
        s2 = BeautifulSoup(code_text, "html.parser")
        # Swap the code block's parent element for the highlighted markup.
        item.parent.replace_with(s2)
| evanr70/bootstrapy | src/bootstrapy/pages.py | pages.py | py | 5,610 | python | en | code | 0 | github-code | 13 |
71585932819 | # coding=utf-8
"""Keep only the user IDs that appear in the training and test sets."""
"""
event_attendees.csv文件:共5维特征
event_id:活动ID
yes, maybe, invited, and no:以空格隔开的用户列表, 分别表示该活动参加的用户、可能参加的用户,被邀请的用户和不参加的用户.
"""
import pandas as pd
import numpy as np
import scipy.sparse as ss
import scipy.io as sio
# persistence (load the pickled event index)
import cPickle
from sklearn.preprocessing import normalize
"""总的用户数目超过训练集和测试集中的用户, 为节省处理时间和内存,先去处理train和test,得到竞赛需要用到的事件和用户
然后对在训练集和测试集中出现过的事件和用户建立新的ID索引 先运行user_event.py, 得到事件列表文件:PE_userIndex.pkl"""
"""读取之前算好的测试集和训练集中出现过的活动"""
# load the list of events that appear in the training and test sets
eventIndex = cPickle.load(open("PE_eventIndex.pkl", 'rb'))
n_events = len(eventIndex)
print("number of events in train & test :%d" % n_events)
# read the data
"""
统计某个活动,参加和不参加的人数,计算活动热度
"""
# event popularity = (#yes - #no) attendees per event
# NOTE(review): Python 2 code (cPickle import, dict.has_key below); it will
# not run unmodified on Python 3.
eventPopularity = ss.dok_matrix((n_events, 1))
f = open("event_attendees.csv", 'rb')
# columns: event_id, yes, maybe, invited, no
f.readline()  # skip header
for line in f:
    cols = line.strip().split(",")
    eventId = str(cols[0])  # event_id
    if eventIndex.has_key(eventId):
        i = eventIndex[eventId]  # event index
        # yes - no
        eventPopularity[i, 0] = \
            len(cols[1].split(" ")) - len(cols[4].split(" "))
f.close()
# L1-normalize over all events, then persist in MatrixMarket format.
eventPopularity = normalize(eventPopularity, norm="l1",
                            axis=0, copy=False)
sio.mmwrite("EA_eventPopularity", eventPopularity)
print(eventPopularity.todense())
| JerryCatLeung/Event-Recommentation-Engine-Challenge | 6event_attendees.py | 6event_attendees.py | py | 1,845 | python | zh | code | 1 | github-code | 13 |
3922700607 | # python program to calculate ROI
# FIRST IS INCOME,
#Then calculate Expenses
# Then calculate Investments
# finally return cashflow*12/investments
from roicalculator import ROICalculator
def main():
    """Interactive entry point: collect user/property info and run the ROI flow."""
    print("Welcome To BIGGER POCKETS!")
    name = input("Enter your name to get started: ").strip().title()
    # NOTE(review): `property` shadows the builtin within this function.
    property = input("Enter name of the property: ").strip().title()
    location = input("Enter location of property: ").strip().title()
    calculator = ROICalculator(name, property, location)
    calculator.runCalculator()
main() | dylan-dot-c/ROI_Calculator | roi.py | roi.py | py | 543 | python | en | code | 0 | github-code | 13 |
17245028016 | import math
# t[sign] lists the signs that `sign` beats (rock-paper-scissors-lizard-Spock).
t = {"C": ['P','L'], "P":['R','S'], "R":['L','C'], "L":['S','P'], "S":['C','R']}
n = int(input())
# h holds the bracket: h[r] is the list of (player number, sign) still in round r.
h=[[]]
for i in range(n):
    inputs = input().split()
    numplayer = int(inputs[0])
    signplayer = inputs[1]
    h[-1]+=[(numplayer,signplayer)]
# Play log2(n) rounds, pairing adjacent entries of the previous round.
for i in range(int(math.log(n, 2))):
    h+=[[]]
    for a,b in zip(h[-2][::2],h[-2][1::2]):
        # Tie on sign: the lower player number advances.
        win = a if a[0]<b[0] else b
        if b[1] in t[a[1]]:
            win = a
        if a[1] in t[b[1]]:
            win = b
        h[-1]+=[win]
champion = h[-1][0]
print(champion[0])
print(' '.join(reversed([str(h[i][h[i].index(champion)-1][0]) if h[i].index(champion)%2==1 else str(h[i][h[i].index(champion)+1][0]) for i in range(-2, -len(h)-1, -1)]))) | DJAHIDDJ13/CG | training/easy/rock-paper-scissors-lizard-spock/solution_0.py | solution_0.py | py | 716 | python | en | code | 0 | github-code | 13 |
5221101728 | # This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
def print_hi(name):
    """Print a greeting for *name* (PyCharm sample scaffold)."""
    # Use a breakpoint in the code line below to debug your script.
    print(f'Hi, {name}')  # Press Ctrl+F8 to toggle the breakpoint.
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    print_hi('PyCharm')  # demo invocation of the sample function
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
# Used in Theory.py:
class Student:
    """Simple student record (referenced by Theory.py per the comment above)."""
    def __init__(self, name, major, gpa, is_in_class):
        self.name = name
        self.major = major
        self.gpa = gpa
        self.is_in_class = is_in_class
    def eligible(self):
        # Eligibility cutoff: GPA of at least 3.5.
        if self.gpa >= 3.5:
            return True
        else:
return False | QuangAnhP/Uni | Python/Misc/main.py | main.py | py | 889 | python | en | code | 0 | github-code | 13 |
4791245108 | #!/usr/bin/python
# -*- coding: utf-8 -*-
class Solution(object):
    def run(self, data: list) -> list:
        """Collapse *data* so each distinct value appears at most twice.

        Remove-duplicates-from-sorted-array style: the result is returned
        in ascending order.
        """
        result = []
        # BUG FIX: iterate sorted(set(...)) instead of set(...).  Set
        # iteration order is unspecified, so the original output was not
        # guaranteed to come back sorted.
        for value in sorted(set(data)):
            # Keep at most two occurrences of each value.
            result.extend([value] * min(data.count(value), 2))
        return result
if __name__ == "__main__":
    # Demo: collapse a sorted list so each value appears at most twice.
    sample = [1, 1, 1, 2, 2, 3]
    print(Solution().run(sample))
| LeroyK111/BasicAlgorithmSet | 代码实现算法/remove-duplicates-from-sorted-array-i.py | remove-duplicates-from-sorted-array-i.py | py | 418 | python | en | code | 1 | github-code | 13 |
16465594943 | from flask import Flask, render_template, redirect, url_for, session, request,flash,abort
from model.goods import queryAll, Goods, countG
from flask_paginate import Pagination, get_page_args
from model.tools import delexcel, DBSession,isfile,deleteTable
from model.pages import Pagination
from flask import Blueprint
from model.audit import addadv
from model.proxy import queryAll, Proxy,queryVIPsbyId,queryProxyByid,delproxyByid,addproxy
from model.user import queryUserByname, adduser1, count, User, queryAll, queryUserByid,deluserByid
homeProxy = Blueprint(name="homeProxy", import_name=__name__)
@homeProxy.route('/showVIP')
def showVIP():
    """Show the downline (VIP) users of a proxy, or an info page when none exist."""
    if request.method == 'GET':
        proxyId = request.values.get('proxyId')
        number = request.values.get('vip')
        user = DBSession.query(User).filter(User.proxyId == proxyId).all()
        # BUG FIX: Query.all() returns a list and is never None, so the old
        # `user == None` test could not fire and an empty downline rendered
        # showVIP.html with no users.  Test for emptiness instead.
        # NOTE(review): `number` is a raw query-string value; `number < '1'`
        # is a lexicographic string comparison (and raises TypeError when the
        # 'vip' parameter is missing) -- behaviour kept, confirm intent.
        if not user or number < '1':
            return render_template('show.html', info="这个代理没有任何下线")
        else:
            return render_template('showVIP.html', user=user)
@homeProxy.route('/adproxy', methods=['GET', 'POST'])
def adproxy():
    """Proxy dashboard: show downline counts for the logged-in proxy."""
    if request.method == 'GET':
        username = session.get('username')
        # NOTE(review): this query runs before the username-is-None check
        # below; reorder if querying with None is undesirable.
        a = DBSession.query(Proxy).filter(Proxy.username == username).all()
        if username == None:
            return render_template('fshow.html', info='请登录之后再来这个模块')
        elif a:
            userid = request.values.get('userid')
            # NOTE(review): .first() may return None, making p.proxyId raise
            # -- confirm 'userid' is always supplied and valid.
            p = DBSession.query(Proxy).filter(Proxy.userid == userid).first()
            countB = DBSession.query(User).filter(User.proxyId == p.proxyId).count()
            # NOTE(review): countP counts the same rows as countB (a column
            # projection does not change count()) -- confirm the intended
            # distinction between the two values.
            countP = DBSession.query(User.userid).filter(User.proxyId == p.proxyId).count()
            return render_template('adproxy.html', countB=countB, countP=countP)
        else:
            return render_template('fshow.html', info='你还不是代理不能进入这个模块')
@homeProxy.route('/applyproxy', methods=['GET', 'POST'])
def applyproxy():
    """GET: render the proxy application form; POST: record the application."""
    if request.method == 'GET':
        return render_template('applyproxy.html')
    if request.method == 'POST':
        username = request.form.get('username')
        userid = session.get('userid')
        email = request.form.get('email')
        telephone = request.form.get('telephone')
        # Queue the application for administrator review.
        addadv(userid, username, email, telephone)
return render_template('fshow.html', info="申请成功!请等待管理员回复") | q513021617/FlaskTaoBaokeSite | controller/home/homeProxy.py | homeProxy.py | py | 2,421 | python | en | code | 1 | github-code | 13 |
6639455535 | import os
# --- Coeficiente Binomial(nCr) --- #
def ncr(a, b):
    """Return the binomial coefficient C(a, b) = a! / (b! * (a - b)!)."""
    ## --- Calculadora Factorial --- ##
    def f(n):
        # BUG FIX: the base case used to be `return n`, so f(0) == 0 and
        # ncr(a, 0) / ncr(a, a) raised ZeroDivisionError.  0! and 1! are 1.
        if n <= 1:
            return 1
        else:
            return n * f(n - 1)
    ### --- Variables factoriales --- ###
    c = a - b
    fac_a = f(a)
    fac_b = f(b)
    fac_c = f(c)
    ### --- Fórmula --- ###
    return fac_a / (fac_b * fac_c)
## ----------------------------- ##
# EntryPoint:
if __name__ == '__main__':
    # Interactive loop: Enter computes another coefficient, 'z'/'Z' exits.
    while True:
        z = input('Presiona Enter para continuar \n Presiona Z para salir ')
        os.system('clear')  # NOTE(review): POSIX-only; Windows uses 'cls'
        if z.lower() == 'z':
            break
        a = int(input('Dame un número A: '))
        b = int(input('Dame un número B: '))
        res = ncr(a, b)
        os.system('clear')
        print(f'El coeficiente Binomial es {res}')
| Cervantes21/Estadistica_computacional | distribucion-binomial/binomial.py | binomial.py | py | 833 | python | en | code | 1 | github-code | 13 |
71497062739 | # 언어 : Python
# 날짜 : 2021.09.17
# 문제 : KOREATECH JUDGE > 카드 정리(1150번)
# 소요시간: 4' 36"
# ==============================================================
def solution():
    """Read one line; for each vowel, add the number of characters to its right.

    NOTE(review): the scoring rule (suffix length per vowel, case-insensitive)
    is read off the code itself -- confirm against the judge statement.
    """
    remove_list = ["A", "E", "I", "O", "U"]
    string = input()
    cnt = 0
    for i, s in enumerate(string):
        if s.upper() in remove_list:
            cnt += len(string[i + 1 :])
    return cnt
# First input line is the number of test cases; one line per case follows.
for _ in range(int(input())):
print(solution()) | eunseo-kim/Algorithm | Koreatech Judge/카드정리.py | 카드정리.py | py | 457 | python | ko | code | 1 | github-code | 13 |
2817842385 | from app.utils import parse_args, get_arg_parser, init_es
from app.shards import scan_shard, get_shards_to_routing
if __name__ == '__main__':
    # CLI entry point: sequentially scan every shard of the target index.
    parser = get_arg_parser()
    args = parse_args(parser)
    es = init_es(args)
    # Map each shard to a routing value that lands on it.
    shards_to_routing = get_shards_to_routing(es, args.index, args.doc_type)
    jobs = []  # NOTE(review): never used -- leftover from a parallel version?
    for shard, routing in shards_to_routing.items():
        print(f"shard: {shard}, routing: {routing}")
        # match_all: stream every document of the shard; the callback prints it.
        query = {"query": {"match_all": {}}}
        scan_shard(es, args.index, args.doc_type, query, routing, lambda doc: print(doc))
| amityo/es-parallel-scan | sync.py | sync.py | py | 562 | python | en | code | 3 | github-code | 13 |
5848201 | import markdown
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect, render
from apps.folders.folders import select_folder
from apps.folders.models import Folder
from apps.notes.forms import NoteForm
from apps.notes.models import Note
@login_required
def index(request):
    """Display a list of folders and notes, along with a note.
    Notes:
        Always displays folders.
        If a folder is selected, displays the notes for a folder.
        If a note is selected, displays the note.
    """
    user = request.user
    page = "notes"
    folders = Folder.objects.filter(user=user, page=page).order_by("name")
    selected_folder = select_folder(request, "notes")
    # Notes scoped to the selected folder, or the folder-less ones.
    if selected_folder:
        notes = Note.objects.filter(user=user, folder_id=selected_folder.id)
    else:
        notes = Note.objects.filter(user=user, folder_id__isnull=True)
    notes = notes.order_by("subject")
    # The selected note id is persisted on the user model (see select()).
    selected_note_id = request.user.notes_note
    try:
        # NOTE(review): lookup is by pk only, not scoped to `user` --
        # confirm notes_note can never reference another user's note.
        selected_note = Note.objects.filter(pk=selected_note_id).get()
    except ObjectDoesNotExist:
        selected_note = None
    if selected_note:
        # Render the note body as HTML (in memory only; never saved).
        selected_note.note = markdown.markdown(selected_note.note)
    context = {
        "page": page,
        "edit": False,
        "folders": folders,
        "selected_folder": selected_folder,
        "notes": notes,
        "selected_note": selected_note,
    }
    return render(request, "notes/content.html", context)
@login_required
def select(request, id):
    """Select a note for display, redirect to index.
    Args:
        id (int): a Note instance id
    """
    # Persist the selection on the user model; index() reads it back.
    user = request.user
    user.notes_note = id
    user.save()
    return redirect("/notes/")
@login_required
def add(request):
    """Add a new note.
    Notes:
        GET: Display new note form.
        POST: Add note to database.
    """
    user = request.user
    folders = Folder.objects.filter(user=user, page="notes").order_by("name")
    selected_folder = select_folder(request, "notes")
    if request.method == "POST":
        # create a bound note form loaded with the post values
        # this will render even if the post values are invalid
        form = NoteForm(request.POST)
        if form.is_valid():
            note = form.save(commit=False)
            note.user = user
            note.save()
            # deselect previously selected note
            # NOTE(review): .get() can also raise MultipleObjectsReturned,
            # which is not caught here -- confirm at most one note per user
            # can ever have selected=1.
            try:
                old = Note.objects.filter(user=user, selected=1).get()
            except Note.DoesNotExist:
                pass
            else:
                old.selected = 0
                old.save()
            # select newest note for user
            new = Note.objects.filter(user=user).latest("id")
            new.selected = 1
            new.save()
        return redirect("notes")
    else:
        # request is a get request
        # create unbound note form (pre-set folder when one is selected)
        if selected_folder:
            form = NoteForm(initial={"folder": selected_folder.id})
        else:
            form = NoteForm()
    # set the initial range of values for folder attribute
    form.fields["folder"].queryset = Folder.objects.filter(
        user=user, page="notes"
    ).order_by("name")
    context = {
        "page": "notes",
        "edit": False,
        "add": True,
        "folders": folders,
        "selected_folder": selected_folder,
        "action": "/notes/add",
        "form": form,
    }
    return render(request, "notes/content.html", context)
@login_required
def edit(request, id):
    """Edit a note.
    Args:
        id (int): A Note instance id
    Notes:
        GET: Display note form.
        POST: Update note in database.
    """
    user = request.user
    folders = Folder.objects.filter(user=user, page="notes").order_by("name")
    selected_folder = select_folder(request, "notes")
    # NOTE(review): this lookup is not scoped to the requesting user, so on
    # GET the form can load another user's note; the POST branch below does
    # scope by user -- confirm whether the GET path should too.
    note = get_object_or_404(Note, pk=id)
    if request.method == "POST":
        try:
            note = Note.objects.filter(user=request.user, pk=id).get()
        except ObjectDoesNotExist:
            raise Http404("Record not found.")
        form = NoteForm(request.POST, instance=note)
        if form.is_valid():
            note = form.save(commit=False)
            note.user = user
            note.save()
        return redirect("notes")
    else:
        # Bound to the existing note; pre-set folder when one is selected.
        if selected_folder:
            form = NoteForm(instance=note, initial={"folder": selected_folder.id})
        else:
            form = NoteForm(instance=note)
    # Restrict the folder choices to this user's notes folders.
    form.fields["folder"].queryset = Folder.objects.filter(
        user=user, page="notes"
    ).order_by("name")
    context = {
        "page": "notes",
        "edit": True,
        "add": False,
        "folders": folders,
        "selected_folder": selected_folder,
        "action": f"/notes/{id}/edit",
        "form": form,
        "note": note,
    }
    return render(request, "notes/content.html", context)
@login_required
def delete(request, id):
    """Delete a note.
    Args:
        id (int): a Note instance id
    """
    # Scope the lookup to the requesting user so other users' notes 404.
    try:
        note = Note.objects.filter(user=request.user, pk=id).get()
    except ObjectDoesNotExist:
        raise Http404("Record not found.")
    note.delete()
    return redirect("notes")
| jamescrg/minhome | apps/notes/views.py | views.py | py | 5,358 | python | en | code | 0 | github-code | 13 |
19272757890 | class Solution:
    def climbStairs(self, n: int) -> int:
        """Number of distinct ways to climb n stairs taking 1 or 2 steps.

        Fibonacci-style DP; arr[i] holds the answer for n = i + 1.
        NOTE(review): only the last two entries are ever read, so the list
        could be dropped for O(1) space -- kept as-is.
        """
        prev1, prev2 = 1, 2  # n = 1 & n = 2 answer
        steps = 0
        arr = [prev1, prev2]  # preset known answers
        for i in range(2, n):  # start DP from unknown ans
            prev1, prev2 = arr[-1], arr[-2]
            steps = prev1 + prev2
            arr.append(steps)  # store ans
return arr[n - 1] # get newest ans | ytchen175/leetcode-pratice | 0070-climbing-stairs/0070-climbing-stairs.py | 0070-climbing-stairs.py | py | 439 | python | en | code | 0 | github-code | 13 |
17061214934 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.RecipientInfoOrder import RecipientInfoOrder
class UserInvoiceInfoOrder(object):
    """Invoice header information attached to an order (Alipay OpenAPI model).

    Plain value object: every wire field is stored in an underscored
    attribute and exposed through a property; to_alipay_dict /
    from_alipay_dict convert to and from the gateway's dict form.
    Refactor note: the two dict methods previously repeated the same
    per-field block eight times; they now iterate _FIELD_NAMES instead,
    with identical behaviour.
    """

    # Wire-level field names; drives both (de)serialization loops below.
    _FIELD_NAMES = ('address', 'bank_account', 'bank_name', 'company_name',
                    'ip_role_id', 'recipient_info', 'tax_no', 'telephone')

    def __init__(self):
        self._address = None
        self._bank_account = None
        self._bank_name = None
        self._company_name = None
        self._ip_role_id = None
        self._recipient_info = None
        self._tax_no = None
        self._telephone = None

    @property
    def address(self):
        return self._address

    @address.setter
    def address(self, value):
        self._address = value

    @property
    def bank_account(self):
        return self._bank_account

    @bank_account.setter
    def bank_account(self, value):
        self._bank_account = value

    @property
    def bank_name(self):
        return self._bank_name

    @bank_name.setter
    def bank_name(self, value):
        self._bank_name = value

    @property
    def company_name(self):
        return self._company_name

    @company_name.setter
    def company_name(self, value):
        self._company_name = value

    @property
    def ip_role_id(self):
        return self._ip_role_id

    @ip_role_id.setter
    def ip_role_id(self, value):
        self._ip_role_id = value

    @property
    def recipient_info(self):
        return self._recipient_info

    @recipient_info.setter
    def recipient_info(self, value):
        # Accept either a ready-made model object or its dict form.
        if isinstance(value, RecipientInfoOrder):
            self._recipient_info = value
        else:
            self._recipient_info = RecipientInfoOrder.from_alipay_dict(value)

    @property
    def tax_no(self):
        return self._tax_no

    @tax_no.setter
    def tax_no(self, value):
        self._tax_no = value

    @property
    def telephone(self):
        return self._telephone

    @telephone.setter
    def telephone(self, value):
        self._telephone = value

    def to_alipay_dict(self):
        """Serialize all truthy fields into a plain dict for the gateway."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if value:
                # Nested SDK models serialize themselves; plain values pass through.
                if hasattr(value, 'to_alipay_dict'):
                    params[name] = value.to_alipay_dict()
                else:
                    params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a response dict; returns None for empty input."""
        if not d:
            return None
        o = UserInvoiceInfoOrder()
        for name in UserInvoiceInfoOrder._FIELD_NAMES:
            if name in d:
                # Assignment runs the property setters, which convert nested
                # dicts (recipient_info) into their model objects.
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/UserInvoiceInfoOrder.py | UserInvoiceInfoOrder.py | py | 4,593 | python | en | code | 241 | github-code | 13 |
73492621776 | # Identical Sentences
# Find by Path Compression
def Find(u, parent):
    """Return the root of u's set, flattening the visited chain (path compression)."""
    root = u
    while parent[root] != -1:
        root = parent[root]
    # Second pass: point every node on the path straight at the root.
    while parent[u] != -1 and parent[u] != root:
        parent[u], u = root, parent[u]
    return root
# Union by Rank
def Union(x, y, rank, parent):
    """Link set roots x and y by rank; equal ranks attach y under x and bump x's rank."""
    if rank[x] < rank[y]:
        parent[x] = y
    else:
        parent[y] = x
        if rank[x] == rank[y]:
            rank[x] += 1
def IdenticalSentences(n, m, p, word1, word2, pairs):
    """True iff word1 and word2 match position-wise under the pair relation.

    Two words match when they are equal, or when both appear in `pairs`
    and belong to the same union-find equivalence class.
    """
    if n != m:
        return False
    # Assign a dense integer id to every word mentioned in `pairs`.
    ids = {}
    names = {}
    next_id = 0
    for left, right in pairs:
        for w in (left, right):
            if w not in ids:
                ids[w] = next_id
                names[next_id] = w
                next_id += 1
    parent = [-1] * next_id
    rank = [0] * next_id
    # Merge the equivalence classes induced by the pairs.
    for left, right in pairs:
        root_l = Find(ids[left], parent)
        root_r = Find(ids[right], parent)
        if root_l != root_r:
            Union(root_l, root_r, rank, parent)
    # Position-wise comparison.
    for i in range(n):
        if word1[i] == word2[i]:
            continue
        if word1[i] not in ids or word2[i] not in ids:
            return False
        if Find(ids[word1[i]], parent) != Find(ids[word2[i]], parent):
            return False
    return True
def main():
    """Read the two sentences and similarity pairs from stdin; print the verdict."""
    n=int(input())
    word1=list(input().split())
    m=int(input())
    word2=list(input().split())
    p=int(input())
    pairs=[]
    for i in range(p):
        pairs.append(list(input().split()))
    ans=IdenticalSentences(n,m,p,word1,word2,pairs)
    if ans==True:
        print('Sentences are Identical')
    else:
        print('Sentences are Not Identical')
if __name__=='__main__':
main() | Ayush-Tiwari1/DSA | Days.41/1.Identical-Sentences.py | 1.Identical-Sentences.py | py | 1,651 | python | en | code | 0 | github-code | 13 |
21521003593 | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Model, optimizers
# (1) Standard convolution block: Conv2D -> BatchNorm -> ReLU6.
def conv_block(input_tensor, filters, alpha, kernel_size=(3, 3), strides=(1, 1)):
    # Width multiplier alpha scales the number of filters.
    filters = int(filters * alpha)
    # Convolution + batch normalization + activation.
    x = layers.Conv2D(filters, kernel_size,
                      strides=strides,  # stride
                      padding='same',  # zero padding keeps the map size (for stride 1)
                      use_bias=False)(input_tensor)  # bias is redundant before BatchNorm
    x = layers.BatchNormalization()(x)  # batch normalization
    x = layers.ReLU(6.0)(x)  # ReLU6 activation
    return x  # result of one standard convolution
# (2) Depthwise separable convolution block: depthwise conv + pointwise (1x1) conv.
def depthwise_conv_block(input_tensor, point_filters, alpha, depth_multiplier, strides=(1, 1)):
    # Width multiplier alpha scales the number of pointwise filters.
    point_filters = int(point_filters * alpha)
    # (a) Depthwise convolution -- output channel count tracks the input channels.
    x = layers.DepthwiseConv2D(kernel_size=(3, 3),  # kernel size fixed at 3x3
                               strides=strides,  # stride
                               padding='same',  # keeps the map size when strides=1
                               depth_multiplier=depth_multiplier,  # filters per input channel
                               use_bias=False)(input_tensor)  # bias redundant before BatchNorm
    x = layers.BatchNormalization()(x)  # batch normalization
    x = layers.ReLU(6.0)(x)  # ReLU6 activation
    # (b) Pointwise convolution -- 1x1 standard convolution mixing channels.
    x = layers.Conv2D(point_filters, kernel_size=(1, 1),  # 1x1 kernel
                      padding='same',  # keeps the map size
                      strides=(1, 1),  # stride 1: every pixel is convolved
                      use_bias=False)(x)  # bias redundant before BatchNorm
    x = layers.BatchNormalization()(x)  # batch normalization
    x = layers.ReLU(6.0)(x)  # activation
    return x  # result of the depthwise separable convolution
def conv_block_withoutrelu(
        inputs,
        filters,
        kernel_size=(3, 3),
        strides=(1, 1)
):
    # Conv2D + BatchNorm WITHOUT an activation -- used as the linear
    # projection at the end of mobileinvertedblock below.
    x = tf.keras.layers.Conv2D(filters, kernel_size=kernel_size, strides=strides, padding='same', use_bias=False)(
        inputs)
    x = tf.keras.layers.BatchNormalization()(x)
    return x
def mobileinvertedblock(inputs,inc,midc,outc,midkernelsize=(5,5)):
    # Inverted-residual block: 1x1 expand -> depthwise conv -> 1x1 linear
    # projection; the identity shortcut is added only when inc == outc.
    # Downsampling (stride 2) is chosen whenever the channel count grows.
    x = conv_block(inputs,midc,1,kernel_size=(1,1))
    if inc >= outc:
        strides = (1,1)
    else:
        strides = (2,2)
    x = layers.DepthwiseConv2D(kernel_size=midkernelsize,
                               strides=strides,  # stride
                               padding='same',  # keeps the map size when strides=1
                               depth_multiplier=1,  # one filter per input channel
                               use_bias=False)(x)  # bias redundant before BatchNorm
    x = layers.BatchNormalization()(x)  # batch normalization
    x = layers.ReLU(6.0)(x)  # ReLU6 activation
    x = conv_block_withoutrelu(x,outc,kernel_size=(1,1))
    if inc == outc:
        return x+inputs
    else:
        return x
# def kwsmodel( input_shape,classes=3):
# # 创建输入层
# inputs = layers.Input(shape=input_shape)
# x = conv_block(inputs, 16, 1, strides=(2, 2)) # 步长为2,压缩宽高,提升通道数
# x = conv_block(x, 32, 1)
# x = mobileinvertedblock(x,32,32,64)
# x = mobileinvertedblock(x,64,80,64,midkernelsize=(3,3))
# x = mobileinvertedblock(x, 64, 80, 128)
# x = mobileinvertedblock(x, 128, 128, 128)
# x = mobileinvertedblock(x, 128, 96, 128, midkernelsize=(3, 3))
# x = mobileinvertedblock(x,128,168,168,midkernelsize=(3,3))
# x = mobileinvertedblock(x, 168, 196, 168)
# x = mobileinvertedblock(x, 168, 168, 168, midkernelsize=(3, 3))
# # x = mobileinvertedblock(x, 128, 168, 168, midkernelsize=(3, 3))
# x = mobileinvertedblock(x, 168, 256, 168, midkernelsize=(3, 3))
# # x = mobileinvertedblock(x, 168, 256, 256, midkernelsize=(3, 3))
# x = mobileinvertedblock(x, 168, 256, 128, midkernelsize=(3, 3))
# # 调整输出特征图x的特征图个数
# # 卷积层,将特征图x的个数转换成分类数
# x = layers.GlobalAveragePooling2D()(x) # 通道维度上对size维度求平均
# x = layers.Dropout(0.5)(x)
# x = layers.Dense(classes*20)(x)
# x = layers.ReLU()(x)
# x = layers.Dense(classes * 10)(x)
# x = layers.Dense(classes)(x)
# x = layers.Softmax()(x)
# # 构建模型
# model = Model(inputs, x)
# # 返回模型结构
# return model
from tensorflow.keras.models import Model, load_model, Sequential
from tensorflow.keras.layers import Conv2D, Dense, MaxPooling2D, Softmax, Activation, BatchNormalization, Flatten, Dropout, DepthwiseConv2D
from tensorflow.keras.layers import MaxPool2D, AvgPool2D, AveragePooling2D, GlobalAveragePooling2D,ZeroPadding2D,Input,Embedding,PReLU
def kwsmodel(dim0=16):
    """Build the keyword-spotting CNN: four stride-2 conv/depthwise stages,
    global average pooling, and a 2-way softmax head.

    dim0 is the base channel width; input is a (64, 40, 1) feature map.
    NOTE(review): the trailing `# 32x32` / `# 8x8` size comments do not track
    the actual 64x40 input (stride-2 stages give 32x20, 16x10, 8x5, 4x3) --
    they look copied from an earlier 64x64 variant; confirm before relying
    on them.
    """
    dst_h = 64
    dst_w = 40
    dst_ch = 1;
    model = Sequential()
    model.add(Conv2D(dim0, (3, 3), padding='same', strides=(2, 2), input_shape=(dst_h, dst_w, dst_ch), name='ftr00'));
    model.add(BatchNormalization());
    model.add(Activation('relu'));  # 32x32
    model.add(DepthwiseConv2D((3, 3), padding='same', name='ftr01'));
    model.add(BatchNormalization());
    model.add(Activation('relu'));  # 32x32
    model.add(Conv2D(dim0 * 2, (3, 3), padding='same', strides=(2, 2), name='ftr10'));
    model.add(BatchNormalization());
    model.add(Activation('relu'));  # 16x16
    model.add(DepthwiseConv2D((5, 5), padding='same', name='ftr11'));
    model.add(BatchNormalization());
    model.add(Activation('relu'));  # 32x32
    model.add(Conv2D(dim0 * 4, (3, 3), padding='same', strides=(2, 2), name='ftr20'));
    model.add(BatchNormalization());
    model.add(Activation('relu'));  # 8x8
    model.add(DepthwiseConv2D((3, 3), padding='same', name='ftr21'));
    model.add(BatchNormalization());
    model.add(Activation('relu'));  # 32x32
    model.add(Conv2D(dim0 * 8, (3, 3), padding='same', strides=(2, 2), name='ftr30'));
    model.add(BatchNormalization());
    model.add(Activation('relu'));  # 8x8
    model.add(DepthwiseConv2D((3, 3), padding='same', name='ftr31'));
    model.add(BatchNormalization());
    model.add(Activation('relu'));  # 32x32
    # model.add(DepthwiseConv2D((16,3), padding = 'valid', name='ftr40'));model.add(BatchNormalization());model.add(Activation('relu')); #8x8
    # model.add(Flatten())
    # model.add(BatchNormalization())
    model.add(GlobalAveragePooling2D(name='GAP'))
    # model.add(AveragePooling2D(10,7))
    model.add(Dropout(0.5))
    model.add(Dense(dim0 * 4, name="fc0"))
    model.add(Dense(dim0 * 2, name="fc1"))
    model.add(Dense(2, name="fc2"))
    model.add(Activation('softmax', name="sm"))
    return model
if __name__ == '__main__':
    # Build the model
    model = kwsmodel()
    # Inspect the network architecture
    model.summary()
# model.save("./mbtest.h5", save_format="h5")
# print(model.layers[-3])
# model = tf.keras.models.load_model("./mbtest.h5")
# model.summary() | yuyun2000/kws | model.py | model.py | py | 7,479 | python | en | code | 1 | github-code | 13 |
2462872761 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 5 11:08:46 2015
@author: Ryan-Rhys
"""
import numpy
import matplotlib
from matplotlib import pyplot as plt
# Lorentz-oscillator fit parameters for three literature fits
# (presumably: wj = resonance energies in eV, fj = oscillator strengths,
# gj = damping terms -- TODO confirm against the source papers).
wj = [0,2.9,4.0,8.9]
fj = [9.7,4.95,41.55,207.76]
gj = [3.21,0.67,2.22,8.50]
wj2 = [0,3.87,8.37,23.46]
fj2 = [40.11,59.61,122.55,1031.19]
gj2 = [0,2.62,6.41,27.57]
wj3 = [0,3.0,4.8]
fj3 = [53.0,5.0,104.0]
gj3 = [1.8,0.8,4.4]
# Wavelengths (nm) scanned; 1240.0/omega below converts nm to eV.
omega_ranges = range(300,701)
epsilon_vals_real1 = []
epsilon_vals_imag1 = []
epsilon_vals_real2 = []
epsilon_vals_imag2 = []
epsilon_vals_real3 = []
epsilon_vals_imag3 = []
omega = 300.0
j = 1j
# Accumulate epsilon(omega) for the two 4-oscillator fits (Irani, DESY).
# NOTE(review): each term adds (1.0 + f/(...)), so the REAL part carries an
# extra +(N-1) offset versus the usual Lorentz form eps = 1 + sum_j f_j/(...);
# the imaginary part is unaffected.  Confirm whether the fit parameters were
# calibrated to this summed form before changing it.
while omega < 701.0:
    i = 0
    x1, x2 = 0.0, 0.0
    y1, y2 = 0.0, 0.0
    while i < 4:
        x1 += (1.0 + (fj[i])/((wj[i])**2 - (1240.0/omega)**2 - (j*(1240.0/omega)*gj[i]) )).real
        y1 += (1.0 + (fj[i])/((wj[i])**2 - (1240.0/omega)**2 - (j*(1240.0/omega)*gj[i]) )).imag
        x2 += (1.0 + (fj2[i])/((wj2[i])**2 - (1240.0/omega)**2 - (j*(1240.0/omega)*gj2[i]) )).real
        y2 += (1.0 + (fj2[i])/((wj2[i])**2 - (1240.0/omega)**2 - (j*(1240.0/omega)*gj2[i]) )).imag
        i += 1
    epsilon_vals_real1.append(x1)
    epsilon_vals_imag1.append(y1)
    epsilon_vals_real2.append(x2)
    epsilon_vals_imag2.append(y2)
    omega += 1.0
omega2 = 300
# Same scan for the 3-oscillator Johnson & Christy fit.
while omega2 < 701.0:
    i = 0
    x3 = 0.0
    y3 = 0.0
    while i < 3:
        x3 += (1.0 + (fj3[i])/((wj3[i])**2 - (1240.0/omega2)**2 - (j*(1240.0/omega2)*gj3[i]) )).real
        y3 += (1.0 + (fj3[i])/((wj3[i])**2 - (1240.0/omega2)**2 - (j*(1240.0/omega2)*gj3[i]) )).imag
        i += 1
    epsilon_vals_real3.append(x3)
    epsilon_vals_imag3.append(y3)
    omega2 += 1.0
# Plot the real and imaginary parts of epsilon for each fit on its own figure.
plt.figure(1)
plt.title('Irani Fit')
plt.xlabel('Omega (nm)')
plt.ylabel('Epsilon(omega)')
plt.plot(omega_ranges, epsilon_vals_real1, 'r', label = 'Re')
plt.plot(omega_ranges, epsilon_vals_imag1, 'g', label = 'Im')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
           ncol=2, mode="expand", borderaxespad=0.)
plt.figure(2)
plt.title('DESY Fit')
plt.xlabel('Omega')
plt.ylabel('Epsilon(omega)')
plt.plot(omega_ranges, epsilon_vals_real2, 'r', label = 'Re')
plt.plot(omega_ranges, epsilon_vals_imag2, 'g', label = 'Im')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
           ncol=2, mode="expand", borderaxespad=0.)
plt.figure(3)
plt.title('Johnson and Christy Fit')
plt.xlabel('Omega')
plt.ylabel('Epsilon(omega)')
plt.plot(omega_ranges, epsilon_vals_real3, 'r', label = 'Re')
plt.plot(omega_ranges, epsilon_vals_imag3, 'g', label = 'Im')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
           ncol=2, mode="expand", borderaxespad=0.)
# NOTE(review): no plt.show() in the visible script -- figures may never
# display when run non-interactively.
| Ryan-Rhys/Nanoparticle-Systems | Parsegian_Comparison_Script.py | Parsegian_Comparison_Script.py | py | 2,651 | python | en | code | 2 | github-code | 13 |
16456833715 |
###########################################################################
# 1)Дан список слов который вводит пользователь.
# Напишите программу, которая создает новый список, содержащий только уникальные слова из исходного списка.
# ВАРИАНТ 1
# a = {'Apple', 'Mango', 'Mango', 'Banana', 'Banana', 'Orange'}
# b = {'Car', 'Tank', 'Apple', 'Bike', 'Orange', 'Plane'}
# z = a.intersection(b)
# print(z)
# ВАРИАНТ 2
###########################################################################
# 3)Напишите программу, которая принимает список слов от пользователя
# и выводит самое длинное слово в списке, используя функцию max() с параметром key=len.
# slovo_1 = input("Введите Ваше слово: ")
# slovo_2 = input("Введите еще слово: ")
# slovo_3 = input("Введите последнее слово: ")
# a = max(slovo_1, slovo_2, slovo_3, key=len)
# print("Самое длинное слово: ", a)
###########################################################################
# 4) Напишите программу, которая принимает список чисел от пользователя
# и выводит сумму всех чисел, используя функцию sum().
num_1 = int(input("Введите число: "))
num_2 = int(input("Введите еще число: "))
num_3 = int(input("И последнее: "))
# BUG FIX: sum() accepts an iterable (plus an optional start value), so
# sum(num_1, num_2, num_3) raised TypeError.  Pass the numbers as a tuple.
a = sum((num_1, num_2, num_3))
print("сумма всех введенных2 чисел: ", a)
# NOTE(review): leftover scratch below -- `.split` is never called (it binds
# the method object) and `words` is immediately rebound to int().  Looks like
# abandoned work-in-progress; kept as-is pending author confirmation.
words = input("Nums: ").split
words = int()
###########################################################################
# 5) Напишите программу, которая принимает список слов от пользователя
# и выводит список слов в обратном порядке, используя метод reverse().
# slovo_1 = input("Введите Ваши слова: ").split()
# slovo_1.reverse()
# print(slovo_1) | DiasGonzales/Lessons | 3_lesson_HM.py | 3_lesson_HM.py | py | 2,244 | python | ru | code | 0 | github-code | 13 |
38540539445 | import math
import random
import time
import sys
import copy
from functools import reduce
##############
# Game Board #
##############
class Board(object):
# Class constructor.
#
# PARAM [2D list of int] board: the board configuration, row-major
# PARAM [int] w: the board width
# PARAM [int] h: the board height
# PARAM [int] n: the number of tokens to line up to win
def __init__(self, board, w, h, n):
"""Class constructor"""
# Board data
self.board = board
# Board width
self.w = w
# Board height
self.h = h
# How many tokens in a row to win
self.n = n
# Current player
self.player = 1
# Clone a board.
#
# RETURN [board.Board]: a deep copy of this object
def copy(self):
"""Returns a copy of this board that can be independently modified"""
cpy = Board(copy.deepcopy(self.board), self.w, self.h, self.n)
cpy.player = self.player
return cpy
# Check if a line of identical tokens exists starting at (x,y) in direction (dx,dy)
#
# PARAM [int] x: the x coordinate of the starting cell
# PARAM [int] y: the y coordinate of the starting cell
# PARAM [int] dx: the step in the x direction
# PARAM [int] dy: the step in the y direction
# RETURN [Bool]: True if n tokens of the same type have been found, False otherwise
def is_line_at(self, x, y, dx, dy):
"""Return True if a line of identical tokens exists starting at (x,y) in direction (dx,dy)"""
# Avoid out-of-bounds errors
if ((x + (self.n-1) * dx >= self.w) or
(y + (self.n-1) * dy < 0) or (y + (self.n-1) * dy >= self.h)):
return False
# Get token at (x,y)
t = self.board[y][x]
# Go through elements
for i in range(1, self.n):
if self.board[y + i*dy][x + i*dx] != t:
return False
return True
# Check if a line of identical tokens exists starting at (x,y) in any direction
#
# PARAM [int] x: the x coordinate of the starting cell
# PARAM [int] y: the y coordinate of the starting cell
# RETURN [Bool]: True if n tokens of the same type have been found, False otherwise
def is_any_line_at(self, x, y):
"""Return True if a line of identical tokens exists starting at (x,y) in any direction"""
return (self.is_line_at(x, y, 1, 0) or # Horizontal
self.is_line_at(x, y, 0, 1) or # Vertical
self.is_line_at(x, y, 1, 1) or # Diagonal up
self.is_line_at(x, y, 1, -1)) # Diagonal down
# Calculate the game outcome.
#
# RETURN [int]: 1 for Player 1, 2 for Player 2, and 0 for no winner
def get_outcome(self):
"""Returns the winner of the game: 1 for Player 1, 2 for Player 2, and 0 for no winner"""
for x in range(self.w):
for y in range(self.h):
if (self.board[y][x] != 0) and self.is_any_line_at(x,y):
return self.board[y][x]
return 0
# Adds a token for the current player at the given column
#
# PARAM [int] x: The column where the token must be added; the column is assumed not full.
#
# NOTE: This method switches the current player.
def add_token(self, x):
"""Adds a token for the current player at column x; the column is assumed not full"""
# Find empty slot for token
y = 0
while self.board[y][x] != 0:
y = y + 1
self.board[y][x] = self.player
# Switch player
if self.player == 1:
self.player = 2
else:
self.player = 1
# Returns a list of the columns with at least one free slot.
#
# RETURN [list of int]: the columns with at least one free slot
def free_cols(self):
"""Returns a list of the columns with at least one free slot"""
return [x for x in range(self.w) if self.board[-1][x] == 0 ]
# Prints the current board state.
def print_it(self):
print("+", "-" * self.w, "+", sep='')
for y in range(self.h-1, -1, -1):
print("|", sep='', end='')
for x in range(self.w):
if self.board[y][x] == 0:
print(" ", end='')
else:
print(self.board[y][x], end='')
print("|")
print("+", "-" * self.w, "+", sep='')
print(" ", end='')
for i in range(self.w):
print(i, end='')
print("")
###########################
# Alpha-Beta Search Agent #
###########################
__score_weights = [0,10,50,5000,1000000]
__offensiveness = 0.35
__board_x = 6
__board_y = 7
__connect_n = 4
# Heuristic that determines the maximum depth based on turn
def __depth_heuristic(state):
turn = 0
for r in state.board:
for c in r:
if c != 0:
turn += 1
print("Turn: ", turn)
if (turn < (state.h-2) * (state.w-1) and state.n == 4) or turn < (state.h-1) * (state.w-1):
depth = 6
else:
depth = (state.h * state.w) - turn
return depth
# Computes the value, action of a max value node in pruning
def __max_value(state, depth, alpha, beta):
win_state = state.get_outcome()
if win_state == state.player:
return __score_weights[4], -1
elif win_state != 0:
return -__score_weights[4], -1
if len(state.free_cols()) == 0:
return 0, -1
if depth >= 0:
utility = __utility(state.board, state.player)
return utility, -1
else:
best = (-math.inf,-1)
for s, a in __get_successors(state):
new_utility = __min_value(s, depth + 1, alpha, beta)
if new_utility >= best[0]:
best = (new_utility, a)
alpha = max(alpha, best[0])
if best[0] >= beta:
return best
return best
# Computes the value, action of a max value node in pruning
def __min_value(state, depth, alpha, beta):
win_state = state.get_outcome()
if win_state == state.player:
return __score_weights[4]
elif win_state != 0:
return -__score_weights[4]
if len(state.free_cols()) == 0:
return 0
if depth >= 0:
return __utility(state.board, state.player)
else:
worst = math.inf
for s, __ in __get_successors(state):
new_utility, __ = __max_value(s, depth + 1, alpha, beta)
worst = min(worst, new_utility)
beta = min(beta, worst)
if worst <= alpha:
return worst
return worst
# Pick a column for the agent to play (External interface).
def go(brd):
"""Search for the best move (choice of column for the token)"""
depth = -__depth_heuristic(brd)
__, action = __max_value(brd, depth, -math.inf, math.inf)
return action
# Get the successors of the given board.
def __get_successors(brd):
"""Returns the reachable boards from the given board brd. The return value is a tuple (new board state, column number where last token was added)."""
# Get possible actions
freecols = brd.free_cols()
# Are there legal actions left?
if not freecols:
return []
# Make a list of the new boards along with the corresponding actions
succ = []
for col in freecols:
# Clone the original board
nb = brd.copy()
# Add a token to the new board
# (This internally changes nb.player, check the method definition!)
nb.add_token(col)
# Add board to list of successors
succ.append((nb,col))
return succ
#Utility function that takes a board_state and its player value
def __utility(board_state, player):
scores = [0,0] #Array that stores the score of both players as the function loops through the cells and directions
for dx, dy in [(1,0),(1,1),(0,1),(1,-1)]:#Loops through directions/ dx dy combinations
for i in range(__board_x): #Loops through rows
for j in range(__board_y): #Loops through columns
this = board_state[i][j] #Gets the value/piece in current cell (0-empty, 1-current player, 2-opponent)
sequence = 1 if this != 0 else 0 #Initializes the sequence size if the first cell is not empty
#Iterative variables for step
i_step = i
j_step = j
for step in range(__connect_n-1): #Iterate steps from current cell
i_step += dx
j_step += dy
#Checks for off-bounds steps
if i_step >= __board_x or i_step < 0 or j_step >= __board_y or j_step < 0:
sequence = 0 #Reset sequence value to 0
break
next = board_state[i_step][j_step] #Gets next cell
if this == 0 and next > 0:
# If all cells so far were empty and the next is not, update this sequence to match the player with piece on next cell
this = next
sequence = 1
elif this != next:
if next == 0:
continue
# If the cells dont match and are from different players, this sequence cant be a winning sequence for either
# Resets sequence to 0
sequence = 0
break
else:
# Else, the piece is from the same player and the sequence count increases
if this > 0:
sequence += 1
#Adds score based on sequence and weights predefined in the class
scores[this-1] += __score_weights[sequence]
score = __offensiveness * scores[0] - (1 - __offensiveness) * scores[1]
if player == 2:
score = -1 * score
return score #Returns the score for the state
def makeDecision(grid):
board = Board(grid,7,6,4)
outcome = board.get_outcome()
if outcome != 0:
decision = -1*outcome
else:
decision = go(board) + 1
return decision | Oporto/IndustrialRobotics | pysource/alpha_beta_agent.py | alpha_beta_agent.py | py | 10,333 | python | en | code | 1 | github-code | 13 |
19446201125 | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 28 12:38:50 2017
@author: T366159
"""
''' ### PROJET PREDICTION RETARDS DE VOLS ### '''
''' ############################# IMPORTS ######################################### #'''
import pylab as P #
import matplotlib.pyplot as plt
import pandas as pd #
import numpy as np
from scipy.stats import skew, boxcox , probplot #
###############################################################################
''' ############################################### ANNEXES #################################################
'''
def scatter_quanti(udata):
for i in udata.columns:
print(i)
X=udata[i]
if (X.dtypes.name in ['float64', 'int64' , 'int32'] and i!='RTD') :
plt.scatter( X, udata['RTD'], lw=0, alpha=.08, color='k' , label='bonjouuuuuur' )
plt.show()
ppp=input()
def categ_ccw(x):
if x in [0.0 , 1.0]: #'clear sky' or x=='Sky is Clear':
x='A'
elif x in [2.0,3.0,4.0,5.0]: #=='broken clouds' or x=='few clouds' or x=='mist' or x=='light intensity drizzle' or x=='drizzle':
x='B'
elif x in [6.0,7.0,8.0,9.0]: #=='light rain' or x=='scattered clouds' or x=='thunderstorm with light rain' or x=='thunderstorm' or x=='moderate rain' or x=='light intensity drizzle rain':
x='C'
elif x in [10.0 , 11.0 , 12.0, 13.0]: #=='heavy rain' or x=='heavy intensity rain' or x=='very heavy rain' or x=='overcast clouds' or x=='thunderstorm with heavy rain'or x=='thunderstorm with rain':
x='D'
elif x in [14.0,15.0,16.0,17.0,18.0,19.0,20.0,22.0,23.0,24.0,25.0]: #=='heavy rain' or x=='heavy intensity rain' or x=='very heavy rain' or x=='overcast clouds' or x=='thunderstorm with heavy rain'or x=='thunderstorm with rain':
x='E'
elif x in [21.0]: #=='heavy rain' or x=='heavy intensity rain' or x=='very heavy rain' or x=='overcast clouds' or x=='thunderstorm with heavy rain'or x=='thunderstorm with rain':
x='F'
return x
def categ_eqpmt(x):
if (x=='AB'or x=='AH' or x=='CA' or x=='CB'or x=='EC' or x=='MD' or x=='NC' or x=='NZ'):
x='A'
elif x in ['CC', 'CD', 'DA','DB','EB','DF','MA','NO']:
x='B'
elif x in ['AV']:
x='D'
else:
x='C'
return x
def categ_catavi(x):
if x in ['0','M','U']:
x='A'
elif x in ['A','B','Q','S']:
x='B'
else:
x='C'
return x
def categ_rtd(x):
if x<=-3: # in ['0','M','U']:
Y='A'
elif x in [-2, -1, 0, 1, 2]:
Y='B'
elif ((x>2) and (x<16)):
Y='C'
else:
Y='D'
return Y
def categ_dou(x):
if x=='M':
x='I'
return x
def print_details(name_dtf,variable):
print("Description of {0}".format(variable))
print()
print(name_dtf[variable].describe())
print()
print(name_dtf[variable].value_counts())
print();
def interr (data, liste, listeq):
a=[]
for i in data.columns: # listeVarbis :
if i in liste:
print(i)
try:
a=data[data[i] != '?']
#print(pdata[pdata[i] != '?'])
except TypeError:
print('error')
try:
a=a[a[i] != ' ']
#print(pdata[pdata[i] != '?'])
except TypeError:
print('errorbis')
data=a
if i in listeq:
data[i]=data[i].astype(np.integer)
elif i=='hour':
print('none')
else:
data[i]=data[i].astype('category')
else:
del data[i]
return (data)
def regularization(data,liste):
for i in liste:
print(i)
C=data[i].value_counts().index.unique()
C=set(C)
print (C)
for j in C:
if (isinstance( j, int )):
print(j)
#data.loc[data[i]==j,i]='%s' %(j)
u="'%s'" %(j)
indexs=[data[i]==u]
#u = urllib.quote("'" + j + "'")
print(u)
for k in indexs:
data[k][i]= j
'''
U=data[data[i]==j]
U[i]='%s' %(j)
data[data[i]==j]=U'''
def mean_cat(data,var, val=False):
b=data.groupby([var]).mean()
if val:
print("give me some limit value!")
value=input()
#valueb=np.percentile(data.RTD,25)
else:
value=np.percentile(data.RTD,75)
#valueb=np.percentile(data.RTD,25)
a=b[(b.RTD>float(value))] # '''| (b.RTD<float(valueb))'''
a["mod"]=a.index
a["cnt"]=a['mod'].apply(lambda i: len(data[data[var]==i].values))
print (a[['RTD','mod','cnt']])
plt.scatter(a.RTD,a.cnt)
return a[['RTD','mod','cnt']]
def dispersion(data):
for i in data.columns:
print(i)
X=data[i]
if X.dtypes.name in ['category']:
Y= data.groupby(i).mean().dropna()
print(Y['RTD'])
label=Y['RTD'].index
plt.xticks( np.arange(2*len(label)) , label , rotation='vertical')
#plt.yticks( np.arange(60) , [-50 , 0 , 50 ,100 ,150 , 200])
plt.plot(Y.RTD.values, 'o-', color='r', lw=2, label='Retard Average' , alpha=.4)
plt.show()
ans=input()
elif X.dtypes.name in ['float64', 'int64', 'int32'] :
print("next variable")
#plt.xticks( np.arange(25) , rotation='vertical')
plt.hist(X)#, bins=np.arange(25)
plt.show()
probplot(X, dist="norm", plot=P)
plt.show()
ans=input()
def print_distribution(name_dtf,variable1 : str, value1, variable2 : str, value2) :
#défine an error block in the case valux not in set(variablex)
#get the indices of the two cases we want ot study
answer1000=0
dic1=create_index(name_dtf[variable1])
dic2=create_index(name_dtf[variable2])
#get the indices of the intersection of the two cases (so we can have the relevant set)
#print(max(dic1.get(value1)),max(dic2.get(value2))
indices=set(dic1[value1]).intersection(dic2[value2])
#convert to list type in order to select automatically the right part of the dataframe
indices=list(indices)
#print (max(indices))
#get the variable we want to study within the case set before by the arguments
#define error in the case what the user enter is not a variable
#while answer1000!=1:
#print("Give variable to print in case where {0}={1} and {2}={3}".format(variable1, value1, variable2, value2))
#target=RTD #input()
#while target not in name_dtf.columns:
# target=input()
print("Here is the histogram of {1} established on {0} values".format(len(indices),RTD))
PLT=pd.DataFrame({RTD : name_dtf.iloc[indices][RTD]}, columns=[RTD])
try:
print(PLT.columns)
#plt.subplots(4,2,figsize=(8,8))
PLT[target].hist(alpha=0.5, bins=96)
#plt.hist(PLT[target],alpha=0.5,bins=96, by=name_dtf.iloc[indices]['CARD_TYP_CD'])
plt.xlabel('Time in seconds')
plt.title("{0}={1} and {2}={3}".format(variable1, value1, variable2, value2))
plt.legend()
plt.show()
except NameError:
print("You probably did not compile the packages dude")
#print("Do you want to print another variable for this case ?")
#answer1000=1 #input()
#if 'y' not in answer:
# break
def clean(feature_names, data_to_clean): # USES PRINT_DETAILS
ylambda=0
print("Do you want to interact with this program?")
ans='n' # input()
if 'y' in ans:
interact=True
else:
interact=False
print("Do you want to see output?")
ans1='n' #input()
if 'y' in ans1:
show=True
else:
show=False
minimum=10
for i in range(len(feature_names)):
print( "Etape N°: ",i+1," / " , len(feature_names))
# try:
if ((data_to_clean[feature_names[i]].dtypes == 'int64') | (data_to_clean[feature_names[i]].dtypes == 'int32') | (data_to_clean[feature_names[i]].dtypes == 'float64')):
if (show):
#if (interact):
#print("c'est quantitatif!")
data_to_clean[feature_names[i]].plot(kind="box")
plt.show()
data_to_clean[feature_names[i]].plot(kind="hist")
plt.show()
print(data_to_clean[feature_names[i]].describe())
probplot(data_to_clean[feature_names[i]], dist="norm", plot=P)
P.show()
print(abs(skew(data_to_clean[feature_names[i]])))
if abs(skew(data_to_clean[feature_names[i]]))>1:
o=skew(data_to_clean[feature_names[i]])
print('Normality of the features is ' , o , " (mind that the best is 0)" )
gg=min(data_to_clean[feature_names[i]])
if gg>0:
gg=0
else:
gg=abs(gg)+1
if abs(skew(boxcox(data_to_clean[feature_names[i]]+gg)[0])) < abs(skew(np.sqrt(data_to_clean[feature_names[i]]+gg))):
data_to_clean[feature_names[i]] , lambdaa=boxcox(data_to_clean[feature_names[i]]+gg)
print("lambda =" , lambdaa, " for " , feature_names[i])
if feature_names[i]=='RTD':
# global ylambda
ylambda=lambdaa
else:
data_to_clean[feature_names[i]]=np.sqrt(data_to_clean[feature_names[i]]+gg)
print("c'est sqrt!")
print("variable ", feature_names[i], " processed")
data_to_clean[feature_names[i]].plot(kind="hist")
plt.show()
probplot(data_to_clean[feature_names[i]], dist="norm", plot=P)
P.show()
else:
#
kdata=data_to_clean
X=kdata[feature_names[i]].value_counts()
if (interact):
print("c'est qualitatif!")
print_details(data_to_clean,feature_names[i])
listeb=[]
for j in range(len(X)):
if (X.values[j] < minimum):
listeb=[X.index[u] for u in range(j,len(X))]
for f in listeb:
kdata=kdata[kdata[feature_names[i]]!=f]
break
if '?' in X.index:
kdata=kdata[kdata[feature_names[i]]!='?']
if interact:
print("no more interrogations!")
if (interact) :
print("you're okay getting rid of data under represented?")
anssss=input()
if 'y' in anssss:
data_to_clean=kdata
print("done")
else:
data_to_clean=kdata
print("done")
if show:
print(data_to_clean[feature_names[i]].value_counts())
#print("Les variables vous semblent-elles liées?")
#data.boxplot('RTD',feature_names[i])
kdata=[]
# except TypeError:
if (interact) & (i!=len(feature_names)-1):
print("Do you want stop this?")
answer=input()
if 'y' in answer:
return data_to_clean
break
elif (i==len(feature_names)-1):
print("l'interface de préparation de données est terminée")
return (data_to_clean,ylambda)
def invboxcox(y,ld):
if ld == 0:
return(np.exp(y))
else:
return(np.exp(np.log(ld*y+1)/ld))
def normalization (feature_names, data_to_clean):
for i in range(len(feature_names)):
print( "Etape N°: ",i+1," / " , len(feature_names))
if (((data_to_clean[feature_names[i]].dtypes == 'int64') | (data_to_clean[feature_names[i]].dtypes == 'int32') | (data_to_clean[feature_names[i]].dtypes == 'float64')) & (feature_names[i] !='RTD')) :
print(abs(skew(data_to_clean[feature_names[i]])))
if abs(skew(data_to_clean[feature_names[i]]))>1:
o=skew(data_to_clean[feature_names[i]])
print('Normality of the features is ' , o , " (mind that the best is 0)" )
gg=min(data_to_clean[feature_names[i]])
if gg>0:
gg=0
else:
gg=abs(gg)+1
print("gg = " , gg)
if abs(skew(boxcox(data_to_clean[feature_names[i]]+gg)[0])) < abs(skew(np.sqrt(data_to_clean[feature_names[i]]+gg))):
data_to_clean[feature_names[i]] , lambdaa=boxcox(data_to_clean[feature_names[i]]+gg)
print("lambda =" , lambdaa, " for " , feature_names[i])
else:
data_to_clean[feature_names[i]]=np.sqrt(data_to_clean[feature_names[i]]+gg)
print("c'est sqrt!")
print("variable ", feature_names[i], " processed")
data_to_clean[feature_names[i]].plot(kind="hist")
plt.show()
probplot(data_to_clean[feature_names[i]], dist="norm", plot=P)
P.show()
return data_to_clean
'''
def scatter_quanti(data):
for i in listeVarbis:
print(i)
X=data[i]
if (X.dtypes.name in ['float64', 'int64'] and i!='RTD') :
plt.scatter( X, data['RTD'], lw=0, alpha=.08, color='k' , label='bonjouuuuuur' )
plt.show()
ppp=input()
'''
| OthmaneZiyati/Flight-Delay-Prediction- | V2_annexes.py | V2_annexes.py | py | 14,447 | python | en | code | 0 | github-code | 13 |
15343389004 | from pptx import Presentation
import copy
from pptx.dml.color import RGBColor
def replace_text_in_slide(slide, replacement_dict, font, font_color):
# замена шаблонного текста на сгенерированный
for shape in slide.shapes:
if shape.has_text_frame:
for paragraph in shape.text_frame.paragraphs:
for run in paragraph.runs:
for key, value in replacement_dict.items():
run.font.name = font
run.font.color.rgb = RGBColor(int(font_color[0]), int(font_color[1]), int(font_color[2]))
run.text = run.text.replace(key, str(value))
run.font.color.rgb = RGBColor(int(font_color[0]), int(font_color[1]), int(font_color[2]))
def merge_powerpoint_ppts(presentation, output_loc, layout, placeholders, font, font_color, value):
pres_loc2 = 'templates/powerpointbase.com-1079.pptx'
"""
layouts:
2_2 - title, 5 elements in line 2
2_3 - title, 4 elements + 4 subtitles 3
2_4 - title, 4 point plan 4
2_5 - title, pyramid, 4 steps, 6 side text elements 5
"""
slide_id_to_copy = int(layout.split('_')[1]) - 1
# загрузка шаблона
pres2 = Presentation(pres_loc2)
for _ in range(5):
# каким-то волшебным образом это убирает ошибки
placeholders.append(_)
# массив в словарь исходя из шаблона
if slide_id_to_copy + 1 == 2:
placeholders = {'{title}': placeholders[0],
'{title.1}': placeholders[1],
'{title.2}': placeholders[2],
'{title.3}': placeholders[3],
'{title.4}': placeholders[4],
'{title.5}': placeholders[5],
'{Content.1}': placeholders[6],
'{Content.2}': placeholders[7],
'{Content.3}': placeholders[8],
'{Content.4}': placeholders[9],
'{Content.5}': placeholders[10]}
elif (slide_id_to_copy + 1 == 3) or (slide_id_to_copy + 1 == 4) or (slide_id_to_copy + 1 == 6):
placeholders = {'{title}': placeholders[0],
'{title.1}': placeholders[1],
'{title.2}': placeholders[2],
'{title.3}': placeholders[3],
'{title.4}': placeholders[4],
'{Content.1}': placeholders[5],
'{Content.2}': placeholders[6],
'{Content.3}': placeholders[7],
'{Content.4}': placeholders[8], }
elif slide_id_to_copy + 1 == 5:
placeholders = {'{title}': placeholders[0],
'{title.1}': placeholders[1],
'{title.2}': placeholders[2],
'{title.3}': placeholders[3],
'{title.4}': placeholders[4],
'{title.5}': placeholders[5],
'{title.6}': placeholders[6],
'{pyramid.1}': placeholders[7],
'{pyramid.2}': placeholders[8],
'{pyramid.3}': placeholders[9],
'{pyramid.4}': placeholders[10], }
# замена текста в каждом слайде шаблона
for slide in pres2.slides:
replace_text_in_slide(slide, placeholders, font, font_color)
# сохранение
pres1 = presentation
slide = pres2.slides[slide_id_to_copy]
sl = pres1.slides.add_slide(pres1.slide_layouts[6])
for shape in slide.shapes:
element = shape.element
newelement = copy.deepcopy(element)
sl.shapes._spTree.insert_element_before(newelement, 'p:extLst')
return pres1
| Max3xis/Present-It | custom_layouts.py | custom_layouts.py | py | 3,999 | python | en | code | 1 | github-code | 13 |
41503350985 | """Working with hash values."""
from redis import Redis
from redis_python_tutorial.logger import LOGGER
def hash_values_demo(r: Redis):
"""
Create a Redis hash value.
:param Redis r: Remote Redis instance.
"""
record = {
"name": "Hackers and Slackers",
"description": "Mediocre tutorials",
"website": "https://hackersandslackers.com/",
"github": "https://github.com/hackersandslackers",
}
r.hset("business", mapping=record)
LOGGER.info(f"business: {r.hgetall('business')}")
| hackersandslackers/redis-python-tutorial | redis_python_tutorial/data/hash.py | hash.py | py | 542 | python | en | code | 23 | github-code | 13 |
16510630924 | import heapq
from typing import List, final
import math
def kClosest(points: List[List[int]], k: int) -> List[List[int]]:
"""
Given an array of points where points[i] = [xi, yi] represents a point on the X-Y plane and an integer k, return the k closest points to the origin (0, 0).
The distance between two points on the X-Y plane is the Euclidean distance (i.e., √(x1 - x2)2 + (y1 - y2)2).
You may return the answer in any order. The answer is guaranteed to be unique (except for the order that it is in)
"""
result = {}
i = 0
for point in points:
current = round(math.sqrt(point[0]**2 + point[1]**2), 2)
result[i] = [current]
i += 1
sorted_result = sorted(result.items(), key=lambda x: x[1])
return [points[x[0]] for x in sorted_result[:k]]
if __name__ == '__main__':
points = [[1, 3], [-2, 2]]
k = 1
#Output: [[-2,2]]
#print(kClosest(points, k))
points = [[3, 3], [5, -1], [-2, 4]]
k = 2 # Output: [[3,3],[-2,4]]
print(kClosest(points, k))
points = [[0, 1], [1, 0]]
k = 2
print(kClosest(points, k))
| tmbothe/Data-Structures-and-algorithms | src/heap/k-points_from_the_origin.py | k-points_from_the_origin.py | py | 1,118 | python | en | code | 0 | github-code | 13 |
14851081345 | class Student():
def set_student(self,rol,name,course):
self.rol=rol
self.name=name
self.course=course
def get_student(self):
print(self.rol,",",self.name,",",self.course)
obj=Student()
obj.set_student(101,"rizni","django")
obj.get_student()
#set_student()
#this method is performed by initializing instance variable
#instance variable are prepanded with self keyword
#
#we can access instance variable outside class by using reference
#
print(obj.course)
print(obj.rol)
#
#inside class self keyword
#constructor
#duty of constructor initializing instance variable
#constructor name always class name in java c++
#in python constuctor name is __init__()
#constructor automatically invoked during object creation
class Faculty():
def __init__(self,rol,name,course):
self.rol=rol
self.name=name
self.course=course
obj=Faculty(200,"Deepthi","bca")
print(obj.rol)
print(obj.name)
print(obj.course) | rizniyarasheed/python | oops/stud.py | stud.py | py | 964 | python | en | code | 0 | github-code | 13 |
10328220257 | from dataclasses import dataclass
from typing import Callable, Optional, List, Dict, Any, Iterator
import ray
from ray.data.block import Block
from ray.data.context import DatasetContext
from ray.data._internal.compute import (
ComputeStrategy,
TaskPoolStrategy,
ActorPoolStrategy,
)
from ray.data._internal.execution.util import merge_ref_bundles
from ray.data._internal.execution.interfaces import (
RefBundle,
)
from ray.data._internal.execution.operators.map_task_submitter import MapTaskSubmitter
from ray.data._internal.execution.operators.actor_pool_submitter import (
ActorPoolSubmitter,
)
from ray.data._internal.execution.operators.task_pool_submitter import TaskPoolSubmitter
from ray.data._internal.memory_tracing import trace_allocation
from ray.types import ObjectRef
from ray._raylet import ObjectRefGenerator
class MapOperatorState:
def __init__(
self,
transform_fn: Callable[[Iterator[Block]], Iterator[Block]],
compute_strategy: ComputeStrategy,
ray_remote_args: Optional[Dict[str, Any]],
min_rows_per_bundle: Optional[int],
):
# Execution arguments.
self._min_rows_per_bundle: Optional[int] = min_rows_per_bundle
# Put the function def in the object store to avoid repeated serialization
# in case it's large (i.e., closure captures large objects).
transform_fn_ref = ray.put(transform_fn)
# Submitter of Ray tasks mapping transform_fn over data.
if ray_remote_args is None:
ray_remote_args = {}
if isinstance(compute_strategy, TaskPoolStrategy):
task_submitter = TaskPoolSubmitter(transform_fn_ref, ray_remote_args)
elif isinstance(compute_strategy, ActorPoolStrategy):
# TODO(Clark): Better mapping from configured min/max pool size to static
# pool size?
pool_size = compute_strategy.max_size
if pool_size == float("inf"):
# Use min_size if max_size is unbounded (default).
pool_size = compute_strategy.min_size
task_submitter = ActorPoolSubmitter(
transform_fn_ref, ray_remote_args, pool_size
)
else:
raise ValueError(f"Unsupported execution strategy {compute_strategy}")
self._task_submitter: MapTaskSubmitter = task_submitter
# Whether we have started the task submitter yet.
self._have_started_submitter = False
# The temporary block bundle used to accumulate inputs until they meet the
# min_rows_per_bundle requirement.
self._block_bundle: Optional[RefBundle] = None
# Execution state.
self._tasks: Dict[ObjectRef[ObjectRefGenerator], _TaskState] = {}
self._tasks_by_output_order: Dict[int, _TaskState] = {}
self._next_task_index: int = 0
self._next_output_index: int = 0
self._obj_store_mem_alloc: int = 0
self._obj_store_mem_freed: int = 0
self._obj_store_mem_cur: int = 0
self._obj_store_mem_peak: int = 0
def add_input(self, bundle: RefBundle) -> None:
if not self._have_started_submitter:
# Start the task submitter on the first input.
self._task_submitter.start()
self._have_started_submitter = True
if self._min_rows_per_bundle is None:
self._create_task(bundle)
return
def get_num_rows(bundle: Optional[RefBundle]):
if bundle is None:
return 0
if bundle.num_rows() is None:
return float("inf")
return bundle.num_rows()
bundle_rows = get_num_rows(bundle)
acc_num_rows = get_num_rows(self._block_bundle) + bundle_rows
if acc_num_rows > self._min_rows_per_bundle:
if self._block_bundle:
if get_num_rows(self._block_bundle) > 0:
self._create_task(self._block_bundle)
self._block_bundle = bundle
else:
self._create_task(bundle)
else:
# TODO(ekl) add a warning if we merge 10+ blocks per bundle.
self._block_bundle = merge_ref_bundles(self._block_bundle, bundle)
def inputs_done(self) -> None:
if self._block_bundle:
self._create_task(self._block_bundle)
self._block_bundle = None
self._task_submitter.task_submission_done()
def work_completed(self, ref: ObjectRef[ObjectRefGenerator]) -> None:
self._task_submitter.task_done(ref)
task: _TaskState = self._tasks.pop(ref)
# Dynamic block splitting path.
all_refs = list(ray.get(ref))
del ref
block_refs = all_refs[:-1]
block_metas = ray.get(all_refs[-1])
assert len(block_metas) == len(block_refs), (block_refs, block_metas)
for ref in block_refs:
trace_allocation(ref, "map_operator_work_completed")
task.output = RefBundle(list(zip(block_refs, block_metas)), owns_blocks=True)
allocated = task.output.size_bytes()
self._obj_store_mem_alloc += allocated
self._obj_store_mem_cur += allocated
# TODO(ekl) this isn't strictly correct if multiple operators depend on this
# bundle, but it doesn't happen in linear dags for now.
freed = task.inputs.destroy_if_owned()
if freed:
self._obj_store_mem_freed += freed
self._obj_store_mem_cur -= freed
if self._obj_store_mem_cur > self._obj_store_mem_peak:
self._obj_store_mem_peak = self._obj_store_mem_cur
def has_next(self) -> bool:
i = self._next_output_index
return (
i in self._tasks_by_output_order
and self._tasks_by_output_order[i].output is not None
)
def get_next(self) -> RefBundle:
i = self._next_output_index
self._next_output_index += 1
bundle = self._tasks_by_output_order.pop(i).output
self._obj_store_mem_cur -= bundle.size_bytes()
return bundle
def get_work_refs(self) -> List[ray.ObjectRef]:
return list(self._tasks.keys())
def num_active_work_refs(self) -> int:
return len(self._tasks)
def shutdown(self) -> None:
self._task_submitter.shutdown(self.get_work_refs())
@property
def obj_store_mem_alloc(self) -> int:
"""Return the object store memory allocated by this operator execution."""
return self._obj_store_mem_alloc
@property
def obj_store_mem_freed(self) -> int:
"""Return the object store memory freed by this operator execution."""
return self._obj_store_mem_freed
@property
def obj_store_mem_peak(self) -> int:
"""Return the peak object store memory utilization during this operator
execution.
"""
return self._obj_store_mem_peak
def _create_task(self, bundle: RefBundle) -> None:
input_blocks = []
for block, _ in bundle.blocks:
input_blocks.append(block)
# TODO fix for Ray client: https://github.com/ray-project/ray/issues/30458
if not DatasetContext.get_current().block_splitting_enabled:
raise NotImplementedError("New backend requires block splitting")
ref: ObjectRef[ObjectRefGenerator] = self._task_submitter.submit(input_blocks)
task = _TaskState(bundle)
self._tasks[ref] = task
self._tasks_by_output_order[self._next_task_index] = task
self._next_task_index += 1
self._obj_store_mem_cur += bundle.size_bytes()
if self._obj_store_mem_cur > self._obj_store_mem_peak:
self._obj_store_mem_peak = self._obj_store_mem_cur
@dataclass
class _TaskState:
"""Tracks the driver-side state for an MapOperator task.
Attributes:
inputs: The input ref bundle.
output: The output ref bundle that is set when the task completes.
"""
inputs: RefBundle
output: Optional[RefBundle] = None
| machallboyd/ray | python/ray/data/_internal/execution/operators/map_operator_state.py | map_operator_state.py | py | 8,006 | python | en | code | null | github-code | 13 |
73801664338 | import contextlib
import fnmatch
import logging
import os
from collections import OrderedDict
from schema import And
from schema import Or
from deployer.plugins.plugin_with_tasks import PluginWithTasks
from deployer.rendering import render
from deployer.result import Result
LOGGER = logging.getLogger(__name__)
@contextlib.contextmanager
def matrix_scoped_variables(context, tag):
    """Ensure all variables introduced by the ```Matrix``` plug-in are scoped to itself.

    On entry (when a context is supplied) a fresh variable frame is pushed,
    the current matrix tag is recorded in it, and the tag is appended to the
    running list of entered matrices.  The frame is popped again on exit,
    even when the managed body raises.

    :param context: execution context carrying the variable stack; may be
        falsy, in which case this degenerates to a bare ``yield``.
    :param tag: name of the matrix combination being entered.
    """
    if context:
        context.variables.push_last()
        frame = context.variables.last()
        # Expose the tag of the matrix combination currently executing.
        frame['matrix_tag'] = tag
        # Maintain the descending list of matrices entered so far.
        frame.setdefault('matrix_list', []).append(tag)
    try:
        yield
    finally:
        if context:
            context.variables.pop()
class Matrix(PluginWithTasks):
    """Manage multiple combinations of a pipeline."""

    TAG = 'matrix'

    # 'tags' is either a plain list of tag names, or a mapping of
    # tag -> {env var name -> value template}.
    SCHEMA = {
        'tags': Or([int, float, And(str, len)], {And(str, len): {And(str, len): And(str, len)}}),
    }

    def __init__(self, node):
        """Ctor."""
        self._tags = node['tags']
        super(Matrix, self).__init__(node)

    @staticmethod
    def valid(node):
        """Ensure node structure is valid."""
        if type(node) is not OrderedDict:
            return False
        if Matrix.TAG not in node:
            return False
        return PluginWithTasks._valid(Matrix.SCHEMA, Matrix.TAG, node)

    @staticmethod
    def build(node):
        """Build a ```Matrix``` node."""
        yield Matrix(node[Matrix.TAG])

    def execute(self, context):
        """Perform the plugin's task purpose."""
        result = Result(result='success')
        for tag in self._tags:
            if isinstance(self._tags, (dict, OrderedDict)):
                # we have a dictionary of items.
                LOGGER.debug("Setting environment variables for tag.")
                for key, value in self._tags[tag].items():
                    if context:
                        # Values are templates rendered against the current scope.
                        value = render(value, **context.variables.last())
                    else:  # noqa: no-cover
                        raise RuntimeError("Context is required.")
                    LOGGER.debug("Setting '%s' to '%s', in the system environment.", key, value)
                    os.putenv(key, value)
                    os.environ[key] = value
            with matrix_scoped_variables(context, tag):
                matrix_list = []
                matrix_tags = []
                if context and len(context.matrix_tags) > 0:
                    matrix_list = context.variables.last()[
                        'matrix_list'] if 'matrix_list' in context.variables.last() else []
                    matrix_tags = context.matrix_tags
                # Skip this entry unless every user-selected tag pattern matches
                # the corresponding entered-matrix tag (fnmatch wildcards allowed).
                if len(matrix_tags) > 0 and not all(
                        [fnmatch.fnmatch(x[1], x[0]) for x in zip(matrix_tags, matrix_list)]):
                    LOGGER.debug("Skipping because this matrix item does not have a user-selected matrix tag.")
                    LOGGER.debug("matrix_list=%r matrix_tags=%r", matrix_list, matrix_tags)
                    result = Result(result='skipped')
                else:
                    LOGGER.debug('Beginning matrix entry: %s', tag)
                    result = self._execute_tasks(context)
                    LOGGER.debug('Completed matrix entry: %s', tag)
            if isinstance(self._tags, (dict, OrderedDict)):
                # we have a dictionary of items.
                LOGGER.debug("Unsetting environment variables for tag.")
                for key, _ in self._tags[tag].items():
                    try:
                        os.unsetenv(key)
                    except AttributeError:  # noqa: no-cover
                        pass  # noqa: no-cover
                    del os.environ[key]
            if not result:
                break
        # 'skipped'/'continue' outcomes do not fail the matrix as a whole.
        if result['result'] in ['skipped', 'continue']:
            result = Result(result='success')
        return result
| jbenden/deployer | src/deployer/plugins/matrix.py | matrix.py | py | 4,257 | python | en | code | 2 | github-code | 13 |
def lengthOfLongestSubstring(s):
    """Return every longest substring of *s* whose characters are all distinct.

    Despite the name, this returns the substrings themselves (in order of
    first appearance), not their length — the original interface is kept.

    Improvements over the original:
    - an empty input returns [''] instead of crashing with ValueError on max();
    - the inner scan stops at the first duplicate, since any longer extension
      from the same start also contains that duplicate (key set is unchanged).

    Args:
        s: Input string; may be empty.

    Returns:
        List of the longest duplicate-free substrings.
    """
    if not s:
        return ['']
    n = len(s)
    sub_string = {}
    for i in range(n):
        for j in range(i, n):
            candidate = s[i:j + 1]
            if len(set(candidate)) != len(candidate):
                # Duplicate found; longer slices from i would repeat it too.
                break
            sub_string[candidate] = len(candidate)
    max_value = max(sub_string.values())
    return [k for k, v in sub_string.items() if v == max_value]
NO_OF_CHARS = 256
def longestUniqueSubsttr(string):
    """Return the length of the longest substring with no repeated characters.

    Improvements over the original:
    - handles the empty string (the original raised IndexError on string[0]);
    - tracks last-seen positions in a dict, supporting any character instead
      of only the first NO_OF_CHARS (256) code points;
    - removes the leftover debug prints.

    Args:
        string: Input text; may be empty.

    Returns:
        Length of the longest run of pairwise-distinct characters (0 for "").
    """
    max_len = 0
    window_start = 0  # left edge of the current duplicate-free window
    last_seen = {}    # character -> index of its most recent occurrence
    for i, ch in enumerate(string):
        prev = last_seen.get(ch, -1)
        if prev >= window_start:
            # ch repeats inside the window: shrink past its previous occurrence.
            window_start = prev + 1
        last_seen[ch] = i
        max_len = max(max_len, i - window_start + 1)
    return max_len
# Demo: "abbcd" -> 3 ("bcd" is the longest run of distinct characters).
s = longestUniqueSubsttr("abbcd")
print (s)
| animeshmod/python-practice | longest_substring.py | longest_substring.py | py | 2,449 | python | en | code | 0 | github-code | 13 |
34631904692 | from django.shortcuts import render, redirect, get_object_or_404
from .models import Post, Comment
from .forms import BlogPostForm, BlogCommentForm
from django.utils import timezone
from django.contrib.auth.decorators import login_required
# Create your views here.
def show_posts(request):
    """Render every post published up to now, newest first."""
    published = Post.objects.filter(published_date__lte=timezone.now())
    newest_first = published.order_by('-published_date')
    return render(request, "blogposts.html", {'posts': newest_first})
def view_post(request, id):
    """Display one blog post together with its comments and a blank comment form."""
    this_post = get_object_or_404(Post, pk=id)
    context = {
        'post': this_post,
        'comments': Comment.objects.filter(post=this_post),
        "form": BlogCommentForm(),
    }
    return render(request, "viewpost.html", context)
def add_post(request):
    """Create a new blog post.

    GET shows a blank form; a valid POST saves the post and redirects to it;
    an invalid POST re-renders the bound form so validation errors are shown.
    """
    if request.method != "POST":
        # GET request: present an empty form.
        return render(request, "addform.html", {'form': BlogPostForm()})
    form = BlogPostForm(request.POST, request.FILES)
    if form.is_valid():
        # commit=False defers the INSERT: author and timestamps are not on the
        # form, so they are filled in here before the row is written.
        post = form.save(commit=False)
        post.author = request.user
        post.created_date = timezone.now()
        post.published_date = timezone.now()
        post.save()
        return redirect(view_post, post.pk)
    # Invalid POST: fall through with the bound form.
    return render(request, "addform.html", {'form': form})
# def edit_post(request, id):
# post = get_object_or_404(Post, pk=id)
# if request.method == "POST":
# form = BlogPostForm(request.POST, instance=post)
# if form.is_valid():
# #post = form.save(commit=False)
# form.save()
# return redirect(view_post, id)
# else:
# form = BlogPostForm(instance=post)
# """
# The if else statement above. When I am on the homepage and i can see all the blog posts when i click on read more it is performing
# a get request from the view post function. When i click the edit button it runs the edit_post function. After the get object404 it
# runs the else part of the if else statment because it is getting the form from the database in its current state. If i make no changes
# or make a change once i hit the save/submit button it fires off the POST and in that case it runs the if statement. So in this case
# the if else statement is back to front. We can if we want split the edit post function into two functions. One for GET and one for POST.
# """
# return render(request, "addform.html", { 'form': form })
@login_required(login_url="/accounts/login")
def edit_post(request, id):
    """Edit an existing post (login required); dates are re-stamped on save."""
    post = get_object_or_404(Post, pk=id)
    if request.method != "POST":
        # GET: show the form pre-populated from the stored post.
        return render(request, "addform.html", {'form': BlogPostForm(instance=post)})
    form = BlogPostForm(request.POST, request.FILES, instance=post)
    if form.is_valid():
        post = form.save(commit=False)
        post.author = request.user
        post.created_date = timezone.now()
        post.published_date = timezone.now()
        post.save()
        return redirect(view_post, post.pk)
    # Invalid POST: re-render with the bound form and its errors.
    return render(request, "addform.html", {'form': form})
def add_comment(request, id):
    """Attach a submitted comment to post *id*, then return to the post page.

    Bug fix: the original returned None (an HTTP 500 in Django) when the form
    failed validation; now it always redirects back to the post, silently
    dropping an invalid comment.
    """
    post = get_object_or_404(Post, pk=id)
    form = BlogCommentForm(request.POST)
    if form.is_valid():
        # commit=False so author and post can be set before the INSERT.
        comment = form.save(commit=False)
        comment.author = request.user
        comment.post = post
        comment.save()
    return redirect(view_post, post.pk)
| declanmunroe/django_blog | blog/views.py | views.py | py | 3,885 | python | en | code | 0 | github-code | 13 |
class window:
    """Store a slice as an immutable tuple.

    NOTE(review): appears unused, and its name is shadowed by the ``window``
    loop variable in ``upvotes`` — consider removing it.
    """

    def __init__(self, sli):
        self.window = tuple(sli)
#*************************************
def upvotes(n, k, *args):
    """For each k-wide window over the daily counts, print the count of
    non-decreasing subranges minus the count of non-increasing subranges.

    Stdout output matches the original implementation exactly.
    """
    if len(args) != n:
        print("# of days of upvotes != n")
        return
    days = list(args)
    windows = [tuple(days[start:start + k]) for start in range(len(days) - k + 1)]
    print('Windows: ')
    print(windows)
    for win in windows:
        # Every contiguous subrange of length >= 2 inside this window.
        subranges = [win[lo:hi]
                     for lo in range(len(win) - 1)
                     for hi in range(lo + 2, len(win) + 1)]
        print('subranges: ')
        print(subranges)
        non_decreasing = 0
        non_increasing = 0
        for sub in subranges:
            pairs = list(zip(sub, sub[1:]))
            if all(a <= b for a, b in pairs):
                non_decreasing += 1
            if all(a >= b for a, b in pairs):
                non_increasing -= 1
        print(non_decreasing + non_increasing)
#--------------------------------------
def main():
    # Example run: 5 days of counts, window size 4.
    upvotes(5, 4, 1, 2, 3, 1, 1)


if __name__ == "__main__":
    main()
26251368589 | '''
Computes max. configurations for chip design via rectangle packing problem solver.
'''
import rectpack
import time
import sys
import os
CHIPWIDTH = None # 2400 # 3200
CHIPHEIGHT = None # 2400 # 3200
ROTATION_ALLOWED = None # False
CORE_ORDER = ["big", "A72", "Mali", "LITTLE"]
PACKING_ALGORITHM = None # rectpack.MaxRectsBssf # rectpack.GuillotineBssfSas # rectpack.SkylineBl
class Core:
    """Geometry of one core type plus how many fit on the configured chip.

    NOTE: reads the module globals CHIPWIDTH/CHIPHEIGHT, so they must already
    be set (see main()) before any Core is constructed.
    """

    def __init__(self, width, height):
        self.width = width    # core width, same unit as CHIPWIDTH
        self.height = height  # core height, same unit as CHIPHEIGHT
        self.maxrows = int(CHIPHEIGHT // height)  # rows that fit vertically
        self.maxcols = int(CHIPWIDTH // width)    # columns that fit horizontally
COREINFO = None
def set_coreinfo():
    """(Re)build the global COREINFO table of core dimensions.

    Must be called after CHIPWIDTH/CHIPHEIGHT are set, because Core.__init__
    derives per-chip row/column capacities from them.
    """
    global COREINFO
    COREINFO = {
        "big": Core(500, 380),
        "LITTLE": Core(210, 181),
        "A72": Core(583, 469),
        "Mali": Core(449, 394)
    }
    # Hypothetical square cores
    # COREINFO= {
    #     "big": Core(436,436),
    #     "LITTLE": Core(195,195),
    #     "A72": Core(523,523),
    #     "Mali": Core(421,421)
    # }
def read_input(input_file):
    """Parse a core-configuration file.

    File format: the first line names the core type used to fill leftover chip
    space; each following line is ``coretype,count``.

    Improvements over the original: blank lines (e.g. a trailing newline) no
    longer crash the tuple unpacking, and surrounding whitespace is tolerated.

    Args:
        input_file: Path of the configuration file.

    Returns:
        Tuple ``(corecounts, fillwith, placement_order)`` where corecounts maps
        core type -> requested count and placement_order preserves file order.
    """
    corecounts = {}
    placement_order = []
    with open(input_file, 'r') as inpf:
        inputlines = inpf.read().splitlines()
    fillwith = inputlines[0].strip()
    for line in inputlines[1:]:
        line = line.strip()
        if not line:
            continue  # ignore blank lines instead of raising ValueError
        coretype, corecount = line.split(",")
        coretype = coretype.strip()
        corecounts[coretype] = int(corecount)
        placement_order.append(coretype)
    return corecounts, fillwith, placement_order
def main():
    """Entry point: configure the packer from argv, then either pack one given
    configuration (len(argv) >= 6) or exhaustively explore core counts.

    argv: [1] output file, [2] packing-algorithm name, [3] chip width /100,
          [4] chip height /100, [5] optional input configuration file.
    """
    output_file = sys.argv[1]
    alg = sys.argv[2]
    global PACKING_ALGORITHM
    # NOTE(review): ROTATION_ALLOWED is assigned without a `global` statement,
    # so these writes create a function-local that shadows the module-level
    # constant; it still works because only this local is read below.
    if alg == "maxrectsrot":
        PACKING_ALGORITHM = rectpack.MaxRectsBssf
        ROTATION_ALLOWED = True
    elif alg == "maxrectsnorot":
        PACKING_ALGORITHM = rectpack.MaxRectsBssf
        ROTATION_ALLOWED = False
    elif alg == "guillotinerot":
        PACKING_ALGORITHM = rectpack.GuillotineBssfSas
        ROTATION_ALLOWED = True
    elif alg == "guillotinenorot":
        PACKING_ALGORITHM = rectpack.GuillotineBssfSas
        ROTATION_ALLOWED = False
    elif alg == "skylinerot":
        PACKING_ALGORITHM = rectpack.SkylineBl
        ROTATION_ALLOWED = True
    elif alg == "skylinenorot":
        PACKING_ALGORITHM = rectpack.SkylineBl
        ROTATION_ALLOWED = False
    else:
        print("Packing algorithm unknown! Exiting...")
        sys.exit(1)
    global CHIPWIDTH
    CHIPWIDTH = int(sys.argv[3]) * 100
    global CHIPHEIGHT
    CHIPHEIGHT = int(sys.argv[4]) * 100
    set_coreinfo()
    if len(sys.argv) >= 6:
        # Pack one explicit configuration read from the input file.
        input_file = sys.argv[5]
        corecounts, fillwith, placement_order = read_input(input_file)
        rectid = 0
        rectangles = []
        rectangle_types = {}  # rect id -> core type, for labelling the output
        for core in placement_order:
            for _ in range(corecounts[core]):
                rectangles.append((COREINFO[core].width, COREINFO[core].height, rectid))
                rectangle_types[rectid] = core
                rectid += 1
        # Offer enough filler cores to cover the whole chip; the packer keeps
        # only those that actually fit.
        for _ in range(COREINFO[fillwith].maxrows*COREINFO[fillwith].maxcols):
            rectangles.append((COREINFO[fillwith].width, COREINFO[fillwith].height, rectid))
            rectangle_types[rectid] = fillwith
            rectid += 1
        bins = [(CHIPWIDTH,CHIPHEIGHT)]
        packer = rectpack.newPacker(mode=rectpack.PackingMode.Offline, pack_algo=PACKING_ALGORITHM, sort_algo=rectpack.SORT_NONE, rotation=ROTATION_ALLOWED)
        # Add the rectangles to packing queue
        for r in rectangles:
            packer.add_rect(*r)
        # Add the bins where the rectangles will be placed
        for b in bins:
            packer.add_bin(*b)
        # Start packing
        packer.pack()
        # Count cores
        numcts = [0] * 4
        all_rects = packer.rect_list()
        # Print chip design to file, to be processed by plotter
        # NOTE(review): this writes to sys.argv[2] (the algorithm name), not
        # output_file — looks like a bug; confirm the intended destination.
        with open(sys.argv[2], 'w') as cff:
            for rect in all_rects:
                b, x, y, w, h, rid = rect
                cff.write("{},{},{},{},{}\n".format(x, y, w, h, rectangle_types[rid]))
    elif len(sys.argv) == 5:
        # Explore search space: try every count combination of the first three
        # core types, fill with the fourth, and record feasibility.
        maxct0 = COREINFO[CORE_ORDER[0]].maxrows * COREINFO[CORE_ORDER[0]].maxcols
        maxct1 = COREINFO[CORE_ORDER[1]].maxrows * COREINFO[CORE_ORDER[1]].maxcols
        maxct2 = COREINFO[CORE_ORDER[2]].maxrows * COREINFO[CORE_ORDER[2]].maxcols
        for i in range(maxct0+1):
            for j in range(maxct1+1):
                for k in range(maxct2+1):
                    # Construct rectangle queue
                    #print("Investigating core counts ({},{},{}), max. ({},{},{})".format(i,j,k,maxct0,maxct1,maxct2))
                    rectid = 0
                    rectangles = []
                    rectangle_types = {}
                    for l in range(len(CORE_ORDER)):
                        core = CORE_ORDER[l]
                        if l == 0:
                            numrects = i
                        elif l == 1:
                            numrects = j
                        elif l == 2:
                            numrects = k
                        elif l == 3:
                            # Fourth core type: offer a full-chip worth of fillers.
                            numrects = COREINFO[core].maxrows * COREINFO[core].maxcols
                        for _ in range(numrects):
                            rectangles.append((COREINFO[core].width, COREINFO[core].height, rectid))
                            rectangle_types[rectid] = core
                            rectid += 1
                    bins = [(CHIPWIDTH,CHIPHEIGHT)]
                    packer = rectpack.newPacker(mode=rectpack.PackingMode.Offline, pack_algo=PACKING_ALGORITHM, sort_algo=rectpack.SORT_NONE, rotation=ROTATION_ALLOWED)
                    # Add the rectangles to packing queue
                    for r in rectangles:
                        packer.add_rect(*r)
                    # Add the bins where the rectangles will be placed
                    for b in bins:
                        packer.add_bin(*b)
                    # Start packing
                    packer.pack()
                    # Count cores
                    numcts = [0] * 4
                    all_rects = packer.rect_list()
                    for rect in all_rects:
                        b, x, y, w, h, rid = rect
                        ct = rectangle_types[rid]
                        ctid = CORE_ORDER.index(ct)
                        numcts[ctid] += 1
                        #print(b, x, y, w, h, rid)
                    #print("Core counts placed: {} {} {} {}".format(*numcts))
                    if numcts[0] == i and numcts[1] == j and numcts[2] == k:
                        # Solution is feasible, save number of cores of fourth type added to chip
                        numct3 = numcts[3]
                        # Save layout information to file
                        if not os.path.isdir("/tmp/layouts_rectpack_{}".format(alg)):
                            os.mkdir("/tmp/layouts_rectpack_{}".format(alg))
                        with open("/tmp/layouts_rectpack_{}/layout_{}_{}_{}.csv".format(alg, i, j, k), 'w') as cff:
                            for rect in all_rects:
                                b, x, y, w, h, rid = rect
                                cff.write("{},{},{},{},{}\n".format(x, y, w, h, rectangle_types[rid]))
                    else:
                        # Infeasible combination is recorded with -1 fillers.
                        numct3 = -1
                    with open(output_file, "a") as srf:
                        srf.write("{},{},{},{}\n".format(i,j,k,numct3))
    else:
        print("Please specify input/output file(s)!")
        sys.exit(1)
if __name__ == "__main__":
    # Time the whole run and append it to a cumulative CSV-ish log.
    start_time = time.process_time()
    main()
    end_time = time.process_time()
    with open("./timerectpack.log", 'a+') as tlog:
        tlog.write(str(end_time - start_time) + "," + sys.argv[3] + "x" + sys.argv[4] + "," + sys.argv[2] + "\n")
| sglitzinger/corepacking | rectpacker.py | rectpacker.py | py | 7,739 | python | en | code | 0 | github-code | 13 |
9713673616 | import os
import json
from flask import Flask, request, session, url_for, redirect, render_template, abort, g, flash, _app_ctx_stack
from model import *
app = Flask(__name__)

# configuration — uppercase module names below are picked up by from_object(__name__)
DEBUG = True
SECRET_KEY = 'development key'  # NOTE(review): hard-coded dev secret; override in production
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(app.root_path, 'chat.db')

app.config.from_object(__name__)
# Optional overrides from the file named by $CHAT_SETTINGS, if it is set.
app.config.from_envvar('CHAT_SETTINGS', silent=True)
db.init_app(app)
@app.cli.command('initdb')
def initdb_command():
    """Creates the database tables (run via ``flask initdb``)."""
    db.create_all()
    print('Initialized the database.')
@app.before_request
def before_request():
    """Before each request, attach the session's user and chatroom (or None) to flask.g."""
    g.user = None
    g.room = None
    if 'user_id' in session:
        g.user = User.query.filter_by(user_id=session['user_id']).first()
    if 'chatroom' in session:
        g.room = Chatroom.query.filter_by(room_id=session['chatroom']).first()
@app.route('/')
def chat():
    """Home page: the room list for logged-in users, the plain layout otherwise."""
    if 'user_id' not in session:
        return render_template('layout.html')
    return render_template('chat.html', rooms=Chatroom.query.all())
@app.route('/<roomname>', methods=['GET', 'POST'])
def room(roomname):
    """Enter/display a chatroom (GET) or post a new message to it (POST).

    Seen messages are tracked by storing each message id as its own session
    key; get_messages() later returns only messages without such a key.
    """
    if request.method != 'POST' and not g.room and roomname != None:
        # First GET for this room: resolve it by name, remember it in the session.
        enter_room = Chatroom.query.filter_by(room_name=roomname).first()
        if enter_room == None:
            return render_template('chat.html')
        session['chatroom'] = enter_room.room_id
        before_request()  # refresh g.room now that the session changed
        messages = Message.query.filter_by(room_id=enter_room.room_id).all()
        for i in messages:
            # Mark every existing message as already seen.
            session[str(i.message_id)] = str(i.message_id)
        return render_template('room.html', room=roomname, messages=messages)
    if request.method == 'POST':
        create_message = Message(g.user, g.room, request.form["message"])
        db.session.add(create_message)
        db.session.commit()
        # The author has already seen their own message.
        session[str(create_message.message_id)] = str(create_message.message_id)
        return "OK!"
    # GET while already in a room: just re-render it.
    messages = Message.query.filter_by(room_id=g.room.room_id).all()
    return render_template('room.html', room=roomname, messages=messages)
@app.route("/new_messages")
def get_messages():
temp = list()
messages = Message.query.filter_by(room_id=session['chatroom']).all()
for message in messages:
text = message.message_id
if str(text) not in session:
temp.append(message.text)
session[str(text)] = str(text)
return json.dumps(temp)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in by username/password; GET just shows the form."""
    if request.method == 'POST':
        user = User.query.filter_by(username=request.form['username']).first()
        if user is None:
            error = 'Invalid username'
        elif request.form['password'] != user.password:
            # NOTE(review): plaintext password comparison — passwords should be
            # hashed (e.g. werkzeug.security) before storage and checked here.
            error = 'Invalid password'
        else:
            flash('You were logged in')
            session['user_id'] = user.user_id
            return redirect(url_for('chat'))
        return render_template('login.html', error=error)
    else:
        return render_template('login.html')
@app.route('/signup', methods=['GET', 'POST'])
def signup():
    """Register a new user; GET shows the form, POST validates and saves."""
    error = None
    if request.method == 'POST':
        if not request.form['username']:
            error = 'You have to enter a username'
        elif not request.form['password']:
            error = 'You have to enter a password'
        else:
            # NOTE(review): no uniqueness check — a duplicate username may
            # raise a database error; the password is stored as given.
            db.session.add(User(request.form['username'], request.form['password']))
            db.session.commit()
            flash('You were successfully registered')
            return render_template('layout.html')
        return render_template('register.html', error=error)
    else:
        return render_template('register.html')
@app.route('/create_room', methods=['GET', 'POST'])
def create_room():
    """Create a chatroom from the submitted topic, or show the creation form."""
    error = None  # kept for parity with the other views; never set here
    if request.method != 'POST':
        return render_template('create_room.html')
    db.session.add(Chatroom(request.form['topic']))
    db.session.commit()
    flash('New chatroom was successfully created')
    return redirect(url_for('chat'))
@app.route('/logout')
def logout():
    """Clear the login (and any active room) from the session."""
    flash('You were logged out')
    for key in ('user_id', 'chatroom'):
        session.pop(key, None)
    return redirect(url_for('chat'))
@app.route('/leave')
def leave_room():
    """Leave the room by dropping it from the session; the login is kept."""
    flash('You left the chatroom')
    session.pop('chatroom', None)
    return redirect(url_for('chat'))
| jamesshuang/ChatRoom | chat.py | chat.py | py | 4,462 | python | en | code | 0 | github-code | 13 |
17176282185 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask.ext.seasurf import SeaSurf
from flask.ext.oauth import OAuth
from vksunshine.config import VK_BASE_URL, VK_ACCESS_TOKEN_URL, VK_AUTHORIZE_URL, \
VK_REQUEST_TOKEN_PARAMS, VK_CONSUMER_KEY, VK_CONSUMER_SECRET
__all__ = ['csrf', 'oauth_manager', 'vkontakte']

# Cross-site request forgery protection extension.
# NOTE(review): presumably bound to the app via csrf.init_app elsewhere — confirm.
csrf = SeaSurf()

# OAuth registry; VKontakte is its only configured remote application.
oauth_manager = OAuth()
vkontakte = oauth_manager.remote_app('vkontakte',
                                     base_url=VK_BASE_URL,
                                     authorize_url=VK_AUTHORIZE_URL,
                                     request_token_url=None,
                                     request_token_params=VK_REQUEST_TOKEN_PARAMS,
                                     access_token_url=VK_ACCESS_TOKEN_URL,
                                     consumer_key=VK_CONSUMER_KEY,
                                     consumer_secret=VK_CONSUMER_SECRET)
| klinkin/vksunshine | vksunshine/extensions.py | extensions.py | py | 695 | python | en | code | 0 | github-code | 13 |
35628587540 | import turtle
from math import *
from tractrix import tract1, tract0,transf_ang
curva = 82
# Dados do Cavalo
lfrontal = 2.49 # Largura frontal
eixof = 0.91 # Recuo do eixo em relação a frente do veículo
d_eixo = 4 # Distância entre eixos
ltraseira = 2.49 # Largura traseira
eixot = 0.91 # Recuo do eixo em relação a frente do veículo
alfa = 25 # ângulo máximo de esterçamento : alfa (Graus)
dx = 1 # Discretização, indicando o acrescimo de posição a cada iteração
rmin = d_eixo / sin(radians(alfa)) # Raio mínimo da composição
teta = degrees(2*asin(dx/(2*rmin)))# Angulo de giro pata a trajetória circular de menor raio definida
t = turtle.Turtle()
wn = turtle.Screen()
wn.setworldcoordinates(-50,-50, 50,50)
# Eixos e extremidades do cavalo
t2 = t.clone() # Roda dianteira direita
t2.up()
t2.goto(eixof,-lfrontal/2)
t3 = t.clone() # Roda dianteira esquerda
t3.up()
t3.goto(eixof,+lfrontal/2)
t4 = t.clone() # Eixo traseiro
t4.up()
t4.goto(-d_eixo,0)
d14 = d_eixo
t5 = t.clone() # Lateral traseira direita
t5.up()
t5.goto(t4.xcor()-eixot,-ltraseira/2)
t6 = t.clone() # Lateral traseira esquerda
t6.up()
t6.goto(t4.xcor()-eixot,ltraseira/2)
colors = ["black","green","green"]*3
for k,j in zip([t,t2,t3,t4,t5,t6],colors):
k.color(j)
k.speed("fastest")
k.down()
for _ in range(10):
for k in [t,t2,t3,t4,t5,t6]:
k.fd(dx)
alfa2 = 0
teta = teta if curva> 0 else - teta
ang_final = t.heading() + curva
ang_final = transf_ang(ang_final)
ang_atual = transf_ang(t.heading())
while abs(ang_final-ang_atual) > abs(teta):
if abs(alfa2) < abs(teta) :
alfa2 = alfa2+0.1*teta
else:
alfa2 = teta
t.lt(alfa2)
t.fd(dx)
tract1(t,t2,t3,t4,t5,t6,lfrontal,eixof,ltraseira,eixot,d14)
ang_atual = transf_ang(t.heading())
delta_head = ang_final-ang_atual
while delta_head > 0.01:
delta_head = t.heading()- ang_final
t.setheading(ang_final + 0.1*delta_head)
t.fd(dx)
tract1(t,t2,t3,t4,t5,t6,lfrontal,eixof,ltraseira,eixot,d14)
for _ in range(10):
t.setheading(ang_final)
t.fd(dx)
tract1(t,t2,t3,t4,t5,t6,lfrontal,eixof,ltraseira,eixot,d14)
| rafaeldjsm/Engenharia | Geometria_Estradas/tractrix_cm.py | tractrix_cm.py | py | 2,168 | python | pt | code | 0 | github-code | 13 |
24586608105 | import tkinter as tk
from tkinter import *
# Simple demo form laid out on a grid: label, canvas spacer, gender
# checkboxes, submit button, and a two-field name entry section.
m=tk.Tk()
m.title('session1')

con=tk.Canvas(m,width=150,height=50)  # blank spacer in row 2
button = tk.Button(m, text='submit', width=25)
lbl=tk.Label(m,text='fname',background='red')

# Gender checkboxes share row 3.
var1=IntVar()
var2=IntVar()
Checkbutton(m,text='male', variable=var1).grid(row=3,column=0)
Checkbutton(m,text='fmale', variable=var2).grid(row=3,column=1)

con.grid(row=2)
lbl.grid(row=1)
button.grid(row=4)

lbl2=tk.Label(m,text='project')
lbl2.grid(row=5)

# First/last name rows with their entry fields in column 1.
Label(m, text='First Name').grid(row=6)
Label(m, text='Last Name').grid(row=7)
e1 = Entry(m,width=100)
e2 = Entry(m,width=100)
e1.grid(row=6, column=1)
e2.grid(row=7, column=1)

m.mainloop()
37996306518 | # $Id$
# This jobO should not be included more than once:
include.block( "MinBiasD3PDMaker/MinBiasD3PD_prodJobOFragment.py" )
# Common import(s):
from AthenaCommon.JobProperties import jobproperties
prodFlags = jobproperties.D3PDProdFlags
from PrimaryDPDMaker.PrimaryDPDHelpers import buildFileName
# Set up a logger:
from AthenaCommon.Logging import logging
MinBiasD3PDStream_msg = logging.getLogger( "MinBiasD3PD_prodJobOFragment" )
# Check if the configuration makes sense:
if prodFlags.WriteMinBiasD3PD.isVirtual:
MinBiasD3PDStream_msg.error( "The MinBias D3PD stream can't be virtual! " +
"It's a configuration error!" )
raise NameError( "MinBias D3PD set to be a virtual stream" )
# Construct the stream and file names:
streamName = prodFlags.WriteMinBiasD3PD.StreamName
fileName = buildFileName( prodFlags.WriteMinBiasD3PD )
MinBiasD3PDStream_msg.info( "Configuring MinBias D3PD with streamName '%s' and fileName '%s'" % \
( streamName, fileName ) )
# Create the D3PD stream:
from OutputStreamAthenaPool.MultipleStreamManager import MSMgr
MinBiasD3PDStream = MSMgr.NewRootStream( streamName, fileName, "MinBiasTree" )
def MinBiasD3PD (d3pdalg = None,
file = 'minbias.root',
tuplename = 'MinBiasD3PD',
streamname = 'd3pdstream',
**kw):
# MinBias flags
from MinBiasD3PDMaker.MinBiasD3PDMakerFlags import minbiasD3PDflags
from IOVDbSvc.CondDB import conddb
from AthenaCommon.GlobalFlags import globalflags
if minbiasD3PDflags.doPixelTracklets():
MinBiasD3PDStream_msg.info( ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> TRACKLETS" )
pass
## Don't do truth on data
if globalflags.DataSource == 'data':
minbiasD3PDflags.doMBTruth = False
pass
#--------------------------------------------------------------
# Configure Beam spot service
#--------------------------------------------------------------
# from AthenaCommon.GlobalFlags import globalflags
# if globalflags.DataSource == 'data':
# include("InDetBeamSpotService/BeamCondSvc.py")
# conddb.addOverride("/Indet/Beampos",
#minbiasD3PDflags.BSDBFolderName())
# pass
#--------------------------------------------------------------
# Configure the MinBiasD3PDMaker
#--------------------------------------------------------------
import TrackD3PDMaker
import TrigMbD3PDMaker
import TriggerD3PDMaker
import D3PDMakerCoreComps
import EventCommonD3PDMaker
## Event Info
from EventCommonD3PDMaker.EventInfoD3PDObject import EventInfoD3PDObject
## Trigger part
from TrigMbD3PDMaker.BcmRdoD3PDObject import BcmRdoD3PDObject
from TrigMbD3PDMaker.CtpRdoD3PDObject import CtpRdoD3PDObject
from TrigMbD3PDMaker.CtpDecisionD3PDObject import CtpDecisionD3PDObject
from TrigMbD3PDMaker.MbtsContainerD3PDObject import MbtsContainerD3PDObject
from TrigMbD3PDMaker.MbtsLvl2D3PDObject import MbtsLvl2D3PDObject
from TrigMbD3PDMaker.SpLvl2D3PDObject import SpLvl2D3PDObject
from TrigMbD3PDMaker.TrtLvl2D3PDObject import TrtLvl2D3PDObject
from TrigMbD3PDMaker.TrkCntsEfD3PDObject import TrkCntsEfD3PDObject
from TriggerD3PDMaker.TrigDecisionD3PDObject import TrigDecisionD3PDObject
from TriggerD3PDMaker.TrigConfMetadata import addTrigConfMetadata
## Tracking part
#-- TrackD3PDMaker Flags
from TrackD3PDMaker.TrackD3PDMakerFlags import TrackD3PDFlags
TrackD3PDFlags.doTruth.set_Value_and_Lock(minbiasD3PDflags.doMBTruth())
TrackD3PDFlags.storeDiagonalCovarianceAsErrors.set_Value_and_Lock(True)
TrackD3PDFlags.storeHitTruthMatching.set_Value_and_Lock(minbiasD3PDflags.doMBTruth())
TrackD3PDFlags.storeDetailedTruth.set_Value_and_Lock(minbiasD3PDflags.doMBTruth())
TrackD3PDFlags.storePullsAndResiduals.set_Value_and_Lock(False)
TrackD3PDFlags.storeBLayerHitsOnTrack.set_Value_and_Lock(False)
TrackD3PDFlags.storePixelHitsOnTrack.set_Value_and_Lock(True)
TrackD3PDFlags.storeSCTHitsOnTrack.set_Value_and_Lock(True)
TrackD3PDFlags.storeTRTHitsOnTrack.set_Value_and_Lock(True)
TrackD3PDFlags.storeBLayerOutliersOnTrack.set_Value_and_Lock(False)
TrackD3PDFlags.storePixelOutliersOnTrack.set_Value_and_Lock(True)
TrackD3PDFlags.storeSCTOutliersOnTrack.set_Value_and_Lock(True)
TrackD3PDFlags.storeTRTOutliersOnTrack.set_Value_and_Lock(True)
TrackD3PDFlags.storeBLayerHolesOnTrack.set_Value_and_Lock(False)
TrackD3PDFlags.storePixelHolesOnTrack.set_Value_and_Lock(True)
TrackD3PDFlags.storeSCTHolesOnTrack.set_Value_and_Lock(True)
TrackD3PDFlags.storeTRTHolesOnTrack.set_Value_and_Lock(True)
TrackD3PDFlags.storeVertexAssociation.set_Value_and_Lock(False)
TrackD3PDFlags.storeTrackPredictionAtBLayer.set_Value_and_Lock(True)
TrackD3PDFlags.storeTrackInfo.set_Value_and_Lock(True)
TrackD3PDFlags.storeTrackFitQuality.set_Value_and_Lock(True)
TrackD3PDFlags.storeTrackSummary.set_Value_and_Lock(True)
TrackD3PDFlags.storeTrackSummary.IDHits = True
TrackD3PDFlags.storeTrackSummary.IDHoles = True
TrackD3PDFlags.storeTrackSummary.IDSharedHits = True
TrackD3PDFlags.storeTrackSummary.IDOutliers = True
TrackD3PDFlags.storeTrackSummary.PixelInfoPlus = False
TrackD3PDFlags.storeTrackSummary.SCTInfoPlus = False
TrackD3PDFlags.storeTrackSummary.TRTInfoPlus = False
TrackD3PDFlags.storeTrackSummary.InfoPlus = False
TrackD3PDFlags.storeTrackSummary.MuonHits = False
TrackD3PDFlags.storeTrackSummary.MuonHoles = False
TrackD3PDFlags.storeTrackSummary.ExpectBLayer = True
TrackD3PDFlags.storeTrackSummary.HitSum = True
TrackD3PDFlags.storeTrackSummary.HoleSum = True
TrackD3PDFlags.storeTrackSummary.HitPattern = True
TrackD3PDFlags.storeTrackSummary.SiHits = False
TrackD3PDFlags.storeTrackSummary.TRTRatio = False
TrackD3PDFlags.storeTrackSummary.PixeldEdx = True # HI
TrackD3PDFlags.storeTrackSummary.ElectronPID = False
TrackD3PDFlags.trackParametersAtGlobalPerigeeLevelOfDetails.set_Value_and_Lock(3)
TrackD3PDFlags.trackParametersAtPrimaryVertexLevelOfDetails.set_Value_and_Lock(3)
TrackD3PDFlags.trackParametersAtBeamSpotLevelOfDetails.set_Value_and_Lock(3)
TrackD3PDFlags.storeTrackUnbiasedIPAtPV.set_Value_and_Lock(False)
TrackD3PDFlags.storeTrackMomentum.set_Value_and_Lock(True)
TrackD3PDFlags.vertexPositionLevelOfDetails.set_Value_and_Lock(3)
TrackD3PDFlags.storeVertexFitQuality.set_Value_and_Lock(True)
TrackD3PDFlags.storeVertexKinematics.set_Value_and_Lock(True)
TrackD3PDFlags.storeVertexPurity.set_Value_and_Lock(minbiasD3PDflags.doMBTruth())
TrackD3PDFlags.storeVertexTrackAssociation.set_Value_and_Lock(False)
TrackD3PDFlags.storeVertexTrackIndexAssociation.set_Value_and_Lock(True)
#-- Enhanced vertex info configuration
from TrackD3PDMaker.VertexD3PDAnalysisFlags import VertexD3PDAnalysisFlags
VertexD3PDAnalysisFlags.useEventInfo=False # No Evt Info reread in Vtx
VertexD3PDAnalysisFlags.useTruth = minbiasD3PDflags.doMBTruth()
VertexD3PDAnalysisFlags.useAllVertexCollections = True
VertexD3PDAnalysisFlags.useTracks=False #Already included in the MinBias D3PD
VertexD3PDAnalysisFlags.useBeamspot=False # Already included in the MinBias D3PD
VertexD3PDAnalysisFlags.useBackgroundWord=True # Components mostly included, just to be safe
VertexD3PDAnalysisFlags.useTrigger=False # Already included in the MinBias D3PD
VertexD3PDAnalysisFlags.useSecondaryVertex=False
VertexD3PDAnalysisFlags.useMET=False
VertexD3PDAnalysisFlags.useElectrons=False
VertexD3PDAnalysisFlags.useMuons=False
VertexD3PDAnalysisFlags.usePhotons=False
VertexD3PDAnalysisFlags.useJets=False
VertexD3PDAnalysisFlags.useTaus=False
#-- TrackD3PDMaker configuration
from TrackD3PDMaker.TruthTrackD3PDObject import TruthTrackD3PDObject
from TrackD3PDMaker.TruthVertexD3PDObject import TruthVertexD3PDObject
from TrackD3PDMaker.TrackD3PDObject import TrackParticleD3PDObject
from TrackD3PDMaker.TrackD3PDObject import PixelTrackD3PDObject
from TrackD3PDMaker.TrackD3PDObject import SCTTrackD3PDObject
from TrackD3PDMaker.TrackD3PDObject import TRTTrackD3PDObject
from TrackD3PDMaker.TrackD3PDObject import ResolvedTracksD3PDObject
from TrackD3PDMaker.VertexD3PDObject import PrimaryVertexD3PDObject
from TrackD3PDMaker.VertexD3PDObject import BuildVertexD3PDObject
SecVertexD3PDObject = BuildVertexD3PDObject(_prefix='secVx_',
_label='secVx',
_sgkey='SecVertices',
trackTarget='trk',
trackPrefix='trk_',
trackType='Rec::TrackParticleContainer')
from TrackD3PDMaker.V0D3PDObject import V0D3PDObject
from TrackD3PDMaker.BeamSpotD3PDObject import BeamSpotD3PDObject
## MinBias part
from MinBiasD3PDMaker.UnassociatedHitsD3PDObject import UnassociatedHitsD3PDObject
if not d3pdalg:
d3pdalg = MSMgr.NewRootStream(StreamName = streamname, FileName = file, TreeName = tuplename)
## Add blocks to the tree
# d3pdalg += EventInfoD3PDObject(10, prefix='ei_')
d3pdalg += EventInfoD3PDObject(10)
d3pdalg += TrackParticleD3PDObject(10)
d3pdalg += PrimaryVertexD3PDObject(10)
if minbiasD3PDflags.doBeamSpot():
d3pdalg += BeamSpotD3PDObject(10)
if minbiasD3PDflags.doUnassociatedHits():
d3pdalg += UnassociatedHitsD3PDObject(10)
if minbiasD3PDflags.doTrigger():
d3pdalg += CtpRdoD3PDObject(10)
d3pdalg += CtpDecisionD3PDObject(10)
d3pdalg += MbtsContainerD3PDObject(10)
d3pdalg += TrigDecisionD3PDObject(10)
addTrigConfMetadata( d3pdalg )
if minbiasD3PDflags.doDetailedTrigger():
d3pdalg += BcmRdoD3PDObject(10)
d3pdalg += MbtsLvl2D3PDObject(10)
d3pdalg += SpLvl2D3PDObject(10)
d3pdalg += TrtLvl2D3PDObject(10)
d3pdalg += TrkCntsEfD3PDObject(10)
if minbiasD3PDflags.doMBTruth():
d3pdalg += TruthTrackD3PDObject(10)
d3pdalg += TruthVertexD3PDObject(10)
if minbiasD3PDflags.doPixelTracklets():
d3pdalg += PixelTrackD3PDObject(10)
if minbiasD3PDflags.doSCTTracklets():
d3pdalg += SCTTrackD3PDObject(10)
if minbiasD3PDflags.doTRTTracklets():
d3pdalg += TRTTrackD3PDObject(10)
if minbiasD3PDflags.doResolvedTracklets():
d3pdalg += ResolvedTracksD3PDObject(10)
if minbiasD3PDflags.doV0s():
d3pdalg += V0D3PDObject(10, sgkey="V0Candidates")
d3pdalg += SecVertexD3PDObject(10)
if minbiasD3PDflags.doLucid():
from TrigMbD3PDMaker.LucidRawD3PDObject import LucidRawD3PDObject
d3pdalg += LucidRawD3PDObject(10)
if globalflags.DataSource=='geant4':
from TrigMbD3PDMaker.LucidDigitD3PDObject import LucidDigitD3PDObject
d3pdalg += LucidDigitD3PDObject(10)
if minbiasD3PDflags.doZDC() and globalflags.DataSource == "data":
##ZDC object
from ForwardDetectorsD3PDMaker.ZdcD3PDObject import ZdcD3PDObject
from ForwardDetectorsD3PDMaker.ZdcDigitsD3PDObject import ZdcDigitsD3PDObject
d3pdalg += ZdcD3PDObject(10)
d3pdalg += ZdcDigitsD3PDObject(10)
from CaloD3PDMaker.MBTSTimeD3PDObject import MBTSTimeD3PDObject
d3pdalg += MBTSTimeD3PDObject (10)
from TrackD3PDMaker.VertexGroupD3PD import VertexGroupD3PD
VertexGroupD3PD(d3pdalg)
def _args (level, name, kwin, **kw):
    """Assemble keyword args for one D3PD object.

    Starts from **kw, forces the detail ``level``, then merges in every
    entry of ``kwin`` whose key begins with ``name + '_'`` (prefix
    stripped), letting callers target per-object options.
    """
    prefix = name + '_'
    merged = dict(kw, level=level)
    merged.update((key[len(prefix):], value)
                  for key, value in kwin.items()
                  if key.startswith(prefix))
    return merged
#--------------------------------------------------------------
# Clusters
#--------------------------------------------------------------
from CaloD3PDMaker.ClusterD3PDObject import ClusterD3PDObject
if minbiasD3PDflags.doClusterHad():
# define clusters
from CaloD3PDMaker import ClusterMomentFillerTool as CMFT
myMoments = [ CMFT.CENTER_LAMBDA, 'center_lambda',
CMFT.LATERAL, 'lateral',
CMFT.LONGITUDINAL, 'longitudinal',
CMFT.ISOLATION, 'isolation',
CMFT.SIGNIFICANCE, 'significance',
CMFT.CELL_SIGNIFICANCE, 'cellSignificance',
CMFT.CELL_SIG_SAMPLING, 'cellSigSampling'
]
d3pdalg += ClusterD3PDObject (**_args(0, 'CaloCalTopoCluster', kw,prefix='cl_had_',
include='Moments',
Kinematics_WriteE=True,Moments_Moments=myMoments))
if minbiasD3PDflags.doClusterEM():
d3pdalg += ClusterD3PDObject (**_args(0, 'CaloCalTopoCluster', kw,prefix='cl_em_',
Kinematics_WriteE=True,Kinematics_SignalState=0))
#--------------------------------------------------------------
# Alfa
#--------------------------------------------------------------
if globalflags.DataSource == "data":
IOVDbSvc = Service( "IOVDbSvc" )
from IOVDbSvc.CondDB import conddb
####for other possible servers see dbreplica.config in Athena
# #installation
# IOVDbSvc.dbConnection="oracle://ATLAS_COOLPROD;schema=ATLAS_COOLOFL_DCS;dbname=COMP200"
if not conddb.folderRequested('/RPO/DCS/BLM'):
conddb.addFolder("DCS_OFL","/RPO/DCS/BLM")
if not conddb.folderRequested('/RPO/DCS/FECONFIGURATION'):
conddb.addFolder("DCS_OFL","/RPO/DCS/FECONFIGURATION")
if not conddb.folderRequested('/RPO/DCS/HVCHANNEL'):
conddb.addFolder("DCS_OFL","/RPO/DCS/HVCHANNEL")
if not conddb.folderRequested('/RPO/DCS/LOCALMONITORING'):
conddb.addFolder("DCS_OFL","/RPO/DCS/LOCALMONITORING")
if not conddb.folderRequested('/RPO/DCS/MOVEMENT'):
conddb.addFolder("DCS_OFL","/RPO/DCS/MOVEMENT")
if not conddb.folderRequested('/RPO/DCS/RADMON'):
conddb.addFolder("DCS_OFL","/RPO/DCS/RADMON")
if not conddb.folderRequested('/RPO/DCS/TRIGGERRATES'):
conddb.addFolder("DCS_OFL","/RPO/DCS/TRIGGERRATES")
if not conddb.folderRequested('/RPO/DCS/TRIGGERSETTINGS'):
conddb.addFolder("DCS_OFL","/RPO/DCS/TRIGGERSETTINGS")
if jobproperties.Rec.doAlfa:
from ForwardDetectorsD3PDMaker.AlfaD3PDObject import AlfaD3PDObject
# d3pdalg += EventInfoD3PDObject(10)
d3pdalg += AlfaD3PDObject(0)
from ForwardDetectorsD3PDMaker import AlfaEventHeaderFillerTool
if globalflags.DataSource == "data":
AlfaEventHeaderFillerTool.DataType = 1
from ForwardDetectorsD3PDMaker.AlfaDCSMetadata import addAlfaDcsMetadata
addAlfaDcsMetadata(d3pdalg)
elif globalflags.DataSource == "geant4":
AlfaEventHeaderFillerTool.DataType = 0
MinBiasD3PD( MinBiasD3PDStream )
| rushioda/PIXELVALID_athena | athena/PhysicsAnalysis/D3PDMaker/MinBiasD3PDMaker/share/MinBiasD3PD_prodJobOFragment.py | MinBiasD3PD_prodJobOFragment.py | py | 14,918 | python | en | code | 1 | github-code | 13 |
# Binary operators tried when combining two numbers in the Countdown game.
operations = ['+', '-', '*', '/']
class Number:
    """A value in a Countdown-style numbers game together with the
    human-readable arithmetic steps that produced it.

    ``steps`` is '' for one of the original starting numbers.
    """

    def __init__(self, num, steps):
        self.value = num    # current numeric value
        self.steps = steps  # derivation string, '' for a starting number

    def getValue(self):
        """Return the numeric value."""
        return self.value

    def getSteps(self):
        """Return the derivation string ('' for a starting number)."""
        return self.steps

    def calculate(self, rhs_Num, operator):
        """Combine this number with *rhs_Num* using *operator*.

        Returns a new Number carrying the combined value and derivation,
        or None when the result is invalid for the game: division by
        zero (previously crashed with ZeroDivisionError), a value below
        1, or a non-integer result.

        Raises KeyError for an unknown operator (as before).
        """
        rhs = rhs_Num.getValue()
        if operator == '/' and rhs == 0:
            # Bug fix: the original evaluated the division first and crashed.
            return None
        result = {
            '+': lambda a, b: a + b,
            '-': lambda a, b: a - b,
            '*': lambda a, b: a * b,
            '/': lambda a, b: a / b,
        }[operator](self.value, rhs)
        # Game rule: only positive integers >= 1 are playable values.
        if result < 1 or not (result - int(result)) == 0:
            return None
        # Parenthesize any operand that is itself a derived expression.
        lhs_part = '(' + self.steps + ')' if self.steps != '' else str(self.value)
        rhs_part = '(' + rhs_Num.getSteps() + ')' if rhs_Num.getSteps() != '' else str(rhs)
        return Number(result, lhs_part + operator + rhs_part)
def getNums(nums):
    """Wrap each raw value in a Number with an empty derivation string."""
    return [Number(value, '') for value in nums]
def solve(target, numbers):
    """Depth-first search for a way to combine *numbers* into *target*.

    Tries every ordered pair of Numbers with every operator; on a partial
    result, recurses with the pair replaced by their combination.
    Returns the derivation string of the first solution found in search
    order, or None when no combination reaches the target.
    """
    for index, left in enumerate(numbers):
        others = numbers[:index] + numbers[index + 1:]
        for right in others:
            for op in operations:
                combined = left.calculate(right, op)
                if combined is None:
                    continue  # invalid intermediate value, skip
                if combined.getValue() == target:
                    return combined.getSteps()
                if len(numbers) > 2:
                    # Recurse with left and right replaced by their combination.
                    narrowed = others[:]
                    narrowed.remove(right)
                    narrowed.append(combined)
                    deeper = solve(target, narrowed)
                    if deeper is not None:
                        return deeper
    return None
def getSolution(target, nums):
    """Return '<target> = <derivation>' for the Countdown puzzle.

    nums is a list of the available starting integers.  Previously this
    crashed with TypeError ('str' + None) when no solution existed; it
    now returns an explicit 'no solution' message instead.
    """
    numbers = getNums(nums)
    solution = solve(target, numbers)
    if solution is None:
        return str(target) + ' = no solution found'
    return str(target) + ' = ' + solution
| Renu-R/Countdown-numbers | numbers_solver.py | numbers_solver.py | py | 2,259 | python | en | code | 0 | github-code | 13 |
1027838379 | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
import time
def find_create_post(driver):
    """Scroll to the page footer, then open the 'Create New Post' page."""
    page_footer = driver.find_element(By.CLASS_NAME, "border-top")
    driver.execute_script("arguments[0].scrollIntoView();", page_footer)
    create_link = driver.find_element(By.LINK_TEXT, "Create New Post")
    time.sleep(1)  # give the scroll a moment to settle before clicking
    create_link.click()
def populate_data(driver,number):
    """Fill in the new-post form: title, subtitle, image URL and body text.

    driver -- WebDriver already on the 'Create New Post' page.
    number -- integer appended to the title so each generated post is unique.
    """
    title = driver.find_element(By.ID, "title")
    title.send_keys(f"Hello world{number}")
    subtitle = driver.find_element(By.ID, "subtitle")
    subtitle.send_keys("Hello world3")
    img_url = driver.find_element(By.ID, "img_url")
    img_url.send_keys("https://upload.wikimedia.org/wikipedia/commons/thumb/b/b6/Gutenberg_Bible%2C_Lenox_Copy%2C_New_York_Public_Library%2C_2009._Pic_01.jpg/640px-Gutenberg_Bible%2C_Lenox_Copy%2C_New_York_Public_Library%2C_2009._Pic_01.jpg")
    # Wait for CKEditor to be fully loaded
    # (on success this leaves the driver focused inside the editor iframe)
    wait_for_ckeditor(driver)
    # Find the CKEditor textarea element
    ckeditor_textarea = driver.find_element(By.CSS_SELECTOR, "body.cke_editable")
    # Clear any default content (if present)
    ckeditor_textarea.send_keys(Keys.CONTROL + "a") # Select all existing text
    ckeditor_textarea.send_keys(Keys.DELETE) # Delete selected text
    # Send your desired text to the CKEditor
    ckeditor_textarea.send_keys(text)  # module-level Lorem-ipsum body text
    driver.switch_to.default_content()  # leave the CKEditor iframe
def submit_data(driver):
    """Wait for the submit button, scroll it into view, and click it."""
    wait = WebDriverWait(driver, 10)
    button = wait.until(EC.element_to_be_clickable((By.ID, "submit")))
    driver.execute_script("arguments[0].scrollIntoView();", button)
    time.sleep(1)  # allow the scroll animation to finish
    button.click()
def login(driver):
    """Open the blog and sign in with the admin account.

    SECURITY NOTE(review): credentials are hard-coded in source; move them
    to environment variables or a config file.
    """
    driver.get("https://angelas-blog2.onrender.com")
    # ... your login code ...
    time.sleep(1)
    driver.get("https://angelas-blog2.onrender.com")
    # NOTE(review): the local name `login` shadows this function inside its
    # own body — harmless here, but worth renaming.
    login = driver.find_element(By.ID, "login")
    login.click()
    email = driver.find_element(By.ID, "email")
    email.send_keys("admin@email.com")
    print("sent email")
    password = driver.find_element(By.ID, "password")
    password.send_keys("samme")
    time.sleep(2)
    driver.implicitly_wait(5)
    login2 = driver.find_element(By.ID, "submit")
    # Scroll so the submit button is in the viewport before clicking.
    driver.execute_script("arguments[0].scrollIntoView();", login2)
    time.sleep(2)
    login2.click()
    print("Logged in")
# Filler body text typed into every generated post.
text = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
# Raw string: the original "C:\development\..." only worked because \d is not
# a recognised escape; r"..." keeps the identical value while silencing the
# invalid-escape-sequence DeprecationWarning.
path = r"C:\development\chromedriver.exe"
def wait_for_ckeditor(driver):
    """Switch into the CKEditor iframe and wait until its editable body exists.

    NOTE: on success the driver is left focused on the iframe; callers must
    switch back with driver.switch_to.default_content() when done.
    """
    try:
        WebDriverWait(driver, 10).until(
            EC.frame_to_be_available_and_switch_to_it((By.CSS_SELECTOR, "iframe.cke_wysiwyg_frame"))
        )
        WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "body.cke_editable"))
        )
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; the timeout is still reported.
        print("CKEditor not fully loaded.")
def main():
    """Log in to the blog, then create posts numbered 7..39 in a loop."""
    # NOTE(review): executable_path is deprecated in Selenium 4 — confirm the
    # installed selenium version before upgrading.
    driver = webdriver.Chrome(executable_path=path)
    login(driver)
    # Wait for the new page to load completely
    time.sleep(2)
    for i in range(7,40):
        find_create_post(driver)
        populate_data(driver,i)
        submit_data(driver)
        print(f"the code has run {i} times")
    # Now you can interact with elements on the new page without the stale element issue
    # Fill in the title and subtitle
    time.sleep(10)
    driver.quit()
if __name__ == "__main__":
    main()
| Bigsamme/Angelas-Blog | fun/main.py | main.py | py | 3,982 | python | en | code | 0 | github-code | 13 |
21721276597 | # this code will accept an input string and check if it is a plandrome
# it will then return true if it is a plaindrome and false if it is not
def reverse(str1):
    """Return *str1* reversed.

    Rewritten from the original character-by-character recursion, which
    was O(n^2) and raised RecursionError on long inputs, to a single
    O(n) slice.
    """
    return str1[::-1]
# Read a string from the user, show its reverse, and report whether the
# original reads the same forwards and backwards.
string = input("Please enter your own String : ")
# check for strings
str1 = reverse(string)
print("String in reverse Order : ", str1)
# A palindrome equals its own reverse (comparison is case-sensitive).
if(string == str1):
    print("This is a Palindrome String")
else:
    print("This is Not a Palindrome String")
| jmusila/simple-logic-tests | palindrome/is_palindrome.py | is_palindrome.py | py | 513 | python | en | code | 0 | github-code | 13 |
40927726143 | import pandas as pd
from lib.exp.summary import Summary
from lib.exp.evaluator.slide_coverage import SlideCoverage as Scov
class _Scov(Scov):
    """Slide/switch coverage evaluator.

    NOTE(review): this is Python 2 code — it relies on the builtin
    ``reduce``, list-returning ``map``/``filter``, and the (long
    deprecated) ``DataFrame.append``; porting to Python 3 requires
    changes throughout.
    """
    def __init__(self, gnd, pre_ns=None, pre_ws=None):
        """
        gnd: ground-truth provider; must support load("segments"), .root, .name
        pre_ns: preprocessing number of slides (defaults to slides in summary)
        pre_ws: preprocessing number of switches (defaults to len(self.gseg))
        """
        Scov.__init__(self, gnd.load("segments"))
        self.__get_sn(gnd)
        # Falsy pre_ns/pre_ws (including 0) fall back to the defaults.
        self.prens = pre_ns if pre_ns else self.ns
        self.prews = pre_ws if pre_ws else len(self.gseg)
    def __get_sn(self, gnd):
        # Look up the number of slides for this dataset from the summary table.
        su = Summary()
        sin = su.info(gnd.root, gnd.name)
        self.ns = sin.n_slides
    def __cov_base(self, pdf, key):
        """Count distinct positive, non-null values of *key* in *pdf* (as float)."""
        if len(pdf) == 0:
            return 0
        fdf = pdf[pdf[key].notnull() & (pdf[key] > 0)]
        ks = [gi for gi, gd in fdf.groupby(key)]
        # *1. forces float division later (Python 2 integer division).
        return len(ks)*1.
    def __switch_cov(self, pdf):
        # Switch coverage: distinct hit segments over total / preprocessed segments.
        fs = self.__cov_base(pdf, "hit_seg_id")
        return dict(sws=fs, swc=fs/len(self.gseg), fix_swc=fs/self.prews)
    def __slide_cov(self, pdf):
        # Slide coverage: distinct slide ids over total / preprocessed slides.
        fs = self.__cov_base(pdf, "sid")
        return dict(sls=fs, slc=fs/self.ns, fix_slc=fs/self.prens)
    def __covs(self, df, appkey):
        """Coverage dict for one answer column; *appkey* is '<name>_ans'."""
        akn = appkey[:-4]  # strip the '_ans' suffix to get the data column
        fdf = df[df[appkey] == 0]  # keep rows the checker accepted (ans == 0)
        pdf = pd.DataFrame(map(self._mk_xdict, fdf.fid, fdf[akn]))
        dk = dict(key=akn)
        dk.update(self.__slide_cov(pdf))
        dk.update(self.__switch_cov(pdf))
        return dk
    def __each_cov(self, key, df):
        """One coverage row per '*ans' column of *df*, tagged with config *key*."""
        aks = filter(lambda x: "ans" in x, df.columns)
        sext = lambda x: self.__covs(df, x)
        ss = map(sext, aks)
        df = pd.DataFrame(ss)
        df["config"] = key
        return df
    def compute(self, plist):
        """Concatenate per-config coverage tables; plist is [(key, df), ...]."""
        bs = [self.__each_cov(*pack) for pack in plist]
        rdf = reduce(lambda x, y: x.append(y), bs)
        rdf = rdf.reset_index(drop=1)
        return rdf
| speed-of-light/pyslider | lib/exp/evaluator/xframes/scov.py | scov.py | py | 1,866 | python | en | code | 2 | github-code | 13 |
def main():
    """Print the winner of ABC245-A.

    Aoki wins exactly when the pair (A, B) is lexicographically greater
    than (C, D); otherwise Takahashi wins.
    """
    winner = "Aoki" if (A, B) > (C, D) else "Takahashi"
    print(winner)
if __name__ == '__main__':
    # Read the four integers A B C D from one line of stdin, then decide.
    A, B, C, D = map(int, input().split())
    main()
| Shirohi-git/AtCoder | abc241-/abc245_a.py | abc245_a.py | py | 197 | python | en | code | 2 | github-code | 13 |
40345978584 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The kv_seek_account_history command allows to query the KV 'History of Accounts' table."""
import argparse
import context # pylint: disable=unused-import
from silksnake.helpers.dbutils import tables
from silksnake.remote import kv_metadata
from silksnake.remote import kv_utils
from silksnake.remote.kv_remote import DEFAULT_TARGET
def kv_seek_account_history(account_address: str, block_number: int, target: str = DEFAULT_TARGET):
    """ Search for the provided account address in KV 'History of Accounts' table.

    Walks both the account-history and storage-history tables from the key
    encoded for (account_address, block_number), printing each key/value
    pair as hex.
    """
    account_history_key = kv_metadata.encode_account_history_key(account_address, block_number)
    # One shared walker; the original defined the same lambda twice.
    walker = lambda key, value: print('key:', key.hex(), 'value:', value.hex())

    print('REQ1 account_address:', account_address, '(key: ' + str(account_history_key.hex()) + ')')
    print('RSP1 account history: [')
    kv_utils.kv_walk(target, tables.ACCOUNTS_HISTORY_LABEL, account_history_key, walker)
    print(']')

    print('REQ2 account_address:', account_address, '(key: ' + str(account_history_key.hex()) + ')')
    print('RSP2 storage history: [')
    kv_utils.kv_walk(target, tables.STORAGE_HISTORY_LABEL, account_history_key, walker)
    print(']')
if __name__ == '__main__':
    # CLI: positional account address and block number, optional server target.
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('account_address', help='the account address as hex string (w or w/o 0x prefix)')
    parser.add_argument('block_number', help='the block number as integer')
    parser.add_argument('-t', '--target', default=DEFAULT_TARGET, help='the server location as string <address>:<port>')
    args = parser.parse_args()
    kv_seek_account_history(args.account_address, int(args.block_number), args.target)
| torquem-ch/silksnake | tools/kv_seek_account_history.py | kv_seek_account_history.py | py | 1,827 | python | en | code | 3 | github-code | 13 |
41632729325 | # Core Pkgs
import streamlit as st
import plotly.express as px
# sklearn version = 0.24.2
px.defaults.template='plotly_dark'
px.defaults.color_continuous_scale='reds'
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# EDA Pkgs
import pandas as pd
import numpy as np
import seaborn as sns
st.set_option('deprecation.showPyplotGlobalUse', False)
# Utils
import joblib
# Image
from PIL import Image
# Page-wide Streamlit configuration (title, icon, wide layout).
st.set_page_config(page_title='DATA PELATIHAN JARAH JAUH 2021 DAN 2022',
                   page_icon=':bar_chart:',
                   layout='wide')
# Read in data from the Google Sheet.
# Uses st.cache_data to only rerun when the query changes or after 10 min.
@st.cache_data(ttl=600)
def load_data(sheets_url):
    """Download a Google Sheet as CSV and return it as a DataFrame.

    sheets_url must be an '/edit#gid=' share link; it is rewritten to the
    CSV export endpoint for the same gid.
    """
    csv_url = sheets_url.replace("/edit#gid=", "/export?format=csv&gid=")
    return pd.read_csv(csv_url)
# The sheet URL lives in Streamlit secrets, not in source.
df = load_data(st.secrets["public_gsheets_url"])
# Planned dashboard sections (translated from Indonesian):
# plan (name + date + day + JP + planned participants + planned JP)
# actual participants (per echelon-1 unit + total)
# actual instructor hours "jamlator" (per echelon-1 unit + total)
# delivery evaluation (material suitability / kesesuaian materi)
# learning-outcome evaluation (participation status)
# actual jamlator (per echelon-1 unit + total) PLAN VS ACTUAL
# Sidebar filters: year, month, course name, cohort (angkatan).
tahun = st.sidebar.multiselect(
    "Pilih Tahun:",
    options=df["TAHUN"].unique(),
    default=df["TAHUN"].unique()
)
bulan = st.sidebar.multiselect(
    "Pilih Bulan:",
    options=df["NAMA BULAN"].unique(),
    default=df["NAMA BULAN"].unique()
)
nama = st.sidebar.multiselect(
    "Pilih Nama Pelatihan:",
    options=df["NAMA"].unique(),
    default=df["NAMA"].unique()
)
angkatan = st.sidebar.multiselect(
    "Pilih Angkatan:",
    options=df["ANGKATAN"].unique(),
    default=df["ANGKATAN"].unique()
)
# Apply all four filters at once; @name refers to the Python lists above.
df_selection = df.query(
    "TAHUN ==@tahun & `NAMA BULAN` == @bulan & NAMA == @nama & ANGKATAN ==@angkatan"
)
# ---- MAINPAGE ----
# KPI header: totals over the filtered selection.
# NOTE(review): tahun_picked is never used afterwards — candidate for removal.
tahun_picked = (df_selection['TAHUN'].sum())
st.title("Data Pelatihan Pusat Pendidikan dan Pelatihan Kepemimpinan dan Manajerial")
total_rencana = int(df_selection['RENCANA PESERTA'].sum())
total_peserta = int(df_selection['TOTAL REALISASI PESERTA'].sum())
total_jamlator = int(df_selection['TOTAL JAMLATOR'].sum())
# Mean material-suitability score, rendered as 0-5 stars below.
rerata_seseusaian_materi = round(df_selection['KESESUAIAN MATERI'].mean(), 2)
indeks_bintang =':star:' * int(round(rerata_seseusaian_materi, 0))
col1, col2, col3, col4 = st.columns(4)
with col1:
    st.subheader('RENCANA PESERTA')
    st.subheader(f"{total_rencana:,}")
with col2:
    st.subheader('TOTAL REALISASI PESERTA')
    st.subheader(f"{total_peserta:,}")
with col3:
    st.subheader('TOTAL JAMLATOR')
    st.subheader(f"{total_jamlator:,}")
with col4:
    st.subheader('KESESUAIAN MATERI')
    st.subheader(f"{rerata_seseusaian_materi:,} {indeks_bintang}")
st.markdown("""---""")
# Detail table for the filtered selection.
st.dataframe(df_selection[['NAMA PELATIHAN', 'TAHUN', 'KESESUAIAN MATERI', 'HARI', 'JP', 'RENCANA PESERTA', 'RENCANA JAMLATOR', 'TELAH MENGIKUTI/LULUS', 'TIDAK MEMENUHI SYARAT', 'MENGUNDURKAN DIRI', 'TIDAK MENGIKUTI', 'TOTAL REALISASI PESERTA', 'PERSENTASE KEIKUTSERTAAN', 'TOTAL JAMLATOR']])
#RENCANA VS REALISASI PESERTA PER PELATIHAN
# NOTE(review): after groupby(...).sum() the group key becomes the INDEX, so
# passing x=['BULAN'] / x=['NAMA'] to the chart calls may not resolve to a
# column — verify these charts render against the deployed Streamlit version.
st.subheader('RENCANA VS REALISASI PESERTA PER PELATIHAN')
realisasi_peserta_by_nama = (
    df_selection.groupby(by=['BULAN']).sum()[['RENCANA PESERTA', 'TOTAL REALISASI PESERTA']].sort_values(by='BULAN')
)
st.dataframe(realisasi_peserta_by_nama)
st.line_chart(data=realisasi_peserta_by_nama, x=['BULAN'], y=['RENCANA PESERTA', 'TOTAL REALISASI PESERTA'], width=0, height=0, use_container_width=True)
st.subheader('RENCANA VS REALISASI JAMLATOR PER PELATIHAN')
realisasi_jamlator_by_nama = (
    df_selection.groupby(by=['NAMA']).sum()[['RENCANA JAMLATOR', 'TOTAL JAMLATOR']].sort_values(by='NAMA')
)
st.dataframe(realisasi_jamlator_by_nama)
# NOTE(review): grouped by NAMA but charted with x=['BULAN'] — likely a typo.
st.line_chart(data=realisasi_jamlator_by_nama, x=['BULAN'], y=['RENCANA JAMLATOR', 'TOTAL JAMLATOR'], width=0, height=0, use_container_width=True)
#st.subheader('PERSENTASE KEIKUTSERTAAN DAN REALISASI JAMLATOR')
#persentase_keikutsertaan_by_bulan = (
#round(df_selection.groupby(by=['BULAN']).mean()[['PERSENTASE KEIKUTSERTAAN']].sort_values(by='BULAN'), 2)
#)
#st.dataframe(persentase_keikutsertaan_by_bulan)
#st.bar_chart(data=persentase_keikutsertaan_by_bulan, x=['BULAN'], y=['PERSENTASE KEIKUTSERTAAN'], width=0, height=0, use_container_width=True)
st.subheader('INDEKS KESESUAIAN MATERI')
kesesuaian_materi = (
    round(df_selection.groupby(by=['BULAN']).mean()[['KESESUAIAN MATERI']].sort_values(by='BULAN'), 2)
)
st.dataframe(kesesuaian_materi)
st.bar_chart(data=kesesuaian_materi, x=['BULAN'], y=['KESESUAIAN MATERI'], width=0, height=0, use_container_width=True)
st.subheader('PESERTA PER STATUS')
# NOTE(review): the name peserta_per_unit is reused for three different
# tables below — consider distinct names for readability.
peserta_per_unit = (
    df_selection.groupby(by=['NAMA']).sum()[['TELAH MENGIKUTI/LULUS', 'TIDAK MEMENUHI SYARAT', 'MENGUNDURKAN DIRI', 'TIDAK MENGIKUTI']].sort_values(by='NAMA')
)
st.dataframe(peserta_per_unit)
st.line_chart(data=peserta_per_unit, x=['NAMA'], y=['TELAH MENGIKUTI/LULUS', 'TIDAK MEMENUHI SYARAT', 'MENGUNDURKAN DIRI', 'TIDAK MENGIKUTI'], width=0, height=0, use_container_width=True)
st.subheader('PESERTA PER UNIT')
peserta_per_unit = (
    df_selection.groupby(by=['NAMA']).sum()[['PESERTA SETJEN', 'PESERTA ITJEN', 'PESERTA DJA', 'PESERTA DJP', 'PESERTA DJBC', 'PESERTA DJPb', 'PESERTA DJPK', 'PESERTA DJKN', 'PESERTA DJPPR', 'PESERTA BKF', 'PESERTA BPPK', 'PESERTA LNSW', 'PESERTA KSSK']].sort_values(by='NAMA')
)
st.dataframe(peserta_per_unit)
st.area_chart(data=peserta_per_unit, x=['NAMA'], y=['PESERTA SETJEN', 'PESERTA ITJEN', 'PESERTA DJA', 'PESERTA DJP', 'PESERTA DJBC', 'PESERTA DJPb', 'PESERTA DJPK', 'PESERTA DJKN', 'PESERTA DJPPR', 'PESERTA BKF', 'PESERTA BPPK', 'PESERTA LNSW', 'PESERTA KSSK'], width=0, height=0, use_container_width=True)
st.subheader('PESERTA PER JAMLATOR')
peserta_per_unit = (
    df_selection.groupby(by=['NAMA']).sum()[['JAMLATOR SETJEN', 'JAMLATOR ITJEN', 'JAMLATOR DJA', 'JAMLATOR DJP', 'JAMLATOR DJBC', 'JAMLATOR DJPb', 'JAMLATOR DJPK', 'JAMLATOR DJKN', 'JAMLATOR DJPPR', 'JAMLATOR BKF', 'JAMLATOR BPPK', 'JAMLATOR LNSW', 'JAMLATOR KSSK']].sort_values(by='NAMA')
)
st.dataframe(peserta_per_unit)
st.line_chart(data=peserta_per_unit, x=['NAMA'], y=['JAMLATOR SETJEN', 'JAMLATOR ITJEN', 'JAMLATOR DJA', 'JAMLATOR DJP', 'JAMLATOR DJBC', 'JAMLATOR DJPb', 'JAMLATOR DJPK', 'JAMLATOR DJKN', 'JAMLATOR DJPPR', 'JAMLATOR BKF', 'JAMLATOR BPPK', 'JAMLATOR LNSW', 'JAMLATOR KSSK'], width=0, height=0, use_container_width=True)
st.subheader('RENCANA VS REALISASI PESERTA PER TAHUN')
# Year-level comparison uses the UNFILTERED frame `df` on purpose? — verify.
realisasi_peserta_by_tahun = (
    df.groupby(by=['TAHUN']).sum()[['RENCANA PESERTA', 'TOTAL REALISASI PESERTA']].sort_values(by='TAHUN')
)
st.dataframe(realisasi_peserta_by_tahun, width=1360)
st.bar_chart(data=realisasi_peserta_by_tahun, x=['TAHUN'], y=['RENCANA PESERTA', 'TOTAL REALISASI PESERTA'], width=0, height=0, use_container_width=True)
38838298232 | # coding=utf-8
from __future__ import absolute_import
import octoprint.plugin
import subprocess
import re
CONTROL_RE = re.compile(r'^\s*(\S+)\s*\((\S+)\)\s*:(.*)$')
MENU_RE = re.compile('^\s*(\S+)\s*:(.*)$')
class WebcamSettingsPlugin(octoprint.plugin.StartupPlugin,
octoprint.plugin.TemplatePlugin):
@staticmethod
def _try_parse_int(val):
try:
return int(val)
except ValueError:
return val
def _load_controls(self):
output = subprocess.check_output(['v4l2-ctl', '-L'])
controls = {}
last_control = None
for line in output.splitlines():
parsed_control = CONTROL_RE.match(line)
if parsed_control:
name, kind, settings = parsed_control.groups()
settings = dict(s.split('=', 1) for s in settings.split())
settings = {k: self._try_parse_int(v) for k, v in settings.iteritems()}
controls[name] = {'kind': kind, 'settings': settings}
if kind == 'menu':
controls[name]['menu'] = {}
last_control = controls[name]
continue
parsed_menu = MENU_RE.match(line)
if parsed_menu and last_control:
value, meaning = parsed_menu.groups()
last_control['menu'][int(value)] = meaning
self._logger.info("Detected %d webcam controls", len(controls))
return controls
def _calc_values(self, name):
control = self._controls[name]
if control['kind'] == 'menu':
return control['menu'].keys()
elif control['kind'] == 'bool':
return [0, 1]
elif control['kind'] == 'int':
return range(control['min'], control['max'], control['step'])
else:
return []
def _set_control(self, name, value):
assert name in self._controls
assert value in self._calc_values(name)
subprocess.check_call(['v4l2-ctl', '-C', '%s=%s' % (name, value)])
def on_after_startup(self):
self._controls = self._load_controls()
__plugin_name__ = 'Webcam Settings'
__plugin_implementation__ = WebcamSettingsPlugin()
| rryk/OctoPrint-Webcam-Settings | octoprint_webcam_settings/__init__.py | __init__.py | py | 2,008 | python | en | code | 0 | github-code | 13 |
5840019726 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import numpy as np
from matplotlib import pyplot as plt
def show_image(image):
plt.imshow(image)
plt.show()
img_rgb = cv2.imread('fuf.png')
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread('out.png', 0)
height, width = template.shape[::]
res = cv2.matchTemplate(img_gray, template, cv2.TM_SQDIFF)
plt.imshow(res, cmap='gray')
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
top_left = min_loc #Change to max_loc for all except for TM_SQDIFF
bottom_right = (top_left[0] + width, top_left[1] + height)
cv2.rectangle(img_rgb, top_left, bottom_right, (255, 0, 0), 2)
show_image(img_rgb)
| fr1ht/WindowCensor | ipExample/cv2_matching.py | cv2_matching.py | py | 692 | python | en | code | 0 | github-code | 13 |
38305261279 | import requests
import datetime
# dict with the location name as keys and the swiss1903 coordinates as values
locations = {"Rapperswil OST Campus": (704301, 231052),
"Rapperswil Seebad": (704077, 231654),
"Schmerikon Badi": (714163, 231433),
"Insel Lützelau Nordost": (703019, 23235),
"Zürich Seebad Utoquai": (683598, 246245),
"Strandbad Meilen": (691516, 235727),
"Lachen SZ": (706947, 228423),
"Noulen SZ": (709651, 229673)
}
def templist(url):
"""converts the url response to a list containing only the temperatures"""
# get the data and save as string
data_string = requests.get(url).content.decode("utf8")
# convert string to list and remove first element representing depth
data_list = data_string[data_string.find("\n")+1:].split(",")
data_list.pop(0)
# convert strings in list to float
float_list = [float(x) for x in data_list]
return float_list
def file_scanner():
"""Returns a dictionary with location name as key and a string of water temperatures as value.
Locations and values read in from text file "tempData.txt" """
data_dict = dict()
with open("tempData.txt") as file:
for line in file: # iteration line by line
data_string = line.rstrip() # reads line into string
data_list = data_string.split(",") # splits string into substrings after every comma
data_dict[data_list[0]] = list(float(x) for x in data_list[1:-1]) # adds element to dictionary:
# 1. substring as key, others as float numbers
return data_dict
def dict_printer(data_dict):
""""Takes a dictionary and prints it to the console: Location: Value (actual time)"""
for key in data_dict:
print(f"{key+':':<25} {data_dict[key][int(datetime.datetime.now().hour/3)]:>}")
def timestamp():
import datetime
# get the current GTM time zone date and time
current_date_gtm = datetime.datetime.utcnow()
# get the current local time zone date and time (for future use)
# current_date = datetime.datetime.now()
# convert current GTM date to string with current time at 00:00:00
string_date_start = current_date_gtm.strftime("%Y-%m-%d 00:00:00")
# convert current GTM date to string with current time at 21:00:00
string_date_end = current_date_gtm.strftime("%Y-%m-%d 21:00:00")
#convert day start string into object
date_object_start = datetime.datetime.strptime(string_date_start, "%Y-%m-%d %H:%M:%S")
# convert day end string into object
date_object_end = datetime.datetime.strptime(string_date_end, "%Y-%m-%d %H:%M:%S")
#Multiply the timestamp of the datetime object day start by 1'000 to convert into millisec and round to remove .0
millisecstart = round(date_object_start.timestamp() * 1000)
# Multiply the timestamp of the datetime object day end by 1'000 to convert into millisec and round to remove .0
millisecend = round(date_object_end.timestamp() * 1000)
return millisecstart, millisecend
# API URL
testurl = "http://meteolakes.ch/api/coordinates/534700/144950/geneva/temperature/1537034400000/1537768800000/20"
temp_list = templist(testurl)
print(temp_list)
| ElectryFresh/WCIE | main.py | main.py | py | 3,307 | python | en | code | null | github-code | 13 |
71764524177 | from random import randint
from random import random
def roll():
val = randint(0,20)
return val
class Character(object):
def __init__(self, name, strength, armor, speed, health):
self.name = name
self.strength = strength
self.armor = armor
self.speed = speed
self.health = health
def attack(self):
to_hit = self.strength + roll()
return to_hit
def dodge(self):
to_dodge = random()
dodge_chance = 1 - self.speed
if to_dodge >= dodge_chance:
return True
else:
return False
orc = Character("orc",2, 14, 0, 20)
spider = Character("spider", 0, 13, 0.3, 10)
golem = Character("golem", 6, 12, 0, 40)
fighter = Character("fighter", 4, 16, 0, 35)
ranger = Character("ranger", 2, 14, .4, 25)
enemies = [orc, spider, golem]
jobs = [fighter, ranger]
champions = [orc, spider, golem, fighter, ranger]
def fight(c1, c2):
f1 = c1.name
f2 = c2.name
c1_health = c1.health
c2_health = c2.health
print(f"A {f1} and a {f2} square off!")
print(f"The {f1} attacks first!")
round = 0
victor = None
while c1_health > 0 and c2_health > 0:
strike_result = c1.attack() - c2.armor
strike_result_2 = c2.attack() - c1.armor
dodge_result_1 = c1.dodge()
dodge_result_2 = c2.dodge()
if round % 2 == 0:
if dodge_result_2 == True:
print(f"The {f1} swings but the {f2} dodges the attack!")
elif strike_result >= 0 and dodge_result_2 == False:
c2_health -= strike_result
print(f"The {f1} hits for {strike_result} damage! ({f2}: {c2_health}/{c2.health})")
round += 1
if c2_health <= 0:
victor = f1
else:
print(f"The {f1} misses!")
round += 1
else:
if dodge_result_1 == True:
print(f"The {f2} swings but the {f1} dodges the attack!")
elif strike_result_2 >= 0 and dodge_result_1 == False:
c1_health -= strike_result_2
print(f"The {f2} hits for {strike_result_2} damage! ({f1}: {c1_health}/{c1.health})")
round += 1
if c1_health <= 0:
victor = f2
else:
print(f"The {f2} misses!")
round += 1
if victor == f1:
print(f"The {f2} loses the fight. The {f1} is victorious!")
else:
print(f"The {f1} loses the fight. The {f2} is victorious!")
def startgame():
print("""
Welcome to the arena!
Here you can square off against any number of opponents.
But first, pick your champion:
- fighter
- ranger
- orc
- spider
- golem
""")
choice = input("> ")
champion = next((x for x in champions if x.name == choice))
print(f"You chose the {choice}.")
fight(champion, enemies[randint(0,2)])
print("Would you like to fight in the arena again?")
replay = input("> ")
if replay == "yes":
startgame()
else:
print("Bye... for now.")
startgame()
| rjfische/Python-Projects | first_game.py | first_game.py | py | 3,220 | python | en | code | 0 | github-code | 13 |
21675514312 | import sys
input = sys.stdin.readline
def backtracking(t, idx):
if len(t) == m:
answer.add(t)
return
for i in range(idx, n):
if not visited[i]:
visited[i] = 1
backtracking(t + (arr[i],), i+1)
visited[i] = 0
answer = set()
n, m = map(int, input().split())
arr = sorted(list(map(int, input().split())))
visited = [0] * n
backtracking(tuple(), 0)
for ans in sorted(answer):
print(*ans) | SangHyunGil/Algorithm | Baekjoon/baekjoon_15664(backtracking).py | baekjoon_15664(backtracking).py | py | 423 | python | en | code | 0 | github-code | 13 |
16707517506 | """
Written by Lorenzo Vainigli for the Facebook Hacker Cup 2020 Qualification Round
This program provides a correct solution for the following problem:
https://www.facebook.com/codingcompetitions/hacker-cup/2020/qualification-round/problems/A
"""
import time
filename = "travel_restrictions"
DEBUG = False
BIGINPUT = False
VALIDATION = False
if DEBUG:
if not BIGINPUT:
input_filename = filename + "_sample_input.txt"
output_filename = filename + "_sample_output.txt"
else:
input_filename = filename + "_sample_biginput.txt"
output_filename = filename + "_sample_bigoutput.txt"
elif VALIDATION:
input_filename = filename + "_validation_input.txt"
output_filename = filename + "_validation_output.txt"
else:
input_filename = filename + "_input.txt"
output_filename = filename + "_output.txt"
def execution_time(start_time):
finish_time = time.time()
diff = finish_time - start_time
min = int(diff/60)
diff = diff - min*60
sec = int(diff)
diff = diff - sec
ms = round(diff*1000, 3)
if min > 0:
return "Execution time: {} min, {} sec, {} ms.".format(min, sec, ms)
elif sec > 0:
return "Execution time: {} sec, {} ms.".format(sec, ms)
else:
return "Execution time: {} ms.".format(ms)
def solve(N, I, O):
    """Solve one Travel Restrictions case.

    Args:
        N: number of countries on the line.
        I: length-N string of 'Y'/'N' -- whether each country admits
           incoming flights.
        O: length-N string of 'Y'/'N' -- whether each country allows
           outgoing flights.

    Returns:
        The N x N reachability matrix as a string, each row preceded by a
        newline (matching the judge's expected output format).
    """
    possible_flights = [['-' for _ in range(N)] for _ in range(N)]
    # Seed the trivial cases: staying put is always possible, and a hop to a
    # direct neighbour needs an open departure (O) and an open arrival (I).
    for dep in range(N):
        for arr in range(N):
            if dep == arr:
                possible_flights[dep][arr] = 'Y'
            elif abs(arr - dep) == 1:
                if O[dep] == 'N' or I[arr] == 'N':
                    possible_flights[dep][arr] = 'N'
                else:
                    possible_flights[dep][arr] = 'Y'
    # A longer trip dep -> arr is possible iff every adjacent hop on the way
    # is.  all() short-circuits on the first blocked hop, instead of the
    # original's O(N) throwaway string per pair.
    for dep in range(N):
        for arr in range(N):
            if possible_flights[dep][arr] == '-':
                step = 1 if dep < arr else -1
                reachable = all(possible_flights[i][i + step] == 'Y'
                                for i in range(dep, arr, step))
                possible_flights[dep][arr] = 'Y' if reachable else 'N'
    return ''.join('\n' + ''.join(row) for row in possible_flights)
# Driver: read all cases from the chosen input file, solve each, and write
# the answers in "Case #k: ..." format to the output file.
in_data = []
out_data = []
start_time = time.time()
with open(input_filename, 'r') as fileinput:
    for line in fileinput:
        in_data.append(line.rstrip().split(" "))
    fileinput.close()  # redundant inside `with`, but harmless
# First line of the input is the case count; it is not needed because we
# simply consume records until the list is exhausted.
del in_data[0]
i = 0
case = 1
while i < len(in_data):
    print("Case " + str(case))
    # Each case is three lines: N, then the I string, then the O string.
    N = int(in_data[i][0])
    i = i + 1
    out_data.append("Case #" + str(case) + ": " + str(solve(N, in_data[i][0], in_data[i+1][0])))
    i = i + 2
    case = case + 1
with open(output_filename, 'w') as fileoutput:
    for line in out_data:
        fileoutput.write(line + "\n")
    fileoutput.close()  # redundant inside `with`, but harmless
print(execution_time(start_time))
| lorenzovngl/meta-hacker-cup | 2020/qualification_round/travel_restrictions/travel_restrictions.py | travel_restrictions.py | py | 3,326 | python | en | code | 1 | github-code | 13 |
37630221922 | from __future__ import print_function
import configparser
import traceback
import json
import pprint
import rospy
from std_msgs.msg import String, Bool
from heartbeat import Heartbeat
from itri_mqtt_client import ItriMqttClient
from ctrl_info02 import CtrlInfo02
from ctrl_info03 import CtrlInfo03
from can_checker import CanChecker
from pedcross_alert import PedCrossAlert
from system_load_checker import SystemLoadChecker
from action_emitter import ActionEmitter
from status_level import OK, WARN, ERROR, FATAL, STATUS_CODE_TO_STR
from sb_param_utils import get_vid
from issue_reporter import (IssueReporter, generate_issue_description,
generate_crash_description)
from timestamp_utils import get_timestamp_mot
# MQTT topics used to publish aggregated fail-safe status and readiness.
_MQTT_FAIL_SAFE_TOPIC = "/fail_safe"  # legacy topic -- to be removed in the future
_MQTT_FAIL_SAFE_STATUS_TOPIC = "vehicle/report/itri/fail_safe_status"
_MQTT_SYS_READY_TOPIC = "ADV_op/sys_ready"
def _overall_status(module_states):
return max(_["status"] for _ in module_states)
def _overall_status_str(module_states):
    """Summarize the names of modules whose status is not OK."""
    misbehaving = " ".join(state["module"] for state in module_states
                           if state["status"] != OK)
    return "Misbehaving modules: {}".format(misbehaving)
def aggregate_event_status(status, status_str, events):
    """Fold a list of event dicts into an overall (status, status_str) pair.

    Args:
        status(int) -- overall status of states
        status_str(str) -- aggregated strs of states
        events -- iterable of dicts with "status" and "status_str" keys
    Return:
        status(int) -- Highest level between |status| and |events|
        status_str(str) -- Aggregated status_str with |events|
    """
    for event in events:
        # Escalate to the worst level seen so far.
        if event["status"] > status:
            status = event["status"]
        if event["status"] == OK:
            continue
        # Append the description of every non-OK event, "; "-separated.
        if status_str:
            status_str = status_str + "; " + event["status_str"]
        else:
            status_str = event["status_str"]
    return status, status_str
class FailSafeChecker(object):
    """Aggregates health of vehicle modules/sensors and publishes the result.

    Reads per-module heartbeat configuration from ``heartbeat_ini``, polls
    each checker once per second (see :meth:`run`), escalates sustained
    WARN/ERROR states, and publishes the aggregate over both ROS topics and
    MQTT.  FATAL status forces ``sys_ready`` to False (stops self-driving).
    """
    def __init__(self, heartbeat_ini, mqtt_fqdn, mqtt_port):
        """Build all checkers, publishers and the MQTT client.

        heartbeat_ini -- path to the INI file describing monitored modules
        mqtt_fqdn, mqtt_port -- MQTT broker address
        """
        self.debug_mode = False
        self.vid = get_vid()  # vehicle id
        rospy.init_node("FailSafeChecker")
        rospy.logwarn("Init FailSafeChecker")
        cfg = configparser.ConfigParser()
        self.modules = {}
        cfg.read(heartbeat_ini)
        self.latched_modules = []
        # One Heartbeat monitor per INI section; FPS bounds and optional
        # message inspection come from the section's options.
        for module in cfg.sections():
            self.modules[module] = Heartbeat(
                module, cfg[module]["topic"],
                cfg[module].get("message_type", "Empty"),
                cfg[module].getfloat("fps_low"),
                cfg[module].getfloat("fps_high"),
                cfg[module].getboolean("inspect_message_contents"),
                cfg[module].getboolean("latch"),
                cfg[module].get("sensor_type", None),
                cfg[module].get("sensor_uid", None))
            enable = cfg[module].getboolean("enable", True)
            self.modules[module].set_enabled(enable)
            if cfg[module].getboolean("latch"):
                self.latched_modules.append(module)
        self.ctrl_info_03 = CtrlInfo03()
        self.ctrl_info_02 = CtrlInfo02()
        self.pedcross_alert = PedCrossAlert()
        self.system_load_checker = SystemLoadChecker()
        self.can_checker = CanChecker()
        self.issue_reporter = IssueReporter()
        self.mqtt_client = ItriMqttClient(mqtt_fqdn, mqtt_port)
        self.action_emitter = ActionEmitter()
        self.sensor_status_publisher = rospy.Publisher(
            "/vehicle/report/itri/sensor_status", String, queue_size=1000)
        self.fail_safe_status_publisher = rospy.Publisher(
            "/vehicle/report/itri/fail_safe_status", String, queue_size=1000)
        self.self_driving_mode_publisher = rospy.Publisher(
            "/vehicle/report/itri/self_driving_mode", Bool, queue_size=2)
        self.sys_ready_publisher = rospy.Publisher(
            "/ADV_op/sys_ready", Bool, queue_size=1000)
        # counters for warn, error states. When the counter reaches 10,
        # change the state into next level (warn->error, error->fatal)
        self.warn_count = 0
        self.error_count = 0
        self.seq = 1
    def _get_ego_speed(self):
        """Return the current vehicle speed as reported by veh_info."""
        return self.modules["veh_info"].get_ego_speed()
    def set_debug_mode(self, mode):
        """Enable/disable verbose pprint output in the polling loop."""
        self.debug_mode = mode
    def get_current_status(self):
        """Collect states from the components of the car.

        Returns a dict with "states", "events", "seq", "timestamp" plus the
        aggregated "status"/"status_str".  Also escalates sustained WARN
        (>10 polls while moving) to ERROR and sustained ERROR (>10 polls)
        to FATAL, and publishes sys_ready as a side effect.
        """
        ego_speed = self._get_ego_speed()
        ret = {"states": self.ctrl_info_03.get_status_in_list(),
               "events": self.ctrl_info_03.get_events_in_list(),
               "seq": self.seq,
               "timestamp": get_timestamp_mot()}
        self.seq += 1
        ret["states"] += self.can_checker.get_status_in_list()
        ret["states"] += [self.modules[_].to_dict() for _ in self.modules]
        # pedcross is still under heavy development
        ret["states"] += self.pedcross_alert.get_status_in_list()
        ret["states"] += self.system_load_checker.get_status_in_list()
        status = _overall_status(ret["states"])
        status_str = _overall_status_str(ret["states"])
        # Pedcross events only matter while the vehicle is moving.
        if ego_speed > 0:
            ret["events"] += self.pedcross_alert.get_events_in_list()
        # Both camera and lidar detection silent at once is unconditionally
        # fatal: the vehicle would be driving blind.
        if (self.modules["3d_object_detection"].get_fps() +
                self.modules["LidarDetection"].get_fps()) == 0:
            status = FATAL
            status_str += "; Cam/Lidar detection offline at the same time"
        if status == OK:
            self.warn_count = 0
            self.error_count = 0
        if status == WARN:
            self.warn_count += 1
        else:
            self.warn_count = 0
        if self.warn_count > 10 and ego_speed > 0:
            status = ERROR
            status_str += "; WARN states more than 10 seconds"
        if status == ERROR:
            self.error_count += 1
        else:
            self.error_count = 0
        if self.error_count > 10:
            status = FATAL
            status_str += "; ERROR states more than 10 seconds"
        status, status_str = aggregate_event_status(status, status_str, ret["events"])
        ret["status"] = status
        ret["status_str"] = status_str
        self._publish_sys_ready(status)
        return ret
    def _publish_sys_ready(self, status):
        """Publish system readiness on both ROS and MQTT (False iff FATAL)."""
        if status == FATAL:
            # force stop self-driving mode
            self.sys_ready_publisher.publish(False)
            self.mqtt_client.publish(_MQTT_SYS_READY_TOPIC, "0")
        else:
            self.sys_ready_publisher.publish(True)
            self.mqtt_client.publish(_MQTT_SYS_READY_TOPIC, "1")
    def _get_all_sensor_status(self):
        """Group per-sensor status docs by sensor type for this vehicle."""
        docs = {"vid": self.vid,
                "camera": [],
                "gps": [],
                "lidar": [],
                "radar": []}
        for mod_name in self.modules:
            module = self.modules[mod_name]
            # Modules without a sensor_type are software checkers, not sensors.
            if module.sensor_type is None:
                continue
            doc = module.get_sensor_status()
            docs[module.sensor_type].append(doc)
        return docs
    def is_self_driving(self):
        """True when the vehicle is currently in self-driving mode."""
        return self.ctrl_info_02.is_self_driving()
    def post_issue_if_necessary(self, current_status):
        """File at most one JIRA issue for the current non-OK status.

        Skipped entirely outside self-driving mode or when posting is
        disabled via the /fail_safe/should_post_issue ROS parameter.
        Event-level issues take precedence over the aggregate one.
        """
        if not self.is_self_driving():
            if current_status["status"] != OK:
                rospy.logwarn("Do not post issue in non-self-driving mode")
            return
        if not rospy.get_param("/fail_safe/should_post_issue", True):
            if current_status["status"] != OK:
                rospy.logwarn("Do not post issue due to /fail_safe/should_post_issue is False")
            return
        for doc in current_status["events"]:
            if doc["status"] != OK:
                summary = "[Auto Report] {}: {}".format(
                    doc["module"], doc["status_str"])
                desc = generate_issue_description(
                    doc["status"], doc["status_str"], current_status["timestamp"])
                self.issue_reporter.post_issue(summary, desc)
                return
        if current_status["status"] != OK:
            summary = "[Auto Report] {}".format(
                current_status["status_str"])
            desc = generate_issue_description(
                current_status["status"], current_status["status_str"], current_status["timestamp"])
            self.issue_reporter.post_issue(summary, desc)
    def __run(self):
        """One polling iteration: refresh, aggregate, report, publish."""
        for module in self.latched_modules:
            self.modules[module].update_latched_message()
        current_status = self.get_current_status()
        sensor_status = self._get_all_sensor_status()
        rospy.logwarn("status: %s -- %s",
                      STATUS_CODE_TO_STR[current_status["status"]],
                      current_status["status_str"])
        if self.debug_mode:
            # pprint.pprint(sensor_status)
            pprint.pprint(current_status)
        # Preserve evidence: back up the rosbag whenever something is wrong
        # while the vehicle drives itself.
        if current_status["status"] != OK and self.is_self_driving():
            self.action_emitter.backup_rosbag(current_status["status_str"])
        self.post_issue_if_necessary(current_status)
        current_status_json = json.dumps(current_status)
        self.mqtt_client.publish(_MQTT_FAIL_SAFE_TOPIC, current_status_json)
        self.mqtt_client.publish(_MQTT_FAIL_SAFE_STATUS_TOPIC, current_status_json)
        self.fail_safe_status_publisher.publish(current_status_json)
        self.sensor_status_publisher.publish(json.dumps(sensor_status))
        self.self_driving_mode_publisher.publish(Bool(self.is_self_driving()))
        if self.warn_count + self.error_count > 0:
            rospy.logwarn("warn_count: %d, error_count: %d",
                          self.warn_count, self.error_count)
    def __report_fail_safe_crash(self, exc_str):
        """File a JIRA issue when the fail-safe node itself crashes."""
        rospy.logwarn("Fail safe crashed! report to JiRA")
        rospy.logwarn(exc_str)
        summary = "[Auto Report] Fail-Safe: Unexpected Crash"
        desc = generate_crash_description(exc_str)
        self.issue_reporter.post_issue(summary, desc)
    def run(self):
        """Send out aggregated info to backend server every second."""
        rate = rospy.Rate(1)
        while not rospy.is_shutdown():
            try:
                self.__run()
            except KeyboardInterrupt:
                rospy.logwarn("Ctrl-c is pressed, exit.")
            except Exception:
                # The monitor must never die silently: report its own crash
                # and keep looping.
                exc_str = traceback.format_exc()
                self.__report_fail_safe_crash(exc_str)
            rate.sleep()
| wasn-lab/Taillight_Recognition_with_VGG16-WaveNet | src/utilities/fail_safe/src/fail_safe_checker.py | fail_safe_checker.py | py | 10,290 | python | en | code | 2 | github-code | 13 |
9134263670 | import math, random, types, pymunk
import actors,sound,rooms
from helpers import *
debug=debugFlags["shot"]
class hitSpark(actors.Actor):
    """Short-lived spark effect played where a shot hits a wall.

    Base class for the other spark effects; subclasses override the frame
    list, lifetime (timeUp) and playback rate (animSpeed).
    """
    def __init__(self,space,x,y,dt=1/120):
        actors.Actor.__init__(self,space,x,y,dt)
        self.anim=[ loadImage('assets/shots/hitspark1.png'),
                    loadImage('assets/shots/hitspark2.png'),
                    loadImage('assets/shots/hitspark3.png')]
        self.timeUp = 0.15   # seconds before the effect removes itself
        self.animSpeed = 16  # animation frames per second
    def update(self,mapGrid):
        # Sparks ignore physics/collisions; they just age out.
        self.t += self.dt
        if self.t > self.timeUp:
            self.removeFlag = True
    def draw(self,screen):
        # Offset by half the 16x16 sprite so the effect is centered on the body.
        pos=(self.body.position.x-8,self.body.position.y-8)
        actors.drawAnimation(screen,self.anim,pos,self.animSpeed,self.t)
class parrySpark(hitSpark):
    """Flash effect shown when a shot is parried; longer-lived, 7 frames."""
    def __init__(self, space, x, y, dt=1/120):
        super().__init__(space, x, y, dt)
        self.timeUp = 0.30
        # Seven-frame parry flash, loaded in playback order.
        self.anim = [loadImage('assets/shots/parry{}.png'.format(i))
                     for i in range(1, 8)]
class hurtSpark(hitSpark):
    """Slash effect shown when a shot damages a target; fast 6-frame anim."""
    def __init__(self, space, x, y, dt=1/120):
        super().__init__(space, x, y, dt)
        self.timeUp = 0.15
        self.animSpeed = 32
        # Six-frame hurt slash, loaded in playback order.
        self.anim = [loadImage('assets/shots/hurtslash{}.png'.format(i))
                     for i in range(1, 7)]
class Shot(actors.Actor):
    """Player projectile backed by a small pymunk circle body.

    Travels in the direction (fx, fy) at ``speed`` until its lifetime
    expires or it leaves the screen.  Collision callbacks (hurt/hit/hitWall)
    are attached to the pymunk shape so the physics handlers can call them.
    """
    def __init__(self,space,x,y,fx,fy,speed=160,damage=1,dt=1/120):
        actors.Actor.__init__(self,space,x,y,dt)
        self.shape=pymunk.Circle(self.body,3)
        self.shape.collision_type=collisionTypes["shot"]
        self.shape.collided = False
        space.add(self.body,self.shape)
        self.anim=[ loadImage('assets/shots/orb1.png'),
                    loadImage('assets/shots/orb2.png'),
                    loadImage('assets/shots/orb3.png'),
                    loadImage('assets/shots/orb4.png')]
        self.face=[fx,fy]        # travel direction vector
        self.shape.damage=damage
        self.speed=speed
        self.timer=0.70          # lifetime in seconds
        self.shape.removeFlag=False
        self.hp = 1
        # Expose callbacks on the shape so collision handlers can reach them.
        self.shape.hurt=self.hurt
        self.shape.hit=self.hit
        self.shape.hitWall=self.hitWall
        sound.shotChannel.play(sound.sounds["shot"])
    def update(self):
        """Age the shot, cull it off-screen, and apply its propulsion force."""
        # Handle removal.
        self.timer -= self.dt
        if debug:print("Shot timer: "+str(self.timer))
        if self.timer<=0:
            self.shape.removeFlag=True
        if self.body.position.x<0:
            self.shape.removeFlag=True
            if debug:print("Out of bounds -x")
        if self.body.position.y<0:
            self.shape.removeFlag=True
            if debug:print("Out of bounds -y")
        if self.body.position.x>width:
            self.shape.removeFlag=True
            if debug:print("Out of bounds +x")
        if self.body.position.y>height:
            self.shape.removeFlag=True
            if debug:print("Out of bounds +y")
        actors.Actor.update(self)
        # Apply new movement.
        angle=math.atan2(self.face[1],self.face[0])
        self.dx=math.cos(angle)*self.speed*self.dt*self.xFactor
        self.dy=math.sin(angle)*self.speed*self.dt*self.yFactor
        # Apply force.
        self.body.apply_force_at_local_point((self.dx*actors.factor,self.dy*actors.factor),(0,0))
    def draw(self,screen):
        # Offset by half the 6px sprite so the orb is centered on the body.
        pos=(self.body.position.x-3,self.body.position.y-3)
        actors.drawAnimation(screen,self.anim,pos,8,self.t)
    def hurt(self,amount):
        """Called when the shot itself is parried; `amount` is ignored."""
        self.shape.removeFlag = True
        spark = parrySpark(self.space,self.body.position.x,self.body.position.y)
        self.room.addActor(spark)
        sound.pingChannel.play(sound.sounds["ping"])
    def hit(self):
        """Called when the shot damages a target; spawns a hurt slash."""
        self.shape.removeFlag = True
        spark = hurtSpark(self.space,self.body.position.x,self.body.position.y)
        self.room.addActor(spark)
        sound.hurtChannel.play(sound.sounds["hurt"])
        #sound.sounds["hurt"].play()
    def hitWall(self):
        """Called when the shot hits level geometry; spawns a hit spark."""
        self.shape.removeFlag = True
        spark = hitSpark(self.space,self.body.position.x,self.body.position.y)
        self.room.addActor(spark)
        sound.hurtChannel.play(sound.sounds["hurt"])
# Bad guy shot
class BadShot(Shot):
    """Enemy-owned projectile: a Shot with a slower default speed and the
    enemy-shot collision type."""
    def __init__(self, space, x, y, fx, fy, speed=120, damage=1, dt=1/120):
        super().__init__(space, x, y, fx, fy, speed, damage, dt)
        self.shape.collision_type = collisionTypes["badshot"]
# Spreader weapon.
class SpreadShot(Shot):
    """Spreader weapon projectile: shorter lifetime, pulser animation."""
    def __init__(self, space, x, y, fx, fy, speed=160, damage=1, dt=1/120):
        super().__init__(space, x, y, fx, fy, speed, damage, dt)
        self.timer = 0.45
        # Three-frame pulser animation, loaded in playback order.
        self.anim = [loadImage('assets/shots/pulser{}.png'.format(i))
                     for i in (1, 2, 3)]
# Spreader sub-shot.
class SubShot(Shot):
    """Secondary beam spawned by the spreader; its sprite is rotated to
    match the direction of travel."""
    def __init__(self, space, x, y, fx, fy, speed=160, damage=1, dt=1/120):
        super().__init__(space, x, y, fx, fy, speed, damage, dt)
        self.timer = 0.40
        # Rotate the beam sprite to face (fx, fy).  Note the swapped atan2
        # arguments and the +90 offset compensating for the base sprite's
        # orientation (kludge carried over until the sprite itself is rotated).
        rotation = math.atan2(self.face[0], self.face[1]) * 180 / math.pi + 90
        self.anim = [pygame.transform.rotate(
                         loadImage('assets/shots/beam{}.png'.format(i)), rotation)
                     for i in (1, 2, 3)]
# Spear shot.
class LongShot(Shot):
    """Spear shot: starts as a spinner, then "charges" into a spear.

    After 0.45 s of flight (timer drops below ``changetime``) the animation
    switches to the direction-rotated spear frames and the shape's damage is
    multiplied by ``multi`` (once).
    """
    def __init__(self,space,x,y,fx,fy,speed=160,damage=1,dt=1/120,multi=2):
        Shot.__init__(self,space,x,y,fx,fy,speed,damage,dt)
        self.timer=1.5
        self.changetime=self.timer-0.45  # moment the spinner becomes a spear
        self.changed=False               # ensures the damage boost fires once
        self.multi=multi                 # damage multiplier after the change
        self.anim=[ loadImage('assets/shots/spinnerdouble1.png'),
                    loadImage('assets/shots/spinnerdouble2.png'),
                    loadImage('assets/shots/spinnerdouble3.png'),
                    loadImage('assets/shots/spinnerdouble4.png'),]
        # Pre-rotate the spear frames to the travel direction.
        angle=math.atan2(fy,fx)
        angle=math.degrees(angle)
        self.anim2=[ pygame.transform.rotate(loadImage('assets/shots/spear1.png'),-angle),
                     pygame.transform.rotate(loadImage('assets/shots/spear2.png'),-angle),
                     pygame.transform.rotate(loadImage('assets/shots/spear3.png'),-angle),]
    def update(self):
        Shot.update(self)
        # One-time transformation into the boosted spear form.
        if self.timer<self.changetime and not self.changed:
            self.anim=self.anim2
            self.shape.damage*=self.multi
            self.changed=True
class BadLongShot(LongShot):
    """Enemy spear shot: slower default speed, enemy collision tag."""
    def __init__(self, space, x, y, fx, fy, speed=120, damage=1, dt=1/120):
        super().__init__(space, x, y, fx, fy, speed, damage, dt)
        self.shape.collision_type = collisionTypes["badshot"]
        # Redundant with LongShot's default multi=2, kept for parity.
        self.multi = 2
class BadSpreadShot(SpreadShot):
    """Enemy spreader projectile with the enemy-shot collision type."""
    def __init__(self, space, x, y, fx, fy, speed=120, damage=1, dt=1/120):
        super().__init__(space, x, y, fx, fy, speed, damage, dt)
        self.shape.collision_type = collisionTypes["badshot"]
class BadSubShot(SubShot):
    """Enemy spreader sub-beam with the enemy-shot collision type."""
    def __init__(self, space, x, y, fx, fy, speed=120, damage=1, dt=1/120):
        super().__init__(space, x, y, fx, fy, speed, damage, dt)
        self.shape.collision_type = collisionTypes["badshot"]
| Derpford/memelords | shots.py | shots.py | py | 8,177 | python | en | code | 0 | github-code | 13 |
17158939757 | import numpy as np
import matplotlib.pyplot as plt
ax = plt.axes(projection='3d')
def plot_frame_2d(rotmat_2d, translation, plt_basis=False, plt_show=False):
    """Draw a 2-D coordinate frame transformed by a rotation + translation.

    rotmat_2d   -- 2x2 rotation matrix applied to the frame's axis vectors
    translation -- 2x1 column array/matrix [[dx],[dy]] (indexed [0,0]/[1,0])
    plt_basis   -- also draw the world x/y axes through the origin
    plt_show    -- call plt.show() after drawing

    The transformed x axis is drawn blue, the y axis red.
    """
    # Origin and unit axis endpoints of the untransformed frame.
    r1 = np.array([[0],[0]])
    r2 = np.array([[1],[0]])
    r4 = np.array([[0],[1]])
    dx = translation[0,0]
    dy = translation[1,0]
    # Three identical copies of the translation, one per transformed point.
    d1 = np.array([[dx],[dy]])
    d2 = np.array([[dx],[dy]])
    d4 = np.array([[dx],[dy]])
    # rnew = Rotation @ rold + d
    r1new = rotmat_2d @ r1 + d1
    r2new = rotmat_2d @ r2 + d2
    r4new = rotmat_2d @ r4 + d4
    # NOTE(review): plt.axes() creates a new Axes on each call -- presumably
    # the intent is to set the aspect of the current axes; confirm.
    plt.axes().set_aspect('equal')
    if plt_basis:
        # plot basic axis
        plt.axvline(x=0, c="black")
        plt.axhline(y=0, c="black")
    # plot frame
    plt.plot([r1new[0,0], r2new[0,0]],[r1new[1,0], r2new[1,0]],"blue", linewidth=4) #x axis
    plt.plot([r1new[0,0], r4new[0,0]],[r1new[1,0], r4new[1,0]],"red", linewidth=4) #y axis
    if plt_show:
        plt.show()
def plot_frame_3d(homtran, plt_basis=False, plt_show=False):
    """Draw a 3-D coordinate frame given a 4x4 homogeneous transform.

    homtran   -- 4x4 matrix; the upper-left 3x3 block is the rotation and
                 the last column the translation
    plt_basis -- also draw the world x/y/z axes from the origin
    plt_show  -- call plt.show() after drawing

    Draws onto the module-level 3-D axes ``ax`` created at import time.
    """
    # input 4x4 transform matrix
    rotation = np.array([[homtran[0,0], homtran[0,1], homtran[0,2]],
                         [homtran[1,0], homtran[1,1], homtran[1,2]],
                         [homtran[2,0], homtran[2,1], homtran[2,2]]])
    d = np.array([[homtran[0,3]],
                  [homtran[1,3]],
                  [homtran[2,3]]])
    # Unit axis endpoints (r1..r3) and the origin (r4) of the frame.
    r1 = np.array([[1],[0],[0]])
    r2 = np.array([[0],[1],[0]])
    r3 = np.array([[0],[0],[1]])
    r4 = np.array([[0],[0],[0]])
    r1new = rotation @ r1 + d
    r2new = rotation @ r2 + d
    r3new = rotation @ r3 + d
    r4new = rotation @ r4 + d
    if plt_basis:
        # plot basic axis
        ax.plot3D([0, 2], [0, 0], [0, 0], 'red', linewidth=4)
        ax.plot3D([0, 0], [0, 2], [0, 0], 'purple', linewidth=4)
        ax.plot3D([0, 0], [0, 0], [0, 2], 'green', linewidth=4)
    # plot frame
    ax.plot3D([r4new[0,0], r1new[0,0]], [r4new[1,0], r1new[1,0]], [r4new[2,0], r1new[2,0]], 'gray', linewidth=4, label="gray is x")
    ax.plot3D([r4new[0,0], r2new[0,0]], [r4new[1,0], r2new[1,0]], [r4new[2,0], r2new[2,0]], 'blue', linewidth=4, label="blue is y")
    ax.plot3D([r4new[0,0], r3new[0,0]], [r4new[1,0], r3new[1,0]], [r4new[2,0], r3new[2,0]], 'yellow', linewidth=4, label="yellow is z")
    ax.legend()
    if plt_show:
        plt.show()
71430574739 | import kachery as ka
import spikeextractors as se
import h5py
import numpy as np
from .mdaextractors import MdaSortingExtractor
from ...pycommon.load_nwb_item import load_nwb_item
class AutoSortingExtractor(se.SortingExtractor):
    """SortingExtractor that auto-detects its backing format.

    Accepts either an existing SortingExtractor (wrapped directly) or a dict
    describing a file: a kachery path plus either ``nwb_path`` (NWB/HDF5) or
    an ``.mda`` firings file with a sample rate.
    """
    def __init__(self, arg):
        """``arg``: a SortingExtractor instance, or a dict with at least
        "path" and optionally "kachery_config", "nwb_path",
        "paramsPath"/"samplerate"."""
        super().__init__()
        self._hash = None
        if isinstance(arg, se.SortingExtractor):
            self._sorting = arg
            self.copy_unit_properties(sorting=self._sorting)
        else:
            self._sorting = None
            if 'kachery_config' in arg:
                ka.set_config(**arg['kachery_config'])
            if 'path' in arg:
                path = arg['path']
                if ka.get_file_info(path):
                    # Download/realize the file locally via kachery first.
                    file_path = ka.load_file(path)
                    if not file_path:
                        raise Exception('Unable to realize file: {}'.format(path))
                    self._init_from_file(file_path, original_path=path, kwargs=arg)
                else:
                    raise Exception('Not a file: {}'.format(path))
            else:
                raise Exception('Unable to initialize sorting extractor')
    def _init_from_file(self, path: str, *, original_path: str, kwargs: dict):
        """Pick the concrete extractor from the path/kwargs.

        NWB takes precedence when "nwb_path" is given; otherwise ".mda"
        files need a sample rate from "paramsPath" or "samplerate".
        """
        if 'nwb_path' in kwargs:
            self._sorting = NwbSortingExtractor(path=path, nwb_path=kwargs['nwb_path'])
        elif original_path.endswith('.mda'):
            if 'paramsPath' in kwargs:
                params = ka.load_object(kwargs['paramsPath'])
                samplerate = params['samplerate']
            elif 'samplerate' in kwargs:
                samplerate = kwargs['samplerate']
            else:
                raise Exception('Missing argument: samplerate or paramsPath')
            self._sorting = MdaSortingExtractor(firings_file=path, samplerate=samplerate)
        else:
            raise Exception('Unsupported format for {}'.format(original_path))
    def hash(self):
        """Return (and cache) the wrapped sorting's hash, if it exposes one.

        ``hash`` on the wrapped object may be a plain string attribute or a
        callable; returns None when neither is available.
        """
        if not self._hash:
            if hasattr(self._sorting, 'hash'):
                if type(self._sorting.hash) == str:
                    self._hash = self._sorting.hash
                else:
                    self._hash = self._sorting.hash()
            else:
                self._hash = None
                # self._hash = _samplehash(self._sorting)
        return self._hash
    def get_unit_ids(self):
        # Delegate to the wrapped extractor.
        return self._sorting.get_unit_ids()
    def get_unit_spike_train(self, **kwargs):
        # Delegate to the wrapped extractor.
        return self._sorting.get_unit_spike_train(**kwargs)
    def get_sampling_frequency(self):
        # Delegate to the wrapped extractor.
        return self._sorting.get_sampling_frequency()
class NwbSortingExtractor(se.SortingExtractor):
    """SortingExtractor over the ragged units table of an NWB/HDF5 file.

    Spike times are loaded eagerly at construction and converted from
    seconds to frames using the (currently hard-coded) sampling frequency.
    """
    def __init__(self, *, path, nwb_path):
        """path: local HDF5 file; nwb_path: location of the units group."""
        super().__init__()
        self._path = path
        with h5py.File(self._path, 'r') as f:
            X = load_nwb_item(file=f, nwb_path=nwb_path)
            # NWB stores spike times in seconds; convert to frame numbers.
            self._spike_times = X['spike_times'][:] * self.get_sampling_frequency()
            # spike_times_index[i] is the exclusive end offset of unit i's
            # spikes within the flat _spike_times array (ragged layout).
            self._spike_times_index = X['spike_times_index'][:]
            self._unit_ids = X['id'][:]
        self._index_by_id = dict()
        for index, id0 in enumerate(self._unit_ids):
            self._index_by_id[id0] = index
    def get_unit_ids(self):
        """Return unit ids as plain Python ints."""
        return [int(val) for val in self._unit_ids]
    def get_unit_spike_train(self, unit_id, start_frame=None, end_frame=None):
        """Return the spike frames of ``unit_id`` within [start_frame, end_frame).

        Bug fix: the original computed the default bounds but never applied
        them, returning the unit's full spike train regardless of the
        requested window.
        """
        if start_frame is None:
            start_frame = 0
        if end_frame is None:
            end_frame = np.Inf
        index = self._index_by_id[unit_id]
        ii2 = self._spike_times_index[index]
        if index - 1 >= 0:
            ii1 = self._spike_times_index[index - 1]
        else:
            ii1 = 0
        spike_train = self._spike_times[ii1:ii2]
        # Restrict to the requested frame window (half-open interval).
        return spike_train[(spike_train >= start_frame) & (spike_train < end_frame)]
    def get_sampling_frequency(self):
        # need to fix this: sampling rate is hard-coded instead of being
        # read from the NWB file's metadata.
        return 30000
# def _samplehash(sorting):
# from mountaintools import client as mt
# obj = {
# 'unit_ids': sorting.get_unit_ids(),
# 'sampling_frequency': sorting.get_sampling_frequency(),
# 'data': _samplehash_helper(sorting)
# }
# return mt.sha1OfObject(obj)
# def _samplehash_helper(sorting):
# h = 0
# for id in sorting.get_unit_ids():
# st = sorting.get_unit_spike_train(unit_id=id)
# h = hash((hash(bytes(st)), hash(h)))
# return h | flatironinstitute/ephys-viz | widgets/pycommon/autoextractors/autosortingextractor.py | autosortingextractor.py | py | 4,298 | python | en | code | 6 | github-code | 13 |
74449765457 | import os
import logging
from airflow import DAG
from airflow.utils.dates import days_ago
from airflow.operators.bash import BashOperator
from airflow.operators.python import PythonOperator
from airflow.providers.google.cloud.operators.bigquery import BigQueryCreateExternalTableOperator, BigQueryInsertJobOperator # to interact with bigquery
#inorder to create an external table
from airflow.providers.google.cloud.transfers.gcs_to_gcs import GCSToGCSOperator
# GCP / Airflow configuration pulled from the environment (set in the
# docker-compose file); falls back to defaults where given.
PROJECT_ID= os.environ.get("GCP_PROJECT_ID")
BUCKET= os.environ.get("GCP_GCS_BUCKET")
path_to_local_home = os.environ.get("AIRFLOW_HOME","/opt/AIRFLOW/")
BIGQUERY_DATASET = os.environ.get ("BIGQUERY_DATASET",'trips_data_all')
DATASET='tripdata'
# Taxi colour -> pickup-timestamp column used for date partitioning.
COLOUR={'yellow':'tpep_pickup_datetime', 'green':'lpep_pickup_datetime'}
INPUT="raw"
FILETYPE="parquet"
default_args = {
    "owner": "airflow",
    "start_date": days_ago(1),
    "depends_on_past": False,
    "retries": 1,
}
# Daily DAG: for each taxi colour, reorganize the raw parquet files in GCS,
# expose them to BigQuery as an external table, then materialize a
# date-partitioned native table from it.
with DAG(
    dag_id = "gcs_2_bq_dag",
    schedule_interval = "@daily",
    default_args =default_args,
    catchup = False,
    max_active_runs = 1,
    tags= ["dtc-de"],
) as dag:
    for colour, ds_col in COLOUR.items():
        # Move raw/<colour>_tripdata*.parquet into a per-colour prefix.
        move_files_gsc_task= GCSToGCSOperator(
            task_id=f"move_{colour}_{DATASET}_files_task",
            source_bucket=BUCKET, # because we have already assigned the gcs bucket id here "BUCKET= os.environ.get("GCP_GCS_BUCKET")" in docker config. so we are just picking up the env. variable
            source_object=f"{INPUT}/{colour}_{DATASET}*.{FILETYPE}",
            destination_bucket=BUCKET,
            destination_object=f"{colour}/{colour}_{DATASET}",
            move_object=True,
            # gcp_conn_id=google_cloud_conn_id we dont need this becasue api configured in dockercompose.yml file
        )
        # External table over all parquet files under the colour prefix.
        bigquery_external_table_task = BigQueryCreateExternalTableOperator(
            task_id=f"bq_{colour}_{DATASET}_files_task",
            table_resource={
                "tableReference": {
                    "projectId": PROJECT_ID,
                    "datasetId": BIGQUERY_DATASET,
                    "tableId": f"{colour}_{DATASET}_external_table",
                },
                "externalDataConfiguration": {
                    "sourceFormat": "PARQUET",
                    "sourceUris": [f"gs://{BUCKET}/{colour}/*"],
                },
            },
        )
        # DROP_TABLE_COLUMN_QUERY=(f"ALTER TABLE {BIGQUERY_DATASET}.{colour}_{DATASET}_external_table\
        #             DROP COLUMNS IF EXISTS ['airport_fee']")
        # update_table_schema_task = BigQueryUpdateTableSchemaOperator(
        # task_id="update_table_schema_task",
        # dataset_id={DATASET},
        # table_id="test_table",
        # schema_fields_updates=[
        #     {"name": "emp_name", "description": "Name of employee"},
        #     {"name": "salary", "description": "Monthly salary in USD"},
        # ],
        # )
        # Materialize a native table partitioned on the pickup date; the
        # airport_fee column is excluded (schema mismatch across months).
        CREATE_PARTITIONED_TABLE_QUERY=(f"CREATE OR REPLACE TABLE {BIGQUERY_DATASET}.{colour}_{DATASET}_partitioned\
                    PARTITION BY DATE ({ds_col})\
                    AS \
                    SELECT *  EXCEPT (airport_fee) FROM {BIGQUERY_DATASET}.{colour}_{DATASET}_external_table;" #find a way to incooporate both green and yellow tripdata in one statement
        )
        bq_create_partition_task = BigQueryInsertJobOperator(
            task_id=F"bq_create_{colour}_{DATASET}_partition_task",
            configuration={
                "query":{
                    "query": CREATE_PARTITIONED_TABLE_QUERY,
                    "useLegacySql": False,
                }
            },
        )
move_files_gsc_task >> bigquery_external_table_task >> bq_create_partition_task | LeviScoffie/Data-Engineering-StepbyStep | week3_data_warehouse_bigquery/airflow/dags/gcs_to_bq_dag.py | gcs_to_bq_dag.py | py | 3,920 | python | en | code | 0 | github-code | 13 |
41619169126 | import pygame
from settings import *
from support import import_folder
from math import sin
class Player(pygame.sprite.Sprite):
    """Side-scroller player sprite: animation, input, physics and health.

    Movement is driven through ``self.direction`` + gravity; collision is
    done against ``collision_rect`` (narrower than the drawn sprite).
    ``change_health``/``change_stamina`` are callbacks into the level/UI.
    """
    def __init__(self, pos, surface, create_jump_particles, change_health, change_stamina) -> None:
        super().__init__()
        self.import_character_assets()
        self.frame_index = 0
        self.animation_speed = 0.33
        self.image = self.animations["idle"][self.frame_index]
        self.rect = self.image.get_rect(topleft = pos)
        #dust particles
        self.import_dust_run_particles()
        self.dust_frame_index = 0
        self.dust_animation_speed = 0.33
        self.display_surface = surface
        self.create_jump_particles = create_jump_particles
        #self.current_stamina = current_stamina
        #movement
        self.direction = pygame.math.Vector2(0,0)
        self.walking_speed = 4
        self.gravity = 0.8
        self.jump_speed = -24
        # Narrow hitbox (50px wide) used for collisions instead of the sprite.
        self.collision_rect = pygame.Rect(self.rect.topleft, (50,self.rect.h))
        self.running_speed = 8
        self.movement_status = "walking"
        #status
        self.status = "idle"
        self.facing_right = True
        self.on_ground = False
        self.on_ceiling = False
        self.on_left = False
        self.on_right = False
        #health management
        self.change_health = change_health
        self.change_stamina = change_stamina
        self.invincible_bool = False
        self.invincible_ms = 400   # invincibility window after being hurt
        self.hurt_moment = 0       # tick timestamp of the last hit
        #audio
        self.jump_sound = pygame.mixer.Sound(".//JUEGO 2//audio//effects//jump.wav")
        self.jump_sound.set_volume(0.25)
        self.hit_sound = pygame.mixer.Sound(".//JUEGO 2//audio//effects//hit.wav")
        self.hit_sound.set_volume(0.25)
    def import_character_assets(self):
        """Load every animation frame set (idle/run/jump/fall) from disk."""
        character_path = ".//JUEGO 2//graphics//character//"
        self.animations = {"idle" : [], "run" : [], "jump": [], "fall": []}
        for animation in self.animations.keys():
            full_path = character_path + animation
            self.animations[animation] = import_folder(full_path)
    def import_dust_run_particles(self):
        """Load the running dust-particle frames."""
        self.dust_run_particules = import_folder(".//JUEGO 2//graphics//character//dust_particles//run")
    def animate(self):
        """Advance the current animation, flip for facing, blink if hurt."""
        animation = self.animations[self.status]
        #loop over the index
        self.frame_index += self.animation_speed
        if self.frame_index >= len(animation):
            self.frame_index = 0
        image = animation[int(self.frame_index)]
        # Keep the drawn rect glued to the collision hitbox on the facing side.
        if self.facing_right:
            self.image = image
            self.rect.bottomleft = self.collision_rect.bottomleft
        else:
            flipped_img = pygame.transform.flip(image, True, False)
            self.image = flipped_img
            self.rect.bottomright = self.collision_rect.bottomright
        # Flicker (alpha 0/255 square wave) while invincible.
        if self.invincible_bool:
            alpha = self.wave_value()
            self.image.set_alpha(alpha)
        else:
            self.image.set_alpha(255)
    def run_dust_animation(self):
        """Draw dust particles at the feet while running on the ground."""
        if self.status == "run" and self.on_ground:
            self.dust_frame_index += self.dust_animation_speed
            if self.dust_frame_index >= len(self.dust_run_particules):
                self.dust_frame_index = 0
            dust_particle = self.dust_run_particules[int(self.dust_frame_index)]
            if self.facing_right:
                pos = self.rect.bottomleft - pygame.math.Vector2(6,10)
                self.display_surface.blit(dust_particle, pos)
            else:
                pos = self.rect.bottomright - pygame.math.Vector2(6,10)
                flipped_dust_particle = pygame.transform.flip(dust_particle, True, False)
                self.display_surface.blit(flipped_dust_particle, pos)
    def get_input(self, able_stamina_consume):
        """Read the keyboard: shift to run (stamina permitting), arrows to
        move, up to jump when grounded."""
        keys = pygame.key.get_pressed()
        if keys[pygame.K_LSHIFT] and able_stamina_consume:
            self.movement_status = "running"
        else:
            self.movement_status = "walking"
        if keys[pygame.K_RIGHT] :
            self.direction.x = 1
            self.facing_right = True
        elif keys[pygame.K_LEFT] :
            self.direction.x = -1
            self.facing_right = False
        else:
            self.direction.x = 0
        if keys[pygame.K_UP] and self.on_ground:
            self.jump()
            self.create_jump_particles(self.rect.midbottom)
    def get_status(self):
        """Derive the animation status from the current velocity."""
        if self.direction.y < 0:
            self.status = "jump"
        elif self.direction.y > 1:
            self.status = "fall"
        else:
            if self.direction.x == 0:
                self.status = "idle"
            else:
                self.status = "run"
    def apply_gravity(self):
        """Accelerate downward and move the hitbox vertically."""
        self.direction.y += self.gravity
        self.collision_rect.y += self.direction.y
    def jump(self):
        """Launch upward and play the jump sound."""
        self.direction.y = self.jump_speed
        self.jump_sound.play()
    def get_damaged (self):
        """Take a hit: -30 health and start the invincibility window."""
        if not self.invincible_bool:
            self.hit_sound.play()
            self.change_health(-30)
            self.invincible_bool = True
            self.hurt_moment = pygame.time.get_ticks()
    def invincibility_timer(self):
        """End invincibility once invincible_ms has elapsed."""
        if self.invincible_bool:
            current_time = pygame.time.get_ticks()
            if current_time - self.hurt_moment >= self.invincible_ms:
                self.invincible_bool = False
    def consume_stamina(self):
        """Drain stamina while running."""
        if self.movement_status == "running":
            self.change_stamina(-0.5)
    def regenerate_stamina(self):
        """Slowly restore stamina (called by the level when appropriate)."""
        self.change_stamina(0.3)
    def wave_value(self):
        """Return 255 or 0 on a fast sine square-wave (hurt flicker)."""
        value = sin((pygame.time.get_ticks()) / 10)
        if value >= 0:
            return 255
        else:
            return 0
    def update(self, able_stamina_consume):
        """Per-frame update: input, status, animation, dust, timers."""
        self.get_input(able_stamina_consume)
        self.get_status()
        self.animate()
        self.run_dust_animation()
        self.invincibility_timer()
        # NOTE(review): the return value of this call is discarded --
        # animate() already calls wave_value(); confirm this line is needed.
        self.wave_value()
self.wave_value() | AgustinSande/sandeAgustin-pygame-tp-final | codefiles/player.py | player.py | py | 6,453 | python | en | code | 0 | github-code | 13 |
14880612077 | import socket
import json
from select import select # работает со всем у чего есть файловый дескриптор .fileno()
# https://docs.python.org/3/howto/sockets.html
# https://docs.python.org/3/library/socket.html#module-socket
# https://www.youtube.com/watch?v=ZGfv_yRLBiY&list=PLlWXhlUMyooawilqK4lPXRvxtbYiw34S8&index=1
# можно подключиться через браузер, telnet или nc localhost 33000
def process_incoming_message(s):
    """Echo the incoming payload back as JSON.

    If ``s`` parses as a JSON object, a constant "value" field is added
    before re-serializing.  Non-JSON text is returned JSON-encoded as-is,
    and a parsed non-object value (list, number, ...) is returned
    unchanged -- the failed item assignment leaves ``s`` rebound to it.
    """
    print(f"debug: {s}")
    try:
        s = json.loads(s)
        s['value'] = 2.718281828
    except (json.decoder.JSONDecodeError, TypeError):
        pass
    return json.dumps(s)
def accept_connection(server_socket):
    """Accept a pending client and add its socket to the monitored list."""
    client_socket, client_address = server_socket.accept()
    print('Connected by', client_address)
    to_monitor.append(client_socket)
def send_message(client_socket):
    """Read one request from a ready client and reply; close on EOF."""
    request_link = client_socket.recv(1024)  # wait for the incoming message
    if request_link:
        result = process_incoming_message(request_link.decode('utf-8').strip())
        resp = str(result).encode('utf-8') + b'\n'
        client_socket.send(resp)  # if the send buffer is full this is, in a sense, a blocking operation
    else:
        # Empty recv() means the peer closed the connection.
        client_socket.close()
def event_loop():
    """select()-based loop: accept new clients and serve ready ones."""
    global to_monitor
    while True:
        # Drop sockets that were closed (fileno() becomes -1 after close).
        to_monitor = list(filter(lambda x: x.fileno() != -1, to_monitor))
        ready_to_read, _, _ = select(to_monitor, [], []) # read, write, errors
        for soc in ready_to_read:
            # Readability on the listening socket means a pending connection.
            if soc is server_socket:
                accept_connection(soc)
            else:
                send_message(soc)
# Sockets watched by select(); starts with just the listening socket.
to_monitor = []
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# socket.AF_INET - address family IPv4; socket.SOCK_STREAM - TCP
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# If the script is interrupted, the OS keeps the port busy for a timeout
# (~3 min) so in-flight data can reach its destination; enable address
# reuse so we can rebind immediately.
server_socket.bind(('localhost', 33000))
server_socket.listen(1)  # listen on the port for an incoming connection
if __name__ == '__main__':
    to_monitor.append(server_socket)
    event_loop()
| ekomissarov/edu | some-py-examples/socket-example/socserv-eventloop-select.py | socserv-eventloop-select.py | py | 2,540 | python | ru | code | 0 | github-code | 13 |
10300626146 | from datetime import datetime, timedelta
from pg_statviz.tests.util import mock_dictrow
from pg_statviz.modules.cache import calc_ratio
tstamp = datetime.now()
data = [mock_dictrow({'blks_hit': 150000, 'blks_read': 14000,
'snapshot_tstamp': tstamp + timedelta(seconds=10)}),
mock_dictrow({'blks_hit': 160000, 'blks_read': 15000,
'snapshot_tstamp': tstamp + timedelta(seconds=20)}),
mock_dictrow({'blks_hit': 170000, 'blks_read': 16000,
'snapshot_tstamp': tstamp + timedelta(seconds=30)}),
mock_dictrow({'blks_hit': 180000, 'blks_read': 17000,
'snapshot_tstamp': tstamp + timedelta(seconds=40)}),
mock_dictrow({'blks_hit': 200000, 'blks_read': 19000,
'snapshot_tstamp': tstamp + timedelta(seconds=50)})]
def test_calc_ratio():
response = calc_ratio(data)
ratio = [91.46, 91.43, 91.4, 91.37, 91.32]
assert ratio == response
| vyruss/pg_statviz | src/pg_statviz/tests/test_cache.py | test_cache.py | py | 982 | python | en | code | 23 | github-code | 13 |
10213639508 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 11 20:30:28 2017
@author: Anders
"""
import pandas as pd
import os
print ('importing data')
os.chdir('/Users/Anders/Dropbox/Projects/CPD_QC/sql2/Data_imports')
'''
SEQUENCING DATA
'''
fields = {'Pos': float, 'Alt': str, 'Gene_name': str, 'Chrom_without_chr': str,
'Effect': str, 'VariantType': str, 'FDP': float, 'FRD': float,
'FAD': float,'Sample Sequencing Name': str}
rename = {'Pos' : 'pos', 'Alt' :'alt', 'Gene_name' : 'gene',
'Chrom_without_chr' : 'chr', 'Effect' : 'effect',
'VariantType' : 'vtype', 'FDP' : 'fdp', 'FRD' : 'frd', 'FAD' : 'fad',
'Sample Sequencing Name' : 'seq_name'}
original_data = pd.read_excel("validation_full.xlsx", converters = fields, usecols = fields.keys())
CPDV000386 = pd.read_table("CPDV000386.tab", dtype = fields, usecols = fields.keys())
CPDV141537 = pd.read_table("CPDV141537.tab", dtype = fields, usecols = fields.keys())
CPDV151487 = pd.read_table("CPDV151487.tab", dtype = fields, usecols = fields.keys())
CPDV160726 = pd.read_table("CPDV160726.tab", dtype = fields, usecols = fields.keys())
heme_data = pd.read_csv("heme pos ctl.csv", dtype = fields, usecols = fields.keys())
ash_data = pd.read_csv("fm.sv2_tsca.clincal_only.multirun.csv",
dtype = fields, usecols = fields.keys())
CPDV170682 = pd.read_table("CPDV170682_HEME_pos_ctl_2.tab",
dtype = fields, usecols = fields.keys())
dfs = [original_data, CPDV000386, CPDV141537, CPDV151487, CPDV160726,
heme_data, ash_data, CPDV170682]
reads = pd.DataFrame()
for df in dfs:
reads = reads.append(df, ignore_index = True)
reads.rename(columns=rename, inplace = True)
reads = reads.dropna(subset = ['pos', 'chr', 'fdp', 'fad', 'frd'])
reads = reads.drop_duplicates(subset = ['pos', 'alt', 'chr', 'seq_name'])
os.chdir('/Users/Anders/Dropbox/Projects/CPD_QC/sql2/Data_exports')
reads.to_csv('reads1.csv', index = False)
| meyer-anders/CPD_QC | get_reads.py | get_reads.py | py | 2,011 | python | en | code | 0 | github-code | 13 |
38702388675 | #!/usr/bin/env python
import math
import json
import Queue
import threading
FRAME_LOCAL_NED = 1
MAV_CMD_CONDITION_YAW = 115
MAV_CMD_DO_SET_ROI = 201
downloaded = False
q = Queue.Queue()
def print_json():
while True:
msg = q.get()
print(json.dumps(msg))
t = threading.Thread(target=print_json,args=())
t.daemon = True
t.start()
def attribute_callback(self,attr_name,value):
if value != None:
if attr_name == 'location.global_frame':
q.put({ 'gpsCoords':{ 'lat':value.lat, 'long':value.lon, 'alt':value.alt }})
elif attr_name == 'attitude':
q.put({ 'attitude':{ 'value':{'pitch':value.pitch, 'yaw':value.yaw, 'roll':value.roll }}})
elif attr_name == 'mode': q.put({ 'modeName':value.name })
elif attr_name == 'armed': q.put({ 'isArmed':value })
def send_ned_velocity(vehicle,vn,ve,vd):
msg = vehicle.message_factory.set_position_target_local_ned_encode(0,0,0,FRAME_LOCAL_NED,0b0000111111000111,0,0,0,vn,ve,vd,0,0,0,0,0)
vehicle.send_mavlink(msg)
def condition_yaw(vehicle,heading):
msg = vehicle.message_factory.command_long_encode(0,0,MAV_CMD_CONDITION_YAW,0,heading,0,1,0,0,0,0)
vehicle.send_mavlink(msg)
def set_roi(vehicle,latitude,longitude,altitude):
msg = vehicle.message_factory.command_long_encode(0,0,MAV_CMD_DO_SET_ROI,0,0,0,0,0,latitude,longitude,altitude)
vehicle.send_mavlink(msg)
def process_command(command,vehicle):
global downloaded
x = command.split();
if x[0] == "arm": vehicle.armed = True
# elif x[0] == "getAttitude":
# if vehicle.attitude == None: q.put({ 'attitude':{ 'value':None }})
# else: q.put({ 'attitude':{ 'value':{ 'pitch':vehicle.attitude.pitch, 'yaw':vehicle.attitude.yaw, 'roll':vehicle.attitude.roll }}})
elif x[0] == "getGimbal":
if vehicle.gimbal == None: q.put({ 'gimbal':{ 'value':None }})
else: q.put({ 'gimbal':{ 'value':vehicle.gimbal.pitch }})
elif x[0] == "getHomeLocation":
if not downloaded:
cmds = vehicle.commands
cmds.download()
cmds.wait_ready()
downloaded = True
if vehicle.home_location == None: q.put({ 'homeLocation':{ 'value':None }})
else: q.put({ 'homeLocation':{ 'value':{ 'lat':vehicle.home_location.lat, 'long':vehicle.home_location.lon, 'alt':vehicle.home_location.alt }}})
elif x[0] == "getVelocity":
if vehicle.velocity == None: q.put({ 'velocity':{ 'value':None }})
else: q.put({ 'velocity':{ 'value':vehicle.velocity }})
elif x[0] == "goto":
coord_lat = float(x[1])
coord_long = float(x[2])
coord_alt = float(x[3])
speed = float(x[4])
cmd_str = "goto " + str(coord_lat) + " " + str(coord_long) + " " + str(coord_alt) + " " + str(speed)
q.put({ 'cmd':cmd_str })
a_location = dronekit.LocationGlobal(coord_lat,coord_long,coord_alt)
vehicle.simple_goto(a_location,groundspeed=speed)
elif x[0] == "guided":
vehicle.mode = dronekit.VehicleMode("GUIDED")
q.put({ 'cmd':'guided' })
elif x[0] == "launch":
q.put({ 'cmd':'takeoff' })
vehicle.simple_takeoff(10)
elif x[0] == "loiter":
vehicle.mode = dronekit.VehicleMode("LOITER")
q.put({ 'cmd':'loiter' })
elif x[0] == "mode":
q.put({ 'modeName':vehicle.mode.name })
elif x[0] == "rotateGimbal":
pitch = float(x[1])
yaw = vehicle.attitude.yaw
cmd_str = "gimbal (" + str(pitch) + "," + str(yaw) + ")"
if yaw is not None and not math.isnan(yaw) and not math.isnan(pitch): vehicle.gimbal.rotate(pitch,0,yaw)
q.put({ 'cmd':cmd_str })
elif x[0] == "rtl":
vehicle.mode = dronekit.VehicleMode("RTL")
q.put({ 'cmd':'rtl' })
elif x[0] == "setROI":
latitude = float(x[1])
longitude = float(x[2])
altitude = float(x[3])
cmd_str = "roi " + str(latitude) + " " + str(longitude) + " " + str(altitude)
q.put({ 'cmd':cmd_str })
if not math.isnan(latitude) and not math.isnan(longitude) and not math.isnan(altitude): set_roi(vehicle,latitude,longitude,altitude)
elif x[0] == "setVelocity":
vn = float(x[1])
ve = float(x[2])
vd = float(x[3])
cmd_str = "velocity " + str(vn) + " " + str(ve) + " " + str(vd)
q.put({ 'cmd':cmd_str })
if not math.isnan(vn) and not math.isnan(ve) and not math.isnan(vd): send_ned_velocity(vehicle,vn,ve,vd)
elif x[0] == "setYaw":
heading = float(x[1])
cmd_str = "yaw " + str(heading)
q.put({ 'cmd':cmd_str })
if not math.isnan(heading): condition_yaw(vehicle,heading)
elif x[0] == "stabilize":
vehicle.mode = dronekit.VehicleMode("STABILIZE")
q.put({ 'cmd':'stabilize' })
# Connect to UDP endpoint (and wait for default attributes to accumulate)
def main():
target = "udpin:0.0.0.0:14550"
vehicle = dronekit.connect(target)
q.put({ 'isConnected':True })
vehicle.add_attribute_listener('location.global_frame',attribute_callback)
vehicle.add_attribute_listener('mode',attribute_callback)
vehicle.add_attribute_listener('armed',attribute_callback)
vehicle.add_attribute_listener('attitude',attribute_callback)
while 1:
line = ""
for c in raw_input():
line = line + c
process_command(line,vehicle)
vehicle.close()
try:
import dronekit
import sys
main()
except ImportError:
q.put({ 'isConnected':False })
| waTeim/flying-monkey | 3DR/droneAPI.py | droneAPI.py | py | 5,358 | python | en | code | 0 | github-code | 13 |
31384655159 | import pandas as pd
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.graphics.api import qqplot
#Read the file
data = pd.read_csv('1231.csv', parse_dates=['date'])
#rename the coloumns
dta = data['tmin']
dta_year = data['date']
begin_year = dta_year[0:1].dt.year
end_year = dta_year[-1:].dt.year
dta = data['tmin']
dta = np.array(dta, dtype=np.float)
dta = pd.Series(dta)
#Make the date as an index
dta.index = pd.Index(sm.tsa.datetools.dates_from_range(str(begin_year.values[0]), str(end_year.values[0])))
#plot the graphs
dta.plot(figsize=(10, 6)).set_title('Time-series graph for 1 time-series example')
fig = plt.figure(figsize=(10, 6))
ax1 = fig.add_subplot(111)
diff1 = dta.diff(1)
diff1.plot(ax =ax1).set_title('Performe First order difference')
diff1 = dta.diff(1)
fig = plt.figure(figsize=(10, 6))
ax1 = fig.add_subplot(211)
fig = sm.graphics.tsa.plot_acf(dta, lags=30, ax=ax1)
ax2 = fig.add_subplot(212)
fig = sm.graphics.tsa.plot_pacf(dta, lags=30, ax=ax2)
| sungho93/Temperature-prediction-based-on-spark | Temp_pridiction.py | Temp_pridiction.py | py | 1,060 | python | en | code | 0 | github-code | 13 |
73554656656 | from Manejafacultades import manejador
if __name__=='__main__':
man=manejador()
man.cargar()
man.mostrar()
print("---Menu de opciones---")
print("1:Mostrar carreras que se dicatan en una facultad")
op=input("ingrese opción de menu:")
while op !="0":
if op=="1":
cod=input("Ingresar codigo de facultad:")
man.BuscarcodigoFac(cod)
op=input("ingrese opción de menu:")
| Merypi/UNIT3 | Main.py | Main.py | py | 454 | python | es | code | 0 | github-code | 13 |
36262650132 | import numpy
from numpy.linalg import norm, eig, svd, eigh
normalize = lambda v: v/norm(v)
sqlength = lambda v: numpy.sum(v*v)
from chimerax.core.state import State
class Plane(State):
"""A mathematical plane
The 'origin_info' must either be a point (numpy array of 3 floats) or an array/list
of at least 3 non-colinear points. If it's a single point, then the normal vector
(numpy array of 3 floats) must be specified. If it's multiple points then
the best-fitting plane through those points will be calculated, with the origin
at the centroid of those points.
"""
usage_msg = "Plane must be defined either by a single point and a normal, or " \
"with an array of N points"
def __init__(self, origin_info, *, normal=None):
origin_info = numpy.array(origin_info)
dims = origin_info.shape
if dims == (3,):
if normal is None:
raise ValueError(self.usage_msg)
self._origin = origin_info
# sets 'normal' property, and therefore calls _compute_offset
self.normal = normal
elif len(dims) == 2 and dims[-1] == 3:
if normal is not None:
raise ValueError("'normal' must be None for Plane defined by multiple points")
num_pts = dims[0]
if num_pts < 3:
raise ValueError("Must provide at least 3 points to define plane")
xyzs = origin_info
centroid = xyzs.mean(0)
centered = xyzs - centroid
ignore, vals, vecs = svd(centered, full_matrices=False)
self._origin = centroid
# sets 'normal' property, and therefore calls _compute_offset
self.normal = vecs[numpy.argmin(vals)]
else:
raise ValueError(self.usage_msg)
def distance(self, pt):
return numpy.dot(pt, self._normal) + self._offset
def equation(self):
return numpy.array(list(self._normal) + [self._offset])
def intersection(self, plane):
"""Returns a line in the form (origin, vector); throws PlaneNoIntersectionError if parallel"""
v = numpy.cross(self._normal, plane._normal)
if sqlength(v) == 0.0:
raise PlaneNoIntersectionError()
s1 = numpy.negative(self._offset)
s2 = numpy.negative(plane._offset)
n1n2dot = numpy.dot(self._normal, plane._normal)
n1normsqr = numpy.dot(self._normal, self._normal)
n2normsqr = numpy.dot(plane._normal, plane._normal)
divisor = n1n2dot * n1n2dot - n1normsqr * n2normsqr
a = (s2 * n1n2dot - s1 * n2normsqr) / divisor
b = (s1 * n1n2dot - s2 * n1normsqr) / divisor
return a * self._normal + b * plane._normal, v
def nearest(self, pt):
return pt - self._normal * self.distance(pt)
def _get_normal(self):
return self._normal
def _set_normal(self, normal):
self._normal = normalize(normal)
self._compute_offset()
normal = property(_get_normal, _set_normal)
def _get_offset(self):
return self._offset
offset = property(_get_offset)
def _get_origin(self):
return self._origin
def _set_origin(self, origin):
self._origin = origin
self._compute_offset()
origin = property(_get_origin, _set_origin)
def _compute_offset(self):
self._offset = -numpy.dot(self.origin, self.normal)
@staticmethod
def restore_snapshot(session, data):
return Plane(data['origin'], normal=data['normal'])
def take_snapshot(self, session, flags):
data = { 'origin': self.origin, 'normal': self.normal }
return data
class PlaneNoIntersectionError(ValueError):
def __init__(self, msg="Planes do not intersect"):
ValueError.__init__(self, msg)
| HamineOliveira/ChimeraX | src/bundles/geometry/src/plane.py | plane.py | py | 3,824 | python | en | code | null | github-code | 13 |
33946751040 | import tkinter as tk
from tkinter.filedialog import askopenfilename
import spc
import numpy as np
import matplotlib.pyplot as plt
from spectrumFit import baseline
class Baseline(tk.Frame):
def __init__(self, parent, *args, **kwargs):
tk.Frame.__init__(self, parent, *args, **kwargs) # Frame initialization
self.parent = parent
# Creating interface
tk.Label(self, text='Select the order of the baseline : ').pack(side=tk.LEFT)
self.order_list = tk.Listbox(self, selectmode=tk.EXTENDED)
self.order_list.pack(side=tk.LEFT)
self.order_list.insert(tk.END, 'Multi-Linear')
self.order_list.insert(tk.END, 'Quadratic')
self.order_list.insert(tk.END, 'Cubic')
self.order_list.bind('<<ListboxSelect>>', self.select)
self.apply_button = tk.Button(self, text='Apply', command=self.apply)
self.apply_button.pack(side=tk.LEFT)
self.parent.bind("<Return>", lambda event: self.apply())
# Variable initialization
self.bl = None
self.blLine = None
def select(self, event, *args, **kwargs):
method = event.widget.get(int(event.widget.curselection()[0]))
if method == 'Multi-Linear':
self.bl = baseline(self.parent.data[0], self.parent.data[1], method='lin', ax = self.parent.ax)
elif method == 'Quadratic':
self.bl = baseline(self.parent.data[0], self.parent.data[1], method='quad', ax = self.parent.ax)
elif method == 'Cubic':
self.bl = baseline(self.parent.data[0], self.parent.data[1], method='cub', ax = self.parent.ax)
else:
method = None
if method is not None:
if self.blLine is None:
self.blLine = self.parent.ax.plot(self.parent.data[0], self.bl, 'k', label='Baseline')[0]
plt.legend()
else:
self.blLine.set_ydata(self.bl)
if self.parent.display:
self.parent.canvas.draw()
def apply(self):
if self.bl is not None:
self.parent.data_change([self.parent.data[0], self.parent.data[1]-self.bl])
self._quit()
def _quit(self):
""" Properly quits the spectrum removal tool """
self.parent.wm_title('Spectral Analysis')
self.parent.unbind("<Return>")
self.parent.data_change(self.parent.data, refresh_ax_only=True)
self.destroy()
return
| MarcG-LBMC-Lyos/Spectrum_Analysis | src/Baseline.py | Baseline.py | py | 2,445 | python | en | code | 0 | github-code | 13 |
20987100461 | from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.contrib.auth import login as auth_login
from django.contrib.auth import logout
from django.utils.translation import gettext_lazy as _
from .models import Contacto
# Create your views here.
class CustomUserCreationForm(UserCreationForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['username'].help_text = 'El nombre de usuario debe ser menor a 150 caracteres y no puede contener caracteres especiales'
self.fields['password1'].help_text = 'La contraseña debe tener al menos 3 caracteres'
self.fields['password2'].help_text = 'La contraseña debe tener al menos 3 caracteres'
def register(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
return redirect('CRUD:login')
else:
form = CustomUserCreationForm()
return render(request, 'autenticacion/register.html', {'form': form})
def login(request):
if request.method == 'POST':
form = AuthenticationForm(data=request.POST)
if form.is_valid():
user = form.get_user()
auth_login(request, user)
return redirect('/')
else:
form = AuthenticationForm()
return render(request, 'autenticacion/login.html', {'form': form})
def logout_view(request):
logout(request)
return redirect('CRUD:login')
@login_required(login_url='CRUD:login')
def index(request):
contactos = Contacto.objects.all()
return render(request, 'index.html', {'contactos': contactos})
@login_required(login_url='CRUD:login')
def guardar_contacto(request):
nombre = request.POST['txtnombre']
apellido = request.POST['txtapellido']
telefono = request.POST['txttelefono']
correo = request.POST['txtcorreo']
direccion = request.POST['txtdireccion']
contacto = Contacto(nombre=nombre, apellido=apellido, telefono=telefono, correo=correo, direccion=direccion)
contacto.save()
return redirect('/')
@login_required(login_url='CRUD:login')
def eliminar_contacto(request, id):
contacto = Contacto.objects.get(pk=id)
contacto.delete()
return redirect('/')
@login_required(login_url='CRUD:login')
def editar_contacto(request, id):
contactoseleccionado = Contacto.objects.get(pk=id)
contactos = Contacto.objects.all()
context = {'contactos': contactos, 'contactoseleccionado': contactoseleccionado}
return render(request, 'editar_contacto.html', context)
@login_required(login_url='CRUD:login')
def actualizar_contacto(request):
nombre = request.POST['txtnombre']
apellido = request.POST['txtapellido']
telefono = request.POST['txttelefono']
correo = request.POST['txtcorreo']
direccion = request.POST['txtdireccion']
contacto = Contacto.objects.get(pk=request.POST['id'])
contacto.nombre = nombre
contacto.apellido = apellido
contacto.telefono = telefono
contacto.correo = correo
contacto.direccion = direccion
contacto.save()
return redirect('/') | lcorralesg/django-contactos | CRUD/views.py | views.py | py | 3,239 | python | es | code | 0 | github-code | 13 |
44397871875 | import time
import numpy as np
import torch
from tqdm import tqdm
from tree_based_sampling import construct_tree, construct_tree_fat_leaves
from kndpp import kndpp_mcmc
from utils import load_ndpp_kernel, get_arguments
def TEST_kndpp_real_dataset(dataset='uk', k=10, random_state=1, ondpp=False, min_num_leaf=8, num_samples=10):
rng = np.random.RandomState(random_state)
torch.random.manual_seed(random_state if random_state else rng.randint(99))
X, W = load_ndpp_kernel(dataset, ondpp=ondpp)
n, d = X.shape
# Preprocessing - tree construction
tic = time.time()
print("[MCMC] Tree construction")
if n >= 1e5:
tree = construct_tree_fat_leaves(np.arange(n), X.T, min_num_leaf)
else:
tree = construct_tree(np.arange(n), X.T)
time_tree_mcmc = time.time() - tic
print(f"[MCMC] tree construction time: {time_tree_mcmc:.5f} sec")
# Set the mixing time to k^2
num_walks = k**2
for i in range(num_samples):
tic = time.time()
sample, num_rejects = kndpp_mcmc(tree, X, W, k, num_walks, rng)
time_sample = time.time() - tic
print(f"[MCMC] sampling time : {time_sample:.5f} sec")
print(f"[MCMC] num_rejections: {np.mean(num_rejects)}")
if __name__ == "__main__":
print("k-NDPP MCMC Experiment")
args = get_arguments()
for name_, value_ in args.__dict__.items():
print(f"{name_:<20} : {value_}")
TEST_kndpp_real_dataset(args.dataset, args.k, args.seed, args.min_num_leaf, args.num_samples)
| insuhan/ndpp-mcmc-sampling | demo_kndpp.py | demo_kndpp.py | py | 1,536 | python | en | code | 0 | github-code | 13 |
41854054460 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='challenge-calc',
version='0.1.0',
author="mconsta000",
author_email="",
packages=setuptools.find_packages(),
license='MIT',
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/mconsta000/challenge_calc",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"Topic :: Utilities",
],
package_data= {
"": ["*.csv"],
},
python_requires='>=3.6',
) | mconsta000/challenge_calc | setup.py | setup.py | py | 803 | python | en | code | 0 | github-code | 13 |
3801215013 | import argparse
import json
import sys
from . import optimisations
from . import solver
from .direction import Axis, Direction
from .template import EdgeMode, EdgeModeType
from .util import implies, increment_number, invert_components, set_all_false, set_number, set_numbers_equal
def ensure_loop_length(grid: solver.Grid, edge_mode: EdgeModeType):
for y in range(grid.height):
for x in range(grid.width):
tile_a = grid.get_tile_instance(x, y)
if x == 0 and y == 0:
grid.clauses += [[-lit] for lit in tile_a.colour]
else:
grid.clauses.append(tile_a.colour)
for direction in Direction:
dx, dy = direction.vec
tile_b = grid.get_tile_instance_offset(x, y, dx, dy, edge_mode)
x1, y1 = x + dx, y + dy
if tile_b is None:
continue
if direction.axis == Axis.HORIZONTAL:
colour_a = tile_a.colour_ux
colour_b = tile_b.colour_ux
else:
colour_a = tile_a.colour_uy
colour_b = tile_b.colour_uy
if x1 == 0 and y1 == 0:
grid.clauses += implies([tile_a.output_direction[direction]], set_number(grid.colours - 1, tile_a.colour))
else:
grid.clauses += implies([tile_a.output_direction[direction]], increment_number(tile_a.colour, tile_b.colour))
grid.clauses += implies([tile_a.input_direction[direction], *invert_components(tile_a.output_direction)],
increment_number(tile_a.colour, colour_b))
grid.clauses += implies([*invert_components(tile_b.input_direction), tile_b.output_direction[direction]],
set_numbers_equal(colour_a, tile_b.colour))
grid.clauses += implies([tile_a.underground[direction], tile_b.underground[direction]], set_numbers_equal(colour_a, colour_b))
def prevent_parallel(grid: solver.Grid, edge_mode: EdgeModeType):
for x in range(grid.width):
for y in range(grid.height):
tile_a = grid.get_tile_instance(x, y)
for direction in (Direction.RIGHT, Direction.UP):
tile_b = grid.get_tile_instance_offset(x, y, *direction.next.vec, edge_mode)
if tile_b is None:
continue
grid.clauses.append([-tile_a.underground[direction + 0], -tile_b.underground[direction + 0]])
grid.clauses.append([-tile_a.underground[direction + 2], -tile_b.underground[direction + 0]])
grid.clauses.append([-tile_a.underground[direction + 0], -tile_b.underground[direction + 2]])
grid.clauses.append([-tile_a.underground[direction + 2], -tile_b.underground[direction + 2]])
def main():
parser = argparse.ArgumentParser(description='Creates a stream of blocks of random belts')
parser.add_argument('width', type=int, help='Block width')
parser.add_argument('height', type=int, help='Block height')
parser.add_argument('--tile', action='store_true', help='Makes output blocks tilable')
parser.add_argument('--allow-empty', action='store_true', help='Allow empty tiles')
parser.add_argument('--underground-length', type=int, default=4, help='Maximum length of underground section (excludes ends)')
parser.add_argument('--no-parallel', action='store_true', help='Prevent parallel underground segments')
parser.add_argument('--all', action='store_true', help='Produce all blocks')
parser.add_argument('--label', type=str, help='Output blueprint label')
parser.add_argument('--solver', type=str, default='Glucose3', help='Backend SAT solver to use')
parser.add_argument('--single-loop', action='store_true', help='Prevent multiple loops')
parser.add_argument('--output', type=argparse.FileType('w'), nargs='?', help='Output file, if no file provided then results are sent to standard out')
args = parser.parse_args()
if args.allow_empty and args.single_loop:
raise RuntimeError('Incompatible options: allow-empty + single-loop')
if args.underground_length < 0:
raise RuntimeError('Underground length cannot be negative')
if args.single_loop:
grid = solver.Grid(args.width, args.height, args.width * args.height, args.underground_length)
else:
grid = solver.Grid(args.width, args.height, 1)
edge_mode = EdgeMode.WRAP if args.tile else EdgeMode.NO_WRAP
grid.prevent_intersection(edge_mode)
grid.prevent_bad_undergrounding(edge_mode)
if not args.tile:
grid.block_belts_through_edges()
grid.block_underground_through_edges()
optimisations.prevent_small_loops(grid)
if grid.underground_length > 0:
grid.enforce_maximum_underground_length(edge_mode)
optimisations.prevent_empty_along_underground(grid, edge_mode)
if args.no_parallel:
prevent_parallel(grid, edge_mode)
if args.single_loop:
ensure_loop_length(grid, edge_mode)
for tile in grid.iterate_tiles():
if not args.allow_empty:
grid.clauses.append(tile.all_direction) # Ban Empty
if args.underground_length == 0: # Ban underground
grid.clauses += set_all_false(tile.underground)
grid.clauses.append([-tile.is_splitter]) # Ban splitters
if args.output is not None:
with args.output:
for solution in grid.itersolve(solver=args.solver, ignore_colour=True):
json.dump(solution.tolist(), args.output)
args.output.write('\n')
if not args.all:
break
else:
for i, solution in enumerate(grid.itersolve(solver=args.solver, ignore_colour=True)):
print(json.dumps(solution.tolist()))
if i == 0:
sys.stdout.flush() # Push the first one out as fast a possible
if not args.all:
break
if __name__ == '__main__':
main()
| R-O-C-K-E-T/Factorio-SAT | factorio_sat/make_block.py | make_block.py | py | 6,101 | python | en | code | 323 | github-code | 13 |
8303233984 | import pygame
from constantes import *
from client_player import Player
from client_asteroid import Asteroid
import os
from client_stub import StubClient
import time
pygame.font.init()
BACKGROUND = pygame.transform.scale(pygame.image.load(os.path.join("images", "background-black.png")), (WIDTH, HEIGHT))
class Ui:
def __init__(self, stub: StubClient, player_order=0):
self.win = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Asteroid Destroyer")
self.run = True
self.player_order = player_order
self.player = Player(100 // GRID_SIZE, (NUM_ROWS - 1))
self.clock = pygame.time.Clock()
self.font = pygame.font.SysFont("comicsans", 40)
self.lost = False
self.stub = stub
self.players = []
def update_positions(self):
keys = pygame.key.get_pressed()
if keys[pygame.K_a]:
if self.player_order == 1:
self.stub.action(LEFT1)
elif self.player_order == 2:
self.stub.action(LEFT2)
if keys[pygame.K_d]:
if self.player_order == 1:
self.stub.action(RIGHT1)
elif self.player_order == 2:
self.stub.action(RIGHT2)
if keys[pygame.K_SPACE]:
if self.player_order == 1:
self.stub.action(UP1)
elif self.player_order == 2:
self.stub.action(UP2)
def draw_grid(self):
for x in range(0, WIDTH, GRID_SIZE):
pygame.draw.line(self.win, pygame.Color("white"), (x, 0), (x, HEIGHT))
for y in range(0, HEIGHT, GRID_SIZE):
pygame.draw.line(self.win, pygame.Color("white"), (0, y), (WIDTH, y))
def redraw_window(self, asteroids, counter, lasers):
"""
Esta função desenha todos os elementos na janela
"""
self.win.blit(BACKGROUND, (0, 0))
self.draw_grid() # Draw the grid
for asteroid in asteroids:
ast = Asteroid(asteroid[0], asteroid[1])
ast.draw(self.win)
for laser in lasers:
laser_x, laser_y = laser
pygame.draw.rect(self.win, pygame.Color("red"), (laser_x, laser_y, GRID_SIZE // 2, GRID_SIZE // 2))
grid_x = laser_x // GRID_SIZE # Convert x-coordinate to grid coordinate
grid_y = laser_y // GRID_SIZE # Convert y-coordinate to grid coordinate
for player in self.players:
player.draw(self.win)
counter_text = self.font.render(f"Counter: {counter}/6", True, pygame.Color("white"))
self.win.blit(counter_text, (10, 10))
pygame.display.update()
def run_game(self):
player = self.stub.get_player()
self.player = Player(player[0], player[1])
self.players = self.stub.get_all_players()
self.player_order = len(self.players)
while self.run:
self.players = self.stub.get_all_players()
for player in range(len(self.players)):
self.players[player] = Player(self.players[player][0], self.players[player][1])
self.update_positions()
time.sleep(0.05)
counter = self.stub.get_counter()
lasers = self.stub.get_lasers()
self.player.lasers = lasers
asteroids = self.stub.get_asteroids()
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.stub.remove_player(self.player_order)
self.run = False
if counter == 6:
self.lost = True
self.redraw_window(asteroids, counter, lasers)
if self.lost:
pygame.time.delay(2000)
self.run = False
if counter >= 6:
print("Congratulations! You destroyed 6 asteroids!")
else:
print("Game Over! You didn't destroy 10 asteroids.")
pygame.quit()
self.stub.s.close()
| tandrade855/SD2023-asteroid | Novo_jogo/ui.py | ui.py | py | 4,019 | python | en | code | 0 | github-code | 13 |
21675780622 | import sys, time
from collections import defaultdict, deque
input = sys.stdin.readline
def inverse(a, b):
graph[a][b] = 1
graph[b][a] = 0
indegree[b] += 1
indegree[a] -= 1
def topology_sort():
queue = deque([])
visited = [0] * (n+1)
answer = []
for i in range(1, n+1):
if not indegree[i]:
queue.append(i)
while queue:
if len(queue) > 1:
return "?"
x = queue.popleft()
answer.append(str(x))
for i in range(1, n+1):
if graph[x][i]:
if not visited[i]:
indegree[i] -= 1
if indegree[i] == 0:
queue.append(i)
else:
return "IMPOSSIBLE"
return answer
for i in range(int(input())):
n = int(input())
graph = [[0] * (n+1) for _ in range(n+1)]
indegree = [0] * (n+1)
info = [0] + list(map(int, input().split()))
for i in range(1, n+1):
for j in range(i+1, n+1):
graph[info[i]][info[j]] = 1
indegree[info[j]] += 1
for _ in range(int(input())):
a, b = map(int, input().split())
if graph[a][b]:
inverse(b, a)
else:
inverse(a, b)
answer = topology_sort()
if answer == "?":
print(answer)
else:
if len(answer) == n:
print(" ".join(answer))
else:
print("IMPOSSIBLE")
| SangHyunGil/Algorithm | Baekjoon/baekjoon_4195(union find).py | baekjoon_4195(union find).py | py | 1,480 | python | en | code | 0 | github-code | 13 |
73114885776 | class Array:
def __init__(self):
self.length=0
self.data=dict()
def __str__(self):
return str(self.__dict__)
#This will print the attributes of the array class(length and data) in string format when print(array_instance) is executed
def get(self,index):
return self.data[index]
def push(self,item):
self.data[self.length]=item
self.length+=1
def pop(self):
lastitem=self.data[self.length-1]
del self.data[self.length-1]
self.length-=1
return lastitem
def delete(self,index):
deleteditem=self.data[index]
for i in range(index,self.length-1):
self.data[i]=self.data[i+1]
del self.data[self.length-1]
self.length-=1
return deleteditem
arr=Array()
arr.push(3)
arr.push('hi')
arr.push(34)
arr.push(20)
arr.push('hey')
arr.push('welcome')
arr.delete(3)
arr.pop()
print(arr)
| KayWei2000/vigilant-octo-chainsaw | DS-and-Algo-Python/Arrays/Implementation.py | Implementation.py | py | 1,024 | python | en | code | 0 | github-code | 13 |
19628698693 | listanomi = []
listalanci = []
studenti = 0
lanci = 0
x = 1
while x == 1:
studenti += 1
lanci += 1
print("Inserire il nome dello studente", studenti,": ")
studente = input()
print("Inserire il lancio in metri dello studente:", lanci, ": ")
lancio = int(input())
listanomi.append(studente)
listalanci.append(lancio)
scelta = input("Per terminare l'elenco scrivere STOP, altrimenti scrivere un altra lettera o parola qualsiasi: ")
if scelta == "STOP":
break
lanciomag = max(listalanci)
print ("Il lacio dello studente vincitore è di", lanciomag, "metri")
| albertozelioli/Esercizi-pag.73-pt.2 | es28.py | es28.py | py | 604 | python | it | code | 0 | github-code | 13 |
37130045928 | from modulefinder import IMPORT_NAME
from typing import Text
from numpy import save
import streamlit as st
import pandas as pd
import base64, random
import time, datetime
from pyresparser import ResumeParser
from pdfminer3.layout import LAParams, LTTextBox
from pdfminer3.pdfpage import PDFPage
from pdfminer3.pdfinterp import PDFResourceManager
from pdfminer3.pdfinterp import PDFPageInterpreter
from pdfminer3.converter import TextConverter
import io, random
from streamlit_tags import st_tags
from PIL import Image
import pymysql
import plotly.express as px
# BUG FIX: pymysql is a module, not a callable -- opening a connection requires
# pymysql.connect().  NOTE(review): credentials are hard-coded in source;
# consider moving them to environment variables.
connection = pymysql.connect(host = "localhost", user="root", password = "Erw9jujw5er69rt!!", db = 'sra')
cursor = connection.cursor()
def pdf_reader(file):
    """Extract the full text content of a PDF file.

    Args:
        file: Path to a PDF file on disk.

    Returns:
        The concatenated text of all pages as a single string.
    """
    resource_manager = PDFResourceManager()
    fake_file_handle = io.StringIO()
    converter = TextConverter(resource_manager, fake_file_handle, laparams=LAParams())
    page_interpreter = PDFPageInterpreter(resource_manager, converter)
    with open(file, 'rb') as fh:
        # BUG FIX: the keyword is 'caching', not 'cachning' (raised TypeError).
        # Also removed the stray debug `print(page)` from the loop.
        for page in PDFPage.get_pages(fh,
                                      caching=True,
                                      check_extractable=True):
            page_interpreter.process_page(page)
        text = fake_file_handle.getvalue()
    converter.close()
    fake_file_handle.close()
    return text
def show_pdf(file_path):
    """Render a local PDF inline in the Streamlit page via a base64 data URI."""
    with open(file_path, "rb") as pdf_file:
        encoded = base64.b64encode(pdf_file.read()).decode('utf-8')
    markup = F'<iframe src="data:application/pdf;base64,{encoded}" width="700" height="1000" type="application/pdf"></iframe>'
    st.markdown(markup, unsafe_allow_html=True)
def insert_date(name, email, skills):
    """Insert one parsed resume record into the user_data table.

    Args:
        name: Candidate name.
        email: Candidate e-mail address.
        skills: Skill list as a single string.
    """
    DB_table_name = 'user_data'
    # BUG FIX: the original concatenation produced "insert intouser_data"
    # (missing trailing space), which is invalid SQL.
    insert_sql = "insert into " + DB_table_name + """
    values (0, %s, %s, %s)"""
    rec_values = (name, email, skills)
    cursor.execute(insert_sql, rec_values)
    connection.commit()
# BUG FIX: 'sbt' is undefined; streamlit is imported as 'st'.
st.set_page_config(
    page_title = "Test",
)
def run():
st.title("test")
st.sidebar.markdown("# Choose User")
activities = ["Normal User", "Recruiter"]
choice = st.sidebar.selectbox("Choose among the given options:", activities)
db_sql = """CREATE DATABASE IF NOT EXISTS SRA;"""
cursor.execute(db_sql)
DB_table_name = 'user_data'
table_sql = "CREATE TABLE IF NOT EXISTS" + DB_table_name + """
(ID INT NOT NULL AUTO_INCREMENT,
NAME varchar(100) NOT NULL,
EMAIL_ID VARCHAR(50) NOT NULL,
Skills VARCHAR(300) NOT NULL
PRIMARY KEY (ID))
"""
cursor.execute(table_sql)
if choice == 'Normal User':
pdf_file = st.file_uploader("Choose your Resume", type=["pdf"])
if pdf_file is not None:
save_image_path = './parsed_resumes/' + pdf_file.name
with open(save_image_path, "wb") as f:
f.write(pdf_file.getbuffer())
show_pdf(save_image_path)
resume_data = ResumeParser(save_image_path).get_extracted_data()
if resume_data:
resume_text = pdf_reader(save_image_path) | erikchan1000/resume-parser | flask-server/server.py | server.py | py | 3,001 | python | en | code | 0 | github-code | 13 |
11442194981 | from lucent.optvis import objectives
import torch
@objectives.wrap_objective()
def neuron(layer, n_channel, offset=(0, 0), batch=None):
    """Visualize a single neuron of a single channel.
    Defaults to the center neuron. When width and height are even numbers, we
    choose the neuron in the bottom right of the center 2x2 neurons.
    Odd width & height:               Even width & height:
    +---+---+---+                     +---+---+---+---+
    |   |   |   |                     |   |   |   |   |
    +---+---+---+                     +---+---+---+---+
    |   | X |   |                     |   |   |   |   |
    +---+---+---+                     +---+---+---+---+
    |   |   |   |                     |   |   | X |   |
    +---+---+---+                     +---+---+---+---+
                                      |   |   |   |   |
                                      +---+---+---+---+
    """
    @objectives.handle_batch(batch)
    def inner(model):
        layer_t = model(layer)
        # Center spatial position; integer division picks the bottom-right of
        # the middle 2x2 when the dimension is even (see docstring diagram).
        x, y = layer_t.shape[-1] // 2, layer_t.shape[-2] // 2
        # Negated so that optimizing (minimizing) this objective maximizes the
        # neuron's activation.
        # NOTE(review): the 5-way index assumes a 5-D activation tensor
        # (e.g. frames, batch, channel, H, W) -- confirm against the model;
        # a standard 4-D (N, C, H, W) tensor would raise here.
        return -layer_t[:, n_channel, :, y + offset[1], x + offset[0]].mean()
    return inner
@objectives.wrap_objective()
def slow(layer, decay_ratio=2):
    """L2 penalty on frame-to-frame changes, treated cyclically.

    decay_ratio is unused; it is kept for API compatibility.
    """
    def inner(model):
        acts = model(layer)
        # Squared differences between consecutive frames, summed over frames...
        forward_diff = torch.sum((acts[:-1, ...] - acts[1:, ...]) ** 2, axis=0)
        # ...plus the wrap-around term linking the last frame back to the first.
        wrap_diff = (acts[0, ...] - acts[-1, ...]) ** 2
        return torch.mean(forward_diff + wrap_diff)
    return inner
@objectives.wrap_objective()
def tv_slow(layer, decay_ratio=2):
    """L1 (total-variation style) penalty on frame-to-frame changes, cyclic.

    decay_ratio is unused; it is kept for API compatibility.
    """
    def inner(model):
        acts = model(layer)
        # Absolute differences between consecutive frames, summed over frames...
        forward_diff = torch.sum(torch.abs(acts[:-1, ...] - acts[1:, ...]), axis=0)
        # ...plus the wrap-around term linking the last frame back to the first.
        wrap_diff = torch.abs(acts[0, ...] - acts[-1, ...])
        return torch.mean(forward_diff + wrap_diff)
    return inner
@objectives.wrap_objective()
def intensity_preservation(layer, block_size, input_size):
    """Encourage neighboring images to change slowly by lying on the optic flow"""
    def inner(model):
        penalty = 0
        layer_t = model(layer)
        # For every interior frame i, tile the image into block_size x block_size
        # patches and, per patch, solve the optic-flow brightness-constancy
        # equations dx*u + dy*v = -dt in least squares.
        for i in range(1, layer_t.shape[0] - 1):
            for k in range(0, input_size - block_size + 1, block_size):
                for j in range(0, input_size - block_size + 1, block_size):
                    # Interior of the patch (1-pixel border excluded so the
                    # central differences below stay inside the patch).
                    rgx = slice(j + 1, j + block_size - 1)
                    rgy = slice(k + 1, k + block_size - 1)
                    # Spatial gradients via central differences.
                    dx = (
                        layer_t[i, :, rgy, (j + 2) : (j + block_size)]
                        - layer_t[i, :, rgy, (j) : (j + block_size - 2)]
                    )
                    dy = (
                        layer_t[i, :, (k + 2) : (k + block_size), rgx]
                        - layer_t[i, :, (k) : (k + block_size - 2), rgx]
                    )
                    # Temporal gradient from the neighboring frames (cyclic).
                    ip = (i + 1) % layer_t.shape[0]
                    im = (i - 1) % layer_t.shape[0]
                    dt = layer_t[ip, :, rgy, rgx] - layer_t[im, :, rgy, rgx]
                    # Least-squares projection of b = -dt onto the column space
                    # of A = [dx dy]; bP is the best brightness change
                    # explainable by a single flow vector for the patch.
                    # NOTE(review): A.T A is inverted directly -- singular for
                    # textureless patches; confirm inputs avoid that case.
                    A = torch.stack([dx.reshape(-1), dy.reshape(-1)], axis=1)
                    b = -dt.reshape(-1, 1)
                    M = torch.inverse(torch.matmul(A.T, A))
                    bP = torch.matmul(torch.matmul(A, M), torch.matmul(A.T, b))
                    # Penalize the residual: brightness change NOT explained by flow.
                    delta_brightness = ((bP.view(-1) - b.view(-1)) ** 2).mean()
                    penalty += delta_brightness
        return penalty
    return inner
17042293474 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.DeliveryAgencyMerchantInfo import DeliveryAgencyMerchantInfo
from alipay.aop.api.domain.DeliveryBaseInfo import DeliveryBaseInfo
from alipay.aop.api.domain.DeliveryConfig import DeliveryConfig
from alipay.aop.api.domain.DeliveryPlayConfig import DeliveryPlayConfig
from alipay.aop.api.domain.DeliveryTargetRule import DeliveryTargetRule
class AlipayMarketingActivityDeliveryCreateModel(object):
    """Request model for the Alipay marketing-activity delivery-create API.

    Auto-generated SDK boilerplate: each field is exposed as a property whose
    setter coerces plain dicts into the matching domain object, and the
    to_alipay_dict / from_alipay_dict pair converts to and from the wire
    representation.
    """
    def __init__(self):
        self._belong_merchant_info = None
        self._delivery_base_info = None
        self._delivery_booth_code = None
        self._delivery_config_list = None
        self._delivery_play_config = None
        self._delivery_target_rule = None
        self._merchant_access_mode = None
        self._out_biz_no = None
    @property
    def belong_merchant_info(self):
        return self._belong_merchant_info
    @belong_merchant_info.setter
    def belong_merchant_info(self, value):
        # Accept either a ready domain object or a raw dict from the wire.
        if isinstance(value, DeliveryAgencyMerchantInfo):
            self._belong_merchant_info = value
        else:
            self._belong_merchant_info = DeliveryAgencyMerchantInfo.from_alipay_dict(value)
    @property
    def delivery_base_info(self):
        return self._delivery_base_info
    @delivery_base_info.setter
    def delivery_base_info(self, value):
        if isinstance(value, DeliveryBaseInfo):
            self._delivery_base_info = value
        else:
            self._delivery_base_info = DeliveryBaseInfo.from_alipay_dict(value)
    @property
    def delivery_booth_code(self):
        return self._delivery_booth_code
    @delivery_booth_code.setter
    def delivery_booth_code(self, value):
        self._delivery_booth_code = value
    @property
    def delivery_config_list(self):
        return self._delivery_config_list
    @delivery_config_list.setter
    def delivery_config_list(self, value):
        # List-valued field: coerce every element to DeliveryConfig.
        if isinstance(value, list):
            self._delivery_config_list = list()
            for i in value:
                if isinstance(i, DeliveryConfig):
                    self._delivery_config_list.append(i)
                else:
                    self._delivery_config_list.append(DeliveryConfig.from_alipay_dict(i))
    @property
    def delivery_play_config(self):
        return self._delivery_play_config
    @delivery_play_config.setter
    def delivery_play_config(self, value):
        if isinstance(value, DeliveryPlayConfig):
            self._delivery_play_config = value
        else:
            self._delivery_play_config = DeliveryPlayConfig.from_alipay_dict(value)
    @property
    def delivery_target_rule(self):
        return self._delivery_target_rule
    @delivery_target_rule.setter
    def delivery_target_rule(self, value):
        if isinstance(value, DeliveryTargetRule):
            self._delivery_target_rule = value
        else:
            self._delivery_target_rule = DeliveryTargetRule.from_alipay_dict(value)
    @property
    def merchant_access_mode(self):
        return self._merchant_access_mode
    @merchant_access_mode.setter
    def merchant_access_mode(self, value):
        self._merchant_access_mode = value
    @property
    def out_biz_no(self):
        return self._out_biz_no
    @out_biz_no.setter
    def out_biz_no(self, value):
        self._out_biz_no = value
    def to_alipay_dict(self):
        """Serialize to a plain dict, skipping unset (falsy) fields."""
        params = dict()
        if self.belong_merchant_info:
            if hasattr(self.belong_merchant_info, 'to_alipay_dict'):
                params['belong_merchant_info'] = self.belong_merchant_info.to_alipay_dict()
            else:
                params['belong_merchant_info'] = self.belong_merchant_info
        if self.delivery_base_info:
            if hasattr(self.delivery_base_info, 'to_alipay_dict'):
                params['delivery_base_info'] = self.delivery_base_info.to_alipay_dict()
            else:
                params['delivery_base_info'] = self.delivery_base_info
        if self.delivery_booth_code:
            if hasattr(self.delivery_booth_code, 'to_alipay_dict'):
                params['delivery_booth_code'] = self.delivery_booth_code.to_alipay_dict()
            else:
                params['delivery_booth_code'] = self.delivery_booth_code
        if self.delivery_config_list:
            # Serialize each list element in place before emitting the list.
            if isinstance(self.delivery_config_list, list):
                for i in range(0, len(self.delivery_config_list)):
                    element = self.delivery_config_list[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.delivery_config_list[i] = element.to_alipay_dict()
            if hasattr(self.delivery_config_list, 'to_alipay_dict'):
                params['delivery_config_list'] = self.delivery_config_list.to_alipay_dict()
            else:
                params['delivery_config_list'] = self.delivery_config_list
        if self.delivery_play_config:
            if hasattr(self.delivery_play_config, 'to_alipay_dict'):
                params['delivery_play_config'] = self.delivery_play_config.to_alipay_dict()
            else:
                params['delivery_play_config'] = self.delivery_play_config
        if self.delivery_target_rule:
            if hasattr(self.delivery_target_rule, 'to_alipay_dict'):
                params['delivery_target_rule'] = self.delivery_target_rule.to_alipay_dict()
            else:
                params['delivery_target_rule'] = self.delivery_target_rule
        if self.merchant_access_mode:
            if hasattr(self.merchant_access_mode, 'to_alipay_dict'):
                params['merchant_access_mode'] = self.merchant_access_mode.to_alipay_dict()
            else:
                params['merchant_access_mode'] = self.merchant_access_mode
        if self.out_biz_no:
            if hasattr(self.out_biz_no, 'to_alipay_dict'):
                params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
            else:
                params['out_biz_no'] = self.out_biz_no
        return params
    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a wire dict; returns None for empty input."""
        if not d:
            return None
        o = AlipayMarketingActivityDeliveryCreateModel()
        if 'belong_merchant_info' in d:
            o.belong_merchant_info = d['belong_merchant_info']
        if 'delivery_base_info' in d:
            o.delivery_base_info = d['delivery_base_info']
        if 'delivery_booth_code' in d:
            o.delivery_booth_code = d['delivery_booth_code']
        if 'delivery_config_list' in d:
            o.delivery_config_list = d['delivery_config_list']
        if 'delivery_play_config' in d:
            o.delivery_play_config = d['delivery_play_config']
        if 'delivery_target_rule' in d:
            o.delivery_target_rule = d['delivery_target_rule']
        if 'merchant_access_mode' in d:
            o.merchant_access_mode = d['merchant_access_mode']
        if 'out_biz_no' in d:
            o.out_biz_no = d['out_biz_no']
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayMarketingActivityDeliveryCreateModel.py | AlipayMarketingActivityDeliveryCreateModel.py | py | 7,034 | python | en | code | 241 | github-code | 13 |
27954501743 | import telebot, time
import sqlite3
from book_stikers import *
from config import *
from book import *
from btn import *
from hh_parsing import parse_data
from handle_data import handel_vacancies
bot = telebot.TeleBot(token)
@bot.message_handler(commands=['start'])
def start(message):
    """Handle /start: register the user once in sqlite and greet them."""
    connect = sqlite3.connect('db1.db')
    try:
        cursor = connect.cursor()
        cursor.execute("""CREATE TABLE IF NOT EXISTS users_id(
            id INTEGER UNIQUE
        )""")
        connect.commit()
        people_id = message.chat.id
        # Parameterized query instead of f-string interpolation.
        cursor.execute("SELECT id FROM users_id WHERE id = ?", (people_id,))
        data = cursor.fetchone()
        if data is None:
            # First contact: remember the chat id.
            cursor.execute("INSERT INTO users_id VALUES(?);", (people_id,))
            connect.commit()
        else:
            bot.send_message(message.chat.id, '👋 Nice to see you again!')
    finally:
        # BUG FIX: the original leaked the sqlite connection on every /start.
        connect.close()
    bot.send_sticker(message.chat.id, sticker)  # opening sticker
    bot.send_message(message.chat.id, message.from_user.first_name + hello)  # personalized greeting
    bot.send_message(message.chat.id, lang, reply_markup=keyboard1)  # language-choice keyboard
keyboard1 = types.ReplyKeyboardMarkup(True)  # language-choice keyboard (labels come from the book module)
keyboard1.row(rus, eng)
def _send_vacancies(chat_id, empty_message):
    """Fetch fresh vacancies and send them to the chat, one message each.

    Args:
        chat_id: Telegram chat to post into.
        empty_message: Localized text to send when nothing new was found.
    """
    raw_data = parse_data()
    data = handel_vacancies(raw_data)
    if not data:
        bot.send_message(chat_id, empty_message)
    else:
        for text in data:
            time.sleep(0.8)  # throttle to stay under Telegram rate limits
            bot.send_message(chat_id, text, parse_mode='html')


@bot.message_handler(content_types=['text'])  # any text message
def send_text(message):
    """Route free-text menu choices for both the English and Russian menus."""
    # --- English branch ---
    if message.text == '🇬🇧 English':
        bot.send_message(message.chat.id, lang_eng, reply_markup=english_menu)
        bot.send_sticker(message.chat.id, sticker_cool)
    elif message.text == 'About us 🤖':
        bot.send_sticker(message.chat.id, sticker_man)
        bot.send_message(message.from_user.id, about_me_eng_txt, reply_markup=eng_about)
    elif message.text == 'Check 🔄':  # vacancy request
        bot.send_message(message.from_user.id, Loading_eng)
        bot.send_sticker(message.chat.id, sticker_dance)
        _send_vacancies(message.chat.id, 'There are no new vacancies yet..')
    # --- Russian branch ---
    elif message.text == '🇷🇺 Русский':
        bot.send_message(message.chat.id, lang_rus, reply_markup=russian_menu)
        bot.send_sticker(message.chat.id, sticker_alkash)
    elif message.text == 'О нас 🤖':
        bot.send_sticker(message.chat.id, sticker_man)
        bot.send_message(message.from_user.id, about_me_txt, reply_markup=about_rus)
    elif message.text == 'Проверить 🔄':
        bot.send_message(message.from_user.id, Loading_rus)
        bot.send_sticker(message.chat.id, sticker_dance)
        _send_vacancies(message.chat.id, 'новых вакансий пока нет..')
# менюшка инлайн клавиатуры (ответы)
@bot.callback_query_handler(func=lambda call: call.data == "👄 Изменить язык")
def callback_worker(call):
    """Inline-button handler: re-show the language keyboard (Russian label)."""
    # NOTE(review): a second handler with the same function name follows in
    # the original file; both stay registered because telebot captures the
    # function object at decoration time, but the duplicate name is a smell.
    if call.data == "👄 Изменить язык":
        bot.send_message(call.message.chat.id, lang, reply_markup=keyboard1)
@bot.callback_query_handler(func=lambda call: call.data == "👄 Choose language")
def callback_worker(c):
    """Inline-button handler: re-show the language keyboard (English label)."""
    if c.data == "👄 Choose language":
        bot.send_message(c.message.chat.id, lang, reply_markup=keyboard1)
@bot.message_handler()
def another_answer(message):
    """Fallback handler for unrecognized messages."""
    # NOTE(review): registered after send_text, which already claims all
    # 'text' content -- confirm this fallback is actually reachable.
    bot.send_message(message.chat.id, underst, reply_markup=russian_menu_check)
if __name__ == '__main__':
    # Start long polling; blocks until the process is stopped.
    bot.infinity_polling()
73912528979 | import requests
import datetime
import lib.btc_validator
#------------------------------------------------------------------------------
_Debug = False
#------------------------------------------------------------------------------
LatestKnownBTCPrice = None
#------------------------------------------------------------------------------
def parse_btc_url(inp):
    """Parse a BTC payment URI into its address and query parameters.

    Accepts a bare address or a "bitcoin:<address>?key=value&..." URI
    (scheme matched case-insensitively).  Returns a dict with at least
    an 'address' key plus one key per query parameter.
    """
    address = inp
    if address.lower().startswith('bitcoin:'):
        address = address[8:]  # strip the "bitcoin:" scheme prefix
    query = ''
    if '?' in address:
        address, _, query = address.partition('?')
    parsed = {
        'address': address,
    }
    if query:
        for pair in query.split('&'):
            key, _, value = pair.partition('=')
            parsed[key] = value
    return parsed
#------------------------------------------------------------------------------
def clean_btc_amount(inp):
    """Normalize a user-supplied BTC amount to a dot-decimal string.

    Falsy input (None, '', 0) becomes '0.0'; commas are treated as decimal
    points; anything after a second decimal point is discarded.
    """
    if not inp:
        return '0.0'
    if isinstance(inp, (float, int)):
        inp = str(inp)
    normalized = inp.replace(',', '.')
    if normalized.count('.') >= 2:
        # Keep only the integer part and the first fractional group.
        normalized = '.'.join(normalized.split('.')[:2])
    return normalized
#------------------------------------------------------------------------------
def fetch_transactions(btc_address):
    """Fetch the transaction list for a BTC address from the btc.com API.

    Returns a dict mapping tx hash -> {'balance_diff' (in BTC), 'block_time',
    'hash'}; returns {} on any network/API error.
    """
    url = 'https://chain.api.btc.com/v3/address/{}/tx'.format(btc_address)
    try:
        response = requests.get(url, headers={
            'User-Agent': 'curl/7.68.0',
            'Accept': '*/*',
        })
        json_response = response.json()
    except Exception as exc:
        # Best-effort error text; if requests.get itself raised, `response`
        # is unbound and the inner bare except swallows the NameError.
        txt = ''
        try:
            txt = response.text
        except:
            pass
        if txt.count('Access denied'):
            txt = 'Access denied'
        else:
            txt = str(exc)
        if _Debug:
            print('fetch_transactions ERROR:', txt)
        return {}
    result = {}
    if json_response:
        try:
            tr_list = ((json_response.get('data', {}) or {}).get('list', []) or [])
        except Exception as exc:
            if _Debug:
                print(exc, json_response)
            return {}
        for tr in tr_list:
            result[tr['hash']] = {
                # API reports satoshi; convert to BTC (1 BTC = 1e8 satoshi).
                'balance_diff': tr['balance_diff'] / 100000000.0,
                'block_time': tr['block_time'],
                'hash': tr['hash'],
            }
    if _Debug:
        print('fetch_transactions found %d' % len(result))
    return result
def verify_contract(contract_details, price_precision_matching_percent=1.0, price_precision_fixed_amount=25, time_matching_seconds_before=0.0, time_matching_seconds_after=0.0):
    """Find on-chain transactions matching a contract's amount and time window.

    A transaction matches when its balance_diff is within
    price_precision_matching_percent of the contract's btc_amount, OR within a
    fixed fiat tolerance (price_precision_fixed_amount, converted via the
    LatestKnownBTCPrice global when it is set), and when its block time falls
    inside the optional before/after window around the contract timestamp.
    Returns the list of matching transaction dicts ([] when none / no txs).
    """
    global LatestKnownBTCPrice
    # Percentage-based tolerance band around the contracted amount.
    expected_balance_diff_min = float(contract_details['btc_amount']) * ((100.0 - price_precision_matching_percent) / 100.0)
    expected_balance_diff_max = float(contract_details['btc_amount']) * ((100.0 + price_precision_matching_percent) / 100.0)
    # Fixed-amount band (degenerates to an exact match if no price is known).
    expected_balance_fixed_diff_min = float(contract_details['btc_amount'])
    expected_balance_fixed_diff_max = float(contract_details['btc_amount'])
    if LatestKnownBTCPrice is not None:
        expected_balance_fixed_diff_min -= price_precision_fixed_amount / LatestKnownBTCPrice
        expected_balance_fixed_diff_max += price_precision_fixed_amount / LatestKnownBTCPrice
    btc_transactions = fetch_transactions(contract_details['buyer']['btc_address'])
    # Contract timestamp, e.g. "Jan 02 2021 03:04 PM".
    contract_local_time = datetime.datetime.strptime('{} {}'.format(contract_details['date'], contract_details['time']), '%b %d %Y %I:%M %p')
    if _Debug:
        print('verify_contract', contract_local_time, contract_details['btc_amount'], expected_balance_diff_min, expected_balance_diff_max,
              expected_balance_fixed_diff_min, expected_balance_fixed_diff_max)
    if not btc_transactions:
        if _Debug:
            print('NO TRANSACTIONS FOUND FOR THAT BTC ADDRESS', contract_details['buyer']['btc_address'], )
        return []
    matching_transactions = []
    for tr_info in btc_transactions.values():
        balance_diff = tr_info['balance_diff']
        block_time = tr_info['block_time']
        # block_time is seconds since the epoch, rendered in local time.
        block_local_time = datetime.datetime.fromtimestamp(0) + datetime.timedelta(seconds=block_time)
        diff_seconds = (block_local_time - contract_local_time).total_seconds()
        if _Debug:
            print('    compare with %r %r %r' % (tr_info['hash'], block_local_time, balance_diff, ))
        # A zero tolerance disables the corresponding time check entirely.
        if time_matching_seconds_before:
            if diff_seconds < -time_matching_seconds_before:
                continue
        if time_matching_seconds_after:
            if diff_seconds > time_matching_seconds_after:
                continue
        if (expected_balance_diff_min <= balance_diff and balance_diff <= expected_balance_diff_max) or (
                expected_balance_fixed_diff_min <= balance_diff and balance_diff <= expected_balance_fixed_diff_max):
            matching_transactions.append(tr_info)
    if _Debug:
        if len(matching_transactions) == 1:
            print('    SUCCESS', contract_local_time, contract_details['btc_amount'], expected_balance_diff_min, expected_balance_diff_max)
        else:
            print('    NO MATCHING TRANSACTIONS FOUND', contract_local_time, expected_balance_diff_min, expected_balance_diff_max, matching_transactions, )
    return matching_transactions
def validate_btc_address(inp):
    """Return True if inp is a syntactically valid BTC address (thin wrapper)."""
    return lib.btc_validator.Validation.is_btc_address(inp)
| datahaven-net/recotra | lib/btc_util.py | btc_util.py | py | 5,357 | python | en | code | 4 | github-code | 13 |
17509395743 | from os import system, name
from fistacuffs_pkg.character_class import Character
"""
File is for some of the larger text block to help keep main code readable
Also contains display control functions
"""
def clear():
    """
    Clear the console screen: Windows ('nt') uses 'cls', POSIX uses 'clear'.
    Adapted from https://www.geeksforgeeks.org/clear-screen-python/
    """
    _ = system('cls' if name == 'nt' else 'clear')
def hp_string_format(player: object):
    """Return the hit-point cell: value right-aligned to width 10, then ' |'."""
    return " Hit Points:" + format(player.hit_points, " >10") + " |"
def name_string_format(player):
    """Pad the player's name so a closing '|' lands at the end of a 23-wide cell."""
    pad_width = 23 - len(player.name)
    # Nested format spec reproduces the original format('|', ' >' + str(width)).
    return player.name + f"{'|': >{pad_width}}"
def player_stat_display(message1: str, message2: str, player: Character):
    """
    Display a formatted version of the player stats

    A trailing '*' marks a stat already at its cap (player.level + 1);
    message1/message2 are shown in the footer box.
    """
    ### formatting ###
    format_length = 25
    disp_name = name_string_format(player)
    disp_hp = hp_string_format(player)
    ### display ###
    print("#" * format_length)
    print("| " + disp_name)
    print(f"| Level: {player.level}    |")
    print(f"| 1) Strength: {player.strength} |" if player.strength <
          player.level + 1 else f"| 1) Strength: {player.strength}*|")
    print(f"| 2) Defense: {player.defense}  |" if player.defense <
          player.level + 1 else f"| 2) Defense: {player.defense}*|")
    print(f"| 3) Speed: {player.speed}    |" if player.speed <
          player.level + 1 else f"| 3) Speed: {player.speed}*|")
    print(f"| 4) Luck: {player.luck}     |" if player.luck <
          player.level + 1 else f"| 4) Luck: {player.luck}*|")
    print("#" * format_length)
    print("|" + disp_hp)
    print("#" * format_length)
    print(message1)
    print(message2)
    print("#" * format_length)
def fight_display(message1: str, message2: str, player: Character, monster: Character):
    """
    Display formatting for the fight rounds

    Renders player and monster stat columns side by side, the HP row,
    and the four attack options.
    """
    # Name display line set up
    length_format = 25
    disp_player_name = name_string_format(player)
    disp_monster_name = name_string_format(monster)
    hp_player = hp_string_format(player)
    hp_monster = hp_string_format(monster)
    # below is the code I am keeping change the vars to the player and monster class attributes
    # show stats
    print("#" * ((length_format * 2) - 1))
    print(f"# {message1}")
    print(f"# {message2}")
    print("#" * ((length_format * 2) - 1))
    print("| " + disp_player_name + " " + disp_monster_name)
    print(
        f"| Level: {player.level}    | Level: {monster.level}    |")
    print(
        f"| Strength: {player.strength} | Strength: {monster.strength} |")
    print(
        f"| Defense: {player.defense}  | Defense: {monster.defense}  |")
    print(
        f"| Speed: {player.speed}    | Speed: {monster.speed}    |")
    print(
        f"| Luck: {player.luck}     | Luck: {monster.luck}     |")
    print("#" * ((length_format * 2) - 1))
    print("|" + hp_player + hp_monster)
    print("#" * ((length_format * 2) - 1))
    print("| 1) Attack           2) Precise Strike")
    print("| 3) Brutal Strike    4) Wild Swing")
    print("#" * ((length_format * 2) - 1))
def win_battle():
    """
    Print the message shown when the player wins a battle.
    """
    print("You are victorious.")
    # Typo fix: "ganined" -> "gained".
    print("You have gained a level\n")
def lose_battle():
    """
    Print the message shown when the player loses a battle.
    """
    print("You have lost that battle.\n")
    print("Your lack of glory will be the source of family shame for ages to come.")
    print("Really... this could not have gone worse.")
    print()
    # Typo fixes: "reserecut" -> "resurrect"; added the missing "I".
    print("I'm not sure I see the point, but I suppose I could resurrect you.\n")
def name_too_long(new_name: str) -> str:
    """
    Cap an over-long player name at 20 characters for display formatting.

    Names of 20+ characters are cut at the first space if there is one,
    otherwise hard-truncated to 20 characters. Pauses for Enter before
    returning the (possibly shortened) name.
    """
    print(f"Your name is seriously {new_name}? \n")
    if len(new_name) >= 20:
        # Prefer cutting at the first space; otherwise truncate outright.
        if " " in new_name:
            new_name = new_name[:new_name.find(" ")]
        else:
            new_name = new_name[:20]
        # Typo fix: "mouthfull" -> "mouthful".
        print(
            f"Well.. that IS a mouthful... How about I just call you {new_name}?\n")
    input("Press enter to continue...")
    return new_name
def develop_character(player: Character):
    """
    Flow control and screen output for assigning stat
    points to attributes to level up character

    Loops until player.stat_points is exhausted; each stat can only be
    raised while it is below player.level + 1 (the '*' cap in the display).
    """
    # setup display messages
    message1 = ""
    message2 = ""
    clear()
    while player.stat_points > 0:  # attribute assignment loop
        player_stat_display(message1, message2, player)
        message1 = ""
        message2 = ""
        print(f"{player.name}, you can train {player.stat_points} skills today.")
        assign_to = input(
            "Select the number of the skill you want to imporve. ").lower()
        if assign_to == "1" and player.strength < player.level + 1:
            player.modify_strength()
            message1 = "Aren't you looking buff!? Strength +1"
            clear()
        elif assign_to == "2" and player.defense < player.level + 1:
            player.modify_defense()
            message1 = "The best offense is a good defense. Defense +1"
            clear()
        elif assign_to == "3" and player.speed < player.level + 1:
            player.modify_speed()
            message1 = "You somehow just seem faster. Speed +1"
            clear()
        elif assign_to == "4" and player.luck < player.level + 1:
            player.modify_luck()
            message1 = "Umm... I am not sure how you trained this skill... but okay. Luck +1"
            clear()
        else:
            # Bad input or the chosen stat is already at its cap.
            clear()
            message1 = "Input not recognized or that stat is maxed (*)"
            message2 = "Stat can only be the character Level +1"
    clear()
    message2 = "Your training is done for now."
    player_stat_display(message1, message2, player)
    x = input("Press enter to begin battle...")
def game_story(level: str) -> str:
    """
    Return the story text for a given point in the game.

    Args:
        level: "intro" or a level number as a string ("1"-"5").

    Returns:
        The matching story paragraph, or "fail" for any unknown key.
    """
    # Table-driven lookup replaces the original if/elif chain; the story
    # strings (including their embedded indentation) are unchanged.
    stories = {
        "intro": """You have been traveling all day and come up on a small town that looks a bit rough...
    But there is an Inn and you are tired. So you enter and approach the bar to get a meal and some rest.
    The bar keep look you up and down and says.""",
        "1": """Hmm... ok well you have stumbled on the town Fistacuffs. You can stay but I am going to
    warn you that fighting is a way of life here. So before I can even pour you a drink you must
    prepair yourself. You must fight in order to stay. In fact you must fight in order to leave.\n""",
        "2": """Okay... that was not too bad but my grandmother can beat one of those in her sleep.
    Get some rest we will train more in the morning.\n""",
        "3": """Really...? I was sure you were going to lose! In fact I placed a prety hefty wager
    against you. Here is some food... you fight again tomorrow.\n""",
        "4": "Lorem Ipsum - level 4 story text - Lorem Ipsum",
        "5": "Lorem Ipsum - level 5 story text - Lorem Ipsum",
    }
    return stories.get(level, "fail")
| cbowen216/Fisticuffs | fistacuffs_pkg/display.py | display.py | py | 7,636 | python | en | code | 0 | github-code | 13 |
class Scanner():
    """One AoC 2021 day-19 scanner: its raw beacon list plus the rotated and
    translated views of that list under the currently-applied transform."""
    #Assuming that rotation preceded translation
    def __init__(self):
        self.beacons = []                           # raw beacon coords as read from input
        self.rotatedBeacons = []                    # beacons after the current rotation
        self.translatedBeacons = []                 # rotated beacons after the current translation
        self.currentRotation = ((0,1,2),(1,1,1))    # identity (permutation, inversion)
        self.currentTranslation = (0,0,0)           # scanner position once locked
        self.transformedBeaconSet = set()           # set view for fast membership tests
        self.locked = False                         # True once aligned to scanner 0's frame
        self.index = 0                              # position of this scanner in the input
    def Setify(self):
        """Rebuild the set view from the current translated beacons."""
        self.transformedBeaconSet = set(self.translatedBeacons)
    def ClearAllTransformation(self):
        """Reset both rotation and translation back to the identity."""
        self.rotatedBeacons = self.beacons.copy()
        self.translatedBeacons = self.beacons.copy()
        self.currentRotation = ((0,1,2),(1,1,1))
        self.currentTranslation = (0,0,0)
    def ClearTranslation(self):
        """Reset only the translation, keeping the current rotation."""
        self.translatedBeacons = self.rotatedBeacons.copy()
    def Rotate(self,rotation):
        """Apply a (permutation, inversion) rotation to the rotated view.

        newPos[permutation[axis]] = pos[axis] * inversion[axis]
        Also resets the translated view and the stored translation.
        """
        for i in range(len(self.rotatedBeacons)):
            pos = self.rotatedBeacons[i]
            permutation, inversion = rotation
            newPos = [0,0,0]
            for axis in range(len(permutation)):
                newPos[permutation[axis]] = pos[axis]*inversion[axis]
            self.rotatedBeacons[i] = tuple(newPos)
        self.translatedBeacons = self.rotatedBeacons.copy()
        self.currentRotation = rotation
        self.currentTranslation = (0,0,0)
    def Translate(self,translation):
        """Shift every translated beacon by the given (dx, dy, dz) offset."""
        for i in range(len(self.translatedBeacons)):
            pos = self.translatedBeacons[i]
            newPos = (pos[0]+translation[0],pos[1]+translation[1],pos[2]+translation[2])
            self.translatedBeacons[i] = newPos
        self.currentTranslation = translation
    def IsInBounds(self,pos):
        """True if pos is within the 500-unit detection cube of this scanner."""
        for i in range(3):
            if abs(pos[i]-self.currentTranslation[i])>500:
                return False
        return True
rotations = [] #permutation, inversion. screw it im doing it manually
rotations.append(((0,1,2),(1,1,1)))
rotations.append(((0,1,2),(-1,-1,1)))
rotations.append(((0,1,2),(-1,1,-1)))
rotations.append(((0,1,2),(1,-1,-1)))
rotations.append(((1,0,2),(1,-1,1)))
rotations.append(((1,0,2),(-1,1,1)))
rotations.append(((1,0,2),(1,1,-1)))
rotations.append(((1,0,2),(-1,-1,-1)))
rotations.append(((0,2,1),(1,1,-1)))
rotations.append(((0,2,1),(-1,1,1)))
rotations.append(((0,2,1),(1,-1,1)))
rotations.append(((0,2,1),(-1,-1,-1)))
rotations.append(((1,2,0),(1,1,1)))
rotations.append(((1,2,0),(-1,1,-1)))
rotations.append(((1,2,0),(-1,-1,1)))
rotations.append(((1,2,0),(1,-1,-1)))
rotations.append(((2,0,1),(1,1,1)))
rotations.append(((2,0,1),(1,-1,-1)))
rotations.append(((2,0,1),(-1,-1,1)))
rotations.append(((2,0,1),(-1,1,-1)))
rotations.append(((2,1,0),(1,1,-1)))
rotations.append(((2,1,0),(1,-1,1)))
rotations.append(((2,1,0),(-1,1,1)))
rotations.append(((2,1,0),(-1,-1,-1)))
# Parse the puzzle input: each "--- scanner N ---" header starts a new
# Scanner, and each following "x,y,z" line is one of its beacons.
# NOTE(review): `input` shadows the builtin; later top-level code relies on
# this name, so it is kept.
input = []
with open("input.txt") as FILE:
    currentScanner = None
    for line in FILE.readlines():
        line = line.strip()
        if len(line) == 0:
            continue
        if line.startswith('---'):
            currentScanner = Scanner()
            currentScanner.index = len(input)
            input.append(currentScanner)
        else:
            tokens = line.split(',')
            currentScanner.beacons.append((int(tokens[0]),int(tokens[1]),int(tokens[2])))
def TryToFindMatch(rootScanner, scanner):
    """Try to align `scanner` with the already-locked `rootScanner`.

    Exhaustively tries every rotation and, for each, every candidate
    translation that maps some beacon of `scanner` onto some beacon of
    `rootScanner`.  If 12+ beacons coincide, the transform is accepted:
    the scanner is left in that pose and marked locked.
    """
    for rotation in rotations:
        scanner.ClearAllTransformation()
        scanner.Rotate(rotation)
        #Move each test beacon to each root beacon to see if we can get sufficient alignment
        for rootBeacon in rootScanner.translatedBeacons:
            for beacon in scanner.rotatedBeacons:
                translation = (rootBeacon[0]-beacon[0],rootBeacon[1]-beacon[1],rootBeacon[2]-beacon[2])
                scanner.ClearTranslation()
                scanner.Translate(translation)
                scanner.Setify()
                matchingCount = 0
                for b in rootScanner.translatedBeacons:
                    if b in scanner.transformedBeaconSet:
                        matchingCount += 1
                # 12 overlapping beacons is the puzzle's alignment criterion.
                if matchingCount >= 12:
                    print(str(scanner.index) + " " + str(translation))
                    scanner.locked = True
                    return
#part a
# Scanner 0 defines the reference frame; repeatedly try to align every
# unlocked scanner against every locked one until all are locked.
input[0].ClearAllTransformation()
input[0].Setify()
input[0].locked = True
while True:
    for i in range(0,len(input)):
        if input[i].locked:
            for j in range(0,len(input)):
                if not input[j].locked:
                    TryToFindMatch(input[i],input[j])
    allLocked = True
    for beacon in input:
        if not beacon.locked:
            allLocked = False
            break
    if allLocked:
        break
# Union of all beacons (now in scanner 0's frame) gives the unique count.
fullBeaconSet = set()
for scanner in input:
    fullBeaconSet.update(scanner.transformedBeaconSet)
print(len(fullBeaconSet))
#part b
# Largest Manhattan distance between any pair of scanner positions
# (each locked scanner's currentTranslation is its position).
maxDistance = 0
for s1 in input:
    for s2 in input:
        distance = abs(s1.currentTranslation[0]-s2.currentTranslation[0])+abs(s1.currentTranslation[1]-s2.currentTranslation[1])+abs(s1.currentTranslation[2]-s2.currentTranslation[2])
        if distance > maxDistance:
            maxDistance = distance
print(maxDistance)
25697381924 | #!/usr/bin/env python3
import subprocess
if __name__ == '__main__':
cmd = subprocess.Popen("/snap/openldap/current/bin/ldapsearch -L -Y EXTERNAL -H ldapi:/// -b 'dc=my-domain,dc=com'", shell=True, stdout=subprocess.PIPE)
for line in cmd.stdout:
if b"numEntries" in line:
new = line.decode("utf-8").rstrip()[-1:]
print(new) | spiculedata/openldap-charm | scripts/count_objects.py | count_objects.py | py | 364 | python | en | code | 1 | github-code | 13 |
26884056595 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import glob
import re
from os.path import join
from pathlib import Path
from typing import Union, List, Optional, Dict
import gdal # # ogr
import numpy as np
import os
import osr
from dtran.metadata import Metadata
from funcs.topoflow.nc2geotiff import nc2geotiff
from tqdm import tqdm
from zipfile import ZipFile
from dtran.argtype import ArgType
from dtran.ifunc import IFunc, IFuncType
from funcs.topoflow.rti_files import generate_rti_file
class Topoflow4ClimateWriteFunc(IFunc):
    """Adapter that converts NetCDF climate files into a zipped RTS/RTI grid.

    Reads NetCDF files from input_dir, regrids them onto the DEM described
    by the bounds/resolution arguments, and writes the result to output_file.
    """
    id = "topoflow4_climate_write_func"
    description = ''' A reader-transformation-writer multi-adapter.
    Creates a zip file of RTS (and RTI) file from NetCDF (climate) files.
    '''
    inputs = {
        "input_dir": ArgType.String,
        "temp_dir": ArgType.String,
        "output_file": ArgType.String,
        "var_name": ArgType.String,
        "DEM_bounds": ArgType.String,
        "DEM_xres_arcsecs": ArgType.String,
        "DEM_yres_arcsecs": ArgType.String,
    }
    outputs = {"output_file": ArgType.String}
    friendly_name: str = "Topoflow Climate"
    func_type = IFuncType.MODEL_TRANS
    example = {
        "input_dir": "$.my_dcat_read_func.data",
        "temp_dir": "/data/mint/sample_grid_baro",
        "output_file": "/data/mint/sample_baro/climate_all.zip",
        "var_name": "HQprecipitation",
        "DEM_bounds": "34.221249999999, 7.362083333332, 36.446249999999, 9.503749999999",
        "DEM_xres_arcsecs": "30",
        "DEM_yres_arcsecs": "30"
    }
    def __init__(self, input_dir: str, temp_dir: str, output_file: Union[str, Path], var_name: str, DEM_bounds: str, DEM_xres_arcsecs: str, DEM_yres_arcsecs: str):
        # DEM_bounds is a comma-separated string; resolutions are converted
        # from arc-seconds to degrees (3600 arc-seconds per degree).
        self.DEM = {
            "bounds": [float(x.strip()) for x in DEM_bounds.split(",")],
            "xres": float(DEM_xres_arcsecs) / 3600.0,
            "yres": float(DEM_yres_arcsecs) / 3600.0,
        }
        self.var_name = var_name
        self.input_dir = str(input_dir)
        self.temp_dir = str(temp_dir)
        self.output_file = str(output_file)
    def exec(self) -> dict:
        """Create the directories, run the conversion, and return the output path."""
        for path in [self.input_dir, self.temp_dir]:
            Path(path).mkdir(exist_ok=True, parents=True)
        Path(self.output_file).parent.mkdir(exist_ok=True, parents=True)
        create_rts_from_nc_files(self.input_dir, self.temp_dir, self.output_file, self.DEM, self.var_name, IN_MEMORY=True)
        return {"output_file": self.output_file}
    def validate(self) -> bool:
        # No argument validation is performed; always accepts.
        return True
class Topoflow4ClimateWritePerMonthFunc(IFunc):
    id = "topoflow4_climate_write_per_month_func"
    description = ''' A reader-transformation-writer multi-adapter.
    Creates RTS (and RTI) files per month from NetCDF (climate) files.
    '''
    inputs = {
        "grid_dir": ArgType.String,
        "date_regex": ArgType.String,
        "output_file": ArgType.FilePath,
    }
    outputs = {}
    friendly_name: str = "Topoflow Climate Per Month"
    func_type = IFuncType.MODEL_TRANS
    example = {
        "grid_dir": f"/data/mint/gpm_grid_baro",
        "date_regex": '3B-HHR-E.MS.MRG.3IMERG.(?P<year>\d{4})(?P<month>\d{2})(?P<day>\d{2})',
        "output_file": f"/data/mint/baro/climate.rts",
    }
    def __init__(self, grid_dir: str, date_regex: str, output_file: Union[str, Path]):
        """grid_dir holds per-timestep .npz grids; date_regex must expose a
        named group 'month' that can be matched against the file names."""
        self.grid_dir = str(grid_dir)
        self.date_regex = re.compile(str(date_regex))
        self.output_file = str(output_file)
    def exec(self) -> dict:
        """Bucket the .npz grid files by month and write one RTS per month."""
        grid_files_per_month = {}
        for grid_file in glob.glob(join(self.grid_dir, '*.npz')):
            # NOTE(review): .match() returning None (name not matching the
            # regex) would raise AttributeError here — confirm inputs.
            month = self.date_regex.match(Path(grid_file).name).group('month')
            if month not in grid_files_per_month:
                grid_files_per_month[month] = []
            grid_files_per_month[month].append(grid_file)
        for month in sorted(grid_files_per_month.keys()):
            grid_files = grid_files_per_month[month]
            print(">>> Process month", month, "#files=", len(grid_files))
            # Output name: <stem>.<month>.rts next to the configured path.
            output_file = Path(self.output_file).parent / f"{Path(self.output_file).stem}.{month}.rts"
            write_grid_files_to_rts(grid_files, output_file)
        return {}
    def validate(self) -> bool:
        # No pre-flight validation implemented; always reports valid.
        return True
    def change_metadata(self, metadata: Optional[Dict[str, Metadata]]) -> Dict[str, Metadata]:
        # Metadata passes through unchanged.
        return metadata
# -------------------------------------------------------------------
def get_raster_bounds(ds, VERBOSE=False):
    """Return [xmin, ymin, xmax, ymax] of an open GDAL dataset *ds*,
    derived from its geotransform (units follow the map projection)."""
    # -------------------------------------------------------------
    # Note:  The bounds depend on the map projection and are not
    # necessarily a Geographic bounding box of lons and lats.
    # -------------------------------------------------------------
    # See:
    # https://pcjericks.github.io/py-gdalogr-cookbook/raster_layers.html
    # and search on "geotransform".  An example of gdal.SetGeoTransform
    # gives: [xmin, pixel_size, 0, ymax, 0, -pixel_size].
    # Also says args are:
    # [ulx, xDist, rtnX, uly, yDist, rtnY]
    # This is consistent with information below.
    # -------------------------------------------------------------
    # ulx = upper left x  = xmin
    # uly = upper left y  = ymax
    # lrx = lower right x = xmax
    # lry = lower right y = ymin
    # -----------------------------
    # ----------------------------------------------------------
    # Notice the strange order or parameters here is CORRECT.
    # It is not:  ulx, xres, xskew, uly, yres, yskew
    # ----------------------------------------------------------
    ulx, xres, xskew, uly, yskew, yres = ds.GetGeoTransform()
    # yres is negative for north-up rasters, so lry < uly.
    lrx = ulx + (ds.RasterXSize * xres)
    lry = uly + (ds.RasterYSize * yres)
    if (VERBOSE):
        print('ulx, uly   =', ulx, uly)
        print('lrx, lry   =', lrx, lry)
        print('xres, yres = ', xres, yres)
        print('xskew, yskew =', xskew, yskew)
        print('----------------------------------')
    #########################################################
    # Bounding box reported by gdal.info does not match
    # what the GES DISC website is saying.  The result is
    # that gdal.Warp gives all nodata values in output.
    #########################################################
    return [ulx, lry, lrx, uly]  # [xmin, ymin, xmax, ymax]
    #########################################################
    # Bounding box reported by gdal.info does not match
    # what the GES DISC website is saying. Reversing lats
    # and lons like this doesn't fix the problem.
    # (Everything below the return above is intentionally dead code
    # kept by the original author as a record of what was tried.)
    #########################################################
    ## return [lry, ulx, uly, lrx]
# get_raster_bounds()
# -------------------------------------------------------------------
def fix_raster_bounds(ds, VERBOSE=False):
    """Swap the lon/lat corner coordinates of *ds* in place via
    SetGeoTransform, to repair GES DISC NetCDF files whose corner
    lons and lats are reversed (see note below)."""
    # ------------------------------------------------------------
    # Note:  NetCDF files downloaded from the GES DISC website
    #        have corner coordinate lons and lats reversed.
    #        I checked with multiple files for which bounding
    #        box was known that when gdalinfo reports Corner
    #        Coordinates, it uses (lon, lat) vs. (lat, lon).
    #        Here, we use SetGeoTransform to fix the bounding
    #        box, so that gdal.Warp() and other gdal functions
    #        will work correctly.  (8/14/2019)
    # ------------------------------------------------------------
    ulx, xres, xskew, uly, yskew, yres = ds.GetGeoTransform()
    lrx = ulx + (ds.RasterXSize * xres)
    lry = uly + (ds.RasterYSize * yres)
    # Swapped corners: new upper-left comes from the old (lry, lrx) pair.
    ulx2 = lry
    uly2 = lrx
    lrx2 = uly
    lry2 = ulx
    # lrx2 = ulx2 + (ds.RasterXsize * xres)
    # Note:  (xres > 0, yres < 0)
    if (VERBOSE):
        # -----------------------------------------------------
        # These print out correctly, but the reported corner
        # coordinates are now really messed up.
        # Need to close or flush to make new info "stick" ?
        # -----------------------------------------------------
        print('in_bounds  =', ulx, lry, lrx, uly)  # (2,20,15,40)
        print('out_bounds =', ulx2, lry2, lrx2, uly2)  # (20,2,40,15)
        print(' ')
    # NOTE(review): argument order here is (ulx, xskew, xres, ...), which
    # differs from GDAL's documented (ulx, xres, xskew, ...) — presumably
    # deliberate for these files, but worth confirming.
    ds.SetGeoTransform((ulx2, xskew, xres, uly2, yskew, yres))
# fix_raster_bounds()
# -------------------------------------------------------------------
def bounds_disjoint(bounds1, bounds2, VERBOSE=False):
    """Return True when two bounding boxes do not overlap at all.

    Both bounds are [xmin, ymin, xmax, ymax] and assumed to be in the
    same spatial reference system (e.g. Geographic lons and lats).
    Boxes that merely touch at an edge or corner are NOT disjoint.
    See: https://gamedev.stackexchange.com/questions/586/
    """
    # Boxes overlap on an axis when each box's min does not exceed the
    # other box's max; they are disjoint when either axis fails that
    # test (De Morgan of the per-edge comparisons).
    x_overlap = (bounds1[0] <= bounds2[2]) and (bounds2[0] <= bounds1[2])
    y_overlap = (bounds1[1] <= bounds2[3]) and (bounds2[1] <= bounds1[3])
    return not (x_overlap and y_overlap)
# bounds_disjoint()
# -------------------------------------------------------------------
def gdal_regrid_to_dem_grid(ds_in, tmp_file,
                            nodata, DEM_bounds, DEM_xres, DEM_yres,
                            RESAMPLE_ALGO='bilinear'):
    """Clip and resample GDAL dataset *ds_in* onto the DEM grid.

    Writes an intermediate GeoTIFF to *tmp_file* (may be a /vsimem path)
    and returns the regridded data as a numpy array. *nodata* is passed
    as the source nodata value; DEM_bounds is [xmin, ymin, xmax, ymax].
    """
    # -----------------------------------
    # Specify the resampling algorithm
    # -----------------------------------
    algo_dict = {
        'nearest': gdal.GRA_NearestNeighbour,
        'bilinear': gdal.GRA_Bilinear,
        'cubic': gdal.GRA_Cubic,
        'cubicspline': gdal.GRA_CubicSpline,
        'lanczos': gdal.GRA_Lanczos,
        'average': gdal.GRA_Average,
        'min': gdal.GRA_Min,
        'max': gdal.GRA_Max,
        'mode': gdal.GRA_Mode,
        'med': gdal.GRA_Med}
    resample_algo = algo_dict[RESAMPLE_ALGO]
    # --------------------------------------------------
    # Use gdal.Warp to clip and resample to DEM grid
    # then save results to a GeoTIFF file (tmp_file).
    # --------------------------------------------------
    # gdal_bbox = [DEM_bounds[0], DEM_bounds[2], DEM_bounds[1], DEM_bounds[3]]
    ds_tmp = gdal.Warp(tmp_file, ds_in,
                       format='GTiff',  # (output format string)
                       outputBounds=DEM_bounds, xRes=DEM_xres, yRes=DEM_yres,
                       srcNodata=nodata,  ########
                       ### dstNodata=nodata,  ########
                       resampleAlg=resample_algo)
    grid = ds_tmp.ReadAsArray()
    # Assigning None closes the GDAL dataset and flushes tmp_file.
    ds_tmp = None  # Close tmp_file
    return grid
# gdal_regrid_to_dem_grid()
# -------------------------------------------------------------------
def resave_grid_to_geotiff(ds_in, new_file, grid1, nodata):
    """Save *grid1* as a single-band Float32 GeoTIFF *new_file*, copying
    georeferencing (geotransform + projection) from *ds_in*.

    All cells <= *nodata* are rewritten (in place, mutating grid1) to
    the fixed value -9999.0 before saving.
    """
    new_nodata = -9999.0
    grid1[grid1 <= nodata] = new_nodata
    ##### raster = gdal.Open( nc_file )
    raster = ds_in
    ncols = raster.RasterXSize
    nrows = raster.RasterYSize
    geotransform = raster.GetGeoTransform()
    originX = geotransform[0]
    originY = geotransform[3]
    pixelWidth = geotransform[1]
    pixelHeight = geotransform[5]
    driver = gdal.GetDriverByName('GTiff')
    outRaster = driver.Create(new_file, ncols, nrows, 1, gdal.GDT_Float32)
    # Skew terms are dropped (set to 0) — assumes a north-up raster.
    outRaster.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight))
    outband = outRaster.GetRasterBand(1)
    outband.WriteArray(grid1)
    outRasterSRS = osr.SpatialReference()
    outRasterSRS.ImportFromWkt(raster.GetProjectionRef())
    outRaster.SetProjection(outRasterSRS.ExportToWkt())
    # Flush to ensure pixel data actually reaches disk.
    outband.FlushCache()
# resave_grid_to_geotiff()
# -------------------------------------------------------------------
def get_tiff_file(temp_bin_dir, ncfile):
    """Return the path of the GeoTIFF corresponding to *ncfile*: same
    base name with a .tif extension, located inside *temp_bin_dir*."""
    stem = Path(ncfile).stem
    return os.path.join(temp_bin_dir, stem + ".tif")
def extract_grid_data(args):
    """Worker for one NetCDF file: convert to GeoTIFF, regrid to the DEM
    grid, and save the result as <output_dir>/<stem>.npz (key 'grid').

    *args* is a single tuple (so it can go through Pool.imap_unordered):
    (output_dir, nc_file, var_name, rts_nodata, DEM_bounds, DEM_nrows,
     DEM_ncols, DEM_xres, DEM_yres, VERBOSE, IN_MEMORY).
    Returns (gmax, BAD_FILE, grid_shape) where gmax is the max value of
    the raw grid and BAD_FILE is True when the file's bounding box does
    not overlap the DEM's.
    """
    output_dir, nc_file, var_name, rts_nodata, DEM_bounds, DEM_nrows, DEM_ncols, DEM_xres, DEM_yres, VERBOSE, IN_MEMORY = args
    # Intermediate GeoTIFFs live in GDAL's in-memory /vsimem filesystem
    # when IN_MEMORY, otherwise under /tmp.
    if IN_MEMORY:
        tif_file1 = f'/vsimem/{Path(nc_file).stem}.tmp.tif'
        tif_file2 = f'/vsimem/{Path(nc_file).stem}.tmp.2.tif'
    else:
        tif_file1 = f'/tmp/{Path(nc_file).stem}.tmp.tif'
        tif_file2 = f'/tmp/{Path(nc_file).stem}.tmp.2.tif'
    nc2geotiff(nc_file, var_name, tif_file1,
               no_data=rts_nodata)
    ds_in = gdal.Open(tif_file1)
    grid1 = ds_in.ReadAsArray()
    gmax = grid1.max()
    band = ds_in.GetRasterBand(1)
    nc_nodata = band.GetNoDataValue()
    if (VERBOSE):
        print('===============================================================')
        print('grid1: min =', grid1.min(), 'max =', grid1.max())
        print('grid1.shape =', grid1.shape)
        print('grid1.dtype =', grid1.dtype)
        print('grid1 nodata =', nc_nodata)
        w = np.where(grid1 > nc_nodata)
        nw = w[0].size
        print('grid1 # data =', nw)
        print(' ')
    # --------------------------------------
    # Use gdal.Info() to print/check info
    # --------------------------------------
    ## print( gdal.Info( ds_in ) )
    ## print( '===============================================================')
    # -----------------------------------------------
    # Check if the bounding boxes actually overlap
    # -----------------------------------------------
    ds_bounds = get_raster_bounds(ds_in, VERBOSE=False)
    BAD_FILE = False
    if (bounds_disjoint(ds_bounds, DEM_bounds)):
        print('###############################################')
        print('WARNING: Bounding boxes do not overlap.')
        print('         New grid will contain only nodata.')
        print('###############################################')
        print('file =', nc_file)
        print('ds_bounds =', ds_bounds)
        print('DEM_bounds =', DEM_bounds)
        print(' ')
        BAD_FILE = True
    # -------------------------------------------
    # Replace nodata value and save as GeoTIFF
    # -------------------------------------------
    # new_file = 'TEMP2.tif'
    # resave_grid_to_geotiff( ds_in, new_file, grid1, nodata )
    # ds_in = None  # Close the nc_file
    # ds_in = gdal.Open( new_file )   # Open the GeoTIFF file; new nodata
    # -------------------------------------------
    # Clip and resample data to the DEM's grid
    # then save to a temporary GeoTIFF file.
    # -------------------------------------------
    if not (BAD_FILE):
        grid2 = gdal_regrid_to_dem_grid(ds_in, tif_file2,
                                        rts_nodata, DEM_bounds, DEM_xres, DEM_yres,
                                        RESAMPLE_ALGO='bilinear')
        if (VERBOSE):
            print('grid2: min =', grid2.min(), 'max =', grid2.max())
            print('grid2.shape =', grid2.shape)
            print('grid2.dtype =', grid2.dtype)
            w = np.where(grid2 > rts_nodata)
            nw = w[0].size
            print('grid2 # data =', nw)
            print(' ')
        ds_in = None  # Close the tmp_file
        # Remove the second temp GeoTIFF (only created on the good path).
        if IN_MEMORY:
            gdal.Unlink(tif_file2)
        else:
            os.remove(tif_file2)
    else:
        # Disjoint bounds: emit an all-nodata grid of the DEM's shape.
        grid2 = np.zeros((DEM_nrows, DEM_ncols), dtype='float32')
        grid2 += rts_nodata
    # The first temp GeoTIFF always exists; clean it up either way.
    if IN_MEMORY:
        gdal.Unlink(tif_file1)
    else:
        os.remove(tif_file1)
    grid2 = np.float32(grid2)
    np.savez_compressed(os.path.join(output_dir, f"{Path(nc_file).stem}.npz"), grid=grid2)
    # grid2.tofile(os.path.join(output_dir, f"{Path(nc_file).stem}.bin"))
    return gmax, BAD_FILE, grid2.shape
def write_grid_files_to_rts(grid_files: List[str], rts_output_file: str):
    """
    Concatenate per-timestep grid files into one binary RTS file.

    Each entry in *grid_files* is a .npz file holding the grid under the
    key 'grid'; grids are written back-to-back with numpy's tofile().
    grid_files need to be sorted in time-order: they are sorted here by
    file name, so names must sort chronologically.
    """
    # Use a context manager so the output handle is closed even when a
    # read or write raises (the original leaked the handle on error).
    with open(rts_output_file, 'wb') as rts_unit:
        for grid_file in tqdm(sorted(grid_files)):
            # grid = np.fromfile(grid_file, dtype=np.float32)
            grid = np.load(grid_file)['grid']
            grid.tofile(rts_unit)
# fix_gpm_file_as_geotiff()
# -------------------------------------------------------------------
def create_rts_from_nc_files(nc_dir_path, temp_bin_dir, zip_file, DEM_info: dict,
                             var_name,
                             IN_MEMORY=False, VERBOSE=False):
    """Convert a directory of NetCDF climate files into a zipped RTS/RTI pair.

    Pipeline: regrid every .nc4/.nc file onto the DEM grid in parallel
    (writing per-file .npz grids to *temp_bin_dir*), concatenate the
    grids into an RTS file, generate the matching RTI header, then zip
    both into *zip_file*. DEM_info holds 'bounds', 'xres', 'yres'.
    """
    # ------------------------------------------------------
    # For info on GDAL constants, see:
    # https://gdal.org/python/osgeo.gdalconst-module.html
    # ------------------------------------------------------
    ############### TODO: this is temporary ###############
    # if (rts_file == 'TEST.rts'):
    # -----------------------------------------------------------
    DEM_bounds = DEM_info["bounds"]
    DEM_xres = DEM_info["xres"]
    DEM_yres = DEM_info["yres"]
    #######################################################
    # ------------------------------------------------
    # Get list of all nc files in working directory
    # ------------------------------------------------
    nc_file_list = sorted(glob.glob(join(nc_dir_path, '*.nc4')))
    if len(nc_file_list) == 0:
        # couldn't find .NC4, look for .NC
        nc_file_list = sorted(glob.glob(join(nc_dir_path, '*.nc')))
    count = 0
    bad_count = 0
    BAD_FILE = False
    #### rts_nodata = -9999.0    #################
    rts_nodata = 0.0  # (good for rainfall rates; not general)
    Pmax = -1
    # ------------------------
    # BINH: run multiprocessing
    from multiprocessing import Pool
    pool = Pool()
    # print(">>> preprocessing geotiff files")
    # nc_file_list_need_tif = [
    #     (fpath, get_tiff_file(temp_bin_dir, fpath), var_name, rts_nodata)
    #     for fpath in nc_file_list if not Path(get_tiff_file(temp_bin_dir, fpath)).exists()
    # ]
    # for _ in tqdm(pool.imap_unordered(fix_gpm_file_as_geotiff_wrap, nc_file_list_need_tif), total=len(nc_file_list_need_tif)):
    #     pass
    # print(">>> finish geotiff files")
    # ------------------------
    # Process the first file serially to discover the regridded shape
    # (DEM_nrows/DEM_ncols); the 100x100 here is only a placeholder used
    # on the (asserted-impossible) bad-file path.
    gmax, bad_file, shp = extract_grid_data(((temp_bin_dir, nc_file_list[0], var_name, rts_nodata, DEM_bounds, 100, 100, DEM_xres, DEM_yres, False, False)))
    assert not bad_file
    DEM_nrows, DEM_ncols = shp[0], shp[1]
    args = [
        # output_dir, nc_file, var_name, rts_nodata, DEM_bounds, DEM_nrows, DEM_ncols, DEM_xres, DEM_yres, VERBOSE, IN_MEMORY
        (temp_bin_dir, nc_file, var_name, rts_nodata, DEM_bounds, DEM_nrows, DEM_ncols, DEM_xres, DEM_yres, False, IN_MEMORY)
        for nc_file in nc_file_list
        # skip generated files
        if not os.path.exists(os.path.join(temp_bin_dir, f"{Path(nc_file).stem}.npz"))
    ]
    # Fan the remaining files out to the worker pool; track the overall
    # max value and how many files had non-overlapping bounds.
    for gmax, bad_file, _ in tqdm(pool.imap_unordered(extract_grid_data, args), total=len(args)):
        # for gmax, bad_file in tqdm((extract_grid_data(a) for a in args), total=len(args)):
        count += 1
        Pmax = max(Pmax, gmax)
        if bad_file:
            bad_count += 1
    # -------------------------
    # Write grid to RTS file
    # -------------------------
    # grid2 = np.float32(grid2)
    # grid2.tofile(rts_unit)
    # count += 1
    # -------------------------
    # Open RTS file to write
    # -------------------------
    print(">>> write to files")
    file_name = os.path.basename(zip_file)
    rts_file = f"{temp_bin_dir}/{file_name.replace('.zip', '.rts')}"
    # Glob (not the args list) so previously-generated grids are included.
    grid_files = sorted(glob.glob(join(temp_bin_dir, '*.npz')))
    write_grid_files_to_rts(grid_files, rts_file)
    # Generate RTI file
    rti_fname = f"{temp_bin_dir}/{file_name.replace('.zip', '.rti')}"
    generate_rti_file(rts_file, rti_fname, DEM_ncols, DEM_nrows, DEM_xres, DEM_yres, pixel_geom=0)
    print(' ')
    print('Max precip rate =', Pmax)
    print('bad_count =', bad_count)
    print('n_grids   =', count)
    print('Finished saving data to rts file and generating a matching rti file.')
    print(' ')
    print(f'Zipping {rts_file} and {rti_fname}...')
    with ZipFile(zip_file, 'w') as z:
        z.write(rts_file, os.path.basename(rts_file))
        z.write(rti_fname, os.path.basename(rti_fname))
    print(f"Zipping is complete. Please check {zip_file}")
| mintproject/MINT-Transformation | funcs/topoflow/write_topoflow4_climate_func.py | write_topoflow4_climate_func.py | py | 20,531 | python | en | code | 3 | github-code | 13 |
42987370180 |
from vanilla.dialogs import *
glyphsWithSupportLayer = "w"
inputFonts = getFile(
"select UFOs", allowsMultipleSelection=True, fileTypes=["ufo"])
print("Glyphs that shouldn't be in layer `support.w.middle`:")
def checkFont(f, allowed=None):
    """Report glyphs in layer `support.w.middle` of font *f* that are
    not whitelisted.

    allowed: optional collection of glyph names permitted in the support
    layer; defaults to the names in the module-level
    `glyphsWithSupportLayer` string. Returns the list of offending glyph
    names (empty when the font is clean), so callers can act on it.
    """
    if allowed is None:
        # Build the whitelist set once up front — the old code re-split
        # the string on every glyph inside the inner loop.
        allowed = set(glyphsWithSupportLayer.split(" "))
    print("\n", f.info.styleName)
    problems = []
    for layer in f.layers:
        if layer.name == "support.w.middle":
            for glyphName in layer.keys():
                if glyphName not in allowed:
                    print(" •", glyphName)
                    problems.append(glyphName)
    if not problems:
        print("\t🤖 Looks good.")
    # del layer[glyphName] # NOT WORKING YET
    return problems
# Open each selected UFO without a window, check it if it has the
# support layer, and always close it again.
for ufo_path in inputFonts:  # renamed from `file` (shadowed the builtin)
    f = OpenFont(ufo_path, showInterface=False)
    try:
        # The old loop called checkFont once per matching layer; since
        # checkFont scans all layers itself, one call per font suffices.
        if any(layer.name == "support.w.middle" for layer in f.layers):
            checkFont(f)
    finally:
        # Close the font even if the check raises.
        f.close()
| arrowtype/recursive | src/00-recursive-scripts-for-robofont/checking-similarity-between-fonts/check-support-layer-for-glyphs.py | check-support-layer-for-glyphs.py | py | 869 | python | en | code | 2,922 | github-code | 13 |
2327646466 | import MetaTrader5 as mt5
import time
from datetime import datetime
import telegram
import pytz
import schedule
import login
# DEFINE GLOBAL CONSTANTS
message_html = ""
def connect():
    """Initialize the MetaTrader 5 terminal connection using credentials
    from the local `login` module; terminate the program on failure."""
    if not mt5.initialize(
        login=login.login_id,
        server=login.server,
        password=login.login_pw,
        portable=True,
    ):
        print("User not authorized.")
        # quit() aborts the whole process — this script has nothing
        # useful to do without a terminal connection.
        quit()
def get_new_positions():
    """Fetch open MT5 positions and append a formatted summary table to
    the module-level `message_html` string; shuts the MT5 connection down
    afterwards (so connect() must be called before each invocation)."""
    positions = mt5.positions_get()
    # NOTE(review): `positions == None` would normally be `is None`;
    # also positions_get() returns an empty tuple (not None) when there
    # are simply no positions — verify which failure mode is intended.
    if positions == None:
        print("No positions, error code={}".format(mt5.last_error()))
    elif len(positions) > 0:
        global message_html
        posMsg = ""
        for position in positions:
            # Index into the raw TradePosition tuple. The field indices
            # below are assumed from the MetaTrader5 TradePosition layout
            # (symbol=16, volume=9, open time=1, prices, SL/TP, profit,
            # type=5) — TODO confirm against the MetaTrader5 docs.
            lst = list(position)
            """ print(position) """
            get_symbol = lst[16]
            get_lotsize = lst[9]
            get_oprice = datetime.fromtimestamp(lst[1]).strftime("%Y-%m-%d %I:%M:%S")
            get_price_open = lst[10]
            get_SL = lst[11]
            get_TP = lst[12]
            get_curr_price = lst[13]
            get_profit = lst[15]
            get_type = lst[5]
            # NOTE(review): only BUY/SELL mapped; any other order type
            # would raise KeyError below.
            typeStr = {0: "BUY", 1: "SELL"}
            posMsg += "{} {} {} {} {} {} {} {} {}\n".format(
                get_symbol,
                typeStr[get_type],
                get_lotsize,
                get_oprice,
                get_price_open,
                get_SL,
                get_TP,
                get_curr_price,
                get_profit,
            )
        message_html += (
            "\n\n===ACTIVE ORDERS===\nSYM TYPE LOT O/PRICE ENTRY SL TP CUR_PRICE PROFIT\n\n"
            + posMsg
            + ("\n\nTotal Positions: %s" % str(len(positions)))
        )
    # Tears down the terminal connection on every call (including the
    # no-position paths).
    mt5.shutdown()
def telegram_bot():
    # Currently only echoes the accumulated report to stdout.
    # The triple-quoted block below is the disabled Telegram send path,
    # kept verbatim so it can be re-enabled by removing the quotes.
    ## Initializing telegram bot ##
    """bot = telegram.Bot(token=login.bot_token)
    bot.send_message(
        chat_id="@%s" % login.bot_id,
        text=message_html,
        parse_mode=telegram.ParseMode.HTML,
    )"""
    print(message_html)
def exec_trade():
    """One reporting cycle: reconnect to MT5, collect open positions into
    `message_html`, then emit the report (get_new_positions() shuts the
    connection down again, hence the fresh connect() each cycle)."""
    connect()
    get_new_positions()
    telegram_bot()
def schedule_trade():
    """Run exec_trade() every 10 seconds, forever (blocking loop).

    Note: `message_html` is only ever appended to, so the report grows
    across cycles — presumably intentional, but worth confirming.
    """
    schedule.every(10).seconds.do(exec_trade)
    while True:
        schedule.run_pending()
        # Sleep to avoid busy-waiting between schedule checks.
        time.sleep(1)
if __name__ == "__main__":
schedule_trade()
| jfengg3/mt5-fx-telebot | get_mt5_opentrades.py | get_mt5_opentrades.py | py | 2,233 | python | en | code | 5 | github-code | 13 |
21466582012 | """
Specializers for various sorts of data layouts and memory alignments.
These specializers operate on a copy of the simplified array expression
representation (i.e., one with an NDIterate node). This node is replaced
with one or several ForNode nodes in a specialized order.
For auto-tuning code for tile size and OpenMP size, see
https://github.com/markflorisson88/cython/blob/_array_expressions/Cython/Utility/Vector.pyx
"""
import sys
import copy
try:
from functools import wraps
except ImportError:
def wraps(wrapped):
def decorator(wrapper):
return wrapper
return decorator
import minivisitor
import miniutils
import minitypes
import minierror
import codegen
strength_reduction = True
def debug(*args):
    """Write *args* to stderr as one space-separated line."""
    print(" ".join(map(str, args)), file=sys.stderr)
def specialize_ast(ast):
    """Return a deep copy of *ast* so a specializer can mutate it freely
    without affecting the original (simplified) expression tree."""
    return copy.deepcopy(ast)
class ASTMapper(minivisitor.VisitorTransform):
    """
    Base class to map foreign ASTs onto a minivect AST, or vice-versa.
    This sets the current node's position in the astbuilder for each
    node that is being visited, to make it easy to build new AST nodes
    without passing in source position information everywhere.
    """
    def __init__(self, context):
        super(ASTMapper, self).__init__(context)
        # Cached for convenience; subclasses build new nodes through it.
        self.astbuilder = context.astbuilder
    def getpos(self, opaque_node):
        """Return the source position of a foreign (opaque) node."""
        return self.context.getpos(opaque_node)
    def map_type(self, opaque_node, **kwds):
        "Return a mapped type for the foreign node."
        return self.context.typemapper.map_type(
            self.context.gettype(opaque_node), **kwds)
    def visit(self, node, *args):
        # Save/restore the astbuilder's current position around the
        # visit so nodes built while visiting inherit this node's pos.
        prev = self.astbuilder.pos
        self.astbuilder.pos = node.pos
        result = super(ASTMapper, self).visit(node)
        self.astbuilder.pos = prev
        return result
class BaseSpecializer(ASTMapper):
    """
    Base class for specialization. Does not perform any specialization itself.
    Provides position lookup, pending-statement bookkeeping and OpenMP
    statement fusion shared by the concrete specializers.
    """
    def getpos(self, node):
        # Minivect nodes carry their own position (unlike foreign nodes).
        return node.pos
    def get_type(self, type):
        "Resolve the type to the dtype of the array if an array type"
        if type.is_array:
            return type.dtype
        return type
    def visit(self, node, *args):
        result = super(BaseSpecializer, self).visit(node)
        if result is not None:
            # Mark every node this specializer produced/touched.
            result.is_specialized = True
        return result
    def visit_Node(self, node):
        # Generic fallthrough: visit children, keep the node itself.
        # node = copy.copy(node)
        self.visitchildren(node)
        return node
    def init_pending_stats(self, node):
        """
        Allow modifications while visiting some descendant of this node
        This happens especially while variables are resolved, which
        calls compute_inner_dim_pointer()
        """
        b = self.astbuilder
        if not node.is_function:
            # Empty statement lists that descendants may append to;
            # function nodes are assumed to already have these.
            node.prepending = b.stats()
            node.appending = b.stats()
    def handle_pending_stats(self, node):
        """
        Handle any pending statements that need to be inserted further
        up in the AST.
        """
        b = self.astbuilder
        # self.visitchildren(node.prepending)
        # self.visitchildren(node.appending)
        if node.is_function:
            # prepending is a StatListNode already part of the function body
            # assert node.prepending in list(self.treepath(node, '//StatListNode'))
            node.body = b.stats(node.body, node.appending)
        else:
            node.body = b.stats(node.prepending, node.body, node.appending)
        if not self.context.use_llvm:
            node.body = self.fuse_omp_stats(node.body)
    def get_loop(self, loop_level):
        # NOTE(review): the body reads self.loop_level, not the
        # loop_level parameter (which is only used as a truth test) —
        # looks suspicious; confirm intent.
        if loop_level:
            return self.function.for_loops[self.loop_level - 1]
        return self.function
    def fuse_omp_stats(self, node):
        """
        Fuse consecutive OpenMPConditionalNodes.
        """
        import miniast
        if not node.stats:
            return node
        b = self.astbuilder
        stats = [node.stats[0]]
        for next_stat in node.stats[1:]:
            stat = stats[-1]
            c1 = isinstance(stat, miniast.OpenMPConditionalNode)
            c2 = isinstance(next_stat, miniast.OpenMPConditionalNode)
            if c1 and c2:
                # Merge the adjacent pair into one conditional, joining
                # whichever if/else bodies are present.
                if_body = None
                else_body = None
                if stat.if_body or next_stat.if_body:
                    if_body = b.stats(stat.if_body, next_stat.if_body)
                if stat.else_body or next_stat.else_body:
                    else_body = b.stats(stat.else_body, next_stat.else_body)
                stats[-1] = b.omp_if(if_body, else_body)
            else:
                stats.append(next_stat)
        # Replace in place so existing references to node.stats stay valid.
        node.stats[:] = stats
        return node
    #
    ### Stubs for cooperative multiple inheritance
    #
    def visit_NDIterate(self, node):
        # Do not visit children
        return node
    visit_AssignmentExpr = visit_Node
    visit_ErrorHandler = visit_Node
    visit_BinopNode = visit_Node
    visit_UnopNode = visit_Node
    visit_IfNode = visit_Node
class Specializer(BaseSpecializer):
    """
    Base class for most specializers, provides some basic functionality
    for subclasses. Implement visit_* methods to specialize nodes
    to some pattern.
    Implements implementations to handle errors and cleanups, adds a return
    statement to the function and can insert debug print statements if
    context.debug is set to a true value.
    """
    # Class-level flags describing the specialization; concrete
    # subclasses override the ones that apply to them.
    is_contig_specializer = False
    is_tiled_specializer = False
    is_vectorizing_specializer = False
    is_inner_contig_specializer = False
    is_strided_specializer = False
    vectorized_equivalents = None
    def __init__(self, context, specialization_name=None):
        super(Specializer, self).__init__(context)
        if specialization_name is not None:
            self.specialization_name = specialization_name
        # Maps variable name -> first Variable node seen with that name.
        self.variables = {}
    def _index_list(self, pointer, ndim):
        "Return a list of indexed pointers"
        return [self.astbuilder.index(pointer, self.astbuilder.constant(i))
                for i in range(ndim)]
    def _debug_function_call(self, b, node):
        """
        Generate debug print statements when the specialized function is
        called.
        """
        stats = [
            b.print_(b.constant(
                "Calling function %s (%s specializer)" % (
                    node.mangled_name, self.specialization_name)))
        ]
        if self.is_vectorizing_specializer:
            stats.append(
                b.print_(b.constant("Vectorized version size=%d" %
                                    self.vector_size)))
        stats.append(
            b.print_(b.constant("shape:"), *self._index_list(node.shape,
                                                             node.ndim)))
        if self.is_tiled_specializer:
            stats.append(b.print_(b.constant("blocksize:"), self.get_blocksize()))
        if not self.is_contig_specializer:
            # Strides/data pointers only exist per-operand for
            # non-contiguous specializations.
            for idx, arg in enumerate(node.arguments):
                if arg.is_array_funcarg:
                    stats.append(b.print_(b.constant("strides operand%d:" % idx),
                                          *self._index_list(arg.strides_pointer,
                                                            arg.type.ndim)))
                    stats.append(b.print_(b.constant("data pointer %d:" % idx),
                                          arg.data_pointer))
        node.prepending.stats.append(b.stats(*stats))
    def visit_FunctionNode(self, node):
        """
        Handle a FunctionNode. Sets node.total_shape to the product of the
        shape, wraps the function's body in a
        :py:class:`minivect.miniast.ErrorHandler` if needed and adds a
        return statement.
        """
        b = self.astbuilder
        self.compute_total_shape(node)
        node.mangled_name = self.context.mangle_function_name(node.name)
        # set this so bad people can specialize during code generation time
        node.specializer = self
        node.specialization_name = self.specialization_name
        self.function = node
        if self.context.debug:
            self._debug_function_call(b, node)
        if node.body.may_error(self.context):
            node.body = b.error_handler(node.body)
        node.body = b.stats(node.body, b.return_(node.success_value))
        self.visitchildren(node)
        # if not self.is_contig_specializer:
        #     self.compute_temp_strides(b, node)
        return node
    def visit_ForNode(self, node):
        # Wrap loop bodies that may raise in an error handler as well.
        if node.body.may_error(self.context):
            node.body = self.astbuilder.error_handler(node.body)
        self.visitchildren(node)
        return node
    def visit_Variable(self, node):
        # Remember the first occurrence of each variable by name.
        if node.name not in self.variables:
            self.variables[node.name] = node
        return self.visit_Node(node)
    def get_data_pointer(self, variable, loop_level):
        # Base behavior: the raw argument data pointer, regardless of
        # loop level (subclasses refine this).
        return self.function.args[variable.name].data_pointer
    def omp_for(self, node):
        """
        Insert an OpenMP for loop with an 'if' clause that checks to see
        whether the total data size exceeds the given OpenMP auto-tuned size.
        The caller needs to adjust the size, set in the FunctionNode's
        'omp_size' attribute, depending on the number of computations.
        """
        if_clause = self.astbuilder.binop(minitypes.bool_, '>',
                                          self.function.total_shape,
                                          self.function.omp_size)
        return self.astbuilder.omp_for(node, if_clause)
class FinalSpecializer(BaseSpecializer):
"""
Perform any final specialization and optimizations. The initial specializer
is concerned with specializing for the given data layouts, whereas this
specializer is concerned with any rewriting of the AST to support
fundamental operations.
"""
vectorized_equivalents = None
in_lhs_expr = False
should_vectorize = False
def __init__(self, context, previous_specializer):
super(FinalSpecializer, self).__init__(context)
self.previous_specializer = previous_specializer
self.sp = previous_specializer
self.error_handlers = []
self.loop_level = 0
self.variables = {}
self.strides = {}
self.outer_pointers = {}
self.vector_temps = {}
def run_optimizations(self, node):
"""
Run any optimizations on the AST. Currently only loop-invariant code
motion is implemented when broadcasting information is present.
"""
import optimize
# TODO: support vectorized specializations
if (self.context.optimize_broadcasting and not
self.sp.is_contig_specializer or
self.sp.is_vectorizing_specializer):
optimizer = optimize.HoistBroadcastingExpressions(self.context)
node = optimizer.visit(node)
return node
def visit_Variable(self, node):
"""
Process variables, which includes arrays and scalars. For arrays,
this means retrieving the element from the array. Performs strength
reduction for index calculation of array variables.
"""
if node.type.is_array:
tiled = self.sp.is_tiled_specializer
last_loop_level = (self.loop_level == self.function.ndim or
(self.sp.is_vectorizing_specializer and not
self.should_vectorize))
inner_contig = (
self.sp.is_inner_contig_specializer and
(last_loop_level or node.hoisted) and
(not self.sp.is_strided_specializer or
self.sp.matching_contiguity(node.type)))
contig = self.sp.is_contig_specializer
# Get the array data pointer
arg_data_pointer = self.function.args[node.name].data_pointer
if self.sp.is_contig_specializer:
# Contiguous, no strength reduction needed
data_pointer = arg_data_pointer
else:
# Compute strength reduction pointers for all dimensions leading
# up the the dimension this variable occurs in.
self.compute_temp_strides(node, inner_contig, tiled=tiled)
data_pointer = self.compute_data_pointer(
node, arg_data_pointer, inner_contig, tiled)
# Get the loop level corresponding to the occurrence of the variable
for_node = self.function.for_loops[self.loop_level - 1]
if self.should_vectorize:
return self.handle_vector_variable(node, data_pointer, for_node,
inner_contig, contig)
else:
element = self.element_location(data_pointer, for_node,
inner_contig, contig,
tiled=tiled, variable=node)
return self.astbuilder.resolved_variable(
node.name, node.type, element)
else:
return node
def visit_VectorVariable(self, vector_variable):
# use visit_Variable, since is does the strength reduction and such
return self.visit_Variable(vector_variable.variable)
def element_location(self, data_pointer, for_node,
inner_contig, is_contig, tiled, variable):
"Return the element in the array for the current index set"
b = self.astbuilder
def debug(item):
if self.context.debug_elements:
string = b.constant("Referenced element from %s:" %
variable.name)
print_ = self.visit(b.print_(string, item))
for_node = self.function.for_loops[self.loop_level - 1]
for_node.prepending.stats.append(print_)
if not is_contig:
stats = []
for i, stride in enumerate(self.strides[variable]):
if stride is not None:
string = b.constant("%s step[%d]:" % (variable.name, i))
stats.append(b.print_(string, stride))
print_steps = b.stats(*stats)
self.function.prepending.stats.append(self.visit(print_steps))
return item
if inner_contig or is_contig:
# contiguous access, index the data pointer in the inner dimension
return debug(b.index(data_pointer, for_node.index))
else:
# strided access, this dimension is performing strength reduction,
# so we just need to dereference the data pointer
return debug(b.dereference(data_pointer))
def handle_vector_variable(self, variable, data_pointer, for_node,
inner_contig, is_contig):
"Same as `element_location`, except for Vector variables"
b = self.astbuilder
# For array operands, load reads into registers, and store
# writes back into the data pointer. For assignment to a register
# we use a vector type, for assignment to a data pointer, the
# data pointer type
if inner_contig or is_contig:
data_pointer = b.add(data_pointer, for_node.index)
if self.in_lhs_expr:
return data_pointer
else:
variable = b.vector_variable(variable, self.sp.vector_size)
if variable in self.vector_temps:
return self.vector_temps[variable]
rhs = b.vector_load(data_pointer, self.sp.vector_size)
temp = b.temp(variable.type, 'xmm')
self.vector_temps[variable] = temp
for_node.prepending.stats.append(b.assign(temp, rhs))
return self.visit(temp)
    def compute_temp_strides(self, variable, handle_inner_dim, tiled=False):
        """
        Compute the temporary strides needed for the strength reduction. These
        should be small constants, so division should be fast. We could use
        char * instead of element_type *, but it's nicer to avoid the casts.
        """
        # NOTE(review): the 'tiled' parameter is currently unused here.
        b = self.astbuilder
        if variable in self.strides:
            # Already computed for this variable; reuse the cached temps.
            return self.strides[variable]
        start = 0
        stop = variable.type.ndim
        if handle_inner_dim:
            # Skip the dimension handled directly: the first for Fortran
            # order, the last for C order.
            if self.sp.order == "F":
                start = 1
            else:
                stop = stop - 1
        self.strides[variable] = strides = [None] * len(self.function.for_loops)
        for dim in range(start, stop):
            # temp_stride = strides[dim] / sizeof(dtype), hoisted into the
            # function preamble so the division happens only once.
            stride = b.stride(variable, dim)
            temp_stride = b.temp(stride.type.unqualify("const"),
                                 name="%s_stride%d" % (variable.name, dim))
            stat = b.assign(temp_stride,
                            b.div(stride, b.sizeof(variable.type.dtype)))
            self.function.prepending.stats.append(stat)
            strides[dim] = temp_stride
        return strides
    def compute_data_pointer(self, variable, argument_data_pointer,
                             handle_inner_dim, tiled):
        """
        Compute the data pointer for the dimension the variable is located in
        (the loop level). This involves generating a strength reduction in
        each outer dimension.
        Variables referring to the same array may be found on different
        loop levels.
        """
        b = self.astbuilder
        assert variable.type.is_array
        pointer_type = argument_data_pointer.type.unqualify("const")
        loop_level = self.loop_level
        # Operands may have fewer dimensions than the loop nest
        # (broadcasting); 'offset' counts the leading loop dims to skip.
        offset = self.function.ndim - variable.type.ndim
        stop = loop_level - handle_inner_dim
        if self.outer_pointers.get(variable):
            # Resume from the pointers already generated for outer levels.
            start = len(self.outer_pointers[variable])
            if stop <= start:
                return self.outer_pointers[variable][stop - 1]
        else:
            self.outer_pointers[variable] = []
            start = offset
        outer_pointers = self.outer_pointers[variable]
        temp = argument_data_pointer
        for_loops = self.function.for_loops[start:stop]
        # Loop over all outer loop levels
        for i, for_node in zip(range(start, stop), for_loops):
            if for_node.dim < offset:
                continue
            # Allocate a temp_data_pointer on each outer loop level
            temp = b.temp(pointer_type)
            dim = for_node.dim - offset
            if not outer_pointers: #i == offset:
                outer_node = self.function
                outer_pointer = self.function.args[variable.name].data_pointer
            else:
                outer_node = self.function.for_loops[i - 1]
                outer_pointer = outer_pointers[-1]
            # Generate: temp_data_pointer = outer_data_pointer
            assmt = b.assign(temp, outer_pointer)
            outer_node.prepending.stats.append(assmt)
            stride = original_stride = self.strides[variable][dim]
            assert stride is not None, ('strides', self.strides[variable],
                                        'dim', dim, 'start', start,
                                        'stop', stop, 'offset', offset,
                                        'specializer', self.sp)
            if for_node.is_controlling_loop:
                # controlling loop for tiled specializations, multiply by the
                # tiling blocksize for this dimension
                stride = b.mul(stride, for_node.blocksize)
            # Generate: temp_data_pointer += stride
            stat = b.assign(temp, b.add(temp, stride))
            if not outer_pointers:
                # Outermost loop level, generate some additional OpenMP
                # parallel-loop-compatible code
                # Generate: temp_data_pointer = data_pointer + i * stride0
                omp_body = b.assign(temp, b.add(outer_pointer,
                                b.mul(original_stride, for_node.index)))
                for_node.prepending.stats.append(b.omp_if(omp_body))
                for_node.appending.stats.append(b.omp_if(None, stat))
                omp_for = self.treepath_first(self.function, '//OpenMPLoopNode')
                if omp_for is not None:
                    omp_for.privates.append(temp)
            else:
                for_node.appending.stats.append(stat)
            self.outer_pointers[variable].append(temp)
        return temp
    def visit_FunctionNode(self, node):
        """Entry point: run optimizations, then specialize the function."""
        self.function = node
        self.indices = self.sp.indices
        node = self.run_optimizations(node)
        self.init_pending_stats(node)
        self.visitchildren(node)
        self.handle_pending_stats(node)
        return node
    def _visit_set_vectorizing_flag(self, node):
        # Visit children with 'should_vectorize' taken from the node, then
        # restore the previous value of the flag.
        was_vectorizing = self.should_vectorize
        self.should_vectorize = node.should_vectorize
        self.visitchildren(node)
        self.should_vectorize = was_vectorizing
        return node
    def visit_ForNode(self, node):
        # Only loops that belong to the N-dimensional loop nest (or fixup
        # loops) count towards the logical loop level.
        is_nd_fornode = node in self.function.for_loops or node.is_fixup
        self.loop_level += is_nd_fornode
        self.init_pending_stats(node)
        self._visit_set_vectorizing_flag(node)
        self.handle_pending_stats(node)
        self.loop_level -= is_nd_fornode
        return node
    def visit_IfNode(self, node):
        # A fixup 'if' (single remaining element) counts as a loop level.
        self.loop_level += node.is_fixup
        result = self._visit_set_vectorizing_flag(node)
        self.loop_level -= node.is_fixup
        return result
    def visit_AssignmentExpr(self, node):
        """Specialize an assignment; vector stores become statements."""
        # assignment expressions should not be nested
        self.in_lhs_expr = True
        node.lhs = self.visit(node.lhs)
        self.in_lhs_expr = False
        node.rhs = self.visit(node.rhs)
        if node.lhs.type.is_pointer and node.rhs.type.is_vector:
            # This expression must be a statement
            return self.astbuilder.vector_store(node.lhs, node.rhs)
        return node
    def visit_TempNode(self, node):
        # Nothing special to specialize; just visit any children.
        self.visitchildren(node)
        return node
    def visit_BinopNode(self, node):
        type = self.get_type(node.type)
        if node.operator == '%' and type.is_float and not self.context.use_llvm:
            # rewrite modulo for floats to fmod()
            b = self.astbuilder
            functype = minitypes.FunctionType(return_type=type,
                                              args=[type, type])
            # Select fmodf/fmod/fmodl based on the float width.
            if type.itemsize == 4:
                modifier = "f"
            elif type.itemsize == 8:
                modifier = ""
            else:
                modifier = "l"
            fmod = b.variable(functype, "fmod%s" % modifier)
            return self.visit(b.funccall(fmod, [node.lhs, node.rhs]))
        self.visitchildren(node)
        return node
    def visit_UnopNode(self, node):
        if node.type.is_vector and node.operator == '-':
            # rewrite unary subtract as '(0 - operand)' since there is no
            # vector negation operation
            type = node.operand.type
            if type.is_float:
                constant = 0.0
            else:
                constant = 0
            lhs = self.astbuilder.vector_const(type, constant)
            node = self.astbuilder.binop(type, '-', lhs, node.operand)
            return self.visit(node)
        self.visitchildren(node)
        return node
def visit_DereferenceNode(self, node):
node.operand = self.visit(node.operand)
if self.context.llvm:
node = self.astbuilder.index(node, self.astbuilder.constant(0))
return node
    def visit_IfElseExprNode(self, node):
        self.visitchildren(node)
        if self.context.use_llvm:
            # Rewrite 'cond ? x : y' expressions to if/else statements
            b = self.astbuilder
            temp = b.temp(node.lhs.type, name='if_temp')
            stat = b.if_else(node.cond, b.assign(temp, node.lhs),
                             b.assign(temp, node.rhs))
            # Hoist the statement into the current loop body and return the
            # temporary in the expression's place.
            for_node = self.get_loop(self.loop_level)
            for_node.prepending.stats.append(stat)
            node = temp
        return node
    def visit_PrintNode(self, node):
        """Lower a print statement to a variadic printf() call."""
        b = self.astbuilder
        printf_type = minitypes.FunctionType(
            return_type=minitypes.int_,
            args=[minitypes.CStringType()],
            is_vararg=True)
        printf = b.funcname(printf_type, 'printf')
        args = []
        specifiers = []
        for i, arg in enumerate(node.args):
            specifier, arg = codegen.format_specifier(arg, b)
            args.append(arg)
            specifiers.append(specifier)
        # Build the format string from the per-argument specifiers.
        args.insert(0, b.constant(" ".join(specifiers) + "\n"))
        return b.expr_stat(b.funccall(printf, args))
def visit_PositionInfoNode(self, node):
"""
Replace with the setting of positional source information in case
of an error.
"""
b = self.astbuidler
posinfo = self.function.posinfo
if posinfo:
pos = node.posinfo
return b.stats(
b.assign(b.deref(posinfo.filename), b.constant(pos.filename)),
b.assign(b.deref(posinfo.lineno), b.constant(pos.lineno)),
b.assign(b.deref(posinfo.column), b.constant(pos.column)))
    def visit_RaiseNode(self, node):
        """
        Generate a call to PyErr_Format() to set an exception.
        """
        from minitypes import FunctionType, object_
        b = self.astbuilder
        # PyErr_Format(exc, msg, *fmt_args) -- all arguments object-typed.
        args = [object_] * (2 + len(node.fmt_args))
        functype = FunctionType(return_type=object_, args=args)
        return b.expr_stat(
            b.funccall(b.funcname(functype, "PyErr_Format"),
                       [node.exc_var, node.msg_val] + node.fmt_args))
    def visit_ErrorHandler(self, node):
        """
        See miniast.ErrorHandler for an explanation of what this needs to do.
        """
        b = self.astbuilder
        node.error_variable = b.temp(minitypes.bool_)
        node.error_var_init = b.assign(node.error_variable, 0)
        node.cleanup_jump = b.jump(node.cleanup_label)
        node.error_target_label = b.jump_target(node.error_label)
        node.cleanup_target_label = b.jump_target(node.cleanup_label)
        node.error_set = b.assign(node.error_variable, 1)
        # Nested handlers cascade to the enclosing handler's error label;
        # the outermost handler returns the function's error value.
        if self.error_handlers:
            cascade_code = b.jump(self.error_handlers[-1].error_label)
        else:
            cascade_code = b.return_(self.function.error_value)
        node.cascade = b.if_(node.error_variable, cascade_code)
        # Track the handler stack while visiting children so nested
        # handlers see this one as their enclosing handler.
        self.error_handlers.append(node)
        self.visitchildren(node)
        self.error_handlers.pop()
        return node
    def visit_PragmaForLoopNode(self, node):
        # Vectorizing specializers emit explicit SIMD code, so drop the
        # compiler vectorization pragma and keep only the loop itself.
        if self.previous_specializer.is_vectorizing_specializer:
            return self.visit(node.for_node)
        else:
            self.visitchildren(node)
            return node
    def visit_StatListNode(self, node):
        # Merge adjacent OpenMP constructs after specializing children.
        self.visitchildren(node)
        return self.fuse_omp_stats(node)
class OrderedSpecializer(Specializer):
    """
    Specializer that understands C and Fortran data layout orders.
    """
    vectorized_equivalents = None
    def compute_total_shape(self, node):
        """
        Compute the product of the shape (entire length of array output).
        Sets the total shape as attribute of the function (total_shape).
        """
        b = self.astbuilder
        # compute the product of the shape and insert it into the function body
        extents = [b.index(node.shape, b.constant(i))
                   for i in range(node.ndim)]
        node.total_shape = b.temp(node.shape.type.base_type)
        init_shape = b.assign(node.total_shape, reduce(b.mul, extents),
                              may_reorder=True)
        node.body = b.stats(init_shape, node.body)
        return node.total_shape
    def loop_order(self, order, ndim=None):
        """
        Returns arguments to (x)range() to process something in C or Fortran
        order.
        """
        if ndim is None:
            ndim = self.function.ndim
        if order == "C":
            return self.c_loop_order(ndim)
        else:
            return self.f_loop_order(ndim)
    def c_loop_order(self, ndim):
        # C order: iterate dimensions from last to first.
        return ndim - 1, -1, -1
    def f_loop_order(self, ndim):
        # Fortran order: iterate dimensions from first to last.
        return 0, ndim, 1
    def order_indices(self, indices):
        """
        Put the indices of the for loops in the right iteration order. The
        loops were build backwards (Fortran order), so for C we need to
        reverse them.
        Note: the indices are always ordered on the dimension they index
        """
        if self.order == "C":
            indices.reverse()
    def ordered_loop(self, node, result_indices, lower=None, upper=None,
                     step=None, loop_order=None):
        """
        Return a ForNode ordered in C or Fortran order.
        """
        b = self.astbuilder
        if lower is None:
            lower = lambda i: None
        if upper is None:
            upper = lambda i: b.shape_index(i, self.function)
        if loop_order is None:
            loop_order = self.loop_order(self.order)
        indices = []
        for_loops = []
        for i in range(*loop_order):
            # Build loops inside-out; each new loop wraps the previous node.
            node = b.for_range_upwards(node, lower=lower(i), upper=upper(i),
                                       step=step)
            node.dim = i
            for_loops.append(node)
            indices.append(node.target)
        self.order_indices(indices)
        result_indices.extend(indices)
        # for_loops was appended innermost-first; return it outermost-first.
        return for_loops[::-1], node
    def _index_pointer(self, pointer, indices, strides):
        """
        Return an element for an N-dimensional index into a strided array.
        """
        b = self.astbuilder
        # Index in char units (strides are byte counts), then cast back to
        # the element pointer type.
        return b.index_multiple(
            b.cast(pointer, minitypes.char.pointer()),
            [b.mul(index, stride) for index, stride in zip(indices, strides)],
            dest_pointer_type=pointer.type)
    def _strided_element_location(self, node, indices=None, strides_index_offset=0,
                                  ndim=None, pointer=None):
        """
        Like _index_pointer, but given only an array operand indices. It first
        needs to get the data pointer and stride nodes.
        """
        indices = indices or self.indices
        b = self.astbuilder
        if ndim is None:
            ndim = node.type.ndim
        if pointer is None:
            pointer = b.data_pointer(node)
        # Broadcasting: only the trailing ndim indices apply to this operand.
        indices = [index for index in indices[len(indices) - ndim:]]
        strides = [b.stride(node, i + strides_index_offset)
                   for i, idx in enumerate(indices)]
        node = self._index_pointer(pointer, indices, strides)
        self.visitchildren(node)
        return node
def get_any_array_argument(arguments):
    """Return the first argument with an array type, or None if absent."""
    for argument in arguments:
        arg_type = argument.type
        if arg_type is not None and arg_type.is_array:
            return argument
    return None
class CanVectorizeVisitor(minivisitor.TreeVisitor):
    """
    Determines whether we can vectorize a given expression. Currently only
    support arithmetic on floats and doubles.
    """
    can_vectorize = True
    def _valid_type(self, type):
        # Only 4- or 8-byte floats (float/double) are SIMD-supported.
        if type.is_array:
            type = type.dtype
        return type.is_float and type.itemsize in (4, 8)
    def visit_FunctionNode(self, node):
        # All array operands must share one supported dtype.
        # NOTE(review): assumes at least one array argument; an empty
        # 'array_dtypes' would raise IndexError below -- confirm callers.
        array_dtypes = [
            arg.type.dtype for arg in node.arguments[1:]
            if arg.type is not None and arg.type.is_array]
        all_the_same = miniutils.all(
            dtype == array_dtypes[0] for dtype in array_dtypes)
        self.can_vectorize = all_the_same and self._valid_type(array_dtypes[0])
        if self.can_vectorize:
            self.visitchildren(node)
    def visit_BinopNode(self, node):
        # Mixed-type or unsupported-type arithmetic disables vectorization.
        if node.lhs.type != node.rhs.type or not self._valid_type(node.lhs.type):
            self.can_vectorize = False
        else:
            self.visitchildren(node)
    def visit_UnopNode(self, node):
        if self._valid_type(node.type):
            self.visitchildren(node)
        else:
            self.can_vectorize = False
    def visit_FuncCallNode(self, node):
        # No vectorized equivalents for function calls.
        self.can_vectorize = False
    def visit_NodeWrapper(self, node):
        # TODO: dispatch to self.context.can_vectorize
        self.can_vectorize = False
    def visit_Node(self, node):
        self.visitchildren(node)
def visit_if_should_vectorize(func):
    """
    Visits the given method if we are vectorizing, otherwise visit the
    superclass' method of :py:class:`VectorizingSpecialization`
    """
    @wraps(func)
    def wrapper(self, node):
        if self.should_vectorize:
            return func(self, node)
        else:
            # Delegate to the non-vectorizing implementation in the MRO.
            method = getattr(super(VectorizingSpecializer, self), func.__name__)
            return method(node)
    return wrapper
class VectorizingSpecializer(Specializer):
    """
    Generate explicitly vectorized code if supported.
    :param vector_size: number of 32-bit operands in the vector
    """
    is_vectorizing_specializer = True
    can_vectorize_visitor = CanVectorizeVisitor
    vectorized_equivalents = None
    # set in subclasses
    vector_size = None
    def __init__(self, context, specialization_name=None):
        super(VectorizingSpecializer, self).__init__(context,
                                                     specialization_name)
        # temporary registers
        self.temps = {}
        # Flag to vectorize expressions in a vectorized loop
        self.should_vectorize = True
    @classmethod
    def can_vectorize(cls, context, ast):
        """Report whether the expression 'ast' supports SIMD vectorization."""
        visitor = cls.can_vectorize_visitor(context)
        visitor.visit(ast)
        # print visitor.can_vectorize, ast.pos
        return visitor.can_vectorize
    @visit_if_should_vectorize
    def visit_FunctionNode(self, node):
        # Remember the shared array dtype; it determines how many elements
        # fit in one SIMD vector (see process_inner_forloop).
        self.dtype = get_any_array_argument(node.arguments).type.dtype
        return super(VectorizingSpecializer, self).visit_FunctionNode(node)
    @visit_if_should_vectorize
    def visit_Variable(self, variable):
        if variable.type.is_array:
            variable = self.astbuilder.vector_variable(variable, self.vector_size)
        return variable
    @visit_if_should_vectorize
    def visit_BinopNode(self, node):
        self.visitchildren(node)
        if node.lhs.type.is_vector:
            # TODO: promotion
            node = self.astbuilder.vector_binop(node.operator,
                                                node.lhs, node.rhs)
        return node
    @visit_if_should_vectorize
    def visit_UnopNode(self, node):
        self.visitchildren(node)
        if node.operand.type.is_vector:
            # Unary '+' is a no-op on vectors; '~' is unsupported.
            if node.operator == '+':
                node = node.operand
            else:
                assert node.operator == '~'
                raise NotImplementedError
            node = self.astbuilder.vector_unop(node.type, node.operator,
                                               self.visit(node.operand))
        return node
    @visit_if_should_vectorize
    def visit_ForNode(self, node):
        node.should_vectorize = True
        self.visitchildren(node)
        return node
    @visit_if_should_vectorize
    def visit_IfNode(self, node):
        node.should_vectorize = True
        self.visitchildren(node)
        return node
    def _modify_inner_loop(self, b, elements_per_vector, node, step):
        """
        Turn 'for (i = 0; i < N; i++)' into 'for (i = 0; i < N - 3; i += 4)'
        for a vector size of 4. In case the data size is not a multiple of
        4, we can only SIMDize that part, and need a fixup loop for any
        remaining elements. Returns the upper limit and the counter (N and i).
        """
        i = node.step.lhs
        N = node.condition.rhs
        # Adjust step
        step = b.mul(step, b.constant(elements_per_vector))
        node.step = b.assign_expr(i, b.add(i, step))
        # Adjust condition
        vsize_minus_one = b.constant(elements_per_vector - 1)
        node.condition.rhs = b.sub(N, vsize_minus_one)
        return N, i
    def fixup_loop(self, i, N, body, elements_per_vector):
        """
        Generate a loop to fix up any remaining elements that didn't fit into
        our SIMD vectors.
        """
        b = self.astbuilder
        cond = b.binop(minitypes.bool_, '<', i, N)
        if elements_per_vector - 1 == 1:
            # At most one element can remain: a plain 'if' suffices.
            fixup_loop = b.if_(cond, body)
        else:
            # fixup_loop = b.for_range_upwards(body, lower=i, upper=N)
            init = b.noop_expr()
            step = b.assign_expr(i, b.add(i, b.constant(1)))
            fixup_loop = b.for_(body, init, cond, step, index=i)
        fixup_loop.is_fixup = True
        # Specialize the fixup loop as scalar (non-vectorized) code.
        self.should_vectorize = False
        fixup_loop = self.visit(fixup_loop)
        self.should_vectorize = True
        return fixup_loop
    def process_inner_forloop(self, node, original_expression, step=None):
        """
        Process an inner loop, adjusting the step accordingly and injecting
        any temporary assignments where necessary. Returns the fixup loop,
        needed when the data size is not a multiple of the vector size.
        :param original_expression: original, unmodified, array expression (
            the body of the NDIterate node)
        """
        b = self.astbuilder
        if step is None:
            step = b.constant(1)
        # Bug fix: use floor division so elements_per_vector is an int under
        # Python 3 as well; '/' would yield a float here, producing float
        # loop steps and bounds constants in the generated code.
        elements_per_vector = self.vector_size * 4 // self.dtype.itemsize
        N, i = self._modify_inner_loop(b, elements_per_vector, node, step)
        return self.fixup_loop(i, N, original_expression, elements_per_vector)
class StridedCInnerContigSpecializer(OrderedSpecializer):
    """
    Specialize on the first or last dimension being contiguous (depending
    on the 'order' attribute).
    """
    specialization_name = "inner_contig"
    order = "C"
    is_inner_contig_specializer = True
    vectorized_equivalents = None
    def __init__(self, context, specialization_name=None):
        super(StridedCInnerContigSpecializer, self).__init__(
            context, specialization_name)
        self.indices = []
    def _generate_inner_loop(self, b, node):
        """
        Generate innermost loop, injecting the pointer assignments in the
        right place
        """
        loop = node
        if len(self.indices) > 1:
            # NOTE(review): 'loop = node.body' does not descend further on
            # each iteration (it is not 'loop = loop.body'), so for four or
            # more dimensions 'loop' stays one level below the root --
            # confirm whether this is intended.
            for index in self.indices[:-2]:
                loop = node.body
            self.inner_loop = loop.body
            loop.body = b.pragma_for(self.inner_loop)
            node = self.omp_for(node)
        else:
            self.inner_loop = loop
            node = self.omp_for(b.pragma_for(self.inner_loop))
        return loop, node
    def _vectorize_inner_loop(self, b, loop, node, original_expr):
        "Vectorize the inner loop and insert the fixup loop"
        if self.is_vectorizing_specializer:
            fixup_loop = self.process_inner_forloop(self.inner_loop,
                                                    original_expr)
            if len(self.indices) > 1:
                loop.body = b.stats(loop.body, fixup_loop)
            else:
                node = b.stats(node, fixup_loop)
        return node
    def visit_NDIterate(self, node):
        """
        Replace this node with ordered loops and a direct index into a
        temporary data pointer in the contiguous dimension.
        """
        b = self.astbuilder
        assert not list(self.treepath(node, '//NDIterate'))
        # Keep a pristine copy of the expression for the scalar fixup loop.
        original_expr = specialize_ast(node.body)
        # start by generating a C or Fortran ordered loop
        self.function.for_loops, node = self.ordered_loop(node.body,
                                                          self.indices)
        loop, node = self._generate_inner_loop(b, node)
        result = self.visit(node)
        # NOTE(review): the value returned by _vectorize_inner_loop is
        # discarded ('result' is returned below); in the single-index case
        # the appended fixup loop appears to be lost -- confirm.
        node = self._vectorize_inner_loop(b, loop, node, original_expr)
        return result
    def index(self, loop_level):
        if self.order == 'C':
            return self.indices[loop_level]
        else:
            return self.indices[-loop_level]
    def strided_indices(self):
        "Return the list of strided indices for this order"
        return self.indices[:-1]
    def contig_index(self):
        "The contiguous index"
        return self.indices[-1]
    def get_data_pointer(self, variable, loop_level):
        return self.compute_inner_dim_pointer(variable, loop_level)
class StridedFortranInnerContigSpecializer(StridedCInnerContigSpecializer):
    """
    Specialize on the first dimension being contiguous.
    """
    order = "F"
    specialization_name = "inner_contig_fortran"
    vectorized_equivalents = None
    def strided_indices(self):
        # Fortran order: the first dimension is contiguous, so all the
        # remaining dimensions are the strided ones.
        return self.indices[1:]
    def contig_index(self):
        return self.indices[0]
class StrengthReducingStridedSpecializer(StridedCInnerContigSpecializer):
    """
    Specialize on strided operands. If some operands are contiguous in the
    dimension compatible with the order we are specializing for (the first
    if Fortran, the last if C), then perform a direct index into a temporary
    date pointer. For strided operands, perform strength reduction in the
    inner dimension by adding the stride to the data pointer in each iteration.
    """
    specialization_name = "strided"
    order = "C"
    is_strided_specializer = True
    vectorized_equivalents = None
    def matching_contiguity(self, type):
        """
        Check whether the array operand for the given type can be directly
        indexed.
        """
        return ((type.is_c_contig and self.order == "C") or
                (type.is_f_contig and self.order == "F"))
    def visit_NDIterate(self, node):
        b = self.astbuilder
        # NOTE(review): 'StridedSpecializer' is a forward reference; when
        # 'strength_reduction' is enabled the module-level alias below
        # rebinds StridedSpecializer to this class, making this equivalent
        # to super(StrengthReducingStridedSpecializer, self).  Verify the
        # MRO when strength_reduction is disabled.
        outer_loop = super(StridedSpecializer, self).visit_NDIterate(node)
        # outer_loop = self.strength_reduce_inner_dimension(outer_loop,
        #                                                   self.inner_loop)
        return outer_loop
    def strength_reduce_inner_dimension(self, outer_loop, inner_loop):
        """
        Reduce the strength of strided array operands in the inner dimension,
        by adding the stride to the temporary pointer.
        """
        b = self.astbuilder
        outer_stats = []
        stats = []
        for arg in self.function.arguments:
            type = arg.variable.type
            if type is None:
                continue
            contig = self.matching_contiguity(type)
            if arg.variable in self.pointers and not contig:
                p = self.pointers[arg.variable]
                if self.order == "C":
                    inner_dim = type.ndim - 1
                else:
                    inner_dim = 0
                # Implement: temp_stride = strides[inner_dim] / sizeof(dtype)
                stride = b.stride(arg.variable, inner_dim)
                temp_stride = b.temp(stride.type.qualify("const"),
                                     name="temp_stride")
                outer_stats.append(
                    b.assign(temp_stride, b.div(stride, b.sizeof(type.dtype))))
                # Implement: temp_pointer += temp_stride
                stats.append(b.assign(p, b.add(p, temp_stride)))
        inner_loop.body = b.stats(inner_loop.body, *stats)
        outer_stats.append(outer_loop)
        return b.stats(*outer_stats)
class StrengthReducingStridedFortranSpecializer(
    StridedFortranInnerContigSpecializer, StrengthReducingStridedSpecializer):
    """
    Specialize on Fortran order for strided operands and apply strength
    reduction in the inner dimension.
    """
    # Pure declaration: behavior comes from the two base classes.
    specialization_name = "strided_fortran"
    order = "F"
    vectorized_equivalents = None
class StridedSpecializer(StridedCInnerContigSpecializer):
    """
    Specialize on strided operands. If some operands are contiguous in the
    dimension compatible with the order we are specializing for (the first
    if Fortran, the last if C), then perform a direct index into a temporary
    date pointer.
    """
    specialization_name = "strided"
    order = "C"
    vectorized_equivalents = None
    is_strided_specializer = True
    def matching_contiguity(self, type):
        """
        Check whether the array operand for the given type can be directly
        indexed.
        """
        return ((type.is_c_contig and self.order == "C") or
                (type.is_f_contig and self.order == "F"))
    def _element_location(self, variable, loop_level):
        """
        Generate a strided or directly indexed load of a single element.
        """
        #if variable in self.pointers:
        if self.matching_contiguity(variable.type):
            # Contiguous in the specialization order: direct index.
            return super(StridedSpecializer, self)._element_location(variable,
                                                                     loop_level)
        b = self.astbuilder
        # Strided: index through the data pointer with the inner stride.
        pointer = self.get_data_pointer(variable, loop_level)
        indices = [self.contig_index()]
        if self.order == "C":
            inner_dim = variable.type.ndim - 1
        else:
            inner_dim = 0
        strides = [b.stride(variable, inner_dim)]
        return self._index_pointer(pointer, indices, strides)
class StridedFortranSpecializer(StridedFortranInnerContigSpecializer,
                                StridedSpecializer):
    """
    Specialize on Fortran order for strided operands.
    """
    # Pure declaration: behavior comes from the two base classes.
    specialization_name = "strided_fortran"
    order = "F"
    vectorized_equivalents = None
# When strength reduction is enabled, the plain strided specializers are
# replaced by their strength-reducing variants under the same names (the
# forward super() references above depend on this rebinding).
if strength_reduction:
    StridedSpecializer = StrengthReducingStridedSpecializer
    StridedFortranSpecializer = StrengthReducingStridedFortranSpecializer
class ContigSpecializer(OrderedSpecializer):
    """
    Specialize on all specializations being contiguous (all F or all C).
    """
    specialization_name = "contig"
    is_contig_specializer = True
    def visit_FunctionNode(self, node):
        node = super(ContigSpecializer, self).visit_FunctionNode(node)
        # Contiguous operands need no stride arguments in the signature.
        self.astbuilder.create_function_type(node, strides_args=False)
        return node
    def visit_NDIterate(self, node):
        """
        Generate a single ForNode over the total data size.
        """
        b = self.astbuilder
        # Keep a pristine expression copy for the scalar fixup loop.
        original_expr = specialize_ast(node.body)
        node = super(ContigSpecializer, self).visit_NDIterate(node)
        for_node = b.for_range_upwards(node.body,
                                       upper=self.function.total_shape)
        self.function.for_loops = [for_node]
        self.indices = [for_node.index]
        node = self.omp_for(b.pragma_for(for_node))
        self.target = for_node.target
        node = self.visit(node)
        if self.is_vectorizing_specializer:
            # Append a scalar fixup loop for the remaining elements.
            fixup_loop = self.process_inner_forloop(for_node, original_expr)
            node = b.stats(node, fixup_loop)
        return node
    def visit_StridePointer(self, node):
        # Strides are not needed for contiguous data; drop the node.
        return None
    def _element_location(self, node, loop_level):
        "Directly index the data pointer"
        data_pointer = self.astbuilder.data_pointer(node)
        return self.astbuilder.index(data_pointer, self.target)
    def index(self, loop_level):
        return self.target
    def contig_index(self):
        return self.target
class CTiledStridedSpecializer(StridedSpecializer):
    """
    Generate tiled code for the last two (C) or first two (F) dimensions.
    The blocksize may be overridden through the get_blocksize method, in
    a specializer subclass or mixin (see miniast.Context.specializer_mixin_cls).
    """
    specialization_name = "tiled"
    order = "C"
    is_tiled_specializer = True
    vectorized_equivalents = None
    def get_blocksize(self):
        """
        Get the tile size. Override in subclasses to provide e.g. parametric
        tiling.
        """
        return self.astbuilder.constant(64)
    def tiled_order(self):
        "Tile in the last two dimensions"
        return self.function.ndim - 1, self.function.ndim - 1 - 2, -1
    def untiled_order(self):
        # Iteration order for the remaining (outer, untiled) dimensions.
        return self.function.ndim - 1 - 2, -1, -1
    def visit_NDIterate(self, node):
        assert self.function.ndim >= 2
        return self._tile_in_two_dimensions(node)
    def _tile_in_two_dimensions(self, node):
        """
        This version generates tiling loops in the first or last two dimensions
        (depending on C or Fortran order).
        """
        b = self.astbuilder
        self.tiled_indices = []
        self.indices = []
        self.blocksize = self.get_blocksize()
        # Generate the two outer tiling loops
        tiled_loop_body = b.stats(b.constant(0)) # fake empty loop body
        controlling_loops, body = self.ordered_loop(
            tiled_loop_body, self.tiled_indices, step=self.blocksize,
            loop_order=self.tiled_order())
        del tiled_loop_body.stats[:]
        # Generate some temporaries to store the upper limit of the inner
        # tiled loops
        upper_limits = {}
        stats = []
        # sort the indices in forward order, to match up with the ordered
        # indices
        tiled_order = sorted(range(*self.tiled_order()))
        for i, index in zip(tiled_order, self.tiled_indices):
            # upper_limit = min(index + blocksize, shape[i])
            upper_limit = b.temp(index.type)
            tiled_loop_body.stats.append(
                b.assign(upper_limit, b.min(b.add(index, self.blocksize),
                                            b.shape_index(i, self.function))))
            upper_limits[i] = upper_limit
        tiled_indices = dict(zip(tiled_order, self.tiled_indices))
        def lower(i):
            # Inner tiled loops start at the controlling loop's index.
            if i in tiled_indices:
                return tiled_indices[i]
            return None
        def upper(i):
            # ... and stop at the per-tile upper limit computed above.
            if i in upper_limits:
                return upper_limits[i]
            return b.shape_index(i, self.function)
        # Generate the inner tiled loops
        outer_for_node = node.body
        inner_body = node.body
        tiling_loops, inner_loops = self.ordered_loop(
            node.body, self.indices,
            lower=lower, upper=upper,
            loop_order=self.tiled_order())
        tiled_loop_body.stats.append(inner_loops)
        innermost_loop = inner_loops.body
        # Generate the outer loops (in case the array operands have more than
        # two dimensions)
        indices = []
        outer_loops, body = self.ordered_loop(body, indices,
                                              loop_order=self.untiled_order())
        body = self.omp_for(body)
        # At this point, 'self.indices' are the indices of the tiled loop
        # (the indices in the first two dimensions for Fortran,
        # the indices in the last two # dimensions for C)
        # 'indices' are the indices of the outer loops
        if self.order == "C":
            self.indices = indices + self.indices
        else:
            self.indices = self.indices + indices
        # if strength_reduction:
        #     body = self.strength_reduce_inner_dimension(body, innermost_loop)
        for dim, for_node in enumerate(controlling_loops):
            for_node.is_controlling_loop = True
            for_node.blocksize = self.blocksize
        for dim, for_node in enumerate(tiling_loops):
            for_node.is_tiling_loop = True
        self.set_dims(controlling_loops)
        self.set_dims(tiling_loops)
        # Publish the loop structure on the function for later passes.
        self.function.controlling_loops = controlling_loops
        self.function.tiling_loops = tiling_loops
        self.function.outer_loops = outer_loops
        self.function.for_loops = outer_loops + controlling_loops + tiling_loops
        self.function.lower_tiling_limits = tiled_indices
        self.function.upper_tiling_limits = upper_limits
        return self.visit(body)
    def set_dims(self, tiled_loops):
        "Set the 'dim' attributes of the tiling and controlling loops"
        # We need to reverse our tiled order, since this order is used to
        # build up the for nodes in reverse. We have an ordered list of for
        # nodes.
        tiled_order = reversed(range(*self.tiled_order()))
        for dim, for_node in zip(tiled_order, tiled_loops):
            for_node.dim = dim
    def _tile_in_all_dimensions(self, node):
        """
        This version generates tiling loops in all dimensions.
        """
        b = self.astbuilder
        self.tiled_indices = []
        self.indices = []
        self.blocksize = self.get_blocksize()
        tiled_loop_body = b.stats(b.constant(0)) # fake empty loop body
        controlling_loops, body = self.ordered_loop(tiled_loop_body,
                                                    self.tiled_indices,
                                                    step=self.blocksize)
        body = self.omp_for(body)
        del tiled_loop_body.stats[:]
        upper_limits = []
        stats = []
        for i, index in enumerate(self.tiled_indices):
            # upper_limit = min(index + blocksize, shape[i])
            upper_limit = b.temp(index.type)
            tiled_loop_body.stats.append(
                b.assign(upper_limit, b.min(b.add(index, self.blocksize),
                                            b.shape_index(i, self.function))))
            upper_limits.append(upper_limit)
        tiling_loops, inner_body = self.ordered_loop(
            node.body, self.indices,
            lower=lambda i: self.tiled_indices[i],
            upper=lambda i: upper_limits[i])
        tiled_loop_body.stats.append(inner_body)
        self.function.controlling_loops = controlling_loops
        self.function.tiling_loops = tiling_loops
        self.function.outer_loops = []
        self.function.for_loops = tiling_loops
        return self.visit(body)
    def strided_indices(self):
        return self.indices[:-1] + [self.tiled_indices[1]]
    def _element_location(self, variable, loop_level):
        """
        Return data + i * strides[0] + j * strides[1] when we are not using
        strength reduction. Otherwise generate temp_data += strides[1]. For
        this to work, temp_data must be set to
        data + i * strides[0] + outer_j * strides[1]. This happens through
        _compute_inner_dim_pointers with tiled=True.
        """
        if strength_reduction:
            return super(CTiledStridedSpecializer, self)._element_location(
                variable, loop_level)
        else:
            return self._strided_element_location(variable)
    def get_data_pointer(self, variable, loop_level):
        return self.compute_inner_dim_pointer(variable, loop_level, tiled=True)
class FTiledStridedSpecializer(StridedFortranSpecializer,
                               #StrengthReducingStridedFortranSpecializer,
                               CTiledStridedSpecializer):
    "Tile in Fortran order"
    specialization_name = "tiled_fortran"
    order = "F"
    def tiled_order(self):
        "Tile in the first two dimensions"
        return 0, 2, 1
    def untiled_order(self):
        # The remaining (outer) dimensions, iterated first-to-last.
        return 2, self.function.ndim, 1
    def strided_indices(self):
        # The first dimension uses the tiled index; the rest are strided.
        return [self.tiled_indices[0]] + self.indices[1:]
#
### Vectorized specializer equivalents
#
def create_vectorized_specializers(specializer_cls):
    """
    Creates Vectorizing specializer classes from the given specializer for
    SSE and AVX.
    """
    base_classes = (VectorizingSpecializer, specializer_cls)
    name_template = 'Vectorized%%d%s' % specializer_cls.__name__
    def make(size):
        # Each generated class fixes vector_size and does not itself have
        # further vectorized equivalents.
        attrs = dict(vectorized_equivalents=None, vector_size=size)
        return type(name_template % size, base_classes, attrs)
    return make(4), make(8)
# Attach the SSE/AVX vectorizing variants to the specializers that support
# explicit vectorization.
ContigSpecializer.vectorized_equivalents = (
    create_vectorized_specializers(ContigSpecializer))
StridedCInnerContigSpecializer.vectorized_equivalents = (
    create_vectorized_specializers(StridedCInnerContigSpecializer))
StridedFortranInnerContigSpecializer.vectorized_equivalents = (
    create_vectorized_specializers(StridedFortranInnerContigSpecializer))
#
### Create dict of all specializers
#
_specializer_list = [
    ContigSpecializer,
    StridedCInnerContigSpecializer, StridedFortranInnerContigSpecializer,
    StridedSpecializer, StridedFortranSpecializer,
    CTiledStridedSpecializer, FTiledStridedSpecializer,
]
# Registry mapping specialization_name (plus '_sse'/'_avx' suffixes for the
# vectorized variants) to the specializer class.
specializers = {}
for sp in _specializer_list:
    specializers[sp.specialization_name] = sp
    vectorizers = getattr(sp, 'vectorized_equivalents', None)
    if vectorizers:
        specializers[sp.specialization_name + '_sse'] = vectorizers[0]
        specializers[sp.specialization_name + '_avx'] = vectorizers[1]
| markflorisson/minivect | minivect/specializers.py | specializers.py | py | 56,778 | python | en | code | 19 | github-code | 13 |
7659277482 | # encoding: utf-8
"""
URL conf for django-sphinxdoc.
"""
from django.conf.urls import patterns, url
from django.views.generic import ListView
from sphinxdoc import models
from sphinxdoc.views import ProjectSearchView
# Shared configuration for the project list view: every Project, ordered
# by name, exposed to templates as 'project_list'.
project_info = {
    'queryset': models.Project.objects.all().order_by('name'),
    'context_object_name': 'project_list',
}
urlpatterns = patterns('sphinxdoc.views',
    # Project index.
    url(
        r'^$',
        ListView.as_view(**project_info),
        # NOTE(review): project_info is also passed as URLconf kwargs here,
        # duplicating the as_view(**project_info) configuration above --
        # confirm whether the extra view kwargs are intentional.
        project_info,
    ),
    # Search within one project's documentation.
    url(
        r'^(?P<slug>[\w-]+)/search/$',
        ProjectSearchView(),
        name='doc-search',
    ),
    # These URLs have to be without the / at the end so that relative links in
    # static HTML files work correctly and that browsers know how to name files
    # for download
    url(
        # Static assets and generated sources bundled with the built docs.
        r'^(?P<slug>[\w-]+)/(?P<type_>_images|_static|_downloads|_source)/' + \
        r'(?P<path>.+)$',
        'sphinx_serve',
    ),
    url(
        # Sphinx objects.inv, for intersphinx consumers.
        r'^(?P<slug>[\w-]+)/_objects/$',
        'objects_inventory',
        name='objects-inv',
    ),
    url(
        # Documentation landing page of a project.
        r'^(?P<slug>[\w-]+)/$',
        'documentation',
        {'path': ''},
        name='doc-index',
    ),
    url(
        # Any page inside a project's documentation.
        r'^(?P<slug>[\w-]+)/(?P<path>.+)/$',
        'documentation',
        name='doc-detail',
    ),
)
| omji/django-sphinxdoc | sphinxdoc/urls.py | urls.py | py | 1,277 | python | en | code | 0 | github-code | 13 |
26533928885 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from alignment import *
import multiprocessing as mp
from numpy import random
from xkcdrgb import xkcd_rgb
class Node(object):
    """A node of a rooted tree: a name, a parent link and child links."""

    def __init__(self, name=None, root=False):
        """Create a detached node; the string 'ROOT' marks "no parent yet"."""
        self.name = name
        self.root = root
        # parent pointer; replaced when the node is hooked into a tree
        self.ancestor = 'ROOT'
        # child Node objects, appended as the tree is built
        self.descendents = []
        # edge length to the ancestor; None until parsed from newick input
        self.branch_length = None
class TreeError(Exception):
    """Raised for malformed newick trees and illegal tree operations."""

    _DEFAULT_MESSAGE = '\n Please check for following errors in newick tree.\n'

    def __init__(self, value):
        # fall back to the generic message when no detail is supplied
        super().__init__(self._DEFAULT_MESSAGE if value is None else value)
def newick_check(ns, index=0):
    """Validate the basic shape of a newick string *ns*.

    Raises TreeError when the string does not start with '(' (at *index*)
    and end with ';', or when the parentheses are unbalanced; otherwise
    prints a confirmation and returns None.

    Fix: the original wrapped bare boolean expressions in try/except, which
    only *evaluated* the checks and discarded the result -- malformed input
    was never reported.  The conditions are now actually enforced.
    """
    if not (ns[index] == '(' and ns[len(ns) - 1] == ';'):
        raise TreeError('Improper Newick format or newick tree doesnot have trailing ;')
    if ns.count('(') != ns.count(')'):
        raise TreeError('Opening and closing parentesis do not match')
    print('Newick tree looks good.')
#########################################
def Nodelabel(string, index):
    """Read a node label from *string* starting at *index*.

    The label ends at ':', ',', ')' or ';'.  When a ':' is found, the
    branch length that follows is parsed via branch().  Returns
    (label, branch_length_or_None, index) with *index* left on the
    terminating character.

    Fix: the original only stopped at ':' or ';', so a label *without* a
    branch length swallowed the following ',' / ')' characters and broke
    parsing of branch-length-free newick topologies.
    """
    label = ''
    branchlen = None
    while string[index] != ';':
        if string[index] == ':':
            index += 1
            branchlen, index = branch(string, index)
            break
        if string[index] == ',' or string[index] == ')':
            # label is over; leave index on the separator for the caller
            break
        label += string[index]
        index += 1
    return label, branchlen, index
def branch(string, index):
    """Parse a branch length from *string* starting at *index*.

    Accumulates characters until ',' , ')' or ';'.  Returns
    (branch_length_or_None, index) with *index* left on the terminator;
    None means no branch length was present.

    Fixes: the unused local 'a = index' is removed, and the literal
    identity test "branchlen is not ''" (a SyntaxWarning on modern
    Python, and incorrect in principle) is replaced with a value check.
    """
    branchlen = ''
    while string[index] != ';':
        if string[index] == ',' or string[index] == ')':
            break
        branchlen += string[index]
        index += 1
    # empty accumulator means there was no branch length at this position
    return (float(branchlen) if branchlen else None), index
## adapted from pyevolve
def read_tree(tree):
    """Parse a newick string into a tree of Node objects; return the root.

    Internal nodes without an explicit label are named
    'Internalnode_<count>'.  (Adapted from pyevolve.)
    """
    current_node=None
    index=0
    # labels available for unnamed internal nodes
    # (heuristic: one per ':' minus one per ')', plus one)
    count=(tree.count(':')-tree.count(')'))+1
    while tree[index]!=';':
        if tree[index]=='(' and index==0:
            # opening parenthesis at position 0 starts the root
            count=count  # NOTE(review): no-op assignment -- probably leftover
            root_node=Node(index,root=True)
            current_node=root_node
            index+=1
        elif tree[index]=='(':
            # descend into a new internal node
            index+=1
            Internal_node=Node(index)
            Internal_node.ancestor=current_node
            current_node.descendents.append(Internal_node)
            current_node=Internal_node
        elif tree[index]==',':
            # sibling separator: nothing to build, just advance
            index+=1
        elif tree[index]==')':
            # close the current internal node: optional label / branch length
            index+=1
            name,branchlength,index=Nodelabel(tree,index)
            if name=='' or name is None:
                current_node.name="Internalnode_"+str(count)
                current_node.branch_length=branchlength
            else:
                current_node.name=name
                current_node.branch_length=branchlength
            # pop back up to the parent
            current_node=current_node.ancestor
            count+=1
        else:
            # leaf: read its label and branch length, hook under current node
            name,branchlength,index=Nodelabel(tree,index)
            leaf_node=Node()
            leaf_node.name=name
            leaf_node.branch_length=branchlength
            leaf_node.ancestor=current_node
            current_node.descendents.append(leaf_node)
    return root_node
######
def write_topology(root_node, n=0, s=None):
    """Serialize the topology below *root_node* (names only) to newick.

    *n* > 0 marks a non-first child, so a ',' separator is emitted first.
    *s* is the internal accumulator used by the recursion.

    Fix: the mutable default argument s=[] was shared across calls, so a
    second call appended to the first call's output -- which is why every
    caller in this file passes s=[] explicitly.  A fresh list is now
    created per top-level call; explicit s=[] still works unchanged.
    """
    if s is None:
        s = []
    if n > 0:
        s.append(",")
    if root_node.descendents:
        s.append("(")
        for child_pos, child in enumerate(root_node.descendents):
            write_topology(child, child_pos, s)
        s.append(")")
    else:
        s.append(root_node.name)
    return ''.join(s) + ';'
## Lambda function to return branch_length for a given node
ert=lambda x,y:x.branch_length if x.name==y else None
## returns a list of node and node names
def write_description(root_node, node, s=None):
    """Post-order collect the branch lengths recorded for nodes named
    node.name in the subtree under *root_node*; return the truthy matches.

    Fixes:
    1. The recursive call was write_description(i, s) -- passing the
       accumulator in the *node* parameter slot -- which crashed with
       AttributeError on any tree that has children.
    2. The mutable default argument s=[] leaked state between calls.

    NOTE(review): filter(None, ...) also drops a legitimate branch length
    of 0.0 -- preserved as-is for compatibility with existing callers.
    """
    if s is None:
        s = []
    for child in root_node.descendents:
        write_description(child, node, s)
    s.append(ert(root_node, node.name))
    return list(filter(None, s))
def colourup(numTrees):
    """Stub: apparently intended to build a colour map for *numTrees* trees.

    NOTE(review): dead/unfinished code -- 'xckd_rgb' is a typo (seaborn
    exports 'xkcd_rgb', so this import raises ImportError if ever run),
    the local colormap is built empty and discarded, and the function
    returns None.  No caller is visible in this file.
    """
    from seaborn import xckd_rgb
    colormap={}
    return
def add_sister(S, node):
    """Insert *node* as a sister of *S* by splicing in a new internal node.

    A fresh Node EDS is created with children (node, S); EDS takes over
    S's slot in S's former ancestor's child list.  Returns EDS so the
    caller can undo the graft with remove_sister().
    """
    EDS=Node()
    # EDS inherits S's parent before S is re-pointed at EDS
    EDS.ancestor=S.ancestor
    EDS.descendents.append(node)
    node.ancestor=EDS
    EDS.descendents.append(S)
    S.ancestor=EDS
    # replace S with EDS in the (old) ancestor's child list
    for n,i in enumerate(EDS.ancestor.descendents):
        if i==S:
            EDS.ancestor.descendents[n]=EDS
        else:
            pass
    return EDS
#EDS.ancestor.descendents.append(S)
#S.ancestor.descendents.remove(EDS)
def remove_sister(EDS, node):
    """Undo add_sister(): drop *node* from EDS and splice EDS back out.

    The surviving child is reattached to EDS's ancestor in EDS's slot.
    Assumes EDS has exactly the two children created by add_sister().
    """
    EDS.descendents.remove(node)
    S=EDS.descendents[0]
    S.ancestor=EDS.ancestor
    # put the surviving child back into the grandparent's child list
    for n,i in enumerate(EDS.ancestor.descendents):
        if i==EDS:
            EDS.ancestor.descendents[n]=S
        else:
            pass
    # only unbinds the local name; the object dies once no tree refs remain
    del EDS
def addto_root(S, node):
    """Create a new root EDS whose children are (*node*, old root *S*).

    Raises TreeError when *S* is not the current root.  The root flag
    moves from S to EDS; EDS.ancestor is the 'ROOT' sentinel.  Returns
    EDS so removefrom_root() can undo the graft.
    NOTE(review): node.ancestor is not updated here, unlike in
    add_sister() -- confirm whether that is intended.
    """
    EDS=Node()
    EDS.descendents.append(node)
    if S.root is True:
        S.ancestor=EDS
        EDS.root=True
        S.root=False
    else:
        raise TreeError("This is not the root node !!")
    EDS.descendents.append(S)
    EDS.ancestor='ROOT'
    return EDS
def removefrom_root(EDS, node):
    """Undo addto_root(): drop *node* and restore the remaining child as root.

    Raises TreeError when *EDS* is not the current root.
    """
    EDS.descendents.remove(node)
    if EDS.root is True:
        # NOTE(review): resets ancestor to None while read_tree() uses the
        # string sentinel 'ROOT' -- confirm which convention callers expect
        EDS.descendents[0].ancestor=None
        EDS.descendents[0].root=True
        EDS.root=False
    else:
        raise TreeError("This is not the root node !!")
    # only unbinds the local name
    del EDS
#WED=add_sister(root_node,node)
# write_topology(ee,s=[])
# remove_sister(WED,node)
# write_topology(ee,s=[])
'''
def listgen(root_node,EE):
if root_node.descendents is not 0:
[listgen(i,EE) for i in root_node.descendents]
print(root_node.name)
'''
def listgen(root_node, EE):
    """Append every node of the subtree to list *EE* in post-order
    (children before their parent).

    Fix: the guard "root_node.descendents is not 0" compared a list to the
    integer 0 by identity -- always True, and a SyntaxWarning on modern
    Python.  Iterating the (possibly empty) child list needs no guard.
    """
    for child in root_node.descendents:
        listgen(child, EE)
    EE.append(root_node)
def tiplabel_changer(root_node, p):
    """Rename tip nodes in place using mapping *p* (old name -> new name).

    Nodes whose name contains 'Internal' are left untouched.  Prints each
    node's (possibly renamed) name and branch length as a side effect.
    NOTE(review): "{:}".format(None) raises TypeError, so the print
    crashes on nodes without a branch length; and the recursion guard
    tests 'ancestor is not None' although read_tree() marks the root's
    ancestor with the string 'ROOT' -- confirm both against real inputs.
    """
    if root_node.ancestor is not None:
        [tiplabel_changer(i,p) for i in root_node.descendents]
        if 'Internal' in root_node.name:
            pass
        else:
            root_node.name=p[root_node.name]
        print(str(root_node.name)+'\t'+"{:}".format(root_node.branch_length))
# test code
def write_tree(root_node, n=0, s=None):
    """Serialize the tree below *root_node* to newick *with* branch lengths.

    Mirrors write_topology() but appends ':<branch_length>' after every
    node that has one.  *n* > 0 marks a non-first child (emits ',').

    Fix: the mutable default argument s=[] was shared across calls, so a
    second call appended to the first call's output.  A fresh list is now
    created per top-level call; passing s=[] explicitly still works.
    """
    if s is None:
        s = []
    if n > 0:
        s.append(",")
    if root_node.descendents:
        s.append("(")
        for child_pos, child in enumerate(root_node.descendents):
            write_tree(child, child_pos, s)
        s.append(")")
        if root_node.branch_length is not None:
            s.append(':' + "{:}".format(root_node.branch_length))
    else:
        # NOTE(review): a leaf whose branch_length is None makes this raise
        # TypeError ("{:}".format(None)) -- preserved, as parsed trees are
        # expected to carry branch lengths on every tip.
        s.append(root_node.name + ':' + "{:}".format(root_node.branch_length))
    return ''.join(s) + ';'
@readfiles
def build_topologies(treemodels):
    """Parse newick model strings into (root Node, post-order node list).

    'readfiles' is a decorator supplied by the 'alignment' module (its
    behavior is not visible here).
    NOTE(review): the 'return' sits inside the loop, so only the FIRST
    tree in treemodels is parsed and returned -- confirm whether this is
    intended or whether all trees should be accumulated.
    """
    for k in treemodels:
        ee=read_tree(k)
        EE=[]
        listgen(ee,EE)
        return ee,EE
XKCD_RGBcolors=[xkcd_rgb[i] for n,i in enumerate(xkcd_rgb)]
def build_trees(treemodels, Backbone_target_sequences, ser, rootinc):
    """For each backbone target, write every topology with the target
    node grafted in, plus a per-target details file.

    For each non-root node in the reference tree the target is temporarily
    added as a sister, the topology is written, and the graft is undone;
    when *rootinc* is true the target is also attached above the root.
    Writes '<target>_Treetopologynewick.nwk' and
    '<target>_Topologymodeldetails.txt'.  Returns
    {target_key: [nodes the target was attached next to]}.
    """
    ee,EE=build_topologies(treemodels)
    # one distinct random colour per node, recorded in the details file
    treecolors=random.choice(XKCD_RGBcolors,len(EE),replace=False)
    Alltrees={}
    for jam in Backbone_target_sequences:
        # aliasfindinator (from the alignment module) resolves the alias
        j=Node(str(aliasfindinator(Backbone_target_sequences[jam][0],ser)))
        print(j.name)
        o=open(str(Backbone_target_sequences[jam][0])+'_'+'Treetopologynewick.nwk','w')
        treedetails=open(str(Backbone_target_sequences[jam][0])+'_'+'Topologymodeldetails.txt','w')
        Alltrees[str(jam)]=[]
        for n,i in enumerate(EE):
            if i.root is False:
                Alltrees[str(jam)].append(i)
                treedetails.write('Tree_'+str(n+1)+'\t'+str(i.name)+'\t'+str(treecolors[n])+'\n')
                WED=add_sister(i,j)
                o.write(write_topology(ee,s=[])+'\n')
                remove_sister(WED,j)
            elif i.root and rootinc:
                Alltrees[str(jam)].append(i)
                treedetails.write('Tree_'+str(n+1)+'\t'+str(i.name)+'\t'+str(treecolors[n])+'\n')
                WED=addto_root(i,j)
                o.write(write_topology(WED,s=[])+'\n')
                removefrom_root(WED,j)
            else:
                pass
        o.close()
        treedetails.close()
    return Alltrees
def prunenbuild_trees(ser):
    """Variant of build_trees() taking only *ser*.

    NOTE(review): this looks copy-pasted from build_trees() and is broken
    as written -- it references Backbone_target_sequences, EE, ee,
    treecolors and rootinc, none of which are parameters or module-level
    names visible in this file, so calling it raises NameError unless
    those globals are injected from elsewhere.  Confirm or delete.
    """
    Alltrees={}
    for jam in Backbone_target_sequences:
        j=Node(str(aliasfindinator(Backbone_target_sequences[jam][0],ser)))
        print(j.name)
        o=open(str(Backbone_target_sequences[jam][0])+'_'+'Treetopologynewick.nwk','w')
        treedetails=open(str(Backbone_target_sequences[jam][0])+'_'+'Topologymodeldetails.txt','w')
        Alltrees[str(jam)]=[]
        for n,i in enumerate(EE):
            if i.root is False:
                Alltrees[str(jam)].append(i)
                treedetails.write('Tree_'+str(n+1)+'\t'+str(i.name)+'\t'+str(treecolors[n])+'\n')
                WED=add_sister(i,j)
                o.write(write_topology(ee,s=[])+'\n')
                remove_sister(WED,j)
            elif i.root and rootinc:
                Alltrees[str(jam)].append(i)
                treedetails.write('Tree_'+str(n+1)+'\t'+str(i.name)+'\t'+str(treecolors[n])+'\n')
                WED=addto_root(i,j)
                o.write(write_topology(WED,s=[])+'\n')
                removefrom_root(WED,j)
            else:
                pass
        o.close()
        treedetails.close()
    return Alltrees
def tree_topology_creator(ee, EE, j, o, rootinc):
    """Write one newick topology per grafting position of node *j*.

    For every non-root node in *EE*, *j* is temporarily added as a sister,
    the topology of *ee* is written to file object *o*, and the graft is
    undone; when *rootinc* is true *j* is also attached above the root.
    Closes *o* when done.
    NOTE(review): this definition is repeated verbatim immediately below;
    the later definition shadows this one at import time -- one copy
    should be deleted.
    """
    for n,i in enumerate(EE):
        if i.root is False:
            WED=add_sister(i,j)
            o.write(write_topology(ee,s=[])+'\n')
            remove_sister(WED,j)
        elif i.root and rootinc:
            WED=addto_root(i,j)
            o.write(write_topology(WED,s=[])+'\n')
            removefrom_root(WED,j)
        else:
            pass
    o.close()
def tree_topology_creator(ee, EE, j, o, rootinc):
    """Write one newick topology per grafting position of node *j*.

    For every non-root node in *EE*, *j* is temporarily added as a sister,
    the topology of *ee* is written to file object *o*, and the graft is
    undone; when *rootinc* is true *j* is also attached above the root.
    Closes *o* when done.
    NOTE(review): verbatim duplicate of the definition directly above;
    being later in the module, this is the copy that takes effect.
    """
    for n,i in enumerate(EE):
        if i.root is False:
            WED=add_sister(i,j)
            o.write(write_topology(ee,s=[])+'\n')
            remove_sister(WED,j)
        elif i.root and rootinc:
            WED=addto_root(i,j)
            o.write(write_topology(WED,s=[])+'\n')
            removefrom_root(WED,j)
        else:
            pass
    o.close()
def prune_node(node):
    """Detach *node* and splice its parent out of the tree.

    Returns (parent, index-of-node-in-parent) so rewire_nodes() can undo
    the pruning.  Unless the parent is attached to the 'ROOT' sentinel,
    the parent's surviving children are re-hooked directly to the
    grandparent in the parent's slot.
    """
    intnode,intnode_index=node.ancestor,node.ancestor.descendents.index(node)
    intnode.descendents.remove(node)
    if intnode.ancestor=='ROOT':
        pass
    else:
        # promote each surviving child to the grandparent, taking over the
        # parent's slot in the grandparent's child list
        for n,i in enumerate(intnode.descendents):
            i.ancestor=intnode.ancestor
            intnode.ancestor.descendents[intnode.ancestor.descendents.index(intnode)]=i
    return(intnode,intnode_index)
def rewire_nodes(intnode, intnode_index, node):
    """Undo prune_node(): re-insert *node* under *intnode* at its old index
    and hook *intnode* back into its ancestor's child list.

    NOTE(review): indentation of this function in the original source is
    ambiguous; reconstructed so that both branches end by re-pointing
    node.ancestor at intnode -- confirm against the working copy.
    """
    if intnode.ancestor=='ROOT':
        pass
        node.ancestor=intnode
    else:
        for n,i in enumerate(intnode.descendents):
            # re-point the grandparent's slot for this child back at intnode
            if i==intnode.ancestor.descendents[intnode.ancestor.descendents.index(i)]:
                intnode.ancestor.descendents[intnode.ancestor.descendents.index(i)]=intnode
            i.ancestor=intnode
        node.ancestor=intnode
    intnode.descendents.insert(intnode_index,node)
| AdityaLankapalli/Ttrip-BAC | Tree.py | Tree.py | py | 9,059 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.