hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
17a347bd5164cd6a7614cc6077d50a8c68cb0c7e | 1,109 | py | Python | test_lib.py | DrakenWan/deeplearninglibrary | 0fb7d76f9220d1479c6ddadc2e6165590ab000eb | [
"Apache-2.0"
] | null | null | null | test_lib.py | DrakenWan/deeplearninglibrary | 0fb7d76f9220d1479c6ddadc2e6165590ab000eb | [
"Apache-2.0"
] | null | null | null | test_lib.py | DrakenWan/deeplearninglibrary | 0fb7d76f9220d1479c6ddadc2e6165590ab000eb | [
"Apache-2.0"
] | null | null | null | """
Tutorial reference: https://www.kdnuggets.com/2020/09/implementing-deep-learning-library-scratch-python.html
Original Library: https://github.com/parmeet/dll_numpy
Author: DrakenWan 2020
"""
import core as DL
import utilities
import numpy as np
if __name__ == "__main__":
batch_size = 20
num_epochs = 200
samples_per_class = 100
num_classes = 3
hidden_units = 100
data,target = utilities.genSpiralData(samples_per_class,num_classes)
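    # assemble the classifier: Linear(2 -> hidden) -> ReLU -> Linear(hidden -> num_classes)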
model = utilities.Model()
model.add(DL.Linear(2,hidden_units))
model.add(DL.ReLU())
model.add(DL.Linear(hidden_units,num_classes))
optim = DL.SGD(model.parameters,lr=1.0,weight_decay=0.001,momentum=.9)
loss_fn = DL.SoftmaxWithLoss()
model.fit(data,target,batch_size,num_epochs,optim,loss_fn)
predicted_labels = np.argmax(model.predict(data),axis=1)
accuracy = np.sum(predicted_labels==target)/len(target)
print("Model Accuracy = {}".format(accuracy))
utilities.plot2DDataWithDecisionBoundary(data,target,model) | 39.607143 | 113 | 0.67899 | 142 | 1,109 | 5.105634 | 0.556338 | 0.041379 | 0.041379 | 0.044138 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036322 | 0.205591 | 1,109 | 28 | 114 | 39.607143 | 0.786606 | 0.167719 | 0 | 0 | 0 | 0 | 0.030787 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd587bc3d2baa172c3072e8e32527093a3a9f149 | 2,547 | py | Python | src/routes/dataNetwork.py | spaezsuarez/view-data-app | 6482b73eb39048e5dbdf133d4ccf29cc357dcb6f | [
"MIT"
] | 1 | 2021-05-28T17:01:17.000Z | 2021-05-28T17:01:17.000Z | src/routes/dataNetwork.py | spaezsuarez/view-data-app | 6482b73eb39048e5dbdf133d4ccf29cc357dcb6f | [
"MIT"
] | null | null | null | src/routes/dataNetwork.py | spaezsuarez/view-data-app | 6482b73eb39048e5dbdf133d4ccf29cc357dcb6f | [
"MIT"
] | null | null | null | from . import dataRoute
from flask import render_template, request
from utils import dataManagment
import dateutil.parser
import datetime
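# each route below queries dataManagment and renders the resulting DataFrame as an HTML table in data.html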
@dataRoute.route('/head', methods=['POST'])
def create_head():
number = request.form['number']
df = dataManagment.data_frame_head(number)
return render_template('data.html', tables=[df.to_html()],isCentered = True)
@dataRoute.route('/sex/country', methods=['POST'])
def create_second_request():
sexo = request.form.get('selection-sex')
pais = request.form.get('selection-country')
df = dataManagment.get_sex_country_deaths(pais, sexo)
return render_template('data.html', tables=[df.to_html()],isCentered = True)
@dataRoute.route('/country/dates', methods=['POST'])
def create_third_request():
firstDate = dateutil.parser.parse(request.form.get(
'firstDate'), dayfirst=False) # Datetime
secondDate = dateutil.parser.parse(
request.form.get('secondDate'), dayfirst=False)
firstDate = datetime.date(firstDate.year, firstDate.month, firstDate.day)
secondDate = datetime.date(
secondDate.year, secondDate.month, secondDate.day)
pais = request.form.get('selection-country')
df = dataManagment.get_country_dates(pais, firstDate, secondDate)
return render_template('data.html', tables=[df.to_html()],isCentered = True)
@dataRoute.route('/count/country', methods=['POST'])
def create_fourth_request():
sexo = request.form.get('selection-sex')
df = dataManagment.get_contagios_por_pais(sexo)
return render_template('data.html', tables=[df.to_html()],isCentered = False)
@dataRoute.route('/count/state', methods=['POST'])
def create_fifth_request():
estado = request.form.get('selection-state')
df = dataManagment.get_estado_por_pais(estado)
return render_template('data.html', tables=[df.to_html()], isCentered = False)
@dataRoute.route('/resumen/departamento', methods=['POST'])
def create_sixth_request():
pais = request.form.get('country')
df = dataManagment.get_resumen(pais)
return render_template('data.html', tables=[df.to_html()], isCentered = False)
@dataRoute.route('/muertes/ciudad',methods=['POST'])
def create_seventh_request():
ciudad = request.form.get('selection-city')
df = dataManagment.get_muertes_por_ciudad(ciudad)
return render_template('data.html', tables=[df.to_html()], isCentered = False)
@dataRoute.route('/test',methods=['GET'])
def create_test():
df = dataManagment.test()
return render_template('data.html',tables=[df.to_html()],isCentered = True) | 39.796875 | 82 | 0.725167 | 316 | 2,547 | 5.693038 | 0.199367 | 0.061145 | 0.070039 | 0.106726 | 0.480267 | 0.45025 | 0.413563 | 0.372429 | 0.372429 | 0.314619 | 0 | 0 | 0.122104 | 2,547 | 64 | 83 | 39.796875 | 0.804562 | 0.003141 | 0 | 0.230769 | 0 | 0 | 0.126872 | 0.008274 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.096154 | 0 | 0.403846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd5a208ee269d3bfedf85a940f895d881be68074 | 1,151 | py | Python | Chapter 4/05 - Real-life example - lazily evaluated attributes/lazy_class_attribute.py | moseskim/Expert-Python-Programming-Fourth-Edition | 5160f974deb2365597b7be9cc032f24bfa13471a | [
"MIT"
] | null | null | null | Chapter 4/05 - Real-life example - lazily evaluated attributes/lazy_class_attribute.py | moseskim/Expert-Python-Programming-Fourth-Edition | 5160f974deb2365597b7be9cc032f24bfa13471a | [
"MIT"
] | null | null | null | Chapter 4/05 - Real-life example - lazily evaluated attributes/lazy_class_attribute.py | moseskim/Expert-Python-Programming-Fourth-Edition | 5160f974deb2365597b7be9cc032f24bfa13471a | [
"MIT"
] | null | null | null | import OpenGL.GL as gl
from OpenGL.GL import shaders
class lazy_class_attribute(object):
def __init__(self, function):
self.fget = function
def __get__(self, obj, cls):
value = self.fget(obj or cls)
        # note: store the computed value on the class object, not the instance,
        # so it applies regardless of class-level or instance-level access.
setattr(cls, self.fget.__name__, value)
return value
class ObjectUsingShaderProgram(object):
    # a typical pass-through vertex shader implementation
VERTEX_CODE = """
#version 330 core
layout(location = 0) in vec4 vertexPosition;
void main(){
gl_Position = vertexPosition;
}
"""
    # a typical fragment shader:
    # draws every element in white
FRAGMENT_CODE = """
#version 330 core
out lowp vec4 out_color;
void main(){
out_color = vec4(1, 1, 1, 1);
}
"""
@lazy_class_attribute
def shader_program(self):
print("compiling!")
return shaders.compileProgram(
shaders.compileShader(self.VERTEX_CODE, gl.GL_VERTEX_SHADER),
shaders.compileShader(self.FRAGMENT_CODE, gl.GL_FRAGMENT_SHADER),
)
| 26.767442 | 77 | 0.592528 | 135 | 1,151 | 4.844444 | 0.518519 | 0.036697 | 0.055046 | 0.055046 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017789 | 0.316247 | 1,151 | 42 | 78 | 27.404762 | 0.813215 | 0.112945 | 0 | 0.193548 | 0 | 0 | 0.312315 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.064516 | 0 | 0.354839 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd5a5f01afeebbef48ab0545bb48413999a63520 | 6,134 | py | Python | main.py | Lulzx/gittools | b4a1a4d7169a10af10079a3903b9108843e97385 | [
"MIT"
] | 4 | 2020-10-22T03:58:56.000Z | 2021-10-29T20:45:49.000Z | main.py | Lulzx/gittools | b4a1a4d7169a10af10079a3903b9108843e97385 | [
"MIT"
] | null | null | null | main.py | Lulzx/gittools | b4a1a4d7169a10af10079a3903b9108843e97385 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import os
import sys
import time
from itertools import islice
from uuid import uuid4
import emojis
from github import Github
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
from telegram import InlineQueryResultArticle, ParseMode
from telegram import InputTextMessageContent
from telegram.ext import Updater, InlineQueryHandler, CommandHandler
logging.basicConfig(format='%(asctime)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
access_token = os.environ.get("access_token")
g = Github(access_token)
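# authenticated GitHub client shared by all command and inline-query handlers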
def start(update, context):
update.message.reply_text('Hi!')
def help(update, context):
update.message.reply_text('Help!')
def fetch_url(query_term, query_type):
if query_type in ["u", "user"]:
result = get_user(query_term)
elif query_type in ["r", "repo"]:
result = get_repo(query_term)
else:
result = "NIL"
return result
def get_repo(query):
repo = g.get_repo(query)
name = repo.name
repo_url = repo.html_url
clone_url = repo.clone_url
# description = repo.description
stars = repo.stargazers_count
language = repo.language
owner_name = repo.owner.name
owner_url = repo.owner.html_url
response = f"""🗄 [{name}]({repo_url}) by [{owner_name}]({owner_url})"""
response += f""" in #{language}\n⭐️ {stars} Stars\n📥 [Clone]({clone_url})"""
return response
def get_user(query):
user = g.get_user(query)
name = "👥 " + user.name
location = "📌 " + user.location
bio = "🎭 " + user.bio
# avatar = user.avatar_url
response = "{}\n{}\n{}".format(name, location, bio)
response += "\n🔗 https://github.com/{}".format(query)
return response
def search_callback(update, context):
user_says = context.args
if len(user_says):
chat_id = update.message.chat.id
query_type = str(user_says[0])
query_term = str(user_says[1:][0])
result = fetch_url(query_term, query_type)
link = result.split("[Clone](")[-1][:-1]
data = result.split(".")[1].split("/")
base = "https://github.com/"
username = query_term
# repo_name = data[2]
url = base + username
if query_type == "u":
button_text = "🗄 repositories"
link = url + "?tab=repositories"
else:
button_text = "🗄 repository"
markup = InlineKeyboardMarkup(
[[InlineKeyboardButton("👤 profile", url=url), InlineKeyboardButton(button_text, url=link)]])
context.bot.send_message(chat_id=chat_id, text="{}".format(result), reply_markup=markup,
parse_mode=ParseMode.MARKDOWN)
else:
return
def download(update, context):
user_says = context.args
chat_id = update.message.chat.id
# query_type = str(user_says[0])
query_term = str(user_says[0])
url = f"https://github.com/{query_term}/archive/master.zip"
caption = f"✅ download successful for repository: {query_term}"
context.bot.send_document(chat_id=chat_id, document=url, caption=caption)
# except:
# context.bot.send_message(chat_id=chat_id, text="repository not found!")
def emoji_callback(update, context):
chat_id = update.message.chat.id
emojiset = g.get_emojis()
for x in emojiset:
x = f":{x}:"
context.bot.send_message(chat_id=chat_id, text=emojis.encode(x))
time.sleep(0.1)
def inlinequery(update, context):
try:
query = update.inline_query.query # .split(" ")
# query_type = query[0]
# query_term = query[1]
keywords = [keyword.strip() for keyword in query.split(',')]
    except Exception:
return
query = '+'.join(keywords) + '+in:readme+in:description'
result = g.search_repositories(query, 'stars', 'desc')
print(f'Found {result.totalCount} repo(s)')
# result = fetch_url(query_term, query_type)
title = "Result"
results = list()
if result.totalCount == 0:
title = "No results found."
content = "No results found."
results.append(
InlineQueryResultArticle(
id=uuid4(),
title=title,
input_message_content=InputTextMessageContent(
"{}".format(content),
parse_mode=ParseMode.MARKDOWN)))
update.inline_query.answer(results, cache_time=3)
stop = 10
for repo in islice(result, 0, stop):
name = repo.name
repo_url = repo.html_url
clone_url = repo.clone_url
description = repo.description
stars = repo.stargazers_count
language = repo.language
owner_name = repo.owner.name
owner_url = repo.owner.html_url
response = f"""🗄 [{name}]({repo_url}) by [{owner_name}]({owner_url})"""
response += f""" in #{language}\n⭐️ {stars} Stars\n📥 [Clone]({clone_url})"""
results.append(
InlineQueryResultArticle(
id=uuid4(),
title=name,
description=description,
input_message_content=InputTextMessageContent(
"{}".format(response),
parse_mode=ParseMode.MARKDOWN)))
update.inline_query.answer(results, cache_time=3)
def error(update, context):
logger.warning('Update "%s" caused error "%s"', update, context.error)
def main():
try:
TOKEN = sys.argv[1]
except IndexError:
TOKEN = os.environ.get("telegram_token")
updater = Updater(TOKEN, use_context=True)
dp = updater.dispatcher
dp.add_handler(CommandHandler("start", start))
dp.add_handler(CommandHandler("help", help))
dp.add_handler(CommandHandler("search", search_callback))
dp.add_handler(CommandHandler("emoji", emoji_callback))
dp.add_handler(CommandHandler("download", download))
dp.add_handler(InlineQueryHandler(inlinequery))
dp.add_error_handler(error)
updater.start_polling()
logger.info("Ready to rock..!")
updater.idle()
if __name__ == '__main__':
main()
| 31.45641 | 104 | 0.630747 | 745 | 6,134 | 5.044295 | 0.237584 | 0.022352 | 0.020756 | 0.034593 | 0.37041 | 0.32677 | 0.251464 | 0.234433 | 0.234433 | 0.204896 | 0 | 0.005352 | 0.238507 | 6,134 | 194 | 105 | 31.618557 | 0.795761 | 0.053962 | 0 | 0.293333 | 0 | 0 | 0.12364 | 0.013642 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073333 | false | 0 | 0.08 | 0 | 0.186667 | 0.006667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd5b5440ec10bb3d894d2f6fb7dcd6426ce92c37 | 4,132 | py | Python | SnakeGame.py | captainpolar/snakegame | 018638875309f0b17a6dbbd5c3ff4f536058b844 | [
"MIT"
] | 1 | 2021-03-29T17:15:09.000Z | 2021-03-29T17:15:09.000Z | SnakeGame.py | captainpolar/snakegame | 018638875309f0b17a6dbbd5c3ff4f536058b844 | [
"MIT"
] | null | null | null | SnakeGame.py | captainpolar/snakegame | 018638875309f0b17a6dbbd5c3ff4f536058b844 | [
"MIT"
] | null | null | null | import pygame
import random
pygame.init()
white = (255, 255, 255)
black = (0, 0, 0)
yellow = (255, 255, 102)
red = (250, 0, 0) # Other nice red color: 213, 50, 80
green = (152, 251, 152)
blue = (30, 144, 255) # other nice combo: 50, 151, 213
dis_width = 800
dis_height = 600
dis = pygame.display.set_mode((dis_width, dis_height))
pygame.display.set_caption('Snake Game')
clock = pygame.time.Clock()
snake_block = 10
snake_speed = 15
# Fonts
font_style = pygame.font.SysFont("roboto", 30)
score_font = pygame.font.SysFont("chango", 55)
level_font = pygame.font.SysFont("chango", 55)
def score(score):
value = score_font.render("Score: " + str(score), True, blue)
dis.blit(value, [0, 0])
def our_snake(snake_block, snake_list):
for x in snake_list:
pygame.draw.rect(dis, black, [x[0], x[1], snake_block, snake_block])
def message(msg, color):
mesg = font_style.render(msg, True, color)
dis.blit(mesg, [dis_width/6, dis_height/3])
def gameLoop():
end_game = False
close_game = False
x1 = dis_width / 2
y1 = dis_height / 2
x1_change = 0
y1_change = 0
snake_List = []
Snake_Length = 1
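    # place the first food pellet, snapped to the 10-pixel grid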
foodx = round(random.randrange(0, dis_width - snake_block) / 10) * 10
    foody = round(random.randrange(0, dis_height - snake_block) / 10) * 10
while not end_game:
while close_game:
dis.fill(green) # change color
message("GAME OVER! Press SPACE to play again or ESC to quit.", red)
score(Snake_Length - 1)
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
end_game = True
close_game = False
if event.key == pygame.K_SPACE:
gameLoop()
# for event in pygame.event.get():
# if event.type == pygame.KEYDOWN:
# if event.key == pygame.K_2:
# end_game = True
# close_game = False
# if event.key == pygame.K_1:
# gameLoop()
for event in pygame.event.get():
if event.type == pygame.QUIT:
end_game = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
x1_change = -snake_block
y1_change = 0
elif event.key == pygame.K_RIGHT:
x1_change = snake_block
y1_change = 0
elif event.key == pygame.K_UP:
x1_change = 0
y1_change = -snake_block
elif event.key == pygame.K_DOWN:
x1_change = 0
y1_change = snake_block
if x1 >= dis_width or x1 < 0 or y1 >= dis_height or y1 < 0:
close_game = True
x1 += x1_change
y1 += y1_change
dis.fill(green) # change color maybe
pygame.draw.rect(dis, red, [foodx, foody, snake_block, snake_block]) # change food color maybe
snake_Head = []
snake_Head.append(x1)
snake_Head.append(y1)
snake_List.append(snake_Head)
if len(snake_List) > Snake_Length:
del snake_List[0]
for x in snake_List[:-1]:
if x == snake_Head:
close_game = True
our_snake(snake_block, snake_List)
score(Snake_Length - 1)
pygame.display.update()
if x1 == foodx and y1 == foody:
foodx = round(random.randrange(0, dis_width - snake_block) / 10) * 10
foody = round(random.randrange(0, dis_height - snake_block) / 10) * 10
Snake_Length += 1
clock.tick(snake_speed)
# Planning to add levels:
# if level == 1 and score == 10:
# level +=2
# snake_speed = 20
pygame.quit()
quit()
gameLoop()
| 29.304965 | 104 | 0.526864 | 519 | 4,132 | 4.021195 | 0.235067 | 0.071874 | 0.053666 | 0.057499 | 0.444178 | 0.390513 | 0.336847 | 0.276473 | 0.276473 | 0.276473 | 0 | 0.056967 | 0.371249 | 4,132 | 140 | 105 | 29.514286 | 0.746343 | 0.090997 | 0 | 0.276596 | 0 | 0 | 0.02418 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042553 | false | 0 | 0.021277 | 0 | 0.06383 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd5c8afbbd833cee9c288bc0007055dc079f69d8 | 3,335 | py | Python | MpSA-TRIP/BarcodeGenomeLibMain.py | wiw/pyMPFA | a72aa196859031b2194fb51f204a1ab938193aaa | [
"Unlicense"
] | null | null | null | MpSA-TRIP/BarcodeGenomeLibMain.py | wiw/pyMPFA | a72aa196859031b2194fb51f204a1ab938193aaa | [
"Unlicense"
] | null | null | null | MpSA-TRIP/BarcodeGenomeLibMain.py | wiw/pyMPFA | a72aa196859031b2194fb51f204a1ab938193aaa | [
"Unlicense"
] | null | null | null | #C:\Python27\python.exe
#!/usr/bin/env python
# encoding: utf-8
import os
# import subprocess
import SupportFunc as supp
import ReadIndexesFunc as rind
import CollectBcMutFunc as colb
import WriteFunc as wrt
import param
import picks
from TripMain_0_2 import Pdump
def main():
supp.setup_logging()
for name in param.indexList:
index = param.indexList[name]
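        # make a per-index working directory, then split out the reads matching this index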
# readsStat = {}
        if not os.path.exists(os.path.join(picks.workdir, name)):
            os.makedirs(os.path.join(picks.workdir, name))
indexFile = os.path.join(picks.workdir, name, "index_{}.fastq".format(index.upper()))
# indexFiltFile = os.path.join(picks.workdir, name, "filt_index_{}.fastq".format(index.upper()))
if not os.path.exists(indexFile) or os.stat(indexFile).st_size == 0:
rind.SplitFastqByIndexes(picks.input_file, indexFile, index.upper(), param.indexError, param.const_1.upper(), param.const_1Error, param.regExpIndex, picks.no_trim_index)
if picks.random_read:
indexFileRand = os.path.join(picks.workdir, name, "random_index_{}.fastq".format(index.upper()))
rind.RandomReadIndexes(indexFile, indexFileRand, param.probability)
indexFile = indexFileRand
supp.LogInfo("\n\nEnd splitting.\n\n#####################################\n")
# readsStat[name] = rind.filterShadyReads(indexFile, param.reFilter, indexFiltFile)
# indexFile = indexFiltFile
# supp.LogInfo("Filter before: {}, after: {}\n indexFile - {}, indexFiltFile - {}".format(readsStat[name][0], readsStat[name][1], indexFile, indexFiltFile))
supp.LogInfo('''Processing on: '{}'.\n
Total reads in file '{}': {} reads.\n
Generate dictionary of barcodes.\n'''.format(os.path.basename(indexFile), os.path.basename(indexFile), supp.GetTotalSeqRecords(indexFile)))
bcDictPI = colb.CollectBarcodeGenome(indexFile, param.barcodeLength, param.readsValue, param.barcodeError, param.const_2.upper(), param.const_2Error, param.regExpBc, picks.merge_indexes, picks.reverse_barcode, param.pmi, param.pmiLength, param.pmiSubst)
Pdump(bcDictPI, name + "_bcDictPI", picks.PdumpDir)
# Pdump(readsStat, name + "_readsStat", picks.PdumpDir)
for pI in bcDictPI:
csvFile = wrt.WriteBcDictToFile(bcDictPI[pI], os.path.join(picks.workdir, name), indexFile, pI)
# csvFile_R = wrt.SimpleCsvWriter(None, bcDictPI[pI], os.path.join(picks.workdir, name), indexFile, pI)
supp.LogInfo(''' I had select the {} unique barcodes.\n
Results writing to file '{}'
in your working directory: '{}'\n'''.format(len(bcDictPI[pI]), csvFile, os.path.join(picks.workdir, name)))
# if os.path.exists(param.rscript):
# pathToScript = os.path.join(os.getcwd(), "trip_Rstat.R")
# option = [csvFile_R, os.path.dirname(csvFile_R), index]
# cmd = [param.rscript, pathToScript] + option
# subprocess.call(cmd)
# else:
# print("You do not have installed R-session, or you incorrectly specified the path to the Rscript.\nStatistics on barcodes will not be displayed.")
supp.LogInfo("End processing with: '{}'.\n\n".format(os.path.basename(indexFile)))
if __name__ == "__main__":
main() | 60.636364 | 261 | 0.661469 | 401 | 3,335 | 5.418953 | 0.366584 | 0.044179 | 0.041417 | 0.055223 | 0.198343 | 0.146802 | 0.059365 | 0.043258 | 0.043258 | 0.043258 | 0 | 0.004459 | 0.193103 | 3,335 | 55 | 262 | 60.636364 | 0.803047 | 0.297151 | 0 | 0 | 0 | 0 | 0.166309 | 0.031801 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.235294 | 0 | 0.264706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd5d9ea17d4a9ae541c6e6916ed6741c75882ac1 | 12,036 | py | Python | polyart/_input.py | IsaacTFM/pygrr-polyart | 7a8219cd93b5691ddca89b4fb46a4af7bc1e3456 | [
"Apache-2.0"
] | 4 | 2021-11-21T18:35:54.000Z | 2021-12-14T02:02:37.000Z | polyart/_input.py | IsaacTFM/pygrr-polyart | 7a8219cd93b5691ddca89b4fb46a4af7bc1e3456 | [
"Apache-2.0"
] | 3 | 2021-11-21T18:37:56.000Z | 2021-11-21T19:15:21.000Z | polyart/_input.py | IsaacTFM/pygrr-polyart | 7a8219cd93b5691ddca89b4fb46a4af7bc1e3456 | [
"Apache-2.0"
] | 1 | 2021-11-21T18:39:52.000Z | 2021-11-21T18:39:52.000Z | """
This file handles the input of PolyArt.
"""
# if this is the origin file (not imported)
if __name__ == "__main__":
# print the documentation
print(__doc__)
# add sysmessages module location to path
from sys import path
path.append('..')
# run sysmessages
import common.sysmessages
# import parent package
import polyart
# import used functions from math
from math import cos, atan, sin, radians
# ------------------------------------------------------------------------ #
# rotation #
# ------------------------------------------------------------------------ #
# region rotation
def rotate_left(event):
"""
Wrapper for rotating the model anti-clockwise.
"""
# if the focus is on an entry widget, return
try:
if polyart.ui.root.focus_get().winfo_class() == "Entry":
return
except Exception:
pass
if polyart.snapped:
# if snapped, rotate 22.5 degrees
rotate(-22.5)
else:
# else, rotate 1 degrees
rotate(-1)
def rotate_right(event):
"""
Wrapper for rotating the model clockwise.
"""
# if the focus is on an entry widget, return
try:
if polyart.ui.root.focus_get().winfo_class() == "Entry":
return
except Exception:
pass
if polyart.snapped:
# if snapped, rotate 22.5 degrees
rotate(22.5)
else:
# else, rotate 1 degrees
rotate(1)
def rotate(angle): # assumes anti-clockwise
"""
Rotates the model.
"""
angle *= -1 # makes it clockwise
if angle < 0: # this cleanses the number to ensure it is between 0 and 360
angle = -(abs(angle) % 360)
else:
angle = angle % 360
new_points = []
for point in polyart.model_data:
x = point[0] - polyart.CENTER[0]
y = -(point[1] - polyart.CENTER[1])
# point_rotation = the angle from the center of the model to the point
if x == 0 and y == 0: # ignore this point, it is in the centre
new_points.append((polyart.CENTER[0] + x, polyart.CENTER[1] - y))
else:
if x == 0:
if y > 0: # it is directly up
point_rotation = radians(0 - 90)
else: # it is directly down
point_rotation = radians(180 - 90)
elif y == 0:
if x > 0: # it is directly right
point_rotation = radians(90 - 90)
else: # it is directly left
point_rotation = radians(270 - 90)
else:
if x > 0 and y > 0:
point_rotation = atan(x / y) + radians(0 - 90)
elif x > 0 and y < 0:
point_rotation = atan(x / y) + radians(180 - 90)
elif x < 0 and y > 0:
point_rotation = atan(x / y) + radians(360 - 90)
else: # x < 0 and y < 0:
point_rotation = atan(x / y) + radians(180 - 90)
theta = radians(
angle) - point_rotation # theta is equal to the rotation of the object added to the angle, minus the model rotation of the point
radius = polyart.cached_hypot(x, y) # get distance from the point to the center of the object
new_xdiff = radius * cos(theta)
new_ydiff = radius * sin(theta)
new_points.append((polyart.CENTER[0] + new_xdiff, polyart.CENTER[1] - new_ydiff))
# update model_data
polyart.model_data = new_points
# refresh the canvas
polyart.refresh()
# endregion
# ------------------------------------------------------------------------ #
# mouse #
# ------------------------------------------------------------------------ #
# region mouse
def left_click(event):
"""
Either creates a new point, or chooses the point to move (index_moving).
"""
# if the focus is on an entry widget, return
try:
if polyart.ui.root.focus_get().winfo_class() == "Entry":
return
except Exception:
pass
# get mouse position
mouse = (event.x, event.y)
# iterate through model_data
for i in range(len(polyart.model_data)):
point = polyart.model_data[i]
# calculate the distance between the mouse and point
distance = polyart.distance(mouse, point)
if distance <= polyart.POINTSELECTDISTANCE:
# if the distance is less than or equal to the point_select_distance
polyart.index_moving = i
break
    if polyart.index_moving is None:
# if no point is selected, assume that the user is trying to create a new point
# iterate through model_data
for i in range(len(polyart.model_data)):
# get the parent points of the line
a = polyart.model_data[i % len(polyart.model_data)]
c = polyart.model_data[(i + 1) % len(polyart.model_data)]
if polyart.is_between(a, mouse, c):
# if the mouse position lies on that line
# work out index
index = (i + 1) % len(polyart.model_data)
if polyart.snapped:
# position is snapped
position = (polyart.snap(mouse[0]), polyart.snap(mouse[1]))
else:
# position not snapped
position = mouse
# insert new point
polyart.model_data.insert(index, position)
# refresh the canvas
polyart.refresh()
# force only one point to be made
break
def left_release(event):
"""
Sets the selected point (index_moving) to None.
"""
# if the focus is on an entry widget, return
try:
if polyart.ui.root.focus_get().winfo_class() == "Entry":
return
except Exception:
pass
polyart.index_moving = None
def right_click(event):
"""
Deletes the point hovered over.
"""
# if the focus is on an entry widget, return
try:
if polyart.ui.root.focus_get().winfo_class() == "Entry":
return
except Exception:
pass
    if len(polyart.model_data) > 3 and polyart.index_moving is None:
# if there are less than 4 points, deleting a point should NOT be allowed to happen
# if a point is selected, the same goes
# get the mouse position
mouse = (event.x, event.y)
# iterate through model_data
for point in polyart.model_data:
if polyart.distance(mouse, point) <= polyart.POINTSELECTDISTANCE:
# if the distance is less than or equal to the point_select_distance
# remove the point
polyart.model_data.remove(point)
# force only one point to be deleted
break
# refresh the canvas
polyart.refresh()
def motion(event):
"""
Moves the selected point, if there is one.
"""
    # get the mouse position
mouse = (event.x, event.y)
if polyart.index_moving is not None:
# if a point is selected
if polyart.snapped:
# if snapped, snap the position
new_position = (polyart.snap(mouse[0]), polyart.snap(mouse[1]))
else:
# if not, do not snap the position
new_position = mouse
# update variable model_data
polyart.model_data[polyart.index_moving] = new_position
# refresh the canvas
polyart.refresh()
# endregion
# ------------------------------------------------------------------------ #
# movement #
# ------------------------------------------------------------------------ #
# region movement
def left(event):
"""
Moves the model left.
"""
# if focus is on an entry widget, return
try:
if polyart.ui.root.focus_get().winfo_class() == "Entry":
return
except Exception:
pass
if polyart.snapped:
# if snapped, move vector is effectively snapped
move = (-polyart.GRIDSIZE, 0)
else:
move = (-1, 0)
# iterate through model data
for i in range(len(polyart.model_data)):
x = polyart.model_data[i][0]
y = polyart.model_data[i][1]
# offset each point by the move vector
polyart.model_data[i] = (x + move[0], y - move[1])
# refresh the canvas
polyart.refresh()
def right(event):
"""
Moves the model right.
"""
# if focus is on an entry widget, return
try:
if polyart.ui.root.focus_get().winfo_class() == "Entry":
return
except Exception:
pass
if polyart.snapped:
# if snapped, move vector is effectively snapped
move = (polyart.GRIDSIZE, 0)
else:
move = (1, 0)
# iterate through model data
for i in range(len(polyart.model_data)):
x = polyart.model_data[i][0]
y = polyart.model_data[i][1]
# offset each point by the move vector
polyart.model_data[i] = (x + move[0], y - move[1])
# refresh the canvas
polyart.refresh()
def up(event):
"""
Moves the model up.
"""
# if focus is on an entry widget, return
try:
if polyart.ui.root.focus_get().winfo_class() == "Entry":
return
except Exception:
pass
if polyart.snapped:
# if snapped, move vector is effectively snapped
move = (0, polyart.GRIDSIZE)
else:
move = (0, 1)
# iterate through model data
for i in range(len(polyart.model_data)):
x = polyart.model_data[i][0]
y = polyart.model_data[i][1]
# offset each point by the move vector
polyart.model_data[i] = (x + move[0], y - move[1])
# refresh the canvas
polyart.refresh()
def down(event):
"""
Moves the model down.
"""
# if focus is on an entry widget, return
try:
if polyart.ui.root.focus_get().winfo_class() == "Entry":
return
except Exception:
pass
if polyart.snapped:
# if snapped, move vector is effectively snapped
move = (0, -polyart.GRIDSIZE)
else:
move = (0, -1)
# iterate through model data
for i in range(len(polyart.model_data)):
x = polyart.model_data[i][0]
y = polyart.model_data[i][1]
# offset each point by the move vector
polyart.model_data[i] = (x + move[0], y - move[1])
# refresh the canvas
polyart.refresh()
# endregion
# ------------------------------------------------------------------------ #
# binding #
# ------------------------------------------------------------------------ #
# region binding
def bind_inputs():
"""
Binds the inputs to each function in this file.
"""
# bind the mouse movement on the canvas to the motion function
polyart.ui.canvas.bind("<Motion>", motion)
# bind the mouse presses on the root to the correct functions
polyart.ui.root.bind("<ButtonPress-1>", left_click)
polyart.ui.root.bind("<ButtonRelease-1>", left_release)
polyart.ui.root.bind("<ButtonPress-3>", right_click)
# bind the rotate functions to E and Q
polyart.ui.root.bind("e", rotate_right)
polyart.ui.root.bind("q", rotate_left)
# bind the arrow keys to the movement functions
polyart.ui.root.bind("<Key-Left>", left)
polyart.ui.root.bind("<Key-Right>", right)
polyart.ui.root.bind("<Key-Up>", up)
polyart.ui.root.bind("<Key-Down>", down)
# bind the WSAD keys to the movement functions
polyart.ui.root.bind("<a>", left)
polyart.ui.root.bind("<d>", right)
polyart.ui.root.bind("<w>", up)
polyart.ui.root.bind("<s>", down)
# endregion
| 27.990698 | 149 | 0.534064 | 1,456 | 12,036 | 4.337912 | 0.141484 | 0.056998 | 0.078531 | 0.040374 | 0.59943 | 0.522324 | 0.456935 | 0.456935 | 0.447435 | 0.433819 | 0 | 0.015282 | 0.325856 | 12,036 | 429 | 150 | 28.055944 | 0.763125 | 0.359172 | 0 | 0.522843 | 0 | 0 | 0.021991 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060914 | false | 0.045685 | 0.020305 | 0 | 0.126904 | 0.005076 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd5f550b799363cf152de68256f97f876bf2f1f2 | 1,486 | py | Python | biodada/alphabets.py | simomarsili/biodada | 642fb440d8a66a0413deb69c8623ea3b61d41678 | [
"BSD-3-Clause"
] | null | null | null | biodada/alphabets.py | simomarsili/biodada | 642fb440d8a66a0413deb69c8623ea3b61d41678 | [
"BSD-3-Clause"
] | null | null | null | biodada/alphabets.py | simomarsili/biodada | 642fb440d8a66a0413deb69c8623ea3b61d41678 | [
"BSD-3-Clause"
] | null | null | null | """Alphabet-related methods."""
import logging
import numpy
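# the "_u" variants extend the canonical alphabets with ambiguity codes (B/Z/X, IUPAC symbols)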
ALPHABETS = {
'protein': '-ACDEFGHIKLMNPQRSTVWY',
'dna': '-ACGT',
'rna': '-ACGU',
'protein_u': '-ACDEFGHIKLMNPQRSTVWYBZX',
'dna_u': '-ACGTRYMKWSBDHVN',
'rna_u': '-ACGURYMKWSBDHVN',
}
logger = logging.getLogger(__name__)
def check_alphabet(alphabet):
# A string of ordered, unique symbols
return ''.join(sorted(set(alphabet)))
def check_alphabet_records(records, alphabet):
"""Filter out records not consistent with alphabet."""
alphabet_set = set(alphabet)
return (r for r in records if set(r[1]) <= alphabet_set)
def score_alphabet(alphabet, counts):
"""Score for alphabet given counts."""
import math
chars = set(alphabet) - set('*-')
score = (sum([counts.get(a, 0) for a in chars]) / math.log(len(alphabet)))
logger.debug('alphabet %r score %r', alphabet, score)
return score
def guess_alphabet(records):
"""Guess alphabet from an iterable of records."""
from collections import Counter
from biodada import ALPHABETS
data = numpy.array([list(record[1]) for record in records],
dtype='U1').flatten()
counts = Counter(data)
max_score = float('-inf')
for key, alphabet in ALPHABETS.items():
score = score_alphabet(alphabet, counts)
if score > max_score:
max_score = score
guess = key
logger.info('Alphabet guess: %r', guess)
return ALPHABETS[guess]
| 28.576923 | 78 | 0.647376 | 179 | 1,486 | 5.273743 | 0.413408 | 0.067797 | 0.033898 | 0.057203 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003445 | 0.218708 | 1,486 | 51 | 79 | 29.137255 | 0.809647 | 0.126514 | 0 | 0 | 0 | 0 | 0.129209 | 0.035239 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.138889 | 0.027778 | 0.361111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd62a6bcee335bdbb5aaa3e5bf955b75d241b748 | 1,813 | py | Python | server/app.py | osteele/tidal-memories | afcd3c8900814577374bfd847ba05c12ca88f397 | [
"MIT"
] | 1 | 2018-07-24T20:19:52.000Z | 2018-07-24T20:19:52.000Z | server/app.py | osteele/tidal-memories | afcd3c8900814577374bfd847ba05c12ca88f397 | [
"MIT"
] | 2 | 2021-03-09T09:59:44.000Z | 2021-05-09T17:29:22.000Z | server/app.py | osteele/tidal-memories | afcd3c8900814577374bfd847ba05c12ca88f397 | [
"MIT"
] | null | null | null | import os
import gevent
import redis
from flask import Flask, render_template, send_from_directory
from flask_cors import CORS
from flask_restplus import Api
from flask_sockets import Sockets
from geventwebsocket.exceptions import WebSocketError
from .image_resource import api as images_api
from .thumbnails import SMALL_THUMBNAIL_DIR
REDIS_CHAN = "sensor_data"
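# Redis pub/sub channel used to relay sensor readings between websocket clients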
app = Flask(__name__)
app.config["SERVE_LOCAL_IMAGES"] = os.environ.get("SERVE_LOCAL_IMAGES")
CORS(app)
sockets = Sockets(app)
REDIS_URL = os.environ.get("REDIS_URL")
if REDIS_URL:
redis_conn = redis.StrictRedis.from_url(REDIS_URL)
@app.route("/")
def splash():
return render_template("splash.html")
if app.config["SERVE_LOCAL_IMAGES"]:
@app.route("/images/small-thumbnail/<path:filename>")
def thumbnail(filename):
return send_from_directory(str(SMALL_THUMBNAIL_DIR), str(filename))
if REDIS_URL:
@sockets.route("/sensor_data")
def sensor_data_route(ws):
def publish():
while not ws.closed:
data = ws.receive()
if data:
# print(ws, "publish", data, type(data))
redis_conn.publish(REDIS_CHAN, data)
def subscribe():
pubsub = redis_conn.pubsub()
pubsub.subscribe(REDIS_CHAN)
for message in pubsub.listen():
if message["type"] == "message":
data = message.get("data")
# print(ws, "send", data, type(data))
try:
ws.send(data.decode())
except WebSocketError:
return
gevent.spawn(subscribe)
publish()
api = Api(app, doc="/docs/", title="Matrix Image Gallery API", version="0.1")
api.add_namespace(images_api)
| 26.661765 | 77 | 0.629344 | 219 | 1,813 | 5.013699 | 0.342466 | 0.03643 | 0.043716 | 0.034608 | 0.045537 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001499 | 0.264203 | 1,813 | 67 | 78 | 27.059701 | 0.821589 | 0.040816 | 0 | 0.042553 | 0 | 0 | 0.106567 | 0.022465 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106383 | false | 0 | 0.212766 | 0.042553 | 0.382979 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd63313f347912b5d918aa4c501b628907905f8a | 3,045 | py | Python | batch_rl/multi_head/atari_helpers.py | alhamzah/batch_rl | 7f8d9ea0ba330cc0642515dcc44c2ad687c3a927 | [
"Apache-2.0"
] | null | null | null | batch_rl/multi_head/atari_helpers.py | alhamzah/batch_rl | 7f8d9ea0ba330cc0642515dcc44c2ad687c3a927 | [
"Apache-2.0"
] | null | null | null | batch_rl/multi_head/atari_helpers.py | alhamzah/batch_rl | 7f8d9ea0ba330cc0642515dcc44c2ad687c3a927 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
slim = tf.contrib.slim
class multi_head_network(tf.keras.Model):
"""The convolutional network used to compute the agent's Q-values."""
def __init__(self, num_actions, num_heads, network_type, name=None, **kwargs):
"""Creates the layers used for calculating Q-values.
"""
super(multi_head_network, self).__init__(name=name)
self.num_actions = num_actions
self.network_type = network_type
self.num_heads = num_heads
# Defining layers.
activation_fn = tf.keras.activations.relu
# Setting names of the layers manually to make variable names more similar
# with tf.slim variable names/checkpoints.
self.conv1 = tf.keras.layers.Conv2D(32, [8, 8], strides=4, padding='same',
activation=activation_fn, name='Conv')
self.conv2 = tf.keras.layers.Conv2D(64, [4, 4], strides=2, padding='same',
activation=activation_fn, name='Conv')
self.conv3 = tf.keras.layers.Conv2D(64, [3, 3], strides=1, padding='same',
activation=activation_fn, name='Conv')
self.flatten = tf.keras.layers.Flatten()
self.dense1 = tf.keras.layers.Dense(512, activation=activation_fn,
name='fully_connected')
self.dense2 = tf.keras.layers.Dense(num_actions*num_heads,
activation=None,
name='fully_connected_q_heads')
def call(self, state):
"""Creates the output tensor/op given the state tensor as input.
See https://www.tensorflow.org/api_docs/python/tf/keras/Model for more
information on this. Note that tf.keras.Model implements `call` which is
wrapped by `__call__` function by tf.keras.Model.
Parameters created here will have scope according to the `name` argument
given at `.__init__()` call.
Args:
state: Tensor, input tensor.
Returns:
collections.namedtuple, output ops (graph mode) or output tensors (eager).
"""
x = tf.cast(state, tf.float32)
x = tf.div(x, 255.)
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.flatten(x)
x = self.dense1(x)
x = self.dense2(x)
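    # reshape the flat logits into per-head Q-values: (batch, num_actions, num_heads)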
q_heads = tf.reshape(x, [-1, self.num_actions, self.num_heads])
unordered_q_heads = q_heads
q_values = tf.reduce_mean(q_heads, axis=-1)
return self.network_type(q_heads, unordered_q_heads, q_values)
| 40.6 | 80 | 0.674877 | 435 | 3,045 | 4.597701 | 0.413793 | 0.0385 | 0.039 | 0.052 | 0.1095 | 0.0675 | 0.0675 | 0.0675 | 0 | 0 | 0 | 0.019815 | 0.221018 | 3,045 | 74 | 81 | 41.148649 | 0.823356 | 0.430213 | 0 | 0.085714 | 0 | 0 | 0.037304 | 0.013839 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.057143 | 0 | 0.171429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd63927a981ce679de4270308c480c8f49ea21fc | 2,358 | py | Python | data/gary/heart.py | isabellewei/deephealth | a226788561996e698b5f52b4b83683dcb3563ea5 | [
"MIT"
] | null | null | null | data/gary/heart.py | isabellewei/deephealth | a226788561996e698b5f52b4b83683dcb3563ea5 | [
"MIT"
] | null | null | null | data/gary/heart.py | isabellewei/deephealth | a226788561996e698b5f52b4b83683dcb3563ea5 | [
"MIT"
] | null | null | null | from time import time
from sklearn.preprocessing import StandardScaler
from sklearn import model_selection
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier, VotingClassifier, BaggingClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
import numpy as np
import scipy as sp
import pandas as pd
import math
df = pd.read_csv("parsed_heart.csv")
y1 = df["num"].values
cols = list(df)
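# candidate models: an MLP with three hidden layers plus bagging, random-forest, AdaBoost, and a soft-voting ensemble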
mlp = MLPClassifier(hidden_layer_sizes=(100,100,100))
clf1 = BaggingClassifier(n_estimators=10)
clf2 = BaggingClassifier(n_estimators=100)
clf3 = RandomForestClassifier(n_estimators=10,criterion='gini', min_samples_split=2,max_features=None)
clf4 = AdaBoostClassifier(n_estimators=100)
clf5 = VotingClassifier(estimators=[("rf",clf3),('bg',clf2),('ml',mlp),('ada',clf4)],voting='soft')
dropped = set(['num','id'])
columns2 = [z for z in cols if z not in dropped]
X2 = df[columns2].values
X_train, X_test, y_train, y_test = train_test_split(X2,y1,test_size=0.90)
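# note: test_size=0.90 holds out 90% of the rows, leaving only 10% for training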
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
mlp.fit(X_train,y_train)
predictions2 = mlp.predict(X_test)
print(classification_report(y_test, predictions2))
print(accuracy_score(y_test, predictions2))
kfold = KFold(n_splits=3,shuffle=True)
print(cross_val_score(mlp,X_test,y_test,cv=kfold).mean())
clf2.fit(X_train,y_train)
predictions = clf2.predict(X_test)
print(classification_report(y_test, predictions))
print(accuracy_score(y_test, predictions))
clf3.fit(X_train,y_train)
predictions2 = clf3.predict(X_test)
print(classification_report(y_test, predictions2))
print(accuracy_score(y_test, predictions2))
clf4.fit(X_train,y_train)
predictions2 = clf4.predict(X_test)
print(classification_report(y_test, predictions2))
print(accuracy_score(y_test, predictions2))
clf5.fit(X_train,y_train)
predictions2 = clf5.predict(X_test)
print(classification_report(y_test, predictions2))
print(accuracy_score(y_test, predictions2))
print(cross_val_score(clf5,X_test,y_test,cv=kfold).mean())
| 34.173913 | 109 | 0.787956 | 339 | 2,358 | 5.250737 | 0.315634 | 0.036517 | 0.076404 | 0.02809 | 0.329213 | 0.307865 | 0.247191 | 0.223596 | 0.2 | 0.2 | 0 | 0.027936 | 0.104326 | 2,358 | 68 | 110 | 34.676471 | 0.814867 | 0 | 0 | 0.150943 | 0 | 0 | 0.017935 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.245283 | 0 | 0.245283 | 0.226415 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd664ca13e3a2c2338580dff837587436ec24e4f | 3,925 | py | Python | fitsmap/utils.py | ryanhausen/fitsmap | 07c1fcd44e2d2efed24607f3e866611a1be395d8 | [
"MIT"
] | 19 | 2021-06-24T13:57:59.000Z | 2022-02-02T04:45:23.000Z | fitsmap/utils.py | ryanhausen/fitsmap | 07c1fcd44e2d2efed24607f3e866611a1be395d8 | [
"MIT"
] | 38 | 2019-12-17T18:21:43.000Z | 2022-03-12T00:16:38.000Z | fitsmap/utils.py | ryanhausen/fitsmap | 07c1fcd44e2d2efed24607f3e866611a1be395d8 | [
"MIT"
] | 1 | 2021-06-24T10:53:15.000Z | 2021-06-24T10:53:15.000Z | # MIT License
# Copyright 2021 Ryan Hausen and contributors
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import string
from functools import reduce
from itertools import chain, filterfalse
from typing import Iterable, List, Tuple
from astropy.io import fits
from tqdm import tqdm
from PIL import Image
import fitsmap
def digit_to_string(digit: int) -> str:
"""Converts an integer into its word representation"""
if digit == 0:
return "zero"
elif digit == 1:
return "one"
elif digit == 2:
return "two"
elif digit == 3:
return "three"
elif digit == 4:
return "four"
elif digit == 5:
return "five"
elif digit == 6:
return "six"
elif digit == 7:
return "seven"
elif digit == 8:
return "eight"
elif digit == 9:
return "nine"
else:
raise ValueError("Only digits 0-9 are supported")
def make_fname_js_safe(fname: str) -> str:
"""Converts a string filename to a javascript safe identifier."""
if fname[0] in string.digits:
adj_for_digit = digit_to_string(int(fname[0])) + fname[1:]
else:
adj_for_digit = fname
return adj_for_digit.replace(".", "_dot_").replace("-", "_")
def get_fits_image_size(fits_file: str) -> Tuple[int, int]:
"""Returns image size (x, y)
Args:
fits_file (str): fits file path
Returns:
Tuple[int, int]: returns the x and y dims of the input file
"""
hdr = fits.getheader(fits_file)
return hdr["NAXIS1"], hdr["NAXIS2"]
def get_standard_image_size(image_file: str) -> Tuple[int, int]:
"""Returns image size (x, y)
Args:
image_file (str): image file path
Returns:
Tuple[int, int]: returns the x and y dims of the input file
"""
with Image.open(image_file) as f:
size = f.size
return size
def peek_image_info(img_file_names: List[str]) -> Tuple[int, int]:
"""Gets image size values given passed image file names
Args:
img_file_names (List[str]): Input image files that are being tiled
Returns:
Tuple[int, int]: The `max x`, and `max y`
"""
fits_sizes = list(
map(get_fits_image_size, filter(lambda f: f.endswith("fits"), img_file_names),)
)
standard_sizes = list(
map(
get_standard_image_size,
filterfalse(lambda f: f.endswith("fits"), img_file_names),
)
)
max_x, max_y = reduce(
lambda x, y: (max(x[0], y[0]), max(x[1], y[1])),
chain.from_iterable([fits_sizes, standard_sizes]),
(0, 0),
)
return max_x, max_y
def get_version():
with open(os.path.join(fitsmap.__path__[0], "__version__.py"), "r") as f:
return f.readline().strip().replace('"', "")
class MockQueue:
def __init__(self, bar):
self.bar = bar
def put(self, n):
self.bar.update(n=n)
| 28.035714 | 87 | 0.654777 | 569 | 3,925 | 4.400703 | 0.376098 | 0.032348 | 0.026358 | 0.028754 | 0.119808 | 0.104633 | 0.104633 | 0.104633 | 0.079073 | 0.079073 | 0 | 0.009447 | 0.244841 | 3,925 | 139 | 88 | 28.23741 | 0.835358 | 0.423185 | 0 | 0.028986 | 0 | 0 | 0.052315 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115942 | false | 0 | 0.130435 | 0 | 0.478261 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd66ac3284bd0acf65bf00b9b9956f03b31e1d35 | 5,819 | py | Python | bibterm2dict/tresemo.py | madskinner/bibterm2dict | 93039125fb4eaf5640bf4c91d676607dd98bb974 | [
"MIT"
] | null | null | null | bibterm2dict/tresemo.py | madskinner/bibterm2dict | 93039125fb4eaf5640bf4c91d676607dd98bb974 | [
"MIT"
] | null | null | null | bibterm2dict/tresemo.py | madskinner/bibterm2dict | 93039125fb4eaf5640bf4c91d676607dd98bb974 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 29 10:49:22 2017
@author: marks
"""
# File: tree.py
# References:
# http://hg.python.org/cpython/file/4e32c450f438/Lib/tkinter/ttk.py
# http://www.tcl.tk/man/tcl8.5/TkCmd/ttk_treeview.htm#M79
# http://svn.python.org/projects/python/branches/pep-0384/Demo/tkinter/ttk/dirbrowser.py
import os
from tkinter import *
from tkinter import ttk #@Reimport
from demopanels import MsgPanel, SeeDismissPanel
# Constants for formatting file sizes
KB = 1024.0
MB = KB * KB
GB = MB * KB
class TreeDemo(ttk.Frame):
def __init__(self, isapp=True, name='treedemo'):
ttk.Frame.__init__(self, name=name)
self.pack(expand=Y, fill=BOTH)
self.master.title('Tree Demo')
self.isapp = isapp
self._create_widgets()
def _create_widgets(self):
if self.isapp:
MsgPanel(self, ["One of the new Tk themed widgets is a tree widget, which allows ",
"the user to browse a hierarchical data-set such as a file system. ",
"The tree widget not only allows for the tree part itself, but it ",
"also supports an arbitrary number of additional columns which can ",
"show additional data (in this case, the size of the files found ",
"on your file system). You can also change the width of the columns ",
"by dragging the boundary between them."])
SeeDismissPanel(self)
self._create_demo_panel()
def _create_demo_panel(self):
demoPanel = Frame(self)
demoPanel.pack(side=TOP, fill=BOTH, expand=Y)
self._create_treeview(demoPanel)
self._populate_root()
def _create_treeview(self, parent):
f = ttk.Frame(parent)
f.pack(side=TOP, fill=BOTH, expand=Y)
# create the tree and scrollbars
self.dataCols = ('fullpath', 'type', 'size')
self.tree = ttk.Treeview(columns=self.dataCols,
displaycolumns='size')
ysb = ttk.Scrollbar(orient=VERTICAL, command= self.tree.yview)
xsb = ttk.Scrollbar(orient=HORIZONTAL, command= self.tree.xview)
self.tree['yscroll'] = ysb.set
self.tree['xscroll'] = xsb.set
# setup column headings
self.tree.heading('#0', text='Directory Structure', anchor=W)
self.tree.heading('size', text='File Size', anchor=W)
self.tree.column('size', stretch=0, width=70)
# add tree and scrollbars to frame
self.tree.grid(in_=f, row=0, column=0, sticky=NSEW)
ysb.grid(in_=f, row=0, column=1, sticky=NS)
xsb.grid(in_=f, row=1, column=0, sticky=EW)
# set frame resizing priorities
f.rowconfigure(0, weight=1)
f.columnconfigure(0, weight=1)
# action to perform when a node is expanded
self.tree.bind('<<TreeviewOpen>>', self._update_tree)
def _populate_root(self):
# use current directory as root node
self.path = os.getcwd()
# insert current directory at top of tree
# 'values' = column values: fullpath, type, size
# if a column value is omitted, assumed empty
parent = self.tree.insert('', END, text=self.path,
values=[self.path, 'directory'])
# add the files and sub-directories
self._populate_tree(parent, self.path, os.listdir(self.path))
def _populate_tree(self, parent, fullpath, children):
# parent - id of node acting as parent
# fullpath - the parent node's full path
# children - list of files and sub-directories
# belonging to the 'parent' node
for child in children:
# build child's fullpath
cpath = os.path.join(fullpath, child).replace('\\', '/')
if os.path.isdir(cpath):
# directory - only populate when expanded
# (see _create_treeview() 'bind')
                cid = self.tree.insert(parent, END, text=child,
values=[cpath, 'directory'])
# add 'dummy' child to force node as expandable
self.tree.insert(cid, END, text='dummy')
else:
# must be a 'file'
size = self._format_size(os.stat(cpath).st_size)
self.tree.insert(parent, END, text=child,
values=[cpath, 'file', size])
def _format_size(self, size):
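        # Return a human-readable size string, e.g. 1536 -> '1.5 KB'.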
if size >= GB:
return '{:,.1f} GB'.format(size/GB)
if size >= MB:
return '{:,.1f} MB'.format(size/MB)
if size >= KB:
return '{:,.1f} KB'.format(size/KB)
return '{} bytes'.format(size)
def _update_tree(self, event): #@UnusedVariable
# user expanded a node - build the related directory
nodeId = self.tree.focus() # the id of the expanded node
if self.tree.parent(nodeId): # not at root
topChild = self.tree.get_children(nodeId)[0]
# if the node only has a 'dummy' child, remove it and
# build new directory; skip if the node is already
# populated
if self.tree.item(topChild, option='text') == 'dummy':
self.tree.delete(topChild)
path = self.tree.set(nodeId, 'fullpath')
self._populate_tree(nodeId, path, os.listdir(path))
if __name__ == '__main__':
TreeDemo().mainloop() | 39.053691 | 98 | 0.554906 | 698 | 5,819 | 4.545845 | 0.34957 | 0.050425 | 0.017649 | 0.009455 | 0.054207 | 0.054207 | 0.043492 | 0.027104 | 0.027104 | 0 | 0 | 0.013748 | 0.337515 | 5,819 | 149 | 99 | 39.053691 | 0.809339 | 0.218938 | 0 | 0 | 0 | 0 | 0.139432 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096386 | false | 0 | 0.048193 | 0 | 0.204819 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd68af29a64b3d9ad5ee5bf60f1893c99d3b0c76 | 5,803 | py | Python | test/cnnl/op_test/test_multiline_views_graph.py | Cambricon/catch | 2625da389f25a67066d20fb6b0c38250ef98f8ab | [
"BSD-2-Clause"
] | 20 | 2022-03-01T11:40:51.000Z | 2022-03-30T08:17:47.000Z | test/cnnl/op_test/test_multiline_views_graph.py | Cambricon/catch | 2625da389f25a67066d20fb6b0c38250ef98f8ab | [
"BSD-2-Clause"
] | null | null | null | test/cnnl/op_test/test_multiline_views_graph.py | Cambricon/catch | 2625da389f25a67066d20fb6b0c38250ef98f8ab | [
"BSD-2-Clause"
] | null | null | null | from __future__ import print_function
import sys
import os
import copy
import unittest
import logging
import random
# from unittest.main import main
import torch
from torch import nn
import torch_mlu.core.mlu_model as ct
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(cur_dir + "/../../")
from common_utils import testinfo, TestCase # pylint: disable=C0413, C0411
logging.basicConfig(level=logging.DEBUG)
class buildMultiAdd(nn.Module): # pylint: disable=W0223
r"""
graph:
fc --> split --> squeeze --> transpose --> \
\ --> squeeze --> slice --> batch_dot
"""
def __init__(self, shape):
super(buildMultiAdd, self).__init__()
self.shape = len(shape)
self.s1_dim0_1 = random.randint(5, 25)
        self.s1_dim0_2 = random.randint(5, 25)
self.s2_dim2 = random.randint(5, 25)
self.split_dim_0_2 = random.randint(0, 2)
self.split_dim_0_3 = random.randint(0, 3)
self.split_dim_0_4 = random.randint(0, 4)
self.split_dim_0_5 = random.randint(0, 5)
self.unbind_dim_0_2 = random.randint(0, 2)
self.unbind_dim_0_3 = random.randint(0, 3)
self.unbind_dim_0_4 = random.randint(0, 4)
self.unbind_dim_0_5 = random.randint(0, 5)
self.select_dim_0_3 = random.randint(0, 3)
self.select_dim_0_4 = random.randint(0, 4)
self.narrow_dim_0_2 = random.randint(0, 2)
def forward(self, x):
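        # Each branch exercises a different chain of view ops (permute,
        # transpose, expand, squeeze, slice, ...) chosen by the input rank
        # (1-D up to 6-D), ending in a split/unbind whose pieces are merged
        # in the loop below.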
if self.shape == 1:
dim1 = x.size()[0]
x = x.unsqueeze(1)
dim2 = x.size()[1]
dim0 = self.s1_dim0_1
x = x.expand(dim0, dim1, dim2)
x = x.permute(2, 0, 1)
dim0, dim1, dim2 = dim2, dim0, dim1
x = x.add(x)
x = x[:, :, :dim2-1]
x = x.transpose(0, 1)
dim0, dim2 = self.s1_dim0_2, dim2-1
x = x.expand(dim0, dim1, 1, dim2)
x = x.squeeze()
tensors = x.split(2, self.split_dim_0_2)
elif self.shape == 2:
dim0, dim1 = x.size()
x = x.unsqueeze(1)
x = x.permute(0, 2, 1)
dim2 = self.s2_dim2
x = x.expand(dim2, dim0, dim1, 1)
x = x.squeeze()
dim0, dim1, dim2 = dim2, dim0, dim1
x = x.add(x)
x = x[:, :, :dim2-1]
tensors = x.split(2, self.split_dim_0_2)
elif self.shape == 3:
dim0, dim1, dim2 = x.size()
x = x.permute(2, 0, 1)
x = x.transpose(1, 2)
x = x.add(x)
x = x.unsqueeze(2)
dim0, dim2 = dim2, dim0
x = x[:, :, :, :dim2-1]
x = x.squeeze()
tensors = x.split(2, self.split_dim_0_2)
elif self.shape == 4:
x = x.permute(0, 1, 3, 2)
x = x.transpose(0, 1)
x = x.add(x)
dim0, dim1, dim2, dim3 = x.size()
x = x[:, :, :, :dim3-1]
x = x.split(2, self.split_dim_0_3)[0]
tensors = x.unbind(self.unbind_dim_0_3)
elif self.shape == 5:
x = x.permute(3, 2, 0, 4, 1)
x = x.transpose(0, 3)
x = x.add(x)
dim0, dim1, dim2, dim3, _ = x.size()
x = x[:, :dim1-1, :, :dim3-1, :]
x = x.split(2, self.split_dim_0_4)[0]
x = x.select(self.select_dim_0_4, 1)
tensors = x.unbind(self.unbind_dim_0_3)
else:
x = x.permute(0, 3, 4, 1, 2, 5)
x = x.transpose(0, 5)
x = x.add(x)
dim0, dim1, dim2, dim3, _, dim5 = x.size()
x = x[:, :dim1-1, :, :dim3-1, :, :dim5-1]
x = x.split(2, self.split_dim_0_5)[0]
x = x.unbind(self.unbind_dim_0_5)[0]
x = x.select(self.select_dim_0_4, 1)
tensors = x.unbind(self.unbind_dim_0_2)
y = None
for idx in range(len(tensors)-1):
tensor = tensors[idx]
tensor = tensor.transpose(0, 1)
tensor = tensor.permute(2, 1, 0)
tensor = tensor.add(tensor)
tensor = tensor.narrow(self.narrow_dim_0_2, 0, 1)
tensor = tensor.chunk(2, 1)[0]
tensor = tensor.squeeze()
y = y.add(tensor) if y is not None else tensor
return y
class TestMultiWayNetOp(TestCase):
# @unittest.skip("not test")
@testinfo()
def test_multi_way(self):
#print('----Multi-way structure----')
for d in range(6):
dim = d + 1
shape = ()
for _ in range(1, dim+1):
ran_d = random.randint(5, 25)
shape = shape + (ran_d,)
data = torch.randn(shape, dtype=torch.float)
in_cpu = copy.deepcopy(data)
in_mlu = self.to_mlu(data)
net_cpu = buildMultiAdd(shape)
out_cpu = net_cpu(in_cpu)
out_mlu = net_cpu(in_mlu)
self.assertTensorsEqual(out_cpu,
out_mlu.contiguous().cpu().float(),
0.03,
use_MSE=True)
# @unittest.skip("not test")
@testinfo()
def test_multi_way_channel_last(self):
#print('----Multi-way structure----')
shape = (3,4,5,6)
data = torch.randn(shape).to(memory_format=torch.channels_last)
in_cpu = copy.deepcopy(data)
in_mlu = self.to_mlu(data)
net_cpu = buildMultiAdd(shape)
out_cpu = net_cpu(in_cpu)
out_mlu = net_cpu(in_mlu)
self.assertTensorsEqual(out_cpu,
out_mlu.contiguous().cpu().float(),
0.03,
use_MSE=True)
if __name__ == '__main__':
unittest.main()
| 33.350575 | 75 | 0.507496 | 806 | 5,803 | 3.465261 | 0.157568 | 0.029359 | 0.012889 | 0.046545 | 0.552095 | 0.465807 | 0.427855 | 0.420695 | 0.308271 | 0.270319 | 0 | 0.070468 | 0.359297 | 5,803 | 173 | 76 | 33.543353 | 0.680742 | 0.055489 | 0 | 0.323944 | 0 | 0 | 0.002752 | 0 | 0 | 0 | 0 | 0 | 0.014085 | 1 | 0.028169 | false | 0 | 0.077465 | 0 | 0.126761 | 0.007042 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd6c528a553f5578be66448336398952d4309fa6 | 645 | py | Python | easy/53.Maximum_Subarray.py | Leesoar/leetcode | e566513fc0e7055155157798f06089299bd44fd2 | [
"Apache-2.0"
] | 2 | 2018-03-04T23:29:49.000Z | 2019-04-23T01:13:12.000Z | easy/53.Maximum_Subarray.py | Leesoar/leetcode | e566513fc0e7055155157798f06089299bd44fd2 | [
"Apache-2.0"
] | null | null | null | easy/53.Maximum_Subarray.py | Leesoar/leetcode | e566513fc0e7055155157798f06089299bd44fd2 | [
"Apache-2.0"
] | 1 | 2018-03-05T09:58:59.000Z | 2018-03-05T09:58:59.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Question:
Find the contiguous subarray within an array (containing at least one number) which has the largest sum.
Example:
given the array [-2,1,-3,4,-1,2,1,-5,4],
the contiguous subarray [4,-1,2,1] has the largest sum = 6.
'''
class Solution:
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if not nums:
return 0
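        # Kadane's algorithm: cur_sum is the best subarray sum ending at the
        # current element; max_sum is the best sum seen anywhere. For the
        # example in the module docstring the answer is 6 ([4,-1,2,1]).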
        cur_sum = max_sum = nums[0]  # current sum and maximum sum
for num in nums[1:]:
cur_sum = max(num, cur_sum + num)
max_sum = max(cur_sum, max_sum)
return max_sum | 23.035714 | 105 | 0.56124 | 96 | 645 | 3.6875 | 0.541667 | 0.067797 | 0.076271 | 0.090395 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.040359 | 0.308527 | 645 | 28 | 106 | 23.035714 | 0.753363 | 0.516279 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd6d10559ed324300c492ea76a3b5dca3a2078f7 | 3,153 | py | Python | tests/test_docstring.py | cbillingham/docconvert | 2843f7446546ae90ba3f38e1246e69d208e0f053 | [
"BSD-3-Clause"
] | 8 | 2019-10-07T22:49:20.000Z | 2021-12-30T22:31:28.000Z | tests/test_docstring.py | cbillingham/docconvert | 2843f7446546ae90ba3f38e1246e69d208e0f053 | [
"BSD-3-Clause"
] | 5 | 2019-09-17T21:03:38.000Z | 2020-07-23T04:47:21.000Z | tests/test_docstring.py | cbillingham/docconvert | 2843f7446546ae90ba3f38e1246e69d208e0f053 | [
"BSD-3-Clause"
] | null | null | null | """Unit tests for Docstring."""
import docconvert
class TestDocstring(object):
def test_element_ordering(self):
docstring = docconvert.parser.Docstring()
docstring.add_element(("raw", "Docstring."))
docstring.add_return(kind="int")
docstring.add_raises(kind="ValueError")
docstring.add_arg("arg", kind="str")
docstring.add_element(("note", ["First note.", "Second Note."]))
assert docstring.elements == [
("raw", "Docstring."),
("return",),
("raises",),
("args",),
("note", ["First note.", "Second Note."]),
]
def test_args(self):
docstring = docconvert.parser.Docstring()
docstring.add_arg_type("arg1", "Object")
docstring.add_arg("arg2", kind="str")
docstring.add_arg("arg3", desc=["Description."], optional=True)
docstring.add_arg_type("arg3", "int")
assert docstring.elements == [("args",)]
first_arg = docstring.arg_fields.popitem(last=False)
assert first_arg[0] == "arg1"
assert first_arg[1].kind == "Object"
assert docstring.arg_fields["arg2"].kind == "str"
assert docstring.arg_fields["arg2"].optional == False
assert docstring.arg_fields["arg3"].kind == "int"
assert docstring.arg_fields["arg3"].desc == ["Description."]
assert docstring.arg_fields["arg3"].optional == True
def test_attributes(self):
docstring = docconvert.parser.Docstring()
docstring.add_attribute_type("attr1", "Object")
docstring.add_attribute("attr2", kind="str")
docstring.add_attribute("attr3", desc=["Description."])
docstring.add_attribute_type("attr3", "int")
assert docstring.elements == [("attributes",)]
first_attribute = docstring.attribute_fields.popitem(last=False)
assert first_attribute[0] == "attr1"
assert first_attribute[1].kind == "Object"
assert docstring.attribute_fields["attr2"].kind == "str"
assert docstring.attribute_fields["attr2"].optional == False
assert docstring.attribute_fields["attr3"].kind == "int"
assert docstring.attribute_fields["attr3"].desc == ["Description."]
def test_raises(self):
docstring = docconvert.parser.Docstring()
docstring.add_raises("ValueError")
docstring.add_raises("RuntimeError", desc=["Description."])
assert docstring.elements == [("raises",)]
assert docstring.raise_fields[0].kind == "ValueError"
assert docstring.raise_fields[0].desc == []
assert docstring.raise_fields[1].kind == "RuntimeError"
assert docstring.raise_fields[1].desc == ["Description."]
def test_returns(self):
docstring = docconvert.parser.Docstring()
docstring.add_return_type("int")
docstring.add_return(desc=["Description."])
assert docstring.elements == [("return",)]
assert docstring.return_field.kind == "int"
assert docstring.return_field.desc == ["Description."]
docstring.add_return_type("str")
assert docstring.return_field.kind == "str"
| 43.791667 | 75 | 0.631779 | 329 | 3,153 | 5.890578 | 0.161094 | 0.162539 | 0.065015 | 0.074819 | 0.468524 | 0.163055 | 0.128999 | 0 | 0 | 0 | 0 | 0.010914 | 0.21535 | 3,153 | 71 | 76 | 44.408451 | 0.772433 | 0.007929 | 0 | 0.079365 | 0 | 0 | 0.136451 | 0 | 0 | 0 | 0 | 0 | 0.396825 | 1 | 0.079365 | false | 0 | 0.015873 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd6de39ebf958030d5c9ef03e542a73527be7547 | 4,890 | py | Python | preprocess.py | EMBEDDIA/morphological-comment-filtering | 450ecbdaf2672ea09a39476db91c210657ff9c6f | [
"MIT"
] | 1 | 2020-12-01T17:56:11.000Z | 2020-12-01T17:56:11.000Z | preprocess.py | matejklemen/morphological-comment-filtering | 450ecbdaf2672ea09a39476db91c210657ff9c6f | [
"MIT"
] | null | null | null | preprocess.py | matejklemen/morphological-comment-filtering | 450ecbdaf2672ea09a39476db91c210657ff9c6f | [
"MIT"
] | null | null | null | """ The following script is used to preprocess text once and cache it to a csv file. Currently, this means obtaining
the UPOS tags and universal features + renaming columns to a common format.
This is done because it's quite a long process and we do not want to do it every time we make a change. """
import pandas as pd
import os
import argparse
import json
import stanza
from conllu import parse
from tqdm import tqdm
from utils import PAD
parser = argparse.ArgumentParser()
parser.add_argument("--lang", type=str, default="de",
help="2-letter code (ISO 639-1) of used language")
parser.add_argument("--package", type=str, default="default",
help="Name of the used processor for POS/ufeats tagging")
parser.add_argument("--data_path", type=str, default="/home/matej/Documents/embeddia/morphological-additions/morphological-comment-filtering/data/GER/test.csv",
help="PATH to your data")
parser.add_argument("--data_column", type=str, default="content",
help="Column of csv in which the text to be processed is stored")
parser.add_argument("--target_column", type=str, default="target",
help="Column of csv in which the target label is stored")
parser.add_argument("--target_dir", type=str, default="preprocessed/GER",
help="DIRECTORY where processed data should be stored")
def process_conllu(conllu_data):
""" Accepts a conllu string, containing processed sequence, and returns a list[list[dict]] containing properties
of tokens by sentence, i.e. index [i][j] of returned list represents features of j-th token in i-th sentence."""
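    # Illustrative shape for a hypothetical two-sentence input: the result is
    # [[{...}, ...], [{...}, ...]], where each dict carries at least 'form'
    # and 'upostag', plus any universal features the token has.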
sent_features = parse(conllu_data)
processed = []
for curr_sent in sent_features:
converted_sent = []
for curr_token in curr_sent:
curr_features = {"form": curr_token["form"]}
# Unpack universal features; note that some tokens don't have universal features (e.g. punctuation)
universal_features = curr_token["feats"]
if universal_features is not None:
curr_features.update(universal_features)
curr_features.update({"upostag": curr_token.get("upostag", PAD)})
converted_sent.append(curr_features)
processed.append(converted_sent)
return processed
def extract_features(stanza_output):
""" Filter the result returned by a stanza Pipeline, keeping only 'form' (raw word), 'upostag' and universal
features (if present)"""
# features of tokens inside sentence(s): each sentence is a list of dicts, containing token features
relevant_features = []
for curr_sent in stanza_output.sentences:
sent_features = []
for curr_token in curr_sent.words:
processed_feats = {"form": curr_token.text}
            # Note: if features are not predicted for a token, they are simply
            # absent from the dict, whereas a missing POS tag is replaced by
            # the generic PAD value
token_feats = curr_token.feats
if token_feats is not None:
for feat_val_pair in token_feats.split("|"):
feat, val = feat_val_pair.split("=")
processed_feats[feat] = val
token_upos = curr_token.upos
if token_upos is None:
token_upos = PAD
processed_feats["upostag"] = token_upos
sent_features.append(processed_feats)
relevant_features.append(sent_features)
return relevant_features
if __name__ == "__main__":
import torch
args = parser.parse_args()
df = pd.read_csv(args.data_path)
# hr - ftb, en - ewt
nlp = stanza.Pipeline(lang=args.lang, processors='tokenize,pos', package=args.package,
use_gpu=torch.cuda.is_available())
features = []
take_mask = []
for idx_ex in tqdm(range(df.shape[0])):
curr_ex = df.iloc[idx_ex][args.data_column]
try:
output = nlp(curr_ex)
except RuntimeError:
# Undiagnosed stanza error
print(f"Skipping example #{idx_ex}: '{curr_ex}'")
take_mask.append(False)
continue
ex_features = extract_features(output)
take_mask.append(True)
features.append(json.dumps(ex_features))
if not os.path.exists(args.target_dir):
print("Warning: creating directory to store processed data")
os.makedirs(args.target_dir)
# Extract file name from given source path
file_name = args.data_path.split(os.sep)[-1]
target_path = os.path.join(args.target_dir, file_name)
df = df.loc[take_mask].reset_index(drop=True)
df["features"] = features
df = df.rename({args.data_column: "content", args.target_column: "target"}, axis=1)
df.to_csv(os.path.join(args.target_dir, file_name), index=False)
| 41.092437 | 160 | 0.660941 | 657 | 4,890 | 4.765601 | 0.347032 | 0.022996 | 0.032577 | 0.013414 | 0.069626 | 0.069626 | 0.035771 | 0.019802 | 0 | 0 | 0 | 0.002163 | 0.243558 | 4,890 | 118 | 161 | 41.440678 | 0.844282 | 0.218405 | 0 | 0 | 0 | 0.012346 | 0.169536 | 0.02755 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024691 | false | 0 | 0.111111 | 0 | 0.160494 | 0.024691 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd6ded5eb81a42427f646f06cfec5c9cfc641f63 | 4,685 | py | Python | util/callbacks.py | TobiasKoopmann/cobert | 279fc6ce938a81afa2b8f14e4cb20b13f842ff48 | [
"Apache-2.0"
] | null | null | null | util/callbacks.py | TobiasKoopmann/cobert | 279fc6ce938a81afa2b8f14e4cb20b13f842ff48 | [
"Apache-2.0"
] | null | null | null | util/callbacks.py | TobiasKoopmann/cobert | 279fc6ce938a81afa2b8f14e4cb20b13f842ff48 | [
"Apache-2.0"
] | null | null | null | import os
import abc
import json
import numpy as np
class Callback(abc.ABC):
def __init__(self):
pass
@abc.abstractmethod
def __call__(self, predictions, labels):
pass
class Evaluation(Callback):
def __init__(self, ks=(1, 5, 10), ignore_index: int = -100, n_samples_file: str = None):
super().__init__()
self.ks = ks
self.ignore_index = ignore_index
self.evaluation = None
self.use_neg_sampling = False
if n_samples_file:
self.use_neg_sampling = True
with open(n_samples_file, "r") as file:
negative_samples = json.load(file)
self.negative_samples = {int(k): v for k, v in negative_samples.items()}
self.reset()
def __call__(self, predictions, labels): # predictions: torch [batch, 50, 35115], labels: [batch, 50]
for i in range(labels.shape[0]):
for j in range(labels.shape[1]):
if labels[i, j] == self.ignore_index: # [ignore, ignore, ..., mask]
continue
candidate = labels[i, j].item() # integer
                self.evaluation["n"] += 1
                if self.use_neg_sampling:
                    samples = self.negative_samples[candidate] + [candidate]
                    sample_predictions = predictions[i, j][samples].tolist()
                    ranked_samples = list(sorted(zip(samples, sample_predictions), key=lambda x: x[1], reverse=True))  # list of (id, logit)
                    rank = 0
                    for index, sample in enumerate(ranked_samples):
                        if sample[0] == candidate:
                            rank = index
                            break
                    for k in self.ks:
                        if rank < k:
                            self.evaluation["sampled_ndcg"][k] += 1 / np.log2(rank + 2)
                            self.evaluation["sampled_hit"][k] += 1
                # Rank again over the full vocabulary (without negative sampling)
all_predictions = predictions[i, j].tolist()
all_samples = np.arange(len(all_predictions))
ranked_predictions = list(sorted(zip(all_samples, all_predictions), key=lambda x: x[1], reverse=True))
rank = 0
for index, sample in enumerate(ranked_predictions):
if sample[0] == candidate:
rank = index
break
for k in self.ks:
if rank < k:
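                        # A hit at zero-based rank r contributes 1/log2(r+2) to
                        # DCG; with a single relevant item the ideal DCG is 1,
                        # so this is also the NDCG@k contribution.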
self.evaluation["ndcg"][k] += 1 / np.log2(rank + 2)
self.evaluation["hit"][k] += 1
def __str__(self):
return " ".join(
f"{key}@{k}={self.evaluation[key][k] / self.evaluation['n']:.5f}" for key in ("sampled_ndcg", "sampled_hit", "ndcg", "hit") for k in
self.evaluation[key])
def reset(self):
self.evaluation = {"sampled_ndcg": {k: 0 for k in self.ks},
"sampled_hit": {k: 0 for k in self.ks},
"ndcg": {k: 0 for k in self.ks},
"hit": {k: 0 for k in self.ks},
"n": 0}
def get_metric(self, metric: str):
if metric in self.evaluation:
return [(k, self.evaluation[metric][k] / self.evaluation['n']) for k in self.evaluation[metric]]
class PredictionSerializer(Callback):
def __init__(self,
file_name: str,
ignore_index: int = -100):
super().__init__()
self.predictions = []
self.labels = []
self.ignore_index = ignore_index
parent_dir = os.path.dirname(file_name)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
self.file = open(file_name, "w")
self.file.write("Prediction\tLabel\n")
def __call__(self, predictions, labels):
for i in range(labels.shape[0]):
for j in range(labels.shape[1]):
if labels[i, j] != self.ignore_index:
                    self.predictions.append(np.argsort(predictions[i, j].cpu())[::-1].tolist()[:100])  # top-100 indices by descending score
self.labels.append(labels[i, j].item())
for p, l in zip(self.predictions, self.labels):
self.file.write(",".join([str(x) for x in p]) + "\t" + str(l) + "\n")
self.predictions, self.labels = [], []
def serialize(self, file_path: str):
parent_dir = os.path.dirname(file_path)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
with open(file_path, "w") as file:
json.dump({
"predictions": self.predictions,
"labels": self.labels,
}, file)
self.predictions, self.labels = [], []
| 40.387931 | 144 | 0.526147 | 558 | 4,685 | 4.259857 | 0.207885 | 0.082457 | 0.020194 | 0.033656 | 0.397139 | 0.288599 | 0.266723 | 0.266723 | 0.178376 | 0.152293 | 0 | 0.01542 | 0.349413 | 4,685 | 115 | 145 | 40.73913 | 0.764436 | 0.029669 | 0 | 0.313131 | 0 | 0.010101 | 0.044053 | 0.012996 | 0 | 0 | 0 | 0 | 0 | 1 | 0.10101 | false | 0.020202 | 0.040404 | 0.010101 | 0.191919 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd6e02dadbfb92bc2040067fe4f4629a0b88329a | 4,436 | py | Python | titanic.py | techwizAJ/Titanic-comepetition-kaggle | 39a3d8d97a4401b0e12eefd7fbb07dde92147328 | [
"Apache-2.0"
] | null | null | null | titanic.py | techwizAJ/Titanic-comepetition-kaggle | 39a3d8d97a4401b0e12eefd7fbb07dde92147328 | [
"Apache-2.0"
] | null | null | null | titanic.py | techwizAJ/Titanic-comepetition-kaggle | 39a3d8d97a4401b0e12eefd7fbb07dde92147328 | [
"Apache-2.0"
] | null | null | null | """
# -*- coding: utf-8 -*-
@author: techwiz
Created on Sun May 27 14:47:20 2018
"""
import pandas as pd
train_set = pd.read_csv("train.csv")
test_set = pd.read_csv("test.csv")
""" Exploratory Data Analysis """
train_set['Sex'].value_counts()
train_set['Age'].value_counts()
train_set['Embarked'].value_counts()
train_set.isnull().values.any()
train_set.isnull().sum().sum()
train_set.describe()
# Selecting required features from training dataset
train_set.drop('PassengerId', axis=1, inplace= True)
train_set.drop('Name' , axis=1,inplace=True)
train_set.drop('Cabin' , axis =1 , inplace=True)
train_set.drop('Ticket',axis=1, inplace = True)
test_set.drop(['PassengerId','Name','Cabin','Ticket'],axis=1,inplace=True)
#Encoding Categorial Data
train_set['Age'].hist(bins=30)
train_set['Fare'].hist(bins=30)
# impute missing values
"""
Imputing with the mean/median would distort the data distribution, so this approach was not used:
train_set.fillna(train_set.mean(),inplace=True)
train_set.isnull().values.any()
test_set.fillna(train_set.mean(),inplace=True)
test_set.isnull().values.any()
"""
# impute missing values with outlier sentinels so "missing" stays distinguishable
train_set['Age'].fillna(-1,inplace=True)
train_set['Fare'].fillna(-1,inplace=True)
train_set['Embarked'].fillna('Q',inplace=True)
test_set['Age'].fillna(-1,inplace=True)
test_set['Fare'].fillna(-1,inplace=True)
test_set['Embarked'].fillna('Q',inplace=True)
#LabelEncoder
from sklearn.preprocessing import LabelEncoder
lb = LabelEncoder()
train_set['Sex'] = lb.fit_transform(train_set['Sex'])
test_set['Sex'] = lb.fit_transform(test_set['Sex'])
lb_t = LabelEncoder()
train_set['Embarked'] = lb_t.fit_transform(train_set['Embarked'])
test_set['Embarked'] = lb_t.fit_transform(test_set['Embarked'])
"""
train_set = pd.get_dummies(data= train_set , dummy_na = True,columns =['Sex' , 'Embarked'])
test_set = pd.get_dummies(data= test_set , dummy_na = True,columns =['Sex' , 'Embarked'])
train_set.drop('Sex_nan',axis=1,inplace=True)
test_set.drop('Sex_nan',axis=1,inplace=True)
"""
# Selecting Features and target
X = train_set.iloc[:,1:13].values
y = train_set.iloc[:,0].values
X_test = test_set.iloc[:,:].values
"""
#Validating Model for Parameter tuning
from sklearn.model_selection import train_test_split
X_train , X_validate , y_train , y_validate = train_test_split(X,y,test_size=0.18,random_state=42)
#Now applying various ML models for classification
#Feature scaling: testing different scalers and their effect on the data distribution
#Using the Min-Max scaler
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0.5,0.95))
train_set = scaler.fit_transform(train_set)
test_set = scaler.fit_transform(test_set)
train_set['Age'].hist(bins=30)
#testing different scalers
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
train_set = sc_X.fit_transform(train_set)
test_set = sc_X.fit_transform(test_set)
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=1000,min_samples_split=30,min_samples_leaf=4,random_state=42,warm_start=True)
clf.fit(X_train,y_train)
y_pred = clf.predict(X_validate)
import xgboost as xg
classifier = xg.XGBClassifier()
classifier.fit(X_train,y_train)
y_predict_xg = classifier.predict(X_validate)
#metrics
from sklearn.metrics import confusion_matrix
cnf = confusion_matrix(y_validate,y_pred)
cnf1 = confusion_matrix(y_validate,y_predict_xg)
"""
#Feature scaling: testing different scalers and their effect on the data distribution
#Using the Min-Max scaler
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0.5,0.95))
X = scaler.fit_transform(X)
X_test= scaler.transform(X_test)
train_set['Age'].hist(bins=30)
"""
#testing different scalers
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X = sc_X.fit_transform(X)
X_test = sc_X.transform(X_test)
"""
#using various ml models
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=1000,min_samples_split=30,min_samples_leaf=4,random_state=42,warm_start=True)
clf.fit(X,y)
"""
import xgboost as xg
classifier = xg.XGBClassifier()
classifier.fit(X,y)
y_pred_xg = classifier.predict(X_test)
"""
y_predict = clf.predict(X_test)
sub = pd.read_csv('gender_submission.csv')
print(sub['Survived'].value_counts())
#submission
sub['Survived']=y_predict
sub.to_csv('submissions1.csv',index=False)
final = pd.read_csv('submissions1.csv')
print(final['Survived'].value_counts()) | 31.685714 | 119 | 0.772543 | 682 | 4,436 | 4.812317 | 0.234604 | 0.085314 | 0.040219 | 0.034126 | 0.557892 | 0.478976 | 0.379951 | 0.305911 | 0.288239 | 0.288239 | 0 | 0.017751 | 0.085663 | 4,436 | 140 | 120 | 31.685714 | 0.79142 | 0.085437 | 0 | 0.042553 | 0 | 0 | 0.125638 | 0.010725 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.042553 | 0.085106 | 0 | 0.085106 | 0.042553 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd70054eb143582225de2963fca43fb0a5b2f887 | 1,301 | py | Python | scripts/sort-files/sort_files.py | toddnguyen47/saved-games | ca30e369c72f819b2bd87f2ade450bd6aa058f41 | [
"MIT"
] | null | null | null | scripts/sort-files/sort_files.py | toddnguyen47/saved-games | ca30e369c72f819b2bd87f2ade450bd6aa058f41 | [
"MIT"
] | null | null | null | scripts/sort-files/sort_files.py | toddnguyen47/saved-games | ca30e369c72f819b2bd87f2ade450bd6aa058f41 | [
"MIT"
] | null | null | null | import os
import heapq
import json
class SortFiles:
def __init__(self):
self._spell_dir = self._read_json()
self._heap = []
def execute(self):
self._heap = []
        with os.scandir(self._spell_dir) as entries:
            for entry in entries:
if entry.is_dir():
count = self._count_png_in_dir(entry)
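                    # heapq orders tuples lexicographically, so entries pop in
                    # ascending PNG count, with ties broken by directory name.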
heapq.heappush(self._heap, (count, entry.name))
self._iterate_heap()
###########################################################################
# PRIVATE FUNCTIONS
###########################################################################
def _count_png_in_dir(self, entry: os.DirEntry):
count = 0
        with os.scandir(entry) as entries:
            for entry2 in entries:
if entry2.is_file() and entry2.name.endswith(".png"):
count += 1
return count
def _iterate_heap(self):
while self._heap:
val = heapq.heappop(self._heap)
print("{}: {}".format(val[0], val[1]))
def _read_json(self):
with open("config.json", "r") as file:
data = json.load(file)
return data["spellsDirectory"]
if __name__ == "__main__":
sort_files = SortFiles()
sort_files.execute()
| 27.680851 | 79 | 0.487317 | 138 | 1,301 | 4.297101 | 0.384058 | 0.067454 | 0.040472 | 0.043845 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009847 | 0.297463 | 1,301 | 46 | 80 | 28.282609 | 0.63895 | 0.013067 | 0 | 0.060606 | 0 | 0 | 0.039753 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.151515 | false | 0 | 0.090909 | 0 | 0.333333 | 0.030303 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd7412e497e9ca995fe87668255ac1e986b4ec36 | 4,585 | py | Python | rlo/src/ray_main.py | tomjaguarpaw/knossos-ksc | 8fa75e67c0db8f632b135379740051cd10ff31f2 | [
"MIT"
] | 31 | 2021-09-09T16:09:55.000Z | 2022-02-20T02:15:19.000Z | rlo/src/ray_main.py | tomjaguarpaw/knossos-ksc | 8fa75e67c0db8f632b135379740051cd10ff31f2 | [
"MIT"
] | 40 | 2021-08-06T14:30:08.000Z | 2022-01-19T08:49:52.000Z | rlo/src/ray_main.py | tomjaguarpaw/knossos-ksc | 8fa75e67c0db8f632b135379740051cd10ff31f2 | [
"MIT"
] | 5 | 2021-08-06T11:20:31.000Z | 2022-01-07T19:39:40.000Z | # fmt: off
import cProfile
import os
import ray
from rlo import analytics
from rlo.config_utils import config_for_repetition, kwargs_from_config
from rlo.factory import seed_from_config, simul_search_curriculum_from_config, get_train_and_eval_exprs
from rlo.flags import make_config_for_scenario, make_parser, check_save_config, ray_run_arguments
from rlo.ray_worker import RayWorkerPool
def main():
from rlo.summarize_logs import summarize_logs
run_parser = make_parser(ray_run_arguments)
run_args, _ = run_parser.parse_known_args()
if run_args.workers_per_gpu > 1 and (
run_args.gpu_memory_fraction is None or run_args.gpu_memory_fraction * run_args.workers_per_gpu > 1.0):
# In fact it seems there may need to be some margin of extra space on the GPU after allocating each worker
# but we haven't identified how much, or good defaults for gpu_memory_fraction, yet.
raise ValueError("Must have --gpu_memory_fraction <= 1/workers_per_gpu")
config = make_config_for_scenario(run_args.scenario, ray_run_arguments)
ray.init(config['address'], **kwargs_from_config(config,
required_keys=("log_to_driver", "num_cpus", "num_gpus"),
optional_keys=(),
renames=(("redis_token", "redis_password"),)))
train_set, eval_set = get_train_and_eval_exprs(config)
check_save_config(config, train_set.named_exprenvs(), eval_set.named_exprenvs())
pool = RayWorkerPool(config, remote_timeout=config["ray_timeout"], local_task_limit=run_args.profile_local or 0)
with analytics.log_events_to_files(os.path.join(config["result_save_path"], "head" + os.path.sep)):
analytics.event("expression_summary", num_train_expr = len(train_set.named_exprenvs()), num_test_expr = len(eval_set.named_exprenvs()))
for rep_config in ([config] if config.get("repetition") is not None
else [config_for_repetition(config, repetition) for repetition in range(config["num_repetitions"])]):
with analytics.Scope(repetition=rep_config['repetition']):
curriculum = simul_search_curriculum_from_config(rep_config, train_set, eval_set)
pool.schedule_work_requests_from(
curriculum.request_initial(seed_from_config(rep_config)))
if (run_args.profile_local is None) or (run_args.profile_local > 0):
# None means --profile_local was specified without a time limit
cProfile.runctx("pool.run()", {}, {"pool": pool}, os.path.join(config["result_save_path"], "head", "prof.pstats"))
else:
pool.run()
print("Run finished, {} live weights".format(len(pool._weight_id_map)))
if run_args.timeline:
ray.timeline(filename=os.path.join(config['result_save_path'], "ray_timeline.json"))
ray.object_transfer_timeline(filename=os.path.join(config['result_save_path'], "ray_object_transfers.json"))
ray.shutdown() # Reduce memory use of Ray while this headnode machine does all the plotting
events = summarize_logs(config, eval_set, ray=True)
if config["test_kill_worker_after_tasks"] >= 0:
# Test mode - check the logs were sensible; otherwise, fail the run (after producing plots).
# Note that these asserts are not guaranteed or even expected to hold for all parameter values.
# Rather they are intended to allow writing useful tests via sensible choices of parameters.
# First, check that at least one worker was killed. This is only guaranteed if the total number
# of tasks is at least (num_workers * (test_kill_worker_after_tasks-1))+1.
assert any(e["event"] == "worker_died" for e in events)
# Second, check that at least one worker joined after the start.
# Note that this doesn't check that the joining worker was one that had been killed (e.g. from
# the same IP address); instead, another node might have connected for the first time instead.
# Only if num_workers == num_repetitions (the number of workers required before we start),
# can we be sure that the new-joiner was a reconnection.
# Conversely, failing does not necessarily imply that such a worker cannot reconnect, merely that
# it didn't (before the run finished). Only if the total number of tasks is greater than
# (num_workers * test_kill_worker_after_tasks) can we be sure that at least one worker would *have* to
# reconnect for the run to get this far.
assert any(e["event"] == "worker_joined" for e in events)
if __name__ == "__main__": main()
| 59.545455 | 143 | 0.721047 | 672 | 4,585 | 4.669643 | 0.364583 | 0.022307 | 0.02167 | 0.020395 | 0.210644 | 0.119822 | 0.090504 | 0.0529 | 0.03123 | 0.03123 | 0 | 0.002428 | 0.191494 | 4,585 | 76 | 144 | 60.328947 | 0.844079 | 0.325409 | 0 | 0 | 0 | 0 | 0.135047 | 0.024081 | 0 | 0 | 0 | 0 | 0.044444 | 1 | 0.022222 | false | 0.022222 | 0.2 | 0 | 0.222222 | 0.022222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd776a6674d87758862d2b05706f184ab21ca00d | 3,490 | py | Python | monero/wordlists/wordlist.py | massanchik/monero-python | 5699c26f6ba0a64f50ac065ebe0419daf01fd993 | [
"BSD-3-Clause"
] | 130 | 2019-03-22T01:50:38.000Z | 2022-03-30T11:34:12.000Z | monero/wordlists/wordlist.py | massanchik/monero-python | 5699c26f6ba0a64f50ac065ebe0419daf01fd993 | [
"BSD-3-Clause"
] | 64 | 2019-03-12T10:32:36.000Z | 2022-03-31T12:38:20.000Z | monero/wordlists/wordlist.py | massanchik/monero-python | 5699c26f6ba0a64f50ac065ebe0419daf01fd993 | [
"BSD-3-Clause"
] | 55 | 2019-03-22T01:50:50.000Z | 2022-03-28T02:38:04.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from binascii import crc32
from six import with_metaclass
WORDLISTS = {}
_log = logging.getLogger(__name__)
class WordlistType(type):
def __new__(cls, name, bases, attrs):
if bases:
if 'language_name' not in attrs:
raise TypeError("Missing language_name for {0}".format(name))
if 'unique_prefix_length' not in attrs:
raise TypeError("Missing 'unique_prefix_length' for {0}".format(name))
if 'word_list' not in attrs:
raise TypeError("Missing 'word_list' for {0}".format(name))
if 'english_language_name' not in attrs:
                _log.warning("No 'english_language_name' for {0}, using '{1}'".format(name, attrs['language_name']))
attrs['english_language_name'] = attrs['language_name']
if len(attrs['word_list']) != 1626:
raise TypeError("Wrong word list length for {0}".format(name))
new_cls = super(WordlistType, cls).__new__(cls, name, bases, attrs)
if bases:
WORDLISTS[new_cls.english_language_name] = new_cls
return new_cls
class Wordlist(with_metaclass(WordlistType)):
n = 1626
@classmethod
def encode(cls, hex):
"""Convert hexadecimal string to mnemonic word representation with checksum.
"""
out = []
for i in range(len(hex) // 8):
word = endian_swap(hex[8*i:8*i+8])
x = int(word, 16)
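            # Express the 32-bit value x as three "digits" in base n = 1626;
            # each digit is offset by the previous one, which decode() inverts
            # with modular differences.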
w1 = x % cls.n
w2 = (x // cls.n + w1) % cls.n
w3 = (x // cls.n // cls.n + w2) % cls.n
out += [cls.word_list[w1], cls.word_list[w2], cls.word_list[w3]]
checksum = cls.get_checksum(" ".join(out))
out.append(checksum)
return " ".join(out)
@classmethod
def decode(cls, phrase):
"""Calculate hexadecimal representation of the phrase.
"""
phrase = phrase.split(" ")
out = ""
for i in range(len(phrase) // 3):
word1, word2, word3 = phrase[3*i:3*i+3]
w1 = cls.word_list.index(word1)
w2 = cls.word_list.index(word2) % cls.n
w3 = cls.word_list.index(word3) % cls.n
            x = w1 + cls.n * ((w2 - w1) % cls.n) + cls.n * cls.n * ((w3 - w2) % cls.n)
out += endian_swap("%08x" % x)
return out
@classmethod
def get_checksum(cls, phrase):
"""Given a mnemonic word string, return a string of the computed checksum.
:rtype: str
"""
phrase_split = phrase.split(" ")
if len(phrase_split) < 12:
raise ValueError("Invalid mnemonic phrase")
if len(phrase_split) > 13:
# Standard format
phrase = phrase_split[:24]
else:
# MyMonero format
phrase = phrase_split[:12]
wstr = "".join(word[:cls.unique_prefix_length] for word in phrase)
wstr = bytearray(wstr.encode('utf-8'))
z = ((crc32(wstr) & 0xffffffff) ^ 0xffffffff ) >> 0
z2 = ((z ^ 0xffffffff) >> 0) % len(phrase)
return phrase_split[z2]
def get_wordlist(name):
try:
return WORDLISTS[name]
except KeyError:
raise ValueError("No such word list")
def list_wordlists():
return WORDLISTS.keys()
def endian_swap(word):
"""Given any string, swap bits and return the result.
:rtype: str
"""
return "".join([word[i:i+2] for i in [6, 4, 2, 0]])
| 31.160714 | 103 | 0.565616 | 446 | 3,490 | 4.29148 | 0.269058 | 0.027168 | 0.034483 | 0.029258 | 0.150993 | 0.094566 | 0.028213 | 0 | 0 | 0 | 0 | 0.030528 | 0.305444 | 3,490 | 111 | 104 | 31.441441 | 0.759076 | 0.105158 | 0 | 0.068493 | 0 | 0 | 0.107062 | 0.028311 | 0 | 0 | 0.009762 | 0 | 0 | 1 | 0.09589 | false | 0 | 0.041096 | 0.013699 | 0.273973 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd78a71bbe18cc4f93cb82249b13ad78a464f21f | 3,165 | py | Python | BlenderScripts/process_mesh.py | razluta-unity/BlenderProcessUnity | d1d20f8b1910132c63cd73570c783a55c05fafe2 | [
"MIT"
] | 2 | 2020-11-24T06:10:44.000Z | 2021-09-13T11:57:22.000Z | BlenderScripts/process_mesh.py | razluta-unity/BlenderProcessUnity | d1d20f8b1910132c63cd73570c783a55c05fafe2 | [
"MIT"
] | null | null | null | BlenderScripts/process_mesh.py | razluta-unity/BlenderProcessUnity | d1d20f8b1910132c63cd73570c783a55c05fafe2 | [
"MIT"
] | 2 | 2020-12-03T07:48:48.000Z | 2021-06-09T20:18:26.000Z | import os
import argparse
import bpy
# Constants
FBX_EXTENSION = ".fbx"
BLENDER_ACTION_SELECT = "SELECT"
BLENDER_TYPE_MESH = "MESH"
BLENDER_MODIFIER_BEVEL = "BEVEL"
def get_args():
"""
    Parse the arguments passed to this script after Blender's "--" separator (e.g. from a .bat launcher).
:rtype: object
:return: An object containing the arguments as properties.
"""
parser_double_dash = "--"
parser_path_short_argument = "-p"
parser_path_long_argument = "--path"
parser_path_help = "asset path"
parser = argparse.ArgumentParser()
_, all_arguments = parser.parse_known_args()
double_dash_index = all_arguments.index(parser_double_dash)
script_args = all_arguments[double_dash_index + 1:]
parser.add_argument(parser_path_short_argument, parser_path_long_argument, help=parser_path_help)
parsed_script_args, _ = parser.parse_known_args(script_args)
return parsed_script_args
def setup_and_run_mesh_process():
"""
Initialize the arguments and run the mesh process.
"""
args = get_args()
source_asset_path = args.path
process_mesh(source_asset_path)
def process_mesh(asset_path):
"""
Process the mesh at the given asset_path.
In this sample, processing = beveling and exporting the beveled mesh to the same path, with an added
suffix to the name.
:param string asset_path: The absolute asset path.
"""
processed_mesh_suffix = "_processed"
asset_name = os.path.splitext(os.path.basename(asset_path))[0]
source_asset_directory = os.path.dirname(asset_path)
# Determine new naming and paths for the processed mesh
export_asset_name = asset_name + processed_mesh_suffix
export_asset_path = os.path.join(source_asset_directory, export_asset_name + FBX_EXTENSION)
print("The source asset path is: " + asset_path)
print("The source asset name is: " + asset_name)
print("The source directory path is: " + source_asset_directory)
# Clear the default Blender scene
bpy.ops.object.select_all(action=BLENDER_ACTION_SELECT)
bpy.ops.object.delete()
# Import the asset in the Blender scene
processing_failed = False
try:
bpy.ops.import_scene.fbx(filepath=asset_path)
except Exception as e:
processing_failed = True
print("Could not import asset at : " + asset_path)
print(e)
# Process the asset
# In this sample, I'm bevelling the asset and exporting the new mesh right next to the old one.
# You can add your custom processing here and replace the sample.
try:
imported_assets = bpy.context.selected_objects
for asset in imported_assets:
if asset.type != BLENDER_TYPE_MESH:
continue
# Apply a bevel modifier on the mesh
bevel_modifier_name = "Bevel Modifier"
asset.modifiers.new(name=bevel_modifier_name, type=BLENDER_MODIFIER_BEVEL)
except Exception as e:
processing_failed = True
print("Could not process asset.")
print(e)
# Export the asset from Blender back to Unity, next to the original asset
if processing_failed:
return
try:
bpy.ops.export_scene.fbx(
filepath=export_asset_path,
use_selection=True)
except Exception as e:
print("Could not export to path: " + export_asset_path)
print(e)
# Triggering the mesh process
setup_and_run_mesh_process()
| 29.579439 | 101 | 0.766193 | 469 | 3,165 | 4.921109 | 0.294243 | 0.062392 | 0.019497 | 0.023397 | 0.063258 | 0.044194 | 0.044194 | 0.044194 | 0.044194 | 0.044194 | 0 | 0.000748 | 0.155134 | 3,165 | 106 | 102 | 29.858491 | 0.862378 | 0.278041 | 0 | 0.177419 | 0 | 0 | 0.09942 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048387 | false | 0 | 0.112903 | 0 | 0.193548 | 0.145161 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd7ba5bd647118022f742f767d54083d1e57c29a | 12,562 | py | Python | experiment_launcher.py | alekdimi/arms | f83a32caa8283789c61b59f53832149410be765b | [
"MIT"
] | 2 | 2021-06-15T09:41:45.000Z | 2021-09-08T18:30:44.000Z | experiment_launcher.py | alekdimi/arms | f83a32caa8283789c61b59f53832149410be765b | [
"MIT"
] | null | null | null | experiment_launcher.py | alekdimi/arms | f83a32caa8283789c61b59f53832149410be765b | [
"MIT"
] | null | null | null | import os
from absl import app, flags
import dataset
import networks
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
layers = tf.keras.layers
flags.DEFINE_enum('dataset', 'static_mnist', ['static_mnist', 'dynamic_mnist', 'fashion_mnist', 'omniglot'], 'Dataset to use.')
flags.DEFINE_float('genmo_lr', 1e-4, 'Learning rate for decoder, Generation network.')
flags.DEFINE_float('infnet_lr', 1e-4, 'Learning rate for encoder, Inference network.')
flags.DEFINE_float('prior_lr', 1e-2, 'Learning rate for prior variables.')
flags.DEFINE_integer('batch_size', 50, 'Training batch size.')
flags.DEFINE_integer('num_pairs', 1, ('Number of sample pairs used by gradient estimators.'))
flags.DEFINE_integer('num_steps', int(1e6), 'Number of training steps.')
flags.DEFINE_string('encoder_type', 'linear', 'Choice supported: linear, nonlinear')
flags.DEFINE_string('grad_type', 'arm', 'Choice supported: arm, disarm, reinforce')
flags.DEFINE_string('logdir', 'logs/tmp', 'Directory for storing logs.')
flags.DEFINE_bool('verbose', False, 'Whether to turn on training result logging.')
flags.DEFINE_integer('repeat_idx', 0, 'Dummy flag to label the experiments in repeats.')
flags.DEFINE_bool('half_p_trick', False, 'Enforce the p range is [0., 0.5]')
flags.DEFINE_float('epsilon', 0., 'Additive float to prevent numerical underflow in log(x).')
flags.DEFINE_float('temperature', None, 'Temperature for RELAX estimator.')
flags.DEFINE_float('scaling_factor', None, 'Scaling factor for RELAX estimator.')
flags.DEFINE_bool('eager', False, 'Enable eager execution.')
flags.DEFINE_bool('bias_check', False, 'Carry out bias check for RELAX and baseline')
flags.DEFINE_bool('demean_input', False, 'Demean for encoder and decoder inputs.')
flags.DEFINE_bool('initialize_with_bias', False, 'Initialize the final layer bias of decoder with dataset mean.')
flags.DEFINE_integer('seed', 1, 'Global random seed.')
flags.DEFINE_integer('num_eval_samples', None, 'Number of samples for evaluation, defaults to num_pairs.')
flags.DEFINE_integer('num_train_samples', None, 'Number of samples for evaluating on the training set, defaults to num_pairs.')
flags.DEFINE_bool('debug', False, 'Turn on debugging mode.')
FLAGS = flags.FLAGS
def process_batch_input(input_batch):
input_batch = tf.reshape(input_batch, [tf.shape(input_batch)[0], -1])
input_batch = tf.cast(input_batch, tf.float32)
return input_batch
def initialize_grad_variables(target_variable_list):
return [tf.Variable(tf.zeros(shape=i.shape)) for i in target_variable_list]
def estimate_gradients(input_batch, bvae_model, gradient_type, sample_size=1):
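  """Return (decoder grads, prior grads, encoder grads, generative loss).

  For 'relax', the encoder gradient combines a score-function term (the
  learning signal times the gradient of log q) with the gradient of the
  reparameterized control-variate loss; the other estimator types reuse the
  per-sample multipliers from get_layer_grad_estimation.
  """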
if gradient_type == 'relax':
with tf.GradientTape(persistent=True) as tape:
genmo_loss, reparam_loss, learning_signal, log_q = (
bvae_model.get_relax_loss(input_batch, temperature=FLAGS.temperature,
scaling_factor=FLAGS.scaling_factor, num_samples=sample_size))
genmo_grads = tape.gradient(genmo_loss, bvae_model.decoder_vars)
prior_grads = tape.gradient(genmo_loss, bvae_model.prior_vars)
infnet_vars = bvae_model.encoder_vars
infnet_grads_1 = tape.gradient(log_q, infnet_vars, output_gradients=learning_signal)
infnet_grads_2 = tape.gradient(reparam_loss, infnet_vars)
infnet_grads = [infnet_grads_1[i] + infnet_grads_2[i] for i in range(len(infnet_vars))]
else:
with tf.GradientTape(persistent=True) as tape:
elbo, _, infnet_logits, _ = bvae_model(input_batch)
genmo_loss = -1. * tf.reduce_mean(elbo)
genmo_grads = tape.gradient(genmo_loss, bvae_model.decoder_vars)
prior_grads = tape.gradient(genmo_loss, bvae_model.prior_vars)
infnet_grad_multiplier = -1. * bvae_model.get_layer_grad_estimation(input_batch, num_samples=sample_size)
infnet_grads = tape.gradient(infnet_logits, bvae_model.encoder_vars, output_gradients=infnet_grad_multiplier)
del tape
return (genmo_grads, prior_grads, infnet_grads, genmo_loss)
@tf.function
def train_one_step(
train_batch_i,
bvae_model,
genmo_optimizer,
infnet_optimizer,
prior_optimizer,
theta_optimizer,
encoder_grad_variable,
encoder_grad_sq_variable):
"""Train Discrete VAE for 1 step."""
metrics = {}
input_batch = process_batch_input(train_batch_i)
if FLAGS.grad_type in ['loorf', 'arms', 'arms_normal']:
num_samples = 2 * FLAGS.num_pairs
else:
num_samples = FLAGS.num_pairs
if FLAGS.grad_type == 'relax':
with tf.GradientTape(persistent=True) as theta_tape:
(genmo_grads, prior_grads, infnet_grads, genmo_loss) = estimate_gradients(
input_batch, bvae_model, FLAGS.grad_type, num_samples)
genmo_vars = bvae_model.decoder_vars
genmo_optimizer.apply_gradients(list(zip(genmo_grads, genmo_vars)))
prior_vars = bvae_model.prior_vars
prior_optimizer.apply_gradients(list(zip(prior_grads, prior_vars)))
infnet_vars = bvae_model.encoder_vars
infnet_optimizer.apply_gradients(list(zip(infnet_grads, infnet_vars)))
infnet_grads_sq = [tf.square(grad_i) for grad_i in infnet_grads]
theta_vars = []
if bvae_model.control_nn:
theta_vars.extend(bvae_model.control_nn.trainable_variables)
if FLAGS.temperature is None:
theta_vars.append(bvae_model.log_temperature_variable)
if FLAGS.scaling_factor is None:
theta_vars.append(bvae_model.scaling_variable)
theta_grads = theta_tape.gradient(infnet_grads_sq, theta_vars)
theta_optimizer.apply_gradients(zip(theta_grads, theta_vars))
del theta_tape
metrics['learning_signal'] = bvae_model.mean_learning_signal
else:
(genmo_grads, prior_grads, infnet_grads, genmo_loss) = estimate_gradients(
input_batch, bvae_model, FLAGS.grad_type, num_samples)
genmo_vars = bvae_model.decoder_vars
genmo_optimizer.apply_gradients(list(zip(genmo_grads, genmo_vars)))
prior_vars = bvae_model.prior_vars
prior_optimizer.apply_gradients(list(zip(prior_grads, prior_vars)))
infnet_vars = bvae_model.encoder_vars
infnet_optimizer.apply_gradients(list(zip(infnet_grads, infnet_vars)))
batch_size_sq = tf.cast(FLAGS.batch_size * FLAGS.batch_size, tf.float32)
encoder_grad_var = bvae_model.compute_grad_variance(
encoder_grad_variable, encoder_grad_sq_variable,
infnet_grads) / batch_size_sq
return (encoder_grad_var, None, genmo_loss, metrics)
@tf.function
def evaluate(model, tf_dataset, max_step=1000, num_eval_samples=None):
"""Evaluate the model."""
if num_eval_samples:
num_samples = num_eval_samples
elif FLAGS.num_eval_samples:
num_samples = FLAGS.num_eval_samples
elif FLAGS.grad_type in ['vimco', 'local-disarm', 'local-arms']:
num_samples = FLAGS.num_pairs * 2
elif FLAGS.grad_type in ['loorf', 'arms', 'arms_normal']:
num_samples = 2 * FLAGS.num_pairs
else:
num_samples = FLAGS.num_pairs
loss = 0.
n = 0.
for batch in tf_dataset.map(process_batch_input):
if n >= max_step: # used for train_ds, which is a `repeat` dataset.
break
if num_samples > 1:
batch_size = tf.shape(batch)[0]
input_batch = tf.tile(batch, [num_samples, 1])
elbo = tf.reshape(model(input_batch)[0], [num_samples, batch_size])
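      # Multi-sample bound: log of the mean of exp(elbo) over the num_samples
      # draws, computed stably as logsumexp(elbo) - log(num_samples).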
objectives = (tf.reduce_logsumexp(elbo, axis=0, keepdims=False) -
tf.math.log(tf.cast(tf.shape(elbo)[0], tf.float32)))
else:
objectives = model(batch)[0]
loss -= tf.reduce_mean(objectives)
n += 1.
return loss / n
def main(_):
tf.random.set_seed(FLAGS.seed)
logdir = FLAGS.logdir
if not os.path.exists(logdir):
os.makedirs(logdir)
if FLAGS.eager:
tf.config.experimental_run_functions_eagerly(FLAGS.eager)
genmo_lr = tf.constant(FLAGS.genmo_lr)
infnet_lr = tf.constant(FLAGS.infnet_lr)
prior_lr = tf.constant(FLAGS.prior_lr)
genmo_optimizer = tf.keras.optimizers.Adam(learning_rate=genmo_lr)
infnet_optimizer = tf.keras.optimizers.Adam(learning_rate=infnet_lr)
prior_optimizer = tf.keras.optimizers.SGD(learning_rate=prior_lr)
theta_optimizer = tf.keras.optimizers.Adam(learning_rate=infnet_lr,
beta_1=0.999)
batch_size = FLAGS.batch_size
if FLAGS.dataset == 'static_mnist':
train_ds, valid_ds, test_ds = dataset.get_static_mnist_batch(batch_size)
train_size = 50000
elif FLAGS.dataset == 'dynamic_mnist':
train_ds, valid_ds, test_ds = dataset.get_dynamic_mnist_batch(batch_size)
train_size = 50000
elif FLAGS.dataset == 'fashion_mnist':
train_ds, valid_ds, test_ds = dataset.get_dynamic_mnist_batch(
batch_size, fashion_mnist=True)
train_size = 50000
elif FLAGS.dataset == 'omniglot':
train_ds, valid_ds, test_ds = dataset.get_omniglot_batch(batch_size)
train_size = 23000
num_steps_per_epoch = int(train_size / batch_size)
train_ds_mean = dataset.get_mean_from_iterator(
train_ds, dataset_size=train_size, batch_size=batch_size)
if FLAGS.initialize_with_bias:
bias_value = -tf.math.log(
1./tf.clip_by_value(train_ds_mean, 0.001, 0.999) - 1.).numpy()
bias_initializer = tf.keras.initializers.Constant(bias_value)
else:
bias_initializer = 'zeros'
if FLAGS.encoder_type == 'linear':
encoder_hidden_sizes = [200]
encoder_activations = ['linear']
decoder_hidden_sizes = [784]
decoder_activations = ['linear']
elif FLAGS.encoder_type == 'nonlinear':
encoder_hidden_sizes = [200, 200, 200]
encoder_activations = [
layers.LeakyReLU(alpha=0.3),
layers.LeakyReLU(alpha=0.3),
'linear']
decoder_hidden_sizes = [200, 200, 784]
decoder_activations = [
layers.LeakyReLU(alpha=0.3),
layers.LeakyReLU(alpha=0.3),
'linear']
else:
raise NotImplementedError
encoder = networks.BinaryNetwork(
encoder_hidden_sizes,
encoder_activations,
mean_xs=train_ds_mean,
demean_input=FLAGS.demean_input,
name='bvae_encoder')
decoder = networks.BinaryNetwork(
decoder_hidden_sizes,
decoder_activations,
demean_input=FLAGS.demean_input,
final_layer_bias_initializer=bias_initializer,
name='bvae_decoder')
prior_logit = tf.Variable(tf.zeros([200], tf.float32))
if FLAGS.grad_type == 'relax':
control_network = tf.keras.Sequential()
control_network.add(
layers.Dense(137, activation=layers.LeakyReLU(alpha=0.3)))
control_network.add(
layers.Dense(1))
else:
control_network = None
bvae_model = networks.SingleLayerDiscreteVAE(
encoder,
decoder,
prior_logit,
grad_type=FLAGS.grad_type,
half_p_trick=FLAGS.half_p_trick,
epsilon=FLAGS.epsilon,
control_nn=control_network)
bvae_model.build(input_shape=(None, 784))
tensorboard_file_writer = tf.summary.create_file_writer(logdir)
encoder_grad_variable = initialize_grad_variables(bvae_model.encoder_vars)
encoder_grad_sq_variable = initialize_grad_variables(bvae_model.encoder_vars)
start_step = infnet_optimizer.iterations.numpy()
  train_iter = iter(train_ds)
for step_i in range(start_step, FLAGS.num_steps):
(encoder_grad_var, variance_dict, genmo_loss, metrics) = train_one_step(
train_iter.next(),
bvae_model,
genmo_optimizer,
infnet_optimizer,
prior_optimizer,
theta_optimizer,
encoder_grad_variable,
encoder_grad_sq_variable)
train_loss = tf.reduce_mean(genmo_loss)
if step_i % 1000 == 0:
metrics.update({
'train_objective': train_loss,
'eval_metric/train': evaluate(bvae_model, train_ds, max_step=num_steps_per_epoch, num_eval_samples=FLAGS.num_train_samples),
'eval_metric/valid': evaluate(bvae_model, valid_ds, num_eval_samples=FLAGS.num_eval_samples),
'eval_metric/test': evaluate(bvae_model, test_ds, num_eval_samples=FLAGS.num_eval_samples),
'var/grad': encoder_grad_var
})
if FLAGS.grad_type == 'relax':
if FLAGS.temperature is None:
metrics['relax/temperature'] = tf.math.exp(bvae_model.log_temperature_variable)
if FLAGS.scaling_factor is None:
metrics['relax/scaling'] = bvae_model.scaling_variable
tf.print(step_i, metrics)
with tensorboard_file_writer.as_default():
for k, v in metrics.items():
tf.summary.scalar(k, v, step=step_i)
if variance_dict is not None:
tf.print(variance_dict)
for k, v in variance_dict.items():
tf.summary.scalar(k, v, step=step_i)
if __name__ == '__main__':
app.run(main)
| 38.891641 | 134 | 0.729979 | 1,751 | 12,562 | 4.915477 | 0.16562 | 0.036598 | 0.017892 | 0.013942 | 0.373766 | 0.314744 | 0.302428 | 0.282793 | 0.244685 | 0.225514 | 0 | 0.012976 | 0.165658 | 12,562 | 322 | 135 | 39.012422 | 0.808224 | 0.007881 | 0 | 0.266917 | 0 | 0 | 0.123354 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022556 | false | 0 | 0.022556 | 0.003759 | 0.06391 | 0.007519 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd7bd354d1693ae42a8a899d55fa9d11d8bad927 | 1,916 | py | Python | tests/test_wikipron/test_languagecodes.py | Alireza-Sampour/wikipron | ac821c5d0a7d70e7e700f45f9d01b2dfb4ecae9d | [
"Apache-2.0"
] | null | null | null | tests/test_wikipron/test_languagecodes.py | Alireza-Sampour/wikipron | ac821c5d0a7d70e7e700f45f9d01b2dfb4ecae9d | [
"Apache-2.0"
] | null | null | null | tests/test_wikipron/test_languagecodes.py | Alireza-Sampour/wikipron | ac821c5d0a7d70e7e700f45f9d01b2dfb4ecae9d | [
"Apache-2.0"
] | null | null | null | import warnings
import iso639
import pytest

import wikipron
from data.src.codes import _get_language_categories, _get_language_sizes
from wikipron.languagecodes import LANGUAGE_CODES

from . import can_connect_to_wiktionary

# We handle languages with at least this number of pronunciation entries.
_MIN_LANGUAGE_SIZE = 100


@pytest.mark.skipif(not can_connect_to_wiktionary(), reason="need Internet")
def test_language_coverage():
    """Check if WikiPron covers languages with a sufficient amount of data.

    If any warnings are raised, they should be suppressed by expanding
    the LANGUAGE_CODES dict to handle the relevant languages.
    """
    categories = _get_language_categories()
    sizes = _get_language_sizes(categories)
    for language, size in sizes.items():
        if size < _MIN_LANGUAGE_SIZE:
            continue
        if language in ("Mon", "Translingual"):
            # "mon" is the ISO 639 code for Mongolian, but there is also
            # the Mon language (ISO 639 code: "mnw").
            continue
        try:
            language_code = iso639.to_iso639_2(language)
        except iso639.NonExistentLanguageError:
            # Check if WikiPron can handle `language` directly.
            language_code = language
        try:
            language_inferred = wikipron.Config(key=language_code).language
        except iso639.NonExistentLanguageError:
            warnings.warn(f'WikiPron cannot handle "{language}".')
            continue
        if language_inferred != language:
            warnings.warn(
                f'WikiPron resolves the key "{language_code}" to '
                f'"{language_inferred}", '
                f'which is not "{language}" on Wiktionary.'
            )


def test_language_codes_dict_keys():
    """LANGUAGE_CODES keys must be in lowercase for Config._get_language."""
    for k in LANGUAGE_CODES.keys():
        assert k == k.lower()
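
# --- Illustrative example (not part of the original file) ---
# The ISO-639 fallback the coverage test relies on, in isolation; the input
# language name is just an example.
#
#   import iso639
#   try:
#       code = iso639.to_iso639_2("Mongolian")   # -> "mon"
#   except iso639.NonExistentLanguageError:
#       code = "Mongolian"  # fall back to the raw Wiktionary category name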
| 35.481481 | 76 | 0.674322 | 230 | 1,916 | 5.421739 | 0.408696 | 0.044106 | 0.033681 | 0.035285 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017458 | 0.25261 | 1,916 | 53 | 77 | 36.150943 | 0.853352 | 0.252088 | 0 | 0.2 | 0 | 0 | 0.123755 | 0.015647 | 0 | 0 | 0 | 0 | 0.028571 | 1 | 0.057143 | false | 0 | 0.2 | 0 | 0.257143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd7e89a9c152d6c5a11c4d9af7357acb6500801c | 2,947 | py | Python | libs/DegradationModels.py | prasunroy/cnn-on-degraded-images | 85bb4c62a024e3766da3c4d2556e01c6e12e416a | [
"MIT"
] | 15 | 2018-10-01T20:54:13.000Z | 2021-10-09T10:40:21.000Z | libs/DegradationModels.py | prasunroy/cnn-on-degraded-images | 85bb4c62a024e3766da3c4d2556e01c6e12e416a | [
"MIT"
] | 1 | 2020-04-13T23:58:23.000Z | 2020-05-15T11:54:16.000Z | libs/DegradationModels.py | prasunroy/cnn-on-degraded-images | 85bb4c62a024e3766da3c4d2556e01c6e12e416a | [
"MIT"
] | 4 | 2018-11-22T09:44:29.000Z | 2019-09-17T23:37:40.000Z | # -*- coding: utf-8 -*-
"""
Degradation models.
Created on Thu May 24 11:00:00 2018
Author: Prasun Roy | CVPRU-ISICAL (http://www.isical.ac.in/~cvpr)
GitHub: https://github.com/prasunroy/cnn-on-degraded-images
"""
# imports
import cv2
import numpy
import random
# apply a degradation model on an image
def imdegrade(image, model, mu=0, sigma=0, density=0, gb_ksize=(1, 1),
mb_kernel=numpy.zeros((1, 1), dtype='uint8'), quality=100,
seed=None):
# setup seeds for random number generators
# (only required for reproducibility)
numpy.random.seed(seed)
random.seed(seed)
# create a copy of the input image to prevent direct modification
# on the original input image
image = image.copy()
# add an extra dimension for color channel
# (only required for grayscale images)
if len(image.shape) == 2:
image = numpy.expand_dims(image, 2)
# get dimension of the image
h, w, c = image.shape
# apply a degradation model
model = model.lower()
if model == 'gaussian_white' and sigma > 0:
image = image / 255.0
noise = numpy.random.normal(mu, sigma, (h, w))
noise = numpy.dstack([noise]*c)
image += noise
image = numpy.clip(image, 0, 1)
image = (image * 255.0).astype('uint8')
elif model == 'gaussian_color' and sigma > 0:
image = image / 255.0
noise = numpy.random.normal(mu, sigma, (h, w, c))
image += noise
image = numpy.clip(image, 0, 1)
image = (image * 255.0).astype('uint8')
elif model == 'salt_and_pepper':
if density < 0:
density = 0
elif density > 1:
density = 1
x = random.sample(range(w), w)
y = random.sample(range(h), h)
x, y = numpy.meshgrid(x, y)
xy = numpy.c_[x.reshape(-1), y.reshape(-1)]
n = int(w * h * density)
n = random.sample(range(w*h), n)
for i in n:
if random.random() > 0.5:
image[xy[i][1], xy[i][0], :] = 255
else:
image[xy[i][1], xy[i][0], :] = 0
elif model == 'motion_blur':
image = cv2.filter2D(image, -1, mb_kernel,
borderType=cv2.BORDER_CONSTANT)
elif model == 'gaussian_blur':
image = cv2.GaussianBlur(image, gb_ksize, 0,
borderType=cv2.BORDER_CONSTANT)
elif model == 'jpeg_compression':
if quality < 0:
quality = 0
elif quality > 100:
quality = 100
image = cv2.imencode('.jpg', image,
[int(cv2.IMWRITE_JPEG_QUALITY), quality])[-1]
image = cv2.imdecode(image, -1)
# remove the extra dimension for color channel
# (only required for grayscale images)
if image.shape[-1] == 1:
image = numpy.squeeze(image, 2)
return image
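
# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of applying the degradation models above; the image file
# name is hypothetical.
#
#   img = cv2.imread('sample.jpg')
#   noisy = imdegrade(img, 'gaussian_white', sigma=0.1, seed=42)
#   blurred = imdegrade(img, 'gaussian_blur', gb_ksize=(5, 5))
#   lossy = imdegrade(img, 'jpeg_compression', quality=10)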
| 30.697917 | 74 | 0.554462 | 387 | 2,947 | 4.178295 | 0.343669 | 0.037106 | 0.032158 | 0.034632 | 0.284477 | 0.284477 | 0.239951 | 0.223871 | 0.223871 | 0.223871 | 0 | 0.044888 | 0.319647 | 2,947 | 95 | 75 | 31.021053 | 0.761596 | 0.214455 | 0 | 0.169492 | 0 | 0 | 0.044464 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016949 | false | 0 | 0.050847 | 0 | 0.084746 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd7f5d02897acbff32966f7928ccfc825cfa419a | 3,548 | py | Python | apartmentbot/services/listing_service.py | sgarfield/ApartmentBot | 327a47e879998fbb40bd26f84510467f4757d330 | [
"MIT"
] | null | null | null | apartmentbot/services/listing_service.py | sgarfield/ApartmentBot | 327a47e879998fbb40bd26f84510467f4757d330 | [
"MIT"
] | null | null | null | apartmentbot/services/listing_service.py | sgarfield/ApartmentBot | 327a47e879998fbb40bd26f84510467f4757d330 | [
"MIT"
] | null | null | null | """
apartmentbot.services.listing_service
"""
import logging
from dataclasses import dataclass
from typing import List

from dataclasses_json import dataclass_json

from apartmentbot.geolocation.geolocation import distance_finder, neighborhood_locator, place_locator
from apartmentbot.models import Listing, Place, Preferences
from apartmentbot.sources.sources import sources
from apartmentbot.repository.repository import listing_repository


@dataclass_json
@dataclass
class ListingService:
    """ Class ListingService finds and saves apartment listings """

    def find_listings(self, preferences: Preferences) -> List[Listing]:
        """
        Finds all listings that match the set of apartment preferences
        :param preferences: A set of apartment preferences
        :return: A list of apartment listings (may return nothing)
        """
        logging.info("Searching listings", extra={"preferences": preferences})
        source_listings = self._search_sources(preferences)
        return self._match_additional(preferences, source_listings)

    def save_listing(self, listing: Listing):
        """ Stores listing in the database """
        logging.info("Saving listing", extra={"listing": listing})
        return listing_repository.add(listing)

    @staticmethod
    def _search_sources(preferences: Preferences):
        """ Returns listings from all available listing sites """
        return [listings for source in sources for listings in source.get_results(preferences)]

    def _match_additional(self, preferences: Preferences, listings: List[Listing]) -> List[Listing]:
        """ Filters listings by optional additional preferences """
        if not preferences.additional:
            return listings
        if preferences.additional.neighborhoods:
            listings = [listing for listing in listings
                        if self._is_in_neighborhood(listing, preferences.additional.neighborhoods)]
            logging.debug('Neighborhood matches: %d', len(listings))
        for place in preferences.additional.places:
            listings = [listing for listing in listings
                        if self._is_near_place(listing, place.name, place.distance)]
            logging.debug('Place matches: %d', len(listings))
        return listings

    @staticmethod
    def _is_in_neighborhood(listing: Listing, neighborhoods: List[str]) -> bool:
        """ Determines whether the listing is in a chosen neighborhood """
        neighborhood = neighborhood_locator.find_neighborhood(latlng=listing.geotag)
        logging.debug("Listing neighborhood result: %s", neighborhood,
                      extra={"listing_id": listing.id, "geotag": listing.geotag})
        if neighborhood in neighborhoods:
            listing.neighborhood = neighborhood
            return True
        return False

    @staticmethod
    def _is_near_place(listing: Listing, place_name: str, max_distance: int) -> bool:
        """ Determines whether listing is within max_distance of some searchable place """
        place = place_locator.find_place(place=place_name, latlng=listing.geotag)
        distance = distance_finder.find_distance(origin=listing.geotag, destination=place)
        logging.debug("Distance (meters) between listing and %s: %d", place_name, distance,
                      extra={"listing_id": listing.id, "place_id": place, "geotag": listing.geotag})
        if distance <= max_distance:
            listing.places.append(Place(place_name, distance))
            return True
        return False
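
# --- Illustrative usage (not part of the original file) ---
# A minimal sketch, assuming Preferences/Place are built roughly as the
# attribute accesses above suggest; the constructor arguments shown here
# are assumptions.
#
#   prefs = Preferences(...)  # e.g. additional.neighborhoods = ["Capitol Hill"],
#                             #      additional.places = [Place("grocery", 800)]
#   service = ListingService()
#   for listing in service.find_listings(prefs):
#       service.save_listing(listing)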
| 45.487179 | 101 | 0.698985 | 384 | 3,548 | 6.328125 | 0.255208 | 0.018519 | 0.021399 | 0.020576 | 0.054321 | 0.035391 | 0.035391 | 0.035391 | 0.035391 | 0 | 0 | 0 | 0.217587 | 3,548 | 77 | 102 | 46.077922 | 0.87536 | 0.152198 | 0 | 0.211538 | 0 | 0 | 0.070427 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.153846 | 0 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd7f8e09909844b7662e6b08100c7c6fbbff9197 | 6,016 | py | Python | stocky.py | Naphtha/stocky | 04e51b4270b28a1aa23597c07d67a2a99e4710cf | [
"MIT"
] | null | null | null | stocky.py | Naphtha/stocky | 04e51b4270b28a1aa23597c07d67a2a99e4710cf | [
"MIT"
] | null | null | null | stocky.py | Naphtha/stocky | 04e51b4270b28a1aa23597c07d67a2a99e4710cf | [
"MIT"
] | null | null | null | import requests
import json

BASE_URL = "https://api.stockfighter.io/ob/api/"


class StockMinion(object):
    '''Handles all API related requests for stock/order functionality'''

    def __init__(self, api_key, account, venue, stock):
        # use sessions to persist the HTTP connection
        # this will prevent thrashing HTTP sockets
        self.session = requests.Session()
        # set header in session, this will be reused by every function
        header = {'X-Starfighter-Authorization': api_key}
        self.session.headers.update(header)
        # set some basic, usually static, values
        self.account = account
        self.venue = venue
        self.stock = stock

    def check_api(self):
        # _call_api is sufficiently general to handle all cases
        data = self._call_api(BASE_URL + 'heartbeat', 'get')
        return data

    def check_venue(self):
        venue = self.venue
        data = self._call_api(BASE_URL + 'venues/{0}/heartbeat'.format(venue), 'get')
        return data

    def get_stocks_on_venue(self):
        venue = self.venue
        data = self._call_api(BASE_URL + 'venues/{0}/stocks'.format(venue), 'get')
        return data

    def get_orderbook(self):
        venue = self.venue
        stock = self.stock
        data = self._call_api(BASE_URL + 'venues/{0}/stocks/{1}'.format(venue, stock), 'get')
        return data

    # using kwargs here allows me to call this function with keywords or with a dict
    def place_order(self, **kwargs):
        kwargs['account'] = self.account
        kwargs['stock'] = self.stock
        kwargs['venue'] = self.venue
        # the args we need to make the request of the API
        mandatory = ['account', 'venue', 'stock', 'qty', 'direction', 'orderType']
        # filter out the args we're missing from the kwargs dict
        missing_args = [x for x in mandatory if x not in kwargs]
        # raises exception with missing operands
        if missing_args:
            raise TypeError("Missing '{0}' arguments in function call".format(', '.join(missing_args)))
        # leave the dictionary packed
        request_body = kwargs
        data = self._call_api(BASE_URL + 'venues/{0}/stocks/{1}/orders'.format(kwargs['venue'], kwargs['stock']),
                              'post', data=json.dumps(request_body))
        return data

    def get_quote(self):
        venue = self.venue
        stock = self.stock
        data = self._call_api(BASE_URL + 'venues/{0}/stocks/{1}/quote'.format(venue, stock), 'get')
        return data

    def get_order_status(self, id):
        venue = self.venue
        stock = self.stock
        data = self._call_api(BASE_URL + 'venues/{0}/stocks/{1}/orders/{2}'.format(venue, stock, id), 'get')
        return data

    def cancel_order(self, id):
        venue = self.venue
        stock = self.stock
        data = self._call_api(BASE_URL + 'venues/{0}/stocks/{1}/orders/{2}'.format(venue, stock, id), 'delete')
        return data

    def get_all_orders(self, stock=None):
        venue = self.venue
        account = self.account
        if stock:
            # get orders for specific stock
            data = self._call_api(BASE_URL + 'venues/{0}/accounts/{1}/stocks/{2}/orders'.format(venue, account, stock), 'get')
        else:
            data = self._call_api(BASE_URL + 'venues/{0}/accounts/{1}/orders'.format(venue, account), 'get')
        return data

    def _call_api(self, url, verb, *args, **kwargs):
        # use HTTP verb argument to pick the method to use from the Session object
        func = getattr(self.session, verb)
        resp = func(url, *args, **kwargs)
        data = StockMinion._process_response(resp.text, resp.status_code)
        return data

    @staticmethod
    def _process_json(json_obj):
        try:
            data = json.loads(json_obj)
        except ValueError as e:
            data = {}
            print(e)
        return data

    @staticmethod
    def _process_status(code):
        if code != 200:
            print("Got a status code of {0}".format(code))
        else:
            pass

    @staticmethod
    def _process_response(json_obj, code):
        data = StockMinion._process_json(json_obj)
        StockMinion._process_status(code)
        return data
if __name__ == '__main__':
    import sys

    def print_test_result(data, function):
        if data['ok'] == True:
            print("PASS: {0}()".format(function))
        else:
            # bug fix: the original used index {1}, which raises IndexError
            # because only one argument is passed to format()
            print("FAIL: {0}()".format(function))

    # run some simple regression tests
    TEST_VENUE = "TESTEX"
    TEST_STOCK = "FOOBAR"
    TEST_ACCOUNT = "EXB123456"

    # pick up api key from local untracked file
    with open('api.key', 'r') as secret_file:
        API_KEY = secret_file.readlines()[0].rstrip('\n')

    instance = StockMinion(API_KEY, TEST_ACCOUNT, TEST_VENUE, TEST_STOCK)

    data = instance.check_api()
    # the numerous calls to print_test_result can probably be eliminated at some point
    print_test_result(data, 'check_api')
    data = instance.check_venue()
    print_test_result(data, 'check_venue')
    data = instance.get_stocks_on_venue()
    print_test_result(data, 'get_stocks_on_venue')
    data = instance.get_orderbook()
    print_test_result(data, 'get_orderbook')
    data = instance.place_order(qty=100, direction="buy", orderType="limit", price=100)
    print_test_result(data, 'place_order')
    order_num = data['id']
    data = instance.get_quote()
    print_test_result(data, 'get_quote')
    data = instance.get_order_status(order_num)
    print_test_result(data, 'get_order_status')
    data = instance.cancel_order(order_num)
    print_test_result(data, 'cancel_order')
    data = instance.get_all_orders()
    print_test_result(data, 'get_all_orders')
    data = instance.get_all_orders(TEST_STOCK)
    print_test_result(data, 'get_all_orders')
| 30.231156 | 126 | 0.617686 | 765 | 6,016 | 4.658824 | 0.245752 | 0.032828 | 0.050505 | 0.058642 | 0.297419 | 0.228676 | 0.191077 | 0.156846 | 0.156846 | 0.15376 | 0 | 0.008874 | 0.269448 | 6,016 | 198 | 127 | 30.383838 | 0.802048 | 0.134641 | 0 | 0.258333 | 0 | 0 | 0.132112 | 0.045902 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0.016667 | 0.025 | 0 | 0.258333 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd80d005cadf2946180d02a5ee8eb0ce0d0f3e91 | 6,481 | py | Python | crawlers/ucsc_old/ucsc/spiders/registrar_courses.py | coursegraph/CourseGraph | 9f05cd912b393ba14721411fe77f3856812c000f | [
"MIT"
] | 5 | 2018-07-01T15:48:11.000Z | 2020-07-31T17:06:10.000Z | crawlers/ucsc_old/ucsc/spiders/registrar_courses.py | coursegraph/CourseGraph | 9f05cd912b393ba14721411fe77f3856812c000f | [
"MIT"
] | 7 | 2018-07-09T21:17:19.000Z | 2018-07-25T17:05:33.000Z | crawlers/ucsc_old/ucsc/spiders/registrar_courses.py | coursegraph/CourseGraph | 9f05cd912b393ba14721411fe77f3856812c000f | [
"MIT"
] | 4 | 2018-07-01T19:45:23.000Z | 2019-03-17T21:12:03.000Z | # -*- coding: utf-8 -*-
import scrapy
import os

from ucsc.items import FacultyItem, ProgramStatementItem, CourseDescriptionItem


def path_components(path):
    if '://' in path:
        path = path.split('://')[1]
    parts = path.split('/')
    while parts and parts[0] == '':
        parts = parts[1:]
    while parts and parts[-1] == '':
        parts = parts[:-1]
    return parts

assert(path_components('') == [])
assert(path_components('/') == [])
assert(path_components('foo/') == ['foo'])
assert(path_components('/bar') == ['bar'])
assert(path_components('foo/bar') == ['foo', 'bar'])


def merge_url(url, rel):
    # note: blame seiji for all the issues with this code
    thing = url.split('://')[0] if '://' in url else 'https'
    if url and url[-1] == '/':
        url = path_components(url)
    else:
        url = path_components(url)[:-1]
    for part in path_components(rel):
        if part == '..':
            url = url[:-1]
        else:
            url.append(part)
    return thing + '://' + '/'.join(url)

assert(merge_url('https://registrar.ucsc.edu/catalog/programs-courses/index.html',
                 '../foo/bar/../baz.html') == 'https://registrar.ucsc.edu/catalog/foo/baz.html')
assert(merge_url('', 'bar.baz') == 'https://bar.baz')
assert(merge_url('https://foo/bar/baz.html', '') == 'https://foo/bar')

registrar_base_url = 'https://registrar.ucsc.edu/catalog/programs-courses'
base_course_description_url = 'https://registrar.ucsc.edu/catalog/programs-courses/course-descriptions'
base_faculty_url = 'https://registrar.ucsc.edu/catalog/programs-courses/faculty'
base_program_description_url = 'https://registrar.ucsc.edu/catalog/programs-courses/program-statements'


class RegistrarCoursesSpider(scrapy.Spider):
    name = 'registrar_courses'
    allowed_domains = ['registrar.ucsc.edu']
    start_urls = [merge_url(registrar_base_url, 'index.html')]

    def __init__(self, *args, **kwargs):
        super(RegistrarCoursesSpider, self).__init__(*args, **kwargs)
        self.crawled = set()

    def parse(self, response):
        print("Parsing %s" % response.url)
        if base_course_description_url in response.url:
            yield self.parse_course_info(response)
        elif base_faculty_url in response.url:
            yield self.parse_faculty_info(response)
        elif base_program_description_url in response.url:
            yield self.parse_program_info(response)

        all_links = response.xpath('//a')
        for link in all_links:
            #print("Got link: %s"%link.extract())
            try:
                href = link.xpath('@href').extract()[0]

                def is_local_url(url):
                    for thing in ('http:', 'https:', 'C:', 'www', 'ucsc.edu'):
                        if thing in url:
                            return False
                    return True

                url = merge_url(response.url, href) if is_local_url(href) else href
                if url in self.crawled:
                    continue
                #print("Got URL: %s"%url)
                self.crawled.add(url)
                if registrar_base_url in url:
                    yield {'url': url}
                    yield scrapy.Request(url, self.parse)
                else:
                    pass
                    #print("Skipping %s"%url)
            except IndexError:
                pass

    def parse_course_info(self, response):
        info = CourseDescriptionItem()
        info['url'] = response.url
        print("Got %s" % response.url)
        return info

    def parse_faculty_info(self, response):
        info = FacultyItem()
        info['url'] = response.url
        print("Got %s" % response.url)
        return info

    def parse_program_info(self, response):
        info = ProgramStatementItem()
        info['url'] = response.url
        print("Got %s" % response.url)
        return info


class Unused:
    def parse(self, response):
        # Get links to all course pages from the registrar
        page_content = response\
            .xpath('body/div[@id="wrap"]/div[@id="container"]/div[@id="content"]')\
            .xpath('div[@id="sprflt"]/div[@id="main"]/div[contains(@class,"content")]')
        panel_elems = page_content.xpath('table/tbody/tr/td')

        self.depts = {}
        self.crawled = set()
        for panel in panel_elems:
            program_statements = panel.xpath('p/a')
            for a in program_statements:
                # print(a.xpath('@href').extract())
                dept = a.xpath('@href').re(r'program-statements/(\w+)\.html')[0]
                title = a.xpath('text()').extract()[0]
                url = 'https://registrar.ucsc.edu/catalog/programs-courses/program-statements/%s.html' % dept
                self.depts[dept] = title
                self.crawled.add(url)
                yield scrapy.Request(url, callback=self.parse_program_info)

                #course_url = 'https://registrar.ucsc.edu/catalog/programs-courses/course-descriptions/%s.html'%dept
                program_url = 'https://registrar.ucsc.edu/catalog/programs-courses/program-statements/%s.html' % dept
                faculty_url = 'https://registrar.ucsc.edu/catalog/programs-courses/faculty/%s.html' % dept
                #yield scrapy.Request(course_url, callback=self.parse_course_info)
                yield scrapy.Request(program_url, callback=self.parse_program_info)
                yield scrapy.Request(faculty_url, callback=self.parse_faculty_info)

    def parse_program_info(self, response):
        page_content = response\
            .xpath('body/div[@id="wrap"]/div[@id="container"]/div[@id="content"]')\
            .xpath('div[@id="sprflt"]/div[@id="main"]/div[contains(@class,"content")]')
        page_links = page_content.xpath('p[3]/a')
        for a in page_links:
            href, regex = a.xpath('@href'), r'\.\./([\w\-]+/\w+\.html)'
            try:
                page = href.re(regex)[0]
                title = a.xpath('text()').extract()[0]
                url = 'https://registrar.ucsc.edu/catalog/programs-courses/program-statements/%s' % page
                print("\n%s: %s" % (url, title))
            except IndexError:
                print("Could not match '%s' with '%s'" % (href, regex))
        content = page_content
        #print("%s"%content.extract()[0])

    def parse_course_info(self, response):
        print("Got %s" % response.url)

    def parse_faculty_info(self, response):
        print("Got %s" % response.url)
| 39.760736 | 117 | 0.583552 | 773 | 6,481 | 4.767141 | 0.182406 | 0.024695 | 0.052103 | 0.062687 | 0.43772 | 0.409498 | 0.354138 | 0.330258 | 0.263501 | 0.257531 | 0 | 0.003558 | 0.262768 | 6,481 | 162 | 118 | 40.006173 | 0.767685 | 0.067428 | 0 | 0.314961 | 0 | 0.015748 | 0.217305 | 0.054036 | 0 | 0 | 0 | 0 | 0.062992 | 1 | 0.094488 | false | 0.015748 | 0.023622 | 0 | 0.212598 | 0.062992 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd82c29e5bee15a3671a883151076960c6a72038 | 1,937 | py | Python | gradient_free_optimizers/optimizers/exp_opt/local_bayes_opt.py | Wollala/Gradient-Free-Optimizers | 8fb1608c264431b87f66fd2d233b76a0fa75316c | [
"MIT"
] | 1 | 2022-02-25T03:14:48.000Z | 2022-02-25T03:14:48.000Z | gradient_free_optimizers/optimizers/exp_opt/local_bayes_opt.py | Wollala/Gradient-Free-Optimizers | 8fb1608c264431b87f66fd2d233b76a0fa75316c | [
"MIT"
] | null | null | null | gradient_free_optimizers/optimizers/exp_opt/local_bayes_opt.py | Wollala/Gradient-Free-Optimizers | 8fb1608c264431b87f66fd2d233b76a0fa75316c | [
"MIT"
] | null | null | null | # Author: Simon Blanke
# Email: simon.blanke@yahoo.com
# License: MIT License

import time
import random
import numpy as np

from ..base_optimizer import BaseOptimizer
from ...search import Search

from ._sub_search_spaces import SubSearchSpaces
from ..smb_opt import BayesianOptimizer


class LocalBayesianOptimizer(BaseOptimizer, Search):
    name = "Local Bayesian Optimizer"

    def __init__(
        self, *args, max_size=300000, n_positions=20, local_range=100, **kwargs
    ):
        super().__init__(*args, **kwargs)
        self.max_size = max_size
        self.n_positions = n_positions
        self.local_range = local_range

        self.bayes_opt = BayesianOptimizer(self.conv.search_space)

    def create_local_smbo(self, current_position):
        local_ss = {}
        for idx, para in enumerate(self.conv.para_names):
            # clamp the local window to the search-space bounds; note the
            # original swapped min/max here, so the window was never clipped
            max_dim = min(
                self.conv.dim_sizes[idx], current_position[idx] + self.local_range
            )
            min_dim = max(0, current_position[idx] - self.local_range)
            dim_pos = np.array(self.conv.search_space_positions[idx])
            dim_pos_center = np.where(
                np.logical_and(dim_pos >= min_dim, dim_pos <= max_dim)
            )[0]

            local_ss[para] = dim_pos_center

        self.bayes_opt = BayesianOptimizer(local_ss)

    def finish_initialization(self):
        self.create_local_smbo(self.pos_current)

    @BaseOptimizer.track_nth_iter
    def iterate(self):
        pos_loc = self.bayes_opt.iterate()
        pos_new = self.bayes_opt.conv.position2value(pos_loc)
        return pos_new

    def evaluate(self, score_new):
        self.bayes_opt.evaluate(score_new)

        self.score_new = score_new

        self._evaluate_new2current(score_new)
        self._evaluate_current2best()

        modZero = self.nth_iter % self.n_positions == 0
        if modZero:
            self.create_local_smbo(self.pos_current)
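
# --- Illustrative usage (not part of the original file) ---
# A minimal sketch, assuming the optimizer is driven through the package's
# usual Search interface (a search-space dict at construction, an objective
# passed to .search()); treat the exact entry point as an assumption.
#
#   search_space = {"x": np.arange(-100, 101, 1)}
#   opt = LocalBayesianOptimizer(search_space)
#   opt.search(lambda para: -para["x"] ** 2, n_iter=50)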
| 27.671429 | 82 | 0.665978 | 246 | 1,937 | 4.926829 | 0.341463 | 0.041254 | 0.049505 | 0.04703 | 0.107261 | 0.107261 | 0.054455 | 0 | 0 | 0 | 0 | 0.011636 | 0.245741 | 1,937 | 69 | 83 | 28.072464 | 0.817933 | 0.036655 | 0 | 0.044444 | 0 | 0 | 0.012889 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.155556 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd83ed580ee241d7bccf7170f916c7c26cd0e7da | 2,235 | py | Python | storage.py | computer-micro-mangangement/cmm_hub | 1ec4ed8c86edcbbd5624396a4be1d6aa7e6132fb | [
"MIT"
] | null | null | null | storage.py | computer-micro-mangangement/cmm_hub | 1ec4ed8c86edcbbd5624396a4be1d6aa7e6132fb | [
"MIT"
] | null | null | null | storage.py | computer-micro-mangangement/cmm_hub | 1ec4ed8c86edcbbd5624396a4be1d6aa7e6132fb | [
"MIT"
] | null | null | null | import psutil
from appJar import gui
import config
import requests as req
import json
import platform
import sysInfo

app = gui(title="CMM Hub", showIcon=False)

navBarElements = []
navBarElementsCallName = []
currentContainer = ""


def get_size(bytes, suffix="B"):
    """
    Scale bytes to its proper format
    e.g:
        1253656 => '1.20MB'
        1253656678 => '1.17GB'
    """
    factor = 1024
    for unit in ["", "K", "M", "G", "T", "P"]:
        if bytes < factor:
            return f"{bytes:.2f}{unit}{suffix}"
        bytes /= factor


def getServerInfo():
    request = req.get(config.getServerAddress() + "/api/info", verify=False)
    if request.status_code == 200:
        jsonData = json.loads(request.text)
        return jsonData
    return {}


def getUserInfo():
    request = req.get(config.getServerAddress() + "/api/user/currentUser", verify=False,
                      params={"devicesecret": config.getDeviceSecret()})
    if request.status_code == 200:
        jsonData = json.loads(request.text)
        return jsonData
    return {}


def getInstallableModules():
    serverInfo = getServerInfo()
    moduleListURL = serverInfo["moduleListURL"]
    request = req.get(moduleListURL, verify=False)
    if request.status_code == 200:
        modules = {}
        data = request.text
        lines = data.split('\n')
        for line in lines:
            elements = line.split(',')
            modules[elements[0]] = {}
            modules[elements[0]]["link"] = elements[1].replace(" ", "")
            modules[elements[0]]["name"] = elements[0].capitalize()
            modules[elements[0]]["version"] = elements[2].replace(" ", "")
        return modules


def getDeviceInfo():
    deviceInfo = {}
    uname = platform.uname()
    deviceInfo["os"] = uname.system + str(uname.release)
    deviceInfo["name"] = uname.node
    deviceInfo["architecture"] = uname.machine
    deviceInfo["processor"] = {}
    deviceInfo["processor"]["processor Declaration"] = uname.processor
    deviceInfo["processor"]["cores"] = psutil.cpu_count(logical=False)
    deviceInfo["processor"]["threads"] = psutil.cpu_count(logical=True)
    svmem = psutil.virtual_memory()
    deviceInfo["installed RAM"] = get_size(svmem.total)
    return deviceInfo
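
# --- Illustrative usage (not part of the original file) ---
# Quick sanity checks of the helpers above:
#
#   >>> get_size(1253656)
#   '1.20MB'
#   >>> getDeviceInfo()["installed RAM"]
#   '15.51GB'   # value depends on the host machine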
| 28.653846 | 88 | 0.630425 | 239 | 2,235 | 5.861925 | 0.464435 | 0.03212 | 0.045682 | 0.040685 | 0.189864 | 0.189864 | 0.135617 | 0.104211 | 0.104211 | 0.104211 | 0 | 0.025287 | 0.221477 | 2,235 | 77 | 89 | 29.025974 | 0.779885 | 0.039374 | 0 | 0.155172 | 0 | 0 | 0.100047 | 0.021606 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086207 | false | 0 | 0.12069 | 0 | 0.327586 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd884de362569f988f5fe329cd6525259b1ae410 | 1,840 | py | Python | lawliet/mono/resample.py | Ryuk17/lawliet | ba4734557260b255896707210fca3e2fff311e87 | [
"Apache-2.0"
] | 2 | 2021-04-20T03:51:32.000Z | 2021-06-16T11:48:06.000Z | lawliet/mono/resample.py | Ryuk17/lawliet | ba4734557260b255896707210fca3e2fff311e87 | [
"Apache-2.0"
] | null | null | null | lawliet/mono/resample.py | Ryuk17/lawliet | ba4734557260b255896707210fca3e2fff311e87 | [
"Apache-2.0"
] | null | null | null | """
@FileName: resample.py
@Description: Implement resample
@Author: Ryuk
@CreateDate: 2021/06/27
@LastEditTime: 2021/06/27
@LastEditors: Please set LastEditors
@Version: v0.1
"""

import numpy as np
import math

__all__ = [
    "direct_interpolation",
    "lagrange_interpolation",
    "sine_interpolation",
]


def direct_interpolation(x, L, M):
    """
    resample signal with direct interpolation
    :param x: input signal
    :param L: original frequency
    :param M: target frequency
    :return: resampled signal
    """
    N = len(x)
    K = int((M / L) * N)
    factor = L / M
    y = np.zeros(K)
    for k in range(K):
        nk = factor * k
        n = math.floor(nk)
        if n + 1 >= len(x):
            continue
        w1 = nk - n
        w2 = 1 - w1
        y[k] = w1 * x[n + 1] + w2 * x[n]
    return y

def lagrange_interpolation(x, w, L, M):
    N = len(x)
    K = int((M / L) * N)
    factor = L / M
    y = np.zeros(K)
    for k in range(K):
        nk = factor * k
        n = math.floor(nk) - 1
        for i in range(-w, w, 1):
            numerator = 1
            denominator = 1
            # skip taps that fall outside the signal (the original only
            # guarded the upper bound; negative indices would wrap around)
            if n - i >= len(x) or n - i < 0:
                continue
            for j in range(-w, w, 1):
                if i != j:
                    numerator *= nk - (n - j)
                    denominator *= (j - i)
            y[k] += x[n - i] * numerator / denominator
    return y

def sine_interpolation(x, w, L, M):
    N = len(x)
    K = int((M / L) * N)
    factor = L / M
    y = np.zeros(K)
    for k in range(K):
        nk = factor * k
        n = math.floor(nk)
        for i in range(-w, w, 1):
            if n - i >= len(x) or n - i < 0:
                continue
            t = nk - n + i
            if t == 0:
                # sinc(0) == 1, so the sample passes through unchanged
                # (the original skipped it, silently dropping aligned samples)
                y[k] += x[n - i]
                continue
            # sinc(t) = sin(pi*t) / (pi*t); the original omitted the pi
            # factor in the numerator
            y[k] += x[n - i] * math.sin(math.pi * t) / (math.pi * t)
    return y
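
# --- Illustrative usage (not part of the original file) ---
# Upsampling a 440 Hz tone from 16 kHz to 48 kHz with each method; the test
# signal is arbitrary.
#
#   t = np.arange(0, 0.1, 1 / 16000)
#   x = np.sin(2 * np.pi * 440 * t)
#   y1 = direct_interpolation(x, 16000, 48000)
#   y2 = lagrange_interpolation(x, 3, 16000, 48000)
#   y3 = sine_interpolation(x, 8, 16000, 48000)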
| 23.589744 | 54 | 0.490761 | 264 | 1,840 | 3.382576 | 0.246212 | 0.015677 | 0.016797 | 0.020157 | 0.402016 | 0.402016 | 0.388578 | 0.320269 | 0.320269 | 0.320269 | 0 | 0.028621 | 0.37337 | 1,840 | 77 | 55 | 23.896104 | 0.74588 | 0.17337 | 0 | 0.54717 | 0 | 0 | 0.040486 | 0.014845 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056604 | false | 0 | 0.037736 | 0 | 0.150943 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd8b0ea4d67316a57577ad9cd394adb4172f74c1 | 2,130 | py | Python | src/demo.py | lhcezx/Graph-FPN | 55eb9283a7df83e003c84eede65a2700bb9fa45c | [
"MIT"
] | 19 | 2021-11-16T05:32:45.000Z | 2022-01-27T09:29:50.000Z | src/demo.py | lhcezx/Graph-FPN | 55eb9283a7df83e003c84eede65a2700bb9fa45c | [
"MIT"
] | 1 | 2021-11-15T15:44:45.000Z | 2021-12-13T04:26:26.000Z | src/demo.py | lhcezx/Graph-FPN | 55eb9283a7df83e003c84eede65a2700bb9fa45c | [
"MIT"
] | 1 | 2022-01-04T14:10:58.000Z | 2022-01-04T14:10:58.000Z | import os
import zipfile
import tensorflow as tf
import tensorflow_datasets as tfds

import init_path
from configs.configs import parse_configs
from detection.utils.Label import *
from detection.utils.preprocess import *
from model.network import DecodePredictions
from model.get_model import backbone, models

config = parse_configs()


def get_demo_data():
    url = "https://github.com/srihari-humbarwadi/datasets/releases/download/v0.1.0/data.zip"
    filename = os.path.join(config.root_dir, "data_demo", "data.zip")
    tf.keras.utils.get_file(filename, url)
    with zipfile.ZipFile(filename, "r") as z_fp:
        z_fp.extractall(os.path.join(config.root_dir, "data_demo/"))


def demo():
    get_demo_data()
    model = models[config.Arch](config.num_classes, backbone[config.backbone])
    # fine_tune_checkpoint_type
    ckpt = tf.train.Checkpoint(model)
    ckpt.restore(tf.train.latest_checkpoint(config.weight)).expect_partial()

    # Prepare image for demo
    val_dataset, dataset_info = tfds.load("coco/2017",
                                          split="validation",
                                          with_info=True,
                                          data_dir=os.path.join(config.root_dir, "data_demo/data"),
                                          download=False)
    int2str = dataset_info.features["objects"]["label"].int2str

    for sample in val_dataset.take(2):
        image = tf.cast(sample["image"], dtype=tf.float32)
        input_image, ratio_short, ratio_long = prepare_image(image)
        # Inference
        predictions = model(input_image)
        detections = DecodePredictions(confidence_threshold=0.5)(input_image, predictions)
        num_detections = detections.valid_detections[0]
        class_names = [int2str(int(x)) for x in detections.nmsed_classes[0][:num_detections]]
        visualize_detections(image,
                             detections.nmsed_boxes[0][:num_detections].numpy(),
                             class_names,
                             detections.nmsed_scores[0][:num_detections].numpy(),
                             ratio_short, ratio_long
                             )
if __name__ == "__main__":
    demo() | 39.444444 | 99 | 0.656338 | 258 | 2,130 | 5.193798 | 0.426357 | 0.023881 | 0.022388 | 0.035821 | 0.075373 | 0.075373 | 0.075373 | 0.075373 | 0.052239 | 0 | 0 | 0.011714 | 0.238498 | 2,130 | 54 | 100 | 39.444444 | 0.814427 | 0.02723 | 0 | 0 | 0 | 0.023256 | 0.080232 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0 | 0.232558 | 0 | 0.27907 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd91ba13b85667cd92a61109fae15aca6df3f93b | 3,561 | py | Python | LoveDA/uda/baseline_train.py | edornd/ProDA | ffb092afbbde95e4ca29cb1ec199f9685f6601fb | [
"MIT"
] | null | null | null | LoveDA/uda/baseline_train.py | edornd/ProDA | ffb092afbbde95e4ca29cb1ec199f9685f6601fb | [
"MIT"
] | null | null | null | LoveDA/uda/baseline_train.py | edornd/ProDA | ffb092afbbde95e4ca29cb1ec199f9685f6601fb | [
"MIT"
] | null | null | null | import argparse
import os
import os.path as osp

import torch
import torch.nn as nn
import torch.optim as optim
from eval import evaluate
from ever.core.iterator import Iterator
from module.deeplabv2 import Deeplab
from torch.nn import functional as fn
from tqdm import tqdm

from data.loveda import LoveDALoader
from utils.tools import (
    adjust_learning_rate,
    count_model_parameters,
    get_console_file_logger,
    import_config,
    loss_calc,
    seed_torch,
)

parser = argparse.ArgumentParser(description='Run Baseline methods.')
parser.add_argument('--config_path', type=str, help='config path')
args = parser.parse_args()
cfg = import_config(args.config_path)


def main():
    """Create the model and start the training."""
    os.makedirs(cfg.SNAPSHOT_DIR, exist_ok=True)
    logger = get_console_file_logger(name='Deeplabv2', logdir=cfg.SNAPSHOT_DIR)

    # Create Network
    model = Deeplab(nn.BatchNorm2d, num_classes=7)
    # model = Deeplabv2(
    #     dict(
    #         backbone=dict(
    #             resnet_type='resnet50',
    #             output_stride=16,
    #             pretrained=True,
    #         ),
    #         multi_layer=False,
    #         cascade=False,
    #         use_ppm=False,
    #         ppm=dict(
    #             num_classes=7,
    #             use_aux=False,
    #             norm_layer=nn.BatchNorm2d,
    #         ),
    #         inchannels=2048,
    #         num_classes=7))
    model.train()
    model.cuda()
    # cudnn.enabled = True
    # cudnn.benchmark = True
    logger.info('exp = %s' % cfg.SNAPSHOT_DIR)
    count_model_parameters(model, logger)

    trainloader = LoveDALoader(cfg.SOURCE_DATA_CONFIG)
    epochs = cfg.NUM_STEPS_STOP / len(trainloader)
    logger.info('epochs ~= %.3f' % epochs)
    trainloader_iter = Iterator(trainloader)

    optimizer = optim.SGD(
        model.parameters(), lr=cfg.LEARNING_RATE, momentum=cfg.MOMENTUM, weight_decay=cfg.WEIGHT_DECAY)
    # model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
    optimizer.zero_grad()

    for i_iter in tqdm(range(cfg.NUM_STEPS_STOP)):
        optimizer.zero_grad()
        lr = adjust_learning_rate(optimizer, i_iter, cfg)

        # Train with Source
        batch = trainloader_iter.next()
        images_s, labels_s = batch[0]
        pred_source = model(images_s.cuda())
        # pred_source is a dict with features and actual output
        pred_source = pred_source["out"]
        pred_source = fn.interpolate(pred_source, labels_s["cls"].size()[1:], mode="bilinear", align_corners=True)

        # Segmentation Loss
        loss = loss_calc(pred_source, labels_s['cls'].cuda())
        loss.backward()
        optimizer.step()

        if i_iter % 50 == 0:
            logger.info('exp = {}'.format(cfg.SNAPSHOT_DIR))
            text = 'iter = %d, loss_seg = %.3f, lr = %.3f' % (i_iter, loss, lr)
            logger.info(text)

        if i_iter >= cfg.NUM_STEPS_STOP - 1:
            print('save model ...')
            ckpt_path = osp.join(cfg.SNAPSHOT_DIR, cfg.TARGET_SET + str(cfg.NUM_STEPS_STOP) + '.pth')
            torch.save(model.state_dict(), ckpt_path)
            evaluate(model, cfg, True, ckpt_path, logger)
            break
        if i_iter % cfg.EVAL_EVERY == 0 and i_iter != 0:
            ckpt_path = osp.join(cfg.SNAPSHOT_DIR, cfg.TARGET_SET + str(i_iter) + '.pth')
            torch.save(model.state_dict(), ckpt_path)
            evaluate(model, cfg, True, ckpt_path, logger)
            model.train()


if __name__ == '__main__':
    seed_torch(2333)
    main()
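
# --- Illustrative usage (not part of the original file) ---
# The script is driven entirely by --config_path; the config value below is
# hypothetical.
#
#   python baseline_train.py --config_path baseline.2urban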
| 33.914286 | 114 | 0.627352 | 450 | 3,561 | 4.744444 | 0.366667 | 0.018735 | 0.039344 | 0.028103 | 0.123653 | 0.104918 | 0.104918 | 0.104918 | 0.104918 | 0.104918 | 0 | 0.012126 | 0.258916 | 3,561 | 104 | 115 | 34.240385 | 0.796893 | 0.184499 | 0 | 0.117647 | 0 | 0 | 0.058435 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014706 | false | 0 | 0.220588 | 0 | 0.235294 | 0.014706 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd9906411d0902be3e6498e5a1a56da473448736 | 2,161 | py | Python | tests/tensorflow/pruning/test_flops_pruning.py | sarthakpati/nncf | 29ad62c664c1dd53b3c8c50fc001a1b36bd1e8ac | [
"Apache-2.0"
] | 1 | 2021-12-30T05:49:10.000Z | 2021-12-30T05:49:10.000Z | tests/tensorflow/pruning/test_flops_pruning.py | sarthakpati/nncf | 29ad62c664c1dd53b3c8c50fc001a1b36bd1e8ac | [
"Apache-2.0"
] | 1 | 2021-07-23T07:46:52.000Z | 2021-07-23T07:46:52.000Z | tests/tensorflow/pruning/test_flops_pruning.py | sarthakpati/nncf | 29ad62c664c1dd53b3c8c50fc001a1b36bd1e8ac | [
"Apache-2.0"
] | null | null | null | """
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
      http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest

from tests.tensorflow.helpers import create_compressed_model_and_algo_for_test
from tests.tensorflow.pruning.helpers import get_basic_pruning_config
from tests.tensorflow.pruning.helpers import get_test_model_shared_convs


@pytest.mark.parametrize(
    ("model", "all_weights", "ref_full_flops", "ref_current_flops",
     "ref_full_params", "ref_current_params"),
    (
        (get_test_model_shared_convs, True, 461438976, 276385312,
         11534848, 6908711),
        (get_test_model_shared_convs, False, 461438976, 270498816,
         11534848, 6761608)
    )
)
def test_flops_calculation_for_spec_layers(model, all_weights, ref_full_flops, ref_current_flops,
                                           ref_full_params, ref_current_params):
    config = get_basic_pruning_config(8)
    config['compression']['algorithm'] = 'filter_pruning'
    config['compression']['pruning_init'] = 0.4
    config['compression']['params']['pruning_flops_target'] = 0.4
    config['compression']['params']['prune_first_conv'] = True
    config['compression']['params']['prune_last_conv'] = True
    config['compression']['params']['all_weights'] = all_weights
    input_shape = [1, 8, 8, 1]
    model = model(input_shape)
    model.compile()
    _, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)
    assert compression_ctrl.full_flops == ref_full_flops
    assert compression_ctrl.full_params_num == ref_full_params
    assert compression_ctrl.current_flops == ref_current_flops
    assert compression_ctrl.current_params_num == ref_current_params
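
# --- Illustrative usage (not part of the original file) ---
# The parametrized test above runs under pytest, e.g.:
#
#   pytest tests/tensorflow/pruning/test_flops_pruning.py -k flops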
| 44.102041 | 96 | 0.745951 | 289 | 2,161 | 5.283737 | 0.415225 | 0.039293 | 0.060249 | 0.035363 | 0.310413 | 0.193844 | 0.193844 | 0.092993 | 0.092993 | 0.092993 | 0 | 0.045907 | 0.16335 | 2,161 | 48 | 97 | 45.020833 | 0.798673 | 0.258677 | 0 | 0 | 0 | 0 | 0.168561 | 0 | 0 | 0 | 0 | 0 | 0.129032 | 1 | 0.032258 | false | 0 | 0.129032 | 0 | 0.16129 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd99e09ac29cee3774d16f59b6db9c324075e704 | 9,812 | py | Python | HCI/emotions.py | shinkansan/2019-UGRP-DPoom | eedee93b47e068f22bf420140d869a43f7551876 | [
"Apache-2.0"
] | 33 | 2020-07-16T06:31:38.000Z | 2022-03-23T18:34:58.000Z | HCI/emotions.py | shinkansan/2019-UGRP-DPoom | eedee93b47e068f22bf420140d869a43f7551876 | [
"Apache-2.0"
] | 5 | 2020-08-27T08:06:21.000Z | 2022-02-23T12:34:09.000Z | HCI/emotions.py | shinkansan/2019-UGRP-DPoom | eedee93b47e068f22bf420140d869a43f7551876 | [
"Apache-2.0"
] | 10 | 2020-08-05T15:05:58.000Z | 2021-11-19T10:20:44.000Z | """
Dpoom Face Expression Windows 2019
"""
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtWebEngineWidgets import *
from PyQt5.QtPrintSupport import *
import fall_body_1013 as fall_body
import os
import sys
import numpy as np
import argparse
import imutils
import time
import cv2
import os
import pyrealsense2 as rs
import threading
import matplotlib.pyplot as plt
import uuid
import queue
specificSet = [
    '/Users/shinkansan/anaconda3/envs/HyunSoo/lib/python36.zip',
    '/Users/shinkansan/anaconda3/envs/HyunSoo/lib/python3.6',
    '/Users/shinkansan/anaconda3/envs/HyunSoo/lib/python3.6/lib-dynload',
    '/Users/shinkansan/anaconda3/envs/HyunSoo/lib/python3.6/site-packages']
#sys.path = specificSet
MainIndex = "file:///home/dpoom2/dpoom_few/index.html"
class AboutDialog(QDialog):
    def __init__(self, *args, **kwargs):
        super(AboutDialog, self).__init__(*args, **kwargs)

        QBtn = QDialogButtonBox.Ok  # No cancel
        self.buttonBox = QDialogButtonBox(QBtn)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)

        layout = QVBoxLayout()
        title = QLabel("DPoom FEW")
        font = title.font()
        font.setPointSize(20)
        title.setFont(font)
        layout.addWidget(title)
        layout.addWidget(QLabel("Version 1"))
        layout.addWidget(QLabel("Copyright TEAM DPOOM."))
        for i in range(0, layout.count()):
            layout.itemAt(i).setAlignment(Qt.AlignHCenter)
        layout.addWidget(self.buttonBox)
        self.setLayout(layout)
class MainWindow(QMainWindow):
    thread_signal = pyqtSignal()
    send_instances_signal = pyqtSignal("PyQt_PyObject")

    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)
        self.status_emeregency = False
        self.browser = QWebEngineView()
        self.browser.setUrl(QUrl(MainIndex))
        self.browser.urlChanged.connect(self.update_urlbar)
        self.browser.loadFinished.connect(self.update_title)
        self.browser.loadFinished.connect(self.setDefaultExpr)
        self.setCentralWidget(self.browser)

        self.status = QStatusBar()
        self.setStatusBar(self.status)

        navtb = QToolBar("Navigation")
        navtb.setIconSize(QSize(16, 16))
        #self.addToolBar(navtb)

        back_btn = QAction("Back", self)
        back_btn.setStatusTip("Back to previous page")
        back_btn.triggered.connect(self.browser.back)
        navtb.addAction(back_btn)

        next_btn = QAction(QIcon(os.path.join('images', 'arrow-000.png')), "Forward", self)
        next_btn.setStatusTip("Forward to next page")
        next_btn.triggered.connect(self.browser.forward)
        navtb.addAction(next_btn)

        reload_btn = QAction(QIcon(os.path.join('images', 'arrow-circle-315.png')), "Reload", self)
        reload_btn.setStatusTip("Reload page")
        reload_btn.triggered.connect(self.browser.reload)
        navtb.addAction(reload_btn)

        home_btn = QAction(QIcon(os.path.join('images', 'home.png')), "Home", self)
        home_btn.setStatusTip("Go home")
        home_btn.triggered.connect(self.navigate_home)
        navtb.addAction(home_btn)

        navtb.addSeparator()

        self.urlbar = QLineEdit()
        self.urlbar.returnPressed.connect(self.navigate_to_url)
        navtb.addWidget(self.urlbar)

        stop_btn = QAction("Stop", self)
        stop_btn.setStatusTip("Stop loading current page")
        stop_btn.triggered.connect(self.browser.stop)
        navtb.addAction(stop_btn)

        # Uncomment to disable native menubar on Mac
        # self.menuBar().setNativeMenuBar(False)

        file_menu = self.menuBar().addMenu("&File")

        open_file_action = QAction("Open file...", self)
        open_file_action.setStatusTip("Open from file")
        open_file_action.triggered.connect(self.open_file)
        file_menu.addAction(open_file_action)

        # save_file_action = QAction(QIcon(os.path.join('images', 'disk--pencil.png')), "Save Page As...", self)
        # save_file_action.setStatusTip("Save current page to file")
        # save_file_action.triggered.connect(self.save_file)
        # file_menu.addAction(save_file_action)

        # print_action = QAction(QIcon(os.path.join('images', 'printer.png')), "Print...", self)
        # print_action.setStatusTip("Print current page")
        # print_action.triggered.connect(self.print_page)
        # file_menu.addAction(print_action)

        about_action = QAction("Specif Setting", self)
        about_action.setStatusTip("detail")  # Hungry!
        about_action.triggered.connect(self.about)
        file_menu.addAction(about_action)

        navigate_mozarella_action = QAction("Go Homepage", self)
        navigate_mozarella_action.setStatusTip("Go to Dpoom home")
        navigate_mozarella_action.triggered.connect(self.navigate_mozarella)
        file_menu.addAction(navigate_mozarella_action)

        self.showFullScreen()
        self.show()

        self.th = Worker(parent=self)
        self.th.start()
        self.th2 = YoloWorker(parent=self)
        self.th2.start()

        self.setWindowIcon(QIcon(os.path.join('images', 'ma-icon-64.png')))

    def setDefaultExpr(self):
        self.browser.page().runJavaScript("eyes.startBlinking()")
        print('set default expr')

    def setExpr(self, classN):
        emoClass = {
            0: "eyes.startBlinking()",
            1: "eyes.stopBlinking()",
            2: "eyes.blink()",
            3: "eyes.express({type: 'happy'})",
            4: "eyes.express({type: 'sad'})",
            5: "eyes.express({type: 'angry'})",
            6: "eyes.express({type: 'focused'})",
            7: "eyes.express({type: 'confused'})"
        }
        self.browser.page().runJavaScript(emoClass.get(classN))
        pass

    def declareEmergency(self):
        self.status_emeregency = not self.status_emeregency
        if self.status_emeregency:
            self.browser.page().runJavaScript('clearInterval(light)')
            self.browser.page().runJavaScript('var light = setInterval("lightning()",360);')
        else:
            self.browser.page().runJavaScript('clearInterval(light)')
            self.browser.page().runJavaScript('var light = setInterval("getBackwhite()",360);')

    def update_title(self):
        title = self.browser.page().title()
        self.setWindowTitle("Dpoom FEW")
    def navigate_mozarella(self):
        # bug fix: setUrl() expects a QUrl, not a plain string
        self.browser.setUrl(QUrl(MainIndex))
    def about(self):
        dlg = AboutDialog()
        dlg.exec_()

    def open_file(self):
        filename, _ = QFileDialog.getOpenFileName(self, "Open file", "",
                                                  "Hypertext Markup Language (*.htm *.html);;"
                                                  "All files (*.*)")
        if filename:
            with open(filename, 'r') as f:
                html = f.read()
            self.browser.setHtml(html)
            self.urlbar.setText(filename)

    def save_file(self):
        filename, _ = QFileDialog.getSaveFileName(self, "Save Page As", "",
                                                  "Hypertext Markup Language (*.htm *html);;"
                                                  "All files (*.*)")
        if filename:
            html = self.browser.page().toHtml()
            with open(filename, 'w') as f:
                f.write(html)

    def print_page(self):
        dlg = QPrintPreviewDialog()
        dlg.paintRequested.connect(self.browser.print_)
        dlg.exec_()

    def navigate_home(self):
        self.browser.setUrl(QUrl(""))

    def navigate_to_url(self):  # Does not receive the Url
        q = QUrl(self.urlbar.text())
        if q.scheme() == "":
            q.setScheme("http")
        self.browser.setUrl(q)

    def update_urlbar(self, q):
        if q.scheme() == 'https':
            # Secure padlock icon
            pass
        else:
            # Insecure padlock icon
            pass
        #self.urlbar.setText(q.toString())
        #self.urlbar.setCursorPosition(0)
class Worker(QThread):
    #sec_changed = pyqtSignal(str)

    def __init__(self, sec=0, parent=None):
        super(Worker, self).__init__()
        self.main = parent
        self.working = True
        self.sec = sec
        # self.main.add_sec_signal.connect(self.add_sec)  # custom signal from main thread to worker thread

    def __del__(self):
        print(".... end thread.....")
        self.wait()

    def defaultAction(self):
        while(True):
            if fall_body.fallFlag:
                print("fall body detected!!!!!!!")
            elif fall_body.humanFlag:
                print("human detected !!!")
            ###cascade_1013
            emoNumber = int(np.random.uniform(3, 8))
            try:
                emoNumber = int(emoNumber)
            except:
                pass
                #window.about()
            else:
                window.setExpr(int(emoNumber))
            time.sleep(3)
            print('active')

    def run(self):
        self.defaultAction()
class YoloWorker(QThread):
    def __init__(self, parent=None):
        super(YoloWorker, self).__init__()
        self.main = parent
        self.working = True

    def __del__(self):
        print('yolo thread dead')
        self.wait()

    def yolo_main(self):
        print('yolo thread working')
        if self.working:
            self.working = not self.working
            fall_body.main(verbose=0)

    def run(self):
        self.yolo_main()


app = QApplication(sys.argv)
app.setApplicationName("Dpoom FEW")
app.setOrganizationName("Dpoom FEW")
app.setOrganizationDomain("github.com/shinkansan")

window = MainWindow()
app.exec_()
| 30.190769 | 112 | 0.614248 | 1,078 | 9,812 | 5.457328 | 0.288497 | 0.043005 | 0.033996 | 0.015298 | 0.174061 | 0.128336 | 0.113038 | 0.096209 | 0.047935 | 0.031617 | 0 | 0.009662 | 0.261618 | 9,812 | 324 | 113 | 30.283951 | 0.802346 | 0.094884 | 0 | 0.134259 | 0 | 0 | 0.143987 | 0.04193 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101852 | false | 0.018519 | 0.087963 | 0 | 0.217593 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd9a2a22106cfdf6f802047a7e11b687baf754c4 | 4,522 | py | Python | datasets/pasval_voc_writer.py | jiabaocui/SEGS | c03d3bcb6fdcc4e6e6e13767bed8eae754beb726 | [
"MIT"
] | null | null | null | datasets/pasval_voc_writer.py | jiabaocui/SEGS | c03d3bcb6fdcc4e6e6e13767bed8eae754beb726 | [
"MIT"
] | null | null | null | datasets/pasval_voc_writer.py | jiabaocui/SEGS | c03d3bcb6fdcc4e6e6e13767bed8eae754beb726 | [
"MIT"
] | null | null | null | import os
import random
import xml.etree.ElementTree as ET

import tensorflow as tf


def int64_feature(value):
    """Wrapper for inserting int64 features into Example proto.
    """
    if not isinstance(value, list):
        value = [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=value))


def float_feature(value):
    """Wrapper for inserting float features into Example proto.
    """
    if not isinstance(value, list):
        value = [value]
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))


def bytes_feature(value):
    """Wrapper for inserting bytes features into Example proto.
    """
    if not isinstance(value, list):
        value = [value]
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))


DEFUALT_PATHS = {
    'images': '/mnt/disk/chenyifeng/VOC2012/JPEGImages',
    'annotations': '/mnt/disk/chenyifeng/VOC2012/Annotations',
    'segmentations': '/mnt/disk/chenyifeng/VOC2012/SegmentationClassAug'
}


class PascalVocWriter:
    """
    PASCAL VOC 2012 DataSet to TF record Writer
    """

    def __init__(self, paths=DEFUALT_PATHS):
        self.img_path = paths['images']
        self.ano_path = paths['annotations']
        self.sgm_path = paths['segmentations']

    def convert_to_example(self, file_name):
        img_path = os.path.join(self.img_path, file_name + '.jpg')
        ano_path = os.path.join(self.ano_path, file_name + '.xml')
        sgm_path = os.path.join(self.sgm_path, file_name + '.png')

        img_data = tf.gfile.FastGFile(img_path, 'rb').read()
        sgm_data = tf.gfile.FastGFile(sgm_path, 'rb').read()
        # img_data = imread(img_path).tostring()
        # sgm_data = imread(sgm_path).tostring()

        anno_tree = ET.parse(ano_path)
        anno_root = anno_tree.getroot()
        # is_sgmt = int(anno_root.find('segmented').text)
        # if is_sgmt == 0:
        #     print('{} is not a Segmentation Sample. So Skipped'.format(file_name))
        size = anno_root.find('size')
        shape = [int(size.find('height').text),
                 int(size.find('width').text),
                 int(size.find('depth').text)]
        image_format = b'JPEG'
        segment_format = b'PNG'
        example = tf.train.Example(
            features=tf.train.Features(
                feature={
                    'image/name': bytes_feature(file_name.encode()),
                    'image/height': int64_feature(shape[0]),
                    'image/width': int64_feature(shape[1]),
                    'image/channels': int64_feature(shape[2]),
                    'image/shape': int64_feature(shape),
                    'image/format': bytes_feature(image_format),
                    'image/encoded': bytes_feature(img_data),
                    'label/format': bytes_feature(segment_format),
                    'label/encoded': bytes_feature(sgm_data)
                }
            )
        )
        return example

    def add_to_record(self, file_name, tfrecord_writer):
        example = self.convert_to_example(file_name)
        tfrecord_writer.write(example.SerializeToString())
    def run(self, pic_names, output_dir, shuffling=False, size=300):
        if shuffling:
            random.seed(1314)
            random.shuffle(pic_names)
        total_num = len(pic_names)
        for start in range(0, total_num, size):
            tf_filename = '%s/%03d.tfrecord' % (output_dir, start // size)
            tf_recorder = tf.python_io.TFRecordWriter(tf_filename)
            print('=>' * (start * 5 // total_num) + '{:.0f}% Finished'.format(start / total_num * 100))
            # bug fix: the shard length was hardcoded to 300, ignoring the
            # `size` parameter
            for pic_idx in range(start, min(start + size, total_num)):
                pic_name = pic_names[pic_idx]
                self.add_to_record(pic_name, tf_recorder)
        print('=>' * 5 + '{:.0f}% Finished'.format(100))

def convert_val():
    writer = PascalVocWriter()
    pic_names = open('/mnt/disk/chenyifeng/VOC2012/ImageSets/Segmentation/val.txt').readlines()
    pic_names = [i.strip(' \n') for i in pic_names]
    writer.run(pic_names, output_dir='/mnt/disk/chenyifeng/VOC2012/tf_segments/tf_records/val')


def convert_train():
    writer = PascalVocWriter()
    pic_names = open('/mnt/disk/chenyifeng/VOC2012/ImageSets/Segmentation/train.txt').readlines()
    pic_names = [i.strip(' \n') for i in pic_names]
    writer.run(pic_names, output_dir='/mnt/disk/chenyifeng/VOC2012/tf_segments/tf_records/train')


if __name__ == '__main__':
    # convert_train()
    convert_val()
| 34.784615 | 103 | 0.623176 | 558 | 4,522 | 4.842294 | 0.268817 | 0.035529 | 0.044041 | 0.062176 | 0.280163 | 0.225759 | 0.225759 | 0.225759 | 0.225759 | 0.225759 | 0 | 0.021981 | 0.245467 | 4,522 | 129 | 104 | 35.054264 | 0.76993 | 0.102609 | 0 | 0.119048 | 0 | 0 | 0.158537 | 0.089597 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.047619 | 0 | 0.214286 | 0.02381 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd9ca71b402899675dbdc36257c37a8b3b2984d6 | 861 | py | Python | pelican/plugins/issues/__init__.py | GlowstoneMC/glowstonemc.github.io | 1d1a453251816ef90fb8aaa63a689c81aaad4848 | [
"Artistic-2.0"
] | 6 | 2016-07-30T00:44:10.000Z | 2021-07-09T02:24:36.000Z | pelican/plugins/issues/__init__.py | GlowstoneMC/glowstonemc.github.io | 1d1a453251816ef90fb8aaa63a689c81aaad4848 | [
"Artistic-2.0"
] | 16 | 2016-07-30T01:01:30.000Z | 2021-07-09T21:33:51.000Z | pelican/plugins/issues/__init__.py | GlowstoneMC/glowstonemc.github.io | 1d1a453251816ef90fb8aaa63a689c81aaad4848 | [
"Artistic-2.0"
] | 10 | 2015-01-21T19:57:43.000Z | 2017-09-01T22:15:21.000Z | import itertools
import re

from pelican import signals

ISSUE_REGEX = re.compile(r"([\s(])(#[\d]+)([\s),.])")
ISSUE_URL = "https://github.com/GlowstoneMC/Glowstone/issues/{}"
ISSUE_HTML = """{}<a href="{}">{}</a>{}"""


def process_content(article):
    done_tags = set()
    for start, tag, end in ISSUE_REGEX.findall(article._content):
        if tag in done_tags:
            continue
        done_tags.add(tag)
        num = tag[1:]
        article._content = article._content.replace(
            "{}{}{}".format(start, tag, end),
            ISSUE_HTML.format(start, ISSUE_URL.format(num), tag, end),
        )


def get_issue_links(generator):
    blog = itertools.chain(generator.articles, generator.drafts)
    for article in blog:
        process_content(article)


def register():
    signals.article_generator_finalized.connect(get_issue_links)
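
# --- Illustrative example (not part of the original file) ---
# What the regex substitution above does to article text:
#
#   'Fixed in (#123).'
#       -> 'Fixed in (<a href="https://github.com/GlowstoneMC/Glowstone/issues/123">#123</a>).'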
| 24.6 | 70 | 0.637631 | 107 | 861 | 4.943925 | 0.485981 | 0.079395 | 0.079395 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001464 | 0.206736 | 861 | 34 | 71 | 25.323529 | 0.77306 | 0 | 0 | 0 | 0 | 0 | 0.119628 | 0.027875 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.130435 | 0 | 0.26087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bd9d44c68b6bd8ea2a3e03df9da24659e42178bb | 17,295 | py | Python | athena/layers/commons.py | iou2much/athena | 156dfceb0267e8c105e5d040aac017e2d8b9ad9d | [
"Apache-2.0"
] | null | null | null | athena/layers/commons.py | iou2much/athena | 156dfceb0267e8c105e5d040aac017e2d8b9ad9d | [
"Apache-2.0"
] | null | null | null | athena/layers/commons.py | iou2much/athena | 156dfceb0267e8c105e5d040aac017e2d8b9ad9d | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright (C) 2019 ATHENA AUTHORS; Xiangang Li
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=too-few-public-methods, invalid-name
# pylint: disable=no-self-use, missing-function-docstring
"""Utils for common layers."""
import tensorflow as tf
from athena.layers.functional import make_positional_encoding, collapse4d, gelu
from athena.layers.functional import splice
from athena.utils.misc import gated_linear_layer
class PositionalEncoding(tf.keras.layers.Layer):
"""positional encoding can be used in transformer"""
def __init__(self, d_model, max_position=800, scale=False):
super().__init__()
self.d_model = d_model
self.scale = scale
self.pos_encoding = make_positional_encoding(max_position, d_model)
def call(self, x):
""" call function """
seq_len = tf.shape(x)[1]
if self.scale:
x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
return x
class ScaledPositionalEncoding(PositionalEncoding):
"""scaled positional encoding,
reference: https://arxiv.org/pdf/1809.08895.pdf"""
def __init__(self, d_model, max_position=800):
super().__init__(d_model, max_position, scale=False)
def build(self, _):
self.alpha = self.add_weight(
name="alpha", initializer=tf.keras.initializers.constant(1)
)
def call(self, x):
seq_len = tf.shape(x)[1]
x += self.alpha * self.pos_encoding[:, :seq_len, :]
return x
class Collapse4D(tf.keras.layers.Layer):
"""collapse4d can be used in cnn-lstm for speech processing
reshape from [N T D C] -> [N T D*C]
"""
def call(self, x):
return collapse4d(x)
class Gelu(tf.keras.layers.Layer):
"""Gaussian Error Linear Unit.
    This is a smoother version of the ReLU. Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
x: with the GELU activation applied.
"""
def call(self, x):
return gelu(x)
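# Editor note: the exact GELU is x * Phi(x) = 0.5 * x * (1 + erf(x / sqrt(2))),
# often approximated as 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)));
# which variant athena's functional.gelu implements is not shown in this file.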
class TdnnLayer(tf.keras.layers.Layer):
"""An implementation of Tdnn Layer
Args:
        context: an int of left and right context, or a list of context offsets, e.g. (-2, 0, 2).
output_dim: the dim of the linear transform
"""
def __init__(self, context, output_dim, use_bias=False, **kwargs):
super().__init__(**kwargs)
if hasattr(context, "__iter__"):
self.context_size = len(context)
self.context_list = context
else:
self.context_size = context * 2 + 1
self.context_list = range(-context, context + 1)
self.output_dim = output_dim
self.linear = tf.keras.layers.Dense(output_dim, use_bias=use_bias)
def call(self, x, training=None, mask=None):
x = splice(x, self.context_list)
x = self.linear(x, training=training, mask=mask)
return x
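
# Minimal usage sketch (editor addition; the output time length assumes splice
# pads or clips at the sequence edges, which is not shown in this file):
def _example_tdnn():
    layer = TdnnLayer(context=(-2, 0, 2), output_dim=16)
    x = tf.zeros([4, 20, 40])  # [batch, time, feature]
    return layer(x)  # roughly [4, 20, 16]: 3 spliced frames -> Dense(16)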
class GroupNormalization(tf.keras.layers.Layer):
def __init__(
self,
groups: int = 2,
axis: int = -1,
epsilon: float = 1e-3,
center: bool = True,
scale: bool = True,
beta_initializer = "zeros",
gamma_initializer = "ones",
beta_regularizer = None,
gamma_regularizer = None,
beta_constraint = None,
gamma_constraint = None,
**kwargs
):
super().__init__(**kwargs)
self.supports_masking = True
self.groups = groups
self.axis = axis
self.epsilon = epsilon
self.center = center
self.scale = scale
self.beta_initializer = tf.keras.initializers.get(beta_initializer)
self.gamma_initializer = tf.keras.initializers.get(gamma_initializer)
self.beta_regularizer = tf.keras.regularizers.get(beta_regularizer)
self.gamma_regularizer = tf.keras.regularizers.get(gamma_regularizer)
self.beta_constraint = tf.keras.constraints.get(beta_constraint)
self.gamma_constraint = tf.keras.constraints.get(gamma_constraint)
self._check_axis()
def build(self, input_shape):
self._check_if_input_shape_is_none(input_shape)
self._set_number_of_groups_for_instance_norm(input_shape)
self._check_size_of_dimensions(input_shape)
self._create_input_spec(input_shape)
self._add_gamma_weight(input_shape)
self._add_beta_weight(input_shape)
self.built = True
super().build(input_shape)
def call(self, inputs):
input_shape = tf.keras.backend.int_shape(inputs)
tensor_input_shape = tf.shape(inputs)
reshaped_inputs, group_shape = self._reshape_into_groups(
inputs, input_shape, tensor_input_shape
)
normalized_inputs = self._apply_normalization(reshaped_inputs, input_shape)
outputs = tf.reshape(normalized_inputs, tensor_input_shape)
return outputs
def get_config(self):
config = {
"groups": self.groups,
"axis": self.axis,
"epsilon": self.epsilon,
"center": self.center,
"scale": self.scale,
"beta_initializer": tf.keras.initializers.serialize(self.beta_initializer),
"gamma_initializer": tf.keras.initializers.serialize(
self.gamma_initializer
),
"beta_regularizer": tf.keras.regularizers.serialize(self.beta_regularizer),
"gamma_regularizer": tf.keras.regularizers.serialize(
self.gamma_regularizer
),
"beta_constraint": tf.keras.constraints.serialize(self.beta_constraint),
"gamma_constraint": tf.keras.constraints.serialize(self.gamma_constraint),
}
base_config = super().get_config()
return {**base_config, **config}
def compute_output_shape(self, input_shape):
return input_shape
def _reshape_into_groups(self, inputs, input_shape, tensor_input_shape):
group_shape = [tensor_input_shape[i] for i in range(len(input_shape))]
group_shape[self.axis] = input_shape[self.axis] // self.groups
group_shape.insert(self.axis, self.groups)
group_shape = tf.stack(group_shape)
reshaped_inputs = tf.reshape(inputs, group_shape)
return reshaped_inputs, group_shape
def _apply_normalization(self, reshaped_inputs, input_shape):
group_shape = tf.keras.backend.int_shape(reshaped_inputs)
group_reduction_axes = list(range(1, len(group_shape)))
axis = -2 if self.axis == -1 else self.axis - 1
group_reduction_axes.pop(axis)
mean, variance = tf.nn.moments(
reshaped_inputs, group_reduction_axes, keepdims=True
)
gamma, beta = self._get_reshaped_weights(input_shape)
normalized_inputs = tf.nn.batch_normalization(
reshaped_inputs,
mean=mean,
variance=variance,
scale=gamma,
offset=beta,
variance_epsilon=self.epsilon,
)
return normalized_inputs
def _get_reshaped_weights(self, input_shape):
broadcast_shape = self._create_broadcast_shape(input_shape)
gamma = None
beta = None
if self.scale:
gamma = tf.reshape(self.gamma, broadcast_shape)
if self.center:
beta = tf.reshape(self.beta, broadcast_shape)
return gamma, beta
def _check_if_input_shape_is_none(self, input_shape):
dim = input_shape[self.axis]
if dim is None:
raise ValueError(
"Axis " + str(self.axis) + " of "
"input tensor should have a defined dimension "
"but the layer received an input with shape " + str(input_shape) + "."
)
def _set_number_of_groups_for_instance_norm(self, input_shape):
dim = input_shape[self.axis]
if self.groups == -1:
self.groups = dim
def _check_size_of_dimensions(self, input_shape):
dim = input_shape[self.axis]
if dim < self.groups:
raise ValueError(
"Number of groups (" + str(self.groups) + ") cannot be "
"more than the number of channels (" + str(dim) + ")."
)
if dim % self.groups != 0:
raise ValueError(
"Number of groups (" + str(self.groups) + ") must be a "
"multiple of the number of channels (" + str(dim) + ")."
)
def _check_axis(self):
if self.axis == 0:
raise ValueError(
"You are trying to normalize your batch axis. Do you want to "
"use tf.layer.batch_normalization instead"
)
def _create_input_spec(self, input_shape):
dim = input_shape[self.axis]
self.input_spec = tf.keras.layers.InputSpec(
ndim=len(input_shape), axes={self.axis: dim}
)
def _add_gamma_weight(self, input_shape):
dim = input_shape[self.axis]
shape = (dim,)
if self.scale:
self.gamma = self.add_weight(
shape=shape,
name="gamma",
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint,
)
else:
self.gamma = None
def _add_beta_weight(self, input_shape):
dim = input_shape[self.axis]
shape = (dim,)
if self.center:
self.beta = self.add_weight(
shape=shape,
name="beta",
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint,
)
else:
self.beta = None
def _create_broadcast_shape(self, input_shape):
broadcast_shape = [1] * len(input_shape)
broadcast_shape[self.axis] = input_shape[self.axis] // self.groups
broadcast_shape.insert(self.axis, self.groups)
return broadcast_shape
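
# Minimal usage sketch (editor addition): with groups=2 and 8 channels, each
# group of 4 channels is normalized with its own mean/variance per sample.
def _example_group_norm():
    layer = GroupNormalization(groups=2, axis=-1)
    x = tf.random.normal([3, 10, 8])  # [batch, time, channels]
    return layer(x)  # same shape; each group has ~zero mean, ~unit variance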
class InstanceNormalization(GroupNormalization):
"""Instance normalization layer.
References
- [Instance Normalization: The Missing Ingredient for Fast Stylization]
(https://arxiv.org/abs/1607.08022)
"""
def __init__(self, **kwargs):
kwargs["groups"] = -1
super().__init__(**kwargs)
class DownSampleBlock(tf.keras.layers.Layer):
"""conv2d downsample block for stargan, instance norm is used because batch size is 1
"""
def __init__(self, filters, kernel_size, strides):
super(DownSampleBlock, self).__init__()
self.conv1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, strides=strides,
padding="same")
self.conv2 = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, strides=strides,
padding="same")
self.norm1 = InstanceNormalization(epsilon=1e-8)
self.norm2 = InstanceNormalization(epsilon=1e-8)
def call(self, x):
h1 = self.conv1(x)
h1_norm = self.norm1(h1)
h1_gates = self.conv2(x)
h1_gates_norm = self.norm2(h1_gates)
h1_glu = gated_linear_layer(inputs=h1_norm, gates=h1_gates_norm)
return h1_glu
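# Editor note: gated_linear_layer presumably computes inputs * sigmoid(gates)
# (a GLU), so conv1/norm1 form the content path and conv2/norm2 the gating path.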
class UpSampleBlock(tf.keras.layers.Layer):
"""conv2d upsample block for stargan, instance norm is used because batch size is 1
"""
def __init__(self, filters, kernel_size, strides):
super(UpSampleBlock, self).__init__()
self.conv1 = tf.keras.layers.Conv2DTranspose(filters=filters, kernel_size=kernel_size, strides=strides,
padding="same")
self.conv2 = tf.keras.layers.Conv2DTranspose(filters=filters, kernel_size=kernel_size, strides=strides,
padding="same")
self.norm1 = InstanceNormalization(epsilon=1e-8)
self.norm2 = InstanceNormalization(epsilon=1e-8)
def call(self, x):
h1 = self.conv1(x)
h1_norm = self.norm1(h1)
h1_gates = self.conv2(x)
h1_gates_norm = self.norm2(h1_gates)
h1_glu = gated_linear_layer(inputs=h1_norm, gates=h1_gates_norm)
return h1_glu
class ConditionalInstanceNormalisation(tf.keras.layers.Layer):
"""CIN Block."""
def __init__(self, in_channel):
super(ConditionalInstanceNormalisation, self).__init__()
self.dim_in = in_channel
self.gamma = tf.keras.layers.Dense(in_channel)
self.beta = tf.keras.layers.Dense(in_channel)
def call(self, x, c):
u = tf.math.reduce_mean(x, axis=1, keepdims=True)
var = tf.math.reduce_mean((x - u) * (x - u), axis=1, keepdims=True)
std = tf.math.sqrt(var + 1e-8)
gamma = self.gamma(c)
gamma = tf.reshape(gamma, [-1, 1, self.dim_in])
beta = self.beta(c)
beta = tf.reshape(beta, [-1, 1, self.dim_in])
h = (x - u) / std
h = h * gamma + beta
return h
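# Editor note: this computes CIN(x, c) = gamma(c) * (x - mu(x)) / sigma(x) + beta(c),
# i.e. instance statistics taken over the time axis, with the affine parameters
# predicted from the conditioning vector c (e.g. a speaker/domain code).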
class ResidualBlock(tf.keras.layers.Layer):
"""Residual Block with instance normalization."""
def __init__(self, out_channel):
super(ResidualBlock, self).__init__()
self.conv_1 = tf.keras.layers.Conv1D(filters=out_channel, kernel_size=3, strides=1, padding="same", use_bias=False)
self.cin_1 = ConditionalInstanceNormalisation(out_channel)
def call(self, x, c):
x = self.conv_1(x)
x = self.cin_1(x, c)
x = gated_linear_layer(inputs=x, gates=x)
return x
class Down2d_init(tf.keras.layers.Layer):
    def __init__(self, filters, kernel_size, stride):
super(Down2d_init, self).__init__()
self.c1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, strides=stride, padding="same")
def call(self, x):
x1 = self.c1(x)
x1 = gated_linear_layer(inputs=x1, gates=x1)
return x1
class Down2d(tf.keras.layers.Layer):
    def __init__(self, filters, kernel_size, stride):
super(Down2d, self).__init__()
self.c1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, strides=stride, padding="same")
self.norm1 = InstanceNormalization(epsilon=1e-8)
def call(self, x):
x1 = self.c1(x)
x1 = self.norm1(x1)
x1 = gated_linear_layer(inputs=x1, gates=x1)
return x1
class Up2d(tf.keras.layers.Layer):
"""docstring for Up2d."""
def __init__(self, filters, kernel_size, stride):
super(Up2d, self).__init__()
self.c1 = tf.keras.layers.Conv2DTranspose(filters=filters, kernel_size=kernel_size, strides=stride, padding="same")
self.norm1 = InstanceNormalization(epsilon=1e-8)
def call(self, x):
x1 = self.c1(x)
x1 = self.norm1(x1)
x1 = gated_linear_layer(inputs=x1, gates=x1)
return x1
class ZoneOutCell(tf.keras.layers.LSTMCell):
    """Wrapper around the LSTM cell that applies ZoneOut regularization.
    Inspired by:
    https://github.com/teganmaharaj/zoneout/blob/master/zoneout_tensorflow.py
    published by one of the authors of the ZoneOut paper, https://arxiv.org/pdf/1606.01305.pdf.
    """
def __init__(self, zoneout_rate=0., **kwargs):
super().__init__(**kwargs)
self.zoneout_rate = zoneout_rate
self.drop_layer = tf.keras.layers.Dropout(self.zoneout_rate)
def call(self, inputs, states, training=False):
"""Runs vanilla LSTM Cell and applies zoneout.
"""
# Apply vanilla LSTM
outputs, new_states = super().call(inputs, states, training=training)
if self.zoneout_rate == 0:
return outputs, new_states
# Apply zoneout
h = (1 - self.zoneout_rate) * \
self.drop_layer(new_states[0] - states[0], training=training) + \
states[0]
c = (1 - self.zoneout_rate) * \
self.drop_layer(new_states[1] - states[1], training=training) + \
states[1]
return outputs, [h, c]
def get_config(self):
config = super().get_config()
config['zoneout_rate'] = self.zoneout_rate
return config
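
# Minimal usage sketch (editor addition): zoneout keeps a unit at its previous
# state with probability zoneout_rate during training, where dropout would zero it.
def _example_zoneout_rnn():
    cell = ZoneOutCell(units=32, zoneout_rate=0.1)
    rnn = tf.keras.layers.RNN(cell, return_sequences=True)
    x = tf.zeros([2, 7, 16])
    return rnn(x, training=True)  # [2, 7, 32]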
SUPPORTED_RNNS = {
"lstm": tf.keras.layers.LSTMCell,
"gru": tf.keras.layers.GRUCell,
"cudnnlstm": tf.keras.layers.LSTMCell,
"cudnngru": tf.keras.layers.GRUCell
}
ACTIVATIONS = {
"relu": tf.nn.relu,
"relu6": tf.nn.relu6,
"elu": tf.nn.elu,
"selu": tf.nn.selu,
"gelu": gelu,
"leaky_relu": tf.nn.leaky_relu,
"sigmoid": tf.nn.sigmoid,
"softplus": tf.nn.softplus,
"softsign": tf.nn.softsign,
"tanh": tf.nn.tanh,
}
# ==== File: oscar/data/mars.py | repo: IntelLabs/OSCAR | license: BSD-3-Clause ====
#
# Copyright (C) 2020 Intel Corporation
#
# SPDX-License-Identifier: BSD-3-Clause
#
import logging
from collections import Counter
import torch
from torch.utils.data import DataLoader
from torchvision.transforms import transforms as T
from torchvision.transforms import functional as TF
import pytorch_lightning as pl
from sklearn.model_selection import StratifiedShuffleSplit
from oscar.data.ucf101 import UCF101Dataset
from oscar.data.video import ClipSampler, MiddleClipSampler
from oscar.data.transforms import ExCompose, Permute, Squeeze, Unsqueeze, ExSplitLambda
from MARS.dataset.preprocess_data import get_mean
logger = logging.getLogger(__name__)
class MARSDataModule(pl.LightningDataModule):
def __init__(
self,
modality,
frames_root,
annotation_dir,
fold=1,
batch_size=16,
num_workers=1,
frame_size=112,
clip_length=16,
clip_step=1,
mid_clip_only=False,
random_resized_crop_scale=(0.5, 1.0),
test_indices=None,
test_size=0,
random_seed=0,
collate_fn=None,
frame_cache_dir=None,
train_file_patterns=["{:05d}.jpg", "TVL1jpg_x_{:05d}.jpg", "TVL1jpg_y_{:05d}.jpg"],
test_file_patterns=["{:05d}.jpg"],
):
super().__init__()
assert modality in ['RGB', 'RGB_Flow',
'RGBMasked_Flow', 'RGBMasked_FlowMasked',
'RGBSeg_Flow',
'RGBSegMC_Flow',
'RGBSegSC_Flow', 'RGBKeySC_Flow']
self.modality = modality
self.frames_root = frames_root
self.annotation_dir = annotation_dir
self.fold = fold
self.batch_size = batch_size
self.num_workers = num_workers
self.frame_size = frame_size
self.clip_length = clip_length
self.clip_step = clip_step
self.mid_clip_only = mid_clip_only
self.random_resized_crop_scale = random_resized_crop_scale
self.test_indices = test_indices
self.test_size = test_size
self.random_seed = random_seed
self.collate_fn = collate_fn
self.frame_cache_dir = frame_cache_dir
self.train_file_patterns = train_file_patterns
self.test_file_patterns = test_file_patterns
from detectron2.data import MetadataCatalog
self.palette = MetadataCatalog.get('coco_2017_val').thing_colors
if 'RGBSegMC_' in self.modality:
self.input_channels = len(self.palette) + 2 # COCO-things + XY
elif 'RGBSegSC_' in self.modality or 'RGBKeySC_' in self.modality:
self.input_channels = 1 + 2 # Mask + XY
else:
self.input_channels = 3 + 2 # RGB + XY
@classmethod
def add_argparse_args(cls, parser):
group = parser.add_argument_group(cls.__name__)
group.add_argument('--modality', default='RGB', type=str, choices=['RGB', 'RGB_Flow', 'RGBMasked_Flow', 'RGBMasked_FlowMasked', 'RGBSeg_Flow', 'RGBSegMC_Flow', 'RGBSegSC_Flow', 'RGBKeySC_Flow'])
group.add_argument('--dataset', default='UCF101', type=str, choices=['UCF101'])
group.add_argument('--only_RGB', default=False, action='store_true')
group.add_argument('--batch_size', default=32, type=int)
group.add_argument('--frame_dir', default=None, type=str)
group.add_argument('--annotation_path', default=None, type=str)
group.add_argument('--frame_mask_dir', default=None, type=str)
group.add_argument('--n_workers', default=4, type=int)
group.add_argument('--split', default=1, type=int, choices=[1, 2, 3])
group.add_argument('--sample_size', default=112, type=int)
group.add_argument('--sample_duration', default=16, type=int)
group.add_argument('--step_between_clips', default=1, type=int)
group.add_argument('--random_resized_crop_scale_min', default=0.5, type=float)
group.add_argument('--random_resized_crop_scale_max', default=1.0, type=float)
group.add_argument('--test_size', default=0, type=int)
group.add_argument('--test_index', default=None, type=int, nargs='+')
        group.add_argument('--random_seed', default=1, type=int, help='Manually set random seed of sampling validation clip')
        group.add_argument('--mid_clip_only', default=False, action='store_true')
group.add_argument('--shuffle_axes', default=None, type=int, nargs='+')
return parser
def prepare_data(self):
UCF101Dataset(self.frames_root,
self.annotation_dir,
self.train_file_patterns,
fold=self.fold)
def setup(self, stage=None):
logger.info("Setting up data module for stage: %s", stage)
channels_mean = torch.tensor([*get_mean('activitynet'), 127.5, 127.5])
train_channels_mean = channels_mean
test_channels_mean = channels_mean[0:3]
# Create robust feature transform
robust_extractor = None
if 'RGBMasked_' in self.modality:
from oscar.defences.preprocessor.detectron2 import CachedDetectron2Preprocessor
from oscar.defences.preprocessor.ablator import AblatorPyTorch
dt2 = CachedDetectron2Preprocessor(self.frame_cache_dir)
robust_extractor = AblatorPyTorch(channels_mean / 255, detectron2=dt2)
elif 'RGBSeg_' in self.modality:
from oscar.defences.preprocessor.detectron2 import CachedDetectron2Preprocessor
from oscar.defences.preprocessor.paletted_semantic_segmentor import PalettedSemanticSegmentorPyTorch
dt2 = CachedDetectron2Preprocessor(self.frame_cache_dir)
robust_extractor = PalettedSemanticSegmentorPyTorch(channels_mean[0:3] / 255, detectron2=dt2, palette=self.palette)
elif 'RGBSegMC_' in self.modality:
from oscar.defences.preprocessor.detectron2 import CachedDetectron2Preprocessor
from oscar.defences.preprocessor.multichannel_semantic_segmentor import MultichannelSemanticSegmentorPyTorch
dt2 = CachedDetectron2Preprocessor(self.frame_cache_dir)
robust_extractor = MultichannelSemanticSegmentorPyTorch(detectron2=dt2, nb_channels=len(self.palette))
train_channels_mean = 127.5
test_channels_mean = 127.5
elif 'RGBSegSC_' in self.modality or 'RGBKeySC_' in self.modality:
# TODO: Create another segmentor class that is faster and selects objects relevant to UCF101
from oscar.defences.preprocessor.detectron2 import CachedDetectron2Preprocessor
from oscar.defences.preprocessor.multichannel_semantic_segmentor import MultichannelSemanticSegmentorPyTorch
dt2 = CachedDetectron2Preprocessor(self.frame_cache_dir)
robust_extractor = MultichannelSemanticSegmentorPyTorch(detectron2=dt2, nb_channels=1) # 1 channel == person mask
train_channels_mean = 127.5
test_channels_mean = 127.5
# Apply robust feature extractor to RGB channels only if not _FlowMasked
if robust_extractor is not None and '_FlowMasked' not in self.modality:
robust_extractor = ExSplitLambda(robust_extractor, 3, 0, dim=-1)
robust_transform = ExCompose([
T.Normalize(0, 255), # [0, 255] -> [0, 1]
Permute(0, 2, 3, 1), # TCHW -> THWC
Unsqueeze(0), # THWC -> NTHWC
robust_extractor, # Apply robust feature extractor
Squeeze(0), # NTHWC -> THWC
Permute(0, 3, 1, 2), # THWC -> TCHW
T.Normalize(0, 1/255), # [0, 1] -> [0, 255]
])
# Train transform
# FIXME: Don't load flow when modality does not specify _Flow!
# FIXME: Is there a way to decouple rgb and flow datasets like we did above?
# The problem is they need to be synchronized somehow.
train_transform = ExCompose([
robust_transform,
T.RandomResizedCrop(self.frame_size, scale=self.random_resized_crop_scale, ratio=(1., 1.)), # Crop then Resize
T.RandomApply([TF.hflip, ExSplitLambda(T.Normalize(255, -1), 1, -2, dim=-1)]), # Horizontal flip and invert x-flow randomly
T.Normalize(train_channels_mean, 1), # [0, 255] -> ~[-128, 128]
Permute(1, 0, 2, 3), # TCHW -> CTHW
])
train_sampler = ClipSampler(self.clip_length, self.clip_step)
# Test transform
test_transform = ExCompose([
robust_transform,
T.Resize(self.frame_size),
T.CenterCrop(self.frame_size),
T.Normalize(test_channels_mean, 1), # [0, 255] -> ~[-128, 128]
Permute(1, 0, 2, 3), # TCHW -> CTHW
])
test_sampler = range
if self.mid_clip_only:
test_sampler = MiddleClipSampler(self.clip_length, self.clip_step)
if stage == 'fit' or stage is None:
logger.info("Loading training data...")
self.train_dataset = UCF101Dataset(self.frames_root,
self.annotation_dir,
self.train_file_patterns,
train=True,
fold=self.fold,
transform=train_transform,
sampler=train_sampler)
logger.info("train data = %d", len(self.train_dataset))
logger.info("Loading validation data...")
self.val_dataset = UCF101Dataset(self.frames_root,
self.annotation_dir,
self.test_file_patterns,
train=False,
fold=self.fold,
transform=test_transform,
sampler=train_sampler)
logger.info("val data = %d", len(self.val_dataset))
if stage == 'test' or stage is None:
logger.info("Loading test data...")
test_dataset = UCF101Dataset(self.frames_root,
self.annotation_dir,
self.test_file_patterns,
train=False,
fold=self.fold,
transform=test_transform,
sampler=test_sampler)
# Select test indices...
if self.test_indices is not None:
logger.info("Selecting data indices: %s", self.test_indices)
test_dataset = torch.utils.data.Subset(test_dataset, self.test_indices)
# ...or subsample test_dataset using a stratified split of test_size elements.
elif self.test_size > 0:
                y = test_dataset.targets
                y_transform = y  # fall back to raw targets when no transform is set
                if test_dataset.target_transform is not None:
                    y_transform = [test_dataset.target_transform(y_) for y_ in y]
                sss = StratifiedShuffleSplit(n_splits=1, test_size=self.test_size, random_state=self.random_seed)
                _, indices = next(sss.split(y, y_transform))
y_selected = [y[i] for i in indices]
logger.info("Stratified subsampling test dataset to %d samples: %s", self.test_size, Counter(y_selected))
test_dataset = torch.utils.data.Subset(test_dataset, indices)
self.test_dataset = test_dataset
logger.info("test data = %d", len(self.test_dataset))
def train_dataloader(self):
return DataLoader(self.train_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers,
pin_memory=True,
drop_last=True,
collate_fn=self.collate_fn)
def val_dataloader(self):
return DataLoader(self.val_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
pin_memory=True,
drop_last=True,
collate_fn=self.collate_fn)
def test_dataloader(self):
return DataLoader(self.test_dataset,
batch_size=1, # Must be 1 because we can't batch whole videos
shuffle=False,
num_workers=self.num_workers,
pin_memory=True,
drop_last=False,
collate_fn=self.collate_fn)
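
# Minimal usage sketch (editor addition; the paths below are placeholders and a
# UCF101 frame/annotation layout matching UCF101Dataset is assumed):
#
#   dm = MARSDataModule(modality="RGB",
#                       frames_root="/data/ucf101/frames",
#                       annotation_dir="/data/ucf101/annotations")
#   dm.prepare_data()
#   dm.setup("fit")
#   clips, labels = next(iter(dm.train_dataloader()))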
# ==== File: dolfyn/tests/test_vs_nortek.py | repo: jklymak/dolfyn | license: BSD-3-Clause ====
from dolfyn.tests import test_read_adp as tr
from dolfyn.tests import base
from dolfyn.rotate.api import rotate2
from numpy.testing import assert_allclose
import numpy as np
import scipy.io as sio
"""
Testing against velocity and bottom-track velocity data in Nortek mat files
exported from SignatureDeployment.
inst2earth rotation fails for AHRS-equipped instruments and I don't know why -
I believe it's due to an RC filter (or some such) on Nortek's side, applied
after they load the orientation matrix from the AHRS (check out the difference
color plots compared to non-AHRS instruments). Using HPR- or quaternion-calculated
orientation matrices doesn't close the gap.
"""
def load_nortek_matfile(filename):
# remember to transpose this data
data = sio.loadmat(filename,
struct_as_record=False,
squeeze_me=True)
d = data['Data']
# print(d._fieldnames)
burst = 'Burst'
bt = 'BottomTrack'
beam = ['_VelBeam1', '_VelBeam2', '_VelBeam3', '_VelBeam4']
b5 = 'IBurst_VelBeam5'
inst = ['_VelX', '_VelY', '_VelZ1', '_VelZ2']
earth = ['_VelEast', '_VelNorth', '_VelUp1', '_VelUp2']
axis = {'beam': beam, 'inst': inst, 'earth': earth}
AHRS = 'Burst_AHRSRotationMatrix' # , 'IBurst_AHRSRotationMatrix']
vel = {'beam': {}, 'inst': {}, 'earth': {}}
for ky in vel.keys():
for i in range(len(axis[ky])):
vel[ky][i] = np.transpose(getattr(d, burst+axis[ky][i]))
vel[ky] = np.stack((vel[ky][0], vel[ky][1],
vel[ky][2], vel[ky][3]), axis=0)
if AHRS in d._fieldnames:
vel['omat'] = np.transpose(getattr(d, AHRS))
if b5 in d._fieldnames:
vel['b5'] = np.transpose(getattr(d, b5))
#vel['omat5'] = getattr(d, AHRS[1])
if bt+beam[0] in d._fieldnames:
vel_bt = {'beam': {}, 'inst': {}, 'earth': {}}
for ky in vel_bt.keys():
for i in range(len(axis[ky])):
vel_bt[ky][i] = np.transpose(getattr(d, bt+axis[ky][i]))
vel_bt[ky] = np.stack((vel_bt[ky][0], vel_bt[ky][1],
vel_bt[ky][2], vel_bt[ky][3]), axis=0)
return vel, vel_bt
else:
return vel
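# Editor note: the returned `vel` dict maps 'beam'/'inst'/'earth' to arrays
# stacked as [4, ...] (one slice per beam or velocity component), plus optional
# 'omat' (AHRS rotation matrices) and 'b5' (5th-beam velocity) entries; `vel_bt`
# mirrors this layout for bottom-track velocities when BottomTrack fields exist.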
def rotate(axis):
# BenchFile01.ad2cp
td_sig = rotate2(tr.dat_sig, axis, inplace=False)
# Sig1000_IMU.ad2cp no userdata
td_sig_i = rotate2(tr.dat_sig_i, axis, inplace=False)
# VelEchoBT01.ad2cp
td_sig_ieb = rotate2(tr.dat_sig_ieb, axis,
inplace=False)
# Sig500_Echo.ad2cp
td_sig_ie = rotate2(tr.dat_sig_ie, axis,
inplace=False)
td_sig_vel = load_nortek_matfile(base.rfnm('BenchFile01.mat'))
td_sig_i_vel = load_nortek_matfile(base.rfnm('Sig1000_IMU.mat'))
td_sig_ieb_vel, vel_bt = load_nortek_matfile(base.rfnm('VelEchoBT01.mat'))
td_sig_ie_vel = load_nortek_matfile(base.rfnm('Sig500_Echo.mat'))
nens = 100
# ARHS inst2earth orientation matrix check
# Checks the 1,1 element because the nortek orientmat's shape is [9,:] as
# opposed to [3,3,:]
if axis == 'inst':
assert_allclose(td_sig_i.orientmat[0][0].values,
td_sig_i_vel['omat'][0, :nens], atol=1e-7)
assert_allclose(td_sig_ieb.orientmat[0][0].values,
td_sig_ieb_vel['omat'][0, :][..., :nens], atol=1e-7)
# 4-beam velocity
assert_allclose(td_sig.vel.values, td_sig_vel[axis][..., :nens], atol=1e-5)
assert_allclose(td_sig_i.vel.values,
td_sig_i_vel[axis][..., :nens], atol=5e-3)
assert_allclose(td_sig_ieb.vel.values,
td_sig_ieb_vel[axis][..., :nens], atol=5e-3)
assert_allclose(td_sig_ie.vel.values,
td_sig_ie_vel[axis][..., :nens], atol=1e-5)
# 5th-beam velocity
if axis == 'beam':
assert_allclose(td_sig_i.vel_b5.values,
td_sig_i_vel['b5'][..., :nens], atol=1e-5)
assert_allclose(td_sig_ieb.vel_b5.values,
td_sig_ieb_vel['b5'][..., :nens], atol=1e-5)
assert_allclose(td_sig_ie.vel_b5.values,
td_sig_ie_vel['b5'][..., :nens], atol=1e-5)
# bottom-track
assert_allclose(td_sig_ieb.vel_bt.values,
vel_bt[axis][..., :nens], atol=5e-3)
def test_rotate2_beam():
rotate('beam')
def test_rotate2_inst():
rotate('inst')
def test_rotate2_earth():
rotate('earth')
# ==== File: backend/flaskr/formula.py | repo: jyyang42/RobinOptionCalculator | license: MIT ====
import math
def get_d1(p0, X, t, sigma, Rho):
# P0 stock price 62
# X exercise Price 60
# t time to expiration days/365 40
# sigma Volatility 0.32
# Rho Risk-Free Rate 0.04
# d1 = {ln(62/60) + [0.04 + 0.5 * 0.32 ^ 2] * (40/365)} / 0.32 * sqrt(40/365)
a = math.log(p0/X) + (Rho + 0.5 * sigma * sigma) * (t / 365)
    b = sigma * math.sqrt(t / 365)
return a/b
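# Editor note, in standard Black-Scholes notation with T = t/365 years:
#   d1 = [ln(S0 / X) + (r + sigma^2 / 2) * T] / (sigma * sqrt(T))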
def get_d2(d1, sigma, t):
# d1 - sigma * sqrt(t/365)
return d1 - sigma * math.sqrt(t/365)
def get_cumulative_standard_normal_distribution(d):
return 0.5 * (1 + math.erf(d/math.sqrt(2)))
def get_call(p0, Nd1, X, Krf, t, Nd2):
a = p0 * Nd1
b = X / (math.pow(math.e, Krf * t/365))
return a - b * Nd2
def get_put(Vc, X, Krf, t, p0):
return Vc + X / math.pow(math.e, Krf * t/365) - p0
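# Editor note: get_put applies put-call parity, P = C + X * e^(-r * T) - S0,
# deriving the put value from the call value instead of pricing it directly.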
if __name__ == "__main__":
# Z = (x - µ) / sigma
p0 = 62
X = 60
t = 40
sigma = 0.32
Rho = 0.04
d1 = get_d1(p0, X, t, sigma, Rho)
d2 = get_d2(d1, sigma, t)
Nd1 = get_cumulative_standard_normal_distribution(d1)
Nd2 = get_cumulative_standard_normal_distribution(d2)
Vc = get_call(p0, Nd1, X, Rho, t, Nd2)
Vp = get_put(Vc, X, Rho, t, p0)
print("d1:", d1)
print("d2:", d2)
print("Nd1:", Nd1)
print("Nd2:", Nd2)
print("Vc:", Vc)
print("Vp:", Vp) | 25.596154 | 81 | 0.552968 | 235 | 1,331 | 3.012766 | 0.251064 | 0.042373 | 0.088983 | 0.114407 | 0.34322 | 0.10452 | 0.10452 | 0.056497 | 0 | 0 | 0 | 0.123958 | 0.278738 | 1,331 | 52 | 82 | 25.596154 | 0.613542 | 0.180316 | 0 | 0 | 0 | 0 | 0.025854 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.151515 | false | 0 | 0.030303 | 0.090909 | 0.333333 | 0.181818 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bda34ec056c93e4918c1bc75e8154a348bc2e5e2 | 932 | py | Python | bot/modules/magnet.py | AliAryanTech/Nyaa-Telegram-Bot | d1614ed218fd9f413d046eec61978df269b325b6 | [
"MIT"
] | 12 | 2020-12-01T04:40:37.000Z | 2022-01-22T14:19:04.000Z | bot/modules/magnet.py | AliAryanTech/Nyaa-Telegram-Bot | d1614ed218fd9f413d046eec61978df269b325b6 | [
"MIT"
] | null | null | null | bot/modules/magnet.py | AliAryanTech/Nyaa-Telegram-Bot | d1614ed218fd9f413d046eec61978df269b325b6 | [
"MIT"
] | 19 | 2021-02-09T19:20:59.000Z | 2022-03-18T12:05:08.000Z | from .get_response import nyaa_id, sukebei_id
from bot import NYAA, botname
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery
INVALID_TEXT = """
No ID found!
"""
@NYAA.on_message(filters.command(["magnet", f"magnet@{botname}"], prefixes="/") & ~filters.edited)
async def get_magnet(client, message):
    query = message.text.split(maxsplit=2)
    if len(query) != 2:
        await NYAA.send_message(chat_id=message.chat.id, text=INVALID_TEXT)
return
buttons = [
[
InlineKeyboardButton("Nyaa", f"nyaa {query[-1]}"),
InlineKeyboardButton("Sukebei", f"sukebei {query[-1]}")
]
]
    await NYAA.send_message(chat_id=message.chat.id, text="Where do you wanna search?", reply_markup=InlineKeyboardMarkup(buttons))
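
# Editor note (illustrative): a user triggers this with e.g. "/magnet 1234567";
# the reply's inline buttons carry callback data ("nyaa 1234567" / "sukebei 1234567"),
# presumably consumed by a separate callback-query handler built on the imported
# nyaa_id / sukebei_id helpers.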
# ==== File: fhir/resources/DSTU2/implementationguide.py | repo: cstoltze/fhir.resources | license: BSD-3-Clause ====
# -*- coding: utf-8 -*-
"""
Profile: https://www.hl7.org/fhir/DSTU2/implementationguide.html
Release: DSTU2
Version: 1.0.2
Revision: 7202
"""
from typing import Any, Dict
from typing import List as ListType
from pydantic import Field, root_validator
from . import backboneelement, domainresource, fhirtypes
class ImplementationGuide(domainresource.DomainResource):
"""A set of rules about how FHIR is used.
A set of rules of how a particular interoperability or standards problem is
solved - typically through the use of FHIR resources. This resource is used
to gather all the parts of an implementation guide into a logical whole and
to publish a computable definition of all the parts.
"""
resource_type = Field("ImplementationGuide", const=True)
binary: ListType[fhirtypes.Uri] = Field(
None,
alias="binary",
title="List of `uri` items.",
description="Image, css, script, etc..",
)
contact: ListType[fhirtypes.ImplementationGuideContactType] = Field(
None,
alias="contact",
title="Contact details for the publisher",
description=(
"Contact details to assist a user in finding and communicating with the"
" publisher."
),
)
copyright: fhirtypes.String = Field(
None,
alias="copyright",
title="Use and/or publishing restrictions",
description=(
"A copyright statement relating to the implementation guide and/or its "
"contents. Copyright statements are generally legal restrictions on the"
" use and publishing of the implementation guide."
),
)
date: fhirtypes.DateTime = Field(
None,
alias="date",
title="Date last changed",
description=(
"The date (and optionally time) when the implementation guide was "
"published. The date must change when the business version changes and "
"it must change if the status code changes. In addition, it should "
"change when the substantive content of the implementation guide "
"changes."
),
)
dependency: ListType[fhirtypes.ImplementationGuideDependencyType] = Field(
None,
alias="dependency",
title="Another Implementation guide this depends on",
description=(
"Another implementation guide that this implementation depends on. "
"Typically, an implementation guide uses value sets, profiles "
"etc.defined in other implementation guides."
),
)
description: fhirtypes.String = Field(
None,
alias="description",
title="Natural language description of the implementation guide",
description=(
"A free text natural language description of the implementation guide "
"from a consumer's perspective."
),
)
experimental: fhirtypes.Boolean = Field(
None,
alias="experimental",
title="For testing purposes, not real usage",
description=(
"A Boolean value to indicate that this implementation guide is authored"
" for testing purposes (or education/evaluation/marketing) and is not "
"intended to be used for genuine usage."
),
)
fhirVersion: fhirtypes.Id = Field(
None,
alias="fhirVersion",
title="FHIR Version this Implementation Guide targets",
description=(
"The version(s) of the FHIR specification that this ImplementationGuide"
" targets - e.g. describes how to use. The value of this element is the"
" formal version of the specification, without the revision number, "
"e.g. [publication].[major].[minor], which is 4.0.1. for this version."
),
)
global_fhir: ListType[fhirtypes.ImplementationGuideGlobalType] = Field(
None,
alias="global",
title="Profiles that apply globally",
description=(
"A set of profiles that all resources covered by this implementation "
"guide must conform to."
),
)
name: fhirtypes.String = Field(
...,
alias="name",
title="Name for this implementation guide (computer friendly)",
description=(
"A natural language name identifying the implementation guide. This "
"name should be usable as an identifier for the module by machine "
"processing applications such as code generation."
),
)
package: ListType[fhirtypes.ImplementationGuidePackageType] = Field(
...,
alias="package",
title="List of `ImplementationGuidePackage` items (represented as `dict` in JSON).",
description="Group of resources as used in .page.package.",
)
page: fhirtypes.ImplementationGuidePageType = Field(
...,
alias="page",
title="Type `ImplementationGuidePage` (represented as `dict` in JSON).",
description="Page/Section in the Guide.",
)
publisher: fhirtypes.String = Field(
None,
alias="publisher",
title="Name of the publisher (organization or individual)",
description=(
"The name of the organization or individual that published the "
"implementation guide."
),
)
status: fhirtypes.Code = Field(
...,
alias="status",
title="draft | active | retired",
description=(
"The status of this implementation guide. Enables tracking the life-"
"cycle of the content."
),
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["draft", "active", "retired"],
)
url: fhirtypes.Uri = Field(
...,
alias="url",
title=(
"Canonical identifier for this implementation guide, represented as a "
"URI (globally unique)"
),
description=(
"An absolute URI that is used to identify this implementation guide "
"when it is referenced in a specification, model, design or an "
"instance; also called its canonical identifier. This SHOULD be "
"globally unique and SHOULD be a literal address at which at which an "
"authoritative instance of this implementation guide is (or will be) "
"published. This URL can be the target of a canonical reference. It "
"SHALL remain the same when the implementation guide is stored on "
"different servers."
),
)
useContext: ListType[fhirtypes.CodeableConceptType] = Field(
None,
alias="useContext",
title="The context that the content is intended to support",
description=(
"The content was developed with a focus and intent of supporting the "
"contexts that are listed. These contexts may be general categories "
"(gender, age, ...) or may be references to specific programs "
"(insurance plans, studies, ...) and may be used to assist with "
"indexing and searching for appropriate implementation guide instances."
),
)
version: fhirtypes.String = Field(
None,
alias="version",
title="Business version of the implementation guide",
description=(
"The identifier that is used to identify this version of the "
"implementation guide when it is referenced in a specification, model, "
"design or instance. This is an arbitrary value managed by the "
"implementation guide author and is not expected to be globally unique."
" For example, it might be a timestamp (e.g. yyyymmdd) if a managed "
"version is not available. There is also no expectation that versions "
"can be placed in a lexicographical sequence."
),
)
class ImplementationGuideContact(backboneelement.BackboneElement):
"""Contact details of the publisher.
Contacts to assist a user in finding and communicating with the publisher.
"""
resource_type = Field("ImplementationGuideContact", const=True)
name: fhirtypes.String = Field(
None,
alias="name",
title="Type `str`.",
description="Name of a individual to contact.",
)
telecom: ListType[fhirtypes.ContactPointType] = Field(
None,
alias="telecom",
title="List of `ContactPoint` items (represented as `dict` in JSON).",
description="Contact details for individual or publisher.",
)
class ImplementationGuideDependency(backboneelement.BackboneElement):
"""Another Implementation guide this depends on.
Another implementation guide that this implementation depends on.
Typically, an implementation guide uses value sets, profiles etc.defined in
other implementation guides.
"""
resource_type = Field("ImplementationGuideDependsOn", const=True)
type: fhirtypes.Code = Field(
...,
alias="type",
title="Type `str`.",
description="reference | inclusion.",
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["reference", "inclusion"],
)
uri: fhirtypes.Uri = Field(
...,
alias="uri",
title="Identity of the IG that this depends on",
description="A canonical reference to the Implementation guide for the dependency.",
)
class ImplementationGuideGlobal(backboneelement.BackboneElement):
"""Profiles that apply globally.
A set of profiles that all resources covered by this implementation guide
must conform to.
"""
resource_type = Field("ImplementationGuideGlobal", const=True)
profile: fhirtypes.ReferenceType = Field(
...,
alias="profile",
title="Profile that all resources must conform to",
description="A reference to the profile that all instances must conform to.",
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["StructureDefinition"],
)
type: fhirtypes.Code = Field(
...,
alias="type",
title="Type this profile applies to",
description="The type of resource that all instances must conform to.",
)
class ImplementationGuidePackage(backboneelement.BackboneElement):
"""Group of resources as used in .page.package.
A logical group of resources. Logical groups can be used when building
pages.
"""
resource_type = Field("ImplementationGuidePackage", const=True)
description: fhirtypes.String = Field(
None,
alias="description",
title="Type `str`.",
description="Human readable text describing the package.",
)
name: fhirtypes.String = Field(
...,
alias="name",
title="Type `str`.",
description="Name used .page.package.",
)
resource: ListType[fhirtypes.ImplementationGuidePackageResourceType] = Field(
...,
alias="resource",
title=(
"List of `ImplementationGuidePackageResource` items (represented as `dict` "
"in JSON)."
),
description="Resource in the implementation guide.",
)
class ImplementationGuidePackageResource(backboneelement.BackboneElement):
"""Resource in the implementation guide.
A resource that is part of the implementation guide. Conformance resources
(value set, structure definition, conformance statements etc.) are obvious
candidates for inclusion, but any kind of resource can be included as an
example resource.
"""
resource_type = Field("ImplementationGuidePackageResource", const=True)
acronym: fhirtypes.String = Field(
None,
alias="acronym",
title="Type `str`.",
description="Short code to identify the resource.",
)
description: fhirtypes.String = Field(
None,
alias="description",
title="Type `str`.",
description="Reason why included in guide.",
)
exampleFor: fhirtypes.ReferenceType = Field(
None,
alias="exampleFor",
title=(
"Type `Reference` referencing `StructureDefinition` (represented as `dict` "
"in JSON)."
),
description="Resource this is an example of (if applicable).",
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["StructureDefinition"],
)
name: fhirtypes.String = Field(
None,
alias="name",
title="Type `str`.",
description="Human Name for the resource.",
)
purpose: fhirtypes.Code = Field(
...,
alias="purpose",
title="Type `str`.",
description=(
"example | terminology | profile | extension | dictionary | logical."
),
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=[
"example",
"terminology",
"profile",
"extension",
"dictionary",
"logical",
],
)
sourceReference: fhirtypes.ReferenceType = Field(
None,
alias="sourceReference",
title="Type `Reference` referencing `Resource` (represented as `dict` in JSON).",
description="Location of the resource.",
# Choice of Data Types. i.e timing[x]
one_of_many="source",
one_of_many_required=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Resource"],
)
sourceUri: fhirtypes.Uri = Field(
None,
alias="sourceUri",
title="Type `str`.",
description="Location of the resource.",
# Choice of Data Types. i.e timing[x]
one_of_many="source",
one_of_many_required=True,
)
@root_validator(pre=True)
def validate_one_of_many(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
"""
one_of_many_fields = {
"source": ["sourceReference", "sourceUri"],
}
for prefix, fields in one_of_many_fields.items():
assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
required = (
cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
is True
)
found = False
for field in fields:
if field in values and values[field] is not None:
if found is True:
raise ValueError(
"Any of one field value is expected from "
f"this list {fields}, but got multiple!"
)
else:
found = True
if required is True and found is False:
raise ValueError(f"Expect any of field value from this list {fields}.")
return values
class ImplementationGuidePage(backboneelement.BackboneElement):
"""Page/Section in the Guide.
A page / section in the implementation guide. The root page is the
implementation guide home page.
"""
resource_type = Field("ImplementationGuidePage", const=True)
format: fhirtypes.Code = Field(
None,
alias="format",
title="Type `str`.",
description="Format of the page (e.g. html, markdown, etc.).",
)
kind: fhirtypes.Code = Field(
...,
alias="kind",
title="Type `str`.",
description=(
"page | example | list | include | directory | dictionary | toc | resource."
),
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=[
"page",
"example",
"list",
"include",
"directory",
"dictionary",
"toc",
"resource",
],
)
name: fhirtypes.String = Field(
...,
alias="name",
title="Type `str`.",
description="Short name shown for navigational assistance.",
)
package: ListType[fhirtypes.String] = Field(
None,
alias="package",
title="List of `str` items.",
description="Name of package to include.",
)
page: ListType[fhirtypes.ImplementationGuidePageType] = Field(
None,
alias="page",
title=(
"List of `ImplementationGuidePage` items (represented as `dict` in JSON)."
),
description="Nested Pages / Sections.",
)
source: fhirtypes.Uri = Field(
...,
alias="source",
title="Type `Uri`.",
description="Where to find that page.",
)
type: ListType[fhirtypes.Code] = Field(
None,
alias="type",
title="List of `Code` items.",
description="Kind of resource to include in the list.",
)
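
# Minimal usage sketch (editor addition; values are illustrative): exactly one
# of sourceUri / sourceReference must be set on a package resource, which is
# what validate_one_of_many enforces at construction time.
#
#   res = ImplementationGuidePackageResource(
#       purpose="example",
#       sourceUri="Patient/example",
#   )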
# ==== File: src/mnkgame.py | repo: isihya/minimax_algorithm_MNKgame | license: MIT ====
import numpy as np
from game import Game
class MNKgame(Game):
"""
https://en.wikipedia.org/wiki/M,n,k-game
    If m=3, n=3, k=3 (the default), this is tic-tac-toe.
"""
def __init__(self, n=3, m=3, k=3, field=None):
self.n = n
self.m = m
self.k = k
self.field = field
if field is None:
self.field = np.zeros((n, m))
self.winner = 0
    def evaluate(self, field) -> int:
        # down (vertical runs, started from the top row)
        for x in range(self.m):
            score = self.scan(field, (1, 0), 0, x)
            if score != 0:
                return score
        # right (horizontal runs, started from the left column)
        for y in range(self.n):
            score = self.scan(field, (0, 1), y, 0)
            if score != 0:
                return score
        # right down (main diagonals, started from the top row and left column)
        for x in range(self.m):
            score = self.scan(field, (1, 1), 0, x)
            if score != 0:
                return score
        for y in range(self.n):
            score = self.scan(field, (1, 1), y, 0)
            if score != 0:
                return score
        # right up (anti-diagonals, started from the bottom row and left column)
        for x in range(self.m):
            score = self.scan(field, (-1, 1), self.n - 1, x)
            if score != 0:
                return score
        for y in range(self.n):
            score = self.scan(field, (-1, 1), y, 0)
            if score != 0:
                return score
        return 0
    def scan(self, field, d, i, j) -> int:
        cnt_player = 0
        cnt_enemy = 0
        while self.is_in_field(i, j):
            if int(field[i][j]) == 1:
                cnt_player += 1
                cnt_enemy = 0  # an opponent run is broken by a player stone
                if cnt_player == self.k:
                    return 1
            elif int(field[i][j]) == -1:
                cnt_enemy += 1
                cnt_player = 0  # a player run is broken by an opponent stone
                if cnt_enemy == self.k:
                    return -1
            else:
                cnt_player = 0
                cnt_enemy = 0
            i += d[0]
            j += d[1]
        return 0
def is_in_field(self, i, j):
if 0 <= i and i < self.n and 0 <= j and j < self.m:
return True
return False
def update(self, action, val):
self.field[action[0]][action[1]] = val
def get_actions(self, field):
indexes = np.where(field == 0)
if len(indexes[0]) == 0:
return []
return list(zip(indexes[0], indexes[1]))
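
# Minimal usage sketch (editor addition): a 3x3 tic-tac-toe position in which
# player 1 completes the top row, so evaluate() reports a win for player 1.
def _example_game():
    g = MNKgame()
    for col in range(3):
        g.update((0, col), 1)
    return g.evaluate(g.field)  # -> 1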
# ==== File: app/entries/forms.py | repo: singh-prashant/blog | license: MIT ====
from wtforms import Form, StringField, TextAreaField, SelectField, FileField, HiddenField
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .choices import *
from django.contrib.auth.forms import AuthenticationForm
from django.core.exceptions import ObjectDoesNotExist
from django.forms import ValidationError
# override basic authentication form to allow logging in with email or username
class EmailAuthenticationForm(AuthenticationForm):
def clean_username(self):
username = self.data['username']
if '@' in username:
try:
username = User.objects.get(email=username).username
except ObjectDoesNotExist:
raise ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
params={'username': self.username_field.verbose_name},
)
return username
# override basic user creation for to add required email field
class NewUserForm(UserCreationForm):
email = forms.EmailField(required=True)
class Meta:
model = User
fields = ("username", "email", "password1", "password2")
def save(self, commit=True):
user = super(NewUserForm, self).save(commit=False)
if commit:
user.save()
return user
# form for creating basic text note
class NoteForm(forms.Form):
title = forms.CharField(max_length=100)
content = forms.CharField(
widget=forms.Textarea(attrs={'width': "100%", 'cols': "80", 'rows': "20", 'height': '100%'}), required=False)
theme = forms.ChoiceField(choices=THEMES, label="Theme", initial='', widget=forms.Select(), required=True)
# form for creating list note
class NoteListForm(forms.Form):
title = forms.CharField(max_length=100)
content = forms.CharField(required=False, label="List items",
widget=forms.TextInput(attrs={'placeholder': 'Add item and press Enter'}))
theme = forms.ChoiceField(choices=THEMES, label="Theme", initial='', widget=forms.Select(), required=True)
# form for creating picture note
class NotePictureForm(forms.Form):
title = forms.CharField(max_length=100)
content = forms.CharField(
widget=forms.Textarea(attrs={'width': "100%", 'cols': "80", 'rows': "20", 'height': '100%'}), required=False)
picture = forms.ImageField()
# override picture note form to show actual picture path
class NotePictureFormUpdate(forms.Form):
title = forms.CharField(max_length=100)
content = forms.CharField(
widget=forms.Textarea(attrs={'width': "100%", 'cols': "80", 'rows': "20", 'height': '100%'}),
required=False)
picture = forms.ImageField(required=False)
| 37.791667 | 117 | 0.669974 | 307 | 2,721 | 5.905537 | 0.345277 | 0.061776 | 0.030888 | 0.041919 | 0.408163 | 0.408163 | 0.372863 | 0.372863 | 0.372863 | 0.372863 | 0 | 0.020456 | 0.209482 | 2,721 | 71 | 118 | 38.323944 | 0.822408 | 0.105108 | 0 | 0.215686 | 0 | 0 | 0.091433 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039216 | false | 0.019608 | 0.137255 | 0 | 0.607843 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bda7487b706cb9a94241cf9262532248e9f7dfec | 4,617 | py | Python | tests/export_traces_test.py | galizia-lab/pyview | 07bef637b0c60fae8830c1b3947e4a7bcd14bb2c | [
"BSD-3-Clause"
] | 2 | 2021-11-07T10:17:16.000Z | 2021-11-07T10:17:19.000Z | tests/export_traces_test.py | galizia-lab/pyview | 07bef637b0c60fae8830c1b3947e4a7bcd14bb2c | [
"BSD-3-Clause"
] | 5 | 2021-11-03T12:43:03.000Z | 2021-12-16T10:34:52.000Z | tests/export_traces_test.py | galizia-lab/pyview | 07bef637b0c60fae8830c1b3947e4a7bcd14bb2c | [
"BSD-3-Clause"
] | 1 | 2021-09-23T15:46:26.000Z | 2021-09-23T15:46:26.000Z | from common import initialize_test_yml_list_measurement
from view import VIEW
import pathlib as pl
import shutil
from view.python_core.ctvs import get_all_available_ctvs
from view.python_core.gdm_generation.gdm_data_classes import GDMFile
class TraceExporter(object):
def __init__(self):
super().__init__()
test_yml, self.test_animal, self.test_measu = initialize_test_yml_list_measurement()
self.view = VIEW()
self.view.update_flags_from_ymlfile(test_yml)
def load_and_export(self, flags_to_update, file_suffix, flags_suffix):
self.view.update_flags(flags_to_update)
self.view.initialize_animal(self.test_animal)
roi_data_dict, roi_file = self.view.get_roi_info_for_current_animal()
# initialize and empty data frame to accumulate data
gdm_file = GDMFile()
# iterate over measurements of the animal
for measu in self.view.get_measus_for_current_animal(analyze_values_to_use=(1,)):
# load a measurement for the animal
self.view.load_measurement_data_from_current_animal(measu)
# calculate signals
self.view.calculate_signals()
# create glodatamix for the loaded measurement
gdm_file_this_measu, _ = self.view.get_gdm_file_for_current_measurement(roi_data_dict)
# accumulate
gdm_file.append_from_a_gdm_file(gdm_file_this_measu)
# compose output file name
output_file = self.view.flags.get_gloDatamix_file_for_current_animal()
output_file_path = pl.Path(output_file)
test_gdm_folder =\
pl.Path(self.view.flags["STG_OdorReportPath"]) / "test_gdms" / \
f"{output_file_path.stem}{file_suffix}"
if not test_gdm_folder.is_dir():
test_gdm_folder.mkdir(parents=True)
test_output_file = test_gdm_folder / f"gdm{flags_suffix}{output_file_path.suffix}"
# save gloDatamix file
gdm_file.write_to_csv(test_output_file)
def test_export_traces_rois():
"""
Testing exporting traces using .roi files
"""
exporter = TraceExporter()
coor_path = pl.Path(exporter.view.flags["STG_OdormaskPath"])
dest_roi_file = coor_path / "Fake_data.roi"
for fle in coor_path.iterdir():
if fle.name.startswith("FakeData") and fle.suffix == ".roi":
shutil.copy(str(fle), str(dest_roi_file))
exporter.load_and_export(
flags_to_update={"RM_ROITrace": 3},
file_suffix=f"_from_roi{fle.stem.lstrip('FakeData')}",
flags_suffix="_defaults"
)
dest_roi_file.unlink()
def test_export_traces_mask_tif():
"""
Testing exporting traces using .roi.tif files
"""
exporter = TraceExporter()
exporter.load_and_export(
flags_to_update={"RM_ROITrace": 4},
file_suffix="_from_roi_tif",
flags_suffix="_defaults"
)
def test_export_traces_different_ctvs():
"""
Testing exporting traces with different CTVs
"""
exporter = TraceExporter()
for ctv in get_all_available_ctvs():
exporter.load_and_export(
flags_to_update={"RM_ROITrace": 3, "CTV_Method": ctv},
file_suffix=f"_from_roi",
flags_suffix=f"_ctv{ctv}"
)
def test_export_traces_within_ROI():
"""
Testing exporting traces considering the area file
"""
exporter = TraceExporter()
exporter.load_and_export(
flags_to_update={"RM_ROITrace": 3, "GDM_withinArea": True},
file_suffix="_from_roi",
flags_suffix="_withinArea_True"
)
def test_export_traces_chunks_only():
"""
    Testing exporting traces output in chunks around the stimulus
"""
exporter = TraceExporter()
exporter.load_and_export(
flags_to_update=
{
"RM_ROITrace": 3,
"GDM_outputType": "chunks_only",
"GDM_chunkPostStim": 2, # in seconds
"GDM_chunkPreStim": 2, # in seconds
},
file_suffix="_chunks_only",
flags_suffix="_2secPrePostStim"
)
exporter.load_and_export(
flags_to_update=
{
"RM_ROITrace": 3,
"GDM_outputType": "chunks_only",
"GDM_chunkPostStim": 100, # in seconds
"GDM_chunkPreStim": 100, # in seconds
},
file_suffix="_chunks_only",
flags_suffix="_full"
)
if __name__ == '__main__':
test_export_traces_rois()
# test_export_traces_mask_tif()
# test_export_traces_within_ROI()
test_export_traces_chunks_only() | 27.981818 | 98 | 0.65627 | 566 | 4,617 | 4.922261 | 0.236749 | 0.031587 | 0.051687 | 0.045226 | 0.335607 | 0.208543 | 0.208543 | 0.208543 | 0.179828 | 0.179828 | 0 | 0.004638 | 0.252762 | 4,617 | 165 | 99 | 27.981818 | 0.802899 | 0.126922 | 0 | 0.225806 | 0 | 0 | 0.131118 | 0.029419 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075269 | false | 0 | 0.064516 | 0 | 0.150538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bda7fb3776a5d1e908f28700d29753c520db4037 | 2,551 | py | Python | app/entries/forms.py | singh-prashant/blog | 7c4d2e2d6890d3f0b48741b1090e41a990cad1de | [
"MIT"
] | null | null | null | app/entries/forms.py | singh-prashant/blog | 7c4d2e2d6890d3f0b48741b1090e41a990cad1de | [
"MIT"
] | null | null | null | app/entries/forms.py | singh-prashant/blog | 7c4d2e2d6890d3f0b48741b1090e41a990cad1de | [
"MIT"
] | null | null | null | from wtforms import Form, StringField, TextAreaField,SelectField, FileField,HiddenField
from wtforms.validators import DataRequired, Optional, Email, URL, Length
from models import Entry, Tag
class TagField(StringField):
def _value(self):
if self.data:
#Display tags as a comma-separated list.
return ', '.join([tag.name for tag in self.data])
return ''
def get_tags_from_string(self, tag_string):
raw_tags = tag_string.split(',')
#Filter out any empty tag
tag_names = [name.strip() for name in raw_tags if name.strip()]
#Query the database and retrieve any tags we have already saved
existing_tags = Tag.query.filter(Tag.name.in_(tag_names))
#Determine which tag names are new.
new_names = set(tag_names) - set([tag.name for tag in existing_tags])
#Create a list of unsaved Tag instances for the new tags
new_tags = [Tag(name=name) for name in new_names]
#Return all the existing tags + all new, unsaved tags
return list(existing_tags)+new_tags
def process_formdata(self, valuelist):
if valuelist:
self.data = self.get_tags_from_string(valuelist[0])
else:
self.data = []
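# TagField round-trips tags through the form: _value() renders the Tag objects as a
# comma-separated string for display, while process_formdata() parses user input back
# into a mix of existing and newly created (unsaved) Tag instances.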
class ImageForm(Form):
file = FileField('Image File')
class EntryForm(Form):
title = StringField('Title', validators=[DataRequired()])
body = TextAreaField('Body', validators=[DataRequired()])
status = SelectField(
'Entry Status',
choices=(
(Entry.STATUS_PUBLIC,'Public'),
(Entry.STATUS_DRAFT,'Draft')),
coerce=int
)
tags = TagField(
'Tag',
description='Separate multiple tags with commas.'
)
def save_entry(self, entry):
self.populate_obj(entry)
entry.generate_slug()
return entry
class CommentForm(Form):
name = StringField('Name',validators=[DataRequired()])
email = StringField('Email',validators=[DataRequired(),Email()])
url = StringField('Url', validators=[Optional(), URL()])
body = TextAreaField('Comment', validators=[DataRequired(),Length(min=10, max=3000)])
entry_id = HiddenField(validators=[DataRequired()])
def validate(self):
if not super(CommentForm, self).validate():
return False
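        # A comment is only valid if it targets an entry that exists and is public.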
entry = Entry.query.filter(
(Entry.status == Entry.STATUS_PUBLIC),
(Entry.id == self.entry_id.data)
).first()
if not entry:
return False
return True | 31.109756 | 89 | 0.633477 | 301 | 2,551 | 5.265781 | 0.355482 | 0.083281 | 0.012618 | 0.016404 | 0.018927 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003686 | 0.255586 | 2,551 | 82 | 90 | 31.109756 | 0.830964 | 0.104273 | 0 | 0.035714 | 0 | 0 | 0.044737 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089286 | false | 0 | 0.053571 | 0 | 0.517857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdab079e812edf7ae50cfd1cbee57eb0f820a648 | 5,978 | py | Python | oidc/endpoints/authorize.py | didx-xyz/yoma-oidc-bridge | 7e3ff6ab3ea4fed01cd7d4c113c7c3b3244356eb | [
"Apache-2.0"
] | null | null | null | oidc/endpoints/authorize.py | didx-xyz/yoma-oidc-bridge | 7e3ff6ab3ea4fed01cd7d4c113c7c3b3244356eb | [
"Apache-2.0"
] | null | null | null | oidc/endpoints/authorize.py | didx-xyz/yoma-oidc-bridge | 7e3ff6ab3ea4fed01cd7d4c113c7c3b3244356eb | [
"Apache-2.0"
] | null | null | null | from aca.client import ACAClient
from aries_cloudcontroller.aries_controller import AriesAgentController
from asgiref.sync import sync_to_async, async_to_sync
from django.utils import timezone
from datetime import timedelta
from aca.models import PresentationFactory
from oidc.utils.shortener import create_short_url
from oidc.models import AuthSession, PresentationConfigurations, MappedUrl
from django.conf import settings
import asyncio
WEBHOOK_HOST = "https://8b1dec9d51dd.ngrok.io"
WEBHOOK_PORT = 443
WEBHOOK_BASE = "https://8b1dec9d51dd.ngrok.io/webhooks/"
def authorization(pres_req_conf_id: str, request_parameters: dict):
aca_client = ACAClient(settings.ACA_PY_URL, settings.ACA_PY_TRANSPORT_URL)
presentation_configuration = PresentationConfigurations.objects.get(
id=pres_req_conf_id
)
response = aca_client.create_proof_request(presentation_configuration.to_json())
print('PROOF CREATE', response)
public_did = aca_client.get_public_did()
print('DID', public_did)
endpoint = aca_client.get_endpoint_url()
print('ENDPOINT', endpoint)
presentation_request = PresentationFactory.from_params(
presentation_request=response.get("presentation_request"),
p_id=response.get("thread_id"),
verkey=[public_did.get("verkey")],
endpoint=endpoint,
).to_json()
print('PROOF REQUEST ', presentation_request)
presentation_request_id = response["presentation_exchange_id"]
session = AuthSession.objects.create(
presentation_record_id=pres_req_conf_id,
presentation_request_id=presentation_request_id,
presentation_request=presentation_request,
request_parameters=request_parameters,
expired_timestamp=timezone.now() + timedelta(minutes=60),
)
url, b64_presentation = create_short_url(presentation_request)
mapped_url = MappedUrl.objects.create(url=url, session=session)
short_url = mapped_url.get_short_url()
return short_url, str(session.pk), presentation_request_id, b64_presentation
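# The helpers below wrap the Django ORM calls with sync_to_async so that the
# async authorization flow further down can await them.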
@sync_to_async
def getPresentationConfig(pres_req_conf_id: str):
return PresentationConfigurations.objects.get(
id=pres_req_conf_id
)
@sync_to_async
def createSession(pres_req_conf_id, presentation_request_id, presentation_request, request_parameters, url):
session = AuthSession.objects.create(
presentation_record_id=pres_req_conf_id,
presentation_request_id=presentation_request_id,
presentation_request=presentation_request,
request_parameters=request_parameters,
expired_timestamp= timezone.now() + timedelta(minutes=60),
)
mapped_url = MappedUrl.objects.create(url=url, session=session)
print(mapped_url)
short_url = mapped_url.get_short_url()
print(short_url)
return session, mapped_url, short_url
async def authorization_async(pres_req_conf_id: str, request_parameters: dict):
# Based on the aca-py agent you wish to control
# print('AGENT CONNECT')
agent_controller = AriesAgentController(admin_url=settings.ACA_PY_URL)
# print('ACAPY AGENT CONNECTED')
# print('WEBHOOOKS STARTING')
# await asyncio.gather(agent_controller.init_webhook_server(webhook_host=WEBHOOK_HOST, webhook_port=WEBHOOK_PORT, webhook_base=WEBHOOK_BASE))
# print('WEBHOOOKS STARTED')
presentation_configuration = await getPresentationConfig(pres_req_conf_id)
print('PRESENTATION CONFIG: ', presentation_configuration)
# response = await agent_controller.proofs.create_request(presentation_configuration.to_json())
response = await asyncio.gather(agent_controller.proofs.create_request(presentation_configuration.to_json()))
response = response[0]
print('PROOF CREATE: ', response)
# TODO - the current DID of the Agent is already ledgered on Stagingnet
# This creates a scenario where the endpoint being fetched is wrong
# Need to update the code so that new DIDs can be ledgered to stagingnet together with endpoints
public_did = await asyncio.gather(agent_controller.wallet.get_public_did())
public_did = public_did[0]['result']
print('PUBLIC DID: ', public_did)
endpoint = await asyncio.gather(agent_controller.ledger.get_did_endpoint(public_did['did']))
endpoint = endpoint[0]['endpoint']
print('ENDPOINT: ', endpoint)
    # TODO - this will fail due to no TAA accepted on ledger
TAA_response = await agent_controller.ledger.get_taa()
TAA = TAA_response['result']['taa_record']
TAA['mechanism'] = "service_agreement"
# print(TAA)
TAA_accept = await agent_controller.ledger.accept_taa(TAA)
## Will return {} if successful
print(TAA_accept)
await asyncio.gather(agent_controller.wallet.set_did_endpoint(public_did['did'], settings.ACA_PY_TRANSPORT_URL, 'Endpoint'))
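    # Persist the transport endpoint for the public DID, then re-read it from the
    # ledger to confirm the update took effect.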
endpoint = await asyncio.gather(agent_controller.ledger.get_did_endpoint(public_did['did']))
endpoint = endpoint[0]['endpoint']
print('ENDPOINT ', endpoint)
presentation_request = PresentationFactory.from_params(
presentation_request=response.get("presentation_request"),
p_id=response.get("thread_id"),
verkey=[public_did.get("verkey")],
endpoint=endpoint,
).to_json()
print('PROOF REQUEST: ', presentation_request)
presentation_request_id = response["presentation_exchange_id"]
url, b64_presentation = create_short_url(presentation_request)
print(url)
session, mapped_url, short_url = await createSession(pres_req_conf_id, presentation_request_id, presentation_request, request_parameters, url)
print('SESSION ', session)
print('sessionpk: ', str(session.pk))
print('mapped_url: ', mapped_url)
print('short_url: ', short_url)
print('presx_id: ', presentation_request_id)
print('b64 presx: ', b64_presentation)
await agent_controller.terminate()
return short_url, str(session.pk), presentation_request_id, b64_presentation
| 42.7 | 146 | 0.761124 | 732 | 5,978 | 5.915301 | 0.202186 | 0.118476 | 0.053349 | 0.030023 | 0.564896 | 0.5 | 0.481986 | 0.469053 | 0.40485 | 0.381293 | 0 | 0.006485 | 0.148712 | 5,978 | 139 | 147 | 43.007194 | 0.84437 | 0.119438 | 0 | 0.423077 | 0 | 0 | 0.085366 | 0.009146 | 0 | 0 | 0 | 0.007194 | 0 | 1 | 0.028846 | false | 0 | 0.105769 | 0.009615 | 0.173077 | 0.192308 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdae540df0f84f457e5404b4e6682360e4f75f83 | 5,163 | py | Python | psana/psana/graphqt/IVSpectrum.py | ZLLentz/lcls2 | 3edbea556779f619944ee9b97fb33cd815a19a37 | [
"BSD-3-Clause-LBNL"
] | null | null | null | psana/psana/graphqt/IVSpectrum.py | ZLLentz/lcls2 | 3edbea556779f619944ee9b97fb33cd815a19a37 | [
"BSD-3-Clause-LBNL"
] | null | null | null | psana/psana/graphqt/IVSpectrum.py | ZLLentz/lcls2 | 3edbea556779f619944ee9b97fb33cd815a19a37 | [
"BSD-3-Clause-LBNL"
] | null | null | null |
"""Class :py:class:`IVSpectrum` is a QWidget with histogram, two axes, and color bar
====================================================================================
Usage ::
# Run test: python lcls2/psana/psana/graphqt/IVSpectrum.py
from psana.graphqt.IVSpectrum import IVSpectrum
w = IVSpectrum()
Created on 2021-06-22 by Mikhail Dubrovin
"""
import logging
logger = logging.getLogger(__name__)
from psana.graphqt.FWViewHist import FWViewHist
from psana.graphqt.FWViewAxis import FWViewAxis
from psana.graphqt.FWViewColorBar import FWViewColorBar
import psana.graphqt.ColorTable as ct
from PyQt5.QtWidgets import QWidget, QGridLayout, QPushButton, QTextEdit
from PyQt5.QtCore import Qt, QRectF
def test_image():
import psana.pyalgos.generic.NDArrGenerators as ag
return ag.random_standard((8,12), mu=0, sigma=10)
class IVSpectrum(QWidget):
"""QWidget for Image Viewer"""
def __init__(self, **kwargs):
parent = kwargs.get('parent', None)
image = kwargs.get('image', test_image())
QWidget.__init__(self, parent)
ctab = ct.color_table_interpolated()
rs=QRectF(0, 0, 100, 1000)
self.whis = FWViewHist(self, rs, origin='DR', scale_ctl='V', fgcolor='yellow', bgcolor='dark', orient='V')
self.wcbar = FWViewColorBar(self, coltab=ctab, orient='V')
r = self.whis.sceneRect()
rscx = QRectF(r.x(), 0, r.width(), 1)
rscy = QRectF(0, r.y(), 1, r.height())
self.wax = FWViewAxis(None, rscx, side='U', origin='UR', scale_ctl=True, wwidth=30, wlength=200)
self.way = FWViewAxis(None, rscy, side='L', origin='DL', scale_ctl=True, wwidth=60, wlength=200)
self.but_reset = QPushButton('Reset')
self.edi_info = QTextEdit('Info')
self.box = QGridLayout()
self.box.setSpacing(0)
self.box.setVerticalSpacing(0)
self.box.setHorizontalSpacing(0)
self.box.addWidget(self.edi_info, 0, 0, 1, 11)
self.box.addWidget(self.way, 1, 10, 9, 1)
self.box.addWidget(self.whis, 1, 0, 9, 10)
self.box.addWidget(self.wax, 10, 0, 1, 9)
self.box.addWidget(self.wcbar, 1, 9, 9, 1)
self.box.addWidget(self.but_reset, 10, 9, 1, 2, alignment=Qt.AlignCenter)
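        # Grid layout: info box across the top, histogram filling the center,
        # color bar and y-axis on the right, x-axis and reset button along the bottom.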
self.setLayout(self.box)
self.set_tool_tips()
self.set_style()
self.connect_scene_rect_changed()
self.but_reset.clicked.connect(self.on_but_reset)
def connect_scene_rect_changed(self):
self.whis.connect_scene_rect_changed_to(self.on_whis_scene_rect_changed)
self.wax.connect_scene_rect_changed_to(self.on_wax_scene_rect_changed)
self.way.connect_scene_rect_changed_to(self.on_way_scene_rect_changed)
def disconnect_scene_rect_changed(self):
self.whis.disconnect_scene_rect_changed_from(self.on_whis_scene_rect_changed)
self.wax.disconnect_scene_rect_changed_from(self.on_wax_scene_rect_changed)
self.way.disconnect_scene_rect_changed_from(self.on_way_scene_rect_changed)
def on_but_reset(self):
logger.debug('on_but_reset')
if self.whis is not None:
self.whis.reset_original_size()
def on_whis_scene_rect_changed(self, r):
#logger.debug('on_whis_scene_rect_changed: %s'%str(r))
self.wax.set_view(rs=QRectF(r.x(), 0, r.width(), 1))
self.way.set_view(rs=QRectF(0, r.y(), 1, r.height()))
self.update_info()
def on_wax_scene_rect_changed(self, r):
#logger.debug('on_wax_scene_rect_changed: %s'%str(r))
rs = self.whis.scene().sceneRect()
self.whis.set_view(rs=QRectF(r.x(), rs.y(), r.width(), rs.height()))
def on_way_scene_rect_changed(self, r):
#logger.debug('on_way_scene_rect_changed: %s'%str(r))
rs = self.whis.scene().sceneRect()
self.whis.set_view(rs=QRectF(rs.x(), r.y(), rs.width(), r.height()))
self.update_info()
def update_info(self):
r = self.whis.scene().sceneRect()
self.edi_info.setText('Spectrum min: %d max: %d' % (r.y(), r.y()+r.height()))
def set_tool_tips(self):
self.whis.setToolTip('Spectrum')
def set_style(self):
self.layout().setContentsMargins(0,0,0,0)
#self.but_reset.setFixedSize(60,30)
self.wcbar.setFixedWidth(25)
#self.edi_info.setFixedHeight(100)
self.edi_info.setMaximumHeight(50)
def set_pixmap_from_arr(self, arr, set_def=True):
"""shortcat to image"""
self.whis.set_pixmap_from_arr(arr, set_def)
def reset_original_size(self):
"""shortcat to image"""
self.whis.reset_original_size()
if __name__ == "__main__":
import os
import sys
os.environ['LIBGL_ALWAYS_INDIRECT'] = '1' #export LIBGL_ALWAYS_INDIRECT=1
from PyQt5.QtWidgets import QApplication
logging.basicConfig(format='[%(levelname).1s] L%(lineno)04d %(name)s : %(message)s', level=logging.DEBUG)
app = QApplication(sys.argv)
w = IVSpectrum()
w.setGeometry(100, 50, 300, 800)
w.setWindowTitle('Image with two axes')
w.show()
app.exec_()
del w
del app
# EOF
| 32.88535 | 114 | 0.650978 | 723 | 5,163 | 4.432918 | 0.26556 | 0.05897 | 0.104836 | 0.062403 | 0.313885 | 0.263027 | 0.206864 | 0.127925 | 0.042434 | 0.042434 | 0 | 0.026195 | 0.201433 | 5,163 | 156 | 115 | 33.096154 | 0.751152 | 0.131125 | 0 | 0.065217 | 0 | 0 | 0.042405 | 0.004712 | 0 | 0 | 0 | 0 | 0 | 1 | 0.141304 | false | 0 | 0.119565 | 0 | 0.282609 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdb6b56e79718d96881ce563456b6ed24e5bfc35 | 2,912 | py | Python | saige/load_results.py | Nealelab/ukb_common | ee063971d48e15ea4c525d26cf6745930d7106dc | [
"MIT"
] | 8 | 2020-03-06T12:32:44.000Z | 2021-11-17T18:00:13.000Z | saige/load_results.py | Nealelab/ukb_common | ee063971d48e15ea4c525d26cf6745930d7106dc | [
"MIT"
] | 1 | 2021-11-02T20:09:05.000Z | 2021-11-03T13:10:05.000Z | saige/load_results.py | Nealelab/ukb_common | ee063971d48e15ea4c525d26cf6745930d7106dc | [
"MIT"
] | 3 | 2020-07-27T04:14:52.000Z | 2021-09-15T13:43:23.000Z | #!/usr/bin/env python3
__author__ = 'konradk'
from ukb_common import *
import argparse
import tempfile
PHENO_KEY_FIELDS = ('trait_type', 'phenocode', 'pheno_sex', 'coding', 'modifier')
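# Argument names that together uniquely identify a phenotype; they are collected
# into pheno_key_dict below and passed through to the loaders.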
def main(args):
hl.init(master=f'local[{args.n_threads}]',
log=hl.utils.timestamp_path(os.path.join(tempfile.gettempdir(), 'load_results'), suffix='.log'),
default_reference=args.reference)
cases, controls = get_cases_and_controls_from_log(args.saige_run_log_format)
quantitative_trait = args.trait_type in ('continuous', 'biomarkers')
heritability = get_heritability_from_log(args.null_glmm_log, quantitative_trait) if args.null_glmm_log else -1.0
inv_normalized = get_inverse_normalize_status(args.null_glmm_log) if args.null_glmm_log else 'NA'
saige_version = get_saige_version_from_log(args.null_glmm_log) if args.null_glmm_log else 'NA'
extension = 'single.txt' if args.analysis_type == 'gene' else 'single_variant.txt'
pheno_key_dict = {k: getattr(args, k) for k in PHENO_KEY_FIELDS}
if args.analysis_type == 'gene':
load_gene_data(args.input_dir, pheno_key_dict, args.gene_map_ht_raw_path, cases, controls, heritability, saige_version, inv_normalized, args.overwrite)
load_variant_data(args.input_dir, pheno_key_dict, args.ukb_vep_ht_path, extension, cases, controls, heritability, saige_version, inv_normalized, args.overwrite,
args.legacy_annotations)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir', help='Input directory', required=True)
parser.add_argument('--trait_type', help='Trait type', required=True)
parser.add_argument('--phenocode', help='Phenotype ID', required=True)
parser.add_argument('--pheno_sex', help='Phenotype sex', default='both_sexes')
parser.add_argument('--coding', help='Phenotype coding', default='')
parser.add_argument('--modifier', help='Phenotype modifier', default='')
parser.add_argument('--null_glmm_log', help='Path to log file from null model')
parser.add_argument('--saige_run_log_format', help='Path to log file from SAIGE test with {chr} placeholder', required=True)
parser.add_argument('--analysis_type', help='Analysis type', choices=('gene', 'variant'), default='gene')
parser.add_argument('--reference', help='Reference genome', default='GRCh38')
parser.add_argument('--gene_map_ht_raw_path', help='Path to raw gene map')
parser.add_argument('--ukb_vep_ht_path', help='Path to UKB VEP data', required=True)
parser.add_argument('--n_threads', help='Number of threads to run', type=int, default=8)
parser.add_argument('--legacy_annotations', help='Use old annotation picking (preferred for genotype data)', action='store_true')
parser.add_argument('--overwrite', help='Overwrite everything', action='store_true')
args = parser.parse_args()
main(args) | 56 | 164 | 0.736607 | 404 | 2,912 | 5.007426 | 0.309406 | 0.066733 | 0.12605 | 0.044488 | 0.286209 | 0.176965 | 0.131488 | 0.131488 | 0.099852 | 0.037568 | 0 | 0.002368 | 0.129808 | 2,912 | 52 | 165 | 56 | 0.795975 | 0.007212 | 0 | 0 | 0 | 0 | 0.260809 | 0.023175 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025641 | false | 0 | 0.076923 | 0 | 0.102564 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdb72defd6c6f62fdddbb438cb6348c91bc60611 | 4,981 | py | Python | app/db/crud/recipeBewertung.py | baldur132/essensfindung | e1a8106d8a1de857340229a5fe36ca6910c55b35 | [
"MIT"
] | 1 | 2022-01-29T20:33:30.000Z | 2022-01-29T20:33:30.000Z | app/db/crud/recipeBewertung.py | baldur132/essensfindung | e1a8106d8a1de857340229a5fe36ca6910c55b35 | [
"MIT"
] | 2 | 2022-03-08T06:41:22.000Z | 2022-03-09T11:52:06.000Z | app/db/crud/recipeBewertung.py | baldur132/essensfindung | e1a8106d8a1de857340229a5fe36ca6910c55b35 | [
"MIT"
] | 6 | 2022-01-06T15:02:59.000Z | 2022-02-02T08:08:56.000Z | """All DB functions for the Bewertung table"""
from typing import List
from typing import Union
import sqlalchemy
from sqlalchemy.orm import Session
from db.base import BewertungRecipe
from db.base import Person
from db.crud.user import get_user_by_mail
from schemes import scheme_recipe
from schemes import scheme_user
from schemes.exceptions import DatabaseException
from schemes.exceptions import DuplicateEntry
from schemes.exceptions import UserNotFound
from tools.my_logging import logger
def get_bewertung_from_user_to_recipe(
db: Session, user: scheme_user.UserBase, recipe: scheme_recipe.RecipeBase
) -> BewertungRecipe:
"""Return a specific bewertung from a user to only one recipe
Args:
db (Session): Session to the DB
        user (scheme_user.UserBase): Specifies the user
        recipe (scheme_recipe.RecipeBase): Specifies the recipe
    Returns:
        BewertungRecipe: The single Bewertung that matches the given user and recipe
"""
return (
db.query(BewertungRecipe)
.join(Person, Person.email == BewertungRecipe.person_email)
.filter(Person.email == user.email)
.filter(BewertungRecipe.rezept_id == recipe.id)
.first()
)
def get_all_user_bewertungen(db: Session, user: scheme_user.UserBase) -> Union[List[BewertungRecipe], None]:
"""Return all bewertugen from one to the recipes User
Args:
db (Session): Session to the DB
user (scheme_user.UserBase): The user to select
Returns:
Union[List[BewertungRecipe], None]
"""
user: Person = get_user_by_mail(db, user.email)
if user is None:
return None
else:
return user.bewertungenRezept
def create_bewertung(db: Session, assessment: scheme_recipe.RecipeBewertungCreate) -> BewertungRecipe:
"""Create / Add a Bewertung to the DB. Timestamp and ID will set automatic.
Args:
db (Session): Session to the DB
        assessment (scheme_recipe.RecipeBewertungCreate): Bewertung to add. This includes the
            Person and Recipe used to map the Bewertung
Raises:
UserNotFound: If the user does not exist
DuplicateEntry: Duplicate Primary Key
Returns:
BewertungRecipe: Return if success
"""
if get_user_by_mail(db, assessment.person.email) is None:
raise UserNotFound(f"User {assessment.person.email} does not exist", assessment.person.email)
db_assessment = BewertungRecipe(
person_email=assessment.person.email,
rezept_id=assessment.recipe.id,
rezept_name=assessment.name,
kommentar=assessment.comment,
rating=assessment.rating,
)
try:
db.add(db_assessment)
db.commit()
db.refresh(db_assessment)
logger.info(
"Added assessment to db... recipe id:%s\temail:%s\trating:%s\tcomment:%s",
db_assessment.rezept_id,
db_assessment.person_email,
db_assessment.rating,
db_assessment.kommentar,
)
return db_assessment
except sqlalchemy.exc.IntegrityError as error:
raise DuplicateEntry("Assessment already exist") from error
def update_assessment(
db: Session, old_bewertung: scheme_recipe.RecipeBewertungCreate, new_bewertung: scheme_recipe.RecipeBewertungCreate
) -> BewertungRecipe:
"""Update the comment and rating of a bewertung
Args:
db (Session): Session to the DB
old_bewertung (scheme_recipe.RecipeBewertungCreate): The old Bewertung
new_bewertung (scheme_recipe.RecipeBewertungCreate): The updated Bewertung
Returns:
BewertungRecipe: New Bewertung from `get_bewertung_from_user_to_recipe`
"""
rows = (
db.query(BewertungRecipe)
.filter(BewertungRecipe.person_email == old_bewertung.person.email)
.filter(BewertungRecipe.rezept_id == old_bewertung.recipe.id)
.update({BewertungRecipe.kommentar: new_bewertung.comment, BewertungRecipe.rating: new_bewertung.rating})
)
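    # Query.update() returns the number of rows matched; zero means no such
    # Bewertung exists for this user/recipe pair.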
if rows == 0:
raise DatabaseException("Can not update assessment. Does the User and the Recipe exist?")
db.commit()
logger.info("Updated bewertung %s - %s", old_bewertung.person.email, old_bewertung.recipe.id)
return get_bewertung_from_user_to_recipe(db, new_bewertung.person, new_bewertung.recipe)
def delete_bewertung(db: Session, user: scheme_user.UserBase, recipe: scheme_recipe.RecipeBase) -> int:
"""Delete one Bewertung
Args:
db (Session): Session to the db
user (scheme_user.User): The owner of the Bewertung
recipe (scheme_recipe.RecipeBase): The corrosponding Recipe
Returns:
        int: Number of affected rows
"""
rows = (
db.query(BewertungRecipe)
.filter(BewertungRecipe.person_email == user.email, BewertungRecipe.rezept_id == recipe.id)
.delete()
)
db.commit()
logger.info("Deleted bewertung %s - %s", user.email, recipe.id)
return rows
| 34.116438 | 119 | 0.700863 | 598 | 4,981 | 5.704013 | 0.202341 | 0.041923 | 0.024626 | 0.032249 | 0.278804 | 0.164761 | 0.147464 | 0.122545 | 0.075344 | 0.075344 | 0 | 0.000257 | 0.218229 | 4,981 | 145 | 120 | 34.351724 | 0.875706 | 0.299739 | 0 | 0.131579 | 0 | 0.013158 | 0.076271 | 0.019068 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065789 | false | 0 | 0.171053 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdb903aa24f6b049c642dc46fbe0678bd7b992ac | 35,706 | py | Python | modules/tbc_mage.py | ClawDoctor/TBC_GUI_sim | ebdb40ef348f5b00b10f6323f07260f47e8aab74 | [
"MIT"
] | null | null | null | modules/tbc_mage.py | ClawDoctor/TBC_GUI_sim | ebdb40ef348f5b00b10f6323f07260f47e8aab74 | [
"MIT"
] | null | null | null | modules/tbc_mage.py | ClawDoctor/TBC_GUI_sim | ebdb40ef348f5b00b10f6323f07260f47e8aab74 | [
"MIT"
] | null | null | null | import fns
import numpy as np
import sys
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
from .libs import tbc_mage_backend as bck
import importlib
importlib.reload(bck)
def read_stat_file(location, file, stats):
loc = '/'.join(location.split('/')[:-1])+'/'+file
with open(loc) as f:
#print('stats for '+location+': '+loc)
for line in f:
if '#' in line:
line = line.split('#')[0]
sp = line.split()
if len(sp)>1:
if not sp[0][0] == '#':
                    stat_keys = ('intellect', 'spirit', 'common_spell_damage',
                                 'crit_rating', 'hit_rating', 'mp5', 'fire_damage',
                                 'frost_damage', 'arcane_damage', 'haste_rating')
                    if sp[0] in stat_keys:
                        stats[sp[0]] = float(sp[1].strip())
class mage_file:
def __init__(self,location):
self.location= location
self.label = 'no label'
self.stats = {}
self.talents = bck.make_talents()
self.burn_rot = []
self.save_rot = []
with open(location) as f:
for line in f:
if '#' in line:
line = line.split('#')[0]
sp = line.split()
if len(sp)>1:
if not sp[0][0] == '#':
if sp[0] == 'stats_file':
read_stat_file(location, sp[1], self.stats)
                        stat_keys = ('intellect', 'spirit', 'common_spell_damage',
                                     'crit_rating', 'hit_rating', 'mp5', 'fire_damage',
                                     'frost_damage', 'arcane_damage', 'haste_rating')
                        if sp[0] in stat_keys:
                            self.stats[sp[0]] = float(sp[1].strip())
for talent in self.talents:
if sp[0] == talent:
self.talents[talent] = int(sp[1].strip())
if sp[0] == 'burn_rotation:':
for i in range(1,len(sp)):
self.burn_rot.append(sp[i])
if sp[0] == 'save_rotation:':
for i in range(1,len(sp)):
self.save_rot.append(sp[i])
if sp[0] == 'label':
self.label = ' '.join(sp[1:])
if sp[0] == 'color':
self.color = [0,0,0,1]
self.color[0] = float(sp[1])
self.color[1] = float(sp[2])
self.color[2] = float(sp[3])
def parse_rot(rot):
new_rot =[]
l = len(rot)
for i, spell in enumerate(rot):
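        # pos_ign counts how many of the upcoming casts land quickly enough to keep
        # the ignite rolling (an assumption inferred from the variable name and the
        # '_one_tick'/'_one_roll' spell-variant names used below).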
if spell == 'fireball':
pos_ign = 0
if rot[(i+1)%l] == 'fireball':
pos_ign +=1
if rot[(i+2)%l] == 'fireball':
pos_ign +=1
elif rot[(i+2)%l] == 'scorch' and rot[(i+3)%l] == 'scorch':
pos_ign +=1
elif rot[(i+1)%l] == 'scorch' and rot[(i+2)%l] == 'scorch':
pos_ign +=1
if rot[(i+3)%l] == 'fireball':
pos_ign +=1
elif rot[(i+3)%l] == 'scorch' and rot[(i+4)%l] == 'scorch':
pos_ign +=1
if pos_ign == 2:
new_rot.append('fireball_13_one_tick')
elif pos_ign == 1:
new_rot.append('fireball_13_one_tick_one_roll')
elif pos_ign == 0:
new_rot.append('fireball_13_one_tick_no_roll')
elif spell == 'scorch':
pos_ign = 0
if rot[(i+1)%l] == 'fireball':
pos_ign +=1
if rot[(i+2)%l] == 'fireball':
pos_ign +=1
elif rot[(i+2)%l] == 'scorch' and rot[(i+3)%l] == 'scorch':
pos_ign +=1
elif rot[(i+1)%l] == 'scorch' and rot[(i+2)%l] == 'scorch':
pos_ign +=1
if rot[(i+3)%l] == 'fireball':
pos_ign +=1
elif rot[(i+3)%l] == 'scorch' and rot[(i+4)%l] == 'scorch':
pos_ign +=1
if pos_ign == 2:
new_rot.append('scorch_9')
elif pos_ign == 1:
new_rot.append('scorch_9_one_roll')
elif pos_ign == 0:
new_rot.append('scorch_9_no_roll')
new_rot.append('scorch_9')
elif spell == 'fireblast':
new_rot.append('fireblast')
elif spell == 'arcane_missiles':
new_rot.append('arcane_missiles_10')
elif spell == 'frostbolt':
new_rot.append('frostbolt_13')
elif spell == 'arcane_blast_0speed_0mana':
new_rot.append('arcane_blast_1_0speed_0mana')
elif spell == 'arcane_blast_1speed_1mana':
new_rot.append('arcane_blast_1_1speed_1mana')
elif spell == 'arcane_blast_2speed_2mana':
new_rot.append('arcane_blast_1_2speed_2mana')
elif spell == 'arcane_blast_3speed_3mana':
new_rot.append('arcane_blast_1_3speed_3mana')
elif spell == 'arcane_blast_1speed_0mana':
new_rot.append('arcane_blast_1_1speed_0mana')
elif spell == 'arcane_blast_2speed_0mana':
new_rot.append('arcane_blast_1_2speed_0mana')
elif spell == 'arcane_blast_3speed_0mana':
new_rot.append('arcane_blast_1_3speed_0mana')
else:
print('spell '+ spell+ ' not found, possible spells are:')
pos_spells = ['fireball_13_one_tick',
'fireball',
'scorch',
'fireblast',
#'pyroblast',
#'pom_pyroblast',
'arcane_missiles',
'arcane_blast_0speed_0mana',
'arcane_blast_1speed_1mana',
'arcane_blast_2speed_2mana',
'arcane_blast_3speed_3mana',
'arcane_blast_1speed_0mana',
'arcane_blast_2speed_0mana',
'arcane_blast_3speed_0mana',
'frostbolt',
]
for spell in pos_spells:
print(spell)
return new_rot
class moduleClass:
filetypes=['mage']
def __init__ (self, fig, locations, frame, ui):
self.fig=fig
self.frame=frame
self.locations=locations
self.ui=ui
def run(self):
if self.ui['save_check']:
            import os
            # exist_ok replaces the old bare try/except, which silently swallowed all errors
            os.makedirs(self.ui['save_filename'], exist_ok=True)
ui=self.ui
fig=self.fig
#prepare figure
fig.clear()
#load mages
mage_colors = [[0.5,0,1,1],
[1,0.5,0,1],
[0.2,0.2,1,1],
[0,0,0,1],
[0.5,0,1,1],
[1,1,0,1],
[0.2,1,1,1],
[0,1,0,1],
]
self.mages=[]
for i, location in enumerate(self.locations):
self.mages.append(mage_file(location))
            optimized_save_rots = ('arcane_frost_clearcasting_optimized',
                                   'fireball_spam_clearcasting_optimized',
                                   'frostbolt_spam_clearcasting_optimized',
                                   'scorch_spam_clearcasting_optimized')
            if self.mages[-1].save_rot[0] not in optimized_save_rots:
                self.mages[-1].save_rot = parse_rot(self.mages[-1].save_rot)
            if self.mages[-1].burn_rot[0] not in ('None', 'AB_spam_clearcasting_optimized'):
                self.mages[-1].burn_rot = parse_rot(self.mages[-1].burn_rot)
if not hasattr(self.mages[-1],'color'):
self.mages[-1].color = mage_colors[i%8]
for key in ['disable_arcane_power', 'disable_icy_veins', 'disable_cold_snap', 'disable_water_elemental', 'disable_combustion', 'disable_PoM_pyro', 'ignore_scorch_ramp']:
self.mages[-1].talents[key] = ui[key]
#load buffs
buff_cases = []
for i in range(5):
#merge coe and cos, as in patch 2.4(?)
ui['buff_case_'+str(i)+'_curse_of_shadow'] = ui['buff_case_'+str(i)+'_curse_of_elements']
buff_cases.append({})
buff_case_str = 'buff_case_'+str(i)+'_'
for key in ui:
if buff_case_str in key:
buff = key.split(buff_case_str)[1]
try:
buff_cases[i][buff] = int(ui[key])
except:
buff_cases[i][buff] = ui[key]
if buff_cases[i]['armor'] == 'mage armor':
buff_cases[i]['mage_armor'] = 1
buff_cases[i]['molten_armor'] = 0
else:
buff_cases[i]['mage_armor'] = 0
buff_cases[i]['molten_armor'] = 1
#buttons.append({'key': 'buff_case_'+str(k)+'armor', 'type': 'radio:text', 'texts': ['molten armor', 'mage armor']','default': '0', 'tab': 1, 'row': i})
#buttons.append({'key': 'buff_case_'+str(k)+'_molten_armor', 'type': 'check', 'text': 'molten armor','default': '1', 'tab': 1, 'row': i})
#buttons.append({'key': 'buff_case_'+str(k)+'_mage_armor', 'type': 'check', 'text': 'mage armor','default': '0', 'tab': 1, 'row': i})
#plot measurements
linestyles=['-','-.','--',(0, (3, 1, 1, 1, 1, 1)),':']
self.frame.hidden_figure.set_dpi(300)
self.frame.hidden_figure.set_size_inches(6,4)
#self.frame.update()
#self.frame.figure. canvas.draw()
if ui['plot_dmg']:
ax = fns.add_axis(self.fig,2)
ax.grid()
misc = []
for i, buff_case in enumerate(buff_cases):
linestyle = linestyles[i]
if buff_case['check'] == 1:
for mage in self.mages:
misc = plot_dps(ui, mage, buff_case, i, linestyle, ax, misc, fractions = ui['include_rotation_fractions'], DMG = True)
if ui['save_check']:
misc = []
self.frame.hidden_figure.clf()
tempax = self.frame.hidden_figure.add_subplot(111)
tempax.grid()
for i, buff_case in enumerate(buff_cases):
linestyle = linestyles[i]
if buff_case['check'] == 1:
for mage in self.mages:
misc = plot_dps(ui, mage, buff_case, i, linestyle, tempax, misc, fractions = ui['include_rotation_fractions'], DMG = True)
self.frame.hidden_figure.tight_layout()
#print(self.frame.tempfig)
self.frame.hidden_figure.savefig(ui['save_filename']+'/dmg.svg')
self.frame.hidden_figure.savefig(ui['save_filename']+'/dmg.png')
#self.frame.update()
#self.frame.figure.canvas.draw()
if ui['plot_dps']:
ax = fns.add_axis(self.fig,2)
ax.grid()
misc = []
for i, buff_case in enumerate(buff_cases):
linestyle = linestyles[i]
if buff_case['check'] == 1:
for mage in self.mages:
misc = plot_dps(ui, mage, buff_case, i, linestyle, ax, misc, fractions = ui['include_rotation_fractions'], DMG = False)
if ui['save_check']:
misc = []
self.frame.hidden_figure.clf()
tempax = self.frame.hidden_figure.add_subplot(111)
tempax.grid()
for i, buff_case in enumerate(buff_cases):
linestyle = linestyles[i]
if buff_case['check'] == 1:
for mage in self.mages:
misc = plot_dps(ui, mage, buff_case, i, linestyle, tempax, misc, fractions = ui['include_rotation_fractions'], DMG = False)
self.frame.hidden_figure.tight_layout()
#print(self.frame.tempfig)
self.frame.hidden_figure.savefig(ui['save_filename']+'/dps.svg')
self.frame.hidden_figure.savefig(ui['save_filename']+'/dps.png')
#self.frame.update()
#self.frame.figure.canvas.draw()
if ui['plot_compare_buff_states']:
num_buff_cases = 0
for i, buff_case in enumerate(buff_cases):
if buff_case['check'] == 1:
num_buff_cases+=1
if num_buff_cases>1:
ax = fns.add_axis(self.fig,2)
plot_compare_buff_states(ui, self.mages, buff_cases, linestyles, ax)
if ui['save_check']:
self.frame.hidden_figure.clf()
tempax = self.frame.hidden_figure.add_subplot(111)
plot_compare_buff_states(ui, self.mages, buff_cases, linestyles, tempax)
self.frame.hidden_figure.tight_layout()
#print(self.frame.tempfig)
self.frame.hidden_figure.savefig(ui['save_filename']+'/comp_buff_states.svg')
self.frame.hidden_figure.savefig(ui['save_filename']+'/comp_buff_states.png')
if ui['plot_compare_mages']:
if hasattr(self.frame,'default_mage'):
default_mage=mage_file(self.frame.default_mage)
                optimized_save_rots = ('arcane_frost_clearcasting_optimized',
                                       'fireball_spam_clearcasting_optimized',
                                       'frostbolt_spam_clearcasting_optimized',
                                       'scorch_spam_clearcasting_optimized')
                if default_mage.save_rot[0] not in optimized_save_rots:
                    default_mage.save_rot = parse_rot(default_mage.save_rot)
                if default_mage.burn_rot[0] not in ('None', 'AB_spam_clearcasting_optimized'):
                    default_mage.burn_rot = parse_rot(default_mage.burn_rot)
if not hasattr(default_mage,'color'):
default_mage.color = mage_colors[i%8]
for key in ['disable_arcane_power', 'disable_icy_veins', 'disable_cold_snap', 'disable_water_elemental', 'disable_combustion', 'disable_PoM_pyro', 'ignore_scorch_ramp']:
default_mage.talents[key] = ui[key]
ax = fns.add_axis(self.fig,2)
plot_compare_mages(ui, default_mage, self.mages, buff_cases, linestyles, ax)
if ui['save_check']:
self.frame.hidden_figure.clf()
tempax = self.frame.hidden_figure.add_subplot(111)
plot_compare_mages(ui, default_mage, self.mages, buff_cases, linestyles, tempax)
self.frame.hidden_figure.tight_layout()
#print(self.frame.tempfig)
self.frame.hidden_figure.savefig(ui['save_filename']+'/comp_mages.svg')
self.frame.hidden_figure.savefig(ui['save_filename']+'/comp_mages.png')
if ui['plot_spell_dps']:
ax = fns.add_axis(self.fig,2)
plot_spell_dps(ui, self.mages, buff_cases, linestyles, ax)
if ui['save_check']:
self.frame.hidden_figure.clf()
tempax = self.frame.hidden_figure.add_subplot(111)
plot_spell_dps(ui, self.mages, buff_cases, linestyles, tempax)
self.frame.hidden_figure.tight_layout()
#print(self.frame.tempfig)
self.frame.hidden_figure.savefig(ui['save_filename']+'/spell_dps.svg')
self.frame.hidden_figure.savefig(ui['save_filename']+'/spell_dps.png')
#self.frame.figure.canvas.draw()
if ui['plot_spell_dpm']:
ax = fns.add_axis(self.fig,2)
plot_spell_dps(ui, self.mages, buff_cases, linestyles, ax, DPM= True)
if ui['save_check']:
self.frame.hidden_figure.clf()
tempax = self.frame.hidden_figure.add_subplot(111)
plot_spell_dps(ui, self.mages, buff_cases, linestyles, tempax, DPM= True)
self.frame.hidden_figure.tight_layout()
#print(self.frame.tempfig)
self.frame.hidden_figure.savefig(ui['save_filename']+'/spell_dpm.svg')
self.frame.hidden_figure.savefig(ui['save_filename']+'/spell_dpm.png')
#self.frame.figure.canvas.draw()
if ui['plot_stat_weights']:
ax = fns.add_axis(self.fig,2)
plot_stat_weights(ui, self.mages, buff_cases, linestyles, ax)
if ui['save_check']:
self.frame.hidden_figure.clf()
tempax = self.frame.hidden_figure.add_subplot(111)
plot_stat_weights(ui, self.mages, buff_cases, linestyles, tempax)
self.frame.hidden_figure.tight_layout()
#print(self.frame.tempfig)
self.frame.hidden_figure.savefig(ui['save_filename']+'/stat_weights.svg')
self.frame.hidden_figure.savefig(ui['save_filename']+'/stat_weights.png')
'''
ax.legend()
#set x and ylabel
ax.set_xlabel(ui['XYxlabel'])
ax.set_xlim([ui['XYxmin'],ui['XYxmax']])
ax.set_ylabel(ui['XYylabel'])
'''
if ui['save_check']:
self.fig.savefig(ui['save_filename']+'/all.svg')
self.fig.savefig(ui['save_filename']+'/all.png')
fig.canvas.draw()
self.frame.update()
def addButtons():
buttons=[
{'key': 'mage_tab_0_name', 'type': 'tabname', 'text': 'misc', 'tab': 0} ,
{'key': 'mage_tab_1_name', 'type': 'tabname', 'text': 'buffs', 'tab': 1} ,
{'key': 'plot_dmg', 'type': 'check', 'text': 'plot_dmg','default': '1', 'tab': 0, 'row': 0},
{'key': 'plot_dps', 'type': 'check', 'text': 'plot_dps','default': '1', 'tab': 0, 'row': 0},
{'key': 'include_rotation_fractions', 'type': 'check', 'text': 'include rotation fractions','default': '0', 'tab': 0, 'row': 0},
{'key': 'plot_compare_buff_states', 'type': 'check', 'text': 'plot_compare_buff_states','default': '1', 'tab': 0, 'row': 0},
{'key': 'set_default_mage', 'type': 'click', 'text': 'set_default_mage','bind': set_default_mage, 'tab': 0, 'row': 0},
{'key': 'plot_compare_mages', 'type': 'check', 'text': 'plot_compare_mages','default': '1', 'tab': 0, 'row': 0},
#{'key': 'clear_default_mage', 'type': 'click', 'text': 'set_default_mage','bind': clear_default_mage, 'tab': 10, 'row': 0},
{'key': 'plot_spell_dps', 'type': 'check', 'text': 'plot_spell_dps','default': '0', 'tab': 0, 'row': 0},
{'key': 'plot_spell_dpm', 'type': 'check', 'text': 'plot_spell_dpm','default': '0', 'tab': 0, 'row': 0},
{'key': 'plot_stat_weights', 'type': 'check', 'text': 'plot_stat_weights','default': '0', 'tab': 0, 'row': 0},
{'key': 'time_min', 'type': 'txt:float', 'text': 'time_min', 'default': '40', 'width': 4, 'tab': 0, 'row': 1} ,
{'key': 'time_max', 'type': 'txt:float', 'text': 'time_max', 'default': '180', 'width': 4, 'tab': 0, 'row': 1} ,
{'key': 'dps_min', 'type': 'txt:float', 'text': 'dps_min', 'default': '0', 'width': 4, 'tab': 0, 'row': 2} ,
{'key': 'dps_max', 'type': 'txt:float', 'text': 'dps_max', 'default': '2000', 'width': 4, 'tab': 0, 'row': 2} ,
{'key': 'stat_weight_ymax', 'type': 'txt:int', 'text': 'stat_weight_ymax', 'default': '2', 'width': 4, 'tab': 0, 'row': 2} ,
{'key': 'disable_arcane_power', 'type': 'check', 'text': 'disable_arcane_power','default': '0', 'tab': 0, 'row': 3},
{'key': 'disable_icy_veins', 'type': 'check', 'text': 'disable_icy_veins','default': '0', 'tab': 0, 'row': 3},
{'key': 'disable_cold_snap', 'type': 'check', 'text': 'disable_cold_snap','default': '0', 'tab': 0, 'row': 3},
{'key': 'disable_water_elemental', 'type': 'check', 'text': 'disable_water_elemental','default': '0', 'tab': 0, 'row': 3},
{'key': 'disable_combustion', 'type': 'check', 'text': 'disable_combustion','default': '0', 'tab': 0, 'row': 3},
{'key': 'disable_PoM_pyro', 'type': 'check', 'text': 'disable_PoM_pyro','default': '0', 'tab': 0, 'row': 3},
{'key': 'ignore_scorch_ramp', 'type': 'check', 'text': 'ignore_scorch_ramp','default': '0', 'tab': 0, 'row': 3},
]
j = len(buttons)
for k in range(5):
i=k*2
buttons.append({'key': 'buff_case_'+str(k)+'_check', 'type': 'check', 'text': 'Buffs '+str(k),'default': '0', 'tab': 1, 'row': i})
buttons.append({'key': 'buff_case_'+str(k)+'_label', 'type': 'txt', 'text': 'label:','default': 'buffs '+str(k), 'width': 10, 'tab': 1, 'row': i})
buttons.append({'key': 'buff_case_'+str(k)+'_arcane_intellect', 'type': 'check', 'text': 'AI','default': '1', 'tab': 1, 'row': i})
buttons.append({'key': 'buff_case_'+str(k)+'_armor', 'type': 'radio:text', 'texts': ['molten armor', 'mage armor'],'default': '0', 'tab': 1, 'row': i})
#buttons.append({'key': 'buff_case_'+str(k)+'_molten_armor', 'type': 'check', 'text': 'molten armor','default': '1', 'tab': 1, 'row': i})
#buttons.append({'key': 'buff_case_'+str(k)+'_mage_armor', 'type': 'check', 'text': 'mage armor','default': '0', 'tab': 1, 'row': i})
buttons.append({'key': 'buff_case_'+str(k)+'_misc_add_mana', 'type': 'txt:float', 'text': '| misc mana (mana ruby, potions, etc)','default': '2400','width': 5, 'tab': 1, 'row': i})
buttons.append({'key': 'buff_case_'+str(k)+'_innervate', 'type': 'txt:float', 'text': '# of innervates','default': '0','width': 2, 'tab': 1, 'row': i})
buttons.append({'key': 'buff_case_'+str(k)+'_dummy_label', 'type': 'label', 'text': ' ', 'tab': 1, 'row': i+1})
#{'key': 'XYxlabel', 'type': 'txt', 'text': 'x label', 'default': r'$2\theta$', 'width': 10, 'tab': 0, 'row': 1} ,
#buttons.append({'key': 'buff_case_'+str(k)+'_curse_of_shadow', 'type': 'check', 'text': 'CoS','default': '1', 'tab': 1, 'row': i+1})
buttons.append({'key': 'buff_case_'+str(k)+'_curse_of_elements', 'type': 'check', 'text': 'CoE','default': '1', 'tab': 1, 'row': i+1})
buttons.append({'key': 'buff_case_'+str(k)+'_malediction', 'type': 'check', 'text': 'Malediction','default': '1', 'tab': 1, 'row': i+1})
buttons.append({'key': 'buff_case_'+str(k)+'_divine_spirit', 'type': 'check', 'text': 'D.spirit','default': '1', 'tab': 1, 'row': i+1})
buttons.append({'key': 'buff_case_'+str(k)+'_improved_divine_spirit', 'type': 'check', 'text': 'Imp.d.spirit','default': '1', 'tab': 1, 'row': i+1})
buttons.append({'key': 'buff_case_'+str(k)+'_wrath_of_air_totem', 'type': 'check', 'text': 'WoA totem','default': '0', 'tab': 1, 'row': i+1})
buttons.append({'key': 'buff_case_'+str(k)+'_improved_wrath_of_air_totem', 'type': 'check', 'text': 'imp.WoA','default': '0', 'tab': 1, 'row': i+1})
buttons.append({'key': 'buff_case_'+str(k)+'_totem_of_wrath', 'type': 'check', 'text': 'totem of wrath','default': '0', 'tab': 1, 'row': i+1})
buttons.append({'key': 'buff_case_'+str(k)+'_mark_of_the_wild', 'type': 'check', 'text': 'MotW','default': '1', 'tab': 1, 'row': i+1})
buttons.append({'key': 'buff_case_'+str(k)+'_improved_mark_of_the_wild', 'type': 'check', 'text': 'imp.MotW','default': '1', 'tab': 1, 'row': i+1})
buttons.append({'key': 'buff_case_'+str(k)+'_blessing_of_kings', 'type': 'check', 'text': 'BoK','default': '1', 'tab': 1, 'row': i+1})
buttons.append({'key': 'buff_case_'+str(k)+'_blessing_of_wisdom', 'type': 'check', 'text': 'BoW','default': '1', 'tab': 1, 'row': i+1})
buttons.append({'key': 'buff_case_'+str(k)+'_judgement_of_wisdom', 'type': 'check', 'text': 'JoW','default': '1', 'tab': 1, 'row': i+1})
buttons.append({'key': 'buff_case_'+str(k)+'_shadow_priest_dps', 'type': 'txt:float', 'text': 'SP dps', 'default': '0', 'width': 4, 'tab': 1, 'row': i+1})
buttons.append({'key': 'buff_case_'+str(k)+'_misery', 'type': 'check', 'text': 'misery','default': '0', 'tab': 1, 'row': i+1})
buttons.append({'key': 'buff_case_'+str(k)+'_2_tier5_set_bonus', 'type': 'check', 'text': '2_tier5_set_bonus','default': '0', 'tab': 1, 'row': i+1})
buttons.append({'key': 'buff_case_'+str(k)+'_spellfire_set', 'type': 'check', 'text': 'spellfire set','default': '0', 'tab': 1, 'row': i+1})
buttons[j]['default'] = 1
#{'key': 'XYxmin', 'type': 'txt:float', 'text': 'x min', 'default': '0', 'width': 4, 'tab': 0, 'row': 1} ,
#{'key': 'XYxmax', 'type': 'txt:float', 'text': 'x max', 'default': '120', 'width': 4, 'tab': 0, 'row': 1} ,
#{'key': 'XYxlabel', 'type': 'txt', 'text': 'x label', 'default': r'$2\theta$', 'width': 10, 'tab': 0, 'row': 1} ,
#{'key': 'XYnormalize', 'type': 'check', 'text': 'Normalize y-axis', 'tab': 0, 'row': 2} ,
#{'key': 'XYylabel_text', 'type': 'label', 'text': 'ylabel: ', 'tab': 0, 'row': 2} ,
#{'key': 'XYylabel', 'type': 'radio:text', 'texts': ['Counts', 'Intensity'], 'tab': 0, 'row': 2,'default': 0} ,
return buttons
import copy
def get_dmg(mage, buffs,times):
new_stats_0 = copy.deepcopy(mage.stats)
new_talents = copy.deepcopy(mage.talents)
bck.buff_me(new_stats_0, new_talents, buffs)
spells, new_stats = bck.get_spells_stats(new_stats_0, new_talents, bck.game_config)
if mage.save_rot[0] == 'arcane_frost_clearcasting_optimized':
save_rot = bck.get_dps_mps_rot_clearcasting_optimal(new_stats_0, new_talents, bck.game_config, spells_to_cast = 20000)
elif mage.save_rot[0] == 'fireball_spam_clearcasting_optimized':
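        # Assumption from the variable names: force_clearcasting = -1 prices spells
        # as if Clearcasting never procs, +1 as if it always procs; the 10-cast cycle
        # below is nine regular casts plus one Arcane Missiles priced with a
        # guaranteed (mana-free) Clearcasting proc.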
new_talents['force_clearcasting'] = -1
spells_no_c, stats_no_c = bck.get_spells_stats(new_stats_0, new_talents, bck.game_config)
new_talents['force_clearcasting'] = 1
spells_forced_c, stats_forced_c = bck.get_spells_stats(new_stats_0, new_talents, bck.game_config)
new_talents['force_clearcasting'] = 0 # reset
optimized_spells = [spells_no_c['fireball_13_one_tick']]*7
optimized_spells.append(spells_no_c['fireball_13_one_tick_one_roll'])
optimized_spells.append(spells_no_c['fireball_13_three_tick_no_roll'])
optimized_spells.append(spells_forced_c['arcane_missiles_10'])
save_rot = bck.get_dps_mps_rotation(optimized_spells)
elif mage.save_rot[0] == 'scorch_spam_clearcasting_optimized':
new_talents['force_clearcasting'] = -1
spells_no_c, stats_no_c = bck.get_spells_stats(new_stats_0, new_talents, bck.game_config)
new_talents['force_clearcasting'] = 1
spells_forced_c, stats_forced_c = bck.get_spells_stats(new_stats_0, new_talents, bck.game_config)
new_talents['force_clearcasting'] = 0 # reset
optimized_spells = [spells_no_c['scorch_9']]*7
optimized_spells.append(spells_no_c['scorch_9_no_roll'])
optimized_spells.append(spells_no_c['scorch_9_no_roll'])
optimized_spells.append(spells_forced_c['arcane_missiles_10'])
save_rot = bck.get_dps_mps_rotation(optimized_spells)
elif mage.save_rot[0] == 'frostbolt_spam_clearcasting_optimized':
new_talents['force_clearcasting'] = -1
spells_no_c, stats_no_c = bck.get_spells_stats(new_stats_0, new_talents, bck.game_config)
new_talents['force_clearcasting'] = 1
spells_forced_c, stats_forced_c = bck.get_spells_stats(new_stats_0, new_talents, bck.game_config)
new_talents['force_clearcasting'] = 0 # reset
optimized_spells = [spells_no_c['frostbolt_13']]*9
optimized_spells.append(spells_forced_c['arcane_missiles_10'])
save_rot = bck.get_dps_mps_rotation(optimized_spells)
else:
save_rot = bck.get_dps_mps_rotation([spells[x] for x in mage.save_rot])
if mage.burn_rot[0] == 'None':
burn_rot = [0,10**10]
elif mage.burn_rot[0] == 'AB_spam_clearcasting_optimized':
new_talents['force_clearcasting'] = -1
spells_no_c, stats_no_c = bck.get_spells_stats(new_stats_0, new_talents, bck.game_config)
new_talents['force_clearcasting'] = 1
spells_forced_c, stats_forced_c = bck.get_spells_stats(new_stats_0, new_talents, bck.game_config)
new_talents['force_clearcasting'] = 0 # reset
optimized_spells = [spells_no_c['arcane_blast_1_3speed_3mana']]*9
optimized_spells.append(spells_forced_c['arcane_missiles_10'])
burn_rot = bck.get_dps_mps_rotation(optimized_spells)
else:
burn_rot = bck.get_dps_mps_rotation([spells[x] for x in mage.burn_rot])
IV_replace = None
if 'arcane_frost_clearcasting_optimized' in mage.save_rot or 'arcane_blast_1_3speed_0mana' in mage.save_rot:
#print(mage.location)
IV_replace = bck.get_dps_mps_rotation([spells[x] for x in ['frostbolt_13']])
dmg, dmg_burn, dmg_save, dmg_other, time_shift = bck.optimize_cycles_return_damage(new_stats,times,new_talents, burn_rot, save_rot, return_fractions=True, IV_replace=IV_replace )
return dmg, dmg_burn, dmg_save, dmg_other, time_shift
def plot_dps(ui, mage, buffs, i, linestyle, ax, misc, fractions = False, DMG = False):
times = np.arange(ui['time_min'],ui['time_max']+1, 1)
dmg, dmg_burn, dmg_save, dmg_other, time_shift = get_dmg(mage, buffs, times)
if DMG:
times_mod = 1
ax.set_ylabel('Damage [DMG]')
#ax.set_ylim([u,ui['dps_max']])
else:
times_mod = times
ax.set_ylabel('Average dps [DMG/s]')
ax.set_ylim([ui['dps_min'],ui['dps_max']])
if fractions:
if not 'dmg_frac_label' in misc:
misc.append('dmg_frac_label')
ax.fill_between(times,
np.zeros(len(times)),
dmg_save/times_mod,
color=[0.5,0,1,0.2], label = 'save')
ax.fill_between(times,
dmg_save/times_mod,
dmg_save/times_mod+dmg_burn/times_mod,
color=[1,0,0.5,0.2], label = 'burn')
else:
ax.fill_between(times,
np.zeros(len(times)),
dmg_save/times_mod,
color=[0.5,0,1,0.2])
ax.fill_between(times,
dmg_save/times_mod,
dmg_save/times_mod+dmg_burn/times_mod,
color=[1,0,0.5,0.2])
if np.sum(dmg_other)>1000:
if not 'dmg_frac_other_label' in misc:
misc.append('dmg_frac_other_label')
ax.fill_between(times,
dmg_save/times_mod+dmg_burn/times_mod,
dmg_save/times_mod+dmg_burn/times_mod+dmg_other/times_mod,
color=[0,0,0,0.2], label = 'other (pom+pyro, etc)')
else:
ax.fill_between(times,
dmg_save/times_mod+dmg_burn/times_mod,
dmg_save/times_mod+dmg_burn/times_mod+dmg_other/times_mod,
color=[0,0,0,0.2])
ax.plot(times, dmg/times_mod, linestyle= linestyle, color=mage.color, label = mage.label+', '+ui['buff_case_'+str(i)+'_label'])
ax.set_xticks(ticks=np.arange((int((times[0]-1)/30)+1)*30,times[-1]+1,30))
ax.set_xlabel('Total casting time before boss dead [s]')
'''ax.annotate('Evocation', xy=(43, 1100),
xytext=(48, 1400),
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='left', verticalalignment='top',
)
ax.annotate('OOM', xy=(110, 800),
xytext=(120,1100),
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='left', verticalalignment='top',
)'''
ax.legend()
ax.set_xlim([ui['time_min'],ui['time_max']])
ylim = ax.get_ylim()
if ylim[0]<0:
ax.set_ylim([0,ylim[1]])
#fig.savefig('optimized_spam.png')
return misc
def plot_spell_dps(ui, mages, buff_cases, linestyles, ax, DPM = False):
#ax.grid()
spell_names = ['frostbolt_13','fireball_13_one_tick',
'scorch_9',
'arcane_blast_1_0speed_0mana',
'arcane_blast_1_1speed_1mana',
'arcane_blast_1_2speed_2mana',
'arcane_blast_1_3speed_3mana',
'arcane_blast_1_3speed_0mana',
'arcane_missiles_10',
]
x = np.arange(len(spell_names))
tot_cases = 0
for i, buff_case in enumerate(buff_cases):
if buff_case['check'] == 1:
tot_cases+=len(mages)
j=0
width = 0.8/(tot_cases)
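    # Grouped bar chart: one group per spell, one bar per (mage, buff case)
    # combination; `offset` below spreads the bars evenly inside each group.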
for i, buff_case in enumerate(buff_cases):
linestyle = linestyles[i]
if buff_case['check'] == 1:
for mage in mages:
new_stats_0 = copy.deepcopy(mage.stats)
new_talents = copy.deepcopy(mage.talents)
bck.buff_me(new_stats_0, new_talents, buff_case)
spells, new_stats = bck.get_spells_stats(new_stats_0, new_talents, bck.game_config)
dpms = []
dpss = []
for spell_name in spell_names:
dps = spells[spell_name].average_damage / spells[spell_name].actual_cast_time
dpss.append(dps)
dpm = spells[spell_name].average_damage / spells[spell_name].actual_mana
dpms.append(dpm)
offset = -0.8/2+(j+0.5)*0.8/(tot_cases)
color = [mage.color[0], mage.color[1], mage.color[2],0.5]
edgecolor = [mage.color[0], mage.color[1], mage.color[2],1]
if not DPM:
rects = ax.bar(x +offset, dpss, width, linestyle=linestyle, edgecolor= edgecolor, color=color, label=mage.label)
else:
rects = ax.bar(x +offset, dpms, width, linestyle=linestyle, edgecolor= edgecolor, color=color, label=mage.label)
#rects = ax[1].bar(x +offset, dpms, width, color=mage.color, label=mage.label)
j+=1
if not DPM:
ax.set_ylabel('spell dps')
else:
ax.set_ylabel('spell dpm')
#ax.set_ylabel('spell dpm')
spell_names_short = ['Frostbolt',
'Fireball',
'Scorch',
'AB0',
'AB1',
'AB2',
'AB3',
'AB3\ncost1',
'AM',
]
ax.set_xticks(np.arange(0,len(spell_names_short),1))
ax.set_xticklabels(spell_names_short)
#ax[1].legend()
#fig.tight_layout()
return
def plot_stat_weights(ui, mages, buff_cases, linestyles, ax, DPM = False):
stats_list = ['intellect','common_spell_damage',
'crit_rating','hit_rating','haste_rating','mp5','spirit']
stats_names = ['Intellect','+Spelldamage','Crit rating',
'Hit rating','Haste','mp5','Spirit']
x_step = ui['time_max']-ui['time_min']
xlim = [ui['time_min'],ui['time_max']+3*x_step]
times = np.arange(ui['time_min'],ui['time_max']+1, 1)
max_ylim = ui['stat_weight_ymax']
for i, buff_case in enumerate(buff_cases):
linestyle = linestyles[i]
if buff_case['check'] == 1:
for mage in mages:
tmp = get_dmg(mage, buff_case, times)
dps_0 = tmp[0]/times
xo=-x_step
yo=max_ylim
for i, stat in enumerate(stats_list):
if i==4:
xo=0
yo-=max_ylim
else:
xo+=x_step
mage.stats[stat]-=10
#print('arcane')
out = get_dmg(mage, buff_case, times)
dps_new = out[0]/times
mage.stats[stat]+=10
fraction_increase_per_stat = -0.1*(dps_new/dps_0-1)
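                    # Each stat was perturbed by 10 points, so this is the fractional
                    # dps gain per single point of the stat.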
#stat_per_percent_fire[stat_per_percent_fire<0]=np.nan
#stat_per_percent_fire[stat_per_percent_fire>max_ylim]=np.nan
stat_per_percent = 0.01/fraction_increase_per_stat
y= 20/stat_per_percent
y[y<-0.0001] = np.nan
y[y>max_ylim] = np.nan
ax.plot(times+xo,y+yo,linestyle= linestyle,color=mage.color)
xo=-x_step
yo=max_ylim
for i, stat in enumerate(stats_list):
if i==4:
xo=0
yo-=max_ylim
else:
xo+=x_step
ax.text(xo+xlim[0]+0.05*x_step, yo+max_ylim-0.05*max_ylim, stats_names[i],ha='left', va='top')
ax.set_xlim(xlim)
ax.set_ylim([0,2*max_ylim])
ax.set_xticks([])
ax.set_yticks(np.arange(max_ylim*4)/2)
ax.set_yticklabels(np.arange(max_ylim*4)/2%max_ylim)
ax.plot(xlim, [max_ylim,max_ylim], lw=0.5,color=[0,0,0,1])
ax.grid()
for i in range(1,4):
ax.plot([xlim[0]+x_step*i]*2, [0,max_ylim*2], lw=0.5,color=[0,0,0,1])
x_ticks_0 = np.arange((int((times[0]-1)/30)+1)*30,times[-1],30)
x_ticks = []
for i in range(4):
for x in x_ticks_0:
x_ticks.append(x+i*x_step)
ax.set_xticks(ticks=x_ticks)
x_ticks = []
for i in range(4):
for x in x_ticks_0:
x_ticks.append(int(x))
ax.set_xticklabels(x_ticks)
ax.set_xlabel('Total casting time before boss dead [s]')
ax.set_ylabel('Stat weight [-]')
'''axes[i].set_title(stats_names[i])
axes[i].set_ylim([0,max_ylim])
axes[i].set_yticks([0,1,2,3,4,5])
axes[i].grid()
axes[i].set_xlim([20,180])'''
#axes[-1].set_axis_off()
#fig.suptitle('Stat weights')
#fig.tight_layout()
def plot_compare_buff_states(ui, mages, buff_cases, linestyles, ax):
xlim = [ui['time_min'],ui['time_max']]
times = np.arange(ui['time_min'],ui['time_max']+1, 1)
max_ylim = ui['stat_weight_ymax']
ax.plot(times, np.zeros(times.shape), color=[0,0,0,1])
for mage in mages:
done_first = 0
for i, buff_case in enumerate(buff_cases):
linestyle = linestyles[i]
if buff_case['check'] == 1:
if done_first ==0:
tmp = get_dmg(mage, buff_case, times)
dps_0 = tmp[0]/times
done_first = 1
label_0 = ui['buff_case_'+str(i)+'_label']
else:
tmp = get_dmg(mage, buff_case, times)
dps_1 = tmp[0]/times
ax.plot(times, 100*(dps_1/dps_0-1), linestyle= linestyle, color=mage.color,
label = mage.label+', '+ui['buff_case_'+str(i)+'_label'])
ax.set_xticks(ticks=np.arange((int((times[0]-1)/30)+1)*30,times[-1]+1,30))
ax.set_xlabel('Total casting time before boss dead [s]')
ax.set_ylabel('% damage increase vs '+label_0)
ax.legend()
ax.set_xlim([ui['time_min'],ui['time_max']])
ax.grid()
def set_default_mage(event):
frame = event.widget
while not hasattr(frame,'nav'):
frame = frame.master
frame.nav.clear_color('color3')
frame.nav.color_selected('color3')
mages = frame.nav.get_paths_of_selected_items()
if len(mages)>0:
frame.default_mage = frame.nav.get_paths_of_selected_items()[0]
print('set default_mage:',frame.default_mage )
else:
if hasattr(frame, 'default_mage'):
delattr(frame, 'default_mage')
print('cleared default_mage')
frame.nav.deselect()
def plot_compare_mages(ui, default_mage, mages, buff_cases, linestyles, ax):
xlim = [ui['time_min'],ui['time_max']]
times = np.arange(ui['time_min'],ui['time_max']+1, 1)
max_ylim = ui['stat_weight_ymax']
ax.plot(times, np.zeros(times.shape), color=default_mage.color)
for i, buff_case in enumerate(buff_cases):
linestyle = linestyles[i]
if buff_case['check'] == 1:
tmp = get_dmg(default_mage, buff_case, times)
dps_0 = tmp[0]/times
for mage in mages:
if mage.location == default_mage.location:
continue
tmp = get_dmg(mage, buff_case, times)
dps_1 = tmp[0]/times
ax.plot(times, 100*(dps_1/dps_0-1), linestyle= linestyle, color=mage.color,
label = mage.label+', '+ui['buff_case_'+str(i)+'_label'])
ax.set_xticks(ticks=np.arange((int((times[0]-1)/30)+1)*30,times[-1]+1,30))
ax.grid()
ax.set_xlabel('Total casting time before boss dead [s]')
ax.set_ylabel('% damage increase vs '+default_mage.label)
ax.legend()
ax.set_xlim([ui['time_min'],ui['time_max']])
| 42.406176 | 183 | 0.651711 | 5,561 | 35,706 | 3.936342 | 0.073548 | 0.025582 | 0.019598 | 0.035496 | 0.721243 | 0.65651 | 0.639242 | 0.588305 | 0.555642 | 0.509137 | 0 | 0.028567 | 0.154904 | 35,706 | 841 | 184 | 42.456599 | 0.696868 | 0.074245 | 0 | 0.413649 | 0 | 0 | 0.239812 | 0.055903 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019499 | false | 0 | 0.011142 | 0 | 0.041783 | 0.008357 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdb9e345a7126b6fc13fd0106c741d7ab14d3f93 | 36,682 | py | Python | PyPixel/SkyBlockStats.py | M4axim/PyPixel | 8f77773a6e4c1541a41c98fd8edb86b4bb2aba67 | [
"MIT"
] | 2 | 2021-03-25T16:52:22.000Z | 2021-09-22T16:42:57.000Z | PyPixel/SkyBlockStats.py | M4axim/PyPixel | 8f77773a6e4c1541a41c98fd8edb86b4bb2aba67 | [
"MIT"
] | null | null | null | PyPixel/SkyBlockStats.py | M4axim/PyPixel | 8f77773a6e4c1541a41c98fd8edb86b4bb2aba67 | [
"MIT"
] | 2 | 2021-03-23T18:40:19.000Z | 2022-01-03T18:17:08.000Z | # -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2021 plun1331
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import datetime
from .utils import SkyBlockUtils
# Maps each slayer type to the helper that converts raw slayer XP into a
# level (named to avoid shadowing the standard library ``types`` module).
SLAYER_LEVEL_FUNCS = {'zombie': SkyBlockUtils.zombieSlayer,
'spider': SkyBlockUtils.spiderSlayer,
'wolf': SkyBlockUtils.wolfSlayer}
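# Minimal illustrative usage (``profile`` and ``player_uuid`` are
# hypothetical names; the stats dict comes from a SkyBlock profile member):
# member = profile['members'][player_uuid]
# sb_stats = SkyBlockStats(member['stats'])
# print(sb_stats.kills, sb_stats.deaths)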
class SkyBlockStats(object):
r"""Represents a player's SkyBlock Statistics.
:param stats: The player's stats from their member data as retrieved from the API.
:type stats: dict"""
def __init__(self, stats: dict):
self.top_crit_damage = stats['highest_crit_damage'] if 'highest_crit_damage' in stats else None
self.kills = int(stats['kills']) if 'kills' in stats else None
self.zombie_kills = int(stats['kills_zombie']) if 'kills_zombie' in stats else None
self.bids = int(stats['auctions_bids']) if 'auctions_bids' in stats else None
self.highest_bid = stats['auctions_highest_bid'] if 'auctions_highest_bid' in stats else None
self.zombie_villager_kills = int(stats['kills_zombie_villager']) if 'kills_zombie_villager' in stats else None
self.skeleton_kills = int(stats['kills_skeleton']) if 'kills_skeleton' in stats else None
self.spider_kills = int(stats['kills_spider']) if 'kills_spider' in stats else None
self.enderman_kills = int(stats['kills_enderman']) if 'kills_enderman' in stats else None
self.deaths = int(stats['deaths']) if 'deaths' in stats else None
self.zombie_deaths = int(stats['deaths_zombie']) if 'deaths_zombie' in stats else None
self.void_deaths = int(stats['deaths_void']) if 'deaths_void' in stats else None
self.skeleton_deaths = int(stats['deaths_skeleton']) if 'deaths_skeleton' in stats else None
self.fire_deaths = int(stats['deaths_fire']) if 'deaths_fire' in stats else None
self.auctions_won = int(stats['auctions_won']) if 'auctions_won' in stats else None
self.uncommon_auctions_bought = int(
stats['auctions_bought_uncommon']) if 'auctions_bought_uncommon' in stats else None
self.auctions_gold_spent = int(stats['auctions_gold_spent']) if 'auctions_gold_spent' in stats else None
self.auctions_created = int(stats['auctions_created']) if 'auctions_created' in stats else None
self.auction_fees_spent = int(stats['auctions_fees']) if 'auctions_fees' in stats else None
self.player_deaths = int(stats['deaths_player']) if 'deaths_player' in stats else None
self.auctions_completed = int(stats['auctions_completed']) if 'auctions_completed' in stats else None
self.uncommon_auctions_sold = int(
stats['auctions_sold_uncommon']) if 'auctions_sold_uncommon' in stats else None
self.auction_gold_earned = int(stats['auctions_gold_earned']) if 'auctions_gold_earned' in stats else None
self.invisible_creeper_kills = int(
stats['kills_invisible_creeper']) if 'kills_invisible_creeper' in stats else None
self.emerald_slime_kills = int(stats['kills_emerald_slime']) if 'kills_emerald_slime' in stats else None
self.diamond_zombie_kills = int(stats['kills_diamond_zombie']) if 'kills_diamond_zombie' in stats else None
self.diamond_skeleton_deaths = int(
stats['deaths_diamond_skeleton']) if 'deaths_diamond_skeleton' in stats else None
self.diamond_zombie_deaths = int(stats['deaths_diamond_zombie']) if 'deaths_diamond_zombie' in stats else None
self.diamond_skeleton_kills = int(
stats['kills_diamond_skeleton']) if 'kills_diamond_skeleton' in stats else None
self.lapis_zombie_kills = int(stats['kills_lapis_zombie']) if 'kills_lapis_zombie' in stats else None
self.emerald_slime_deaths = int(stats['deaths_emerald_slime']) if 'deaths_emerald_slime' in stats else None
self.redstone_pigman_kills = int(stats['kills_redstone_pigman']) if 'kills_redstone_pigman' in stats else None
self.redstone_pigman_deaths = int(
stats['deaths_redstone_pigman']) if 'deaths_redstone_pigman' in stats else None
self.splitter_spider_silverfish_kills = int(
stats['kills_splitter_spider_silverfish']) if 'kills_splitter_spider_silverfish' in stats else None
self.jockey_shot_silverfish_kills = int(
stats['kills_jockey_shot_silverfish']) if 'kills_jockey_shot_silverfish' in stats else None
self.wither_skeleton_kills = int(stats['kills_wither_skeleton']) if 'kills_wither_skeleton' in stats else None
self.magma_cube_kills = int(stats['kills_magma_cube']) if 'kills_magma_cube' in stats else None
self.magma_cube_fireball_kills = int(
stats['kills_fireball_magma_cube']) if 'kills_fireball_magma_cube' in stats else None
self.cow_kills = int(stats['kills_cow']) if 'kills_cow' in stats else None
self.pig_kills = int(stats['kills_pig']) if 'kills_pig' in stats else None
self.items_fished = int(stats['items_fished']) if 'items_fished' in stats else None
self.normal_items_fished = int(stats['items_fished_normal']) if 'items_fished_normal' in stats else None
self.treasure_items_fished = int(stats['items_fished_treasure']) if 'items_fished_treasure' in stats else None
self.common_auctions_bought = int(
stats['auctions_bought_common']) if 'auctions_bought_common' in stats else None
self.witch_kills = int(stats['kills_witch']) if 'kills_witch' in stats else None
self.spider_deaths = int(stats['deaths_spider']) if 'deaths_spider' in stats else None
self.epic_auctions_bought = int(stats['auctions_bought_epic']) if 'auctions_bought_epic' in stats else None
self.magma_cube_fireball_deaths = int(
stats['deaths_fireball_magma_cube']) if 'deaths_fireball_magma_cube' in stats else None
self.weaver_spider_kills = int(stats['kills_weaver_spider']) if 'kills_weaver_spider' in stats else None
self.splitter_spider_kills = int(stats['kills_splitter_spider']) if 'kills_splitter_spider' in stats else None
self.jockey_skeleton_kills = int(stats['kills_jockey_skeleton']) if 'kills_jockey_skeleton' in stats else None
self.spider_jockey_kills = int(stats['kills_spider_jockey']) if 'kills_spider_jockey' in stats else None
self.dasher_spider_kills = int(stats['kills_dasher_spider']) if 'kills_dasher_spider' in stats else None
self.spider_jockey_deaths = int(stats['deaths_spider_jockey']) if 'deaths_spider_jockey' in stats else None
self.dasher_spider_deaths = int(stats['deaths_dasher_spider']) if 'deaths_dasher_spider' in stats else None
self.jockey_shot_silverfish_deaths = int(
stats['deaths_jockey_shot_silverfish']) if 'deaths_jockey_shot_silverfish' in stats else None
self.splitter_spider_deaths = int(
stats['deaths_splitter_spider']) if 'deaths_splitter_spider' in stats else None
self.common_auctions_sold = int(stats['auctions_sold_common']) if 'auctions_sold_common' in stats else None
self.no_bid_auctions = int(stats['auctions_no_bids']) if 'auctions_no_bids' in stats else None
self.ghast_kills = int(stats['kills_ghast']) if 'kills_ghast' in stats else None
self.rare_auctions_sold = int(stats['auctions_sold_rare']) if 'auctions_sold_rare' in stats else None
self.epic_auctions_sold = int(stats['auctions_sold_epic']) if 'auctions_sold_epic' in stats else None
self.magma_cube_boss_deaths = int(
stats['deaths_magma_cube_boss']) if 'deaths_magma_cube_boss' in stats else None
self.blaze_kills = int(stats['kills_blaze']) if 'kills_blaze' in stats else None
self.wither_skeleton_deaths = int(
stats['deaths_wither_skeleton']) if 'deaths_wither_skeleton' in stats else None
self.magma_cube_deaths = int(stats['deaths_magma_cube']) if 'deaths_magma_cube' in stats else None
self.respawning_skeleton_kills = int(
stats['kills_respawning_skeleton']) if 'kills_respawning_skeleton' in stats else None
self.fall_deaths = int(stats['deaths_fall']) if 'deaths_fall' in stats else None
self.rare_auctions_bought = int(stats['auctions_bought_rare']) if 'auctions_bought_rare' in stats else None
self.rabbit_kills = int(stats['kills_rabbit']) if 'kills_rabbit' in stats else None
self.sheep_kills = int(stats['kills_sheep']) if 'kills_sheep' in stats else None
self.pigman_kills = int(stats['kills_pigman']) if 'kills_pigman' in stats else None
self.player_kills = int(stats['kills_player']) if 'kills_player' in stats else None
self.ruin_wolf_kills = int(stats['kills_ruin_wolf']) if 'kills_ruin_wolf' in stats else None
self.night_respawning_skeleton_kills = int(
stats['kills_night_respawining_skeleton']) if 'kills_night_respawining_skeleton' in stats else None
self.legendary_auctions_bought = int(
stats['auctions_bought_legendary']) if 'auctions_bought_legendary' in stats else None
self.chicken_kills = int(stats['kills_chicken']) if 'kills_chicken' in stats else None
self.respawning_skeleton_deaths = int(
stats['deaths_respawning_skeleton']) if 'deaths_respawning_skeleton' in stats else None
self.ruin_wolf_deaths = int(stats['deaths_ruin_wolf']) if 'deaths_ruin_wolf' in stats else None
self.unburied_zombie_deaths = int(
stats['deaths_unburied_zombie']) if 'deaths_unburied_zombie' in stats else None
self.unburied_zombie_kills = int(
stats['kills_unburried_zombie']) if 'kills_unburried_zombie' in stats else None
self.enderman_deaths = int(stats['deaths_enderman']) if 'deaths_enderman' in stats else None
self.endermite_deaths = int(stats['deaths_endermite']) if 'deaths_endermite' in stats else None
self.endermite_kills = int(stats['kills_endermite']) if 'kills_endermite' in stats else None
self.zealot_enderman_deaths = int(
stats['deaths_zealot_enderman']) if 'deaths_zealot_enderman' in stats else None
self.wise_dragon_deaths = int(stats['deaths_wise_dragon']) if 'deaths_wise_dragon' in stats else None
self.watcher_deaths = int(stats['deaths_watcher']) if 'deaths_watcher' in stats else None
self.watcher_kills = int(stats['kills_watcher']) if 'kills_watcher' in stats else None
self.random_slime_kills = int(stats['kills_random_slime']) if 'kills_random_slime' in stats else None
self.voracious_spider_kills = int(
stats['kills_voracious_spider']) if 'kills_voracious_spider' in stats else None
self.wolf_deaths = int(stats['deaths_wolf']) if 'deaths_wolf' in stats else None
self.old_wolf_kills = int(stats['kills_old_wolf']) if 'kills_old_wolf' in stats else None
self.old_wolf_deaths = int(stats['deaths_old_wolf']) if 'deaths_old_wolf' in stats else None
self.zealot_enderman_kills = int(stats['kills_zealot_enderman']) if 'kills_zealot_enderman' in stats else None
self.obsidian_wither_kills = int(stats['kills_obsidian_wither']) if 'kills_obsidian_wither' in stats else None
self.howling_spirit_kills = int(stats['kills_howling_spirit']) if 'kills_howling_spirit' in stats else None
self.howling_spirit_deaths = int(stats['deaths_howling_spirit']) if 'deaths_howling_spirit' in stats else None
self.unknown_deaths = int(stats['deaths_unknown']) if 'deaths_unknown' in stats else None
self.sea_walker_kills = int(stats['kills_sea_walker']) if 'kills_sea_walker' in stats else None
self.pond_squid_kills = int(stats['kills_pond_squid']) if 'kills_pond_squid' in stats else None
self.sea_guardian_kills = int(stats['kills_sea_guardian']) if 'kills_sea_guardian' in stats else None
self.sea_archer_kills = int(stats['kills_sea_archer']) if 'kills_sea_archer' in stats else None
self.young_dragon_deaths = int(stats['deaths_young_dragon']) if 'deaths_young_dragon' in stats else None
self.zombie_deep_kills = int(stats['kills_zombie_deep']) if 'kills_zombie_deep' in stats else None
self.gifts_given = int(stats['gifts_given']) if 'gifts_given' in stats else None
self.gifts_received = int(stats['gifts_recieved']) if 'gifts_recieved' in stats else None
self.frozen_steve_deaths = int(stats['deaths_frozen_steve']) if 'deaths_frozen_steve' in stats else None
self.brood_mother_spider_kills = int(
stats['kills_brood_mother_spider']) if 'kills_brood_mother_spider' in stats else None
self.brood_mother_cave_spider_kills = int(
stats['kills_brood_mother_cave_spider']) if 'kills_brood_mother_cave_spider' in stats else None
self.foraging_race_best_time = int(
stats['foraging_race_best_time']) if 'foraging_race_best_time' in stats else None
self.legendary_auctions_sold = int(
stats['auctions_sold_legendary']) if 'auctions_sold_legendary' in stats else None
self.special_auctions_sold = int(stats['auctions_sold_special']) if 'auctions_sold_special' in stats else None
self.generator_magma_cube_kills = int(
stats['kills_generator_magma_cube']) if 'kills_generator_magma_cube' in stats else None
self.bat_pinata_kills = int(stats['kills_bat_pinata']) if 'kills_bat_pinata' in stats else None
self.special_auctions_bought = int(
stats['auctions_bought_special']) if 'auctions_bought_special' in stats else None
self.horseman_zombie_kills = int(stats['kills_horseman_zombie']) if 'kills_horseman_zombie' in stats else None
self.old_dragon_deaths = int(stats['deaths_old_dragon']) if 'deaths_old_dragon' in stats else None
self.liquid_hot_magma_deaths = int(
stats['deaths_liquid_hot_magma']) if 'deaths_liquid_hot_magma' in stats else None
self.liquid_hot_magma_kills = int(
stats['kills_liquid_hot_magma']) if 'kills_liquid_hot_magma' in stats else None
self.most_winter_snowballs_hit = int(
stats['most_winter_snowballs_hit']) if 'most_winter_snowballs_hit' in stats else None
self.most_winter_damage_dealt = int(
stats['most_winter_damage_dealt']) if 'most_winter_damage_dealt' in stats else None
self.most_winter_magma_damage_dealt = int(
stats['most_winter_magma_damage_dealt']) if 'most_winter_magma_damage_dealt' in stats else None
self.ender_crystals_destroyed = int(
stats['ender_crystals_destroyed']) if 'ender_crystals_destroyed' in stats else None
self.most_winter_cannonballs_hit = int(
stats['most_winter_cannonballs_hit']) if 'most_winter_cannonballs_hit' in stats else None
self.slime_kills = int(stats['kills_slime']) if 'kills_slime' in stats else None
self.unstable_dragon_deaths = int(
stats['deaths_unstable_dragon']) if 'deaths_unstable_dragon' in stats else None
self.superior_dragon_deaths = int(
stats['deaths_superior_dragon']) if 'deaths_superior_dragon' in stats else None
self.forest_island_bat_kills = int(
stats['kills_forest_island_bat']) if 'kills_forest_island_bat' in stats else None
self.strong_dragon_deaths = int(stats['deaths_strong_dragon']) if 'deaths_strong_dragon' in stats else None
self.pet_milestone_ores_mined = int(
stats['pet_milestone_ores_mined']) if 'pet_milestone_ores_mined' in stats else None
self.pet_milestone_sea_creatures_killed = int(
stats['pet_milestone_sea_creatures_killed']) if 'pet_milestone_sea_creatures_killed' in stats else None
self.chicken_deep_kills = int(stats['kills_chicken_deep']) if 'kills_chicken_deep' in stats else None
self.corrupted_protector_deaths = int(
stats['deaths_corrupted_protector']) if 'deaths_corrupted_protector' in stats else None
self.pack_spirit_kills = int(stats['kills_pack_spirit']) if 'kills_pack_spirit' in stats else None
self.soul_of_the_alpha_kills = int(
stats['kills_soul_of_the_alpha']) if 'kills_soul_of_the_alpha' in stats else None
self.frosty_the_snowman_kills = int(
stats['kills_frosty_the_snowman']) if 'kills_frosty_the_snowman' in stats else None
self.frozen_steve_kills = int(stats['kills_frozen_steve']) if 'kills_frozen_steve' in stats else None
self.catfish_kills = int(stats['kills_catfish']) if 'kills_catfish' in stats else None
self.dungeon_hub_crystal_core_anything_no_return_best_time = stats[
'dungeon_hub_crystal_core_anything_no_return_best_time'
] if 'dungeon_hub_crystal_core_anything_no_return_best_time' in stats else None
self.dungeon_hub_giant_mushroom_anything_no_return_best_time = stats[
'dungeon_hub_giant_mushroom_anything_no_return_best_time'
] if 'dungeon_hub_giant_mushroom_anything_no_return_best_time' in stats else None
self.dungeon_hub_giant_mushroom_no_pearls_no_return_best_time = stats[
'dungeon_hub_giant_mushroom_no_pearls_no_return_best_time'
] if 'dungeon_hub_giant_mushroom_no_pearls_no_return_best_time' in stats else None
self.dungeon_hub_precursor_ruins_anything_no_return_best_time = stats[
'dungeon_hub_precursor_ruins_anything_no_return_best_time'
] if 'dungeon_hub_precursor_ruins_anything_no_return_best_time' in stats else None
self.dungeon_hub_precursor_ruins_nothing_no_return_best_time = stats[
'dungeon_hub_precursor_ruins_nothing_no_return_best_time'
] if 'dungeon_hub_precursor_ruins_nothing_no_return_best_time' in stats else None
self.dungeon_hub_precursor_ruins_no_pearls_no_return_best_time = stats[
'dungeon_hub_precursor_ruins_no_pearls_no_return_best_time'
] if 'dungeon_hub_precursor_ruins_no_pearls_no_return_best_time' in stats else None
self.crypt_lurker_kills = int(stats['kills_crypt_lurker']) if 'kills_crypt_lurker' in stats else None
self.dungeon_respawning_skeleton_kills = int(
stats['kills_dungeon_respawning_skeleton']) if 'kills_dungeon_respawning_skeleton' in stats else None
self.scared_skeleton_kills = int(stats['kills_scared_skeleton']) if 'kills_scared_skeleton' in stats else None
self.skeleton_grunt_kills = int(stats['kills_skeleton_grunt']) if 'kills_skeleton_grunt' in stats else None
self.crypt_dreadlord_kills = int(stats['kills_crypt_dreadlord']) if 'kills_crypt_dreadlord' in stats else None
self.crypt_souleater_kills = int(stats['kills_crypt_souleater']) if 'kills_crypt_souleater' in stats else None
self.crypt_tank_zombie_kills = int(
stats['kills_crypt_tank_zombie']) if 'kills_crypt_tank_zombie' in stats else None
self.diamond_guy_kills = int(stats['kills_diamond_guy']) if 'kills_diamond_guy' in stats else None
self.zombie_grunt_kills = int(stats['kills_zombie_grunt']) if 'kills_zombie_grunt' in stats else None
self.crypt_lurker_deaths = int(stats['deaths_crypt_lurker']) if 'deaths_crypt_lurker' in stats else None
self.lost_adventurer_deaths = int(
stats['deaths_lost_adventurer']) if 'deaths_lost_adventurer' in stats else None
self.watcher_summon_undead_kills = int(
stats['kills_watcher_summon_undead']) if 'kills_watcher_summon_undead' in stats else None
self.skeleton_soldier_kills = int(
stats['kills_skeleton_soldier']) if 'kills_skeleton_soldier' in stats else None
self.diamond_guy_deaths = int(stats['deaths_diamond_guy']) if 'deaths_diamond_guy' in stats else None
self.watcher_summon_undead_deaths = int(
stats['deaths_watcher_summon_undead']) if 'deaths_watcher_summon_undead' in stats else None
self.bonzo_summon_undead_kills = int(
stats['kills_bonzo_summon_undead']) if 'kills_bonzo_summon_undead' in stats else None
self.lost_adventurer_kills = int(stats['kills_lost_adventurer']) if 'kills_lost_adventurer' in stats else None
self.skeleton_master_kills = int(stats['kills_skeleton_master']) if 'kills_skeleton_master' in stats else None
self.sniper_skeleton_kills = int(stats['kills_sniper_skeleton']) if 'kills_sniper_skeleton' in stats else None
self.skeleton_soldier_deaths = int(
stats['deaths_skeleton_soldier']) if 'deaths_skeleton_soldier' in stats else None
self.trap_deaths = int(stats['deaths_trap']) if 'deaths_trap' in stats else None
self.crypt_undead_kills = int(stats['kills_crypt_undead']) if 'kills_crypt_undead' in stats else None
self.skeleton_grunt_deaths = int(stats['deaths_skeleton_grunt']) if 'deaths_skeleton_grunt' in stats else None
self.scarf_warrior_deaths = int(stats['deaths_scarf_warrior']) if 'deaths_scarf_warrior' in stats else None
self.skeleton_master_deaths = int(
stats['deaths_skeleton_master']) if 'deaths_skeleton_master' in stats else None
self.blaze_higher_or_lower_kills = int(
stats['kills_blaze_higher_or_lower']) if 'kills_blaze_higher_or_lower' in stats else None
self.dungeon_respawning_skeleton_deaths = int(
stats['deaths_dungeon_respawning_skeleton']) if 'deaths_dungeon_respawning_skeleton' in stats else None
self.scarf_deaths = int(stats['deaths_scarf']) if 'deaths_scarf' in stats else None
self.bonzo_summon_undead_deaths = int(
stats['deaths_bonzo_summon_undead']) if 'deaths_bonzo_summon_undead' in stats else None
self.bonzo_deaths = int(stats['deaths_bonzo']) if 'deaths_bonzo' in stats else None
self.lonely_spider_kills = int(stats['kills_lonely_spider']) if 'kills_lonely_spider' in stats else None
self.parasite_kills = int(stats['kills_parasite']) if 'kills_parasite' in stats else None
self.cellar_spider_kills = int(stats['kills_cellar_spiders']) if 'kills_cellar_spiders' in stats else None
self.dungeon_secret_bat_kills = int(
stats['kills_dungeon_secret_bat']) if 'kills_dungeon_secret_bat' in stats else None
self.scarf_mage_kills = int(stats['kills_scarf_mage']) if 'kills_scarf_mage' in stats else None
self.crypt_undead_friedrich_kills = int(
stats['kills_crypt_undead_friedrich']) if 'kills_crypt_undead_friedrich' in stats else None
self.guardian_defender_kills = int(
stats['kills_guardian_defender']) if 'kills_guardian_defender' in stats else None
self.crypt_dreadlord_deaths = int(
stats['deaths_crypt_dreadlord']) if 'deaths_crypt_dreadlord' in stats else None
self.zombie_soldier_kills = int(stats['kills_zombie_soldier']) if 'kills_zombie_soldier' in stats else None
self.skeletor_deaths = int(stats['deaths_skeletor']) if 'deaths_skeletor' in stats else None
self.skeletor_kills = int(stats['kills_skeletor']) if 'kills_skeletor' in stats else None
self.professor_mage_guardian_deaths = int(
stats['deaths_professor_mage_guardian']) if 'deaths_professor_mage_guardian' in stats else None
self.sea_leech_kills = int(stats['kills_sea_leech']) if 'kills_sea_leech' in stats else None
self.sea_witch_kills = int(stats['kills_sea_witch']) if 'kills_sea_witch' in stats else None
self.skeleton_emperor_kills = int(
stats['kills_skeleton_emperor']) if 'kills_skeleton_emperor' in stats else None
self.mythos_burrows_dug_next = int(
stats['mythos_burrows_dug_next']) if 'mythos_burrows_dug_next' in stats else None
self.common_mythos_burrows_dug_next = int(
stats['mythos_burrows_dug_next_COMMON']) if 'mythos_burrows_dug_next_COMMON' in stats else None
self.mythos_burrows_dug_combat = int(
stats['mythos_burrows_dug_combat']) if 'mythos_burrows_dug_combat' in stats else None
self.common_mythos_burrows_dug_combat = int(
stats['mythos_burrows_dug_combat_COMMON']) if 'mythos_burrows_dug_combat_COMMON' in stats else None
self.mythos_kills = int(stats['kills_mythos']) if 'kills_mythos' in stats else None
self.minos_hunter_kills = int(stats['kills_minos_hunter']) if 'kills_minos_hunter' in stats else None
self.mythos_burrows_dug_treasure = int(
stats['mythos_burrows_dug_treasure']) if 'mythos_burrows_dug_treasure' in stats else None
self.common_mythos_burrows_dug_treasure = int(
stats['mythos_burrows_dug_treasure_COMMON']) if 'mythos_burrows_dug_treasure_COMMON' in stats else None
self.siamese_lynx_kills = int(stats['kills_siamese_lynx']) if 'kills_siamese_lynx' in stats else None
self.mythos_burrows_chains_complete = int(
stats['mythos_burrows_chains_complete']) if 'mythos_burrows_chains_complete' in stats else None
self.common_mythos_burrows_chains_complete = int(
stats['mythos_burrows_chains_complete_COMMON']) if 'mythos_burrows_chains_complete_COMMON' in stats else None
self.rare_mythos_burrows_dug_next = int(
stats['mythos_burrows_dug_next_RARE']) if 'mythos_burrows_dug_next_RARE' in stats else None
self.rare_mythos_burrows_dug_combat = int(
stats['mythos_burrows_dug_combat_RARE']) if 'mythos_burrows_dug_combat_RARE' in stats else None
self.minotaur_deaths = int(stats['deaths_minotaur']) if 'deaths_minotaur' in stats else None
self.minotaur_kills = int(stats['kills_minotaur']) if 'kills_minotaur' in stats else None
self.gaia_construct_kills = int(stats['kills_gaia_construct']) if 'kills_gaia_construct' in stats else None
self.rare_mythos_burrows_dug_treasure = int(
stats['mythos_burrows_dug_treasure_RARE']) if 'mythos_burrows_dug_treasure_RARE' in stats else None
self.rare_mythos_burrows_chains_complete = int(
stats['mythos_burrows_chains_complete_RARE']) if 'mythos_burrows_chains_complete_RARE' in stats else None
self.gaia_construct_deaths = int(stats['deaths_gaia_construct']) if 'deaths_gaia_construct' in stats else None
self.siamese_lynx_deaths = int(stats['deaths_siamese_lynx']) if 'deaths_siamese_lynx' in stats else None
self.deep_sea_protector_kills = int(
stats['kills_deep_sea_protector']) if 'kills_deep_sea_protector' in stats else None
self.water_hydra_kills = int(stats['kills_water_hydra']) if 'kills_water_hydra' in stats else None
self.blue_shark_kills = int(stats['kills_blue_shark']) if 'kills_blue_shark' in stats else None
self.tiger_shark_kills = int(stats['kills_tiger_shark']) if 'kills_tiger_shark' in stats else None
self.nurse_shark_kills = int(stats['kills_nurse_shark']) if 'kills_nurse_shark' in stats else None
self.crypt_souleater_deaths = int(
stats['deaths_crypt_souleater']) if 'deaths_crypt_souleater' in stats else None
self.zombie_knight_kills = int(stats['kills_zombie_knight']) if 'kills_zombie_knight' in stats else None
self.crypt_undead_valentin_kills = int(
stats['kills_crypt_undead_valentin']) if 'kills_crypt_undead_valentin' in stats else None
self.soul_of_the_alpha_deaths = int(
stats['deaths_soul_of_the_alpha']) if 'deaths_soul_of_the_alpha' in stats else None
self.dungeon_hub_precursor_ruins_no_abilities_no_return_best_time = stats[
'dungeon_hub_precursor_ruins_no_abilities_no_return_best_time'
] if 'dungeon_hub_precursor_ruins_no_abilities_no_return_best_time' in stats else None
self.crypt_wither_skeleton_kills = int(
stats['kills_crypt_witherskeleton']) if 'kills_crypt_witherskeleton' in stats else None
self.crypt_wither_skeleton_deaths = int(
stats['deaths_crypt_witherskeleton']) if 'deaths_crypt_witherskeleton' in stats else None
self.spirit_wolf_kills = int(stats['kills_spirit_wolf']) if 'kills_spirit_wolf' in stats else None
self.spirit_sheep_kills = int(stats['kills_spirit_sheep']) if 'kills_spirit_sheep' in stats else None
self.spirit_bull_kills = int(stats['kills_spirit_bull']) if 'kills_spirit_bull' in stats else None
self.spirit_rabbit_kills = int(stats['kills_spirit_rabbit']) if 'kills_spirit_rabbit' in stats else None
self.thorn_kills = int(stats['kills_thorn']) if 'kills_thorn' in stats else None
self.livid_clone_deaths = int(stats['deaths_livid_clone']) if 'deaths_livid_clone' in stats else None
self.sniper_skeleton_deaths = int(
stats['deaths_sniper_skeleton']) if 'deaths_sniper_skeleton' in stats else None
self.super_tank_zombie_kills = int(
stats['kills_super_tank_zombie']) if 'kills_super_tank_zombie' in stats else None
self.super_archer_kills = int(stats['kills_super_archer']) if 'kills_super_archer' in stats else None
self.tentaclees_deaths = int(stats['deaths_tentaclees']) if 'deaths_tentaclees' in stats else None
self.corrupted_protector_kills = int(
stats['kills_corrupted_protector']) if 'kills_corrupted_protector' in stats else None
self.professor_guardian_summon_kills = int(
stats['kills_professor_guardian_summon']) if 'kills_professor_guardian_summon' in stats else None
self.unstable_dragon_kills = int(stats['kills_unstable_dragon']) if 'kills_unstable_dragon' in stats else None
self.strong_dragon_kills = int(stats['kills_strong_dragon']) if 'kills_strong_dragon' in stats else None
self.spirit_bat_kills = int(stats['kills_spirit_bat']) if 'kills_spirit_bat' in stats else None
self.shadow_assassin_kills = int(stats['kills_shadow_assassin']) if 'kills_shadow_assassin' in stats else None
self.tentaclees_kills = int(stats['kills_tentaclees']) if 'kills_tentaclees' in stats else None
self.livid_deaths = int(stats['deaths_livid']) if 'deaths_livid' in stats else None
self.sadan_statue_deaths = int(stats['deaths_sadan_statue']) if 'deaths_sadan_statue' in stats else None
self.scary_jerry_kills = int(stats['kills_scary_jerry']) if 'kills_scary_jerry' in stats else None
self.wither_gourd_kills = int(stats['kills_wither_gourd']) if 'kills_wither_gourd' in stats else None
self.trick_or_treater_kills = int(
stats['kills_trick_or_treater']) if 'kills_trick_or_treater' in stats else None
self.phantom_spirit_kills = int(stats['kills_phantom_spirit']) if 'kills_phantom_spirit' in stats else None
self.wraith_kills = int(stats['kills_wraith']) if 'kills_wraith' in stats else None
self.batty_witch_kills = int(stats['kills_batty_witch']) if 'kills_batty_witch' in stats else None
self.zombie_commander_kills = int(
stats['kills_zombie_commander']) if 'kills_zombie_commander' in stats else None
self.watcher_guardian_deaths = int(
stats['deaths_watcher_guardian']) if 'deaths_watcher_guardian' in stats else None
self.skeletor_prime_kills = int(stats['kills_skeletor_prime']) if 'kills_skeletor_prime' in stats else None
self.super_tank_zombie_deaths = int(
stats['deaths_super_tank_zombie']) if 'deaths_super_tank_zombie' in stats else None
self.skeletor_prime_deaths = int(stats['deaths_skeletor_prime']) if 'deaths_skeletor_prime' in stats else None
self.great_white_shark_kills = int(
stats['kills_great_white_shark']) if 'kills_great_white_shark' in stats else None
self.zombie_knight_deaths = int(stats['deaths_zombie_knight']) if 'deaths_zombie_knight' in stats else None
self.suffocation_deaths = int(stats['deaths_suffocation']) if 'deaths_suffocation' in stats else None
self.protector_dragon_deaths = int(
stats['deaths_protector_dragon']) if 'deaths_protector_dragon' in stats else None
self.sadan_deaths = int(stats['deaths_sadan']) if 'deaths_sadan' in stats else None
self.sadan_golem_deaths = int(stats['deaths_sadan_golem']) if 'deaths_sadan_golem' in stats else None
self.watcher_scarf_deaths = int(stats['deaths_watcher_scarf']) if 'deaths_watcher_scarf' in stats else None
self.scarf_warrior_kills = int(stats['kills_scarf_warrior']) if 'kills_scarf_warrior' in stats else None
self.crypt_undead_deaths = int(stats['deaths_crypt_undead']) if 'deaths_crypt_undead' in stats else None
self.watcher_scarf_kills = int(stats['kills_watcher_scarf']) if 'kills_watcher_scarf' in stats else None
self.spirit_bat_deaths = int(stats['deaths_spirit_bat']) if 'deaths_spirit_bat' in stats else None
self.spirit_miniboss_deaths = int(
stats['deaths_spirit_miniboss']) if 'deaths_spirit_miniboss' in stats else None
self.spirit_chicken_deaths = int(stats['deaths_spirit_chicken']) if 'deaths_spirit_chicken' in stats else None
self.spirit_sheep_deaths = int(stats['deaths_spirit_sheep']) if 'deaths_spirit_sheep' in stats else None
self.crypt_undead_marius_kills = int(
stats['kills_crypt_undead_marius']) if 'kills_crypt_undead_marius' in stats else None
class SkyBlockObjective(object):
r"""Represents a SkyBlock Objective.
:param objective_name: The name of the objective.
:type objective_name: str
:param objective_data: The objective's data.
:type objective_data: dict"""
def __init__(self, objective_name: str, objective_data: dict):
self.name = objective_name
self.status = objective_data['status']
self.progress = objective_data['progress']
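# Hypixel timestamps are in milliseconds; a value of 0 means the
# objective was never completed.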
self.completed_at = datetime.datetime.fromtimestamp(
objective_data['completed_at'] / 1000
) if objective_data['completed_at'] != 0 else None
class SkyBlockQuest(object):
r"""Represents a SkyBlock quest.
:param quest_name: The name of the quest.
:type quest_name: str
:param quest_data: The quest's data.
:type quest_data: dict"""
def __init__(self, quest_name: str, quest_data: dict):
self.name = quest_name
self.status = quest_data['status']
self.activated_at = datetime.datetime.fromtimestamp(
quest_data['activated_at'] / 1000
)
self.completed_at = datetime.datetime.fromtimestamp(
quest_data['completed_at'] / 1000
) if quest_data['completed_at'] != 0 else None
class SkyBlockSlayer(object):
r"""Represents a SkyBlock slayer.
:param slayer: The name of the slayer.
:type slayer: str
:param slayer_data: The slayer's data.
:type slayer_data: dict"""
def __init__(self, slayer: str, slayer_data: dict):
self.slayer = slayer
self.claimed_levels = slayer_data['claimed_levels']
self.xp = slayer_data['xp']
self.level = SLAYER_LEVEL_FUNCS[slayer](slayer_data['xp'])
class SkyBlockPet(object):
r"""Represents a SkyBlock pet.
:param pet_data: The pet's data.
:type pet_data: dict"""
def __init__(self, pet_data: dict):
self.uuid = pet_data['uuid']
self.type = pet_data['type']
self.xp = pet_data['exp']
self.active = pet_data['active']
self.tier = pet_data['tier']
self.held_item = pet_data['heldItem']
self.candy_used = pet_data['candyUsed']
self.skin = pet_data['skin']
class SkyBlockSkill(object):
r"""Represents a SkyBlock skill.
:param name: The skill's name.
:type name: str
:param skill_data: The skill's data.
:type skill_data: dict"""
def __init__(self, name, skill_data):
self.name = name
self.level = skill_data['level']
self.xp = skill_data['xp']
| 73.955645 | 120 | 0.733684 | 5,198 | 36,682 | 4.825702 | 0.076953 | 0.084835 | 0.11621 | 0.158468 | 0.638056 | 0.462805 | 0.320722 | 0.147385 | 0.102256 | 0.063028 | 0 | 0.000735 | 0.184287 | 36,682 | 495 | 121 | 74.105051 | 0.837578 | 0.052832 | 0 | 0.004728 | 0 | 0 | 0.321464 | 0.185294 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014184 | false | 0 | 0.004728 | 0 | 0.033097 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdba7af199ee6c2c990e85c3f998b299c41d4413 | 604 | py | Python | nicos_virt_mlz/reseda/setups/guidehall.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z | nicos_virt_mlz/reseda/setups/guidehall.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z | nicos_virt_mlz/reseda/setups/guidehall.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z | description = 'FRM II Neutron guide hall west infrastructure devices'
group = 'lowlevel'
devices = dict(
Sixfold = device('nicos.devices.generic.ManualSwitch',
description = 'Sixfold shutter status',
states = ('closed', 'open'),
pollinterval = 60,
maxage = 120,
),
Crane = device('nicos.devices.generic.ManualMove',
description = 'The position of the crane in the guide '
'hall West measured from the east end',
abslimits = (0, 60),
pollinterval = 5,
maxage = 30,
unit = 'm',
fmtstr = '%.1f',
),
)
| 27.454545 | 69 | 0.584437 | 63 | 604 | 5.603175 | 0.698413 | 0.050992 | 0.073654 | 0.141643 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028302 | 0.298013 | 604 | 21 | 70 | 28.761905 | 0.804245 | 0 | 0 | 0.105263 | 0 | 0 | 0.395695 | 0.109272 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdbb658871214d92211c98f23c493a5bef0ef8d6 | 2,366 | py | Python | papermerge/core/management/commands/checkaccess.py | MinchinWeb/papermerge | 8a5f73207413a3ea8989d277e140d448baa35ca4 | [
"Apache-2.0"
] | null | null | null | papermerge/core/management/commands/checkaccess.py | MinchinWeb/papermerge | 8a5f73207413a3ea8989d277e140d448baa35ca4 | [
"Apache-2.0"
] | null | null | null | papermerge/core/management/commands/checkaccess.py | MinchinWeb/papermerge | 8a5f73207413a3ea8989d277e140d448baa35ca4 | [
"Apache-2.0"
] | null | null | null | import logging
from django.core.management.base import BaseCommand, CommandError
try:
from django_tenants.utils import get_tenant_model
except ImportError:
get_tenant_model = None
from django.db import connection
from papermerge.core.models import (
BaseTreeNode,
Access
)
from papermerge.core.auth import (
create_access_perms
)
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = """Lists/Updates Access Models associated with nodes.
"""
def add_arguments(self, parser):
parser.add_argument(
'--count',
'-c',
action="store_true",
help="Count nodes with/without associated access model."
)
parser.add_argument(
'--update',
'-u',
action="store_true",
help="Updated nodes without associated access model."
)
parser.add_argument(
'--schema-name',
'-s',
help="Run checkaccess for this schema."
)
def run_count(
self,
):
total_count = BaseTreeNode.objects.count()
without_access_count = BaseTreeNode.objects.filter(
access__isnull=True
).count()
print(
f"total={total_count}, without_access={without_access_count}"
)
def run_update(
self
):
perms = create_access_perms()
for node in BaseTreeNode.objects.all():
if node.access_set.count() == 0:
access = Access.objects.create(
user=node.user,
access_type='allow',
node=node
)
access.permissions.add(*perms)
def handle(self, *args, **options):
count = options.get(
'count',
False
)
update = options.get(
'update',
False
)
schema_name = options.get('schema_name', False)
if get_tenant_model is None:
raise CommandError('django_tenants is not installed; checkaccess requires a multi-tenant setup.')
TenantModel = get_tenant_model()
if schema_name:
tenant_list = TenantModel.objects.filter(name=schema_name)
else:
tenant_list = TenantModel.objects.exclude(name="public")
for tenant in tenant_list:
connection.set_tenant(tenant)
if count:
self.run_count()
elif update:
self.run_update()
| 24.645833 | 73 | 0.554522 | 235 | 2,366 | 5.395745 | 0.353191 | 0.039432 | 0.033123 | 0.029968 | 0.070978 | 0.070978 | 0.070978 | 0 | 0 | 0 | 0 | 0.000653 | 0.352494 | 2,366 | 95 | 74 | 24.905263 | 0.827024 | 0 | 0 | 0.113924 | 0 | 0 | 0.136517 | 0.015638 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050633 | false | 0 | 0.075949 | 0 | 0.151899 | 0.012658 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdbe3e3e964d3de239112acfd757c2553c93386b | 678 | py | Python | 11.dumbo-octopus/py/part1.py | rolandbernard/adventofcode-2021 | 9249815af62d0fcf79b71357330a1456ea3be1ed | [
"BSD-2-Clause"
] | null | null | null | 11.dumbo-octopus/py/part1.py | rolandbernard/adventofcode-2021 | 9249815af62d0fcf79b71357330a1456ea3be1ed | [
"BSD-2-Clause"
] | null | null | null | 11.dumbo-octopus/py/part1.py | rolandbernard/adventofcode-2021 | 9249815af62d0fcf79b71357330a1456ea3be1ed | [
"BSD-2-Clause"
] | null | null | null |
import sys
import numpy as np
raw = sys.stdin.read()
# Parse the octopus energy levels into a 2D integer grid; the name ``grid``
# avoids shadowing the built-in ``map``.
grid = np.array([[c for c in l] for l in raw.split('\n') if len(l) != 0], dtype=int)
def energize(grid, i, j):
# Bump the energy level; an octopus at 10+ has already flashed this step.
if i >= 0 and j >= 0 and i < grid.shape[0] and j < grid.shape[1] and grid[i, j] < 10:
grid[i, j] += 1
if grid[i, j] >= 10:
# A flash energizes all eight neighbours, possibly chaining further flashes.
for di, dj in [(di, dj) for di in range(-1, 2) for dj in range(-1, 2) if di != 0 or dj != 0]:
energize(grid, i + di, j + dj)
flashes = 0
for _ in range(100):
for i in range(grid.shape[0]):
for j in range(grid.shape[1]):
energize(grid, i, j)
# Count this step's flashes, then reset flashed octopuses to energy 0.
flashes += (grid >= 10).sum()
grid[grid >= 10] = 0
print('Result:', flashes)
| 26.076923 | 105 | 0.513274 | 130 | 678 | 2.669231 | 0.3 | 0.069164 | 0.072046 | 0.074928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.057325 | 0.30531 | 678 | 25 | 106 | 27.12 | 0.679406 | 0 | 0 | 0 | 0 | 0 | 0.013314 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.111111 | 0 | 0.166667 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdbe6b219cb1418af34e685a56c258fed902050b | 10,853 | py | Python | rlb/utils.py | jaekyeom/drop-bottleneck | 85b64ce72ac22af56e167da2817c295b79a03eb7 | [
"Apache-2.0",
"MIT"
] | 8 | 2021-03-16T05:37:41.000Z | 2021-06-18T05:15:15.000Z | rlb/utils.py | jaekyeom/drop-bottleneck | 85b64ce72ac22af56e167da2817c295b79a03eb7 | [
"Apache-2.0",
"MIT"
] | null | null | null | rlb/utils.py | jaekyeom/drop-bottleneck | 85b64ce72ac22af56e167da2817c295b79a03eb7 | [
"Apache-2.0",
"MIT"
] | 2 | 2021-06-23T08:15:16.000Z | 2021-08-30T14:13:58.000Z | from __future__ import print_function
from collections import OrderedDict, defaultdict
import numpy as np
import random
import copy
#from mpi_util import mpi_moments
#def fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0):
# with tf.variable_scope(scope):
# nin = x.get_shape()[1].value
# w = tf.get_variable("w", [nin, nh], initializer=ortho_init(init_scale))
# b = tf.get_variable("b", [nh], initializer=tf.constant_initializer(init_bias))
# return tf.matmul(x, w)+b
#
#def conv(x, scope, *, nf, rf, stride, pad='VALID', init_scale=1.0, data_format='NHWC', one_dim_bias=False, bias_initializer=tf.constant_initializer(0.0)):
# if data_format == 'NHWC':
# channel_ax = 3
# strides = [1, stride, stride, 1]
# bshape = [1, 1, 1, nf]
# elif data_format == 'NCHW':
# channel_ax = 1
# strides = [1, 1, stride, stride]
# bshape = [1, nf, 1, 1]
# else:
# raise NotImplementedError
# bias_var_shape = [nf] if one_dim_bias else [1, nf, 1, 1]
# nin = x.get_shape()[channel_ax].value
# wshape = [rf, rf, nin, nf]
# with tf.variable_scope(scope):
# w = tf.get_variable("w", wshape, initializer=ortho_init(init_scale))
# b = tf.get_variable("b", bias_var_shape, initializer=bias_initializer)
# if not one_dim_bias and data_format == 'NHWC':
# b = tf.reshape(b, bshape)
# return b + tf.nn.conv2d(x, w, strides=strides, padding=pad, data_format=data_format)
#
#
#def deconv(x, scope, *, nf, rf, stride, init_scale=1.0, data_format='NHWC'):
# if data_format == 'NHWC':
# channel_ax = 3
# strides = (stride, stride)
# #strides = [1, stride, stride, 1]
# elif data_format == 'NCHW':
# channel_ax = 1
# strides = (stride, stride)
# #strides = [1, 1, stride, stride]
# else:
# raise NotImplementedError
#
# with tf.variable_scope(scope):
# out = tf.contrib.layers.conv2d_transpose(x,
# num_outputs=nf,
# kernel_size=rf,
# stride=strides,
# padding='VALID',
# weights_initializer=ortho_init(init_scale),
# biases_initializer=tf.constant_initializer(0.0),
# activation_fn=None,
# data_format=data_format)
# return out
#
#
#def ortho_init(scale=1.0):
# def _ortho_init(shape, dtype, partition_info=None):
# #lasagne ortho init for tf
# shape = tuple(shape)
# if len(shape) == 2:
# flat_shape = shape
# elif len(shape) == 4: # assumes NHWC
# flat_shape = (np.prod(shape[:-1]), shape[-1])
# else:
# raise NotImplementedError
# a = np.random.normal(0.0, 1.0, flat_shape)
# u, _, v = np.linalg.svd(a, full_matrices=False)
# q = u if u.shape == flat_shape else v # pick the one with the correct shape
# q = q.reshape(shape)
# return (scale * q[:shape[0], :shape[1]]).astype(np.float32)
# return _ortho_init
def tile_images(array, n_cols=None, max_images=None, div=1):
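# Tiles a batch of images (N, H, W[, C]) into one large grid image with
# n_cols columns (by default roughly sqrt(N), rounded down to a multiple
# of div); missing cells are filled with zeros.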
if max_images is not None:
array = array[:max_images]
if len(array.shape) == 4 and array.shape[3] == 1:
array = array[:, :, :, 0]
assert len(array.shape) in [3, 4], "wrong number of dimensions - shape {}".format(array.shape)
if len(array.shape) == 4:
assert array.shape[3] == 3, "wrong number of channels- shape {}".format(array.shape)
if n_cols is None:
n_cols = max(int(np.sqrt(array.shape[0])) // div * div, div)
n_rows = int(np.ceil(float(array.shape[0]) / n_cols))
def cell(i, j):
ind = i * n_cols + j
return array[ind] if ind < array.shape[0] else np.zeros(array[0].shape)
def row(i):
return np.concatenate([cell(i, j) for j in range(n_cols)], axis=1)
return np.concatenate([row(i) for i in range(n_rows)], axis=0)
def set_global_seeds(i):
try:
import tensorflow as tf
except ImportError:
pass
else:
#from mpi4py import MPI
tf.set_random_seed(i)
np.random.seed(i)
random.seed(i)
#def explained_variance_non_mpi(ypred,y):
# """
# Computes fraction of variance that ypred explains about y.
# Returns 1 - Var[y-ypred] / Var[y]
#
# interpretation:
# ev=0 => might as well have predicted zero
# ev=1 => perfect prediction
# ev<0 => worse than just predicting zero
#
# """
# assert y.ndim == 1 and ypred.ndim == 1
# vary = np.var(y)
# return np.nan if vary==0 else 1 - np.var(y-ypred)/vary
#
#def mpi_var(x):
# return mpi_moments(x)[1]**2
#
#def explained_variance(ypred,y):
# """
# Computes fraction of variance that ypred explains about y.
# Returns 1 - Var[y-ypred] / Var[y]
#
# interpretation:
# ev=0 => might as well have predicted zero
# ev=1 => perfect prediction
# ev<0 => worse than just predicting zero
#
# """
# assert y.ndim == 1 and ypred.ndim == 1
# vary = mpi_var(y)
# return np.nan if vary==0 else 1 - mpi_var(y-ypred)/vary
def add_noise(img, noise_p, noise_type):
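# Overlays Gaussian noise on a random subset of 12x12 blocks of the last
# channel for randomly selected images in the batch (the code assumes
# 84x84 frames, as is typical for Atari-style observations). Note that
# ``noise_type`` is currently unused, and the uint8 cast wraps negative
# noise samples around, which effectively adds speckle-like artifacts.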
noise_mask = np.random.binomial(1, noise_p, size=img.shape[0]).astype(bool)
w = 12
n = 84//12
idx_list = np.arange(n*n)
random.shuffle(idx_list)
idx_list = idx_list[:np.random.randint(10, 40)]
for i in range(img.shape[0]):
if not noise_mask[i]:
continue
for idx in idx_list:
y = (idx // n)*w
x = (idx % n)*w
img[i, y:y+w, x:x+w, -1] += np.random.normal(0, 255*0.3, size=(w,w)).astype(np.uint8)
img = np.clip(img, 0., 255.)
return img
g_font = [None]
def draw_text_to_image(text, height=None, width=None, channels=None):
from PIL import Image, ImageDraw, ImageFont
if g_font[0] is None:
g_font[0] = ImageFont.load_default()
font = g_font[0]
# ImageFont.ImageFont.getsize doesn't work for multi-line strings.
# https://github.com/python-pillow/Pillow/issues/2966
#text_size = font.getsize(text)
dummy_img = Image.fromarray(np.zeros((1, 1), dtype=np.uint8))
dummy_draw = ImageDraw.Draw(dummy_img)
text_size = dummy_draw.textsize(text, font=font)
if channels is None:
shape = (height or text_size[1], width or text_size[0])
else:
shape = (height or text_size[1], width or text_size[0], channels)
i = np.zeros(shape, dtype=np.uint8)
img = Image.fromarray(i)
draw = ImageDraw.Draw(img)
draw.text((3, 0), text, font=font, fill=255 if channels is None else (255,) * channels)
return np.asarray(img)
def get_percentile_indices(data, percentiles=np.arange(0.0, 1.05, 0.1)):
assert len(data.shape) == 1
data_asc = np.argsort(data)
percentile_indices = (percentiles * (len(data_asc) - 1)).astype(int)
percentile_indices = data_asc[percentile_indices]
#assert np.all(data[percentile_indices[:-1]] <= data[percentile_indices[1:]])
return percentile_indices
class CContext():
def __init__(self, verbose=False, print_func=print):
self._state_funcs = OrderedDict()
self._evaluated_states = OrderedDict()
self._dependencies = defaultdict(set)
self._eval_context = []
self._verbose = verbose
self._print_func = print_func
def register_state(self, name, create):
if name in self._state_funcs:
raise Exception('State already registered: {}'.format(name))
self._state_funcs[name] = create
def invalidate_state(self, name):
if name not in self._evaluated_states:
return
del self._evaluated_states[name]
if self._verbose:
self._print_func('Invalidated state "{}"'.format(name))
for n in self._dependencies[name]:
self.invalidate_state(n)
del self._dependencies[name]
def __getattr__(self, attr):
if attr not in self._state_funcs:
raise Exception('Unknown state {}'.format(attr))
if attr in self._eval_context:
raise Exception('Circular dependency detected: {}, {}'.format(attr, self._eval_context))
self._dependencies[attr] = self._dependencies[attr].union(set(self._eval_context))
if attr not in self._evaluated_states:
self._eval_context.append(attr)
evaluated_state = self._state_funcs[attr](self)
if self._verbose:
self._print_func('Evaluated state "{}"'.format(attr))
self._eval_context.pop()
self._evaluated_states[attr] = evaluated_state
return self._evaluated_states[attr]
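# Minimal illustrative usage of CContext (``load_data`` and ``build_model``
# are hypothetical):
# ctx = CContext(verbose=True)
# ctx.register_state('data', lambda c: load_data())
# ctx.register_state('model', lambda c: build_model(c.data))
# _ = ctx.model # lazily evaluates 'data', then 'model'; both are cached
# ctx.invalidate_state('data') # also invalidates 'model' via the tracked dependency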
class EmptyClass():
pass
# From https://github.com/openai/large-scale-curiosity/blob/0c3d179fd61ee46233199d0891c40fbe7964d3aa/cppo_agent.py#L226-L236
class RewardForwardFilter(object):
def __init__(self, gamma):
self.rewems = None
self.gamma = gamma
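# Maintains the discounted running return rewems_t = gamma * rewems_{t-1} + r_t;
# in the curiosity codebase referenced above this filtered signal is used to
# normalise intrinsic rewards by its standard deviation.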
def update(self, rews):
if self.rewems is None:
self.rewems = rews
else:
self.rewems = self.rewems * self.gamma + rews
return self.rewems
class RunningMeanStd(object):
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
def __init__(self, epsilon=1e-4, shape=()):
self.mean = np.zeros(shape, 'float64')
self.var = np.ones(shape, 'float64')
self.count = epsilon
def update(self, x):
batch_mean = np.mean(x, axis=0)
batch_var = np.var(x, axis=0)
batch_count = x.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
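# Combine the running moments with the batch moments using Chan et al.'s
# parallel variance formula (see the Wikipedia reference above).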
def update_from_moments(self, batch_mean, batch_var, batch_count):
self.mean, self.var, self.count = update_mean_var_count_from_moments(
self.mean, self.var, self.count, batch_mean, batch_var, batch_count)
class SimpleWeightedMovingScalarMeanStd():
def __init__(self, alpha=0.0001):
self._alpha = alpha
self.mean = 0.0
self.var = 1.0
def update(self, values):
self.mean = (1 - self._alpha) * self.mean + self._alpha * np.mean(values)
self.var = (1 - self._alpha) * self.var + self._alpha * np.mean(np.square(values - self.mean))
def update_mean_var_count_from_moments(mean, var, count, batch_mean, batch_var, batch_count):
delta = batch_mean - mean
tot_count = count + batch_count
new_mean = mean + delta * batch_count / tot_count
m_a = var * count
m_b = batch_var * batch_count
M2 = m_a + m_b + np.square(delta) * count * batch_count / tot_count
new_var = M2 / tot_count
new_count = tot_count
return new_mean, new_var, new_count
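# Illustrative check (made-up numbers): combining (mean=0, var=1, count=100)
# with a batch (mean=1, var=1, count=100) gives delta=1, new_mean=0.5 and
# M2 = 100 + 100 + 1*100*100/200 = 250, hence new_var = 250/200 = 1.25.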
| 36.789831 | 155 | 0.609417 | 1,501 | 10,853 | 4.223185 | 0.207195 | 0.017353 | 0.017984 | 0.014198 | 0.265184 | 0.189778 | 0.13693 | 0.118946 | 0.097492 | 0.097492 | 0 | 0.024378 | 0.262969 | 10,853 | 294 | 156 | 36.914966 | 0.768096 | 0.400719 | 0 | 0.04698 | 0 | 0 | 0.032415 | 0 | 0 | 0 | 0 | 0 | 0.020134 | 1 | 0.127517 | false | 0.013423 | 0.053691 | 0.006711 | 0.281879 | 0.033557 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdc046b158b884fea2bcbaf2bb1204d34d3b4b00 | 4,565 | py | Python | pydtnsim/routing/cgr_utils.py | ducktec/pydtnsim | 916b0ebfa2b65b7a80af293dd4c39f862eadeae3 | [
"MIT"
] | 8 | 2018-12-11T17:39:44.000Z | 2021-05-07T10:24:03.000Z | pydtnsim/routing/cgr_utils.py | Elianelin/pydtnsim | 916b0ebfa2b65b7a80af293dd4c39f862eadeae3 | [
"MIT"
] | 13 | 2019-01-14T14:08:15.000Z | 2021-06-12T17:03:43.000Z | pydtnsim/routing/cgr_utils.py | Elianelin/pydtnsim | 916b0ebfa2b65b7a80af293dd4c39f862eadeae3 | [
"MIT"
] | 4 | 2019-03-20T15:12:40.000Z | 2022-02-22T06:16:24.000Z | """Module of commonly shared functions of various flavours of CGR."""
import math
def cgr_neighbor_function(contact_graph, node, destination, current_distance,
set_visited, suppressed_contacts, lookahead_time):
"""Neighbor function of CGR used by the Dijkstra algorithm.
Used to determine the feasible direct neighbors of a given node.
Args:
contact_graph (ContactGraph): The topology information in the form
of a contact graph
node (tuple): The evaluated node in the contact graph node form
``(from_node, to_node, from_time, to_time, data_rate)``.
destination (tuple): The nominal destination node in the form
``(destination_id, destination_id, 0, math.inf, math.inf)``
current_distance (int): Contains the weight of the shortest path
to the currently investigated node (in ms).
set_visited (set): Set used for storing the visited flag
of nodes during the Dijkstra runs. Also used for excluding
suppressed (physical) nodes.
suppressed_contacts (list): List of contacts that shall not be
considered for forwarding (and thus neighbor selection)
lookahead_time (int): Time value that specifies a time window
(or rather a maximum time) only in which routes are searched.
This reduces the time necessary to find a shortest route.
Returns:
list: A list of all feasible neighbors with items of the form
``(<node_id>, weight)`` with ``<node_id>`` representing a certain
contact in the contact graph.
"""
neighbors = []
# Set the node as visited
set_visited.add(node.from_node)
# Extract the start time of the given node
for edge in contact_graph.graph[node].successors:
# Break the loop if the found edge to_time is smaller than the
# current distance. As the successor list is sorted, all subsequent
# edges will be smaller as well.
if edge.to_time <= current_distance:
break
# Only consider a neighbor when it has not been visited by Dijkstra yet,
# it is not in the suppressed_contacts list,
# it can still be reached at the currently considered point in time,
# and it lies within the lookahead window (only when a lookahead
# window is used)
if ((lookahead_time is None or edge.from_time < lookahead_time)
and edge.to_node not in set_visited
and edge not in suppressed_contacts
and (edge.to_time > current_distance)):
# Only add to neighbors if no artificial end node or artificial end
# node is bundle's destination
if edge == destination or edge.from_node != edge.to_node:
# Calculate the time (which is either positive or 0, relevant
# for artificial terminal nodes)
weight = edge.from_time - current_distance
weight = max(weight, 0)
# Append to neighbor list with weight
neighbors.append((edge, weight))
return neighbors
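# Illustrative example (hypothetical contacts): from the contact
# (A, B, 0, 100, 9600), a successor (B, C, 50, 200, 9600) is returned as a
# neighbor with weight max(50 - current_distance, 0), provided it is
# unvisited, not suppressed, still reachable (to_time > current_distance)
# and inside the lookahead window.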
def cgr_get_route_characteristics(route, distance):
"""Calculate characteristics of a certain route.
Args:
        route (list): A list of the nodes of the calculated route, whose
            elements carry all information relevant for determining the
            characteristics.
        distance (int): The precalculated distance (note: recomputed below
            from the route's contact times, so the passed value is
            effectively ignored).
Returns:
tuple: A tuple consisting of the (precalculated) distance, the capacity
and the end time of the availability of that route
"""
capacity = math.inf
distance = 0
# Iterate over all nodes in route and check if capacity is smaller than
# already found minimum
for node in route:
distance = max(distance, node.from_time)
# Generate capacity for node's contact
capacity_new = ((node.to_time - distance) * node.datarate)
# Update capacity if smaller
if capacity_new < capacity:
capacity = capacity_new
    # The to_time of a route is the minimum end time of a contact within this
    # route (minus the assumed signal propagation delay, which is considered
    # negligible here)
to_time = min([node.to_time for node in route])
# Return the characteristics tuple consisting of the route distance (i.e.
# the arrival time), the route capacity and the route availability end
# time (i.e. the to-time)
return (distance, capacity, to_time)
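# Minimal usage sketch (added; Contact is a hypothetical stand-in, the real
# node objects only need from_time, to_time and datarate attributes here):
#   from collections import namedtuple
#   Contact = namedtuple('Contact', 'from_time to_time datarate')
#   route = [Contact(0, 100, 10), Contact(20, 80, 5)]
#   cgr_get_route_characteristics(route, 0)  # -> (20, 300, 80)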
| 42.268519 | 79 | 0.660241 | 611 | 4,565 | 4.847791 | 0.289689 | 0.020257 | 0.016205 | 0.011479 | 0.01688 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001225 | 0.284556 | 4,565 | 107 | 80 | 42.663551 | 0.905695 | 0.645126 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.037037 | 0 | 0.185185 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdc04f43fced1ed2108de24776b9c054870c3a6d | 785 | py | Python | rxbp/multicast/multicastobservers/mapmulticastobserver.py | MichaelSchneeberger/rx_backpressure | 16173827498bf1bbee3344933cb9efbfd19699f5 | [
"Apache-2.0"
] | 24 | 2018-11-22T21:04:49.000Z | 2021-11-08T11:18:09.000Z | rxbp/multicast/multicastobservers/mapmulticastobserver.py | MichaelSchneeberger/rx_backpressure | 16173827498bf1bbee3344933cb9efbfd19699f5 | [
"Apache-2.0"
] | 1 | 2019-02-06T15:58:46.000Z | 2019-02-12T20:31:50.000Z | rxbp/multicast/multicastobservers/mapmulticastobserver.py | MichaelSchneeberger/rx_backpressure | 16173827498bf1bbee3344933cb9efbfd19699f5 | [
"Apache-2.0"
] | 1 | 2021-01-26T12:41:37.000Z | 2021-01-26T12:41:37.000Z | from dataclasses import dataclass
from typing import Callable
from rxbp.multicast.multicastobserver import MultiCastObserver
from rxbp.multicast.typing import MultiCastItem
@dataclass
class MapMultiCastObserver(MultiCastObserver):
source: MultiCastObserver
func: Callable[[MultiCastItem], MultiCastItem]
def on_next(self, item: MultiCastItem) -> None:
try:
def map_gen():
for v in item:
yield self.func(v)
            mapped = map_gen()  # renamed from 'next' to avoid shadowing the builtin
except Exception as exc:
self.source.on_error(exc)
else:
            self.source.on_next(mapped)
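    # Note (added): map_gen() is lazy, so self.func runs only when the
    # downstream observer iterates the item; the try/except above therefore
    # only guards construction of the generator, and errors raised by
    # self.func surface later, during iteration downstream.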
def on_error(self, exc: Exception) -> None:
self.source.on_error(exc)
def on_completed(self) -> None:
self.source.on_completed() | 27.068966 | 62 | 0.650955 | 88 | 785 | 5.704545 | 0.386364 | 0.079681 | 0.095618 | 0.067729 | 0.079681 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.26879 | 785 | 29 | 63 | 27.068966 | 0.874564 | 0 | 0 | 0.090909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.181818 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdc402ae42475915911f7485b03af5085a350424 | 2,413 | py | Python | clean-up-pdf.py | spider-walker/reading-pdf-files-mongo | 3a7b5346bd8e5bedfba388ea9a0053cd8330d332 | [
"Apache-2.0"
] | null | null | null | clean-up-pdf.py | spider-walker/reading-pdf-files-mongo | 3a7b5346bd8e5bedfba388ea9a0053cd8330d332 | [
"Apache-2.0"
] | null | null | null | clean-up-pdf.py | spider-walker/reading-pdf-files-mongo | 3a7b5346bd8e5bedfba388ea9a0053cd8330d332 | [
"Apache-2.0"
] | null | null | null | with open('./data/data2017.txt') as f:
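    # (Descriptive comment added; behavior unchanged.) Each line of the
    # dumped data is scrubbed of pandas-style artifacts ('int64', 'Name',
    # 'dtype', 'object', 'float64', 'NaN'), split into tokens, and padded
    # with '0' placeholders until every record has a fixed column layout;
    # header/footer rows listed in `notwanted` are skipped and the rest are
    # printed as comma-separated values prefixed with their field count.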
lines = f.readlines()
for ln in lines:
ln = ln.replace(',', '').replace(':', '').replace('int64', '') \
.replace('Name', '').replace('dtype', '').replace('/', ' ') \
.replace('object', '').replace('float64', ' ') \
.replace('NaN', '').replace('NaN', ' ') \
.replace('.', ' ')
text = ln.split()
if text[0].isnumeric():
text.pop(0)
if text[0].isnumeric():
text.pop(0)
if text[0].isnumeric():
text.pop(0)
if not text[1].isnumeric():
text[0] = f'{text[0]} {text[1]}'
text.pop(1)
if not text[2].isnumeric():
text.insert(2, '0')
text.insert(3, '0')
if not text[1].isnumeric():
text[0] = f'{text[0]} {text[1]}'
text.pop(1)
if len(text) > 6 and not text[5].isnumeric():
text[4] = f'{text[4]} {text[5]}'
text.pop(5)
if len(text) > 6 and not text[5].isnumeric():
text[4] = f'{text[4]} {text[5]}'
text.pop(5)
if len(text) > 7 and not text[6].isnumeric():
text.insert(6, '0')
text.insert(7, '0')
if len(text) > 9 and not text[9].isnumeric():
text[8] = f'{text[8]} {text[9]}'
text.pop(9)
if len(text) > 9 and not text[9].isnumeric():
text[8] = f'{text[8]} {text[9]}'
text.pop(9)
if len(text) > 9 and not text[9].isnumeric():
text[8] = f'{text[8]} {text[9]}'
text.pop(9)
if len(text) > 7 and not text[7].isnumeric():
text.insert(7, '0')
text.insert(9, '0')
if len(text) == 10:
text.insert(9, '0')
if len(text) == 10:
text.insert(9, '0')
if len(text) == 11 and text[9].isnumeric() and int(text[9].strip())>100:
text.insert(9, '0')
if len(text) == 7:
text.insert(1, '0')
text.insert(3, '0')
text.insert(6, '0')
text.insert(7, '0')
text.insert(9, '0')
notwanted = ['-------------------------------', 'CONSTITUENCY_NAME', 'GRAND TOTAL', 'CAW']
if not set(text) & set(notwanted):
text.insert(0, f'{len(text)}')
s = ','.join(text)
print(f' {s}')
| 30.544304 | 98 | 0.426855 | 312 | 2,413 | 3.298077 | 0.166667 | 0.145773 | 0.09621 | 0.048591 | 0.637512 | 0.613217 | 0.613217 | 0.571429 | 0.501458 | 0.501458 | 0 | 0.064267 | 0.35516 | 2,413 | 78 | 99 | 30.935897 | 0.597044 | 0 | 0 | 0.66129 | 0 | 0 | 0.11811 | 0.012847 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.016129 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdc63bfed4044eff802e2301570d6c7de4fbc7e5 | 1,423 | py | Python | cv_workshops/7-day/9-clazz.py | afterloe/opencv-practice | 83d76132d004ebbc96d99d34a0fd3fc37a044f9f | [
"MIT"
] | 5 | 2020-03-13T07:34:30.000Z | 2021-10-01T03:03:05.000Z | cv_workshops/7-day/9-clazz.py | afterloe/Opencv-practice | 83d76132d004ebbc96d99d34a0fd3fc37a044f9f | [
"MIT"
] | null | null | null | cv_workshops/7-day/9-clazz.py | afterloe/Opencv-practice | 83d76132d004ebbc96d99d34a0fd3fc37a044f9f | [
"MIT"
] | 1 | 2020-03-01T12:35:02.000Z | 2020-03-01T12:35:02.000Z | #!/usr/bin/env python3
# -*- coding=utf-8 -*-
import cv2 as cv
"""
形态学分析应用 - 使用基本梯度对轮廓进行分析处理
使用形态学的二值化处理,对是别内容进行轮廓分析,在OCR上是其处理的手段之一,相比于threshold的二值化而言,对图像会有更好的分割效
果,技术路线如下:
1 图像形态学梯度
2 灰度
3 全局阈值二值化
4 轮廓分析
"""
def main():
src = cv.imread("../../pic/1.jpg")
blur = cv.medianBlur(src, 3)
kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
gradient = cv.morphologyEx(blur, cv.MORPH_GRADIENT, kernel)
cv.imshow("gradient", gradient)
gray = cv.cvtColor(gradient, cv.COLOR_BGR2GRAY)
_, binary = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)
cv.imshow("binary", binary)
    # binary = cv.morphologyEx(binary, cv.MORPH_DILATE, cv.getStructuringElement(cv.MORPH_CROSS, (3, 3)))  # dilate with a 3x3 cross kernel
contours, _ = cv.findContours(binary, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
if 0 == len(contours):
print("未搜寻到结果")
return
for index in range(len(contours)):
contour = contours[index]
        x, y, w, h = cv.boundingRect(contour)  # get the bounding rectangle
        area = cv.contourArea(contour)  # get the contour area
if not 10 < area < 500 or not 10 < h < 60:
continue
cv.rectangle(src, (x, y), (x + w, y + h), (0, 0, 255), 2, cv.LINE_8)
cv.imshow("src", src)
cv.waitKey(0)
cv.destroyAllWindows()
if "__main__" == __name__:
main()
| 31.622222 | 121 | 0.598032 | 182 | 1,423 | 4.56044 | 0.516484 | 0.048193 | 0.060241 | 0.072289 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036294 | 0.264231 | 1,423 | 44 | 122 | 32.340909 | 0.756447 | 0.120871 | 0 | 0 | 0 | 0 | 0.045635 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.038462 | 0 | 0.115385 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdc6adac6ca6afd61c4934fbf7cff6d47b19bb9a | 1,118 | py | Python | vaccibot/__main__.py | fsoubelet/vaccibot | f0956ddbf9f0ac712d3e6e10d9fb5f3edb3dda11 | [
"MIT"
] | null | null | null | vaccibot/__main__.py | fsoubelet/vaccibot | f0956ddbf9f0ac712d3e6e10d9fb5f3edb3dda11 | [
"MIT"
] | null | null | null | vaccibot/__main__.py | fsoubelet/vaccibot | f0956ddbf9f0ac712d3e6e10d9fb5f3edb3dda11 | [
"MIT"
] | null | null | null | import sys
from loguru import logger
from rich.console import Console, RenderGroup
from rich.panel import Panel
from vaccibot.constants import LOGURU_FORMAT
from vaccibot.parsing import ARGS
from vaccibot.process import retrieve_all_suitable_appointments
from vaccibot.render import make_department_table
logger.remove()
logger.add(sys.stdout, level=f"{ARGS.logs.upper()}", format=LOGURU_FORMAT)
@logger.catch()
def main() -> None:
"""Parses arguments from the commandline, fetches data and renders it in the terminal."""
console = Console()
panels = []
suitable_appointments: dict = retrieve_all_suitable_appointments()
for department, appointments in suitable_appointments.items():
if appointments: # do not make a panel and table if no appointments found
panels.append(
Panel(
make_department_table(appointments),
title=department,
expand=True,
border_style="scope.border",
)
)
console.print(*panels)
if __name__ == "__main__":
main()
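# Hedged usage note (added; the exact flag name is an assumption inferred
# from ARGS.logs above): an invocation along the lines of
#   python -m vaccibot --logs debug
# would raise the log level before appointments are fetched and rendered.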
| 30.216216 | 93 | 0.674419 | 127 | 1,118 | 5.755906 | 0.503937 | 0.065663 | 0.051984 | 0.084815 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.246869 | 1,118 | 36 | 94 | 31.055556 | 0.868171 | 0.124329 | 0 | 0 | 0 | 0 | 0.040082 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.285714 | 0 | 0.321429 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdc856fc9ff913efd9003763250b43ae605b0ec6 | 1,277 | py | Python | pi4home/components/text_sensor/custom.py | khzd/pi4home | 937bcdcf77bab111cca10af1fe45c63a55c29aae | [
"MIT"
] | 1 | 2019-05-16T02:52:12.000Z | 2019-05-16T02:52:12.000Z | pi4home/components/text_sensor/custom.py | khzd/pi4home | 937bcdcf77bab111cca10af1fe45c63a55c29aae | [
"MIT"
] | null | null | null | pi4home/components/text_sensor/custom.py | khzd/pi4home | 937bcdcf77bab111cca10af1fe45c63a55c29aae | [
"MIT"
] | null | null | null | import voluptuous as vol
from pi4home.components import text_sensor
import pi4home.config_validation as cv
from pi4home.const import CONF_ID, CONF_LAMBDA, CONF_NAME, CONF_TEXT_SENSORS
from pi4home.cpp_generator import add, process_lambda, variable
from pi4home.cpp_types import std_vector
CustomTextSensorConstructor = text_sensor.text_sensor_ns.class_('CustomTextSensorConstructor')
PLATFORM_SCHEMA = text_sensor.PLATFORM_SCHEMA.extend({
cv.GenerateID(): cv.declare_variable_id(CustomTextSensorConstructor),
vol.Required(CONF_LAMBDA): cv.lambda_,
vol.Required(CONF_TEXT_SENSORS):
cv.ensure_list(text_sensor.TEXT_SENSOR_SCHEMA.extend({
cv.GenerateID(): cv.declare_variable_id(text_sensor.TextSensor),
})),
})
def to_code(config):
for template_ in process_lambda(config[CONF_LAMBDA], [],
return_type=std_vector.template(text_sensor.TextSensorPtr)):
yield
rhs = CustomTextSensorConstructor(template_)
custom = variable(config[CONF_ID], rhs)
for i, conf in enumerate(config[CONF_TEXT_SENSORS]):
rhs = custom.Pget_text_sensor(i)
add(rhs.set_name(conf[CONF_NAME]))
text_sensor.register_text_sensor(rhs, conf)
BUILD_FLAGS = '-DUSE_CUSTOM_TEXT_SENSOR'
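# Hedged illustration (added; not part of the original file): a YAML snippet
# that roughly matches the PLATFORM_SCHEMA above (a required lambda plus a
# list of text_sensors); names are made up:
#
#   text_sensor:
#     - platform: custom
#       lambda: |-
#         auto my_sensor = new MyCustomTextSensor();
#         App.register_component(my_sensor);
#         return {my_sensor};
#       text_sensors:
#         - name: "My Custom Text Sensor"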
| 36.485714 | 96 | 0.750196 | 161 | 1,277 | 5.621118 | 0.360248 | 0.132597 | 0.049724 | 0.044199 | 0.095028 | 0.095028 | 0.095028 | 0.095028 | 0 | 0 | 0 | 0.004673 | 0.162099 | 1,277 | 34 | 97 | 37.558824 | 0.841122 | 0 | 0 | 0 | 0 | 0 | 0.039937 | 0.039937 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.230769 | 0 | 0.269231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdcc6e536cb58033fe61c7df47d8e2e7c55ae4c2 | 1,902 | py | Python | awsume/autoawsume/process.py | ignatenkobrain/awsume | 8191c35e8d60495e608c77801698c0a1a332d76f | [
"MIT"
] | 654 | 2016-04-05T16:51:22.000Z | 2022-03-28T21:07:30.000Z | awsume/autoawsume/process.py | ignatenkobrain/awsume | 8191c35e8d60495e608c77801698c0a1a332d76f | [
"MIT"
] | 149 | 2016-12-01T17:30:58.000Z | 2022-03-29T23:49:50.000Z | awsume/autoawsume/process.py | ignatenkobrain/awsume | 8191c35e8d60495e608c77801698c0a1a332d76f | [
"MIT"
] | 90 | 2016-04-12T00:50:04.000Z | 2022-03-30T20:44:45.000Z | import argparse
import psutil
from ..awsumepy.lib.aws_files import delete_section, get_aws_files, read_aws_file
from ..awsumepy.lib.logger import logger
def kill_autoawsume():
logger.debug('Killing autoawsume')
for proc in psutil.process_iter():
try:
for command_string in proc.cmdline():
if 'autoawsume' in command_string:
proc.kill()
except Exception:
pass
def kill(arguments: argparse.Namespace):
_, credentials_file = get_aws_files(None, None)
if arguments.profile_name:
        logger.debug('Stopping auto-refresh of profile {}'.format(arguments.profile_name))
profiles = read_aws_file(credentials_file)
if 'autoawsume-{}'.format(arguments.profile_name) in profiles:
delete_section('autoawsume-{}'.format(arguments.profile_name), credentials_file)
profiles.pop('autoawsume-{}'.format(arguments.profile_name))
if arguments.profile_name in profiles and profiles[arguments.profile_name].get('autoawsume'):
delete_section(arguments.profile_name, credentials_file)
profiles.pop(arguments.profile_name)
autoawsume_profiles = [{k: v} for k, v in profiles.items() if v.get('autoawsume')]
if any(autoawsume_profiles):
print('Stop {}'.format(arguments.profile_name))
return
else:
                logger.debug('There were no more autoawsume profiles, stopping autoawsume')
print('Kill')
kill_autoawsume()
else:
logger.debug('Stopping all auto refreshing and removing autoawsume profiles')
kill_autoawsume()
profiles = read_aws_file(credentials_file)
for profile in profiles:
if 'autoawsume-' in profile or profiles[profile].get('autoawsume'):
delete_section(profile, credentials_file)
print('Kill')
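# Hedged note (added): the bare 'Stop <profile>' / 'Kill' lines printed above
# look like a tiny stdout protocol for a wrapping shell integration; that is
# an inference from this file alone, not a documented contract.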
| 41.347826 | 101 | 0.664038 | 215 | 1,902 | 5.688372 | 0.293023 | 0.130826 | 0.163532 | 0.106296 | 0.235487 | 0.130826 | 0.075225 | 0 | 0 | 0 | 0 | 0 | 0.239222 | 1,902 | 45 | 102 | 42.266667 | 0.845197 | 0 | 0 | 0.2 | 0 | 0 | 0.146162 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0.025 | 0.1 | 0 | 0.175 | 0.075 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdcd63c3f87c9cdba74f8e75512a7d40eef94fd9 | 8,787 | py | Python | engine/sprite.py | amirgeva/retroupy | 1ee19b36a72c5f592cce150d1d0382a00ccdc4a0 | [
"BSD-3-Clause"
] | null | null | null | engine/sprite.py | amirgeva/retroupy | 1ee19b36a72c5f592cce150d1d0382a00ccdc4a0 | [
"BSD-3-Clause"
] | null | null | null | engine/sprite.py | amirgeva/retroupy | 1ee19b36a72c5f592cce150d1d0382a00ccdc4a0 | [
"BSD-3-Clause"
] | null | null | null | import json
import os
import gc
from .app import get_screen
from .utils import Rect
from .bitmatrix import BitMatrix
class SpritesManager:
def __init__(self):
self.free_indices = []
self.last_used = -1
self.limit = 160
def clear(self):
self.free_indices = []
self.last_used = -1
def allocate(self, data):
if len(self.free_indices) > 0:
sprite_id = self.free_indices[-1]
del self.free_indices[-1]
else:
sprite_id = self.last_used + 1
if sprite_id >= self.limit:
return -1
self.last_used = sprite_id
get_screen().set_sprite(sprite_id, data)
return sprite_id
sprites_manager = SpritesManager()
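# Note (added): SpritesManager hands out hardware sprite slots from a free
# list, reusing released indices before growing past `last_used`, and
# refuses allocations beyond `limit` (160 slots) by returning -1.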
class SpriteSheet:
def __init__(self, filename=''):
self.width = 0
self.height = 0
self.data = None
self.sprites = {}
self.rect = None
if filename:
self.load(filename)
def clean(self):
self.data = None
gc.collect()
def load(self, filename):
try:
gc.collect()
print("Loading "+filename)
print("Free Mem: "+str(gc.mem_free()))
with open(filename, 'rb') as f:
data = f.read(4)
self.width = (int(data[1]) << 8) | int(data[0])
self.height = (int(data[3]) << 8) | int(data[2])
data = None
self.data = f.read()
self.rect = Rect(0, 0, self.width, self.height)
return True
except OSError:
return False
def get_sprite_data(self, rect):
if rect in self.sprites:
return self.sprites.get(rect)
if rect.valid() and self.rect.contains(rect) and rect.width() == 32 and rect.height() == 32:
data = bytearray(32 * 32 * 2)
src = (rect.tl.y * self.width + rect.tl.x) * 2
dst = 0
mask = BitMatrix(32, 32)
mask.setall(True)
for i in range(32):
data[dst:(dst + 64)] = self.data[src:(src + 64)]
for j in range(32):
if data[dst + j * 2] == 0x20 and data[dst + j * 2 + 1] == 0:
mask.set(j, i, False)
dst = dst + 64
src = src + self.width * 2
sprite_data = sprites_manager.allocate(bytes(data)), mask
self.sprites[rect] = sprite_data
else:
sprite_data = -1, None
return sprite_data
sprite_sheets = {}
def get_sprite_sheet(filename):
if filename in sprite_sheets:
return sprite_sheets.get(filename)
s = SpriteSheet(filename)
sprite_sheets[filename] = s
return s
# EXPORT
class Sprite(object):
def __init__(self, sprite_id, mask, duration=0.0, flags=0):
self.sprite_id = sprite_id
self.mask = mask
self.duration = duration
self.flags = flags
def draw(self, position):
get_screen().draw_sprite(position.x, position.y, self.sprite_id, self.flags)
@staticmethod
def get_rect():
return Rect(0, 0, 32, 32)
@staticmethod
def deserialize(filename, obj):
r = [int(a) for a in obj['Rect'].strip().split(',')]
dur = obj['Duration']
flags = 0
if 'Flags' in obj:
flags = obj['Flags']
rect = Rect(r[0], r[1], r[2], r[3])
sheet = get_sprite_sheet(filename)
sprite_id, mask = sheet.get_sprite_data(rect)
return Sprite(sprite_id, mask, dur, flags)
# EXPORT
class AnimationSequence(object):
def __init__(self, name, base_vel=1.0):
self.name = name
self.base_vel = base_vel
self.sprites = []
def add_sprite(self, sprite):
self.sprites.append(sprite)
def deserialize(self, filename, seq):
self.sprites = []
for frame in seq['Frames']:
self.add_sprite(Sprite.deserialize(filename, frame))
def __getitem__(self, index):
return self.sprites[index]
def __len__(self):
return len(self.sprites)
# EXPORT
class StaticSprite:
def __init__(self, sprite=None):
self.sprite = sprite
def get_current_sprite(self):
return self.sprite
def get_rect(self):
if self.sprite:
return self.sprite.get_rect()
return Rect(0, 0, 32, 32)
def draw(self, pos):
if self.sprite:
self.sprite.draw(pos)
# EXPORT
class AnimatedSprite(object):
def __init__(self):
self.sheet = None
self.sequences = {}
self.flags = {}
self.active_sequence = None
self.cur_sprite = 0
self.dt = 0.0
self.anim_dir = ''
def add_flag(self, name, value):
if name == 'AnimDir':
self.anim_dir = value
self.flags[name] = value
def get_longest_sequence(self):
mx = 0
res = None
for name in self.sequences:
seq = self.sequences.get(name)
if len(seq) > mx:
mx = len(seq)
res = seq
return res
def get_sequence_by_name(self, name):
return self.sequences.get(name)
def get_sequence_by_index(self, index):
for name in self.sequences.keys():
if index == 0:
return self.sequences.get(name)
index -= 1
return None
def get_active_sequence_name(self):
if not self.active_sequence:
return ''
return self.active_sequence.name
def set_active_sequence(self, name):
if name != self.get_active_sequence_name() and name in self.sequences:
self.active_sequence = self.sequences.get(name)
self.dt = 0.0
self.cur_sprite = 0
def add_sequence(self, seq):
self.sequences[seq.name] = seq
if not self.active_sequence:
self.active_sequence = seq
def calculate_axial_velocity(self, velocity):
if self.anim_dir == 'X':
return abs(velocity.x)
if self.anim_dir == 'Y':
return abs(velocity.y)
return velocity.length()
def advance(self, dt, velocity):
axial_velocity = self.calculate_axial_velocity(velocity)
# print "axial={}".format(axial_velocity)
if self.active_sequence and len(self.active_sequence) > 0:
mult = 1.0
if self.active_sequence.base_vel > 0 and axial_velocity > 0.001:
mult = axial_velocity / self.active_sequence.base_vel
# print "mult={}".format(mult)
self.dt = self.dt + dt * mult
# print "self.dt={}".format(self.dt)
if self.cur_sprite >= len(self.active_sequence):
self.cur_sprite = 0
spr = self.active_sequence[self.cur_sprite]
while self.dt >= spr.duration:
self.dt = self.dt - spr.duration
self.cur_sprite += 1
if self.cur_sprite >= len(self.active_sequence):
self.cur_sprite = 0
return True
def get_current_sprite(self):
if self.active_sequence:
return self.active_sequence[self.cur_sprite]
return None
def get_current_height(self):
spr = self.get_current_sprite()
if spr:
return spr.height()
return 0
def draw(self, position):
spr = self.get_current_sprite()
if spr:
spr.draw(position)
def get_rect(self):
spr = self.get_current_sprite()
if spr:
return spr.get_rect()
return Rect(0, 0, 1, 1)
def deserialize(self, obj, overrides):
filename = obj['Image']
flags = obj['Flags']
for key in flags:
self.add_flag(key, flags[key])
for seq in obj['Sequences']:
base_vel = seq['BaseVelocity']
if 'BaseVelocity' in overrides:
base_vel = overrides.get('BaseVelocity')
s = AnimationSequence(seq['Name'], base_vel)
s.deserialize(filename, seq)
self.add_sequence(s)
for name in sprite_sheets:
sprite_sheets.get(name).clean()
def load(self, filename, overrides={}):
return self.deserialize(json.load(open(filename, "r")), overrides)
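# Hedged usage sketch (added; names are assumptions, not from this module):
#   player = AnimatedSprite()
#   player.load("player.json")
#   player.set_active_sequence("walk")
#   player.advance(0.016, velocity)  # velocity needs .x, .y and .length()
#   player.draw(position)            # position needs .x and .y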
# EXPORT
def load_json_file(filename):
obj = json.load(open(filename, "r"))
a = AnimatedSprite()
    a.deserialize(obj, {})  # fix: deserialize() takes a required overrides argument
return a
# EXPORT
def load_json_str(s):
obj = json.loads(s)
a = AnimatedSprite()
    a.deserialize(obj, {})  # fix: deserialize() takes a required overrides argument
return a
# EXPORT
def load_file(filename):
return load_json_file(filename)
# EXPORT
def load_str(s):
return load_json_str(s)
if __name__ == '__main__':
print(os.getcwd())
| 27.719243 | 100 | 0.561739 | 1,102 | 8,787 | 4.315789 | 0.137931 | 0.052986 | 0.05677 | 0.027754 | 0.178091 | 0.105341 | 0.08831 | 0.082422 | 0.059294 | 0.059294 | 0 | 0.017315 | 0.329578 | 8,787 | 316 | 101 | 27.806962 | 0.790019 | 0.018095 | 0 | 0.228571 | 0 | 0 | 0.01474 | 0 | 0 | 0 | 0.000464 | 0 | 0 | 1 | 0.167347 | false | 0 | 0.02449 | 0.032653 | 0.363265 | 0.012245 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdcfa69692cd0e84e62228bc835f2c497955444e | 10,882 | py | Python | natcap/versioner/versioning.py | natcap/versioner | 65e4c1cf38115dcfec260f0d186cedca192b0b2e | [
"BSD-3-Clause"
] | null | null | null | natcap/versioner/versioning.py | natcap/versioner | 65e4c1cf38115dcfec260f0d186cedca192b0b2e | [
"BSD-3-Clause"
] | null | null | null | natcap/versioner/versioning.py | natcap/versioner | 65e4c1cf38115dcfec260f0d186cedca192b0b2e | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import logging
import os
import re
import subprocess
import six
LOGGER = logging.getLogger('natcap.versioner.versioning')
LOGGER.setLevel(logging.ERROR)
class VCSQuerier(object):
name = 'VCS'
is_archive = False
repo_data_location = ''
def __init__(self, repo_path):
repo_root = self._find_repo_root(repo_path)
if not repo_root:
raise ValueError('Not within a %s repository: %s' % (
self.name, repo_path))
self._repo_path = repo_root
def _find_repo_root(self, dirpath):
"""Walk up the directory tree and locate the directory that contains
the repo data."""
abs_repo_path = os.path.abspath(dirpath)
def _locate_data(path):
# base case: we can't go up another directory and still haven't
# found the repo data.
if os.path.dirname(path) == path:
return None
if os.path.exists(os.path.join(path, self.repo_data_location)):
return path
return _locate_data(os.path.dirname(path))
return _locate_data(abs_repo_path)
def _run_command(self, cmd, cwd=None):
"""Run a subprocess.Popen command.
All output to stdout, stdin and stderr will be treated as stdout,
captured, and returned. Commands are executed as shell commands.
Parameters:
cmd (string) - a python string to be executed in the shell.
cwd=None (string or None) - the string path to the directory on
disk to use as the CWD. If None, the current CWD will be
used.
Returns:
            A UTF-8 decoded python string of the output of the given command."""
p = subprocess.check_output(
cmd, shell=True, stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=cwd)
return p.strip().decode('utf-8') # output without leading/trailing newlines
@property
def tag_distance(self):
raise NotImplementedError
@property
def build_id(self):
raise NotImplementedError
@property
def latest_tag(self):
raise NotImplementedError
@property
def branch(self):
raise NotImplementedError
@property
def node(self):
raise NotImplementedError
@property
def release_version(self):
"""This function gets the release version. Returns either the latest tag
(if we're on a release tag) or None, if we're on a dev changeset."""
if self.tag_distance == 0:
return self.latest_tag
return None
@property
def version(self):
"""This function gets the module's version string. This will be either the
dev build ID (if we're on a dev build) or the current tag if we're on a
known tag. Either way, the return type is a string."""
release_version = self.release_version
if release_version is None:
return self.build_dev_id(self.build_id)
return release_version
def build_dev_id(self, build_id=None):
"""This function builds the dev version string. Returns a string."""
if build_id is None:
build_id = self.build_id
return 'dev%s' % (build_id)
def pep440(self, branch=True, method='post'):
assert method in ['pre', 'post'], ('Versioning method %s '
'not valid') % method
# If we're at a tag, return the tag only.
if self.tag_distance == 0:
return self.latest_tag
template_string = "%(latesttag)s.%(method)s%(tagdist)s+n%(node)s"
if branch is True:
template_string += "-%(branch)s"
latest_tag = self.latest_tag
if method == 'pre':
latest_tag = _increment_tag(latest_tag)
data = {
'tagdist': self.tag_distance,
'latesttag': latest_tag,
'node': self.node,
'branch': self.branch,
'method': method,
}
version_string = template_string % data
return version_string
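# Illustrative pep440() outputs (added; values assumed): with
# latest_tag='1.2.3', tag_distance=4, node='abc123', branch='main':
#   method='post' -> '1.2.3.post4+nabc123-main'
#   method='pre'  -> '1.2.4.pre4+nabc123-main' (tag bumped by _increment_tag)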
class HgArchive(VCSQuerier):
name = 'Mercurial Archive'
shortnode_len = 12
is_archive = True
repo_data_location = '.hg_archival.txt'
@property
def build_id(self):
attrs = _get_archive_attrs(self._repo_path)
return '{latesttagdistance}:{latesttag} [{node}]'.format(
latesttagdistance=attrs['latesttagdistance'],
latesttag=attrs['latesttag'],
node=attrs['node'][:self.shortnode_len],
)
@property
def tag_distance(self):
try:
return _get_archive_attrs(self._repo_path)['latesttagdistance']
except KeyError:
# This happens when we are at a tag.
return 0
@property
def latest_tag(self):
attrs = _get_archive_attrs(self._repo_path)
try:
return six.text_type(attrs['latesttag'])
except KeyError:
# This happens when we are at a tag.
return six.text_type(attrs['tag'])
@property
def branch(self):
return _get_archive_attrs(self._repo_path)['branch']
@property
def node(self):
return _get_archive_attrs(self._repo_path)['node'][:self.shortnode_len]
class HgRepo(VCSQuerier):
name = 'Mercurial'
is_archive = False
repo_data_location = '.hg'
def _log_template(self, template_string):
hg_call = 'hg log -r . --config ui.report_untrusted=False'
cmd = (hg_call + ' --template="%s"') % template_string
return self._run_command(cmd, cwd=self._repo_path)
@property
def build_id(self):
"""Call mercurial with a template argument to get the build ID. Returns a
        python string (UTF-8 decoded)."""
return self._log_template('{latesttagdistance}:{latesttag} '
'[{node|short}]')
@property
def tag_distance(self):
"""Call mercurial with a template argument to get the distance to the latest
tag. Returns an int."""
return int(self._log_template('{latesttagdistance}'))
@property
def latest_tag(self):
"""Call mercurial with a template argument to get the latest tag. Returns a
        python string (UTF-8 decoded)."""
return self._log_template('{latesttag}')
@property
def branch(self):
"""Get the current branch from hg."""
return self._log_template('{branch}')
@property
def node(self):
return self._log_template('{node|short}')
class GitRepo(VCSQuerier):
name = 'Git'
repo_data_location = '.git'
def __init__(self, repo_path):
VCSQuerier.__init__(self, repo_path)
self._tag_distance = None
self._latest_tag = None
self._commit_hash = None
def _run_command(self, cmd):
return VCSQuerier._run_command(self, cmd, self._repo_path)
@property
def branch(self):
branch_cmd = 'git branch'
current_branches = self._run_command(branch_cmd)
for line in current_branches.split('\n'):
if line.startswith('* '):
return line.replace('* ', '').strip()
raise IOError('Could not detect current branch')
def _describe_current_rev(self):
self._tag_distance = None
self._latest_tag = None
self._commit_hash = None
current_branch = self.branch
try:
data = self._run_command('git describe --tags')
except subprocess.CalledProcessError:
# when there are no tags
self._latest_tag = 'null'
num_commits_cmd = 'git rev-list %s --count' % current_branch
self._tag_distance = self._run_command(num_commits_cmd)
commit_hash_cmd = 'git log -1 --pretty="format:%h"'
self._commit_hash = self._run_command(commit_hash_cmd)
else:
if '-' not in data:
# then we're at a tag
self._latest_tag = str(data)
self._tag_distance = 0
commit_hash_cmd = 'git log -1 --pretty="format:%h"'
self._commit_hash = self._run_command(commit_hash_cmd)
else:
                # we're not at a tag, so data has the format:
                # data = tagname-tagdistance-commit_hash
                # (use rsplit so tag names that themselves contain '-' survive)
                tagname, tag_dist, _commit_hash = data.rsplit('-', 2)
self._tag_distance = int(tag_dist)
self._latest_tag = tagname
self._commit_hash = self.node
@property
def build_id(self):
self._describe_current_rev()
return "%s:%s [%s]" % (self._tag_distance, self._latest_tag,
self._commit_hash)
@property
def tag_distance(self):
self._describe_current_rev()
return self._tag_distance
@property
def latest_tag(self):
self._describe_current_rev()
return self._latest_tag
@property
def node(self):
return self._run_command('git rev-parse HEAD').strip()[:8]
@property
def is_archive(self):
# Archives are a mercurial feature.
return False
def _increment_tag(version_string):
assert len(re.findall('([0-9].?)+', version_string)) >= 1, (
'Version string must be a release')
# increment the minor version number and not the update num.
tag = [int(s) for s in version_string.split('.')]
tag[-1] += 1
return '.'.join([str(i) for i in tag])
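# e.g. (added): _increment_tag('0.9.2') -> '0.9.3'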
def _get_archive_attrs(archive_path):
"""
If we're in an hg archive, there will be a file '.hg_archival.txt' in the
repo root. If this is the case, we can fetch relevant build information
from this file that we might normally be able to get directly from hg.
    Parameters:
        archive_path (string): The path to the mercurial archive.
            The .hg_archival.txt file must exist right inside this directory.
            Typical attributes found in that file are
            "repo"|"node"|"branch"|"latesttag"|"latesttagdistance"|"changessincelatesttag"
    Returns:
        A dict of the attributes within the .hg_archival file.
    Raises:
        IOError when the .hg_archival.txt file cannot be found.
"""
archival_filepath = os.path.join(archive_path, '.hg_archival.txt')
attributes = {}
with open(archival_filepath) as archival_file:
for line in archival_file:
attr_name, value = line.strip().split(': ')
# Try to cast the attribute to an int (since it might be a
# revision number). If it doesn't cast, leave it as a string.
try:
value = int(value)
except ValueError:
pass
attributes[attr_name] = value
return attributes
| 32.195266 | 86 | 0.609355 | 1,360 | 10,882 | 4.675735 | 0.188971 | 0.039786 | 0.020758 | 0.028306 | 0.301462 | 0.19437 | 0.142947 | 0.126435 | 0.089322 | 0.077685 | 0 | 0.002355 | 0.297647 | 10,882 | 337 | 87 | 32.290801 | 0.829648 | 0.231299 | 0 | 0.37156 | 0 | 0 | 0.09464 | 0.019593 | 0 | 0 | 0 | 0 | 0.009174 | 1 | 0.16055 | false | 0.004587 | 0.027523 | 0.027523 | 0.417431 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdd18a04d17e95c0953ca2e0c3d8db5c195b7e67 | 1,054 | py | Python | src/two_level_aspect_entity_embedding_generation/clusterd_knowledge_graph_statistics.py | mainuliitkgp/AR-BERT | d6d5e8542a3a1c76edac49cec9e99ebda6395725 | [
"MIT"
] | 4 | 2022-03-06T17:41:57.000Z | 2022-03-22T08:42:58.000Z | src/two_level_aspect_entity_embedding_generation/clusterd_knowledge_graph_statistics.py | mainuliitkgp/AR-BERT | d6d5e8542a3a1c76edac49cec9e99ebda6395725 | [
"MIT"
] | null | null | null | src/two_level_aspect_entity_embedding_generation/clusterd_knowledge_graph_statistics.py | mainuliitkgp/AR-BERT | d6d5e8542a3a1c76edac49cec9e99ebda6395725 | [
"MIT"
] | 1 | 2022-03-19T14:04:42.000Z | 2022-03-19T14:04:42.000Z | from __future__ import print_function
import numpy as np
import random
import json
import sys
import os
import networkx as nx
from networkx.readwrite import json_graph
version_info = list(map(int, nx.__version__.split('.')))
major = version_info[0]
minor = version_info[1]
assert (major <= 1) and (minor <= 11), "networkx major version > 1.11"
if __name__ == "__main__":
graph_file = sys.argv[1]
#out_file = sys.argv[2]
G_data = json.load(open(graph_file))
#print(G_data)
G = json_graph.node_link_graph(G_data)
nodes = [n for n in G.nodes() if not G.node[n]["val"] and not G.node[n]["test"]]
G = G.subgraph(nodes)
    total_degree = 0
    max_node_degree = 0
    # Bug fix: the original loop reused `count` as the enumerate() index,
    # which overwrote the running degree sum on every iteration.
    for node in nodes:
        degree = G.degree(node)
        if degree == 0:
            continue
        total_degree += degree
        if degree > max_node_degree:
            max_node_degree = degree
    avg_node_degree = total_degree / len(nodes)
print(len(nodes), avg_node_degree, max_node_degree)
print(nx.is_connected(G))
| 28.486486 | 84 | 0.651803 | 164 | 1,054 | 3.926829 | 0.365854 | 0.093168 | 0.080745 | 0.02795 | 0.071429 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01599 | 0.228653 | 1,054 | 36 | 85 | 29.277778 | 0.776138 | 0.033207 | 0 | 0 | 0 | 0 | 0.044248 | 0 | 0 | 0 | 0 | 0 | 0.033333 | 1 | 0 | false | 0 | 0.266667 | 0 | 0.266667 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdd1f5712640c660f6739b854770d7e695f1c4d4 | 8,167 | py | Python | src/aioros/tcpros/topic.py | mgrrx/aioros | 9bd750020d0d5fb466891346f61b6f083cbb8f05 | [
"Apache-2.0"
] | 8 | 2020-08-27T17:16:59.000Z | 2022-02-02T13:39:41.000Z | src/aioros/tcpros/topic.py | mgrrx/aioros | 9bd750020d0d5fb466891346f61b6f083cbb8f05 | [
"Apache-2.0"
] | 3 | 2022-02-09T19:18:12.000Z | 2022-03-08T21:12:00.000Z | src/aioros/tcpros/topic.py | mgrrx/aioros | 9bd750020d0d5fb466891346f61b6f083cbb8f05 | [
"Apache-2.0"
] | null | null | null | from asyncio import AbstractEventLoop
from asyncio import iscoroutinefunction
from asyncio import Event
from asyncio import IncompleteReadError
from asyncio import Queue
from asyncio import open_connection
from asyncio import open_unix_connection
from typing import Dict
from typing import List
from typing import Set
from typing import Tuple
from typing import Type
from genpy import Message
from ..api.node_api_client import NodeApiClient
from .protocol import Serializer
from .protocol import encode_header
from .protocol import read_data
from .protocol import read_header
from .publisher import Publisher
from .subscription import Subscription
class SubscriberInitError(Exception):
pass
class Topic:
def __init__(
self,
loop: AbstractEventLoop,
node_name: str,
topic_name: str,
msg_type: Type[Message]
) -> None:
self._loop = loop
self._node_name = node_name
self._topic_name = topic_name
self._msg_type = msg_type
self._connected_subscribers: Dict[str, Queue] = {}
self._connected_publishers: Dict[str, Event] = {}
self._has_connected_subscribers: Event = Event()
self._has_connected_publishers: Event = Event()
self._internal_subscriptions: Set[Subscription] = set()
self._internal_publishers: Set[Publisher] = set()
self._latched_msgs: Dict[Publisher, bytes] = {}
self._serializer: Serializer = Serializer()
@property
def name(self) -> str:
return self._topic_name
@property
def type(self) -> Type[Message]:
return self._msg_type
@property
def type_name(self) -> str:
return self._msg_type._type
@property
def md5sum(self) -> str:
return self._msg_type._md5sum
@property
def nr_connected_subscribers(self) -> int:
return len(self._connected_subscribers)
@property
def nr_connected_publishers(self) -> int:
return len(self._connected_publishers)
async def wait_for_connected_subscribers(self) -> None:
await self._has_connected_subscribers.wait()
async def wait_for_connected_publishers(self) -> None:
await self._has_connected_publishers.wait()
@property
def has_subscriptions(self) -> bool:
return bool(self._internal_subscriptions)
@property
def has_publishers(self) -> bool:
return bool(self._internal_publishers)
@property
def is_latching(self) -> bool:
return any(pub.latch for pub in self._internal_publishers)
def get_publisher_header(self) -> Dict[str, str]:
return dict(
topic=self.name,
type=self.type_name,
latching='1' if self.is_latching else '0',
message_definition=self.type._full_text,
md5sum=self.md5sum,
callerid=self._node_name)
def register_publisher(
self,
publisher: Publisher
) -> None:
self._internal_publishers.add(publisher)
async def unregister_publisher(
self,
publisher: Publisher
) -> bool:
self._latched_msgs.pop(publisher, None)
self._internal_publishers.discard(publisher)
return self.has_publishers
def register_subscription(
self,
subscription: Subscription
) -> None:
self._internal_subscriptions.add(subscription)
async def unregister_subscription(
self,
subscription: Subscription
) -> bool:
self._internal_subscriptions.discard(subscription)
if not self.has_subscriptions:
for event in self._connected_publishers.values():
event.set()
return self.has_subscriptions
def publish(
self,
publisher: Publisher,
msg: Message
) -> None:
if not self._connected_subscribers and not self.is_latching:
return
with self._serializer.serialize(msg) as serialized_msg:
for queue in self._connected_subscribers.values():
queue.put_nowait(serialized_msg)
if publisher.latch:
self._latched_msgs[publisher] = serialized_msg
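    # Note (added): for latched publishers the serialized message is kept in
    # _latched_msgs, and connect_subscriber() replays it to subscribers that
    # attach later, mirroring ROS latched-topic semantics.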
async def connect_subscriber(
self,
node_name: str,
queue: Queue
) -> None:
for publisher in self._internal_publishers:
if publisher.on_peer_connect:
msg = publisher.on_peer_connect(node_name)
if msg:
with self._serializer.serialize(msg) as serialized_msg:
await queue.put(serialized_msg)
serialized_msg = self._latched_msgs.get(publisher)
if serialized_msg is not None:
await queue.put(serialized_msg)
self._connected_subscribers[node_name] = queue
self._has_connected_subscribers.set()
def disconnect_subscriber(
self,
node_name: str
) -> None:
for publisher in self._internal_publishers:
if publisher.on_peer_disconnect:
publisher.on_peer_disconnect(node_name)
del self._connected_subscribers[node_name]
if not self._connected_subscribers:
self._has_connected_subscribers.clear()
def connect_to_publishers(
self,
publishers: List[str]
) -> None:
publishers_set = set(publishers)
for publisher_uri in publishers:
if publisher_uri in self._connected_publishers:
continue
self._connected_publishers[publisher_uri] = Event()
self._loop.create_task(
self._subscribe(publisher_uri))
for publisher_uri in self._connected_publishers:
if publisher_uri not in publishers_set:
self._connected_publishers[publisher_uri].set()
async def _subscribe(
self,
publisher_uri: str
) -> None:
connection_params = await self._get_publisher_connection_params(
publisher_uri)
try:
if connection_params[0] == 'UNIXROS':
reader, writer = await open_unix_connection(
connection_params[1])
elif connection_params[0] == 'TCPROS':
reader, writer = await open_connection(
connection_params[1],
int(connection_params[2]))
header = dict(
topic=self.name,
message_definition=self.type._full_text,
tcp_nodelay='1',
md5sum=self.md5sum,
type=self.type_name,
callerid=self._node_name)
writer.write(encode_header(header))
await writer.drain()
header_dict = await read_header(reader)
if 'error' in header_dict:
raise SubscriberInitError(header_dict['error'])
self._has_connected_publishers.set()
while not self._connected_publishers[publisher_uri].is_set():
msg = self.type()
msg.deserialize(await read_data(reader))
for sub in self._internal_subscriptions:
if iscoroutinefunction(sub.callback):
self._loop.create_task(sub.callback(msg))
else:
self._loop.call_soon(sub.callback, msg)
except (ConnectionResetError, IncompleteReadError):
pass
finally:
writer.close()
if hasattr(writer, 'wait_closed'):
await writer.wait_closed()
self._connected_publishers.pop(publisher_uri)
if not self._connected_publishers:
self._has_connected_publishers.clear()
async def _get_publisher_connection_params(
self,
publisher_uri: str
) -> Tuple[str, int]:
client = NodeApiClient(self._node_name, publisher_uri)
topic = await client.request_topic(
self.name,
[['UNIXROS'], ['TCPROS']])
await client.close()
if topic[0] not in ('UNIXROS', 'TCPROS'):
raise ValueError('protocol is not supported')
return topic
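# Hedged summary (added) of the subscriber-side handshake implemented above,
# stated from this file alone rather than an external TCPROS spec lookup:
#   1. request_topic() negotiates a 'UNIXROS' or 'TCPROS' transport
#   2. the subscriber sends encode_header({topic, type, md5sum, callerid, ...})
#   3. the publisher's header is read; an 'error' field aborts the connection
#   4. loop: read_data() -> msg.deserialize() -> dispatch to subscription
#      callbacks (awaited if coroutine, else scheduled via call_soon)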
| 32.40873 | 75 | 0.628872 | 869 | 8,167 | 5.628308 | 0.156502 | 0.045185 | 0.047025 | 0.022081 | 0.212635 | 0.116132 | 0.041709 | 0.041709 | 0.023308 | 0.023308 | 0 | 0.002609 | 0.29607 | 8,167 | 251 | 76 | 32.537849 | 0.848148 | 0 | 0 | 0.260465 | 0 | 0 | 0.010775 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074419 | false | 0.009302 | 0.093023 | 0.046512 | 0.24186 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdd325e14189de11da8187eeb203eba8b96feec5 | 1,911 | py | Python | tests/core/test_local.py | riddopic/opta | 25fa6435fdc7e2ea9c7963ed74100fffb0743063 | [
"Apache-2.0"
] | 595 | 2021-05-21T22:30:48.000Z | 2022-03-31T15:40:25.000Z | tests/core/test_local.py | riddopic/opta | 25fa6435fdc7e2ea9c7963ed74100fffb0743063 | [
"Apache-2.0"
] | 463 | 2021-05-24T21:32:59.000Z | 2022-03-31T17:12:33.000Z | tests/core/test_local.py | riddopic/opta | 25fa6435fdc7e2ea9c7963ed74100fffb0743063 | [
"Apache-2.0"
] | 29 | 2021-05-21T22:27:52.000Z | 2022-03-28T16:43:45.000Z | import json
import os
import unittest
from opta.core.local import Local
from opta.layer import Layer
class LocalTests(unittest.TestCase):
def setUp(self) -> None:
self.layer = Layer(
name="testname",
org_name="testorg",
providers={"local": {"path": "/tmp"}},
modules_data=[],
path="/tmp",
parent=None,
)
self.local = Local(self.layer)
self.local.tf_file = "/tmp/tfconfig"
self.local.config_file_path = "/tmp/localconfig"
with open(self.local.config_file_path, "w") as f:
json.dump(
{
"opta_version": "dev",
"date": "2021-11-15T18:26:47.553097",
"original_spec": "",
"defaults": {},
},
f,
)
with open(self.local.tf_file, "w") as f:
f.write("Some tf state for testing")
return super().setUp()
def tearDown(self) -> None:
if os.path.isfile("/tmp/localconfig"):
os.remove("/tmp/localconfig")
if os.path.isfile("/tmp/tfconfig"):
os.remove("/tmp/tfconfig")
return super().tearDown()
def test_get_remote_config(self) -> None:
assert self.local.get_remote_config() == {
"opta_version": "dev",
"date": "2021-11-15T18:26:47.553097",
"original_spec": "",
"defaults": {},
}
def test_upload_opta_config(self) -> None:
self.local.upload_opta_config()
        config = json.load(open(self.local.config_file_path, "r"))
        assert set(config.keys()) == set(
["opta_version", "original_spec", "date", "defaults"]
)
def test_delete_remote_state(self) -> None:
self.local.delete_remote_state()
assert os.path.isfile(self.local.tf_file) is False
| 30.333333 | 65 | 0.529042 | 214 | 1,911 | 4.574766 | 0.341122 | 0.091931 | 0.036772 | 0.045965 | 0.233912 | 0.175689 | 0.120531 | 0.120531 | 0.120531 | 0.120531 | 0 | 0.031177 | 0.328624 | 1,911 | 62 | 66 | 30.822581 | 0.731878 | 0 | 0 | 0.150943 | 0 | 0 | 0.165358 | 0.027211 | 0 | 0 | 0 | 0 | 0.056604 | 1 | 0.09434 | false | 0 | 0.09434 | 0 | 0.245283 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdd62727280cef056fc775786f12ebffe8812748 | 28,253 | py | Python | tiknib/feature/asm_const.py | SoftSec-KAIST/tiknib | 5f1e25df0ff652cf35574dae3e6a3cfb3b163e63 | [
"MIT"
] | null | null | null | tiknib/feature/asm_const.py | SoftSec-KAIST/tiknib | 5f1e25df0ff652cf35574dae3e6a3cfb3b163e63 | [
"MIT"
] | null | null | null | tiknib/feature/asm_const.py | SoftSec-KAIST/tiknib | 5f1e25df0ff652cf35574dae3e6a3cfb3b163e63 | [
"MIT"
] | null | null | null | from tiknib.feature.asm_ppc import PPC_GRP_MAP
# ==================== x86 32 =============================================
# data transfer
X86_GRP_DTRANSFER = [
# general purpose instructions
"CMOV",
"CMOVA",
"CMOVAE",
"CMOVB",
"CMOVBE",
"CMOVC",
"CMOVE",
"CMOVG",
"CMOVGE",
"CMOVL",
"CMOVLE",
"CMOVNA",
"CMOVNAE",
"CMOVNB",
"CMOVNBE",
"CMOVNC",
"CMOVNE",
"CMOVNG",
"CMOVNGE",
"CMOVNL",
"CMOVNLE",
"CMOVNO",
"CMOVNP",
"CMOVNS",
"CMOVNZ",
"CMOVO",
"CMOVP",
"CMOVPE",
"CMOVPO",
"CMOVS",
"CMOVZ",
"BSWAP",
"XCHG",
"XADD",
"CMPXCHG",
"CMPXCHG8B",
"POP",
"POPA",
"POPAD",
"PUSH",
"PUSHA",
"PUSHAD",
"CDQ",
"CDQE",
"CBW",
"CWD",
"CWDE",
"MOV",
"MOVD",
"MOVQ",
"MOVABS",
"MOVSX",
"MOVSXD",
"MOVZX",
"MOVZXD",
# string
"MOVS",
"MOVSB",
"MOVSD",
"MOVSW",
"STOS",
"STOSB",
"STOSD",
"STOSW",
"LODS",
"LODSB",
"LODSD",
"LODSW",
# segment register
"LDS",
"LES",
"LFS",
"LGS",
"LSS",
# user mode extended
"XSAVE",
"XSAVEC",
"XSAVEOPT",
"XRSTOR",
"XGETBV",
"XSETBV",
# BMI1, BMI2
"BEXTR",
"BLSI",
"PDEP",
"PEXT",
# MMX
"PACKSSDW",
"PACKSSWB",
"PACKUSDW",
"PACKUSWB",
"PUNPCKHBW",
"PUNPCKHDQ",
"PUNPCKHWD",
"PUNPCKLBW",
"PUNPCKLDQ",
"PUNPCKLWD",
"EMMS",
# SSE 64-bit integer
"PMOVMSKB",
"PSHUFW",
# SSE2 128-bit integer
"MOVDQA",
"MOVDQU",
"MOVQ2DQ",
"MOVDQ2Q",
"PSHUFLW",
"PSHUFHW",
"PSHUFD",
"PUNPCKLQDQ",
"PUNPCKHQDQ",
# SSSE2
"PSHUFB",
"PALIGNR",
# SSE4
"MOVNTDQA",
"PBLENDVB",
"PBLENDW",
"PINSRB",
"PINSRD",
"PINSRQ",
"PEXTRB",
"PEXTRW",
"PEXTRD",
"PEXTRQ",
"PMOVSXBW",
"PMOVZXBW",
"PMOVSXBD",
"PMOVZXBD",
"PMOVSXWD",
"PMOVZXWD",
"PMOVSXBQ",
"PMOVZXBQ",
"PMOVSXWQ",
"PMOVZXWQ",
"PMOVSXDQ",
"PMOVZXDQ",
"PACKUSDW",
"LGDT",
"SGDT",
"LLDT",
"SLDT",
"LTR",
"STR",
"LIDT",
"SIDT",
"MOV",
"LMSW",
"SMSW",
"CLTS",
"LSL",
"LAR",
"VERR",
"VERW",
# 64-bit
"CDQE",
"CQO",
]
X86_GRP_FLOAT_DTRANSFER = [
    # floating point instructions
"FLD",
"FST",
"FSTP",
"FILD",
"FIST",
"FISTP",
"FBLD",
"FBSTP",
"FXCH",
"FCMOVB",
"FCMOVBE",
"FCMOVE",
"FCMOVNB",
"FCMOVNBE",
"FCMOVNE",
"FCMOVNU",
"FCMOVU",
# floating point load const instructions
"FLD1",
"FLDZ",
"FLDPI",
"FLDL2E",
"FLDLN2",
"FLDL2T",
"FLDLG2",
# FPU register related
"FCLEX",
"FFREE",
"FINIT",
"FLDCW",
"FLDENV",
"FNCLEX",
"FNINIT",
"FNOP",
"FNSAVE",
"FNSTCW",
"FNSTENV",
"FNSTSW",
"FRSTOR",
"FSAVE",
"FSTCW",
"FSTENV",
"FSTSW",
# SSE
"MOVAPS",
"MOVUPS",
"MOVHPS",
"MOVHLPS",
"MOVLPS",
"MOVLHPS",
"MOVMSKPS",
"MOVSS",
# SSE2
"MOVAPD",
"MOVUPD",
"MOVHPD",
"MOVHLPD",
"MOVLPD",
"MOVLHPD",
"MOVMSKPD",
"MOVSD",
# SSE Shuffle
"SHUFPS",
"UNPCKHPS",
"UNPCKLPS",
# SSE2 shuffle
"SHUFPD",
"UNPCKHPD",
"UNPCKLPD",
# SSE Conversion
"CVTPI2PS",
"CVTSI2SS",
"CVTPS2PI",
"CVTTPS2PI",
"CVTSS2SI",
"CVTTSS2SI",
# SSE2 Conversion
"CVTPD2PI",
"CVTTPD2PI",
"CVTPI2PD",
"CVTPD2DQ",
"CVTTPD2DQ",
"CVTDQ2PD",
"CVTPS2PD",
"CVTPD2PS",
"CVTSS2SD",
"CVTSD2SS",
"CVTSD2SI",
"CVTTSD2SI",
"CVTSI2SD",
"CVTDQ2PS",
"CVTPS2DQ",
"CVTTPS2DQ",
# SSE MXCSR State
"LDMXCSR",
"STMXCSR",
# SSE 64-bit
"PEXTRW",
"PINSRW",
# SSE cache
"MASKMOVQ",
"MOVNTQ",
"MOVNTPS",
"PREFETCH",
"SFENCE",
# SSE3
"FISTTP",
"LDDQU",
"MOVSHDUP",
"MOVSLDUP",
"MOVDDUP",
# SSE4
"BLENDPD",
"BLENDPS",
"BLENDVPD",
"BLENDVPS",
"EXTRACTPS",
"INSERTPS",
# 16-bit FP
"VCVTPS2PH",
"VCVTPS2PH",
# Vector
"VALIGN",
"VBLEND",
"VCOMPRESS",
"VEXTRACT",
"VINSERT",
"VMOV",
"VFIXUP",
"VGET",
"VEXPAND",
"VCVT",
"VPBLEND",
"VPBROAD",
"VPCOMPRESS",
"VPERM" "VPEXPAND" "VPMOV",
"VPSCATTER",
"VSCATTER",
"VSHUF",
]
# - Miscellaneous Instructions:
X86_GRP_MISC = [
"NOP",
"UD",
"UD2",
"LEA",
"XLAT",
"XLATB",
"CPUID",
"MOVBE",
"PREFETCHW",
"PREFETCHWT1",
"CLFLUSH",
"CLFLUSHOPT",
# SSE2 cache
"CLFLUSH",
"LFENCE",
"MFENCE",
"MASKMOVDQU",
"MOVNTPD",
"MOVNTDQ",
"MOVNTI",
]
X86_GRP_ARITH = [
# general purpose binary arithmetic instructions
"ADCX",
"ADOX",
"ADC",
"ADD",
"XADD",
"SUB",
"SBB",
"IMUL",
"MUL",
"IDIV",
"DIV",
"INC",
"DEC",
"NEG",
"CMP",
# decimal arithmetic instructions
"DAA",
"DAS",
"AAA",
"AAS",
"AAM",
"AAD",
# flag
"STC",
"CLC",
"CMC",
"CLD",
"STD",
# BMI1, BMI2
"MULX",
# MMX
"PADD",
"PADDB",
"PADDW",
"PADDD",
"PADDSB",
"PADDSW",
"PADDUSB",
"PADDUSW",
"PSUB",
"PSUBB",
"PSUBW",
"PSUBD",
"PSUBSB",
"PSUBSW",
"PSUBUSB",
"PSUBUSW",
"PMULHW",
"PMULLW",
"PMADDWD",
    # SSE 64-bit integer
"PAVGB",
"PAVGW",
"PMAXUB",
"PMAXSB",
"PMINUB",
"PMINSB",
"PMULHUW",
"PSADBW",
# SSE 128-bit integer
"PMULUDQ",
"PADDQ",
"PSUBQ",
# SSSE3
"PHADDW",
"PHADDSW",
"PHADDD",
"PHSUBW",
"PHSUBSW",
"PHSUBD",
"PABSB",
"PABSW",
"PABSD",
"PABSQ",
"PMADDUBSW",
"PMULHRSW",
"PSIGNB",
"PSIGNW",
"PSIGND",
# SSE4
"PMULLD",
"PMULDQ",
"PMINUW",
"PMINUD",
"PMINSB",
"PMINSD",
"PMAXUW",
"PMAXUD",
"PMAXSB",
"PMAXSD",
"ROUNDPS",
"ROUNDPD",
"ROUNDSS",
"ROUNDSD",
"PMPSADBW",
# AESNI
"AESDEC",
"AESDECLAST",
"AESENC",
"AESENCLAST",
"AESIMC",
"AESKEYGENASSIST",
"PCLMULQDQ",
# SHA1
"SHA1MSG1",
"SHA1MSG2",
"SHA1NEXTE",
"SHA1RNDS4",
"SHA256MSG1",
"SHA256MSG2",
"SHA256RNDS2",
"CRC32",
# BMI1, BMI2
"BLSMSK",
"BLSR",
"CLAC",
"STAC",
]
X86_GRP_FLOAT_CMP = [
# floating point compare instructions
"FCOM",
"FCOMP",
"FCOMPP",
"FUCOM",
"FUCOMP",
"FUCOMPP",
"FICOM",
"FICOMP",
"FCOMI",
"FUCOMI",
"FCOMIP",
"FUCOMIP",
"FTST",
"FXAM",
# SSE
"CMPPS",
"CMPEQPS",
"CMPNEQPS",
"CMPLTPS",
"CMPNLTPS",
"CMPSS",
"CMPEQSS",
"CMPNEQSS",
"CMPLTSS",
"CMPNLTSS",
"COMISS",
"UCOMISS",
"CMPPD",
"CMPEQPD",
"CMPNEQPD",
"CMPLTPD",
"CMPNLTPD",
"CMPSD",
"CMPEQSD",
"CMPNEQSD",
"CMPLTSD",
"CMPNLTSD",
"COMISD",
"UCOMISD",
# vector
"VPCMP",
]
X86_GRP_FLOAT_ARITH = [
# - floating point instructions:
"FADD",
"FADDP",
"FIADD",
"FSUB",
"FSUBP",
"FISUB",
"FSUBR",
"FSUBRP",
"FISUBR",
"FMUL",
"FMULP",
"FIMUL",
"FDIV",
"FDIVP",
"FIDIV",
"FDIVR",
"FDIVRP",
"FIDIVR",
"FPREM",
"FPREM1",
"FABS",
"FCHS",
"FRNDINT",
"FSCALE",
"FSQRT",
"FXTRACT",
# floating point transcendental instructions
"FSIN",
"FCOS",
"FSINCOS",
"FPTAN",
"FPATAN",
"F2XM1",
"FYL2X",
"FYL2XP1",
# fpu register related
"FINCSTP",
"FDECSTP",
# SSE
"ADDPS",
"ADDSS",
"SUBPS",
"SUBSS",
"MULPS",
"MULSS",
"DIVPS",
"DIVSS",
"RCPPS",
"RCPSS",
"SQRTPS",
"SQRTSS",
"RSQRTPS",
"RSQRTSS",
"MAXPS",
"MAXSS",
"MINPS",
"MINSS",
# SSE2
"ADDSD",
"SUBSD",
"MULSD",
"DIVSD",
"RCPSD",
"SQRTSD",
"RSQRTSD",
"MAXSD",
"MINSD",
# SSE3
"ADDSUBPS",
"ADDSUBPD",
"HADDPS",
"HSUBPS",
"HADDPD",
"HSUBPD",
# SSE4
"DPPD",
"DPPS",
# vector
"VPMAX",
"VPMIN",
"VRCP",
"VRNDSCAL",
"VRSQRT",
"VSCALE",
"ADDPD",
"ADDSD",
"MULPD",
"MULSD",
"SUBPD",
"SUBSD",
"DIVPD",
"DIVSD",
"RCPPD",
"RCPSD",
]
X86_GRP_CMP = [
"CMP",
"COMI",
"CLT",
# from dtransfer
"CMPXCHG",
"CMPXCHG8B",
# from bit
"TEST",
# from string
"CMPS",
"CMPSB",
"CMPSD",
"CMPSW",
# MMX
"PCMPEQB",
"PCMPEQW",
"PCMPEQD",
"PCMPGTB",
"PCMPGTW",
"PCMPGTD",
# SSE4
"PHMINPOSUW",
"PTEST",
"PCMPEQQ",
# SSE4.2
"PCMPESTRI",
"PCMPESTRM",
"PCMPISTRI",
"PCMPISTRM",
"PCMPGTQ",
# Vector
"VPTEST",
]
# Shift and Rotate Instructions:
X86_GRP_SHIFT = [
# general purpose instructions
"SAR",
"SHR",
"SAL",
"SHL",
"SHRD",
"SHLD",
"ROR",
"ROL",
"RCR",
"RCL",
# BMI1, BMI2
"RORX",
"SARX",
"SHLX",
"SHRX",
# MMX
"PSLLW",
"PSLLD",
"PSLLQ",
"PSRLW",
"PSRLD",
"PSRLQ",
"PSRAW",
"PSRAD",
# SSE2 128-bit integer
"PSLLDQ",
"PSRLDQ",
# vector
"VPROL",
"VPROR",
"VPSRA",
"VPSLL",
"VPSRA",
]
# Logical Instructions:
X86_GRP_LOGIC = [
# general purpose instructions
"AND",
"NOT",
"OR",
"XOR",
# BMI1, BMI2
"ANDN",
# MMX
"PAND",
"PANDN",
"POR",
"PXOR",
# SSE
"ANDPS",
"ANDNPS",
"ORPS",
"XORPS",
# SSE2
"ANDPD",
"ANDNPD",
"ORPD",
"XORPD",
# Vector
"VPTERLOG",
]
# bit and byte instructions:
X86_GRP_BIT = [
# general purpose instructions
"SETA",
"SETAE",
"SETB",
"SETBE",
"SETC",
"SETE",
"SETG",
"SETGE",
"SETL",
"SETLE",
"SETNA",
"SETNAE",
"SETNB",
"SETNBE",
"SETNC",
"SETNE",
"SETNG",
"SETNGE",
"SETNL",
"SETNLE",
"SETNO",
"SETNP",
"SETNS",
"SETNZ",
"SETO",
"SETP",
"SETPE",
"SETPO",
"SETS",
"SETZ",
"TEST",
"CRC32",
# BMI1, BMI2
"BLSMSK",
"BLSR",
"CLAC",
"STAC",
# from bit
"TEST",
"BT",
"BTS",
"BTR",
"BTC",
"BSF",
"BSR",
"POPCNT",
"TZCNT",
"LZCNT",
]
# control transfer instructions:
X86_GRP_CTRANSFER = [
# general purpose instructions
"JMP",
"CALL",
"RET",
"IRET",
"INT",
"INTO",
"BOUND",
"ENTER",
"LEAVE",
# flag
"CLI",
"STI",
# SSE2
"PAUSE",
# SSE3
"MONITOR",
"MWAIT",
"XABORT",
"XACQUIRE",
"XRELEASE",
"XBEGIN",
"XEND",
"XTEST",
"HLT",
"SYSCALL",
"SYSENTER",
"SYSEXIT",
"SYSRET",
"FWAIT",
"WAIT",
# vm related instructions
"VMCALL",
"VMLAUNCH",
"VMMCALL",
"VMRESUME",
"VMRUN",
"VMFUNC",
"VMCLEAR",
"VMXON",
"VMXOFF",
]
X86_GRP_COND_CTRANSFER = [
# general purpose instructions
"JA",
"JAE",
"JB",
"JBE",
"JC",
"JCXZ",
"JE",
"JECXZ",
"JRCXZ",
"JG",
"JGE",
"JL",
"JLE",
"JNAE",
"JNB",
"JNBE",
"JNC",
"JNE",
"JNG",
"JNGE",
"JNL",
"JNLE",
"JNO",
"JNP",
"JNS",
"JNZ",
"JO",
"JP",
"JPE",
"JPO",
"JS",
"JZ",
"LOOP",
"LOOPE",
"LOOPNE",
"LOOPNZ",
"LOOPZ",
# string
"REP",
"REP MOVSQ",
"REP STOSQ",
"REPNE",
"REPNZ",
"REPE",
"REPZ",
]
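# Hedged illustration (added; not part of tiknib): the module imports
# PPC_GRP_MAP above, which suggests these lists are eventually flattened
# into a mnemonic -> group lookup. A sketch of such a reverse map, built
# only from the X86 groups defined so far:
#
#   X86_GRP_MAP = {}
#   for _name, _group in list(globals().items()):
#       if _name.startswith("X86_GRP_") and isinstance(_group, list):
#           for _mnemonic in _group:
#               X86_GRP_MAP[_mnemonic] = _name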
# ==================== ARM 32 =============================================
ARM_GRP_DTRANSFER = [
# general purpose instructions
"LDA",
"ADR",
"ADRP",
"LDR",
"LDRD",
"LDRB",
"LDRBT",
"LDRH",
"LDRS",
"LDRSB",
"LDRSBT",
"LDRSH",
"LDRSHT",
"LDRT",
"LDRHT",
"STR",
"STRB",
"STRD",
"STRH",
"STRBT",
"STRT",
"LDM",
"LDMDA",
"LDMDB",
"LDMIB",
"STM",
"STMDA",
"STMDB",
"STMIB",
"PLD",
"SWP",
"MOV",
"MOVI",
"MOVK",
"MOVZ",
"MOVT",
"MOVN",
"MVN",
"MVNI",
"STP",
"LDP",
"RFEIB",
# coprocessor data operations
"CDP",
"MCR",
"MCRR",
"MRC",
"MRR",
"LDC",
"LDCL",
"STC",
"STCL",
"PUSH",
"SBFX",
"SBFIZ",
"BFX",
"BFXIL",
"UBFX",
"UBFIZ",
"VLD",
"VST",
"VST2",
"VSTMDB",
"VTBL",
"VTBX",
"ZIP",
"ZIP1",
"ZIP2",
"UZP",
"UZP1",
"UZP2",
"XTN",
"XTN2",
"CSEL",
"LD1",
"LD2",
"LD4",
"ST1",
"ST2",
"ST4",
"LDPSW",
"LDRSW",
"SXTAB",
"SXTB",
"SXTH",
"SXTW",
"EXT",
"EXTR",
"INS",
"UXTAB",
"UXTB",
"UXTH",
"UXTW",
"BFC",
"BFI",
"BIC",
"CLZ",
"REV",
"REV16",
"REV32",
"REV64",
"CSET",
]
ARM_GRP_FLOAT_DTRANSFER = [
# floating point data transfer instructions
"FCPY",
"FCVTMS",
"FCVTMU",
"FCVTZS",
"FCVTZU",
"FCVT",
"FLD",
"FST",
"FMR",
"FMD",
"FMS",
"FMX",
"FSITO",
"FUITO",
"FTOSI",
"FTOUI",
"FMOV",
"UMOV",
"LDUR",
"LDURB",
"LDURH",
"LDURSB",
"LDURSH",
"LDURSW",
"STUR",
"STURB",
"STURH",
"STURSB",
"STURSH",
"STURSW",
"DUP",
"SCVTF",
"UCVTF",
]
ARM_GRP_MISC = [
"UDF",
"NOP",
"MRS",
"MSR",
"MAR",
"MRA",
"VMRS",
"VMSR",
"DBG",
"DMB",
"DSB",
"ISB",
"SETEND",
]
# binary arithmetic instructions:
ARM_GRP_ARITH = [
# general purpose instructions
"ADD",
"ADDW",
"ADDP",
"ADDV",
"ADC",
"SUB",
"SBC",
"RSB",
"RSC",
"CMN",
"CLZ",
"MUL",
"MLA",
"MLS",
"CINC",
"CINV",
"NEG",
"NEGS",
"DIV",
"SMAX",
"SMAXV",
"SMIN",
"SMINV",
"UMULL",
"UMLAL",
"UMLAL2",
"SMLA",
"SMLAL",
"SMLALTT",
"SMUL",
"SMSUB",
"MADD",
"MNEG",
"MSUB",
"SMADDL",
"SMNEGL",
"SMSUBL",
"SMULH",
"SMULL",
"UMADDL",
"UMNEGL",
"UMSUBL",
"UMULH",
"UMULL",
"SDIV",
"UDIV",
"MIA",
"QADD",
"QSUB",
"QDADD",
"QDSUB",
"QASX",
"SADD",
"SADDW",
"SADDW2",
"SASX",
"SHADD",
"SHASX",
"SMLSD",
"SMMLA",
"SMUAD",
"SMUSD",
"SSUB",
"SAT",
"SAX",
"UADD",
"UADDW",
"UADDW2",
"USAT",
"USAX",
"UASX",
"UHADD",
"UHASX",
"UMLSD",
"UMMLA",
"UQADD",
"UQSAX",
"UQSUB",
"UHSAX",
"VABA",
"VABD",
"MAX",
"MIN",
"VMLA",
"VMLS",
"VNMUL",
"VNMLA",
"VNMLS",
"VFMS",
"VFMS",
"VFMA",
"VFMS",
"VFNMA",
"VFNMS",
"VRECPE",
"VSQRT",
"VQRSH",
"UMULL",
"UMAAL",
"UMLAL",
"USADA8",
"VNEG",
"CNEG",
"CSINC",
"CSINV",
"CSNEG",
]
ARM_GRP_FLOAT_ARITH = [
# floating point arithmetic instructions
"FABS",
"FABD",
"FADD",
"FSUB",
"FDIV",
"FMUL",
"FNMUL",
"FSQRT",
"FMAC",
"FNMAC",
"FMSC",
"FNMSC",
"FNEG",
"FMADD",
"FMSUB",
"FNMADD",
"FNMSUB",
"FPINT",
"FCSEL",
"FMAX",
"FMIN",
"FMLA",
"FMLS",
"FRINTM",
"FRINTP",
"FRINT",
]
ARM_GRP_SHIFT = [
# shift operations
"ASR",
"LSL",
"LSR",
"ROR",
"RRX",
"PKHBT",
"PKHTB",
"SHL",
"USHL",
"USHLL",
"USHLL2",
"USHR",
"USRA",
"SSHL",
"SSHLL",
"SSHLL2",
"SSHR",
]
ARM_GRP_CMP = [
# compare instructions
"CMEQ",
"CMGT",
"CMHI",
"CMHS",
"CMP",
"CCMN",
"CCMP",
"VCEQ",
"VCGE",
"VCGT",
"VCLE",
"VCLT",
# from bit
"TST",
"TEQ",
]
ARM_GRP_FLOAT_CMP = [
"VCMP",
"VCMPE",
"FCMPE",
"FCMGT",
"FCM",
"FCMP",
"FCCMP",
"VCM",
]
# Logical Instructions:
ARM_GRP_LOGIC = [
"AND",
"ORR",
"EOR",
"EON",
"ORN",
]
# bit and byte instructions:
ARM_GRP_BIT = [
"TST",
"TEQ",
"BSL",
"BIF",
"BIT",
"BFC",
"BFI",
"BIC",
"CLZ",
"RBIT",
"REV",
"REV16",
"REV32",
"REV64",
"CSET",
]
# control transfer instructions:
ARM_GRP_CTRANSFER = [
"B",
"BR",
"BL",
"BLR",
"BX",
"BLX",
"BXJ",
"BAL",
"BLAL",
"BXAL",
"BLXAL",
"BXJAL",
"SWI",
"BKPT",
"RET",
"YIELD",
"WFE",
"WFI",
"SEV",
"SEVL",
"CPS",
"BRK",
"HLT",
"SVC",
"HVC",
"SMC",
"TRAP",
"ERET",
# ARM POP is return
"POP",
]
ARM_GRP_COND_CTRANSFER = [
"BEQ",
"BNE",
"BCS",
"BCC",
"BMI",
"BPL",
"BVS",
"BVC",
"BHI",
"BLS",
"BGE",
"BLT",
"BGT",
"BLE",
"BLEQ",
"BLNE",
"BLCS",
"BLCC",
"BLMI",
"BLPL",
"BLVS",
"BLVC",
"BLHI",
"BLLS",
"BLGE",
"BLLT",
"BLGT",
"BLLE",
"BXEQ",
"BXNE",
"BXCS",
"BXCC",
"BXMI",
"BXPL",
"BXVS",
"BXVC",
"BXHI",
"BXLS",
"BXGE",
"BXLT",
"BXGT",
"BXLE",
"BLXEQ",
"BLXNE",
"BLXCS",
"BLXCC",
"BLXMI",
"BLXPL",
"BLXVS",
"BLXVC",
"BLXHI",
"BLXLS",
"BLXGE",
"BLXLT",
"BLXGT",
"BLXLE",
"BXJEQ",
"BXJNE",
"BXJCS",
"BXJCC",
"BXJMI",
"BXJPL",
"BXJVS",
"BXJVC",
"BXJHI",
"BXJLS",
"BXJGE",
"BXJLT",
"BXJGT",
"BXJLE",
"TBZ",
"TBNZ",
# combined instructions
"CBZ",
"CBNZ",
]
# ==================== MIPS 32 =============================================
# data transfer
# reference: https://www.cs.cornell.edu/courses/cs3410/2008fa/MIPS_Vol2.pdf
MIPS_GRP_DTRANSFER = [
"LB",
"LBU",
"LH",
"LHU",
"LL",
"LW",
"LWU",
"LD",
"LDL",
"LDR",
"LWL",
"LWR",
"PREF",
"SB",
"SC",
"SD",
"SDL",
"SDR",
"SH",
"ST",
"SW",
"SWL",
"SWR",
"SYNC",
"LUI",
"LDXC1",
"LWXC1",
"SDXC1",
"SWXC1",
"MFHI",
"MFLO",
"MOV",
"MOVF",
"MOVN",
"MOVT",
"MOVZ",
"MTHI",
"MTLO",
"MOVE",
"CVT",
"LDC",
"LWC",
"SDC",
"SWC",
# move
"CFC",
"CTC",
"MFC",
"MTC",
"PREF",
"SYNC",
"SPLAT",
"CFCMSA",
"CTCMSA",
"COPY",
"PUSH",
"SEH",
"SEB",
"WSBH",
"DSBH",
"DSHD",
"MTC0",
"MFC0",
"LDC3",
"LWC3",
"SDC3",
"SWC3",
# coprocessor load, store
"COP2",
"LDC2",
"LWC2",
"SDC2",
"SWC2",
# cop move
"CFC2",
"CTC2",
"MFC2",
"MTC2",
]
MIPS_GRP_FLOAT_DTRANSFER = [
# floating point
"FRINT",
"FCLASS",
# load, store, memory
"LDC1",
"LWC1",
"SDC1",
"SWC1",
# move
"CFC1",
"CTC1",
"MFC1",
"FMOV",
"MOVF",
"MOVN",
"MOVT",
"MOVZ",
"MTC1",
# convert
"FEX",
"FFINT",
"FFQ",
"FTINT",
"FTRUN",
"FTQ",
"FCVT",
"FLOOR",
"ROUND",
"TRUNC",
"FFLOOR",
"FROUND",
"FTRUNC",
"DMFC",
"DMFC1",
"DMTC",
"DMTC1",
"MTHC1",
"MFHC1",
]
# binary arithmetic instructions:
MIPS_GRP_ARITH = [
# general purpose instructions
"ADD",
"ADDI",
"ADDU",
"ADDIU",
"SUB",
"SUBU",
"MUL",
"MULT",
"MULTU",
"CLO",
"CLZ",
"DIV",
"DIVU",
"MADD",
"MADDU",
"MSUB",
"MSUBU",
"AADD",
"ASUB",
"ABS",
"NEG",
"NEGU",
# additional
"DAA",
"DSUB",
"DSUBU",
"DSUBIU",
"DDIV",
"DDIVU",
"DDIVIU",
"DMUL",
"DMULT",
"DMULTU",
"DOTP",
"DPADD",
"DPSUB",
"MADD",
"MAX",
"MIN",
"MSUB",
"MOD",
"SAT",
"HSUB",
"SQRT",
"AUI",
"DAUI",
"DAHI",
"DATI",
"ADDIUPC",
"AUIPC",
"ALUIPC",
"DADD",
"DADDU",
"DADDIU",
"DCLZ",
# from bit
"BMZ",
"BMN",
"BNEG",
]
MIPS_GRP_CMP = [
"SLT",
"SLTI",
"SLTIU",
"SLTU",
# compare instructions
"CMP",
"CEQ",
"CLE",
"CLT",
"CF",
"CUN",
"CEQ",
"CUEQ",
"COLT",
"CULT",
"COLE",
"CULE",
"CSF",
"CNGLE",
"CSEQ",
"CNGL",
"CLT",
"CNGE",
"CLE",
"CNGT",
"CMP",
"CEQ",
"CLE",
"CLT",
"CF",
"CUN",
"CEQ",
"CUEQ",
"COLT",
"CULT",
"COLE",
"CULE",
"CSF",
"CNGLE",
"CSEQ",
"CNGL",
"CLT",
"CNGE",
"CLE",
"CNGT",
"C",
]
MIPS_GRP_FLOAT_CMP = [
# floating point compare instructions
"FACF",
"FC",
"FS",
]
MIPS_GRP_SHIFT = [
# shift operation
"SLL",
"SLLV",
"SRL",
"SRLV",
"SRA",
"SRAV",
"SHL",
"SHR",
"SLD",
"DSLL",
"DSLL32",
"DSLLV",
"DSRA",
"DSRA32",
"DSRAV",
"DSRL",
"DSRL32",
"DSRLV",
"ROTR",
"ROTRV",
"DROTR",
"DROTR32",
"DROTRV",
"LSA",
"DLSA",
]
MIPS_GRP_FLOAT_ARITH = [
# floating point
"FABS",
"FADD",
"FDIV",
"FMADD",
"FMSUB",
"FMUL",
"FNEG",
"FNMADD",
"FNMSUB",
"FEXP",
"FLOG",
"FMAX",
"FMIN",
"FRCP",
"RECIP",
"FRECIP",
"FRSQRT",
"FSQRT",
"FSUB",
]
# Logical Instructions:
MIPS_GRP_LOGIC = [
"AND",
"ANDI",
"NOR",
"OR",
"NOT",
"ORI",
"XOR",
"XORI",
]
# bit and byte instructions:
MIPS_GRP_BIT = [
"BINS",
"DINS",
"DEXT",
"EXT",
"INS",
"BMZ",
"BMN",
"BNEG",
"BSEL",
"BSET",
"BCLR",
# bit wise count
"NLOC",
"NLZC",
"PCNT",
]
MIPS_GRP_MISC = [
"NOP",
"SSNOP",
"CACHE",
"TLBP",
"TLBR",
"TLBWI",
"TLBWR",
]
# control transfer instructions:
MIPS_GRP_CTRANSFER = [
"B",
"BAL",
"J",
"JAL",
"JR",
"JALR",
"BREAK",
"SYSCALL",
"PAUSE",
"WAIT",
"HLT",
"ERET",
"DERET",
"SDBBP",
"BKPT",
"RET",
"MFC0",
"MTC0",
# MIPS POP is return
"POP",
# float
"BC1",
"BC1F",
"BC1T",
"BC1FL",
"BC1TL",
# cop
"BC2F",
"BC2T",
"BC2FL",
"BC2TL",
"BC3F",
"BC3T",
"BC3FL",
"BC3TL",
]
MIPS_GRP_COND_CTRANSFER = [
"BEQ",
"BEQZ",
"BNE",
"BGE",
"BGEZ",
"BGEZAL",
"BGTZ",
"BLEZ",
"BLTZ",
"BLTZAL",
"BNEL",
"BNEZ",
"BNZ",
"TEQ",
"TEQI",
"TGE",
"TGEI",
"TGEIU",
"TGEU",
"TLT",
"TLTI",
"TLTIU",
"TLTU",
"TNE",
"TNEI",
"BEQL",
"BGEZALL",
"BGEZL",
"BGTZL",
"BLEZL",
"BLTZALL",
"BLTZL",
"BNEL",
]
# ============================================
# Below part creates dictionary which groups instructions
X86_GRP_MAP = {
9: X86_GRP_FLOAT_DTRANSFER + X86_GRP_FLOAT_CMP + X86_GRP_FLOAT_ARITH,
10: X86_GRP_MISC + X86_GRP_FLOAT_DTRANSFER + X86_GRP_DTRANSFER,
11: X86_GRP_FLOAT_ARITH + X86_GRP_SHIFT + X86_GRP_ARITH,
12: X86_GRP_LOGIC,
13: X86_GRP_COND_CTRANSFER + X86_GRP_CTRANSFER,
20: X86_GRP_FLOAT_DTRANSFER + X86_GRP_DTRANSFER,
21: X86_GRP_FLOAT_ARITH + X86_GRP_ARITH,
22: X86_GRP_FLOAT_CMP + X86_GRP_CMP,
23: X86_GRP_SHIFT,
24: X86_GRP_BIT,
26: X86_GRP_COND_CTRANSFER,
27: X86_GRP_CTRANSFER,
28: X86_GRP_MISC,
30: [],
}
ARM_GRP_MAP = {
9: ARM_GRP_FLOAT_DTRANSFER + ARM_GRP_FLOAT_CMP + ARM_GRP_FLOAT_ARITH,
10: ARM_GRP_MISC + ARM_GRP_FLOAT_DTRANSFER + ARM_GRP_DTRANSFER,
11: ARM_GRP_FLOAT_ARITH + ARM_GRP_SHIFT + ARM_GRP_ARITH,
12: ARM_GRP_LOGIC,
13: ARM_GRP_COND_CTRANSFER + ARM_GRP_CTRANSFER,
20: ARM_GRP_FLOAT_DTRANSFER + ARM_GRP_DTRANSFER,
21: ARM_GRP_FLOAT_ARITH + ARM_GRP_ARITH,
22: ARM_GRP_FLOAT_CMP + ARM_GRP_CMP,
23: ARM_GRP_SHIFT,
24: ARM_GRP_BIT,
26: ARM_GRP_COND_CTRANSFER,
27: ARM_GRP_CTRANSFER,
28: ARM_GRP_MISC,
30: [],
}
# A64 does not allow instructions to be conditionally executed the way ARM does.
def _copy_for_arm64():
import copy
return copy.deepcopy(ARM_GRP_MAP)
ARM64_GRP_MAP = _copy_for_arm64()
# ARM instructions may carry a two-letter conditional suffix (e.g. BEQ is
# B + EQ); the suffixes are matched lazily in _check_inst() below, so the
# eager expansion loop is kept commented out.
# reference: http://infocenter.arm.com/help/index.jsp
ARM_COND_GROUPS = [9, 10, 11, 13, 20, 21, 22, 26]
ARM_GRP_COND_CODE = [
"EQ",
"NE",
"CS",
"HS",
"CC",
"LO",
"MI",
"PL",
"VS",
"VC",
"HI",
"LS",
"GE",
"LT",
"GT",
"LE",
"AL",
]
# for group_no in ARM_COND_GROUPS:
# for inst in ARM_GRP_MAP[group_no]:
# for cond in ARM_GRP_COND_CODE:
# ARM_GRP_MAP[group_no].append(inst + cond)
MIPS_GRP_MAP = {
9: MIPS_GRP_FLOAT_DTRANSFER + MIPS_GRP_FLOAT_CMP + MIPS_GRP_FLOAT_ARITH,
10: MIPS_GRP_MISC + MIPS_GRP_FLOAT_DTRANSFER + MIPS_GRP_DTRANSFER,
11: MIPS_GRP_FLOAT_ARITH + MIPS_GRP_SHIFT + MIPS_GRP_ARITH,
12: MIPS_GRP_LOGIC,
13: MIPS_GRP_COND_CTRANSFER + MIPS_GRP_CTRANSFER,
20: MIPS_GRP_FLOAT_DTRANSFER + MIPS_GRP_DTRANSFER,
21: MIPS_GRP_FLOAT_ARITH + MIPS_GRP_ARITH,
# mips usually contains compare in conditional branch
22: MIPS_GRP_FLOAT_CMP + MIPS_GRP_CMP + MIPS_GRP_COND_CTRANSFER,
23: MIPS_GRP_SHIFT,
24: MIPS_GRP_BIT,
26: MIPS_GRP_COND_CTRANSFER,
27: MIPS_GRP_CTRANSFER,
28: MIPS_GRP_MISC,
30: [],
}
# ============================================
GRP_NO_MAP = {
# Among capstone's default group mappings, use 1, 2 and 3, since they are
# common to all architectures.
1: "grp_jump",
2: "grp_call",
3: "grp_ret",
9: "floatinst",
10: "abs_dtransfer",
11: "abs_arith",
12: "logic",
13: "abs_ctransfer",
20: "dtransfer",
21: "arith",
22: "cmp",
23: "shift",
24: "bitflag",
26: "cndctransfer",
27: "ctransfer",
28: "misc",
30: "unknown",
}
GRP_NAME_MAP = {val: key for key, val in GRP_NO_MAP.items()}
# ============================================
# Below part maps capstone's internal instruction numbers to pre-defined groups
def _check_inst(target_inst, check_list, suffixes=()):
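# Normalise the mnemonic: strip capstone suffixes after '_' or '.',
# uppercase it, then compare against the group list, optionally allowing
# a two-letter conditional-code suffix.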
target_inst = target_inst.split("_")[0]
target_inst = target_inst.split(".")[0]
target_inst = target_inst.upper()
for inst in check_list:
if target_inst == inst:
return True
# Check conditional code
if target_inst.startswith(inst):
if len(target_inst) - len(inst) == 2:
for suffix in suffixes:
if target_inst == inst + suffix:
return True
return False
def _init_inst_groups(prefix, target, groups):
insts = list(filter(lambda x: x.startswith(prefix), dir(target)))
inst_map = {}
if prefix == "ARM_INS_":
suffixes = ARM_GRP_COND_CODE
else:
suffixes = []
for inst in insts:
inst_no = getattr(target, inst)
inst = inst.replace(prefix, "")
inst_map[inst_no] = []
for group_no, grouped_insts in groups.items():
if _check_inst(inst, grouped_insts, suffixes):
inst_map[inst_no].append(group_no)
if not inst_map[inst_no]:
inst_map[inst_no].append(GRP_NAME_MAP["unknown"])
return inst_map
def _init_groups():
import capstone
x86 = _init_inst_groups("X86_INS_", capstone.x86, X86_GRP_MAP)
arm = _init_inst_groups("ARM_INS_", capstone.arm, ARM_GRP_MAP)
arm64 = _init_inst_groups("ARM64_INS_", capstone.arm64, ARM64_GRP_MAP)
mips = _init_inst_groups("MIPS_INS_", capstone.mips, MIPS_GRP_MAP)
ppc = _init_inst_groups("PPC_INS_", capstone.ppc, PPC_GRP_MAP)
return x86, arm, arm64, mips, ppc
X86_INST_MAP, ARM_INST_MAP, ARM64_INST_MAP, MIPS_INST_MAP, PPC_INST_MAP = _init_groups()
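# Hypothetical lookup sketch (illustrative, not part of the original module):
#   import capstone
#   groups = X86_INST_MAP[capstone.x86.X86_INS_MOV]
#   names = [GRP_NO_MAP[no] for no in groups]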
| 14.753525 | 88 | 0.455952 | 2,666 | 28,253 | 4.683421 | 0.587772 | 0.020663 | 0.016338 | 0.006407 | 0.106279 | 0.077607 | 0.045971 | 0.016979 | 0.016979 | 0.016979 | 0 | 0.024486 | 0.343751 | 28,253 | 1,914 | 89 | 14.761233 | 0.64894 | 0.108201 | 0 | 0.174723 | 0 | 0 | 0.297529 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.00233 | false | 0 | 0.001747 | 0 | 0.007571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdd85f75dbf5d4102a25ebc2445a8a860dd88729 | 4,920 | py | Python | networkunit/scores/score_kl_divergence.py | morales-gregorio/NetworkUnit | b858c3b2698fe3c0a7324ae8b8b388b74fd13c4d | [
"BSD-3-Clause"
] | 8 | 2017-11-16T08:45:48.000Z | 2021-11-29T16:51:45.000Z | networkunit/scores/score_kl_divergence.py | morales-gregorio/NetworkUnit | b858c3b2698fe3c0a7324ae8b8b388b74fd13c4d | [
"BSD-3-Clause"
] | 17 | 2017-11-16T07:53:26.000Z | 2021-05-07T10:27:34.000Z | networkunit/scores/score_kl_divergence.py | russelljjarvis/NetworkUnit | 32179371d3a0ba354e6637cf4f97ba70522d4054 | [
"BSD-3-Clause"
] | 5 | 2019-03-23T00:55:33.000Z | 2020-01-24T10:12:11.000Z | import numpy as np
from scipy.stats import entropy
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import seaborn as sns
import sciunit
class kl_divergence(sciunit.Score):
"""
Kullback-Leibler Divergence D_KL(P||Q)
Calculates the difference of two sampled distributions P and Q in the form
of an entropy measure. The D_KL measure is effectively the difference of
the cross-entropy of both distributions P and Q and the entropy of P.
D_KL can be interpreted as the amount of information lost when
approximating P by Q.
.. math:: D_\mathrm{KL}(P||Q) = \sum_{i} P(i) \log_2 \frac{P(i)}{Q(i)}
    = H(P,Q) - H(P)
The returned score is the symmetric version of the KL divergence
.. math:: D_\mathrm{KL}(P,Q) := \frac{1}{2} \left(D_\mathrm{KL}(P||Q) +
    D_\mathrm{KL}(Q||P)\right)
Parameters
----------
kl_binsize : float
Bin size of the histogram, used to calculate the KL divergence.
"""
score = np.nan
@classmethod
def compute(self, data_sample_1, data_sample_2, kl_binsize=0.005, **kwargs):
# filtering out nans
sample1 = np.array(data_sample_1)[np.isfinite(data_sample_1)]
sample2 = np.array(data_sample_2)[np.isfinite(data_sample_2)]
max_value = max(max(sample1), max(sample2))
min_value = min(min(sample1), min(sample2))
bins = int((max_value - min_value) / kl_binsize)
edges = np.linspace(min_value, max_value, bins)
P, edges = np.histogram(sample1, bins=edges, density=True)
Q, _ = np.histogram(sample2, bins=edges, density=True)
# dx = np.diff(edges)[0]
# edges = edges[:-1]
# P *= dx
# Q *= dx
init_len = len(P)
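# Drop bins where either histogram is empty so every KL term stays
# finite; the number of discarded bins is reported on the score.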
Qnot0 = np.where(Q != 0.)[0]
P_non0 = P[Qnot0]
Q_non0 = Q[Qnot0]
Pnot0 = np.where(P_non0 != 0.)[0]
Q_non0 = Q_non0[Pnot0]
P_non0 = P_non0[Pnot0]
final_len = len(P_non0)
discard = init_len - final_len
D_KL_PQ = entropy(P_non0, Q_non0, base=2)
D_KL_QP = entropy(Q_non0, P_non0, base=2)
D_KL = .5 * (D_KL_PQ + D_KL_QP)
self.score = kl_divergence(D_KL)
self.score.data_size = [len(sample1), len(sample2)]
self.score.discarded_values = discard
self.score.bins = len(edges)-1
return self.score
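# Hypothetical usage sketch (sample data invented for illustration):
#   score = kl_divergence.compute(np.random.normal(0., 1., 10000),
#                                 np.random.normal(.3, 1., 10000))
#   print(score)  # symmetric D_KL plus data sizes and bin count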
@classmethod
def plot(self, data_sample_1, data_sample_2, ax=None, palette=None,
var_name='Measured Parameter', kl_binsize=0.005,
sample_names=['observation', 'prediction'], **kwargs):
if ax is None:
fig, ax = plt.subplots()
ax.set_ylabel('Probability Density')
ax.set_xlabel(var_name)
if palette is None:
palette = [sns.color_palette()[0], sns.color_palette()[1]]
sample1 = np.array(data_sample_1)[np.isfinite(data_sample_1)]
sample2 = np.array(data_sample_2)[np.isfinite(data_sample_2)]
max_value = max(max(sample1), max(sample2))
min_value = min(min(sample1), min(sample2))
bins = int((max_value - min_value) / kl_binsize)
edges = np.linspace(min_value, max_value, bins)
P, edges = np.histogram(sample1, bins=edges, density=True)
Q, _ = np.histogram(sample2, bins=edges, density=True)
dx = np.diff(edges)[0]
edges = edges[:-1]
xvalues = edges + dx/2.
xvalues = np.append(np.append(xvalues[0]-dx, xvalues), xvalues[-1]+dx)
def secure_log(E, D):
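# Elementwise log(e/d) that returns 0 wherever either bin is empty,
# avoiding log(0) and division by zero.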
log = np.zeros_like(E)
i = 0
for e, d in zip(E, D):
if e == 0 or d == 0:
log[i] = 0.
else:
log[i] = np.log(e/d)
i += 1
return log
diffy = .5 * (P - Q) * secure_log(P, Q.astype(float))
P = np.append(np.append(0, P), 0)
Q = np.append(np.append(0, Q), 0)
filly = np.append(np.append(0., diffy), 0.)
ax.fill_between(xvalues, filly, 0, color='0.8', label='d/dx DKL')
ax.plot(xvalues, P, lw=2, color=palette[0], label=sample_names[0])
ax.plot(xvalues, Q, lw=2, color=palette[1], label=sample_names[1])
ax.set_xlim(xvalues[0], xvalues[-1])
ax.set_yscale('log')
plt.legend()
return ax
@property
def sort_key(self):
return self.score
def __str__(self):
return "\n\n\033[4mKullback-Leibler-Divergence\033[0m" \
+ "\n\tdatasize: {} \t {}" \
.format(self.data_size[0], self.data_size[1]) \
+ "\n\tdiscarded: {}" \
.format(self.discarded_values) \
+ "\n\tD_KL = {:.3f} \t bins = {}\n\n" \
.format(self.score, self.bins) | 35.912409 | 80 | 0.570732 | 706 | 4,920 | 3.815864 | 0.24221 | 0.044543 | 0.024499 | 0.025241 | 0.344469 | 0.312546 | 0.301411 | 0.282108 | 0.282108 | 0.282108 | 0 | 0.032184 | 0.292683 | 4,920 | 137 | 81 | 35.912409 | 0.741954 | 0.172764 | 0 | 0.263736 | 0 | 0 | 0.047595 | 0.011273 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054945 | false | 0 | 0.065934 | 0.021978 | 0.197802 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdd862ba88395be8cb418cfab8ce408473323919 | 635 | py | Python | kikyo/config.py | jadbin/kikyo | 98d875e85a28b4141cbd6616bba3d397a4219dc9 | [
"MIT"
] | null | null | null | kikyo/config.py | jadbin/kikyo | 98d875e85a28b4141cbd6616bba3d397a4219dc9 | [
"MIT"
] | null | null | null | kikyo/config.py | jadbin/kikyo | 98d875e85a28b4141cbd6616bba3d397a4219dc9 | [
"MIT"
] | null | null | null | import base64
import io
import requests
import yaml
from kikyo import Kikyo, Settings
def configure_by_consul(config_url: str, **kwargs) -> Kikyo:
"""从Consul拉取YAML格式的配置文件
:param config_url: 获取配置项的URL地址
"""
resp = requests.get(config_url)
resp.raise_for_status()
settings = Settings()
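# Consul's KV HTTP API returns each entry's value base64-encoded,
# so decode it before parsing the YAML payload.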
for data in resp.json():
v = data['Value']
if not v:
continue
s = base64.b64decode(v)
conf: dict = yaml.safe_load(io.BytesIO(s))
if 'kikyo' in conf:
settings.merge(conf['kikyo'])
break
settings.merge(kwargs)
return Kikyo(settings)
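# Hypothetical usage sketch (the Consul address is illustrative):
#   kikyo = configure_by_consul('http://127.0.0.1:8500/v1/kv/kikyo?recurse')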
| 19.84375 | 60 | 0.617323 | 77 | 635 | 4.987013 | 0.545455 | 0.070313 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013129 | 0.280315 | 635 | 31 | 61 | 20.483871 | 0.827133 | 0.08189 | 0 | 0 | 0 | 0 | 0.026408 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.25 | 0 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdd932ec045f92a204fc0462b20bc5fe9de822e1 | 5,077 | py | Python | tests/ep_canvas_test.py | PytLab/catplot | 63ad46218b17d5cdffdd026dad7d775cf4caa50b | [
"MIT"
] | 35 | 2015-12-23T08:01:15.000Z | 2021-11-03T01:34:20.000Z | tests/ep_canvas_test.py | PytLab/catplot | 63ad46218b17d5cdffdd026dad7d775cf4caa50b | [
"MIT"
] | 1 | 2015-11-25T05:52:43.000Z | 2017-04-11T14:06:00.000Z | tests/ep_canvas_test.py | PytLab/catplot | 63ad46218b17d5cdffdd026dad7d775cf4caa50b | [
"MIT"
] | 10 | 2015-11-06T20:23:32.000Z | 2020-05-16T19:18:38.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test case for Energy Profle Canvas.
"""
import unittest
import matplotlib.pyplot as plt
from catplot.ep_components.ep_canvas import EPCanvas
from catplot.ep_components.ep_lines import ElementaryLine
from catplot.ep_components.ep_chain import EPChain
class EPCanvasTest(unittest.TestCase):
def setUp(self):
self.maxDiff = True
def test_construction_and_query(self):
""" Test we can construct ElementaryLine object correctly.
"""
canvas = EPCanvas(margin_ratio=0.2)
self.assertEqual(canvas.margin_ratio, 0.2)
self.assertIsNone(canvas.figsize)
self.assertIsNone(canvas.dpi)
self.assertIsNone(canvas.facecolor)
self.assertIsNone(canvas.edgecolor)
self.assertListEqual(canvas.lines, [])
self.assertListEqual(canvas.shadow_lines, [])
self.assertTrue(canvas.figure)
self.assertTrue(canvas.axes)
# Check an invalid margin ratio raises ValueError.
self.assertRaises(ValueError, EPCanvas, margin_ratio=-0.1)
plt.close(canvas.figure)
def test_draw(self):
""" Make sure the lines can be added without exceptions.
"""
canvas = EPCanvas()
line = ElementaryLine([0.0, 1.3, 0.8])
canvas.add_lines([line])
canvas.draw()
plt.close(canvas.figure)
def test_add_species_annotations(self):
""" Make sure the species annotations can be added without exceptions.
"""
canvas = EPCanvas()
line = ElementaryLine([0.0, 1.3, 0.8],
rxn_equation="CO_b + O_b <-> CO-O_2b -> CO2_g + 2*_b")
canvas.add_lines([line])
canvas.add_species_annotations(line)
plt.close(canvas.figure)
def test_add_horizontal_auxiliary_line(self):
""" Make sure the horizontal line can be added without exceptions.
"""
canvas = EPCanvas()
line = ElementaryLine([0.0, 1.3, 0.8])
canvas.add_lines([line])
canvas.add_horizontal_auxiliary_line(line)
plt.close(canvas.figure)
def test_add_vertical_auxiliary_line(self):
""" Make sure the vertical line can be added without exceptions.
"""
canvas = EPCanvas()
line = ElementaryLine([0.0, 1.3, 0.8])
canvas.add_lines([line])
canvas.add_vertical_auxiliary_lines(line)
plt.close(canvas.figure)
def test_add_energy_annotations(self):
""" Make sure the energy annotations can be added correctly.
"""
canvas = EPCanvas()
line = ElementaryLine([0.0, 1.3, 0.8])
canvas.add_lines([line])
canvas.add_energy_annotations(line)
plt.close(canvas.figure)
def test_add_chain(self):
""" Test energy profile chain can be added correctly to canvas.
"""
canvas = EPCanvas()
self.assertFalse(canvas.lines)
self.assertFalse(canvas.chains)
l1 = ElementaryLine([0.0, 1.2, 0.6])
l2 = ElementaryLine([0.0, 1.0, 0.8])
chain = EPChain([l1, l2])
canvas.add_chain(chain)
self.assertEqual(len(canvas.lines), 2)
for l in canvas.lines:
self.assertTrue(isinstance(l, ElementaryLine))
self.assertEqual(len(canvas.chains), 1)
self.assertTrue(isinstance(canvas.chains[0], EPChain))
# Exception is expected if add the chain again.
self.assertRaises(ValueError, canvas.add_chain, chain)
plt.close(canvas.figure)
def test_contains(self):
canvas = EPCanvas()
l1 = ElementaryLine([0.0, 1.2, 0.6])
l2 = ElementaryLine([0.0, 1.0, 0.8])
chain = EPChain([l1])
canvas.add_chain(chain)
self.assertTrue(l1 in canvas)
self.assertTrue(chain in canvas)
self.assertFalse(l2 in canvas)
plt.close(canvas.figure)
def test_add_line(self):
""" Test the line can be add to canvas correctly.
"""
canvas = EPCanvas()
l1 = ElementaryLine([0.0, 1.2, 0.6])
canvas.add_line(l1)
# Add repeat line, exception raises.
self.assertRaises(ValueError, canvas.add_line, l1)
plt.close(canvas.figure)
def test_add_lines(self):
canvas = EPCanvas()
l1 = ElementaryLine([0.0, 1.2, 0.6])
l2 = ElementaryLine([0.0, 1.0, 0.8])
canvas.add_lines([l1, l2])
canvas.lines = []
self.assertRaises(ValueError, canvas.add_lines, [l1, l1])
plt.close(canvas.figure)
def test_add_all_horizontal_auxiliary_lines(self):
""" Make sure we can add all horizontal auxiliary lines to canvas.
"""
canvas = EPCanvas()
l1 = ElementaryLine([0.0, 1.2, 0.6])
l2 = ElementaryLine([0.0, 1.0, 0.8])
canvas.add_lines([l1, l2])
canvas.add_all_horizontal_auxiliary_lines()
plt.close(canvas.figure)
if "__main__" == __name__:
suite = unittest.TestLoader().loadTestsFromTestCase(EPCanvasTest)
unittest.TextTestRunner(verbosity=2).run(suite)
| 30.769697 | 84 | 0.623203 | 631 | 5,077 | 4.885895 | 0.187005 | 0.011677 | 0.072657 | 0.077198 | 0.508271 | 0.378852 | 0.334415 | 0.314953 | 0.272138 | 0.242945 | 0 | 0.031957 | 0.26039 | 5,077 | 164 | 85 | 30.957317 | 0.789081 | 0.157179 | 0 | 0.425743 | 0 | 0 | 0.010921 | 0 | 0 | 0 | 0 | 0 | 0.217822 | 1 | 0.118812 | false | 0 | 0.049505 | 0 | 0.178218 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdd9723808bf563a488aa0b07c42aceeac435545 | 458 | py | Python | Leetcode/medium/bitwise-and-of-numbers-range.py | jen-sjen/data-structures-basics-leetcode | addac32974b16e0a37aa60c210ab7820b349b279 | [
"MIT"
] | 6 | 2021-07-29T03:26:20.000Z | 2022-01-28T15:11:45.000Z | Leetcode/medium/bitwise-and-of-numbers-range.py | jen-sjen/data-structures-basics-leetcode | addac32974b16e0a37aa60c210ab7820b349b279 | [
"MIT"
] | 2 | 2021-09-30T09:47:23.000Z | 2022-01-31T03:08:24.000Z | Leetcode/medium/bitwise-and-of-numbers-range.py | jen-sjen/data-structures-basics-leetcode | addac32974b16e0a37aa60c210ab7820b349b279 | [
"MIT"
] | 5 | 2021-08-10T06:41:11.000Z | 2022-01-29T17:50:20.000Z | """
# BITWISE AND OF NUMBERS RANGE
Given a range [m, n] where 0 <= m <= n <= 2147483647, return the bitwise AND of all numbers in this range, inclusive.
Example 1:
Input: [5,7]
Output: 4
Example 2:
Input: [0,1]
Output: 0
"""
class Solution:
def rangeBitwiseAnd(self, m: int, n: int) -> int:
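# Strip the lowest bit of both bounds until they agree; what remains is
# the common binary prefix of m and n, which is exactly the AND of every
# integer in [m, n]. Shift it back into place at the end.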
count = 0
while m < n:
m = m >> 1
n = n >> 1
count += 1
return m << count | 17.615385 | 117 | 0.508734 | 66 | 458 | 3.530303 | 0.515152 | 0.025751 | 0.103004 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.080139 | 0.373362 | 458 | 26 | 118 | 17.615385 | 0.731707 | 0.482533 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdde05efcf874ead71f10d44b9b94987c03fce5e | 995 | py | Python | tests/test_git.py | graycarl/hbk | d4c90807b2558a2b61fb1253d9804fbaf373443f | [
"MIT"
] | 1 | 2021-07-22T05:25:35.000Z | 2021-07-22T05:25:35.000Z | tests/test_git.py | graycarl/hbk | d4c90807b2558a2b61fb1253d9804fbaf373443f | [
"MIT"
] | 37 | 2017-07-27T06:07:25.000Z | 2020-12-11T12:57:31.000Z | tests/test_git.py | graycarl/hbk | d4c90807b2558a2b61fb1253d9804fbaf373443f | [
"MIT"
] | 1 | 2019-04-02T08:36:32.000Z | 2019-04-02T08:36:32.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from builtins import * # noqa
import pytest
from hbkit import libs
@pytest.fixture
def git_config():
content = \
"""
[core]
repositoryformatversion = 0
filemode = true
bare = false
logallrefupdates = true
ignorecase = true
precomposeunicode = true
[remote "origin"]
url = https://github.com/graycarl/hbkit.git
fetch = +refs/heads/*:refs/remotes/origin/*
[remote "other"]
url = https://gitlab.com/graycarl/hbkit.git
fetch = +refs/heads/*:refs/remotes/origin/*
[branch "master"]
remote = origin
merge = refs/heads/master
[branch "Github-Check-CI"]
remote = origin
merge = refs/heads/Github-Check-CI
"""
return content
def test_iter_remote_from_git_config(git_config):
remotes = list(libs.git.iter_remotes_from_git_config(git_config))
expect = [
'https://github.com/graycarl/hbkit.git',
'https://gitlab.com/graycarl/hbkit.git'
]
assert remotes == expect
| 23.690476 | 69 | 0.706533 | 127 | 995 | 5.393701 | 0.417323 | 0.065693 | 0.093431 | 0.110949 | 0.405839 | 0.265693 | 0.145985 | 0.145985 | 0.145985 | 0.145985 | 0 | 0.002398 | 0.161809 | 995 | 41 | 70 | 24.268293 | 0.818945 | 0.026131 | 0 | 0 | 0 | 0 | 0.15914 | 0 | 0 | 0 | 0 | 0 | 0.066667 | 1 | 0.133333 | false | 0 | 0.266667 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bddf54c37693012c2ebee8e890c2bc5f10dfd58d | 5,510 | py | Python | responsible_ai/gan_data_debiased/main.py | AaratiAkkapeddi/nnabla-examples | db9e5ad850303c158773aeb275e5c3821b4a3935 | [
"Apache-2.0"
] | null | null | null | responsible_ai/gan_data_debiased/main.py | AaratiAkkapeddi/nnabla-examples | db9e5ad850303c158773aeb275e5c3821b4a3935 | [
"Apache-2.0"
] | null | null | null | responsible_ai/gan_data_debiased/main.py | AaratiAkkapeddi/nnabla-examples | db9e5ad850303c158773aeb275e5c3821b4a3935 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from nnabla.ext_utils import get_extension_context
import nnabla as nn
import args
import data_loader as dl
import classifier as clf
from utils import utils
def model_train_setting(opt):
"""
Get the model training settings
Args:
opt : dict containing values for all of the options
Returns:
opt updated with the data settings needed for training
"""
attr_list = utils.get_all_attr()
if opt['model_train'] == 'baseline':
data_params = {
"train_beg": opt['train_beg'],
"valid_beg": opt['valid_beg'],
"test_beg": opt['test_beg'],
}
data_setting = {
'path': opt['base_img_path'],
'protected_attribute': opt['protected_attribute'],
'attribute': opt['attribute'],
'data_params': data_params,
'batch_size': opt['batch_size'],
'learning_rate': opt['learning_rate'],
'max_iter': opt['max_iter_base']
}
opt['data_setting'] = data_setting
if opt['model_train'] == 'gan_debiased':
data_params = {
"train_beg": opt['train_beg'],
"valid_beg": opt['valid_beg'],
"test_beg": opt['test_beg'],
}
real_params = {
'path': opt['base_img_path'],
'attribute': opt['attribute'],
'protected_attribute': opt['protected_attribute'],
'data_params': data_params
}
generated_images = "{}/AllGenImages".format(opt["fake_data_dir"])
flipped_images = "{}/{}/".format(opt["fake_data_dir"],
attr_list[opt['attribute']])
label_score = "{}/all_{}_scores.pkl".format(opt['fake_data_dir'],
attr_list[opt['attribute']])
domain_score = "{}/all_{}_scores.pkl".format(opt['fake_data_dir'],
attr_list[opt['protected_attribute']])
generated_params = {
'generated_image_path': generated_images,
'flipped_images_path': flipped_images,
'label_path': label_score,
'domain_path': domain_score,
# flipped images span indices 15000 to 175000
'flipped_image_range': (15000, 175000),
'orig_label_range': (160000, 320000), # original label range
'new_range': (0, 160000), # new images
}
data_setting = {
'real_params': real_params,
'gen_params': generated_params,
'batch_size': opt['batch_size'],
'learning_rate': opt['learning_rate'],
'max_iter': opt['max_iter_gan_debiased']
}
opt['data_setting'] = data_setting
return opt
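# The settings above drive two training modes: 'baseline' trains the
# attribute classifier on real CelebA images only, while 'gan_debiased'
# additionally mixes in GAN-generated, attribute-flipped images to balance
# the protected attribute (see main() below).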
def main():
"""
main method
"""
opt = args.get_args()
opt = model_train_setting(opt)
ctx = get_extension_context(
opt['context'], device_id=opt['device_id'], type_config=opt['type_config'])
nn.set_default_context(ctx)
# model configurations
batch_size = opt['data_setting']['batch_size']
learning_rate = opt['data_setting']['learning_rate']
max_iter = opt['data_setting']['max_iter']
if (opt["model_train"] == 'baseline'):
train = dl.actual_celeba_dataset(opt['data_setting'], batch_size,
augment=True, split='train', shuffle=True)
val = dl.actual_celeba_dataset(opt['data_setting'], batch_size,
augment=False, split='valid', shuffle=False)
val_weight = None
elif (opt["model_train"] == 'gan_debiased'):
train = dl.debiased_celeba_dataset(opt['data_setting'], batch_size,
augment=True, split='train', shuffle=True)
val = dl.actual_celeba_dataset(opt['data_setting']['real_params'], batch_size,
augment=False, split='valid', shuffle=False)
val_weight = utils.compute_class_weight(val)
else:
print("please provide proper argument")
sys.exit(0)
attr_list = utils.get_all_attr()
if not os.path.exists(opt['model_save_path']):
os.makedirs(opt['model_save_path'])
monitor_path = os.path.join(
opt['model_save_path'], attr_list[opt['attribute']])
if not os.path.exists(monitor_path):
os.makedirs(monitor_path)
attribute_classifier_model = clf.attribute_classifier(batch_size=batch_size,
learning_rate=learning_rate,
max_iter=max_iter,
monitor_path=monitor_path,
val_weight=val_weight)
attribute_classifier_model.train(train, val)
if __name__ == '__main__':
main()
| 38.531469 | 91 | 0.580581 | 622 | 5,510 | 4.853698 | 0.287781 | 0.047367 | 0.041736 | 0.027824 | 0.398145 | 0.262007 | 0.262007 | 0.245446 | 0.245446 | 0.229215 | 0 | 0.013089 | 0.306715 | 5,510 | 142 | 92 | 38.802817 | 0.777225 | 0.147913 | 0 | 0.294118 | 0 | 0 | 0.229275 | 0.004534 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019608 | false | 0 | 0.078431 | 0 | 0.107843 | 0.009804 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bde0ad9f17012d7ebc6ee66313fe41b54189ab35 | 5,109 | py | Python | hstools/utilities.py | saisiddu/pub_bandaragoda_etal_ems | d06e23c7c5dfa772d5dfe55c33bcf7abbd5e2060 | [
"MIT"
] | 1 | 2019-09-24T15:22:05.000Z | 2019-09-24T15:22:05.000Z | hstools/utilities.py | saisiddu/pub_bandaragoda_etal_ems | d06e23c7c5dfa772d5dfe55c33bcf7abbd5e2060 | [
"MIT"
] | null | null | null | hstools/utilities.py | saisiddu/pub_bandaragoda_etal_ems | d06e23c7c5dfa772d5dfe55c33bcf7abbd5e2060 | [
"MIT"
] | null | null | null | from __future__ import print_function
import os
from IPython.core.display import display, HTML
import glob
from .compat import *
def sizeof_fmt(num, suffix='B'):
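# Render a byte count with human-readable binary-prefix units,
# e.g. sizeof_fmt(1536) -> '1.5KiB'.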
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def get_hs_content(resid):
resdir = find_resource_directory(resid)
content = {}
for f in glob.glob('%s/*/data/contents/*' % resdir):
fname = os.path.basename(f)
content[fname] = f
return content
def find_resource_directory(resid):
download_dir = os.environ.get('JUPYTER_DOWNLOADS', 'hs_downloads')
# loop over all the files in userspace
for dirpath, dirnames, filenames in os.walk(download_dir):
for dirname in [d for d in dirnames]:
if dirname == resid:
return os.path.join(dirpath, dirname)
return None
def check_for_ipynb(content_files):
links = {}
for f, p in content_files.items():
if f[-5:] == 'ipynb':
fname = os.path.basename(p)
url = urlencode(p)
links[fname] = url
return links
def display_tree(resid):
# todo: display a tree view of the resource bagit, based on id
pass
def display_resource_content_files(content_file_dictionary,
text='Found the following content when parsing the HydroShare resource:'):
# get ipynb files
nbs = check_for_ipynb(content_file_dictionary)
if len(nbs.keys()) > 0:
display(HTML('<b>Found the following notebook(s) associated with this HydroShare resource.</b><br>Click the link(s) below to launch the notebook.'))
for name, url in nbs.items():
display(HTML('<a href=%s target="_blank">%s<a>' % (url, name)))
# print the remaining files
if len(content_file_dictionary.keys()) > 0:
display(HTML('<b>Found the following file(s) associated with this HydroShare resource.</b>'))
text = '<br>'.join(content_file_dictionary.keys())
display(HTML(text))
if (len(content_file_dictionary.keys()) + len(nbs.keys())) > 0:
display(HTML('These files are stored in a dictionary called <b>hs.content</b> for your convenience. To access a file, simply issue the following command where MY_FILE is one of the files listed above: <pre>hs.content["MY_FILE"] </pre> '))
def load_environment(env_path=None):
# load the environment path (if it exists)
if env_path is None:
env_path = os.path.join(os.environ.get('NOTEBOOK_HOME', './'), '.env')
if not os.path.exists(env_path):
return
with open(env_path, 'r') as f:
lines = f.readlines()
print('Adding the following system variables:')
for line in lines:
k, v = line.strip().split('=', 1)
os.environ[k] = v
print(' %s = %s' % (k, v))
print('\nThese can be accessed using the following command: ')
print(' os.environ[key]')
print('\n (e.g.)\n os.environ["HS_USR_NAME"] => %s' % os.environ['HS_USR_NAME'])
def get_env_var(varname):
if varname in os.environ.keys():
return os.environ[varname]
else:
return input('Could not find %s, please specify a value: ' % varname).strip()
def get_server_url_for_path(p):
"""
gets the url corresponding to a given file or directory path
p : path to convert into a url
returns the url path for the filepath p
"""
load_environment()
rel_path = os.path.relpath(p, os.environ['NOTEBOOK_HOME'])
url = urlencode(rel_path)
return url
def get_relative_path(p):
"""
gets the path relative to the jupyter home directory
p: path to convert into relative path
returns the path relative to the default jupyter home directory
"""
return os.path.relpath(p, os.environ['NOTEBOOK_HOME'])
def _realname(path, root=None):
if root is not None:
path = os.path.join(root, path)
result = os.path.basename(path)
if os.path.islink(path):
realpath = os.readlink(path)
result = '%s -> %s' % (os.path.basename(path), realpath)
return result
def tree(startpath, depth=-1):
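# Print an ASCII tree of the directory rooted at startpath, e.g.:
#   tree('hs_downloads', depth=2)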
prefix = 0
if startpath != '/':
if startpath.endswith('/'):
startpath = startpath[:-1]
prefix = len(startpath)
for root, dirs, files in os.walk(startpath):
level = root[prefix:].count(os.sep)
if depth > -1 and level > depth:
continue
indent = subindent = ''
if level > 0:
indent = '| ' * (level-1) + '|-- '
subindent = '| ' * (level) + '|-- '
print('{}{}/'.format(indent, _realname(root)))
# print a dir entry here only if it is a symbolic link; real dirs are
# printed when the walk reaches them as root
for d in dirs:
if os.path.islink(os.path.join(root, d)):
print('{}{}'.format(subindent, _realname(d, root=root)))
for f in files:
print('{}{}'.format(subindent, _realname(f, root=root)))
| 30.963636 | 247 | 0.607164 | 702 | 5,109 | 4.323362 | 0.294872 | 0.0257 | 0.034596 | 0.015815 | 0.135091 | 0.098188 | 0.070511 | 0.04547 | 0 | 0 | 0 | 0.006094 | 0.261304 | 5,109 | 164 | 248 | 31.152439 | 0.798092 | 0.106087 | 0 | 0 | 0 | 0.019417 | 0.204873 | 0.020819 | 0 | 0 | 0 | 0.006098 | 0 | 1 | 0.116505 | false | 0.009709 | 0.048544 | 0 | 0.281553 | 0.087379 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bde5bd2cb3f7fdf8cc6f96a4c93e07d27f29156e | 16,286 | py | Python | activity/activity_IngestDigestToEndpoint.py | elifesciences/elife-bot | d3a102c8030e4b7ec83cbd45e5f839dba4f9ffd9 | [
"MIT"
] | 17 | 2015-02-10T07:10:29.000Z | 2021-05-14T22:24:45.000Z | activity/activity_IngestDigestToEndpoint.py | elifesciences/elife-bot | d3a102c8030e4b7ec83cbd45e5f839dba4f9ffd9 | [
"MIT"
] | 459 | 2015-03-31T18:24:23.000Z | 2022-03-30T19:44:40.000Z | activity/activity_IngestDigestToEndpoint.py | elifesciences/elife-bot | d3a102c8030e4b7ec83cbd45e5f839dba4f9ffd9 | [
"MIT"
] | 9 | 2015-04-18T16:57:31.000Z | 2020-10-30T11:49:13.000Z | import os
import time
import json
from collections import OrderedDict
from digestparser import json_output
from provider.execution_context import get_session
from provider.article_processing import download_jats
from provider import digest_provider, email_provider, lax_provider, utils
from activity.objects import Activity
class activity_IngestDigestToEndpoint(Activity):
def __init__(self, settings, logger, conn=None, token=None, activity_task=None):
super(activity_IngestDigestToEndpoint, self).__init__(
settings, logger, conn, token, activity_task
)
self.name = "IngestDigestToEndpoint"
self.pretty_name = "Ingest Digest to API endpoint"
self.version = "1"
self.default_task_heartbeat_timeout = 30
self.default_task_schedule_to_close_timeout = 60 * 5
self.default_task_schedule_to_start_timeout = 30
self.default_task_start_to_close_timeout = 60 * 5
self.description = (
"Send Digest JSON to an API endpoint,"
+ " to be run when a research article is ingested"
)
# Local directory settings
self.directories = {
"TEMP_DIR": os.path.join(self.get_tmp_dir(), "tmp_dir"),
"INPUT_DIR": os.path.join(self.get_tmp_dir(), "input_dir"),
}
# Track the success of some steps
self.statuses = OrderedDict(
[
("approve", None),
("download", None),
("generate", None),
("ingest", None),
]
)
# Digest JSON content
self.digest_content = None
# Load the config
self.digest_config = digest_provider.digest_config(
self.settings.digest_config_section, self.settings.digest_config_file
)
def do_activity(self, data=None):
self.logger.info("data: %s" % json.dumps(data, sort_keys=True, indent=4))
success, run, session, article_id, version = self.session_data(data)
self.make_activity_directories()
# get session data
if success is not True:
self.logger.error("Failed to parse session data in %s" % self.pretty_name)
return self.ACTIVITY_PERMANENT_FAILURE
# emit start message
success = self.emit_start_message(article_id, version, run)
if success is not True:
self.logger.error("Failed to emit a start message in %s" % self.pretty_name)
return self.ACTIVITY_PERMANENT_FAILURE
# Approve for ingestion
self.statuses["approve"] = self.approve(
article_id,
session.get_value("status"),
version,
session.get_value("run_type"),
)
if self.statuses.get("approve") is not True:
self.logger.info(
"Digest for article %s was not approved for ingestion" % article_id
)
self.emit_end_message(article_id, version, run)
return self.ACTIVITY_SUCCESS
try:
digest_details = self.gather_digest_details(
article_id, version, session.get_value("expanded_folder")
)
except Exception as exception:
# send email error if any error message is returned
message = "Error in gathering digest details: %s" % str(exception)
self.logger.exception(message)
return self.email_error_return(article_id, message)
# generate the digest content
try:
self.digest_content = self.generate_digest_content(
article_id, digest_details
)
except Exception as exception:
# send email error if unable to generate digest content
message = "Error in generating digest content for article: %s" % str(
exception
)
self.logger.exception(message)
return self.email_error_return(article_id, message)
# issue put to the endpoint
digest_id = self.digest_content.get("id")
# set the stage attribute depending on silent correction or not
if (
session.get_value("run_type")
and session.get_value("run_type") == "silent-correction"
):
digest_provider.set_stage(self.digest_content, "published")
else:
digest_provider.set_stage(self.digest_content, "preview")
self.logger.info(
"Digest stage value %s" % str(self.digest_content.get("stage"))
)
try:
put_response = digest_provider.put_digest_to_endpoint(
self.logger, digest_id, self.digest_content, self.settings
)
if put_response:
self.statuses["ingest"] = True
except Exception as exception:
# email error message and return self.ACTIVITY_SUCCESS
message = "Failed to ingest digest json to endpoint %s in %s: %s" % (
article_id,
self.pretty_name,
str(exception),
)
self.logger.exception(message)
return self.email_error_return(article_id, message)
self.logger.info(
"%s for article_id %s statuses: %s"
% (self.name, str(article_id), self.statuses)
)
self.emit_end_message(article_id, version, run)
return self.ACTIVITY_SUCCESS
def session_data(self, data):
"get session data and return basic values"
run = None
session = None
version = None
article_id = None
success = None
try:
run = data["run"]
session = get_session(self.settings, data, run)
version = session.get_value("version")
article_id = session.get_value("article_id")
success = True
except (TypeError, KeyError) as exception:
self.logger.exception(
"Exception when getting the session for Starting ingest digest "
+ " to endpoint. Details: %s" % str(exception)
)
return success, run, session, article_id, version
def email_error_return(self, article_id, message):
"""log exception, email error message and return activity result"""
send_error_email(article_id, message, self.settings, self.logger)
return self.ACTIVITY_SUCCESS
def emit_message(self, article_id, version, run, status, message):
"emit message to the queue"
try:
self.emit_monitor_event(
self.settings,
article_id,
version,
run,
self.pretty_name,
status,
message,
)
return True
except Exception as exception:
self.logger.exception(
"Exception emitting %s message. Details: %s"
% (str(status), str(exception))
)
def emit_start_message(self, article_id, version, run):
"emit the start message to the queue"
return self.emit_message(
article_id,
version,
run,
"start",
"Starting ingest digest to endpoint for " + str(article_id),
)
def digest_preview_link(self, article_id):
"preview link for the digest using the preview base url"
return "%s/digests/%s" % (
self.settings.journal_preview_base_url,
utils.pad_msid(article_id),
)
def activity_end_message(self, article_id, statuses):
"different end message to emit based on the ingest status"
if statuses.get("ingest") is True:
return (
"Finished ingest digest to endpoint for %s. Statuses %s Preview link %s"
% (article_id, statuses, self.digest_preview_link(article_id))
)
return "No digest ingested for %s. Statuses %s" % (article_id, statuses)
def emit_end_message(self, article_id, version, run):
"emit the end message to the queue"
return self.emit_message(
article_id,
version,
run,
"end",
self.activity_end_message(article_id, self.statuses),
)
def emit_error_message(self, article_id, version, run, message):
"emit an error message to the queue"
return self.emit_message(article_id, version, run, "error", message)
def approve(self, article_id, status, version, run_type):
"should we ingest based on some basic attributes"
approve_status = True
# check by status
return_status = digest_provider.approve_by_status(
self.logger, article_id, status
)
if return_status is False:
approve_status = return_status
# check silent corrections and consider the first vor version
run_type_status = digest_provider.approve_by_run_type(
self.settings, self.logger, article_id, run_type, version
)
first_vor_status = digest_provider.approve_by_first_vor(
self.settings, self.logger, article_id, version, status
)
if first_vor_status is False and run_type != "silent-correction":
# not the first vor and not a silent correction, do not approve
approve_status = False
elif run_type_status is False:
# otherwise depend on the silent correction run_type logic
approve_status = False
# check if there is a digest docx in the bucket for this article
if approve_status:
if not digest_provider.docx_exists_in_s3(
self.settings, article_id, self.settings.bot_bucket, self.logger
):
self.logger.info(
"Digest docx file does not exist in S3 for article %s" % article_id
)
approve_status = False
return approve_status
def gather_digest_details(self, article_id, version, expanded_folder):
digest_details = OrderedDict()
# Download digest from the S3 outbox
digest_details["docx_file"] = digest_download_docx_from_s3(
article_id,
self.settings.bot_bucket,
self.directories.get("INPUT_DIR"),
self.settings,
self.logger,
)
self.statuses["download"] = True
# find the image file name
digest_details["image_file"] = digest_image_file_name_from_s3(
article_id, self.settings.bot_bucket, self.settings
)
# download jats file
digest_details["jats_file"] = download_jats_for_digest(
expanded_folder,
self.settings,
self.directories.get("TEMP_DIR"),
self.logger,
)
# related article data
digest_details["related"] = get_related_from_lax(
article_id, version, self.settings, self.pretty_name, self.logger
)
return digest_details
def generate_digest_content(self, article_id, digest_details):
digest_content = None
try:
digest_content = self.digest_json(
digest_details.get("docx_file"),
digest_details.get("jats_file"),
digest_details.get("image_file"),
digest_details.get("related"),
)
except Exception as exception:
# email error message and return self.ACTIVITY_SUCCESS
message = "Failed to generate digest json for %s in %s: %s" % (
article_id,
self.pretty_name,
str(exception),
)
raise Exception(message)
if digest_content:
self.statuses["generate"] = True
else:
# email error message and return self.ACTIVITY_SUCCESS
message = (
"Unable to generate Digest content for docx_file %s, "
+ "jats_file %s, image_file %s"
) % (
digest_details.get("docx_file"),
digest_details.get("jats_file"),
digest_details.get("image_file"),
)
raise Exception(message)
return digest_content
def digest_json(self, docx_file, jats_file=None, image_file=None, related=None):
"generate the digest json content from the docx file and other data"
json_content = None
try:
json_content = json_output.build_json(
docx_file,
self.directories.get("TEMP_DIR"),
self.digest_config,
jats_file,
image_file,
related,
)
except Exception as exception:
self.logger.exception(
"Exception generating digest json for docx_file %s. Details: %s"
% (str(docx_file), str(exception))
)
return json_content
def digest_download_docx_from_s3(article_id, bucket_name, input_dir, settings, logger):
try:
return digest_provider.download_docx_from_s3(
settings, article_id, bucket_name, input_dir, logger
)
except Exception as exception:
message = "Unable to download digest docx file for article %s: %s" % (
article_id,
str(exception),
)
raise Exception(message)
def digest_image_file_name_from_s3(article_id, bucket_name, settings):
try:
return digest_provider.image_file_name_from_s3(
settings, article_id, bucket_name
)
except Exception as exception:
message = "Failed to get image file name from S3 for digest %s: %s" % (
article_id,
str(exception),
)
raise Exception(message)
def download_jats_for_digest(expanded_folder, settings, temp_dir, logger):
try:
return download_jats(settings, expanded_folder, temp_dir, logger)
except Exception as exception:
message = "Failed to download JATS from expanded folder %s: %s" % (
expanded_folder,
str(exception),
)
raise Exception(message)
def get_related_from_lax(article_id, version, settings, pretty_name, logger):
try:
return related_from_lax(article_id, version, settings, logger)
except Exception as exception:
message = "Failed to get related from lax for digest %s in %s: %s" % (
article_id,
pretty_name,
str(exception),
)
raise Exception(message)
def related_from_lax(article_id, version, settings, logger=None, auth=True):
"get article json from Lax and return as a list of related data"
related = None
related_json = None
try:
related_json = lax_provider.article_snippet(article_id, version, settings, auth)
except Exception as exception:
logger.exception(
(
"Exception in getting article snippet from Lax for article_id"
" %s, version %s. Details: %s"
)
% (str(article_id), str(version), str(exception))
)
raise
if related_json:
related = [related_json]
return related
def error_email_subject(article_id):
"email subject for an error email"
return u"Error ingesting digest to endpoint: {article_id}".format(
article_id=article_id
)
def send_error_email(article_id, message, settings, logger):
"email error message to the recipients"
datetime_string = time.strftime(utils.DATE_TIME_FORMAT, time.gmtime())
body = email_provider.simple_email_body(datetime_string, message)
subject = error_email_subject(article_id)
sender_email = settings.digest_sender_email
recipient_email_list = email_provider.list_email_recipients(
settings.digest_validate_error_recipient_email
)
messages = email_provider.simple_messages(
sender_email, recipient_email_list, subject, body, logger=logger
)
logger.info("Formatted %d email error messages" % len(messages))
details = email_provider.smtp_send_messages(settings, messages, logger)
logger.info("Email sending details: %s" % str(details))
| 36.191111 | 88 | 0.608805 | 1,860 | 16,286 | 5.109677 | 0.12043 | 0.063447 | 0.035354 | 0.021991 | 0.384785 | 0.306923 | 0.246107 | 0.202757 | 0.140257 | 0.126368 | 0 | 0.001973 | 0.315179 | 16,286 | 449 | 89 | 36.271715 | 0.850175 | 0.09106 | 0 | 0.277473 | 0 | 0 | 0.148821 | 0.001437 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057692 | false | 0 | 0.024725 | 0 | 0.156593 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bde6c2d4e221af5daf9ceb3a165e32e65089ccfe | 249 | py | Python | utils/forgiveness_of_the_offender.py | bbt-t/simple-bot_discord | 46fa629e8278e8e453b3c272b2e838d0762aaaf8 | [
"MIT"
] | null | null | null | utils/forgiveness_of_the_offender.py | bbt-t/simple-bot_discord | 46fa629e8278e8e453b3c272b2e838d0762aaaf8 | [
"MIT"
] | null | null | null | utils/forgiveness_of_the_offender.py | bbt-t/simple-bot_discord | 46fa629e8278e8e453b3c272b2e838d0762aaaf8 | [
"MIT"
] | null | null | null | from discord import Member, utils
async def unmute_user(member: Member):
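# The role id below is guild-specific and hardcoded.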
role = utils.get(member.guild.roles, id=809817869914341396)
await member.edit(roles=())
await member.add_roles(role)
await member.send('You are unmuted! :)')
| 19.153846 | 63 | 0.706827 | 33 | 249 | 5.272727 | 0.636364 | 0.189655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.087379 | 0.172691 | 249 | 12 | 64 | 20.75 | 0.757282 | 0 | 0 | 0 | 0 | 0 | 0.060484 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bde798fb51621c003debde76678c82dcde2604d3 | 443 | py | Python | mailing/urls.py | Aladom/django-mailing | aa18963b1902e4b7f066b0064a832e26e725f643 | [
"MIT"
] | null | null | null | mailing/urls.py | Aladom/django-mailing | aa18963b1902e4b7f066b0064a832e26e725f643 | [
"MIT"
] | 13 | 2016-02-04T14:56:11.000Z | 2021-06-10T20:39:51.000Z | mailing/urls.py | Aladom/django-mailing | aa18963b1902e4b7f066b0064a832e26e725f643 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.conf.urls import url
from .views import MirrorView, SubscriptionsManagementView
__all__ = [
'app_name', 'urlpatterns',
]
app_name = 'mailing'
urlpatterns = [
url(r'^mirror/(?P<signed_pk>[0-9]+:[a-zA-Z0-9_-]+)/$',
MirrorView.as_view(), name='mirror'),
url(r'^subscriptions/(?P<signed_email>.+:[a-zA-Z0-9_-]+)/$',
SubscriptionsManagementView.as_view(), name='subscriptions'),
]
| 26.058824 | 69 | 0.643341 | 54 | 443 | 5.055556 | 0.574074 | 0.051282 | 0.03663 | 0.043956 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01847 | 0.14447 | 443 | 16 | 70 | 27.6875 | 0.701847 | 0.047404 | 0 | 0 | 0 | 0 | 0.340476 | 0.233333 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdeb63bd228672aa0d61f1e5f7d0335e8f073585 | 12,597 | py | Python | pykit/codegen/llvm/llvm_codegen.py | ContinuumIO/pyk | 1730d7b831e0cf12a641ac23b5cf03e17e0dc550 | [
"BSD-3-Clause"
] | 9 | 2015-06-23T00:13:49.000Z | 2022-02-23T02:46:43.000Z | pykit/codegen/llvm/llvm_codegen.py | ContinuumIO/pyk | 1730d7b831e0cf12a641ac23b5cf03e17e0dc550 | [
"BSD-3-Clause"
] | 1 | 2017-08-30T08:13:12.000Z | 2017-08-31T06:36:32.000Z | pykit/codegen/llvm/llvm_codegen.py | ContinuumIO/pyk | 1730d7b831e0cf12a641ac23b5cf03e17e0dc550 | [
"BSD-3-Clause"
] | 7 | 2015-05-08T10:17:47.000Z | 2021-04-01T15:00:57.000Z | from functools import partial
from pykit.ir import vvisit, ArgLoader, verify_lowlevel
from pykit.ir import defs, opgrouper
from pykit.types import Boolean, Integral, Real, Pointer, Function, Int64
from pykit.codegen.llvm.llvm_types import llvm_type
import llvm.core as lc
from llvm.core import Type, Constant
#===------------------------------------------------------------------===
# Definitions
#===------------------------------------------------------------------===
compare_float = {
'>': lc.FCMP_OGT,
'<': lc.FCMP_OLT,
'==': lc.FCMP_OEQ,
'>=': lc.FCMP_OGE,
'<=': lc.FCMP_OLE,
'!=': lc.FCMP_ONE,
}
compare_signed_int = {
'>': lc.ICMP_SGT,
'<': lc.ICMP_SLT,
'==': lc.ICMP_EQ,
'>=': lc.ICMP_SGE,
'<=': lc.ICMP_SLE,
'!=': lc.ICMP_NE,
}
compare_unsiged_int = {
'>': lc.ICMP_UGT,
'<': lc.ICMP_ULT,
'==': lc.ICMP_EQ,
'>=': lc.ICMP_UGE,
'<=': lc.ICMP_ULE,
'!=': lc.ICMP_NE,
}
compare_bool = {
'==' : lc.ICMP_EQ,
'!=' : lc.ICMP_NE
}
# the helpers below are based on npm/codegen
def integer_invert(builder, val):
return builder.xor(val, Constant.int_signextend(val.type, -1))
def integer_usub(builder, val):
return builder.sub(Constant.int(val.type, 0), val)
def integer_not(builder, value):
return builder.icmp(lc.ICMP_EQ, value, Constant.int(value.type, 0))
def float_usub(builder, val):
return builder.fsub(Constant.real(val.type, 0), val)
def float_not(builder, val):
return builder.fcmp(lc.FCMP_OEQ, val, Constant.real(val.type, 0))
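# Each binop_int entry is a (signed, unsigned) pair of builder methods;
# op.type.unsigned indexes into the pair in Translator.op_binary below.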
binop_int = {
'+': (lc.Builder.add, lc.Builder.add),
'-': (lc.Builder.sub, lc.Builder.sub),
'*': (lc.Builder.mul, lc.Builder.mul),
'/': (lc.Builder.sdiv, lc.Builder.udiv),
'//': (lc.Builder.sdiv, lc.Builder.udiv),
'%': (lc.Builder.srem, lc.Builder.urem),
'&': (lc.Builder.and_, lc.Builder.and_),
'|': (lc.Builder.or_, lc.Builder.or_),
'^': (lc.Builder.xor, lc.Builder.xor),
'<<': (lc.Builder.shl, lc.Builder.shl),
'>>': (lc.Builder.ashr, lc.Builder.lshr),
}
binop_float = {
'+': lc.Builder.fadd,
'-': lc.Builder.fsub,
'*': lc.Builder.fmul,
'/': lc.Builder.fdiv,
'//': lc.Builder.fdiv,
'%': lc.Builder.frem,
}
unary_bool = {
'!': integer_not,
}
unary_int = {
'~': integer_invert,
'!': integer_not,
"+": lambda builder, arg: arg,
"-": integer_usub,
}
unary_float = {
'!': float_not,
"+": lambda builder, arg: arg,
"-": float_usub,
}
#===------------------------------------------------------------------===
# Utils
#===------------------------------------------------------------------===
i1, i16, i32, i64 = map(Type.int, [1, 16, 32, 64])
def const_int(type, value):
return Constant.int(type, value)
const_i32 = partial(const_int, i32)
const_i64 = partial(const_int, i64)
zero = partial(const_int, value=0)
one = partial(const_int, value=1)
def sizeof(builder, ty, intp, name=''):
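# Classic LLVM sizeof idiom: GEP one element past a null pointer of the
# given type and convert the resulting address to an integer.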
ptr = Type.pointer(ty)
null = Constant.null(ptr)
offset = builder.gep(null, [Constant.int(Type.int(), 1)])
return builder.ptrtoint(offset, intp, name)
#===------------------------------------------------------------------===
# Translator
#===------------------------------------------------------------------===
class Translator(object):
"""
Translate a function in low-level form.
This means it can only use values of type Bool, Int, Float, Struct or
Pointer. Values of type Function may be called.
"""
def __init__(self, func, env, lfunc, llvm_typer, llvm_module):
self.func = func
self.env = env
self.lfunc = lfunc
self.llvm_type = llvm_typer
self.lmod = llvm_module
self.builder = None
self.phis = [] # [pykit_phi]
def blockswitch(self, newblock):
if not self.builder:
self.builder = lc.Builder.new(newblock)
self.builder.position_at_end(newblock)
# __________________________________________________________________
def op_arg(self, arg):
return self.lfunc.args[self.func.args.index(arg)]
# __________________________________________________________________
def op_unary(self, op, arg):
opmap = { Boolean: unary_bool,
Integral: unary_int,
Real: unary_float }[type(op.type)]
unop = defs.unary_opcodes[op.opcode]
return opmap[unop](self.builder, arg)
def op_binary(self, op, left, right):
binop = defs.binary_opcodes[op.opcode]
if op.type.is_int:
genop = binop_int[binop][op.type.unsigned]
else:
genop = binop_float[binop]
return genop(self.builder, left, right, op.result)
def op_compare(self, op, left, right):
cmpop = defs.compare_opcodes[op.opcode]
type = op.args[0].type
if type.is_int and type.unsigned:
cmp, lop = self.builder.icmp, compare_unsiged_int[cmpop]
elif type.is_int or type.is_bool:
cmp, lop = self.builder.icmp, compare_signed_int[cmpop]
else:
cmp, lop = self.builder.fcmp, compare_float[cmpop]
return cmp(lop, left, right, op.result)
# __________________________________________________________________
def op_convert(self, op, arg):
from llpython.byte_translator import LLVMCaster
unsigned = op.type.is_int and op.type.unsigned
        # The float cast doesn't accept this keyword argument
kwds = {'unsigned': unsigned} if unsigned else {}
return LLVMCaster.build_cast(self.builder, arg,
self.llvm_type(op.type), **kwds)
# __________________________________________________________________
def op_call(self, op, function, args):
# Get the callee LLVM function from the cache. This is put there by
# pykit.codegen.codegen
cache = self.env["codegen.cache"]
lfunc = cache[function]
return self.builder.call(lfunc, args)
def op_call_math(self, op, name, args):
# Math is resolved by an LLVM postpass
argtypes = [arg.type for arg in args]
lfunc_type = self.llvm_type(Function(op.type, argtypes))
lfunc = self.lmod.get_or_insert_function(
lfunc_type, 'pykit.math.%s.%s' % (map(str, argtypes), name.lower()))
return self.builder.call(lfunc, args, op.result)
# __________________________________________________________________
def op_getfield(self, op, struct, attr):
index = const_i32(op.type.names.index(attr))
return self.builder.extract_value(struct, index, op.result)
def op_setfield(self, op, struct, attr, value):
index = const_i32(op.type.names.index(attr))
        # structs are aggregates: use insert_value (insert_element is for vectors)
        return self.builder.insert_value(struct, value, index, op.result)
# __________________________________________________________________
def op_getindex(self, op, array, indices):
return self.builder.gep(array, indices, op.result)
def op_setindex(self, op, array, indices, value):
ptr = self.builder.gep(array, indices)
        self.builder.store(value, ptr)
# __________________________________________________________________
def op_alloca(self, op):
llvm_pointer_type = self.llvm_type(op.type)
return self.builder.alloca(llvm_pointer_type.pointee, op.result)
def op_load(self, op, stackvar):
return self.builder.load(stackvar, op.result)
def op_store(self, op, value, stackvar):
self.builder.store(value, stackvar)
# __________________________________________________________________
def op_jump(self, op, block):
self.builder.branch(block)
def op_cbranch(self, op, test, true_block, false_block):
self.builder.cbranch(test, true_block, false_block)
def op_phi(self, op):
phi = self.builder.phi(self.llvm_type(op.type), op.result)
self.phis.append(op)
return phi
def op_ret(self, op, value):
if value is None:
assert self.func.type.restype.is_void
self.builder.ret_void()
else:
self.builder.ret(value)
# __________________________________________________________________
def op_sizeof(self, op, type):
int_type = self.llvm_type(op.type)
item_type = self.llvm_type(type)
        # sizeof() takes (builder, ty, intp); it does not accept a name argument
        return sizeof(self.builder, item_type, int_type)
def op_addressof(self, op, func):
assert func.address
addr = const_int(i64, func.address)
return self.builder.inttoptr(addr, self.llvm_type(Pointer(func.type)))
# __________________________________________________________________
def op_ptradd(self, op, ptr, val):
return self.builder.gep(ptr, [val], op.result)
def op_ptrload(self, op, ptr):
return self.builder.load(ptr, op.result)
def op_ptrstore(self, op, ptr, val):
        return self.builder.store(val, ptr)
def op_ptrcast(self, op, val):
return self.builder.bitcast(val, self.llvm_type(op.type), op.result)
def op_ptr_isnull(self, op, val):
intval = self.builder.ptrtoint(val, self.llvm_type(Int64))
return self.builder.icmp(lc.ICMP_EQ, intval, zero(intval.type), op.result)
# __________________________________________________________________
def allocate_blocks(llvm_func, pykit_func):
"""Return a dict mapping pykit blocks to llvm blocks"""
blocks = {}
for block in pykit_func.blocks:
        blocks[block] = llvm_func.append_basic_block(block.name)
return blocks
def update_phis(phis, valuemap, argloader):
"""
Update LLVM phi values given a list of pykit phi values and block and
value dicts mapping pykit values to LLVM values
"""
for phi in phis:
llvm_phi = valuemap[phi.result]
llvm_blocks = map(argloader.load_op, phi.args[0])
llvm_values = map(argloader.load_op, phi.args[1])
for llvm_block, llvm_value in zip(llvm_blocks, llvm_values):
llvm_phi.add_incoming(llvm_value, llvm_block)
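# Phi incoming values are patched in this second pass because a phi may refer
# to blocks and values that are only translated after the phi itself is emitted.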
#===------------------------------------------------------------------===
# Argument loading
#===------------------------------------------------------------------===
class LLVMArgLoader(ArgLoader):
"""
Load Operation arguments as LLVM values passed and extra *args to the
Translator.
"""
def __init__(self, store, engine, llvm_module, lfunc, blockmap):
super(LLVMArgLoader, self).__init__(store)
self.engine = engine
self.llvm_module = llvm_module
self.lfunc = lfunc
self.blockmap = blockmap
def load_GlobalValue(self, arg):
        if arg.external:
            # 'llvm_module' is the attribute set in __init__ ('lmod' is undefined);
            # get_or_insert_function also needs a symbol name (assumed: arg.name)
            value = self.llvm_module.get_or_insert_function(
                llvm_type(arg.type), arg.name)
if arg.address:
self.engine.add_global_mapping(value, arg.address)
else:
assert arg.value
value = arg.value.const
return value
def load_Block(self, arg):
return self.blockmap[arg]
def load_Constant(self, arg):
ty = type(arg.type)
lty = llvm_type(arg.type)
if ty == Pointer:
if arg.const == 0:
return lc.Constant.null(lty)
else:
                # cast the integer constant to the pointer type, not back to i64
                return const_i64(arg.const).inttoptr(lty)
elif ty == Integral:
if arg.type.unsigned:
return lc.Constant.int(lty, arg.const)
else:
return lc.Constant.int_signextend(lty, arg.const)
elif ty == Real:
return lc.Constant.real(lty, arg.const)
else:
raise NotImplementedError("Constants for", ty)
def load_Undef(self, arg):
return lc.Constant.undef(llvm_type(arg.type))
def initialize(func, env):
verify_lowlevel(func)
llvm_module = env["codegen.llvm.module"]
return llvm_module.add_function(llvm_type(func.type), func.name)
def translate(func, env, lfunc):
engine, llvm_module = env["codegen.llvm.engine"], env["codegen.llvm.module"]
blockmap = allocate_blocks(lfunc, func)
### Create visitor ###
translator = Translator(func, env, lfunc, llvm_type, llvm_module)
visitor = opgrouper(translator)
### Codegen ###
argloader = LLVMArgLoader(None, engine, llvm_module, lfunc, blockmap)
valuemap = vvisit(visitor, func, argloader)
update_phis(translator.phis, valuemap, argloader)
return lfunc | 32.135204 | 82 | 0.618322 | 1,515 | 12,597 | 4.479208 | 0.179538 | 0.055113 | 0.024315 | 0.02682 | 0.220159 | 0.107722 | 0.062334 | 0.046124 | 0.037135 | 0.037135 | 0 | 0.004887 | 0.220291 | 12,597 | 392 | 83 | 32.135204 | 0.686011 | 0.166945 | 0 | 0.091954 | 0 | 0 | 0.016368 | 0 | 0 | 0 | 0 | 0 | 0.011494 | 1 | 0.168582 | false | 0 | 0.030651 | 0.061303 | 0.356322 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
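# A rough driver sketch (hedged: the env keys below are filled in elsewhere by
# pykit's codegen pipeline):
#
#     lfunc = initialize(func, env)
#     env["codegen.cache"][func] = lfunc
#     translate(func, env, lfunc)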
bdeef5ecb135e522f7c40abc5e24bd958b8ff052 | 1,859 | py | Python | DatabaseHandler/sqlite_operations.py | utkarsh7236/SCILLA | e11e4d753823ad522a1b3168283b6e6ffe3ea393 | [
"Apache-2.0"
] | 17 | 2019-12-09T19:09:07.000Z | 2021-08-29T01:11:13.000Z | DatabaseHandler/sqlite_operations.py | utkarsh7236/SCILLA | e11e4d753823ad522a1b3168283b6e6ffe3ea393 | [
"Apache-2.0"
] | 1 | 2021-04-14T15:08:18.000Z | 2021-04-14T15:08:18.000Z | DatabaseHandler/sqlite_operations.py | utkarsh7236/SCILLA | e11e4d753823ad522a1b3168283b6e6ffe3ea393 | [
"Apache-2.0"
] | 2 | 2020-06-05T03:01:06.000Z | 2020-07-09T07:13:12.000Z | #!/usr/bin/env python
__author__ = 'Florian Hase'
#========================================================================
import time
import sqlalchemy as sql
#========================================================================
class AddEntry(object):
def __init__(self, database, table, entry):
self.db = database
self.table = table
self.entry = entry
def execute(self):
start = time.time()
with self.db.connect() as conn:
conn.execute(self.table.insert(), self.entry)
conn.close()
end = time.time()
#========================================================================
class FetchEntries(object):
def __init__(self, database, table, selection, name = 'test'):
self.db = database
self.table = table
self.selection = selection
self.entries = None
self.executed = False
self.entries_fetched = False
self.name = name
def execute(self):
start = time.time()
with self.db.connect() as conn:
selected = conn.execute(self.selection)
entries = selected.fetchall()
conn.close()
self.entries = entries
self.executed = True
end = time.time()
    def get_entries(self):
        # poll until execute() has filled self.entries; sleep briefly so the
        # wait loop does not spin at 100% CPU
        while not self.executed:
            time.sleep(0.01)
        self.entries_fetched = True
        return self.entries
#========================================================================
class UpdateEntries(object):
def __init__(self, database, table, updates):
self.db = database
self.table = table
self.updates = updates
def execute(self):
start = time.time()
if isinstance(self.updates, list):
with self.db.connect() as conn:
for updates in self.updates:
updated = conn.execute(updates)
conn.close()
else:
with self.db.connect() as conn:
updated = conn.execute(self.updates)
conn.close()
end = time.time()
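# A minimal usage sketch (assuming a SQLAlchemy engine and a Table object
# named 'jobs' defined elsewhere):
#
#     db = sql.create_engine('sqlite:///runs.db')
#     AddEntry(db, jobs, {'status': 'new'}).execute()
#     fetcher = FetchEntries(db, jobs, sql.select([jobs]))
#     fetcher.execute()
#     rows = fetcher.get_entries()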
| 23.833333 | 73 | 0.550834 | 201 | 1,859 | 4.995025 | 0.293532 | 0.041833 | 0.039841 | 0.067729 | 0.39741 | 0.35757 | 0.195219 | 0.099602 | 0.099602 | 0.099602 | 0 | 0.000669 | 0.196342 | 1,859 | 77 | 74 | 24.142857 | 0.671352 | 0.166218 | 0 | 0.418182 | 0 | 0 | 0.010356 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.127273 | false | 0.018182 | 0.036364 | 0 | 0.236364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdf105752f21bbc068ce977d28dde3f6db125f50 | 8,818 | py | Python | main.py | omegaBionic/pysparkPower | 1354247e4ec085a65f288a1f31a05875f003da72 | [
"Apache-2.0"
] | null | null | null | main.py | omegaBionic/pysparkPower | 1354247e4ec085a65f288a1f31a05875f003da72 | [
"Apache-2.0"
] | null | null | null | main.py | omegaBionic/pysparkPower | 1354247e4ec085a65f288a1f31a05875f003da72 | [
"Apache-2.0"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import pylab as pl
import pandas as pd
from pyspark import SQLContext
from pyspark.ml.clustering import KMeans
from pyspark.ml.feature import VectorAssembler
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, IntegerType
from data.process_initial_file import dict_education, list_education, list_race
def elbow_method_evaluation(df):
# Calculate cost and plot
cost = np.zeros(10)
for k in range(2, 10):
kmeans = KMeans().setK(k).setSeed(1).setFeaturesCol("features")
model = kmeans.fit(df)
cost[k] = model.summary.trainingCost
# Plot the cost
df_cost = pd.DataFrame(cost[2:])
df_cost.columns = ["cost"]
new_col = [2, 3, 4, 5, 6, 7, 8, 9]
df_cost.insert(0, 'cluster', new_col)
pl.plot(df_cost.cluster, df_cost.cost)
pl.xlabel('Number of Clusters')
pl.ylabel('Score')
pl.title('Elbow Curve')
pl.show()
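# The elbow method above fits one KMeans model per k in [2, 10) and plots the
# training cost (within-cluster sum of squared distances); the bend ("elbow")
# in the curve is a common heuristic for choosing k, and k=3 is used below.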
spark = SparkSession \
.builder \
.appName("Python Spark SQL basic example") \
.config("spark.some.config.option", "some-value") \
.getOrCreate()
# Define information
nullable = True
schema = StructType([
StructField("age", IntegerType(), nullable),
StructField("workclass", IntegerType(), nullable),
StructField("fnlwgt", IntegerType(), nullable),
StructField("education", IntegerType(), nullable),
StructField("marital-status", IntegerType(), nullable),
StructField("occupation", IntegerType(), nullable),
StructField("relationship", IntegerType(), nullable),
StructField("race", IntegerType(), nullable),
StructField("sex", IntegerType(), nullable),
StructField("capital-gain", IntegerType(), nullable),
StructField("capital-loss", IntegerType(), nullable),
StructField("hours-per-week", IntegerType(), nullable),
StructField("native-country", IntegerType(), nullable),
StructField("is-upper-than-50k", IntegerType(), nullable)
])
# Connect to bdd
sqlContext = SQLContext(sparkContext=spark.sparkContext, sparkSession=spark)
# Read file
df = sqlContext.read.csv("data/adult_processed_data.data", header=True, sep=",", schema=schema)
# Display all columns
# print(df.collect())
# Display columns
print(df.columns)
# df.select("is-upper-than-50k").show()
df.select("*").show()
# Create features column, assembling together the numeric data
col1_name = 'education'
col2_name = 'capital-gain'
col3_name = 'race'
col4_name = 'hours-per-week'
inputCols = [col1_name, col2_name, col3_name]
vecAssembler = VectorAssembler(
inputCols=inputCols,
outputCol="features")
adults_with_features = vecAssembler.transform(df)
# Figure 1
# Do K-means
# Evaluate number of clusters with the elbow method
elbow_method_evaluation(adults_with_features)
k = 3
kmeans_algo = KMeans().setK(k).setSeed(1).setFeaturesCol("features")
model = kmeans_algo.fit(adults_with_features)
centers = model.clusterCenters()
# Assign clusters to adults
# Cluster prediction, named prediction and used after for color
adults_with_clusters = model.transform(adults_with_features)
# Display Centers
print("Centers: '{}'".format(centers))
# Convert Spark Data Frame to Pandas Data Frame
adults_for_viz = adults_with_clusters.toPandas()
print("STARTING PRINTING ADULTS_for")
print("adults_for_viz.prediction.value_counts(): '{}'".format(adults_for_viz.prediction.value_counts()))
# Vizualize
A = adults_for_viz[adults_for_viz["is-upper-than-50k"] == 0]
B = adults_for_viz[adults_for_viz["is-upper-than-50k"] == 1]
# Colors code k-means results, cluster numbers
colors = {0: 'red', 1: 'blue', 2: 'orange'}
# Draw dots
fig = plt.figure().add_subplot()
fig.scatter(A[col1_name],
A[col2_name],
c=A.prediction.map(colors),
marker='.')
fig.scatter(B[col1_name],
B[col2_name],
c=B.prediction.map(colors),
marker='x')
# Draw grid
plt.grid()
# Set text
plt.title("Combined Statistics 1")
plt.xlabel(col1_name)
plt.ylabel(col2_name)
# TODO To change in case col1_name is changed
plt.xticks(range(0, len(list_education)), list_education, rotation='vertical')
plt.legend(['is-upper-than-50k: False', 'is-upper-than-50k: True'])
# Save figure
plt.savefig("picture1.png", bbox_inches='tight')
# Show fig
plt.show()
# Figure 2
# Draw dots
fig = plt.figure().add_subplot()
fig.scatter(A[col1_name],
A[col2_name],
c=A.prediction.map(colors),
marker='.')
fig.scatter(B[col1_name],
B[col2_name],
c=B.prediction.map(colors),
marker='x')
# fig.set_yscale('log', base=2)
# Draw grid
plt.grid()
# Set text
plt.title("Combined Statistics 2")
plt.xlabel(col1_name)
plt.ylabel(col2_name)
plt.xticks(range(0, len(list_education)), list_education, rotation='vertical')
plt.legend(['is-upper-than-50k: False', 'is-upper-than-50k: True'])
# Save figure
plt.savefig("picture2.png", bbox_inches='tight')
# Show fig
plt.show()
# Figure 3
inputCols = [col2_name, col3_name]
vecAssembler = VectorAssembler(
inputCols=inputCols,
outputCol="features")
adults_with_features = vecAssembler.transform(df)
# Do K-means
k = 3
kmeans_algo = KMeans().setK(k).setSeed(1).setFeaturesCol("features")
model = kmeans_algo.fit(adults_with_features)
centers = model.clusterCenters()
# Assign clusters to adults
# Cluster prediction, named prediction and used after for color
adults_with_clusters = model.transform(adults_with_features)
# Display Centers
print("Centers: '{}'".format(centers))
# Convert Spark Data Frame to Pandas Data Frame
adults_for_viz = adults_with_clusters.toPandas()
print("STARTING PRINTING ADULTS_for")
print("adults_for_viz.prediction.value_counts(): '{}'".format(adults_for_viz.prediction.value_counts()))
# Vizualize
A = adults_for_viz[adults_for_viz["is-upper-than-50k"] == 0]
B = adults_for_viz[adults_for_viz["is-upper-than-50k"] == 1]
# Colors code k-means results, cluster numbers
colors = {0: 'red', 1: 'blue', 2: 'orange'}
# Draw dots
fig = plt.figure().add_subplot()
fig.scatter(A[col3_name],
A[col2_name],
c=A.prediction.map(colors),
marker='.')
fig.scatter(B[col3_name],
B[col2_name],
c=B.prediction.map(colors),
marker='x')
# fig.set_yscale('log', base=2)
# Draw grid
plt.grid()
# Set text
plt.title("Combined Statistics 3")
plt.xlabel(col3_name)
plt.ylabel(col2_name)
plt.xticks(range(0, len(list_race)), list_race, rotation='vertical')
plt.legend(['is-upper-than-50k: False', 'is-upper-than-50k: True'])
# Save figure
plt.savefig("picture3.png", bbox_inches='tight')
# Show fig
plt.show()
# TODO PUT HERE
# Figure 4
inputCols = [col1_name, col3_name, col4_name]
vecAssembler = VectorAssembler(
inputCols=inputCols,
outputCol="features")
adults_with_features = vecAssembler.transform(df)
elbow_method_evaluation(adults_with_features)
# Do K-means
k = 3
kmeans_algo = KMeans().setK(k).setSeed(1).setFeaturesCol("features")
model = kmeans_algo.fit(adults_with_features)
centers = model.clusterCenters()
# Assign clusters to adults
# Cluster prediction, named prediction and used after for color
adults_with_clusters = model.transform(adults_with_features)
# Display Centers
print("Centers: '{}'".format(centers))
# Convert Spark Data Frame to Pandas Data Frame
adults_for_viz = adults_with_clusters.toPandas()
print("STARTING PRINTING ADULTS_for")
print("adults_for_viz.prediction.value_counts(): '{}'".format(adults_for_viz.prediction.value_counts()))
# Vizualize
A = adults_for_viz[adults_for_viz["is-upper-than-50k"] == 0]
B = adults_for_viz[adults_for_viz["is-upper-than-50k"] == 1]
# Colors code k-means results, cluster numbers
colors = {0: 'red', 1: 'blue', 2: 'orange'}
# Draw dots
fig_3d = plt.figure()
ax = plt.axes(projection='3d')
ax.set_xlabel(col1_name)
ax.set_ylabel(col3_name)
ax.set_zlabel(col4_name)
ax.set_xticks(range(0, len(list_education)))
ax.set_xticklabels(list_education, rotation=90,
verticalalignment='baseline',
horizontalalignment='left')
ax.set_yticks(range(0, len(list_race)))
ax.set_yticklabels(list_race, rotation=-15,
verticalalignment='baseline',
horizontalalignment='left')
# Data for three-dimensional scattered points
ax.scatter3D(A[col1_name], A[col3_name], A[col4_name], c=A.prediction.map(colors), cmap='Greens', marker='.')
ax.scatter3D(B[col1_name], B[col3_name], B[col4_name], c=B.prediction.map(colors), cmap='Greens', marker='x')
# Save figure
plt.savefig("picture4.png", bbox_inches='tight')
plt.show()
# DEBUG: Display stats
print("k: '{}'".format(k))
print("A.prediction.value_counts(): '{}'".format(A.prediction.value_counts()))
print("B.prediction.value_counts(): '{}'".format(B.prediction.value_counts()))
| 29.790541 | 109 | 0.713087 | 1,203 | 8,818 | 5.075644 | 0.202826 | 0.035375 | 0.041271 | 0.0321 | 0.590894 | 0.587455 | 0.556666 | 0.556666 | 0.543727 | 0.522764 | 0 | 0.017317 | 0.142096 | 8,818 | 295 | 110 | 29.891525 | 0.789822 | 0.150488 | 0 | 0.536313 | 0 | 0 | 0.160409 | 0.031355 | 0 | 0 | 0 | 0.00339 | 0 | 1 | 0.005587 | false | 0 | 0.055866 | 0 | 0.061453 | 0.072626 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdf5bfe6b045a2bc243a77cfba2030c81bcde42d | 3,781 | py | Python | src/open3DTool/visualizer.py | MobileRoboticsSkoltech/plane-segmentation-research | 0627512c4cb53326de1aabf815e755d9e4484c9c | [
"Apache-2.0"
] | 1 | 2021-10-15T08:18:55.000Z | 2021-10-15T08:18:55.000Z | src/open3DTool/visualizer.py | MobileRoboticsSkoltech/plane-segmentation-research | 0627512c4cb53326de1aabf815e755d9e4484c9c | [
"Apache-2.0"
] | 1 | 2021-11-18T16:37:28.000Z | 2021-11-18T16:37:28.000Z | src/open3DTool/visualizer.py | MobileRoboticsSkoltech/plane-segmentation-research | 0627512c4cb53326de1aabf815e755d9e4484c9c | [
"Apache-2.0"
] | null | null | null | from src.open3DTool.planeUtils import (
segment_points_on_plane_by_picked_points,
pick_points_utils,
)
from src.algorithmsForPointCloud.fileUtils import (
get_point_cloud_from_bin_file,
generate_labels_and_object_files,
)
from src.open3DTool.fileUtils import update_label_files
import numpy as np
import open3d as o3d
class Visualizer:
point_cloud = o3d.geometry.PointCloud()
path_to_pcd_file = ""
path_to_label_file = ""
path_to_object_file = ""
main_visualizer = o3d.visualization.VisualizerWithKeyCallback()
picked_indexes = []
distance = 0
pick_points_count = 3
def __init__(
self,
path_to_bin_file: str,
path_to_save_file_label: str,
path_to_save_file_object: str,
path_to_pcd_file: str,
distance: np.intc,
pick_points_count: np.intc,
):
self.point_cloud = get_point_cloud_from_bin_file(path_to_bin_file)
self.point_cloud.paint_uniform_color([0.51, 0.51, 0.51])
self.path_to_pcd_file = path_to_pcd_file
self.path_to_label_file = path_to_save_file_label
self.path_to_object_file = path_to_save_file_object
self.distance = distance
        self.pick_points_count = pick_points_count
        # per-instance list: mutating the class-level 'picked_indexes = []'
        # would share undo history between all Visualizer instances
        self.picked_indexes = []
        self.generate_label_files([])
def generate_label_files(self, indexes: list):
generate_labels_and_object_files(
len(self.point_cloud.points),
indexes,
self.path_to_label_file,
self.path_to_object_file,
)
def update_pcd_and_label_files(self, count_of_points: int, is_append_right: bool):
update_label_files(
self.point_cloud,
count_of_points,
self.path_to_pcd_file,
self.path_to_label_file,
self.path_to_object_file,
is_append_right,
)
def run(self):
self.main_visualizer = o3d.visualization.VisualizerWithKeyCallback()
self.main_visualizer.create_window()
self.main_visualizer.add_geometry(self.point_cloud)
self.set_hotkeys()
self.main_visualizer.run()
self.main_visualizer.destroy_window()
def set_hotkeys(self):
self.main_visualizer.register_key_callback(32, self.pick_points) # Space
self.main_visualizer.register_key_callback(
259, self.get_previous_snapshot
) # Backspace
def pick_points(self, visualizer):
indexes_of_points = pick_points_utils(self.point_cloud)
assert len(indexes_of_points) == self.pick_points_count
self.update_main_window_by_plane(indexes_of_points)
def get_previous_snapshot(self, visualizer):
if len(self.picked_indexes) == 0:
return
number_of_last_indexes = self.picked_indexes[-1]
self.picked_indexes = self.picked_indexes[:-1]
point_cloud_len = len(self.point_cloud.points)
last_indexes = [
i for i in range(point_cloud_len - number_of_last_indexes, point_cloud_len)
]
picked_cloud = self.point_cloud.select_by_index(last_indexes)
picked_cloud.paint_uniform_color([0.51, 0.51, 0.51])
self.point_cloud = picked_cloud + self.point_cloud.select_by_index(
last_indexes, invert=True
)
self.update_pcd_and_label_files(number_of_last_indexes, False)
visualizer.clear_geometries()
visualizer.add_geometry(self.point_cloud)
def update_main_window_by_plane(self, picked_points: list):
self.point_cloud, indexes = segment_points_on_plane_by_picked_points(
self.point_cloud, picked_points, self.distance
)
self.picked_indexes.append(len(indexes))
self.update_pcd_and_label_files(len(indexes), True)
self.run()
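# A minimal usage sketch (paths and parameters are placeholders):
#
#     viz = Visualizer("cloud.bin", "labels.txt", "objects.txt", "cloud.pcd",
#                      distance=10, pick_points_count=3)
#     viz.run()   # Space: pick points and segment a plane, Backspace: undo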
| 34.372727 | 87 | 0.691351 | 497 | 3,781 | 4.802817 | 0.197183 | 0.079598 | 0.076246 | 0.027231 | 0.455802 | 0.274822 | 0.150398 | 0.12191 | 0.12191 | 0.103058 | 0 | 0.012081 | 0.233801 | 3,781 | 109 | 88 | 34.688073 | 0.811874 | 0.003967 | 0 | 0.043011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010753 | 1 | 0.086022 | false | 0 | 0.053763 | 0 | 0.247312 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdfa7f51aa6bca9797c581b745c48d3a51fc0b8d | 8,868 | py | Python | submission_utils.py | ameyagodbole/multihop_inference_explanation_regeneration | ab742433034b251a819b6eb898686530bd055cd0 | [
"MIT"
] | 7 | 2019-08-31T22:58:41.000Z | 2021-02-06T17:41:38.000Z | submission_utils.py | ameyagodbole/multihop_inference_explanation_regeneration | ab742433034b251a819b6eb898686530bd055cd0 | [
"MIT"
] | 2 | 2020-02-19T13:32:03.000Z | 2020-07-29T09:24:53.000Z | submission_utils.py | ameyagodbole/multihop_inference_explanation_regeneration | ab742433034b251a819b6eb898686530bd055cd0 | [
"MIT"
] | 1 | 2020-10-01T09:48:07.000Z | 2020-10-01T09:48:07.000Z | import argparse
import logging
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_distances
import torch
def create_predictions_file(questions_file, facts_file, examples_file, logits_file, pred_output_file,
mcq_choices="correct", write_debug_file=False):
"""
Utility to generate submission file from predictions (logits scores)
"""
df_questions = pd.read_csv(questions_file, sep='\t')
df_facts = pd.read_csv(facts_file, sep='\t').drop_duplicates(subset=["uid"], keep="first").reset_index()
examples = torch.load(examples_file)
logits = np.load(logits_file)
logit_1 = logits[:, 1] - logits[:, 0]
if write_debug_file:
f_tmp = open(pred_output_file + "-as-text", "w")
# Remove wrong choices
def remove_wrong_answer_choices(row, choices):
correct_choice = row["AnswerKey"]
option_start_loc = row["Question"].rfind("(A)")
split0, split1 = row["Question"][:option_start_loc], row["Question"][option_start_loc:]
if choices == "none":
return split0
if correct_choice == "A" and "(B)" in split1:
split0 += (split1[3:split1.rfind("(B)")])
elif correct_choice == "A":
split0 += (split1[3:])
elif correct_choice == "B" and "(C)" in split1:
split0 += (split1[split1.rfind("(B)") + 3:split1.rfind("(C)")])
elif correct_choice == "B":
split0 += (split1[split1.rfind("(B)") + 3:])
elif correct_choice == "C" and "(D)" in split1:
split0 += (split1[split1.rfind("(C)") + 3:split1.rfind("(D)")])
elif correct_choice == "C":
split0 += (split1[split1.rfind("(C)") + 3:])
elif correct_choice == "D" and "(E)" in split1:
split0 += (split1[split1.rfind("D)") + 3:split1.rfind("(E)")])
elif correct_choice == "D":
split0 += (split1[split1.rfind("D)") + 3:])
elif correct_choice == "E" and "(F)" in split1:
split0 += (split1[split1.rfind("(E)") + 3:split1.rfind("(F)")])
elif correct_choice == "E":
split0 += (split1[split1.rfind("(E)") + 3:])
else:
raise ValueError("Unhandled option type: {}".format(correct_choice))
return split0
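    # The helper above strips the "(A) ... (B) ..." option block from the
    # question text, keeping only the correct option's wording ('correct')
    # or dropping every option ('none').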
if mcq_choices != "all":
df_questions["ProcessedQuestion"] = df_questions.apply(remove_wrong_answer_choices, 1,
choices=mcq_choices)
else:
df_questions["ProcessedQuestion"] = df_questions["Question"]
vectorizer = TfidfVectorizer().fit(df_questions['Question']).fit(df_facts['text'])
X_q = vectorizer.transform(df_questions['ProcessedQuestion'])
X_e = vectorizer.transform(df_facts['text'])
X_dist = cosine_distances(X_q, X_e)
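    # X_dist[i, j] is the TF-IDF cosine distance between question i and fact j;
    # it is used below as a fallback ranking for facts the model never scored.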
idx_start = 0
predictions = []
prev_query = examples[0].text_a
for i, example in enumerate(examples):
if example.text_a == prev_query:
continue
qid = examples[idx_start].guid.split('###')[0]
q = df_questions.loc[df_questions["questionID"] == qid]
assert q["ProcessedQuestion"].item() == examples[idx_start].text_a
relevant_logits = logit_1[idx_start:i]
relevant_examples = examples[idx_start:i]
sorted_preds, sorted_examples = zip(*sorted(zip(relevant_logits, relevant_examples), key=lambda e: e[0],
reverse=True))
added_uids = set()
example_preds = []
for se in sorted_examples:
for fid in se.guid.split('###')[1:]:
if fid not in added_uids:
added_uids.add(fid)
example_preds.append('\t'.join([qid, fid]))
for dist_idx in np.argsort(X_dist[q.index.to_numpy()[0]]):
fid = df_facts.loc[dist_idx, "uid"]
if fid not in added_uids:
added_uids.add(fid)
example_preds.append('\t'.join([qid, fid]))
predictions.extend(example_preds)
if write_debug_file:
f_tmp.write(q["questionID"].item())
f_tmp.write('\n')
f_tmp.write(q["Question"].item())
f_tmp.write('\n')
f_tmp.write(q["ProcessedQuestion"].item())
f_tmp.write("\n*************\n")
for i_tmp in range(40):
f_tmp.write(sorted_examples[i_tmp].guid.split('###')[1:].__str__())
f_tmp.write(' Score:{:.3f}\n'.format(sorted_preds[i_tmp]))
f_tmp.write(sorted_examples[i_tmp].text_b.__str__())
f_tmp.write('\n')
f_tmp.write("*************\n")
for i_tmp in range(40):
f_tmp.write(df_facts.loc[df_facts["uid"] == example_preds[i_tmp].split('\t')[1], "text"].item())
f_tmp.write('\n')
f_tmp.write("*************\n")
for expl in q["explanation"].item().split(' '):
f_tmp.write(df_facts.loc[df_facts["uid"] == expl.split('|')[0], "text"].item())
f_tmp.write('\n')
f_tmp.write("*************\n")
prev_query = example.text_a
idx_start = i
qid = examples[idx_start].guid.split('###')[0]
q = df_questions.loc[df_questions["questionID"] == qid]
assert q["ProcessedQuestion"].item() == examples[idx_start].text_a
relevant_logits = logit_1[idx_start:]
relevant_examples = examples[idx_start:]
sorted_preds, sorted_examples = zip(*sorted(zip(relevant_logits, relevant_examples), key=lambda e: e[0],
reverse=True))
added_uids = set()
example_preds = []
for se in sorted_examples:
for fid in se.guid.split('###')[1:]:
if fid not in added_uids:
added_uids.add(fid)
example_preds.append('\t'.join([qid, fid]))
for dist_idx in np.argsort(X_dist[q.index.to_numpy()[0]]):
fid = df_facts.loc[dist_idx, "uid"]
if fid not in added_uids:
added_uids.add(fid)
example_preds.append('\t'.join([qid, fid]))
predictions.extend(example_preds)
if write_debug_file:
f_tmp.write(q["questionID"].item())
f_tmp.write('\n')
f_tmp.write(q["Question"].item())
f_tmp.write('\n')
f_tmp.write(q["ProcessedQuestion"].item())
f_tmp.write("\n*************\n")
for i_tmp in range(40):
f_tmp.write(sorted_examples[i_tmp].guid.split('###')[1:].__str__())
f_tmp.write(' Score:{:.3f}\n'.format(sorted_preds[i_tmp]))
f_tmp.write(sorted_examples[i_tmp].text_b.__str__())
f_tmp.write('\n')
f_tmp.write("*************\n")
for i_tmp in range(40):
f_tmp.write(df_facts.loc[df_facts["uid"] == example_preds[i_tmp].split('\t')[1], "text"].item())
f_tmp.write('\n')
f_tmp.write("*************\n")
for expl in q["explanation"].item().split(' '):
f_tmp.write(df_facts.loc[df_facts["uid"] == expl.split('|')[0], "text"].item())
f_tmp.write('\n')
f_tmp.write("*************\n")
f_tmp.close()
logging.info("Writing to file")
with open(pred_output_file, "w") as f:
f.write('\n'.join(predictions))
f.write('\n')
logging.info("len(df_questions)={}".format(len(df_questions)))
logging.info("len(predictions)={}".format(len(predictions)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--questions_file", type=str, required=True,
help="The tsv file containing the evaluation")
parser.add_argument("--facts_file", type=str, required=True,
help="The tsv file containing the common sense facts")
parser.add_argument("--examples_file", type=str, help="Examples file that is being evaluated")
parser.add_argument("--logits_file", type=str, help="Model predictions (liekly some file of the form *_preds.npy)")
parser.add_argument("--pred_output_file", type=str, required=True,
help="Name of the file where predictions will be written")
parser.add_argument("--mcq_choices", type=str, choices=['none', 'correct', 'all'], default="correct",
help="The choices to keep in the questions")
parser.add_argument("--write_debug_file", action='store_true')
args = parser.parse_args()
create_predictions_file(questions_file=args.questions_file, facts_file=args.facts_file,
examples_file=args.examples_file, logits_file=args.logits_file,
pred_output_file=args.pred_output_file, mcq_choices=args.mcq_choices,
write_debug_file=args.write_debug_file) | 45.948187 | 119 | 0.579725 | 1,130 | 8,868 | 4.309735 | 0.158407 | 0.029569 | 0.062834 | 0.036961 | 0.558111 | 0.482752 | 0.424641 | 0.424641 | 0.424641 | 0.424641 | 0 | 0.012739 | 0.256428 | 8,868 | 193 | 120 | 45.948187 | 0.725811 | 0.010149 | 0 | 0.491124 | 0 | 0 | 0.124501 | 0 | 0 | 0 | 0 | 0 | 0.011834 | 1 | 0.011834 | false | 0 | 0.04142 | 0 | 0.065089 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdfb7f7d975d38d147cc79c67eb8466db9daf8e8 | 1,884 | py | Python | pysm/semantic_modeling/assembling/autolabel/auto_label.py | binh-vu/semantic-modeling | b387584502ba1daa6abd6b7573828416f6426b49 | [
"MIT"
] | 3 | 2019-10-31T15:26:20.000Z | 2022-03-03T06:04:03.000Z | pysm/semantic_modeling/assembling/autolabel/auto_label.py | binh-vu/semantic-modeling | b387584502ba1daa6abd6b7573828416f6426b49 | [
"MIT"
] | 1 | 2021-10-05T14:57:29.000Z | 2022-03-27T01:58:41.000Z | pysm/semantic_modeling/assembling/autolabel/auto_label.py | binh-vu/semantic-modeling | b387584502ba1daa6abd6b7573828416f6426b49 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
from typing import Dict, Tuple, List, Set, Union, Optional
from data_structure import Graph
from semantic_modeling.assembling.autolabel.heuristic import preserved_structure_with_heuristic, get_gold_semantic_types
from semantic_modeling.assembling.autolabel.maxf1 import get_gold_triples, max_f1, max_f1_no_ambiguous
from semantic_modeling.assembling.autolabel.preserved_structure import preserved_structure
class AutoLabel:
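    """
    Automatically label a predicted semantic model against a gold model.

    Each method returns, roughly, a per-element correctness map, a map to the
    matched gold element (or None), and -- for the max-F1 variants -- the best
    achievable F1 score.
    """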
@staticmethod
def auto_label_max_f1(gold_sm: Graph, pred_sm: Graph,
is_blurring_label: bool) -> Tuple[Dict[int, bool], Dict[int, Optional[int]], float]:
gold_triples = get_gold_triples(gold_sm, is_blurring_label)
return max_f1(gold_sm, pred_sm, is_blurring_label, gold_triples)
@staticmethod
def auto_label_max_f1_no_ambiguous(gold_sm: Graph, pred_sm: Graph, is_blurring_label: bool
) -> Tuple[Dict[int, bool], Dict[int, Optional[int]], float]:
gold_triples = get_gold_triples(gold_sm, is_blurring_label)
return max_f1_no_ambiguous(gold_sm, pred_sm, is_blurring_label, gold_triples)
@staticmethod
def auto_label_preserved_structure(gold_sm: Graph,
pred_sm: Graph) -> Tuple[Dict[int, bool], Dict[int, Optional[int]]]:
gold_triples = get_gold_triples(gold_sm, is_blurring_label=False)
return preserved_structure(gold_sm, pred_sm, gold_triples)
@staticmethod
def auto_label_preserved_structure_heuristic_fix(
gold_sm: Graph, pred_sm: Graph) -> Tuple[Dict[int, bool], Dict[int, Optional[int]]]:
gold_triples = get_gold_triples(gold_sm, is_blurring_label=False)
gold_stypes = get_gold_semantic_types(gold_sm)
return preserved_structure_with_heuristic(gold_sm, pred_sm, gold_triples, gold_stypes)
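# A minimal usage sketch (assuming 'gold' and 'pred' are data_structure.Graph
# models of the same source):
#
#     labels, matches, best_f1 = AutoLabel.auto_label_max_f1(gold, pred, True)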
| 49.578947 | 120 | 0.728769 | 255 | 1,884 | 4.992157 | 0.196078 | 0.112333 | 0.094266 | 0.080126 | 0.699921 | 0.608013 | 0.536528 | 0.536528 | 0.480754 | 0.480754 | 0 | 0.005225 | 0.187367 | 1,884 | 37 | 121 | 50.918919 | 0.826257 | 0.02017 | 0 | 0.296296 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.148148 | false | 0 | 0.185185 | 0 | 0.518519 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdfb8afb236fa2a59d4614b476d34a5d38aae988 | 694 | py | Python | landscapes/scripts/convert_fitness_to_s.py | Peyara/Evolution-Counterdiabatic-Driving | e695fad703b2d339bed0013e5b4254ba2365c105 | [
"MIT"
] | 3 | 2020-08-24T20:24:41.000Z | 2020-08-26T02:16:16.000Z | landscapes/scripts/convert_fitness_to_s.py | hincz-lab/Evolution-Counterdiabatic-Driving | e695fad703b2d339bed0013e5b4254ba2365c105 | [
"MIT"
] | null | null | null | landscapes/scripts/convert_fitness_to_s.py | hincz-lab/Evolution-Counterdiabatic-Driving | e695fad703b2d339bed0013e5b4254ba2365c105 | [
"MIT"
] | null | null | null | import sys
import numpy as np
# This script takes in a file with fitness values separated by commas
# and converts the values to be s values (relative fitness as used in
# the model) instead.
# WARNING: Overwrites given file!
if len(sys.argv) < 2:
    print("Usage: python convert_fitness_to_s.py [name of file to convert]")
    sys.exit(1)  # bail out before sys.argv[1] is indexed below
data = []
# Read in fitness values
with open(sys.argv[1]) as infile:
data = [float(i.strip()) for i in infile.readline().split(",")]
# Do conversion: s = (last fitness)/fitness - 1, i.e. relative fitness with
# respect to the final entry; zero-fitness entries get a large sentinel value
data = [np.format_float_positional(data[-1]/i - 1) if i != 0 else 10000000000000 for i in data]
# Write out s values
with open(sys.argv[1], "w") as outfile:
outfile.write(",".join([str(i) for i in data])) | 30.173913 | 95 | 0.695965 | 119 | 694 | 4.016807 | 0.546218 | 0.043933 | 0.037657 | 0.07113 | 0.09205 | 0.09205 | 0 | 0 | 0 | 0 | 0 | 0.035149 | 0.180115 | 694 | 23 | 96 | 30.173913 | 0.804921 | 0.353026 | 0 | 0 | 0 | 0 | 0.149321 | 0.052036 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdfcac80e4077fb1f2378b55cba1401431e2ffec | 1,099 | py | Python | eats/behave/driver_steps.py | Etiqa/eats | 8c8e2da93d0014f6fbb208185712c5526dba1174 | [
"BSD-2-Clause"
] | null | null | null | eats/behave/driver_steps.py | Etiqa/eats | 8c8e2da93d0014f6fbb208185712c5526dba1174 | [
"BSD-2-Clause"
] | 5 | 2021-03-18T21:34:44.000Z | 2022-03-11T23:35:23.000Z | eats/behave/driver_steps.py | Etiqa/eats | 8c8e2da93d0014f6fbb208185712c5526dba1174 | [
"BSD-2-Clause"
] | null | null | null | from behave import *
from hamcrest import *
from selenium.common.exceptions import RemoteDriverServerException
from eats.pyhamcrest import array_equal_to_by_key
from eats.utils.mapping import table_mapping
from ..users import Users
@when(u'I press "{key}" key')
@when(u'{user_name:Username} presses "{key}" key')
def step_impl(context, key, user_name=Users.DEFAULT_USERNAME):
user = context.users.get(user_name)
application = user.current_application
assert_that(
calling(application.driver.send_special_key).with_args(key),
        # hamcrest's 'not_' matcher; the builtin 'not' would collapse it to a bool
        not_(raises(RemoteDriverServerException)),
"{unsupported} key is not supported".format(unsupported=key)
)
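# Matching feature-file steps (hypothetical examples):
#     When I press "ENTER" key
#     When Alice presses "TAB" key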
@then(u'{user_name:Username} should have "{name}" meta contents element')
def step_impl(context, user_name, name):
user = context.users.get(user_name)
application = user.current_application
contents = application.driver.get_metadata_elements_content_by_name(name)
keys = context.table.headings
assert_that(table_mapping(contents, keys=keys), array_equal_to_by_key(table_mapping(context.table), "content")) | 40.703704 | 115 | 0.767971 | 147 | 1,099 | 5.52381 | 0.408163 | 0.059113 | 0.029557 | 0.034483 | 0.189655 | 0.147783 | 0.147783 | 0.147783 | 0.147783 | 0.147783 | 0 | 0 | 0.128298 | 1,099 | 27 | 115 | 40.703704 | 0.847599 | 0 | 0 | 0.173913 | 0 | 0 | 0.148182 | 0 | 0 | 0 | 0 | 0 | 0.086957 | 1 | 0.086957 | false | 0 | 0.26087 | 0 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdfcb80abeeba8ef801afb6b8c9b9a48834e2016 | 5,526 | py | Python | homebytwo/routes/utils.py | drixselecta/homebytwo | 29d26ce9f5586943e3b64c95aa4ce9ea7263bd10 | [
"MIT"
] | 7 | 2018-03-10T20:58:59.000Z | 2021-08-22T17:18:09.000Z | homebytwo/routes/utils.py | HomebyTwo/homebytwo | 29d26ce9f5586943e3b64c95aa4ce9ea7263bd10 | [
"MIT"
] | 69 | 2017-02-01T21:15:43.000Z | 2022-02-26T09:33:27.000Z | homebytwo/routes/utils.py | drixselecta/homebytwo | 29d26ce9f5586943e3b64c95aa4ce9ea7263bd10 | [
"MIT"
] | null | null | null | from collections import namedtuple
from itertools import accumulate, chain, islice, tee
from pathlib import Path
from django.contrib.gis.db.models.functions import Distance, LineLocatePoint
from django.contrib.gis.measure import D
from .fields import LineSubstring
from .models import ActivityType, Place
# named tuple to handle Urls
Link = namedtuple("Link", ["url", "text"])
GARMIN_ACTIVITY_TYPE_MAP = {
ActivityType.ALPINESKI: "resort_skiing_snowboarding",
ActivityType.BACKCOUNTRYSKI: "backcountry_skiing_snowboarding",
ActivityType.ELLIPTICAL: "elliptical",
ActivityType.HANDCYCLE: "cycling",
ActivityType.HIKE: "hiking",
ActivityType.ICESKATE: "skating",
ActivityType.INLINESKATE: "skating",
ActivityType.NORDICSKI: "cross_country_skiing",
ActivityType.RIDE: "cycling",
ActivityType.ROCKCLIMBING: "rock_climbing",
ActivityType.ROWING: "rowing",
ActivityType.RUN: "running",
ActivityType.SNOWBOARD: "resort_skiing_snowboarding",
ActivityType.SNOWSHOE: "hiking",
ActivityType.STAIRSTEPPER: "fitness_equipment",
ActivityType.STANDUPPADDLING: "stand_up_paddleboarding",
ActivityType.SWIM: "swimming",
ActivityType.VIRTUALRIDE: "cycling",
ActivityType.VIRTUALRUN: "running",
ActivityType.WALK: "walk",
ActivityType.WEIGHTTRAINING: "fitness_equipment",
ActivityType.WORKOUT: "strength_training",
}
def get_image_path(instance, filename):
"""
callable to define the image upload path according
to the type of object: segment, route, etc.. as well as
the id of the object.
"""
return Path("images", instance.__class__.__name__, str(instance.id), filename)
def current_and_next(some_iterable):
"""
use itertools to make current and next item of an iterable available:
http://stackoverflow.com/questions/1011938/python-previous-and-next-values-inside-a-loop
used to 'create_segments_from_checkpoints'.
"""
items, nexts = tee(some_iterable, 2)
nexts = chain(islice(nexts, 1, None), [None])
return zip(items, nexts)
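# e.g. list(current_and_next([1, 2, 3])) -> [(1, 2), (2, 3), (3, None)]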
def create_segments_from_checkpoints(checkpoints, start=0, end=1):
"""
returns a list of segments as tuples with start and end locations
along the original line.
"""
# sorted list of line_locations from the list of places as
# well as the start and the end location of the segment where
# the places were found.
line_locations = chain(
[start], [checkpoint.line_location for checkpoint in checkpoints], [end]
)
# use the custom iterator, exclude segments where start and end
# locations are the same. Also exclude segment where 'nxt == None`.
segments = [
(crt, nxt)
for crt, nxt in current_and_next(line_locations)
if crt != nxt and nxt
]
return segments
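# e.g. checkpoints at 0.2 and 0.5 yield [(0, 0.2), (0.2, 0.5), (0.5, 1)]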
def get_places_from_segment(segment, line, max_distance):
"""
find places within the segment of a line and annotate them with
the line location on the original line.
"""
start, end = segment
# create the Linestring geometry
subline = LineSubstring(line, start, end)
# find places within max_distance of the linestring
places = get_places_from_line(subline, max_distance)
# iterate over found places to change the line_location
# from the location on the segment to the location on
# the original linestring.
for place in places:
# relative line location to the start point of the subline
length = place.line_location * (end - start)
# update attribute with line location on the original line
place.line_location = start + length
return places
def get_places_from_line(line, max_distance):
"""
returns places within a max_distance of a Linestring Geometry
ordered by, and annotated with the `line_location` and the
`distance_from_line`:
* `line_location` is the location on the line expressed as a
float between 0.0 and 1.0.
* `distance_from_line` is a geodjango Distance object.
"""
# convert max_distance to Distance object
max_d = D(m=max_distance)
# find all places within max distance from line
places = Place.objects.filter(geom__dwithin=(line, max_d))
# annotate with distance to line
places = places.annotate(distance_from_line=Distance("geom", line))
# annotate with location along the line between 0 and 1
places = places.annotate(line_location=LineLocatePoint(line, "geom"))
# remove start and end places within 1% of start and end location
places = places.filter(
line_location__gt=0.01,
line_location__lt=0.99,
)
# sort in order of appearance along the line
places = places.order_by("line_location")
return places
def get_places_within(point, max_distance=100):
# make range a distance object
max_d = D(m=max_distance)
# get places within range
places = Place.objects.filter(geom__distance_lte=(point, max_d))
# annotate with distance
places = places.annotate(distance_from_line=Distance("geom", point))
# sort by distance
places = places.order_by(
"distance_from_line",
)
return places
def get_distances(points):
"""
    Return the cumulative distances along the list of points (running totals of the per-segment distances).
"""
def get_relative_distances():
if points:
yield 0
yield from (p2.distance(p1) for p1, p2 in zip(points[1:], points))
return list(accumulate(get_relative_distances()))
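# e.g. three evenly spaced points -> [0, d, 2*d], where d is the spacing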
| 31.758621 | 92 | 0.707926 | 711 | 5,526 | 5.35865 | 0.298172 | 0.040945 | 0.025197 | 0.016535 | 0.096588 | 0.056693 | 0.04147 | 0.04147 | 0 | 0 | 0 | 0.00755 | 0.209012 | 5,526 | 173 | 93 | 31.942197 | 0.864104 | 0.344734 | 0 | 0.061728 | 0 | 0 | 0.097864 | 0.0306 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098765 | false | 0 | 0.08642 | 0 | 0.271605 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdfe275d909128740904498e8c3a21dcaa2bafb4 | 263 | py | Python | aureaSym.py | osmartormena/introMATLAB | 6e505a17d6666d92b4502eff746f4b4dcdcd3c1c | [
"CC0-1.0"
] | null | null | null | aureaSym.py | osmartormena/introMATLAB | 6e505a17d6666d92b4502eff746f4b4dcdcd3c1c | [
"CC0-1.0"
] | null | null | null | aureaSym.py | osmartormena/introMATLAB | 6e505a17d6666d92b4502eff746f4b4dcdcd3c1c | [
"CC0-1.0"
] | null | null | null | # Golden ratio (phi) calculation
import sympy
d = 20
phi = sympy.symbols('phi', nonnegative=True)
eqn = sympy.Eq(1/phi, phi - 1)
sol = sympy.solve(eqn)
sympy.pprint(sol)
phiAprox = sympy.N(sol[0], d)
print('For ', d, ' significant digits, ϕ = ', phiAprox)
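# Expected output: phi to 20 significant digits, 1.6180339887498948482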
| 18.785714 | 60 | 0.669202 | 42 | 263 | 4.190476 | 0.619048 | 0.090909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022727 | 0.163498 | 263 | 13 | 61 | 20.230769 | 0.777273 | 0.106464 | 0 | 0 | 0 | 0 | 0.158798 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdfe32d084754eda156889373513889dc3a4c1f0 | 15,732 | py | Python | core/src/structs_classes/extract_structs.py | azurlane-doujin/AzurLanePaintingExtract-v1.0 | ef4f25e70b3ca1b9df4304132cc7612c8f5efebb | [
"MIT"
] | 144 | 2019-06-13T06:43:43.000Z | 2022-03-29T15:07:57.000Z | core/src/structs_classes/extract_structs.py | Shabi1213/AzurLanePaintingExtract-v1.0 | ef4f25e70b3ca1b9df4304132cc7612c8f5efebb | [
"MIT"
] | 2 | 2020-08-02T15:08:58.000Z | 2021-11-29T02:34:18.000Z | core/src/structs_classes/extract_structs.py | Shabi1213/AzurLanePaintingExtract-v1.0 | ef4f25e70b3ca1b9df4304132cc7612c8f5efebb | [
"MIT"
] | 19 | 2020-03-01T10:06:52.000Z | 2022-02-06T13:49:26.000Z | import collections
import os
import re
import time
from itertools import filterfalse
import wx
from core.src.static_classes.static_data import GlobalData
from core.src.structs_classes.basic_class import BasicInfo, BasicInfoList
class PerInfo(BasicInfo):
def __init__(self, name, val, has_cn):
super(PerInfo, self).__init__(name, val)
self.sub_data = 1
self.tex_step = 2
        self.mesh_step = 2
self.data = GlobalData()
        # tree storage structure group
self._tex_path = "Empty"
self.more_tex = ["Empty"]
self._mesh_path = "Empty"
self.more_mesh = ["Empty"]
        # target file location
self.lay_in = ""
        # whether restoration can be applied
self._is_able_work = False
        # export target location
self._save_path: str = ""
        # Chinese name
self.cn_name = val
self.has_cn = has_cn
        # parent component
self.parent = None
self.must_able = False
# tree ID
self.key = ...
self.tree_ID = ...
self.tex_id = ...
self.more_tex_per_id = []
self.mesh_id = ...
self.more_mesh_per_id = []
self.action_group = [
"independent",
"face_match",
"atlas_split",
"set_able",
"split_only",
"remove_item",
"sprite_spilt"
]
        # whether to save under the Chinese name
self._is_save_as_cn = True
def __contains__(self, item):
if self.name in item or self.cn_name in item:
return True
else:
return False
@property
def is_able_work(self):
if self.must_able:
return True
else:
return self._is_able_work
@property
def tex_path(self):
return self._tex_path
@tex_path.setter
def tex_path(self, value):
self._tex_path = value
self._is_able_work = self.is_able()
@property
def mesh_path(self):
return self._mesh_path
@mesh_path.setter
def mesh_path(self, value):
self._mesh_path = value
self._is_able_work = self.is_able()
@property
def save_path(self):
return self._save_path
@save_path.setter
def save_path(self, value):
if self._is_save_as_cn:
self._save_path = os.path.join(value, self.cn_name + ".png")
else:
self._save_path = os.path.join(value, self.name + ".png")
@property
def is_save_as_cn(self):
return self._is_save_as_cn
@is_save_as_cn.setter
def is_save_as_cn(self, value):
if isinstance(value, bool):
self._is_save_as_cn = value
@staticmethod
def is_def(val):
return bool(val)
def get_is_able_work(self):
return self._is_able_work
def is_able(self):
if os.path.isfile(self.tex_path) and os.path.isfile(self.mesh_path):
return True
else:
return False
def transform_able(self):
self.must_able = not self.must_able
def set_single_path(self, path):
self._save_path = path
def append_item_tree(self, tree: wx.TreeCtrl):
        # name
self.key = key = tree.AppendItem(self.tree_ID, f"名称:{self.cn_name}")
if self.is_able_work:
tree.SetItemTextColour(key, wx.Colour(253, 86, 255))
tree.AppendItem(self.tree_ID, f"索引名称:{self.name}")
# texture
self.tex_id = tree.AppendItem(self.tree_ID, f"Texture文件路径:{self.tex_path}")
more_tex_id = tree.AppendItem(self.tree_ID, f"其他Texture路径({len(self.more_tex)})")
for each_path in self.more_tex:
val = tree.AppendItem(more_tex_id, each_path)
self.more_tex_per_id.append(val)
# mesh
self.mesh_id = tree.AppendItem(self.tree_ID, f"Mesh文件路径:{self.mesh_path}")
more_mesh_id = tree.AppendItem(self.tree_ID, f"其他Mesh路径({len(self.more_mesh)})")
for each_path in self.more_mesh:
val = tree.AppendItem(more_mesh_id, each_path)
self.more_mesh_per_id.append(val)
action_root = tree.AppendItem(self.tree_ID, "功能按键")
        # action keys
independent = self.action_group[self.data.at_independent] = tree.AppendItem(action_root, "将当前的组合独立")
tree.SetItemTextColour(independent, wx.Colour(255, 0, 166))
face_match = self.action_group[self.data.at_face_match] = tree.AppendItem(action_root, "为当前立绘添加附加表情")
tree.SetItemTextColour(face_match, wx.Colour(0, 16, 166))
atlas_spilt = self.action_group[self.data.at_atlas_split] = tree.AppendItem(action_root, "进行Q版小人切割")
tree.SetItemTextColour(atlas_spilt, wx.Colour(140, 0, 166))
sprite_spilt = self.action_group[self.data.at_sprite_split] = tree.AppendItem(action_root, "进行Sprite切割 ")
tree.SetItemTextColour(sprite_spilt, wx.Colour(248, 40, 255))
set_able = self.action_group[self.data.at_set_able] = tree.AppendItem(action_root,
f"强制转换为可还原状态【当前{self.must_able}】")
tree.SetItemTextColour(set_able, wx.Colour(255, 177, 166))
split_only = self.action_group[self.data.at_split_only] = tree.AppendItem(action_root, "仅进行立绘还原切割 ")
tree.SetItemTextColour(split_only, wx.Colour(248, 66, 255))
remove_item = self.action_group[self.data.at_remove_item] = tree.AppendItem(action_root, "删除该元素 ")
tree.SetItemTextColour(remove_item, wx.Colour(248, 0, 255))
def append_to_tree(self, tree: wx.TreeCtrl, tree_root: wx.TreeItemId):
"""
        Add this item to the tree, building the tree entries
        :param tree: tree object
        :param tree_root: root id
:return:
"""
self.more_mesh_per_id.clear()
self.more_tex_per_id.clear()
self.tree_ID = tree.AppendItem(tree_root, self.cn_name)
self.append_item_tree(tree)
def get_select(self, type_is: bool):
"""
        Get the selected list
        :param type_is: true: texture, false: mesh
        :return: list, the selected list
"""
if type_is:
return self.more_tex
else:
return self.more_mesh
    # path-setting helpers
def set_tex(self, index):
self.tex_path = self.more_tex[index]
return self.tex_id, f"Texture文件路径:{self.tex_path}"
def set_mesh(self, index):
self.mesh_path = self.more_mesh[index]
return self.mesh_id, f"Mesh文件路径:{self.mesh_path}"
def add_save(self, path):
self.save_path = path
def clear_tex(self):
self.tex_id, self.more_tex, self.tex_path, self.more_tex_per_id = None, [], "Empty", []
def clear_mesh(self):
self.mesh_id, self.more_mesh, self.mesh_path, self.more_mesh_per_id = None, [], "Empty", []
def build_sub(self, value_type, file_type, index):
"""
        Find the target among this item's own tree ids
:param value_type:
:param file_type:
:param index:
:return:
"""
val = PerInfo(self.name, self.val, self.has_cn)
if value_type == self.data.td_single:
if file_type == self.data.td_texture_type:
val.tex_path = self.tex_path
elif file_type == self.data.td_mesh_type:
val.mesh_path = self.mesh_path
elif value_type == self.data.td_list_item:
if file_type == self.data.td_texture_type:
val.tex_path = self.more_tex[index]
elif file_type == self.data.td_mesh_type:
val.mesh_path = self.more_mesh[index]
return os.path.isfile(val.tex_path), val
def independent(self, name, tree, tree_root):
        # split this combination off as an independent entry
target = PerInfo(name, f"{self.val}-# {self.sub_data}", self.has_cn)
target.tex_path = self.tex_path
target.mesh_path = self.mesh_path
target.append_to_tree(tree, tree_root)
self.parent[target.name] = target
self.sub_data += 1
class PerWorkList(BasicInfoList):
def __init__(self, item: collections.abc.Iterable = None, mesh_match=None, texture_match=None,
is_ignore_case=False):
super(PerWorkList, self).__init__(item)
self.is_ignore_case = is_ignore_case
self.texture_match = texture_match
self.mesh_match = mesh_match
self.data = GlobalData()
    # display section
def show_in_tree(self, tree, tree_root):
list(map(lambda x: self._info_dict[x].append_to_tree(tree, tree_root), self._key_list))
def append(self, name, cn_name, has_cn):
value = PerInfo(name, cn_name, has_cn)
self[value.name] = value
return value
def remove(self, item: collections.abc.Iterable):
return PerWorkList(super(PerWorkList, self).remove(item))
    # lookup section
def find_by_id(self, id):
values = list(filter(lambda x: self._info_dict[x].tree_ID == id, self._key_list))
        if len(values) == 0:
return False, None
return True, self[values[0]]
def find_in_each(self, id) -> (bool, bool, bool, int, PerInfo):
"""
        Look for the given id in every entry
        :param id:
        :return: (success, kind [single=True / list=False], kind [tex=True / mesh=False], index, the object itself)
"""
target = None
for value in self:
            # enter if the id matches any of the following
            if value.tex_id == id or id in value.more_tex_per_id or value.mesh_id == id or \
                    id in value.more_mesh_per_id:
target = value
if target is None:
return False, False, False, -1, None
if id == target.tex_id:
return True, self.data.td_single, self.data.td_texture_type, 0, target
elif id == target.mesh_id:
return True, self.data.td_single, self.data.td_mesh_type, 0, target
elif id in target.more_tex_per_id:
return True, self.data.td_list_item, self.data.td_texture_type, target.more_tex_per_id.index(id), target
elif id in target.more_mesh_per_id:
return True, self.data.td_list_item, self.data.td_mesh_type, target.more_mesh_per_id.index(id), target
def find_action(self, id) -> (bool, int, PerInfo):
"""
        Check whether the id is one of the special action buttons
        :param id:
        :return: success [true/false], action type, target object
"""
target = None
for value in self:
            # enter if the id is one of the action buttons
if id in value.action_group:
target = value
break
if target is None:
return False, -1, target
else:
index = target.action_group.index(id)
return True, index, target
    # add section
def set_tex(self, value, name=None):
"""
        Add a texture
        :param name: [optional] name of the item the new texture path belongs to; if None it is derived from value
        :param value: path of the newly added texture
:return:
"""
has_ = False
if isinstance(value, str) and os.path.isfile(value):
if name is not None:
key = name
else:
key = os.path.splitext(os.path.basename(value))[0]
if re.match(r'.+\s#\d+\.png', value, re.IGNORECASE):
has_ = True
key = re.split(r'\s#\d+(\[alpha\])?$', key)[0]
            # assignment step
val: PerInfo = self._info_dict[key]
if value not in val.more_tex:
val.more_tex.append(value)
lower_path = os.path.split(value)[0].lower()
            # if a path is already set, honor the suffix priority
if 0 < val.tex_step and lower_path.endswith(self.texture_match[0]):
val.tex_path = value
val.tex_step = 0
elif 1 < val.tex_step and lower_path.endswith(self.texture_match[1]):
val.tex_path = value
val.tex_step = 1
else:
val.tex_path = value
val.tex_step = 2
if not has_:
val.tex_path = value
def set_mesh(self, value, name=None):
"""
        Add a mesh
        :param name: [optional] name of the item the new mesh path belongs to; if None it is derived from value
        :param value: path of the newly added mesh
:return:
"""
has_ = False
if isinstance(value, str) and os.path.isfile(value):
if name is not None:
key = name
else:
key = os.path.splitext(os.path.basename(value))[0]
if re.match(r'.+\s#\d+\.obj', value, re.IGNORECASE):
has_ = True
key = re.split(r'\s#\d+(\[alpha\])?$', key)[0]
val: PerInfo = self._info_dict[key]
if value not in val.more_mesh:
val.more_mesh.append(value)
lower_path = os.path.split(value)[0].lower()
            # if a path is already set, honor the suffix priority
if 0 < val.mesh_step and lower_path.endswith(self.mesh_match[0]):
val.mesh_path = value
val.mesh_step = 0
elif 1 < val.mesh_step and lower_path.endswith(self.mesh_match[1]):
val.mesh_path = value
val.mesh_step = 1
else:
val.mesh_path = value
val.mesh_step = 2
if not has_:
val.mesh_path = value
def append_name(self, name, names: dict, *, has_cn=False):
"""
        Add a new entry
        :param names: preset key-value mapping
        :param name: index key of the entry
        :param has_cn: whether the entry has a Chinese name
:return:
"""
# if name == "unknown4":
# print(name)
if self.is_ignore_case:
name=name.lower()
if name not in self._key_list:
if name not in names.keys():
has_cn = False
target_cn = name
else:
has_cn = True
target_cn = names[name]
            # an empty Chinese name is also treated as having none
if target_cn == "":
target_cn = name
has_cn = False
value = PerInfo(name, target_cn, has_cn)
value.parent = self
self[name] = value
return name
else:
return name
    # clear section
def clear_mesh(self):
list(map(lambda x: x.clear_mesh(), self))
def clear_tex(self):
list(map(lambda x: x.clear_tex(), self))
    # build section
def build_able(self):
val = filter(lambda x: x.get_is_able_work(), self)
value = PerWorkList(val)
return value
def build_unable(self):
val = filterfalse(lambda x: x.get_is_able_work(), self)
value = PerWorkList(val)
return value
def build_search(self):
val = map(lambda x: f"{x.name}{x.cn_name}", self)
return list(val)
def build_filter(self):
val = map(lambda x: f"{x.name}", self)
val = list(enumerate(list(val), 0))
return val
def build_skip(self, filename):
filename = list(map(lambda x: os.path.splitext(os.path.basename(x))[0], filename))
val = filter(lambda x: x in filename, self)
return PerWorkList(val)
def build_from_indexes(self, indexes):
val = map(lambda x: self[x], indexes)
value = PerWorkList(val)
return value
def build_from_pattern(self, pattern):
        val = list(filter(lambda x: re.match(pattern, x[1]), self.build_filter()))
val = list(zip(*val))
if len(val) == 2:
return self.build_from_indexes(val[0])
else:
return PerWorkList()
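    # A minimal usage sketch (hypothetical names/paths; assumes this class is
    # PerWorkList, as the build_* constructors suggest, and that entries are
    # filled via append_name/set_tex/set_mesh):
    #   work = PerWorkList()
    #   work.append_name("hair_front", names={}, has_cn=False)
    #   work.set_tex("/textures/hair_front.png")
    #   ready = work.build_able()                  # entries ready to export
    #   subset = work.build_from_pattern(r"hair")  # filter entries by name regex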
| 33.189873 | 117 | 0.557335 | 1,992 | 15,732 | 4.165161 | 0.11496 | 0.022177 | 0.016874 | 0.010124 | 0.42883 | 0.343136 | 0.252139 | 0.191877 | 0.164156 | 0.164156 | 0 | 0.009229 | 0.3388 | 15,732 | 473 | 118 | 33.260042 | 0.788406 | 0.054475 | 0 | 0.267913 | 0 | 0 | 0.037146 | 0.014171 | 0 | 0 | 0 | 0 | 0 | 1 | 0.140187 | false | 0 | 0.024922 | 0.021807 | 0.28972 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdff65087e9d7a27500aa847fc385ea3b6c07441 | 3,950 | py | Python | scbw_mq/tournament/benchmark/storage.py | Games-and-Simulations/sc-mq | f9ae798948add7fd84b77d75ca26ade94620f84e | [
"MIT"
] | 2 | 2018-05-10T18:10:28.000Z | 2018-05-13T18:14:33.000Z | scbw_mq/tournament/benchmark/storage.py | Games-and-Simulations/sc-mq | f9ae798948add7fd84b77d75ca26ade94620f84e | [
"MIT"
] | 1 | 2019-09-20T14:14:49.000Z | 2019-09-20T14:14:49.000Z | scbw_mq/tournament/benchmark/storage.py | Games-and-Simulations/sc-mq | f9ae798948add7fd84b77d75ca26ade94620f84e | [
"MIT"
] | null | null | null | import logging
import os
import shutil
from os.path import exists
from typing import Optional
from scbw.map import check_map_exists
from scbw.player import check_bot_exists
from scbw.utils import download_extract_zip
from ...utils import read_lines
logger = logging.getLogger(__name__)
class BenchmarkException(Exception):
pass
class RerunningBenchmarkException(BenchmarkException):
pass
class Benchmark:
bot_file: str
map_file: str
elo_file: str
repeat_games: int
bot_dir: str
map_dir: str
result_dir: str
def check_structure(self):
if not exists(f"{self.bot_file}"):
raise BenchmarkException(f"Bot file cannot be found in {self.bot_file}")
if not exists(self.map_file):
raise BenchmarkException(f"Map file cannot be found in {self.map_file}")
if not exists(self.elo_file):
raise BenchmarkException(f"Elo file cannot be found in {self.elo_file}")
if not exists(self.bot_dir):
raise BenchmarkException(f"Bot dir cannot be found in {self.bot_dir}")
if not exists(f"{self.map_dir}"):
raise BenchmarkException(f"Map dir cannot be found in {self.map_dir}")
if not exists(f"{self.result_dir}"):
raise BenchmarkException(f"Result dir cannot be found in {self.result_dir}")
bots = read_lines(self.bot_file)
for bot in bots:
check_bot_exists(bot, self.bot_dir)
maps = read_lines(self.map_file)
for map_file in maps:
check_map_exists(f"{self.map_dir}/{map_file}")
def has_results(self):
return len(os.listdir(self.result_dir)) > 0
class BenchmarkStorage:
def find_benchmark(self, name: str) -> Optional[Benchmark]:
        raise NotImplementedError
def get_benchmark(self, local_benchmark_dir):
with open(f'{local_benchmark_dir}/BENCHMARK_REPEAT_GAMES', 'r') as f:
repeat_games = int(f.read().strip())
benchmark = Benchmark()
benchmark.bot_file = f"{local_benchmark_dir}/BENCHMARK_BOTS"
benchmark.map_file = f"{local_benchmark_dir}/BENCHMARK_MAPS"
benchmark.elo_file = f"{local_benchmark_dir}/BENCHMARK_ELOS"
benchmark.bot_dir = f"{local_benchmark_dir}/bots"
benchmark.map_dir = f"{local_benchmark_dir}/maps"
benchmark.result_dir = f"{local_benchmark_dir}/results"
benchmark.repeat_games = repeat_games
return benchmark
class LocalBenchmarkStorage(BenchmarkStorage):
def __init__(self, base_dir: str):
self.base_dir = base_dir
def find_benchmark(self, name: str) -> Optional[Benchmark]:
if exists(self.benchmark_dir(name)):
return self.get_benchmark(self.benchmark_dir(name))
return None
def benchmark_dir(self, benchmark_name: str):
return f'{self.base_dir}/{benchmark_name}'
class SscaitBenchmarkStorage(LocalBenchmarkStorage):
BASE_URL = "http://sscaitournament.com/benchmarks"
def find_benchmark(self, name: str) -> Optional[Benchmark]:
if not name.startswith("SSCAIT"):
return None
if exists(self.benchmark_dir(name)):
return self.get_benchmark(self.benchmark_dir(name))
return self.try_download(name)
def try_download(self, name: str) -> Optional[Benchmark]:
benchmark_dir = self.benchmark_dir(name)
try:
os.makedirs(benchmark_dir, exist_ok=False)
download_extract_zip(f"{self.BASE_URL}/{name}.zip", benchmark_dir)
return self.get_benchmark(benchmark_dir)
        except Exception:
            # logger.exception already records the traceback; no need to log it twice
            logger.exception(f"Failed to download benchmark {name}")
logger.info(f"Cleaning up dir {benchmark_dir}")
shutil.rmtree(self.benchmark_dir(name))
return None
# Feel free to include other benchmark sources!
# But they need to respect benchmark / bot structure :)
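# A minimal usage sketch (hypothetical paths; "SSCAIT_2018" is an assumed
# benchmark name, not shipped with this module):
#   storage = SscaitBenchmarkStorage("/tmp/benchmarks")
#   benchmark = storage.find_benchmark("SSCAIT_2018")  # downloads on first use
#   if benchmark is not None:
#       benchmark.check_structure()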
| 31.854839 | 88 | 0.678481 | 517 | 3,950 | 4.969052 | 0.197292 | 0.093422 | 0.052939 | 0.049046 | 0.317633 | 0.234332 | 0.111327 | 0.111327 | 0.0942 | 0.059167 | 0 | 0.000328 | 0.227848 | 3,950 | 123 | 89 | 32.113821 | 0.841967 | 0.025063 | 0 | 0.137931 | 0 | 0 | 0.189709 | 0.082121 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0.022989 | 0.103448 | 0.022989 | 0.482759 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdff857464c359af0d0606a7da2091b6840dd15a | 21,855 | py | Python | dev-server/scripts/docker-entrypoint.py | circlenaut/docker-images | 1768222b496288b6d08a51f979ade97554648817 | [
"MIT"
] | null | null | null | dev-server/scripts/docker-entrypoint.py | circlenaut/docker-images | 1768222b496288b6d08a51f979ade97554648817 | [
"MIT"
] | null | null | null | dev-server/scripts/docker-entrypoint.py | circlenaut/docker-images | 1768222b496288b6d08a51f979ade97554648817 | [
"MIT"
] | null | null | null | #!/usr/bin/python
"""
Main Workspace Run Script
"""
import os
import sys
import logging
import coloredlogs
import json
import math
import glob
import yaml
import yamale
import scripts.functions as func
from copy import copy
from subprocess import run, call
### Enable logging
logging.basicConfig(
format="%(asctime)s [%(levelname)s] %(message)s",
level=logging.INFO,
stream=sys.stdout,
)
log = logging.getLogger(__name__)
log.info("Starting...")
### Read YAML config file
#configs = list()
configs_list = dict()
#yaml_exts = ["yaml", "yml"]
config_path = str()
# Load config files with alternative extensions
#for ext in yaml_exts:
# path = f'/scripts/config.{ext}'
# if os.path.exists(path):
# configs.append(path)
# Check if multiple config files exist and load the user-defined one or the
# system/user-overwritten one. The "both exist" case must be checked first,
# otherwise that branch is unreachable.
if os.path.exists('/scripts/config.yml') and os.path.exists('/scripts/config.yaml'):
    config_path = '/scripts/config.yml'
    log.warning("both config.yaml and config.yml exist, using config.yml")
    os.remove('/scripts/config.yaml')
    # Validate file
    schema = yamale.make_schema('/scripts/schema.yaml')
    data = yamale.make_data(config_path)
    valid_config = func.yaml_valid(schema, data, "INFO")
elif os.path.exists('/scripts/config.yaml'):
    config_path = '/scripts/config.yaml'
    # Validate file
    schema = yamale.make_schema('/scripts/schema.yaml')
    data = yamale.make_data(config_path)
    valid_config = func.yaml_valid(schema, data, "INFO")
elif os.path.exists('/scripts/config.yml'):
    config_path = '/scripts/config.yml'
    # Validate file
    schema = yamale.make_schema('/scripts/schema.yaml')
    data = yamale.make_data(config_path)
    valid_config = func.yaml_valid(schema, data, "INFO")
else:
    log.debug("No yaml config files available to load")
# Load config as yaml object
if os.path.exists(config_path):
if valid_config:
log.info(f"Loading config file: '{config_path}'")
with open(config_path, "r") as f:
configs_list = yaml.load(f, Loader=yaml.FullLoader)
log.debug(configs_list)
else:
log.debug(f"Config does not exist: '{config_path}'")
### Read or set docker default envs
docker_env = {
'LOG_VERBOSITY': os.getenv("LOG_VERBOSITY", "INFO"),
'CONFIG_BACKUP_ENABLED': os.getenv("CONFIG_BACKUP_ENABLED", "true"),
'WORKSPACE_USER': os.getenv("WORKSPACE_AUTH_USER", "coder"),
'WORKSPACE_GROUP': os.getenv("WORKSPACE_AUTH_GROUP", "users"),
'WORKSPACE_USER_SHELL': os.getenv("WORKSPACE_USER_SHELL", "zsh"),
'WORKSPACE_USER_PASSWORD': os.getenv("WORKSPACE_AUTH_PASSWORD", "password"),
'RESOURCES_PATH': os.getenv("RESOURCES_PATH", "/resources"),
'WORKSPACE_HOME': os.getenv("WORKSPACE_HOME", "/workspace"),
'APPS_PATH': os.getenv("APPS_PATH", "/apps"),
'DATA_PATH': os.getenv("DATA_PATH", "/data"),
'PROXY_BASE_URL': os.getenv("PROXY_BASE_URL", "/"),
'ZSH_PROMPT': os.getenv("ZSH_PROMPT", "none"),
'ZSH_THEME': os.getenv("ZSH_THEME", "spaceship"),
'ZSH_PLUGINS': os.getenv("ZSH_PLUGINS", "all"),
'CONDA_ENV_PATH': os.getenv("CONDA_ENV_PATH", ""),
'CADDY_VIRTUAL_PORT': os.getenv("VIRTUAL_PORT", "80"),
'CADDY_VIRTUAL_HOST': os.getenv("VIRTUAL_HOST", ""),
'CADDY_VIRTUAL_BIND_NET': os.getenv("VIRTUAL_BIND_NET", "proxy"),
'CADDY_VIRTUAL_PROTO': os.getenv("VIRTUAL_PROTO", "http"),
'CADDY_VIRTUAL_BASE_URL': os.getenv("VIRTUAL_BASE_URL", "/"),
'CADDY_PROXY_ENCODINGS_GZIP': os.getenv("PROXY_ENCODINGS_GZIP", "true"),
'CADDY_PROXY_ENCODINGS_ZSTD': os.getenv("PROXY_ENCODINGS_ZSTD", "true"),
'CADDY_PROXY_TEMPLATES': os.getenv("PROXY_TEMPLATES", "true"),
'CADDY_LETSENCRYPT_EMAIL': os.getenv("LETSENCRYPT_EMAIL", "admin@example.com"),
'CADDY_LETSENCRYPT_ENDPOINT': os.getenv("LETSENCRYPT_ENDPOINT", "dev"),
'CADDY_HTTP_PORT': os.getenv("HTTP_PORT", "80"),
'CADDY_HTTPS_ENABLE': os.getenv("HTTPS_ENABLE", "true"),
'CADDY_HTTPS_PORT': os.getenv("HTTPS_PORT", "443"),
'CADDY_AUTO_HTTPS': os.getenv("AUTO_HTTPS", "true"),
'CADDY_WORKSPACE_SSL_ENABLED': os.getenv("WORKSPACE_SSL_ENABLED", "false"),
'FB_PORT': os.getenv("FB_PORT", "8055"),
'FB_BASE_URL': os.getenv("FB_BASE_URL", "/data"),
'FB_ROOT_DIR': os.getenv("FB_ROOT_DIR", "/workspace"),
'VSCODE_BIND_ADDR': os.getenv("VSCODE_BIND_ADDR", "0.0.0.0:8300"),
'VSCODE_BASE_URL': os.getenv("VSCODE_BASE_URL", "/code"),
'APP_BIND_ADDR': os.getenv("APP_BIND_ADDR", "0.0.0.0:8080"),
'APP_BASE_URL': os.getenv("APP_BASE_URL", "/app"),
'APP_ROOT_DIR': os.getenv("APP_ROOT_DIR", "/apps/app"),
'APP_USER': os.getenv("APP_USER", "admin"),
'APP_PASSWORD': os.getenv("APP_PASSWORD", "password")
}
### Set verbosity level. log.info occasionally throws EOF errors with high verbosity
if docker_env.get("LOG_VERBOSITY") in [
"DEBUG",
"INFO",
"WARNING",
"ERROR",
"CRITICAL"
]:
verbosity = docker_env.get("LOG_VERBOSITY")
else:
log.info("invalid verbosity: '{}".format(docker_env.get("LOG_VERBOSITY")))
verbosity = "INFO"
### opts_json cli options
opts = {
"verbosity": verbosity
}
log.setLevel(verbosity)
# Setup colored console logs
coloredlogs.install(fmt='%(asctime)s [%(levelname)s] %(message)s', level=verbosity, logger=log)
### Reconcile docker env var with corresponding config setting
system_configs = dict()
# copy and save user configs
users_config_copy = copy(configs_list["users"])
# if system not configured in yaml, then set to docker envs
if configs_list.get("system") == None:
log.info(f"System not defined in yaml config file. Importing settings from docker env.")
for env, value in docker_env.items():
log.debug(f"setting: '{env.lower()}' --> '{value}'")
system_configs[env.lower()] = value
# copy into system key
configs_list["system"] = copy(system_configs)
# copy users back
configs_list["users"] = copy(users_config_copy)
# reconcile if env appears in both
else:
for env, value in docker_env.items():
for config, setting in configs_list.get("system").items():
if config == env.lower():
if setting == value:
log.debug(f"yaml config same as docker environment value: '{config}' --> '{setting}'")
system_configs[config] = value
else:
log.warning(f"using config setting instead of docker environment value - {config}: '{value}'--> '{setting}'")
system_configs[config] = setting
if not env.lower() in list(configs_list.get("system").keys()):
log.debug(f"not set in yaml config, setting: '{env.lower()}' --> '{value}'")
system_configs[env.lower()] = value
# copy into system key
configs_list["system"] = copy(system_configs)
# copy users back
configs_list["users"] = copy(users_config_copy)
### Reset verbosity level according to yaml file. log.info occasionally throws EOF errors with high verbosity
if configs_list.get("system").get("log_verbosity") in [
"DEBUG",
"INFO",
"WARNING",
"ERROR",
"CRITICAL"
]:
verbosity = configs_list.get("system").get("log_verbosity")
else:
log.info("invalid verbosity: '{}".format(configs_list.get("system").get("log_verbosity")))
verbosity = "INFO"
### opts_json cli options
opts = {
"verbosity": verbosity
}
log.setLevel(verbosity)
default_user = [{
'name': configs_list.get("system").get("workspace_user"),
'group': configs_list.get("system").get("workspace_group"),
'uid': "1000",
'gid': "100",
'shell': configs_list.get("system").get("workspace_user_shell"),
'password': configs_list.get("system").get("workspace_user_password"),
'directories': [
{
'name': 'home',
'path': os.path.join("/home", configs_list.get("system").get("workspace_user")),
'mode': '755'
},
{
'name': 'resources',
'path': configs_list.get("system").get("resources_path"),
'mode': '755'
},
{
'name': 'workspace',
'path': configs_list.get("system").get("workspace_home"),
'mode': '755'
},
{
'name': 'data',
'path': configs_list.get("system").get("data_path"),
'mode': '755'
},
{
'name': 'apps',
'path': configs_list.get("system").get("apps_path"),
'mode': '755'
},
{
'name': 'app',
'path': configs_list.get("system").get("app_root_dir"),
'mode': '755'
}],
'backup_paths': [
f'/home/{configs_list.get("system").get("workspace_user")}/.config',
f'/home/{configs_list.get("system").get("workspace_user")}/.ssh',
f'/home/{configs_list.get("system").get("workspace_user")}/.zshrc',
f'/home/{configs_list.get("system").get("workspace_user")}/.bashrc',
f'/home/{configs_list.get("system").get("workspace_user")}/.profile',
f'/home/{configs_list.get("system").get("workspace_user")}/.condarc',
f'/home/{configs_list.get("system").get("workspace_user")}/.oh-my-zsh',
f'/home/{configs_list.get("system").get("workspace_user")}/.gitconfig',
f'/home/{configs_list.get("system").get("workspace_user")}/filebrowser.db',
f'/home/{configs_list.get("system").get("workspace_user")}/.local',
f'/home/{configs_list.get("system").get("workspace_user")}/.conda',
f'/home/{configs_list.get("system").get("workspace_user")}/.vscode',
f'/home/{configs_list.get("system").get("workspace_user")}/.jupyter'
],
'conda': {
'env': ''
},
'zsh': {
'set_prompt': configs_list.get("system").get("zsh_prompt"),
'set_theme': configs_list.get("system").get("zsh_theme"),
'set_plugins': configs_list.get("system").get("zsh_plugins"),
'prompt': [
'https://github.com/sindresorhus/pure'
],
'theme': [
'https://github.com/romkatv/powerlevel10k',
'https://github.com/denysdovhan/spaceship-prompt',
'https://github.com/sobolevn/sobole-zsh-theme'
],
'plugins': [
'git',
'k',
'extract',
'cp',
'yarn',
'npm',
'supervisor',
'rsync',
'command-not-found',
'autojump',
'colored-man-pages',
'git-flow',
'git-extras',
'python',
'zsh-autosuggestions',
'history-substring-search',
'zsh-completions',
'ssh-agent',
'https://github.com/zsh-users/zsh-autosuggestions',
'https://github.com/zsh-users/zsh-completions',
'https://github.com/zsh-users/zsh-syntax-highlighting',
'https://github.com/zsh-users/zsh-history-substring-search',
'https://github.com/supercrabtree/k'
]},
'ssh': {
'pub_keys': [''],
'configs': [{
'hostname': '',
'port': '',
'user': '',
'pub_key_auth': '',
'id_only': '',
'id_file_path': ''
}]
},
'filebrowser': {
'port': configs_list.get("system").get("fb_port"),
'base_url': configs_list.get("system").get("fb_base_url"),
'root_dir': configs_list.get("system").get("fb_root_dir")
},
'vscode': {
'bind_addr': configs_list.get("system").get("vscode_bind_addr"),
'base_url': configs_list.get("system").get("vscode_base_url"),
'extensions': [
'ms-python.python',
'almenon.arepl',
'batisteo.vscode-django',
'bierner.color-info',
'bierner.markdown-footnotes',
'bierner.markdown-mermaid',
'bierner.markdown-preview-github-styles',
'CoenraadS.bracket-pair-colorizer-2',
'DavidAnson.vscode-markdownlint',
'donjayamanne.githistory',
'donjayamanne.python-extension-pack',
'eamodio.gitlens',
'hbenl.vscode-test-explorer',
'henriiik.docker-linter',
'kamikillerto.vscode-colorize',
'kisstkondoros.vscode-gutter-preview',
'littlefoxteam.vscode-python-test-adapter',
'magicstack.MagicPython',
'ms-azuretools.vscode-docker',
'ms-toolsai.jupyter',
'naumovs.color-highlight',
'shd101wyy.markdown-preview-enhanced',
'streetsidesoftware.code-spell-checker',
'tht13.html-preview-vscode',
'tht13.python',
'tushortz.python-extended-snippets',
'wholroyd.jinja',
'yzhang.markdown-all-in-one'
]
},
'app': {
'bind_addr': configs_list.get("system").get("app_bind_addr"),
'base_url': configs_list.get("system").get("app_base_url"),
'root_dir': configs_list.get("system").get("app_root_dir"),
'user': configs_list.get("system").get("app_user"),
'password': configs_list.get("system").get("app_password")
}
}]
def set_user_config(user_config, default_user, level):
log.setLevel(level)
log.info(user_config.get("yaml_config_value"))
log.info(user_config.get("docker_env_value"))
if user_config.get("yaml_config_value") == None:
log.info("no setting found for '{}', setting: '{}'".format(user_config.get("yaml_config_name"), user_config.get("docker_env_value")))
if user_config.get("dict_path") == 2:
configs_list.get(user_config.get("dict_path")[0])[user_config.get("dict_path")[1]] = user_config.get("docker_env_value")
elif user_config.get("yaml_config_value") == user_config.get("docker_env_value"):
log.debug("yaml config same as docker environment value: {} --> '{}'".format(user_config.get("docker_env_name"), user_config.get("docker_env_value")))
else:
log.warning("using user config setting instead of docker environment value - {}: '{}'--> '{}'".format(user_config.get("docker_env_name"), user_config.get("docker_env_value"), user_config.get("yaml_config_value")))
user_configs = [
{
"yaml_config_name": "name",
"docker_env_name": "workspace_user",
"yaml_config_value": configs_list.get("users")[0].get("name"),
"docker_env_value": configs_list.get("system").get("workspace_user"),
"dict_path": ["users", "name"]
},
{
"yaml_config_name": "group",
"docker_env_name": "workspace_group",
"yaml_config_value": configs_list.get("users")[0].get("group"),
"docker_env_value": configs_list.get("system").get("workspace_group"),
"dict_path": ["users", "group"]
},
{
"yaml_config_name": "shell",
"docker_env_name": "workspace_user_shell",
"yaml_config_value": configs_list.get("users")[0].get("shell"),
"docker_env_value": configs_list.get("system").get("workspace_user_shell"),
"dict_path": ["users", "shell"]
},
{
"yaml_config_name": "password",
"docker_env_name": "workspace_user_password",
"yaml_config_value": configs_list.get("users")[0].get("password"),
"docker_env_value": configs_list.get("system").get("workspace_user_password"),
"dict_path": ["users", "shell"]
},
]
### Set user config
if configs_list.get("users") == None:
log.info(f"Users not defined in yaml config file. Going with single user mode and importing settings from docker env or setting from default")
configs_list["users"] = default_user
# Show to console
default_user_json = json.dumps(default_user, indent = 4)
elif len(configs_list.get("users")) == 0:
log.info("User's list empty, populate and restart container")
sys.exit()
elif len(configs_list.get("users")) == 1:
log.info("Building a single user environment")
    # What's the point of this? Overwriting workspace envs with the corresponding user envs? Probably better not to touch it and to keep docker envs consistent with this dict. Not overwriting with user settings is also simpler.
#for uc in user_configs:
#set_user_config(uc, default_user, verbosity)
user_count = 0
for u in configs_list.get("users"):
log.debug(f"working on user count: '{user_count}'")
for default_config, default_setting in default_user[0].items():
for config, setting in u.items():
if config == default_config:
if setting == default_setting:
log.debug(f"yaml config setting same as default: '{config}' --> '{setting}'")
else:
log.debug(f"yaml config setting differs from default - {config}: '{default_setting}'--> '{setting}'")
if config == "name":
user = setting
home = os.path.join("/home", user)
if config == "password":
password = setting
if not default_config in list(u.keys()):
log.info(f"not set in yaml config, setting from default settings: '{default_config}' --> '{default_setting}'")
configs_list.get("users")[user_count][default_config] = default_setting
        user_count += 1
log.info(f"setting workspace user to: '{user}'")
elif len(configs_list.get("users")) > 1:
log.info("More than 2 users defined, haven't build this functionality yet. Remove extra users and restart container.")
sys.exit()
# Dump into JSON for passage into scripts
configs_list_json = json.dumps(configs_list)
### Write docker envs to system environment
#for env, value in docker_env.items():
# func.set_env_variable(env, value)
### Clean up envs
# opts_json arguments to json
opts_json = json.dumps(opts)
### Dynamically set MAX_NUM_THREADS
ENV_MAX_NUM_THREADS = os.getenv("MAX_NUM_THREADS", None)
if ENV_MAX_NUM_THREADS:
    # Determine the number of available CPU resources, but limit to a max number
if ENV_MAX_NUM_THREADS.lower() == "auto":
ENV_MAX_NUM_THREADS = str(math.ceil(os.cpu_count()))
try:
# read out docker information - if docker limits cpu quota
cpu_count = math.ceil(
int(
os.popen("cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us")
.read()
.replace("\n", "")
)
/ 100000
)
if cpu_count > 0 and cpu_count < os.cpu_count():
ENV_MAX_NUM_THREADS = str(cpu_count)
        except Exception:
pass
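        # Worked example (hypothetical cgroup v1 values): a cpu.cfs_quota_us of
        # 250000 with the default 100000us period gives ceil(250000 / 100000) = 3,
        # so up to 3 threads are used when the host itself has more cores.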
if (
not ENV_MAX_NUM_THREADS
or not ENV_MAX_NUM_THREADS.isnumeric()
or ENV_MAX_NUM_THREADS == "0"
):
ENV_MAX_NUM_THREADS = "4"
if int(ENV_MAX_NUM_THREADS) > 8:
        # keep at least one thread fewer than the number of cores
ENV_MAX_NUM_THREADS = str(int(ENV_MAX_NUM_THREADS) - 1)
# set a maximum of 32, in most cases too many threads are adding too much overhead
if int(ENV_MAX_NUM_THREADS) > 32:
ENV_MAX_NUM_THREADS = "32"
# only set if it is not None or empty
# OMP_NUM_THREADS: Suggested value: vCPUs / 2 in which vCPUs is the number of virtual CPUs.
    # NOTE: set_env_variable is assumed to live in scripts.functions (see the
    # commented-out func.set_env_variable call above); the bare name would
    # raise NameError here.
    func.set_env_variable(
        "OMP_NUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # OpenMP
    func.set_env_variable(
        "OPENBLAS_NUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # OpenBLAS
    func.set_env_variable("MKL_NUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True)  # MKL
    func.set_env_variable(
        "VECLIB_MAXIMUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # Accelerate
    func.set_env_variable(
        "NUMEXPR_NUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # Numexpr
    func.set_env_variable(
        "NUMEXPR_MAX_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # Numexpr - maximum
    func.set_env_variable(
        "NUMBA_NUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # Numba
    func.set_env_variable(
        "SPARK_WORKER_CORES", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # Spark Worker
    func.set_env_variable(
        "BLIS_NUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # Blis
    func.set_env_variable("TBB_NUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True)  # TBB
# GOTO_NUM_THREADS
### Set container environment
# Get system env and display
system_env = os.environ.copy()
log.debug("System Environments:")
log.debug(func.capture_cmd_stdout('env', system_env))
# Display docker env
log.debug("Docker Environments:")
log.debug(func.capture_cmd_stdout('env', docker_env))
# Merge system, docker env as workspace env and display
workspace_env = func.merge_two_dicts(system_env, docker_env)
log.debug("Workspace Environment")
log.debug(func.capture_cmd_stdout('env', workspace_env))
# Format workspace env as json for passage into scripts
workspace_env_json = json.dumps(workspace_env)
### Configure user
log.info(f"configuring user")
run(
['python', f"/scripts/configure_user.py",
'--opts', opts_json,
'--env', workspace_env_json,
'--configs', configs_list_json
],
env=workspace_env
)
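# A sketch of the receiving side (hypothetical; assumes configure_user.py
# defines matching argparse flags and decodes the JSON payloads):
#   parser = argparse.ArgumentParser()
#   for flag in ('--opts', '--env', '--configs'):
#       parser.add_argument(flag)
#   args = parser.parse_args()
#   configs_list = json.loads(args.configs)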
### Set workspace user and home
workspace_env['USER'] = user
workspace_env['HOME'] = home
workspace_env['WORKSPACE_USER'] = user
workspace_env['WORKSPACE_USER_HOME'] = home
workspace_env['WORKSPACE_USER_PASSWORD'] = password
### Start workspace
sys.exit(
run(
['python', '/scripts/run_workspace.py',
'--opts', opts_json],
env=workspace_env
)
)
| 39.307554 | 221 | 0.625212 | 2,729 | 21,855 | 4.775009 | 0.163796 | 0.05909 | 0.061239 | 0.070601 | 0.421687 | 0.37288 | 0.311872 | 0.256005 | 0.233136 | 0.150027 | 0 | 0.005598 | 0.223564 | 21,855 | 555 | 222 | 39.378378 | 0.762331 | 0.103363 | 0 | 0.164835 | 0 | 0.004396 | 0.39213 | 0.102611 | 0 | 0 | 0 | 0 | 0 | 1 | 0.002198 | false | 0.026374 | 0.030769 | 0 | 0.032967 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da0211204f7b106ec6a65423c21ac69cd0c6c658 | 11,524 | py | Python | py/host.py | black-parrot-hdk/arty-parrot | d5d1c5859cbe6a7acad9147b0d815fe478f92ec9 | [
"BSD-3-Clause"
] | 1 | 2022-01-09T07:45:12.000Z | 2022-01-09T07:45:12.000Z | py/host.py | black-parrot-hdk/arty-parrot | d5d1c5859cbe6a7acad9147b0d815fe478f92ec9 | [
"BSD-3-Clause"
] | 2 | 2021-05-26T02:27:26.000Z | 2021-05-28T07:02:48.000Z | py/host.py | black-parrot-hdk/arty-parrot | d5d1c5859cbe6a7acad9147b0d815fe478f92ec9 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import sys
import argparse
from enum import Enum
from typing import Optional
import serial
from tqdm import tqdm
from nbf import NBF_COMMAND_LENGTH_BYTES, NbfCommand, NbfFile, OPCODE_FINISH, OPCODE_PUTCH, OPCODE_READ_8, OPCODE_WRITE_8, ADDRESS_CSR_FREEZE
DRAM_REGION_START = 0x00_8000_0000
DRAM_REGION_END = 0x10_0000_0000
def _debug_format_message(command: NbfCommand) -> str:
if command.opcode == OPCODE_PUTCH:
return str(command) + f" (putch {repr(command.data[0:1].decode('utf-8'))})"
else:
return str(command)
class LogDomain(Enum):
# meta info on requested commands
COMMAND = 'command'
# sent messages
TRANSMIT = 'transmit'
# received messages out-of-turn
RECEIVE = 'receive'
# received messages in response to a transmitted command
REPLY = 'reply'
@property
def message_prefix(self):
if self == LogDomain.COMMAND:
return "[CMD ]"
elif self == LogDomain.TRANSMIT:
return "[TX ]"
elif self == LogDomain.RECEIVE:
return "[RX ]"
elif self == LogDomain.REPLY:
return "[REPLY]"
else:
raise ValueError(f"unknown log domain '{self}'")
def _log(domain: LogDomain, message: str):
tqdm.write(domain.message_prefix + " " + message)
class HostApp:
def __init__(self, serial_port_name: str, serial_port_baud: int):
self.port = serial.Serial(
port=serial_port_name,
baudrate=serial_port_baud,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
# Without a timeout, SIGINT can't end the process while we are blocking on a read.
timeout=3.0
)
self.commands_sent = 0
self.commands_received = 0
self.reply_violations = 0
def close_port(self):
if self.port.is_open:
self.port.close()
def _send_message(self, command: NbfCommand):
self.port.write(command.to_bytes())
self.port.flush()
self.commands_sent += 1
def _receive_message(self, block=True) -> Optional[NbfCommand]:
if block or self.port.in_waiting >= NBF_COMMAND_LENGTH_BYTES:
buffer = self.port.read(NBF_COMMAND_LENGTH_BYTES)
if len(buffer) != NBF_COMMAND_LENGTH_BYTES:
raise ValueError(f"serial port returned {len(buffer)} bytes, but {NBF_COMMAND_LENGTH_BYTES} requested")
self.commands_received += 1
return NbfCommand.from_bytes(buffer)
else:
return None
def _receive_until_opcode(self, opcode: int, block=True) -> Optional[NbfCommand]:
message = self._receive_message(block=block)
while message is not None and message.opcode != opcode:
_log(LogDomain.RECEIVE, _debug_format_message(message))
message = self._receive_message()
return message
def print_summary_statistics(self):
_log(LogDomain.COMMAND, f" Sent: {self.commands_sent} commands")
_log(LogDomain.COMMAND, f" Received: {self.commands_received} commands")
if self.reply_violations > 0:
_log(LogDomain.COMMAND, f" Reply violations: {self.reply_violations} commands")
def _validate_reply(self, command: NbfCommand, reply: NbfCommand) -> bool:
if not command.is_correct_reply(reply):
self.reply_violations += 1
_log(LogDomain.REPLY, f'Unexpected reply: {command} -> {reply}')
# TODO: abort on invalid reply?
return False
return True
def _validate_outstanding_replies(self, command_queue_expecting_replies: list, sliding_window_num_commands: int, log_all_rx: bool = False):
"""
Reads replies from the incoming data stream, matching them with the provided command queue
in-order and validating each. If more than "sliding_window_num_commands" commands are in the
queue, blocks waiting for an incoming command. Pops all validated commands from the front of
the queue, in-place.
"""
while len(command_queue_expecting_replies) > 0:
sent_command = command_queue_expecting_replies[0]
is_window_full = len(command_queue_expecting_replies) > sliding_window_num_commands
reply = self._receive_until_opcode(
sent_command.opcode,
block=is_window_full
)
if reply is None:
# all queued packets have been processed
break
if log_all_rx:
# TODO: indicate this is an expected reply
_log(LogDomain.RECEIVE, _debug_format_message(reply))
# TODO: verbose/echo mode
was_valid = self._validate_reply(sent_command, reply)
if was_valid:
# TODO: consider aborting on invalid reply
command_queue_expecting_replies.pop(0)
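    # Sliding-window behaviour of the method above, e.g. with
    # sliding_window_num_commands=2 (hypothetical): after sending commands A, B,
    # and C that all expect replies, the queue is [A, B, C]; the window is now
    # full, so the next call blocks until A's reply arrives, validates it, and
    # pops A before more commands are sent.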
def load_file(self, source_file: str, ignore_unfreezes: bool = False, sliding_window_num_commands: int = 0, log_all_messages: bool = False):
file = NbfFile(source_file)
outstanding_commands_expecting_replies = []
command: NbfCommand
for command in tqdm(file, total=file.peek_length(), desc="loading nbf"):
if ignore_unfreezes and command.matches(OPCODE_WRITE_8, ADDRESS_CSR_FREEZE, 0):
continue
if log_all_messages:
_log(LogDomain.TRANSMIT, _debug_format_message(command))
self._send_message(command)
if command.expects_reply():
outstanding_commands_expecting_replies.append(command)
self._validate_outstanding_replies(outstanding_commands_expecting_replies, sliding_window_num_commands, log_all_rx=log_all_messages)
self._validate_outstanding_replies(outstanding_commands_expecting_replies, 0, log_all_rx=log_all_messages)
_log(LogDomain.COMMAND, "Load complete")
def unfreeze(self):
unfreeze_command = NbfCommand.with_values(OPCODE_WRITE_8, ADDRESS_CSR_FREEZE, 0)
self._send_message(unfreeze_command)
reply = self._receive_until_opcode(unfreeze_command.opcode)
self._validate_reply(unfreeze_command, reply)
def listen_perpetually(self, verbose: bool):
_log(LogDomain.COMMAND, "Listening for incoming messages...")
while message := self._receive_message():
# in "verbose" mode, we'll always print the full message, even for putchar
if not verbose and message.opcode == OPCODE_PUTCH:
                print(chr(message.data[0]), end='')
continue
_log(LogDomain.RECEIVE, _debug_format_message(message))
if message.opcode == OPCODE_FINISH:
print(f"FINISH: core {message.address_int}, code {message.data_int}")
# TODO: this assumes unicore
return
def verify(self, reference_file: str):
file = NbfFile(reference_file)
writes_checked = 0
writes_corrupted = 0
command: NbfCommand
for command in tqdm(file, total=file.peek_length(), desc="verifying nbf"):
if command.opcode != OPCODE_WRITE_8:
continue
if command.address_int < DRAM_REGION_START or command.address_int > DRAM_REGION_END - 8:
continue
read_message = NbfCommand.with_values(OPCODE_READ_8, command.address_int, 0)
self._send_message(read_message)
reply = self._receive_until_opcode(OPCODE_READ_8)
self._validate_reply(read_message, reply)
writes_checked += 1
if reply.data != command.data:
writes_corrupted += 1
_log(LogDomain.COMMAND, f"Corruption detected at address 0x{command.address_hex_str}")
_log(LogDomain.COMMAND, f" Expected: 0x{command.data_hex_str}")
_log(LogDomain.COMMAND, f" Actual: 0x{reply.data_hex_str}")
_log(LogDomain.COMMAND, "Verify complete")
_log(LogDomain.COMMAND, f" Writes checked: {writes_checked}")
_log(LogDomain.COMMAND, f" Corrupt writes found: {writes_corrupted}")
if writes_corrupted > 0:
_log(LogDomain.COMMAND, "== CORRUPTION DETECTED ==")
def _load_command(app: HostApp, args):
app.load_file(
args.file,
ignore_unfreezes=args.no_unfreeze,
sliding_window_num_commands=args.window_size,
log_all_messages=args.verbose
)
app.print_summary_statistics()
if args.listen:
app.listen_perpetually(verbose=args.verbose)
def _unfreeze_command(app: HostApp, args):
app.unfreeze()
if args.listen:
app.listen_perpetually(verbose=False)
def _verify_command(app: HostApp, args):
app.verify(args.file)
app.print_summary_statistics()
def _listen_command(app: HostApp, args):
app.listen_perpetually(verbose=False)
if __name__ == "__main__":
root_parser = argparse.ArgumentParser()
root_parser.add_argument('-p', '--port', dest='port', type=str, default='/dev/ttyS4', help='Serial port (full path or name)')
root_parser.add_argument('-b', '--baud', dest='baud_rate', type=int, default=2_000_000, help='Serial port baud rate')
command_parsers = root_parser.add_subparsers(dest="command")
command_parsers.required = True
load_parser = command_parsers.add_parser("load", help="Stream a file of NBF commands to the target")
load_parser.add_argument('file', help="NBF-formatted file to load")
load_parser.add_argument('--no-unfreeze', action='store_true', dest='no_unfreeze', help='Suppress any "unfreeze" commands in the input file')
load_parser.add_argument('--listen', action='store_true', dest='listen', help='Continue listening for incoming messages until program is aborted')
load_parser.add_argument('--window-size', type=int, default=500, dest='window_size', help='Specifies the maximum number of outstanding replies to allow before blocking')
load_parser.add_argument('--verbose', action='store_true', dest='verbose', help='Log all send and received commands, even if valid')
# TODO: add --verify which automatically implies --no-unfreeze then manually unfreezes after
# TODO: add --verbose which prints all sent and received commands
load_parser.set_defaults(handler=_load_command)
unfreeze_parser = command_parsers.add_parser("unfreeze", help="Send an \"unfreeze\" command to the target")
unfreeze_parser.add_argument('--listen', action='store_true', dest='listen', help='Continue listening for incoming messages until program is aborted')
unfreeze_parser.set_defaults(handler=_unfreeze_command)
verify_parser = command_parsers.add_parser("verify", help="Read back the results of an NBF file's memory writes and confirm that their values match the original file")
verify_parser.add_argument('file', help="NBF-formatted file to load")
verify_parser.set_defaults(handler=_verify_command)
listen_parser = command_parsers.add_parser("listen", help="Watch for incoming messages and print the received data")
listen_parser.set_defaults(handler=_listen_command)
args = root_parser.parse_args()
app = HostApp(serial_port_name=args.port, serial_port_baud=args.baud_rate)
try:
args.handler(app, args)
app.close_port()
except KeyboardInterrupt:
app.close_port()
print("Aborted")
sys.exit(1)
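# Example invocations (hypothetical device and file names; all flags are
# defined by the parsers above):
#   ./host.py -p /dev/ttyUSB0 -b 2000000 load program.nbf --listen
#   ./host.py -p /dev/ttyUSB0 verify program.nbf
#   ./host.py -p /dev/ttyUSB0 listen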
| 41.602888 | 173 | 0.674505 | 1,421 | 11,524 | 5.209008 | 0.204082 | 0.02756 | 0.030802 | 0.021616 | 0.215212 | 0.141043 | 0.106188 | 0.075925 | 0.058363 | 0.058363 | 0 | 0.008165 | 0.234814 | 11,524 | 276 | 174 | 41.753623 | 0.831254 | 0.083391 | 0 | 0.09596 | 0 | 0.005051 | 0.155509 | 0.019998 | 0 | 0 | 0 | 0.007246 | 0 | 1 | 0.09596 | false | 0 | 0.035354 | 0 | 0.222222 | 0.035354 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da02379e9f1f2797e8f3d2fe77571451d25da847 | 618 | py | Python | mistex/plugins/citation.py | martinosorb/mistex | 27db70a95ae4bb8bc84c17c9d59c1bef5b5e92f4 | [
"BSD-3-Clause"
] | null | null | null | mistex/plugins/citation.py | martinosorb/mistex | 27db70a95ae4bb8bc84c17c9d59c1bef5b5e92f4 | [
"BSD-3-Clause"
] | null | null | null | mistex/plugins/citation.py | martinosorb/mistex | 27db70a95ae4bb8bc84c17c9d59c1bef5b5e92f4 | [
"BSD-3-Clause"
] | null | null | null | from mistune.inline_parser import LINK_LABEL
CITATION_PATTERN = r'\[\^@(' + LINK_LABEL + r')\]'
def render_citation(text):
return '\\cite{' + text + '}'
def parse_citation(self, m, state):
text = m.group(1)
self._ensure_bib()
return 'citation', self.render(text, state)
def plugin_citation(md):
md.inline.register_rule('citation', CITATION_PATTERN, parse_citation)
    # list.index raises ValueError instead of returning -1, so test membership first
    if 'std_link' in md.inline.rules:
        index = md.inline.rules.index('std_link')
        md.inline.rules.insert(index, 'citation')
    else:
        md.inline.rules.append('citation')
md.renderer.register('citation', render_citation)
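# A minimal usage sketch (assuming mistune 2.x, where plugins are passed to
# create_markdown, and mistex's own inline parser/renderer, which supply
# _ensure_bib and the LaTeX output hooks):
#
#   import mistune
#   md = mistune.create_markdown(renderer=LatexRenderer(), plugins=[plugin_citation])
#   md('As argued by [^@smith2020], ...')   # -> ... \cite{smith2020} ...
#
# "LatexRenderer" and "smith2020" are placeholder names for illustration.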
| 22.888889 | 73 | 0.666667 | 80 | 618 | 4.975 | 0.4375 | 0.080402 | 0.09799 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003945 | 0.179612 | 618 | 26 | 74 | 23.769231 | 0.781065 | 0 | 0 | 0 | 0 | 0 | 0.105178 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1875 | false | 0 | 0.0625 | 0.0625 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da03370dc8f2f31bcdc7fd9d8a5697527015558e | 2,881 | py | Python | 2020_August_Leetcode_30_days_challenge/Week_3_Non-overlapping Intervals/by_sorting.py | coderMaruf/leetcode-1 | 20ffe26e43999e44c8acf9800acb371a49bb5853 | [
"MIT"
] | 32 | 2020-01-05T13:37:16.000Z | 2022-03-26T07:27:09.000Z | 2020_August_Leetcode_30_days_challenge/Week_3_Non-overlapping Intervals/by_sorting.py | coderMaruf/leetcode-1 | 20ffe26e43999e44c8acf9800acb371a49bb5853 | [
"MIT"
] | null | null | null | 2020_August_Leetcode_30_days_challenge/Week_3_Non-overlapping Intervals/by_sorting.py | coderMaruf/leetcode-1 | 20ffe26e43999e44c8acf9800acb371a49bb5853 | [
"MIT"
] | 8 | 2020-06-18T16:17:27.000Z | 2022-03-15T23:58:18.000Z | '''
Description:
Given a collection of intervals, find the minimum number of intervals you need to remove to make the rest of the intervals non-overlapping.
Example 1:
Input: [[1,2],[2,3],[3,4],[1,3]]
Output: 1
Explanation: [1,3] can be removed and the rest of intervals are non-overlapping.
Example 2:
Input: [[1,2],[1,2],[1,2]]
Output: 2
Explanation: You need to remove two [1,2] to make the rest of intervals non-overlapping.
Example 3:
Input: [[1,2],[2,3]]
Output: 0
Explanation: You don't need to remove any of the intervals since they're already non-overlapping.
Note:
You may assume the interval's end point is always bigger than its start point.
Intervals like [1,2] and [2,3] have borders "touching" but they don't overlap each other.
'''
from typing import List
class Solution:
def eraseOverlapIntervals(self, intervals: List[List[int]]) -> int:
# sort segments by start index in ascending order
intervals.sort( key = lambda segment: segment[0] )
last_compare_idx = 0
removal_counter = 0
for cur_idx in range(1, len(intervals)):
cur_start, cur_end = intervals[cur_idx]
last_start, last_end = intervals[last_compare_idx]
if cur_start < last_end:
# need to remove one interval to avoid overlapping
removal_counter += 1
                if cur_end < last_end:
                    # drop the previously kept interval: the current one ends earlier
                    last_compare_idx = cur_idx
                else:
                    # drop the current interval: the previously kept one ends earlier
                    # last_compare_idx stays the same
                    pass
else:
# so far so good, no need to remove
last_compare_idx = cur_idx
return removal_counter
# n : the length of input list, intervals
## Time Complexity: O( n log n)
#
# The overhead in time is the cost of sorting, which is of O( n log n ).
## Space Complexity: O( 1 )
#
# The overhead in space is the storage for loop index and temporary variable, which is of O( 1 ).
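## Worked example
#
# intervals = [[1,2],[2,3],[3,4],[1,3]] sorts (by start) to [[1,2],[1,3],[2,3],[3,4]].
# [1,3] overlaps [1,2] and ends later, so it is the one removed (counter = 1);
# [2,3] starts exactly where [1,2] ends (no overlap), and [3,4] likewise. Answer: 1.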
import unittest
class Testing( unittest.TestCase ):
def test_case_1( self ):
result = Solution().eraseOverlapIntervals( intervals=[[1,2],[2,3],[3,4],[1,3]] )
self.assertEqual(result, 1)
def test_case_2( self ):
result = Solution().eraseOverlapIntervals( intervals=[[1,2],[1,2],[1,2]] )
self.assertEqual(result, 2)
def test_case_3( self ):
result = Solution().eraseOverlapIntervals( intervals=[[1,2],[2,3]] )
self.assertEqual(result, 0)
if __name__ == '__main__':
unittest.main() | 25.052174 | 139 | 0.588684 | 386 | 2,881 | 4.297927 | 0.339378 | 0.014467 | 0.036166 | 0.009644 | 0.191682 | 0.141049 | 0.10006 | 0.069922 | 0.062688 | 0 | 0 | 0.032209 | 0.321069 | 2,881 | 115 | 140 | 25.052174 | 0.815951 | 0.449844 | 0 | 0.129032 | 0 | 0 | 0.005122 | 0 | 0 | 0 | 0 | 0 | 0.096774 | 1 | 0.129032 | false | 0.032258 | 0.064516 | 0 | 0.290323 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da0469fe0ec53d36c9f4e75701bb9541ada5eeed | 1,220 | py | Python | hive_plug_play/engine/processor.py | seakintruth/hive-plug-play | 032caed7a0690a58410b3d4e93a1fdecf2009d58 | [
"MIT"
] | 3 | 2021-05-11T07:12:05.000Z | 2021-10-04T04:01:38.000Z | hive_plug_play/engine/processor.py | seakintruth/hive-plug-play | 032caed7a0690a58410b3d4e93a1fdecf2009d58 | [
"MIT"
] | 9 | 2021-06-02T03:43:01.000Z | 2021-07-23T14:52:03.000Z | hive_plug_play/engine/processor.py | seakintruth/hive-plug-play | 032caed7a0690a58410b3d4e93a1fdecf2009d58 | [
"MIT"
] | 1 | 2021-05-24T15:57:20.000Z | 2021-05-24T15:57:20.000Z | from os import truncate
from hive_plug_play.database.handlers import PlugPlayDb
class BlockProcessor:
@classmethod
def init(cls, config):
cls.config = config
cls.db = PlugPlayDb(config)
cls.head_block = {}
cls.block_num = 0
cls.block_time = ''
@classmethod
def check_op_id(cls, op_id):
allowed_op_ids = cls.config['op_ids']
if allowed_op_ids == []:
return True
else:
return op_id in allowed_op_ids
@classmethod
def process_block(cls, block_num, block):
prev = block['previous']
block_hash = block['block_id']
timestamp = block['timestamp']
cls.db.add_block(block_num, block_hash, prev, timestamp)
transactions = block['transactions']
for i in range(len(transactions)):
trans = transactions[i]
for op in trans['operations']:
if op['type'] == 'custom_json_operation':
if cls.check_op_id(op['value']['id']):
cls.db.add_op(block_num, block['transaction_ids'][i], op['value'])
cls.db._save()
cls.block_num = block_num
cls.block_time = timestamp | 32.105263 | 90 | 0.584426 | 149 | 1,220 | 4.557047 | 0.355705 | 0.070692 | 0.076583 | 0.047128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001183 | 0.307377 | 1,220 | 38 | 91 | 32.105263 | 0.802367 | 0 | 0 | 0.090909 | 0 | 0 | 0.085995 | 0.017199 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.060606 | 0 | 0.242424 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da058a79bcff3d1633c9de586676094982ec1208 | 24,030 | py | Python | scripts/populate_conferences.py | sf2ne/Playground | 95b2d222d7ac43baca0249acbfc34e043d6a95b3 | [
"Apache-2.0"
] | null | null | null | scripts/populate_conferences.py | sf2ne/Playground | 95b2d222d7ac43baca0249acbfc34e043d6a95b3 | [
"Apache-2.0"
] | 13 | 2020-03-24T15:29:41.000Z | 2022-03-11T23:15:28.000Z | scripts/populate_conferences.py | sf2ne/Playground | 95b2d222d7ac43baca0249acbfc34e043d6a95b3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
import os
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from framework.auth.core import User
from website import settings
from website.app import init_app
from website.conferences.model import Conference
def main():
init_app(set_backends=True, routes=False)
populate_conferences()
MEETING_DATA = {
'spsp2014': {
'name': 'Society for Personality and Social Psychology 2014',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'asb2014': {
'name': 'Association of Southeastern Biologists 2014',
'info_url': 'http://www.sebiologists.org/meetings/talks_posters.html',
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'aps2014': {
'name': 'Association for Psychological Science 2014',
'info_url': 'http://centerforopenscience.org/aps/',
'logo_url': '/static/img/2014_Convention_banner-with-APS_700px.jpg',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'annopeer2014': {
'name': '#annopeer',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'cpa2014': {
'name': 'Canadian Psychological Association 2014',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'filaments2014': {
'name': 'National Radio Astronomy Observatory Filaments 2014',
'info_url': None,
'logo_url': 'https://science.nrao.edu/science/meetings/2014/'
'filamentary-structure/images/filaments2014_660x178.png',
'active': False,
'admins': [
'lvonschi@nrao.edu',
# 'Dkim@nrao.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'bitss2014': {
'name': 'Berkeley Initiative for Transparency in the Social Sciences Research Transparency Forum 2014',
'info_url': None,
'logo_url': os.path.join(
settings.STATIC_URL_PATH,
'img',
'conferences',
'bitss.jpg',
),
'active': False,
'admins': [
'gkroll@berkeley.edu',
'awais@berkeley.edu',
],
'public_projects': True,
'poster': False,
'talk': True,
},
'spsp2015': {
'name': 'Society for Personality and Social Psychology 2015',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [
'meetings@spsp.org',
],
'poster': True,
'talk': True,
},
'aps2015': {
'name': 'Association for Psychological Science 2015',
'info_url': None,
'logo_url': 'http://www.psychologicalscience.org/images/APS_2015_Banner_990x157.jpg',
'active': True,
'admins': [
],
'public_projects': True,
'poster': True,
'talk': True,
},
'icps2015': {
'name': 'International Convention of Psychological Science 2015',
'info_url': None,
'logo_url': 'http://icps.psychologicalscience.org/wp-content/themes/deepblue/images/ICPS_Website-header_990px.jpg',
'active': False,
'admins': [
],
'public_projects': True,
'poster': True,
'talk': True,
},
'mpa2015': {
'name': 'Midwestern Psychological Association 2015',
'info_url': None,
'logo_url': 'http://www.midwesternpsych.org/resources/Pictures/MPA%20logo.jpg',
'active': True,
'admins': [
'mpa@kent.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'NCCC2015': {
'name': 'North Carolina Cognition Conference 2015',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [
'aoverman@elon.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'VPRSF2015': {
'name': 'Virginia Piedmont Regional Science Fair 2015',
'info_url': None,
'logo_url': 'http://vprsf.org/wp-content/themes/VPRSF/images/logo.png',
'active': False,
'admins': [
'director@vprsf.org',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'APRS2015': {
'name': 'UVA Annual Postdoctoral Research Symposium 2015',
'info_url': None,
'logo_url': 'http://s1.postimg.org/50qj9u6i7/GPA_Logo.jpg',
'active': False,
'admins': [
'mhurst@virginia.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'ASB2015': {
'name': 'Association of Southeastern Biologists 2015',
'info_url': None,
'logo_url': 'http://www.sebiologists.org/wp/wp-content/uploads/2014/09/banner_image_Large.png',
'active': False,
'admins': [
'amorris.mtsu@gmail.com',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'TeaP2015': {
'name': 'Tagung experimentell arbeitender Psychologen 2015',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [
],
'public_projects': True,
'poster': True,
'talk': True,
},
'VSSEF2015': {
'name': 'Virginia State Science and Engineering Fair 2015',
'info_url': 'http://www.vmi.edu/conferences/vssef/vssef_home/',
'logo_url': 'http://www.vmi.edu/uploadedImages/Images/Headers/vssef4.jpg',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'RMPA2015': {
'name': 'Rocky Mountain Psychological Association 2015',
'info_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/85th_annual_rmpa_conference_program_hr.pdf',
'logo_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/header_images/1397234084.jpg',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ARP2015': {
'name': 'Association for Research in Personality 2015',
'info_url': 'http://www.personality-arp.org/conference/',
'logo_url': 'http://www.personality-arp.org/wp-content/uploads/conference/st-louis-arp.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'SEP2015': {
'name': 'Society of Experimental Psychologists Meeting 2015',
'info_url': 'http://faculty.virginia.edu/Society_of_Experimental_Psychologists/',
'logo_url': 'http://www.sepsych.org/nav/images/SEP-header.gif',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'Reid2015': {
'name': 'L. Starling Reid Undergraduate Psychology Conference 2015',
'info_url': 'http://avillage.web.virginia.edu/Psych/Conference',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'NEEPS2015': {
'name': 'Northeastern Evolutionary Psychology Conference 2015',
'info_url': 'http://neeps2015.weebly.com/',
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'VaACS2015': {
'name': 'Virginia Section American Chemical Society Student Poster Session 2015',
'info_url': 'http://virginia.sites.acs.org/',
'logo_url': 'http://virginia.sites.acs.org/Bulletin/15/UVA.jpg',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'MADSSCi2015': {
'name': 'Mid-Atlantic Directors and Staff of Scientific Cores & Southeastern Association of Shared Services 2015',
'info_url': 'http://madssci.abrf.org',
'logo_url': 'http://s24.postimg.org/qtc3baefp/2015madssci_seasr.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'NRAO2015': {
'name': 'National Radio Astronomy Observatory Accretion 2015',
'info_url': 'https://science.nrao.edu/science/meetings/2015/accretion2015/posters',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ARCS2015': {
'name': 'Advancing Research Communication and Scholarship 2015',
'info_url': 'http://commons.pacificu.edu/arcs/',
'logo_url': 'http://commons.pacificu.edu/assets/md5images/4dfd167454e9f4745360a9550e189323.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'singlecasedesigns2015': {
'name': 'Single Case Designs in Clinical Psychology: Uniting Research and Practice',
'info_url': 'https://www.royalholloway.ac.uk/psychology/events/eventsarticles/singlecasedesignsinclinicalpsychologyunitingresearchandpractice.aspx',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'OSFM2015': {
'name': 'OSF for Meetings 2015',
'info_url': None,
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'JSSP2015': {
'name': 'Japanese Society of Social Psychology 2015',
'info_url': 'http://www.socialpsychology.jp/conf2015/index.html',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'4S2015': {
'name': 'Society for Social Studies of Science 2015',
'info_url': 'http://www.4sonline.org/meeting',
'logo_url': 'http://www.4sonline.org/ee/denver-skyline.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'IARR2016': {
'name': 'International Association for Relationship Research 2016',
'info_url': 'http://iarr.psych.utoronto.ca/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'IA2015': {
'name': 'Inclusive Astronomy 2015',
'info_url': 'https://vanderbilt.irisregistration.com/Home/Site?code=InclusiveAstronomy2015',
'logo_url': 'https://vanderbilt.blob.core.windows.net/images/Inclusive%20Astronomy.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'R2RC': {
'name': 'Right to Research Coalition',
'info_url': None,
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'OpenCon2015': {
'name': 'OpenCon2015',
'info_url': 'http://opencon2015.org/',
'logo_url': 'http://s8.postimg.org/w9b30pxyd/Open_Con2015_new_logo.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ESIP2015': {
'name': 'Earth Science Information Partners 2015',
'info_url': 'http://esipfed.org/',
'logo_url': 'http://s30.postimg.org/m2uz2g4pt/ESIP.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'SPSP2016': {
'name': 'Society for Personality and Social Psychology 2016 ',
'info_url': 'http://meeting.spsp.org',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'NACIII': {
'name': '2015 National Astronomy Consortium (NAC) III Workshop',
'info_url': 'https://info.nrao.edu/do/odi/meetings/2015/nac111/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'CDS2015': {
'name': 'Cognitive Development Society 2015',
'info_url': 'http://meetings.cogdevsoc.org/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'SEASR2016': {
'name': 'Southeastern Association of Shared Resources 2016',
'info_url': 'http://seasr.abrf.org',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'Accretion2015': {
'name': 'Observational Evidence of Gas Accretion onto Galaxies?',
'info_url': 'https://science.nrao.edu/science/meetings/2015/accretion2015',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'2020Futures': {
'name': 'U.S. Radio/Millimeter/Submillimeter Science Futures in the 2020s',
'info_url': 'https://science.nrao.edu/science/meetings/2015/2020futures/home',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'RMPA2016': {
'name': 'Rocky Mountain Psychological Association 2016',
'info_url': 'http://www.rockymountainpsych.org/convention-info.html',
'logo_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/header_images/1397234084.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'CNI2015': {
'name': 'Coalition for Networked Information (CNI) Fall Membership Meeting 2015',
'info_url': 'https://wp.me/P1LncT-64s',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': False,
'talk': True,
},
'SWPA2016': {
'name': 'Southwestern Psychological Association Convention 2016',
'info_url': 'https://www.swpsych.org/conv_dates.php',
'logo_url': 'http://s28.postimg.org/xbwyqqvx9/SWPAlogo4.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ESIP2016W': {
'name': 'Earth Science Information Partners Winter Meeting 2016',
'info_url': 'http://commons.esipfed.org/2016WinterMeeting',
'logo_url': 'http://s30.postimg.org/m2uz2g4pt/ESIP.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'MiamiBrainhack15': {
'name': 'University of Miami Brainhack 2015',
'info_url': 'http://brainhack.org/americas/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'PsiChiRepository': {
'name': 'Psi Chi',
'info_url': 'http://www.psichi.org/?ResearchAdvisory#.VmBpeOMrI1g',
'logo_url': 'http://s11.postimg.org/4g2451vcz/Psi_Chi_Logo.png',
'admins': [
'research.director@psichi.org',
],
'field_names': {
'submission1': 'measures',
'submission2': 'materials',
'submission1_plural': 'measures/scales',
'submission2_plural': 'study materials',
'meeting_title_type': 'Repository',
'add_submission': 'materials',
'mail_subject': 'Title',
'mail_message_body': 'Measure or material short description',
'mail_attachment': 'Your measure/scale or material file(s)'
},
},
'GI2015': {
'name': 'Genome Informatics 2015',
'info_url': 'https://meetings.cshl.edu/meetings.aspx?meet=info&year=15',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'MADSSCi2016': {
'name': 'Mid-Atlantic Directors and Staff of Scientific Cores & Southeastern Association of Shared Services 2016',
'info_url': 'http://madssci.abrf.org',
'logo_url': 'http://madssci.abrf.org/sites/default/files/madssci-logo-bk.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'SMM2015': {
'name': 'The Society for Marine Mammalogy',
'info_url': 'https://www.marinemammalscience.org/conference/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'TESS': {
'name': 'Time-sharing Experiments for the Social Sciences',
'info_url': 'http://www.tessexperiments.org',
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': False,
'talk': True,
'field_names': {
'submission1': 'poster',
'submission2': 'study',
'submission1_plural': 'posters',
'submission2_plural': 'studies',
'meeting_title_type': 'Studies',
'add_submission': 'studies',
}
},
'ASCERM2016': {
'name': 'ASCE Rocky Mountain Student Conference 2016',
'info_url': 'http://luninuxos.com/asce/',
'logo_url': 'http://s2.postimg.org/eaduh2ovt/2016_ASCE_Rocky_Mtn_banner.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': False,
'talk': True,
},
'ARCA2016': {
'name': '5th Applied Research Conference in Africa',
'info_url': 'http://www.arcaconference.org/',
'logo_url': 'http://www.arcaconference.org/images/ARCA_LOGO_NEW.JPG',
'active': True,
'admins': [],
'public_projects': True,
'poster': False,
'talk': True,
},
'CURCONF2016': {
'name': 'CUR Biennial Conference 2016',
'info_url': 'http://www.cur.org/conferences_and_events/biennial2016/',
'logo_url': 'http://s11.postimg.org/v8feuna4y/Conference_logo_eps.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'CATALISE2016': {
'name': 'Criteria and Terminology Applied to Language Impairments: Synthesising the Evidence (CATALISE) 2016',
'info_url': None,
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'Emergy2016': {
'name': '9th Biennial Emergy Research Conference',
'info_url': 'http://www.cep.ees.ufl.edu/emergy/conferences/ERC09_2016/index.shtml',
'logo_url': 'http://s12.postimg.org/uf9ioqmct/emergy.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'aps2016': {
'name': '28th APS Annual Convention',
'info_url': 'http://www.psychologicalscience.org/convention',
'logo_url': 'http://www.psychologicalscience.org/redesign/wp-content/uploads/2015/03/APS_2016_Banner_990x157.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'jssp2016': {
'name': 'Japanese Society of Social Psychology 2016',
'info_url': 'http://www.socialpsychology.jp/conf2016/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'sepech2016': {
'name': 'XI SEPECH - Research Seminar in Human Sciences (Seminário de Pesquisa em Ciências Humanas)',
'info_url': 'http://www.uel.br/eventos/sepech/sepech2016/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'etmaal2016': {
'name': 'Etmaal van de Communicatiewetenschap 2016 - Media Psychology',
'info_url': 'https://etmaal2016.wordpress.com',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'WSAN2016': {
'name': 'WSAN2016 Erasmus University Rotterdam',
'info_url': 'http://www.humane.eu/wsan/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ContainerStrategies': {
'name': 'Container Strategies for Data & Software Preservation',
'info_url': 'https://daspos.crc.nd.edu/index.php/workshops/container-strategies-for-data-software-preservation-that-promote-open-science',
'logo_url': 'http://s17.postimg.org/8nl1v5mxb/Screen_Shot_2016_03_02_at_9_05_24_PM.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
},
'CNI2016': {
'name': 'Coalition for Networked Information (CNI) Spring Membership Meeting 2016',
'info_url': 'https://wp.me/P1LncT-6fd',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': False,
'talk': True,
},
}
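
# Illustrative addition (not in the original script): a minimal sketch of a
# sanity check over MEETING_DATA. The helper name and the expected key set
# are assumptions, drawn only from the keys populate_conferences() below
# actually reads.
def _check_meeting_data(data):
    """Print a warning for entries whose keys deviate from the expected set."""
    expected = {'name', 'info_url', 'logo_url', 'active', 'admins',
                'public_projects', 'poster', 'talk', 'field_names'}
    for endpoint, attrs in data.items():
        unexpected = sorted(set(attrs) - expected)
        if unexpected:
            print('{}: unexpected keys {}'.format(endpoint, unexpected))
        if not attrs.get('name'):
            print('{}: missing display name'.format(endpoint))

# e.g. _check_meeting_data(MEETING_DATA)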

def populate_conferences():
    """Create a Conference record for each entry in MEETING_DATA, or update
    the existing record when the endpoint is already registered."""
    for meeting, attrs in MEETING_DATA.items():  # .items() works on Python 2 and 3
        meeting = meeting.strip()
        # Resolve admin e-mail addresses to registered User objects.
        admin_emails = attrs.pop('admins', [])
        admin_objs = []
        for email in admin_emails:
            try:
                user = User.find_one(Q('username', 'iexact', email))
                admin_objs.append(user)
            except ModularOdmException:
                raise RuntimeError('Username {0!r} is not registered.'.format(email))
        custom_fields = attrs.pop('field_names', {})
        conf = Conference(
            endpoint=meeting, admins=admin_objs, **attrs
        )
        conf.field_names.update(custom_fields)
        try:
            conf.save()
        except ModularOdmException:
            # The endpoint already exists: merge the new attributes into it.
            conf = Conference.find_one(Q('endpoint', 'eq', meeting))
            for key, value in attrs.items():
                if isinstance(value, dict):
                    current = getattr(conf, key)
                    current.update(value)
                    setattr(conf, key, current)
                else:
                    setattr(conf, key, value)
            conf.admins = admin_objs
            changed_fields = conf.save()
            if changed_fields:
                print('Updated {}: {}'.format(meeting, changed_fields))
        else:
            print('Added new Conference: {}'.format(meeting))
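
# Hypothetical usage -- a sketch only. This module is normally run through
# the __main__ guard below, whose main() is defined earlier in the file; the
# init_app import shown here is an assumption, not verified against the repo.
#
#     from website.app import init_app
#     init_app(set_backends=True, routes=False)
#     populate_conferences()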

if __name__ == '__main__':
    main()

# === clinicadl/clinicadl/subject_level/train_autoencoder.py ===
# (source repo: 921974496/AD-DL, license: MIT)
] | null | null | null | from __future__ import print_function
import argparse
from os import path
from time import time
import sys
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from .utils import ae_finetuning
from ..tools.deep_learning.iotools import Parameters
from ..tools.deep_learning.data import MinMaxNormalization, MRIDataset, load_data
from ..tools.deep_learning import create_autoencoder, commandline_to_json

def train_autoencoder(params):
    """Train a convolutional autoencoder.

    Parameters
    ----------
    params: Parameters object from the iotools module holding all the
        options needed to train the autoencoder.
    """
    if params.evaluation_steps % params.accumulation_steps != 0 and params.evaluation_steps != 1:
        raise ValueError('Evaluation steps %d must be a multiple of accumulation steps %d' %
                         (params.evaluation_steps, params.accumulation_steps))

    transformations = MinMaxNormalization() if params.minmaxnormalization else None

    total_time = time()
    criterion = torch.nn.MSELoss()

    training_tsv, valid_tsv = load_data(params.tsv_path, params.diagnoses,
                                        params.split, params.n_splits,
                                        params.baseline)

    data_train = MRIDataset(params.input_dir, training_tsv,
                            params.preprocessing, transformations)
    data_valid = MRIDataset(params.input_dir, valid_tsv,
                            params.preprocessing, transformations)

    train_loader = DataLoader(data_train,
                              batch_size=params.batch_size,
                              shuffle=True,
                              num_workers=params.num_workers,
                              drop_last=True)

    valid_loader = DataLoader(data_valid,
                              batch_size=params.batch_size,
                              shuffle=False,
                              num_workers=params.num_workers,
                              drop_last=False)

    # Record the Python and PyTorch versions used for this run.
    with open(path.join(params.output_dir, 'python_version.txt'), 'w') as text_file:
        text_file.write('Version of python: %s \n' % sys.version)
        text_file.write('Version of pytorch: %s \n' % torch.__version__)

    decoder = create_autoencoder(params.model, params.pretrained_path,
                                 difference=params.pretrained_difference)
    # getattr replaces the original eval() call but selects the same optimizer class.
    optimizer = getattr(torch.optim, params.optimizer)(
        filter(lambda x: x.requires_grad, decoder.parameters()),
        params.learning_rate, weight_decay=params.weight_decay)

    if params.add_sigmoid:
        # Replace a trailing ReLU with a sigmoid so the reconstruction matches
        # inputs normalised to [0, 1].
        if isinstance(decoder.decoder[-1], nn.ReLU):
            decoder.decoder = nn.Sequential(*list(decoder.decoder)[:-1])
        decoder.decoder.add_module("sigmoid", nn.Sigmoid())

    ae_finetuning(decoder, train_loader, valid_loader, criterion, optimizer, False, params)

    total_time = time() - total_time
    print('Total time', total_time)
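
# Illustrative sketch (not part of the pipeline) of what the add_sigmoid
# branch above does, shown on a toy decoder; it assumes nothing beyond
# torch.nn.
#
#     toy = nn.Sequential(nn.Linear(8, 8), nn.ReLU())
#     if isinstance(toy[-1], nn.ReLU):
#         toy = nn.Sequential(*list(toy)[:-1])
#     toy.add_module("sigmoid", nn.Sigmoid())
#     # toy is now Linear -> Sigmoid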

# Kept for reference: the original command-line entry point; `parser` and the
# bare names passed to Parameters are expected to be defined by the CLI wrapper.
# if __name__ == "__main__":
#     commandline = parser.parse_known_args()
#     commandline_to_json(commandline, 'ConvAutoencoder')
#     options = commandline[0]
#     if commandline[1]:
#         print("unknown arguments: %s" % commandline[1])
#     train_params_autoencoder = Parameters(tsv_path, output_dir, input_dir, model)
#     train_params_autoencoder.write(options)
#     train_autoencoder(train_params_autoencoder)