hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7a33a8a8695860ddb6163f6f5857cdcec9984aa3 | 822 | py | Python | cmskit/recipes/menu.py | ozgurgunes/django-cmskit | 19d14fbb57702a6c56b6b3a5d859c93533ff1535 | [
"MIT"
] | 1 | 2015-09-28T10:10:34.000Z | 2015-09-28T10:10:34.000Z | cmskit/recipes/menu.py | ozgurgunes/django-cmskit | 19d14fbb57702a6c56b6b3a5d859c93533ff1535 | [
"MIT"
] | null | null | null | cmskit/recipes/menu.py | ozgurgunes/django-cmskit | 19d14fbb57702a6c56b6b3a5d859c93533ff1535 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import get_language
from cms.menu_bases import CMSAttachMenu
from menus.base import Menu, NavigationNode
from menus.menu_pool import menu_pool
from cmskit.recipes.models import Recipe
class RecipesMenu(CMSAttachMenu):
name = _("Recipes menu")
def get_nodes(self, request):
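        # Build one NavigationNode per published recipe so the attached
        # menu mirrors the recipe list.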
nodes = []
for recipe in Recipe.objects.published().select_related():
try:
node = NavigationNode(
recipe.title,
recipe.get_absolute_url(),
recipe.pk
)
nodes.append(node)
            except Exception:
                # Skip recipes that fail to build a navigation node.
                pass
return nodes
menu_pool.register_menu(RecipesMenu)
| 27.4 | 66 | 0.600973 | 86 | 822 | 5.593023 | 0.569767 | 0.049896 | 0.06237 | 0.108108 | 0.133056 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001805 | 0.326034 | 822 | 29 | 67 | 28.344828 | 0.866426 | 0.025547 | 0 | 0 | 0 | 0 | 0.015019 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0.045455 | 0.272727 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a33ddcfa2b3e722458ff7163d0ad438baf597ed | 6,218 | py | Python | code/results-past/figure-3-b.py | shaifulcse/codemetrics-with-context-replication | 9f0fe6e840d204b70efc9610e6887a64f9a51ce7 | [
"MIT"
] | null | null | null | code/results-past/figure-3-b.py | shaifulcse/codemetrics-with-context-replication | 9f0fe6e840d204b70efc9610e6887a64f9a51ce7 | [
"MIT"
] | null | null | null | code/results-past/figure-3-b.py | shaifulcse/codemetrics-with-context-replication | 9f0fe6e840d204b70efc9610e6887a64f9a51ce7 | [
"MIT"
] | null | null | null | """
Plot the CDF of per-project Kendall correlations between a code metric
(SLOCStandard by default) and several change metrics (figure 3-b).
"""
import re
import os
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats.stats import kendalltau
import seaborn as sns
sns.set(font_scale = 1.2)
fig = plt.figure()
ax = fig.add_subplot(111)
PROJECTS_LIST = "../../info/settings-project.txt"
RESULT_PATH="../../data/complexity-and-change-data/"
styles=["-", "--","-.", ":", "-", "--","-.", ":"]
marks=["^", "d", "o", "v", "p", "s", "<", ">"]
marks_size=[15, 17, 10, 15, 17, 10, 12,15]
marker_color=['#0F52BA','#ff7518','#6CA939','#e34234','#756bb1','brown','#c994c7', '#636363']
gap = [5,5,3,4,4,3]
PROJECTS = {}
STATS = {}
correl_type = {}
def list_projects():
fr = open(PROJECTS_LIST,"r")
lines = fr.readlines()
fr.close()
projects = []
    for line in lines:
line = line.strip()
data = re.findall("[^\t]+",line)
if data[0] not in PROJECTS:
PROJECTS[data[0]]=1
### to help step2
def find_index(feature, project):
fr = open(RESULT_PATH+project+".txt")
line = fr.readline() ## header
line = line.strip()
data = re.findall("[^\t]+",line)
for i in range(len(data)):
if data[i] == feature:
return i
def parse_data():
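    # Read each project's data file and accumulate change statistics per
    # (method, feature value) pair, honoring the age restriction.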
global STATS
for project in PROJECTS:
        list_indexes(feature, project)  # column indexes for this project's data file
STATS[project]={}
fr = open(RESULT_PATH+project+".txt")
line = fr.readline() ## header
lines = fr.readlines()
fr.close()
for line in lines:
line = line.strip()
data = re.findall("[^\t]+",line)
age = int(data[0])
if apply_age_restriction == 1 and age < age_restriction:
continue
method = data[len(data)-1]
if method not in STATS[project]:
STATS[project][method]={}
feature_values = re.findall("[^,]+",data[feature_index])
date_values = re.findall("[^,]+",data[date_index])
diff_values = re.findall("[^,]+",data[diff_index])
addition_values = re.findall("[^,]+",data[addition_index])
edit_values = re.findall("[^,]+",data[edit_index])
track = 0
for i in range(1, len(diff_values)):
if int(date_values[i]) > age_restriction: ## change not within time
break
if int(diff_values[i]) == 0: ## no change in content
continue
track = 1
feature_value = int(feature_values[i-1]) ## current change happened because of the previous state
if feature_value not in STATS[project][method]:
STATS[project][method][feature_value]=build_dic()
update_stats(project, method, feature_value, 1, int(addition_values[i]), int(diff_values[i]), int(edit_values[i]))
if track == 0: ## there was no change
feature_value = int(feature_values[0]) ##
if feature_value not in STATS[project][method]:
STATS[project][method][feature_value]=build_dic()
update_stats(project, method, feature_value, 0, 0, 0, 0)
def update_stats(project, method, feature_value, rev, add, diff, edit):
# print project, method
STATS[project][method][feature_value][changeTypes[0]] += rev ###
STATS[project][method][feature_value][changeTypes[1]] += add
STATS[project][method][feature_value][changeTypes[2]] += diff
STATS[project][method][feature_value][changeTypes[3]] += edit
def build_dic():
dic = {}
for t in changeTypes:
dic[t]=0
return dic
def list_indexes(feature, project):
global feature_index
global date_index
global diff_index
global addition_index
global edit_index
feature_index = find_index(feature, project)
date_index = find_index("ChangeDates", project)
diff_index = find_index("DiffSizes", project)
addition_index = find_index("NewAdditions", project)
edit_index = find_index("EditDistances", project)
def correlation():
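    # Compute Kendall's tau between feature values and each change metric,
    # collecting one correlation per project and change type.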
for project in STATS:
for type in changeTypes:
X=[]
Y=[]
for method in STATS[project]:
for feature_value in STATS[project][method]:
X.append(feature_value)
Y.append(STATS[project][method][feature_value][type])
cr = kendalltau(X, Y)
#print project, type, cr, cr[0]
if type not in correl_type:
correl_type[type] = []
correl_type[type].append(float(cr[0]))
def draw_graph():
index = 0
for type in changeTypes:
X,Y = build_cdf(correl_type[type])
#print Y
line=(plt.plot(X,Y))
plt.setp(line, linewidth=3,ls=styles[index], marker=marks[index],
markerfacecolor=marker_color[index], markersize = 12, color=marker_color[index],markevery=gap[index])
index += 1
plt.legend(changeTypes,loc=0,fontsize=17)
plt.xlabel("Correlation",fontsize=20)
plt.ylabel("CDF",fontsize=18)
for label in ax.get_xticklabels():
label.set_fontsize(19)
for label in ax.get_yticklabels():
label.set_fontsize(18)
plt.tight_layout()
plt.show()
def build_cdf(ls):
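    # Turn a list of correlation values into empirical-CDF (X, Y) points.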
X = []
Y = []
prev = 0.0
total = len(ls)
dic = {}
for key in ls:
if key not in dic:
dic[key] = 0.0
dic[key] += 1.0
tracked = {}
for key in sorted(ls):
if key in tracked:
continue
tracked[key] = 1
X.append(key)
prob = dic[key]/total
Y.append(prob + prev)
prev = prob + prev
return X,Y
if __name__ == "__main__":
apply_age_restriction = 1
age_restriction = 730
risks =["Low", "Medium", "High", "Very High"]
changeTypes =["#Revisions", "NewAdditions", "DiffSizes", "EditDistances"]
### will change based on feature
feature = "SLOCStandard" ### changeDates for #revisions
list_projects()
parse_data()
correlation()
draw_graph()
| 24.772908 | 122 | 0.603731 | 811 | 6,218 | 4.499383 | 0.240444 | 0.055906 | 0.06906 | 0.075363 | 0.257605 | 0.199507 | 0.140861 | 0.129076 | 0.10359 | 0.10359 | 0 | 0.027546 | 0.246864 | 6,218 | 250 | 123 | 24.872 | 0.751655 | 0.06063 | 0 | 0.19186 | 0 | 0 | 0.062069 | 0.011897 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052326 | false | 0 | 0.075581 | 0 | 0.145349 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a34367f5bced4609356f0cde39e3a13b062d891 | 4,585 | py | Python | ppmi_tweet_collector.py | wolferobert3/uweat_icsc_2022 | 172f4cdd737f7405bdc83357e3dabeee94251efc | [
"MIT"
] | null | null | null | ppmi_tweet_collector.py | wolferobert3/uweat_icsc_2022 | 172f4cdd737f7405bdc83357e3dabeee94251efc | [
"MIT"
] | null | null | null | ppmi_tweet_collector.py | wolferobert3/uweat_icsc_2022 | 172f4cdd737f7405bdc83357e3dabeee94251efc | [
"MIT"
] | null | null | null | import pandas as pd
from os import path, listdir
import pickle
from nltk import word_tokenize
import json
import re
import emoji
import string
TARGET_STATE = 'OR'
source_dir = f''
twitter_files = [i for i in list(listdir(source_dir)) if TARGET_STATE in i]
punctuation_list = list(string.punctuation) + ['....','...', '..', '\"', '\'', '“','”','`','``','…']
tweets_by_date = {}
#Collect and clean tweets
for idx, tfile in enumerate(twitter_files):
with open(path.join(source_dir, tfile), 'r') as i:
t_f = [json.loads(line) for line in i]
en_tweets = []
for tweet in t_f:
en_tweets.append((tweet['id'],tweet['text'],tweet['created_at'][:10]))
cleaned_string = {}
for tweet in en_tweets:
if tweet[0] in cleaned_string:
continue
refined_tweet = tweet[1].lower()
refined_tweet = re.sub(emoji.get_emoji_regexp(), r'', refined_tweet)
refined_tweet = re.sub(r'http\S+', '', refined_tweet)
refined_tweet = re.sub(r'@\S+', '', refined_tweet)
refined_tweet = re.sub(r'#', '', refined_tweet)
refined_tweet = re.sub(r'&', '&', refined_tweet)
refined_tweet = re.sub(r'\s+', ' ', refined_tweet)
refined_tweet = re.sub(r'^rts*\s+', '', refined_tweet)
refined_tweet = re.sub(r'^\s+', '', refined_tweet)
refined_tweet = re.sub(r'\S+…','',refined_tweet)
refined_tweet = ' '.join([i for i in word_tokenize(refined_tweet) if i not in punctuation_list])
refined_tweet = refined_tweet.replace(' \' ','\'')
if tweet[2] in tweets_by_date:
tweets_by_date[tweet[2]].append(refined_tweet)
else:
tweets_by_date[tweet[2]] = [refined_tweet]
cleaned_string[tweet[0]] = refined_tweet
print(idx)
with open(f'E:\\state_corpora\\tweets_by_date\\{TARGET_STATE}_tweets_by_date.pkl','wb') as pkl_writer:
pickle.dump(tweets_by_date,pkl_writer)
#Obtain ground truth dates
covid_timeline = pd.read_csv(f'Public_Health_Measures.csv')
start_dates = covid_timeline['Start_Date'].tolist()
start_dates = list({date:'' for date in start_dates}.keys())
end_dates = covid_timeline['End_Date'].tolist()
end_dates = list({date:'' for date in end_dates}.keys())
start_end_dict = {start_dates[idx]:[start_dates[idx],end_dates[idx]] for idx in range(len(start_dates))}
for key in start_end_dict.keys():
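    # Expand each (start, end) pair into the explicit list of YYYY-MM-DD
    # strings it covers; month-boundary crossings assume up to 31 days.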
start_year_int = int(key[:4])
start_month_int = int(key[5:7])
start_day_int = int(key[-2:])
end_year_int = int(start_end_dict[key][-1][:4])
end_month_int = int(start_end_dict[key][-1][5:7])
end_day_int = int(start_end_dict[key][-1][-2:])
if start_month_int == end_month_int:
days_to_add = [i for i in range(start_day_int,end_day_int+1)]
start_end_dict[key] = [f'{start_year_int}-{start_month_int}-{idx}' for idx in days_to_add]
else:
month1_to_add = [f'{start_year_int}-{start_month_int}-{idx}' for idx in range(start_day_int,32)]
month2_to_add = [f'{end_year_int}-{end_month_int}-{idx}' for idx in range(1,end_day_int+1)]
start_end_dict[key] = month1_to_add + month2_to_add
for idx, date in enumerate(start_end_dict[key]):
if date[-2] == '-':
start_end_dict[key][idx] = date[:-1] + '0' + date[-1]
for idx, date in enumerate(start_end_dict[key]):
if date[-5] == '-':
start_end_dict[key][idx] = date[:5] + '0' + date[5:]
#Process with newlines for PPMI
for key in tweets_by_date.keys():
tweets = tweets_by_date[key]
tweets_by_date[key] = ['\n'.join(i.split(' ')) for i in tweets]
#Join into time period corpora
tweets_by_time_period = {}
for key in start_end_dict.keys():
time_range_tweets = []
for date in start_end_dict[key]:
if date in tweets_by_date:
if time_range_tweets:
time_range_tweets.extend(tweets_by_date[date])
else:
time_range_tweets = tweets_by_date[date]
tweets_by_time_period[key] = time_range_tweets
if time_range_tweets:
with open(f'E:\\state_corpora\\strict_divisions\\corpus_lists\\{TARGET_STATE}_{key}_tweet_list.pkl','wb') as pkl_writer:
pickle.dump(time_range_tweets,pkl_writer)
corpus_string = '\n\n\n\n\n\n\n\n'.join(time_range_tweets)
with open(f'E:\\state_corpora\\strict_divisions\\corpus_strings\\{TARGET_STATE}_{key}_tweet_corpus.txt','w',encoding='utf8') as writer:
writer.write(corpus_string) | 39.188034 | 144 | 0.638168 | 694 | 4,585 | 3.920749 | 0.18732 | 0.114664 | 0.057332 | 0.088203 | 0.362367 | 0.332598 | 0.284454 | 0.209849 | 0.169055 | 0.168688 | 0 | 0.010261 | 0.213522 | 4,585 | 117 | 145 | 39.188034 | 0.742651 | 0.023555 | 0 | 0.102273 | 0 | 0.011364 | 0.118173 | 0.088573 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0.011364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a38fc376b5e5fd6f4153aa152aa553785289bf1 | 911 | py | Python | all/054.py | brenodt/Desafio-365-dias-programando | 6c899b4f2e314b9a3a75734f39509a665016e206 | [
"MIT"
] | null | null | null | all/054.py | brenodt/Desafio-365-dias-programando | 6c899b4f2e314b9a3a75734f39509a665016e206 | [
"MIT"
] | null | null | null | all/054.py | brenodt/Desafio-365-dias-programando | 6c899b4f2e314b9a3a75734f39509a665016e206 | [
"MIT"
] | null | null | null |
from collections import OrderedDict  # OrderedDict must be imported!
def enesimo_nao_repetido(entrada: str, n: int):
    # Create the dictionary using OrderedDict and initialize
    # each key with the value 0
dicionario = OrderedDict.fromkeys(entrada, 0)
    # Count the occurrences of each letter
for letra in entrada:
dicionario[letra] += 1
    # Using a list comprehension, keep only the non-repeated letters
    elementos_nao_repetidos = [chave for (chave, valor) in
                               dicionario.items() if valor == 1]
    # If N is greater than the number of non-repeated letters
    if len(elementos_nao_repetidos) < n:
        return -1
    else:  # Otherwise, return the n-th non-repeated letter
return elementos_nao_repetidos[n-1]
if __name__ == "__main__":
texto = "AA BB CC DD EE F GG H II JJ KK L M N OO"
letra = 5
    # -- OUTPUT --:
    # N
print(enesimo_nao_repetido(texto, letra)) | 28.46875 | 70 | 0.668496 | 130 | 911 | 4.546154 | 0.615385 | 0.060914 | 0.106599 | 0.07445 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010324 | 0.255763 | 911 | 32 | 71 | 28.46875 | 0.861357 | 0.333699 | 0 | 0 | 0 | 0 | 0.078859 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.266667 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a4693933697b877ba83ef27e07a84e5fbb283c6 | 884 | py | Python | motorway/contrib/amazon_kinesis/intersections.py | alesdotio/motorway | 8514f9e6494c9e55576705b72dda306c175e62dc | [
"Apache-2.0"
] | 1 | 2016-09-16T14:51:59.000Z | 2016-09-16T14:51:59.000Z | motorway/contrib/amazon_kinesis/intersections.py | alesdotio/motorway | 8514f9e6494c9e55576705b72dda306c175e62dc | [
"Apache-2.0"
] | null | null | null | motorway/contrib/amazon_kinesis/intersections.py | alesdotio/motorway | 8514f9e6494c9e55576705b72dda306c175e62dc | [
"Apache-2.0"
] | 1 | 2020-12-12T17:35:55.000Z | 2020-12-12T17:35:55.000Z | import json
import boto.kinesis
from motorway.intersection import Intersection
class KinesisInsertIntersection(Intersection):
stream_name = None
def __init__(self, **kwargs):
super(KinesisInsertIntersection, self).__init__(**kwargs)
self.conn = boto.kinesis.connect_to_region(**self.connection_parameters())
assert self.stream_name, "Please define attribute stream_name on your KinesisInsertIntersection"
def connection_parameters(self):
return {
'region_name': 'eu-west-1',
# Add this or use ENV VARS
# 'aws_access_key_id': '',
# 'aws_secret_access_key': ''
}
def process(self, message):
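        # Serialize the message content as JSON and push it to the stream,
        # using the grouping value as the Kinesis partition key.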
self.conn.put_record(
self.stream_name,
json.dumps(message.content),
message.grouping_value
)
self.ack(message)
yield
| 29.466667 | 104 | 0.640271 | 93 | 884 | 5.817204 | 0.580645 | 0.073937 | 0.051756 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001546 | 0.2681 | 884 | 29 | 105 | 30.482759 | 0.834621 | 0.087104 | 0 | 0 | 0 | 0 | 0.110834 | 0.031133 | 0 | 0 | 0 | 0 | 0.047619 | 1 | 0.142857 | false | 0 | 0.142857 | 0.047619 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a4758fe075141b33a34759f92db71968b493b1d | 3,034 | py | Python | middle-tier/app/interact_contract.py | Surfndez/Microsoft-Identities-on-the-Ethereum-Blockchain | 35fc2ac329f3a17b59d5bb90115e1e8f7e2ae501 | [
"MIT"
] | 4 | 2018-07-23T22:01:16.000Z | 2020-09-22T11:15:39.000Z | middle-tier/app/interact_contract.py | Surfndez/Microsoft-Identities-on-the-Ethereum-Blockchain | 35fc2ac329f3a17b59d5bb90115e1e8f7e2ae501 | [
"MIT"
] | 1 | 2021-01-21T13:15:16.000Z | 2021-01-21T13:15:16.000Z | middle-tier/app/interact_contract.py | Surfndez/Microsoft-Identities-on-the-Ethereum-Blockchain | 35fc2ac329f3a17b59d5bb90115e1e8f7e2ae501 | [
"MIT"
] | 5 | 2018-07-20T00:23:11.000Z | 2020-09-22T11:15:47.000Z | import json
import time
from os import environ
from web3 import Web3, HTTPProvider
from contract import IDENTITY_STORE_JSON
API_KEY = environ.get('API_KEY')
PRIVATE_KEY = environ.get('PRIVATE_KEY')
CONTRACT_ADDRESS = environ.get('CONTRACT_ADDRESS')
NETWORK_ENDPOINT = "https://ropsten.infura.io/v3/{}".format(API_KEY)
w3 = Web3(HTTPProvider(NETWORK_ENDPOINT))
w3.eth.enable_unaudited_features()
#known_nonce = set()
def setTenant(hashObject, address, timestamp, tenantId):
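    # Encode the setTenant call and submit it as a signed raw transaction,
    # retrying with a higher gas price or nonce when the node rejects it.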
#global known_nonce
contract = load_contract()
account = w3.eth.account.privateKeyToAccount(PRIVATE_KEY)
get_data = contract.encodeABI(
fn_name='setTenant',
args=[
hashObject,
address,
timestamp,
tenantId
])
trans_count = w3.eth.getTransactionCount(account.address)
nonce = trans_count
#while nonce in known_nonce:
# nonce += 1
print("transaction count=%d nonce=%d" %(trans_count, nonce))
price = w3.toWei('21', 'gwei')
    success = False
    retry = 100
    txn = None  # last transaction submitted, if any
while not success and retry > 0:
retry -= 1
try:
transaction = {
'to': contract.address,
'data': get_data,
'gas': 1728712,
'gasPrice': price,
'nonce': nonce
}
signed = w3.eth.account.signTransaction(transaction, PRIVATE_KEY)
txn_hash = w3.eth.sendRawTransaction(signed.rawTransaction)
txn = w3.eth.getTransaction(txn_hash)
print('Contract Transaction Hash {}'.format(txn_hash))
print('Transaction {}'.format(txn))
#known_nonce.add(nonce)
success = True
except ValueError as err:
err_msg = err.args[0]['message']
print('web3 error:: %s' % err_msg)
if 'replacement transaction underpriced' in err_msg:
price += 1
retry += 1 # underprice doesn't count for retrying
print('increase price to %d' % price)
elif 'nonce too low' in err_msg or 'known transaction' in err_msg:
#known_nonce.add(nonce)
nonce += 1
print('increase nonce to %d' % nonce)
else:
raise err
if retry <= 0:
print('stop retrying')
return txn
def get_deloyed_contract(contract_definition, contract_address):
contract_abi = contract_definition['abi']
contract = w3.eth.contract(abi=contract_abi, address=contract_address)
return contract
def load_contract():
contract_definition = json.loads(IDENTITY_STORE_JSON)
return get_deloyed_contract(contract_definition, CONTRACT_ADDRESS)
def is_valid(tenant_id, user_address):
contract = load_contract()
timestamp = int(time.time())
# call isValid on contract
isValid = contract.functions.isValid(tenant_id, user_address, timestamp).call()
return isValid
| 30.34 | 83 | 0.627884 | 343 | 3,034 | 5.387755 | 0.35277 | 0.018939 | 0.012987 | 0.036797 | 0.055195 | 0.055195 | 0.055195 | 0 | 0 | 0 | 0 | 0.016856 | 0.276533 | 3,034 | 99 | 84 | 30.646465 | 0.825057 | 0.060976 | 0 | 0.027027 | 0 | 0 | 0.111228 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054054 | false | 0 | 0.108108 | 0 | 0.216216 | 0.094595 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a4900d73a1ad7f743a5146f147752cc74077496 | 2,227 | py | Python | startup/90-functions.py | NSLS-II-XPD-tomo/profile_collection | a960faa6bd24dc87bd094399f2124f80159207be | [
"BSD-3-Clause"
] | null | null | null | startup/90-functions.py | NSLS-II-XPD-tomo/profile_collection | a960faa6bd24dc87bd094399f2124f80159207be | [
"BSD-3-Clause"
] | null | null | null | startup/90-functions.py | NSLS-II-XPD-tomo/profile_collection | a960faa6bd24dc87bd094399f2124f80159207be | [
"BSD-3-Clause"
] | 2 | 2021-11-08T19:13:50.000Z | 2022-01-08T16:17:01.000Z | print(f'Loading {__file__}')
def configure_area_det(det,acq_time,acq_period=None,exposure=None,num_exposures=1):
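    # Configure the area detector's acquisition time/period (with per-detector
    # quirks) and derive images_per_set from the requested total exposure.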
if det.name == 'prosilica':
acq_time = min(acq_time,25)
if det.cam.acquire.get() == 0:
yield from bps.abs_set(det.cam.acquire, 1, wait=True)
if det.name == 'dexela':
yield from bps.abs_set(det.cam.acquire_time, max(acq_time,0.1), wait=True)
acq_time_rbv = det.cam.acquire_time.get()
else:
yield from bps.abs_set(det.cam.acquire_time, acq_time, wait=True)
acq_time_rbv = det.cam.acquire_time.get()
if det.name == 'dexela':
yield from bps.abs_set(det.cam.acquire_period, acq_time_rbv+0.005, wait=True)
acq_period_rbv = det.cam.acquire_period.get()
else:
if acq_period is None:
if det.name == 'blackfly':
yield from bps.abs_set(det.cam.acquire_period, 0.1, wait=False)
else:
yield from bps.abs_set(det.cam.acquire_period, acq_time_rbv, wait=True)
acq_period_rbv = det.cam.acquire_period.get()
else:
if det.name == 'blackfly':
yield from bps.abs_set(det.cam.acquire_period, min(1,acq_period), wait=False)
else:
yield from bps.abs_set(det.cam.acquire_period, acq_period, wait=True)
acq_period_rbv = det.cam.acquire_period.get()
if exposure is None:
exposure = acq_time_rbv*10
num_frames = np.ceil(exposure / acq_time_rbv)
yield from bps.abs_set(det.images_per_set, num_frames, wait=True)
yield from bps.abs_set(det.number_of_sets, num_exposures, wait=True)
if det.name == 'emergent':
print(">>>%s is configured as:\n acq_time = %.3fmsec; acq_period = %.3fmsec; exposure = %.3fmsec \
(num frames = %.2f); num_exposures = %d"%(det.name,acq_time_rbv,acq_period_rbv,exposure,num_frames,num_exposures))
else:
print(">>>%s is configured as:\n acq_time = %.3fsec; acq_period = %.3fsec; exposure = %.3fsec \
(num frames = %.2f); num_exposures = %d"%(det.name,acq_time_rbv,acq_period_rbv,exposure,num_frames,num_exposures))
return
| 40.490909 | 118 | 0.624158 | 327 | 2,227 | 4.003058 | 0.180428 | 0.080214 | 0.139037 | 0.114591 | 0.689076 | 0.669977 | 0.637892 | 0.637892 | 0.571429 | 0.538579 | 0 | 0.014414 | 0.252357 | 2,227 | 54 | 119 | 41.240741 | 0.771772 | 0 | 0 | 0.425 | 0 | 0.05 | 0.028289 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0 | 0 | 0.05 | 0.075 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a4982c716e53e21237cd2d0f5536f385174c347 | 15,085 | py | Python | same-same.py | gwk/same-same | 065674668bf26dd2bcc62ab7a556629d21647fe4 | [
"CC0-1.0"
] | 22 | 2018-05-25T20:45:46.000Z | 2021-01-24T23:26:20.000Z | same-same.py | gwk/same-same | 065674668bf26dd2bcc62ab7a556629d21647fe4 | [
"CC0-1.0"
] | null | null | null | same-same.py | gwk/same-same | 065674668bf26dd2bcc62ab7a556629d21647fe4 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
# Dedicated to the public domain under CC0: https://creativecommons.org/publicdomain/zero/1.0/.
import re
from argparse import ArgumentParser
from difflib import SequenceMatcher
from itertools import chain, groupby
from os import environ
from sys import stderr, stdout
from typing import *
from typing import Match
class DiffLine:
def __init__(self, kind:str, match:Match, rich_text:str) -> None:
self.kind = kind # The name from `diff_pat` named capture groups.
self.match = match
self.rich_text = rich_text # Original colorized text from git.
self.old_num = 0 # 1-indexed.
self.new_num = 0 # ".
self.chunk_idx = 0 # Positive for rem/add.
self.is_src = False # True for ctx/rem/add.
self.text = '' # Final text for ctx/rem/add.
@property
def plain_text(self) -> str:
return self.match.string # type: ignore
def main() -> None:
arg_parser = ArgumentParser(prog='same-same', description='Git diff filter.')
arg_parser.add_argument('-interactive', action='store_true', help="Accommodate git's interactive mode.")
args = arg_parser.parse_args()
# Git can generate utf8-illegal sequences; ignore them.
stdin = open(0, errors='replace')
if 'SAME_SAME_OFF' in environ:
for line in stdin:
stdout.write(line)
exit(0)
dbg = ('SAME_SAME_DBG' in environ)
buffer:List[DiffLine] = []
def flush_buffer() -> None:
nonlocal buffer
if buffer:
handle_file_lines(buffer, interactive=args.interactive)
buffer = []
try:
for rich_text in stdin:
rich_text = rich_text.rstrip('\n')
plain_text = sgr_pat.sub('', rich_text) # remove colors.
match = diff_pat.match(plain_text)
assert match is not None
kind = match.lastgroup
assert kind is not None, match
if dbg:
print(kind, ':', repr(plain_text))
continue
if kind == 'diff':
flush_buffer()
buffer.append(DiffLine(kind, match, rich_text))
flush_buffer()
except BrokenPipeError:
stderr.close() # Prevents warning message.
def handle_file_lines(lines:List[DiffLine], interactive:bool) -> None:
first = lines[0]
kind = first.kind
skip = False
# Detect if we should skip these lines.
if kind not in ('diff', 'loc'): skip = True
elif graph_pat.match(first.plain_text).end(): skip = True # type: ignore
if skip:
for line in lines: print(line.rich_text)
return
old_ctx_nums:Set[int] = set() # Line numbers of context lines.
new_ctx_nums:Set[int] = set() # ".
old_lines:Dict[int, DiffLine] = {} # Maps of line numbers to line structs.
new_lines:Dict[int, DiffLine] = {} # ".
old_uniques:Dict[str, Optional[int]] = {} # Maps unique line bodies to line numbers.
new_uniques:Dict[str, Optional[int]] = {} # ".
old_num = 0 # 1-indexed source line number.
new_num = 0 # ".
chunk_idx = 0 # Counter to differentiate chunks; becomes part of the groupby key.
# Accumulate source lines into structures.
old_path = '<OLD_PATH>'
new_path = '<NEW_PATH>'
is_prev_add_rem = False
for line in lines:
match = line.match
kind = line.kind
is_add_rem = (kind in ('rem', 'add'))
if not is_prev_add_rem and is_add_rem: chunk_idx += 1
is_prev_add_rem = is_add_rem
if kind in ('ctx', 'rem', 'add'):
line.is_src = True
if kind == 'ctx':
line.text = match['ctx_text']
elif kind == 'rem':
line.text = match['rem_text']
line.chunk_idx = chunk_idx
insert_unique_line(old_uniques, line.text, old_num)
elif kind == 'add':
line.text = match['add_text']
line.chunk_idx = chunk_idx
insert_unique_line(new_uniques, line.text, new_num)
if kind in ('ctx', 'rem'):
assert old_num not in old_lines
assert old_num not in old_ctx_nums
line.old_num = old_num
old_lines[old_num] = line
old_ctx_nums.add(old_num)
old_num += 1
if kind in ('ctx', 'add'):
assert new_num not in new_lines
assert new_num not in new_ctx_nums
line.new_num = new_num
new_lines[new_num] = line
new_ctx_nums.add(new_num)
new_num += 1
elif kind == 'loc':
o = int(match['old_num'])
if o > 0:
assert o > old_num, (o, old_num, match.string)
old_num = o
n = int(match['new_num'])
if n > 0:
assert n > new_num
new_num = n
elif kind == 'old': old_path = vscode_path(match['old_path'].rstrip('\t'))
elif kind == 'new': new_path = vscode_path(match['new_path'].rstrip('\t')) # Not sure why this trailing tab appears.
# Detect moved lines.
def diff_lines_match(old_idx:int, new_idx:int) -> bool:
if old_idx in old_ctx_nums or new_idx in new_ctx_nums: return False
try: return old_lines[old_idx].text.strip() == new_lines[new_idx].text.strip()
except KeyError: return False
old_moved_nums:Set[int] = set()
new_moved_nums:Set[int] = set()
for body, new_idx in new_uniques.items():
if new_idx is None: continue
old_idx = old_uniques.get(body)
if old_idx is None: continue
p_o = old_idx
p_n = new_idx
while diff_lines_match(p_o-1, p_n-1):
p_o -= 1
p_n -= 1
e_o = old_idx + 1
e_n = new_idx + 1
while diff_lines_match(e_o, e_n):
e_o += 1
e_n += 1
old_moved_nums.update(range(p_o, e_o))
new_moved_nums.update(range(p_n, e_n))
# Break lines into rem/add chunks.
# While a "hunk" is a series of (possibly many) ctx/rem/add lines provided by git diff,
# a "chunk" is either a contiguous block of rem/add lines, or else any other single line.
# This approach simplifies the token diffing process so that it is a reasonably
# straightforward comparison of a rem block to an add block.
  def chunk_key(line:DiffLine) -> Tuple[bool, int, bool]:
    return (line.is_src, line.chunk_idx, (line.old_num in old_moved_nums or line.new_num in new_moved_nums))
for ((is_src, chunk_idx, is_moved), _chunk) in groupby(lines, key=chunk_key):
chunk = list(_chunk) # We iterate over the sequence several times.
if chunk_idx and not is_moved: # Chunk should be diffed by tokens.
# We must ensure that the same number of lines is output, at least for `-interactive` mode.
# Currently, we do not reorder lines at all, but that is an option for the future.
rem_lines = [l for l in chunk if l.old_num]
add_lines = [l for l in chunk if l.new_num]
add_token_diffs(rem_lines, add_lines)
elif is_src: # ctx or moved.
for l in chunk:
l.text = highlight_strange_chars(l.text)
# Print lines.
for line in chunk:
kind = line.kind
match = line.match
text = line.text
if kind == 'ctx':
print(text)
elif kind == 'rem':
m = C_REM_MOVED if line.old_num in old_moved_nums else ''
print(C_REM_LINE, m, text, C_END, sep='')
elif kind == 'add':
m = C_ADD_MOVED if line.new_num in new_moved_nums else ''
print(C_ADD_LINE, m, text, C_END, sep='')
elif kind == 'loc':
new_num = match['new_num']
snippet = match['parent_snippet']
s = ' ' + C_SNIPPET if snippet else ''
print(C_LOC, new_path, ':', new_num, ':', s, snippet, C_END, sep='')
elif kind == 'diff':
msg = new_path if (old_path == new_path) else '{} -> {}'.format(old_path, new_path)
print(C_FILE, msg, ':', C_END, sep='')
elif kind == 'meta':
print(C_MODE, new_path, ':', RST, ' ', line.rich_text, sep='')
elif kind in dropped_kinds:
      if interactive: # cannot drop lines, because interactive mode slices the diff by line counts.
print(C_DROPPED, line.plain_text, RST, sep='')
elif kind in pass_kinds:
print(line.rich_text)
else:
raise Exception('unhandled kind: {}\n{!r}'.format(kind, text))
def insert_unique_line(d:Dict[str, Optional[int]], line:str, idx:int) -> None:
'For the purpose of movement detection, lines are tested for uniqueness after stripping leading and trailing whitespace.'
body = line.strip()
if body in d: d[body] = None
else: d[body] = idx
def add_token_diffs(rem_lines:List[DiffLine], add_lines:List[DiffLine]) -> None:
'Rewrite DiffLine.text values to include per-token diff highlighting.'
# Get lists of tokens for the entire chunk.
r_tokens = tokenize_difflines(rem_lines)
a_tokens = tokenize_difflines(add_lines)
m = SequenceMatcher(isjunk=is_token_junk, a=r_tokens, b=a_tokens, autojunk=True)
r_frags:List[List[str]] = [[] for _ in rem_lines] # Accumulate highlighted tokens.
a_frags:List[List[str]] = [[] for _ in add_lines]
r_line_idx = 0 # Step through the accumulators.
a_line_idx = 0
r_d = 0 # Token index of previous/next diff.
a_d = 0
# TODO: r_lit, a_lit flags could slightly reduce emission of color sequences.
blocks = m.get_matching_blocks() # last block is the sentinel: (len(a), len(b), 0).
  for r_p, a_p, l in blocks:
# Highlight the differing tokens.
r_line_idx = append_frags(r_frags, r_tokens, r_line_idx, r_d, r_p, C_REM_TOKEN)
a_line_idx = append_frags(a_frags, a_tokens, a_line_idx, a_d, a_p, C_ADD_TOKEN)
r_d = r_p+l # update to end of match / beginning of next diff.
a_d = a_p+l
# Do not highlight the matching tokens.
r_line_idx = append_frags(r_frags, r_tokens, r_line_idx, r_p, r_d, C_RST_TOKEN)
a_line_idx = append_frags(a_frags, a_tokens, a_line_idx, a_p, a_d, C_RST_TOKEN)
for rem_line, frags in zip(rem_lines, r_frags):
rem_line.text = ''.join(frags)
for add_line, frags in zip(add_lines, a_frags):
add_line.text = ''.join(frags)
def tokenize_difflines(lines:List[DiffLine]) -> List[str]:
'Convert the list of line texts into a single list of tokens, including newline tokens.'
tokens:List[str] = []
for i, line in enumerate(lines):
if i: tokens.append('\n')
tokens.extend(m[0] for m in token_pat.finditer(line.text))
return tokens
def is_token_junk(token:str) -> bool:
'''
  Treat newlines as tokens, but all other whitespace as junk.
This forces the diff algorithm to respect line breaks but not get distracted aligning to whitespace.
'''
return token.isspace() and token != '\n'
def append_frags(frags:List[List[str]], tokens:List[str], line_idx:int, pos:int, end:int, highlight:str) -> int:
for frag in tokens[pos:end]:
if frag == '\n':
line_idx += 1
else:
line_frags = frags[line_idx]
line_frags.append(highlight)
line_frags.append(highlight_strange_chars(frag))
return line_idx
def highlight_strange_chars(string:str) -> str:
return strange_char_pat.sub(
lambda m: '{}{}{}'.format(C_STRANGE, m[0].translate(strange_char_trans_table), C_RST_STRANGE),
string)
dropped_kinds = {
'idx', 'old', 'new'
}
pass_kinds = {
'empty', 'other'
}
sgr_pat = re.compile(r'\x1B\[[0-9;]*m')
graph_pat = re.compile(r'(?x) [ /\*\|\\]*') # space is treated as literal inside of brackets, even in extended mode.
diff_pat = re.compile(r'''(?x)
(?:
(?P<empty> $)
| (?P<commit> commit\ [0-9a-z]{40} )
| (?P<author> Author: )
| (?P<date> Date: )
| (?P<diff> diff\ --git )
| (?P<idx> index )
| (?P<old> --- \ (?P<old_path>.+) )
| (?P<new> \+\+\+ \ (?P<new_path>.+) )
| (?P<loc> @@\ -(?P<old_num>\d+)(?P<old_len>,\d+)?\ \+(?P<new_num>\d+)(?P<new_len>,\d+)?\ @@\ ?(?P<parent_snippet>.*) )
| (?P<ctx> \ (?P<ctx_text>.*) )
| (?P<rem> - (?P<rem_text>.*) )
| (?P<add> \+ (?P<add_text>.*) )
| (?P<meta>
( old\ mode
| new\ mode
| deleted\ file\ mode
| new\ file\ mode
| copy\ from
| copy\ to
| rename\ from
| rename\ to
| similarity\ index
| dissimilarity\ index ) )
| (?P<other> .* )
)
''')
token_pat = re.compile(r'''(?x)
\w[\w\d]* # Symbol token.
| \d+ # Number token.
| \ + # Spaces; distinct from other whitespace.
| \t+ # Tabs; distinct from other whitespace.
| \s+ # Other whitespace.
| . # Any other single character; newlines are never present so DOTALL is irrelevant.
''')
# Unicode ranges for strange characters:
# C0: \x00 - \x1F
# \n: \x0A
# C0 !\n: [ \x00-\x09 \x0B-\x1F ]
# SP: \x20
# DEL: \x7F
# C1: \x80 - \x9F
# NBSP: \xA0 (nonbreaking space)
# SHY: \xAD (soft hyphen)
strange_char_re = r'(?x) [\x00-\x09\x0B-\x1F\x7F\x80-\x9F\xA0\xAD]+'
strange_char_pat = re.compile(strange_char_re)
assert not strange_char_pat.match(' ')
# Materialized as a tuple: chain() yields a one-shot iterator, and the assert
# below would otherwise exhaust it before the dict comprehension uses it.
strange_char_ords = tuple(chain(range(0, 0x09+1), range(0x0B, 0x1F+1), range(0x7F, 0x7F+1),
  range(0x80, 0x9F+1), range(0xA0, 0xA0+1), range(0xAD, 0xAD+1)))
assert ord(' ') not in strange_char_ords
strange_char_names = { i : '\\x{:02x}'.format(i) for i in strange_char_ords }
strange_char_names.update({
'\0' : '\\0',
'\a' : '\\a',
'\b' : '\\b',
'\f' : '\\f',
'\r' : '\\r',
'\t' : '\\t',
'\v' : '\\v',
})
strange_char_trans_table = str.maketrans(strange_char_names)
# ANSI control sequence indicator.
CSI = '\x1b['
ERASE_LINE_F = CSI + 'K' # Sending erase line forward while background color is set colors to end of line.
def sgr(*codes:Any) -> str:
'Select Graphic Rendition control sequence string.'
code = ';'.join(str(c) for c in codes)
return '\x1b[{}m'.format(code)
RST = sgr()
RST_BOLD, RST_ULINE, RST_BLINK, RST_INVERT, RST_TXT, RST_BG = (22, 24, 25, 27, 39, 49)
BOLD, ULINE, BLINK, INVERT = (1, 4, 5, 7)
# xterm-256 sequence initiators; these should be followed by a single color index.
# both text and background can be specified in a single sgr call.
TXT = '38;5'
BG = '48;5'
# RGB6 color cube: 6x6x6, from black to white.
K = 16 # black.
W = 231 # white.
# Grayscale: the 24 palette values have a suggested 8 bit grayscale range of [8, 238].
middle_gray_indices = range(232, 256)
def gray26(n:int) -> int:
assert 0 <= n < 26
if n == 0: return K
if n == 25: return W
return W + n
def rgb6(r:int, g:int, b:int) -> int:
'index RGB triples into the 256-color palette (returns 16 for black, 231 for white).'
assert 0 <= r < 6
assert 0 <= g < 6
assert 0 <= b < 6
return (((r * 6) + g) * 6) + b + 16
# same-same colors.
C_FILE = sgr(BG, rgb6(1, 0, 1))
C_MODE = sgr(BG, rgb6(1, 0, 1))
C_LOC = sgr(BG, rgb6(0, 1, 2))
C_UNKNOWN = sgr(BG, rgb6(5, 0, 5))
C_SNIPPET = sgr(TXT, gray26(22))
C_DROPPED = sgr(TXT, gray26(10))
C_REM_LINE = sgr(BG, rgb6(1, 0, 0))
C_ADD_LINE = sgr(BG, rgb6(0, 1, 0))
C_REM_MOVED = sgr(TXT, rgb6(4, 2, 0))
C_ADD_MOVED = sgr(TXT, rgb6(2, 4, 0))
C_REM_TOKEN = sgr(TXT, rgb6(5, 2, 3), BOLD)
C_ADD_TOKEN = sgr(TXT, rgb6(2, 5, 3), BOLD)
C_RST_TOKEN = sgr(RST_TXT, RST_BOLD)
C_STRANGE = sgr(INVERT)
C_RST_STRANGE = sgr(RST_INVERT)
C_END = ERASE_LINE_F + RST
def vscode_path(path:str) -> str:
'VSCode will only recognize source locations if the path contains a slash; add "./" to plain file names.'
if '/' in path or '<' in path or '>' in path: return path # Do not alter pseudo-names like <stdin>.
return './' + path
def errL(*items:Any) -> None: print(*items, sep='', file=stderr)
def errSL(*items:Any) -> None: print(*items, file=stderr)
if __name__ == '__main__': main()
| 33.300221 | 124 | 0.646868 | 2,431 | 15,085 | 3.828466 | 0.203209 | 0.012894 | 0.007091 | 0.005587 | 0.121092 | 0.072634 | 0.058236 | 0.038036 | 0.028581 | 0.019985 | 0 | 0.019585 | 0.214717 | 15,085 | 452 | 125 | 33.373894 | 0.766081 | 0.214915 | 0 | 0.058997 | 0 | 0.0059 | 0.163408 | 0.011241 | 0 | 0 | 0.003584 | 0.002212 | 0.041298 | 1 | 0.056047 | false | 0.0059 | 0.023599 | 0.00885 | 0.115044 | 0.035398 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a4aca28089648fa6cf04d319fbef84a5a8cbf94 | 1,912 | py | Python | ED_Chapter4.py | Wang-ZhengYi/ED_Chapter5_code | f63861ab8b6bce4756b7f0e1fd15041a976c1a38 | [
"MIT"
] | 1 | 2019-12-19T11:04:49.000Z | 2019-12-19T11:04:49.000Z | ED_Chapter4.py | Wang-ZhengYi/ED_assignment | f63861ab8b6bce4756b7f0e1fd15041a976c1a38 | [
"MIT"
] | null | null | null | ED_Chapter4.py | Wang-ZhengYi/ED_assignment | f63861ab8b6bce4756b7f0e1fd15041a976c1a38 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Created on Oct. 2019
ED_Chapter4
@author: ZYW @ BNU
'''
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import os
from matplotlib import font_manager as fm, rcParams
##------------parameters settings-----------------##
pixel_sides = 10#pixels per cm
N = np.array([3,3,3])#wave node numbers
L = np.array([100,100,100])#unit:cm
A = np.array([2,12,5])#initial intensities
pi = np.pi
K_0 = np.array([N[0]*pi/L[0],N[1]*pi/L[1],N[2]*pi/L[2]])/pixel_sides#wave vector
fpath = os.path.join(rcParams["datapath"], "fonts/ttf/cmr10.ttf")
prop = fm.FontProperties(fname=fpath)
xx = np.linspace(0,L[0]*pixel_sides,L[0]*pixel_sides)
yy = np.linspace(0,L[1]*pixel_sides,L[1]*pixel_sides)
zz = np.zeros((L[1]*pixel_sides, L[1]*pixel_sides))  # placeholder; replaced by a fixed z-slice below
##------------functions settings-----------------##
'''
def E_x(x,y,z):
return A[0]*np.cos(x*K_0[0])*np.sin(y*K_0[1])*np.sin(z*K_0[2])
def E_y(x,y,z):
return A[1]*np.sin(x*K_0[0])*np.cos(y*K_0[1])*np.sin(z*K_0[2])
'''
def E_z(x,y,z):
return A[2]*np.sin(x*K_0[0])*np.sin(y*K_0[1])*np.cos(z*K_0[2])
#Intensities of 3 directions in Cartesian coordinates
xx, yy= np.meshgrid(xx, yy)
zz = 11
E = E_z(xx,yy,zz)
def draw3D(X,Y,Z,angle):
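    # Render Z as a 2D heatmap (left) and a 3D surface with a contour
    # projection onto the base plane (right), then save the figure.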
fig = plt.figure(figsize=(15,7))
ax1 = fig.add_subplot(121)
ax1.imshow(Z,cmap='YlGnBu')
ax2 = fig.add_subplot(122,projection='3d')
ax2.view_init(angle[0],angle[1])
ax2.plot_surface(X, Y, Z, rstride=1, cstride=1,cmap='rainbow',alpha=0.8)
surf = ax2.contourf(X,Y,Z,zdir='z',offset=-5,cmap='rainbow')
ax1.set_title(r'$E_z-plane-figure$')
ax2.set_title(r'$E_z-hologram$')
plt.tight_layout()
plt.savefig('ED_4.png',dpi=600)
plt.show()
##------------data writting & figures making-----------------##
draw3D(xx,yy,E,(45,45))
exit()
| 29.875 | 80 | 0.643828 | 368 | 1,912 | 3.25 | 0.394022 | 0.016722 | 0.01505 | 0.040134 | 0.147157 | 0.103679 | 0.103679 | 0.090301 | 0.090301 | 0.048495 | 0 | 0.061429 | 0.11454 | 1,912 | 63 | 81 | 30.349206 | 0.645009 | 0.190377 | 0 | 0 | 0 | 0 | 0.066617 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051282 | false | 0 | 0.205128 | 0.025641 | 0.282051 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a4f041d9b0a170d8f07557ba868331447b86391 | 3,748 | py | Python | experiments/prediction_similarity.py | CrispyHarder/deep-weight-prior | b87e61d6ad590c61b90e188ec86bfb956073be65 | [
"MIT"
] | null | null | null | experiments/prediction_similarity.py | CrispyHarder/deep-weight-prior | b87e61d6ad590c61b90e188ec86bfb956073be65 | [
"MIT"
] | null | null | null | experiments/prediction_similarity.py | CrispyHarder/deep-weight-prior | b87e61d6ad590c61b90e188ec86bfb956073be65 | [
"MIT"
] | null | null | null | import os
import torch
import argparse
import yaml
import utils
import matplotlib.pyplot as plt
import seaborn as sns
import math
import numpy as np
from datetime import date
def plot_matrix_as_heatmap(matrix,show=False,title='',xlabel='',ylabel='',save_path=''):
    '''Plots the cosine similarity matrix of a number of models
    or model configurations'''
n = np.shape(np.array(matrix))[0]
ticks = math.floor(n/4)
sns.set_theme()
ax = sns.heatmap(matrix,xticklabels=ticks,yticklabels=ticks,cmap='bwr')
ax.invert_yaxis()
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(title)
if save_path:
plt.savefig(save_path)
if show:
plt.show()
logs_path = os.path.join('logs','exman-train-net.py','runs')
runs = [os.path.join(logs_path,run) for run in os.listdir(logs_path) if run[:6] not in ['000001','000002']]
INIT_NAMES = [['vae'],['ghn_default']]
SAVE_PATH = os.path.join('..','..','small-results',str(date.today()),'prediction_similarity')
if not os.path.exists(SAVE_PATH):
os.makedirs(SAVE_PATH)
parser = argparse.ArgumentParser()
parser.add_argument('--init',type=str)
parser.add_argument('--device')
parser.add_argument('--sim',choices=['pred','logits'])
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.device  # --device selects the GPU id
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
torch.cuda.manual_seed_all(42)
torch.manual_seed(42)
init = args.init
model_paths = []
for run in runs:
file = os.path.join(run,'net_params.torch')
yaml_p = os.path.join(run,'params.yaml')
with open(yaml_p) as f:
        params = yaml.full_load(f)  # avoid shadowing the built-in dict
    if 'mult_init_prior' not in params:
        if params['mult_init_mode'] == init:
model_paths.append(file)
_, testloader = utils.load_dataset(data='cifar', train_bs=64, test_bs=500, num_examples=None, seed=42,augmentation=False)
if args.sim == 'pred':
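    # Pairwise agreement: fraction of test examples on which two models
    # predict the same class.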
all_predictions = []
for model_path in model_paths:
if init == 'vae':
model = utils.load_vae(model_path,device)
predictions = []
for x,_ in testloader:
x = x.to(device)
p = model(x)
predictions.append(p.max(1)[1])
predictions = torch.cat(predictions)
all_predictions.append(predictions)
all_predictions = torch.stack(all_predictions)
    num_models = all_predictions.shape[0]
    num_preds = all_predictions.shape[1]
    matrix = torch.zeros(num_models, num_models)
    for i in range(num_models):
        for j in range(i+1):
            # Fraction of test examples on which models i and j agree.
            pred_sim = torch.sum(all_predictions[i] == all_predictions[j]) / num_preds
            matrix[i, j] = matrix[j, i] = pred_sim
if args.sim == 'logits':
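    # Pairwise cosine similarity between the models' flattened logits.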
CosineSimilarity = torch.nn.CosineSimilarity(dim=0)
all_predictions = []
for model_path in model_paths:
if init == 'vae':
model = utils.load_vae(model_path,device)
predictions = []
for x,_ in testloader:
x = x.to(device)
p = model(x)
predictions.append(torch.flatten(p))
predictions = torch.cat(predictions)
all_predictions.append(predictions)
all_predictions = torch.stack(all_predictions)
    num_models = all_predictions.shape[0]
    matrix = torch.zeros(num_models, num_models)
    for i in range(num_models):
        for j in range(i+1):
            cos_sim = CosineSimilarity(all_predictions[i], all_predictions[j])
            matrix[i, j] = matrix[j, i] = cos_sim
title = f'{args.sim} Similarity of {args.init} inits'
save_path = os.path.join(SAVE_PATH,title)
plot_matrix_as_heatmap(matrix,title=title,save_path=save_path)
| 33.765766 | 122 | 0.648346 | 526 | 3,748 | 4.444867 | 0.30038 | 0.083832 | 0.025663 | 0.017964 | 0.366125 | 0.329341 | 0.289991 | 0.289991 | 0.289991 | 0.289991 | 0 | 0.011648 | 0.221185 | 3,748 | 110 | 123 | 34.072727 | 0.789311 | 0.021078 | 0 | 0.326087 | 0 | 0 | 0.076078 | 0.005917 | 0 | 0 | 0 | 0 | 0 | 1 | 0.01087 | false | 0 | 0.108696 | 0 | 0.119565 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a4fe2e81bcaaac8a994f19cd3b167cfb47d0dfa | 2,116 | py | Python | plugins/pattern_navigate.py | OdatNurd/SublimeScraps | e9f97d2abc2a182ccec4a7ac8fa56e0911ef4c5c | [
"MIT"
] | 2 | 2017-01-26T06:27:58.000Z | 2017-07-13T22:48:19.000Z | plugins/pattern_navigate.py | OdatNurd/SublimeScraps | e9f97d2abc2a182ccec4a7ac8fa56e0911ef4c5c | [
"MIT"
] | null | null | null | plugins/pattern_navigate.py | OdatNurd/SublimeScraps | e9f97d2abc2a182ccec4a7ac8fa56e0911ef4c5c | [
"MIT"
] | null | null | null | import sublime
import sublime_plugin
# Related Reading:
# https://forum.sublimetext.com/t/find-for-a-macro/57387/
#
# This example command allows you to jump the cursor to the next or previous
# location of a given pattern of text, which can be either a regex or not and
# case sensitive or not based on command arguments.
#
# A use case for this is implementing a specific Find operation in a macro in
# a repeatable way.
class PatternNavigateCommand(sublime_plugin.TextCommand):
"""
Jump the selection in the file to the next or previous location of the
given textual pattern based on the current cursor location. The search
direction is controlled by the forward argument, and will wrap around the
ends of the buffer.
"""
def run(self, edit, pattern, literal=True, ignorecase=False, forward=True):
# Convert the incoming arguments to the appropriate search flags.
flags = ((sublime.LITERAL if literal else 0) |
(sublime.IGNORECASE if ignorecase else 0))
# Find the locations where this pattern occurs; leave if none
regions = self.view.find_all(pattern, flags)
if not regions:
return
# Get a starting point for our search, and where we should jump to if
# there are no matches in the specified direction.
point = self.view.sel()[0].b
fallback = regions[-1] if not forward else regions[0]
# Remove all selections.
self.view.sel().clear()
# Look in the given direction for the first match from the current
# position; if one is found jump there.
pick = lambda p: (point < p.a) if forward else (point > p.a)
for pos in regions if forward else reversed(regions):
if pick(pos):
return self.jump(pos.a)
# No matches in the search direction, so wrap around.
self.jump(fallback.a)
def jump(self, point):
# Add in the given position as a selection and ensure that it's
# visible.
self.view.sel().add(sublime.Region(point))
self.view.show(point, True)
| 37.785714 | 79 | 0.666352 | 308 | 2,116 | 4.568182 | 0.435065 | 0.017768 | 0.023454 | 0.015636 | 0.041222 | 0.041222 | 0.041222 | 0 | 0 | 0 | 0 | 0.006406 | 0.262287 | 2,116 | 55 | 80 | 38.472727 | 0.894939 | 0.518904 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a50d6dea9ac0f55af007919a82e31ba7ff4734d | 6,440 | py | Python | main.py | gaelfargeas/markdown_to_static_site | 8f270ab38e7cf93f74b58bd64c96e7571f8c5262 | [
"BSD-3-Clause"
] | 1 | 2021-12-13T12:00:21.000Z | 2021-12-13T12:00:21.000Z | main.py | gaelfargeas/markdown_to_static_site | 8f270ab38e7cf93f74b58bd64c96e7571f8c5262 | [
"BSD-3-Clause"
] | null | null | null | main.py | gaelfargeas/markdown_to_static_site | 8f270ab38e7cf93f74b58bd64c96e7571f8c5262 | [
"BSD-3-Clause"
] | null | null | null | import argparse
from pathlib import Path
import markdown2
import jinja2
import os
import shutil
parser = argparse.ArgumentParser()
parser.add_argument("-i", help="Chemin du/des source.", type=str)
parser.add_argument("-o", help="Chemin du dossier des fichiers générés.", type=str)
parser.add_argument("-t", help="Chemin du dossier des fichiers modeles.", type=str)
parser.add_argument(
"-s", help="type de sources (fichier ou dossier).", action="store_true"
)
parser.add_argument("-v", "--verbose", help="Verbose mode.", action="store_true")
args = parser.parse_args()
VERBOSE = args.verbose
if VERBOSE:
print("input :", args.i)
print("output :", args.o)
print("template:", args.t)
print("type input", args.s)
def add_image(html_test):
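    # Copy every image referenced by an <img> tag into <output>/src and
    # rewrite the tag's src attribute to point at the copied file.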
src_path = Path(args.o + "/src")
if not os.path.exists(src_path):
os.makedirs(src_path)
if args.s:
input_path = os.fspath(args.i)
else:
input_path = os.path.dirname(args.i)
result_string = ""
for line in html_test.split("\n"):
line = str(line)
if "<img " in line:
image_path = line.split('src="')[1].split('" ')[0]
if VERBOSE:
print("image_path", image_path)
image_name = image_path.split("/")[-1].split("\\")[-1]
if VERBOSE:
print("image_name", image_name)
shutil.copyfile(
str(input_path) + "/" + image_path, str(src_path) + "/" + image_name
)
line = (
line.split('src="')[0]
+ 'src="./src/'
+ image_name
+ '" '
+ line.split('src="')[-1].split('" ')[-1]
)
result_string += line + "\n"
else:
result_string += line + "\n"
return result_string
if __name__ == "__main__":
if args.s:
        if args.i is not None and args.o is not None:
with Path(args.i) as directory:
for file in list(directory.glob("*_main.md")):
config_dict = {}
with open(file, "r") as input_file:
if VERBOSE:
print("intput file :", input_file.name)
file_name = (
input_file.name.split(".")[-2]
.split("/")[-1]
.split("\\")[-1]
.split("_main")[0]
)
with open(
str(args.o) + "/" + str(file_name) + ".html", "w"
) as output_file:
if VERBOSE:
print("output file :", output_file.name)
html = markdown2.markdown(input_file.read())
config_dict["main"] = html
                        if args.t is not None:
for config_file in list(
directory.glob(file_name + "*.md")
):
config_name = (
config_file.name.split(".")[-2]
.split(file_name + "_")[-1]
.lower()
)
if config_name != "main":
with open(config_file, "r") as open_config_file:
config_dict[
config_name
] = open_config_file.read()
with open(args.t) as template_file:
                                result = jinja2.Template(
template_file.read()
).render(config_dict)
                        else:
                            result = html
                        if VERBOSE:
                            print("template file :", args.t)
                        result = add_image(result)
                        output_file.write(result)
else:
        if args.i is not None and args.o is not None:
config_dict = {}
with open(args.i, "r") as input_file:
if VERBOSE:
print("intput file :", input_file.name)
file_name = (
input_file.name.split(".")[-2]
.split("/")[-1]
.split("\\")[-1]
.split("_main")[0]
)
with open(
str(args.o) + "/" + str(file_name) + ".html", "w"
) as output_file:
if VERBOSE:
print("output file :", output_file.name)
html = markdown2.markdown(input_file.read())
config_dict["main"] = html
            if args.t is not None:
path_directory = args.i.split(file_name + "_main.md")[0]
with Path(path_directory) as directory:
                    # get the directory containing the file
for config_file in list(directory.glob(file_name + "*.md")):
config_name = (
config_file.name.split(".")[-2]
.split(file_name + "_")[-1]
.lower()
)
if config_name != "main":
with open(config_file, "r") as open_config_file:
config_dict[
config_name
] = open_config_file.read()
with open(args.t) as template_file:
result = jinja2.Template(template_file.read()).render(
config_dict
)
else:
result = html
if VERBOSE:
print("template file :", args.t)
result = add_image(result)
output_file.write(result)
| 34.438503 | 88 | 0.39472 | 576 | 6,440 | 4.230903 | 0.164931 | 0.055806 | 0.051703 | 0.029545 | 0.579811 | 0.526057 | 0.501436 | 0.501436 | 0.482561 | 0.482561 | 0 | 0.007998 | 0.495186 | 6,440 | 186 | 89 | 34.623656 | 0.741618 | 0.005435 | 0 | 0.535211 | 0 | 0 | 0.072634 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.007042 | false | 0 | 0.042254 | 0 | 0.056338 | 0.084507 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a574b70d61a04a7120b0d3ff05a024266c78785 | 5,837 | py | Python | src/rics/cardinality/_enum.py | rsundqvist/rics | c67ff6703facb3170535dcf173d7e55734cedbc6 | [
"MIT"
] | 1 | 2022-02-24T22:12:13.000Z | 2022-02-24T22:12:13.000Z | src/rics/cardinality/_enum.py | rsundqvist/rics | c67ff6703facb3170535dcf173d7e55734cedbc6 | [
"MIT"
] | 26 | 2022-02-24T21:08:51.000Z | 2022-03-19T19:55:26.000Z | src/rics/cardinality/_enum.py | rsundqvist/rics | c67ff6703facb3170535dcf173d7e55734cedbc6 | [
"MIT"
] | null | null | null | from enum import Enum
from typing import Tuple, Union
# CardinalityLiteral = Literal["1:1", "1:N", "N:1", "M:N"]
CardinalityT = Union[str, "Cardinality"]
class Cardinality(Enum):
"""Enumeration type for cardinality relationships.
Cardinalities are comparable using numerical operators, and can be thought of as comparing "preciseness". The less
ambiguity there is for a given cardinality, the smaller it is in comparison to the others. The hierarchy is given by
``1:1 < 1:N = N:1 < M:N``. Note that ``1:N`` and ``N:1`` are considered equally precise.
Examples:
Comparing cardinalities
>>> from rics.cardinality import Cardinality
>>> Cardinality.ManyToOne
<Cardinality.ManyToOne: 'N:1'>
>>> Cardinality.OneToOne
<Cardinality.OneToOne: '1:1'>
>>> Cardinality.ManyToOne < Cardinality.OneToOne
False
"""
OneToOne = "1:1"
OneToMany = "1:N"
ManyToOne = "N:1"
ManyToMany = "M:N"
@property
def many_left(self) -> bool:
"""Many-relationship on the right, True for ``N:1`` and ``M:N``."""
return self == Cardinality.ManyToMany or self == Cardinality.ManyToOne # pragma: no cover
@property
def many_right(self) -> bool:
"""Many-relationship on the right, True for ``1:N`` and ``M:N``."""
return self == Cardinality.ManyToMany or self == Cardinality.OneToMany # pragma: no cover
@property
def one_left(self) -> bool:
"""One-relationship on the right, True for ``1:1`` and ``1:N``."""
return not self.many_left # pragma: no cover
@property
def one_right(self) -> bool:
"""One-relationship on the right, True for ``1:1`` and ``N:1``."""
return not self.many_right # pragma: no cover
@property
def inverse(self) -> "Cardinality":
"""Inverse cardinality. For symmetric cardinalities, ``self.inverse == self``.
Returns:
Inverse cardinality.
See Also:
:attr:`symmetric`
"""
if self == Cardinality.OneToMany:
return Cardinality.ManyToOne
if self == Cardinality.ManyToOne:
return Cardinality.OneToMany
return self
@property
def symmetric(self) -> bool:
"""Symmetry flag. For symmetric cardinalities, ``self.inverse == self``.
Returns:
Symmetry flag.
See Also:
:attr:`inverse`
"""
return self == Cardinality.OneToOne or self == Cardinality.ManyToMany
def __ge__(self, other: "Cardinality") -> bool:
"""Equivalent to :meth:`set.issuperset`."""
return _is_superset(self, other)
def __lt__(self, other: "Cardinality") -> bool:
return not self >= other
@classmethod
def from_counts(cls, left_count: int, right_count: int) -> "Cardinality":
"""Derive a `Cardinality` from counts.
Args:
left_count: Number of elements on the left-hand side.
right_count: Number of elements on the right-hand side.
Returns:
A :class:`Cardinality`.
Raises:
ValueError: For counts < 1.
"""
return _from_counts(left_count, right_count)
@classmethod
def parse(cls, arg: CardinalityT, strict: bool = False) -> "Cardinality":
"""Convert to cardinality.
Args:
arg: Argument to parse.
strict: If True, `arg` must match exactly when it is given as a string.
Returns:
A :class:`Cardinality`.
Raises:
ValueError: If the argument could not be converted.
"""
return arg if isinstance(arg, Cardinality) else _from_generous_string(arg, strict)
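# Illustrative examples of the lenient parsing (sketch, not part of the API docs):
#   Cardinality.parse("1-n")  -> Cardinality.OneToMany ("-" is normalized to ":")
#   Cardinality.parse("*:*")  -> Cardinality.ManyToMany ("*" maps to "N", "N:N" to "M:N")
#   Cardinality.parse("1-n", strict=True) -> raises ValueError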
########################################################################################################################
# Supporting functions
#
# Would rather have this in a "friend module", but that's not practical (before 3.10?)
########################################################################################################################
def _parsing_failure_message(arg: str, strict: bool) -> str:
options = tuple([c.value for c in Cardinality])
alternatively = tuple([c.name for c in Cardinality])
strict_hint = "."
if strict:
try:
strict = False
Cardinality.parse(arg, strict=strict)
strict_hint = f". Hint: set {strict=} to allow this input."
except ValueError:
pass
return f"Could not convert {arg=} to Cardinality{strict_hint} Correct input {options=} or {repr(alternatively)}"
_MATRIX = (
(Cardinality.ManyToMany, Cardinality.ManyToOne),
(Cardinality.OneToMany, Cardinality.OneToOne),
)
def _is_superset(c0: Cardinality, c1: Cardinality) -> bool:
if c0 == c1:
return True
c0_i, c0_j = _pos(c0)
c1_i, c1_j = _pos(c1)
return c0_i <= c1_i and c0_j <= c1_j
def _pos(cardinality: Cardinality) -> Tuple[int, int]:
for i in range(2):
for j in range(2):
if _MATRIX[i][j] == cardinality:
return i, j
raise AssertionError("This should be impossible.")
def _from_counts(left_count: int, right_count: int) -> Cardinality:
if left_count < 1:
raise ValueError(f"{left_count=} < 1")
if right_count < 1:
raise ValueError(f"{right_count=} < 1")
one_left = left_count == 1
one_right = right_count == 1
return _MATRIX[int(one_left)][int(one_right)]
def _from_generous_string(s: str, strict: bool) -> Cardinality:
if not strict:
s = s.strip().upper().replace("-", ":", 1).replace("*", "N", 2)
if s == "N:N":
s = "M:N"
for c in Cardinality:
if c.value == s:
return c
raise ValueError(_parsing_failure_message(s, strict))
| 31.38172 | 120 | 0.582662 | 687 | 5,837 | 4.848617 | 0.244541 | 0.040528 | 0.015011 | 0.026418 | 0.221855 | 0.194236 | 0.138397 | 0.088262 | 0.083458 | 0.058841 | 0 | 0.012314 | 0.262635 | 5,837 | 185 | 121 | 31.551351 | 0.761617 | 0.328422 | 0 | 0.094118 | 0 | 0.011765 | 0.087578 | 0.013405 | 0 | 0 | 0 | 0 | 0.011765 | 1 | 0.176471 | false | 0.011765 | 0.023529 | 0.011765 | 0.470588 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a5751b3bd6e1acf691790d0550d0f5c13fdbbcc | 5,443 | py | Python | salt-pillar-linter.py | Noah-Huppert/salt-pillar-linter | a18d5504a46fe314e729ed611a0d8b29a2ea97aa | [
"MIT"
] | 1 | 2019-10-13T18:52:25.000Z | 2019-10-13T18:52:25.000Z | salt-pillar-linter.py | Noah-Huppert/salt-pillar-linter | a18d5504a46fe314e729ed611a0d8b29a2ea97aa | [
"MIT"
] | null | null | null | salt-pillar-linter.py | Noah-Huppert/salt-pillar-linter | a18d5504a46fe314e729ed611a0d8b29a2ea97aa | [
"MIT"
] | 1 | 2019-03-18T16:51:40.000Z | 2019-03-18T16:51:40.000Z | #!/usr/bin/env python3
import argparse
import os
import sys
import re
import yaml
import jinja2
# {{{1 Parse arguments
parser = argparse.ArgumentParser(description="Lints Salt states to ensure " +
"pillars are used correctly")
parser.prog = 'salt-pillar-linter'
parser.add_argument('-p',
action='append',
metavar='PILLARS_ROOT',
required=True,
dest='pillar_roots',
help="Directories where pillars are present, can be " +
"specified multiple times")
parser.add_argument('-s',
action='append',
metavar='STATES_ROOT',
required=True,
dest='state_roots',
help="Directories where states are located, can be " +
"specified multiple times")
parser.add_argument('-f',
action='append',
metavar='TMPL_FILE',
dest='template_files',
help="Non state files which uses Jinja templating to " +
"check, can be specified multiple times")
parser.add_argument('-d',
action='store_true',
default=False,
dest='debug',
help="Print additional debug information")
args = parser.parse_args()
# {{{1 Locate all state and pillar files
def gather_sls_files(initial_dirs):
""" Walks directories to find locations of all sls files
"""
dirs = set()
dirs.update(initial_dirs)
sls_files = set()
while dirs:
root = dirs.pop()
for top_dir, sub_dirs, files in os.walk(root):
sls_files.update([os.path.join(top_dir, f) for f in files
if f != 'top.sls' and
os.path.splitext(f)[1] == '.sls'])
dirs.update([os.path.join(top_dir, sub_dir)
for sub_dir in sub_dirs])
return sls_files
pillar_files = gather_sls_files(args.pillar_roots)
state_files = gather_sls_files(args.state_roots)
if args.template_files:
state_files.update(args.template_files)
# {{{1 Get all pillar keys
def flatten_dict(d, parent_key=''):
""" Return array of flattened dict keys
"""
keys = []
for k in d:
combined_key = k
if parent_key:
combined_key = "{}.{}".format(parent_key, k)
if type(d[k]) == dict:
keys.extend(flatten_dict(d[k], parent_key=combined_key))
else:
keys.append(combined_key)
return keys
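# Illustrative example: flatten_dict({'a': {'b': 1, 'c': 2}}) -> ['a.b', 'a.c']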
pillar_keys = {}
loader = jinja2.FileSystemLoader(searchpath=os.getcwd())
env = jinja2.Environment(loader=loader)
if args.debug:
print("###################")
print("# PARSING PILLARS #")
print("###################")
for pillar_file in pillar_files:
template = env.get_template(pillar_file)
template_str = None
try:
template_str = template.render()
except Exception as e:
raise ValueError("Failed to render Jinja template: {}".format(e))
value = yaml.safe_load(template_str)  # safe_load: yaml.load without a Loader is unsafe and deprecated
flat_keys = flatten_dict(value)
if args.debug:
print()
print ("{} keys:".format(pillar_file))
print()
for k in flat_keys:
print(" {}".format(k))
for k in flat_keys:
pillar_keys[k] = True
if args.debug:
print()
# {{{1 Lint states
if args.debug:
print("##################")
print("# LINTING STATES #")
print("##################")
jinja_pattern = re.compile(r"{{\s*pillar\.([0-9a-zA-Z\._]*)\s*}}")
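# The pattern matches pillar usages such as {{ pillar.mykey.sub_key }} (the key
# path here is illustrative) and captures the dotted path for lookup against
# the flattened pillar keys gathered above.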
for state_file in state_files:
with open(state_file, 'r') as f:
line_num = 1
not_keys = {}
if args.debug:
print("{} keys used by state:".format(state_file))
print()
# For each line in a state
for line in f:
# For each Jinja pillar usage in state
for match in re.finditer(jinja_pattern, line):
# Get groups from match
for pillar_str in match.groups():
if args.debug:
print(" {}".format(pillar_str))
# Check if pillar key used exists
if pillar_str not in pillar_keys:
# Create entry in not_keys dict for line if this is the
# first item on this line
if line_num not in not_keys:
not_keys[line_num] = []
# Add pillar key to dict so we can tell user about
# improper usage later
not_keys[line_num].append(pillar_str)
# Increment line number so we can keep track of where errors are
line_num += 1
if args.debug:
print()
# If any errors
if not_keys:
common_prefix = os.path.commonprefix([os.getcwd(), state_file])
pretty_file_name = os.path.relpath(state_file, common_prefix)
print("{} uses pillar keys which do not exist".format(pretty_file_name))
for line_num in not_keys:
print(" Line {}:".format(line_num))
for k in not_keys[line_num]:
print (" {}".format(k))
print()
| 29.263441 | 84 | 0.529855 | 634 | 5,443 | 4.395899 | 0.291798 | 0.017223 | 0.027628 | 0.040187 | 0.112307 | 0.06315 | 0.047363 | 0.047363 | 0 | 0 | 0 | 0.003721 | 0.358075 | 5,443 | 185 | 85 | 29.421622 | 0.793932 | 0.103619 | 0 | 0.229508 | 0 | 0 | 0.152514 | 0.007214 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016393 | false | 0 | 0.04918 | 0 | 0.081967 | 0.155738 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a58e81c41213397323d2016af44248b99f994ce | 15,107 | py | Python | machine.py | rkronberg/ml-project | 3129beb4ad1c92d7813df1611ccebdeca591b513 | [
"MIT"
] | null | null | null | machine.py | rkronberg/ml-project | 3129beb4ad1c92d7813df1611ccebdeca591b513 | [
"MIT"
] | null | null | null | machine.py | rkronberg/ml-project | 3129beb4ad1c92d7813df1611ccebdeca591b513 | [
"MIT"
] | null | null | null | '''
Optimal hyperparameters for CM + Laplacian kernel
Ea: alpha 1e-11, gamma 1e-4
polarizability: alpha 1e-3, gamma 1e-4
HOMO-LUMO gap: alpha 1e-2, gamma 1e-4
Dipole moment: alpha 1e-1, gamma 1e-3
Optimal hyperparameters for BoB + Laplacian kernel
Ea: alpha 1e-11, gamma 1e-5
polarizability: alpha 1e-3, gamma 1e-4
HOMO-LUMO gap: alpha 1e-3, gamma 1e-4
Dipole moment: alpha 1e-1, gamma 1e-3
Optimal hyperparameters for MBTR + Gaussian kernel
Ea: alpha 1e-7, gamma 1e-8
polarizability: alpha 1e-6, gamma 1e-7
HOMO-LUMO gap: alpha 1e-3, gamma 1e-6
Dipole moment: alpha 1e-2, gamma 1e-5
Results for CM + Laplacian kernel
Ea: MAE 0.38, RMSE 0.55, R2 0.9977
polarizability: MAE 0.12, RMSE 0.18, R2 0.9828
HOMO-LUMO gap: MAE 0.56, RMSE 0.70, R2 0.7203
Dipole moment: MAE 0.14, RMSE 0.19, R2 0.5901
Results for BoB + Laplacian kernel
Ea: MAE 0.08, RMSE 0.13, R2 0.9998
polarizability: MAE 0.06, RMSE 0.09, R2 0.9952
HOMO-LUMO gap: MAE 0.23, RMSE 0.31, R2 0.9465
Dipole moment: MAE 0.11, RMSE 0.16, R2 0.7327
Results for MBTR + Gaussian kernel
Ea: MAE 0.04, RMSE 0.06, R2 0.9999
polarizability: MAE 0.02, RMSE 0.04, R2 0.9993
HOMO-LUMO gap: MAE 0.17, RMSE 0.23, R2 0.9686
Dipole moment: MAE 0.08, RMSE 0.11, R2 0.8508
'''
import numpy as np
import matplotlib.pyplot as plt
import os
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.metrics import mean_absolute_error as MAE
from sklearn.metrics import mean_squared_error as MSE
from sklearn.metrics import r2_score as R2
from scipy.special import comb
from itertools import combinations, permutations
## This part of the code reads the raw data (.xyz files) and returns the central quantities stored in arrays
def preprocess(datasize,atoms):
# Selects all molecules with 7 or fewer non-H atoms (3963) and (datasize - 3963) molecules with 8 non-H atoms at random.
# This compensates the underrepresentation of small molecules (molecules with 9 non-H atoms are excluded)
ind = np.concatenate((np.arange(1,3964),np.random.randint(3964,21989,size=datasize-3963)))
# Initialize the variables as empty lists
# natoms = number of atoms in a given molecule
# nonHatoms = number of non-H atoms in a given molecule
# Ea = Atomization energy (Ha)
# dipmom = Dipole moment (D)
# polar = Isotropic polarizability (bohr^3)
# atomlist = list of the atoms constituting a given molecule (e.g. ['C','H','H','H'] for methane)
# coords = xyz coordinates of each atom in a given molecule
natoms,nonHatoms,Ea,polar,dipmom,gap,atomlist,coords=[],[],[],[],[],[],[],[]
# Energies (Ha) of single atoms [H,C,N,O,F]
atomref=[-0.500273,-37.846772,-54.583861,-75.064579,-99.718730]
# Loop over all selected indices (molecules)
for i in ind:
# Initialize list that will contain coordinates and element types of ith molecule
xyz,elemtype,mulliken,nnonH=[],[],[],0
# This pads the index with zeros so that all contain 6 digits (e.g. index 41 -> 000041)
i = str(i).zfill(6)
# Define the path to the .xyz file of ith molecule. Here it is assumed that the dataset is stored in a
# subdirectory "xyz" within the one containing machine.py
# xyz/*.xyz
fpath = os.path.join('xyz',"dsgdb9nsd_%s.xyz" % i)
# Open the file and loop over the lines
with open(fpath) as f:
for j, line in enumerate(f):
if j == 0:
# Number of atoms in molecule
na = int(line)
natoms.append(na)
elif j == 1:
# Properties written on second line. Atomization energy, dipole moment, polarizability, HOMO-LUMO gap
E = float(line.split()[12])
dipmom.append(float(line.split()[5])*0.20819)
polar.append(float(line.split()[6])*0.14818)
gap.append(float(line.split()[9])*27.21139)
elif 2 <= j <= na+1:
# Lines 2 -> na+1 contains element types, coordinates and charges
parts = line.split()
# Index 0 = element type, 1 = x, 2 = y, 3 = z
elemtype.append(parts[0])
# Subtract energy of isolated atom from total energy
E = E - atomref[atoms.index(parts[0])]
if parts[0] != 'H':
nnonH += 1
xyz.append(np.array([float(parts[1]),float(parts[2]),float(parts[3])]))
Ea.append(-E*27.21139)
atomlist.append(elemtype)
coords.append(xyz)
nonHatoms.append(nnonH)
# Return all lists in the form of numpy arrays
return np.array(natoms),np.array(Ea),np.array(dipmom),np.array(polar),np.array(gap), \
np.array(atomlist),np.array(coords),np.array(nonHatoms)
def gauss(x,weight,sigma,mu):
return weight/(sigma*np.sqrt(2*np.pi))*np.exp(-((x-mu)**2)/(2*sigma**2))
# The many-body tensor representation (MBTR) descriptor
def mbtr(atomlist,coords,atoms,Z):
# Decay factor (d) and sigmas are roughly optimal
d=0.5
w1=1
sigma1,sigma2,sigma3=0.1,0.01,0.05
x1=np.linspace(0,10,201)
x2=np.linspace(0,1.25,201)
x3=np.linspace(-1,1,201)
mbtr_output=[]
# keep the `atoms` argument as-is for the one-body bags; rebuilding it via
# set() would scramble the element order used to index Z below
pairs = list(set([''.join(p) for p in combinations('CCHHOONNFF',2)]))
triples = list(set([''.join(p) for p in permutations('CCCHHHOOONNNFFF',3)]))
for i in range(len(atomlist)):
bag1=dict((k,np.zeros(len(x1))) for k in atoms)
bag2=dict((k,np.zeros(len(x2))) for k in pairs)
bag3=dict((k,np.zeros(len(x3))) for k in triples)
MBTRvec=np.array([])
for j in range(len(atomlist[i])):
g1=Z[atoms.index(atomlist[i][j])]
bag1[atomlist[i][j]]+=gauss(x1,w1,sigma1,g1)
for k in range(len(atomlist[i])):
if k > j:
Rjk=np.linalg.norm(coords[i][j]-coords[i][k])
w2=np.exp(-d*Rjk)
g2=1/Rjk
try:
bag2[atomlist[i][j]+atomlist[i][k]]+=gauss(x2,w2,sigma2,g2)
except KeyError:
bag2[atomlist[i][k]+atomlist[i][j]]+=gauss(x2,w2,sigma2,g2)
for l in range(len(atomlist[i])):
if l > k:
Rjl=np.linalg.norm(coords[i][j]-coords[i][l])
Rkl=np.linalg.norm(coords[i][k]-coords[i][l])
w3=np.exp(-d*(Rjk+Rjl+Rkl))
g3=np.dot(coords[i][j]-coords[i][l],coords[i][k]-coords[i][l])/(Rjl*Rkl)
try:
bag3[atomlist[i][j]+atomlist[i][l]+atomlist[i][k]]+=gauss(x3,w3,sigma3,g3)
except KeyError:
bag3[atomlist[i][k]+atomlist[i][l]+atomlist[i][j]]+=gauss(x3,w3,sigma3,g3)
for atom in bag1:
MBTRvec = np.concatenate((MBTRvec,bag1[atom]))
for pair in bag2:
MBTRvec = np.concatenate((MBTRvec,bag2[pair]))
for triple in bag3:
MBTRvec = np.concatenate((MBTRvec,bag3[triple]))
mbtr_output.append(MBTRvec)
return mbtr_output
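# A sketch of what the MBTR vector contains (counts assume the CHONF element
# set used above): 5 one-body bags (element identities), 15 two-body bags
# (inverse distances per element pair) and 125 three-body bags (angle cosines
# per ordered element triple), each discretized on a 201-point grid.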
## The bag-of-bonds (BOB) descriptor
def bob(atomlist,coords,atoms,Z):
bob_output = []
# 18 H atoms in octane -> comb(18,2) H-H pairs (max. size of a bond vector in a bag of bonds)
dim = int(comb(18,2))
perms = list(set([''.join(p) for p in combinations('CCHHOONNFF',2)]))
for i in range(len(atomlist)):
bag=dict((k,dim*[0]) for k in perms)
BoBvec = np.array([])
for j in range(len(atomlist[i])):
for k in range(len(atomlist[i])):
if j > k:
try:
bag[atomlist[i][j]+atomlist[i][k]].insert(0,Z[atoms.index(atomlist[i][j])]* \
Z[atoms.index(atomlist[i][k])]/np.linalg.norm(coords[i][j]-coords[i][k]))
del bag[atomlist[i][j]+atomlist[i][k]][-1]
except KeyError:
bag[atomlist[i][k]+atomlist[i][j]].insert(0,Z[atoms.index(atomlist[i][j])]* \
Z[atoms.index(atomlist[i][k])]/np.linalg.norm(coords[i][j]-coords[i][k]))
# Avoid KeyError raised by "wrong" order of atoms in a bond (e.g. 'CH' -> 'HC')
del bag[atomlist[i][k]+atomlist[i][j]][-1]
for pair in bag:
BoBvec = np.concatenate((BoBvec,np.array(sorted(bag[pair],reverse=True))))
bob_output.append(BoBvec)
return bob_output
## The following function takes the number of atoms in each molecule, the atom types and corresponding coordinates
## and returns an array of corresponding Coulomb matrices (CM)
def coulomb(natoms,atomlist,coords,atoms,Z):
# Specify the dimensions of the Coulomb matrices based on the largest molecule
dim = natoms.max()
# Initialize an array of all Coulomb matrices
CM = np.zeros((len(natoms),dim,dim))
CMvec = []
# Loop over all molecules
for i in range(len(natoms)):
for j in range(len(atomlist[i])):
# Loop over all atom pairs (j,k) in molecule i
for k in range(len(atomlist[i])):
if j == k:
CM[i][j][k] = 0.5*Z[atoms.index(atomlist[i][j])]**2.4
else:
CM[i][j][k] = Z[atoms.index(atomlist[i][j])]*Z[atoms.index(atomlist[i][k])]/ \
np.linalg.norm(coords[i][j]-coords[i][k])
# Sort Coulomb matrix according to descending row norm
# Get the indices in the sorted order
indexlist = np.argsort(-np.linalg.norm(CM[i],axis=1))
# Rearrange the matrix
CM[i] = CM[i][indexlist]
# Convert the lower triangular matrix into a vector and append to a list of Coulomb 'vectors'
CMvec.append(CM[i][np.tril_indices(dim,k=0)])
return CMvec
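# Illustrative example (hypothetical): for an H2 molecule with bond length R
# (in angstrom), the unsorted 2x2 Coulomb matrix is [[0.5, 1/R], [1/R, 0.5]],
# since the diagonal is 0.5*Z**2.4 with Z=1 for hydrogen.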
## Do grid search (if optimal hyperparameters are not known), then training and prediction using KRR
## If doing grid search for optimal parameters use small training set size, like 1k (takes forever otherwise)
def krr(x,y,nonHatoms):
# input() returns a string in Python 3, so compare against 'True'/'False'
# (the original mixed Python 2 raw_input with Python 3 prints)
inp4 = input('Do grid search for optimal hyperparameters? [True/False]\n')
if inp4 == 'True':
inp5 = input('Provide kernel. [laplacian/rbf]\n').split()
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.9,stratify=nonHatoms)
kr = GridSearchCV(KernelRidge(kernel=inp5[0]),cv=5,param_grid={"alpha": np.logspace(-11,-1,11), \
"gamma": np.logspace(-9,-3,7)})
kr.fit(x_train,y_train)
print(kr.best_params_)
elif inp4 == 'False':
inp5 = input('Provide kernel and hyperparameters. [kernel alpha gamma]\n').split()
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.1,stratify=nonHatoms)
kr = KernelRidge(kernel=inp5[0],alpha=float(inp5[1]),gamma=float(inp5[2]))
kr.fit(x_train,y_train)
y_pred = kr.predict(x_test)
mae = MAE(y_test,y_pred)
rmse = np.sqrt(MSE(y_test,y_pred))
r2 = R2(y_test,y_pred)
# Print mean absolute error and root mean squared error
print('Mean absolute error: ' + repr(mae) + ', Root mean squared error: ' + repr(rmse) + \
', R2-score: ' + repr(r2))
return y_pred,y_test
def learning_curve(x,y,nonHatoms):
# Do training with different sample sizes and see how the MAE behaves (learning curve)
inp5 = input('Provide kernel and hyperparameters. [kernel alpha gamma]\n').split()
mae,rmse,r2=[],[],[]
sample_sizes = [50,200,1000,3000,9000]
kr = KernelRidge(kernel=inp5[0],alpha=float(inp5[1]),gamma=float(inp5[2]))
for i in sample_sizes:
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=1-float(i)/len(y),stratify=nonHatoms)
kr.fit(x_train,y_train)
y_pred = kr.predict(x_test)
mae.append(MAE(y_test,y_pred))
rmse.append(np.sqrt(MSE(y_test,y_pred)))
r2.append(R2(y_test,y_pred))
print('Mean absolute error: ' + repr(mae[-1]) + ', Root mean squared error: ' \
+ repr(rmse[-1]) + ', R2-score: ' + repr(r2[-1]))
return y_pred,y_test,mae,rmse,sample_sizes
## The main routine and plotting
def main():
# Just some plot settings
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=14)
plt.rc('xtick', direction='in')
# Preprocess data
datasize=10000
atoms = ['H','C','N','O','F']
Z = [1,6,7,8,9]
natoms,Ea,dipmom,polar,gap,atomlist,coords,nonHatoms = preprocess(datasize,atoms)
inp1 = input('Which descriptor? [CM/BoB/MBTR]\n')
if inp1 == 'CM':
descriptor = coulomb(natoms,atomlist,coords,atoms,Z)
elif inp1 == 'BoB':
descriptor = bob(atomlist,coords,atoms,Z)
elif inp1 == 'MBTR':
descriptor = mbtr(atomlist,coords,atoms,Z)
inp2 = input('Which property? [Ea/gap/polar/dipmom]\n')
plt.figure()
if inp2 == 'Ea':
prop = Ea
plt.title(r'Atomization energy (eV)')
plt.xlabel(r'$\Delta_\mathrm{at}E^\mathrm{DFT}$ (eV)')
plt.ylabel(r'$\Delta_\mathrm{at}E^\mathrm{KRR}$ (eV)')
elif inp2 == 'gap':
prop = gap
plt.title(r'HOMO-LUMO gap (eV)')
plt.xlabel(r'$\Delta\varepsilon^\mathrm{DFT}$ (eV)')
plt.ylabel(r'$\Delta\varepsilon^\mathrm{KRR}$ (eV)')
elif inp2 == 'polar':
prop = polar
plt.title(r'Isotropic polarizability (\r{A}$^3$)')
plt.xlabel(r'$\alpha^\mathrm{DFT}$ (\r{A}$^3$)')
plt.ylabel(r'$\alpha^\mathrm{KRR}$ (\r{A}$^3$)')
elif inp2 == 'dipmom':
prop = dipmom
plt.title(r'Dipole moment (e\r{A})')
plt.xlabel(r'$\mu^\mathrm{DFT}$ (e\r{A})')
plt.ylabel(r'$\mu^\mathrm{KRR}$ (e\r{A})')
inp3 = input('Plot learning curve? [True/False]\n')
if inp3 == 'True':
# Train
y_pred,y_test,mae,rmse,sample_sizes=learning_curve(descriptor,prop,nonHatoms)
np.savetxt('dipmom_BoB.dat',np.c_[y_test,y_pred])
np.savetxt('dipmom_BoB_lc.dat',np.c_[sample_sizes,mae])
# Plot learning curve
plt.semilogx(sample_sizes,mae,'o-',color='blue')
plt.xlabel(r'Training set size')
plt.ylabel(r'MAE')
elif inp3 == 'False':
# Train
y_pred,y_test=krr(descriptor,prop,nonHatoms)
#Plot results
plt.plot(y_test,y_pred,'.',color='blue')
plt.plot(np.linspace(y_test.min(),y_test.max(),1000),np.linspace(y_test.min(),y_test.max(),1000),'k--')
plt.show()
if __name__ == '__main__':
main()
| 41.05163 | 131 | 0.585358 | 2,235 | 15,107 | 3.908725 | 0.205369 | 0.035027 | 0.016026 | 0.018544 | 0.330243 | 0.252518 | 0.196314 | 0.175137 | 0.143887 | 0.119277 | 0 | 0.048628 | 0.271728 | 15,107 | 367 | 132 | 41.163488 | 0.74541 | 0.284702 | 0 | 0.116279 | 0 | 0 | 0.092924 | 0.018343 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037209 | false | 0 | 0.046512 | 0.004651 | 0.116279 | 0.013953 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a5a3364c51511dc6143a8fdb73fad93ea8e274b | 981 | py | Python | allegro_notify/soap/response/__init__.py | marlowww/AllegroNotify | 5fa8581debacbfa6e28cb2f3a0e6c177c88bc724 | [
"MIT"
] | null | null | null | allegro_notify/soap/response/__init__.py | marlowww/AllegroNotify | 5fa8581debacbfa6e28cb2f3a0e6c177c88bc724 | [
"MIT"
] | null | null | null | allegro_notify/soap/response/__init__.py | marlowww/AllegroNotify | 5fa8581debacbfa6e28cb2f3a0e6c177c88bc724 | [
"MIT"
] | null | null | null | from rinse import NS_MAP
from rinse.util import safe_parse_string
from soap import SoapFault
class SoapResponse():
def __init__(self, response):
self._response = response
# Parse response
try:
self._doc = safe_parse_string(response.content)
self._body = self._doc.xpath(
"/soapenv:Envelope/soapenv:Body", namespaces=NS_MAP)[0]
except:
raise SoapFault("ResponseParseError", "Cannot parse response")
self._fault = self._body.find("soapenv:Fault", NS_MAP)
if self._fault is not None:
raise SoapFault(self._fault.find("faultcode").text,
self._fault.find("faultstring").text)
# Get and set Allegro API namespaces
self._ns = NS_MAP.copy()
for i, v in enumerate(self._doc.nsmap.values()):
if v != NS_MAP["soapenv"]:
self._ns["ns{}".format(i)] = v
from soap.response.item_list import *
| 31.645161 | 74 | 0.607543 | 119 | 981 | 4.789916 | 0.462185 | 0.04386 | 0.052632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001425 | 0.284404 | 981 | 30 | 75 | 32.7 | 0.810541 | 0.049949 | 0 | 0 | 0 | 0 | 0.121636 | 0.032293 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.190476 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a5b5fa3f82709fc1c5b8a1b0be0a9e5c031ce11 | 16,969 | py | Python | ecn.py | asappinc/emergent_comms_negotiation | 19ad405dcb83a3a521b6e1752cec075b69aa164b | [
"MIT"
] | 9 | 2020-03-04T13:24:25.000Z | 2022-03-15T09:52:37.000Z | ecn.py | asappinc/emergent_comms_negotiation | 19ad405dcb83a3a521b6e1752cec075b69aa164b | [
"MIT"
] | 2 | 2019-12-30T07:28:33.000Z | 2020-10-13T11:38:34.000Z | ecn.py | asappinc/emergent_comms_negotiation | 19ad405dcb83a3a521b6e1752cec075b69aa164b | [
"MIT"
] | 6 | 2018-03-15T18:08:45.000Z | 2019-07-15T06:49:16.000Z | import json
import time
import argparse
import os
import datetime
from os import path
import numpy as np
import torch
from torch import autograd, optim, nn
from torch.autograd import Variable
import torch.nn.functional as F
import nets
import sampling
import rewards_lib
import alive_sieve
def render_action(t, s, prop, term):
agent = t % 2
speaker = 'A' if agent == 0 else 'B'
utility = s.utilities[:, agent]
print(' ', end='')
if speaker == 'B':
print(' ', end='')
if term[0][0]:
print(' ACC')
else:
print(' ' + ''.join([str(v) for v in s.m_prev[0].view(-1).tolist()]), end='')
print(' %s:%s/%s %s:%s/%s %s:%s/%s' % (
utility[0][0], prop[0][0], s.pool[0][0],
utility[0][1], prop[0][1], s.pool[0][1],
utility[0][2], prop[0][2], s.pool[0][2],
), end='')
print('')
if t + 1 == s.N[0]:
print(' [out of time]')
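# The rendered line shows the speaker's utterance tokens followed by, for each
# of the three item types, utility:proposal/pool (e.g. "3:2/4" reads as
# utility 3, proposing 2 of the 4 pooled items).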
def save_model(model_file, agent_models, agent_opts, start_time, episode):
state = {}
for i in range(2):
state['agent%s' % i] = {}
state['agent%s' % i]['model_state'] = agent_models[i].state_dict()
state['agent%s' % i]['opt_state'] = agent_opts[i].state_dict()
state['episode'] = episode
state['elapsed_time'] = time.time() - start_time
with open(model_file + '.tmp', 'wb') as f:
torch.save(state, f)
os.rename(model_file + '.tmp', model_file)
def load_model(model_file, agent_models, agent_opts):
with open(model_file, 'rb') as f:
state = torch.load(f)
for i in range(2):
agent_models[i].load_state_dict(state['agent%s' % i]['model_state'])
agent_opts[i].load_state_dict(state['agent%s' % i]['opt_state'])
episode = state['episode']
# create a kind of 'virtual' start_time
start_time = time.time() - state['elapsed_time']
return episode, start_time
class State(object):
def __init__(self, N, pool, utilities):
batch_size = N.size()[0]
self.N = N
self.pool = pool
self.utilities = torch.zeros(batch_size, 2, 3).long()
self.utilities[:, 0] = utilities[0]
self.utilities[:, 1] = utilities[1]
self.last_proposal = torch.zeros(batch_size, 3).long()
self.m_prev = torch.zeros(batch_size, 6).long()
def cuda(self):
self.N = self.N.cuda()
self.pool = self.pool.cuda()
self.utilities = self.utilities.cuda()
self.last_proposal = self.last_proposal.cuda()
self.m_prev = self.m_prev.cuda()
def sieve_(self, still_alive_idxes):
self.N = self.N[still_alive_idxes]
self.pool = self.pool[still_alive_idxes]
self.utilities = self.utilities[still_alive_idxes]
self.last_proposal = self.last_proposal[still_alive_idxes]
self.m_prev = self.m_prev[still_alive_idxes]
def run_episode(
batch,
enable_cuda,
enable_comms,
enable_proposal,
prosocial,
agent_models,
# batch_size,
testing,
render=False):
"""
turning testing on disables stochasticity: always pick the argmax
"""
type_constr = torch.cuda if enable_cuda else torch
batch_size = batch['N'].size()[0]
s = State(**batch)
if enable_cuda:
s.cuda()
sieve = alive_sieve.AliveSieve(batch_size=batch_size, enable_cuda=enable_cuda)
actions_by_timestep = []
alive_masks = []
# next two tensors won't be sieved, they will stay the same size throughout
# entire batch, we will update them using sieve.out_idxes[...]
rewards = type_constr.FloatTensor(batch_size, 3).fill_(0)
num_steps = type_constr.LongTensor(batch_size).fill_(10)
term_matches_argmax_count = 0
utt_matches_argmax_count = 0
utt_stochastic_draws = 0
num_policy_runs = 0
prop_matches_argmax_count = 0
prop_stochastic_draws = 0
entropy_loss_by_agent = [
Variable(type_constr.FloatTensor(1).fill_(0)),
Variable(type_constr.FloatTensor(1).fill_(0))
]
if render:
print(' ')
for t in range(10):
agent = t % 2
agent_model = agent_models[agent]
if enable_comms:
_prev_message = s.m_prev
else:
# we don't strictly need to blank them, since they'll be all zeros anyway,
# but defense in depth and all that :)
_prev_message = type_constr.LongTensor(sieve.batch_size, 6).fill_(0)
if enable_proposal:
_prev_proposal = s.last_proposal
else:
# we do need to blank this one though :)
_prev_proposal = type_constr.LongTensor(sieve.batch_size, 3).fill_(0)
nodes, term_a, s.m_prev, this_proposal, _entropy_loss, \
_term_matches_argmax_count, _utt_matches_argmax_count, _utt_stochastic_draws, \
_prop_matches_argmax_count, _prop_stochastic_draws = agent_model(
pool=Variable(s.pool),
utility=Variable(s.utilities[:, agent]),
m_prev=Variable(s.m_prev),
prev_proposal=Variable(_prev_proposal),
testing=testing
)
entropy_loss_by_agent[agent] += _entropy_loss
actions_by_timestep.append(nodes)
term_matches_argmax_count += _term_matches_argmax_count
num_policy_runs += sieve.batch_size
utt_matches_argmax_count += _utt_matches_argmax_count
utt_stochastic_draws += _utt_stochastic_draws
prop_matches_argmax_count += _prop_matches_argmax_count
prop_stochastic_draws += _prop_stochastic_draws
if render and sieve.out_idxes[0] == 0:
render_action(
t=t,
s=s,
term=term_a,
prop=this_proposal
)
new_rewards = rewards_lib.calc_rewards(
t=t,
s=s,
term=term_a
)
rewards[sieve.out_idxes] = new_rewards
s.last_proposal = this_proposal
sieve.mark_dead(term_a)
sieve.mark_dead(t + 1 >= s.N)
alive_masks.append(sieve.alive_mask.clone())
sieve.set_dead_global(num_steps, t + 1)
if sieve.all_dead():
break
s.sieve_(sieve.alive_idxes)
sieve.self_sieve_()
if render:
print(' r: %.2f' % rewards[0].mean())
print(' ')
return actions_by_timestep, rewards, num_steps, alive_masks, entropy_loss_by_agent, \
term_matches_argmax_count, num_policy_runs, utt_matches_argmax_count, utt_stochastic_draws, \
prop_matches_argmax_count, prop_stochastic_draws
def safe_div(a, b):
"""
returns a / b, unless b is zero, in which case returns 0
this is primarily for use in cases where b might be systematically zero, eg because comms are disabled or similar
"""
return 0 if b == 0 else a / b
def run(enable_proposal, enable_comms, seed, prosocial, logfile, model_file, batch_size,
term_entropy_reg, utterance_entropy_reg, proposal_entropy_reg, enable_cuda,
no_load, testing, test_seed, render_every_seconds):
"""
testing option will:
- use argmax, ie disable stochastic draws
- not run optimizers
- not save model
"""
type_constr = torch.cuda if enable_cuda else torch
if seed is not None:
np.random.seed(seed)
torch.manual_seed(seed)
train_r = np.random.RandomState(seed)
else:
train_r = np.random
test_r = np.random.RandomState(test_seed)
test_batches = sampling.generate_test_batches(batch_size=batch_size, num_batches=5, random_state=test_r)
test_hashes = sampling.hash_batches(test_batches)
episode = 0
start_time = time.time()
agent_models = []
agent_opts = []
for i in range(2):
model = nets.AgentModel(
enable_comms=enable_comms,
enable_proposal=enable_proposal,
term_entropy_reg=term_entropy_reg,
utterance_entropy_reg=utterance_entropy_reg,
proposal_entropy_reg=proposal_entropy_reg
)
if enable_cuda:
model = model.cuda()
agent_models.append(model)
agent_opts.append(optim.Adam(params=agent_models[i].parameters()))
if path.isfile(model_file) and not no_load:
episode, start_time = load_model(
model_file=model_file,
agent_models=agent_models,
agent_opts=agent_opts)
print('loaded model')
elif testing:
print('')
print('ERROR: must have loadable model to use --testing option')
print('')
return
last_print = time.time()
rewards_sum = type_constr.FloatTensor(3).fill_(0)
steps_sum = 0
count_sum = 0
for d in ['logs', 'model_saves']:
if not path.isdir(d):
os.makedirs(d)
f_log = open(logfile, 'w')
f_log.write('meta: %s\n' % json.dumps({
'enable_proposal': enable_proposal,
'enable_comms': enable_comms,
'prosocial': prosocial,
'seed': seed
}))
last_save = time.time()
baseline = type_constr.FloatTensor(3).fill_(0)
term_matches_argmax_count = 0
num_policy_runs = 0
utt_matches_argmax_count = 0
utt_stochastic_draws = 0
prop_matches_argmax_count = 0
prop_stochastic_draws = 0
while True:
render = time.time() - last_print >= render_every_seconds
# render = True
batch = sampling.generate_training_batch(batch_size=batch_size, test_hashes=test_hashes, random_state=train_r)
actions, rewards, steps, alive_masks, entropy_loss_by_agent, \
_term_matches_argmax_count, _num_policy_runs, _utt_matches_argmax_count, _utt_stochastic_draws, \
_prop_matches_argmax_count, _prop_stochastic_draws = run_episode(
batch=batch,
enable_cuda=enable_cuda,
enable_comms=enable_comms,
enable_proposal=enable_proposal,
agent_models=agent_models,
prosocial=prosocial,
# batch_size=batch_size,
render=render,
testing=testing)
term_matches_argmax_count += _term_matches_argmax_count
utt_matches_argmax_count += _utt_matches_argmax_count
utt_stochastic_draws += _utt_stochastic_draws
num_policy_runs += _num_policy_runs
prop_matches_argmax_count += _prop_matches_argmax_count
prop_stochastic_draws += _prop_stochastic_draws
if not testing:
for i in range(2):
agent_opts[i].zero_grad()
reward_loss_by_agent = [0, 0]
baselined_rewards = rewards - baseline
rewards_by_agent = []
for i in range(2):
if prosocial:
rewards_by_agent.append(baselined_rewards[:, 2])
else:
rewards_by_agent.append(baselined_rewards[:, i])
sieve_playback = alive_sieve.SievePlayback(alive_masks, enable_cuda=enable_cuda)
for t, global_idxes in sieve_playback:
agent = t % 2
if len(actions[t]) > 0:
for action in actions[t]:
_rewards = rewards_by_agent[agent]
_reward = _rewards[global_idxes].float().contiguous().view(
sieve_playback.batch_size, 1)
_reward_loss = - (action * Variable(_reward))
_reward_loss = _reward_loss.sum()
reward_loss_by_agent[agent] += _reward_loss
for i in range(2):
loss = entropy_loss_by_agent[i] + reward_loss_by_agent[i]
loss.backward()
agent_opts[i].step()
rewards_sum += rewards.sum(0)
steps_sum += steps.sum()
baseline = 0.7 * baseline + 0.3 * rewards.mean(0)
count_sum += batch_size
if render:
"""
run the test batches, print the results
"""
test_rewards_sum = 0
for test_batch in test_batches:
actions, test_rewards, steps, alive_masks, entropy_loss_by_agent, \
_term_matches_argmax_count, _num_policy_runs, _utt_matches_argmax_count, _utt_stochastic_draws, \
_prop_matches_argmax_count, _prop_stochastic_draws = run_episode(
batch=test_batch,
enable_cuda=enable_cuda,
enable_comms=enable_comms,
enable_proposal=enable_proposal,
agent_models=agent_models,
prosocial=prosocial,
render=True,
testing=True)
test_rewards_sum += test_rewards[:, 2].mean()
print('test reward=%.3f' % (test_rewards_sum / len(test_batches)))
time_since_last = time.time() - last_print
if prosocial:
baseline_str = '%.2f' % baseline[2]
# rewards_str = '%.2f' % (rewards_sum[2] / count_sum)
else:
baseline_str = '%.2f,%.2f' % (baseline[0], baseline[1])
rewards_str = '%.2f,%.2f,%.2f' % (rewards_sum[0] / count_sum, rewards_sum[1] / count_sum, rewards_sum[2] / count_sum)
print('e=%s train=%s b=%s games/sec %s avg steps %.4f argmaxp term=%.4f utt=%.4f prop=%.4f' % (
episode,
rewards_str,
baseline_str,
int(count_sum / time_since_last),
steps_sum / count_sum,
term_matches_argmax_count / num_policy_runs,
safe_div(utt_matches_argmax_count, utt_stochastic_draws),
prop_matches_argmax_count / prop_stochastic_draws
))
f_log.write(json.dumps({
'episode': episode,
'avg_reward_0': rewards_sum[2] / count_sum,
'test_reward': test_rewards_sum / len(test_batches),
'avg_steps': steps_sum / count_sum,
'games_sec': count_sum / time_since_last,
'elapsed': time.time() - start_time,
'argmaxp_term': (term_matches_argmax_count / num_policy_runs),
'argmaxp_utt': safe_div(utt_matches_argmax_count, utt_stochastic_draws),
'argmaxp_prop': (prop_matches_argmax_count / prop_stochastic_draws)
}) + '\n')
f_log.flush()
last_print = time.time()
steps_sum = 0
rewards_sum.fill_(0)
term_matches_argmax_count = 0
num_policy_runs = 0
utt_matches_argmax_count = 0
utt_stochastic_draws = 0
prop_matches_argmax_count = 0
prop_stochastic_draws = 0
count_sum = 0
if not testing and time.time() - last_save >= 30.0:
save_model(
model_file=model_file,
agent_models=agent_models,
agent_opts=agent_opts,
start_time=start_time,
episode=episode)
print('saved model')
last_save = time.time()
episode += 1
f_log.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model-file', type=str, default='model_saves/model.dat')
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument('--test-seed', type=int, default=123, help='used for generating test game set')
parser.add_argument('--seed', type=int, help='optional')
parser.add_argument('--term-entropy-reg', type=float, default=0.05)
parser.add_argument('--utterance-entropy-reg', type=float, default=0.001)
parser.add_argument('--proposal-entropy-reg', type=float, default=0.05)
parser.add_argument('--disable-proposal', action='store_true')
parser.add_argument('--disable-comms', action='store_true')
parser.add_argument('--disable-prosocial', action='store_true')
parser.add_argument('--render-every-seconds', type=int, default=30)
parser.add_argument('--testing', action='store_true', help='turn off learning; always pick argmax')
parser.add_argument('--enable-cuda', action='store_true')
parser.add_argument('--no-load', action='store_true')
parser.add_argument('--name', type=str, default='', help='used for logfile naming')
parser.add_argument('--logfile', type=str, default='logs/log_%Y%m%d_%H%M%S{name}.log')
args = parser.parse_args()
args.enable_comms = not args.disable_comms
args.enable_proposal = not args.disable_proposal
args.prosocial = not args.disable_prosocial
args.logfile = args.logfile.format(**args.__dict__)
args.logfile = datetime.datetime.strftime(datetime.datetime.now(), args.logfile)
del args.__dict__['disable_comms']
del args.__dict__['disable_proposal']
del args.__dict__['disable_prosocial']
del args.__dict__['name']
run(**args.__dict__)
| 38.830664 | 129 | 0.613236 | 2,145 | 16,969 | 4.522611 | 0.142657 | 0.052263 | 0.072364 | 0.029482 | 0.353366 | 0.327801 | 0.273271 | 0.231213 | 0.199464 | 0.183074 | 0 | 0.012472 | 0.281808 | 16,969 | 436 | 130 | 38.919725 | 0.78354 | 0.0452 | 0 | 0.245283 | 0 | 0.002695 | 0.067497 | 0.007472 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024259 | false | 0 | 0.040431 | 0 | 0.078167 | 0.056604 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a5cd88fd8c0d16613c9c5ac3d7d23b3867ecc8f | 2,724 | py | Python | nexar_requests.py | NexarDeveloper/nexar-examples-py | 9a55964d9a847cf124e8928308369074e67d1dfe | [
"MIT"
] | null | null | null | nexar_requests.py | NexarDeveloper/nexar-examples-py | 9a55964d9a847cf124e8928308369074e67d1dfe | [
"MIT"
] | null | null | null | nexar_requests.py | NexarDeveloper/nexar-examples-py | 9a55964d9a847cf124e8928308369074e67d1dfe | [
"MIT"
] | null | null | null | """Resources for making Nexar requests."""
import os, requests, re
from typing import Callable, Dict, Iterator
from requests_toolbelt import MultipartEncoder
NEXAR_URL = "https://api.nexar.com/graphql"
NEXAR_FILE_URL = "https://files.nexar.com/Upload/WorkflowAttachment"
class NexarClient:
def __init__(self, token) -> None:
self.s = requests.session()
self.s.headers.update({"token": token})
self.s.keep_alive = False
def get_query(self, query: str, variables: Dict) -> dict:
"""Return Nexar response for the query."""
try:
r = self.s.post(
NEXAR_URL,
json={"query": query, "variables": variables},
)
except Exception as e:
print(e)
raise Exception("Error while getting Nexar response")
response = r.json()
if ("errors" in response):
for error in response["errors"]: print(error["message"])
raise SystemExit
return response["data"]
def upload_file(self, workspaceUrl: str, path: str, container: str) -> str:
"""Return Nexar response for the file upload."""
try:
multipart_data = MultipartEncoder(
fields = {
'file': (os.path.basename(path), open(path, 'rb'), 'text/plain'),
'workspaceUrl': workspaceUrl,
'container': container,
}
)
r = self.s.post(
NEXAR_FILE_URL,
data = multipart_data,
headers = {
'Content-Type': multipart_data.content_type,
}
)
except Exception as e:
print(e)
raise Exception("Error while uploading file to Nexar")
return r.text
class Node:
def __init__(self, client, query: str, variables: Dict, f: Callable) -> None:
self.client = client
self.query = query
self.variables = variables
self.f = f
self.name = re.search(r"after\s*:\s*\$(\w*)", query).group(1)
def __iter__(self) -> Iterator:
self.pageInfo = {"hasNextPage": True}
return self
def __next__(self):
if (not self.pageInfo["hasNextPage"]): raise StopIteration
data = self.client.get_query(self.query, self.variables)
self.pageInfo = self.f(data)["pageInfo"]
self.variables[self.name] = self.pageInfo["endCursor"]
return self.f(data)["nodes"]
def NodeIter(self, query: str, variables: dict, f: Callable) -> Iterator:
return NexarClient.Node(self, query, variables, f)
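# Illustrative usage (sketch; the query text, cursor variable name and field
# names below are hypothetical):
#   pages = client.NodeIter(QUERY, {"afterCursor": None},
#                           lambda data: data["parts"]["partSearch"])
#   for nodes in pages:
#       for node in nodes:
#           ...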
| 32.819277 | 85 | 0.552863 | 289 | 2,724 | 5.103806 | 0.33564 | 0.016949 | 0.034576 | 0.042712 | 0.179661 | 0.105763 | 0.065085 | 0.065085 | 0.065085 | 0.065085 | 0 | 0.000547 | 0.329295 | 2,724 | 82 | 86 | 33.219512 | 0.806787 | 0.042584 | 0 | 0.129032 | 0 | 0 | 0.118441 | 0.009645 | 0 | 0 | 0 | 0 | 0 | 1 | 0.112903 | false | 0 | 0.048387 | 0.016129 | 0.274194 | 0.048387 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a5ea12ecc4b49137705efc9bc329a10e0a04827 | 1,560 | py | Python | lab3/test.py | dssgabriel/obhpc | 6358dffc85b18e1e65cb80f8e9b11b191b86463d | [
"MIT"
] | null | null | null | lab3/test.py | dssgabriel/obhpc | 6358dffc85b18e1e65cb80f8e9b11b191b86463d | [
"MIT"
] | null | null | null | lab3/test.py | dssgabriel/obhpc | 6358dffc85b18e1e65cb80f8e9b11b191b86463d | [
"MIT"
] | null | null | null | import numpy as np
import sys
import time
import mblas
# Python matrix multiplication
def sgemm_py(A, B, C, n):
for i in range(0, n):
for j in range(0, n):
loc = A[i * n + j]
for k in range(0, n):
C[i * n + k] += loc * B[j * n + k]
# Time measurement of Python sgemm
def measure_py(A, B, n):
C = np.zeros((n * n,), dtype=np.float32)
before = time.perf_counter()
sgemm_py(A, B, C, n)
after = time.perf_counter()
return after - before
# Time measurement of Numpy sgemm
def measure_np(A, B, n):
C = np.zeros((n, n), dtype=np.float32)
A = A.reshape(n, n)
B = B.reshape(n, n)
before = time.perf_counter()
C = np.dot(A, B)
after = time.perf_counter()
return after - before
# Time measurement of C sgemm
def measure_c(A, B, n):
C = np.zeros((n * n,), dtype=np.float32)
before = time.perf_counter()
mblas.sgemm_c(A, B, C, n)
after = time.perf_counter()
return after - before
# Main function
def main():
size = int(sys.argv[1])
A = np.random.rand(size * size).astype(np.float32)
B = np.random.rand(size * size).astype(np.float32)
elapsed_py = measure_py(A, B, size)
elapsed_np = measure_np(A, B, size)
elapsed_c = measure_c(A, B, size)
#elapsed_avx2 = measure_avx(A, B, size)
#elapsed_avx512 = measure_avx(A, B, size)
print("py / c: ", elapsed_py / elapsed_c);
print("py / np: ", elapsed_py / elapsed_np);
print("np / c: ", elapsed_np / elapsed_c);
if __name__ == "__main__":
main()
| 23.636364 | 54 | 0.592949 | 257 | 1,560 | 3.459144 | 0.210117 | 0.026997 | 0.101237 | 0.058493 | 0.445444 | 0.409449 | 0.389201 | 0.389201 | 0.310461 | 0.310461 | 0 | 0.015598 | 0.260256 | 1,560 | 65 | 55 | 24 | 0.754766 | 0.136538 | 0 | 0.261905 | 0 | 0 | 0.026139 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.119048 | false | 0 | 0.095238 | 0 | 0.285714 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a5efdd3fa22d91097ebe01edb43cd8a8309df98 | 2,817 | py | Python | main.py | devlocalhost/covidpy | 4c2e8ce8831a389b9ca5bad8ed64aa3c4fb5c60c | [
"MIT"
] | null | null | null | main.py | devlocalhost/covidpy | 4c2e8ce8831a389b9ca5bad8ed64aa3c4fb5c60c | [
"MIT"
] | null | null | null | main.py | devlocalhost/covidpy | 4c2e8ce8831a389b9ca5bad8ed64aa3c4fb5c60c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from traceback import format_exc
from requests import get
from colorhex import colorex, BOLD
from datetime import datetime
from sys import exit
from os import system
BLURPLE = '7289da'
GREEN = '43b581'
YELLOW = 'fdcc4b'
RED = 'f04947'
def main():
system('clear')
country = input(colorex('Enter a country\'s name, press enter without typing anything to auto detect your country or e to exit\n -> ', GREEN, BOLD))
if country == '':
try:
auto_country = get('http://www.geoplugin.net/json.gp').json()
except Exception as exc:
system('clear')
print(colorex(f'An error occurred while trying to auto detect country. Please try again or enter the country\'s name and make sure you have internet access\nTraceback: {exc}', RED, BOLD))
input(colorex('Press enter to go back\n-> ', GREEN, BOLD))
system('clear')
main()
country = auto_country['geoplugin_countryName']
getcovidstats(country)
elif country == 'e':
system('clear')
exit()
elif country != '':
getcovidstats(country)
def getcovidstats(country):
try:
resp = get(f'https://disease.sh/v3/covid-19/countries/{country}').json()
except Exception as exc:
system('clear')
print(colorex(f'An error occurred while trying to get covid 19 stats. Please try again later and make sure you have internet access\nTraceback: {exc}', RED, BOLD))
input(colorex('Press enter to go back\n-> ', GREEN, BOLD))
system('clear')
main()
try:
country_name = resp['country']
except KeyError as exc:
system('clear')
print(colorex(f'Invalid country name, or the country doesn\'t have stats. Please try again\nTraceback: {format_exc()}', RED, BOLD))
input(colorex('Press enter to go back\n-> ', GREEN, BOLD))
system('clear')
main()
short_country_name = resp['countryInfo']['iso2']
country_population = resp['population']
total_cases = resp['cases']
cases_today = resp['todayCases']
total_deaths = resp['deaths']
deaths_today = resp['todayDeaths']
total_recovered = resp['recovered']
today_recovered = resp['todayRecovered']
continent = resp['continent']
updated_at = datetime.fromtimestamp(resp['updated'] / 1000.0).strftime('%d %B %Y at %I:%M:%S %p')
system('clear')
print(colorex(f'Country: {country_name} ({short_country_name})', BLURPLE, BOLD))
print(colorex(f'Continent: {continent}', BLURPLE, BOLD))
print(colorex(f'Population: {country_population}', GREEN, BOLD))
print(colorex(f'Total cases: {total_cases}, Today: {cases_today}', RED, BOLD))
print(colorex(f'Total deaths: {total_deaths}, Today: {deaths_today}', RED, BOLD))
print(colorex(f'Total recovered: {total_recovered}, Today: {today_recovered}', GREEN, BOLD))
print(colorex(f'Updated at: {updated_at}', YELLOW, BOLD))
input(colorex('Press enter to go back\n-> ', GREEN, BOLD))
system('clear')
main()
main() | 29.652632 | 188 | 0.709265 | 394 | 2,817 | 5.002538 | 0.30203 | 0.055809 | 0.065956 | 0.05175 | 0.354135 | 0.292745 | 0.292745 | 0.24759 | 0.24759 | 0.24759 | 0 | 0.011213 | 0.14519 | 2,817 | 95 | 189 | 29.652632 | 0.807309 | 0.007455 | 0 | 0.382353 | 0 | 0.044118 | 0.424535 | 0.015379 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.088235 | 0 | 0.117647 | 0.147059 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a60ed8d5b51faf0acae3eb73b2aefe676b8f9fb | 407 | py | Python | data-detective-airflow/dags/dags/dummy/code/code.py | dmitriy-e/metadata-governance | 018a879951dee3f3c2c05ac8e05b8360dd7f4ab3 | [
"Apache-2.0"
] | 5 | 2021-12-01T09:55:23.000Z | 2021-12-21T16:23:33.000Z | data-detective-airflow/dags/dags/dummy/code/code.py | dmitriy-e/metadata-governance | 018a879951dee3f3c2c05ac8e05b8360dd7f4ab3 | [
"Apache-2.0"
] | 1 | 2022-03-14T16:50:41.000Z | 2022-03-14T16:50:41.000Z | data-detective-airflow/dags/dags/dummy/code/code.py | dmitriy-e/metadata-governance | 018a879951dee3f3c2c05ac8e05b8360dd7f4ab3 | [
"Apache-2.0"
] | 2 | 2021-11-03T09:43:09.000Z | 2021-11-17T10:16:29.000Z | import yaml
from pandas import DataFrame
def val_translate(context, in_df: DataFrame, file_name: str) -> DataFrame:
task = context.get('task')
out_df = in_df.copy()
with open(f'{task.dag.etc_dir}/{file_name}', 'r', encoding='utf-8') as cfg:
config = yaml.safe_load(cfg)
out_df['test'] = out_df.apply(
lambda row: config[row['test']],
axis=1
)
return out_df
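# Illustrative config (hypothetical): a YAML file mapping source values to
# translated ones, e.g.
#   old_value: new_value
# Each row's 'test' column value is replaced via this mapping.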
| 27.133333 | 79 | 0.633907 | 62 | 407 | 3.983871 | 0.645161 | 0.080972 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006289 | 0.218673 | 407 | 14 | 80 | 29.071429 | 0.77044 | 0 | 0 | 0 | 0 | 0 | 0.117936 | 0.07371 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a61638ec41b218d13f9ed19e5fe88c4dfeca072 | 331 | py | Python | Regex/3_Group_Groups_ Groupdict.py | FaranakAlikhah/ADM-HW1 | f4255112c58a4a200d04c943c74f096cc31e9dad | [
"MIT"
] | null | null | null | Regex/3_Group_Groups_ Groupdict.py | FaranakAlikhah/ADM-HW1 | f4255112c58a4a200d04c943c74f096cc31e9dad | [
"MIT"
] | null | null | null | Regex/3_Group_Groups_ Groupdict.py | FaranakAlikhah/ADM-HW1 | f4255112c58a4a200d04c943c74f096cc31e9dad | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # section 13.Regex and Parsing challenges :
#
# ### writer : Faranak Alikhah 1954128
# ### 3. Group(), Groups() & Groupdict() :
# In[ ]:
import re
s= input()
pattern = r'([a-zA-Z0-9])\1+'  # an alphanumeric character repeated consecutively (spaces removed from the class so they are not matched)
m=re.search(pattern,s)
if m:
print(m.group(1))
else:
print(-1)
#
| 13.791667 | 47 | 0.589124 | 51 | 331 | 3.823529 | 0.784314 | 0.020513 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.060837 | 0.205438 | 331 | 23 | 48 | 14.391304 | 0.680608 | 0.528701 | 0 | 0 | 0 | 0 | 0.130435 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a631c8b774ce30119cf2fb20a75fd6b612229ac | 2,238 | py | Python | checker/__main__.py | vyahello/pep8-checker | fe5d0746201d3a26ec2ae0c9c4a70203700af1f0 | [
"MIT"
] | 3 | 2020-08-09T15:17:44.000Z | 2022-03-19T22:16:25.000Z | checker/__main__.py | vyahello/pep8-checker | fe5d0746201d3a26ec2ae0c9c4a70203700af1f0 | [
"MIT"
] | 4 | 2020-08-12T21:25:16.000Z | 2021-04-17T10:57:57.000Z | checker/__main__.py | vyahello/pep8-checker | fe5d0746201d3a26ec2ae0c9c4a70203700af1f0 | [
"MIT"
] | 1 | 2020-12-17T10:06:37.000Z | 2020-12-17T10:06:37.000Z | """Represents executable entrypoint for `pep8-checker` application."""
import http
import os
from typing import Any, Dict, Optional
from pathlib import Path
import attr
from bottle import TEMPLATE_PATH, abort, request, route, run, view
import requests
TEMPLATE_PATH.append(str(Path('./') / 'checker' / 'views'))
def api_url() -> str:
"""Returns AWS_ENDPOINT URL."""
url: str = os.environ.get('AWS_ENDPOINT', '')
if not url:
raise RuntimeError('Please set API_URL environment variable')
return url
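# Example (hypothetical endpoint):
#   export AWS_ENDPOINT=https://<api-id>.execute-api.<region>.amazonaws.com/check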
@attr.dataclass(frozen=True, slots=True)
class Server:
"""The class represents a server endpoint."""
host: str = '0.0.0.0'
port: str = os.environ.get('PORT', '5050')
is_debug: bool = True
reloader: bool = True
def as_json(self) -> Dict[str, Any]:
"""Returns server configuration as a dict."""
return {
'host': self.host,
'port': self.port,
'is_debug': self.is_debug,
'reloader': self.reloader,
}
@route('/', method=('GET', 'POST'))
@view(tpl_name='index')
def index() -> Dict[str, str]:
"""Specify index page view.
Returns: <dict[str, str]> response from AWS lambda server.
"""
title = 'PEP8 Checker'
code: str = request.forms.get('code', '') # pylint: disable=no-member
if code:
response: Dict[Any, Any] = requests.post(
url=api_url(), json={'code': code}
).json()
error: Optional[str] = response.get('errorMessage')
exception: Optional[str] = response.get('errorType')
if error and exception:
abort(
code=int(http.HTTPStatus.BAD_REQUEST),
text=f'Lambda function returned status {exception} exception',
)
return {'title': title, 'code': code, 'pep_errors': response['body']}
return {'title': title, 'code': code, 'pep_errors': ''}
def easyrun(server: Server = Server()) -> None:
"""Launches a web application.
Args:
server: <Server> a given server configuration.
"""
run(
host=server.host,
port=server.port,
debug=server.is_debug,
reloader=server.reloader,
)
if __name__ == '__main__':
easyrun()
| 27.975 | 78 | 0.602324 | 268 | 2,238 | 4.940299 | 0.391791 | 0.021148 | 0.018127 | 0.022659 | 0.049849 | 0.049849 | 0.049849 | 0 | 0 | 0 | 0 | 0.005974 | 0.252011 | 2,238 | 79 | 79 | 28.329114 | 0.784946 | 0.164433 | 0 | 0 | 0 | 0 | 0.143643 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075472 | false | 0 | 0.132075 | 0 | 0.377358 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a66f09c5519425a30cfc0792ff8af6ceb340d6d | 15,676 | py | Python | Projects/Space Invaders/main c clara.py | busterbeam/pygame | 5dbbfd8dff5785e5d1010909aec51e9ef8d1cf5c | [
"MIT"
] | 6 | 2018-05-26T13:06:34.000Z | 2021-11-08T11:44:28.000Z | Projects/Space Invaders/main c clara.py | busterbeam/pygame | 5dbbfd8dff5785e5d1010909aec51e9ef8d1cf5c | [
"MIT"
] | 1 | 2018-09-11T18:35:56.000Z | 2019-05-06T13:41:32.000Z | Projects/Space Invaders/main c clara.py | busterbeam/pygame | 5dbbfd8dff5785e5d1010909aec51e9ef8d1cf5c | [
"MIT"
] | 11 | 2018-06-01T01:53:11.000Z | 2021-08-11T01:00:17.000Z | import pygame
from pygame.locals import *
from os.path import realpath, dirname
from time import time
from random import randint
def main():
	pygame.init() # the display must be initialized before set_mode() in load()
	running, settings = load()
while running:
settings = update(settings)
draw(**settings)
running = check_exit(**settings)
pygame.quit()
quit()
def load():
screen_size = (450, 333)
screen = pygame.display.set_mode(screen_size)
pygame.display.set_caption("Shoot'n up")
game_object = {
'player' : Player(),
'enemy' : [],
'shoot' : [],
'jump_scare': [],
'hit_effect': [],
'shoot_effect':[],
'bg' : Background(),
'HUD' : [Sprite(dirname(realpath(__file__))+'/assets/img/effects/HUD_Vidas.png', 20, 300)],
}
game_object = load_level(game_object, 1)
path = dirname(realpath(__file__))
last_shoot = time()
level = 1
return True, {
'screen_size' : screen_size,
'screen' : screen,
'game_object' : game_object,
'path' : path,
'exit_request' : False,
'last_shoot' : last_shoot,
'level' : level,
'enemy_last_shoot': time()
}
def load_level(game_object, what_level):
path = dirname(realpath(__file__))
if what_level==1:
for j in range(10):
if j%2:
game_object['enemy'].append(Enemy(20+40*j, 40, 0, 0.5))
else:
game_object['enemy'].append(Enemy(20+40*j, 40, 0, 1))
elif what_level==2:
for i in range(3):
for j in range(10):
game_object['enemy'].append(Enemy(20+40*j, -70-80*i, i, .8))
elif what_level==3:
for i in range(5):
if i%2:
for j in range(5):
x = 20+(440/5*j)
game_object['enemy'].append(Enemy(x, -70-80*i, i%3, .8))
else:
for j in range(10):
x = 20+(440/10*j)
game_object['enemy'].append(Enemy(x, -70-80*i, i%3, .8))
else:
for i in range(randint(2, 8)):
foo = randint(5,10)
for j in range(foo):
x = 20+(440/foo*j)
if (i ==1 or i ==5) and j%2:
game_object['enemy'].append(Enemy(x, -70-80*i, i%3, 2))
else:
game_object['enemy'].append(Enemy(x, -70-80*i, i%3, 1))
return game_object
def update(settings):
settings = check_keys(settings)
if len(settings['game_object']['enemy'])==0:
settings['level'] +=1
load_level(settings['game_object'], settings['level'])
settings['game_object']['player'].load_img()
settings['game_object']['shoot'] = update_shoot(settings['game_object']['shoot'])
settings['game_object']['bg'].tile, settings['game_object']['bg'].time = update_bg(settings['game_object']['bg'].tile, settings['game_object']['bg'].time)
settings['game_object']['bg'] = parallax(settings['game_object']['player'], settings['game_object']['bg'])
settings['game_object']['enemy'] = update_enemy(settings['game_object']['enemy'], settings['screen_size'], settings)
settings['game_object'] = collider(settings['game_object'])
for fire in settings['game_object']['player'].fires:
fire.animation.update()
for gO in settings['game_object']['enemy']:
gO.fire.animation.update()
for explosion in settings['game_object']['hit_effect']:
explosion.animation.update()
if explosion.animation.pos == 7:
settings['game_object']['hit_effect'].remove(explosion)
for gO in settings['game_object']['shoot_effect']:
gO.animation.update()
gO.x += settings['game_object']['player'].x_speed
if gO.animation.pos == 6:
settings['game_object']['shoot_effect'].remove(gO)
return settings
def collider(game_object):
for shoot in game_object['shoot']:
if shoot.origin=='player':
for enemy in game_object['enemy']:
if (shoot.x>enemy.x and shoot.x<enemy.x+enemy.width) or \
(shoot.x+shoot.width>enemy.x and shoot.x+shoot.width<enemy.x+enemy.width):
if (shoot.y<enemy.y+enemy.height and shoot.y>enemy.y):
x, y = shoot.x-53/2, shoot.y-25
game_object['hit_effect'].append(Hit_effect(x, y))
						enemy.hit_damage()
game_object['shoot'].remove(shoot)
if enemy.hp<=0:
game_object['enemy'].remove(enemy)
break
			if shoot.y<0:
				try:
					game_object['shoot'].remove(shoot)
				except ValueError:
					pass # already removed by a collision above
if shoot.origin=='enemy':
player = game_object['player']
if (shoot.x>player.x and shoot.x<player.x+player.width) or \
(shoot.x+shoot.width>player.x and shoot.x+shoot.width<player.x+player.width):
if (shoot.y<player.y+player.height and shoot.y>player.y):
x, y = shoot.x-53/2, shoot.y-25
game_object['hit_effect'].append(Hit_effect(x, y))
game_object['shoot'].remove(shoot)
game_object['player'].hp -=1
break
			if shoot.y<0:
				try:
					game_object['shoot'].remove(shoot)
				except ValueError:
					pass # already removed by a collision above
return game_object
def update_enemy(enemy, screen_size, settings):
	enemy_who_gonna_shoot = randint(0, len(enemy)) # inclusive bound: picking len(enemy) means nobody fires this frame
index =0
for gO in enemy:
if index == enemy_who_gonna_shoot and time()-settings['enemy_last_shoot']>0.5:
x = gO.x+gO.width/2-8
y = gO.y+gO.height
settings['game_object']['shoot'].append(Shoot(x,y, 'enemy'))
settings['game_object']['shoot_effect'].append(Shoot_effect(x,y, 'enemy'))
settings['enemy_last_shoot'] = time()
gO.y += gO.y_speed
if time()-gO.init>0.1 and gO.hit_mark:
gO.image_return()
gO.hit_mark = False
if gO.y>screen_size[1]:
enemy.remove(gO)
index +=1
return enemy
def parallax(player, bg):
middle = player.x
foo = -middle/225.00*25
bg.x = foo
return bg
def update_shoot(shoot):
for gO in shoot:
gO.y+=gO.y_speed
return shoot
def check_keys(settings):
k = pygame.key.get_pressed()
settings['game_object']['player'].player_move_key(k, settings['screen_size'])
for e in pygame.event.get():
if e.type == QUIT or (e.type == KEYDOWN and e.key == K_ESCAPE):
settings['exit_request'] = True
if k[K_SPACE] and time()-settings['last_shoot']>0.24:
x, y = settings['game_object']['player'].x, settings['game_object']['player'].y
settings['game_object']['shoot'].append(Shoot(x,y+3, 'player'))
settings['game_object']['shoot'].append(Shoot(x+24,y+3, 'player'))
settings['game_object']['shoot_effect'].append(Shoot_effect(x, y-14, 'player'))
settings['game_object']['shoot_effect'].append(Shoot_effect(x+22, y-14, 'player'))
settings['last_shoot'] = time()
return settings
def update_bg(tile, last_time):
if time()-last_time>0.02:
tile = (tile+1)%200
last_time = time()
return tile, last_time
def draw(game_object, screen, screen_size, path, **kwargs):
draw_bg(screen, game_object['bg'])
draw_enemy(screen, game_object['enemy'])
draw_shoot_effect(screen, game_object['shoot_effect'])
draw_player(screen, game_object['player'])
draw_HUD(screen, game_object['HUD'], game_object['player'].hp)
draw_shoot(screen, game_object['shoot'])
draw_hit_effect(screen, game_object['hit_effect'])
pygame.display.flip()
fps(60)
def draw_shoot_effect(screen, effect):
for gO in effect:
screen.blit(gO.img, (int(gO.x), int(gO.y)))
def draw_hit_effect(screen, explosion):
for gO in explosion:
screen.blit(gO.img, (int(gO.x), int(gO.y)))
def draw_shoot(screen, shoot):
for gO in shoot:
screen.blit(gO.img, (int(gO.x), int(gO.y)))
def draw_enemy(screen, enemy):
for gO in enemy:
screen.blit(gO.fire.img, (int(gO.x+16), int(gO.y-7)))
screen.blit(gO.img, (int(gO.x),int(gO.y)))
def draw_HUD(screen, HUD, lifes):
for gO in HUD:
if gO.__class__==Sprite:
x= gO.x
screen.blit(gO.img, (int(gO.x), int(gO.y)))
img = pygame.image.load(dirname(realpath(__file__)) + '/assets/img/effects/life.png')
for i in range(lifes-1):
screen.blit(img, (int(x+5+22*i), int(305)))
def draw_player(screen, player):
screen.blit(player.img, (int(player.x), int(player.y)))
y = player.fires[0].y+player.height-5
	if player.pos == 'M':
		x = player.x+2 # compensate for the first flame's offset
		x_offset = 25 # compensate for the second flame's offset
	else:
		x = player.x+4 # compensate for the first flame's offset
		x_offset = 18 # compensate for the second flame's offset
screen.blit(player.fires[0].img, (int(x), int(y)))
screen.blit(player.fires[0].img, (int(x+x_offset), int(y)))
_clock = pygame.time.Clock() # one shared clock; a fresh Clock per call defeats tick()'s frame timing
def fps(frames):
	_clock.tick(frames)
def draw_bg(screen, bg):
screen.blit(bg.img[bg.tile], (int(bg.x),int(bg.y)))
def check_exit(exit_request, **kwargs):
return not exit_request
class Hit_effect:
def __init__(self,x,y):
self.x = x
self.y = y
path = dirname(realpath(__file__))+'/assets/img/effects'
self.img = pygame.image.load(path+'/explosion0.png')
self.animation = Animation({'explosion' : [8, 0.01]}, path, 'explosion', self)
class Shoot_effect:
def __init__(self, x,y, origin):
self.x = x
self.y = y
self.origin = origin
path = dirname(realpath(__file__))
if origin == 'player':
self.img = pygame.image.load(path+'/assets/img/effects/fire_effectPlayer0.png')
self.animation = Animation({'fire_effectPlayer' : [7, 0]}, path+'/assets/img/effects', 'fire_effectPlayer', self)
else:
self.img = pygame.image.load(path+'/assets/img/effects/fire_effectEnemy0.png')
self.animation = Animation({'fire_effectEnemy' : [7, 0]}, path+'/assets/img/effects', 'fire_effectEnemy', self)
class Shoot:
def __init__(self, x, y, origin):
self.x = x
self.y = y
self.origin = origin
if origin == 'player':
self.img = pygame.image.load(dirname(realpath(__file__))+'/assets/img/effects/shootPlayer.png')
self.y_speed = -4
else:
self.img = pygame.image.load(dirname(realpath(__file__))+'/assets/img/effects/shootEnemy.png')
self.y_speed = 4
self.width = self.img.get_width()
self.height= self.img.get_height()
class Sprite:
def __init__(self, path, x, y):
self.x = x
self.y = y
self.img = pygame.image.load(path)
class Explosion:
def __init__(self, x, y):
self.x = x
self.y = y
path = dirname(realpath(__file__))
self.img = pygame.image.load(path+'/assets/img/effects/explosion0.png')
self.animation = Animation({'explosion' : [7, 0.2]}, path, 'explosion', self)
class Enemy:
def __init__(self, x, y, type, y_speed):
self.x = x
self.x_speed = 0
self.y = y
self.hp = type+2
self.y_speed = y_speed
self.type = type
self.img = pygame.image.load(dirname(realpath(__file__))+'/assets/img/enemy/enemy'+str(type)+'.png').convert_alpha()
self.width = self.img.get_width()
self.height = self.img.get_height()
self.fire = Fire(self)
self.init = time()
self.hit_mark = False
	def hit_damage(self):
if randint(1,2)%2:
self.img = white(self.img)
else:
self.img = red(self.img)
self.hit_mark = True
self.hp -= 1
self.init = time()
def image_return(self):
self.img = pygame.image.load(dirname(realpath(__file__))+'/assets/img/enemy/enemy'+str(self.type)+'.png').convert_alpha()
def white(surface):
for row in range(surface.get_height()):
for column in range(surface.get_width()):
if surface.get_at((column, row))[3] == 255:
surface.set_at((column, row), (255, 255, 255))
return surface
def red(surface):
for row in range(surface.get_height()):
for column in range(surface.get_width()):
if surface.get_at((column, row))[3] == 255:
surface.set_at((column, row), (255, 130, 130))
return surface
class Player:
def __init__(self):
self.pos = 'M'
self.hp = 4
self.x = 200
self.x_speed = 0
self.y = 280
self.tiles = {}
self.spaw_effect = False
self.spaw_effect_start = time()
self.fires= [
Fire(self),
Fire(self)
]
path = dirname(realpath(__file__))
for sides in ['L', 'M', 'R']:
for i in range(4):
k = i+1
self.tiles[str(k)+sides]= (pygame.image.load(path+'/assets/img/ships/ship' + str(k) + sides + '.png'))
self.load_img()
self.width = self.img.get_width()
self.height = self.img.get_height()
def load_img(self):
self.img = self.tiles[str(self.hp)+self.pos]
def player_move_key(self, k, screen_size):
if k[K_d]:
self.x_speed += 1.4
self.pos = 'R'
elif k[K_a]:
self.x_speed -= 1.4
self.pos = 'L'
else:
self.x_speed /= 1.1
self.pos = 'M'
if abs(self.x_speed)>5:
if self.x_speed>0:
self.x_speed = 5
else:
self.x_speed = -5
self.x+=self.x_speed
if self.x+self.width>screen_size[0]:
self.x = screen_size[0]-self.width
self.pos = 'M'
if self.x < 0:
self.x = 0
self.pos = 'M'
class Fire:
def __init__(self, obj):
self.x = obj.x
self.y = obj.y
self.img = ''
self.animation = Animation({'fire' : [4, 0.02]}, dirname(realpath(__file__))+'/assets/img/effects', 'fire', self)
class Animation():
def __init__(self, sprites, path, first, obj):
self.sprites = sprites
self.path = path
self.tile = first
self.pos = 0
self.last_update = time()
self.obj = obj
self.obj.img = pygame.image.load(path + '/' + first + str(self.pos) + '.png')
def change(self, tile, pos=0):
self.tile = tile
self.pos = 0
self.obj.img = pygame.image.load(self.path + '/' + tile + str(pos) + '.png')
def update(self):
if time()-self.last_update>self.sprites[self.tile][1]:
if self.pos == self.sprites[self.tile][0]-1:
self.pos = 0
else:
self.pos += 1
self.obj.img = pygame.image.load(self.path + '/' + self.tile + str(self.pos) + '.png')
self.last_update = time()
class Background:
def __init__(self):
self.x = -25
self.y = 0
self.tile = 0
self.time = time()
self.img = []
path = dirname(realpath(__file__))
for i in range(200):
self.img.append(pygame.image.load(path+'/assets/img/bg/b0553b276f5049bec4808d6a012e32bc-' + str(i)+'.png'))
main() | 36.37123 | 158 | 0.559773 | 2,124 | 15,676 | 3.974576 | 0.093691 | 0.08055 | 0.06823 | 0.027719 | 0.41874 | 0.346245 | 0.277422 | 0.260602 | 0.219972 | 0.214996 | 0 | 0.02628 | 0.288785 | 15,676 | 431 | 159 | 36.37123 | 0.730918 | 0.008803 | 0 | 0.265985 | 0 | 0 | 0.101577 | 0.023367 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097187 | false | 0.005115 | 0.012788 | 0.002558 | 0.16624 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a699d75b7048967367caac0285a0e4744e00927 | 2,168 | py | Python | src/redrawing/components/debug.py | ReDrawing/redrawing | 20743f0c8d64d9d2e15cefa840423c9698c74653 | [
"MIT"
] | 1 | 2021-04-20T00:00:15.000Z | 2021-04-20T00:00:15.000Z | src/redrawing/components/debug.py | ReDrawing/redrawing | 20743f0c8d64d9d2e15cefa840423c9698c74653 | [
"MIT"
] | null | null | null | src/redrawing/components/debug.py | ReDrawing/redrawing | 20743f0c8d64d9d2e15cefa840423c9698c74653 | [
"MIT"
] | 1 | 2021-07-18T03:57:01.000Z | 2021-07-18T03:57:01.000Z | import time
from .stage import Stage
class Debug_Stage(Stage):
'''!
    Stage for debugging; prints messages in setup and process
'''
configs_default={"name":"debug_stage", "blank_line":False, "wait_key": False, "wait_seconds": 0, "context_debug":"context"}
def __init__(self, configs={}):
'''!
Constructor
@param configs:
name: Stage name, will be printed in the screen (default: "debug_stage")
blank_line: Print a blank line in the screen (default: False)
wait_key: Wait for a key to be pressed after print (default: False)
wait_seconds: Wait for a number of seconds after print (default: 0, no wait)
            context_debug: Word that will be placed in context, can be used for debugging substages (default: "context")
'''
super().__init__(configs)
def setup(self):
'''!
        Initialize the stage
Print the name of the stage, and according to the settings,
wait for a key to be pressed or print a blank line
'''
print(self._configs["name"], "setup")
if self._configs["blank_line"]:
print()
if self._configs["wait_key"]:
input("Type anything to continue: ")
if self._configs["wait_seconds"] != 0:
time.sleep(self._configs["wait_seconds"])
self.set_context("context_debug", self._configs["context_debug"])
def process(self, context={}):
'''!
Prints the name of the stage, and according to the settings,
wait for a key to be pressed or print a blank line
If "context_debug" key is in the context, print the value.
'''
print(self._configs["name"], "process")
if "context_debug" in context:
print(context["context_debug"])
if self._configs["blank_line"]:
print()
if self._configs["wait_key"]:
input("Type anything to continue: ")
if self._configs["wait_seconds"] != 0:
time.sleep(self._configs["wait_seconds"])
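# A minimal usage sketch (assumes the surrounding framework normally drives
# setup()/process(); the names and values below are illustrative):
#
#   stage = Debug_Stage({"name": "my_debug", "blank_line": True})
#   stage.setup()
#   stage.process({"context_debug": "hello"})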
| 32.358209 | 127 | 0.579336 | 264 | 2,168 | 4.587121 | 0.242424 | 0.109001 | 0.06441 | 0.056152 | 0.366639 | 0.366639 | 0.366639 | 0.348472 | 0.348472 | 0.348472 | 0 | 0.002695 | 0.315498 | 2,168 | 66 | 128 | 32.848485 | 0.813342 | 0.374077 | 0 | 0.48 | 0 | 0 | 0.24144 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.08 | 0 | 0.28 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a69a21aa05fe2df5e6fa114f41410a45c4bd7d5 | 6,659 | py | Python | bin/build.py | mgijax/mgv-data | 99a2f5a8f56d7ebb6a014431d8a20df721df58e2 | [
"MIT"
] | 1 | 2022-03-04T06:20:33.000Z | 2022-03-04T06:20:33.000Z | bin/build.py | JoelRichardson/mgv-data | 99a2f5a8f56d7ebb6a014431d8a20df721df58e2 | [
"MIT"
] | 1 | 2021-04-06T13:17:47.000Z | 2021-04-06T13:17:47.000Z | bin/build.py | JoelRichardson/mgv-data | 99a2f5a8f56d7ebb6a014431d8a20df721df58e2 | [
"MIT"
] | 1 | 2022-03-04T11:49:53.000Z | 2022-03-04T11:49:53.000Z | #
# build.py
#
# Builds a back end for MGV based on a config file.
#
import os
import sys
import time
import json
from argparse import ArgumentParser
import re
from urllib.request import urlopen
import gzip
from lib.Config import ConfigFileReader
from lib.Downloader import downloaderNameMap
from lib.Importer import importerNameMap
from lib.Deployer import Deployer
### ------------------------------------------------------------------
class MgvDataBuilder :
VALID_TYPES = ["assembly", "models", "orthologs"]
VALID_PHASES = ["download", "import", "deploy"]
def __init__ (self) :
self.logfile = sys.stderr
self.genome_re = None
def log(self, s, newline='\n', timestamp=True) :
if timestamp:
ts = time.asctime(time.localtime(time.time()))
self.logfile.write(ts + " ")
self.logfile.write(str(s))
self.logfile.write(newline)
self.logfile.flush()
def getArgs (self) :
parser = ArgumentParser("Builds the backend for MGV based on a config file.")
parser.add_argument(
"-b", "--build-config",
required=True,
help = "Build config file. Required.")
parser.add_argument(
"-g", "--genome",
default = ".*",
help = "Which genomes to build. By default, builds all genomes. Specify a regex pattern used to match the genome names.")
parser.add_argument(
"-p", "--phase",
choices = self.VALID_PHASES,
action = "append",
default = [],
help = "Which phase to run. One of: %(choices)s. If not specified, runs all phases.")
parser.add_argument(
"-t", "--type",
choices = self.VALID_TYPES,
default = None,
help = "Which datatype to process. One of: %(choices)s. If not specified, processes all types.")
parser.add_argument(
"-l", "--log-file",
default = None,
help = "Where to write log messages. By default, logs to stderr.")
parser.add_argument(
"-d", "--downloads-dir",
default = "./downloads",
help = "Where downloaded files go. Default = %(default)s")
parser.add_argument(
"-o", "--output-dir",
default = "./output",
help = "Where the output files go. Default = %(default)s")
parser.add_argument(
"-w", "--web-dir",
help = "Web accessible directory containing data generated files. Default = same as --output-dir.")
parser.add_argument(
"--cgi-dir",
help = "Place to put the CGI scripts used by MGV Default = same as --web-dir.")
parser.add_argument(
"--snapshot-file",
help = "Alliance release snapshot file to use in lieu of querying API. (default = get snapshot from Alliance API)")
parser.add_argument(
"-D", "--debug",
action = "store_true",
default = False,
help = "Run in debug mode.")
args = parser.parse_args()
args.downloads_dir = os.path.abspath(args.downloads_dir)
args.output_dir = os.path.abspath(args.output_dir)
args.web_dir = os.path.abspath(args.web_dir) if args.web_dir else args.output_dir
args.cgi_dir = os.path.abspath(args.cgi_dir) if args.cgi_dir else args.web_dir
if len(args.phase) == 0:
args.phase = self.VALID_PHASES
return args
def deepCopy (self, obj) :
return json.loads(json.dumps(obj))
def ensureDirectory (self, d, empty = False):
if self.args.debug:
return
if not os.path.exists(d):
os.makedirs(d)
if empty:
cmd = "rm -fr %s/*" % d
self.log(cmd)
os.system(cmd)
def process(self, g) :
self.log("Processing cfg: " + str(g))
gn = g["name"]
for t in self.VALID_TYPES:
if self.args.type in [t, None] :
if not t in g:
continue
#
if type(g[t]) is str and g[t].startswith("="):
if "deploy" in self.args.phase:
gg = self.getCfg(g[t][1:])
tgtPath = os.path.join(self.args.web_dir, gg["name"], t)
lnkPath = os.path.join(self.args.web_dir, g["name"], t)
                        cmd = 'ln -s %s %s' % (tgtPath, lnkPath)
                        self.log("Creating symlink: " + cmd)
                        if not self.args.debug:
                            os.system(cmd)
                    continue
sname = g[t].get("source","UrlDownloader")
cls = downloaderNameMap[sname]
downloader = cls(self, g, t, self.args.debug)
# Download data
if "download" in self.args.phase:
downloader.go()
# Import data
if "import" in self.args.phase:
icls = importerNameMap[t]
importer = icls(self, t, g, self.args.output_dir, self.args.debug)
importer.go()
# Deploy
if "deploy" in self.args.phase:
deployer = Deployer(self, t, g, self.args.output_dir, self.args.web_dir, self.args.cgi_dir, debug=self.args.debug)
deployer.go()
def getCfg (self, name = None) :
if name is None:
return self.cfg
else:
return self.name2cfg.get(name, None)
def main (self) :
#
self.args = self.getArgs()
if self.args.log_file:
self.logfile = open(self.args.log_file, 'w')
self.log("\n\nThis is the MGV back end data builder.")
self.log("Arguments: " + str(self.args))
self.genome_re = re.compile('^' + self.args.genome + '$')
#
self.cfg = ConfigFileReader(self.args.build_config).read()
if self.args.debug:
self.log("Running in DEBUG mode. No commands will be executed.")
#
self.name2cfg = {}
for g in self.cfg:
self.name2cfg[g["name"]] = g
#
for g in self.cfg:
if g.get("disabled", False) :
continue
if self.genome_re.match(g["name"]):
self.log("Processing " + g["name"])
self.process(g)
else:
# self.log("Skipping " + g["name"])
pass
self.log("Builder exiting.")
self.logfile.close()
### ------------------------------------------------------------------
if __name__ == "__main__":
MgvDataBuilder().main()
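# Example invocations (flags as defined in getArgs(); the config file name
# and genome pattern below are placeholders):
#
#   python build.py -b build-config.json
#   python build.py -b build-config.json -g "mus.*" -p download -p import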
| 37.410112 | 134 | 0.522301 | 772 | 6,659 | 4.433938 | 0.257772 | 0.051417 | 0.05463 | 0.018697 | 0.129127 | 0.09816 | 0.084721 | 0.0409 | 0.018113 | 0 | 0 | 0.001136 | 0.33894 | 6,659 | 177 | 135 | 37.621469 | 0.776465 | 0.038895 | 0 | 0.162162 | 0 | 0.027027 | 0.199561 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054054 | false | 0.006757 | 0.114865 | 0.006757 | 0.222973 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a6c43158e65f1ef63580d8b4dd6ab79de3bce35 | 10,245 | py | Python | plane_waves/polarization_animation.py | raptor/ECEn360_Winter2016 | 65076f19c561ee51c8757720694d1ef00f829bdb | [
"MIT"
] | null | null | null | plane_waves/polarization_animation.py | raptor/ECEn360_Winter2016 | 65076f19c561ee51c8757720694d1ef00f829bdb | [
"MIT"
] | 1 | 2019-03-03T00:54:52.000Z | 2019-03-04T18:06:38.000Z | plane_waves/polarization_animation.py | raptor/ECEn360_Winter2016 | 65076f19c561ee51c8757720694d1ef00f829bdb | [
"MIT"
] | 1 | 2019-03-02T02:50:24.000Z | 2019-03-02T02:50:24.000Z | #----------------------------------------------------------------------
# # 9/25/18 - Update to use Python 3.6, PyQt5 and pyqtgraph 0.10.0
# G. Nordin
#----------------------------------------------------------------------
from PyQt5 import QtGui, QtCore
import pyqtgraph as pg
import pyqtgraph.opengl as gl
import numpy as np
import sys
## Always start by initializing Qt (only once per application)
app = QtGui.QApplication([])
## Define a top-level widget to hold everything
w = QtGui.QWidget()
w.resize(1000,600)
w.setWindowTitle('Polarization Visualization')
## Create widgets to be placed inside
heading_text = QtGui.QLabel('Polarization Angles ' + u"\u03C8" + ' and ' + u"\u03B4")
# Box with sliders
sliderbox = QtGui.QGroupBox()
hBoxLayout = QtGui.QHBoxLayout()
psi_slider_layout = QtGui.QVBoxLayout()
delta_slider_layout = QtGui.QVBoxLayout()
# psi slider
psi_label = QtGui.QLabel(u"\u03C8")
psi_slider = QtGui.QSlider()
psi_slider.setOrientation(QtCore.Qt.Vertical)
psi_slider.setMinimum(0)
psi_slider.setMaximum(90)
psi_slider.setValue(0)
psi_value = QtGui.QLabel(str(psi_slider.value()) + u"\u00b0")
psi_slider_layout.addWidget(psi_label)
psi_slider_layout.addWidget(psi_slider)
psi_slider_layout.addWidget(psi_value)
def set_psi_value(value):
psi_value.setText(str(value) + u"\u00b0")
global psi_deg
psi_deg = value
psi_slider.valueChanged.connect(set_psi_value)
# delta slider
delta_label = QtGui.QLabel(u"\u03B4")
delta_slider = QtGui.QSlider()
delta_slider.setOrientation(QtCore.Qt.Vertical)
delta_slider.setMinimum(-180)
delta_slider.setMaximum(180)
delta_slider.setValue(0)
delta_value = QtGui.QLabel(str(delta_slider.value()) + u"\u00b0")
delta_slider_layout.addWidget(delta_label)
delta_slider_layout.addWidget(delta_slider)
delta_slider_layout.addWidget(delta_value)
def set_delta_value(value):
delta_value.setText(str(value) + u"\u00b0")
global delta_deg
delta_deg = value
delta_slider.valueChanged.connect(set_delta_value)
# Set layout of box containing sliders
hBoxLayout.addItem(psi_slider_layout)
hBoxLayout.addItem(delta_slider_layout)
sliderbox.setLayout(hBoxLayout)
# Box with options
optionbox = QtGui.QGroupBox()
vBoxLayout = QtGui.QVBoxLayout()
# Options
hfield_checkbox = QtGui.QCheckBox("Show H-field")
# Add to layout
vBoxLayout.addWidget(hfield_checkbox)
# Add to box
optionbox.setLayout(vBoxLayout)
# Create openGL view widget & add a grid
wGL = gl.GLViewWidget()
wGL.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
wGL.opts['distance'] = 5
g = gl.GLGridItem()
wGL.addItem(g)
## Create a grid layout to manage the widgets size and position
layout = QtGui.QGridLayout()
w.setLayout(layout)
layout.setColumnStretch (1, 2)
## Add widgets to the layout in their proper positions
layout.addWidget(heading_text, 0, 0) # heading text goes in upper-left
layout.addWidget(sliderbox, 1, 0) # slider box goes underneath heading text
layout.addWidget(optionbox, 2, 0) # option box goes underneath slider box
layout.addWidget(wGL, 0, 1, 3, 1) # wGL goes on right side, spanning 3 rows
## Display the widget as a new window
w.show()
##------------ Set up polarization animation ------------##
degtorad = np.pi/180.0
# Function to create new array from old where new array is formatted to prepare to
# draw lines perpendicular from z-axis to curve defined by input array
def preptomakelines(pts):
pts2 = np.zeros(shape=(2*pts.shape[0], pts.shape[1]))
for i in range(pts.shape[0]):
pts2[2*i,2] = pts[i,2]
pts2[2*i + 1,:] = pts[i,:]
return pts2
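# Shape note: the input curve is (N, 3); the output is (2N, 3) where every
# even row keeps only the z coordinate, so drawing the result in 'lines'
# mode yields one segment from the z-axis out to each curve point.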
psi_deg = float(psi_slider.value())
delta_deg = float(delta_slider.value())
# Calculate sinusoidal electric field for arbitrary polarization
def efield_arbpol(t,z,amplitude,psi_rad,delta_rad):
x = amplitude * np.cos(psi_rad) * np.cos(2*np.pi*(t-z))
y = amplitude * np.sin(psi_rad) * np.cos(2*np.pi*(t-z) + delta_rad)
z = z
return x, y, z
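# In these normalized units the transverse components are
#   Ex = A*cos(psi)*cos(2*pi*(t - z))
#   Ey = A*sin(psi)*cos(2*pi*(t - z) + delta)
# so psi sets the amplitude ratio and delta the relative phase: delta = 0
# gives linear polarization; psi = 45 deg with delta = +/-90 deg gives
# circular polarization.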
# Prep coordinate rotations for electric & magnetic fields to go from calculation
# coordinates to pyqtgraph plotting coordinates
temp2Darray = [[-1, 0, 0],
[0, 0, 1],
[0, 1, 0]]
rot_efield_coord = np.array(temp2Darray)
# Calculate electric & magnetic field arrays. Also make arrays to define lines.
amplitude = 1.0
z = np.linspace(-10, 10, 500)
x, y, z = efield_arbpol(0.0,z,amplitude,psi_deg*degtorad,delta_deg*degtorad)
# E-field
pts_e = np.vstack([x,y,z]).transpose()
pts_e_lines = preptomakelines(pts_e)
pts_e = np.dot(pts_e, rot_efield_coord)
pts_e_lines = np.dot(pts_e_lines, rot_efield_coord)
z0 = np.zeros(len(z))
pts_e_z0 = np.vstack([x,y,z0]).transpose()
pts_e_z0 = np.dot(pts_e_z0, rot_efield_coord)
pts_e_arrow = np.array( [[0.0, 0.0, 0.0], pts_e_z0[int(len(pts_e_z0)/2.0)]] )
# H-field
pts_h = np.vstack([-y,x,z]).transpose() # Orthogonal to E
pts_h_lines = preptomakelines(pts_h)
pts_h = np.dot(pts_h, rot_efield_coord)
pts_h_lines = np.dot(pts_h_lines, rot_efield_coord)
pts_h_z0 = np.vstack([-y,x,z0]).transpose()
pts_h_z0 = np.dot(pts_h_z0, rot_efield_coord)
pts_h_arrow = np.array( [[0.0, 0.0, 0.0], pts_h_z0[int(len(pts_h_z0)/2.0)]] )
# Get ready to make plots
efield_color = (1, 0, 0, 1)
efield_color_z0 = (1, 1, 1, 1)
efield_color_arrow = (1, 0.67, 0.67, 1)
hfield_color = (0, 0, 1, 1)
hfield_color_z0 = (1, 1, 1, 1)
hfield_color_arrow = (0.67, 0.67, 1, 1)
linewidth = 4.0
linewidth2Dpol = 2.0
linewidth2Defieldvector = 10.0
# Make plots
plt_e = gl.GLLinePlotItem(pos=pts_e, mode='line_strip', color=efield_color, width=linewidth, antialias=True)
wGL.addItem(plt_e)
#plt_e_lines = gl.GLLinePlotItem(pos=pts_e_lines, mode='lines', color=efield_color, width=linewidth, antialias=True)
#wGL.addItem(plt_e_lines)
plt_e_z0 = gl.GLLinePlotItem(pos=pts_e_z0, mode='line_strip', color=efield_color_z0, width=linewidth2Dpol, antialias=True)
wGL.addItem(plt_e_z0)
plt_e_arrow = gl.GLLinePlotItem(pos=pts_e_arrow, mode='line_strip', color=efield_color_arrow, width=linewidth2Defieldvector, antialias=True)
wGL.addItem(plt_e_arrow)
plt_h = gl.GLLinePlotItem(pos=pts_h, mode='line_strip', color=hfield_color, width=linewidth, antialias=True)
wGL.addItem(plt_h)
#plt_h_lines = gl.GLLinePlotItem(pos=pts_h_lines, mode='lines', color=hfield_color, width=linewidth, antialias=True)
#wGL.addItem(plt_h_lines)
plt_h_z0 = gl.GLLinePlotItem(pos=pts_h_z0, mode='line_strip', color=hfield_color_z0, width=linewidth2Dpol, antialias=True)
wGL.addItem(plt_h_z0)
plt_h_arrow = gl.GLLinePlotItem(pos=pts_h_arrow, mode='line_strip', color=hfield_color_arrow, width=linewidth2Defieldvector, antialias=True)
wGL.addItem(plt_h_arrow)
# Start with H-field items as invisible
plt_h.setVisible(False)
#plt_h_lines.setVisible(False)
plt_h_z0.setVisible(False)
plt_h_arrow.setVisible(False)
# Add lines to visually define axes
x_length = 1.1
y_length = 1.1
z_length = 10
linewidthaxis = 1.0
axis_color = (32, 32, 32, 40)
## make z-axis
zaxis = np.linspace(-z_length,z_length,10)
x_zaxis = np.zeros(10)
y_zaxis = np.zeros(10)
pts_zaxis = np.vstack([x_zaxis,zaxis,y_zaxis]).transpose()
plt_zaxis = gl.GLLinePlotItem(pos=pts_zaxis, color=axis_color, width=linewidthaxis, antialias=True)
#wGL.addItem(plt_zaxis)
## make y-axis
yaxis = np.linspace(-y_length,y_length,10)
x_yaxis = np.zeros(10)
z_yaxis = np.zeros(10)
pts_yaxis = np.vstack([yaxis,z_yaxis,x_yaxis]).transpose()
plt_yaxis = gl.GLLinePlotItem(pos=pts_yaxis, color=axis_color, width=linewidthaxis, antialias=True)
wGL.addItem(plt_yaxis)
## make x-axis
xaxis = np.linspace(-x_length,x_length,10)
y_xaxis = np.zeros(10)
z_xaxis = np.zeros(10)
pts_xaxis = np.vstack([y_xaxis,z_xaxis,xaxis]).transpose()
plt_xaxis = gl.GLLinePlotItem(pos=pts_xaxis, color=axis_color, width=linewidthaxis, antialias=True)
wGL.addItem(plt_xaxis)
# make image for x-y plane
image_shape = (2,2)
uniform_values = np.ones(image_shape, dtype=int) * 255 # np.int was removed from NumPy; the builtin int is equivalent here
print(uniform_values)
uniform_image_transparent = pg.makeARGB(uniform_values)[0]
uniform_image_transparent[:,:,:] = 255
uniform_image_transparent[:,:,3] = 80
print(uniform_image_transparent)
v1 = gl.GLImageItem(uniform_image_transparent)
v1.translate(-image_shape[0]/2., -image_shape[1]/2., 0)
v1.rotate(90, 1,0,0)
wGL.addItem(v1)
# Set up some animation parameters
frametime = 50 # frame refresh time in ms
velocity = 1./frametime
counter = 0
# Function to update scene for each frame
def update():
global z, z0, velocity, counter, amplitude
global plt_e, rot_efield_coord, plt_e_z0, plt_e_arrow #, plt_e_lines
global plt_h, plt_h_z0, plt_h_arrow #, plt_h_lines
global psi_deg, delta_deg, degtorad
counter +=1
time = float(counter)/frametime % 1
x, y, z = efield_arbpol(time,z,amplitude,psi_deg*degtorad,delta_deg*degtorad)
pts_e = np.vstack([x,y,z]).transpose()
pts_e_lines = preptomakelines(pts_e)
pts_e = np.dot(pts_e, rot_efield_coord)
#pts_e_lines = np.dot(pts_e_lines, rot_efield_coord)
plt_e.setData(pos=pts_e)
#plt_e_lines.setData(pos=pts_e_lines)
pts_e_z0 = np.vstack([x,y,z0]).transpose()
pts_e_z0 = np.dot(pts_e_z0, rot_efield_coord)
plt_e_z0.setData(pos=pts_e_z0)
pts_e_arrow = np.array( [[0.0, 0.0, 0.0], pts_e_z0[int(len(pts_e_z0)/2.0)]] )
plt_e_arrow.setData(pos=pts_e_arrow)
pts_h = np.vstack([-y,x,z]).transpose()
pts_h_lines = preptomakelines(pts_h)
pts_h = np.dot(pts_h, rot_efield_coord)
#pts_h_lines = np.dot(pts_h_lines, rot_efield_coord)
plt_h.setData(pos=pts_h)
#plt_h_lines.setData(pos=pts_h_lines)
pts_h_z0 = np.vstack([-y,x,z0]).transpose()
pts_h_z0 = np.dot(pts_h_z0, rot_efield_coord)
plt_h_z0.setData(pos=pts_h_z0)
pts_h_arrow = np.array( [[0.0, 0.0, 0.0], pts_h_z0[int(len(pts_h_z0)/2.0)]] )
plt_h_arrow.setData(pos=pts_h_arrow)
# Poor man's state updating
if hfield_checkbox.isChecked():
plt_h.setVisible(True)
#plt_h_lines.setVisible(True)
plt_h_z0.setVisible(True)
plt_h_arrow.setVisible(True)
else:
plt_h.setVisible(False)
#plt_h_lines.setVisible(False)
plt_h_z0.setVisible(False)
plt_h_arrow.setVisible(False)
# Set up timer for animation
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(50)
## Start the Qt event loop
app.exec_()
| 35.085616 | 140 | 0.730112 | 1,682 | 10,245 | 4.218193 | 0.171225 | 0.019168 | 0.007611 | 0.007329 | 0.364764 | 0.292319 | 0.261311 | 0.25229 | 0.234249 | 0.196476 | 0 | 0.033576 | 0.127867 | 10,245 | 291 | 141 | 35.206186 | 0.760492 | 0.219815 | 0 | 0.150754 | 0 | 0 | 0.022607 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025126 | false | 0 | 0.025126 | 0 | 0.060302 | 0.01005 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a6d0f5efa3463ae20d88607a8d5407e7e5f2f04 | 4,102 | py | Python | FEMpy/tests/unit/test_assemblers.py | floydie7/FEMpy | 50e11b88dc249ff7c599472b455b07b04df1afd7 | [
"MIT"
] | null | null | null | FEMpy/tests/unit/test_assemblers.py | floydie7/FEMpy | 50e11b88dc249ff7c599472b455b07b04df1afd7 | [
"MIT"
] | null | null | null | FEMpy/tests/unit/test_assemblers.py | floydie7/FEMpy | 50e11b88dc249ff7c599472b455b07b04df1afd7 | [
"MIT"
] | 1 | 2022-01-22T06:39:38.000Z | 2022-01-22T06:39:38.000Z | import numpy as np
from FEMpy import Mesh, FEBasis, Assemblers
mesh_1D_linear = Mesh.Interval1D(0, 1, 1/2, 'linear')
basis_1D_linear = FEBasis.IntervalBasis1D('linear')
mesh_1D_quadratic = Mesh.Interval1D(0, 1, 1/2, 'quadratic')
basis_1D_quadratic = FEBasis.IntervalBasis1D('quadratic')
mesh_2D_triangular_linear = Mesh.TriangularMesh2D(0, 1, 0, 1, 1/2, 1/2, 'linear')
basis_2D__triangular_linear = FEBasis.TriangularBasis2D('linear')
def coefficient_or_source_function(x):
return 1
def test_matrix_assembly_1d_linear():
matrix = Assemblers.assemble_matrix(coefficient_or_source_function, mesh_1D_linear,
basis_1D_linear, basis_1D_linear,
derivative_order_trial=1, derivative_order_test=1)
assert np.allclose(matrix.toarray(), np.array([[2., -2., 0.],
[-2., 4., -2.],
[0., -2., 2.]]))
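    # Sanity check: with mesh size h = 1/2 and a unit coefficient, each
    # element contributes the local stiffness (1/h)*[[1, -1], [-1, 1]] =
    # [[2, -2], [-2, 2]], which assembles into the tridiagonal matrix above.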
def test_matrix_assembly_1d_quadratic():
matrix = Assemblers.assemble_matrix(coefficient_or_source_function, mesh_1D_quadratic,
basis_1D_quadratic, basis_1D_quadratic,
derivative_order_trial=1, derivative_order_test=1)
assert np.allclose(matrix.toarray(), np.array([[4.6667, -5.3333, 0.6667, 0., 0.],
[-5.3333, 10.6667, -5.3333, 0., 0.],
[0.6667, -5.3333, 9.3333, -5.3333, 0.6667],
[0., 0., -5.3333, 10.6667, -5.3333],
[0., 0., 0.6667, -5.3333, 4.6667]]), rtol=1e-4, atol=1e-7)
def test_matrix_assembly_2d_linear():
matrix = Assemblers.assemble_matrix(coefficient_or_source_function, mesh_2D_triangular_linear,
basis_2D__triangular_linear, basis_2D__triangular_linear,
derivative_order_trial=(1, 0), derivative_order_test=(1, 0))
assert np.allclose(matrix.toarray(), np.array([[0.5, 0., 0., -0.5, 0., 0., 0., 0., 0.],
[0., 1., 0., 0., -1., 0., 0., 0., 0.],
[0., 0., 0.5, 0., 0., -0.5, 0., 0., 0.],
[-0.5, 0., 0., 1., 0., 0., -0.5, 0., 0.],
[0., -1., 0., 0., 2., 0., 0., -1., 0.],
[0., 0., -0.5, 0., 0., 1., 0., 0., -0.5],
[0., 0., 0., -0.5, 0., 0., 0.5, 0., 0.],
[0., 0., 0., 0., -1., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., -0.5, 0., 0., 0.5]]))
# test_matrix_assembly_2d_quadratic omitted because the matrix is too large to type by hand.
def test_vector_assembly_1d_linear():
vector = Assemblers.assemble_vector(coefficient_or_source_function, mesh_1D_linear,
basis_1D_linear, derivative_order_test=0)
assert np.allclose(vector, np.array([0.25, 0.5, 0.25]))
def test_vector_assembly_1d_quadratic():
vector = Assemblers.assemble_vector(coefficient_or_source_function, mesh_1D_quadratic,
basis_1D_quadratic, derivative_order_test=0)
assert np.allclose(vector, np.array([0.0833, 0.3333, 0.1667, 0.3333, 0.0833]), rtol=1e-3, atol=1e-6)
def test_vector_assembly_2d_linear():
vector = Assemblers.assemble_vector(coefficient_or_source_function, mesh_2D_triangular_linear,
basis_2D__triangular_linear, derivative_order_test=(0,0))
assert np.allclose(vector, np.array([0.0417, 0.1250, 0.0833, 0.1250, 0.25, 0.1250, 0.0833, 0.1250, 0.0417]),
rtol=1e-3, atol=1e-6)
# test_vector_assembly_2d_quadratic omitted because the vector is too large to type by hand.
| 53.973684 | 112 | 0.515358 | 512 | 4,102 | 3.875 | 0.132813 | 0.058468 | 0.049899 | 0.034274 | 0.749496 | 0.682964 | 0.612903 | 0.540323 | 0.524698 | 0.497984 | 0 | 0.131401 | 0.352511 | 4,102 | 75 | 113 | 54.693333 | 0.615587 | 0.044125 | 0 | 0.038462 | 0 | 0 | 0.01072 | 0 | 0 | 0 | 0 | 0 | 0.115385 | 1 | 0.134615 | false | 0 | 0.038462 | 0.019231 | 0.192308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a6f7d87a2d1fb4721a02d6017c7510a78a70218 | 7,420 | py | Python | fonts/terminus-font-4.49.1/bin/bdf.py | xfnw/yaft | c57e8f3014aa5cf743ca0855e543dbafc2e0db22 | [
"MIT"
] | null | null | null | fonts/terminus-font-4.49.1/bin/bdf.py | xfnw/yaft | c57e8f3014aa5cf743ca0855e543dbafc2e0db22 | [
"MIT"
] | null | null | null | fonts/terminus-font-4.49.1/bin/bdf.py | xfnw/yaft | c57e8f3014aa5cf743ca0855e543dbafc2e0db22 | [
"MIT"
] | null | null | null | #
# Copyright (C) 2017-2020 Dimitar Toshkov Zhekov <dimitar.zhekov@gmail.com>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import re
import codecs
from collections import OrderedDict
from enum import IntEnum, unique
import fnutil
# -- Width --
DPARSE_LIMIT = 512
SPARSE_LIMIT = 32000
class Width:
def __init__(self, x, y):
self.x = x
self.y = y
@staticmethod
def parse(name, value, limit):
words = fnutil.split_words(name, value, 2)
return Width(fnutil.parse_dec(name + '.x', words[0], -limit, limit),
fnutil.parse_dec(name + '.y', words[1], -limit, limit))
@staticmethod
def parse_s(name, value):
return Width.parse(name, value, SPARSE_LIMIT)
@staticmethod
def parse_d(name, value):
return Width.parse(name, value, DPARSE_LIMIT)
def __str__(self):
return '%d %d' % (self.x, self.y)
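# For example (assuming fnutil.split_words/parse_dec accept the byte-string
# values read from a BDF file):
#
#   Width.parse_d('DWIDTH', b'8 0')   # -> Width(8, 0)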
# -- BXX --
class BBX:
def __init__(self, width, height, xoff, yoff):
self.width = width
self.height = height
self.xoff = xoff
self.yoff = yoff
@staticmethod
def parse(name, value):
words = fnutil.split_words(name, value, 4)
return BBX(fnutil.parse_dec('width', words[0], 1, DPARSE_LIMIT),
fnutil.parse_dec('height', words[1], 1, DPARSE_LIMIT),
fnutil.parse_dec('bbxoff', words[2], -DPARSE_LIMIT, DPARSE_LIMIT),
fnutil.parse_dec('bbyoff', words[3], -DPARSE_LIMIT, DPARSE_LIMIT))
def row_size(self):
return (self.width + 7) >> 3
def __str__(self):
return '%d %d %d %d' % (self.width, self.height, self.xoff, self.yoff)
# -- Props --
def skip_comments(line):
return None if line[:7] == b'COMMENT' else line
class Props(OrderedDict):
def __iter__(self):
return self.items().__iter__()
def read(self, input, name, callback=None):
return self.parse(input.read_lines(skip_comments), name, callback)
def parse(self, line, name, callback=None):
if not line or not line.startswith(bytes(name, 'ascii')):
raise Exception(name + ' expected')
value = line[len(name):].lstrip()
self[name] = value
return value if callback is None else callback(name, value)
def set(self, name, value):
self[name] = value if isinstance(value, (bytes, bytearray)) else bytes(str(value), 'ascii')
# -- Base --
class Base:
def __init__(self):
self.props = Props()
self.bbx = None
# -- Char
HEX_BYTES = (48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70)
class Char(Base):
def __init__(self):
Base.__init__(self)
self.code = -1
self.swidth = None
self.dwidth = None
self.data = None
def bitmap(self):
bitmap = ''
row_size = self.bbx.row_size()
for index in range(0, len(self.data), row_size):
bitmap += self.data[index : index + row_size].hex() + '\n'
return bytes(bitmap, 'ascii').upper()
def _read(self, input):
# HEADER
self.props.read(input, 'STARTCHAR')
self.code = self.props.read(input, 'ENCODING', fnutil.parse_dec)
self.swidth = self.props.read(input, 'SWIDTH', Width.parse_s)
self.dwidth = self.props.read(input, 'DWIDTH', Width.parse_d)
self.bbx = self.props.read(input, 'BBX', BBX.parse)
line = input.read_lines(skip_comments)
if line and line.startswith(b'ATTRIBUTES'):
self.props.parse(line, 'ATTRIBUTES')
line = input.read_lines(skip_comments)
# BITMAP
if self.props.parse(line, 'BITMAP'):
raise Exception('BITMAP expected')
row_len = self.bbx.row_size() * 2
self.data = bytearray()
for _ in range(0, self.bbx.height):
line = input.read_lines(skip_comments)
if not line:
raise Exception('bitmap data expected')
if len(line) == row_len:
self.data += codecs.decode(line, 'hex')
else:
raise Exception('invalid bitmap length')
# FINAL
if input.read_lines(skip_comments) != b'ENDCHAR':
raise Exception('ENDCHAR expected')
return self
@staticmethod
def read(input):
return Char()._read(input) # pylint: disable=protected-access
def write(self, output):
for [name, value] in self.props:
output.write_prop(name, value)
output.write_line(self.bitmap() + b'ENDCHAR')
# -- Font --
@unique
class XLFD(IntEnum):
FOUNDRY = 1
FAMILY_NAME = 2
WEIGHT_NAME = 3
SLANT = 4
SETWIDTH_NAME = 5
ADD_STYLE_NAME = 6
PIXEL_SIZE = 7
POINT_SIZE = 8
RESOLUTION_X = 9
RESOLUTION_Y = 10
SPACING = 11
AVERAGE_WIDTH = 12
CHARSET_REGISTRY = 13
CHARSET_ENCODING = 14
CHARS_MAX = 65535
class Font(Base):
def __init__(self):
Base.__init__(self)
self.xlfd = []
self.chars = []
self.default_code = -1
@property
def bold(self):
return b'bold' in self.xlfd[XLFD.WEIGHT_NAME].lower()
@property
def italic(self):
return self.xlfd[XLFD.SLANT] in [b'I', b'O']
@property
def proportional(self):
return self.xlfd[XLFD.SPACING] == b'P'
def _read(self, input):
# HEADER
line = input.read_line()
if self.props.parse(line, 'STARTFONT') != b'2.1':
raise Exception('STARTFONT 2.1 expected')
self.xlfd = self.props.read(input, 'FONT', lambda name, value: value.split(b'-', 15))
if len(self.xlfd) != 15 or self.xlfd[0] != b'':
raise Exception('non-XLFD font names are not supported')
self.props.read(input, 'SIZE')
self.bbx = self.props.read(input, 'FONTBOUNDINGBOX', BBX.parse)
line = input.read_lines(skip_comments)
if line and line.startswith(b'STARTPROPERTIES'):
num_props = self.props.parse(line, 'STARTPROPERTIES', fnutil.parse_dec)
for _ in range(0, num_props):
line = input.read_lines(skip_comments)
if line is None:
raise Exception('property expected')
match = re.fullmatch(br'(\w+)\s+([-\d"].*)', line)
if not match:
raise Exception('invalid property format')
name = str(match.group(1), 'ascii')
value = match.group(2)
if self.props.get(name) is not None:
raise Exception('duplicate property')
if name == 'DEFAULT_CHAR':
self.default_code = fnutil.parse_dec(name, value)
self.props[name] = value
if self.props.read(input, 'ENDPROPERTIES') != b'':
raise Exception('ENDPROPERTIES expected')
line = input.read_lines(skip_comments)
# GLYPHS
num_chars = fnutil.parse_dec('CHARS', self.props.parse(line, 'CHARS'), 1, CHARS_MAX)
for _ in range(0, num_chars):
self.chars.append(Char.read(input))
if next((char.code for char in self.chars if char.code == self.default_code), -1) != self.default_code:
raise Exception('invalid DEFAULT_CHAR')
# FINAL
if input.read_lines(skip_comments) != b'ENDFONT':
raise Exception('ENDFONT expected')
if input.read_line() is not None:
raise Exception('garbage after ENDFONT')
return self
@staticmethod
def read(input):
return Font()._read(input) # pylint: disable=protected-access
def write(self, output):
for [name, value] in self.props:
output.write_prop(name, value)
for char in self.chars:
char.write(output)
output.write_line(b'ENDFONT')
| 23.935484 | 105 | 0.68814 | 1,101 | 7,420 | 4.514078 | 0.235241 | 0.034406 | 0.028169 | 0.032596 | 0.273843 | 0.198793 | 0.135614 | 0.099396 | 0.065996 | 0.065996 | 0 | 0.019133 | 0.175876 | 7,420 | 309 | 106 | 24.012945 | 0.793622 | 0.124124 | 0 | 0.182796 | 0 | 0 | 0.087212 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.145161 | false | 0 | 0.026882 | 0.069892 | 0.387097 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a6f999b096c719861108c73f2eea05851482105 | 9,488 | py | Python | gym_brt/envs/qube_base_env.py | zuzuba/quanser-openai-driver | c4bec08a8c7ac1c05dec26c863f899f44f15fd06 | [
"MIT"
] | null | null | null | gym_brt/envs/qube_base_env.py | zuzuba/quanser-openai-driver | c4bec08a8c7ac1c05dec26c863f899f44f15fd06 | [
"MIT"
] | null | null | null | gym_brt/envs/qube_base_env.py | zuzuba/quanser-openai-driver | c4bec08a8c7ac1c05dec26c863f899f44f15fd06 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import gym
import time
import math
import numpy as np
from gym import spaces
from gym.utils import seeding
from gym_brt.quanser import QubeServo2, QubeServo2Simulator
from gym_brt.control import QubeFlipUpControl
# theta, alpha: positions, velocities, accelerations
OBSERVATION_HIGH = np.asarray([
1, 1, 1, 1, # angles
np.pi / 4, np.pi / 4, # velocities
np.pi / 4, np.pi / 4, # accelerations
4100, # tach0
0.2, # sense
], dtype=np.float64)
OBSERVATION_LOW = -OBSERVATION_HIGH
MAX_MOTOR_VOLTAGE = 8.0
ACTION_HIGH = np.asarray([MAX_MOTOR_VOLTAGE], dtype=np.float64)
ACTION_LOW = -ACTION_HIGH
STATE_KEYS = [
'COS_THETA',
'SIN_THETA',
'COS_ALPHA',
'SIN_ALPHA',
'THETA_VELOCITY',
'ALPHA_VELOCITY',
'THETA_ACCELERATION',
'ALPHA_ACCELERATION',
'TACH0',
'SENSE'
]
def normalize_angle(theta):
return ((theta + np.pi) % (2 * np.pi)) - np.pi
class QubeBaseReward(object):
def __init__(self):
self.target_space = spaces.Box(
low=ACTION_LOW,
high=ACTION_HIGH, dtype=np.float32)
def __call__(self, state, action):
raise NotImplementedError
class QubeBaseEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : 50
}
def __init__(self,
frequency=1000,
use_simulator=False):
self.observation_space = spaces.Box(
OBSERVATION_LOW, OBSERVATION_HIGH,
dtype=np.float32)
self.action_space = spaces.Box(
ACTION_LOW, ACTION_HIGH,
dtype=np.float32)
self.reward_fn = QubeBaseReward()
self._theta_velocity_cstate = 0
self._alpha_velocity_cstate = 0
self._theta_velocity = 0
self._alpha_velocity = 0
self._frequency = frequency
# Open the Qube
if use_simulator:
self.qube = QubeServo2Simulator(
euler_steps=1,
frequency=frequency)
else:
self.qube = QubeServo2(frequency=frequency)
self.qube.__enter__()
self.seed()
self.viewer = None
self.use_simulator = use_simulator
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _step(self, action):
motor_voltages = np.clip(np.array(
[action[0]], dtype=np.float64), ACTION_LOW, ACTION_HIGH)
currents, encoders, others = self.qube.action(motor_voltages)
self._sense = currents[0]
self._tach0 = others[0]
# Calculate alpha, theta, alpha_velocity, and theta_velocity
self._theta = encoders[0] * (-2.0 * np.pi / 2048)
alpha_un = encoders[1] * (2.0 * np.pi / 2048) # Alpha without normalizing
self._alpha = (alpha_un % (2.0 * np.pi)) - np.pi # Normalized and shifted alpha
theta_velocity = -2500 * self._theta_velocity_cstate + 50 * self._theta
alpha_velocity = -2500 * self._alpha_velocity_cstate + 50 * alpha_un
self._theta_velocity_cstate += (-50 * self._theta_velocity_cstate + self._theta) / self._frequency
self._alpha_velocity_cstate += (-50 * self._alpha_velocity_cstate + alpha_un) / self._frequency
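        # The two updates above realize v = 50s/(s + 50) * angle, a
        # band-limited derivative: velocity = 50*(angle - 50*cstate) where
        # cstate is a first-order low-pass of the angle (50 rad/s cutoff).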
# TODO: update using the transfer function
self._theta_acceleration = (theta_velocity - self._theta_velocity) * self._frequency
self._alpha_acceleration = (alpha_velocity - self._alpha_velocity) * self._frequency
self._theta_velocity = theta_velocity
self._alpha_velocity = alpha_velocity
return self._get_state()
def _get_state(self):
state = np.asarray([
np.cos(self._theta),
np.sin(self._theta),
np.cos(self._alpha),
np.sin(self._alpha),
self._theta_velocity,
self._alpha_velocity,
self._theta_acceleration,
self._alpha_acceleration,
self._tach0,
self._sense,
], dtype=np.float32)
return state
def _flip_up(self, early_quit=False):
"""Run classic control for flip-up until the pendulum is inverted for
a set amount of time. Assumes that initial state is stationary
downwards.
Args:
            early_quit: Quit if the flip-up doesn't succeed after a set
                amount of time (currently accepted but not used)
"""
control = QubeFlipUpControl(env=self, sample_freq=self._frequency)
time_hold = 1.0 * self._frequency # Number of samples to hold upright
sample = 0 # Samples since control system started
samples_upright = 0 # Consecutive samples pendulum is upright
        state, _, _, _ = self.step([1.0])
while True:
action = control.action(state)
state, _, _, _ = self.step(action)
# Break if pendulum is inverted
if self._alpha < (10 * np.pi / 180):
if samples_upright > time_hold:
break
samples_upright += 1
else:
samples_upright = 0
sample += 1
return state
def _dampen_down(self, min_hold_time=0.5):
action = np.zeros(
shape=self.action_space.shape,
dtype=self.action_space.dtype)
time_hold = min_hold_time * self._frequency
samples_downwards = 0 # Consecutive samples pendulum is stationary
while True:
state, _, _, _ = self.step(action)
# Break if pendulum is stationary
ref_state = [0., 0., 0., 0.]
if np.allclose(state[4:8], ref_state, rtol=1e-02, atol=1e-03):
if samples_downwards > time_hold:
break
samples_downwards += 1
else:
samples_downwards = 0
return self._get_state()
def flip_up(self, early_quit=False, time_out=5, min_hold_time=1):
return self._flip_up(early_quit=early_quit)
def dampen_down(self):
return self._dampen_down()
def reset(self):
# Start the pendulum stationary at the bottom (stable point)
self.dampen_down()
action = np.zeros(
shape=self.action_space.shape,
dtype=self.action_space.dtype)
return self.step(action)[0]
def step(self, action):
state = self._step(action)
reward = self.reward_fn(state, action)
done = False
info = {}
return state, reward, done, info
def render(self, mode='human'):
# Simple and *NOT* physically accurate rendering
screen = screen_width = screen_height = 600
scale = 0.5 * screen / 100.0 # Everything is scaled out of 100
qubewidth = 10.0 * scale
qubeheight = 10.0 * scale
origin = (screen_width/2, screen_height/2)
arm_len = 40 * scale
arm_width = 1.0 * scale
pen_len = 40 * scale
pen_width = 2.0 * scale
def pen_origin(theta, origin=origin, len=arm_len):
x = origin[0] - len * math.sin(theta)
y = origin[1] + len * math.cos(theta)
return x, y
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
# draw qube base
l,r,t,b = qubewidth/2, -qubewidth/2, -qubeheight/2, qubeheight/2
qube = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
qube.set_color(0.0, 0.0, 0.0)
qubetrans = rendering.Transform(translation=origin)
qube.add_attr(qubetrans)
self.viewer.add_geom(qube)
# draw qube arm
l,r,t,b = arm_width/2, -arm_width/2, 0, arm_len
arm = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
arm.set_color(0.5, 0.5, 0.5)
self.armtrans = rendering.Transform(translation=origin)
arm.add_attr(self.armtrans)
self.viewer.add_geom(arm)
arm_trace = rendering.make_circle(radius=arm_len, filled=False)
armtracetrans = rendering.Transform(translation=origin)
arm_trace.set_color(0.5, 0.5, 0.5)
arm_trace.add_attr(armtracetrans)
self.viewer.add_geom(arm_trace)
# draw qube pendulum
pen_orgin = (origin[0], origin[1] + arm_len)
l,r,t,b = pen_width/2, -pen_width/2, 0, pen_len
pen = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
pen.set_color(1.0, 0.0, 0.0)
self.pentrans = rendering.Transform(
translation=pen_orgin,
rotation=math.pi/10)
pen.add_attr(self.pentrans)
self.viewer.add_geom(pen)
self.armtrans.set_rotation(np.pi+self._theta)
self.pentrans.set_translation(*pen_origin(np.pi+self._theta))
self.pentrans.set_rotation(self._alpha)
return self.viewer.render(return_rgb_array = mode=='rgb_array')
def close(self, type=None, value=None, traceback=None):
# Safely close the Qube
self.qube.__exit__(type=type, value=value, traceback=traceback)
if self.viewer: self.viewer.close()
| 33.059233 | 106 | 0.602867 | 1,178 | 9,488 | 4.617997 | 0.202037 | 0.028125 | 0.004963 | 0.004412 | 0.194118 | 0.113971 | 0.104779 | 0.069853 | 0.039706 | 0.039706 | 0 | 0.027998 | 0.296058 | 9,488 | 286 | 107 | 33.174825 | 0.786495 | 0.096648 | 0 | 0.127358 | 0 | 0 | 0.02037 | 0.002708 | 0 | 0 | 0 | 0.003497 | 0 | 1 | 0.084906 | false | 0 | 0.056604 | 0.018868 | 0.216981 | 0.004717 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a70d71d5291dbc0c13322848d901d168bba6bc7 | 3,178 | py | Python | main/helpers.py | anngle/t923 | 078d2c566c77afa2ca1be7663d3c23c9f0ecddac | [
"BSD-3-Clause"
] | 1 | 2021-11-28T05:46:45.000Z | 2021-11-28T05:46:45.000Z | main/helpers.py | anngle/t923 | 078d2c566c77afa2ca1be7663d3c23c9f0ecddac | [
"BSD-3-Clause"
] | null | null | null | main/helpers.py | anngle/t923 | 078d2c566c77afa2ca1be7663d3c23c9f0ecddac | [
"BSD-3-Clause"
] | null | null | null | #coding=utf-8
from werkzeug import import_string, cached_property
from functools import wraps
from flask import request,render_template,session,current_app,url_for
from datetime import timedelta,datetime
from main.extensions import redis_store
from flask_sse import sse
# from urllib.parse import urljoin
# from urllib import parse
# from urlparse import urlparse, urljoin
import time
class LazyView(object):
def __init__(self, import_name):
self.__module__, self.__name__ = import_name.rsplit('.', 1)
self.import_name = import_name
@cached_property
def view(self):
return import_string(self.import_name)
def __call__(self, *args, **kwargs):
return self.view(*args, **kwargs)
def url(bp,url_rule, import_name, **options):
view = LazyView('main.views.' + bp.name+'.'+ import_name)
bp.add_url_rule(url_rule, view_func=view, **options)
def templated(template=None):
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
template_name = template
if template_name is None:
template_name = request.endpoint \
.replace('.', '/') + '.html'
ctx = f(*args, **kwargs)
if ctx is None:
ctx = {}
elif not isinstance(ctx, dict):
return ctx
return render_template(template_name, **ctx)
return decorated_function
return decorator
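# Typical use of the decorator (route and template names are illustrative):
#
#   @app.route('/user/<name>')
#   @templated('users.html')
#   def show_user(name):
#       return {'user': name}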
"""http://flask.pocoo.org/snippets/71/
Counting Online Users with Redis
"""
def mark_online(user_id):
now = int(time.time())
expires = now + (current_app.config['ONLINE_LAST_MINUTES'] * 60) + 10
all_users_key = 'online-users/%d' % (now // 60)
user_key = 'user-activity/%s' % user_id
p = redis_store.pipeline()
p.sadd(all_users_key, user_id)
p.set(user_key, now)
p.expireat(all_users_key, expires)
p.expireat(user_key, expires)
p.execute()
def get_user_last_activity(user_id):
last_active = redis_store.get('user-activity/%s' % user_id)
if last_active is None:
return None
return datetime.utcfromtimestamp(int(last_active))
def get_online_users():
current = int(time.time()) // 60
minutes = range(current_app.config['ONLINE_LAST_MINUTES'])
online_count = redis_store.sunion(['online-users/%d' % (current - x)
for x in minutes])
return online_count
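# Typical wiring (assumes a Flask app object and a logged-in user id stored
# in the session):
#
#   @app.before_request
#   def mark_current_user_online():
#       if 'user_id' in session:
#           mark_online(session['user_id'])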
"""http://flask.pocoo.org/snippets/62/
Securely Redirect Back
"""
# def is_safe_url(target):
# ref_url = parse(request.host_url)
# test_url = parse(urljoin(request.host_url, target))
# return test_url.scheme in ('http', 'https') and \
# ref_url.netloc == test_url.netloc
# def get_redirect_target():
# for target in request.values.get('next'), request.referrer:
# if not target:
# continue
# if is_safe_url(target):
# return target
# def redirect_back(endpoint, **values):
# target = request.form['next']
# if not target or not is_safe_url(target):
# target = url_for(endpoint, **values)
# return redirect(target)
"""
return redirect_back('index')
""" | 28.890909 | 73 | 0.648521 | 414 | 3,178 | 4.746377 | 0.309179 | 0.035623 | 0.021374 | 0.022901 | 0.078372 | 0.033588 | 0 | 0 | 0 | 0 | 0 | 0.00574 | 0.232536 | 3,178 | 110 | 74 | 28.890909 | 0.799918 | 0.237256 | 0 | 0 | 0 | 0 | 0.053981 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.178571 | false | 0 | 0.214286 | 0.035714 | 0.571429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a710889018c82681c9e827f743aab1961a6e26a | 1,482 | py | Python | LeetCode/medium/search_a_2d_matrix_ii.py | hnc01/online-judge | d306dc32c9d8600a987affbe4e4b80809f0b0982 | [
"MIT"
] | null | null | null | LeetCode/medium/search_a_2d_matrix_ii.py | hnc01/online-judge | d306dc32c9d8600a987affbe4e4b80809f0b0982 | [
"MIT"
] | null | null | null | LeetCode/medium/search_a_2d_matrix_ii.py | hnc01/online-judge | d306dc32c9d8600a987affbe4e4b80809f0b0982 | [
"MIT"
] | null | null | null | '''
https://leetcode.com/problems/search-a-2d-matrix-ii/
240. Search a 2D Matrix II
Write an efficient algorithm that searches for a target value in an m x n integer matrix. The matrix has the following properties:
- Integers in each row are sorted in ascending from left to right.
- Integers in each column are sorted in ascending from top to bottom.
'''
'''
Accepted
'''
class Solution:
def searchMatrix(self, matrix: [[int]], target: int) -> bool:
m = len(matrix)
n = len(matrix[0])
for col in range(0, n):
if matrix[0][col] == target:
return True
elif matrix[0][col] > target:
# there's no way we can find it moving forward
break
else:
# if matrix[0][col] < target:
                # we need to search the column IF the cell at [0][col] < target
# there's a chance to find it in the column
for row in range(1, m):
if matrix[row][col] == target:
return True
elif matrix[row][col] > target:
# we reached a point in the column where the numbers are larger than target
break
return False
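# An alternative worth knowing (a sketch, not the accepted submission above):
# the classic O(m + n) "staircase" walk from the top-right corner.
def searchMatrixStaircase(matrix: [[int]], target: int) -> bool:
    row, col = 0, len(matrix[0]) - 1
    while row < len(matrix) and col >= 0:
        if matrix[row][col] == target:
            return True
        elif matrix[row][col] > target:
            col -= 1  # every value below in this column is even larger
        else:
            row += 1  # every value to the left in this row is even smaller
    return False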
matrix = [[1, 4, 7, 11, 15], [2, 5, 8, 12, 19], [3, 6, 9, 16, 22], [10, 13, 14, 17, 24], [18, 21, 23, 26, 30]]
target = 30
print(Solution().searchMatrix(matrix, target))
| 32.933333 | 134 | 0.536437 | 206 | 1,482 | 3.859223 | 0.514563 | 0.067925 | 0.037736 | 0.060377 | 0.210063 | 0.072956 | 0 | 0 | 0 | 0 | 0 | 0.057082 | 0.361673 | 1,482 | 44 | 135 | 33.681818 | 0.783298 | 0.412281 | 0 | 0.210526 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0 | 0 | 0.263158 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a718a243bcf5867511bde8c48494b0dbfa75d51 | 7,503 | py | Python | automow_ekf/src/automow_ekf/__init__.py | Auburn-Automow/au_automow_common | 920be6a740aa6d738e9954417b41490e353efd04 | [
"BSD-3-Clause"
] | 43 | 2016-03-05T17:06:29.000Z | 2022-03-10T08:50:46.000Z | automow_ekf/src/automow_ekf/__init__.py | qintxwd/au_automow_common | 920be6a740aa6d738e9954417b41490e353efd04 | [
"BSD-3-Clause"
] | 2 | 2017-07-10T12:43:49.000Z | 2019-03-13T13:57:31.000Z | automow_ekf/src/automow_ekf/__init__.py | qintxwd/au_automow_common | 920be6a740aa6d738e9954417b41490e353efd04 | [
"BSD-3-Clause"
] | 22 | 2016-03-23T06:10:52.000Z | 2022-03-10T08:50:49.000Z | import numpy as np
import threading
def wrapToPi(angle):
"""
Wrap a given angle in radians to the range -pi to pi.
@param angle : The angle to be wrapped
@param type angle : float
@return : Wrapped angle
@rtype : float
"""
return np.mod(angle + np.pi, 2.0 * np.pi) - np.pi
class AutomowEKF:
__nx = 7 # Number of States in the Kalman Filter
__ny_gps = 2 # Number of measurements from the GPS
__ny_imu = 2 # Number of measurements from the IMU
__nu = 2 # Number of inputs
__prev_time = 0
__dt = np.double
C_gps = np.array([[1, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0]], dtype=__dt)
C_imu = np.array([0, 0, 1, 0, 0, 0, 1], dtype=__dt)
def __init__(self,
x_hat_i,
P_i,
Q,
R_gps,
R_imu):
"""
Initialize the Kalman Filter with a set of input arguments
@param x_hat_i : The initial state of the Kalman Estimator
@param type x_hat_i : (7, ) numpy.array, dtype=np.double
@param P_i : The initial covariance matrix of the Kalman Estimator
@param type P_i : (7, 7) numpy.array, dtype=np.double
@param Q : The process noise covariance of the system
@param type Q : (7, 7) numpy.array, dtype=np.double
@param R_gps : The GPS measurement noise covariance
@param type R_gps : (2, 2) numpy.array, dtype=np.double
@param R_imu : The AHRS measurement noise covariance
@param type R_imu : (1, 1) numpy.array, dtype=np.double
"""
self.state_lock = threading.Lock()
with self.state_lock:
self.x_hat = x_hat_i
self.P = P_i
self.Q = Q
self.R_gps = R_gps
self.R_imu = R_imu
self.F = np.zeros((self.__nx, self.__nx), dtype=self.__dt)
self.G = np.zeros((self.__nx, self.__nx), dtype=self.__dt)
@classmethod
def fromDefault(cls):
"""
Initialize the Kalman Filter with a set of default arguments
"""
x_hat_i = np.array([0, 0, 0, 0.159, 0.159, 0.5461, 0], dtype=cls.__dt)
P_i = np.diag(np.array([100, 100, 100, 0.0001, 0.0001, 0.0001, 0.0001], dtype=cls.__dt))
Q = np.diag(np.array([0.1, 0.1, 0, 0, 0, 0, 0], dtype=cls.__dt))
R_gps = np.eye(2, dtype=cls.__dt) * 0.02
R_imu = np.eye(1, dtype=cls.__dt) * 0.02
return cls(x_hat_i, P_i, Q, R_gps, R_imu)
def updateModel(self, u, dt):
"""
        Update the process and process noise matrices of the model
        @param u : The current input vector
@param type u : (2, ) numpy.array, dtype=np.double
@param dt : The time delta from the previous time update
@param type dt : np.float
"""
self.F = np.eye(self.__nx, dtype=self.__dt)
self.F[0, 2] = -0.5 * dt \
* (self.x_hat[3] * u[0] + self.x_hat[4] * u[1]) \
* np.sin(self.x_hat[2])
self.F[0, 3] = 0.5 * dt * u[0] * np.cos(self.x_hat[2])
self.F[0, 4] = 0.5 * dt * u[1] * np.cos(self.x_hat[2])
self.F[1, 2] = 0.5 * dt \
* (self.x_hat[3] * u[0] + self.x_hat[4] * u[1]) \
* np.cos(self.x_hat[2])
self.F[1, 3] = 0.5 * dt * u[0] * np.sin(self.x_hat[2])
self.F[1, 4] = 0.5 * dt * u[1] * np.sin(self.x_hat[2])
self.F[2, 3] = -1.0 * dt * u[0] / self.x_hat[5]
self.F[2, 4] = dt * u[1] / self.x_hat[5]
self.F[2, 5] = dt \
* (self.x_hat[3] * u[0] - self.x_hat[4] * u[1]) \
/ np.power(self.x_hat[5], 2)
self.G = np.zeros((self.__nx, self.__nx), dtype=self.__dt)
self.G[0, 0] = 0.5 * dt * self.x_hat[3] * np.cos(self.x_hat[2])
self.G[0, 1] = 0.5 * dt * self.x_hat[4] * np.cos(self.x_hat[2])
self.G[0, 3] = 0.5 * dt * u[0] * np.cos(self.x_hat[2])
self.G[0, 4] = 0.5 * dt * u[1] * np.cos(self.x_hat[2])
self.G[1, 0] = 0.5 * dt * self.x_hat[3] * np.sin(self.x_hat[2])
self.G[1, 1] = 0.5 * dt * self.x_hat[4] * np.sin(self.x_hat[2])
self.G[1, 3] = 0.5 * dt * u[0] * np.cos(self.x_hat[2])
self.G[1, 4] = 0.5 * dt * u[1] * np.cos(self.x_hat[2])
self.G[2, 0] = -1.0 * dt * self.x_hat[3] / self.x_hat[5]
self.G[2, 1] = dt * self.x_hat[4] / self.x_hat[5]
self.G[2, 2] = dt
self.G[2, 3] = -1.0 * dt * self.x_hat[3] / self.x_hat[5]
self.G[2, 4] = dt * self.x_hat[4] / self.x_hat[5]
self.G[3, 3] = dt
self.G[4, 4] = dt
self.G[5, 5] = dt
self.G[6, 6] = dt
return
def timeUpdate(self, u, time):
dt = time - self.__prev_time
self.__prev_time = time
self.updateModel(u, dt)
v = self.x_hat[4] / 2.0 * u[1] + self.x_hat[3] / 2.0 * u[0]
w = self.x_hat[4] / self.x_hat[5] * u[1] - \
self.x_hat[3] / self.x_hat[5] * u[0]
with self.state_lock:
self.x_hat[0] += dt * v * np.cos(self.x_hat[2] + dt * w / 2.0)
self.x_hat[1] += dt * v * np.sin(self.x_hat[2] + dt * w / 2.0)
self.x_hat[2] += dt * w
self.x_hat[2] = wrapToPi(self.x_hat[2])
self.P = np.dot(self.F, np.dot(self.P, self.F.T)) \
+ np.dot(self.G, np.dot(self.Q, self.G.T))
return v, w
def measurementUpdateGPS(self, y, R):
        if y.shape != (2, ):
            y = y.reshape(2)
        if y.dtype != np.double:
            y = y.astype(np.double)
innovation = y - np.dot(self.C_gps, self.x_hat)
S = np.dot(self.C_gps, np.dot(self.P, self.C_gps.T))
S += R
K = np.dot(self.P, np.dot(self.C_gps.conj().T, np.linalg.inv(S)))
with self.state_lock:
self.x_hat = self.x_hat + np.dot(K, innovation)
self.P = np.dot((np.eye(self.__nx) - np.dot(K, self.C_gps)), self.P)
return innovation, S, K
def measurementUpdateAHRS(self, y):
y = wrapToPi(y)
# if y.dtype is not np.double:
# y = y.astype(np.double)
innovation = y - np.dot(self.C_imu, self.x_hat)
innovation = wrapToPi(innovation)
S = np.dot(self.C_imu, np.dot(self.P, self.C_imu.T))
S += self.R_imu[0, 0]
K = np.dot(self.P, self.C_imu.T / S)
with self.state_lock:
self.x_hat += K * innovation
self.x_hat[2] = wrapToPi(self.x_hat[2])
self.x_hat[6] = wrapToPi(self.x_hat[6])
self.P = np.dot((np.eye(self.__nx) - \
np.dot(K.reshape((self.__nx, 1)), self.C_imu.reshape((1, self.__nx)))), self.P)
return innovation, S, K
def getYaw(self):
with self.state_lock:
return self.x_hat[2]
def getNorthing(self):
with self.state_lock:
return self.x_hat[1]
def getEasting(self):
with self.state_lock:
return self.x_hat[0]
def getYawBias(self):
with self.state_lock:
return self.x_hat[6]
def getStateString(self):
with self.state_lock:
string = ''
for ii in range(7):
string += str(self.x_hat[ii]) + ", "
return string
def getStateList(self):
with self.state_lock:
return self.x_hat.flatten().tolist()
def getPList(self):
with self.state_lock:
return self.P.flatten()
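def _example_filter_cycle():
    """A hedged usage sketch; all numbers below are made-up illustration values.

    Runs one predict/update cycle on a filter built from the defaults.
    """
    ekf = AutomowEKF.fromDefault()
    u = np.array([1.0, 1.0], dtype=np.double)  # left/right wheel inputs
    ekf.timeUpdate(u, 0.1)                     # propagate the state to t = 0.1 s
    ekf.measurementUpdateGPS(np.array([0.05, 0.10], dtype=np.double), ekf.R_gps)
    ekf.measurementUpdateAHRS(0.01)            # fuse a heading of 0.01 rad
    return ekf.getStateList()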
| 38.086294 | 103 | 0.514461 | 1,280 | 7,503 | 2.867188 | 0.107813 | 0.076294 | 0.141689 | 0.053951 | 0.584469 | 0.539237 | 0.462398 | 0.39346 | 0.318801 | 0.222888 | 0 | 0.055622 | 0.329068 | 7,503 | 196 | 104 | 38.280612 | 0.673421 | 0.1666 | 0 | 0.136691 | 0 | 0 | 0.000331 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.100719 | false | 0 | 0.014388 | 0 | 0.273381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a719e5fbbaf6f206dcab2bd1b09cd9219be9533 | 2,028 | py | Python | functions/oxford.py | codefire53/aegisbot | b4cb4f42e3cbdf8554eb234661cc6091e83f1be4 | [
"MIT"
] | null | null | null | functions/oxford.py | codefire53/aegisbot | b4cb4f42e3cbdf8554eb234661cc6091e83f1be4 | [
"MIT"
] | null | null | null | functions/oxford.py | codefire53/aegisbot | b4cb4f42e3cbdf8554eb234661cc6091e83f1be4 | [
"MIT"
] | null | null | null | '''Importing necessary modules'''
from urllib.parse import quote
import urllib.request
from bs4 import BeautifulSoup
'''Function to search word/phrase on oxford dictionary'''
def define(word):
#Oxford dictionary search query url
url='https://en.oxforddictionaries.com/definition/'+quote(word)
#Parse the html file
    response = urllib.request.urlopen(url)
    soup = BeautifulSoup(response, 'html.parser')
#Initialize definition list
lst=[]
#Find all the elements of the definitions of the keyword section, and looping through them
meanings=soup.find_all('section',{'class':'gramb'})
for row in meanings:
#Obtain the definition type
types=row.find('h3',{'class':'ps pos'})
ulist=row.find('ul',{'class':'semb'})
#find the li tag which contains list of the definitions
word_defs=ulist.find_all('li')
for defs in word_defs:
            #If the tag <div class="trg"> exists, then fetch the main definition which is located in the <p> tag
mean_word=defs.find('div',{'class':'trg'})
if mean_word!=None:
#Generate all <div class="trg"> children
m_word=mean_word.findChildren()
for mw in m_word:
                    #If the current section class is ind and the parent tag is p, then that's the main definition
if mw.get('class')==['ind'] and mw.parent.name=='p':
                        #Append the type and the main definition to the list
lst.append('({}){}'.format(types.get_text().strip(),mw.get_text().strip()))
    #If the list contains any definitions, then return all of them
if lst:
res='List of definitions of "{}" word/phrase:\n'.format(word)
for num,define in enumerate(lst,1):
res+='{}. {}\n'.format(num,define)
return res
    #Otherwise, return an error message
else:
return 'There\'s no "{}" word/phrase in the oxford dictionary database!'.format(word)
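# A hedged demo (the looked-up word is arbitrary); it needs network access to
# en.oxforddictionaries.com, so it only runs when executed directly.
if __name__ == '__main__':
    print(define('python'))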
| 45.066667 | 113 | 0.61785 | 275 | 2,028 | 4.516364 | 0.421818 | 0.016103 | 0.02657 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002011 | 0.2643 | 2,028 | 44 | 114 | 46.090909 | 0.830429 | 0.321499 | 0 | 0 | 0 | 0 | 0.140661 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.142857 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a72cc0496b6255bcf4c039018a93cc9e747a2d4 | 3,546 | py | Python | checkov/common/output/baseline.py | peaudecastor/checkov | a4804b61c1b1390b7abd44ab53285fcbc3e7e80b | [
"Apache-2.0"
] | null | null | null | checkov/common/output/baseline.py | peaudecastor/checkov | a4804b61c1b1390b7abd44ab53285fcbc3e7e80b | [
"Apache-2.0"
] | null | null | null | checkov/common/output/baseline.py | peaudecastor/checkov | a4804b61c1b1390b7abd44ab53285fcbc3e7e80b | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
import json
from collections import defaultdict
from typing import Any, TYPE_CHECKING
if TYPE_CHECKING:
from checkov.common.output.record import Record
from checkov.common.output.report import Report
from checkov.common.typing import _BaselineFinding, _BaselineFailedChecks
class Baseline:
def __init__(self) -> None:
self.path = ""
self.path_failed_checks_map: dict[str, list[_BaselineFinding]] = defaultdict(list)
self.failed_checks: list[_BaselineFailedChecks] = []
def add_findings_from_report(self, report: Report) -> None:
for check in report.failed_checks:
try:
existing = next(
x for x in self.path_failed_checks_map[check.file_path] if x["resource"] == check.resource
)
except StopIteration:
existing = {"resource": check.resource, "check_ids": []}
self.path_failed_checks_map[check.file_path].append(existing)
existing["check_ids"].append(check.check_id)
existing["check_ids"].sort() # Sort the check IDs to be nicer to the eye
def to_dict(self) -> dict[str, Any]:
"""
The output of this class needs to be very explicit, hence the following structure of the dict:
{
"failed_checks": [
{
"file": "path/to/file",
"findings: [
{
"resource": "aws_s3_bucket.this",
"check_ids": [
"CKV_AWS_1",
"CKV_AWS_2",
"CKV_AWS_3"
]
}
]
}
]
}
"""
failed_checks_list = []
for file, findings in self.path_failed_checks_map.items():
formatted_findings = []
for finding in findings:
formatted_findings.append({"resource": finding["resource"], "check_ids": finding["check_ids"]})
failed_checks_list.append({"file": file, "findings": formatted_findings})
resp = {"failed_checks": failed_checks_list}
return resp
def compare_and_reduce_reports(self, scan_reports: list[Report]) -> None:
for scan_report in scan_reports:
scan_report.passed_checks = [
check for check in scan_report.passed_checks if self._is_check_in_baseline(check)
]
scan_report.skipped_checks = [
check for check in scan_report.skipped_checks if self._is_check_in_baseline(check)
]
scan_report.failed_checks = [
check for check in scan_report.failed_checks if not self._is_check_in_baseline(check)
]
def _is_check_in_baseline(self, check: Record) -> bool:
failed_check_id = check.check_id
failed_check_resource = check.resource
for baseline_failed_check in self.failed_checks:
for finding in baseline_failed_check["findings"]:
if finding["resource"] == failed_check_resource and failed_check_id in finding["check_ids"]:
return True
return False
def from_json(self, file_path: str) -> None:
self.path = file_path
with open(file_path, "r") as f:
baseline_raw = json.load(f)
self.failed_checks = baseline_raw.get("failed_checks", {})
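def _example_roundtrip(file_path: str) -> dict[str, Any]:
    """A hedged sketch (illustration only): load a baseline file and expose it.

    Assumes ``file_path`` points at JSON shaped like the structure documented
    in ``to_dict`` above.
    """
    baseline = Baseline()
    baseline.from_json(file_path)
    return {"failed_checks": baseline.failed_checks}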
| 40.295455 | 111 | 0.57868 | 392 | 3,546 | 4.936224 | 0.239796 | 0.099225 | 0.028941 | 0.041344 | 0.170026 | 0.15814 | 0.130749 | 0.082687 | 0.045478 | 0.045478 | 0 | 0.0017 | 0.336435 | 3,546 | 87 | 112 | 40.758621 | 0.820654 | 0.147208 | 0 | 0 | 0 | 0 | 0.049232 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0.035088 | 0.122807 | 0 | 0.298246 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a7625718df7e6640a9e34803147593716f4fd9f | 5,618 | py | Python | tests/support/rabbitmq/__init__.py | juntossomosmais/django-stomp | 65e7cb86f8f6e2336a2739df8f33f985c9b4c792 | [
"MIT"
] | 32 | 2019-06-10T13:24:11.000Z | 2021-12-17T21:00:41.000Z | tests/support/rabbitmq/__init__.py | juntossomosmais/django-stomp | 65e7cb86f8f6e2336a2739df8f33f985c9b4c792 | [
"MIT"
] | 26 | 2019-12-17T12:51:00.000Z | 2022-02-16T16:13:14.000Z | tests/support/rabbitmq/__init__.py | juntossomosmais/django-stomp | 65e7cb86f8f6e2336a2739df8f33f985c9b4c792 | [
"MIT"
] | 1 | 2021-09-11T03:55:30.000Z | 2021-09-11T03:55:30.000Z | import json
import logging
import urllib.parse
from time import sleep
from typing import Generator
from typing import Optional
import requests
from requests.adapters import HTTPAdapter
from tests.support.dtos import ConsumerStatus
from tests.support.dtos import CurrentDestinationStatus
from tests.support.dtos import MessageStatus
logger = logging.getLogger(__name__)
_queues_details_request_path = "/api/queues"
_specific_queue_details_request_path = _queues_details_request_path + "/%2F/{queue_name}"
_bindings_from_queue_request_path = _queues_details_request_path + "/%2F/{queue_name}/bindings"
_get_message_from_queue_request_path = _queues_details_request_path + "/%2F/{queue_name}/get"
_channels_details_request_path = "/api/channels"
_channel_details_from_channel_request_path = _channels_details_request_path + "/{channel_name}"
_overview_request_path = "/api/overview"
def current_queue_configuration(queue_name, host="localhost", port=15672) -> Optional[CurrentDestinationStatus]:
result = _do_request(host, port, _specific_queue_details_request_path.format(queue_name=queue_name))
logger.debug("RabbitMQ request result: %s", result)
if result.get("error"):
return None
if result.get("message_stats"):
message_stats = result["message_stats"]
messages_dequeued = message_stats.get("deliver_get", 0)
messages_enqueued = message_stats.get("publish")
else:
messages_dequeued = 0
messages_enqueued = None
number_of_pending_messages = result["messages"]
number_of_consumers = result["consumers"]
return CurrentDestinationStatus(
number_of_pending_messages, number_of_consumers, messages_enqueued, messages_dequeued
)
def current_topic_configuration(topic_name, host="localhost", port=15672) -> Optional[CurrentDestinationStatus]:
queues = _do_request(host, port, _queues_details_request_path + "?name=&use_regex=false")
for queue_details in queues:
queue_name = queue_details["name"]
bindings = _do_request(host, port, _bindings_from_queue_request_path.format(queue_name=queue_name))
for binding in bindings:
if binding["source"] == "amq.topic" and binding["routing_key"] == topic_name:
                message_stats = queue_details.get("message_stats", {})
                number_of_pending_messages = queue_details["messages"]
                number_of_consumers = queue_details["consumers"]
                messages_enqueued = message_stats.get("publish")
                messages_dequeued = message_stats.get("deliver_get", 0)
return CurrentDestinationStatus(
number_of_pending_messages, number_of_consumers, messages_enqueued, messages_dequeued
)
return None
def consumers_details(connection_id, host="localhost", port=15672) -> Generator[ConsumerStatus, None, None]:
channels = _do_request(host, port, _channels_details_request_path)
for channel in channels:
channel_name = channel["connection_details"]["name"]
channel_details = _do_request(
host,
port,
_channel_details_from_channel_request_path.format(
channel_name=urllib.parse.quote(f"{channel_name} ") + "(1)"
),
)
if channel_details.get("consumer_details"):
for consumer in channel_details["consumer_details"]:
if consumer["consumer_tag"] == f"T_{connection_id}":
yield ConsumerStatus(
address_to_destination_details=None,
destination_name=consumer["queue"]["name"],
session_id=None,
enqueues=None,
dequeues=None,
dispatched=None,
dispatched_queue=None,
prefetch=consumer["prefetch_count"],
max_pending=channel_details["messages_unacknowledged"],
exclusive=consumer["exclusive"],
retroactive=None,
)
def retrieve_message_published(destination_name, host="localhost", port=15672) -> MessageStatus:
body = json.dumps(
{
"vhost": "/",
"name": destination_name,
"truncate": "50000",
"ackmode": "ack_requeue_false",
"encoding": "auto",
"count": "1",
}
)
message_details = _do_request(
host, port, _get_message_from_queue_request_path.format(queue_name=destination_name), do_post=True, body=body
)
assert len(message_details) == 1
properties = message_details[0]["properties"]
details = json.loads(message_details[0]["payload"])
persistent = None
correlation_id = properties["correlation_id"]
headers = properties.pop("headers")
return MessageStatus(None, details, persistent, correlation_id, {**headers, **properties})
def get_broker_version(host="localhost", port=15672) -> str:
broker_overview = _do_request(host, port, _overview_request_path)
return broker_overview["rabbitmq_version"]
def _do_request(host, port, request_path, do_post=False, body=None):
sleep(2)
session = requests.Session()
session.mount("http://", HTTPAdapter(max_retries=3))
address, auth = f"http://{host}:{port}{request_path}", ("guest", "guest")
with session:
if not do_post:
data = session.get(address, auth=auth)
else:
data = session.post(address, auth=auth, data=body)
return data.json()
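if __name__ == "__main__":
    # Hedged smoke test: assumes a local RabbitMQ with the management plugin
    # on port 15672 and the default guest/guest credentials.
    print(get_broker_version())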
| 41.007299 | 117 | 0.672481 | 611 | 5,618 | 5.826514 | 0.222586 | 0.061798 | 0.050562 | 0.038202 | 0.266292 | 0.196067 | 0.170225 | 0.105618 | 0.105618 | 0.105618 | 0 | 0.009933 | 0.229441 | 5,618 | 136 | 118 | 41.308824 | 0.812428 | 0 | 0 | 0.070175 | 0 | 0 | 0.122998 | 0.016376 | 0 | 0 | 0 | 0 | 0.008772 | 1 | 0.052632 | false | 0 | 0.096491 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a77731700f270ddcae5f715cc1cc8dec54c3bd0 | 964 | py | Python | Searching/Ternary Search/Python/TernarySearch.py | priyanshi2808/DSA | 1e907e869a55049c6b5d9469d8d52bfce2add4f0 | [
"MIT"
] | 8 | 2021-10-14T16:31:54.000Z | 2022-01-05T11:56:37.000Z | Searching/Ternary Search/Python/TernarySearch.py | priyanshi2808/DSA | 1e907e869a55049c6b5d9469d8d52bfce2add4f0 | [
"MIT"
] | 55 | 2021-10-15T14:53:05.000Z | 2021-12-21T07:29:00.000Z | Searching/Ternary Search/Python/TernarySearch.py | priyanshi2808/DSA | 1e907e869a55049c6b5d9469d8d52bfce2add4f0 | [
"MIT"
] | 12 | 2021-10-14T12:13:22.000Z | 2022-02-22T13:41:42.000Z | # Here, left = 0 and right = length of array - 1
def ternarySearch(ar, key, left, right):
    if left <= right:
        inter = (right - left) // 3
        leftmid = left + inter
        rightmid = right - inter
        if ar[leftmid] == key:
            print("Element found! Index:", leftmid)
            return 0
        elif ar[rightmid] == key:
            print("Element found! Index:", rightmid)
            return 0
        elif key < ar[leftmid]:
            # the key can only lie in the left third
            return ternarySearch(ar, key, left, leftmid - 1)
        elif key > ar[rightmid]:
            # the key can only lie in the right third
            return ternarySearch(ar, key, rightmid + 1, right)
        else:
            # the key lies strictly between the two midpoints
            return ternarySearch(ar, key, leftmid + 1, rightmid - 1)
    print("Key not found!")
    return 0

# Sample Input (the array must be sorted):
# Ar = [12, 18, 19, 67, 90]
# Key = 19
# Output:
# Element found! Index: 2
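# A hedged driver (illustration only) exercising the function above.
if __name__ == "__main__":
    arr = [12, 18, 19, 67, 90]
    ternarySearch(arr, 19, 0, len(arr) - 1)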
| 22.952381 | 64 | 0.48029 | 102 | 964 | 4.539216 | 0.343137 | 0.097192 | 0.116631 | 0.086393 | 0.107991 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033392 | 0.409751 | 964 | 41 | 65 | 23.512195 | 0.780316 | 0.135892 | 0 | 0.105263 | 0 | 0 | 0.065375 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0 | 0 | 0.368421 | 0.157895 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a7790db65580f59c5c2b4b7513f7c447480f699 | 1,114 | py | Python | feature_engineering/featuretools.py | bukosabino/ml-utils | 3f9379e0558a3db8f4b43b924da8fa30f6d42edb | [
"MIT"
] | 2 | 2019-03-03T15:17:51.000Z | 2021-01-31T15:35:21.000Z | feature_engineering/featuretools.py | bukosabino/ml-utils | 3f9379e0558a3db8f4b43b924da8fa30f6d42edb | [
"MIT"
] | null | null | null | feature_engineering/featuretools.py | bukosabino/ml-utils | 3f9379e0558a3db8f4b43b924da8fa30f6d42edb | [
"MIT"
] | 1 | 2018-10-20T16:42:54.000Z | 2018-10-20T16:42:54.000Z | import featuretools as ft
def merge_featuretools(df_parent, df_related, parent_column, related_column, date_column):
"""Automated feature engineering
More info:
https://www.featuretools.com
https://github.com/featuretools/featuretools
https://docs.featuretools.com
http://www.jmaxkanter.com/static/papers/DSAA_DSM_2015.pdf
"""
# Create the entityset
es = ft.EntitySet('parent')
# Add the entities to the entityset
es = es.entity_from_dataframe('parent', df_parent, index=parent_column)
es = es.entity_from_dataframe('relate', df_related, make_index=True,
time_index=date_column,
index='related_id')
# Define the relationships
relationship = ft.Relationship(es['parent'][parent_column], es['relate'][related_column])
# Add the relationships
es = es.add_relationships([relationship])
# Deep feature synthesis
feature_matrix, feature_defs = ft.dfs(entityset=es,
target_entity='parent')
return feature_matrix.reset_index()
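def _example_usage():
    """A hedged sketch on toy data; every column name here is illustrative."""
    import pandas as pd
    customers = pd.DataFrame({'customer_id': [1, 2]})
    orders = pd.DataFrame({
        'customer_id': [1, 1, 2],
        'amount': [10.0, 20.0, 5.0],
        'created_at': pd.to_datetime(['2019-01-01', '2019-02-01', '2019-01-15']),
    })
    # Aggregates order features (e.g. SUM(orders.amount)) onto each customer.
    return merge_featuretools(customers, orders,
                              parent_column='customer_id',
                              related_column='customer_id',
                              date_column='created_at')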
| 32.764706 | 93 | 0.66158 | 128 | 1,114 | 5.546875 | 0.4375 | 0.050704 | 0.039437 | 0.039437 | 0.064789 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004723 | 0.239677 | 1,114 | 33 | 94 | 33.757576 | 0.83353 | 0.29623 | 0 | 0 | 0 | 0 | 0.061415 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a788cd072bdcfa375e6dc2987b8609a8045d6fa | 6,234 | py | Python | recommender/colaborative/prepare_data.py | DigasNikas/PyRecommender | fb056929bba45431a5fc98691332b9bf91e730bb | [
"MIT"
] | 2 | 2017-05-27T15:06:04.000Z | 2018-11-23T06:43:25.000Z | recommender/colaborative/prepare_data.py | DigasNikas/PyRecommender | fb056929bba45431a5fc98691332b9bf91e730bb | [
"MIT"
] | null | null | null | recommender/colaborative/prepare_data.py | DigasNikas/PyRecommender | fb056929bba45431a5fc98691332b9bf91e730bb | [
"MIT"
] | null | null | null | from pyspark import SQLContext
from pyspark.sql.functions import lit
from datetime import datetime
def prepare_data(sc, months, output_path):
sqlContext = SQLContext(sc)
sqlContext.setConf('spark.sql.parquet.compression.codec', 'snappy')
blacklist = []
blacklist_top50 = ['({})|'.format(x) for x in get_top50()]
blacklist_filters = ['(.+\.{}.*)|'.format(x) for x in get_blackList()]
blacklist.extend(blacklist_top50)
blacklist.extend(blacklist_filters)
blacklist = list(set(blacklist))
rx = ''.join(blacklist)
rx = rx[:-1]
# gets all user installs from the selected number of previous months excluding the current month
df = get_files_from_s3(sqlContext, months)
# select only the hash and explode the list of packages
df_pkg = df.select(
df['hash'].alias('hash'),
df['pkg'].alias('package')
).drop_duplicates().cache()
# remove incoherent packages like "android"
rpkg = '.+\..+'
df_pkg = df_pkg.filter(df_pkg['package'].rlike(rpkg)).cache()
# filter blacklist packages and top 50
df_pkg_nosystemapps = df_pkg.filter(~df_pkg['package'].rlike(rx)).cache()
# connects to database and filter packages with less than 500 downloads
df_pkg_nosystemapps = filter_less_500_downloads(sqlContext, df_pkg_nosystemapps).cache()
def toCSVLine(data):
name = data[0]
id = data[1]
return "{},{}".format(name, id)
    # mapping of user hashes to the IDs used for recommendations
rdd_hashs = df_pkg_nosystemapps.select(df_pkg_nosystemapps['hash']).distinct().rdd.zipWithUniqueId().map(
lambda x: (x[0][0], x[1] + 1)).cache()
df_hashs = sqlContext.createDataFrame(rdd_hashs, ['hash', 'user_id'])
rdd_hashs = rdd_hashs.map(toCSVLine)
rdd_hashs.repartition(1).saveAsTextFile(output_path + "/hashs")
rdd_hashs.unpersist()
print("user hashs saved")
    # mapping of packages to the IDs used for recommendations
rdd_packages = df_pkg_nosystemapps.select(df_pkg_nosystemapps['package']).distinct().rdd.zipWithUniqueId().map(
lambda x: (x[0][0], x[1]+1)).cache()
df_packages = sqlContext.createDataFrame(rdd_packages, ['package', 'app_id'])
rdd_packages = rdd_packages.map(toCSVLine)
rdd_packages.repartition(1).saveAsTextFile(output_path + "/apps")
print("apps ID's saved")
def toCSVLine_2(data):
app_id = data[0]
count = data[1]
quo = data[2]
return "{},{},{}".format(app_id, count, quo)
# final dataframe to be sent to recommend engine
df_data = df_pkg_nosystemapps.join(df_hashs, 'hash', 'left_outer').select('user_id', 'package').cache()
df_data = df_data.join(df_packages, 'package', 'left_outer').select('user_id', 'app_id').cache()
df_data = df_data.withColumn("rating", lit(1)).cache()
df_data.rdd.map(toCSVLine_2).repartition(1).saveAsTextFile(output_path + "/dataset")
print("dataset saved")
# save apps histogram
df_hist = get_app_histogram(df_data, df_packages)
df_hist.rdd.map(toCSVLine_2).repartition(1).saveAsTextFile(output_path + "/histogram")
print("apps histogram saved")
return df_data.rdd
def get_files_from_s3(sqlContext, amount_months):
year = datetime.today().year
month = datetime.today().month
    if month - amount_months > 0:
months = range(month - amount_months, month)
year_and_month = ["year={}/month={}".format(year, m) for m in months]
else:
previous_year_months = [x for x in range(12 - abs(month - amount_months), 13)]
this_year_months = [x for x in range(1, month)]
year_and_month = ["year={}/month={}".format(year - 1, m) for m in previous_year_months]
year_and_month = year_and_month + ["year={}/month={}".format(year, m) for m in this_year_months]
day = '*'
filename = '*'
version = '1'
filepath = ['{}/{}/{}/{}'.format(version, pair, day, filename) for pair in year_and_month]
print("reading {}".format(filepath))
return sqlContext.read.parquet(*filepath)
def filter_less_500_downloads(sqlContext, df_pkg_nosystemapps):
u, p = ['user', 'password']
durl = 'url'
dbta = 'table'
psql_df = sqlContext.read.format('jdbc').options(url=durl,
user=u,
password=p,
dbtable=dbta,
driver='org.postgresql.Driver').load()
psql_df = psql_df.drop(psql_df['added_timestamp'])
df_pkg_nosystemapps = df_pkg_nosystemapps.join(psql_df,
psql_df['data'] == df_pkg_nosystemapps['package']) \
.drop(psql_df['data'])
dbta = 'table'
psql_df = sqlContext.read.format('jdbc').options(url=durl,
user=u,
password=p,
dbtable=dbta,
driver='org.postgresql.Driver').load()
psql_df = psql_df.drop(psql_df['id'])
df_pkg_nosystemapps = df_pkg_nosystemapps.join(psql_df,
psql_df['app_package'] == df_pkg_nosystemapps['id']) \
.drop(psql_df['app_package']) \
.drop(df_pkg_nosystemapps['id'])
df_pkg_nosystemapps = df_pkg_nosystemapps.filter(
df_pkg_nosystemapps['downloads'] > 500).drop(df_pkg_nosystemapps['downloads'])
df_pkg_nosystemapps = df_pkg_nosystemapps.drop_duplicates()
return df_pkg_nosystemapps
def get_app_histogram(df_data, df_packages):
total = df_data.count()
df_hist = df_data.groupBy("app_id").count() # histogram
df_hist = df_hist.withColumn("total", lit(total))
df_hist = df_hist.withColumn('percentage', (df_hist['count'] / df_hist['total'])*100)
df_hist = df_hist.join(df_packages, 'app_id', 'left_outer').select('package', 'count', 'percentage')
return df_hist
def get_blackList():
blacklist_filters = ['list']
return blacklist_filters
def get_top50():
blacklist_top50 = ['list']
return blacklist_top50
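def _example_run(sc):
    """A hedged sketch (illustration only) of driving the pipeline.

    Assumes a live SparkContext ``sc`` with S3 credentials configured; the
    month window and output path below are arbitrary.
    """
    return prepare_data(sc, months=3, output_path='s3://some-bucket/recommender')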
| 38.9625 | 115 | 0.623837 | 774 | 6,234 | 4.788114 | 0.205426 | 0.040475 | 0.105505 | 0.025634 | 0.401241 | 0.320022 | 0.286023 | 0.21047 | 0.174312 | 0.146249 | 0 | 0.012937 | 0.243664 | 6,234 | 159 | 116 | 39.207547 | 0.773065 | 0.076195 | 0 | 0.142857 | 0 | 0 | 0.104384 | 0.013396 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0.026786 | 0.026786 | 0 | 0.169643 | 0.044643 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a7b176678afd25894ed7a7d41c1b8b41f6c77eb | 6,711 | py | Python | social-update.py | uditvashisht/SaralGyaanTwitterRedditBot | 7e6238afd57383eb6a95df4395e23c868c5e4b2c | [
"MIT"
] | null | null | null | social-update.py | uditvashisht/SaralGyaanTwitterRedditBot | 7e6238afd57383eb6a95df4395e23c868c5e4b2c | [
"MIT"
] | null | null | null | social-update.py | uditvashisht/SaralGyaanTwitterRedditBot | 7e6238afd57383eb6a95df4395e23c868c5e4b2c | [
"MIT"
] | null | null | null | import tweepy
import praw
import prawcore
import time
import requests
import logging
import os
import shutil
import facebook
# pip install python-decouple
from decouple import config
# Login Credentials
REDDIT_CLIENT_ID = config('REDDIT_CLIENT_ID')
REDDIT_CLIENT_SECRET = config('REDDIT_CLIENT_SECRET')
REDDIT_USERNAME = config('REDDIT_USERNAME')
REDDIT_PASSWORD = config('REDDIT_PASSWORD')
TWITTER_CONSUMER_KEY = config('TWITTER_CONSUMER_KEY')
TWITTER_CONSUMER_SECRET = config('TWITTER_CONSUMER_SECRET')
TWITTER_ACCESS_TOKEN = config('TWITTER_ACCESS_TOKEN')
TWITTER_ACCESS_TOKEN_SECRET = config('TWITTER_ACCESS_TOKEN_SECRET')
USER_AGENT = 'python:saralgyaan_social_updates:v1.0.0 (by /u/uditvashisht)'
FACEBOOK_PAGE_ID = config('FACEBOOK_PAGE_ID')
FACEBOOK_ACCESS_TOKEN = config('FACEBOOK_ACCESS_TOKEN')
# Dictionary containing subreddits and tags
SUBREDDIT_DICT = {'programmerhumor': ['progammer', 'programmerhumor', 'humor'],
'programmingmemes': ['programming', 'programmingmemes', 'programmerhumor'],
'xkcd': ['xkcd', 'xkcdcomics']
}
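# To cover another community, map its subreddit name to the hashtags to use,
# e.g. (illustrative only):
#   SUBREDDIT_DICT['mathmemes'] = ['math', 'mathmemes', 'stem']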
current_dir = os.getcwd()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
file_handler = logging.FileHandler(f'{os.path.join(current_dir, "social-update.log")}')
fmt = logging.Formatter('%(levelname)s : %(name)s : %(asctime)s : %(message)s')
file_handler.setFormatter(fmt)
logger.addHandler(file_handler)
def auto_post_facebook(picture, message):
"""A function which auto-posts the photos with hastags on facebook.
Requires
--------
facebook: module
pip install facebook-sdk, import facebook
page_id : str
Page ID of the facebook page.
access_token : str
Access token of facebook account.
Can be obtained from https://developers.facebook.com/tools.
Use this tutorial
https://pythoncircle.com/post/666/automating-facebook-page-posts-using-python-script/
Parameters
__________
message : str
title and hashtags of the photo.
picture: str
Complete link of the header image.
Posts
_____
A post containing photo title and hashtags
"""
graph = facebook.GraphAPI(FACEBOOK_ACCESS_TOKEN)
facebook_page_id = FACEBOOK_PAGE_ID
    # If you want to post a plain status update instead:
    # graph.put_object(facebook_page_id, "feed", message='test message')
graph.put_photo(image=open(picture, 'rb'),
message=message)
def login_to_reddit():
""" This function log into to the reddit account and returns the Reddit Instance by interacting with Reddit's API through PRAW
Parameters:
-----------
None
Returns:
--------
A Reddit Instance
"""
try:
logger.info('* Logging into Reddit Account')
reddit = praw.Reddit(client_id=REDDIT_CLIENT_ID,
client_secret=REDDIT_CLIENT_SECRET,
password=REDDIT_PASSWORD,
user_agent=USER_AGENT,
username=REDDIT_USERNAME)
logger.info('* Login successful')
return reddit
except:
logger.info('* Login failed')
def grab_new_image(url):
""" This function grabs the image from the URL of the reddit post and save it as img.jpg
Parameters:
-----------
url : str
        URL of the image attached to the Reddit post
Returns:
--------
An Image
"""
logger.info('* Fetching image from the Reddit')
try:
response = requests.get(url)
with open('img.jpg', 'wb') as image:
image.write(response.content)
image.close()
logger.info('* Image saved successfully')
except:
logger.info('* Something went wrong while downloading image')
def post_tweet(tweet_content):
""" This function post the tweet update with the image
Parameters:
-----------
tweet_content : str
Execute:
--------
Post the tweet with the image
"""
try:
logger.info('* Logging into twitter')
auth = tweepy.OAuthHandler(TWITTER_CONSUMER_KEY,
TWITTER_CONSUMER_SECRET)
auth.set_access_token(TWITTER_ACCESS_TOKEN,
TWITTER_ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
logger.info('* Login successful')
tweet = tweet_content
image_path = 'img.jpg'
logger.info('* Posting on twitter')
api.update_with_media(image_path, tweet)
logger.info("* Successfully posted")
except:
logger.info('* Something went wrong while posting tweet')
def main(sub_reddit, tags):
""" This main function check the sub reddit for images, download the images using grab_new_image() and then tweet it using post_tweet()
Parameters:
-----------
sub_reddit : str
Name of the sub reddit to check
tags : list
list of hashtags to be used
"""
reddit = login_to_reddit()
try:
for submission in reddit.subreddit(sub_reddit).hot(limit=8):
            if not submission.stickied:
logger.info("* Fetching submission from reddit")
post_url = f'redd.it/{str(submission)}'
title = submission.title
tweet_content = f'{title} posted by {str(submission.author)} {post_url} #{" #".join(tags)}'
url = submission.url
if 'jpg' in url:
grab_new_image(url)
post_tweet(tweet_content)
auto_post_facebook('img.jpg', f'{title} #{" #".join(tags)}')
time.sleep(20)
elif 'png' in url:
grab_new_image(url)
post_tweet(tweet_content)
auto_post_facebook('img.jpg', f'{title} #{" #".join(tags)}')
time.sleep(20)
else:
logger.info("* Not an image url")
# exception handling
except prawcore.exceptions.ServerError as e:
logger.info(e)
time.sleep(20)
pass
# excepts errors like rate limit
except praw.exceptions.APIException as e:
logger.info(e)
time.sleep(60)
# excepts other PRAW errors
except praw.exceptions.PRAWException as e:
logger.info(e)
time.sleep(20)
# excepts network connection errors
except prawcore.exceptions.RequestException:
logger.info("* Please check your network connection")
logger.info("* Sleeping for 1 minute")
time.sleep(60)
if __name__ == "__main__":
for key, value in SUBREDDIT_DICT.items():
main(key, value)
| 30.22973 | 139 | 0.627924 | 779 | 6,711 | 5.223363 | 0.288832 | 0.044237 | 0.026542 | 0.017695 | 0.147456 | 0.120177 | 0.101008 | 0.055542 | 0.043254 | 0.043254 | 0 | 0.004091 | 0.271495 | 6,711 | 221 | 140 | 30.366516 | 0.828186 | 0.252868 | 0 | 0.226087 | 0 | 0.008696 | 0.224087 | 0.043223 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0.026087 | 0.095652 | 0 | 0.147826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a7eeca58b940b9dce392dd95136944747af3a0f | 2,553 | py | Python | youths/management/commands/import_youth_data.py | City-of-Helsinki/youth-membership | 36f5324fa7444753d49fb476e71b09cc6e842dc2 | [
"MIT"
] | null | null | null | youths/management/commands/import_youth_data.py | City-of-Helsinki/youth-membership | 36f5324fa7444753d49fb476e71b09cc6e842dc2 | [
"MIT"
] | 31 | 2020-07-02T11:26:39.000Z | 2022-03-12T00:50:49.000Z | youths/management/commands/import_youth_data.py | City-of-Helsinki/youth-membership | 36f5324fa7444753d49fb476e71b09cc6e842dc2 | [
"MIT"
] | null | null | null | import json
from django.contrib.auth import get_user_model
from django.core import serializers
from django.core.management.base import BaseCommand
from django.db import transaction
from django.db.models.signals import post_save
from helusers.models import ADGroup, ADGroupMapping
from sequences.models import Sequence
from youths.models import YouthProfile
from youths.signals import generate_membership_number
from youths.utils import generate_admin_group
User = get_user_model()
class Command(BaseCommand):
help = "Import youth data from a JSON file created using the open-city-profile backend's export_youth_data command."
def add_arguments(self, parser):
parser.add_argument("filename", nargs="+", type=str)
def handle(self, *args, **kwargs):
filename = kwargs["filename"][0]
with open(filename, "r") as infile:
data = json.load(infile)
post_save.disconnect(generate_membership_number, sender=YouthProfile)
with transaction.atomic():
YouthProfile.objects.all().delete()
User.objects.exclude(is_superuser=True).delete()
ADGroup.objects.all().delete()
User.objects.get_by_natural_key = lambda uuid: User.objects.get(uuid=uuid)
ADGroup.objects.get_by_natural_key = lambda name: ADGroup.objects.get(
name=name
)
YouthProfile.objects.get_by_natural_key = (
lambda uuid: YouthProfile.objects.get(id=uuid)
)
max_membership_number = 0
for obj in serializers.deserialize("json", json.dumps(data)):
obj.save()
if obj.object.__class__ == YouthProfile:
membership_number = int(obj.object.membership_number.lstrip("0"))
if membership_number > max_membership_number:
max_membership_number = membership_number
Sequence.objects.filter(name="membership_number").update(
last=max_membership_number
)
YouthProfile.objects.update(approval_token="")
admin_group = generate_admin_group()
for ad_group in ADGroup.objects.all():
ADGroupMapping.objects.create(group=admin_group, ad_group=ad_group)
self.stdout.write(
self.style.SUCCESS(
f"Successfully read {get_user_model().objects.count()} users and "
f"{YouthProfile.objects.count()} from {filename}"
)
)
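    # Hedged usage note: run as `python manage.py import_youth_data dump.json`,
    # where dump.json was produced by open-city-profile's export_youth_data.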
| 37 | 120 | 0.647865 | 288 | 2,553 | 5.559028 | 0.388889 | 0.109931 | 0.04747 | 0.035603 | 0.12055 | 0.057464 | 0.039975 | 0 | 0 | 0 | 0 | 0.001598 | 0.264787 | 2,553 | 68 | 121 | 37.544118 | 0.851359 | 0 | 0 | 0 | 0 | 0.019231 | 0.100274 | 0.025069 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.230769 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a7f11e6c6a877151c17c25eab1dd510ce82e20f | 2,412 | py | Python | 1_screen_pipeline/03_peak_intersection/pcommon.py | weng-lab/SCREEN | e8e7203e2f9baa2de70e2f75bdad3ae24b568367 | [
"MIT"
] | 5 | 2020-07-30T02:35:20.000Z | 2020-12-24T01:26:47.000Z | 1_screen_pipeline/03_peak_intersection/pcommon.py | weng-lab/SCREEN | e8e7203e2f9baa2de70e2f75bdad3ae24b568367 | [
"MIT"
] | 6 | 2021-03-04T10:30:11.000Z | 2022-03-16T16:47:47.000Z | 1_screen_pipeline/03_peak_intersection/pcommon.py | weng-lab/SCREEN | e8e7203e2f9baa2de70e2f75bdad3ae24b568367 | [
"MIT"
] | 2 | 2020-12-08T10:05:02.000Z | 2022-03-10T09:41:19.000Z |
# SPDX-License-Identifier: MIT
# Copyright (c) 2016-2020 Michael Purcaro, Henry Pratt, Jill Moore, Zhiping Weng
from __future__ import print_function
import sys
import os
import gzip
import json
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)),
"../../../metadata/utils"))
from utils import AddPath, Utils, Timer, printWroteNumLines
AddPath(__file__, '../../common/')
from common import printr, printt
def doIntersection(cres, others):
try:
return [p.rstrip().split("\t")[4] for p in Utils.runCmds([
"bedtools", "intersect", "-a", cres, "-b", others, "-wa"
])]
except:
print("pcommon$doIntersection: failed to intersect %s with %s" % (cres, others),
file=sys.stderr)
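# The slice above assumes the accession sits in column 5 of the cRE BED file,
# which is why each intersected line is reduced to split("\t")[4].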
def runIntersectJob(jobargs, bedfnp):
if not os.path.exists(jobargs["bed"]["fnp"]):
print("pcommon$runIntersectJob: missing bed %s; cannot intersect" % jobargs["bed"]["fnp"],
file=sys.stderr)
return None
ret = []
printr("pcommon$runIntersectJob: (exp %d of %d)" % (jobargs["i"], jobargs["total"]),
"intersecting", jobargs["etype"], jobargs["label"])
accessions = doIntersection(bedfnp, jobargs["bed"]["fnp"])
if accessions is None:
print("pcommon$runIntersectJob: warning: unable to intersect REs with bed %s" % jobargs["bed"]["fnp"],
file=sys.stderr)
else:
ret.append((jobargs["etype"], jobargs["label"], jobargs["bed"]["fileID"], accessions))
return ret
def processResults(results, outFnp):
tfImap = {}
fileJsons = []
for fileJson, accessions in results:
if not accessions:
continue
for etype, label, fileID, accs in accessions:
for acc in accs:
if acc not in tfImap:
tfImap[acc] = {"tf": {}, "histone": {}}
if label not in tfImap[acc][etype]:
tfImap[acc][etype][label] = []
tfImap[acc][etype][label].append(fileID)
fileJsons += fileJson
printt("completed hash merge")
with gzip.open(outFnp, 'w') as f:
for k, v in tfImap.iteritems():
f.write('\t'.join([k,
json.dumps(v["tf"]),
json.dumps(v["histone"])
]) + '\n')
printt("wrote", outFnp)
| 33.5 | 110 | 0.569652 | 271 | 2,412 | 5.02214 | 0.431734 | 0.036738 | 0.038207 | 0.024982 | 0.038207 | 0.038207 | 0 | 0 | 0 | 0 | 0 | 0.00519 | 0.281095 | 2,412 | 71 | 111 | 33.971831 | 0.7797 | 0.044362 | 0 | 0.054545 | 0 | 0 | 0.173837 | 0.051282 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054545 | false | 0 | 0.127273 | 0 | 0.236364 | 0.163636 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a8034f5cd14516baa13647e9508787ee3e7837b | 18,882 | py | Python | nested/train_vgg.py | yingyichen-cyy/Nested-Co-teaching | 8b7e3ed02d8994d93dcb2011340fe28ba6012283 | [
"MIT"
] | 39 | 2021-04-29T08:36:59.000Z | 2022-02-26T03:53:48.000Z | nested/train_vgg.py | yingyichen-cyy/Nested-Co-teaching | 8b7e3ed02d8994d93dcb2011340fe28ba6012283 | [
"MIT"
] | 1 | 2021-05-19T07:53:53.000Z | 2021-09-24T09:00:45.000Z | nested/train_vgg.py | yingyichen-cyy/Nested-Co-teaching | 8b7e3ed02d8994d93dcb2011340fe28ba6012283 | [
"MIT"
] | 3 | 2021-05-14T06:53:19.000Z | 2021-08-04T13:44:24.000Z | import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import MultiStepLR
import json
import os
import argparse
import utils
from model import vgg
import itertools
import numpy as np
import random
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
import torch.nn.functional as F
### ------------------------------------ Dataloader -------------------------------------- ###
def get_dataloader(dataset, train_dir, val_dir, batchsize):
if dataset == 'Animal10N':
nb_cls = 10
# transformation of the training set
transform_train = transforms.Compose([
transforms.ToTensor()])
# transformation of the validation set
transform_test = transforms.Compose([
transforms.ToTensor()])
trainloader = DataLoader(ImageFolder(train_dir, transform_train),
batch_size=batchsize,
shuffle=True,
drop_last=True,
num_workers = 4,
pin_memory = True)
valloader = DataLoader(ImageFolder(val_dir, transform_test),
batch_size=batchsize,
shuffle=False,
drop_last=False,
num_workers = 4,
pin_memory = True)
return trainloader, valloader, nb_cls
### --------------------------------------------------------------------------------------------
### ------------------------------------ Distribution -------------------------------------- ###
def GaussianDist(mu, std, N):
dist = np.array([np.exp(-((i - mu) / std)**2) for i in range(1, N + 1)])
return dist / np.sum(dist)
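# Hedged illustration: GaussianDist(0, 1.0, 4) gives roughly
# [0.952, 0.047, 3.2e-4, 2.9e-7], so a small std concentrates the sampled
# nested width on the first feature channels.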
### ---------------------------------------------------------------------------------------------
### ------------------------ Test with Nested (iterate all possible K) --------------------- ###
def TestNested(epoch, best_acc, best_k, net_feat, net_cls, valloader, out_dir, mask_feat_dim):
net_feat.eval()
net_cls.eval()
true_pred = torch.zeros(len(mask_feat_dim)).cuda()
nb_sample = 0
for batchIdx, (inputs, targets) in enumerate(valloader):
inputs = inputs.cuda()
targets = targets.cuda()
feature = net_feat(inputs)
outputs = []
for i in range(len(mask_feat_dim)):
feature_mask = feature * mask_feat_dim[i]
outputs.append( net_cls(feature_mask).unsqueeze(0) )
outputs = torch.cat(outputs, dim=0)
_, pred = torch.max(outputs, dim=2)
targets = targets.unsqueeze(0).expand_as(pred)
true_pred = true_pred + torch.sum(pred == targets, dim=1).type(torch.cuda.FloatTensor)
nb_sample += len(inputs)
acc, k = torch.max((true_pred / nb_sample - 1e-5 * torch.arange(len(mask_feat_dim)).type_as(true_pred)), dim=0)
acc, k = acc.item(), k.item()
msg = '\nNested ... Epoch {:d}, Acc {:.3f} %, K {:d} (Best Acc {:.3f} %)'.format(epoch, acc * 100, k, best_acc * 100)
print (msg)
# save checkpoint
if acc > best_acc:
msg = 'Best Performance improved from {:.3f} --> {:.3f}'.format(best_acc, acc)
print(msg)
print ('Saving Best!!!')
param = {'feat': net_feat.state_dict(),
'cls': net_cls.state_dict(),
}
torch.save(param, os.path.join(out_dir, 'netBest.pth'))
best_acc = acc
best_k = k
return best_acc, acc, best_k
### --------------------------------------------------------------------------------------------
### --------------- Test standard (used for model w/o nested, baseline, dropout) ------------###
def TestStandard(epoch, best_acc, best_k, net_feat, net_cls, valloader, out_dir, mask_feat_dim):
net_feat.eval()
net_cls.eval()
true_pred = torch.zeros(1).cuda()
nb_sample = 0
for batchIdx, (inputs, targets) in enumerate(valloader):
inputs = inputs.cuda()
targets = targets.cuda()
feature = net_feat(inputs)
outputs = net_cls(feature)
_, pred = torch.max(outputs, dim=1)
true_pred = true_pred + torch.sum(pred == targets).type(torch.cuda.FloatTensor)
nb_sample += len(inputs)
acc = true_pred / nb_sample
acc = acc.item()
msg = 'Standard ... Epoch {:d}, Acc {:.3f} %, (Best Acc {:.3f} %)'.format(epoch, acc * 100, best_acc * 100)
print (msg)
# save checkpoint
if acc > best_acc:
msg = 'Best Performance improved from {:.3f} --> {:.3f}'.format(best_acc * 100, acc * 100)
print (msg)
print ('Saving Best!!!')
param = {'feat': net_feat.state_dict(),
'cls': net_cls.state_dict(),
}
torch.save(param, os.path.join(out_dir, 'netBest.pth'))
best_acc = acc
return best_acc, acc, len(mask_feat_dim)
### --------------------------------------------------------------------------------------------
### -------------------------------------- Training --------------------------------------- ###
def Train(epoch, optimizer, net_feat, net_cls, trainloader, criterion, dist1, dist2, mask_feat_dim, alter_train, freeze_bn):
msg = '\nEpoch: {:d}'.format(epoch)
print (msg)
net_feat.train(freeze_bn = freeze_bn)
net_cls.train()
losses = utils.AverageMeter()
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
for batchIdx, (inputs, targets) in enumerate(trainloader):
inputs = inputs.cuda()
targets = targets.cuda()
for optim in optimizer:
optim.zero_grad()
        # whether to use alternative training for the nested mode
if alter_train:
alter = random.randint(0, 1)
else:
alter = None
if dist1 is not None:
if alter == 0 or alter is None:
k1 = np.random.choice(range(len(mask_feat_dim)), p=dist1)
mask1 = mask_feat_dim[k1]
else:
# train both nested layers
mask1 = mask_feat_dim[-1]
else:
mask1 = mask_feat_dim[-1]
feature = net_feat(inputs, mask1)
if dist2 is not None:
if alter == 1 or alter is None:
k2 = np.random.choice(range(len(mask_feat_dim)), p=dist2)
mask2 = mask_feat_dim[k2]
feature_masked = feature * mask2
else:
feature_masked = feature
else:
feature_masked = feature
outputs = net_cls(feature_masked)
loss = criterion(outputs, targets)
loss.backward()
for optim in optimizer:
optim.step()
acc1, acc5 = utils.accuracy(outputs, targets, topk=(1, 5))
losses.update(loss.item(), inputs.size()[0])
top1.update(acc1[0].item(), inputs.size()[0])
top5.update(acc5[0].item(), inputs.size()[0])
msg = 'Loss: {:.3f} | Top1: {:.3f}% | Top5: {:.3f}%'.format(losses.avg, top1.avg, top5.avg)
utils.progress_bar(batchIdx, len(trainloader), msg)
return losses.avg, top1.avg, top5.avg
### --------------------------------------------------------------------------------------------
### ------------------------------------ Lr Warm Up --------------------------------------- ###
def LrWarmUp(warmUpIter, lr, optimizer, net_feat, net_cls, trainloader, criterion, dist1, dist2, mask_feat_dim, alter_train, freeze_bn):
nbIter = 0
while nbIter < warmUpIter:
net_feat.train(freeze_bn = freeze_bn)
net_cls.train()
losses = utils.AverageMeter()
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
for batchIdx, (inputs, targets) in enumerate(trainloader):
nbIter += 1
if nbIter == warmUpIter:
break
lrUpdate = nbIter / float(warmUpIter) * lr
for optim in optimizer:
for g in optim.param_groups:
g['lr'] = lrUpdate
inputs = inputs.cuda()
targets = targets.cuda()
for optim in optimizer:
optim.zero_grad()
            # whether to use alternative training for the nested mode
if alter_train:
alter = random.randint(0, 1)
else:
# train both nested layers
alter = None
if dist1 is not None:
if alter == 0 or alter is None:
k1 = np.random.choice(range(len(mask_feat_dim)), p=dist1)
mask1 = mask_feat_dim[k1]
else:
mask1 = mask_feat_dim[-1]
else:
mask1 = mask_feat_dim[-1]
feature = net_feat(inputs, mask1)
if dist2 is not None:
if alter == 1 or alter is None:
k2 = np.random.choice(range(len(mask_feat_dim)), p=dist2)
mask2 = mask_feat_dim[k2]
feature_masked = feature * mask2
else:
feature_masked = feature
else:
feature_masked = feature
outputs = net_cls(feature_masked)
loss = criterion(outputs, targets)
loss.backward()
for optim in optimizer:
optim.step()
acc1, acc5 = utils.accuracy(outputs, targets, topk=(1, 5))
losses.update(loss.item(), inputs.size()[0])
top1.update(acc1[0].item(), inputs.size()[0])
top5.update(acc5[0].item(), inputs.size()[0])
msg = 'Loss: {:.3f} | Lr : {:.5f} | Top1: {:.3f}% | Top5: {:.3f}%'.format(losses.avg, lrUpdate, top1.avg, top5.avg)
utils.progress_bar(batchIdx, len(trainloader), msg)
### --------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------
########################################-- MAIN FUNCTION --#####################################
#-----------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------
def main(gpu, arch, vgg_dropout, out_dir, dataset, train_dir, val_dir, warmUpIter, lr, nbEpoch, batchsize, momentum=0.9, weightDecay = 5e-4, lrSchedule = [200, 300], lr_gamma=0.1, mu=0, nested1=1.0, nested2=1.0, alter_train=False, resumePth=None, freeze_bn=False, pretrained=False):
best_acc = 0 # best test accuracy
os.environ['CUDA_VISIBLE_DEVICES'] = gpu
trainloader, valloader, nb_cls = get_dataloader(dataset, train_dir, val_dir, batchsize)
# feature net + classifier net (a linear layer)
net_feat = vgg.NetFeat(arch = arch,
pretrained = pretrained,
dataset = dataset,
vgg_dropout = vgg_dropout)
net_cls = vgg.NetClassifier(feat_dim = net_feat.feat_dim,
nb_cls = nb_cls)
net_feat.cuda()
net_cls.cuda()
feat_dim = net_feat.feat_dim
best_k = feat_dim
# generate mask
mask_feat_dim = []
for i in range(feat_dim):
tmp = torch.cuda.FloatTensor(1, feat_dim).fill_(0)
tmp[:, : (i + 1)] = 1
mask_feat_dim.append(tmp)
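    # mask_feat_dim[i] keeps the first i + 1 feature channels and zeroes the
    # rest; sampling i from the Gaussian distributions below realises the
    # nested objective over prefixes of the feature vector.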
# distribution and test function
dist1 = GaussianDist(mu, nested1, feat_dim) if nested1 > 0 else None
dist2 = GaussianDist(mu, nested2, feat_dim) if nested2 > 0 else None
Test = TestNested if (nested1 > 0) or (nested2 > 0) else TestStandard
# load model
if resumePth:
param = torch.load(resumePth)
net_feat.load_state_dict(param['feat'])
print ('Loading feature weight from {}'.format(resumePth))
net_cls.load_state_dict(param['cls'])
print ('Loading classifier weight from {}'.format(resumePth))
# output dir + loss + optimizer
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
criterion = nn.CrossEntropyLoss()
optimizer = [torch.optim.SGD(itertools.chain(*[net_feat.parameters()]),
1e-7,
momentum=args.momentum,
weight_decay=args.weightDecay),
torch.optim.SGD(itertools.chain(*[net_cls.parameters()]),
1e-7,
momentum=args.momentum,
                                 weight_decay=args.weightDecay)]
# learning rate warm up
LrWarmUp(warmUpIter, lr, optimizer, net_feat, net_cls, trainloader, criterion, dist1, dist2, mask_feat_dim, alter_train, freeze_bn)
with torch.no_grad():
best_acc, acc, best_k = Test(0, best_acc, best_k, net_feat, net_cls, valloader, out_dir, mask_feat_dim)
best_acc, best_k = 0, feat_dim
for optim in optimizer:
for g in optim.param_groups:
g['lr'] = lr
history = {'trainTop1':[], 'best_acc':[], 'trainTop5':[], 'valTop1':[], 'trainLoss':[], 'best_k':[]}
lrScheduler = [MultiStepLR(optim, milestones=lrSchedule, gamma=lr_gamma) for optim in optimizer]
for epoch in range(nbEpoch):
trainLoss, trainTop1, trainTop5 = Train(epoch, optimizer, net_feat, net_cls, trainloader, criterion, dist1, dist2, mask_feat_dim, alter_train, freeze_bn)
with torch.no_grad():
best_acc, valTop1, best_k = Test(epoch, best_acc, best_k, net_feat, net_cls, valloader, out_dir, mask_feat_dim)
history['trainTop1'].append(trainTop1)
history['trainTop5'].append(trainTop5)
history['trainLoss'].append(trainLoss)
history['valTop1'].append(valTop1)
history['best_acc'].append(best_acc)
history['best_k'].append(best_k)
with open(os.path.join(out_dir, 'history.json'), 'w') as f:
json.dump(history, f)
for lr_schedule in lrScheduler:
lr_schedule.step()
msg = 'mv {} {}'.format(out_dir, '{}_Acc{:.3f}_K{:d}'.format(out_dir, best_acc, best_k))
print (msg)
os.system(msg)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='PyTorch Classification', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# data
parser.add_argument('--train-dir', type=str, default='../data/Animal10N/train/', help='train directory')
parser.add_argument('--val-dir', type=str, default='../data/Animal10N/test/', help='val directory')
parser.add_argument('--dataset', type=str, choices=['Animal10N'], default='Animal10N', help='which dataset?')
# training
parser.add_argument('--warmUpIter', type=int, default=6000, help='total iterations for learning rate warm')
parser.add_argument('--lr', default=1e-1, type=float, help='learning rate')
parser.add_argument('--weightDecay', default=5e-4, type=float, help='weight decay')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--batchsize', type=int, default=128, help='batch size')
parser.add_argument('--nbEpoch', type=int, default=100, help='nb epoch')
parser.add_argument('--lrSchedule', nargs='+', type=int, default=[50, 75], help='lr schedule')
parser.add_argument('--lr-gamma', type=float, default=0.2, help='decrease learning rate by lr-gamma')
parser.add_argument('--gpu', type=str, default='0', help='gpu devices')
# model
    parser.add_argument('--arch', type=str, choices=['vgg19-bn'], default='vgg19-bn', help='which architecture?')
parser.add_argument('--out-dir', type=str, help='output directory')
parser.add_argument('--mu', type=float, default=0.0, help='nested mean hyperparameter')
parser.add_argument('--nested1', type=float, default=0.0, help='nested1 std hyperparameter')
parser.add_argument('--nested2', type=float, default=0.0, help='nested2 std hyperparameter')
parser.add_argument('--alter-train', action='store_true', help='whether to use alternative training for nested')
parser.add_argument('--vgg-dropout', type=float, default=0.0, help='dropout ratio')
parser.add_argument('--resumePth', type=str, help='resume path')
parser.add_argument('--freeze-bn', action='store_true', help='freeze the BN layers')
parser.add_argument('--pretrained', action='store_true', help='Start with ImageNet pretrained model (Pytorch Model Zoo)')
args = parser.parse_args()
    print(args)
if (args.nested1 > 0 or args.nested2 > 0) and args.vgg_dropout > 0:
        raise RuntimeError('Activating both nested1 / nested2 (eta = {:.3f} / {:.3f}) '
                           'and vgg_dropout (ratio = {:.3f}) at the same time is not supported'.format(
                               args.nested1, args.nested2, args.vgg_dropout))
main(gpu = args.gpu,
arch = args.arch,
vgg_dropout= args.vgg_dropout,
out_dir = args.out_dir,
dataset = args.dataset,
train_dir = args.train_dir,
val_dir = args.val_dir,
warmUpIter = args.warmUpIter,
lr = args.lr,
nbEpoch = args.nbEpoch,
batchsize = args.batchsize,
momentum = args.momentum,
weightDecay = args.weightDecay,
lrSchedule = args.lrSchedule,
lr_gamma = args.lr_gamma,
mu = args.mu,
nested1 = args.nested1,
nested2 = args.nested2,
alter_train = args.alter_train,
resumePth = args.resumePth,
freeze_bn = args.freeze_bn,
pretrained = args.pretrained) | 37.915663 | 283 | 0.517848 | 2,016 | 18,882 | 4.69494 | 0.15129 | 0.027364 | 0.031379 | 0.010988 | 0.464237 | 0.439197 | 0.402007 | 0.390808 | 0.374326 | 0.365029 | 0 | 0.020141 | 0.300551 | 18,882 | 498 | 284 | 37.915663 | 0.696525 | 0.111588 | 0 | 0.437107 | 0 | 0.006289 | 0.086517 | 0.002828 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022013 | false | 0 | 0.053459 | 0 | 0.091195 | 0.034591 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a8cb7844af88c35eb1b63d6733fa0006e6aff06 | 1,291 | py | Python | src/xg_all/xg.py | tapojyotipaul/xgboost-benchmarks | 789b99acbf401617a45a8c82dbae1210378527d8 | [
"Apache-2.0"
] | null | null | null | src/xg_all/xg.py | tapojyotipaul/xgboost-benchmarks | 789b99acbf401617a45a8c82dbae1210378527d8 | [
"Apache-2.0"
] | null | null | null | src/xg_all/xg.py | tapojyotipaul/xgboost-benchmarks | 789b99acbf401617a45a8c82dbae1210378527d8 | [
"Apache-2.0"
] | 2 | 2021-04-07T12:32:42.000Z | 2021-04-21T16:28:42.000Z | from timeit import default_timer as timer
import xgboost as xgb
import common
import gc
NUM_LOOPS = 100
PARAMS = {
'objective': 'reg:squarederror',
'alpha': 0.9,
'max_bin': 256,
'scale_pos_weight': 2,
'learning_rate': 0.1,
'subsample': 1,
'reg_lambda': 1,
'min_child_weight': 0,
'max_depth': 8,
'max_leaves': 2**8,
'tree_method': 'hist',
'predictor': 'cpu_predictor'
}
TRAIN_DF = xgb.DMatrix(data=common.X, label=common.y)
MODEL = xgb.train(params=PARAMS, dtrain=TRAIN_DF)
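# Usage sketch for run_inference (defined below). Assumption: the sibling
# `common` module provides X, y, get_test_data() and calculate_stats() as
# imported above.
#   for n in (1, 100, 1000):
#       run_inference(n)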
def run_inference(num_observations:int = 1000):
"""Run xgboost for specified number of observations"""
# Load data
test_df = common.get_test_data(num_observations)
num_rows = len(test_df)
# print(f"Running {NUM_LOOPS} inference loops with batch size {num_rows}...")
run_times3 = []
inference_times3 = []
for _ in range(NUM_LOOPS):
start_time = timer()
data = xgb.DMatrix(test_df)
MODEL.predict(data)
end_time = timer()
total_time3 = end_time - start_time
        run_times3.append(total_time3 * 10e3)  # note: 10e3 == 1e4, not the usual 1e3 (ms) factor
        inference_time3 = total_time3 * 10e6 / num_rows  # note: 10e6 == 1e7, not the usual 1e6 (us) factor
        inference_times3.append(inference_time3)
print(num_observations, ", ", common.calculate_stats(inference_times3)) | 26.346939 | 81 | 0.659954 | 172 | 1,291 | 4.69186 | 0.5 | 0.02974 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036743 | 0.219985 | 1,291 | 49 | 82 | 26.346939 | 0.764647 | 0.10457 | 0 | 0 | 0 | 0 | 0.138261 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.111111 | 0 | 0.138889 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a8dbcfb9f48db68d70725b0f63d6593739a5371 | 255 | py | Python | gyak/03/duplum-torol.py | horverno/sze-academic-python | 3ac8f2c62b827822f529dc600eef91713e82d551 | [
"MIT"
] | 4 | 2019-06-24T17:01:03.000Z | 2021-11-09T21:48:32.000Z | gyak/03/duplum-torol.py | horverno/sze-academic-python | 3ac8f2c62b827822f529dc600eef91713e82d551 | [
"MIT"
] | null | null | null | gyak/03/duplum-torol.py | horverno/sze-academic-python | 3ac8f2c62b827822f529dc600eef91713e82d551 | [
"MIT"
] | 6 | 2018-07-24T10:08:14.000Z | 2021-09-11T20:40:47.000Z | # irjunk olyan python kodot, amely kitorli egy listabol a duplumokat
a = [10,20,30,20,10,50,60,40,80,50,40]
dup_items = set()
uniq_items = []
for x in a:
if x not in dup_items:
uniq_items.append(x)
dup_items.add(x)
print(uniq_items)  # the list with duplicates removed, original order preserved
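# Expected output for the list above: [10, 20, 30, 50, 60, 40, 80]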
| 19.615385 | 68 | 0.662745 | 48 | 255 | 3.395833 | 0.604167 | 0.196319 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.11 | 0.215686 | 255 | 12 | 69 | 21.25 | 0.705 | 0.258824 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a91c1b9651492ee7c9426b0fcd2c1c972c0dcc4 | 7,034 | py | Python | rudderthralloc/forcealloc.py | kplindegaard/pycs2 | a21a9b9403b84601e889bf253b85b11659ea5896 | [
"BSD-2-Clause"
] | 2 | 2021-04-21T01:53:11.000Z | 2022-03-21T10:05:23.000Z | rudderthralloc/forcealloc.py | kplindegaard/pycs2 | a21a9b9403b84601e889bf253b85b11659ea5896 | [
"BSD-2-Clause"
] | null | null | null | rudderthralloc/forcealloc.py | kplindegaard/pycs2 | a21a9b9403b84601e889bf253b85b11659ea5896 | [
"BSD-2-Clause"
] | 1 | 2020-09-16T03:47:12.000Z | 2020-09-16T03:47:12.000Z | """ forcealloc.py - Map commanded thrust to generalized, cartesian forces """
# BSD 2-Clause License
#
# Copyright (c) 2001-2017, Karl-Petter Lindegaard
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from math import sin, cos
from cs2data import T1LX, T1LY, T2LX, T2LY, T3LX, T3LY
class ForceAllocation:
"""
ForceAllocation - maps from tau_c to generalized forces
"""
def __init__(self, theta1, theta2, c1, c2):
"""
        :param theta1: Port main propeller/rudder positive force angle span [rad]
        :param theta2: Starboard main propeller/rudder positive force angle span [rad]
        :param c1: Port main propeller positive thrust bias [N]
        :param c2: Starboard main propeller positive thrust bias [N]
"""
self.theta1 = theta1
self.theta2 = theta2
self.c1 = c1
self.c2 = c2
self.Q1 = np.eye(4)
self.Q2 = np.eye(4)
# Full allocation matrix
self.A = np.array([
[1, 0, 1, 0, 0],
[0, 1, 0, 1, 1],
[-T1LY, T1LX, -T2LY, T2LX, T3LX]
])
# Filters. f1 = Rudder 2 inactive, f2 = Rudder 1 inactive
self.f1 = np.array([True, True, True, False, True])
self.f2 = np.array([True, False, True, True, True])
# Configure A1, n1 and A1dagger etc.
self.A1 = self.A[:,self.f1]
# Null-vector for A1
self.n1 = np.zeros(4)
self.n1[0] = (T1LX - T3LX) / (T1LY - T2LY)
self.n1[1] = 1.0
self.n1[2] = -self.n1[0]
self.n1[3] = -1.0
# A1_dagger = A1'*inv(A1*A1')
self.A1_dagger = self.A1.T.dot(np.linalg.inv(self.A1.dot(self.A1.T)))
# Configure A2, n2 and A2dagger etc.
self.A2 = self.A[:,self.f2]
# Null-vector for A2
self.n2 = np.zeros(4)
self.n2[0] = (T3LX - T2LX) / (T1LY - T2LY)
self.n2[1] = -self.n2[0]
self.n2[2] = -1.0
self.n2[3] = 1.0
# A2_dagger = A2'*inv(A2*A2')
self.A2_dagger = self.A2.T.dot(np.linalg.inv(self.A2.dot(self.A2.T)))
def nullsub1(self, tauc, Adagger, n, theta, c):
# type: (np.array, np.array, np.array, float, float) -> np.array
# Step 0: Prepare the a-vector (sector boundary)
a1 = cos(theta)
a2 = -sin(theta)
# Step 1: Find optimal solution based on pseudo-inverse
u0 = Adagger.dot(tauc)
# Step 2: Extract prop/rudder and translate to "the other" ref. frame
u0m1 = u0[0] - c
u0m2 = u0[1]
# Step 3: Sector check
nn1 = n[1]
nn2 = -n[0]
dp = nn1*u0m1 + nn2*u0m2
insector = False
if dp <= 0.0:
            # Traverse along the x-axis (fx, 0)
b1 = 0.0
b2 = 1.0
else:
# Are we in sector "1"
if u0m2 >= 0.0:
b1 = 0.0
b2 = 1.0
# Or perhaps we are already within the valid sector
elif u0m1*a2 < u0m2*a1:
insector = True
# Otherwise, traverse along the nullvector until sector limit "a"
else:
b1 = a2
b2 = -a1
# Step 4: Find lambda, the distance to traverse
gamma = 0.0
if not insector:
gamma = -(u0m1*b1 + u0m2*b2) / (n[0]*b1 + n[1]*b2)
# Step 5: Adjust solution
u = u0 + gamma*n
return u
def nullsub2(self, tauc, Adagger, n, theta, c):
# type: (np.array, np.array, np.array, float, float) -> np.array
# Step 0: Prepare the a-vector (sector boundary)
a1 = cos(theta)
a2 = sin(theta)
# Step 1: Find optimal solution based on pseudo-inverse
u0 = Adagger.dot(tauc)
# Step 2: Extract prop/rudder and translate to "the other" ref. frame
u0m1 = u0[1] - c
u0m2 = u0[2]
# Step 3: Sector check
nn1 = n[2]
nn2 = -n[1]
dp = nn1 * u0m1 + nn2 * u0m2
insector = False
if dp >= 0.0:
            # Traverse along the x-axis (fx, 0)
b1 = 0.0
b2 = 1.0
else:
# Are we in sector "1"
if u0m2 <= 0.0:
b1 = 0.0
b2 = 1.0
# Or perhaps we are already within the valid sector
elif u0m1 * a2 > u0m2 * a1:
insector = True
# Otherwise, traverse along the nullvector until sector limit "a"
else:
b1 = a2
b2 = -a1
# Step 4: Find lambda, the distance to traverse
gamma = 0.0
if not insector:
gamma = -(u0m1 * b1 + u0m2 * b2) / (n[1] * b1 + n[2] * b2)
# Step 5: Adjust solution
u = u0 + gamma * n
return u
def allocate(self, tau):
"""
Map 3-DOF commanded thrust to generalized forces. First two elements are surge and sway
for thruster 1 (port main prop+rudder), next two for starboard main prop+rudder, fifth
element is the bow thruster's sway force.
:param tau: Commanded thrust vector (surge, sway, yaw)
:return: Generalized forces
"""
# type: (np.array) -> np.array
# Call subroutines for each rudder
x1 = self.nullsub1(tau, self.A1_dagger, self.n1, self.theta1, self.c1)
x2 = self.nullsub2(tau, self.A2_dagger, self.n2, self.theta2, self.c2)
# Compare results and pick the best solution J = x'*Q*x
j1 = x1.dot(self.Q1.dot(x1))
j2 = x2.dot(self.Q2.dot(x2))
u = np.zeros(5)
if j1 <= j2:
# Use: u = [x1(0) x1(1) x1(2) 0 x1(3)];
u[self.f1] = x1
else:
# u = [x2(0) 0 x2(1) x2(2) x2(3)];
u[self.f2] = x2
return u
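if __name__ == '__main__':
    # Smoke-test sketch: the hyperparameters below are hypothetical values
    # chosen only to exercise the math, not calibrated CyberShip II settings.
    fa = ForceAllocation(theta1=0.5, theta2=0.5, c1=0.1, c2=0.1)
    tau = np.array([1.0, 0.2, 0.05])  # commanded (surge, sway, yaw)
    print(fa.allocate(tau))           # generalized force vector, shape (5,)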
| 33.495238 | 95 | 0.565254 | 996 | 7,034 | 3.980924 | 0.276104 | 0.022951 | 0.011349 | 0.017654 | 0.387894 | 0.371248 | 0.351576 | 0.351576 | 0.351576 | 0.327364 | 0 | 0.066624 | 0.329969 | 7,034 | 209 | 96 | 33.655502 | 0.774666 | 0.486636 | 0 | 0.371134 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041237 | false | 0 | 0.030928 | 0 | 0.113402 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a958e3eba071c4fbf127852dcb80e42dd73f774 | 3,982 | py | Python | docly/logic/logic_main.py | autosoft-dev/docly | 0bd6216b8a9735e9fa76bffd4ffea6cec6cc4a01 | [
"MIT"
] | 29 | 2020-12-31T08:27:32.000Z | 2022-02-15T08:48:51.000Z | docly/logic/logic_main.py | autosoft-dev/docly | 0bd6216b8a9735e9fa76bffd4ffea6cec6cc4a01 | [
"MIT"
] | 4 | 2020-12-30T18:18:54.000Z | 2021-08-03T14:42:35.000Z | docly/logic/logic_main.py | autosoft-dev/docly | 0bd6216b8a9735e9fa76bffd4ffea6cec6cc4a01 | [
"MIT"
] | 2 | 2022-01-04T17:58:22.000Z | 2022-02-05T13:04:14.000Z | import os
import sys
from io import open
import numpy as np
import torch
import torch.nn as nn
from .example import make_example, make_new_example
from .input_features import convert_examples_to_features
from torch.utils.data import DataLoader, Dataset, SequentialSampler, TensorDataset
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
RobertaConfig, RobertaModel, RobertaTokenizer)
MODEL_CLASSES = {'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer)}
model_name_or_path = "microsoft/codebert-base"
beam_size = 10
max_target_length = 128
max_source_length = 256
seed = 42
def load_model(model_path, is_old=False):
if is_old:
from .model import Seq2Seq
else:
from .model_new import Seq2Seq
config_class, model_class, tokenizer_class = MODEL_CLASSES['roberta']
config = config_class.from_pretrained(model_name_or_path)
if is_old:
tokenizer = tokenizer_class.from_pretrained(model_name_or_path)
else:
tokenizer = tokenizer_class.from_pretrained(model_name_or_path, do_lower_case=False)
encoder = model_class.from_pretrained(model_name_or_path, config=config)
decoder_layer = nn.TransformerDecoderLayer(d_model=config.hidden_size,
nhead=config.num_attention_heads)
decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)
model = Seq2Seq(encoder=encoder,
decoder=decoder,
config=config,
beam_size=beam_size,
max_length=max_target_length,
sos_id=tokenizer.cls_token_id,
eos_id=tokenizer.sep_token_id
)
if is_old:
if not torch.cuda.is_available():
model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
else:
model.load_state_dict(torch.load(model_path))
else:
if not torch.cuda.is_available():
model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')), strict=False)
else:
model.load_state_dict(torch.load(model_path), strict=False)
if not torch.cuda.is_available():
model.to("cpu")
model.eval()
return model, tokenizer
def predict_docstring(model, tokenizer, code_tokens, is_old):
examples = make_example(code_tokens) if is_old else make_new_example(code_tokens)
features = convert_examples_to_features(examples, tokenizer)
if is_old:
all_source_ids = torch.tensor([f.source_ids for f in features], dtype=torch.long)
all_source_mask = torch.tensor([f.source_mask for f in features], dtype=torch.long)
else:
all_source_ids = torch.tensor([f.source_ids[: max_source_length] for f in features], dtype=torch.long)
all_source_mask = torch.tensor([f.source_mask[: max_source_length] for f in features], dtype=torch.long)
eval_data = TensorDataset(all_source_ids, all_source_mask)
eval_sampler = SequentialSampler(eval_data)
batch_size = len(code_tokens) if is_old else len(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=batch_size)
    p = []
for batch in eval_dataloader:
if not torch.cuda.is_available():
batch = tuple(t.to('cpu') for t in batch)
else:
batch = tuple(t for t in batch)
source_ids, source_mask = batch
with torch.no_grad():
preds = model(source_ids=source_ids, source_mask=source_mask)
        for pred in preds:
            t = pred[0].cpu().numpy()
            t = list(t)
            if 0 in t:  # truncate at the first padding/end-of-sequence token (id 0)
                t = t[:t.index(0)]
            text = tokenizer.decode(t, clean_up_tokenization_spaces=False)
            p.append(text)
    px = p[0].split()
    if px[-1] == ".":  # glue a dangling final period onto the previous token
        px[-2] = px[-2].strip() + "."
        px.pop()
    return [" ".join(px)]
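# Usage sketch (assumptions: 'pytorch_model.bin' is a hypothetical fine-tuned
# checkpoint path and `tokens` is a list of code-token lists for the functions
# to document):
#   model, tokenizer = load_model('pytorch_model.bin', is_old=False)
#   docstrings = predict_docstring(model, tokenizer, tokens, is_old=False)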
| 35.553571 | 112 | 0.659719 | 525 | 3,982 | 4.725714 | 0.270476 | 0.016123 | 0.016929 | 0.03023 | 0.322048 | 0.322048 | 0.295042 | 0.244256 | 0.217654 | 0.143491 | 0 | 0.00703 | 0.249874 | 3,982 | 111 | 113 | 35.873874 | 0.823569 | 0 | 0 | 0.170455 | 0 | 0 | 0.013059 | 0.005776 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022727 | false | 0 | 0.136364 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a975bde69531b07d4ebbabad0e38aa1e1ed2b20 | 268 | py | Python | app.py | zhoujiahua/TaskTool | ba8e359be0d016f0e14a5ac5671ce926945bf21e | [
"MIT"
] | null | null | null | app.py | zhoujiahua/TaskTool | ba8e359be0d016f0e14a5ac5671ce926945bf21e | [
"MIT"
] | null | null | null | app.py | zhoujiahua/TaskTool | ba8e359be0d016f0e14a5ac5671ce926945bf21e | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
from flask import Flask
from common.BaseClass import Student
app = Flask(__name__)
@app.route('/')
def home_index():
BQ = Student('jerry', 18)
return BQ.get_user_info()
if __name__ == '__main__':
app.run()
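# To try it locally (assumes common/BaseClass.py defines Student(name, age)
# with a get_user_info() method): run `python app.py` and open
# http://127.0.0.1:5000/ in a browser.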
| 14.105263 | 36 | 0.645522 | 37 | 268 | 4.27027 | 0.756757 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018349 | 0.186567 | 268 | 18 | 37 | 14.888889 | 0.706422 | 0.145522 | 0 | 0 | 0 | 0 | 0.061674 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7aa3477bd6ed541a43bdde0d7bf39d526c22e133 | 646 | py | Python | 01/116.py | shuowangphd/lcpy | 18e11bf7ca77acacadeeef93bf6b7f1667eae2cd | [
"MIT"
] | null | null | null | 01/116.py | shuowangphd/lcpy | 18e11bf7ca77acacadeeef93bf6b7f1667eae2cd | [
"MIT"
] | null | null | null | 01/116.py | shuowangphd/lcpy | 18e11bf7ca77acacadeeef93bf6b7f1667eae2cd | [
"MIT"
] | null | null | null | """
# Definition for a Node.
class Node:
def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):
self.val = val
self.left = left
self.right = right
self.next = next
"""
class Solution:
def connect(self, root: 'Optional[Node]') -> 'Optional[Node]':
if not root: return None
        nd = root
        while root.left:  # walk down the leftmost edge, one level at a time
            nl = root.left  # remember the leftmost node of the next level
            while root:  # sweep the current level using the next pointers built so far
                root.left.next = root.right
                root.right.next = root.next.left if root.next else None
                root = root.next
            root = nl
return nd | 29.363636 | 101 | 0.520124 | 81 | 646 | 4.098765 | 0.320988 | 0.072289 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002439 | 0.365325 | 646 | 22 | 102 | 29.363636 | 0.807317 | 0.368421 | 0 | 0 | 0 | 0 | 0.069825 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7aa4fa934c1256120ad178cb78b93a531007672d | 821 | py | Python | data/signals/rel_coords.py | TYSSSY/Apb-gcn | b7c9324d3ef3baafa2fe85d57fc1f81f24e0b1e7 | [
"MIT"
] | null | null | null | data/signals/rel_coords.py | TYSSSY/Apb-gcn | b7c9324d3ef3baafa2fe85d57fc1f81f24e0b1e7 | [
"MIT"
] | 1 | 2020-10-30T02:01:39.000Z | 2020-10-30T02:01:39.000Z | data/signals/rel_coords.py | TYSSSY/Apb-gcn | b7c9324d3ef3baafa2fe85d57fc1f81f24e0b1e7 | [
"MIT"
] | null | null | null | import numpy as np
def get_relative_coordinates(sample,
references=(4, 8, 12, 16)):
# input: C, T, V, M
c, t, v, m = sample.shape
final_sample = np.zeros((4 * c, t, v, m))
valid_frames = (sample != 0).sum(axis=3).sum(axis=2).sum(axis=0) > 0
start = valid_frames.argmax()
end = len(valid_frames) - valid_frames[::-1].argmax()
sample = sample[:, start:end, :, :]
rel_coords = []
for i in range(len(references)):
ref_loc = sample[:, :, references[i], :]
coords_diff = (sample.transpose((2, 0, 1, 3)) - ref_loc).transpose((1, 2, 0, 3))
rel_coords.append(coords_diff)
# Shape: 4*C, t, V, M
rel_coords = np.vstack(rel_coords)
# Shape: C, T, V, M
final_sample[:, start:end, :, :] = rel_coords
return final_sample
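if __name__ == '__main__':
    # Shape-check sketch on synthetic data, (C, T, V, M) = (3, 20, 25, 2);
    # 25 joints cover the default reference indices (4, 8, 12, 16).
    demo = np.random.randn(3, 20, 25, 2)
    print(get_relative_coordinates(demo).shape)  # -> (12, 20, 25, 2)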
| 31.576923 | 88 | 0.5676 | 123 | 821 | 3.642276 | 0.382114 | 0.022321 | 0.033482 | 0.044643 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036244 | 0.260658 | 821 | 25 | 89 | 32.84 | 0.701812 | 0.06821 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7aa5f484404f8770edc1499119077e82dbc3d9e3 | 4,774 | py | Python | src/preprocess_librosa.py | Un-bias/musicnn-training | c7aa67bd1641592fcf43467c37db01b553dd4e5c | [
"0BSD"
] | 41 | 2019-07-24T07:11:53.000Z | 2022-03-28T13:42:34.000Z | src/preprocess_librosa.py | Un-bias/musicnn-training | c7aa67bd1641592fcf43467c37db01b553dd4e5c | [
"0BSD"
] | 3 | 2020-05-04T13:17:58.000Z | 2020-11-14T14:37:06.000Z | src/preprocess_librosa.py | Un-bias/musicnn-training | c7aa67bd1641592fcf43467c37db01b553dd4e5c | [
"0BSD"
] | 13 | 2019-08-19T15:52:00.000Z | 2021-03-22T02:36:33.000Z | import os
import librosa
from joblib import Parallel, delayed
import json
import config_file
import argparse
import pickle
import numpy as np
from pathlib import Path
DEBUG = False
def compute_audio_repr(audio_file, audio_repr_file):
audio, sr = librosa.load(audio_file, sr=config['resample_sr'])
    if config['type'] == 'waveform':
        audio_repr = np.expand_dims(audio, axis=1)
    elif config['spectrogram_type'] == 'mel':
        audio_repr = librosa.feature.melspectrogram(y=audio, sr=sr,
                                                    hop_length=config['hop'],
                                                    n_fft=config['n_fft'],
                                                    n_mels=config['n_mels']).T
    else:
        raise ValueError('Unsupported audio representation type: {}'.format(config['type']))
# Compute length
print(audio_repr.shape)
length = audio_repr.shape[0]
# Transform to float16 (to save storage, and works the same)
audio_repr = audio_repr.astype(np.float16)
# Write results:
with open(audio_repr_file, "wb") as f:
pickle.dump(audio_repr, f) # audio_repr shape: NxM
return length
def do_process(files, index):
try:
[id, audio_file, audio_repr_file] = files[index]
if not os.path.exists(audio_repr_file[:audio_repr_file.rfind('/') + 1]):
path = Path(audio_repr_file[:audio_repr_file.rfind('/') + 1])
path.mkdir(parents=True, exist_ok=True)
# compute audio representation (pre-processing)
length = compute_audio_repr(audio_file, audio_repr_file)
# index.tsv writing
fw = open(config_file.DATA_FOLDER + config['audio_representation_folder'] + "index_" + str(config['machine_i']) + ".tsv", "a")
fw.write("%s\t%s\t%s\n" % (id, audio_repr_file[len(config_file.DATA_FOLDER):], audio_file[len(config_file.DATA_FOLDER):]))
fw.close()
print(str(index) + '/' + str(len(files)) + ' Computed: %s' % audio_file)
except Exception as e:
ferrors = open(config_file.DATA_FOLDER + config['audio_representation_folder'] + "errors" + str(config['machine_i']) + ".txt", "a")
ferrors.write(audio_file + "\n")
ferrors.write(str(e))
ferrors.close()
print('Error computing audio representation: ', audio_file)
print(str(e))
def process_files(files):
if DEBUG:
print('WARNING: Parallelization is not used!')
for index in range(0, len(files)):
do_process(files, index)
else:
Parallel(n_jobs=config['num_processing_units'])(
delayed(do_process)(files, index) for index in range(0, len(files)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('configurationID', help='ID of the configuration dictionary')
args = parser.parse_args()
config = config_file.config_preprocess[args.configurationID]
config['audio_representation_folder'] = "audio_representation/%s__%s/" % (config['identifier'], config['type'])
# set audio representations folder
if not os.path.exists(config_file.DATA_FOLDER + config['audio_representation_folder']):
os.makedirs(config_file.DATA_FOLDER + config['audio_representation_folder'])
else:
print("WARNING: already exists a folder with this name!"
"\nThis is expected if you are splitting computations into different machines.."
"\n..because all these machines are writing to this folder. Otherwise, check your config_file!")
# list audios to process: according to 'index_file'
files_to_convert = []
f = open(config_file.DATA_FOLDER + config["index_file"])
for line in f.readlines():
id, audio = line.strip().split("\t")
audio_repr = audio[:audio.rfind(".")] + ".pk" # .npy or .pk
files_to_convert.append((id, config['audio_folder'] + audio,
config_file.DATA_FOLDER + config['audio_representation_folder'] + audio_repr))
# compute audio representation
if config['machine_i'] == config['n_machines'] - 1:
process_files(files_to_convert[int(len(files_to_convert) / config['n_machines']) * (config['machine_i']):])
# we just save parameters once! In the last thread run by n_machine-1!
json.dump(config, open(config_file.DATA_FOLDER + config['audio_representation_folder'] + "config.json", "w"))
else:
first_index = int(len(files_to_convert) / config['n_machines']) * (config['machine_i'])
second_index = int(len(files_to_convert) / config['n_machines']) * (config['machine_i'] + 1)
assigned_files = files_to_convert[first_index:second_index]
process_files(assigned_files)
print("Audio representation folder: " + config_file.DATA_FOLDER + config['audio_representation_folder'])
| 42.247788 | 139 | 0.651445 | 610 | 4,774 | 4.855738 | 0.288525 | 0.069885 | 0.047265 | 0.067522 | 0.304862 | 0.272113 | 0.243754 | 0.227549 | 0.133018 | 0.053005 | 0 | 0.003512 | 0.22455 | 4,774 | 112 | 140 | 42.625 | 0.796596 | 0.076875 | 0 | 0.0375 | 0 | 0 | 0.205233 | 0.055518 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0375 | false | 0 | 0.1125 | 0 | 0.1625 | 0.0875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7aa67f347816a860d8954caac66621040cc0046d | 532 | py | Python | 45/45.py | cleamoon/ProjectEuler | 8d51ad089e5fa21c709161fc658f8c4b533a3ac3 | [
"MIT"
] | null | null | null | 45/45.py | cleamoon/ProjectEuler | 8d51ad089e5fa21c709161fc658f8c4b533a3ac3 | [
"MIT"
] | null | null | null | 45/45.py | cleamoon/ProjectEuler | 8d51ad089e5fa21c709161fc658f8c4b533a3ac3 | [
"MIT"
] | null | null | null | ltn = []
lpn = []
for i in range(1, 1000000):
t = 143 + i
p = 165 + i
ltn.append(t * (2 * t - 1))
lpn.append(p * (3 * p - 1) // 2)
def bst(n, b = 0, e = len(ltn)):
if b >= e:
if ltn[b] == n:
return True
else:
return False
else:
m = (b + e)//2
if n > ltn[m]:
return bst(n, m+1, e)
elif n < ltn[m]:
return bst(n, b, m)
else:
return True
for p in lpn:
if (bst(p)):
print(p)
break
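# Expected output: 1533776805 (the next triangular-pentagonal-hexagonal
# number after 40755; Project Euler problem 45).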
| 18.344828 | 36 | 0.381579 | 84 | 532 | 2.416667 | 0.369048 | 0.059113 | 0.049261 | 0.108374 | 0.147783 | 0.147783 | 0 | 0 | 0 | 0 | 0 | 0.076923 | 0.462406 | 532 | 28 | 37 | 19 | 0.632867 | 0 | 0 | 0.2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0 | 0 | 0.24 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7aa6a0eddaf3a04a78ac717238e95549001e1d08 | 2,508 | py | Python | tests/python/test_workitems.py | mdp/rpaframework | d427a3a4b9ea360780e449ece2674e275060310e | [
"Apache-2.0"
] | null | null | null | tests/python/test_workitems.py | mdp/rpaframework | d427a3a4b9ea360780e449ece2674e275060310e | [
"Apache-2.0"
] | null | null | null | tests/python/test_workitems.py | mdp/rpaframework | d427a3a4b9ea360780e449ece2674e275060310e | [
"Apache-2.0"
] | null | null | null | import copy
import pytest
from RPA.Robocloud.Items import BaseAdapter, Items
VALID_DATABASE = {
("test-ws", "test-item"): {"username": "testguy", "address": "guy@company.com"},
("test-ws", "second-item"): {"username": "another", "address": "dude@company.com"},
}
class MockAdapter(BaseAdapter):
DATABASE = {}
@classmethod
def validate(cls, item, key, val):
data = cls.DATABASE.get((item.workspace_id, item.item_id))
assert data is not None
assert data[key] == val
def save(self, workspace_id, item_id, data):
self.DATABASE[(workspace_id, item_id)] = data
def load(self, workspace_id, item_id):
return self.DATABASE.get((workspace_id, item_id), {})
@pytest.fixture
def valid_adapter(monkeypatch):
monkeypatch.setenv("RC_WORKSPACE_ID", "test-ws")
monkeypatch.setenv("RC_WORKITEM_ID", "test-item")
MockAdapter.DATABASE = copy.deepcopy(VALID_DATABASE)
yield MockAdapter
MockAdapter.DATABASE = {}
def test_no_env(monkeypatch):
monkeypatch.delenv("RC_WORKSPACE_ID", raising=False)
monkeypatch.delenv("RC_WORKITEM_ID", raising=False)
lib = Items(default_adapter=MockAdapter)
assert lib.current is None
def test_load_env(valid_adapter):
lib = Items(default_adapter=valid_adapter)
# Called by Robot Framework listener
lib._start_suite(None, None)
# Work item loaded using env variables
env = lib.current
assert env is not None
assert env.data["username"] == "testguy"
def test_load_env_disable(valid_adapter):
lib = Items(load_env=False, default_adapter=valid_adapter)
# Called by Robot Framework listener
lib._start_suite(None, None)
assert lib.current is None
def test_keyword_load_item(valid_adapter):
lib = Items(default_adapter=valid_adapter)
item = lib.load_work_item("test-ws", "second-item")
assert item.data["username"] == "another"
assert item == lib.current
def test_keyword_save_item(valid_adapter):
lib = Items(default_adapter=valid_adapter)
item = lib.load_work_item("test-ws", "second-item")
MockAdapter.validate(item, "username", "another")
item.data["username"] = "changed"
lib.save_work_item()
MockAdapter.validate(item, "username", "changed")
def test_keyword_no_active_item():
lib = Items(default_adapter=MockAdapter)
assert lib.current is None
with pytest.raises(AssertionError) as err:
lib.save_work_item()
assert str(err.value) == "No active work item"
| 27.56044 | 87 | 0.702153 | 329 | 2,508 | 5.155015 | 0.243161 | 0.063679 | 0.044222 | 0.064858 | 0.367335 | 0.288915 | 0.288915 | 0.267689 | 0.255896 | 0.255896 | 0 | 0 | 0.176236 | 2,508 | 90 | 88 | 27.866667 | 0.82091 | 0.042265 | 0 | 0.241379 | 0 | 0 | 0.130525 | 0 | 0 | 0 | 0 | 0 | 0.189655 | 1 | 0.172414 | false | 0 | 0.051724 | 0.017241 | 0.275862 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7aa74729651c5f6bb13c86b7a22664c4c8570ee6 | 7,418 | py | Python | seq2seq/models/conv_seq2seq.py | gyy8426/TF_concaption | 7b3face47c96c885b2715605122328b7b6bef609 | [
"Apache-2.0"
] | 342 | 2017-06-23T12:47:32.000Z | 2021-12-06T06:56:15.000Z | seq2seq/models/conv_seq2seq.py | gyy8426/TF_concaption | 7b3face47c96c885b2715605122328b7b6bef609 | [
"Apache-2.0"
] | 26 | 2017-07-25T01:39:39.000Z | 2020-06-08T09:59:17.000Z | seq2seq/models/conv_seq2seq.py | gyy8426/TF_concaption | 7b3face47c96c885b2715605122328b7b6bef609 | [
"Apache-2.0"
] | 123 | 2017-06-25T16:02:37.000Z | 2020-07-08T08:14:11.000Z | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Definition of a basic seq2seq model
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from pydoc import locate
import tensorflow as tf
from seq2seq.contrib.seq2seq import helper as tf_decode_helper
from seq2seq.models.seq2seq_model import Seq2SeqModel
from seq2seq.graph_utils import templatemethod
from seq2seq.models import bridges
from seq2seq.inference import beam_search
class ConvSeq2Seq(Seq2SeqModel):
"""Basic Sequence2Sequence model with a unidirectional encoder and decoder.
The last encoder state is used to initialize the decoder and thus both
must share the same type of RNN cell.
Args:
source_vocab_info: An instance of `VocabInfo`
for the source vocabulary
target_vocab_info: An instance of `VocabInfo`
for the target vocabulary
params: A dictionary of hyperparameters
"""
def __init__(self, params, mode, name="conv_seq2seq"):
super(ConvSeq2Seq, self).__init__(params, mode, name)
self.encoder_class = locate(self.params["encoder.class"])
self.decoder_class = locate(self.params["decoder.class"])
@staticmethod
def default_params():
params = Seq2SeqModel.default_params().copy()
params.update({
"encoder.class": "seq2seq.encoders.ConvEncoderFairseq",
"encoder.params": {}, # Arbitrary parameters for the encoder
"decoder.class": "seq2seq.decoders.ConvDecoder",
"decoder.params": {}, # Arbitrary parameters for the decoder
"source.max_seq_len": 50,
"source.reverse": False,
"target.max_seq_len": 50,
"embedding.dim": 256,
"embedding.init_scale": 0.04,
"embedding.share": False,
"position_embeddings.num_positions": 100,
"inference.beam_search.beam_width": 0,
"inference.beam_search.length_penalty_weight": 1.0,
"inference.beam_search.choose_successors_fn": "choose_top_k",
"vocab_source": "",
"vocab_target": "",
"optimizer.name": "Momentum",
"optimizer.learning_rate": 0.25,
"optimizer.params": {"momentum": 0.99, "use_nesterov": True}, # Arbitrary parameters for the optimizer
#"optimizer.params": { "epsilon": 0.0000008}, # Arbitrary parameters for the optimizer
"optimizer.lr_decay_type": "exponential_decay",
"optimizer.lr_decay_steps": 5000, # one epoch steps
"optimizer.lr_decay_rate": 0.9,
"optimizer.lr_start_decay_at": 0, # start annealing epoch 0
"optimizer.lr_stop_decay_at": tf.int32.max,
"optimizer.lr_min_learning_rate": 1e-5,
"optimizer.lr_staircase": True,
"optimizer.clip_gradients": 0.1,
"optimizer.clip_embed_gradients": 5,
"optimizer.sync_replicas": 0,
"optimizer.sync_replicas_to_aggregate": 0,
})
return params
def source_embedding_fairseq(self):
"""Returns the embedding used for the source sequence.
"""
return tf.get_variable(
name="W",
shape=[self.source_vocab_info.total_size, self.params["embedding.dim"]],
initializer=tf.random_normal_initializer(
mean=0.0,
stddev=0.1))
def target_embedding_fairseq(self):
"""Returns the embedding used for the target sequence.
"""
if self.params["embedding.share"]:
return self.source_embedding_fairseq()
return tf.get_variable(
name="W",
shape=[self.target_vocab_info.total_size, self.params["embedding.dim"]],
initializer=tf.random_normal_initializer(
mean=0.0,
stddev=0.1))
def source_pos_embedding_fairseq(self):
return tf.get_variable(
name="pos",
shape=[self.params["position_embeddings.num_positions"], self.params["embedding.dim"]],
initializer=tf.random_normal_initializer(
mean=0.0,
stddev=0.1))
def target_pos_embedding_fairseq(self):
return tf.get_variable(
name="pos",
shape=[self.params["position_embeddings.num_positions"], self.params["embedding.dim"]],
initializer=tf.random_normal_initializer(
mean=0.0,
stddev=0.1))
def _create_decoder(self, encoder_output, features, _labels):
config = beam_search.BeamSearchConfig(
beam_width=self.params["inference.beam_search.beam_width"],
vocab_size=self.target_vocab_info.total_size,
eos_token=self.target_vocab_info.special_vocab.SEQUENCE_END,
length_penalty_weight=self.params[
"inference.beam_search.length_penalty_weight"],
choose_successors_fn=getattr(
beam_search,
self.params["inference.beam_search.choose_successors_fn"]))
return self.decoder_class(
params=self.params["decoder.params"],
mode=self.mode,
vocab_size=self.target_vocab_info.total_size,
config=config,
target_embedding=self.target_embedding_fairseq(),
pos_embedding=self.target_pos_embedding_fairseq(),
start_tokens=self.target_vocab_info.special_vocab.SEQUENCE_END)
def _decode_train(self, decoder, _encoder_output, _features, labels):
"""Runs decoding in training mode"""
target_embedded = tf.nn.embedding_lookup(decoder.target_embedding,
labels["target_ids"])
return decoder(_encoder_output, labels=target_embedded[:,:-1], sequence_length=labels["target_len"]-1)
def _decode_infer(self, decoder, _encoder_output, features, labels):
"""Runs decoding in inference mode"""
return decoder(_encoder_output, labels)
@templatemethod("encode")
def encode(self, features, labels):
features["source_ids"] = tf.reverse_sequence(features["source_ids"], features["source_len"], batch_dim=0, seq_dim=1) # [[1,2,3,4,PAD,PAD,PAD],[2,3,PAD,PAD,PAD,PAD,PAD]] [4,2]
features["source_ids"] = tf.reverse(features["source_ids"],[1]) # --> [[4,3,2,1,PAD,PAD,PAD],[3,2,PAD,PAD,PAD,PAD,PAD]] --> [[PAD,PAD,PAD,1,2,3,4],[PAD,PAD,PAD,PAD,PAD,2,3]]
source_embedded = tf.nn.embedding_lookup(self.source_embedding_fairseq(),
features["source_ids"])
encoder_fn = self.encoder_class(self.params["encoder.params"], self.mode, self.source_pos_embedding_fairseq())
return encoder_fn(source_embedded, features["source_len"])
@templatemethod("decode")
def decode(self, encoder_output, features, labels):
decoder = self._create_decoder(encoder_output, features, labels)
if self.mode == tf.contrib.learn.ModeKeys.INFER:
return self._decode_infer(decoder, encoder_output, features,
labels)
else:
return self._decode_train(decoder, encoder_output, features,
labels)
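# Params sketch: the defaults returned by default_params() above can be
# overridden per experiment before constructing the model, e.g.
#   params = ConvSeq2Seq.default_params()
#   params["embedding.dim"] = 512  # hypothetical override
#   model = ConvSeq2Seq(params, mode=tf.contrib.learn.ModeKeys.TRAIN)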
| 40.758242 | 181 | 0.685495 | 929 | 7,418 | 5.241119 | 0.259419 | 0.023413 | 0.025878 | 0.022181 | 0.376463 | 0.272335 | 0.222633 | 0.212364 | 0.151571 | 0.110084 | 0 | 0.01914 | 0.204098 | 7,418 | 181 | 182 | 40.983425 | 0.805556 | 0.209356 | 0 | 0.211382 | 0 | 0 | 0.209427 | 0.122065 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089431 | false | 0 | 0.089431 | 0.01626 | 0.284553 | 0.00813 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7aac8956f007f10c83a2012e4e3af7ab31bb2c4c | 12,374 | py | Python | src/approach/finetuning.py | tunglamlqddb/DFKD | 924220a81d2a08bf07e50d86028e04899248d17b | [
"MIT"
] | null | null | null | src/approach/finetuning.py | tunglamlqddb/DFKD | 924220a81d2a08bf07e50d86028e04899248d17b | [
"MIT"
] | null | null | null | src/approach/finetuning.py | tunglamlqddb/DFKD | 924220a81d2a08bf07e50d86028e04899248d17b | [
"MIT"
] | null | null | null | import torch, warnings
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
from argparse import ArgumentParser
from .incremental_learning import Inc_Learning_Appr
from datasets.exemplars_dataset import ExemplarsDataset
class Appr(Inc_Learning_Appr):
"""Class implementing the finetuning baseline"""
def __init__(self, model, device, nepochs=100, lr=0.05, lr_min=1e-4, lr_factor=3, lr_patience=5, clipgrad=10000,
momentum=0, wd=0, multi_softmax=False, wu_nepochs=0, wu_lr_factor=1, fix_bn=False, eval_on_train=False,
logger=None, exemplars_dataset=None, all_outputs=False, CE=True, OPL=False, gamma=0.5, opl_weight=1.0):
super(Appr, self).__init__(model, device, nepochs, lr, lr_min, lr_factor, lr_patience, clipgrad, momentum, wd,
multi_softmax, wu_nepochs, wu_lr_factor, fix_bn, eval_on_train, logger,
exemplars_dataset)
self.all_out = all_outputs
self.CE = CE
self.OPL = OPL
self.gamma = gamma
self.opl_weight = opl_weight
self.means = []
self.covs = []
self.class_labels = []
@staticmethod
def exemplars_dataset_class():
return ExemplarsDataset
@staticmethod
def extra_parser(args):
"""Returns a parser containing the approach specific parameters"""
parser = ArgumentParser()
parser.add_argument('--all-outputs', action='store_true', required=False,
help='Allow all weights related to all outputs to be modified (default=%(default)s)')
parser.add_argument('--CE', action='store_false', required=False,
help='CE loss (default=%(default)s)')
parser.add_argument('--OPL', action='store_true', required=False,
help='OPL loss (default=%(default)s)')
parser.add_argument('--gamma', default=0.5, type=float, required=False,
help='Gamma for neg pair in OPL (default=%(default)s)')
parser.add_argument('--opl_weight', default=1, type=float, required=False,
help='Weight for OPL loss (default=%(default)s)')
return parser.parse_known_args(args)
def _get_optimizer(self):
"""Returns the optimizer"""
if len(self.exemplars_dataset) == 0 and len(self.model.heads) > 1 and not self.all_out:
# if there are no exemplars, previous heads are not modified
params = list(self.model.model.parameters()) + list(self.model.heads[-1].parameters())
else:
params = self.model.parameters()
return torch.optim.SGD(params, lr=self.lr, weight_decay=self.wd, momentum=self.momentum)
def save_protype(self, trained_model, loader):
trained_model.eval()
features = []
labels = []
with torch.no_grad():
for images, targets in loader:
output, feature = trained_model(images.to(self.device), return_features=True)
labels.append(targets.numpy())
features.append(feature.cpu().numpy())
labels = np.hstack(labels)
labels_set = np.unique(labels)
features = np.concatenate(features, 0)
feature_dim = features.shape[1]
for item in labels_set:
index = np.where(item==labels)[0]
feature_classwise = features[index]
self.class_labels.append(item)
self.means.append(torch.from_numpy(np.mean(feature_classwise, axis=0)))
self.covs.append(torch.from_numpy(np.cov(feature_classwise.T)))
def pre_train_process(self, t, trn_loader):
"""Runs before training all epochs of the task (before the train session)"""
if t == 0:
# Sec. 4.1: "the ReLU in the penultimate layer is removed to allow the features to take both positive and
# negative values"
if self.model.model.__class__.__name__ == 'ResNet':
old_block = self.model.model.layer3[-1]
self.model.model.layer3[-1] = BasicBlockNoRelu(old_block.conv1, old_block.bn1, old_block.relu,
old_block.conv2, old_block.bn2, old_block.downsample)
elif self.model.model.__class__.__name__ == 'SmallCNN':
self.model.model.last_relu = False
else:
warnings.warn("Warning: ReLU not removed from last block.")
super().pre_train_process(t, trn_loader)
def train_epoch(self, t, trn_loader):
"""Runs a single epoch"""
self.model.train()
if self.fix_bn and t > 0:
self.model.freeze_bn()
for images, targets in trn_loader:
# Forward current model
if not self.OPL:
features = None
outputs = self.model(images.to(self.device))
else:
outputs, features = self.model(images.to(self.device), return_features=True)
loss = self.criterion(t, outputs, targets.to(self.device), features)
# Backward
self.optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clipgrad)
self.optimizer.step()
def train_loop(self, t, trn_loader, val_loader):
"""Contains the epochs loop"""
# add exemplars to train_loader
if len(self.exemplars_dataset) > 0 and t > 0:
trn_loader = torch.utils.data.DataLoader(trn_loader.dataset + self.exemplars_dataset,
batch_size=trn_loader.batch_size,
shuffle=True,
num_workers=trn_loader.num_workers,
pin_memory=trn_loader.pin_memory)
# FINETUNING TRAINING -- contains the epochs loop
super().train_loop(t, trn_loader, val_loader)
# EXEMPLAR MANAGEMENT -- select training subset
self.exemplars_dataset.collect_exemplars(self.model, trn_loader, val_loader.dataset.transform)
self.save_protype(self.model, trn_loader)
def classify(self, task, features, targets):
# expand means to all batch images # bs*256*num_classes
means = torch.stack(self.means)
means = torch.stack([means]*features.shape[0])
means = means.transpose(1,2)
# expand all features to all classes
features = features.unsqueeze(2)
features = features.expand_as(means)
# get cosine-similarities for all images to all prototypes
# note: features and means do not need normalize
cos_sim = torch.nn.functional.cosine_similarity(features, means.to(self.device), dim=1, eps=1e-08) # bs*num_classes
pred = cos_sim.argmax(1)
hits_tag = (pred == targets.to(self.device)).float()
return hits_tag, hits_tag
def eval_ncm(self, t, val_loader):
with torch.no_grad():
total_loss, total_acc_taw, total_acc_tag, total_num = 0, 0, 0, 0
self.model.eval()
for images, targets in val_loader:
# Forward old model
old_features = None
if t > 0:
old_outputs, old_features = self.model_old(images.to(self.device), return_features=True)
# Forward current model
outputs, feats = self.model(images.to(self.device), return_features=True)
loss = self.criterion(t, outputs, targets.to(self.device), feats)
# during training, the usual accuracy is not computed
if t > len(self.means)-1:
print('No means created yet!')
hits_taw, hits_tag = torch.zeros(targets.shape[0]).float(), torch.zeros(targets.shape[0]).float()
else:
hits_taw, hits_tag = self.classify(t, feats, targets)
# Log
total_loss += loss.item() * len(targets)
total_acc_taw += hits_taw.sum().item()
total_acc_tag += hits_tag.sum().item()
total_num += len(targets)
return total_loss / total_num, total_acc_taw / total_num, total_acc_tag / total_num
def eval(self, t, val_loader):
"""Contains the evaluation code"""
with torch.no_grad():
total_loss, total_acc_taw, total_acc_tag, total_num = 0, 0, 0, 0
self.model.eval()
for images, targets in val_loader:
# Forward current model
if self.OPL:
outputs, features = self.model(images.to(self.device), return_features=True)
else:
outputs = self.model(images.to(self.device))
features = None
loss = self.criterion(t, outputs, targets.to(self.device), features)
hits_taw, hits_tag = self.calculate_metrics(outputs, targets)
# Log
total_loss += loss.item() * len(targets)
total_acc_taw += hits_taw.sum().item()
total_acc_tag += hits_tag.sum().item()
total_num += len(targets)
return total_loss / total_num, total_acc_taw / total_num, total_acc_tag / total_num
def criterion(self, t, outputs, targets, features=None):
"""Returns the loss value"""
if self.all_out or len(self.exemplars_dataset) > 0:
if self.CE and not self.OPL:
return torch.nn.functional.cross_entropy(torch.cat(outputs, dim=1), targets)
            if self.CE and self.OPL:
return torch.nn.functional.cross_entropy(torch.cat(outputs, dim=1), targets) + self.opl_weight*OrthogonalProjectionLoss(self.gamma)(features, targets, normalize=True)
if not self.CE and self.OPL:
return OrthogonalProjectionLoss(self.gamma)(features, targets, normalize=True)
else:
if self.CE and not self.OPL:
return torch.nn.functional.cross_entropy(outputs[t], targets - self.model.task_offset[t])
if self.CE and self.OPL:
return torch.nn.functional.cross_entropy(outputs[t], targets - self.model.task_offset[t]) + self.opl_weight*OrthogonalProjectionLoss(self.gamma)(features, targets - self.model.task_offset[t], normalize=True)
if not self.CE and self.OPL:
return OrthogonalProjectionLoss(self.gamma)(features, targets, normalize=True)
class OrthogonalProjectionLoss(nn.Module):
def __init__(self, gamma=0.5):
super(OrthogonalProjectionLoss, self).__init__()
self.gamma = gamma
def forward(self, features, labels=None, normalize=True):
device = (torch.device('cuda') if features.is_cuda else torch.device('cpu'))
# features are normalized
if normalize:
features = F.normalize(features, p=2, dim=1)
labels = labels[:, None] # extend dim
mask = torch.eq(labels, labels.t()).bool().to(device)
eye = torch.eye(mask.shape[0], mask.shape[1]).bool().to(device)
mask_pos = mask.masked_fill(eye, 0).float()
mask_neg = (~mask).float()
dot_prod = torch.matmul(features, features.t())
pos_pairs_mean = (mask_pos * dot_prod).sum() / (mask_pos.sum() + 1e-6)
neg_pairs_mean = (mask_neg * dot_prod).sum() / (mask_neg.sum() + 1e-6) # TODO: removed abs
loss = (1.0 - pos_pairs_mean) + self.gamma * neg_pairs_mean
return loss
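# Smoke-test sketch for the loss above (synthetic batch, shapes only):
#   feats = torch.randn(8, 64)
#   labs = torch.randint(0, 3, (8,))
#   print(OrthogonalProjectionLoss(gamma=0.5)(feats, labs).item())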
# This class implements a ResNet Basic Block without the final ReLu in the forward
class BasicBlockNoRelu(nn.Module):
expansion = 1
def __init__(self, conv1, bn1, relu, conv2, bn2, downsample):
super(BasicBlockNoRelu, self).__init__()
self.conv1 = conv1
self.bn1 = bn1
self.relu = relu
self.conv2 = conv2
self.bn2 = bn2
self.downsample = downsample
def forward(self, x):
residual = x
out = self.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
if self.downsample is not None:
residual = self.downsample(x)
out += residual
# Removed final ReLU
return out | 47.409962 | 223 | 0.601422 | 1,530 | 12,374 | 4.692157 | 0.191503 | 0.032595 | 0.020059 | 0.017551 | 0.333473 | 0.279983 | 0.256025 | 0.218136 | 0.195431 | 0.195431 | 0 | 0.011894 | 0.293357 | 12,374 | 261 | 224 | 47.409962 | 0.809126 | 0.091401 | 0 | 0.217172 | 0 | 0 | 0.033992 | 0.009393 | 0 | 0 | 0 | 0.003831 | 0 | 1 | 0.080808 | false | 0 | 0.035354 | 0.005051 | 0.207071 | 0.005051 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7aacb536ae303f634a3016d2c8edb89930e0c942 | 11,609 | py | Python | SMA/lab_simfleet/simfleet/mystrategy.py | jiwidi/MIARFID | 979eda45fc18a4816ef65d33b1423a6d63176c04 | [
"MIT"
] | null | null | null | SMA/lab_simfleet/simfleet/mystrategy.py | jiwidi/MIARFID | 979eda45fc18a4816ef65d33b1423a6d63176c04 | [
"MIT"
] | null | null | null | SMA/lab_simfleet/simfleet/mystrategy.py | jiwidi/MIARFID | 979eda45fc18a4816ef65d33b1423a6d63176c04 | [
"MIT"
] | null | null | null | import json
import random
from loguru import logger
from simfleet.customer import CustomerStrategyBehaviour
from simfleet.fleetmanager import FleetManagerStrategyBehaviour
from simfleet.helpers import PathRequestException, distance_in_meters
from simfleet.protocol import (
REQUEST_PERFORMATIVE,
ACCEPT_PERFORMATIVE,
REFUSE_PERFORMATIVE,
PROPOSE_PERFORMATIVE,
CANCEL_PERFORMATIVE,
INFORM_PERFORMATIVE,
QUERY_PROTOCOL,
REQUEST_PROTOCOL,
)
from simfleet.transport import TransportStrategyBehaviour
from simfleet.utils import (
TRANSPORT_WAITING,
TRANSPORT_WAITING_FOR_APPROVAL,
CUSTOMER_WAITING,
TRANSPORT_MOVING_TO_CUSTOMER,
CUSTOMER_ASSIGNED,
TRANSPORT_WAITING_FOR_STATION_APPROVAL,
TRANSPORT_MOVING_TO_STATION,
TRANSPORT_CHARGING,
TRANSPORT_CHARGED,
TRANSPORT_NEEDS_CHARGING,
)
################################################################
# #
# FleetManager Strategy #
# #
################################################################
class MyFleetManagerStrategy(FleetManagerStrategyBehaviour):
"""
The default strategy for the FleetManager agent. By default it delegates all requests to all transports.
    # Modified to send the request only to the transport closest to the customer
"""
async def run(self):
if not self.agent.registration:
await self.send_registration()
msg = await self.receive(timeout=5)
logger.debug("Manager received message: {}".format(msg))
if msg:
content = json.loads(msg.body)
customer = content["customer_id"]
position = content["origin"]
destination = content["dest"]
best_transport = None
            min_distance = float("inf")
            for transport in self.get_transport_agents().values():
                # Assumption: each registered transport entry is a dict that
                # carries its last known "position" alongside "jid" and "name".
                dst = distance_in_meters(transport["position"], position)
                if dst < min_distance:
                    min_distance = dst
                    best_transport = transport
msg.to = str(best_transport["jid"])
logger.debug(
"Manager sent request to transport {}".format(best_transport["name"])
)
await self.send(msg)
################################################################
# #
# Transport Strategy #
# #
################################################################
class MyTransportStrategy(TransportStrategyBehaviour):
"""
The default strategy for the Transport agent. By default it accepts every request it receives if available.
"""
async def run(self):
if self.agent.needs_charging():
if self.agent.stations is None or len(self.agent.stations) < 1:
logger.warning(
"Transport {} looking for a station.".format(self.agent.name)
)
await self.send_get_stations()
else:
station = random.choice(list(self.agent.stations.keys()))
logger.info(
"Transport {} reserving station {}.".format(
self.agent.name, station
)
)
await self.send_proposal(station)
self.agent.status = TRANSPORT_WAITING_FOR_STATION_APPROVAL
msg = await self.receive(timeout=5)
if not msg:
return
logger.debug("Transport received message: {}".format(msg))
try:
content = json.loads(msg.body)
except TypeError:
content = {}
performative = msg.get_metadata("performative")
protocol = msg.get_metadata("protocol")
if protocol == QUERY_PROTOCOL:
if performative == INFORM_PERFORMATIVE:
self.agent.stations = content
logger.info(
"Got list of current stations: {}".format(
list(self.agent.stations.keys())
)
)
elif performative == CANCEL_PERFORMATIVE:
logger.info("Cancellation of request for stations information.")
elif protocol == REQUEST_PROTOCOL:
logger.debug(
"Transport {} received request protocol from customer/station.".format(
self.agent.name
)
)
if performative == REQUEST_PERFORMATIVE:
if self.agent.status == TRANSPORT_WAITING:
if not self.has_enough_autonomy(content["origin"], content["dest"]):
await self.cancel_proposal(content["customer_id"])
self.agent.status = TRANSPORT_NEEDS_CHARGING
else:
await self.send_proposal(content["customer_id"], {})
self.agent.status = TRANSPORT_WAITING_FOR_APPROVAL
elif performative == ACCEPT_PERFORMATIVE:
if self.agent.status == TRANSPORT_WAITING_FOR_APPROVAL:
logger.debug(
"Transport {} got accept from {}".format(
self.agent.name, content["customer_id"]
)
)
try:
self.agent.status = TRANSPORT_MOVING_TO_CUSTOMER
await self.pick_up_customer(
content["customer_id"], content["origin"], content["dest"]
)
except PathRequestException:
logger.error(
"Transport {} could not get a path to customer {}. Cancelling...".format(
self.agent.name, content["customer_id"]
)
)
self.agent.status = TRANSPORT_WAITING
await self.cancel_proposal(content["customer_id"])
except Exception as e:
logger.error(
"Unexpected error in transport {}: {}".format(
self.agent.name, e
)
)
await self.cancel_proposal(content["customer_id"])
self.agent.status = TRANSPORT_WAITING
else:
await self.cancel_proposal(content["customer_id"])
elif performative == REFUSE_PERFORMATIVE:
logger.debug(
"Transport {} got refusal from customer/station".format(
self.agent.name
)
)
self.agent.status = TRANSPORT_WAITING
elif performative == INFORM_PERFORMATIVE:
if self.agent.status == TRANSPORT_WAITING_FOR_STATION_APPROVAL:
logger.info(
"Transport {} got accept from station {}".format(
self.agent.name, content["station_id"]
)
)
try:
self.agent.status = TRANSPORT_MOVING_TO_STATION
await self.send_confirmation_travel(content["station_id"])
await self.go_to_the_station(
content["station_id"], content["dest"]
)
except PathRequestException:
logger.error(
"Transport {} could not get a path to station {}. Cancelling...".format(
self.agent.name, content["station_id"]
)
)
self.agent.status = TRANSPORT_WAITING
await self.cancel_proposal(content["station_id"])
except Exception as e:
logger.error(
"Unexpected error in transport {}: {}".format(
self.agent.name, e
)
)
await self.cancel_proposal(content["station_id"])
self.agent.status = TRANSPORT_WAITING
elif self.agent.status == TRANSPORT_CHARGING:
if content["status"] == TRANSPORT_CHARGED:
self.agent.transport_charged()
await self.agent.drop_station()
elif performative == CANCEL_PERFORMATIVE:
logger.info(
"Cancellation of request for {} information".format(
self.agent.fleet_type
)
)
################################################################
# #
# Customer Strategy #
# #
################################################################
class MyCustomerStrategy(CustomerStrategyBehaviour):
"""
The default strategy for the Customer agent. By default it accepts the first proposal it receives.
"""
async def run(self):
if self.agent.fleetmanagers is None:
await self.send_get_managers(self.agent.fleet_type)
msg = await self.receive(timeout=5)
if msg:
performative = msg.get_metadata("performative")
if performative == INFORM_PERFORMATIVE:
self.agent.fleetmanagers = json.loads(msg.body)
return
elif performative == CANCEL_PERFORMATIVE:
logger.info(
"Cancellation of request for {} information".format(
self.agent.type_service
)
)
return
if self.agent.status == CUSTOMER_WAITING:
await self.send_request(content={})
msg = await self.receive(timeout=5)
if msg:
performative = msg.get_metadata("performative")
transport_id = msg.sender
if performative == PROPOSE_PERFORMATIVE:
if self.agent.status == CUSTOMER_WAITING:
logger.debug(
"Customer {} received proposal from transport {}".format(
self.agent.name, transport_id
)
)
await self.accept_transport(transport_id)
self.agent.status = CUSTOMER_ASSIGNED
else:
await self.refuse_transport(transport_id)
elif performative == CANCEL_PERFORMATIVE:
if self.agent.transport_assigned == str(transport_id):
logger.warning(
"Customer {} received a CANCEL from Transport {}.".format(
self.agent.name, transport_id
)
)
self.agent.status = CUSTOMER_WAITING
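# Hedged usage sketch: in SimFleet-style simulators these strategy classes
# are normally referenced by dotted path from the simulation config; the key
# name below is an assumption for illustration, not taken from this file.
# "customer_strategy": "my_strategies.MyCustomerStrategy"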
| 41.909747 | 111 | 0.484193 | 917 | 11,609 | 5.970556 | 0.164667 | 0.073973 | 0.049315 | 0.06137 | 0.454247 | 0.384658 | 0.331872 | 0.262283 | 0.191233 | 0.179726 | 0 | 0.001321 | 0.413214 | 11,609 | 276 | 112 | 42.061594 | 0.802408 | 0.073564 | 0 | 0.348214 | 0 | 0 | 0.103131 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.040179 | 0 | 0.066964 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ab152d88bdfe1c7ddd598bc9ed02543b5951c4d | 1,582 | py | Python | Chapter07/cell_counting.py | giulic3/DeepLearningLifeSciences | 20b4b2eeff421d331722637899845e4c9a4a52a6 | [
"MIT"
] | 1 | 2020-04-06T04:17:27.000Z | 2020-04-06T04:17:27.000Z | Chapter07/cell_counting.py | joe-nano/DeepLearningLifeSciences | 258066f904159a7c1c81aba16e74ae4e6b4263b5 | [
"MIT"
] | null | null | null | Chapter07/cell_counting.py | joe-nano/DeepLearningLifeSciences | 258066f904159a7c1c81aba16e74ae4e6b4263b5 | [
"MIT"
] | 1 | 2020-02-16T23:43:16.000Z | 2020-02-16T23:43:16.000Z | import deepchem as dc
import deepchem.models.tensorgraph.layers as layers
import numpy as np
import os
import re
RETRAIN = False
# Load the datasets.
image_dir = 'BBBC005_v1_images'
files = []
labels = []
for f in os.listdir(image_dir):
if f.endswith('.TIF'):
files.append(os.path.join(image_dir, f))
labels.append(int(re.findall('_C(.*?)_', f)[0]))
loader = dc.data.ImageLoader()
dataset = loader.featurize(files, np.array(labels))
splitter = dc.splits.RandomSplitter()
train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split(dataset, seed=123)
# Create the model.
learning_rate = dc.models.optimizers.ExponentialDecay(0.001, 0.9, 250)
model = dc.models.TensorGraph(learning_rate=learning_rate, model_dir='models/model')
features = layers.Feature(shape=(None, 520, 696))
labels = layers.Label(shape=(None,))
prev_layer = features
for num_outputs in [16, 32, 64, 128, 256]:
prev_layer = layers.Conv2D(num_outputs, kernel_size=5, stride=2, in_layers=prev_layer)
output = layers.Dense(1, in_layers=layers.Flatten(prev_layer))
model.add_output(output)
loss = layers.ReduceSum(layers.L2Loss(in_layers=(output, labels)))
model.set_loss(loss)
if not os.path.exists('./models'):
os.mkdir('models')
if not os.path.exists('./models/model'):
os.mkdir('models/model')
if not RETRAIN:
model.restore()
# Train it and evaluate performance on the test set.
if RETRAIN:
print("About to fit model for 50 epochs")
model.fit(train_dataset, nb_epoch=50)
y_pred = model.predict(test_dataset).flatten()
print(np.sqrt(np.mean((y_pred-test_dataset.y)**2)))
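# Hedged follow-up sketch: score a single held-out image; this assumes the
# DiskDataset.select API and rounds the continuous prediction to a count.
# y0 = model.predict(test_dataset.select([0])).flatten()[0]
# print(round(float(y0)))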
| 32.285714 | 95 | 0.747155 | 248 | 1,582 | 4.625 | 0.459677 | 0.031386 | 0.012206 | 0.01918 | 0.040105 | 0.040105 | 0 | 0 | 0 | 0 | 0 | 0.031892 | 0.108091 | 1,582 | 48 | 96 | 32.958333 | 0.781006 | 0.054994 | 0 | 0 | 0 | 0 | 0.075788 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.128205 | 0 | 0.128205 | 0.051282 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ab161405dab5a9d19625915f984dc3130fe9f36 | 2,428 | py | Python | fn_playbook_utils/fn_playbook_utils/components/funct_pb_get_playbook_data.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 65 | 2017-12-04T13:58:32.000Z | 2022-03-24T18:33:17.000Z | fn_playbook_utils/fn_playbook_utils/components/funct_pb_get_playbook_data.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 48 | 2018-03-02T19:17:14.000Z | 2022-03-09T22:00:38.000Z | fn_playbook_utils/fn_playbook_utils/components/funct_pb_get_playbook_data.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 95 | 2018-01-11T16:23:39.000Z | 2022-03-21T11:34:29.000Z | # -*- coding: utf-8 -*-
#(c) Copyright IBM Corp. 2010, 2021. All Rights Reserved.
#pragma pylint: disable=unused-argument, no-self-use, line-too-long
"""AppFunction implementation"""
from cachetools import cached, TTLCache
from resilient_circuits import AppFunctionComponent, app_function, FunctionResult
from fn_playbook_utils.lib.common import get_playbooks_by_incident_id, parse_inputs
PACKAGE_NAME = "fn_playbook_utils"
FN_NAME = "pb_get_playbook_data"
class FunctionComponent(AppFunctionComponent):
"""Component that implements function 'pb_get_playbook_data'"""
def __init__(self, opts):
super(FunctionComponent, self).__init__(opts, PACKAGE_NAME)
self.restclient = self.rest_client()
@app_function(FN_NAME)
def _app_function(self, fn_inputs):
"""
Function: Get information on workflows run for this incident or for a range of incidents
Inputs:
- fn_inputs.pb_min_incident_id
- fn_inputs.pb_max_incident_id
- fn_inputs.pb_min_incident_date
- fn_inputs.pb_max_incident_date
- fn_inputs.pb_object_name
- fn_inputs.pb_object_type
"""
yield self.status_message("Starting App Function: '{0}'".format(FN_NAME))
min_id, max_id = parse_inputs(self.restclient, fn_inputs)
yield self.status_message("Using min_incident: {} max_incident: {}".format(min_id, max_id))
result_data = self.get_all_incident_playbooks(min_id, max_id)
yield self.status_message("Finished running App Function: '{0}'".format(FN_NAME))
yield FunctionResult(result_data)
@cached(cache=TTLCache(maxsize=30, ttl=60))
def get_all_incident_playbooks(self, min_id, max_id):
# get all the incident data to return
result_dict = {}
result_data = {
"org_id" : self.restclient.org_id,
"min_id": min_id,
"max_id": max_id,
"playbook_content": result_dict
}
# don't continue if no values
if bool(min_id and max_id):
search_results = get_playbooks_by_incident_id(self.restclient, min_id, max_id)
for pb in search_results.get('data', []):
if pb['incident_id'] in result_dict:
result_dict[pb['incident_id']].append(pb)
else:
result_dict[pb['incident_id']] = [pb]
return result_data
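    # Hedged illustration of the returned payload (ids and values made up):
    # {"org_id": 201, "min_id": 100, "max_id": 110,
    #  "playbook_content": {100: [{"incident_id": 100, ...}], ...}}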
| 36.787879 | 99 | 0.66598 | 315 | 2,428 | 4.790476 | 0.35873 | 0.042412 | 0.032472 | 0.039761 | 0.163022 | 0.031809 | 0 | 0 | 0 | 0 | 0 | 0.008104 | 0.237644 | 2,428 | 65 | 100 | 37.353846 | 0.807131 | 0.250412 | 0 | 0 | 0 | 0 | 0.122248 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088235 | false | 0 | 0.088235 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ab7f304cf92455bd030e34fb9b57d32e2f4394e | 1,864 | py | Python | Amazone/Android_Phone_Price.py | anivalogy/Web_Scraping | 9431ee434e9c19adcf45d185065625608755acc4 | [
"Apache-2.0"
] | 1 | 2020-11-10T11:30:07.000Z | 2020-11-10T11:30:07.000Z | Amazone/Android_Phone_Price.py | anivalogy/Web_Scraping | 9431ee434e9c19adcf45d185065625608755acc4 | [
"Apache-2.0"
] | null | null | null | Amazone/Android_Phone_Price.py | anivalogy/Web_Scraping | 9431ee434e9c19adcf45d185065625608755acc4 | [
"Apache-2.0"
] | 1 | 2020-12-24T12:25:40.000Z | 2020-12-24T12:25:40.000Z | import csv
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver import Chrome
def get_url(search_item):
template="https://www.amazon.in/s?k={}&crid=1GNY6Q6AHOOKS&sprefix=and%2Caps%2C524&ref=nb_sb_ss_ts-oa-p_1_3"
search_item=search_item.replace(' ','+')
    # add the page query placeholder
    url = template.format(search_item)
    url += '&page={}'
return url
def extract_record(item):
#description Url and heading
atag=item.h2.a
description=atag.text.strip()
url="https://www.amazon.in/" +atag.get('href')
try:
#price
price_present=item.find('span','a-price')
price=price_present.find('span' ,'a-offscreen').text
except AttributeError:
return
try:
#rating and review
rating=item.i.text
review_count = item.find('span',{'class':'a-size-base','dir':'auto'}).text
    except AttributeError:
        rating = ''
        review_count = ''
results=(description,price,rating,review_count,url)
return results
def main(search_item):
    records = []
    # Assumes a chromedriver binary is available on PATH.
    driver = Chrome()
    url = get_url(search_item)
    for page in range(1, 21):
driver.get(url.format(page))
soup=BeautifulSoup(driver.page_source,'html.parser')
results =soup.find_all('div',{"data-component-type":"s-search-result"})
for item in results:
record =extract_record(item)
if record:
records.append(record)
driver.close()
#save data as csv file
with open('results.csv','w',newline='',encoding='utf-8')as f:
writer=csv.writer(f)
writer.writerow(['Description','Price','Rating','ReviewCount','url'])
writer.writerows(records)
main('android phone')
| 24.207792 | 111 | 0.624464 | 235 | 1,864 | 4.859574 | 0.455319 | 0.052539 | 0.022767 | 0.028021 | 0.108581 | 0.108581 | 0.108581 | 0.108581 | 0.108581 | 0.108581 | 0 | 0.012057 | 0.243562 | 1,864 | 76 | 112 | 24.526316 | 0.797872 | 0.045064 | 0 | 0.212766 | 0 | 0.021277 | 0.168362 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06383 | false | 0 | 0.148936 | 0 | 0.276596 | 0.021277 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ab8479693143f8c93f4e4b1bf7ece0d1eb036d2 | 658 | py | Python | construct-binary-search-tree-from-preorder-traversal/construct-binary-search-tree-from-preorder-traversal.py | Atri10/Leet-code---Atri_Patel | 49fc59b9147a44ab04a66128fbb2ef259b5f7b7c | [
"MIT"
] | 1 | 2021-10-10T20:21:18.000Z | 2021-10-10T20:21:18.000Z | construct-binary-search-tree-from-preorder-traversal/construct-binary-search-tree-from-preorder-traversal.py | Atri10/Leet-code---Atri_Patel | 49fc59b9147a44ab04a66128fbb2ef259b5f7b7c | [
"MIT"
] | null | null | null | construct-binary-search-tree-from-preorder-traversal/construct-binary-search-tree-from-preorder-traversal.py | Atri10/Leet-code---Atri_Patel | 49fc59b9147a44ab04a66128fbb2ef259b5f7b7c | [
"MIT"
] | null | null | null | import bisect
from typing import List, Optional
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def bstFromPreorder(self, preorder: List[int]) -> Optional[TreeNode]:
def subtree(lo, hi):
if lo >= hi: return None
rootval = preorder[lo]
root = TreeNode( rootval )
mid = bisect.bisect_left(preorder, rootval, lo+1, hi)
root.left = subtree(lo+1, mid)
root.right = subtree(mid, hi)
return root
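        # Hedged sanity check (not part of the LeetCode harness):
        # Solution().bstFromPreorder([8, 5, 1, 7, 10, 12]) returns a root
        # with val 8, root.left.val == 5 and root.right.val == 10.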
return subtree( 0, len(preorder) ) | 31.333333 | 73 | 0.550152 | 78 | 658 | 4.576923 | 0.423077 | 0.061625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009324 | 0.348024 | 658 | 21 | 74 | 31.333333 | 0.822844 | 0.051672 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1875 | false | 0 | 0 | 0 | 0.4375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ab93f465042b5cca276e73a401edc8f8be4aec0 | 4,113 | py | Python | aimsprop/xyz.py | mtzgroup/aimsprop | 464d88ad7a817da73027fd2ab7b12476bf59f83d | [
"MIT"
] | 1 | 2022-03-28T13:11:56.000Z | 2022-03-28T13:11:56.000Z | aimsprop/xyz.py | mtzgroup/aimsprop | 464d88ad7a817da73027fd2ab7b12476bf59f83d | [
"MIT"
] | 11 | 2021-03-17T17:53:58.000Z | 2021-07-17T17:59:25.000Z | aimsprop/xyz.py | mtzgroup/aimsprop | 464d88ad7a817da73027fd2ab7b12476bf59f83d | [
"MIT"
] | 2 | 2021-04-05T08:36:35.000Z | 2021-05-20T22:12:12.000Z | import os
import re
import numpy as np
from . import atom_data, bundle
# TODO: Maybe should be in atom data
_N_table = {val: key for key, val in list(atom_data.atom_symbol_table.items())}
def parse_xyz(
filename: str,
label=1,
w=1.0,
I=1,
t0=0.0,
dt=20.0,
ts=None,
N_table=None,
) -> bundle.Bundle:
"""Parse an XYZ adiabatic bundle file directly into a Bundle.
filename (str): the absolute or relative path to the xyz file.
label (hashable): the label of this bundle
w (float): the weight of this bundle
I (int): electronic state label
t0 (float): the initial time in au
dt (float): the timestep in au
ts (list of float): an explicit list of times in au, overrides t0 and dt
N_table (dict of str : int): an optional dictionary mapping atomic
symbol to atomic number, used for non-standard atom names.
Returns:
bundle (Bundle): the Bundle object.
"""
lines = open(filename).readlines()
natom = int(lines[0]) # This should always work
if len(lines) % (natom + 2):
raise ValueError("Invalid number of lines in xyz file")
    nframe = len(lines) // (natom + 2)  # integer division so range() below works
xyzs = []
Zs = []
for frame in range(nframe):
lines2 = lines[frame * (natom + 2) + 2 : (frame + 1) * (natom + 2)]
Z = []
xyz = []
for line in lines2:
mobj = re.match(r"^\s*(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s*$", line)
Z.append(mobj.group(1))
xyz.append([float(mobj.group(x)) for x in (2, 3, 4)])
xyz = np.array(xyz)
xyzs.append(xyz)
Zs.append(Z)
# User symbol table or default?
N_table2 = N_table if N_table else _N_table
frames2 = []
for ind, xyz in enumerate(xyzs):
Z = Zs[ind]
Ns = [N_table2[key] for key in Z]
widths = atom_data.from_Ns_to_widths(Ns)
frame2 = bundle.Frame(
label=label,
t=dt * ind + t0 if ts is None else ts[ind],
w=w,
I=I,
N=Ns,
xyz=xyz,
widths=widths,
)
frames2.append(frame2)
parsed_bundle = bundle.Bundle(frames2)
return parsed_bundle
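# Hedged usage sketch (file names and timestep are illustrative):
# bundle = parse_xyz('traj.xyz', label=1, dt=20.0)
# write_xyzs(bundle, 'xyz_out')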
def write_xyzs(
bundle: bundle.Bundle,
dirname: str,
atom_format_str: str = "%-3s %24.16E %24.16E %24.16E\n",
):
"""Write a directory of xyz files to represent a Bundle, with
one xyz file containing all frames for each label
Params:
bundle: Bundle to write xyz file representation of
dirname: the directory to place the xyz files in (created if does not exist)
atom_format_str: the format string for each atom line in the xyz
file (useful to change precision).
Result:
xyz files are written for each label in bundle. Each xyz
file contains all frames for the label, in time-order
"""
    # Make sure directory exists
if not os.path.exists(dirname):
os.makedirs(dirname)
# Write xyz files
for label in bundle.labels:
bundle2 = bundle.subset_by_label(label)
xyzfilename = str(label)
# Munging with filename label
xyzfilename = xyzfilename.replace(" ", "")
xyzfilename = xyzfilename.replace("(", "")
xyzfilename = xyzfilename.replace(")", "")
xyzfilename = xyzfilename.replace(",", "-")
fh = open("%s/%s.xyz" % (dirname, xyzfilename), "w")
for frame in bundle2.frames:
fh.write("%d\n" % frame.xyz.shape[0])
fh.write(
"t = %24.16E, w = %24.16E, I = %d\n"
% (
frame.t,
frame.w,
frame.I,
)
)
for A in range(frame.xyz.shape[0]):
fh.write(
atom_format_str
% (
atom_data.atom_symbol_table[frame.N[A]],
frame.xyz[A, 0],
frame.xyz[A, 1],
frame.xyz[A, 2],
)
)
| 30.021898 | 84 | 0.540968 | 548 | 4,113 | 4 | 0.306569 | 0.008212 | 0.00958 | 0.010949 | 0.097172 | 0.076186 | 0.057026 | 0.057026 | 0.05292 | 0 | 0 | 0.022787 | 0.349137 | 4,113 | 136 | 85 | 30.242647 | 0.79604 | 0.302942 | 0 | 0.046512 | 0 | 0 | 0.056139 | 0.013401 | 0 | 0 | 0 | 0.007353 | 0 | 1 | 0.023256 | false | 0 | 0.046512 | 0 | 0.081395 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ab9500cd5c1495d990ceec333363832a2cb723e | 745 | py | Python | src/wafec/fi/hypothesis/models/test_parameter.py | wafec/wafec-fi-hypothesis | e74fea0eb5da39e8f26973fa577dc4515317150c | [
"MIT"
] | null | null | null | src/wafec/fi/hypothesis/models/test_parameter.py | wafec/wafec-fi-hypothesis | e74fea0eb5da39e8f26973fa577dc4515317150c | [
"MIT"
] | null | null | null | src/wafec/fi/hypothesis/models/test_parameter.py | wafec/wafec-fi-hypothesis | e74fea0eb5da39e8f26973fa577dc4515317150c | [
"MIT"
] | null | null | null | from sqlalchemy import *
from sqlalchemy.orm import relationship
from . import Base
class FITestParameter(Base):
__tablename__ = 'test_parameter'
id = Column(Integer, primary_key=True)
test_id = Column(Integer, ForeignKey('test.id'))
test = relationship('FITest')
name = Column(String(255), index=True)
test_parameter_service_id = Column(Integer, ForeignKey('test_parameter_service.id'))
test_parameter_service = relationship('FITestParameterService')
test_parameter_context_id = Column(Integer, ForeignKey('test_parameter_context.id'))
test_parameter_context = relationship('FITestParameterContext')
created_at = Column(DateTime)
updated_at = Column(DateTime)
updated_count = Column(Integer)
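# Hedged usage sketch (a configured SQLAlchemy session is assumed):
# param = FITestParameter(name='latency', test_id=1)
# session.add(param)
# session.commit()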
| 35.47619 | 88 | 0.759732 | 83 | 745 | 6.53012 | 0.385542 | 0.167897 | 0.110701 | 0.138376 | 0.193727 | 0.140221 | 0 | 0 | 0 | 0 | 0 | 0.004702 | 0.143624 | 745 | 20 | 89 | 37.25 | 0.844828 | 0 | 0 | 0 | 0 | 0 | 0.162416 | 0.126175 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1875 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7abd3d620eef1318489c51e350759a7e9fd53d7a | 1,481 | py | Python | hack/merge_cluster_roles.py | philips/cluster-monitoring-operator | e3d89785ebd70b369e8b9b4a1d8cfe93d3354731 | [
"Apache-2.0"
] | null | null | null | hack/merge_cluster_roles.py | philips/cluster-monitoring-operator | e3d89785ebd70b369e8b9b4a1d8cfe93d3354731 | [
"Apache-2.0"
] | 2 | 2018-08-13T11:46:13.000Z | 2018-08-13T12:47:12.000Z | hack/merge_cluster_roles.py | philips/cluster-monitoring-operator | e3d89785ebd70b369e8b9b4a1d8cfe93d3354731 | [
"Apache-2.0"
] | 2 | 2018-09-09T19:03:40.000Z | 2020-01-08T22:24:43.000Z | #!/usr/bin/python
""" merge_cluster_roles.py - merge OpenShift cluster roles into one """
# Copyright (c) 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, print_function
import os.path
import sys
import yaml
def main():
base_role = {}
sources = [os.path.relpath(sys.argv[1])]
with open(sys.argv[1], 'r') as f:
        base_role = yaml.safe_load(f)
manifests = sys.argv[2:]
for manifest in manifests:
sources.append(os.path.relpath(manifest))
with open(manifest, 'r') as f:
            rules = yaml.safe_load(f)['rules']
if rules not in base_role['rules']:
base_role['rules'] += rules
print("---")
print("# This is a generated file. DO NOT EDIT")
print("# Run `make merge-cluster-roles` to generate.")
print("# Sources: ")
for source in sources:
print("# \t" + source)
print(yaml.dump(base_role))
if __name__ == "__main__":
main()
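# Hedged usage sketch (file names are illustrative):
#   hack/merge_cluster_roles.py base.yaml extra1.yaml extra2.yaml > merged.yaml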
| 30.854167 | 74 | 0.667792 | 215 | 1,481 | 4.502326 | 0.525581 | 0.061983 | 0.035124 | 0.033058 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009475 | 0.21607 | 1,481 | 47 | 75 | 31.510638 | 0.824289 | 0.427414 | 0 | 0 | 0 | 0 | 0.153382 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.16 | 0 | 0.2 | 0.28 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7abe0503f9560abe292475ee88102fdcd3ba34ab | 7,005 | py | Python | internationalflavor/timezone/data.py | MounirMesselmeni/django-internationalflavor | 30ea407fc9972243ea45f45c1ce09d5d17961730 | [
"BSD-3-Clause"
] | 22 | 2015-02-09T11:01:56.000Z | 2021-07-02T23:34:56.000Z | internationalflavor/timezone/data.py | MounirMesselmeni/django-internationalflavor | 30ea407fc9972243ea45f45c1ce09d5d17961730 | [
"BSD-3-Clause"
] | 19 | 2015-02-09T11:04:09.000Z | 2021-11-26T08:07:44.000Z | internationalflavor/timezone/data.py | MounirMesselmeni/django-internationalflavor | 30ea407fc9972243ea45f45c1ce09d5d17961730 | [
"BSD-3-Clause"
] | 18 | 2015-04-16T08:39:38.000Z | 2021-07-08T08:07:41.000Z | import datetime
from django.core.exceptions import ImproperlyConfigured
from django.utils.encoding import force_text
from django.utils.functional import lazy
from django.utils.translation import gettext_lazy as _, gettext
import itertools
from internationalflavor.timezone._cldr_data import TIMEZONE_NAMES, METAZONE_NAMES, METAZONE_MAPPING_FROM_TZ, \
METAZONE_MAPPING_TO_TZ, TZ_REGION_FORMAT, TZ_GMT_FORMAT, TZ_HOUR_FORMAT
from internationalflavor._helpers import orig_str, string_format
try:
from pytz import common_timezones as COMMON_TIMEZONES
except ImportError:
COMMON_TIMEZONES = [x for x in TIMEZONE_NAMES if not x.startswith("Etc")]
CURRENT_METAZONES = [x for x in set(METAZONE_MAPPING_FROM_TZ.values()) if x is not None]
def get_timezones_cities(timezones=None, exclude=None):
"""Returns a list of choices with (timezone code, exemplar city)-pairs, grouped by their territory.
Only timezones present in the timezones argument, and not present in the exclude argument, are returned.
"""
# We require sorting for the groupby
timezones = COMMON_TIMEZONES if timezones is None else timezones
exclude = exclude if exclude else []
values = sorted(TIMEZONE_NAMES.items(), key=lambda item: orig_str(item[1][0]))
result = []
for territory, zones in itertools.groupby(values, lambda item: item[1][0]):
items = [(k, v[1]) for k, v in zones if k in timezones and k not in exclude]
if items:
result.append((territory, items))
return result
get_timezones_cities_lazy = lazy(get_timezones_cities, list)
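# Hedged illustration of the return shape (actual strings come from CLDR data):
# get_timezones_cities(timezones=['Europe/Amsterdam']) would resemble
# [('Netherlands', [('Europe/Amsterdam', 'Amsterdam')])]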
def _get_metazone_cities(metazone, limit=5):
zones = [tz for mz, tz in METAZONE_MAPPING_TO_TZ.items() if mz[0] == metazone]
cities = sorted([territory[1] for tz, territory in TIMEZONE_NAMES.items() if tz in zones])
if len(cities) > limit:
return ", ".join(map(force_text, cities[:limit])) + ", ..."
else:
return ", ".join(map(force_text, cities))
_get_metazone_cities_lazy = lazy(_get_metazone_cities, str)
def _get_metazone_offset(metazone, correct_dst=True):
try:
import pytz
except ImportError:
raise ImproperlyConfigured("You can not use this display format without pytz")
# We need to ensure that we do utcoffset - dst to get the normal offset for this timezone
try:
tzinfo = pytz.timezone(get_timezone_by_metazone(metazone))
offset = tzinfo.utcoffset(datetime.datetime.now(), is_dst=False)
if correct_dst:
offset -= tzinfo.dst(datetime.datetime.now(), is_dst=False)
except pytz.UnknownTimeZoneError:
offset = datetime.timedelta(0)
return offset
def _get_metazone_offset_str(metazone, correct_dst=True, include_gmt=True):
offset = _get_metazone_offset(metazone, correct_dst=correct_dst)
# Format the timezone
if offset >= datetime.timedelta(0):
offset_str = force_text(TZ_HOUR_FORMAT).split(';')[0]
else:
offset = -offset
offset_str = force_text(TZ_HOUR_FORMAT).split(';')[1]
offset_str = offset_str.replace('HH', "%02d" % (offset.total_seconds() // 3600))
offset_str = offset_str.replace('mm', "%02d" % ((offset.total_seconds() % 3600) // 60))
if include_gmt:
return force_text(TZ_GMT_FORMAT) % offset_str
else:
return offset_str
_get_metazone_offset_str_lazy = lazy(_get_metazone_offset_str, str)
def get_metazone_name(metazone, display_format='name'):
"""Returns the name of a metazone, given a display_format. Available formats:
*name* -- The name of the metazone, e.g.
Central European Time
*name_cities* -- The above two options combined, e.g.
Central European Time (Abidjan, Accra, Bamako, Banjul, Conakry, ...)
*offset_name* -- The offset and the name, e.g.
GMT+01:00 Central European Time
*offset_name_cities* -- The offset and the name, e.g.
GMT+01:00 Central European Time (Abidjan, Accra, Bamako, Banjul, Conakry, ...)
Everything else is string formatted using traditional Python string formatting, with the following arguments
available:
* tzname
* cities
* offset
* gmt_offset -- The offset including the GMT string
* dst_offset -- The offset with current DST applied
* gmt_dst_offset - The above two combined
"""
if display_format == 'name':
display_format = gettext("%(tzname)s")
elif display_format == 'name_cities':
display_format = gettext("%(tzname)s (%(cities)s)")
elif display_format == 'offset_name':
display_format = gettext("%(gmt_offset)s %(tzname)s")
elif display_format == 'offset_name_cities':
display_format = gettext("%(gmt_offset)s %(tzname)s (%(cities)s)")
name = force_text(METAZONE_NAMES.get(metazone, string_format(TZ_REGION_FORMAT, _(metazone))))
result = display_format % {
'tzname': name,
'cities': _get_metazone_cities_lazy(metazone),
'offset': _get_metazone_offset_str_lazy(metazone, True, False),
'gmt_offset': _get_metazone_offset_str_lazy(metazone, True, True),
'dst_offset': _get_metazone_offset_str_lazy(metazone, False, False),
'gmt_dst_offset': _get_metazone_offset_str_lazy(metazone, False, True)
}
return result
get_metazone_name_lazy = lazy(get_metazone_name, str)
def get_metazones(metazones=None, exclude=None, display_format='name'):
"""Returns a list of metazones.
By default, returns all current metazones. If the metazones argument defines metazones, they are returned. Values
in exclude are never returned.
"""
metazones = CURRENT_METAZONES if metazones is None else metazones
exclude = exclude if exclude else []
return [(k, get_metazone_name_lazy(k, display_format)) for k in metazones if k not in exclude]
get_metazones_lazy = lazy(get_metazones, list)
def get_timezone_by_metazone(metazone, territories=None, fallback='001'):
"""Returns the timezone name from the metazone name. It takes three arguments:
:param metazone: Name of the metazone
:param territories: String of a single territory or a list of territories in order of preference for retrieving
the correct timezone. This is used when a metazone has multiple base timezones. It is optional
as there is always a fallback to the default 'World' territory (001). Use case: you could use
it to fill in the country of the user.
:param fallback: The territory to use when no other territory could be found. This should always be 001 (=world)
"""
if territories is None:
territories = []
elif isinstance(territories, str):
territories = [territories]
for ter in territories:
if (metazone, ter) in METAZONE_MAPPING_TO_TZ:
return METAZONE_MAPPING_TO_TZ[(metazone, ter)]
return METAZONE_MAPPING_TO_TZ[(metazone, fallback)]
| 39.801136 | 118 | 0.698644 | 940 | 7,005 | 5 | 0.218085 | 0.042128 | 0.032553 | 0.029787 | 0.262979 | 0.170213 | 0.105319 | 0.105319 | 0.039574 | 0.02 | 0 | 0.007788 | 0.211849 | 7,005 | 175 | 119 | 40.028571 | 0.843507 | 0.287937 | 0 | 0.12766 | 0 | 0 | 0.057172 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074468 | false | 0 | 0.12766 | 0 | 0.308511 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ac2453ef253d20470030f8d8112f23a1f877440 | 1,366 | py | Python | sloth-scratch.py | xer0-1ne/sloth-scratch | 1d81ffe65ff7d72fcc766a6ba025366a3319a838 | [
"MIT"
] | null | null | null | sloth-scratch.py | xer0-1ne/sloth-scratch | 1d81ffe65ff7d72fcc766a6ba025366a3319a838 | [
"MIT"
] | null | null | null | sloth-scratch.py | xer0-1ne/sloth-scratch | 1d81ffe65ff7d72fcc766a6ba025366a3319a838 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import json
from assets.lib.bottle import route, run, static_file, response, request, redirect
#get default routes for files/paths
@route('/<filepath:path>')
def server_static(filepath):
return static_file(filepath, root="./")
#route index as default page
@route('/')
def index():
filename='index.html'
return static_file(filename, root="./")
#return json object
@route('/commands')
def getCommands():
objFile = 'commands.json'
response.content_type = 'application/json'
with open(objFile, "r") as file:
data = json.load(file)
return json.dumps(data)
@route('/addcommand', method="POST")
def addCommands():
command = request.forms.get('newCommand')
commandName = request.forms.get('newCommandName')
commandOS = request.forms.get('newCommandOS')
commandDescription = request.forms.get('newCommandDescription')
commandObj = {
"Command":command,
"Name":commandName,
"OS":commandOS,
"Description":commandDescription
}
objFile = 'commands.json'
response.content_type = 'application/json'
with open(objFile, "r") as file:
data = json.load(file)
data.append(commandObj)
with open(objFile, "w") as file:
json.dump(data, file)
redirect('/')
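# Hedged usage sketch: add a command from the shell (field names as above):
# curl -d 'newCommand=ls&newCommandName=list&newCommandOS=linux&newCommandDescription=list files' http://localhost:8080/addcommand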
run(host='localhost', port=8080, debug=True, reloader=True)
| 25.296296 | 82 | 0.666911 | 157 | 1,366 | 5.764331 | 0.471338 | 0.053039 | 0.066298 | 0.059669 | 0.201105 | 0.201105 | 0.201105 | 0.201105 | 0.201105 | 0.201105 | 0 | 0.004521 | 0.190337 | 1,366 | 53 | 83 | 25.773585 | 0.813743 | 0.073206 | 0 | 0.216216 | 0 | 0 | 0.164025 | 0.01664 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108108 | false | 0 | 0.054054 | 0.027027 | 0.243243 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8f816a322098d1a1e68d37250e949dfedc9e5d44 | 2,569 | py | Python | neighbour/views.py | Eccie-K/neighbour-hood | f874f9468160aa34dee294d685374e4c5e2eec4d | [
"MIT"
] | null | null | null | neighbour/views.py | Eccie-K/neighbour-hood | f874f9468160aa34dee294d685374e4c5e2eec4d | [
"MIT"
] | 4 | 2020-06-05T23:21:40.000Z | 2021-06-10T21:57:32.000Z | neighbour/views.py | Eccie-K/neighbour-hood | f874f9468160aa34dee294d685374e4c5e2eec4d | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth import login, authenticate
from django.contrib.auth.decorators import login_required
from .forms import SignupForm, HoodForm, UserProfileUpdateForm, UserUpdateForm
from .models import *
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.contrib.auth.models import User
from django.contrib import messages
# Create your views here.
@login_required(login_url="/accounts/login/")
def index(request):
hoods = Hood.objects.all()
return render(request,"index.html",locals())
def home(request):
return render(request, "index.html", locals())
def signup(request):
if request.method == 'POST':
form = SignupForm(request.POST)
if form.is_valid():
user = form.save(commit=False)
user.is_active = True
user.save()
return render(request, 'index.html')
else:
form = SignupForm()
return render(request, 'signup.html', {'form': form})
def new_hood(request):
current_user = request.user
if request.method == "POST":
form = HoodForm(request.POST, request.FILES)
if form.is_valid():
hood = form.save(commit=False)
hood.user = current_user
hood.save()
return redirect("index")
else:
form = HoodForm()
return render(request, "new_hood.html", {"form": form})
@login_required
def profile(request):
"""Display user profile information."""
user = request.user
return render(request, 'profile.html', {'user': user})
@login_required
def update_profile(request):
"""Edit user profile information."""
user = request.user
form1 = UserUpdateForm(instance=user)
form2 = UserProfileUpdateForm(instance=user.profile)
if request.method == 'POST':
form1 = UserUpdateForm(instance=user, data=request.POST)
        form2 = UserProfileUpdateForm(
            instance=user.profile,
            data=request.POST,
            files=request.FILES
        )
if form1.is_valid() and form2.is_valid():
form1.save()
form2.save()
messages.success(request, "Your profile has been updated!")
return HttpResponseRedirect(reverse('profile'))
return render(request, 'update_profile.html',
{'form1': form1, 'form2': form2})
def details(request, hood_id):
hood = Hood.objects.get(id=hood_id)
return render(request, "details.html", locals())
| 30.223529 | 79 | 0.657844 | 293 | 2,569 | 5.706485 | 0.259386 | 0.047847 | 0.090909 | 0.037679 | 0.165072 | 0.088517 | 0.044258 | 0 | 0 | 0 | 0 | 0.006051 | 0.228104 | 2,569 | 84 | 80 | 30.583333 | 0.837115 | 0.034644 | 0 | 0.184615 | 0 | 0 | 0.07658 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107692 | false | 0 | 0.153846 | 0.015385 | 0.415385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8f81ac36d9e586a2803a67107baad029f84be150 | 2,636 | py | Python | isitfit/dotMan.py | autofitcloud/isitf | 6ffc0c67c00140120f5d5ad8dfe11c8f0f7dacc1 | [
"Apache-2.0"
] | 82 | 2019-09-04T17:39:10.000Z | 2021-08-10T14:59:18.000Z | isitfit/dotMan.py | autofitcloud/isitf | 6ffc0c67c00140120f5d5ad8dfe11c8f0f7dacc1 | [
"Apache-2.0"
] | 11 | 2019-09-10T03:54:19.000Z | 2020-02-21T22:58:44.000Z | isitfit/dotMan.py | autofitcloud/isitf | 6ffc0c67c00140120f5d5ad8dfe11c8f0f7dacc1 | [
"Apache-2.0"
] | 9 | 2019-09-13T15:57:42.000Z | 2021-02-13T15:56:40.000Z | import os
class DotMan:
def get_dotisitfit(self):
# get home
import pathlib
p1_home = str(pathlib.Path.home())
# check dot folder
p2_dot = os.path.join(p1_home, ".isitfit")
if not os.path.exists(p2_dot):
pathlib.Path(p2_dot).mkdir(exist_ok=True)
return p2_dot
def get_myuid(self, is_reentry=False):
"""
Create a UUID for each installation of isitfit
This also creates a .isitfit folder in the user's home directory
and caches the generated UUID in a txt file for re-use
is_reentry - internally used flag to identify that this is a case when
UUID is identified as invalid and needs to be set again
"""
p2_dot = self.get_dotisitfit()
# check uid file within dot folder
p3_uidtxt = os.path.join(p2_dot, "uid.txt")
uuid_val = None
if not os.path.exists(p3_uidtxt):
import uuid
uuid_val = uuid.uuid4().hex
with open(p3_uidtxt, 'w') as fh:
fh.write(uuid_val)
# if not created above, read from file
if uuid_val is None:
with open(p3_uidtxt, 'r') as fh:
uuid_val = fh.read()
uuid_val = uuid_val.strip() # strip the new-line or spaces if any
# if re-entry due to invalid ID or not
if is_reentry:
# any further processing of this would be an overkill
pass
else:
# verify that the UUID is valid (in case of accidental overwrite)
if len(uuid_val)!=32:
# drop the uid.txt file and overwrite it
os.remove(p3_uidtxt)
uuid_val = self.get_myuid(True)
# return
return uuid_val
def tempdir(self):
import os
import tempfile
isitfit_tmpdir = os.path.join(tempfile.gettempdir(), 'isitfit')
os.makedirs(isitfit_tmpdir, exist_ok=True)
return isitfit_tmpdir
class DotFile:
"""
Base class to set/get files in ~/.isitfit like ~/.isitfit/last_email.txt
"""
filename = None
def __init__(self):
self._init_fn()
def _init_fn(self):
if self.filename is None:
raise Exception("Derived classes should set filename member")
from isitfit.dotMan import DotMan
dm = DotMan()
fold = dm.get_dotisitfit()
self.fn = os.path.join(fold, self.filename)
def get(self):
if not os.path.exists(self.fn):
return None
with open(self.fn, 'r') as fh:
val = fh.read()
val = val.strip()
if val=='':
return None
return val
def set(self, val):
with open(self.fn, 'w') as fh:
fh.write(val)
class DotLastEmail(DotFile):
filename = "last_email.txt"
class DotLastProfile(DotFile):
filename = "last_profile.txt"
| 24.407407 | 74 | 0.646434 | 400 | 2,636 | 4.1425 | 0.34 | 0.042245 | 0.02414 | 0.019916 | 0.045263 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008193 | 0.259105 | 2,636 | 107 | 75 | 24.635514 | 0.840246 | 0.270106 | 0 | 0.079365 | 0 | 0 | 0.05235 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0.015873 | 0.111111 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8f865163bfbe4b0bc8d900996f2290d3bae441c0 | 437 | py | Python | workflows/base/interaction_views.py | xflows/clowdflows | 697b36ebc976d1ba4ab726bda2fc4593422af080 | [
"MIT"
] | 38 | 2015-11-21T08:16:14.000Z | 2021-06-22T16:14:12.000Z | workflows/base/interaction_views.py | chimeng089/clowdflows | e19bf57906e893d8f0be93329168b76eae758384 | [
"MIT"
] | 21 | 2017-04-05T08:03:54.000Z | 2022-03-11T23:16:03.000Z | workflows/base/interaction_views.py | chimeng089/clowdflows | e19bf57906e893d8f0be93329168b76eae758384 | [
"MIT"
] | 26 | 2016-01-11T17:51:07.000Z | 2022-02-24T11:49:40.000Z | import json
from django.shortcuts import render
def base_js_snippet(request, input_dict, output_dict, widget):
try:
inputs = json.dumps(input_dict['in'])
    except (TypeError, ValueError):
        raise Exception("Problem serializing the inputs. Only JSON-serializable objects can be used.")
return render(request, 'interactions/base_js_snippet.html',
{'widget': widget, 'snippet': input_dict['snippet'], 'inputs': inputs})
| 36.416667 | 102 | 0.695652 | 54 | 437 | 5.481481 | 0.62963 | 0.091216 | 0.087838 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.19222 | 437 | 11 | 103 | 39.727273 | 0.838527 | 0 | 0 | 0 | 0 | 0 | 0.311213 | 0.075515 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8f8686c5c30243fe1085ef5f1ba45de06bb5c4cd | 928 | py | Python | jira_devops/release_notes/settings.py | clutcher/jira_devops | 61360f3fa9bd9b402d752dde84b3cf486245879e | [
"MIT"
] | null | null | null | jira_devops/release_notes/settings.py | clutcher/jira_devops | 61360f3fa9bd9b402d752dde84b3cf486245879e | [
"MIT"
] | null | null | null | jira_devops/release_notes/settings.py | clutcher/jira_devops | 61360f3fa9bd9b402d752dde84b3cf486245879e | [
"MIT"
] | null | null | null | import os
from django.apps import AppConfig
class ReleaseNotesAppConfig(AppConfig):
name = 'jira_devops.release_notes'
verbose_name = 'Release Notes'
DEFAULT_JIRA_RELEASE_FIELD_MAP = {
"hac_update": "customfield_13359",
"need_impex": "customfield_13360",
"need_manual": "customfield_13361",
"special_notes": "customfield_13362",
"responsible_person": "customfield_12200",
}
def ready(self):
from django.conf import settings
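        # Reach into the lazy settings wrapper so plain dict.setdefault()
        # works below; note this relies on Django's private _wrapped attribute.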
settings = settings._wrapped.__dict__
settings.setdefault('JIRA_RELEASE_FIELD_MAP', self.DEFAULT_JIRA_RELEASE_FIELD_MAP)
settings.setdefault('FILE_CLEAN_UP_PREFIX', self.get_env_variable("FILE_CLEAN_UP_PREFIX", "hybris/bin/custom"))
@staticmethod
def get_env_variable(variable, default=""):
value = os.getenv(variable)
if not value:
return default
return value
| 29.935484 | 119 | 0.690733 | 103 | 928 | 5.84466 | 0.533981 | 0.054817 | 0.079734 | 0.094684 | 0.086379 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034341 | 0.215517 | 928 | 30 | 120 | 30.933333 | 0.792582 | 0 | 0 | 0 | 0 | 0 | 0.284483 | 0.050647 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.130435 | 0 | 0.478261 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8f88225e95ec2579d2321422fdd57413d254306d | 3,581 | py | Python | azad/exp/alternatives/optuna_dqn2.py | CoAxLab/azad | d1498069dd8856e93ae077b34dd7c9f1c7ce80e6 | [
"MIT"
] | 6 | 2018-09-11T21:06:12.000Z | 2022-01-28T17:36:52.000Z | azad/exp/alternatives/optuna_dqn2.py | CoAxLab/azad | d1498069dd8856e93ae077b34dd7c9f1c7ce80e6 | [
"MIT"
] | null | null | null | azad/exp/alternatives/optuna_dqn2.py | CoAxLab/azad | d1498069dd8856e93ae077b34dd7c9f1c7ce80e6 | [
"MIT"
] | 2 | 2018-09-12T00:40:52.000Z | 2018-10-29T15:45:54.000Z | """Tune the dqn2 model of Wythoff's using the optuna lib"""
import optuna
import fire
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
from torchvision import datasets
from torchvision import transforms
from azad.exp.alternatives import wythoff_dqn2
from copy import deepcopy
def _build(trial):
"""Build a nn.Module MLP model"""
# Sample hidden layers and features
in_features = 4 # Initial
n_layers = trial.suggest_int('n_layers', 2, 6)
layers = []
for l in range(n_layers):
out_features = trial.suggest_int(f'{l}', in_features, MAX_FEATURES)
layers.append(nn.Linear(in_features, out_features))
layers.append(nn.ReLU())
in_features = deepcopy(out_features)
# Output layer topo is fixed
layers.append(nn.Linear(in_features, 1))
# Define the nn
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
return Model
def _objective(trial):
"""Runs a single HP trial"""
# Build a new Model
Model = _build(trial)
# Sample new HP
learning_rate = trial.suggest_float("learning_rate", 0.005, 0.5)
gamma = trial.suggest_float("gamma", 0.01, 0.5)
epsilon = trial.suggest_float("epsilon", 0.1, 0.9)
# Run wythoff_dqn2
result = wythoff_dqn2(epsilon=epsilon,
gamma=gamma,
learning_rate=learning_rate,
num_episodes=NUM_EPISODES,
batch_size=20,
memory_capacity=1000,
game=GAME,
network=Model,
anneal=True,
tensorboard=None,
update_every=1,
double=False,
double_update=10,
save=False,
save_model=False,
monitor=None,
return_none=False,
debug=False,
device=DEVICE,
clip_grad=True,
progress=False,
zero=False,
seed=SEED)
return result["score"] # the final
def optuna_dqn2(save=None,
num_trials=100,
num_episodes=100,
max_features=20,
game='Wythoff15x15',
num_jobs=1,
device="cpu",
debug=True,
seed=None):
# Set globals used in _objective. A lazy bad soln.
global DEVICE
global SEED
global GAME
global NUM_EPISODES
global MAX_FEATURES
DEVICE = device
SEED = seed
GAME = game
NUM_EPISODES = num_episodes
MAX_FEATURES = max_features
# Run the study
study = optuna.create_study(direction="maximize")
study.optimize(_objective, n_trials=num_trials, n_jobs=num_jobs)
trial = study.best_trial
if debug:
print(f">>> Saving to {save}")
print(f">>> Number of finished trials: {study.trials}")
print(f">>> Best trial {trial}")
print(f">>> score: {trial.value}")
print(f">>> params:\n")
for k, v in trial.params.items():
print(f"\t{k}: {v}")
# Save?
if save is not None:
torch.save(study, save)
return study
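# Hedged usage sketch; the module imports fire, so a CLI entry point like
# fire.Fire(optuna_dqn2) is presumably intended elsewhere. Values illustrative:
# study = optuna_dqn2(num_trials=50, num_episodes=200, game='Wythoff15x15')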
| 29.113821 | 75 | 0.543144 | 411 | 3,581 | 4.579075 | 0.364964 | 0.035069 | 0.022317 | 0.023379 | 0.031881 | 0.031881 | 0 | 0 | 0 | 0 | 0 | 0.020282 | 0.366657 | 3,581 | 122 | 76 | 29.352459 | 0.809524 | 0.088523 | 0 | 0 | 0 | 0 | 0.061149 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.122222 | 0.011111 | 0.233333 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8f90f912716a6632b2bcc2ac457c7c623545be22 | 869 | py | Python | gsplines/services/gsplinesjson.py | rafaelrojasmiliani/gsplines | 663b10f6d53b498a1e892d9eb32a345153de36d2 | [
"MIT"
] | 3 | 2021-08-28T01:42:40.000Z | 2021-12-02T22:39:45.000Z | gsplines/services/gsplinesjson.py | rafaelrojasmiliani/gsplines | 663b10f6d53b498a1e892d9eb32a345153de36d2 | [
"MIT"
] | null | null | null | gsplines/services/gsplinesjson.py | rafaelrojasmiliani/gsplines | 663b10f6d53b498a1e892d9eb32a345153de36d2 | [
"MIT"
] | null | null | null | from ..piecewisefunction.piecewisefunction import cPiecewiseFunction
import json
import numpy as np
import gsplines.basis
def piecewise2json(_pw):
basis_name = _pw.basis_.__class__.__name__
if hasattr(_pw.basis_, 'params_'):
basis_params = _pw.basis_.params_
else:
basis_params = None
basis = [basis_name, basis_params]
result = [_pw.tau_.tolist(), _pw.y_.tolist(), _pw.dim_, basis]
return json.dumps(result)
def json2piecewise(_data):
array = json.loads(_data)
for i, element in enumerate(array[:-2]):
array[i] = np.array(element)
basis_data = array[-1]
class_ = getattr(gsplines.basis, basis_data[0])
if basis_data[1] is not None:
basis = class_(basis_data[1])
else:
basis = class_()
array[-1] = basis
result = cPiecewiseFunction(*array)
return result
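# Hedged round-trip sketch (pw is any cPiecewiseFunction instance):
# payload = piecewise2json(pw)
# pw2 = json2piecewise(payload)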
| 22.868421 | 68 | 0.665132 | 109 | 869 | 4.963303 | 0.385321 | 0.101664 | 0.048059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011852 | 0.223245 | 869 | 37 | 69 | 23.486486 | 0.78963 | 0 | 0 | 0.076923 | 0 | 0 | 0.008055 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8f926ec70f17a27fd3a3870a6289f1a7f3c638c1 | 3,085 | py | Python | ovos_utils/waiting_for_mycroft/settings_gui_generator.py | forslund/ovos_utils | bfca2d9175b72b0d157385af07627aefcd280177 | [
"Apache-2.0"
] | 3 | 2021-11-10T11:46:05.000Z | 2022-03-06T01:59:51.000Z | ovos_utils/waiting_for_mycroft/settings_gui_generator.py | forslund/ovos_utils | bfca2d9175b72b0d157385af07627aefcd280177 | [
"Apache-2.0"
] | 5 | 2021-08-10T17:26:49.000Z | 2022-03-03T14:43:55.000Z | ovos_utils/waiting_for_mycroft/settings_gui_generator.py | forslund/ovos_utils | bfca2d9175b72b0d157385af07627aefcd280177 | [
"Apache-2.0"
] | 1 | 2021-11-19T09:31:07.000Z | 2021-11-19T09:31:07.000Z | # Copyright 2020 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import yaml
import pathlib
class SettingsGuiGenerator:
"""Skill Settings Generator For GUI. """
def __init__(self):
""" Create a SettingList Object """
self.settings_list = []
def populate(self, skill_id, settings_file, settings_dict):
"""
Populates settings list for current skill.
Arguments:
skill_id: ID of target skill.
settings_file: Settings meta file from skill folder.
settings_dict: Dictionary of current settings.json file.
"""
file_type = pathlib.Path(settings_file).suffix
if file_type == ".json":
with open(settings_file, 'r') as f:
settingsmeta_dict = json.load(f)
__skillMetaData = settingsmeta_dict.get('skillMetadata')
for section in __skillMetaData.get('sections'):
self.settings_list.append(section)
if file_type == ".yaml":
with open(settings_file, 'r') as f:
settingsmeta_dict = yaml.safe_load(f)
__skillMetaData = settingsmeta_dict.get('skillMetadata')
for section in __skillMetaData.get('sections'):
self.settings_list.append(section)
if settings_dict is not None:
__updated_list = []
for sections in self.settings_list:
for fields in sections['fields']:
if "name" in fields:
if fields["name"] in settings_dict.keys():
fields["value"] = settings_dict[fields["name"]]
__updated_list.append(sections)
self.clear()
self.settings_list = __updated_list
def fetch(self):
"""Return Settings List """
return self.settings_list
def clear(self):
"""Clear Settings List """
self.settings_list.clear()
def update(self, settings_dict):
"""Getting Changed Settings & Update List.
Arguments:
settings_dict: Dictionary of current settings.json file.
"""
__updated_list = []
for sections in self.settings_list:
for fields in sections['fields']:
if "name" in fields:
if fields["name"] in settings_dict.keys():
fields["value"] = settings_dict[fields["name"]]
__updated_list.append(sections)
self.clear()
self.settings_list = __updated_list | 33.901099 | 75 | 0.60778 | 351 | 3,085 | 5.162393 | 0.339031 | 0.07947 | 0.07947 | 0.01766 | 0.449227 | 0.449227 | 0.449227 | 0.449227 | 0.397351 | 0.353201 | 0 | 0.00373 | 0.3047 | 3,085 | 91 | 76 | 33.901099 | 0.841026 | 0.316045 | 0 | 0.590909 | 0 | 0 | 0.050226 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113636 | false | 0 | 0.068182 | 0 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8f949563ac0e205ebc380e370afe4937e75dab84 | 2,690 | py | Python | blender/.blender/scripts/textplugin_templates.py | visnz/sketchfab_download | 976f667d5c2c2864b2bad65aceac0dab5ce51b74 | [
"Apache-2.0"
] | 41 | 2021-02-18T05:56:26.000Z | 2021-12-06T07:58:15.000Z | blender/.blender/scripts/textplugin_templates.py | visnz/sketchfab_download | 976f667d5c2c2864b2bad65aceac0dab5ce51b74 | [
"Apache-2.0"
] | 19 | 2021-02-18T05:59:03.000Z | 2022-01-13T01:00:52.000Z | blender/.blender/scripts/textplugin_templates.py | visnz/sketchfab_download | 976f667d5c2c2864b2bad65aceac0dab5ce51b74 | [
"Apache-2.0"
] | 18 | 2021-02-22T13:32:56.000Z | 2022-01-22T12:38:29.000Z | #!BPY
"""
Name: 'Template Completion | Tab'
Blender: 246
Group: 'TextPlugin'
Shortcut: 'Tab'
Tooltip: 'Completes templates based on the text preceding the cursor'
"""
# Only run if we have the required modules
try:
import bpy
from BPyTextPlugin import *
from Blender import Text
except ImportError:
OK = False
else:
OK = True
templates = {
'ie':
'if ${1:cond}:\n'
'\t${2}\n'
'else:\n'
'\t${3}\n',
'iei':
'if ${1:cond}:\n'
'\t${2}\n'
'elif:\n'
'\t${3}\n'
'else:\n'
'\t${4}\n',
'def':
'def ${1:name}(${2:params}):\n'
'\t"""(${2}) - ${3:comment}"""\n'
'\t${4}',
'cls':
'class ${1:name}(${2:parent}):\n'
'\t"""${3:docs}"""\n'
'\t\n'
'\tdef __init__(self, ${4:params}):\n'
'\t\t"""Creates a new ${1}"""\n'
'\t\t${5}',
'class':
'class ${1:name}(${2:parent}):\n'
'\t"""${3:docs}"""\n'
'\t\n'
'\tdef __init__(self, ${4:params}):\n'
'\t\t"""Creates a new ${1}"""\n'
'\t\t${5}'
}
def main():
txt = bpy.data.texts.active
if not txt:
return
row, c = txt.getCursorPos()
line = txt.asLines(row, row+1)[0]
indent=0
while indent<c and (line[indent]==' ' or line[indent]=='\t'):
indent += 1
# Check we are in a normal context
if get_context(txt) != CTX_NORMAL:
return
targets = get_targets(line, c-1);
if len(targets) != 1: return
color = (0, 192, 32)
for trigger, template in templates.items():
if trigger != targets[0]: continue
inserts = {}
txt.delete(-len(trigger)-1)
y, x = txt.getCursorPos()
first = None
# Insert template text and parse for insertion points
count = len(template); i = 0
while i < count:
if i<count-1 and template[i]=='$' and template[i+1]=='{':
i += 2
e = template.find('}', i)
item = template[i:e].split(':')
if len(item)<2: item.append('')
if not inserts.has_key(item[0]):
inserts[item[0]] = (item[1], [(x, y)])
else:
inserts[item[0]][1].append((x, y))
item[1] = inserts[item[0]][0]
if not first: first = (item[1], x, y)
txt.insert(item[1])
x += len(item[1])
i = e
else:
txt.insert(template[i])
if template[i] == '\n':
txt.insert(line[:indent])
y += 1
x = indent
else:
x += 1
i += 1
# Insert markers at insertion points
for id, (text, points) in inserts.items():
for x, y in points:
txt.setCursorPos(y, x)
txt.setSelectPos(y, x+len(text))
txt.markSelection((hash(text)+int(id)) & 0xFFFF, color,
Text.TMARK_TEMP | Text.TMARK_EDITALL)
if first:
text, x, y = first
txt.setCursorPos(y, x)
txt.setSelectPos(y, x+len(text))
break
# Check we are running as a script and not imported as a module
if __name__ == "__main__" and OK:
main()
| 21.693548 | 69 | 0.568773 | 427 | 2,690 | 3.531616 | 0.30445 | 0.019894 | 0.007958 | 0.01061 | 0.155172 | 0.155172 | 0.155172 | 0.140584 | 0.140584 | 0.140584 | 0 | 0.02891 | 0.215613 | 2,690 | 123 | 70 | 21.869919 | 0.685782 | 0.141264 | 0 | 0.265306 | 0 | 0 | 0.194166 | 0.032651 | 0 | 0 | 0.002612 | 0 | 0 | 1 | 0.010204 | false | 0 | 0.040816 | 0 | 0.071429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8f94b300e2b34ff76f0d2873be32657ff67630be | 1,939 | py | Python | kive/file_access_utils.py | cfe-lab/Kive | e46b9eb40f085d579c12f47b6b5696d5ee93a9d3 | [
"BSD-3-Clause"
] | 2 | 2016-10-02T18:24:53.000Z | 2019-01-19T09:37:56.000Z | kive/file_access_utils.py | cfe-lab/Kive | e46b9eb40f085d579c12f47b6b5696d5ee93a9d3 | [
"BSD-3-Clause"
] | 1,190 | 2015-07-10T22:57:23.000Z | 2022-03-30T05:10:14.000Z | kive/file_access_utils.py | cfe-lab/Kive | e46b9eb40f085d579c12f47b6b5696d5ee93a9d3 | [
"BSD-3-Clause"
] | 2 | 2019-07-16T00:25:25.000Z | 2019-11-25T16:32:58.000Z | """
Basic file-checking functionality used by Kive.
"""
import hashlib
import mimetypes
import os
from contextlib import contextmanager
from django.http import FileResponse
def build_download_response(field_file):
# Intentionally leave this open for streaming response.
# FileResponse will close it when streaming finishes.
field_file.open('rb')
mimetype = mimetypes.guess_type(field_file.name)[0]
response = FileResponse(field_file, content_type=mimetype)
response['Content-Length'] = field_file.size
response['Content-Disposition'] = 'attachment; filename="{}"'.format(
os.path.basename(field_file.name))
return response
def compute_md5(file_to_checksum, chunk_size=1024*64):
"""Computes MD5 checksum of specified file.
file_to_checksum should be an open, readable, file handle, with
its position at the beginning, i.e. so that .read() gets the
entire contents of the file.
NOTE: under python3, the file should have been open in binary mode ("rb")
so that bytes (not strings) are returned when iterating over the file.
"""
md5gen = hashlib.md5()
while True:
chunk = file_to_checksum.read(chunk_size)
if not chunk:
return md5gen.hexdigest()
md5gen.update(chunk)
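# Hedged usage sketch (path is illustrative; open in binary mode, as the
# docstring above requires):
# with open('data.bin', 'rb') as fh:
#     digest = compute_md5(fh)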
@contextmanager
def use_field_file(field_file, mode='rb'):
""" Context manager for FieldFile objects.
Tries to leave a file object in the same state it was in when the context
manager started.
It's hard to tell when to close a FieldFile object. It opens implicitly
when you first read from it. Sometimes, it's an in-memory file object, and
it can't be reopened.
"""
was_closed = field_file.closed
field_file.open(mode)
start_position = field_file.tell()
try:
yield field_file
finally:
if was_closed:
field_file.close()
else:
field_file.seek(start_position)
| 30.777778 | 78 | 0.700877 | 268 | 1,939 | 4.951493 | 0.488806 | 0.094951 | 0.03165 | 0.027129 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009253 | 0.219701 | 1,939 | 62 | 79 | 31.274194 | 0.867812 | 0.410005 | 0 | 0 | 0 | 0 | 0.057728 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.15625 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8f958f12563b74a6555a910c0edcd3c3aff2c51a | 16,170 | py | Python | gem/quaternion.py | AlexMarinescu/pyGameMath | 5257291431bb45db0274dc48edf24694ecfe2e2d | [
"BSD-2-Clause-FreeBSD"
] | 8 | 2020-04-15T22:30:52.000Z | 2022-01-18T01:05:45.000Z | gem/quaternion.py | SmithSamuelM/gem | d05dfa50739aa0b3dcd5e9cd2eb7d147fb4c0d63 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | gem/quaternion.py | SmithSamuelM/gem | d05dfa50739aa0b3dcd5e9cd2eb7d147fb4c0d63 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2021-11-12T00:41:21.000Z | 2021-11-12T00:41:21.000Z | import math
import six.moves as sm
from gem import vector
from gem import matrix
def quat_identity():
''' Returns the quaternion identity. '''
return [1.0, 0.0, 0.0, 0.0]
def quat_add(quat, quat1):
''' Add two quaternions. '''
return [quat[0] + quat1[0], quat[1] + quat1[1], quat[2] + quat1[2], quat[3] + quat1[3]]
def quat_sub(quat, quat1):
''' Subtract two quaternions. '''
return [quat[0] - quat1[0], quat[1] - quat1[1], quat[2] - quat1[2], quat[3] - quat1[3]]
def quat_mul_quat(quat, quat1):
''' Multiply a quaternion with a quaternion. '''
w = quat[0] * quat1[0] - quat[1] * quat1[1] - quat[2] * quat1[2] - quat[3] * quat1[3]
x = quat[0] * quat1[1] + quat[1] * quat1[0] + quat[2] * quat1[3] - quat[3] * quat1[2]
y = quat[0] * quat1[2] + quat[2] * quat1[0] + quat[3] * quat1[1] - quat[1] * quat1[3]
z = quat[0] * quat1[3] + quat[3] * quat1[0] + quat[1] * quat1[2] - quat[2] * quat1[1]
return [w, x, y, z]
def quat_mul_vect(quat, vect):
''' Multiply a quaternion with a vector. '''
w = -quat[1] * vect[0] - quat[2] * vect[1] - quat[3] * vect[2]
x = quat[0] * vect[0] + quat[2] * vect[2] - quat[3] * vect[1]
y = quat[0] * vect[1] + quat[3] * vect[0] - quat[1] * vect[2]
z = quat[0] * vect[2] + quat[1] * vect[1] - quat[2] * vect[0]
return [w, x, y, z]
def quat_mul_float(quat, scalar):
''' Multiply a quaternion with a scalar (float). '''
return [quat[0] * scalar, quat[1] * scalar, quat[2] * scalar, quat[3] * scalar]
def quat_div_float(quat, scalar):
''' Divide a quaternion with a scalar (float). '''
return [quat[0] / scalar, quat[1] / scalar, quat[2] / scalar, quat[3] / scalar]
def quat_neg(quat):
''' Negate the elements of a quaternion. '''
return [-quat[0], -quat[1], -quat[2], -quat[3]]
def quat_dot(quat1, quat2):
''' Dot product between two quaternions. Returns a scalar. '''
    rdp = 0
for i in sm.range(4):
rdp += quat1[i] * quat2[i]
return rdp
def quat_magnitude(quat):
''' Compute magnitude of a quaternion. Returns a scalar. '''
rmg = 0
for i in sm.range(4):
rmg += quat[i] * quat[i]
return math.sqrt(rmg)
def quat_normalize(quat):
''' Returns a normalized quaternion. '''
length = quat_magnitude(quat)
oquat = quat_identity()
    if length != 0:
for i in sm.range(4):
oquat[i] = quat[i] / length
return oquat
def quat_conjugate(quat):
''' Returns the conjugate of a quaternion. '''
idquat = quat_identity()
for i in sm.range(4):
idquat[i] = -quat[i]
idquat[0] = -idquat[0]
return idquat
def quat_inverse(quat):
''' Returns the inverse of a quaternion. '''
lengthSquared = quat[0] * quat[0] + quat[1] * quat[1] + quat[2] * quat[2] + quat[3] * quat[3]
return [quat[0] / lengthSquared,
quat[1] / lengthSquared,
quat[2] / lengthSquared,
quat[3] / lengthSquared]
def quat_from_axis_angle(axis, theta):
''' Returns a quaternion from a given axis and a angle. '''
thetaOver2 = theta * 0.5
sto2 = math.sin(math.radians(thetaOver2))
cto2 = math.cos(math.radians(thetaOver2))
quat1List = []
if isinstance(axis, vector.Vector):
axis.i_normalize()
quat1List = [cto2, axis.vector[0] * sto2, axis.vector[1] * sto2, axis.vector[2] * sto2]
elif isinstance(axis, list):
# plain lists have no normalize() method, so normalize manually
length = math.sqrt(axis[0] ** 2 + axis[1] ** 2 + axis[2] ** 2)
naxis = [axis[0] / length, axis[1] / length, axis[2] / length]
quat1List = [cto2, naxis[0] * sto2, naxis[1] * sto2, naxis[2] * sto2]
else:
return NotImplemented
return Quaternion(data=quat1List)
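# A minimal usage sketch (commented out because the Quaternion class and
# quat_rotate_vector are defined further down in this module): a 90-degree
# rotation about +Z maps the +X unit vector to approximately +Y.
#   q = quat_from_axis_angle(vector.Vector(3, data=[0.0, 0.0, 1.0]), 90.0)
#   v = quat_rotate_vector(q, vector.Vector(3, data=[1.0, 0.0, 0.0]))
#   # v.vector is approximately [0.0, 1.0, 0.0]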
def quat_rotate(origin, axis, theta):
''' Returns a vector that is rotated around an axis. '''
thetaOver2 = theta * 0.5
sinThetaOver2 = math.sin(math.radians(thetaOver2))
cosThetaOver2 = math.cos(math.radians(thetaOver2))
quat = Quaternion(data = [cosThetaOver2, axis[0] * sinThetaOver2, axis[1] * sinThetaOver2, axis[2] * sinThetaOver2])
rotation = (quat * origin) * quat.conjugate()
return vector.Vector(3, data=[rotation.data[1], rotation.data[2], rotation.data[3]])
def quat_rotate_x_from_angle(theta):
''' Creates a quaternion that rotates around X axis given an angle. '''
thetaOver2 = theta * 0.5
cto2 = math.cos(thetaOver2)
sto2 = math.sin(thetaOver2)
return [cto2, sto2, 0.0, 0.0]
def quat_rotate_y_from_angle(theta):
''' Creates a quaternion that rotates around Y axis given an angle. '''
thetaOver2 = theta * 0.5
cto2 = math.cos(thetaOver2)
sto2 = math.sin(thetaOver2)
return [cto2, 0.0, sto2, 0.0]
def quat_rotate_z_from_angle(theta):
''' Creates a quaternion that rotates around Z axis given an angle. '''
thetaOver2 = theta * 0.5
cto2 = math.cos(thetaOver2)
sto2 = math.sin(thetaOver2)
return [cto2, 0.0, 0.0, sto2]
def quat_rotate_from_axis_angle(axis, theta):
''' Creates a quaternion that rotates around an arbitary axis given an angle. '''
thetaOver2 = theta * 0.5
sto2 = math.sin(math.radians(thetaOver2))
cto2 = math.cos(math.radians(thetaOver2))
quat1List = []
if isinstance(axis, vector.Vector):
axis.i_normalize()
quat1List = [cto2, axis.vector[0] * sto2, axis.vector[1] * sto2, axis.vector[2] * sto2]
elif isinstance(axis, list):
# plain lists have no normalize() method, so normalize manually
length = math.sqrt(axis[0] ** 2 + axis[1] ** 2 + axis[2] ** 2)
naxis = [axis[0] / length, axis[1] / length, axis[2] / length]
quat1List = [cto2, naxis[0] * sto2, naxis[1] * sto2, naxis[2] * sto2]
else:
return NotImplemented
quat1 = Quaternion(data=quat1List)
rotation = (quat1 * axis) * quat1.conjugate()
return rotation
def quat_rotate_vector(quat, vec):
''' Rotates a vector by a quaternion, returns a vector. '''
outQuat = (quat * vec) * quat.conjugate()
return vector.Vector(3, data=[outQuat.data[1], outQuat.data[2], outQuat.data[3]])
def quat_pow(quat, exp):
''' Returns a quaternion to the power of N. '''
quatExp = Quaternion()
if quat.data[0] != 0.0:
angle = math.acos(quat.data[0])
newAngle = angle * exp
quatExp.data[0] = math.cos(newAngle)
divAngle = math.sin(newAngle) / math.sin(angle)
quatExp.data[1] = quat.data[1] * divAngle
quatExp.data[2] = quat.data[2] * divAngle
quatExp.data[3] = quat.data[3] * divAngle
return quatExp
def quat_log(quat):
''' Returns the logarithm of a quaternion. '''
alpha = math.acos(quat.data[0])
sinAlpha = math.sin(alpha)
outList = [1.0, 0.0, 0.0, 0.0]
if sinAlpha > 0.0:
outList[1] = quat.data[1] * alpha / sinAlpha
outList[2] = quat.data[2] * alpha / sinAlpha
outList[3] = quat.data[3] * alpha / sinAlpha
else:
outList = quat.data
return outList
def quat_lerp(quat0, quat1, t):
''' Linear interpolation between two quaternions. '''
k0 = 1.0 - t
k1 = t
output = Quaternion()
output = (quat0 * k0) + (quat1 * k1)
return output
def quat_slerp(quat0, quat1, t):
''' Spherical interpolation between two quaternions. '''
k0 = 0.0
k1 = 0.0
output = Quaternion()
quat1Neg = Quaternion()
cosTheta = quat0.dot(quat1)
if cosTheta < 0.0:
quat1Neg = quat1.negate()
cosTheta = -cosTheta
else:
quat1Neg = quat1
if cosTheta > 0.999:
k0 = 1.0 - t
k1 = t
else:
theta = math.acos(cosTheta)
oneOverSinTheta = 1.0 / math.sin(theta)
k0 = math.sin((1.0 - t) * theta) * oneOverSinTheta
k1 = math.sin(t * theta) * oneOverSinTheta
output = (quat0 * k0) + (quat1Neg * k1)
return output
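# A minimal usage sketch (commented out; the Quaternion class appears later
# in this module): slerping halfway between the identity and a ~90-degree
# rotation about +Z gives a ~45-degree rotation about +Z.
#   q0 = Quaternion()                                  # identity
#   q1 = Quaternion(data=[0.7071, 0.0, 0.0, 0.7071])   # ~90 deg about +Z
#   q_half = quat_slerp(q0, q1, 0.5)                   # ~45 deg about +Z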
def quat_slerp_no_invert(quat0, quat1, t):
''' Spherical interpolation between two quaternions, it does not check for theta > 90. Used by SQUAD. '''
dotP = quat0.dot(quat1)
output = Quaternion()
if (dotP > -0.95) and (dotP < 0.95):
angle = math.acos(dotP)
k0 = math.sin(angle * (1.0 - t)) / math.sin(angle)
k1 = math.sin(t * angle) / math.sin(angle)
output = (quat0 * k0) + (quat1 * k1)
else:
output = quat_lerp(quat0, quat1, t)
return output
def quat_squad(quat0, quat1, quat2, t):
''' Quaternion splines. '''
return quat_slerp_no_invert(quat_slerp_no_invert(quat0, quat2, t), quat_slerp_no_invert(quat0, quat1, t), 2 * t * (1 - t))
def quat_to_matrix(quat):
''' Converts a quaternion to a rotational 4x4 matrix. '''
x2 = quat.data[1] * quat.data[1]
y2 = quat.data[2] * quat.data[2]
z2 = quat.data[3] * quat.data[3]
xy = quat.data[1] * quat.data[2]
xz = quat.data[1] * quat.data[3]
yz = quat.data[2] * quat.data[3]
wx = quat.data[0] * quat.data[1]
wy = quat.data[0] * quat.data[2]
wz = quat.data[0] * quat.data[3]
outputMatrix = matrix.Matrix(4)
outputMatrix.matrix[0][0] = 1.0 - 2.0 * y2 - 2.0 * z2
outputMatrix.matrix[0][1] = 2.0 * xy + 2.0 * wz
outputMatrix.matrix[0][2] = 2.0 * xz - 2.0 * wy
outputMatrix.matrix[0][3] = 0.0
outputMatrix.matrix[1][0] = 2.0 * xy - 2.0 * wz
outputMatrix.matrix[1][1] = 1.0 - 2.0 * x2 - 2.0 * z2
outputMatrix.matrix[1][2] = 2.0 * yz + 2.0 * wx
outputMatrix.matrix[1][3] = 0.0
outputMatrix.matrix[2][0] = 2.0 * xz + 2.0 * wy
outputMatrix.matrix[2][1] = 2.0 * yz - 2.0 * wx
outputMatrix.matrix[2][2] = 1.0 - 2.0 * x2 - 2.0 * y2
outputMatrix.matrix[2][3] = 0.0
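# note: row 3 of outputMatrix is not written here; this assumes that
# matrix.Matrix(4) initializes to the identity so that [3][3] is already 1.0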
return outputMatrix
class Quaternion(object):
def __init__(self, data=None):
if data is None:
self.data = quat_identity()
else:
self.data = data
def __add__(self, other):
if isinstance(other, Quaternion):
return Quaternion(quat_add(self.data, other.data))
else:
return NotImplemented
def __iadd__(self, other):
if isinstance(other, Quaternion):
self.data = quat_add(self.data, other.data)
return self
else:
return NotImplemented
def __sub__(self, other):
if isinstance(other, Quaternion):
return Quaternion(quat_sub(self.data, other.data))
else:
return NotImplemented
def __isub__(self, other):
if isinstance(other, Quaternion):
self.data = quat_sub(self.data, other.data)
return self
else:
return NotImplemented
def __mul__(self, other):
if isinstance(other, Quaternion):
return Quaternion(quat_mul_quat(self.data, other.data))
elif isinstance(other, vector.Vector):
return Quaternion(quat_mul_vect(self.data, other.vector))
elif isinstance(other, float):
return Quaternion(quat_mul_float(self.data, other))
else:
return NotImplemented
def __imul__(self, other):
if isinstance(other, Quaternion):
self.data = quat_mul_quat(self.data, other.data)
return self
elif isinstance(other, vector.Vector):
self.data = quat_mul_vect(self.data, other.vector)
return self
elif isinstance(other, float):
self.data = quat_mul_float(self.data, other)
return self
else:
return NotImplemented
def __div__(self, other):
if isinstance(other, float):
return Quaternion(quat_div_float(self.data, other))
else:
return NotImplemented
def __idiv__(self, other):
if isinstance(other, float):
self.data = quat_div_float(self.data, other)
return self
else:
return NotImplemented
def i_negate(self):
self.data = quat_neg(self.data)
return self
def negate(self):
quatList = quat_neg(self.data)
return Quaternion(quatList)
def i_identity(self):
self.data = quat_identity()
return self
def identity(self):
quatList = quat_identity()
return Quaternion(quatList)
def magnitude(self):
return quat_magnitude(self.data)
def dot(self, quat2):
if isinstance(quat2, Quaternion):
return quat_dot(self.data, quat2.data)
else:
return NotImplemented
def i_normalize(self):
self.data = quat_normalize(self.data)
return self
def normalize(self):
quatList = quat_normalize(self.data)
return Quaternion(quatList)
def i_conjugate(self):
self.data = quat_conjugate(self.data)
return self
def conjugate(self):
quatList = quat_conjugate(self.data)
return Quaternion(quatList)
def inverse(self):
quatList = quat_inverse(self.data)
return Quaternion(quatList)
def pow(self, e):
exponent = e
return quat_pow(self, exponent)
def log(self):
return quat_log(self)
def lerp(self, quat1, time):
return quat_lerp(self, quat1, time)
def slerp(self, quat1, time):
return quat_slerp(self, quat1, time)
def slerp_no_invert(self, quat1, time):
return quat_slerp_no_invert(self, quat1, time)
def squad(self, quat1, quat2, time):
return quat_squad(self, quat1, quat2, time)
def toMatrix(self):
return quat_to_matrix(self)
# The following are used for orientation and motion
def getForward(self):
''' Returns the forward vector. '''
return quat_rotate_vector(self, vector.Vector(3, data=[0.0, 0.0, 1.0]))
def getBack(self):
''' Returns the backwards vector. '''
return quat_rotate_vector(self, vector.Vector(3, data=[0.0, 0.0, -1.0]))
def getLeft(self):
''' Returns the left vector. '''
return quat_rotate_vector(self, vector.Vector(3, data=[-1.0, 0.0, 0.0]))
def getRight(self):
''' Returns the right vector. '''
return quat_rotate_vector(self, vector.Vector(3, data=[1.0, 0.0, 0.0]))
def getUp(self):
''' Returns the up vector. '''
return quat_rotate_vector(self, vector.Vector(3, data=[0.0, 1.0, 0.0]))
def getDown(self):
''' Returns the down vector. '''
return quat_rotate_vector(self, vector.Vector(3, data=[0.0, -1.0, 0.0]))
def quat_from_matrix(matrix):
''' Converts a 4x4 rotational matrix to quaternion. '''
fourXSquaredMinus1 = matrix.matrix[0][0] - matrix.matrix[1][1] - matrix.matrix[2][2]
fourYSquaredMinus1 = matrix.matrix[1][1] - matrix.matrix[0][0] - matrix.matrix[2][2]
fourZSquaredMinus1 = matrix.matrix[2][2] - matrix.matrix[0][0] - matrix.matrix[1][1]
fourWSquaredMinus1 = matrix.matrix[0][0] + matrix.matrix[1][1] + matrix.matrix[2][2]
biggestIndex = 0
fourBiggestSquaredMinus1 = fourWSquaredMinus1
if fourXSquaredMinus1 > fourBiggestSquaredMinus1:
fourBiggestSquaredMinus1 = fourXSquaredMinus1
biggestIndex = 1
if fourYSquaredMinus1 > fourBiggestSquaredMinus1:
fourBiggestSquaredMinus1 = fourYSquaredMinus1
biggestIndex = 2
if fourZSquaredMinus1 > fourBiggestSquaredMinus1:
fourBiggestSquaredMinus1 = fourZSquaredMinus1
biggestIndex = 3
biggestVal = math.sqrt(fourBiggestSquaredMinus1 + 1) * 0.5
mult = 0.25 / biggestVal
rquat = Quaternion()
if biggestIndex == 0:
rquat.data[0] = biggestVal
rquat.data[1] = (matrix.matrix[1][2] - matrix.matrix[2][1]) * mult
rquat.data[2] = (matrix.matrix[2][0] - matrix.matrix[0][2]) * mult
rquat.data[3] = (matrix.matrix[0][1] - matrix.matrix[1][0]) * mult
return rquat
if biggestIndex == 1:
rquat.data[0] = (matrix.matrix[1][2] - matrix.matrix[2][1]) * mult
rquat.data[1] = biggestVal
rquat.data[2] = (matrix.matrix[0][1] + matrix.matrix[1][0]) * mult
rquat.data[3] = (matrix.matrix[2][0] + matrix.matrix[0][2]) * mult
return rquat
if biggestIndex == 2:
rquat.data[0] = (matrix.matrix[2][0] - matrix.matrix[0][2]) * mult
rquat.data[1] = (matrix.matrix[0][1] + matrix.matrix[1][0]) * mult
rquat.data[2] = biggestVal
rquat.data[3] = (matrix.matrix[1][2] + matrix.matrix[2][1]) * mult
return rquat
if biggestIndex == 3:
rquat.data[0] = (matrix.matrix[0][1] - matrix.matrix[1][0]) * mult
rquat.data[1] = (matrix.matrix[2][0] + matrix.matrix[0][2]) * mult
rquat.data[2] = (matrix.matrix[1][2] + matrix.matrix[2][1]) * mult
rquat.data[3] = biggestVal
return rquat
| 32.865854 | 123 | 0.60402 | 2,229 | 16,170 | 4.309107 | 0.08838 | 0.011036 | 0.008121 | 0.006663 | 0.570953 | 0.450703 | 0.389485 | 0.356065 | 0.300156 | 0.232795 | 0 | 0.052518 | 0.254607 | 16,170 | 491 | 124 | 32.93279 | 0.744379 | 0.095857 | 0 | 0.300836 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.169916 | false | 0 | 0.011142 | 0.019499 | 0.401114 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8f980b7c105970772ce222d03d8f0a75f6d84be2 | 9,141 | py | Python | entity.py | TheNicGard/DungeonStar | 525aeb53217166d2ce83e4e91a3b8c1b102f0dcb | [
"MIT"
] | 3 | 2019-07-11T17:54:42.000Z | 2021-03-09T10:58:13.000Z | entity.py | BandW2011/DungeonStar | 525aeb53217166d2ce83e4e91a3b8c1b102f0dcb | [
"MIT"
] | 1 | 2019-07-11T17:55:38.000Z | 2020-05-03T06:34:56.000Z | entity.py | TheNicGard/DungeonStar | 525aeb53217166d2ce83e4e91a3b8c1b102f0dcb | [
"MIT"
] | null | null | null | import tcod as libtcod
import math
from components.item import Item
from render_functions import RenderOrder
class Entity:
def __init__(self, id, x, y, char, color, name, weight=0, blocks=False,
render_order = RenderOrder.CORPSE, fighter=None, ai=None,
item=None, inventory=None, stairs=None, level=None,
equipment=None, equippable=None, valuable=None, door=None,
animation=None, hunger=None, food=None, trap=None,
classification=[], sign=None, identity=None):
self.id = id
self.x = x
self.y = y
self.char = char
self.color = color
self.name = name
self.weight = weight
self.blocks = blocks
self.render_order = render_order
self.fighter = fighter
self.ai = ai
self.item = item
self.inventory = inventory
self.stairs = stairs
self.level = level
self.equipment = equipment
self.equippable = equippable
self.valuable = valuable
self.door = door
self.animation = animation
self.hunger = hunger
self.food = food
self.trap = trap
self.classification = classification
self.sign = sign
self.identity = identity
if self.fighter:
self.fighter.owner = self
if self.ai:
self.ai.owner = self
if self.item:
self.item.owner = self
if self.inventory:
self.inventory.owner = self
if self.stairs:
self.stairs.owner = self
if self.door:
self.door.owner = self
if self.level:
self.level.owner = self
if self.equipment:
self.equipment.owner = self
if self.equippable:
self.equippable.owner = self
if not self.item:
item = Item(1)
self.item = item
self.item.owner = self
if self.valuable:
self.valuable.owner = self
if self.animation:
self.animation.owner = self
if self.hunger:
self.hunger.owner = self
if self.sign:
self.sign.owner = self
if self.trap:
self.trap.owner = self
def __str__(self):
return "Entity \'{0}\' is represented by {1} at location ({2}, {3}).".format(self.name, self.char, self.x, self.y)
@property
def get_name(self):
if self.identity and not self.identity.identified:
return self.identity.name
return self.name
@property
def get_char(self):
if self.item and self.item.light_source:
return self.item.light_source.get_char
return self.char
@property
def get_color(self):
if self.identity and not self.identity.identified:
return self.identity.color
return self.color
def move(self, dx, dy):
self.x += dx
self.y += dy
def move_towards(self, target_x, target_y, game_map, entities):
dx = target_x - self.x
dy = target_y - self.y
distance = math.sqrt((dx ** 2) + (dy ** 2))
if distance == 0:
return
dx = int(round(dx / distance))
dy = int(round(dy / distance))
if not (game_map.is_blocked(self.x + dx, self.y + dy) or
get_blocking_entities_at_location(entities, self.x + dx, self.y + dy)):
self.move(dx, dy)
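# Worked example of the step normalization above (illustrative values):
# a target 5 tiles right and 3 tiles up gives distance = sqrt(34) ~= 5.83,
# so the per-turn step is (int(round(5 / 5.83)), int(round(3 / 5.83))) == (1, 1)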
def distance(self, x, y):
return math.sqrt((x - self.x) ** 2 + (y - self.y) ** 2)
def move_astar(self, target, entities, game_map):
# Create a FOV map that has the dimensions of the map
fov = libtcod.map_new(game_map.width, game_map.height)
# Scan the current map each turn and set all the walls as unwalkable
for y1 in range(game_map.height):
for x1 in range(game_map.width):
libtcod.map_set_properties(fov, x1, y1, not game_map.tiles[x1][y1].block_sight,
not game_map.tiles[x1][y1].blocked)
# Scan all the objects to see if there are objects that must be navigated around
# Check also that the object isn't self or the target (so that the start and the end points are free)
# The AI class handles the situation if self is next to the target so it will not use this A* function anyway
for entity in entities:
if entity.blocks and entity != self and entity != target:
# Set the tile as a wall so it must be navigated around
libtcod.map_set_properties(fov, entity.x, entity.y, True, False)
# Allocate a A* path
# The 1.41 is the normal diagonal cost of moving, it can be set as 0.0 if diagonal moves are prohibited
my_path = libtcod.path_new_using_map(fov, 1.41)
# Compute the path between self's coordinates and the target's coordinates
libtcod.path_compute(my_path, self.x, self.y, target.x, target.y)
# Check if the path exists, and in this case, also the path is shorter than 25 tiles
# The path size matters if you want the monster to use alternative longer paths (for
# example through other rooms) if for example the player is in a corridor
# It makes sense to keep path size relatively low to keep the monsters from running around
# the map if there's an alternative path really far away
if not libtcod.path_is_empty(my_path) and libtcod.path_size(my_path) < 25:
# Find the next coordinates in the computed full path
x, y = libtcod.path_walk(my_path, True)
if x or y:
# Set self's coordinates to the next path tile
self.x = x
self.y = y
else:
# Keep the old move function as a backup so that if there are no paths
# (for example another monster blocks a corridor)
# it will still try to move towards the player (closer to the corridor opening)
self.move_towards(target.x, target.y, game_map, entities)
# Delete the path to free memory
libtcod.path_delete(my_path)
def flee_astar(self, predator, entities, game_map, safe_range):
target_locations = []
fov = libtcod.map_new(game_map.width, game_map.height)
for y1 in range(game_map.height):
for x1 in range(game_map.width):
libtcod.map_set_properties(fov, x1, y1, not game_map.tiles[x1][y1].block_sight,
not game_map.tiles[x1][y1].blocked)
for entity in entities:
if entity.blocks and entity != self and entity != predator:
libtcod.map_set_properties(fov, entity.x, entity.y, True, False)
my_path = libtcod.path_new_using_map(fov, 1.41)
# Compute the path between self's coordinates and the predator's coordinates
# (note: despite the name, this currently mirrors move_astar and paths toward
# the predator; target_locations and safe_range are unused as written)
libtcod.path_compute(my_path, self.x, self.y, predator.x, predator.y)
# Check if the path exists, and in this case, also the path is shorter than 25 tiles
# The path size matters if you want the monster to use alternative longer paths (for
# example through other rooms) if for example the player is in a corridor
# It makes sense to keep path size relatively low to keep the monsters from running around
# the map if there's an alternative path really far away
if not libtcod.path_is_empty(my_path) and libtcod.path_size(my_path) < 25:
# Find the next coordinates in the computed full path
x, y = libtcod.path_walk(my_path, True)
if x or y:
# Set self's coordinates to the next path tile
self.x = x
self.y = y
else:
# Keep the old move function as a backup so that if there are no paths
# (for example another monster blocks a corridor)
# it will still try to move towards the player (closer to the corridor opening)
self.move_towards(predator.x, predator.y, game_map, entities)
# Delete the path to free memory
libtcod.path_delete(my_path)
def distance_to(self, other):
dx = other.x - self.x
dy = other.y - self.y
return math.sqrt((dx ** 2) + (dy ** 2))
def get_blocking_entities_at_location(entities, destination_x, destination_y):
for entity in entities:
if entity.blocks and entity.x == destination_x and entity.y == destination_y:
if entity.fighter:
if entity.fighter.is_effect("invisible"):
return None
else:
return entity
else:
return entity
return None
def get_entities_at_location(entities, destination_x, destination_y):
found_entities = []
for entity in entities:
if entity and entity.x == destination_x and entity.y == destination_y:
found_entities.append(entity)
return found_entities
| 37.310204 | 122 | 0.597528 | 1,260 | 9,141 | 4.244444 | 0.16746 | 0.020194 | 0.029918 | 0.036462 | 0.544316 | 0.544316 | 0.514959 | 0.499439 | 0.48074 | 0.47457 | 0 | 0.007801 | 0.326879 | 9,141 | 244 | 123 | 37.463115 | 0.861368 | 0.233672 | 0 | 0.32716 | 0 | 0 | 0.009897 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080247 | false | 0 | 0.024691 | 0.012346 | 0.203704 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8f99c35a950490ba3164b9cd0128d650d0ec5cf9 | 5,459 | py | Python | qasrl/models/question.py | gililior/qasrl-modeling | 2f9684536f6d5f0283b0e4b90a911ea12fa72f72 | [
"MIT"
] | 1 | 2021-07-18T18:55:54.000Z | 2021-07-18T18:55:54.000Z | qasrl/models/question.py | gililior/qasrl-modeling | 2f9684536f6d5f0283b0e4b90a911ea12fa72f72 | [
"MIT"
] | 2 | 2021-09-13T15:52:39.000Z | 2021-10-06T21:58:06.000Z | qasrl/models/question.py | gililior/qasrl-modeling | 2f9684536f6d5f0283b0e4b90a911ea12fa72f72 | [
"MIT"
] | 1 | 2021-09-19T13:46:49.000Z | 2021-09-19T13:46:49.000Z | from typing import Dict, List, TextIO, Optional, Set, Tuple
from overrides import overrides
import torch
from torch.nn.modules import Linear, Dropout
from torch.autograd import Variable
import torch.nn.functional as F
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits
from allennlp.nn.util import get_lengths_from_binary_sequence_mask, viterbi_decode
from allennlp.nn.util import batched_index_select
from allennlp.training.metrics import SpanBasedF1Measure
from qasrl.modules.sentence_encoder import SentenceEncoder
from qasrl.modules.slot_sequence_generator import SlotSequenceGenerator
from qasrl.metrics.question_metric import QuestionMetric
@Model.register("qasrl_question")
class QuestionModel(Model):
def __init__(self, vocab: Vocabulary,
sentence_encoder: SentenceEncoder,
question_generator: SlotSequenceGenerator,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None):
super(QuestionModel, self).__init__(vocab, regularizer)
self._sentence_encoder = sentence_encoder
self._question_generator = question_generator
if self._sentence_encoder.get_output_dim() != self._question_generator.get_input_dim():
raise ConfigurationError(
("Input dimension of question generator (%s) must be " % self._question_generator.get_input_dim()) + \
("equal to the output dimension of the sentence encoder (%s)." % self._sentence_encoder.get_output_dim()))
self.metric = QuestionMetric(vocab, self._question_generator.get_slot_names())
def get_slot_names(self):
return self._question_generator.get_slot_names()
@overrides
def forward(self,
text: Dict[str, torch.LongTensor],
predicate_indicator: torch.LongTensor,
predicate_index: torch.LongTensor,
**kwargs):
# slot_name -> Shape: batch_size, 1
gold_slot_labels = self._get_gold_slot_labels(kwargs)
if gold_slot_labels is None:
raise ConfigurationError("QuestionModel requires gold labels for teacher forcing when running forward. "
"You may wish to run beam_decode instead.")
# Shape: batch_size, num_tokens, self._sentence_encoder.get_output_dim()
encoded_text, text_mask = self._sentence_encoder(text, predicate_indicator)
# Shape: batch_size, self._sentence_encoder.get_output_dim()
pred_rep = batched_index_select(encoded_text, predicate_index).squeeze(1)
# slot_name -> Shape: batch_size, slot_name_vocab_size
slot_logits = self._question_generator(pred_rep, **gold_slot_labels)
batch_size, _ = pred_rep.size()
# Shape: <scalar>
slot_nlls, neg_log_likelihood = self._get_cross_entropy(slot_logits, gold_slot_labels)
self.metric(slot_logits, gold_slot_labels, torch.ones([batch_size]), slot_nlls, neg_log_likelihood)
return {**slot_logits, "loss": neg_log_likelihood}
def beam_decode(self,
text: Dict[str, torch.LongTensor],
predicate_indicator: torch.LongTensor,
predicate_index: torch.LongTensor,
max_beam_size: int,
min_beam_probability: float,
clause_mode: bool = False):
# Shape: batch_size, num_tokens, self._sentence_encoder.get_output_dim()
encoded_text, text_mask = self._sentence_encoder(text, predicate_indicator)
# Shape: batch_size, self._sentence_encoder.get_output_dim()
pred_rep = batched_index_select(encoded_text, predicate_index).squeeze(1)
return self._question_generator.beam_decode(pred_rep, max_beam_size, min_beam_probability, clause_mode)
def get_metrics(self, reset: bool = False):
return self.metric.get_metric(reset=reset)
def _get_cross_entropy(self, slot_logits, gold_slot_labels):
slot_xes = {}
xe = None
for slot_name in self.get_slot_names():
slot_xe = F.cross_entropy(slot_logits[slot_name], gold_slot_labels[slot_name].squeeze(-1), reduction = "sum")
slot_xes[slot_name] = slot_xe
if xe is None:
xe = slot_xe
else:
xe = xe + slot_xe
return slot_xes, xe
def _get_gold_slot_labels(self, instance_slot_labels_dict):
# each of gold_slot_labels[slot_name] is of
# Shape: batch_size
gold_slot_labels = {}
for slot_name in self.get_slot_names():
if slot_name in instance_slot_labels_dict and instance_slot_labels_dict[slot_name] is not None:
gold_slot_labels[slot_name] = instance_slot_labels_dict[slot_name].unsqueeze(-1)
for slot_name in self.get_slot_names():
if slot_name not in instance_slot_labels_dict or instance_slot_labels_dict[slot_name] is None:
gold_slot_labels = None
return gold_slot_labels
| 50.546296 | 122 | 0.709104 | 669 | 5,459 | 5.428999 | 0.230194 | 0.055066 | 0.053965 | 0.036344 | 0.377478 | 0.294328 | 0.236509 | 0.199615 | 0.19163 | 0.19163 | 0 | 0.001643 | 0.219454 | 5,459 | 107 | 123 | 51.018692 | 0.850739 | 0.077304 | 0 | 0.151163 | 0 | 0 | 0.049334 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081395 | false | 0 | 0.232558 | 0.023256 | 0.395349 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8f9c7d739dfd9df8261ba3fb8f8da2e2029bd3f2 | 7,059 | py | Python | libkloudtrader/algorithm.py | KloudTrader/libkloudtrader | 015e2779f80ba2de93be9fa6fd751412a9d5f492 | [
"Apache-2.0"
] | 11 | 2019-01-16T16:10:09.000Z | 2021-03-02T00:59:17.000Z | libkloudtrader/algorithm.py | KloudTrader/kloudtrader | 015e2779f80ba2de93be9fa6fd751412a9d5f492 | [
"Apache-2.0"
] | 425 | 2019-07-10T06:59:49.000Z | 2021-01-12T05:32:14.000Z | libkloudtrader/algorithm.py | KloudTrader/kloudtrader | 015e2779f80ba2de93be9fa6fd751412a9d5f492 | [
"Apache-2.0"
] | 6 | 2019-03-15T16:25:06.000Z | 2021-05-03T10:02:13.000Z | from typing import Any, List
import random
import time
import datetime
import numpy as np
import pandas as pd
import libkloudtrader.stocks as stocks
from libkloudtrader.exceptions import InvalidAlgorithmMode, EmptySymbolBucket, InvalidDataFeedType
from libkloudtrader.enumerables import Data_Types
import libkloudtrader.processing as processing
from libkloudtrader.logs import start_logger
import libkloudtrader.backtest as bt
import libkloudtrader.crypto as crypto
import libkloudtrader.analysis as analysis
from tqdm import tqdm
#pd.set_option('display.max_columns', None) # or 1000
#pd.set_option('display.max_rows', None) # or 1000
#pd.set_option('display.max_colwidth', -1) # or 199
logger = start_logger(__name__, ignore_module='libkloudtrader.analysis')
def run_backtest(strategy: str,
symbol_bucket: List[str],
data: str,
start: Any,
end: Any,
data_interval:str="1d",
preferred_price_point: str = 'close',
preferred_benchmark: str = 'SPY',
initial_capital: float = 100000,
commission: float = 0,
slippage=True):
"""Backtester function"""
try:
logger.info(
'Starting Backtest for {} from {} to {} with initial capital = {}'.
format(strategy.__name__, start, end, initial_capital))
data_to_backtest_on = Data_Types[data].value
for symbol in symbol_bucket:
data_batch = data_to_backtest_on(symbol=symbol,
start=start,
end=end,interval=data_interval
)
batch = processing.Buffer(len(data_batch), dtype=object)
backtest = bt.Backtest(capital=initial_capital, commission=commission, enable_slippage=slippage)
for timestamp, bar in data_batch.iterrows():  # avoid shadowing the datetime module
batch.append(bar)
backtest.update_bar(timestamp, bar)
data_batch = pd.DataFrame(batch)
locals()['strategy'](backtest,data_batch)
print(backtest.get_trade_log)
del backtest
'''
for symbol in symbol_bucket:
data_batch = data_to_backtest_on(symbol,
start,
end,
interval=data_interval)
for symbol in symbol_bucket:
a = bt.Backtest(locals()['strategy'](data_batch),
preferred_price_point)
print(a.preferred_price_point)
signals=locals()['strategy'](data_batch)
df=pd.DataFrame()
df['buy']=signals['buy']
df['sell']=signals['sell']
df['short']=signals['short']
df['cover']=signals['cover']
'''
#bt = Backtest(locals()['strategy'](data_batch), strategy.__name__)
#df=bt.signals
#df['positions']=bt.positions
#df['price']=bt.trades['price']
#df['trade volume']=bt.trades['vol']
#df['trade_price']=bt.trade_price
#df['equity']=bt.equity
#df['trades']=bt.trades
#df['positions in '+symbol]=100*df['positions']
#print(bt.trades)
#logger.info("Received Signals from {}".format(strategy.__name__))
except (KeyboardInterrupt, SystemExit):
print('\n')
logger.critical("User's keyboard prompt stopped {}".format(
strategy.__name__))
except Exception as exception:
logger.critical('Exiting {}...‼️'.format(strategy.__name__))
logger.error(
'Oops! Something went wrong while your algorithm was being backtested. ⚠️'
)
raise exception
exit()
#print(return_data_from_enum(a,symbol,start, end))
#print(locals()[a](symbol, start, end))
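# A minimal sketch of the strategy callable that run_backtest expects: it is
# invoked as strategy(backtest, data_batch), where `backtest` is the
# libkloudtrader.backtest.Backtest instance built above and `data_batch` is a
# pandas DataFrame of bars. The body below is an illustrative skeleton, not
# part of the library API.
def example_strategy(backtest, data_batch):
    """Illustrative no-op strategy skeleton; replace with real signal logic."""
    for timestamp, bar in data_batch.iterrows():
        pass  # e.g. inspect bar['close'] and act on `backtest` accordingly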
def run_live(strategy: str,
symbol_bucket: list,
data_feed_type: str,
exempted_states: list = [''],
exempted_days:list=[''],
exempted_dates:list=[''],
batch_size: int = 1000,
data_feed_delay: float = 1.0,
fake_feed: bool = False):
try:
logger.info("{} is now entering the live markets. 📈\n".format(
strategy.__name__))
exempted_states = [x.lower() for x in exempted_states]
exempted_days = [x.lower() for x in exempted_days]
if isinstance(symbol_bucket, list):
symbol_bucket = np.array(symbol_bucket)
elif type(symbol_bucket) not in (np.ndarray, list):
raise TypeError('Symbol bucket must be a list or numpy array')
if data_feed_type not in ('CRYPTO_live_feed', 'US_STOCKS_live_feed',
'CRYPTO_live_feed_level2'):
raise InvalidDataFeedType(
'This Data Feed is not available for live trading.'
)
if data_feed_type in ("CRYPTO_live_feed", 'CRYPTO_live_feed_level2'):
data_feed_delay = crypto.exchange_attribute('rateLimit')
data_feed = Data_Types[data_feed_type].value
while stocks.intraday_status()['state'] not in exempted_states: #and datetime.datetime.now().strftime("%A").lower() not in exempted_days:
batch = processing.Buffer(batch_size, dtype=object)
while len(batch) < batch_size:
for symbol in symbol_bucket:
batch.append(data_feed(symbol, fake_feed=fake_feed))
data_batch = pd.DataFrame(batch)
locals()['strategy'](data_batch)
if len(batch) == batch_size:
batch.popleft()
time.sleep(data_feed_delay / 1000)
except (KeyboardInterrupt, SystemExit):
print('\n')
logger.critical("User's keyboard prompt stopped {}".format(
strategy.__name__))
except Exception as exception:
logger.critical('Exiting {}...‼️'.format(strategy.__name__))
logger.error('Oops! Something went wrong ⚠️')
raise exception
exit()
'''
def generate_positions_and_handle_portfolio(symbol, signals, data, commission,
initial_capital, quantity):
try:
initial_capital = float(initial_capital)
positions = pd.DataFrame(index=signals.index).fillna(0.0)
positions['Positions in' + " " +
symbol] = (quantity * signals['signal']) + commission
portfolio = positions.multiply(data['close'], axis=0)
poss_diff = positions.diff()
portfolio['holdings'] = (positions.multiply(data['close'],
axis=0)).sum(axis=1)
return portfolio
except Exception as exception:
raise exception
'''
| 40.803468 | 145 | 0.563536 | 732 | 7,059 | 5.239071 | 0.271858 | 0.03442 | 0.032855 | 0.017731 | 0.257888 | 0.226336 | 0.167927 | 0.147588 | 0.131421 | 0.131421 | 0 | 0.009994 | 0.333758 | 7,059 | 172 | 146 | 41.040698 | 0.803104 | 0.099873 | 0 | 0.22549 | 0 | 0 | 0.11556 | 0.014315 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019608 | false | 0 | 0.147059 | 0 | 0.166667 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8f9d7cb9d8688441fb1f44b0f15087c179e57505 | 34,164 | py | Python | src/wavecalLib.py | rterrien/HPFSpec2 | e7cefc37184926ae65a4626a0fe3299cc6b3deb3 | [
"MIT"
] | null | null | null | src/wavecalLib.py | rterrien/HPFSpec2 | e7cefc37184926ae65a4626a0fe3299cc6b3deb3 | [
"MIT"
] | null | null | null | src/wavecalLib.py | rterrien/HPFSpec2 | e7cefc37184926ae65a4626a0fe3299cc6b3deb3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function, division, unicode_literals
import numpy as np
import copy
import scipy.optimize
from skimage import filters
from skimage import morphology
from scipy import interpolate
from astropy.stats import biweight_location, mad_std
from collections import OrderedDict
import scipy.constants
import logging
import datetime
import astropy
import astropy.time
""" This is a library of functions that are called in the
wavelength calibration.
"""
def fgauss(x, center, sigma, amp):
"""A Gaussian function.
This is a standard Gaussian function.
Parameters
----------
x : float or ndarray of float
Independent variable for the Gaussian
center : float or ndarray of float
Mean for the Gaussian
sigma : float or ndarray of float
Standard deviation (sigma) for the Gaussian
amp : float or ndarray of float
Amplitude of the Gaussian
"""
center = float(center)
sigma = float(sigma)
amp = float(amp)
return(amp * np.exp(-((x - center) / sigma) ** 2.))
def fgauss_const(x, center, sigma, amp, offset):
"""Gaussian + offset function.
This is a Gaussian with a constant offset.
Parameters
----------
x : float or ndarray of float
Independent variable for the Gaussian
center : float or ndarray of float
Mean for the Gaussian
sigma : float or ndarray of float
Standard deviation (sigma) for the Gaussian
amp : float or ndarray of float
Amplitude of the Gaussian
offset : float or ndarray of float
Offset for the Gaussian
"""
center = float(center)
sigma = float(sigma)
amp = float(amp)
offset = float(offset)
return(float(amp) * np.exp(-((x - center) / sigma) ** 2.) + offset)
def fgauss_line(x, center, sigma, amp, offset, slope):
"""Gaussian + line function.
This is a Gaussian with a linear offset.
Parameters
----------
x : float or ndarray of float
Independent variable for the Gaussian
center : float
Mean for the Gaussian
sigma : float or ndarray of float
Standard deviation (sigma) for the Gaussian
amp : float or ndarray of float
Amplitude of the Gaussian
offset : float or ndarray of float
Offset for the Gaussian linear offset (y-intercept)
slope : float or ndarray of float
Slope for the Gaussian linear offset
"""
center = float(center)
sigma = float(sigma)
amp = float(amp)
offset = float(offset)
slope = float(slope)
return(float(amp) * np.exp(-((x - center) / sigma) ** 2.) + offset + x * slope)
def fgauss_from_1(x, center, sigma, amp):
"""Gaussian + offset function.
This is a Gaussian with a fixed offset (1 = continuum). Convenience function.
Parameters
----------
x : float or ndarray of float
Independent variable for the Gaussian
center : float or ndarray of float
Mean for the Gaussian
sigma : float or ndarray of float
Standard deviation (sigma) for the Gaussian
amp : float or ndarray of float
Amplitude of the Gaussian (negative to model an absorption dip below the fixed continuum of 1)
"""
center = float(center)
sigma = float(sigma)
amp = float(amp)
offset = 1.
return(float(amp) * np.exp(-((x - center) / sigma) ** 2.) + offset)
def discretize_oversample(func, x, *args, **kwargs):
"""Upsample a function.
This function enables discrete "upsampling" of a function
by an arbitrary (integer) factor.
Parameters
----------
func : function
The mathematical function to be upsampled. First argument must be
independent variable.
x : float or ndarray of float
Independent variable for the function prior to upsampling.
*args : list
Arguments to be passed to the mathematical function.
**kwargs : dict
Keywords for the upsampling. Includes "factor" as upsampling factor.
"""
if 'factor' in kwargs.keys():
factor = kwargs['factor']
else:
factor = 10
assert factor > 1
x1 = np.amin(x)
x2 = np.amax(x)
xx = np.arange(x1 - 0.5 * (1 - 1 / factor),
x2 + 0.5 * (1 + 1 / factor), 1. / factor) + 0.5 / factor
values = func(xx, *args)
values = np.reshape(values, (xx.size // factor, factor))
return(values.sum(axis=1) * 1. / factor)
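# A minimal, self-contained sketch of the oversampling above (arbitrary
# illustrative parameters): compare a Gaussian evaluated at pixel centers
# with its pixel-integrated counterpart.
def _example_discretize():
    x = np.arange(20)
    at_centers = fgauss(x, 10.0, 1.5, 1.0)
    integrated = discretize_oversample(fgauss, x, 10.0, 1.5, 1.0, factor=10)
    # `integrated` approximates the mean of the Gaussian over each 1-pixel
    # bin rather than the value at the bin center
    return np.max(np.abs(at_centers - integrated))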
def dfgauss(x, *args, **kwargs):
"""Upsampled Gaussian function.
This is an "upsampled" Gaussian, implemented for convenience of testing
fit sensitivity to choices about how to discretize the line function.
Parameters
----------
x : float or ndarray of float
Independent variable for Gaussian
*args : list
Arguments for gaussian, in order: mean, sigma, amplitude
**kwargs : dict
Keywords for discretization, including: factor
"""
return(discretize_oversample(fgauss, x, *args, **kwargs))
def dfgauss_const(x, *args, **kwargs):
"""Upsampled Gaussian+constant function.
This is an "upsampled" Gaussian + constant, implemented for convenience of testing
fit sensitivity to choices about how to discretize the line function.
Parameters
----------
x : float or ndarray of float
Independent variable for Gaussian
*args : list
Arguments for gaussian, in order: mean, sigma, amplitude, offset
**kwargs : dict
Keywords for discretization, including: factor
"""
return(discretize_oversample(fgauss_const, x, *args, **kwargs))
def dfgauss_line(x, *args, **kwargs):
"""Upsampled Gaussian+line function.
This is an "upsampled" Gaussian + line, implemented for convenience of testing
fit sensitivity to choices about how to discretize the line function.
Parameters
----------
x : float or ndarray of float
Independent variable for Gaussian
*args : list
Arguments for gaussian, in order: mean, sigma, amplitude, offset
**kwargs : dict
Keywords for discretization, including: factor
"""
return(discretize_oversample(fgauss_line, x, *args, **kwargs))
def make_legendre(legendre_order, legfix, legbound):
"""Make a Legendre function.
This generates a Legendre function with arbitrary fixed/constrained
parameters. This is potentially useful for measuring and accounting
for wavelength calibration drift, if it affects some Legendre
coefficients and not others.
Parameters
----------
legendre_order : int
The order of the desired Legendre function (e.g. order 2 = 3
coefficients).
legfix : list of booleans
For each Legendre coefficient, is it fixed (True) or variable (False)
legbound : list
For each Legendre coefficient, a single value (for fixed coeffs) or
an arbitrary placeholder.
Returns
-------
function
Legendre function with arguments (x,*args); x is independent variable
and *args are the fitted coefficients for the Legendre polynomial
"""
assert len(legbound) == (legendre_order + 1)
assert len(legfix) == (legendre_order + 1)
# build a flexible legendre function, isolating the fitted variables
# so curve_fit can digest
def flexible_Legendre(x, *args):
# make sure that the number of coefficients is adequate
assert len(args) == (legendre_order - np.count_nonzero(legfix) + 1)
# initialize the coefficients
coeffs = [0 for i in range(legendre_order + 1)]
iused = 0
# for each coefficient, feed the function its fixed value
# or acknowledge that it is a variable and move on
for i in range(legendre_order + 1):
if legfix[i]:
coeffs[i] = legbound[i]
else:
coeffs[i] = args[iused]
iused = iused + 1
return np.polynomial.legendre.legval(x, coeffs)
return flexible_Legendre
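# A minimal usage sketch of make_legendre (arbitrary illustrative numbers):
# a quadratic whose constant term is pinned to 5.0 while the linear and
# quadratic coefficients remain free fit parameters.
def _example_make_legendre():
    leg = make_legendre(2, legfix=[True, False, False], legbound=[5.0, None, None])
    x = np.linspace(-1.0, 1.0, 5)
    # only the free coefficients are passed as *args; the fixed constant
    # term is taken from legbound
    return leg(x, 1.0, 0.5)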
def rescale(x, oldmin, oldmax, newmin, newmax):
"""Linearly rescale and offset a series.
This function takes a series and linearly scales/offsets
it to a new domain. Useful for calling Legendre polynomial, e.g.
Parameters
----------
x : ndarray
Original series, float
oldmin : float
min of old domain
oldmax : float
max of old domain
newmin : float
min of new domain
newmax : float
max of new domain
Returns
-------
Standardized series.
"""
out = (newmax - newmin) / (oldmax - oldmin) * (x - oldmin) + newmin
return(out)
def fitProfile(inp_x, inp_y, fit_center_in, fit_width=8, sigma=None,
func='fgauss_const', return_residuals=False,p0=None,bounds=(-np.inf,np.inf)):
"""Perform a least-squares fit to a peak-like function.
Parameters
----------
inp_x : ndarray of float
x-values of line to be fit (full array; subset is
taken based on fit width)
inp_y : ndarray of float
y-values of line to be fit (full array; subset is
taken based on fit width)
fit_center_in : float
Index value of estimated location of line center;
used to select region for fitting
fit_width : int, optional
Half-width of fitting window. (the default is 8)
sigma : ndarray of float, optional
The standard error for each x/y value in the fit.
(the default is None, which implies an unweighted fit)
func : {'fgauss','fgauss_const','fgauss_line','fgauss_from_1'}, optional
The function to use for the fit. (the default is 'fgauss_const')
return_residuals : bool, optional
Output the fit residuals (the default is False)
p0 : list, optional
First-guess coefficients. The fit can be quite sensitive to these choices.
bounds : 2-tuple of array_like, optional
Passed directly to scipy.optimize.curve_fit().
Returns
-------
dict of fit parameters:
{'centroid': fitted centroid
'e_centroid': std error of fitted peak centroid (covar diagonals)
'sigma': fitted sigma of peak
'e_sigma': std error of fitted sigma of peak (covar diagonals)
'nanflag': are there NaNs present
'pcov': covariance array - direct output of optimize.curve_fit
'popt': parameter array - direct output of optimize.curve_fit
'function_used': function used for fitting
'tot_counts_in_line': simple sum of y-values in used line region
'fit_successful': bool, did the fit give a non-errored output?
'scale_value': scaling factor used to normalize y-values
'residuals': optional, differences btwn data and optimized model output}
"""
# select out the region to fit
# this will be only consistent to +- integer pixels
fit_center = copy.copy(fit_center_in)
xx_index = np.arange(len(inp_x))
assert len(inp_x) == len(inp_y)
j1 = int(np.round(np.amax([0, fit_center - fit_width])))
j2 = int(round(np.amin([np.amax(xx_index), fit_center + fit_width])))
# define sub-arrays to fit
sub_x1 = inp_x[j1:j2]
sub_y1 = inp_y[j1:j2]
tot_counts_in_line = float(np.nansum(sub_y1))
# normalize the sub-array
try:
scale_value = np.nanmax(sub_y1)
except ValueError as e:
print(e, j1, j2, sub_x1, sub_y1)
raise  # scale_value would be undefined below, so re-raise here
sub_y_norm1 = sub_y1 / scale_value
# select out the finite elements
ii_good = np.isfinite(sub_y_norm1)
sub_x = sub_x1[ii_good]
sub_y_norm = sub_y_norm1[ii_good]
if sigma is not None:
sub_sigma1 = sigma[j1:j2]
ii_good = np.isfinite(sub_y_norm1) & (np.isfinite(sub_sigma1))
sub_sigma = sub_sigma1[ii_good]
sub_y_norm = sub_y_norm1[ii_good]
else:
sub_sigma = None
# note whether any NaNs were present
if len(sub_x) == len(sub_x1):
nanflag = False
else:
nanflag = True
# set up initial parameter guesses and select the fit function.
# the default guesses center the Gaussian at the middle of the input window,
# use a sigma of a few pixels, and a negative (absorption-like) amplitude;
# for the constant and line variants the offset starts at the local median or 1.
if func == 'fgauss':
if p0 is None:
p0 = (np.mean(sub_x), 5., -0.5)
use_function = fgauss
elif func == 'fgauss_const':
if p0 is None:
p0 = (np.mean(sub_x),1., -np.ptp(sub_y_norm), np.nanmedian(sub_y_norm))
use_function = fgauss_const
elif func == 'fgauss_line':
if p0 is None:
p0 = (np.mean(sub_x), 1., -0.5, 1., 0.)
use_function = fgauss_line
elif func == 'fgauss_from_1':
if p0 is None:
p0 = (np.mean(sub_x),1., -np.ptp(sub_y_norm))
use_function = fgauss_from_1
else:
raise ValueError
# perform the least squares fit
try:
popt, pcov = scipy.optimize.curve_fit(use_function,
sub_x,
sub_y_norm,
p0=p0,
sigma=sub_sigma,
maxfev=10000,
bounds=bounds)
# Pull out fit results
# fitted values (0 is the centroid, 1 is the sigma, 2 is the amp)
# lists used to facilitate json recording downstream
errs = np.diag(pcov)
centroid = popt[0]
centroid_error = np.sqrt(errs[0])
width = popt[1]
width_error = np.sqrt(errs[1])
fit_successful = True
pcov_list = pcov.tolist()
popt_list = popt.tolist()
except RuntimeError:
errs = np.NaN
centroid = np.NaN
centroid_error = np.NaN
width = np.NaN
width_error = np.NaN
fit_successful = False
pcov_list = []
popt_list = []
except ValueError as e:
print('ValueError: {}'.format(e))
errs = np.NaN
centroid = np.NaN
centroid_error = np.NaN
width = np.NaN
width_error = np.NaN
fit_successful = False
pcov_list = []
popt_list = []
except TypeError as e:
print('TypeError: {}'.format(e))
errs = np.NaN
centroid = np.NaN
centroid_error = np.NaN
width = np.NaN
width_error = np.NaN
fit_successful = False
pcov_list = []
popt_list = []
except:
print('unknown error')
errs = np.NaN
centroid = np.NaN
centroid_error = np.NaN
width = np.NaN
width_error = np.NaN
fit_successful = False
pcov_list = []
popt_list = []
if np.isnan(centroid_error) or np.isnan(centroid):
fit_successful = False
# build the returned dictionary
retval = {'centroid': centroid,
'e_centroid': centroid_error,
'sigma': width,
'e_sigma': width_error,
'nanflag': nanflag,
'pcov': pcov_list,
'popt': popt_list,
'indices_used': (j1, j2),
'function_used': func,
'tot_counts_in_line': tot_counts_in_line,
'fit_successful': fit_successful,
'scale_value':float(scale_value)}
# since residual array can be large, optionally include it
if return_residuals:
if fit_successful:
predicted = use_function(sub_x, *popt)
residuals = (predicted - sub_y_norm).tolist()
else:
residuals = np.NaN
retval['residuals'] = residuals
#return(retval['popt'][0], retval['popt'][1], retval['popt'][2], retval)
return(retval)
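# A minimal, self-contained sketch of fitProfile (arbitrary illustrative
# line parameters): fit a synthetic noisy absorption line and recover its
# centroid and the formal centroid uncertainty.
def _example_fitProfile():
    xx = np.arange(100, dtype=float)
    yy = fgauss_const(xx, 48.3, 2.0, -0.4, 1.0) + np.random.normal(0.0, 0.005, xx.size)
    result = fitProfile(xx, yy, fit_center_in=50, fit_width=8, func='fgauss_const')
    return result['centroid'], result['e_centroid']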
def generate_comb_wavelengths(mode_numbers,comb_f0,comb_fr):
""" Use the comb equation to generate known wavelengths.
Frequency comb equation f_n = f_0 + n * f_r used to define wavelengths for
arbitrary set of mode numbers.
Parameters
----------
mode_numbers : ndarray, int
Mode indices used; positive integers
comb_f0 : float
Offset frequency of comb in Hz
comb_fr : float
Repetition rate of comb in Hz
Returns
-------
ndarray
Comb wavelengths in angstroms
"""
# catch if a list is accidentally fed:
if isinstance(mode_numbers,list):
mode_numbers = np.array(mode_numbers)
freqs = comb_f0 + comb_fr * mode_numbers
wavelengths_vac = scipy.constants.c / freqs
wavelengths_vac_angstrom = wavelengths_vac / 1e-10
return(wavelengths_vac_angstrom)
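# A minimal usage sketch with illustrative (not instrument-specific) comb
# parameters: a 100 MHz offset frequency and 10 GHz repetition rate near
# mode 28000 correspond to vacuum wavelengths of roughly 1.07 microns.
def _example_comb_wavelengths():
    modes = np.arange(28000, 28005)
    return generate_comb_wavelengths(modes, comb_f0=1.0e8, comb_fr=1.0e10)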
def bugfix_biweight_location(array,**kargs):
""" Temperory bug fix for biweight_location which returns nan for zero varience array """
array = array[~np.isnan(array)] # Remove any nans
if np.any(mad_std(array,**kargs)==0):
return np.median(array,**kargs)
else:
return biweight_location(array,**kargs)
def subtract_Continuum_fromlines(inputspec,refspec=None,thresh_mask=None,thresh_window=21,mask_dilation=2,spline_kind='cubic'):
""" Returns a smooth continuum subtracted `inputspec` . If `refspec` is provided, it is used to create the mask fo the continuum region.
"""
# Use inputspec for thersholding if refspec is not provided
if refspec is None:
refspec = inputspec
Xaxis = np.arange(len(refspec))
if thresh_mask is None:
# Create a mask for the emission lines
ThresholdMask = np.atleast_2d(refspec) > filters.threshold_local(np.atleast_2d(refspec), thresh_window,offset=0)
# Dilate the mask
ThresholdMask = morphology.binary_dilation(ThresholdMask,selem=np.array([[1]*mask_dilation+[1]+[1]*mask_dilation]))[0]
else:
ThresholdMask = thresh_mask
pix_pos_list = []
continuum_list = []
for sli in np.ma.clump_unmasked(np.ma.array(refspec,mask=ThresholdMask)):
pix_pos_list.append(np.mean(Xaxis[sli]))
continuum_list.append(bugfix_biweight_location(inputspec[sli]))
Continuum_Func = interpolate.interp1d(pix_pos_list,continuum_list,kind=spline_kind,fill_value='extrapolate')
Continuum = Continuum_Func(Xaxis)
outspec = inputspec - Continuum
return outspec, Continuum, ThresholdMask
def measure_peaks_order(wl,fl,peak_locs,xx=None,pix_to_wvl=None,pix_to_wvl_per_pix=None,fitfunc='fgauss_const',continuum_subtract=False,
continuum_subtract_kw={}):
""" Fit all peaks in an order
Parameters
----------
wl : float arr
ndarray of wavelengths
fl : flat arr
ndarray of fluxes
peak_locs : list
List of peak locations to center fit windows on
xx : None, optional
Index values to use for fits - defaults to pixel number
pix_to_wvl : None, optional
Function for translating pixel to wavelength
pix_to_wvl_per_pix : None, optional
Function for translating pixel to dispersion
fitfunc : str, optional
Function to fit to lines
continuum_subtract : bool, optional
Subtract background continuum (only typically for comb)
continuum_subtract_kw : dict, optional
Keywords to pass to continuum subtraction method
Returns
-------
measure_peaks_order OrderedDict of fit results
.. deprecated:: 0.1
This function is only retained for backwards-compatibility - use fit_lines_order instead
"""
if xx is None:
xx = np.arange(len(wl))
if not isinstance(peak_locs,dict):
peak_locs_dict = OrderedDict()
mode_names = range(len(peak_locs))
for mi in mode_names:
peak_locs_dict[mi] = peak_locs[mi]
else:
peak_locs_dict = copy.deepcopy(peak_locs)
out = OrderedDict()
if pix_to_wvl is None:
pix_to_wvl = scipy.interpolate.interp1d(xx,wl,kind='cubic',bounds_error=False)
if pix_to_wvl_per_pix is None:
dwl = np.diff(wl)
dwl = np.append(dwl,dwl[-1])
pix_to_wvl_per_pix = scipy.interpolate.interp1d(xx,dwl,kind='cubic',bounds_error=False)
if continuum_subtract:
fl_subtracted, _, _ = subtract_Continuum_fromlines(fl, **continuum_subtract_kw)
fl = fl_subtracted
for mi in peak_locs_dict.keys():
loc_this = peak_locs_dict[mi]
if fitfunc == 'fgauss_const':
p0 = [loc_this,2.5,1.,0.]
elif fitfunc == 'fgauss_line':
p0 = [loc_this,2.5,1.,0.,0.]
elif fitfunc == 'fgauss':
p0 = [loc_this,2.1,1.]
else:
raise ValueError
tmp = fitProfile(xx,fl,loc_this,fit_width=8,sigma=None,
func=fitfunc,p0=p0)
#tmp['centroid_wl'] = interp(tmp['centroid'],xx_pix,xx_test)
dwl_per_pix = pix_to_wvl_per_pix(tmp['centroid'])
centroid_pix = tmp['centroid']
centroid_wl = pix_to_wvl(centroid_pix)[()]
fwhm_pix = 2.36 * tmp['sigma']
fwhm_wl = fwhm_pix * dwl_per_pix
fwhm_vel = fwhm_wl / centroid_wl * 3e8
peak_counts = tmp['scale_value']
out1 = OrderedDict()
out1['fit_output'] = tmp
out1['centroid_pix'] = centroid_pix
out1['centroid_wl'] = centroid_wl
out1['fwhm_pix'] = fwhm_pix
out1['fwhm_wl'] = fwhm_wl
out1['snr_peak'] = np.sqrt(peak_counts)
out1['prec_est'] = 0.4 * fwhm_vel / (np.sqrt(fwhm_pix) * np.sqrt(peak_counts))
out[mi] = out1
return(out)
def fit_lines_order(xx,fl,peak_locs,sigma=None,wl=None,pix_to_wvl=None,pix_to_wvl_per_pix=None,fitfunction='fgauss_const',
fit_width_pix=8,basic_window_check=True):
""" Fit all peaks in an order
This is a wrapper for the fitProfile function to do the (often used) task of repeated fitting of many lines
in a single spectral order.
Parameters
----------
xx : ndarray of float
ndarray of pixel values
fl : ndarray of float
ndarray of fluxes
peak_locs : list or dict
List of peak locations to center fit windows on.
If dict, the peaks are labeled by their respective keys.
If list, the peaks are given sequential labels.
sigma : optional, ndarray of float
ndarray of sigma values to send to fitter
wl : optional, ndarray of float
ndarray of wavelength values.
If not provided, no wavelength values are output.
pix_to_wvl : optional, function
Function for translating pixel to wavelength
If not provided, a cubic spline is used
pix_to_wvl_per_pix : optional, function
Function for translating pixel to dispersion
If not provided, a cubic spline is used
fitfunction : str, optional
Name of function to fit to lines, name must be accepted by fitProfile
fit_width_pix : int
Half-width of fitting window in pixels
basic_window_check : bool
Check whether the fitted centroid falls in given peak_loc +- fit_width_pix
Return NaNs if not
Returns
-------
OrderedDict of fit results. Each entry has (key,value) where
key = peak label as defined by input dictionary (or sequential labels if not provided)
value = OrderedDict of fit parameters as given by fitProfile.
.. note::
The interpolation functions are planned to be upgraded to a more stable form
(e.g. cumsum or PCHIP based)
"""
if not isinstance(peak_locs,dict):
peak_locs_dict = OrderedDict()
mode_names = range(len(peak_locs))
for mi in mode_names:
peak_locs_dict[mi] = peak_locs[mi]
else:
peak_locs_dict = copy.deepcopy(peak_locs)
out = OrderedDict()
# if we have a wavelength array, also translate the (d)pixels to (d)wavelengths
if wl is not None:
if pix_to_wvl is None:
pix_to_wvl = scipy.interpolate.interp1d(xx,wl,kind='cubic',bounds_error=False)
if pix_to_wvl_per_pix is None:
dwl = np.diff(wl)
dwl = np.append(dwl,dwl[-1])
pix_to_wvl_per_pix = scipy.interpolate.interp1d(xx,dwl,kind='cubic',bounds_error=False)
for mi in peak_locs_dict.keys():
loc_this = peak_locs_dict[mi]
if fitfunction == 'fgauss_const':
p0 = [loc_this,2.5,1.,0.]
elif fitfunction == 'fgauss_line':
p0 = [loc_this,2.5,1.,0.,0.]
elif fitfunction == 'fgauss':
p0 = [loc_this,2.1,1.]
else:
raise ValueError
try:
tmp = fitProfile(xx,fl,loc_this,fit_width=fit_width_pix,sigma=sigma,
func=fitfunction,p0=p0)
except (RuntimeError, ValueError, RuntimeWarning) as e:
tmp = OrderedDict()
tmp['fit_successful'] = False
tmp['sigma'] = np.NaN
tmp['scale_value'] = np.NaN
tmp['centroid'] = np.NaN
logging.warning(' ... ... Raised "{0}" on mode {1}'.format(e, mi))
#tmp['centroid_wl'] = interp(tmp['centroid'],xx_pix,xx_test)
centroid_pix = tmp['centroid']
if basic_window_check:
check_val = np.abs(loc_this - float(centroid_pix))
if check_val > fit_width_pix:
centroid_pix = np.nan
tmp['fit_successful'] = False
if wl is not None:
dwl_per_pix = pix_to_wvl_per_pix(tmp['centroid'])
centroid_wl = pix_to_wvl(centroid_pix)[()]
else:
dwl_per_pix = np.NaN
centroid_wl = np.NaN
fwhm_pix = 2.36 * tmp['sigma']
fwhm_wl = fwhm_pix * dwl_per_pix
fwhm_vel = fwhm_wl / centroid_wl * 3e8
peak_counts = tmp['scale_value']
if not tmp['fit_successful']:
fwhm_pix = np.NaN
fwhm_wl = np.NaN
fwhm_vel = np.NaN
peak_counts = np.NaN
out1 = OrderedDict()
out1['fit_output'] = tmp
out1['centroid_pix'] = centroid_pix
out1['centroid_wl'] = centroid_wl
out1['fwhm_pix'] = fwhm_pix
out1['fwhm_wl'] = fwhm_wl
out1['snr_peak'] = np.sqrt(peak_counts)
out1['prec_est'] = 0.4 * fwhm_vel / (np.sqrt(fwhm_pix) * np.sqrt(peak_counts))
out[mi] = out1
return(out)
def redshift(x, vo=0., ve=0.,def_wlog=False):
"""
Doppler shift a wavelength array.
Parameters
----------
x : float or ndarray of float
The wavelengths to be shifted.
vo : optional, float
The velocity of the observer [m/s]. (the default is 0.)
ve : optional, float
The velocity of the emitter [m/s]. (the default is 0.)
def_wlog : bool, optional
Is the input in logarithmic wavelength? (the default is False)
Returns
-------
float or ndarray of float
The emitted wavelength(s).
"""
if np.isnan(vo):
vo = 0 # treat a NaN observer velocity as zero
a = (1.0+vo/scipy.constants.c) / (1.0+ve/scipy.constants.c)
if def_wlog:
return x + np.log(a) # logarithmic
#return x + a # logarithmic + approximation v << c
else:
return x * a
#return x / (1.0-v/c)
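# A minimal usage sketch (arbitrary illustrative values): convert an observed
# 10000 A wavelength to the emitter's rest frame for a source receding at
# 1 km/s; the result is ~9999.967 A.
def _example_redshift():
    return redshift(np.array([10000.0]), vo=0.0, ve=1000.0)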
def datetime_avg(dates):
''' Return the average time for a list of datetime objects
Parameters
----------
dates : list of datetime objects
Returns
-------
datetime
Average datetime
'''
reference_date = datetime.datetime(1900, 1, 1)
if len(dates) == 0:
return None
else:
return(reference_date + sum([date - reference_date for date in dates],
datetime.timedelta()) / len(dates))
def getData(dataObj,fiber,choice,justext=False):
""" Helper function to retrieve level 1 data from neidData object
Parameters
----------
neidDataObj : neidData instance
neidData object to be parsed
fiber: str {SCI, CAL, or SKY}
fiber to be returned
choice: str {flux, wave, var}
type of data array to be returned
justext: optional, bool
only return extension number for fiber. Default is False
Returns
-------
extension number or data array, depending on justext
"""
#Check input validity
if fiber.upper() not in ['SCI','CAL','SKY']:
raise ValueError('fiber must be SCI / CAL / SKY, not "{}"'.format(fiber))
if choice.lower() not in ['flux','var','wave']:
raise ValueError('choice must be flux / var / wave, not "{}"'.format(choice))
cf = fiber.upper()
cc = choice.lower()
if cf == 'SCI' and cc == 'flux':
ext = 1
elif cf == 'SCI' and cc == 'var':
ext = 4
elif cf == 'SCI' and cc == 'wave':
ext = 7
elif cf == 'CAL' and cc == 'flux':
ext = 3
elif cf == 'CAL' and cc == 'var':
ext = 6
elif cf == 'CAL' and cc == 'wave':
ext = 9
elif cf == 'SKY' and cc == 'flux':
ext = 2
elif cf == 'SKY' and cc == 'var':
ext = 5
elif cf == 'SKY' and cc == 'wave':
ext = 8
if justext:
return(ext)
else:
return(dataObj[ext].data)
def pool_measure_velshift(cmdset):
""" Parallelization wrapper for measure_velshift
This function translates a list of dicts into the appropriate kw/args for
the measure_velshift function. This facilitates parallelization with the
method adopted in the NEID-DRP.
Parameters
----------
cmdset : list
        Each element is a dictionary with keys "Precal", "Postcal" and "Wcal"
Returns
-------
velocity_shift : list of float
List of velocity shifts corresponding to input line fits.
"""
out = measure_velshift(cmdset['Precal'],cmdset['Postcal'],cmdset['Wcal'])
return out
def measure_velshift(fits1,fits2,wave,pix_to_wavl_funcs=None):
''' Measure the velocity shift between two collections of mode fits
Assuming a certain wavelength solution, translate the pixel position
change of each mode into a velocity.
Parameters
----------
fits1 : Centroid dictionary
indexed as dict[spectrum_index][order_index][mode_index]
the stored value is just the pixel centroid (for start of night)
fits2 : Centroid dictionary
indexed as dict[spectrum_index][order_index][mode_index]
        the stored value is just the pixel centroid (for end of night)
wave : ndarray
Wavecal array
pix_to_wavl_funcs : optional wavelength solution function
How to translate pixel to wavelength.
If not provided, a cubic spline is used.
Returns
-------
List of float
List of velocity differences corresponding to each line fit in the "fits" inputs.
Notes
-----
This function requires that the two fit dictionaries have the same indexing. If order or mode index
keys are missing, the function will return None.
'''
allvals = []
if pix_to_wavl_funcs is None:
xx = np.arange(9216)
pix_to_wavl_funcs = OrderedDict()
for oi in fits1.keys():
pix_to_wavl_funcs[oi] = scipy.interpolate.interp1d(xx,wave[oi],kind='cubic',bounds_error=False)
for oi in fits1.keys():
if oi not in fits2.keys():
logging.warning('Order index {} is not shared between the fit sets being compared'.format(oi))
return(None)
for mi in fits1[oi].keys():
if mi not in fits2[oi].keys():
logging.warning('Order index {}, mode {} is not shared between the fit sets being compared'.format(oi,mi))
return(None)
pix_to_wavl = pix_to_wavl_funcs[oi]
pix1 = fits1[oi][mi]
pix2 = fits2[oi][mi]
wavl1 = pix_to_wavl(pix1)
wavl2 = pix_to_wavl(pix2)
diff = (wavl2 - wavl1)/wavl1 * 3e8
allvals.append(diff)
return(allvals)
def combine_peak_locations(fitlist):
''' Combine fit of pixel centroids for etalon or comb
Parameters
----------
fitlist : list of fit dictionaries
Each fit dictionary is indexed as dict[spectrum_index][order_index][mode_index]
and the stored value (itself also a dict) must have the 'centroid_pix' key
Returns
-------
    combined_centroids : OrderedDict
        Combined pixel centroids (biweight location across the input fits),
        indexed as dict[order_index][mode_index].
'''
nvals = len(fitlist)
    out = OrderedDict()
    for oi in fitlist[0].keys():
        out[oi] = OrderedDict()
        for mi in fitlist[0][oi].keys():
vals = []
for si in range(nvals):
vals.append(fitlist[si][oi][mi]['centroid_pix'])
out[oi][mi] = astropy.stats.biweight_location(vals,ignore_nan=True)
return(out)
def getLFCf0offset(isot,offsetfile):
''' Return the manual f0 LFC offset
This function reads a master file that has as columns:
effective_date_1[isot] f0_offset_1[hz]
effective_date_2[isot] f0_offset_2[hz]
interpreted as time between effective_date_1 - effective_date_2 has f0_offset_1
(i.e. f0 offset for that row applies beginning at the effective date)
Assumption is that last row on the table applies currently
and there is a 0th row that precedes all observations.
Parameters
----------
isot : str [isot]
Date of current wavecal.
offsetfile : str
Path to file described above.
Returns
-------
f0_offset : float
LFC offset frequency correction corresponding to input date.
'''
offsetTable = np.genfromtxt(offsetfile,
names=('isot','offset'),
dtype=[('isot','U19'),('offset',float)],
delimiter=None)
this_dt = astropy.time.Time(isot).to_datetime()
for i in range(len(offsetTable)):
compare_dt = astropy.time.Time(offsetTable['isot'][i]).to_datetime()
if compare_dt < this_dt:
offsetUse = offsetTable['offset'][i]
else:
break
return(offsetUse)
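# Example offset file contents (whitespace-delimited; values hypothetical):
#   2021-01-01T00:00:00   0.0
#   2021-06-15T12:00:00   -2e8
# A wavecal taken on 2021-07-01 would pick up the -2e8 Hz offset, since the
# last effective date preceding the wavecal date wins.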
| 33.363281 | 140 | 0.61837 | 4,502 | 34,164 | 4.566193 | 0.153487 | 0.016199 | 0.021112 | 0.01868 | 0.367223 | 0.327674 | 0.297174 | 0.276937 | 0.272316 | 0.261176 | 0 | 0.011877 | 0.287759 | 34,164 | 1,023 | 141 | 33.395894 | 0.832943 | 0.42776 | 0 | 0.367946 | 0 | 0 | 0.05662 | 0 | 0 | 0 | 0 | 0 | 0.011287 | 1 | 0.054176 | false | 0 | 0.031603 | 0 | 0.106095 | 0.011287 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8fa0f8b72083d7c88830c1eaf19a165da04a03d6 | 4,215 | py | Python | src/update_repositories_md.py | computer-science-engineering/readme | 0025ec188fa65f0fbd80f5f0a379ab3eb44bafc8 | [
"MIT"
] | null | null | null | src/update_repositories_md.py | computer-science-engineering/readme | 0025ec188fa65f0fbd80f5f0a379ab3eb44bafc8 | [
"MIT"
] | 8 | 2021-01-04T10:38:37.000Z | 2021-04-24T06:32:12.000Z | src/update_repositories_md.py | computer-science-engineering/readme | 0025ec188fa65f0fbd80f5f0a379ab3eb44bafc8 | [
"MIT"
] | null | null | null | import json
import os
from itertools import chain, groupby
from operator import itemgetter
from urllib.parse import urlparse
import git
import numpy as np
import validators
from mdutils.mdutils import MdUtils
LCS_REPO_NAME = 'learning-computer-science'
def walk_max_depth(top, maxdepth):
dirs, nondirs = [], []
for entry in os.scandir(top):
(dirs if entry.is_dir() else nondirs).append(entry.path)
yield top, dirs, nondirs
if maxdepth > 1:
for path in dirs:
for x in walk_max_depth(path, maxdepth - 1):
yield x
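# --- Illustrative usage sketch ---
def _walk_example():
    """Print each directory visited by walk_max_depth, descending at most
    two levels below the starting point."""
    for top, dirs, nondirs in walk_max_depth('.', 2):
        print(top, '-', len(dirs), 'dirs,', len(nondirs), 'files')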
def find_files():
"""Return the list of files to process."""
result = {}
root_dir = "./repositories"
cwd = os.getcwd()
#print(os.listdir(root_dir))
for root, dirs, files in walk_max_depth(root_dir, 2):
dirs.sort()
for file in files:
if file.endswith("metadata.json"):
                # join with the containing directory (not just the file name)
                # so metadata.json files in subdirectories resolve correctly
                metadatafile = os.path.normpath(os.path.join(cwd, root, file))
                with open(metadatafile) as metadata_file:
                    metadata = json.load(metadata_file)
                result[os.path.normpath(root)] = (metadatafile, metadata)
return result
def get_submodules(files):
submodules_result = {}
cwd = os.getcwd()
for key, value in files.items():
repo = git.Repo(key)
for submodule in repo.submodules:
path_to_submodule_part = os.path.normpath(
os.path.join(key, submodule.path))
path_to_metadata_file = os.path.normpath(
os.path.join(cwd, path_to_submodule_part, 'metadata.json'))
            with open(path_to_metadata_file) as metadata_file:
                metadata = json.load(metadata_file)
submodules_result[path_to_submodule_part] = (submodule.url,
metadata)
return dict(
chain.from_iterable(d.items() for d in (files, submodules_result)))
def get_data(files):
data = []
for key, value in files.items():
data_dict = {}
data_dict['type'] = value[1]['type']
data_dict['name'] = value[1]['name']
        valid = validators.url(value[0])
        if valid:  # validators.url returns True or a falsy ValidationFailure
parse_object = urlparse(value[0])
url_paths = parse_object.path.split('/')
repo_name = url_paths[-1]
else:
local_path_parts = value[0].split(os.path.sep)
repo_name = local_path_parts[-2]
        data_dict['url'] = \
            f'https://github.com/computer-science-engineering/{repo_name}'
if data_dict['type'] == 'Reading':
data_dict['reading_sub_header'] = get_reading_sub_header(value[1])
data.append(data_dict)
return data
def create_file(files):
data = get_data(files)
md_file = MdUtils(file_name='repositories')
md_file.new_header(level=1, title='Repositories')
    # groupby only merges consecutive items, so sort by type first
    grouped_by_type = groupby(sorted(data, key=itemgetter('type')),
                              key=itemgetter('type'))
for key, value in grouped_by_type:
value_sorted = sorted(value, key=lambda x: x['name'])
md_file.new_header(level=2, title=key)
if key == 'Reading':
write_reading_entries(value_sorted, md_file)
else:
for item in value_sorted:
write_item(item, md_file)
md_file.new_line()
md_file.create_md_file()
def write_reading_entries(data, md_file):
    # sort first: groupby only merges consecutive items
    grouped_by_sub_heading = groupby(
        sorted(data, key=itemgetter('reading_sub_header')),
        key=itemgetter('reading_sub_header'))
for key, value in grouped_by_sub_heading:
md_file.new_header(level=3, title=key)
for item in value:
write_item(item, md_file)
def write_item(item, md_file):
md_file.new_line(
'- ' + md_file.new_inline_link(link=item['url'], text=item['name']))
def get_reading_sub_header(file):
    if 'origin' in file and 'name' in file['origin'] and len(
            file['origin']['name']) > 0:
        if 'Notes - ' in file['origin']['name']:
            return file['origin']['name'].replace('Notes - ', '')
        return file['origin']['name']
    # fall back to a catch-all header (name chosen here) so every entry has
    # a sortable string key instead of None
    return 'Other'
def main():
"""main method."""
files = find_files()
files_including_submodules = get_submodules(files)
create_file(files_including_submodules)
if __name__ == '__main__':
main()
| 32.423077 | 83 | 0.612337 | 545 | 4,215 | 4.504587 | 0.231193 | 0.034216 | 0.021996 | 0.021181 | 0.154379 | 0.099389 | 0.052953 | 0.030957 | 0.030957 | 0.030957 | 0 | 0.004881 | 0.270937 | 4,215 | 129 | 84 | 32.674419 | 0.794012 | 0.018268 | 0 | 0.096154 | 0 | 0 | 0.074873 | 0.006058 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086538 | false | 0 | 0.086538 | 0 | 0.211538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8fa273b6250c57b641ea8334821621b4a5dcb2d9 | 625 | py | Python | intercepts.py | XDCoder3289/x_y_intercepts_coordinate-plane | f332829f0f8055c2a05b7cbf9564105b81e96407 | [
"MIT"
] | null | null | null | intercepts.py | XDCoder3289/x_y_intercepts_coordinate-plane | f332829f0f8055c2a05b7cbf9564105b81e96407 | [
"MIT"
] | null | null | null | intercepts.py | XDCoder3289/x_y_intercepts_coordinate-plane | f332829f0f8055c2a05b7cbf9564105b81e96407 | [
"MIT"
] | null | null | null | # Calculator for calculating x and y intercepts
xaxis = int(input("Enter the coefficient of x (excluding the variable): "))
yaxis = int(input("Enter the coefficient of y (excluding the variable): "))
equality = int(input("What is the equation equal to: "))
# example 3x + 4y = 6
# x - intercept can be found out by supposing y = 0
# 3x = 6
# x = 6/3
# x = 2
def x_intercept():
    # x-intercept: set y = 0, so xaxis * x = equality
    xint = equality / xaxis
    printansx = str(xint)
    print("x-intercept = " + printansx)

def y_intercept():
    # y-intercept: set x = 0, so yaxis * y = equality
    yint = equality / yaxis
    printansy = str(yint)
    print("y-intercept = " + printansy)
x_intercept()
y_intercept() | 27.173913 | 66 | 0.6256 | 87 | 625 | 4.448276 | 0.494253 | 0.103359 | 0.067183 | 0.082687 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019272 | 0.2528 | 625 | 23 | 67 | 27.173913 | 0.809422 | 0.2176 | 0 | 0 | 0 | 0 | 0.310195 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0 | 0 | 0.153846 | 0.307692 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8fa45edbf6a77ed236667d3c6563904200b0867d | 1,950 | py | Python | cimbar/util/symhash.py | sz3/cimbar | 6d41d8b7bf42a646e6afec5772525fc6296c4c5a | [
"MIT"
] | 25 | 2020-08-21T01:19:34.000Z | 2022-03-26T12:58:31.000Z | cimbar/util/symhash.py | sz3/cimbar | 6d41d8b7bf42a646e6afec5772525fc6296c4c5a | [
"MIT"
] | null | null | null | cimbar/util/symhash.py | sz3/cimbar | 6d41d8b7bf42a646e6afec5772525fc6296c4c5a | [
"MIT"
] | 4 | 2021-01-04T08:54:32.000Z | 2022-03-26T13:00:20.000Z | import imagehash
def matrix_slice(l, dim, start, end):
    """Slice a flattened dim x dim matrix; start and end are (x, y) tuples."""
    startX, startY = start
    endX, endY = end
    res = []
    for y in range(startY, endY):
        for x in range(startX, endX):
            res.append(l[x + (y * dim)])
    return res
class SymbolicHash:
def __init__(self, binary_array, dim=8):
if isinstance(binary_array, imagehash.ImageHash):
binary_array = binary_array.hash
self.full = imagehash.ImageHash(binary_array)
self.center = self._imagehash_slice(binary_array, (1, 1), (dim-1, dim-1))
corners = [
[(0, 0), (dim-2, dim-2)],
[(0, 1), (dim-2, dim-1)],
[(0, 2), (dim-2, dim)],
[(1, 0), (dim-1, dim-2)],
[(1, 1), (dim-1, dim-1)],
[(1, 2), (dim-1, dim)],
[(2, 0), (dim, dim-2)],
[(2, 1), (dim, dim-1)],
[(2, 2), (dim, dim)],
]
self.corners = [self._imagehash_slice(binary_array, *c) for c in corners]
def _imagehash_slice(self, binary_array, start, end):
startX, startY = start
endX, endY = end
res = binary_array[startX:endX, startY:endY]
return imagehash.ImageHash(res)
def __hash__(self):
return hash(self.full)
def __eq__(self, other):
return self - other == 0 # for now...
def __sub__(self, other):
# compare both centers to everything
# compare both full
mind = self.full - other.full
mind = min(mind, self.center - other.center)
for c in other.corners:
mind = min(mind, self.center - c)
for c in self.corners:
mind = min(mind, other.center - c)
return mind
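# --- Illustrative usage sketch; 'a.png' and 'b.png' are hypothetical files ---
def _symhash_demo():
    from PIL import Image  # imagehash already depends on Pillow
    h1 = symhash(Image.open('a.png'))
    h2 = symhash(Image.open('b.png'))
    # The difference is the minimum Hamming distance over the full hash,
    # the centers, and the shifted corner crops, so a small translation of
    # the symbol does not inflate the distance.
    return h1 - h2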
def symhash(img, size=8):
baseline = imagehash.average_hash(img, size)
return SymbolicHash(baseline, size)
| 29.545455 | 81 | 0.533846 | 262 | 1,950 | 3.847328 | 0.236641 | 0.098214 | 0.019841 | 0.041667 | 0.198413 | 0.081349 | 0.061508 | 0 | 0 | 0 | 0 | 0.02784 | 0.318462 | 1,950 | 65 | 82 | 30 | 0.730625 | 0.045128 | 0 | 0.08 | 0 | 0 | 0.02154 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.14 | false | 0 | 0.02 | 0.04 | 0.3 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8fa806eec0f9962a1d72ce6896e50f6d4b6fa870 | 3,417 | py | Python | leaf/doc_events/salary_slip.py | leaftechnology/leaf | 7f1ce0628a19605615a93c2673f08bdc91ca2c0d | [
"MIT"
] | null | null | null | leaf/doc_events/salary_slip.py | leaftechnology/leaf | 7f1ce0628a19605615a93c2673f08bdc91ca2c0d | [
"MIT"
] | null | null | null | leaf/doc_events/salary_slip.py | leaftechnology/leaf | 7f1ce0628a19605615a93c2673f08bdc91ca2c0d | [
"MIT"
] | 1 | 2020-06-11T21:44:59.000Z | 2020-06-11T21:44:59.000Z | import frappe
from datetime import *
@frappe.whitelist()
def add_leave_encashment(doc, method):
from_date = (datetime.strptime(doc.start_date, "%Y-%m-%d")).date()
to_date = (datetime.strptime(doc.end_date, "%Y-%m-%d")).date()
salary_structure = frappe.db.sql(""" SELECT * FROM `tabSalary Structure Assignment` WHERE salary_structure=%s and employee=%s""",(doc.salary_structure,doc.employee),as_dict=1)
amount = 0
leave = 0
while (from_date <= to_date):
leave_application = get_leave_application(from_date, doc.employee)
if len(leave_application) > 0:
leave += 1
from_date = (from_date + timedelta(days=1))
    reg = 30 - leave  # regular (non-leave) days, assuming a 30-day pay period
doc.total_leaves = leave
remaining_leaves = int(frappe.db.sql(""" SELECT * FROM `tabEmployee` WHERE name=%s """,doc.employee,as_dict=1)[0].leave_balance)
quarters = [{"quarter":"First Quarter", "days": 90}, {"quarter":"Second Quarter", "days": 60}, {"quarter":"Third Quarter", "days": 30}, {"quarter":"Fourth Quarter", "days": 0}]
for i in quarters:
if remaining_leaves > i.get("days") and leave > 0:
leave_deduction = remaining_leaves - i.get("days") #90 - 60
if leave_deduction >= leave:
leave_type = get_leave_type("Sick Leave", i.get("quarter"))
amount += ((leave_type[0].percentage / 100) * (salary_structure[0].base / 30)) * leave
remaining_leaves = remaining_leaves - leave
leave = 0
else:
leave_type = get_leave_type("Sick Leave", i.get("quarter"))
amount += ((leave_type[0].percentage / 100) * (salary_structure[0].base / 30)) * leave_deduction
                remaining_leaves = remaining_leaves - leave_deduction  # deduct only the days encashed in this tier
leave -= leave_deduction
add = True
for ii in doc.earnings:
if ii.__dict__['salary_component'] == "Basic":
add = False
ii.__dict__['amount'] = amount + ((salary_structure[0].base / 30) * reg)
if amount > 0 and add:
doc.append("earnings", {
"salary_component": "Basic",
"amount": amount + ((salary_structure[0].base / 30) * reg)
})
doc.remaining_leaves = remaining_leaves - leave
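# --- Illustrative tier arithmetic (plain Python, no frappe). The payout
# percentages below are hypothetical; the real values come from the
# `tabLeave Type Quarter Percentages` table queried in get_leave_type. ---
def _tier_payout_sketch(remaining_leaves=95, leave=10, base=3000.0):
    quarters = [(90, 100.0), (60, 75.0), (30, 50.0), (0, 25.0)]  # (floor, pct)
    amount = 0.0
    for floor, pct in quarters:
        if remaining_leaves > floor and leave > 0:
            available = remaining_leaves - floor  # days payable in this tier
            used = min(available, leave)
            amount += (pct / 100.0) * (base / 30.0) * used
            remaining_leaves -= used
            leave -= used
    return amount, remaining_leaves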
def update_leave_employee(leave,employee):
frappe.db.sql(""" UPDATE tabEmployee SET leave_balance=%s WHERE name=%s""",(str(leave),employee))
frappe.db.commit()
def get_leave_application(from_date, employee):
    # parameterized query: avoids SQL injection via string formatting
    query = """ SELECT * FROM `tabLeave Application`
                WHERE %s BETWEEN from_date and to_date
                and employee=%s and status=%s """
    return frappe.db.sql(query, (str(from_date), employee, "Approved"), as_dict=1)
def get_leave_balances(name):
    # parameterized query: avoids SQL injection via string formatting
    query = """ SELECT * FROM `tabLeave Balances` WHERE parent=%s ORDER BY idx DESC """
    return frappe.db.sql(query, (name,), as_dict=1)
def get_leave_type(leave_type, quarter):
return frappe.db.sql(""" SELECT * FROM `tabLeave Type Quarter Percentages` AS LTQP WHERE parent=%s and LTQP.type=%s""", (leave_type,quarter), as_dict=True)
def submit_salary_slip(doc, method):
update_leave_employee(doc.remaining_leaves, doc.employee)
def cancel_salary_slip(doc, method):
remaining_leaves = int(frappe.db.sql(""" SELECT * FROM `tabEmployee` WHERE name=%s """, doc.employee, as_dict=1)[0].leave_balance)
update_leave_employee(remaining_leaves + doc.total_leaves, doc.employee)
| 48.126761 | 180 | 0.648815 | 450 | 3,417 | 4.728889 | 0.22 | 0.084586 | 0.036184 | 0.031955 | 0.388628 | 0.286654 | 0.24906 | 0.24906 | 0.214286 | 0.214286 | 0 | 0.019181 | 0.206614 | 3,417 | 70 | 181 | 48.814286 | 0.765769 | 0.002049 | 0 | 0.172414 | 0 | 0.034483 | 0.215312 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12069 | false | 0 | 0.034483 | 0.017241 | 0.206897 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8fa83efc91ad5f26caada18999efa7a20f73758a | 1,427 | py | Python | setup.py | openeuler-mirror/A-Tune-Collector | ce1d990034eb3fbdcc8e0101be955a7370bdc6c8 | [
"MulanPSL-1.0"
] | null | null | null | setup.py | openeuler-mirror/A-Tune-Collector | ce1d990034eb3fbdcc8e0101be955a7370bdc6c8 | [
"MulanPSL-1.0"
] | null | null | null | setup.py | openeuler-mirror/A-Tune-Collector | ce1d990034eb3fbdcc8e0101be955a7370bdc6c8 | [
"MulanPSL-1.0"
] | null | null | null | import os
import shutil
from setuptools import setup, find_packages
from setuptools.command.install import install
from setuptools.command.test import test
version = '0.1'
class InstallScripts(install):
"""
install scripts
"""
def run(self):
install.run(self)
class TestCommand(test):
"""
test cases
"""
def run_tests(self):
os.system("py.test-%s %s" % (3, "tests"))
s = setup(name='atune_collector',
version=version,
description="The tool for data collection and analysis",
classifiers=[],
keywords='collection analysis',
url='',
license='MulanPSL-2.0',
packages=find_packages(".", exclude=['tests']),
data_files=[('/etc/atune_collector', ['atune_collector/collect_data.json',
'atune_collector/plugin/configurator/bootloader/grub2.json'])],
include_package_data=True,
zip_safe=False,
install_requires=['dict2xml'],
cmdclass={
'install': InstallScripts,
'test': TestCommand,
},
)
if 'install' in s.command_obj:
src_dir = "atune_collector/scripts"
dst_dir = os.path.join(s.command_obj['install'].install_lib, src_dir)
shutil.rmtree(dst_dir, ignore_errors=True)
shutil.copytree(src_dir, dst_dir)
os.system("chmod -R 750 %s" % dst_dir)
| 26.924528 | 111 | 0.602663 | 159 | 1,427 | 5.251572 | 0.490566 | 0.083832 | 0.050299 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009625 | 0.271899 | 1,427 | 52 | 112 | 27.442308 | 0.794033 | 0.01822 | 0 | 0 | 0 | 0 | 0.215328 | 0.082482 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.138889 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8fa88115d46b2c412775b4cba4b87b0497c0dd29 | 3,002 | py | Python | projects/CharGrid/inference.py | timctho/detectron2-chargrid | 547479c88ad7d1de2348377706167a84d024a622 | [
"Apache-2.0"
] | 3 | 2020-03-15T18:33:21.000Z | 2020-03-28T18:06:45.000Z | projects/CharGrid/inference.py | timctho/detectron2-chargrid | 547479c88ad7d1de2348377706167a84d024a622 | [
"Apache-2.0"
] | 2 | 2021-09-08T01:46:39.000Z | 2022-01-13T02:22:56.000Z | projects/CharGrid/inference.py | timctho/detectron2-chargrid | 547479c88ad7d1de2348377706167a84d024a622 | [
"Apache-2.0"
] | null | null | null | import os
import random
import logging
import glob

import cv2

from detectron2.config import get_cfg
from detectron2.data import get_detection_dataset_dicts, MetadataCatalog
from detectron2.engine import default_argument_parser, launch, DefaultPredictor
from detectron2.utils.visualizer import ColorMode, Visualizer

import data  # local module; presumably registers the datasets used below
def setup(args):
logging.basicConfig(level=logging.DEBUG)
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.DATALOADER.NUM_WORKERS = 8
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
def main(args):
cfg = setup(args)
predictor = DefaultPredictor(cfg)
if args.img != '':
show_prediction(args.img, predictor, args.scale)
elif args.dir != '':
files = []
for ext in ['/*.jpg', '/*.png']:
files.extend(glob.glob(args.dir + ext))
for file in files:
key = show_prediction(file, predictor, args.scale)
if key == ord('q'):
cv2.destroyAllWindows()
break
else:
dataset_dicts = get_detection_dataset_dicts(['bizcard_val'])
for d in random.sample(dataset_dicts, 300):
key = show_prediction(d['file_name'], predictor, args.scale)
if key == ord('q'):
cv2.destroyAllWindows()
break
def show_prediction(img_file, predictor, scale):
im = cv2.imread(img_file)
outputs = predictor(im)
v = Visualizer(im[:, :, ::-1],
metadata=MetadataCatalog.get('bizcard_val'),
scale=scale,
instance_mode=ColorMode.IMAGE_BW # remove the colors of unsegmented pixels
)
v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
cv2.imshow('', v.get_image()[:, :, ::-1])
key = cv2.waitKey(0)
return key
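# Example invocations (config path is hypothetical; --config-file comes from
# detectron2's default_argument_parser):
#   python inference.py --config-file configs/chargrid.yaml --img sample.jpg
#   python inference.py --config-file configs/chargrid.yaml --dir ./val_imgs --scale 0.6
# With neither --img nor --dir, 300 random samples from 'bizcard_val' are shown.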
if __name__ == "__main__":
parser = default_argument_parser()
parser.add_argument('--img', default='', type=str)
parser.add_argument('--dir', default='', type=str)
parser.add_argument('--scale', default=0.4, type=float)
args = parser.parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
| 33.730337 | 117 | 0.675883 | 368 | 3,002 | 5.309783 | 0.383152 | 0.078813 | 0.027636 | 0.024565 | 0.178096 | 0.134084 | 0.102354 | 0.102354 | 0.053224 | 0.053224 | 0 | 0.012356 | 0.218188 | 3,002 | 88 | 118 | 34.113636 | 0.820196 | 0.012991 | 0 | 0.077922 | 0 | 0 | 0.038838 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038961 | false | 0 | 0.233766 | 0 | 0.298701 | 0.012987 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8fab5fa61656c5e5cabced79af44120fbf3f0dc1 | 7,953 | py | Python | romshake/core/numerical_rom_builder.py | jrekoske/reduced-order-shaking | cd01ef9d6a313a06d2083d7dcb1cc59cb767cc47 | [
"MIT"
] | null | null | null | romshake/core/numerical_rom_builder.py | jrekoske/reduced-order-shaking | cd01ef9d6a313a06d2083d7dcb1cc59cb767cc47 | [
"MIT"
] | null | null | null | romshake/core/numerical_rom_builder.py | jrekoske/reduced-order-shaking | cd01ef9d6a313a06d2083d7dcb1cc59cb767cc47 | [
"MIT"
] | null | null | null | import os
import pickle
import logging
import numpy as np
import pandas as pd
from scipy.stats import qmc
from romshake.sample import voronoi
from romshake.core.reduced_order_model import ReducedOrderModel
FNAME = 'rom_builder.pkl'
class NumericalRomBuilder():
def __init__(self, folder, simulator, n_seeds_initial,
n_seeds_refine,
n_seeds_stop, samp_method,
bounds,
ranks=[], update_basis=False,
ml_regressors={}, rbf_kernels=[],
k_val=5, vor_kval_refine=None, vor_interp_refine=None):
"""Class for building reduced-order models from numerical simulations.
Args:
folder (str): Path associated with ROM data.
simulator (object): Simulator for generating
                new data from parameters. Can either evaluate analytically
                or launch numerical simulation jobs.
n_seeds_initial (int): Number of seeds for the first iteration.
n_seeds_refine (int): Number of seeds to generate with each
iteration.
n_seeds_stop (int): Maximum number of seeds.
samp_method (str): Sampling refinement strategy.
bounds (dict): Min/max values for the parameter space.
            ranks (list, optional): Candidate ranks of the basis.
                Defaults to [].
update_basis (bool, optional): Whether to update the basis with
each iteration. Defaults to False.
ml_regressors (dict, optional): Scikit-learn ML regressors.
The keys are strings identifying the regressors and
                the values are scikit-learn regressors. Defaults to {}.
rbf_kernels (list, optional): List of scipy rbf kernels (strings).
Defaults to [].
            k_val (int, optional): k value for k-fold cross-validation errors.
                Defaults to 5.
vor_kval_refine (int, optional): k-value for Voronoi refinement.
Defaults to None.
vor_interp_refine (str, optional): interpolator (string) for
Voronoi refinement. Defaults to None.
"""
self.folder = folder
self.simulator = simulator
self.n_seeds_initial = n_seeds_initial
self.n_seeds_refine = n_seeds_refine
self.n_seeds_stop = n_seeds_stop
self.samp_method = samp_method
self.bounds = bounds
self.ranks = ranks
self.update_basis = update_basis
self.ml_regressors = ml_regressors
self.rbf_kernels = rbf_kernels
self.k_val = k_val
self.vor_kval_refine = vor_kval_refine
self.vor_interp_refine = vor_interp_refine
self.dim = len(self.bounds.keys())
self.halton_sampler = qmc.Halton(d=self.dim, seed=0)
if not os.path.exists(folder):
os.makedirs(folder)
else:
raise ValueError(
'A ROM builder has already been started in the folder %s.'
' You should load that instead.' % folder)
# Set up the logging file
logfile = os.path.join(folder, 'output.log')
logging.basicConfig(
filename=logfile, level=logging.DEBUG,
format='%(asctime)s %(message)s')
initial_params, initial_indices = self.draw_samples(
'halton', n_seeds_initial)
initial_params, initial_data = self.run_forward_models(
initial_params, initial_indices)
# Create ROM from the initial data/parameters
self.rom = ReducedOrderModel(
initial_params, initial_data,
ranks, ml_regressors, rbf_kernels)
# Get k-fold errors and store
_, kf_error_means = self.rom.get_kfold_errors(self.k_val)
self.error_history = {rank: {
interp_name: [kf_error_means[rank][interp_name]]
for interp_name in kf_error_means[rank].keys()} for rank in ranks}
self.nsamples_history = [self.rom.P.shape[0]]
# Iteratively update the reduced order model
self.train()
@classmethod
def from_folder(cls, folder):
with open(os.path.join(folder, FNAME), 'rb') as f:
return pickle.load(f)
def draw_samples(self, sampling_method, n_samps=None):
"""Draws new samples to feed into the reduced order model.
Args:
sampling_method (str): Sampling method.
n_samps (int, optional): Number of samples to draw (for sampling
methods that use it). Defaults to None.
Returns:
tuple: Tuple of the samples and the indices.
"""
logging.info('Drawing new samples..')
min_vals = np.array([val[0] for val in self.bounds.values()])
max_vals = np.array([val[1] for val in self.bounds.values()])
if sampling_method == 'halton':
samples = qmc.scale(self.halton_sampler.random(
n=n_samps), min_vals, max_vals)
else:
kf_errors, _ = self.rom.get_kfold_errors(self.k_val)
samples = voronoi.voronoi_sample(
self.rom.P, min_vals, max_vals, kf_errors, sampling_method,
n_samps, self.vor_kval_refine, self.vor_interp_refine)
# Discard any samples that we already have run.
if hasattr(self, 'rom'):
new_samples_idxs = [
sample.tolist() not in self.rom.P.tolist()
for sample in samples]
samples = samples[new_samples_idxs]
logging.info('Drew %s new samples.' % len(samples))
# Store samples in a dataframe
newdf = pd.DataFrame(samples, columns=list(self.bounds.keys()))
if hasattr(self, 'df'):
start_idx = max(self.df.index) + 1
self.df = pd.concat([self.df, newdf]).reset_index(drop=True)
else:
self.df = newdf
start_idx = 0
indices = list(range(start_idx, start_idx + samples.shape[0]))
return samples, indices
def run_forward_models(self, params, indices):
"""Execute the forward models.
Args:
params (array): Array of the parameter values. Each row is a
forward model and each column is a parameter.
indices (list): List of the indices.
Returns:
tuple: Tuple contain the array of parameters that were succesfully
executed and the associated data.
"""
logging.info(
'Running forward models for simulation indices %s' % indices)
labels = list(self.bounds.keys())
params_dict = {label: param for label, param in zip(labels, params.T)}
return self.simulator.evaluate(
params_dict, indices=indices, folder=self.folder)
def train(self):
"""Run the training loop to build the reduced order model.
"""
while self.rom.P.shape[0] < self.n_seeds_stop:
logging.info(
'Current number of simulations: %s', self.rom.P.shape[0])
new_params, new_indices = self.draw_samples(
self.samp_method, self.n_seeds_refine)
new_params, new_data = self.run_forward_models(
new_params, new_indices)
self.rom.update(new_params, new_data, self.update_basis)
_, kf_error_means = self.rom.get_kfold_errors(self.k_val)
for rank in kf_error_means.keys():
for interp_name in kf_error_means[rank].keys():
self.error_history[rank][interp_name].append(
kf_error_means[rank][interp_name])
self.nsamples_history.append(self.rom.P.shape[0])
# Save the updated ROM builder
with open(os.path.join(self.folder, FNAME), 'wb') as outp:
pickle.dump(self, outp)
logging.info(
'Finished training the ROM. Ended with %s simulations.' %
self.rom.P.shape[0])
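# Usage sketch (illustrative; the simulator object and parameter bounds below
# are hypothetical):
#   nrb = NumericalRomBuilder(
#       folder='rom_run1', simulator=my_simulator, n_seeds_initial=20,
#       n_seeds_refine=10, n_seeds_stop=100, samp_method='halton',
#       bounds={'x': (0., 1.), 'y': (0., 1.)}, ranks=[5, 10])
#   # Training starts in __init__; to resume a saved run later:
#   nrb = NumericalRomBuilder.from_folder('rom_run1')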
| 40.576531 | 78 | 0.608324 | 994 | 7,953 | 4.693159 | 0.246479 | 0.019293 | 0.018006 | 0.013934 | 0.173633 | 0.101393 | 0.052519 | 0.0388 | 0.032583 | 0.017578 | 0 | 0.002179 | 0.307557 | 7,953 | 195 | 79 | 40.784615 | 0.844925 | 0.28178 | 0 | 0.070175 | 0 | 0 | 0.061636 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04386 | false | 0 | 0.070175 | 0 | 0.149123 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8fac5dd366786148f026b7a575637e3933c748e7 | 37,610 | py | Python | misc/model.py | TheShadow29/grounded-video-description | eae96ba8db084e9cec6aac5cb920112f761f3ac5 | [
"BSD-3-Clause"
] | 1 | 2021-04-19T12:05:34.000Z | 2021-04-19T12:05:34.000Z | misc/model.py | TheShadow29/grounded-video-description | eae96ba8db084e9cec6aac5cb920112f761f3ac5 | [
"BSD-3-Clause"
] | null | null | null | misc/model.py | TheShadow29/grounded-video-description | eae96ba8db084e9cec6aac5cb920112f761f3ac5 | [
"BSD-3-Clause"
] | 1 | 2021-07-12T07:16:18.000Z | 2021-07-12T07:16:18.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
from torch.autograd import Variable
import math
import numpy as np
import random
import pdb
import pickle
import misc.utils as utils
from misc.CaptionModelBU import CaptionModel
from misc.transformer import Transformer, TransformerDecoder
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.detect_size = opt.detect_size # number of object classes
self.input_encoding_size = opt.input_encoding_size
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = opt.seq_length
self.seg_info_size = 50
self.fc_feat_size = opt.fc_feat_size+self.seg_info_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.seq_per_img = opt.seq_per_img
self.itod = opt.itod
self.att_input_mode = opt.att_input_mode
self.transfer_mode = opt.transfer_mode
self.test_mode = opt.test_mode
self.enable_BUTD = opt.enable_BUTD
self.w_grd = opt.w_grd
self.w_cls = opt.w_cls
self.num_sampled_frm = opt.num_sampled_frm
self.num_prop_per_frm = opt.num_prop_per_frm
self.att_model = opt.att_model
self.unk_idx = int(opt.wtoi['UNK'])
if opt.region_attn_mode == 'add':
self.alpha_net = nn.Linear(self.att_hid_size, 1)
elif opt.region_attn_mode == 'cat':
self.alpha_net = nn.Linear(self.att_hid_size*2, 1)
self.stride = 32 # downsizing from input image to feature map
self.t_attn_size = opt.t_attn_size
self.tiny_value = 1e-8
if self.enable_BUTD:
assert(self.att_input_mode == 'region')
self.pool_feat_size = self.att_feat_size
else:
self.pool_feat_size = self.att_feat_size+300+self.detect_size+1
self.min_value = -1e8
opt.beta = 1
self.beta = opt.beta
self.loc_fc = nn.Sequential(nn.Linear(5, 300),
nn.ReLU(),
nn.Dropout(inplace=True))
self.embed = nn.Sequential(nn.Embedding(self.vocab_size,
self.input_encoding_size), # det is 1-indexed
nn.ReLU(),
nn.Dropout(self.drop_prob_lm, inplace=True))
if self.transfer_mode in ('none', 'cls'):
self.vis_encoding_size = 2048
elif self.transfer_mode == 'both':
self.vis_encoding_size = 2348
elif self.transfer_mode == 'glove':
self.vis_encoding_size = 300
else:
raise NotImplementedError
self.vis_embed = nn.Sequential(nn.Embedding(self.detect_size+1,
self.vis_encoding_size), # det is 1-indexed
nn.ReLU(),
nn.Dropout(self.drop_prob_lm, inplace=True)
)
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm, inplace=True))
self.seg_info_embed = nn.Sequential(nn.Linear(4, self.seg_info_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm, inplace=True))
self.att_embed = nn.ModuleList([nn.Sequential(nn.Linear(2048, self.rnn_size//2), # for rgb feature
nn.ReLU(),
nn.Dropout(self.drop_prob_lm, inplace=True)),
nn.Sequential(nn.Linear(1024, self.rnn_size//2), # for motion feature
nn.ReLU(),
nn.Dropout(self.drop_prob_lm, inplace=True))])
self.att_embed_aux = nn.Sequential(nn.BatchNorm1d(self.rnn_size),
nn.ReLU())
self.pool_embed = nn.Sequential(nn.Linear(self.pool_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm, inplace=True))
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.ctx2pool = nn.Linear(self.rnn_size, self.att_hid_size)
self.logit = nn.Linear(self.rnn_size, self.vocab_size)
if opt.obj_interact:
n_layers = 2
n_heads = 6
attn_drop = 0.2
self.obj_interact = Transformer(self.rnn_size, 0, 0,
d_hidden=int(self.rnn_size/2),
n_layers=n_layers,
n_heads=n_heads,
drop_ratio=attn_drop,
pe=False)
if self.att_model == 'transformer':
n_layers = 2
n_heads = 6
attn_drop = 0.2
            print('initialize language decoder transformer...')
self.cap_model = TransformerDecoder(self.rnn_size, 0, self.vocab_size, \
d_hidden = self.rnn_size//2, n_layers=n_layers, n_heads=n_heads, drop_ratio=attn_drop)
if opt.t_attn_mode == 'bilstm': # frame-wise feature encoding
n_layers = 2
attn_drop = 0.2
self.context_enc = nn.LSTM(self.rnn_size, self.rnn_size//2, n_layers, dropout=attn_drop, \
bidirectional=True, batch_first=True)
elif opt.t_attn_mode == 'bigru':
n_layers = 2
attn_drop = 0.2
self.context_enc = nn.GRU(self.rnn_size, self.rnn_size//2, n_layers, dropout=attn_drop, \
bidirectional=True, batch_first=True)
else:
raise NotImplementedError
self.ctx2pool_grd = nn.Sequential(nn.Linear(self.att_feat_size, self.vis_encoding_size), # fc7 layer
nn.ReLU(),
nn.Dropout(self.drop_prob_lm, inplace=True)
)
self.critLM = utils.LMCriterion(opt)
# initialize the glove weight for the labels.
# self.det_fc[0].weight.data.copy_(opt.glove_vg_cls)
# for p in self.det_fc[0].parameters(): p.requires_grad=False
# self.embed[0].weight.data.copy_(torch.cat((opt.glove_w, opt.glove_clss)))
# for p in self.embed[0].parameters(): p.requires_grad=False
# weights transfer for fc7 layer
        # pickle files must be opened in binary mode on Python 3
        with open('data/detectron_weights/fc7_w.pkl', 'rb') as f:
            fc7_w = torch.from_numpy(pickle.load(f))
        with open('data/detectron_weights/fc7_b.pkl', 'rb') as f:
            fc7_b = torch.from_numpy(pickle.load(f))
self.ctx2pool_grd[0].weight[:self.att_feat_size].data.copy_(fc7_w)
self.ctx2pool_grd[0].bias[:self.att_feat_size].data.copy_(fc7_b)
if self.transfer_mode in ('cls', 'both'):
# find nearest neighbour class for transfer
            with open('data/detectron_weights/cls_score_w.pkl', 'rb') as f:
                cls_score_w = torch.from_numpy(pickle.load(f)) # 1601x2048
            with open('data/detectron_weights/cls_score_b.pkl', 'rb') as f:
                cls_score_b = torch.from_numpy(pickle.load(f)) # 1601
assert(len(opt.itod)+1 == opt.glove_clss.size(0)) # index 0 is background
assert(len(opt.vg_cls) == opt.glove_vg_cls.size(0)) # index 0 is background
sim_matrix = torch.matmul(opt.glove_vg_cls/torch.norm(opt.glove_vg_cls, dim=1).unsqueeze(1), \
(opt.glove_clss/torch.norm(opt.glove_clss, dim=1).unsqueeze(1)).transpose(1,0))
max_sim, matched_cls = torch.max(sim_matrix, dim=0)
self.max_sim = max_sim
self.matched_cls = matched_cls
vis_classifiers = opt.glove_clss.new(self.detect_size+1, cls_score_w.size(1)).fill_(0)
self.vis_classifiers_bias = nn.Parameter(opt.glove_clss.new(self.detect_size+1).fill_(0))
vis_classifiers[0] = cls_score_w[0] # background
self.vis_classifiers_bias[0].data.copy_(cls_score_b[0])
for i in range(1, self.detect_size+1):
vis_classifiers[i] = cls_score_w[matched_cls[i]]
self.vis_classifiers_bias[i].data.copy_(cls_score_b[matched_cls[i]])
if max_sim[i].item() < 0.9:
print('index: {}, similarity: {:.2}, {}, {}'.format(i, max_sim[i].item(), \
opt.itod[i], opt.vg_cls[matched_cls[i]]))
if self.transfer_mode == 'cls':
self.vis_embed[0].weight.data.copy_(vis_classifiers)
else:
self.vis_embed[0].weight.data.copy_(torch.cat((vis_classifiers, opt.glove_clss), dim=1))
elif self.transfer_mode == 'glove':
self.vis_embed[0].weight.data.copy_(opt.glove_clss)
elif self.transfer_mode == 'none':
print('No knowledge transfer...')
else:
raise NotImplementedError
# for p in self.ctx2pool_grd.parameters(): p.requires_grad=False
# for p in self.vis_embed[0].parameters(): p.requires_grad=False
if opt.enable_visdom:
import visdom
self.vis = visdom.Visdom(server=opt.visdom_server, env='vis-'+opt.id)
def forward(self, segs_feat, seq, gt_seq, num, ppls, gt_boxes, mask_boxes, ppls_feat, frm_mask, sample_idx, pnt_mask, opt, eval_opt = {}):
if opt == 'MLE':
return self._forward(segs_feat, seq, gt_seq, ppls, gt_boxes, mask_boxes, num, ppls_feat, frm_mask, sample_idx, pnt_mask)
elif opt == 'GRD':
return self._forward(segs_feat, seq, gt_seq, ppls, gt_boxes, mask_boxes, num, ppls_feat, frm_mask, sample_idx, pnt_mask, True)
elif opt == 'sample':
seq, seqLogprobs, att2, sim_mat = self._sample(segs_feat, ppls, num, ppls_feat, sample_idx, pnt_mask, eval_opt)
return Variable(seq), Variable(att2), Variable(sim_mat)
def init_hidden(self, bsz):
weight = next(self.parameters()).data
return (Variable(weight.new(self.num_layers, bsz, self.rnn_size).zero_()),
Variable(weight.new(self.num_layers, bsz, self.rnn_size).zero_()))
def _grounder(self, xt, att_feats, mask, bias=None):
# xt - B, seq_cnt, enc_size
# att_feats - B, rois_num, enc_size
# mask - B, rois_num
#
# dot - B, seq_cnt, rois_num
B, S, _ = xt.size()
_, R, _ = att_feats.size()
if hasattr(self, 'alpha_net'):
# Additive attention for grounding
if self.alpha_net.weight.size(1) == self.att_hid_size:
dot = xt.unsqueeze(2) + att_feats.unsqueeze(1)
else:
dot = torch.cat((xt.unsqueeze(2).expand(B, S, R, self.att_hid_size),
att_feats.unsqueeze(1).expand(B, S, R, self.att_hid_size)), 3)
            dot = torch.tanh(dot)  # F.tanh is deprecated in recent PyTorch
dot = self.alpha_net(dot).squeeze(-1)
else:
# Dot-product attention for grounding
assert(xt.size(-1) == att_feats.size(-1))
dot = torch.matmul(xt, att_feats.permute(0,2,1).contiguous()) # B, seq_cnt, rois_num
if bias is not None:
assert(bias.numel() == dot.numel())
dot += bias
if mask.dim() == 2:
expanded_mask = mask.unsqueeze(1).expand_as(dot)
elif mask.dim() == 3: # if expanded already
expanded_mask = mask
else:
raise NotImplementedError
dot.masked_fill_(expanded_mask, self.min_value)
return dot
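    # Grounding attention comes in two flavors, selected in __init__ through
    # opt.region_attn_mode: additive ('add') or concatenation ('cat')
    # attention through self.alpha_net, otherwise plain dot-product attention
    # between word queries and region features. Either way, masked regions
    # are filled with self.min_value so they vanish under a downstream
    # softmax.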
def _forward(self, segs_feat, input_seq, gt_seq, ppls, gt_boxes, mask_boxes, num, ppls_feat, frm_mask, sample_idx, pnt_mask, eval_obj_ground=False):
seq = gt_seq[:, :self.seq_per_img, :].clone().view(-1, gt_seq.size(2)) # choose the first seq_per_img
seq = torch.cat((Variable(seq.data.new(seq.size(0), 1).fill_(0)), seq), 1)
input_seq = input_seq.view(-1, input_seq.size(2), input_seq.size(3)) # B*self.seq_per_img, self.seq_length+1, 5
input_seq_update = input_seq.data.clone()
batch_size = segs_feat.size(0) # B
seq_batch_size = seq.size(0) # B*self.seq_per_img
rois_num = ppls.size(1) # max_num_proposal of the batch
state = self.init_hidden(seq_batch_size) # self.num_layers, B*self.seq_per_img, self.rnn_size
rnn_output = []
roi_labels = [] # store which proposal match the gt box
att2_weights = []
h_att_output = []
max_grd_output = []
frm_mask_output = []
conv_feats = segs_feat
sample_idx_mask = conv_feats.new(batch_size, conv_feats.size(1), 1).fill_(1).byte()
for i in range(batch_size):
sample_idx_mask[i, sample_idx[i,0]:sample_idx[i,1]] = 0
fc_feats = torch.mean(segs_feat, dim=1)
fc_feats = torch.cat((F.layer_norm(fc_feats, [self.fc_feat_size-self.seg_info_size]), \
F.layer_norm(self.seg_info_embed(num[:, 3:7].float()), [self.seg_info_size])), dim=-1)
# pooling the conv_feats
pool_feats = ppls_feat
pool_feats = self.ctx2pool_grd(pool_feats)
g_pool_feats = pool_feats
# calculate the overlaps between the rois/rois and rois/gt_bbox.
# apply both frame mask and proposal mask
overlaps = utils.bbox_overlaps(ppls.data, gt_boxes.data, \
(frm_mask | pnt_mask[:, 1:].unsqueeze(-1)).data)
# visual words embedding
vis_word = Variable(torch.Tensor(range(0, self.detect_size+1)).type(input_seq.type()))
vis_word_embed = self.vis_embed(vis_word)
assert(vis_word_embed.size(0) == self.detect_size+1)
p_vis_word_embed = vis_word_embed.view(1, self.detect_size+1, self.vis_encoding_size) \
.expand(batch_size, self.detect_size+1, self.vis_encoding_size).contiguous()
if hasattr(self, 'vis_classifiers_bias'):
bias = self.vis_classifiers_bias.type(p_vis_word_embed.type()) \
.view(1,-1,1).expand(p_vis_word_embed.size(0), \
p_vis_word_embed.size(1), g_pool_feats.size(1))
else:
bias = None
# region-class similarity matrix
sim_mat_static = self._grounder(p_vis_word_embed, g_pool_feats, pnt_mask[:,1:], bias)
sim_mat_static_update = sim_mat_static.view(batch_size, 1, self.detect_size+1, rois_num) \
.expand(batch_size, self.seq_per_img, self.detect_size+1, rois_num).contiguous() \
.view(seq_batch_size, self.detect_size+1, rois_num)
sim_mat_static = F.softmax(sim_mat_static, dim=1)
if self.test_mode:
cls_pred = 0
else:
sim_target = utils.sim_mat_target(overlaps, gt_boxes[:,:,5].data) # B, num_box, num_rois
sim_mask = (sim_target > 0)
if not eval_obj_ground:
masked_sim = torch.gather(sim_mat_static, 1, sim_target)
masked_sim = torch.masked_select(masked_sim, sim_mask)
cls_loss = F.binary_cross_entropy(masked_sim, masked_sim.new(masked_sim.size()).fill_(1))
else:
# region classification accuracy
sim_target_masked = torch.masked_select(sim_target, sim_mask)
sim_mat_masked = torch.masked_select(torch.max(sim_mat_static, dim=1)[1].unsqueeze(1).expand_as(sim_target), sim_mask)
cls_pred = torch.stack((sim_target_masked, sim_mat_masked), dim=1).data
if not self.enable_BUTD:
loc_input = ppls.data.new(batch_size, rois_num, 5)
loc_input[:,:,:4] = ppls.data[:,:,:4] / 720.
loc_input[:,:,4] = ppls.data[:,:,4]*1./self.num_sampled_frm
loc_feats = self.loc_fc(Variable(loc_input)) # encode the locations
label_feat = sim_mat_static.permute(0,2,1).contiguous()
pool_feats = torch.cat((F.layer_norm(pool_feats, [pool_feats.size(-1)]), \
F.layer_norm(loc_feats, [loc_feats.size(-1)]), F.layer_norm(label_feat, [label_feat.size(-1)])), 2)
# replicate the feature to map the seq size.
fc_feats = fc_feats.view(batch_size, 1, self.fc_feat_size)\
.expand(batch_size, self.seq_per_img, self.fc_feat_size)\
.contiguous().view(-1, self.fc_feat_size)
pool_feats = pool_feats.view(batch_size, 1, rois_num, self.pool_feat_size)\
.expand(batch_size, self.seq_per_img, rois_num, self.pool_feat_size)\
.contiguous().view(-1, rois_num, self.pool_feat_size)
g_pool_feats = g_pool_feats.view(batch_size, 1, rois_num, self.vis_encoding_size) \
.expand(batch_size, self.seq_per_img, rois_num, self.vis_encoding_size) \
.contiguous().view(-1, rois_num, self.vis_encoding_size)
pnt_mask = pnt_mask.view(batch_size, 1, rois_num+1).expand(batch_size, self.seq_per_img, rois_num+1)\
.contiguous().view(-1, rois_num+1)
overlaps = overlaps.view(batch_size, 1, rois_num, overlaps.size(2)) \
.expand(batch_size, self.seq_per_img, rois_num, overlaps.size(2)) \
.contiguous().view(-1, rois_num, overlaps.size(2))
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
pool_feats = self.pool_embed(pool_feats)
# object region interactions
if hasattr(self, 'obj_interact'):
pool_feats = self.obj_interact(pool_feats)
        # Project the attention feats first to reduce memory and computation consumption.
p_pool_feats = self.ctx2pool(pool_feats) # same here
if self.att_input_mode in ('both', 'featmap'):
conv_feats_splits = torch.split(conv_feats, 2048, 2)
conv_feats = torch.cat([m(c) for (m,c) in zip(self.att_embed, conv_feats_splits)], dim=2)
conv_feats = conv_feats.permute(0,2,1).contiguous() # inconsistency between Torch TempConv and PyTorch Conv1d
conv_feats = self.att_embed_aux(conv_feats)
conv_feats = conv_feats.permute(0,2,1).contiguous() # inconsistency between Torch TempConv and PyTorch Conv1d
conv_feats = self.context_enc(conv_feats)[0]
conv_feats = conv_feats.masked_fill(sample_idx_mask, 0)
conv_feats = conv_feats.view(batch_size, 1, self.t_attn_size, self.rnn_size)\
.expand(batch_size, self.seq_per_img, self.t_attn_size, self.rnn_size)\
.contiguous().view(-1, self.t_attn_size, self.rnn_size)
p_conv_feats = self.ctx2att(conv_feats) # self.rnn_size (1024) -> self.att_hid_size (512)
else:
# dummy
conv_feats = pool_feats.new(1,1).fill_(0)
p_conv_feats = pool_feats.new(1,1).fill_(0)
if self.att_model == 'transformer': # Masked Transformer does not support box supervision yet
if self.att_input_mode == 'both':
lm_loss = self.cap_model([conv_feats, pool_feats], seq)
elif self.att_input_mode == 'featmap':
lm_loss = self.cap_model([conv_feats, conv_feats], seq)
elif self.att_input_mode == 'region':
lm_loss = self.cap_model([pool_feats, pool_feats], seq)
return lm_loss.unsqueeze(0), lm_loss.new(1).fill_(0), lm_loss.new(1).fill_(0), \
lm_loss.new(1).fill_(0), lm_loss.new(1).fill_(0), lm_loss.new(1).fill_(0)
elif self.att_model == 'topdown':
for i in range(self.seq_length):
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].data.sum() == 0:
break
xt = self.embed(it)
if not eval_obj_ground:
                    roi_label = utils.bbox_target(mask_boxes[:,:,:,i+1], overlaps, input_seq[:,i+1], \
                        input_seq_update[:,i+1], self.vocab_size) # roi_label is for the target seq
roi_labels.append(roi_label.view(seq_batch_size, -1))
# use frame mask during training
box_mask = mask_boxes[:,0,:,i+1].contiguous().unsqueeze(1).expand((
batch_size, rois_num, mask_boxes.size(2)))
frm_mask_on_prop = (torch.sum((~(box_mask | frm_mask)), dim=2)<=0)
frm_mask_on_prop = torch.cat((frm_mask_on_prop.new(batch_size, 1).fill_(0.), \
frm_mask_on_prop), dim=1) | pnt_mask
output, state, att2_weight, att_h, max_grd_val, grd_val = self.core(xt, fc_feats, \
conv_feats, p_conv_feats, pool_feats, p_pool_feats, pnt_mask, frm_mask_on_prop, \
state, sim_mat_static_update)
frm_mask_output.append(frm_mask_on_prop)
else:
output, state, att2_weight, att_h, max_grd_val, grd_val = self.core(xt, fc_feats, \
conv_feats, p_conv_feats, pool_feats, p_pool_feats, pnt_mask, pnt_mask, \
state, sim_mat_static_update)
att2_weights.append(att2_weight)
h_att_output.append(att_h) # the hidden state of attention LSTM
rnn_output.append(output)
max_grd_output.append(max_grd_val)
seq_cnt = len(rnn_output)
rnn_output = torch.cat([_.unsqueeze(1) for _ in rnn_output], 1) # seq_batch_size, seq_cnt, vocab
h_att_output = torch.cat([_.unsqueeze(1) for _ in h_att_output], 1)
att2_weights = torch.cat([_.unsqueeze(1) for _ in att2_weights], 1) # seq_batch_size, seq_cnt, att_size
max_grd_output = torch.cat([_.unsqueeze(1) for _ in max_grd_output], 1)
if not eval_obj_ground:
frm_mask_output = torch.cat([_.unsqueeze(1) for _ in frm_mask_output], 1)
roi_labels = torch.cat([_.unsqueeze(1) for _ in roi_labels], 1)
decoded = F.log_softmax(self.beta * self.logit(rnn_output), dim=2) # text word prob
decoded = decoded.view((seq_cnt)*seq_batch_size, -1)
# object grounding
h_att_all = h_att_output # hidden states from the Attention LSTM
xt_clamp = torch.clamp(input_seq[:, 1:seq_cnt+1, 0].clone()-self.vocab_size, min=0)
xt_all = self.vis_embed(xt_clamp)
if hasattr(self, 'vis_classifiers_bias'):
bias = self.vis_classifiers_bias[xt_clamp].type(xt_all.type()) \
.unsqueeze(2).expand(seq_batch_size, seq_cnt, rois_num)
else:
bias = 0
if not eval_obj_ground:
# att2_weights/ground_weights with both proposal mask and frame mask
ground_weights = self._grounder(xt_all, g_pool_feats, frm_mask_output[:,:,1:], bias+att2_weights)
lm_loss, att2_loss, ground_loss = self.critLM(decoded, att2_weights, ground_weights, \
seq[:, 1:seq_cnt+1].clone(), roi_labels[:, :seq_cnt, :].clone(), input_seq[:, 1:seq_cnt+1, 0].clone())
return lm_loss.unsqueeze(0), att2_loss.unsqueeze(0), ground_loss.unsqueeze(0), cls_loss.unsqueeze(0)
else:
# att2_weights/ground_weights with proposal mask only
ground_weights = self._grounder(xt_all, g_pool_feats, pnt_mask[:,1:], bias+att2_weights)
return cls_pred, torch.max(att2_weights.view(seq_batch_size, seq_cnt, self.num_sampled_frm, \
self.num_prop_per_frm), dim=-1)[1], torch.max(ground_weights.view(seq_batch_size, \
seq_cnt, self.num_sampled_frm, self.num_prop_per_frm), dim=-1)[1]
def _sample(self, segs_feat, ppls, num, ppls_feat, sample_idx, pnt_mask, opt={}):
sample_max = opt.get('sample_max', 1)
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
inference_mode = opt.get('inference_mode', True)
batch_size = segs_feat.size(0)
rois_num = ppls.size(1)
if beam_size > 1:
return self._sample_beam(segs_feat, ppls, num, ppls_feat, sample_idx, pnt_mask, opt)
conv_feats = segs_feat
sample_idx_mask = conv_feats.new(batch_size, conv_feats.size(1), 1).fill_(1).byte()
for i in range(batch_size):
sample_idx_mask[i, sample_idx[i,0]:sample_idx[i,1]] = 0
fc_feats = torch.mean(segs_feat, dim=1)
fc_feats = torch.cat((F.layer_norm(fc_feats, [self.fc_feat_size-self.seg_info_size]), \
F.layer_norm(self.seg_info_embed(num[:, 3:7].float()), [self.seg_info_size])), dim=-1)
pool_feats = ppls_feat
pool_feats = self.ctx2pool_grd(pool_feats)
g_pool_feats = pool_feats
att_mask = pnt_mask.clone()
# visual words embedding
vis_word = Variable(torch.Tensor(range(0, self.detect_size+1)).type(fc_feats.type())).long()
vis_word_embed = self.vis_embed(vis_word)
assert(vis_word_embed.size(0) == self.detect_size+1)
p_vis_word_embed = vis_word_embed.view(1, self.detect_size+1, self.vis_encoding_size) \
.expand(batch_size, self.detect_size+1, self.vis_encoding_size).contiguous()
if hasattr(self, 'vis_classifiers_bias'):
bias = self.vis_classifiers_bias.type(p_vis_word_embed.type()) \
.view(1,-1,1).expand(p_vis_word_embed.size(0), \
p_vis_word_embed.size(1), g_pool_feats.size(1))
else:
bias = None
sim_mat_static = self._grounder(p_vis_word_embed, g_pool_feats, pnt_mask[:,1:], bias)
sim_mat_static_update = sim_mat_static
sim_mat_static = F.softmax(sim_mat_static, dim=1)
if not self.enable_BUTD:
loc_input = ppls.data.new(batch_size, rois_num, 5)
loc_input[:,:,:4] = ppls.data[:,:,:4] / 720.
loc_input[:,:,4] = ppls.data[:,:,4]*1./self.num_sampled_frm
loc_feats = self.loc_fc(Variable(loc_input)) # encode the locations
label_feat = sim_mat_static.permute(0,2,1).contiguous()
pool_feats = torch.cat((F.layer_norm(pool_feats, [pool_feats.size(-1)]), F.layer_norm(loc_feats, \
[loc_feats.size(-1)]), F.layer_norm(label_feat, [label_feat.size(-1)])), 2)
# embed fc and att feats
pool_feats = self.pool_embed(pool_feats)
fc_feats = self.fc_embed(fc_feats)
# object region interactions
if hasattr(self, 'obj_interact'):
pool_feats = self.obj_interact(pool_feats)
        # Project the attention feats first to reduce memory and computation consumption.
p_pool_feats = self.ctx2pool(pool_feats)
if self.att_input_mode in ('both', 'featmap'):
conv_feats_splits = torch.split(conv_feats, 2048, 2)
conv_feats = torch.cat([m(c) for (m,c) in zip(self.att_embed, conv_feats_splits)], dim=2)
conv_feats = conv_feats.permute(0,2,1).contiguous() # inconsistency between Torch TempConv and PyTorch Conv1d
conv_feats = self.att_embed_aux(conv_feats)
conv_feats = conv_feats.permute(0,2,1).contiguous() # inconsistency between Torch TempConv and PyTorch Conv1d
conv_feats = self.context_enc(conv_feats)[0]
conv_feats = conv_feats.masked_fill(sample_idx_mask, 0)
p_conv_feats = self.ctx2att(conv_feats)
else:
conv_feats = pool_feats.new(1,1).fill_(0)
p_conv_feats = pool_feats.new(1,1).fill_(0)
if self.att_model == 'transformer':
if self.att_input_mode == 'both':
seq = self.cap_model([conv_feats, pool_feats], [], infer=True, seq_length=self.seq_length)
elif self.att_input_mode == 'featmap':
seq = self.cap_model([conv_feats, conv_feats], [], infer=True, seq_length=self.seq_length)
elif self.att_input_mode == 'region':
seq = self.cap_model([pool_feats, pool_feats], [], infer=True, seq_length=self.seq_length)
return seq, seq.new(batch_size, 1).fill_(0), seq.new(batch_size, 1).fill_(0).long()
elif self.att_model == 'topdown':
state = self.init_hidden(batch_size)
seq = []
seqLogprobs = []
att2_weights = []
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.data.new(batch_size).long().zero_()
elif sample_max:
sampleLogprobs_tmp, it_tmp = torch.topk(logprobs.data, 2, dim=1)
unk_mask = (it_tmp[:,0] != self.unk_idx) # mask on non-unk
sampleLogprobs = unk_mask.float()*sampleLogprobs_tmp[:,0] + (1-unk_mask.float())*sampleLogprobs_tmp[:,1]
it = unk_mask.long()*it_tmp[:,0] + (1-unk_mask.long())*it_tmp[:,1]
it = it.view(-1).long()
else:
if temperature == 1.0:
prob_prev = torch.exp(logprobs.data) # fetch prev distribution: shape Nx(M+1)
else:
# scale logprobs by temperature
prob_prev = torch.exp(torch.div(logprobs.data, temperature))
it = torch.multinomial(prob_prev, 1)
sampleLogprobs = logprobs.gather(1, Variable(it)) # gather the logprobs at sampled positions
it = it.view(-1).long() # and flatten indices for downstream processing
xt = self.embed(Variable(it))
if t >= 1:
seq.append(it) #seq[t] the input of t+2 time step
seqLogprobs.append(sampleLogprobs.view(-1))
if t < self.seq_length:
rnn_output, state, att2_weight, att_h, _, _ = self.core(xt, fc_feats, conv_feats, \
p_conv_feats, pool_feats, p_pool_feats, att_mask, pnt_mask, state, \
sim_mat_static_update)
decoded = F.log_softmax(self.beta * self.logit(rnn_output), dim=1)
logprobs = decoded
att2_weights.append(att2_weight)
seq = torch.cat([_.unsqueeze(1) for _ in seq], 1)
seqLogprobs = torch.cat([_.unsqueeze(1) for _ in seqLogprobs], 1)
att2_weights = torch.cat([_.unsqueeze(1) for _ in att2_weights], 1) # batch_size, seq_cnt, att_size
return seq, seqLogprobs, att2_weights, sim_mat_static
def _sample_beam(self, segs_feat, ppls, num, ppls_feat, sample_idx, pnt_mask, opt={}):
batch_size = ppls.size(0)
rois_num = ppls.size(1)
beam_size = opt.get('beam_size', 10)
conv_feats = segs_feat
sample_idx_mask = conv_feats.new(batch_size, conv_feats.size(1), 1).fill_(1).byte()
for i in range(batch_size):
sample_idx_mask[i, sample_idx[i,0]:sample_idx[i,1]] = 0
fc_feats = torch.mean(segs_feat, dim=1)
fc_feats = torch.cat((F.layer_norm(fc_feats, [self.fc_feat_size-self.seg_info_size]), \
F.layer_norm(self.seg_info_embed(num[:, 3:7].float()), [self.seg_info_size])), dim=-1)
pool_feats = ppls_feat
pool_feats = self.ctx2pool_grd(pool_feats)
g_pool_feats = pool_feats
# visual words embedding
vis_word = Variable(torch.Tensor(range(0, self.detect_size+1)).type(fc_feats.type())).long()
vis_word_embed = self.vis_embed(vis_word)
assert(vis_word_embed.size(0) == self.detect_size+1)
p_vis_word_embed = vis_word_embed.view(1, self.detect_size+1, self.vis_encoding_size) \
.expand(batch_size, self.detect_size+1, self.vis_encoding_size).contiguous()
if hasattr(self, 'vis_classifiers_bias'):
bias = self.vis_classifiers_bias.type(p_vis_word_embed.type()) \
.view(1,-1,1).expand(p_vis_word_embed.size(0), \
p_vis_word_embed.size(1), g_pool_feats.size(1))
else:
bias = None
sim_mat_static = self._grounder(p_vis_word_embed, g_pool_feats, pnt_mask[:,1:], bias)
sim_mat_static_update = sim_mat_static
sim_mat_static = F.softmax(sim_mat_static, dim=1)
if not self.enable_BUTD:
loc_input = ppls.data.new(batch_size, rois_num, 5)
loc_input[:,:,:4] = ppls.data[:,:,:4] / 720.
loc_input[:,:,4] = ppls.data[:,:,4]*1./self.num_sampled_frm
loc_feats = self.loc_fc(Variable(loc_input)) # encode the locations
label_feat = sim_mat_static.permute(0,2,1).contiguous()
pool_feats = torch.cat((F.layer_norm(pool_feats, [pool_feats.size(-1)]), F.layer_norm(loc_feats, [loc_feats.size(-1)]), \
F.layer_norm(label_feat, [label_feat.size(-1)])), 2)
# embed fc and att feats
pool_feats = self.pool_embed(pool_feats)
fc_feats = self.fc_embed(fc_feats)
# object region interactions
if hasattr(self, 'obj_interact'):
pool_feats = self.obj_interact(pool_feats)
        # Project the attention feats first to reduce memory and computation consumption.
p_pool_feats = self.ctx2pool(pool_feats)
if self.att_input_mode in ('both', 'featmap'):
conv_feats_splits = torch.split(conv_feats, 2048, 2)
conv_feats = torch.cat([m(c) for (m,c) in zip(self.att_embed, conv_feats_splits)], dim=2)
conv_feats = conv_feats.permute(0,2,1).contiguous() # inconsistency between Torch TempConv and PyTorch Conv1d
conv_feats = self.att_embed_aux(conv_feats)
conv_feats = conv_feats.permute(0,2,1).contiguous() # inconsistency between Torch TempConv and PyTorch Conv1d
conv_feats = self.context_enc(conv_feats)[0]
conv_feats = conv_feats.masked_fill(sample_idx_mask, 0)
p_conv_feats = self.ctx2att(conv_feats)
else:
conv_feats = pool_feats.new(1,1).fill_(0)
p_conv_feats = pool_feats.new(1,1).fill_(0)
vis_offset = (torch.arange(0, beam_size)*rois_num).view(beam_size).type_as(ppls.data).long()
roi_offset = (torch.arange(0, beam_size)*(rois_num+1)).view(beam_size).type_as(ppls.data).long()
seq = ppls.data.new(self.seq_length, batch_size).zero_().long()
seqLogprobs = ppls.data.new(self.seq_length, batch_size).float()
att2 = ppls.data.new(self.seq_length, batch_size).fill_(-1).long()
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
beam_fc_feats = fc_feats[k:k+1].expand(beam_size, fc_feats.size(1))
beam_pool_feats = pool_feats[k:k+1].expand(beam_size, rois_num, self.rnn_size).contiguous()
if self.att_input_mode in ('both', 'featmap'):
beam_conv_feats = conv_feats[k:k+1].expand(beam_size, conv_feats.size(1), self.rnn_size).contiguous()
beam_p_conv_feats = p_conv_feats[k:k+1].expand(beam_size, conv_feats.size(1), self.att_hid_size).contiguous()
else:
beam_conv_feats = beam_pool_feats.new(1,1).fill_(0)
beam_p_conv_feats = beam_pool_feats.new(1,1).fill_(0)
beam_p_pool_feats = p_pool_feats[k:k+1].expand(beam_size, rois_num, self.att_hid_size).contiguous()
beam_ppls = ppls[k:k+1].expand(beam_size, rois_num, 7).contiguous()
beam_pnt_mask = pnt_mask[k:k+1].expand(beam_size, rois_num+1).contiguous()
it = fc_feats.data.new(beam_size).long().zero_()
xt = self.embed(Variable(it))
beam_sim_mat_static_update = sim_mat_static_update[k:k+1].expand(beam_size, self.detect_size+1, rois_num)
rnn_output, state, att2_weight, att_h, _, _ = self.core(xt, beam_fc_feats, beam_conv_feats,
beam_p_conv_feats, beam_pool_feats, beam_p_pool_feats, beam_pnt_mask, beam_pnt_mask,
state, beam_sim_mat_static_update)
assert(att2_weight.size(0) == beam_size)
att2[0, k] = torch.max(att2_weight, 1)[1][0]
self.done_beams[k] = self.beam_search(state, rnn_output, beam_fc_feats, beam_conv_feats, beam_p_conv_feats, \
beam_pool_feats, beam_p_pool_feats, beam_sim_mat_static_update, beam_ppls, beam_pnt_mask, vis_offset, roi_offset, opt)
seq[:, k] = self.done_beams[k][0]['seq'].cuda() # the first beam has highest cumulative score
seqLogprobs[:, k] = self.done_beams[k][0]['logps'].cuda()
att2[1:, k] = self.done_beams[k][0]['att2'][1:].cuda()
return seq.t(), seqLogprobs.t(), att2.t()
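    # Illustrative sketch (not used by the model) of the per-item beam
    # expansion pattern above: features of batch item k, shaped (1, ...), are
    # replicated across the beam dimension before decoding.
    @staticmethod
    def _expand_to_beam(feats, beam_size):
        return feats.expand(beam_size, *feats.size()[1:]).contiguous()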
| 50.619112 | 168 | 0.602632 | 5,275 | 37,610 | 3.992038 | 0.078863 | 0.038465 | 0.015956 | 0.014959 | 0.639757 | 0.574366 | 0.527021 | 0.469085 | 0.430715 | 0.413335 | 0 | 0.021157 | 0.282398 | 37,610 | 742 | 169 | 50.687332 | 0.759087 | 0.086546 | 0 | 0.399287 | 0 | 0 | 0.018507 | 0.004087 | 0 | 0 | 0 | 0 | 0.016043 | 1 | 0.012478 | false | 0 | 0.030303 | 0 | 0.065954 | 0.00713 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8fac820506cfa2b173f4a71f0937824ec8098018 | 5,371 | py | Python | apps/front/urls.py | 1786016767jjk/pj | 17de9e2ab2bac842abf7ad4605c00dcb1a7d079f | [
"Apache-2.0"
] | null | null | null | apps/front/urls.py | 1786016767jjk/pj | 17de9e2ab2bac842abf7ad4605c00dcb1a7d079f | [
"Apache-2.0"
] | null | null | null | apps/front/urls.py | 1786016767jjk/pj | 17de9e2ab2bac842abf7ad4605c00dcb1a7d079f | [
"Apache-2.0"
] | null | null | null | # Front-end (user-facing) views
from flask.views import MethodView
from apps.front.forms import SendSmsCodeForm,SignupFrom,FindpwdFrom,SendCodeForm,AddPostForm,SigninFrom
from flask import Blueprint,make_response
from flask import render_template,session
from flask import views,request,jsonify
import string,random
from apps.common.baseResp import *
import json
from dysms_python.demo_sms_send import send_sms
from apps.common.captcha.xtcaptcha import Captcha
from io import BytesIO
from apps.common.memcachedUtil import saveCache,delete,getCache
from apps.front.models import *
from apps.common.models import Banner,Board,Post
from functools import wraps
from config import FRONT_USER_ID
from flask import redirect
from flask import url_for
#
bp = Blueprint('front',__name__)
def loginDecorator(func):
    """Decorator that restricts a view to logged-in users."""
    @wraps(func)
    def inner(*args, **kwargs):
        if not session.get(FRONT_USER_ID, None):  # not logged in
            return redirect(location=url_for("front.signin"))
        else:
            r = func(*args, **kwargs)
            return r
    return inner
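# Example usage of the decorator (illustrative only). Function-based views can
# be wrapped directly:
#
#   @bp.route("/profile/")
#   @loginDecorator
#   def profile():
#       return render_template("front/profile.html")
#
# Class-based views attach it through the `decorators` attribute, as the
# Addpost view does below.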
@bp.route("/")
def loginView():
    # query the carousel banners (top 4 by priority)
banners = Banner.query.order_by(Banner.priority.desc()).limit(4)
board = Board.query.all()
posts=Post.query.all()
context = {
'banners':banners,
'boards':board,
'posts':posts
}
return render_template("front/index.html",**context)
class Signup(MethodView):
    def get(self):
        # page from which the signup button was clicked (Referer: http://127.0.0.1:9000/signin/)
        location = request.headers.get("Referer")
        if not location:  # if the signup URL was entered directly, the Referer is empty
            location = '/'
        context = {
            'location': location
        }
        return render_template("front/signup.html", **context)

    def post(self):
        fm = SignupFrom(formdata=request.form)
        if fm.validate():
            # save this user to the database
            u = FrontUser(telephone=fm.telephone.data,
                          username=fm.username.data,
                          password=fm.password.data)
            db.session.add(u)
            db.session.commit()
            delete(fm.telephone.data)  # signup succeeded, delete the SMS verification code
            return jsonify(respSuccess("Signup successful, that really wasn't easy!"))
        else:
            return jsonify(respParamErr(fm.err))
@bp.route("/send_sms_code/",methods=['post'])
def sendSMSCode():
fm = SendSmsCodeForm(formdata=request.form)
if fm.validate():
        # generate the verification code
        source = string.digits
        source = ''.join(random.sample(source, 4))
        # send the verification code
        r = send_sms(phone_numbers=fm.telephone.data, smscode=source)  # b'{"Message":"OK","RequestId":"26F47853-F6CD-486A-B3F7-7DFDCE119713","BizId":"102523637951132428^0","Code":"OK"}'
        if json.loads(r.decode("utf-8"))['Code'] == 'OK':
            # store it in the cache for 30 minutes
            saveCache(fm.telephone.data, source, 30*60)
            return jsonify(respSuccess("SMS verification code sent, please check your messages"))
        else:  # sending failed
            return jsonify(respParamErr("Sending failed, please check the network"))
else:
return jsonify(respParamErr(fm.err))
@bp.route("/img_code/")
def ImgCode():
    # generate a short random string
    # draw the string onto an image
    # using a special font
    # add strike-through lines
    # add noise dots
    text, img = Captcha.gene_code()  # generate the captcha via the helper class
    print(text)
    out = BytesIO()  # initialize an in-memory byte stream
    img.save(out, 'png')  # save in PNG format
    out.seek(0)  # rewind to the start of the stream
    saveCache(text, text, 60)
    resp = make_response(out.read())  # build a response from the stream
    resp.content_type = "image/png"  # set the content-type response header
return resp
class Signin(MethodView):
    def get(self):
        return render_template("front/signin.html")

    def post(self):
        fm = SigninFrom(formdata=request.form)
        if fm.validate():
            # look up the user by telephone number
            user = FrontUser.query.filter(FrontUser.telephone == fm.telephone.data).first()
            if not user:
                return jsonify(respParamErr("Not registered"))
            # compare the password
            r = user.checkPwd(fm.password.data)
            if r:
                return jsonify(respSuccess("Login successful"))
            else:
                return jsonify(respParamErr("Wrong password"))
        else:
            return jsonify(respParamErr(fm.err))
class Addpost(views.MethodView):
    decorators = [loginDecorator]

    def get(self):
        # query all of the boards
        board = Board.query.all()
        context = {
            "boards": board
        }
        return render_template("front/addpost.html", **context)

    def post(self):
        fm = AddPostForm(formdata=request.form)
        if fm.validate():
            # save to the database
            user_id = session[FRONT_USER_ID]
            post = Post(title=fm.title.data, content=fm.content.data,
                        board_id=fm.boarder_id.data, user_id=user_id)
            db.session.add(post)
            db.session.commit()
            return jsonify(respSuccess("Posted successfully"))
        else:
            print(respParamErr(fm.err))
            return jsonify(respParamErr(fm.err))
bp.add_url_rule("/addpost/",endpoint='addpost',view_func=Addpost.as_view('addpost'))
bp.add_url_rule("/signin/",endpoint='signin',view_func=Singnin.as_view('signin'))
bp.add_url_rule("/signup/",endpoint='signup',view_func=Signup.as_view('signup'))
# SMS verification code:
# register an account on Aliyun
# apply for an accesskey
# apply for a signature and a template
# download the Python demo
# modify demo_sms_send.py inside the demo
# call it from within the project
# Image captcha:
# 1. use the PIL library to generate the captcha image
# 2. return it to the client
# 3. switch images on the client by changing the src attribute via JS | 31.046243 | 183 | 0.62614 | 620 | 5,371 | 5.341935 | 0.346774 | 0.043176 | 0.052838 | 0.037742 | 0.176027 | 0.156099 | 0.10628 | 0.10628 | 0.035024 | 0.035024 | 0 | 0.015885 | 0.24986 | 5,371 | 173 | 184 | 31.046243 | 0.806155 | 0.101098 | 0 | 0.259843 | 0 | 0 | 0.062905 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.094488 | false | 0.015748 | 0.141732 | 0.015748 | 0.433071 | 0.031496 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8fac9ed6a463a4b70203af101a3a252ddbc496f4 | 1,911 | py | Python | logger.py | thatsIch/sublime-rainmeter | 89d67adfd0ef196360785aa2aedecb693f71e965 | [
"MIT"
] | 56 | 2015-11-16T06:45:51.000Z | 2021-12-31T10:06:55.000Z | logger.py | thatsIch/sublime-rainmeter | 89d67adfd0ef196360785aa2aedecb693f71e965 | [
"MIT"
] | 82 | 2016-11-06T01:18:50.000Z | 2021-12-15T04:37:50.000Z | logger.py | thatsIch/sublime-rainmeter | 89d67adfd0ef196360785aa2aedecb693f71e965 | [
"MIT"
] | 8 | 2015-11-26T19:28:49.000Z | 2021-03-08T22:39:26.000Z | """This module provides general methods for logging purposes.
Basic operations are:
* info
* error
with these operations it is easier to track from where the information is printed
"""
import os
import inspect
from datetime import datetime
import sublime
__LOG = None
# plugin_loaded() below is called automatically by ST3 once the plugin is
# loaded. It is required because of the async call: sublime.* must not be
# used from the main (import-time) routine.
__SETTING_KEY = "rainmeter_enable_logging"
def plugin_loaded():
"""Will be called when sublime API is ready to use."""
settings = __load_settings()
settings.add_on_change(__SETTING_KEY, __load_settings)
info("Logger succesfully loaded.")
def __load_settings():
settings = sublime.load_settings("Rainmeter.sublime-settings")
global __LOG
__LOG = settings.get(__SETTING_KEY, False)
return settings
def info(message):
"""
Display information about the current state it is in.
Only shown if logging is enabled.
"""
if __LOG:
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
caller = calframe[1]
caller_name = caller[3]
caller_file = caller[1]
_log("info", caller_file, caller_name, message)
def error(message):
"""
Display error states.
Always shown because supposed not to reach that level.
"""
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
caller = calframe[1]
caller_name = caller[3]
caller_file = caller[1]
_log("error", caller_file, caller_name, message)
def _log(error_type, file_path, function, string):
now = datetime.now()
timestamp = now.strftime("%H:%M:%S.%f")[:-3]
filename = os.path.basename(file_path)
withoutext = os.path.splitext(filename)[0]
print("[" + timestamp + "]", "[" + error_type + "]", withoutext + "." + function + ':', string)
| 22.482353 | 99 | 0.682365 | 238 | 1,911 | 5.281513 | 0.470588 | 0.038186 | 0.050915 | 0.055688 | 0.23389 | 0.23389 | 0.186158 | 0.186158 | 0.186158 | 0.186158 | 0 | 0.007299 | 0.211408 | 1,911 | 84 | 100 | 22.75 | 0.826808 | 0.207745 | 0 | 0.277778 | 0 | 0 | 0.076692 | 0.037594 | 0 | 0 | 0 | 0 | 0 | 1 | 0.138889 | false | 0 | 0.111111 | 0 | 0.277778 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8fb18dd9019ef56c95d458fb46ca62af50467f38 | 4,655 | py | Python | doc_gen/doc_gen.py | alters-mit/multimodal_challenge | 1a2c6bdde290e474fa2cd6400970ef723f50ac30 | [
"MIT"
] | 1 | 2022-01-23T08:57:02.000Z | 2022-01-23T08:57:02.000Z | doc_gen/doc_gen.py | alters-mit/multimodal_challenge | 1a2c6bdde290e474fa2cd6400970ef723f50ac30 | [
"MIT"
] | null | null | null | doc_gen/doc_gen.py | alters-mit/multimodal_challenge | 1a2c6bdde290e474fa2cd6400970ef723f50ac30 | [
"MIT"
] | null | null | null | from pathlib import Path
from py_md_doc import PyMdDoc
import re
if __name__ == "__main__":
# API documentation.
md = PyMdDoc(input_directory=Path("../multimodal_challenge"), files=["dataset/dataset_trial.py",
"dataset/env_audio_materials.py",
"multimodal_object_init_data.py",
"multimodal_base.py",
"trial.py"])
md.get_docs(output_directory=Path("../doc/api"))
# Multimodal API documentation.
md = PyMdDoc(input_directory=Path("../multimodal_challenge"), files=["multimodal.py"],
metadata_path=Path("doc_metadata.json"))
doc = md.get_doc(Path("../multimodal_challenge/multimodal.py"))
# Get the Magnebot API. This assumes that it's located in the home directory.
magnebot_api = Path.home().joinpath("magnebot/doc/api/magnebot_controller.md").read_text(encoding="utf-8")
# Fix relative links.
magnebot_api = re.sub(r"\[(.*?)\]\((?!https)(.*?)\.md\)",
r"[\1](https://github.com/alters-mit/magnebot/blob/main/doc/api/\2.md)", magnebot_api)
# Remove code examples.
magnebot_api = re.sub(r"(```python((.|\n)*?)```\n)", "", magnebot_api)
# Remove this paragraph.
magnebot_api = re.sub(r"(Images of occupancy maps can be found(.*)\.\n\n)", "", magnebot_api, flags=re.MULTILINE)
# Remove this sentence.
magnebot_api = magnebot_api.replace("This only works if you've loaded an occupancy map via "
"`self.init_floorplan_scene()`.\n\n\n", "")
# Get all of the movement actions from the Magnebot API.
api_txt = re.search(r"(### Movement((.|\n)*?))#", magnebot_api, flags=re.MULTILINE).group(1)
for action in ["turn_by", "turn_to", "move_by", "move_to"]:
api_txt += re.search(f"(#### {action}((.|\n)*?))#", magnebot_api, flags=re.MULTILINE).group(1)
    # Get all of the arm articulation actions from the Magnebot API.
    api_txt += re.search(r"(### Arm Articulation((.|\n)*?))#", magnebot_api, flags=re.MULTILINE).group(1)
for action in ["reach_for", "grasp", "drop", "reset_arm"]:
api_txt += re.search(f"(#### {action}((.|\n)*?))#", magnebot_api, flags=re.MULTILINE).group(1)
# Append the movement actions before the Torso section.
doc = re.sub(r"((.|\n)*?)(### Torso)", r"\1" + api_txt + "***\n\n" + r"\3", doc)
# Append camera actions.
doc += "### Camera\n\n_These commands rotate the Magnebot's camera or add additional camera to the scene." \
" They advance the simulation by exactly 1 frame._\n\n"
for action in ["rotate_camera", "reset_camera"]:
doc += re.search(f"(#### {action}((.|\n)*?))#", magnebot_api, flags=re.MULTILINE).group(1)
# Append misc.
doc += "### Misc.\n\n_These are utility functions that won't advance the simulation by any frames._\n\n"
for action in ["get_occupancy_position", "get_visible_objects", "end"]:
doc += re.search(f"(#### {action}((.|\n)*?))#", magnebot_api, flags=re.MULTILINE).group(1)
# Append class variables.
magnebot_class_vars = re.search(f"## Class Variables" + r"\n((.|\n)*?)\*$", magnebot_api,
flags=re.MULTILINE).group(1). \
replace("| Variable | Type | Description |\n| --- | --- | --- |", "").strip()
doc = re.sub(f"## Class Variables" + r"\n((.|\n)*?)\n\*", "## Class Variables\n" + r"\1" + magnebot_class_vars, doc)
# Append fields.
magnebot_fields = re.search(f"## Fields" + r"\n((.|\n)*?)\*$", magnebot_api, flags=re.MULTILINE).group(1)
doc = re.sub(f"## Fields" + r"\n((.|\n)*?)\n\*", "## Fields" + r"\1" + magnebot_fields, doc)
# Append other sections.
sections = ""
for s in ["Frames", "Parameter types"]:
section = re.search(f"## {s}\n" + r"((.|\n)*?)\*\*\*", magnebot_api, flags=re.MULTILINE).group(0)
sections += section + "\n\n"
doc = re.sub(r"## Fields", sections + "\n## Fields\n", doc)
doc = doc.replace("[TOC-MM]", PyMdDoc.get_toc(doc)).replace("****", "***").replace("\n\n\n", "\n\n")
Path("../doc/api/multimodal.md").write_text(doc, encoding="utf-8")
# Dataset generation documentation.
md = PyMdDoc(input_directory=Path("../dataset_generation"), files=["dataset.py", "rehearsal.py",
"occupancy_mapper.py", "init_data.py"])
md.get_docs(output_directory=Path("../doc/dataset"))
| 62.905405 | 120 | 0.565199 | 586 | 4,655 | 4.346416 | 0.269625 | 0.090695 | 0.051826 | 0.066745 | 0.369454 | 0.332548 | 0.303887 | 0.279545 | 0.253632 | 0.253632 | 0 | 0.005091 | 0.240387 | 4,655 | 73 | 121 | 63.767123 | 0.715215 | 0.108915 | 0 | 0.08 | 0 | 0.06 | 0.369886 | 0.094408 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.06 | 0 | 0.06 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8fb2aab2d43bbb4a377c18a57b8bf99fa7afb6cb | 13,288 | py | Python | squidasm/sim/stack/common.py | QuTech-Delft/squidasm | 1d223a428620a2fb42a8a0e321dce732a1678f0d | [
"MIT"
] | 2 | 2022-01-16T00:25:27.000Z | 2022-01-21T13:56:45.000Z | squidasm/sim/stack/common.py | QuTech-Delft/squidasm | 1d223a428620a2fb42a8a0e321dce732a1678f0d | [
"MIT"
] | 8 | 2022-01-20T14:00:47.000Z | 2022-03-29T13:05:21.000Z | squidasm/sim/stack/common.py | QuTech-Delft/squidasm | 1d223a428620a2fb42a8a0e321dce732a1678f0d | [
"MIT"
] | 1 | 2022-01-23T18:55:47.000Z | 2022-01-23T18:55:47.000Z | import logging
from dataclasses import dataclass
from typing import Dict, Generator, List, Optional, Set, Tuple, Union
import netsquid as ns
from netqasm.lang import operand
from netqasm.lang.encoding import RegisterName
from netqasm.sdk.shared_memory import Arrays, RegisterGroup, setup_registers
from netsquid.components.component import Component, Port
from netsquid.protocols import Protocol
from pydynaa import EventExpression
class SimTimeFilter(logging.Filter):
def filter(self, record):
record.simtime = ns.sim_time()
return True
class LogManager:
STACK_LOGGER = "Stack"
_LOGGER_HAS_BEEN_SETUP = False
@classmethod
def _setup_stack_logger(cls) -> None:
logger = logging.getLogger(cls.STACK_LOGGER)
formatter = logging.Formatter(
"%(levelname)s:%(simtime)s ns:%(name)s:%(message)s"
)
syslog = logging.StreamHandler()
syslog.setFormatter(formatter)
syslog.addFilter(SimTimeFilter())
logger.addHandler(syslog)
logger.propagate = False
cls._LOGGER_HAS_BEEN_SETUP = True
@classmethod
def get_stack_logger(cls, sub_logger: Optional[str] = None) -> logging.Logger:
if not cls._LOGGER_HAS_BEEN_SETUP:
cls._setup_stack_logger()
logger = logging.getLogger(cls.STACK_LOGGER)
if sub_logger is None:
return logger
else:
return logger.getChild(sub_logger)
@classmethod
def set_log_level(cls, level: Union[int, str]) -> None:
logger = cls.get_stack_logger()
logger.setLevel(level)
@classmethod
def get_log_level(cls) -> int:
return cls.get_stack_logger().level
@classmethod
def log_to_file(cls, path: str) -> None:
fileHandler = logging.FileHandler(path, mode="w")
formatter = logging.Formatter(
"%(levelname)s:%(simtime)s ns:%(name)s:%(message)s"
)
fileHandler.setFormatter(formatter)
fileHandler.addFilter(SimTimeFilter())
cls.get_stack_logger().addHandler(fileHandler)
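# Illustrative sketch of typical LogManager usage (not called anywhere in the
# stack): route the stack logger to a file, raise verbosity, grab a child.
def _example_logging_setup(path: str = "stack.log") -> logging.Logger:
    LogManager.log_to_file(path)
    LogManager.set_log_level(logging.DEBUG)
    return LogManager.get_stack_logger("Example")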
class PortListener(Protocol):
def __init__(self, port: Port, signal_label: str) -> None:
self._buffer: List[bytes] = []
self._port: Port = port
self._signal_label = signal_label
self.add_signal(signal_label)
@property
def buffer(self) -> List[bytes]:
return self._buffer
def run(self) -> Generator[EventExpression, None, None]:
while True:
# Wait for an event saying that there is new input.
yield self.await_port_input(self._port)
counter = 0
# Read all inputs and count them.
while True:
input = self._port.rx_input()
if input is None:
break
self._buffer += input.items
counter += 1
# If there are n inputs, there have been n events, but we yielded only
# on one of them so far. "Flush" these n-1 additional events:
while counter > 1:
yield self.await_port_input(self._port)
counter -= 1
# Only after having yielded on all current events, we can schedule a
# notification event, so that its reactor can handle all inputs at once.
self.send_signal(self._signal_label)
class RegisterMeta:
@classmethod
def prefixes(cls) -> List[str]:
return ["R", "C", "Q", "M"]
@classmethod
def parse(cls, name: str) -> Tuple[RegisterName, int]:
assert len(name) >= 2
assert name[0] in cls.prefixes()
group = RegisterName[name[0]]
index = int(name[1:])
assert index < 16
return group, index
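# Tiny usage sketch (illustrative only): register strings combine a
# one-letter group with an index below 16, e.g. "R5" -> (RegisterName.R, 5).
def _parse_register_example() -> None:
    group, index = RegisterMeta.parse("R5")
    assert group == RegisterName.R and index == 5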
class ComponentProtocol(Protocol):
def __init__(self, name: str, comp: Component) -> None:
super().__init__(name)
self._listeners: Dict[str, PortListener] = {}
self._logger: logging.Logger = LogManager.get_stack_logger(
f"{self.__class__.__name__}({comp.name})"
)
def add_listener(self, name, listener: PortListener) -> None:
self._listeners[name] = listener
def _receive_msg(
self, listener_name: str, wake_up_signal: str
) -> Generator[EventExpression, None, str]:
listener = self._listeners[listener_name]
if len(listener.buffer) == 0:
yield self.await_signal(sender=listener, signal_label=wake_up_signal)
return listener.buffer.pop(0)
def start(self) -> None:
super().start()
for listener in self._listeners.values():
listener.start()
def stop(self) -> None:
for listener in self._listeners.values():
listener.stop()
super().stop()
class AppMemory:
def __init__(self, app_id: int, max_qubits: int) -> None:
self._app_id: int = app_id
self._registers: Dict[RegisterName, RegisterGroup] = setup_registers()
self._arrays: Arrays = Arrays()
self._virt_qubits: Dict[int, Optional[int]] = {
i: None for i in range(max_qubits)
}
self._prog_counter: int = 0
@property
def prog_counter(self) -> int:
return self._prog_counter
def increment_prog_counter(self) -> None:
self._prog_counter += 1
def set_prog_counter(self, value: int) -> None:
self._prog_counter = value
def map_virt_id(self, virt_id: int, phys_id: int) -> None:
self._virt_qubits[virt_id] = phys_id
def unmap_virt_id(self, virt_id: int) -> None:
self._virt_qubits[virt_id] = None
def unmap_all(self) -> None:
for virt_id in self._virt_qubits:
self._virt_qubits[virt_id] = None
@property
def qubit_mapping(self) -> Dict[int, Optional[int]]:
return self._virt_qubits
    def phys_id_for(self, virt_id: int) -> Optional[int]:
return self._virt_qubits[virt_id]
def virt_id_for(self, phys_id: int) -> Optional[int]:
for virt, phys in self._virt_qubits.items():
if phys == phys_id:
return virt
return None
def set_reg_value(self, register: Union[str, operand.Register], value: int) -> None:
if isinstance(register, str):
name, index = RegisterMeta.parse(register)
else:
name, index = register.name, register.index
self._registers[name][index] = value
def get_reg_value(self, register: Union[str, operand.Register]) -> int:
if isinstance(register, str):
name, index = RegisterMeta.parse(register)
else:
name, index = register.name, register.index
return self._registers[name][index]
# for compatibility with netqasm Futures
def get_register(self, register: Union[str, operand.Register]) -> Optional[int]:
return self.get_reg_value(register)
# for compatibility with netqasm Futures
def get_array_part(
self, address: int, index: Union[int, slice]
) -> Union[None, int, List[Optional[int]]]:
if isinstance(index, int):
return self.get_array_value(address, index)
elif isinstance(index, slice):
return self.get_array_values(address, index.start, index.stop)
def init_new_array(self, address: int, length: int) -> None:
self._arrays.init_new_array(address, length)
def get_array(self, address: int) -> List[Optional[int]]:
return self._arrays._get_array(address)
def get_array_entry(self, array_entry: operand.ArrayEntry) -> Optional[int]:
address, index = self.expand_array_part(array_part=array_entry)
result = self._arrays[address, index]
assert (result is None) or isinstance(result, int)
return result
def get_array_value(self, addr: int, offset: int) -> Optional[int]:
address, index = self.expand_array_part(
array_part=operand.ArrayEntry(operand.Address(addr), offset)
)
result = self._arrays[address, index]
assert (result is None) or isinstance(result, int)
return result
def get_array_values(
self, addr: int, start_offset: int, end_offset
) -> List[Optional[int]]:
values = self.get_array_slice(
operand.ArraySlice(operand.Address(addr), start_offset, end_offset)
)
assert values is not None
return values
def set_array_entry(
self, array_entry: operand.ArrayEntry, value: Optional[int]
) -> None:
address, index = self.expand_array_part(array_part=array_entry)
self._arrays[address, index] = value
def set_array_value(self, addr: int, offset: int, value: Optional[int]) -> None:
address, index = self.expand_array_part(
array_part=operand.ArrayEntry(operand.Address(addr), offset)
)
self._arrays[address, index] = value
def get_array_slice(
self, array_slice: operand.ArraySlice
) -> Optional[List[Optional[int]]]:
address, index = self.expand_array_part(array_part=array_slice)
result = self._arrays[address, index]
assert (result is None) or isinstance(result, list)
return result
def expand_array_part(
self, array_part: Union[operand.ArrayEntry, operand.ArraySlice]
) -> Tuple[int, Union[int, slice]]:
address: int = array_part.address.address
index: Union[int, slice]
if isinstance(array_part, operand.ArrayEntry):
if isinstance(array_part.index, int):
index = array_part.index
else:
index_from_reg = self.get_reg_value(register=array_part.index)
if index_from_reg is None:
raise RuntimeError(
f"Trying to use register {array_part.index} "
"to index an array but its value is None"
)
index = index_from_reg
elif isinstance(array_part, operand.ArraySlice):
startstop: List[int] = []
for raw_s in [array_part.start, array_part.stop]:
if isinstance(raw_s, int):
startstop.append(raw_s)
elif isinstance(raw_s, operand.Register):
s = self.get_reg_value(register=raw_s)
if s is None:
raise RuntimeError(
f"Trying to use register {raw_s} to "
"index an array but its value is None"
)
startstop.append(s)
else:
raise RuntimeError(
f"Something went wrong: raw_s should be int "
f"or Register but is {type(raw_s)}"
)
index = slice(*startstop)
else:
raise RuntimeError(
f"Something went wrong: array_part is a {type(array_part)}"
)
return address, index
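# Illustrative sketch (not exercised by the stack, assumes netqasm is
# installed): reading and writing registers and arrays through AppMemory.
def _app_memory_example() -> None:
    mem = AppMemory(app_id=0, max_qubits=2)
    mem.set_reg_value("R0", 7)
    assert mem.get_reg_value("R0") == 7
    mem.init_new_array(address=1, length=3)
    mem.set_array_value(addr=1, offset=0, value=42)
    assert mem.get_array_value(addr=1, offset=0) == 42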
@dataclass
class NetstackCreateRequest:
app_id: int
remote_node_id: int
epr_socket_id: int
qubit_array_addr: int
arg_array_addr: int
result_array_addr: int
@dataclass
class NetstackReceiveRequest:
app_id: int
remote_node_id: int
epr_socket_id: int
qubit_array_addr: int
result_array_addr: int
@dataclass
class NetstackBreakpointCreateRequest:
app_id: int
@dataclass
class NetstackBreakpointReceiveRequest:
app_id: int
class AllocError(Exception):
pass
class PhysicalQuantumMemory:
def __init__(self, qubit_count: int) -> None:
self._qubit_count = qubit_count
self._allocated_ids: Set[int] = set()
self._comm_qubit_ids: Set[int] = {i for i in range(qubit_count)}
@property
def qubit_count(self) -> int:
return self._qubit_count
@property
def comm_qubit_count(self) -> int:
return len(self._comm_qubit_ids)
def allocate(self) -> int:
"""Allocate a qubit (communcation or memory)."""
for i in range(self._qubit_count):
if i not in self._allocated_ids:
self._allocated_ids.add(i)
return i
raise AllocError("No more qubits available")
def allocate_comm(self) -> int:
"""Allocate a communication qubit."""
for i in range(self._qubit_count):
if i not in self._allocated_ids and i in self._comm_qubit_ids:
self._allocated_ids.add(i)
return i
raise AllocError("No more comm qubits available")
def allocate_mem(self) -> int:
"""Allocate a memory qubit."""
for i in range(self._qubit_count):
if i not in self._allocated_ids and i not in self._comm_qubit_ids:
self._allocated_ids.add(i)
return i
raise AllocError("No more mem qubits available")
def free(self, id: int) -> None:
self._allocated_ids.remove(id)
def is_allocated(self, id: int) -> bool:
return id in self._allocated_ids
def clear(self) -> None:
        self._allocated_ids = set()
class NVPhysicalQuantumMemory(PhysicalQuantumMemory):
def __init__(self, qubit_count: int) -> None:
super().__init__(qubit_count)
self._comm_qubit_ids: Set[int] = {0}
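# Usage sketch (illustrative only): with NV hardware qubit 0 is the sole
# communication qubit, so allocate_comm() can succeed at most once while
# allocate_mem() hands out the remaining storage qubits.
def _memory_allocation_example() -> None:
    mem = NVPhysicalQuantumMemory(qubit_count=3)
    comm = mem.allocate_comm()    # qubit 0
    storage = mem.allocate_mem()  # qubit 1
    mem.free(comm)
    assert not mem.is_allocated(comm)
    assert mem.is_allocated(storage)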
| 33.725888 | 88 | 0.620184 | 1,614 | 13,288 | 4.885998 | 0.157373 | 0.02739 | 0.020289 | 0.006974 | 0.352904 | 0.326401 | 0.29191 | 0.244103 | 0.203779 | 0.175374 | 0 | 0.001685 | 0.285446 | 13,288 | 393 | 89 | 33.811705 | 0.828857 | 0.03966 | 0 | 0.274194 | 0 | 0 | 0.039881 | 0.01052 | 0 | 0 | 0 | 0 | 0.022581 | 1 | 0.158065 | false | 0.003226 | 0.032258 | 0.035484 | 0.374194 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8fb746921db0beee169291a09d57914c192dae4a | 3,599 | py | Python | rocheml/test/test_datasetgenerator.py | haydenroche5/rocheml | e081915f83697a6b95d7e13a63eb9a69e4bdde84 | [
"MIT"
] | null | null | null | rocheml/test/test_datasetgenerator.py | haydenroche5/rocheml | e081915f83697a6b95d7e13a63eb9a69e4bdde84 | [
"MIT"
] | null | null | null | rocheml/test/test_datasetgenerator.py | haydenroche5/rocheml | e081915f83697a6b95d7e13a63eb9a69e4bdde84 | [
"MIT"
] | null | null | null | import unittest
from datasetio.datasetwriter import DatasetWriter
from datasetio.datasetgenerator import DatasetGenerator
import h5py
import os
import numpy as np
import string
import random
import math
class TestDatasetGenerator(unittest.TestCase):
def setUp(self):
self.feat_length = 5
self.seq_length = 5
self.buffer_size = 5
self.num_rows = 50
self.dataset_file_path = 'test.hdf'
self.dataset_name = 'test'
self.dtypes = [('feat_seq', 'float', (self.seq_length,
self.feat_length)),
('label', 'int'), ('file', h5py.string_dtype())]
self.dataset_writer = DatasetWriter('test', self.num_rows, self.dtypes,
self.dataset_file_path,
self.buffer_size)
self.taken_files = set()
def tearDown(self):
os.remove(self.dataset_file_path)
def initialize_expected_rows(self):
expected_rows = []
for i in range(0, self.num_rows):
zero_features = np.zeros((self.seq_length, self.feat_length))
row = self.generate_row(zero_features, 0, '')
expected_rows.append(row)
return expected_rows
def generate_row(self, features, label, file):
return {'feat_seq': features, 'label': label, 'file': file}
def generate_random_row(self):
features = np.random.rand(self.seq_length, self.feat_length)
label = np.random.randint(2)
letters = string.ascii_lowercase
# Generate a unique file name, i.e. one that hasn't been used in this test yet.
file = ''.join(random.choice(letters) for i in range(10)) + '.mp4'
while file in self.taken_files:
file = ''.join(random.choice(letters) for i in range(10)) + '.mp4'
self.taken_files.add(file)
return {'feat_seq': features, 'label': label, 'file': file}
def check_db(self, batch_size, expected_rows, shuffle):
gen = DatasetGenerator(self.dataset_file_path,
self.dataset_name,
batch_size,
'feat_seq',
shuffle=shuffle)
gen_features = []
gen_labels = []
for features, labels in gen.generator(1):
gen_features.extend(features.tolist())
gen_labels.extend(labels.tolist())
self.assertEqual(len(expected_rows), len(gen_labels))
        for gen_label, gen_feat_seq in zip(gen_labels, gen_features):
            result = [
                row for row in expected_rows if row['label'] == gen_label
                and np.array_equal(row['feat_seq'], gen_feat_seq)
            ]
self.assertTrue(result)
def test_full(self):
expected_rows = self.initialize_expected_rows()
for i in range(0, self.num_rows):
row = self.generate_random_row()
expected_rows[i] = row
self.dataset_writer.add(row)
self.dataset_writer.close()
batch_size = 3
self.check_db(batch_size, expected_rows, False)
def test_full_shuffle(self):
expected_rows = self.initialize_expected_rows()
for i in range(0, self.num_rows):
row = self.generate_random_row()
expected_rows[i] = row
self.dataset_writer.add(row)
self.dataset_writer.close()
batch_size = 3
self.check_db(batch_size, expected_rows, True)
if __name__ == '__main__':
unittest.main()
| 35.284314 | 87 | 0.591275 | 430 | 3,599 | 4.716279 | 0.251163 | 0.088757 | 0.02712 | 0.02712 | 0.361933 | 0.33925 | 0.325937 | 0.294379 | 0.294379 | 0.294379 | 0 | 0.008454 | 0.309808 | 3,599 | 101 | 88 | 35.633663 | 0.807971 | 0.021395 | 0 | 0.231707 | 0 | 0 | 0.031818 | 0 | 0 | 0 | 0 | 0 | 0.02439 | 1 | 0.097561 | false | 0 | 0.109756 | 0.012195 | 0.256098 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8fba32d12111f85dc80af9dc8bb2227ff00fd5d8 | 1,814 | py | Python | tests/v2/test_parser.py | balloob/aiohue | f50478027ccf3b8ee6b804abb9fb48ea436e1561 | [
"Apache-2.0"
] | 14 | 2018-03-02T15:39:19.000Z | 2020-02-25T12:52:40.000Z | tests/v2/test_parser.py | balloob/aiohue | f50478027ccf3b8ee6b804abb9fb48ea436e1561 | [
"Apache-2.0"
] | 16 | 2018-03-19T15:02:18.000Z | 2020-02-08T02:06:07.000Z | tests/v2/test_parser.py | balloob/aiohue | f50478027ccf3b8ee6b804abb9fb48ea436e1561 | [
"Apache-2.0"
] | 14 | 2018-03-17T10:43:30.000Z | 2020-03-12T10:49:51.000Z | """Test parser functions that converts the incoming json from API into dataclass models."""
from dataclasses import dataclass
from typing import Optional
import pytest
from aiohue.util import dataclass_from_dict
@dataclass
class BasicModelChild:
"""Basic test model."""
a: int
b: str
c: str
d: Optional[int]
@dataclass
class BasicModel:
"""Basic test model."""
a: int
b: float
c: str
d: Optional[int]
e: BasicModelChild
f: str = "default"
def test_dataclass_from_dict():
"""Test dataclass from dict parsing."""
raw = {
"a": 1,
"b": 1.0,
"c": "hello",
"d": 1,
"e": {"a": 2, "b": "test", "c": "test", "d": None},
}
res = dataclass_from_dict(BasicModel, raw)
# test the basic values
assert isinstance(res, BasicModel)
assert res.a == 1
assert res.b == 1.0
assert res.d == 1
# test recursive parsing
assert isinstance(res.e, BasicModelChild)
# test default value
assert res.f == "default"
# test int gets converted to float
raw["b"] = 2
res = dataclass_from_dict(BasicModel, raw)
assert res.b == 2.0
# test string doesn't match int
with pytest.raises(TypeError):
raw2 = {**raw}
raw2["a"] = "blah"
dataclass_from_dict(BasicModel, raw2)
# test missing key result in keyerror
with pytest.raises(KeyError):
raw2 = {**raw}
del raw2["a"]
dataclass_from_dict(BasicModel, raw2)
# test extra keys silently ignored in non-strict mode
raw2 = {**raw}
raw2["extrakey"] = "something"
dataclass_from_dict(BasicModel, raw2, strict=False)
# test extra keys not silently ignored in strict mode
with pytest.raises(KeyError):
dataclass_from_dict(BasicModel, raw2, strict=True)
| 25.549296 | 91 | 0.624035 | 237 | 1,814 | 4.704641 | 0.337553 | 0.116592 | 0.13722 | 0.145291 | 0.251121 | 0.222422 | 0 | 0 | 0 | 0 | 0 | 0.016393 | 0.260198 | 1,814 | 70 | 92 | 25.914286 | 0.814456 | 0.233186 | 0 | 0.346939 | 0 | 0 | 0.044021 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0.020408 | false | 0 | 0.081633 | 0 | 0.346939 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8fba6a4b08985a65fc7dc336160371e54cc2ae45 | 2,155 | py | Python | core/src/zeit/cms/workflow/modified.py | rickdg/vivi | 16134ac954bf8425646d4ad47bdd1f372e089355 | [
"BSD-3-Clause"
] | 5 | 2019-05-16T09:51:29.000Z | 2021-05-31T09:30:03.000Z | core/src/zeit/cms/workflow/modified.py | rickdg/vivi | 16134ac954bf8425646d4ad47bdd1f372e089355 | [
"BSD-3-Clause"
] | 107 | 2019-05-24T12:19:02.000Z | 2022-03-23T15:05:56.000Z | core/src/zeit/cms/workflow/modified.py | rickdg/vivi | 16134ac954bf8425646d4ad47bdd1f372e089355 | [
"BSD-3-Clause"
] | 3 | 2020-08-14T11:01:17.000Z | 2022-01-08T17:32:19.000Z | from datetime import datetime
import pytz
import zeit.cms.checkout.interfaces
import zeit.cms.content.dav
import zeit.cms.content.interfaces
import zeit.cms.interfaces
import zeit.cms.workflow.interfaces
import zope.component
import zope.dublincore.interfaces
import zope.interface
import zope.security.proxy
MIN_DATE = datetime.min.replace(tzinfo=pytz.UTC)
@zope.component.adapter(zeit.cms.interfaces.ICMSContent)
@zope.interface.implementer(zeit.cms.workflow.interfaces.IModified)
class Modified(zeit.cms.content.dav.DAVPropertiesAdapter):
zeit.cms.content.dav.mapProperties(
zeit.cms.workflow.interfaces.IModified,
zeit.cms.interfaces.DOCUMENT_SCHEMA_NS,
('last_modified_by', 'date_last_checkout'))
@property
def date_last_modified(self):
dc = zope.dublincore.interfaces.IDCTimes(self.context, None)
if dc is not None:
return dc.modified
@zope.component.adapter(
zope.interface.Interface,
zeit.cms.checkout.interfaces.IBeforeCheckinEvent)
def update_last_modified_by(context, event):
modified = zeit.cms.workflow.interfaces.IModified(context, None)
if modified is None:
return
zope.security.proxy.removeSecurityProxy(modified).last_modified_by = (
event.principal.id)
@zope.component.adapter(
zope.interface.Interface,
zeit.cms.checkout.interfaces.IAfterCheckoutEvent)
def update_date_last_checkout(context, event):
modified = zeit.cms.workflow.interfaces.IModified(context, None)
if modified is None:
return
zope.security.proxy.removeSecurityProxy(modified).date_last_checkout = (
datetime.now(pytz.UTC))
@zope.component.adapter(
zope.interface.Interface,
zeit.cms.workflow.interfaces.IBeforePublishEvent)
def update_date_last_published_semantic(context, event):
published = zeit.cms.workflow.interfaces.IPublishInfo(context)
date_last_published_semantic = (
published.date_last_published_semantic or MIN_DATE)
lsc = zeit.cms.content.interfaces.ISemanticChange(context)
last_semantic_change = lsc.last_semantic_change or MIN_DATE
if last_semantic_change > date_last_published_semantic:
published.date_last_published_semantic = published.date_last_published
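# Illustrative helper (not registered anywhere): the None-safe datetime
# comparison used above, substituting MIN_DATE for missing values.
def _is_newer(last_semantic_change, date_last_published_semantic):
    return (last_semantic_change or MIN_DATE) > (
        date_last_published_semantic or MIN_DATE)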
| 34.206349 | 78 | 0.76891 | 261 | 2,155 | 6.176245 | 0.237548 | 0.069479 | 0.065136 | 0.108561 | 0.42866 | 0.365385 | 0.365385 | 0.365385 | 0.318859 | 0.245658 | 0 | 0 | 0.141067 | 2,155 | 62 | 79 | 34.758065 | 0.870881 | 0 | 0 | 0.24 | 0 | 0 | 0.015777 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.16 | 0 | 0.32 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8fbaf754c41ad4481aa6be8c252c4463ffd41432 | 10,141 | py | Python | bin/dm_proc_imob.py | gabiherman/datman | dcbca4981ff7bb1be536d6c62c3b27786cabdef9 | [
"Apache-2.0"
] | null | null | null | bin/dm_proc_imob.py | gabiherman/datman | dcbca4981ff7bb1be536d6c62c3b27786cabdef9 | [
"Apache-2.0"
] | null | null | null | bin/dm_proc_imob.py | gabiherman/datman | dcbca4981ff7bb1be536d6c62c3b27786cabdef9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
This analyzes imitate/observe behavioural data. It could be generalized
to analyze any rapid event-related design experiment fairly easily.
Usage:
dm_proc_imob.py [options] <study>
Arguments:
<study> Name of study in system-wide configuration file.
Options:
--subject SUBJID If given, run on a single subject
--debug Debug logging
DETAILS
1) Produces AFNI and FSL-compatible GLM timing files.
2) Runs an AFNI GLM analysis at the single-subject level.
Each subject is run through this pipeline if the outputs do not already exist.
Requires dm-proc-fmri.py to be complete for each subject.
DEPENDENCIES
+ afni
"""
import datman.utils as utils
import datman.config as cfg
from docopt import docopt
import glob
import logging
import os, sys
import tempfile
import time
import yaml
logging.basicConfig(level=logging.WARN, format="[%(name)s] %(levelname)s: %(message)s")
logger = logging.getLogger(os.path.basename(__file__))
def check_complete(directory, subject):
"""Checks to see if the output files have been created.
Returns True if the files exist
"""
expected_files = ['{}_glm_IM_1stlvl_MNI-nonlin.nii.gz',
'{}_glm_OB_1stlvl_MNI-nonlin.nii.gz']
for filename in expected_files:
if not os.path.isfile(os.path.join(directory, subject, filename.format(subject))):
return False
return True
def generate_analysis_script(subject, inputs, input_type, config, study):
"""
This writes the analysis script to replicate the methods in [insert paper
here]. It expects timing files to exist (these are static, and are generated
by 'imob-parse.py').
Briefly, this is a standard rapid-event related design. We use 5 tent
functions to explain each event over a 15 second window (this is the
standard length of the HRF).
Returns the path to the script that was generated or None if there was an
error.
"""
assets = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, 'assets')
study_base = config.get_study_base(study)
subject_dir = os.path.join(study_base, config.get_path('fmri'), 'imob', subject)
script = '{subject_dir}/{subject}_glm_1stlevel_{input_type}.sh'.format(
subject_dir=subject_dir, subject=subject, input_type=input_type)
IM_data = filter(lambda x: '_IMI_' in x, inputs[input_type])[0]
OB_data = filter(lambda x: '_OBS_' in x, inputs[input_type])[0]
f = open(script, 'wb')
f.write("""#!/bin/bash
#
# Contrasts: emotional faces vs. fixation, emotional faces vs. neutral faces.
# use the 'bucket' dataset (*_1stlevel.nii.gz) for group level analysis.
#
# Imitate GLM for {subject}.
3dDeconvolve \\
-input {IM_data} \\
-mask {subject_dir}/anat_EPI_mask_MNI-nonlin.nii.gz \\
    -ortvec {subject_dir}/PARAMS/motion.*.01.1D motion_parameters \\
-polort 4 \\
-num_stimts 6 \\
-local_times \\
-jobs 4 \\
-x1D {subject_dir}/{subject}_glm_IM_1stlevel_design_{input_type}.mat \\
-stim_label 1 IM_AN -stim_times 1 {assets}/IM_event-times_AN.1D \'BLOCK(1,1)\' \\
-stim_label 2 IM_FE -stim_times 2 {assets}/IM_event-times_FE.1D \'BLOCK(1,1)\' \\
-stim_label 3 IM_FX -stim_times 3 {assets}/IM_event-times_FX.1D \'BLOCK(1,1)\' \\
-stim_label 4 IM_HA -stim_times 4 {assets}/IM_event-times_HA.1D \'BLOCK(1,1)\' \\
-stim_label 5 IM_NE -stim_times 5 {assets}/IM_event-times_NE.1D \'BLOCK(1,1)\' \\
-stim_label 6 IM_SA -stim_times 6 {assets}/IM_event-times_SA.1D \'BLOCK(1,1)\' \\
-gltsym 'SYM: -1*IM_FX +0*IM_NE +0.25*IM_AN +0.25*IM_FE +0.25*IM_HA +0.25*IM_SA' \\
-glt_label 1 emot-fix \\
-gltsym 'SYM: +0*IM_FX -1*IM_NE +0.25*IM_AN +0.25*IM_FE +0.25*IM_HA +0.25*IM_SA' \\
-glt_label 2 emot-neut \\
-fitts {subject_dir}/{subject}_glm_IM_1stlvl_explained_{input_type}.nii.gz \\
-errts {subject_dir}/{subject}_glm_IM_1stlvl_residuals_{input_type}.nii.gz \\
-bucket {subject_dir}/{subject}_glm_IM_1stlvl_{input_type}.nii.gz \\
-cbucket {subject_dir}/{subject}_glm_IM_1stlvl_allcoeffs_{input_type}.nii.gz \\
-fout -tout -xjpeg {subject_dir}/{subject}_glm_IM_1stlevel_design_{input_type}.jpg
# Observe GLM for {subject}.
3dDeconvolve \\
-input {OB_data} \\
-mask {subject_dir}/anat_EPI_mask_MNI-nonlin.nii.gz \\
    -ortvec {subject_dir}/PARAMS/motion.*.02.1D motion_parameters \\
-polort 4 \\
-num_stimts 6 \\
-local_times \\
-jobs 4 \\
-x1D {subject_dir}/{subject}_glm_OB_1stlevel_design_{input_type}.mat \\
-stim_label 1 OB_AN -stim_times 1 {assets}/OB_event-times_AN.1D \'BLOCK(1,1)\' \\
-stim_label 2 OB_FE -stim_times 2 {assets}/OB_event-times_FE.1D \'BLOCK(1,1)\' \\
-stim_label 3 OB_FX -stim_times 3 {assets}/OB_event-times_FX.1D \'BLOCK(1,1)\' \\
-stim_label 4 OB_HA -stim_times 4 {assets}/OB_event-times_HA.1D \'BLOCK(1,1)\' \\
-stim_label 5 OB_NE -stim_times 5 {assets}/OB_event-times_NE.1D \'BLOCK(1,1)\' \\
-stim_label 6 OB_SA -stim_times 6 {assets}/OB_event-times_SA.1D \'BLOCK(1,1)\' \\
-gltsym 'SYM: -1*OB_FX +0*OB_NE +0.25*OB_AN +0.25*OB_FE +0.25*OB_HA +0.25*OB_SA' \\
-glt_label 1 emot-fix \\
-gltsym 'SYM: +0*OB_FX -1*OB_NE +0.25*OB_AN +0.25*OB_FE +0.25*OB_HA +0.25*OB_SA' \\
-glt_label 2 emot-neut \\
-fitts {subject_dir}/{subject}_glm_OB_1stlvl_explained_{input_type}.nii.gz \\
-errts {subject_dir}/{subject}_glm_OB_1stlvl_residuals_{input_type}.nii.gz \\
-bucket {subject_dir}/{subject}_glm_OB_1stlvl_{input_type}.nii.gz \\
-cbucket {subject_dir}/{subject}_glm_OB_1stlvl_allcoeffs_{input_type}.nii.gz \\
-fout -tout -xjpeg {subject_dir}/{subject}_glm_OB_1stlevel_design_{input_type}.jpg
""".format(IM_data=IM_data, OB_data=OB_data, subject_dir=subject_dir, assets=assets,
subject=subject, input_type=input_type))
f.close()
return script
def get_inputs(files, config):
"""
finds the inputs for the imob experiment (one IMI and one OBS file,
    respectively) for each epitome stage separately.
"""
inputs = {}
for exported in config.study_config['fmri']['imob']['glm']:
candidates = filter(lambda x: '{}.nii.gz'.format(exported) in x, files)
tagged_candidates = []
for tag in config.study_config['fmri']['imob']['tags']:
tagged_candidates.extend(filter(lambda x: '_{}_'.format(tag) in x, candidates))
if len(tagged_candidates) == 2:
inputs[exported] = tagged_candidates
else:
raise Exception(candidates)
return inputs
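# Standalone sketch of the two-stage filtering in get_inputs() above (the
# file names here are made up for illustration):
def _filter_example():
    files = ['SUB_IMI_MNI-nonlin.nii.gz', 'SUB_OBS_MNI-nonlin.nii.gz']
    candidates = filter(lambda x: 'MNI-nonlin.nii.gz' in x, files)
    return filter(lambda x: '_IMI_' in x, candidates)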
def main():
"""
Loops through subjects, preprocessing using supplied script, and runs a
first-level GLM using AFNI (tent functions, 15 s window) on all subjects.
"""
arguments = docopt(__doc__)
study = arguments['<study>']
subject = arguments['--subject']
debug = arguments['--debug']
logging.info('Starting')
if debug:
logger.setLevel(logging.DEBUG)
# load config for study
try:
config = cfg.config(study=study)
except ValueError:
logger.error('study {} not defined'.format(study))
sys.exit(1)
study_base = config.get_study_base(study)
imob_dir = os.path.join(study_base, config.get_path('fmri'), 'imob')
# process a single subject
if subject:
# get required inputs from each
files = glob.glob(os.path.join(imob_dir, subject) + '/*.nii.gz')
inputs = get_inputs(files, config)
# check if subject has already been processed
if check_complete(imob_dir, subject):
logger.info('{} already analysed'.format(subject))
sys.exit(0)
# first level GLM for inputs
for input_type in inputs.keys():
script = generate_analysis_script(subject, inputs, input_type, config, study)
rtn, out = utils.run('chmod 754 {}'.format(script))
rtn, out = utils.run(script)
if rtn:
logger.error('Script {} failed to run on subject {} with error:\n{}'.format(
script, subject, out))
sys.exit(1)
# process all subjects
else:
commands = []
for path in glob.glob('{}/*'.format(imob_dir)):
subject = os.path.basename(path)
# add subject if any of the expected outputs do not exist
files = glob.glob(os.path.join(imob_dir, subject) + '/*.nii.gz')
try:
inputs = get_inputs(files, config)
except:
logger.debug('Invalid inputs for {}'.format(subject))
continue
expected = inputs.keys()
for exp in expected:
if not filter(lambda x: '{}_glm_IM_1stlvl_{}'.format(subject, exp) in x, files):
commands.append(" ".join([__file__, study, '--subject {}'.format(subject)]))
break
if commands:
logger.debug("queueing up the following commands:\n"+'\n'.join(commands))
#fd, path = tempfile.mkstemp()
#os.write(fd, '\n'.join(commands))
#os.close(fd)
for i, cmd in enumerate(commands):
jobname = "dm_imob_{}_{}".format(i, time.strftime("%Y%m%d-%H%M%S"))
jobfile = '/tmp/{}'.format(jobname)
logfile = '/tmp/{}.log'.format(jobname)
errfile = '/tmp/{}.err'.format(jobname)
with open(jobfile, 'wb') as fid:
fid.write('#!/bin/bash\n')
fid.write(cmd)
rtn, out = utils.run('qsub -V -q main.q -o {} -e {} -N {} {}'.format(
logfile, errfile, jobname, jobfile))
#rtn, out, err = utils.run('qbatch -i --logdir {logdir} -N {name} --walltime {wt} {cmds}'.format(logdir = log_path, name = jobname, wt = walltime, cmds = path))
if rtn:
logger.error("Job submission failed. Output follows.")
logger.error("stdout: {}".format(out))
sys.exit(1)
if __name__ == "__main__":
main()
| 40.40239 | 176 | 0.636229 | 1,451 | 10,141 | 4.24397 | 0.229497 | 0.032153 | 0.04417 | 0.042222 | 0.38584 | 0.325106 | 0.297174 | 0.286781 | 0.282559 | 0.242611 | 0 | 0.021731 | 0.228577 | 10,141 | 250 | 177 | 40.564 | 0.765435 | 0.189922 | 0 | 0.19375 | 0 | 0.1 | 0.472905 | 0.172324 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.05625 | 0 | 0.10625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8fbddd6698d341ba15c0546d8eddaff98692a0b3 | 1,713 | py | Python | examples/cls/v20201016/uploadlog.py | PlasticMem/tencentcloud-sdk-python | 666db85623d51d640a165907a19aef5fba53b38d | [
"Apache-2.0"
] | 465 | 2018-04-27T09:54:59.000Z | 2022-03-29T02:18:01.000Z | examples/cls/v20201016/uploadlog.py | PlasticMem/tencentcloud-sdk-python | 666db85623d51d640a165907a19aef5fba53b38d | [
"Apache-2.0"
] | 91 | 2018-04-27T09:48:11.000Z | 2022-03-12T08:04:04.000Z | examples/cls/v20201016/uploadlog.py | PlasticMem/tencentcloud-sdk-python | 666db85623d51d640a165907a19aef5fba53b38d | [
"Apache-2.0"
] | 232 | 2018-05-02T08:02:46.000Z | 2022-03-30T08:02:48.000Z | # -*- coding: utf-8 -*-
import os
import sys
from tencentcloud.common import credential
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
# 导入对应产品模块的client models。
from tencentcloud.common import common_client
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
import pb
try:
# 实例化一个认证对象,入参需要传入腾讯云账户secretId,secretKey
cred = credential.Credential(
os.environ.get("TENCENTCLOUD_SECRET_ID"),
os.environ.get("TENCENTCLOUD_SECRET_KEY"))
# 实例化一个http选项,可选的,没有特殊需求可以跳过。
httpProfile = HttpProfile()
httpProfile.reqMethod = "POST" # post请求(默认为post请求)
httpProfile.reqTimeout = 30 # 请求超时时间,单位为秒(默认60秒)
httpProfile.endpoint = "cls.tencentcloudapi.com" # 指定接入地域域名(默认就近接入)
httpProfile.keepAlive = True
# 实例化一个client选项,可选的,没有特殊需求可以跳过。
clientProfile = ClientProfile()
clientProfile.signMethod = "TC3-HMAC-SHA256" # 指定签名算法(默认为HmacSHA256)
clientProfile.httpProfile = httpProfile
client = common_client.CommonClient("cls", '2020-10-16', cred, "ap-beijing", clientProfile)
headers = {
# 使用对应地域下真实存在的日志主题ID
"X-CLS-TopicId": "xxxxf2e2-166c-4174-9473-b6a6dfca6f6e",
# 主题分区,https://cloud.tencent.com/document/product/614/39259
# 取值00000000000000000000000000000000,ffffffffffffffffffffffffffffffff
"X-CLS-HashKey": "0fffffffffffffffffffffffffffffff",
# 压缩类型
"X-CLS-CompressType": "",
}
resp = client.call_octet_stream("UploadLog", headers, pb.pb_gen(1,1))
# 输出json格式的字符串回包
print("%s" % resp)
except TencentCloudSDKException as err:
print("%s" % err)
| 34.959184 | 95 | 0.730881 | 172 | 1,713 | 7.197674 | 0.581395 | 0.06462 | 0.088853 | 0.045234 | 0.048465 | 0 | 0 | 0 | 0 | 0 | 0 | 0.055827 | 0.163456 | 1,713 | 48 | 96 | 35.6875 | 0.808095 | 0.224168 | 0 | 0 | 0 | 0 | 0.178843 | 0.103501 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.266667 | 0 | 0.266667 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8fbe0c898a3e6ca3bd07184204c83e01965e71a7 | 1,746 | py | Python | collectors/spiders/committee_speech.py | alvesmatheus/fala-camarada | 47015fe95422d5f71c279e47edacdd31ea3f71b8 | [
"MIT"
] | 7 | 2021-02-11T20:36:16.000Z | 2021-02-12T17:22:05.000Z | collectors/spiders/committee_speech.py | alvesmatheus/fala-camarada | 47015fe95422d5f71c279e47edacdd31ea3f71b8 | [
"MIT"
] | null | null | null | collectors/spiders/committee_speech.py | alvesmatheus/fala-camarada | 47015fe95422d5f71c279e47edacdd31ea3f71b8 | [
"MIT"
] | null | null | null | import re
import pandas as pd
from scrapy import Spider, Request
from bs4 import BeautifulSoup
from collectors.loaders import CommitteeSpeechLoader
from collectors.utils.constants import (
COMMITTEES_SCHEDULE_PATH,
SPEECH_SPEAKER_PATTERN,
COMMITTEE_SPEECH_URL
)
class CommitteeSpeechSpider(Spider):
name = 'coletor-discursos-comissoes'
custom_settings = {
'FEED_EXPORT_FIELDS': ['id_evento', 'ordem_discurso', 'orador',
'transcricao']
}
def __init__(self, year=None):
events = pd.read_csv(COMMITTEES_SCHEDULE_PATH)
if year:
events = events[events['data'].str.contains(year)]
event_ids = events['id_evento'].drop_duplicates().values.tolist()
self.event_ids = event_ids
def start_requests(self):
for event_id in self.event_ids:
query = {'event_id': event_id}
url = COMMITTEE_SPEECH_URL.format_map(query)
yield Request(url=url, callback=self.parse, meta=query)
def parse(self, response):
body = response.css('body').get()
speeches = BeautifulSoup(body, 'html.parser').get_text()
sections = re.split(SPEECH_SPEAKER_PATTERN, speeches)[1:]
if sections:
section_order = range(0, len(sections), 2)
event_id = response.meta['event_id']
for order in section_order:
loader = CommitteeSpeechLoader()
loader.add_value('id_evento', event_id)
loader.add_value('ordem_discurso', (order // 2) + 1)
loader.add_value('orador', sections[order])
loader.add_value('transcricao', sections[order + 1])
yield loader.load_item()
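def _split_speeches_example():
    """Illustrative sketch of the speaker/speech pairing above: re.split with
    a capturing speaker pattern yields [speaker, text, speaker, text, ...],
    which the stride-2 loop pairs back up. The pattern and text here are made
    up; the real pattern lives in SPEECH_SPEAKER_PATTERN."""
    pattern = r'(O SR\. [A-ZÀ-Ú ]+ -)'
    text = 'O SR. FULANO - Bom dia. O SR. BELTRANO - Obrigado.'
    sections = re.split(pattern, text)[1:]
    return [(sections[i], sections[i + 1]) for i in range(0, len(sections), 2)]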
| 30.103448 | 73 | 0.631157 | 197 | 1,746 | 5.360406 | 0.467005 | 0.039773 | 0.05303 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005464 | 0.266323 | 1,746 | 57 | 74 | 30.631579 | 0.818891 | 0 | 0 | 0 | 0 | 0 | 0.096793 | 0.015464 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073171 | false | 0 | 0.146341 | 0 | 0.292683 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8fc021d0c8051f8cee45541b0c98bbc63e0e0171 | 16,823 | py | Python | steelpy/f2uModel/mesh/sqlite/element.py | svortega/steelpy | bef35eb8ab8728fc29f57b7070b5f3bac0b0e840 | [
"MIT"
] | 4 | 2021-09-28T12:52:01.000Z | 2022-02-24T22:30:22.000Z | steelpy/f2uModel/mesh/sqlite/element.py | svortega/steelpy | bef35eb8ab8728fc29f57b7070b5f3bac0b0e840 | [
"MIT"
] | null | null | null | steelpy/f2uModel/mesh/sqlite/element.py | svortega/steelpy | bef35eb8ab8728fc29f57b7070b5f3bac0b0e840 | [
"MIT"
] | null | null | null | # Copyright (c) 2009-2022 fem2ufo
#
# Python stdlib imports
from dataclasses import dataclass
from array import array
from collections import Counter
from collections.abc import Mapping
from math import dist
from typing import NamedTuple, Tuple, List, Iterator, Iterable, Union, Dict
from itertools import chain
# package imports
from steelpy.f2uModel.mesh.sqlite.nodes import get_node
from steelpy.material.matsql import get_materialSQL
from steelpy.sections.main import get_sectionSQL
from steelpy.f2uModel.results.sqlite.operation.process_sql import create_connection, create_table
from steelpy.trave3D.preprocessor.assemble import (beam_stiffness, beam_Ks,
trans_3d_beam, Rmatrix)
#
#
@dataclass
class BeamElement:
""" """
__slots__ = ['name', 'db_file', 'type']
def __init__(self, element_name:int, db_file:str) -> None:
"""
"""
self.name = element_name
self.db_file = db_file
self.type: str = "beam"
#
@property
def number(self) -> int:
""" """
conn = create_connection(self.db_file)
with conn:
data = get_element_data(conn, self.name)
return data[1]
@number.setter
def number(self, value:int) -> None:
""""""
1/0
conn = create_connection(self.db_file)
item = "number"
with conn:
update_element_item(conn, self.name, item, value)
#
@property
def connectivity(self) -> List:
"""
"""
conn = create_connection(self.db_file)
with conn:
connodes = get_connectivity(conn, self.name)
return connodes
@connectivity.setter
def connectivity(self, nodes:List[int]) -> List:
"""
"""
conn = create_connection(self.db_file)
with conn:
#push_connectivity(conn, self.name, nodes)
update_connectivity(conn, self.name, nodes)
#self._connectivity[self.index] = nodes
#
@property
def material(self) -> List:
"""
"""
conn = create_connection(self.db_file)
with conn:
data = get_element_data(conn, self.name)
return data[4]
@material.setter
def material(self, material_name: str) -> None:
"""
"""
conn = create_connection(self.db_file)
item = "material"
with conn:
update_element_item(conn, self.name, item, material_name)
#
@property
def section(self) -> List:
"""
"""
conn = create_connection(self.db_file)
with conn:
data = get_element_data(conn, self.name)
return data[5]
@section.setter
def section(self, section_name: str) -> None:
"""
"""
conn = create_connection(self.db_file)
item = "section"
with conn:
            update_element_item(conn, self.name, item, section_name)
#
@property
def beta(self):
"""beta angle roll"""
conn = create_connection(self.db_file)
with conn:
data = get_element_data(conn, self.name)
return data[3]
@beta.setter
def beta(self, value):
"""beta angle roll"""
conn = create_connection(self.db_file)
item = "roll_angle"
with conn:
            update_element_item(conn, self.name, item, value)
#
#
def __str__(self) -> str:
""" """
conn = create_connection(self.db_file)
with conn:
data = get_element_data(conn, self.name)
#title = data[-1]
if (title := data[-1]) == "NULL":
title = ""
#
return "{:8d} {:8d} {:8d} {:>12s} {:>12s} {: 6.4f} {:>6.3f} {:>12s}\n"\
.format(self.name, *self.connectivity,
self.material, self.section, self.beta,
self.length, title)
#
#
@property
def DoF(self) -> List[ int ]:
"""
"""
conn = create_connection(self.db_file)
dof = [ ]
for node_name in self.connectivity:
node = get_node(conn, node_name=node_name)
            number = node[0] - 1
            # zero-based node index; multiply by 6 to locate the node's six
            # degrees of freedom when assembling the global matrices
            dof.append(number)
return dof
#
@property
def length(self) -> float:
"""
"""
conn = create_connection(self.db_file)
nodes = self.connectivity
node1 = get_node(conn, node_name=nodes[0])
node2 = get_node(conn, node_name=nodes[1])
        # Euclidean distance between the end nodes; columns 3:6 hold x, y, z
return dist(node1[3:6], node2[3:6])
#
@property
def unit_vector(self) -> List[ float ]:
"""
"""
        # TODO: get_node should be aligned with the in-memory implementation
conn = create_connection(self.db_file)
node1 = get_node(conn, node_name=self.connectivity[0])
node2 = get_node(conn, node_name=self.connectivity[1])
dx = node2[3] - node1[3]
dy = node2[4] - node1[4]
dz = node2[5] - node1[5]
        # direction cosines of the member axis (l**2 + m**2 + n**2 == 1)
L = dist(node1[3:6], node2[3:6])
l = dx / L
m = dy / L
n = dz / L
return [l, m, n]
#
@property
def Kmatrix(self):
""" """
#conn = create_connection(self.db_file)
material, section, beta = self._K_data()
#section = get_sectionSQL(conn, self.section)
#material = get_materialSQL(conn, self.material)
# solve K matrix
R = Rmatrix(*self.unit_vector, beta)
# R = Rmatrix(*self.direction_cosines, self.beta)
# K = beam_stiffness(self.length,
# section.area,
# section.J,
# section.Iy,
# section.Iz,
# material.E,
# material.G)
K = beam_Ks(self.length,
section.area, section.J,
section.Iy, section.Iz,
material.E, material.G,
section.area, section.area)
return trans_3d_beam(K, R)
#
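    # A minimal usage sketch (hypothetical element name and database path;
    # assumes nodes, materials and sections were pushed beforehand):
    #
    #   beam = BeamElement(element_name=1, db_file="model.db")
    #   K_global = beam.Kmatrix   # 12x12 beam stiffness in global axes
    #   dof = beam.DoF            # node indices used to assemble K_global
    #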
def _K_data(self):
""" """
conn = create_connection(self.db_file)
cur = conn.cursor()
cur.execute ("SELECT * FROM tb_Elements\
WHERE tb_Elements.name = {:};".format(self.name))
row = cur.fetchone()
#
#connodes = get_connectivity(conn, self.name)
#data = [*row[4:], connodes]
material = get_materialSQL(conn, row[4])
section = get_sectionSQL(conn, row[5])
beta = row[6]
conn.close()
return material, section, beta
#
@property
def R(self):
"""
Rotation matrix
"""
if self.type in ['beam', 'truss']:
return Rmatrix(*self.unit_vector, self.beta)
else:
raise IOError("no yet included")
#
#
#
#
class ElementSQL(Mapping):
__slots__ = ['db_file', '_labels']
def __init__(self, db_file:str,
db_system:str="sqlite") -> None:
"""
"""
self.db_file = db_file
self._labels: array = array('I', [])
# create node table
self._create_table()
#
def __setitem__(self, element_number: int, parameters: List) -> None:
"""
        parameters = ['beam', node1, node2, material, section, roll_angle, title]
"""
try:
self._labels.index(element_number)
            raise Exception('element {:} already exists'.format(element_number))
except ValueError:
# default
self._labels.append(element_number)
# push to SQL
conn = create_connection(self.db_file)
with conn:
self.push_element(conn, element_number, parameters)
conn.commit()
#
def __getitem__(self, element_number: int):
""" """
try:
self._labels.index(element_number)
return BeamElement(element_number, self.db_file)
except ValueError:
raise IndexError(' ** element {:} does not exist'.format(element_number))
#
#
    def __len__(self) -> int:
return len(self._labels)
def __iter__(self) -> Iterator:
"""
"""
return iter(self._labels)
def __contains__(self, value) -> bool:
return value in self._labels
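    #
    # A minimal usage sketch (hypothetical names and database path; the
    # material 'steel' and section 'IPE300' must already exist in the model):
    #
    #   elements = ElementSQL("model.db")
    #   elements[1] = ['beam', 10, 11, 'steel', 'IPE300', 0.0, "NULL"]
    #   beam = elements[1]
    #   print(beam.length, beam.unit_vector)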
#
#
def push_element(self, conn, element_number, parameters):
""" """
cur = conn.cursor()
cur.execute("SELECT tb_Materials.name, tb_Materials.number FROM tb_Materials;")
materials = cur.fetchall()
materials = {item[0]:item[1] for item in materials}
#
#cur = conn.cursor()
cur.execute("SELECT tb_Sections.name, tb_Sections.number FROM tb_Sections;")
sections = cur.fetchall()
sections = {item[0]:item[1] for item in sections}
# connectivity
push_connectivity(conn, element_number, parameters[1:3])
#
#try:
roll_angle = parameters[5]
#except IndexError:
# roll_angle = 0.0
if (title := parameters[6]) == "NULL":
title = None
#
project = (element_number, title,
parameters[0],
materials[parameters[3]],
sections[parameters[4]],
roll_angle)
#
sql = 'INSERT INTO tb_Elements(name, title, type, material, section,\
roll_angle)\
VALUES(?,?,?,?,?,?)'
#cur = conn.cursor()
cur.execute(sql, project)
#
def _create_table(self) -> None:
""" """
_table_elements = "CREATE TABLE IF NOT EXISTS tb_Elements(\
number INTEGER PRIMARY KEY NOT NULL,\
name INTEGER NOT NULL,\
title TEXT,\
type TEXT NOT NULL,\
material INTEGER NOT NULL REFERENCES tb_Materials(number),\
section INTEGER NOT NULL REFERENCES tb_Sections(number),\
roll_angle DECIMAL);"
#
_table_connectivity = "CREATE TABLE IF NOT EXISTS tb_Connectivity(\
number INTEGER PRIMARY KEY NOT NULL,\
element_name INTEGER NOT NULL REFERENCES tb_Elements(name),\
node_name INTEGER REFERENCES tb_Nodes(name),\
node_end INTEGER NOT NULL);"
#
_table_univectors = "CREATE TABLE IF NOT EXISTS tb_DirectionCosines(\
number INTEGER PRIMARY KEY NOT NULL,\
element_name INTEGER NOT NULL REFERENCES tb_Elements(name),\
type TEXT NOT NULL);"
#
_table_offset = "CREATE TABLE IF NOT EXISTS tb_Eccentricities(\
number INTEGER PRIMARY KEY NOT NULL,\
element_name INTEGER NOT NULL REFERENCES tb_Elements(name),\
node_name INTEGER REFERENCES tb_Nodes(name),\
node_end INTEGER NOT NULL,\
system TEXT NOT NULL,\
x DECIMAL,\
y DECIMAL,\
z DECIMAL);"
#
conn = create_connection(self.db_file)
create_table(conn, _table_elements)
create_table(conn, _table_connectivity)
create_table(conn, _table_offset)
create_table(conn, _table_univectors)
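        # Note: SQLite ignores the REFERENCES clauses above unless foreign-key
        # enforcement is switched on per connection, e.g.
        #   conn.execute("PRAGMA foreign_keys = ON;")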
#
#def iter_elements(self, arraysize=1000):
# """
# """
# conn = create_connection(self.db_file)
# cur = conn.cursor()
# # TODO: check if direction cosines given
# cur.execute("SELECT tb_Elements.name, tb_Elements.number, tb_Elements.type,\
# tb_Elements.roll_angle, tb_Elements.material, tb_Elements.section\
# FROM tb_Elements;" )
# #
# try:
# while True:
# elements = cur.fetchmany(arraysize)
# if not elements:
# break
# for element in elements:
# #cur.execute("SELECT tb_Connectivity.node_end, tb_Connectivity.node_name\
# # FROM tb_Connectivity\
# # WHERE tb_Connectivity.element_name = {:};".format(element[0]))
# #row = cur.fetchall()
# #connodes = [x for _, x in sorted(row)]
# connodes = get_connectivity(conn, element[0])
# data = [*element[0:6], connodes, self.db_file]
# yield BeamElement(data)
# except Exception as e:
# print(e)
# finally:
# conn.close()
#
@property
def get_connectivities(self):
""" """
conn = create_connection(self.db_file)
cur = conn.cursor()
cur.execute( "SELECT tb_Elements.name FROM tb_Elements;")
elements = cur.fetchall()
connodes = []
for element in elements:
#cur.execute("SELECT tb_Connectivity.node_end, tb_Connectivity.node_name\
# FROM tb_Connectivity\
# WHERE tb_Connectivity.element_name = {:};".format(member[0]))
#row = cur.fetchall()
#connodes.append([x for _,x in sorted(row)])
connodes.append(get_connectivity(conn, element[0]))
conn.close()
return connodes
#
#
def get_number(self, start:int=0)-> Iterable[int]:
"""
"""
try:
n = max(self._labels)
except ValueError:
n = start
#
while True:
n += 1
yield n
#
#
def update_item(self, element_number:int, item:str, value:Union[float,int]):
""" """
conn = create_connection(self.db_file)
with conn:
update_element_item(conn, element_number, item, value)
#conn.commit()
#
@property
def get_free_nodes(self):
"""
find nodes not sharing elements
"""
connectivities = self.get_connectivities
        flat = list(chain.from_iterable(connectivities))
        # a node appearing exactly once across all connectivities is free
        return [k for k, v in Counter(flat).items() if v == 1]
#
#
def get_connectivity(conn, element_name):
""" """
cur = conn.cursor()
cur.execute("SELECT tb_Connectivity.node_end, tb_Connectivity.node_name\
FROM tb_Connectivity\
WHERE tb_Connectivity.element_name = {:};".format(element_name))
connodes = cur.fetchall()
return [x for _, x in sorted(connodes)]
#return connodes
#
def push_connectivity(conn, element_name, connectivity):
"""
"""
cur = conn.cursor()
for x, node in enumerate(connectivity):
project = (element_name, node, x+1)
sql = 'INSERT INTO tb_Connectivity(element_name,\
node_name, node_end)\
VALUES(?,?,?)'
cur.execute(sql, project)
#return cur.lastrowid
#
def update_connectivity(conn, element_name, connectivity):
"""
"""
cur = conn.cursor()
for x, node in enumerate(connectivity):
project = (node, element_name, x+1)
sql = 'UPDATE tb_Connectivity SET node_name = ? \
WHERE element_name = ?\
AND node_end = ?'
cur.execute(sql, project)
#return cur.lastrowid
#
#
def update_element_item(conn, name, item, value):
""" """
project = (value, name)
sql = 'UPDATE tb_Elements SET {:} = ? WHERE name = ?'.format(item)
cur = conn.cursor()
cur.execute(sql, project)
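    # The column name cannot be bound as a SQL parameter, hence the
    # str.format above; `item` must come from trusted code, never user input.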
#
#
def get_element_data(conn, element_name):
""" """
cur = conn.cursor()
cur.execute ("SELECT tb_Elements.name, tb_Elements.number, tb_Elements.type,\
tb_Elements.roll_angle, tb_Materials.name, tb_Sections.name, tb_Elements.title\
FROM tb_Elements, tb_Materials, tb_Sections\
WHERE tb_Elements.name = {:} \
AND tb_Elements.material = tb_Materials.number \
AND tb_Elements.section = tb_Sections.number;".format(element_name))
row = cur.fetchone()
#
connodes = get_connectivity(conn, element_name)
data = [*row[:6], connodes, row[-1]]
#conn.close ()
return data
#
# | 33.713427 | 97 | 0.535041 | 1,774 | 16,823 | 4.878241 | 0.130214 | 0.021493 | 0.030044 | 0.058239 | 0.425468 | 0.3945 | 0.345852 | 0.300786 | 0.277675 | 0.234343 | 0 | 0.01068 | 0.354396 | 16,823 | 499 | 98 | 33.713427 | 0.786115 | 0.161267 | 0 | 0.304054 | 0 | 0.003378 | 0.031398 | 0 | 0 | 0 | 0 | 0.004008 | 0 | 1 | 0.118243 | false | 0 | 0.040541 | 0.006757 | 0.239865 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8fc0d6ad5b5827802c283b1aeac28939ea11fd86 | 1,256 | py | Python | tools/convert_lf.py | might-and-magic/mm678_i18n | ddc37c743ac37169908641dcd5328a7ccae5138f | [
"MIT"
] | 17 | 2020-02-25T14:37:23.000Z | 2022-02-21T15:33:09.000Z | tools/convert_lf.py | tomchen/csv2po | 75e53fbc5c473b16e8dac153ee99793fad0cd5d0 | [
"MIT"
] | 1 | 2022-01-09T02:14:56.000Z | 2022-02-13T10:08:11.000Z | tools/convert_lf.py | tomchen/csv2po | 75e53fbc5c473b16e8dac153ee99793fad0cd5d0 | [
"MIT"
] | 3 | 2020-10-06T20:38:13.000Z | 2021-02-17T02:11:17.000Z | # Batch convert LF to '\n' and vice versa
# Tool of csv2po.py
# By Tom CHEN <tomchen.org@gmail.com> (tomchen.org)
import re
from pathlib import Path
from getfilepaths import getFilePaths
def convertLf(inputPath, outputPath, encoding = None, SlashNTolf = True):
    """Convert literal '\\n' sequences to bare LF characters, or back."""
    # newline='\r\n' disables universal-newline translation, so CRLF pairs
    # survive the read and the lookbehind below can target bare LFs only
    f = inputPath.open(mode = 'r', newline = '\r\n', encoding = encoding)
    content = f.read()
    f.close()
    if SlashNTolf:
        # literal backslash-n  ->  real line feed
        content = re.sub(r'\\n', '\n', content)
    else:
        # real line feed not preceded by CR (i.e. not part of a CRLF)
        # ->  literal backslash-n
        content = re.sub('(?<!\r)\n', r'\\n', content)
    outputPath.parent.mkdir(parents = True, exist_ok = True)
    # newline='' writes the content verbatim, without newline translation
    fo = outputPath.open(mode = 'w', newline = '', encoding = encoding)
    fo.write(content)
    fo.close()
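# Example single-file round trip (hypothetical paths; the input must exist):
#   convertLf(Path('in.txt'), Path('out.txt'), encoding = 'UTF-8', SlashNTolf = True)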
def batchConvertLf(inputPath, outputPath, SlashNTolf = True, extension = 'txt', encoding = 'UTF-8'):
for p in getFilePaths(inputPath, extension = extension):
convertLf(p, outputPath.joinpath(p.relative_to(inputPath)), encoding, SlashNTolf)
# batchConvertLf(inputPath = Path('0_source/zh_CN/customlist/t'), outputPath = Path('0_source/zh_CN/customlist/t2'), SlashNTolf = False, extension = 'list', encoding = 'UTF-8')
batchConvertLf(inputPath = Path('0_source/zh_CN/customlist/t2'), outputPath = Path('0_source/zh_CN/customlist/t3'), SlashNTolf = True, extension = 'list', encoding = 'UTF-8')
| 44.857143 | 176 | 0.710191 | 173 | 1,256 | 5.098266 | 0.433526 | 0.00907 | 0.049887 | 0.058957 | 0.281179 | 0.192744 | 0.192744 | 0.108844 | 0 | 0 | 0 | 0.010073 | 0.130573 | 1,256 | 27 | 177 | 46.518519 | 0.797619 | 0.240446 | 0 | 0 | 0 | 0 | 0.101266 | 0.059072 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.157895 | 0 | 0.263158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8fc2ed7dd777338b030108cb7bb036127694653d | 5,939 | py | Python | src/semantic_segmentation/classic_trainer.py | alteia-ai/ICSS | 088ddb7a8b92c71cc0b95e55d186069b8af50b0a | [
"MIT"
] | 7 | 2022-01-10T19:04:34.000Z | 2022-03-16T03:19:48.000Z | src/semantic_segmentation/classic_trainer.py | alteia-ai/ICSS | 088ddb7a8b92c71cc0b95e55d186069b8af50b0a | [
"MIT"
] | null | null | null | src/semantic_segmentation/classic_trainer.py | alteia-ai/ICSS | 088ddb7a8b92c71cc0b95e55d186069b8af50b0a | [
"MIT"
] | null | null | null | import logging
import os
import time
from glob import glob
import cv2 as cv
import numpy as np
import pandas as pd
import torch
from src.semantic_segmentation.loaders import GTDataset, RGBIncrementalDataset
from src.semantic_segmentation.trainer import Trainer
from src.semantic_segmentation.utils.losses import CrossEntropy2d
from src.semantic_segmentation.utils.metrics import IoU, accuracy, f1_score
from tqdm import tqdm
class ClassicTrainer(Trainer):
def __init__(self, cfg, train=True, dataset=None):
super(ClassicTrainer, self).__init__(cfg)
if train:
self.train_dataset = RGBIncrementalDataset(dataset, self.cfg, finetune=False)
self.gt_dataset = GTDataset(dataset, self.cfg, self.train_dataset.train_ids)
logging.info(f"Train ids (len {len(self.train_dataset.imgs)}): {[os.path.basename(i) for i in self.train_dataset.imgs]}"
)
self.dataset = dataset
test_dataset = RGBIncrementalDataset(dataset, self.cfg, train=False, finetune=False)
logging.info(
f"Test ids (len {len(test_dataset.imgs)}): {[os.path.basename(i) for i in test_dataset.imgs]}"
)
self.metrics = pd.DataFrame(data={i:[] for i in [os.path.basename(i) for i in test_dataset.imgs]}).T
def train(self, epochs):
"""Train the network"""
# Initialization
logging.info(
"%s INFO: Begin training",
time.strftime("%m/%d/%Y %I:%M:%S %p", time.localtime()),
)
iter_ = 0
start_epoch, accu, iou, f1, train_loss, test_loss, losses = self._load_init()
loss_weights = torch.ones(
self.cfg.N_CLASSES, dtype=torch.float32, device=self.device
)
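        # Optional class weighting of the loss, computed from ground-truth
        # class frequencies (see GTDataset.compute_frequency)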
if self.cfg.WEIGHTED_LOSS:
weights = self.gt_dataset.compute_frequency()
loss_weights = (
torch.from_numpy(weights).type(torch.FloatTensor).to(self.device)
)
train_loader = self.train_dataset.get_loader(
self.cfg.BATCH_SIZE, self.cfg.WORKERS
)
for e in tqdm(range(start_epoch, epochs + 1), total=epochs):
logging.info(
"\n%s Epoch %s",
time.strftime("%m/%d/%Y %I:%M:%S %p", time.localtime()),
e,
)
            self.scheduler.step()  # advance the learning-rate schedule once per epoch
self.net.train()
steps_pbar = tqdm(
train_loader, total=self.cfg.EPOCH_SIZE // self.cfg.BATCH_SIZE
)
for data in steps_pbar:
features, labels = data
self.optimizer.zero_grad()
features = features.float().to(self.device)
labels = labels.float().to(self.device)
output = self.net(features)
if isinstance(output, tuple):
output, _, _ = output
loss = CrossEntropy2d(output, labels, weight=loss_weights)
loss.backward()
self.optimizer.step()
losses.append(loss.item())
iter_ += 1
steps_pbar.set_postfix({"loss": loss.item()})
train_loss.append(np.mean(losses[-1 * self.cfg.EPOCH_SIZE :]))
logging.info(f"Train loss: {train_loss}")
loss, iou_, acc_, f1_ = self.test()
test_loss.append(loss)
accu.append(acc_)
iou.append(iou_ * 100)
f1.append(f1_ * 100)
# Save final state
name = "_".join([os.path.join(self.cfg.PATH_MODELS, self.net_name), os.path.basename(self.dataset), f"{self.cfg.ext}.pt"])
self.save_to_jit(name)
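    # A minimal usage sketch (hypothetical config object and dataset path;
    # `cfg` is assumed to carry the fields referenced above):
    #
    #   trainer = ClassicTrainer(cfg, train=True, dataset="datasets/potsdam")
    #   trainer.train(epochs=50)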
def test(self):
logging.info(
"%s INFO: Begin testing",
time.strftime("%m/%d/%Y %I:%M:%S %p", time.localtime()),
)
csv_name = "{}_{}{}.csv".format(os.path.join(self.cfg.SAVE_FOLDER, self.cfg.NET_NAME), os.path.basename(self.dataset), self.cfg.ext)
self.net.eval()
        # per-image loss and metric containers
        loss, acc, iou, f1 = [], [], [], []
test_dataset = RGBIncrementalDataset(self.dataset, self.cfg, train=False, finetune=False)
test_images = test_dataset.get_loader(1, self.cfg.TEST_WORKERS)
stride = self.cfg.STRIDE
for iteration, (idx, data) in enumerate(tqdm(zip(test_dataset.test_ids, test_images), total=len(test_dataset.test_ids))):
file_name = os.path.basename(sorted(glob(os.path.join(self.dataset, "gts", '*')))[idx])
logging.info("Filename: %s", file_name)
data = [i.squeeze(0) for i in data]
img = data[:-1][0]
gt = data[-1].cpu().numpy()
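            # Full-image inference (stride-controlled tiling, see the parent
            # Trainer._infer_image); pred_ has class scores on the last axis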
pred_ = self._infer_image(stride, img, self.net, self.cfg.N_CLASSES)
# Computes the class with the highest probability
pred = np.argmax(pred_, axis=-1)
# Compute the metrics
ignore_indx = None
metric_acc = accuracy(pred, gt, ignore_indx=ignore_indx)
metric_iou = IoU(pred, gt, self.cfg.N_CLASSES, all_iou=True, ignore_indx=ignore_indx)
metric_f1 = f1_score(pred, gt, self.cfg.N_CLASSES, all=True, ignore_indx=ignore_indx)
metric_iou, all_iou = metric_iou
metric_f1, all_f1, weighted_f1 = metric_f1
acc.append(metric_acc)
iou.append(metric_iou)
f1.append(metric_f1)
logging.info("Mean IoU : " + str(np.nanmean(iou)))
logging.info("Mean accu : " + str(np.nanmean(acc)))
logging.info("Mean F1 : " + str(np.nanmean(f1)))
return np.mean(loss), np.nanmean(iou), np.mean(acc), np.mean(f1)
def _load_init(self):
start_epoch = 1
train_loss = []
test_loss = []
losses = []
accu = []
iou = []
f1 = []
return start_epoch, accu, iou, f1, train_loss, test_loss, losses
| 42.120567 | 140 | 0.58461 | 748 | 5,939 | 4.479947 | 0.229947 | 0.045956 | 0.025067 | 0.032229 | 0.240525 | 0.170994 | 0.143539 | 0.088033 | 0.088033 | 0.075201 | 0 | 0.009736 | 0.290958 | 5,939 | 140 | 141 | 42.421429 | 0.786037 | 0.03014 | 0 | 0.088 | 0 | 0.016 | 0.07287 | 0.021739 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032 | false | 0 | 0.104 | 0 | 0.16 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |