Schema (column, type):

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path, max_stars_repo_name, max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime, max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path, max_issues_repo_name, max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime, max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path, max_forks_repo_name, max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime, max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| all other *_quality_signal columns: qsc_code_{num_chars, mean_word_length, frac_words_unique, frac_chars_top_2grams, frac_chars_top_3grams, frac_chars_top_4grams, frac_chars_dupe_5grams through frac_chars_dupe_10grams, frac_chars_replacement_symbols, frac_chars_digital, frac_chars_whitespace, size_file_byte, num_lines, num_chars_line_max, num_chars_line_mean, frac_chars_alphabet, frac_chars_comments, cate_xml_start, frac_lines_dupe_lines, cate_autogen, frac_lines_long_string, frac_chars_string_length, frac_chars_long_word_length, frac_lines_string_concat, cate_encoded_data, frac_chars_hex_words, frac_lines_prompt_comments, frac_lines_assert} and qsc_codepython_{cate_ast, frac_lines_func_ratio, frac_lines_pass, frac_lines_import, frac_lines_simplefunc, score_lines_no_logic, frac_lines_print} | float64 |
| the same qsc_code_* / qsc_codepython_* names without the _quality_signal suffix | int64, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) |
| effective | string |
| hits | int64 |

Each record below lists the identity and repository metadata as labeled fields, then the file content, then the remaining numeric columns (avg_line_length through hits, in the order above) as a pipe-separated row.
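The qsc_* columns are surface statistics over the raw file text: token counts, duplicate n-gram fractions, character-class fractions, and Python-specific line ratios. As a rough orientation, here is a minimal sketch of how a few of the simpler ones can be recomputed from a record's content. The whitespace tokenization and the per-signal definitions are assumptions, since the pipeline that produced this dump is not documented here, so recomputed values may differ at the margins (newline handling, for instance).

```python
# Minimal sketch of a few qsc_code_* style signals. The tokenization and
# exact definitions are assumptions; this dump does not document them.
def code_signals(content: str) -> dict:
    words = content.split()        # assumed: plain whitespace tokenization
    lines = content.splitlines()
    n_chars = len(content)
    return {
        "num_words": len(words),
        "mean_word_length": sum(map(len, words)) / len(words) if words else 0.0,
        "frac_words_unique": len(set(words)) / len(words) if words else 0.0,
        "alphanum_fraction": sum(c.isalnum() for c in content) / n_chars if n_chars else 0.0,
        "frac_chars_whitespace": sum(c.isspace() for c in content) / n_chars if n_chars else 0.0,
        "num_lines": len(lines),
        "num_chars_line_max": max((len(line) for line in lines), default=0),
        "avg_line_length": n_chars / len(lines) if lines else 0.0,  # file size / line count
    }

if __name__ == "__main__":
    sample = "from django.urls import path\nfrom . import views\n"
    print(code_signals(sample))
```

On the first record below this reproduces the broad shape of the stored values: 334 bytes over 10 lines gives the avg_line_length of 33.4.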
hexsha: 3e6b0a9948d6ab9ae3bf82cdb88963f7746825d0 | size: 334 | ext: py | lang: Python
max_stars: path=consultas/urls.py | repo=Valarr/django-app | head=2faac602ce5f36dc9007d4af7a3acd38504f4f95 | licenses=["MIT"] | count=null | events=null .. null
max_issues: path=consultas/urls.py | repo=Valarr/django-app | head=2faac602ce5f36dc9007d4af7a3acd38504f4f95 | licenses=["MIT"] | count=null | events=null .. null
max_forks: path=consultas/urls.py | repo=Valarr/django-app | head=2faac602ce5f36dc9007d4af7a3acd38504f4f95 | licenses=["MIT"] | count=null | events=null .. null
content:
from django.urls import path
from . import views

urlpatterns = [
    path('', views.index, name='index'),
    path('consultaticket', views.consultaticket, name='consultaticket'),
    path('consultadecredito', views.consultadecredito, name='consultadecredito'),
    path('mostrarticket', views.mostrarticket, name='mostrarticket'),
]
| 33.4 | 81 | 0.730539 | 32 | 334 | 7.625 | 0.375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.122754 | 334 | 10 | 82 | 33.4 | 0.832765 | 0 | 0 | 0 | 0 | 0 | 0.277612 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0
| 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
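Rows like the one above are flat instances of the schema. As a hedged illustration of downstream use (the parquet file name and the pandas-based access are hypothetical, not something this dump specifies; the column names follow the schema above), the quality signals can be used to screen files:

```python
# Hypothetical usage sketch: file name and storage format are assumptions.
import pandas as pd

df = pd.read_parquet("code_sample.parquet")

# Keep Python files that are mostly alphanumeric, are not dominated by
# duplicated 10-grams, and are not flagged as auto-generated.
mask = (
    (df["lang"] == "Python")
    & (df["alphanum_fraction"] > 0.5)
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.2)
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)
)
print(df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size"]])
```

The thresholds here are arbitrary placeholders; the dump itself does not prescribe any filtering rule.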
hexsha: 3e6c1c6b5fbe5a4ffcca63260b56292216d80f44 | size: 1,973 | ext: py | lang: Python
max_stars: path=order_history.py | repo=zylizy/DBMS_Project | head=d6ff25d566a362495e3b4eb68d48d8400f2f20e6 | licenses=["MIT"] | count=null | events=null .. null
max_issues: path=order_history.py | repo=zylizy/DBMS_Project | head=d6ff25d566a362495e3b4eb68d48d8400f2f20e6 | licenses=["MIT"] | count=null | events=null .. null
max_forks: path=order_history.py | repo=zylizy/DBMS_Project | head=d6ff25d566a362495e3b4eb68d48d8400f2f20e6 | licenses=["MIT"] | count=null | events=null .. null
content:
import streamlit as st
from db_functions import *

def order_history():
    st.title("Order History")
    sorts = ['None', 'category', 'time']
    sql_userids = f"select pk_user_id from Users"
    user_ids = query_db(sql_userids)['pk_user_id'].tolist()
    user_id = st.selectbox("Please select your userid", user_ids)
    # get user info
    sql_user_info = f"select * from Users where pk_user_id={user_id}"
    df_user_info = query_db(sql_user_info)
    username = df_user_info['username'].tolist()[0]
    st.header(username + '\'s Basic Information')
    st.write(df_user_info)
    sort = st.selectbox('How would you like to sort them?', sorts)
    if sort == 'None':
        # display order history
        sql_order_history = f"select o.pk_order_id as oid, i.name as item, o.create_date as date, p.quantity " \
                            f"from orders as o, purchased_items as p, items as i " \
                            f"where o.user_id={user_id} and o.pk_order_id=p.order_id and i.pk_item_id=p.item_id " \
                            f"order by o.pk_order_id;"
    if sort == 'category':
        sql_order_history = f"select count(*) as count, cat.name as category " \
                            f"from orders as o, purchased_items as p, items as i, categories as cat " \
                            f"where o.user_id={user_id} and o.pk_order_id=p.order_id and i.pk_item_id=p.item_id " \
                            f"and i.category_id = cat.pk_category_id group by cat.name;"
    if sort == 'time':
        sql_order_history = f"select count(*) as count, o.create_date as time " \
                            f"from orders as o, purchased_items as p, items as i " \
                            f"where o.user_id={user_id} and o.pk_order_id=p.order_id and i.pk_item_id=p.item_id " \
                            f"group by o.create_date;"
    df_order_history = query_db(sql_order_history)
    st.header(username + "\'s Order History:")
    st.write(df_order_history)
| 51.921053 | 115 | 0.611759 | 313 | 1,973 | 3.613419 | 0.217252 | 0.058355 | 0.035367 | 0.044209 | 0.342175 | 0.322723 | 0.322723 | 0.322723 | 0.262599 | 0.262599 | 0 | 0.000706 | 0.281804 | 1,973 | 38 | 116 | 51.921053 | 0.797459 | 0.017739 | 0 | 0.15625 | 0 | 0.125 | 0.469008 | 0.071281 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.0625 | 0 | 0.09375 | 0
| 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
hexsha: 3e6d175a2c46fd4c086a5aa6dbda506eabe35fd4 | size: 1,415 | ext: py | lang: Python
max_stars: path=cogs/commands/utility/8ball.py | repo=teSill/temflix | head=31d40265fa71695966c6178145a1057cd2aeda27 | licenses=["MIT"] | count=3 | events=2020-12-21T20:51:56.000Z .. 2022-01-04T11:55:45.000Z
max_issues: path=cogs/commands/utility/8ball.py | repo=teSill/temflix | head=31d40265fa71695966c6178145a1057cd2aeda27 | licenses=["MIT"] | count=null | events=null .. null
max_forks: path=cogs/commands/utility/8ball.py | repo=teSill/temflix | head=31d40265fa71695966c6178145a1057cd2aeda27 | licenses=["MIT"] | count=null | events=null .. null
content:
import discord
from discord.ext import commands
import random

class EightBall(commands.Cog):
    def __init__(self, client):
        self.client = client

    @commands.command(aliases=["8ball", "8-ball"], description="Have the magic 8-ball answer your most burning questions.")
    async def eight_ball(self, ctx):
        responses = ["It is certain.",
                     "It is decidedly so.",
                     "Without a doubt.",
                     "Yes - definitely.",
                     "I'm the most certain I've ever been that the answer is yes.",
                     "You may rely on it.",
                     "As I see it, yes.",
                     "Most likely.",
                     "Outlook good.",
                     "Yes.",
                     "Signs point to yes.",
                     "Reply hazy, try again.",
                     "Definitely, maybe.",
                     "Don't count on it.",
                     "My reply is no.",
                     "My sources say no.",
                     "Outlook not so good",
                     "Very doubtful",
                     "Certainly not.",
                     "How could you even suggest otherwise?",
                     "I'm the most certain I've ever been that the answer is no.",
                     ]
        await ctx.send(random.choice(responses))

def setup(client):
    client.add_cog(EightBall(client))
| 36.282051 | 123 | 0.466431 | 150 | 1,415 | 4.36 | 0.566667 | 0.030581 | 0.015291 | 0.027523 | 0.12844 | 0.12844 | 0.12844 | 0.12844 | 0.12844 | 0.12844 | 0 | 0.003741 | 0.433216 | 1,415 | 38 | 124 | 37.236842 | 0.811721 | 0 | 0 | 0 | 0 | 0 | 0.359717 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0 | 0.090909 | 0 | 0.181818 | 0
| 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
hexsha: 3e74eb605f50a2789671592734f1dea5fd163012 | size: 918 | ext: py | lang: Python
max_stars: path=gharchive/parse_json.py | repo=IAMABOY/Mining-Github | head=cf11c94e72b11f3ce9d638b562df438c8e56d149 | licenses=["MIT"] | count=8 | events=2019-12-08T11:57:59.000Z .. 2022-01-24T06:26:56.000Z
max_issues: path=gharchive/parse_json.py | repo=IAMABOY/Mining-Github | head=cf11c94e72b11f3ce9d638b562df438c8e56d149 | licenses=["MIT"] | count=null | events=null .. null
max_forks: path=gharchive/parse_json.py | repo=IAMABOY/Mining-Github | head=cf11c94e72b11f3ce9d638b562df438c8e56d149 | licenses=["MIT"] | count=2 | events=2019-12-17T02:38:55.000Z .. 2021-12-16T01:53:11.000Z
content:
import sys
import os
import json
import gzip

def jsonReader(inputJsonFilePath, pos):
    flag = False
    with gzip.open(inputJsonFilePath, 'r') as jsonContent:
        for rowNumber, line in enumerate(jsonContent, start=1):
            try:
                # The flag lets the program resume from the given position after a
                # crash, instead of re-processing the file from the beginning.
                if rowNumber == pos:
                    flag = True
                if not flag:
                    continue
                line = line.strip()
                if len(line) <= 0:
                    continue
                jsonObject = json.loads(line)
                repoInfo = jsonObject.get('repo', None)
                if repoInfo == '' or repoInfo is None:
                    continue
                print(repoInfo)
            except Exception as e:
                print(e)

if __name__ == '__main__':
    jsonReader('2019-09-19-10.json.gz', 1)
| 24.157895 | 63 | 0.484749 | 85 | 918 | 5.141176 | 0.623529 | 0.032037 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025 | 0.433551 | 918 | 38 | 64 | 24.157895 | 0.815385 | 0.046841 | 0 | 0.12 | 0 | 0 | 0.038857 | 0.024 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.16 | 0 | 0.2 | 0.08
| 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
hexsha: 3e753e4b76a7bccde83190218fa4e3ea302764fe | size: 393 | ext: py | lang: Python
max_stars: path=iotalib/check_roof.py | repo=WWGolay/iota | head=f3e67502d7f96bb836b45b7eca4ebb9fe5490e6d | licenses=["MIT"] | count=null | events=null .. null
max_issues: path=iotalib/check_roof.py | repo=WWGolay/iota | head=f3e67502d7f96bb836b45b7eca4ebb9fe5490e6d | licenses=["MIT"] | count=null | events=null .. null
max_forks: path=iotalib/check_roof.py | repo=WWGolay/iota | head=f3e67502d7f96bb836b45b7eca4ebb9fe5490e6d | licenses=["MIT"] | count=null | events=null .. null
content:
#!/usr/bin/python

import pycurl
from io import BytesIO

def checkOpen():
    isOpen = False
    buffer = BytesIO()
    c = pycurl.Curl()
    c.setopt(c.URL, 'https://www.winer.org/Site/Roof.php')
    c.setopt(c.WRITEDATA, buffer)
    c.perform()
    c.close()
    body = buffer.getvalue()
    if body.find(b'ROOFPOSITION=OPEN') > -1:
        isOpen = True
    return isOpen
| 21.833333 | 59 | 0.592875 | 52 | 393 | 4.480769 | 0.711538 | 0.060086 | 0.06867 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003436 | 0.259542 | 393 | 18 | 60 | 21.833333 | 0.797251 | 0.040712 | 0 | 0 | 0 | 0 | 0.144444 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.142857 | 0 | 0.214286 | 0
| 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
hexsha: 3e7863d676fdd4741e30575b304165077d18541c | size: 2,238 | ext: py | lang: Python
max_stars: path=egg/app.py | repo=eanorambuena/Driver | head=3cb14f5d741c6bae364326305ae0ded04e10e9d4 | licenses=["MIT"] | count=null | events=null .. null
max_issues: path=egg/app.py | repo=eanorambuena/Driver | head=3cb14f5d741c6bae364326305ae0ded04e10e9d4 | licenses=["MIT"] | count=null | events=null .. null
max_forks: path=egg/app.py | repo=eanorambuena/Driver | head=3cb14f5d741c6bae364326305ae0ded04e10e9d4 | licenses=["MIT"] | count=null | events=null .. null
content:
# Imports
from egg.resources.console import get, clearConsole
from egg.resources.constants import *
from egg.resources.modules import install, upgrade, Repo
from egg.resources.help import help
from egg.resources.auth import login, register

"""
FUNCTION eggConsole(condition: bool = True)
Display the Egg Console
Currently, the Egg Console commands are:
    $nqs        Start the NQS Developer console
    $new        Start the News Journalist console
    $login      Log in to the Egg-cosystem *coming soon*
    $register   Register in the Egg-cosystem *coming soon*
    $install    Install a pip package
    $upgrade    Upgrade a pip package
    $pull       Import a package stored on a GitHub repository *coming soon: currently, just use the github_com package*
    $help       Get started command
    $clear      Clear the Egg Console
    $end        End the Egg Console
WARNING:
    Always use the $end command in every console you run
    *ONLY use a condition different from True as an argument of eggConsole(condition) if you know what you are doing*
    This is the reason why condition only allows <<bool>> as its data type
"""
def eggConsole(condition: bool = True):
    print(white + "Egg Console is now running")
    logged = 0
    while condition:
        i = get("egg")
        if i == "$nqs":
            from nqs.developer.app import developerConsole
            developerConsole()
        elif i == "$new":
            from news.app import journalistConsole
            journalistConsole()
        elif i == "$login":
            login()
        elif i == "$register":
            register()
        elif i == "$install":
            print(white + "Package:")
            name = get("egg")
            install(name)
        elif i == "$upgrade":
            print(white + "Package:")
            name = get("egg")
            upgrade(name)
        elif i == "$pull":
            print(white + "Repo:")
            name = get("egg")
            repo = Repo(name)
            print(white + "Package:")
            package = get("egg")
            last = repo.pull(package)
            # *coming soon*
        elif i == "$help":
            help()
        elif i == "$clear":
            clearConsole()
        elif i == "$end":
            print(white + "Egg Console stopped running")
            return "done"
        else:
            pass
| 32.434783 | 113 | 0.605004 | 271 | 2,238 | 4.99262 | 0.357934 | 0.033259 | 0.059128 | 0.039911 | 0.075388 | 0.039911 | 0 | 0 | 0 | 0 | 0 | 0.000637 | 0.298034 | 2,238 | 69 | 114 | 32.434783 | 0.860598 | 0.00983 | 0 | 0.136364 | 0 | 0 | 0.115357 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022727 | false | 0.022727 | 0.159091 | 0 | 0.204545 | 0.136364
| 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
hexsha: 3e7d231b81300bc8be65b86f6758957fdbb26baa | size: 653 | ext: py | lang: Python
max_stars: path=backend-project/small_eod/users/models.py | repo=merito/small_eod | head=ab19b82f374cd7c4b21d8f9412657dbe7f7f03e2 | licenses=["MIT"] | count=64 | events=2019-12-30T11:24:03.000Z .. 2021-06-24T01:04:56.000Z
max_issues: path=backend-project/small_eod/users/models.py | repo=merito/small_eod | head=ab19b82f374cd7c4b21d8f9412657dbe7f7f03e2 | licenses=["MIT"] | count=465 | events=2018-06-13T21:43:43.000Z .. 2022-01-04T23:33:56.000Z
max_forks: path=backend-project/small_eod/users/models.py | repo=merito/small_eod | head=ab19b82f374cd7c4b21d8f9412657dbe7f7f03e2 | licenses=["MIT"] | count=72 | events=2018-12-02T19:47:03.000Z .. 2022-01-04T22:54:49.000Z
content:
from django.contrib.auth.models import AbstractUser

from ..notifications.utils import TemplateKey, TemplateMailManager

class User(AbstractUser):
    def notify(self, **kwargs):
        kwargs["user"] = self
        enabled = self.get_enabled_notifications()
        key = getattr(
            TemplateKey, f"{kwargs['source']}_{kwargs['action']}".upper(), None
        )
        if key not in enabled:
            return False
        return TemplateMailManager.send(
            template_key=key, recipient_list=[self.email], context=kwargs
        )

    def get_enabled_notifications(self):
        return TemplateMailManager.TEMPLATE_MAP.keys()
| 29.681818 | 79 | 0.658499 | 67 | 653 | 6.298507 | 0.58209 | 0.047393 | 0.109005 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.240429 | 653 | 21 | 80 | 31.095238 | 0.850806 | 0 | 0 | 0 | 0 | 0 | 0.062787 | 0.056662 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0.0625 | 0.5 | 0
| 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
hexsha: 3e7efc62df24d3372d57ba9f3602f16dfbfbeff6 | size: 2,689 | ext: py | lang: Python
max_stars: path=rtlsdr_sstv/utils.py | repo=martinber/rtlsdr_sstv | head=f59ca523408e949f98c4b81b09b2d46232111f4a | licenses=["MIT"] | count=3 | events=2019-03-16T01:20:09.000Z .. 2020-12-31T12:31:17.000Z
max_issues: path=rtlsdr_sstv/utils.py | repo=martinber/rtlsdr_sstv | head=f59ca523408e949f98c4b81b09b2d46232111f4a | licenses=["MIT"] | count=null | events=null .. null
max_forks: path=rtlsdr_sstv/utils.py | repo=martinber/rtlsdr_sstv | head=f59ca523408e949f98c4b81b09b2d46232111f4a | licenses=["MIT"] | count=1 | events=2020-12-27T02:31:18.000Z .. 2020-12-27T02:31:18.000Z
content:
import collections
import math

import numpy as np

def mapeadora(value):
    # map a value in the 1500..2300 range onto 0..255
    valor_mapeado = int((value - 1500) / 800 * 255)
    return valor_mapeado

def escribir_pixel(img, columna, linea, canal, valor):
    '''Function in charge of writing the image pixel by pixel.'''
    if linea >= img.height:
        return
    if canal == "lum":
        prev = img.getpixel((columna, linea - 1))
        datapixel = (mapeadora(valor), prev[1], prev[2])
        img.putpixel((columna, linea - 1), datapixel)
    if canal == "cr":
        prev = img.getpixel((columna, linea - 1))
        nxt_prev = img.getpixel((columna, linea))
        datapixel = (prev[0], prev[1], mapeadora(valor))
        nxt_datapixel = (nxt_prev[0], nxt_prev[1], mapeadora(valor))
        img.putpixel((columna, linea - 1), datapixel)
        img.putpixel((columna, linea), nxt_datapixel)
    if canal == "cb":
        prev = img.getpixel((columna, linea - 1))
        nxt_prev = img.getpixel((columna, linea))
        datapixel = (prev[0], mapeadora(valor), prev[2])
        nxt_datapixel = (nxt_prev[0], mapeadora(valor), nxt_prev[2])
        img.putpixel((columna, linea - 1), datapixel)
        img.putpixel((columna, linea), nxt_datapixel)
    if canal == "nxt_lum":
        prev = img.getpixel((columna, linea))
        datapixel = (mapeadora(valor), prev[1], prev[2])
        img.putpixel((columna, linea), datapixel)

def lowpass(cutout, delta_w, atten):
    '''
    cutout and delta_w in fractions of pi radians per second.
    atten in positive decibels.
    '''
    beta = 0
    if atten > 50:
        beta = 0.1102 * (atten - 8.7)
    elif atten < 21:
        beta = 0
    else:
        beta = 0.5842 * (atten - 21)**0.4 + 0.07886 * (atten - 21)
    length = math.ceil((atten - 8) / (2.285 * delta_w * math.pi)) + 1
    if length % 2 == 0:
        length += 1
    coeffs = np.kaiser(length, beta)
    # i is the index into the vector, n is the axis with zero centered
    # at the middle of the filter
    for i, n in enumerate(range(
            int(-(length - 1) / 2),
            int((length - 1) / 2) + 1)):
        if n == 0:
            coeffs[i] *= cutout
        else:
            coeffs[i] *= math.sin(n * math.pi * cutout) / (n * math.pi)
    return coeffs

def filtrar(input, cutout, delta_w, atten):
    '''
    The right end of the buffer holds the most recent sample and has the highest index.
    '''
    coeffs = lowpass(cutout, delta_w, atten)
    # plot(coeffs, numpy.abs(numpy.fft.fft(coeffs)))
    buf = collections.deque([0] * len(coeffs))
    for s in input:
        buf.popleft()
        buf.append(s)
        sum = 0
        for j in range(len(coeffs)):
            sum += buf[-j - 1] * coeffs[j]
        yield sum
| 29.549451 | 80 | 0.581629 | 368 | 2,689 | 4.19837 | 0.296196 | 0.100971 | 0.058252 | 0.085437 | 0.368932 | 0.311974 | 0.272492 | 0.272492 | 0.262783 | 0.262783 | 0 | 0.039115 | 0.277427 | 2,689 | 90 | 81 | 29.877778 | 0.756047 | 0.132391 | 0 | 0.262295 | 0 | 0 | 0.006111 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065574 | false | 0.032787 | 0.04918 | 0 | 0.163934 | 0
| 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
hexsha: 3e7f9f610ed95d40e15a8580e0dd70e9219fb93d | size: 3,653 | ext: py | lang: Python
max_stars: path=Pong.py | repo=Mishkanian/pong_game | head=5a04b4b5fc36af2159e60fb85941034a2325996c | licenses=["MIT"] | count=null | events=null .. null
max_issues: path=Pong.py | repo=Mishkanian/pong_game | head=5a04b4b5fc36af2159e60fb85941034a2325996c | licenses=["MIT"] | count=null | events=null .. null
max_forks: path=Pong.py | repo=Mishkanian/pong_game | head=5a04b4b5fc36af2159e60fb85941034a2325996c | licenses=["MIT"] | count=1 | events=2021-11-15T20:21:53.000Z .. 2021-11-15T20:21:53.000Z
content:
"""
Pong game by Michael Mishkanian
"""
import turtle
wn = turtle.Screen()
wn.title("Pong by Michael Mishkanian")
wn.bgcolor("black")
wn.setup(width=800, height=600)
wn.tracer(0)
# Paddle A
paddle_a = turtle.Turtle()
paddle_a.speed(0)
paddle_a.shape("square")
paddle_a.color("white")
paddle_a.shapesize(stretch_wid=5, stretch_len=1) # make paddle a rectangle
paddle_a.penup()
paddle_a.goto(-350, 0) # starting location of paddle on left side of screen
# Paddle B
paddle_b = turtle.Turtle()
paddle_b.speed(0)
paddle_b.shape("square")
paddle_b.color("white")
paddle_b.shapesize(stretch_wid=5, stretch_len=1)
paddle_b.penup()
paddle_b.goto(350, 0) # starting location of paddle on right side of screen
# Ball
ball = turtle.Turtle()
ball.speed(0)
ball.shape("square")
ball.color("white")
ball.penup()
ball.goto(0, 0) # ball starts in middle of screen
ball.dx = .33 # movement speed of the ball dx
ball.dy = .33 # movement speed of the ball dy
# Score Display
pen = turtle.Turtle()
pen.speed(0)
pen.color("white")
pen.penup()
pen.hideturtle()
pen.goto(0, 260)
pen.write("Player 1: 0 Player 2: 0", align="center", font=("Courier", 24, "normal"))
# Start Tracking Scores
score_a = 0
score_b = 0
def paddle_a_up():
"""
This function takes in the current y-coordinate of paddle A
and then increases the position by 20 (AKA "go up")
"""
y = paddle_a.ycor()
y += 20
paddle_a.sety(y)
def paddle_a_down():
"""
This function takes in the current y-coordinate of paddle A
and then decreases the position down 20 (AKA "go down")
"""
y = paddle_a.ycor()
y -= 20
paddle_a.sety(y)
def paddle_b_up():
"""
This function takes in the current y-coordinate of paddle B
and then increases the position by 20 (AKA "go up")
"""
y = paddle_b.ycor()
y += 20
paddle_b.sety(y)
def paddle_b_down():
"""
This function takes in the current y-coordinate of paddle B
and then decreases the position by 20 (AKA "go down")
"""
y = paddle_b.ycor()
y -= 20
paddle_b.sety(y)
# Key bindings
wn.listen()
wn.onkeypress(paddle_a_up, "w")
wn.onkeypress(paddle_a_down, "s")
wn.onkeypress(paddle_b_up, "Up")
wn.onkeypress(paddle_b_down, "Down")
# Main game loop
while True:
wn.update()
# Ball movement
ball.setx(ball.xcor() + ball.dx)
ball.sety(ball.ycor() + ball.dy)
# Border checks
if ball.ycor() > 290:
ball.sety(290)
ball.dy *= -1 # reverse direction if ball is too high
if ball.ycor() < -290:
ball.sety(-290)
ball.dy *= -1 # reverse direction if ball is too low
# retart game when the ball passes a paddle
if ball.xcor() > 390:
ball.goto(0, 0)
ball.dx *= -1
score_a += 1
pen.clear() # clear score
pen.write("Player 1: {} Player 2: {}".format(score_a, score_b), align="center", font=("Courier", 24, "normal"))
if ball.xcor() < -390:
ball.goto(0, 0)
ball.dx *= -1
score_b += 1
pen.clear() # clear score
pen.write("Player 1: {} Player 2: {}".format(score_a, score_b), align="center", font=("Courier", 24, "normal"))
# Collisions
if (ball.xcor() > 340 and ball.xcor() < 350) and (ball.ycor() < paddle_b.ycor() + 40
and ball.ycor() > paddle_b.ycor() - 40):
ball.setx(340)
ball.dx *= -1
if (ball.xcor() < -340 and ball.xcor() > -350) and (ball.ycor() < paddle_a.ycor() + 40
and ball.ycor() > paddle_a.ycor() - 40):
ball.setx(-340)
ball.dx *= -1
| 26.280576 | 120 | 0.611005 | 565 | 3,653 | 3.854867 | 0.215929 | 0.067493 | 0.031221 | 0.034894 | 0.583563 | 0.55877 | 0.506887 | 0.455464 | 0.402204 | 0.402204 | 0 | 0.043952 | 0.246373 | 3,653 | 138 | 121 | 26.471014 | 0.747185 | 0.266083 | 0 | 0.284091 | 0 | 0 | 0.081522 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.011364 | 0 | 0.056818 | 0
| 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
hexsha: 3e83c39b04f2c10f748cc83b7509198a99b52216 | size: 1,432 | ext: py | lang: Python
max_stars: path=clean.py | repo=glqstrauss/oopsgenie | head=d1984e332b11f972db2008867f1aba0917457b9b | licenses=["MIT"] | count=5 | events=2020-01-02T21:15:31.000Z .. 2020-07-29T18:01:51.000Z
max_issues: path=clean.py | repo=glqstrauss/oopsgenie | head=d1984e332b11f972db2008867f1aba0917457b9b | licenses=["MIT"] | count=2 | events=2020-01-07T15:36:44.000Z .. 2020-01-13T20:38:45.000Z
max_forks: path=clean.py | repo=glqstrauss/oopsgenie | head=d1984e332b11f972db2008867f1aba0917457b9b | licenses=["MIT"] | count=1 | events=2020-07-29T17:10:32.000Z .. 2020-07-29T17:10:32.000Z
content:
import csv

from utils import get_valid_colum_indices

class Cleaner():
    def clean(file, clean_columns, remove):
        print("Cleaning {}".format(file))
        print("For columns {}".format(clean_columns))
        new_file = file[0:-7] + "clean.csv"
        with open(file, 'r') as raw_file:
            reader = csv.reader(raw_file, delimiter=',')
            headers = next(reader)
            col_count = len(clean_columns)
            if remove:
                clean_columns.append("Message")
            indices = get_valid_colum_indices(headers, clean_columns)
            if indices is None:
                print("invalid column specified for in {}".format(file))
                return
            with open(new_file, 'w') as clean_file:
                writer = csv.writer(clean_file, delimiter=',')
                writer.writerow(clean_columns)
                for row in reader:
                    if remove:
                        blacklisted = False
                        for r in remove:
                            if r in row[indices[-1]]:
                                blacklisted = True
                        if blacklisted:
                            continue
                    cleaned_row = []
                    for i in range(col_count):
                        cleaned_row.append(row[indices[i]])
                    writer.writerow(cleaned_row)
        print("Done")
| 34.926829 | 73 | 0.48743 | 143 | 1,432 | 4.72028 | 0.398601 | 0.106667 | 0.038519 | 0.059259 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003641 | 0.424581 | 1,432 | 41 | 74 | 34.926829 | 0.815534 | 0 | 0 | 0.060606 | 0 | 0 | 0.05792 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.060606 | 0 | 0.151515 | 0.121212
| 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
hexsha: 3e8a5b0b6fc0612db9638f1736e52adef498431d | size: 37,129 | ext: py | lang: Python
max_stars: path=morm/db.py | repo=neurobin/python-morm | head=2b6dcedc7090a9e642331300a24dfcca41ea1afe | licenses=["BSD-3-Clause"] | count=4 | events=2021-03-12T16:36:24.000Z .. 2022-03-06T09:26:14.000Z
max_issues: path=morm/db.py | repo=neurobin/python-morm | head=2b6dcedc7090a9e642331300a24dfcca41ea1afe | licenses=["BSD-3-Clause"] | count=null | events=null .. null
max_forks: path=morm/db.py | repo=neurobin/python-morm | head=2b6dcedc7090a9e642331300a24dfcca41ea1afe | licenses=["BSD-3-Clause"] | count=null | events=null .. null
content:
"""DB utilities.
"""
__author__ = 'Md Jahidul Hamid <jahidulhamid@yahoo.com>'
__copyright__ = 'Copyright © Md Jahidul Hamid <https://github.com/neurobin/>'
__license__ = '[BSD](http://www.opensource.org/licenses/bsd-license.php)'
__version__ = '0.1.0'
import collections
import re
import asyncio
import nest_asyncio # type: ignore
import atexit
import logging
import asyncpg # type: ignore
from asyncpg import Record, Connection # type: ignore
from typing import Optional, Dict, List, Tuple, TypeVar, Union, Any
from morm import exceptions
from morm.model import ModelType, Model, ModelBase, _FieldNames
from morm.q import Q
from morm.types import Void
LOGGER_NAME = 'morm.db-'
log = logging.getLogger(LOGGER_NAME)
nest_asyncio.apply()
def record_to_model(record: Record, model_class: ModelType) -> Model:
"""Convert a Record object to Model object.
Args:
record (Record): Record object.
model_class (ModelType): Model class
Returns:
Model: Model instance.
"""
new_record = model_class()
for k,v in record.items():
new_record.Meta._fromdb_.append(k)
setattr(new_record, k, v)
return new_record
class Pool(object):
"""Open database connection pool.
```python
from morm.db import Pool
DB_POOL = Pool(
dsn='postgres://',
host='localhost',
port=5432,
user='jahid', # change accordingly
password='jahid', # change accordingly
database='test', # change accordingly
min_size=10, # change accordingly
max_size=90, # change accordingly
)
```
This will create and open an asyncpg pool which will be automatically closed at exit.
You should set this in a settings file from where you can import the `DB_POOL`
Args:
dsn (str, optional): DSN string. Defaults to None.
min_size (int, optional): Minimum connection in the pool. Defaults to 10.
max_size (int, optional): Maximum connection in the pool. Defaults to 100.
max_queries (int, optional): Number of queries after a connection is closed and replaced with a new connection. Defaults to 50000.
max_inactive_connection_lifetime (float, optional): Number of seconds after which inactive connections in the pool will be closed. Pass `0` to disable this mechanism. Defaults to 300.0.
setup ([type], optional): A coroutine to prepare a connection right before it is returned from `Pool.acquire()`. Defaults to None.
init ([type], optional): A coroutine to initialize a connection when it is created. Defaults to None.
loop ([type], optional): Asyncio even loop instance. Defaults to None.
connection_class ([type], optional): The class to use for connections. Must be a subclass of `asyncpg.connection.Connection`. Defaults to asyncpg.connection.Connection.
"""
def __init__(self, dsn: str = None,
min_size: int = 10,
max_size: int = 100,
max_queries: int = 50000,
max_inactive_connection_lifetime: float = 300.0,
setup=None,
init=None,
loop=None,
connection_class=Connection,
**connect_kwargs):
self.dsn = dsn
self.min_size = min_size
self.max_size = max_size
self.max_queries = max_queries
self.max_inactive_connection_lifetime = max_inactive_connection_lifetime
self.setup = setup
self.init = init
self.loop = loop
self.connection_class = connection_class
self.connect_kwargs = connect_kwargs
self._pool = None
self._open()
atexit.register(self._close)
@property
def pool(self) -> asyncpg.pool.Pool:
"""Property pool that is an asyncpg.pool.Pool object
"""
return self._pool
async def __create_pool(self) -> asyncpg.pool.Pool:
"""Create a asyncpg.pool.Pool for this Pool object.
Returns:
asyncpg.pool.Pool: Pool object (singleton)
"""
return await asyncpg.create_pool(
dsn=self.dsn,
min_size=self.min_size,
max_size=self.max_size,
max_queries=self.max_queries,
max_inactive_connection_lifetime=self.max_inactive_connection_lifetime,
setup=self.setup,
init=self.init,
loop=self.loop,
connection_class=self.connection_class,
**self.connect_kwargs)
def _open(self):
"""Open the pool. Called on init so not need to call this
method explicitly.
"""
if not self._pool:
self._pool = asyncio.get_event_loop().run_until_complete(self.__create_pool())
log.debug("Pool opened")
def _close(self):
"""Attempt to close the pool gracefully. registered with atexit.
You do not need to call this method explicitly.
"""
if self._pool:
asyncio.get_event_loop().run_until_complete(self._pool.close())
self._pool = None
log.debug("Pool closed")
class DB(object):
"""Initialize a DB object setting a pool to get connection from.
If connection is given, it is used instead.
The `corp()` method returns an asyncpg.pool.Pool or an
asyncpg.Connection
Args:
pool (Pool): A connection pool
con (Connection): Connection. Defaults to None.
"""
def __init__(self, pool: Pool, con: Connection=None):
self._pool = pool
self._con = con
self.DATA_NO_CHANGE = 'DATA_NO_CHANGE_TRIGGERED'
def corp(self) -> Union[asyncpg.pool.Pool, Connection]:
"""Return the connection if available, otherwise return a Pool.
Note: The name reads 'c or p'
Returns:
asyncpg.Connection or asyncpg.pool.Pool object
"""
if self._con:
return self._con
return self._pool.pool
async def fetch(self, query: str, *args,
timeout: float = None,
model_class: ModelType=None
) -> Union[List[ModelBase], List[Record]]:
"""Make a query and get the results.
Resultant records can be mapped to model_class objects.
Args:
query (str): Query string.
args (*list or *tuple): Query arguments.
timeout (float, optional): Timeout value. Defaults to None.
model_class (Model, optional): Defaults to None.
Returns:
List[Model] or List[Record] : List of model instances if model_class is given, otherwise list of Record instances.
"""
pool = self.corp()
records = await pool.fetch(query, *args, timeout=timeout)
if not model_class:
return records
else:
new_records = []
for record in records:
new_record = record_to_model(record, model_class)
new_records.append(new_record)
return new_records
async def fetchrow(self, query: str, *args,
timeout: float = None,
model_class: ModelType=None
) -> Union[ModelBase, Record]:
"""Make a query and get the first row.
Resultant record can be mapped to model_class objects.
Args:
query (str): Query string.
args (*list or *tuple): Query arguments.
timeout (float, optional): Timeout value. Defaults to None.
model_class (Model, optional): Defaults to None.
Returns:
Record or model_clas object or None if no rows were selected.
"""
pool = self.corp()
record = await pool.fetchrow(query, *args, timeout=timeout)
if not model_class:
return record
else:
if not record:
return record
new_record = record_to_model(record, model_class)
return new_record
async def fetchval(self, query: str, *args,
column: int = 0,
timeout: float = None
) -> Any:
"""Run a query and return a column value in the first row.
Args:
query (str): Query to run.
args (*list or *tuple): Query arguments.
column (int, optional): Column index. Defaults to 0.
timeout (float, optional): Timeout. Defaults to None.
Returns:
Any: Coulmn (indentified by index) value of first row.
"""
pool = self.corp()
return await pool.fetchval(query, *args, column=column, timeout=timeout)
async def execute(self, query: str, *args,
timeout: float = None
) -> str:
"""Execute a query.
Args:
query (str): Query to run.
args (*list or *tuple): Query arguments.
timeout (float, optional): Timeout. Defaults to None.
Returns:
str: Status of the last SQL command
"""
pool = self.corp()
return await pool.execute(query, *args, timeout=timeout)
def get_insert_query(self, mob: ModelBase, reset=False) -> Tuple[str, List[Any]]:
"""Get insert query for the model object (mob) with its current data
Args:
mob (ModelBase): Model object
reset (bool): Reset the value change counter. Defaults to False
Returns:
(str, list): query, args
"""
data = mob.Meta._fields_
new_data_gen = mob.__class__._get_FieldValue_data_valid_(data, up=True)
columns = []
values = []
markers = []
c = 0
for n,v in new_data_gen:
c += 1
if reset:
v.value_change_count = 0
mob.Meta._fromdb_ = []
columns.append(n)
values.append(v.value)
markers.append(f'${c}')
column_q = '","'.join(columns)
if column_q:
column_q = f'"{column_q}"'
marker_q = ', '.join(markers)
query = f'INSERT INTO "{mob.__class__._get_db_table_()}" ({column_q}) VALUES ({marker_q}) RETURNING "{mob.__class__._get_pk_()}"'
else:
query = ''
return query, values
def get_update_query(self, mob: ModelBase, reset=False) -> Tuple[str, List[Any]]:
"""Get the update query for the changed data in the model object (mob)
Args:
mob (ModelBase): Model object
reset (bool): If True, this method can be called just once to get the changes done on mob. Subsequent call will return empty query.
Raises:
AttributeError: If primary key does not exists i.e if not updatable
Returns:
str, args: tuple of query, args
"""
pkval = getattr(mob, mob.__class__._get_pk_()) #save method depends on it's AttributeError
data = mob.Meta._fields_
new_data_gen = mob.__class__._get_FieldValue_data_valid_(data, up=True)
colval = []
values = []
c = 0
for n,v in new_data_gen:
if n == mob.__class__._get_pk_(): continue
if v.value_change_count > 0:
c += 1
colval.append(f'"{n}"=${c}')
values.append(v.value)
if reset:
v.value_change_count = 0
colval_q = ', '.join(colval)
if colval_q:
where = f'"{mob.__class__._get_pk_()}"=${c+1}'
values.append(pkval)
query = f'UPDATE "{mob.__class__._get_db_table_()}" SET {colval_q} WHERE {where}'
else:
query = ''
return query, values
def get_delete_query(self, mob: ModelBase) -> Tuple[str, List[Any]]:
"""Get the delete query for the model object.
Args:
mob (ModelBase): model object.
Returns:
Tuple[str, List[Any]]: quey, args
"""
pkval = getattr(mob, mob.__class__._get_pk_())
query = f'DELETE FROM "{mob.__class__._get_db_table_()}" WHERE "{mob.__class__._get_pk_()}"=$1'
return query, [pkval]
async def delete(self, mob: ModelBase, timeout: float = None) -> str:
"""Delete the model object data from database.
Args:
mob (ModelBase): Model object
timeout (float): timeout value. Defaults to None.
Returns:
(str): status of last sql command.
"""
query, args = self.get_delete_query(mob)
await mob._pre_delete_(self)
res = await self.execute(query, *args, timeout=timeout)
await mob._post_delete_(self)
return res
async def insert(self, mob: ModelBase, timeout: float = None) -> Any:
"""Insert the current data state of mob into db.
Args:
mob (ModelBase): Model object
timeout (float): timeout value. Defaults to None.
Returns:
(Any): Value of primary key of the inserted row
"""
query, args = self.get_insert_query(mob, reset=True)
await mob._pre_insert_(self)
pkval = await self.fetchval(query, *args, timeout=timeout)
if pkval is not None:
setattr(mob, mob.__class__._get_pk_(), pkval)
await mob._post_insert_(self)
return pkval
async def update(self, mob: ModelBase, timeout: float = None) -> str:
"""Update the current changed data of mob onto db
Args:
mob (ModelBase): Model object
timeout (float): timeout value. Defaults to None.
Raises:
AttributeError: If primary key does not exists.
Returns:
str: status of last sql command.
Successful status starts with the word 'UPDATE' followed by
number of rows updated, which should be 1 in this case.
"""
query, args = self.get_update_query(mob, reset=True)
if query:
await mob._pre_update_(self)
res = await self.execute(query, *args, timeout=timeout)
await mob._post_update_(self)
else:
res = self.DATA_NO_CHANGE
return res
async def save(self, mob: ModelBase, timeout: float = None) -> Union[str, Any]:
"""Insert if not exists and update if exists.
update is tried first, if fails (if pk does not exist), insert
is called.
Args:
mob (ModelBase): Model object
timeout (float): timeout value. Defaults to None.
Returns:
int or str: The value of the primary key for insert or
status for update.
"""
await mob._pre_save_(self)
try:
res = await self.update(mob, timeout=timeout)
except AttributeError:
res = await self.insert(mob, timeout=timeout)
await mob._post_save_(self)
return res
def q(self, model: ModelType = None) -> 'ModelQuery':
"""Return a ModelQuery for model
If `None` is passed, it will give a `ModelQuery` without setting
`self.model` on the `ModelQuery` object.
Args:
model (ModelType, optional): model class. Defaults to None.
Raises:
TypeError: If invalid model type is passed
Returns:
ModelQuery: ModelQuery object
"""
return self(model)
def __call__(self, model: ModelType = None) -> 'ModelQuery':
"""Return a ModelQuery for model
If `None` is passed, it will give a `ModelQuery` without setting
`self.model` on the `ModelQuery` object.
Args:
model (ModelType, optional): model class. Defaults to None.
Raises:
TypeError: If invalid model type is passed
Returns:
ModelQuery: ModelQuery object
"""
if isinstance(model, ModelType) or model is None:
return ModelQuery(self, model)
raise TypeError(f"Invalid model: {model}. model must be of type {ModelType.__name__}. Make sure you did not pass a model object by mistake.")
class ModelQuery():
"""Query builder for model class.
Calling `db(Model)` gives you a model query handler which have several query methods to help you make queries.
Use `q(query, *args)` method to make queries with positional arguments. If you want named arguments, use the uderscored version of these methods. For example, `q(query, *args)` has an underscored version `q_(query, *args, **kwargs)` that can take named arguments.
You can add a long query part by part:
```python
from morm.db import DB
db = DB(DB_POOL) # get a db handle.
qh = db(User) # get a query handle.
query, args = qh.q(f'SELECT * FROM {qh.db_table}')\
.q(f'WHERE {qh.f.profession} = ${qh.c}', 'Teacher')\
.q_(f'AND {qh.f.age} = :age', age=30)\
.getq()
print(query, args)
# fetch:
await qh.fetch()
```
The `q` family of methods (`q, qc, qu etc..`) can be used to
build a query step by step. These methods can be chained
together to break down the query building in multiple steps.
Several properties are available to get information of the model
such as:
1. `qh.db_table`: Quoted table name e.g `"my_user_table"`.
2. `qh.pk`: Quoted primary key name e.g `"id"`.
3. `qh.ordering`: ordering e.g `"price" ASC, "quantity" DESC`.
4. `qh.f.<field_name>`: quoted field names e.g`"profession"`.
5. `qh.c`: Current available position for positional argument (Instead of hardcoded `$1`, `$2`, use `f'${qh.c}'`, `f'${qh.c+1}'`).
`qh.c` is a counter that gives an integer representing the
last existing argument position plus 1.
`reset()` can be called to reset the query to start a new.
To execute a query, you need to run one of the execution methods
: `fetch, fetchrow, fetchval, execute`.
**Notable convenience methods:**
* `qupdate(data)`: Initialize a update query for data
* `qfilter()`: Initialize a filter query upto WHERE clasue.
* `get(pkval)`: Get an item by primary key.
Args:
db (DB): DB object
model_class (ModelType): model
"""
def __init__(self, db: DB, model_class: ModelType = None):
self.reset()
self.db = db
self.model = model_class # can be None
def func(k):
return Q(model_class._check_field_name_(k))
self._f = _FieldNames(func) # no reset
def __repr__(self):
return f'ModelQuery({self.db}, {self.model})'
def reset(self) -> 'ModelQuery':
"""Reset the model query by returning it to its initial state.
Returns:
self (Enables method chaining)
"""
self._query_str_queue: List[str] = []
self.end_query_str = ''
self.start_query_str = ''
self._args: List[Any] = []
self._arg_count = 0
self._named_args: Dict[str, Any] = {}
self._named_args_mapper: Dict[str, int] = {}
self.__filter_initiated = False
self._ordering = ''
self.__update_initiated = False
return self
@property
def c(self) -> int:
"""Current available argument position in the query
arg_count + 1 i.e if $1 and $2 has been used so far, then
self.c is 3 so that you can use it to make $3.
Returns:
int
"""
return self._arg_count + 1
@property
def db_table(self) -> str:
"""Table name of the model (quoted)
"""
return Q(self.model._get_db_table_()) #type: ignore
@property
def pk(self) -> str:
"""Primary key name (quoted)
"""
return Q(self.model._get_pk_()) #type: ignore
@property
def ordering(self) -> str:
"""Ordering query in SQL, does not include `ORDER BY`.
Example: `"price" ASC, "quantity" DESC`
"""
if not self._ordering:
self._ordering = ','.join([' '.join(y) for y in self.model._get_ordering_(quote='"')]) # type: ignore
return self._ordering
@property
def f(self) -> _FieldNames:
"""Field name container where names are quoted.
It can be used to avoid spelling mistakes in writing query.
Example: query `'select "profesion" from "table_name"'`
will only produce error after actually running the query against
a correctly spelled column 'profession'.
while,
query `f'select {self.f.profesion} from {self.db_table}'`
will throw python exception telling you that there is no
misspelled 'profesion' field.
Note: you have to change `self` in above to the current
`ModelQuery` instance
"""
return self._f
def _process_positional_args(self, *args):
if args:
self._args.extend(args)
self._arg_count += len(args)
def _process_keyword_args(self, q: str, **kwargs) -> str:
# TODO: improvents need to be done
# 1. needs to handle only unquoted keyword :field_name
# and ignore ':field_name' or ":field_name"
self._named_args.update(kwargs)
for k,v in self._named_args.items():
if k in self._named_args_mapper:
q, mc = re.subn(f':{k}\\b', f'${self._named_args_mapper[k]}', q)
else:
q, mc = re.subn(f':{k}\\b', f'${self._arg_count+1}', q)
if mc > 0:
self._args.append(v)
self._arg_count += 1
self._named_args_mapper[k] = self._arg_count
return q
def q(self, q: str, *args: Any) -> 'ModelQuery':
"""Add raw query stub without parsing to check for keyword arguments
Use `$1`, `$2` etc. for arguments.
Use `self.c` (instance property, use fstring) to get the current
available argument position.
This is an efficient way to add query that do not have any
keyword arguments to handle, compared to `q_()` which checks for
keyword arguments everytime it is called.
Example:
```python
mq = db(SomeModel)
mq\
.q('SELECT * FROM "table" WHERE $1', True)\
.q('AND "price" >= $2', 33)\
.q(f'OR "price" = ${mq.c}', 0) # mq.c=3 (now)\
.q_('OR "status" = :status', status='OK')\
# :status is $4:
.q('OR "active" = $5', 0)\
.q_('AND "status" = :status')\
# status='OK' from previous call
.q('OR "price" = $2')\
# $2=33 from previous call
#using format string and mq.c to get the argument position:
.q(f'OR "price" > ${mq.c} OR "quantity" > ${mq.c+1}', 12, 3)
# mq.c=6 ^
```
Args:
q (str): raw query string
*args (Any): positional arguments
Returns:
ModelQuery: self, enables method chaining.
"""
self._process_positional_args(*args)
self._query_str_queue.append(q)
return self
def q_(self, q: str, *args, **kwargs) -> 'ModelQuery':
"""Add a query stub having keyword params.
Use the format `:field_name` for keyword parameter.
`:field_name` is converted to positional parameter (`$n`).
This method checks the query against all keyword arguments
that has been added so far with other `q*()` methods.
Args:
q (str): query string (SQL)
Returns:
ModelQuery: returns `self` to enable method chaining
"""
self._process_positional_args(*args)
q = self._process_keyword_args(q, **kwargs)
self._query_str_queue.append(q)
return self
def qq(self, word: str) -> 'ModelQuery':
"""Quote and add a word to the query.
Enable to add names with auto-quote. For example, if the name
for a field value is `status`, it can be added to the query
with auto-quoting, i.e for postgresql it will be added
as `"status"`.
Example:
```python
.qq('price').q('>= $1',34)
```
Args:
word (str): the word that needs to be added with quote.
Returns:
ModelQuery: returns `self` to enable method chaining
"""
if word:
self._query_str_queue.append(Q(word))
return self
def qc(self, word: str, rest: str, *args) -> 'ModelQuery':
"""Add query by quoting `word` while adding the `rest` as is.
This is a shorthand for making where clause conditions.
For example: `qc('price', '>=$1', 34)` is a safe way to write
a where condition like: `"price" >=34`.
The same can be achieved by using a combination of
`qq()` and `q()` or manually quoting and using
with `q()`
Example:
```python
.qc('price', '>= $1', 34)
```
Args:
word (str): left part of query that needs to be quoted
rest (str): right part of query that does not need to be quoted
*args (any): args
Returns:
ModelQuery: returns `self` to enable method chaining
"""
return self.qq(word).q(rest, *args)
def qc_(self, word: str, rest: str, *args, **kwargs) -> 'ModelQuery':
"""Add query by quoting `word` while adding the `rest` as is.
Same as `qc()` except this method parses the `rest` query string
for keyword params in the format: `:field_name`
Args:
word (str): left part of query that needs to be quoted
rest (str): right part of query that does not need to be quoted
*args (any): args
*kwargs: keyword args
Returns:
ModelQuery: returns `self` to enable method chaining
"""
return self.qq(word).q_(rest, *args, **kwargs)
def qorder(self):
"""Add ORDER BY
Returns:
ModelQuery: returns `self` to enable method chaining
"""
return self.q('ORDER BY')
def qo(self, order: str) -> 'ModelQuery':
"""Convert `+/-field_name,` to proper order_by criteria and add to query.
Example: `-field_name,` will become: `"field_name" DESC,`
* `+` at beginning means ascending order (default)
* `-` at beginning means descending order
* `,` at end means you will add more order criteria
Ommit the comma (`,`) when it is the last ordering criteria.
Args:
order (str): order criteria in the format `+/-field_name,`
Returns:
ModelQuery: returns `self` to enable method chaining
"""
direction = 'ASC'
if order.startswith('-'):
order = order[1:]
direction = 'DESC'
elif order.startswith('+'):
order = order[1:]
if order.endswith(','):
order = order[0:-1]
direction += ','
return self.qq(order).q(direction)
def qu(self, data: dict) -> 'ModelQuery':
"""Convert data to `"column"=$n` query with args as the
values and add to the main query.
The counter of positional arguments increases by the number of
items in `data`. Make use of `self.c` counter to add more
queries after using this method.
Args:
data (dict): data in format: `{'column': value}`
Returns:
ModelQuery: returns `self` to enable method chaining
"""
setq = ', '.join([f'"{c}"=${i}' for i,c in enumerate(data, self.c)])
return self.q(setq, *data.values())
def qreturning(self, *column_names) -> 'ModelQuery':
"""Convenience to add a `RETURNING` clause.
Args:
column_names: column names.
Returns:
ModelQuery: returns `self` to enable method chaining
"""
q = '","'.join(column_names)
if q:
q = f'RETURNING "{q}"'
return self.q(q)
def qwhere(self) -> 'ModelQuery':
"""Convenience to add 'WHERE' to the main query.
Make use of `qc()` method to add conditions.
Returns:
ModelQuery: returns `self` to enable method chaining
"""
return self.q('WHERE')
def qfilter(self, no_ordering=False) -> 'ModelQuery':
"""Initiate a filter.
This initiates a `SELECT` query upto `WHERE`. You can then use the
`q()`, `qc()`, etc. methods to add conditions and finally
execute the `fetch()` method to get all results or execute the
`fetchrow()` method to get a single row.
Example:
```python
.qfilter().q('"price" >= $1 AND "status" = $2', 32.12, 'OK')
```
Args:
no_ordering (bool): Whether to remove the default ordering SQL. Defaults to False.
Returns:
ModelQuery: returns self to enable method chaining
"""
if not self.__filter_initiated:
down_fields = ','.join([Q(x) for x in self.model._get_fields_(up=False)]) #type: ignore
self.reset().q(f'SELECT {down_fields} FROM "{self.model._get_db_table_()}" WHERE') #type: ignore
self.__filter_initiated = True
order_by = self.ordering
if order_by and not no_ordering:
self.end_query_str = f'ORDER BY {order_by}'
else:
raise ValueError(f"Filter is already initiated for this {self.__class__.__name__} query object: {self}")
return self
def qupdate(self, data: dict) -> 'ModelQuery':
"""Initiate a UPDATE query for data.
This initiates an `UPDATE` query upto `WHERE` and leaves you to
add conditions with other methods such as `qc` or the generic
method `q()`.
Finally call the `execute()` method to execute the query or
call the `fetchval()` method if using `RETURNING` clause.
Args:
data (dict): data in key value dictionary
Returns:
ModelQuery: returns `self` to enable method chaining
"""
if not self.__update_initiated:
self.reset().q(f'UPDATE {self.db_table} SET').qu(data).qwhere()
self.__update_initiated = True
else:
raise ValueError(f"update is already initiated for this {self.__class__.__name__} query: {self}")
return self
def getq(self) -> Tuple[str, List[Any]]:
"""Return query string and arg list
Returns:
tuple: (str, list) : (query, args)
"""
query = ' '.join(self._query_str_queue)
self._query_str_queue = [query]
query = f'{self.start_query_str} {query} {self.end_query_str}'
return query, self._args
async def fetch(self, timeout: float = None) -> Union[List[ModelBase], List[Record]]:
"""Run query method `fetch` that returns the results in model class objects
Returns the results in model class objects.
Args:
timeout (float, optional): Timeout in seconds. Defaults to None.
Returns:
List[Model]: List of model instances.
"""
query, args = self.getq()
return await self.db.fetch(query, *args, timeout=timeout, model_class=self.model)
async def fetchrow(self, timeout: float = None) -> Union[ModelBase, Record]:
"""Make a query and get the first row.
Resultant record is mapped to model_class object.
Args:
timeout (float, optional): Timeout value. Defaults to None.
Returns:
model_clas object or None if no rows were selected.
"""
query, args = self.getq()
return await self.db.fetchrow(query, *args, timeout=timeout, model_class=self.model)
async def fetchval(self, column: int = 0, timeout: float = None) -> Any:
"""Run the query and return a column value in the first row.
Args:
column (int, optional): Column index. Defaults to 0.
timeout (float, optional): Timeout. Defaults to None.
Returns:
Any: Coulmn (indentified by index) value of first row.
"""
query, args = self.getq()
return await self.db.fetchval(query, *args, column=column, timeout=timeout)
async def execute(self, timeout: float = None) -> str:
"""Execute the query.
Args:
timeout (float, optional): Timeout. Defaults to None.
Returns:
str: Status of the last SQL command
"""
query, args = self.getq()
return await self.db.execute(query, *args, timeout=timeout)
async def get(self, *vals: Any, col: str = '', comp: str = '=$1') -> Union[ModelBase, Record]:
"""Get the first row found by column and value.
If `col` is not given, it defaults to the primary key (`pk`) of
the model.
If comparison is not given, it defaults to `=$1`
Example:
```python
from morm.db import DB
db = DB(DB_POOL) # get a db handle.
# get by pk:
user5 = await db(User).get(5)
# price between 5 and 2000
user = await db(User).get(5, 2000, col='price', comp='BETWEEN $1 AND $2')
```
Args:
*vals (any): Values to compare. Must be referenced with $1, $2 etc.. in `comp`.
col (str, optional): Column name. Defaults to the primary key.
comp (str, optional): Comparison. Defaults to '=$1'.
Returns:
model_clas object or None if no rows were selected.
"""
if not col:
col = self.model.Meta.pk #type: ignore
return await self.reset().qfilter().qc(col, comp, *vals).fetchrow()
SERIALIZABLE = 'serializable'
REPEATABLE_READ = 'repeatable_read'
READ_COMMITTED = 'read_committed'
class Transaction():
"""Start a transaction.
Example:
```python
from morm.db import Transaction
async with Transaction(DB_POOL) as tdb:
# use tdb just like you use db
user6 = await tdb(User).get(6)
user6.age = 34
await tdb.save(user6)
user5 = await tdb(User).get(5)
user5.age = 34
await tdb.save(user6)
```
Args:
pool (Pool): Pool instance.
isolation (str, optional): Transaction isolation mode, can be one of:
'serializable',
'repeatable_read',
'read_committed'.
Defaults to 'read_committed'.
See https://www.postgresql.org/docs/9.5/transaction-iso.html
readonly (bool, optional): Specifies whether this transaction is read-only. Defaults to False.
deferrable (bool, optional): Specifies whether this transaction is deferrable. Defaults to False.
"""
def __init__(self, pool: Pool, *,
isolation: str=READ_COMMITTED,
readonly: bool=False,
deferrable: bool=False):
self._pool = pool
self.db = DB(None) # type: ignore
self.tr = None
self.tr_args = {
'isolation': isolation,
'readonly': readonly,
'deferrable': deferrable,
}
async def __aenter__(self) -> DB:
return await self.start()
async def start(self) -> DB:
"""Start transaction.
Raises:
exceptions.TransactionError: When same object is used simultaneously for transaction
Returns:
DB: DB object.
"""
if self.db._con:
raise exceptions.TransactionError('Another transaction is running (or not ended properly) with this Transaction object')
self.db._con = await self._pool.pool.acquire() # type: ignore
self.tr = self.db._con.transaction(**self.tr_args) # type: ignore
await self.tr.start() # type: ignore
return self.db
async def rollback(self):
"""Rollback the transaction.
"""
if self.tr:
await self.tr.rollback()
async def commit(self):
"""Commit the transaction.
"""
if self.tr:
await self.tr.commit()
async def end(self):
"""Close the transaction gracefully.
Resources are released and some cleanups are done.
"""
try:
if self.db._con:
await self._pool.pool.release(self.db._con)
finally:
self.db._con = None
self.tr = None
async def __aexit__(self, extype, ex, tb):
try:
if extype is not None:
await self.rollback()
else:
await self.commit()
finally:
await self.end()
| 33.969808
| 267
| 0.577177
| 4,602
| 37,129
| 4.538027
| 0.125815
| 0.017717
| 0.014078
| 0.011061
| 0.351992
| 0.292521
| 0.262354
| 0.231134
| 0.189667
| 0.171854
| 0
| 0.005954
| 0.321474
| 37,129
| 1,092
| 268
| 34.000916
| 0.822967
| 0.348299
| 0
| 0.239691
| 0
| 0.005155
| 0.08693
| 0.025129
| 0
| 0
| 0
| 0.000916
| 0
| 1
| 0.095361
| false
| 0.002577
| 0.033505
| 0.005155
| 0.268041
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e8c2e49f52c5a966e053c091e7e268d680d58d4
| 2,397
|
py
|
Python
|
cvxpy/reductions/solvers/conic_solvers/super_scs_conif.py
|
mostafaelaraby/cvxpy
|
078e025be8b8315b5f579bd0209e8e3a1e2a2a19
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-09-24T12:59:45.000Z
|
2021-09-24T13:00:08.000Z
|
cvxpy/reductions/solvers/conic_solvers/super_scs_conif.py
|
mostafaelaraby/cvxpy
|
078e025be8b8315b5f579bd0209e8e3a1e2a2a19
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cvxpy/reductions/solvers/conic_solvers/super_scs_conif.py
|
mostafaelaraby/cvxpy
|
078e025be8b8315b5f579bd0209e8e3a1e2a2a19
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-04-12T05:17:18.000Z
|
2020-04-12T05:17:18.000Z
|
"""
Copyright 2018 Riley Murray
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cvxpy.settings as s
from cvxpy.reductions.solvers.conic_solvers.conic_solver import ConicSolver
from cvxpy.reductions.solvers.conic_solvers.scs_conif import dims_to_solver_dict, SCS
class SuperSCS(SCS):
DEFAULT_SETTINGS = {'use_indirect': False, 'eps': 1e-8, 'max_iters': 10000}
def name(self):
return s.SUPER_SCS
def import_solver(self):
import superscs
superscs # For flake8
def solve_via_data(self, data, warm_start, verbose, solver_opts, solver_cache=None):
"""Returns the result of the call to SuperSCS.
Parameters
----------
data : dict
Data generated via an apply call.
warm_start : Bool
Whether to warm_start SuperSCS.
verbose : Bool
Control the verbosity.
solver_opts : dict
SuperSCS-specific options.
Returns
-------
The result returned by a call to superscs.solve().
"""
import superscs
args = {"A": data[s.A], "b": data[s.B], "c": data[s.C]}
if warm_start and solver_cache is not None and \
self.name() in solver_cache:
args["x"] = solver_cache[self.name()]["x"]
args["y"] = solver_cache[self.name()]["y"]
args["s"] = solver_cache[self.name()]["s"]
cones = dims_to_solver_dict(data[ConicSolver.DIMS])
# settings
user_opts = list(solver_opts.keys())
for k in list(SuperSCS.DEFAULT_SETTINGS.keys()):
if k not in user_opts:
solver_opts[k] = SuperSCS.DEFAULT_SETTINGS[k]
results = superscs.solve(
args,
cones,
verbose=verbose,
**solver_opts)
if solver_cache is not None:
solver_cache[self.name()] = results
return results
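# Side note (a sketch, not part of the solver): the defaults loop in
# solve_via_data above is equivalent to a single dict merge, where
# user-supplied keys win because later entries override earlier ones:
#
#     solver_opts = {**SuperSCS.DEFAULT_SETTINGS, **solver_opts}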
| 33.291667
| 88
| 0.635378
| 318
| 2,397
| 4.669811
| 0.399371
| 0.059259
| 0.040404
| 0.051178
| 0.078114
| 0.051178
| 0
| 0
| 0
| 0
| 0
| 0.009159
| 0.271172
| 2,397
| 71
| 89
| 33.760563
| 0.84087
| 0.377138
| 0
| 0.064516
| 0
| 0
| 0.024158
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0.193548
| 0.032258
| 0.419355
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e40f115d7100a36cb4b801ec2f9f1a7a1eb33d05
| 4,984
|
py
|
Python
|
linear_model.py
|
gavb222/flatpanel-localize
|
6504eb94379f5df268ae280f996c7dd66f063e4e
|
[
"MIT"
] | 1
|
2021-02-01T18:17:11.000Z
|
2021-02-01T18:17:11.000Z
|
linear_model.py
|
gavb222/flatpanel-localize
|
6504eb94379f5df268ae280f996c7dd66f063e4e
|
[
"MIT"
] | null | null | null |
linear_model.py
|
gavb222/flatpanel-localize
|
6504eb94379f5df268ae280f996c7dd66f063e4e
|
[
"MIT"
] | 1
|
2021-02-01T18:07:12.000Z
|
2021-02-01T18:07:12.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import time
import random
import matlab.engine
def gaussian(spread):
#spread controls the size of the array
linspace = torch.linspace(-2.5,2.5,spread)
# gaussian = e^((-x)^2/2) when standard dev is 1 and height is 1
linspace = torch.exp(-1 * torch.div(torch.pow(linspace,2),2))
out_x = linspace.expand(spread,spread)
out_y = out_x.permute(1,0)
out_gaussian = out_x * out_y
return out_gaussian
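# Quick check (a sketch): gaussian(5) returns a (5, 5) tensor formed as the
# outer product of two identical 1-D profiles, so it is symmetric and peaks at
# the centre element (row 2, col 2), since exp(-x**2 / 2) is maximal at x == 0:
#
#     g = gaussian(5)   # g.shape == (5, 5)
#     g.argmax()        # tensor(12), i.e. the flat index of the (2, 2) centre cell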
#panel_x, panel_y = panel dimensions
#n_freq = n frequency bins
#x, y = top left of gaussian
#spread = spread of the gaussian
#NB that x-spread > 0, y-spread > 0, x+spread < panel_x, y+spread < panel_y
def produce_freq_response(panel_x, panel_y, n_freq, x, y, spread, expand_dim=False):
#TODO: change these to return errors
if x+spread > panel_x-1:
return torch.ones(panel_x,panel_y)*-1
elif y+spread > panel_y-1:
return torch.ones(panel_x,panel_y)*-1
response = gaussian(spread)
#response.size is (dim,dim)
#n_gaussian_elems = response.size()[0]
#pad response with zeros until it's the size we want
#n = math.floor(n_gaussian_elems/2)
#pad of x starting from 0,
#pad_left = torch.zeros((x-n),n_gaussian_elems)
#pad_right = torch.zeros((panel_x-(x+n))-1,n_gaussian_elems)
#pad_top = torch.zeros(panel_x,(y-n))
#pad_bottom = torch.zeros(panel_x,(panel_y-(y+n))-1)
#response = torch.cat((pad_left,response), dim=0)
#response = torch.cat((response,pad_right), dim=0)
#response = torch.cat((pad_top,response), dim=1)
#response = torch.cat((response,pad_bottom), dim=1)
out_array = torch.zeros(panel_x,panel_y)
out_array[x:x+spread,y:y+spread] = response
if expand_dim:
out_array = out_array.expand(n_freq,panel_x,panel_y)
return out_array
class Conv_Block(nn.Module):
def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1,activation=True):
super(Conv_Block, self).__init__()
self.conv = nn.Conv2d(input_size, output_size, kernel_size, stride, padding)
self.activation = activation
def forward(self, x):
if self.activation:
out = self.conv(F.relu(x))
else:
out = self.conv(x)
return out
class Conv_Net(nn.Module):
def __init__(self, input_channels, n_filters, output_channels):
super(Conv_Net, self).__init__()
self.conv1 = Conv_Block(input_channels, n_filters, activation=False)
self.conv2 = Conv_Block(n_filters, n_filters * 2)
self.conv3 = Conv_Block(n_filters * 2, n_filters * 4)
self.conv4 = Conv_Block(n_filters * 4, n_filters * 8, stride=1)
self.conv5 = Conv_Block(n_filters * 8, output_channels, stride=1)
self.classifier = nn.Linear(384,24)
def forward(self,x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
x = x.view(-1)
x = self.classifier(x)
#what size is this?
out = torch.nn.Sigmoid()(x)
out = torch.reshape(out,(4,6))
return out
model = Conv_Net(1,16,24)
model.cuda()
model.train()
loss_fn = nn.MSELoss()  # instantiate the loss module; the bare class cannot score tensors
optimizer = torch.optim.Adam(model.parameters(), lr=.0001, betas=(.5, .999))
keep_training = True
epoch_counter = 0
panel_x = 50
panel_y = 50
eng = matlab.engine.start_matlab()
#make a panel
driver_locations = torch.tensor((0.25, 0.25, 0.75, 0.75, 0.25, 0.75, 0.75, 0.25)).view(4,2)
Lx = 0.3
Ly = 0.5
while keep_training:
epoch_counter = epoch_counter + 1
time_start = time.time()
gt = torch.ones(panel_x,panel_y)*-1
model.zero_grad()
#random init starting conditions
while gt[0,0] == -1:
#returns -1 for invalid configuration
gt = produce_freq_response(panel_x,panel_y,1,random.randint(1,panel_x-1),random.randint(1,panel_y-1),random.randint(3,15))
coefs = model(gt.unsqueeze(0).unsqueeze(0).cuda())
print(coefs.size())
#very possible that the interpreter doesn't like torch tensors; might have to go numpy with this
response1, frequencies = eng.get_biquad_response(coefs[0,:].cpu().detach().numpy(),44100,nargout = 2)
response2, temp = eng.get_biquad_response(coefs[1,:].cpu().detach().numpy(),44100,nargout = 2)
response3, temp = eng.get_biquad_response(coefs[2,:].cpu().detach().numpy(),44100,nargout = 2)
response4, temp = eng.get_biquad_response(coefs[3,:].cpu().detach().numpy(),44100,nargout = 2)
responses = torch.stack((response1,response2,response3,response4),dim=-1)
matlab_panel = eng.Clamped_Panel(driver_locations, responses, frequencies, Lx, Ly)
# MATLAB object methods are invoked through the engine with the object as the first argument
matlab_out = eng.view_total_scan(matlab_panel, 200, 0)
loss = loss_fn(matlab_out, gt)
loss.backward()  # backprop was missing; without it optimizer.step() is a no-op
optimizer.step()
print("holy moly!")
| 33.006623
| 131
| 0.647673
| 765
| 4,984
| 4.040523
| 0.249673
| 0.029117
| 0.032028
| 0.03494
| 0.213847
| 0.162731
| 0.052734
| 0.025558
| 0.018764
| 0
| 0
| 0.041967
| 0.220706
| 4,984
| 150
| 132
| 33.226667
| 0.753862
| 0.215891
| 0
| 0.068182
| 0
| 0
| 0.00268
| 0
| 0
| 0
| 0
| 0.006667
| 0
| 1
| 0.068182
| false
| 0
| 0.079545
| 0
| 0.238636
| 0.022727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e40f68af3b51a18af4106a68a0e2666e5541b720
| 4,438
|
py
|
Python
|
client/client.py
|
s-ball/remo_serv
|
66accbd77183db0628a9618cf258656ec2d81316
|
[
"MIT"
] | null | null | null |
client/client.py
|
s-ball/remo_serv
|
66accbd77183db0628a9618cf258656ec2d81316
|
[
"MIT"
] | null | null | null |
client/client.py
|
s-ball/remo_serv
|
66accbd77183db0628a9618cf258656ec2d81316
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2020 SBA- MIT License
import getpass
import argparse
import sys
import cmd
import shlex
from urllib.error import HTTPError
from cryptography.hazmat.primitives import serialization
from client.clientlib import login, Connection
from client import smartcard
def parse2(arg):
args = list(shlex.split(arg))
if len(args) == 1:
args.append(args[0])
elif len(args) != 2:
return None, None
return args
class CmdLoop(cmd.Cmd):
def __init__(self, con: Connection, server, encoding):
self.con = con
self.prompt = server + '> '
super().__init__()
self.encoding = encoding
def do_get(self, arg):
"""Get a file from remote: get remote_file [local_file]"""
params = parse2(arg)
if params[0] is None:
print('ERROR: 1 or 2 parameters required', file=sys.stderr)
else:
try:
self.con.get(*params)
except HTTPError as e:
print(e)
def do_put(self, arg):
"""Send a file to remote: put remote_file [local_file]"""
params = parse2(arg)
if params[0] is None:
print('ERROR: 1 or 2 parameters required', file=sys.stderr)
else:
try:
self.con.put(*params)
except HTTPError as e:
print(e)
def do_exec(self, arg):
"""Execute a command on the remote and print the result: exec cmd param"""
try:
r = self.con.exec(arg)
print(r.read().decode(self.encoding))
except HTTPError as e:
print(e)
def do_iexec(self, arg):
"""Execute an interactive command"""
try:
r = self.con.iexec(arg)
print(r.read().decode(self.encoding))
except HTTPError as e:
print(e)
def do_idata(self, arg):
"""Send input to the interactive command: idata data..."""
try:
r = self.con.idata(arg)
print(r.read().decode(self.encoding))
except HTTPError as e:
print(e)
def do_iend(self, _arg):
"""Close the input channel of the interactive command"""
try:
r = self.con.end_cmd()
print(r.read().decode(self.encoding))
except HTTPError as e:
print(e)
# noinspection PyPep8Naming
@staticmethod
def do_EOF(_arg):
"""Quit the program"""
return True
@staticmethod
def do_quit(_arg):
"""Quit the program"""
return True
def do_set_encoding(self, arg):
"""Set the server encoding"""
self.encoding = arg
def parse(args):
parser = argparse.ArgumentParser()
parser.add_argument('host', help='Name or address of remote')
parser.add_argument('port', nargs='?', default=80, type=int,
help='Server port (default: 80)')
parser.add_argument('--server', '-s', default='remo_serv.pem',
help='Public key of the server (PEM format)')
parser.add_argument('--user', '-u', default=getpass.getuser(),
help='user name')
parser.add_argument('--key', '-k', help='File name of user key'
' (PEM format). Default: user_key.pem')
parser.add_argument('--label', '-l', help='Label of a certificate '
'private key on a smart card')
parser.add_argument('--encoding', '-e', default='utf_8',
help='encoding of the server')
params = parser.parse_args(args)
if params.key is None:
params.key = params.user + '_key.pem'
return params
# noinspection PyArgumentList
def run(args):
params = parse(args)
with open(params.server, 'rb') as fd:
remo_pub = serialization.load_pem_public_key(fd.read())
if params.label is None:
with open(params.key, 'rb') as fd:
own_key = serialization.load_pem_private_key(fd.read(), b'foo')
signer = None
else:
own_key = None
signer = smartcard.get_token(params.label)
server = 'http://' + params.host
if params.port != 80:
server += ':' + str(params.port)
con = login(server, '/auth', params.user, own_key, signer, remo_pub)
cmd_loop = CmdLoop(con, server, params.encoding)
cmd_loop.cmdloop()
if __name__ == '__main__':
run(sys.argv[1:])
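# Example invocation (a sketch; host, port, and user values are hypothetical):
#
#     python -m client.client example.com 8080 -u alice -s remo_serv.pem
#
# With no --key, the user key defaults to '<user>_key.pem' (see parse() above).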
| 30.190476
| 83
| 0.570077
| 555
| 4,438
| 4.45045
| 0.275676
| 0.018219
| 0.048178
| 0.043725
| 0.255061
| 0.255061
| 0.209717
| 0.209717
| 0.209717
| 0.181377
| 0
| 0.008157
| 0.309374
| 4,438
| 146
| 84
| 30.39726
| 0.797716
| 0.1032
| 0
| 0.318182
| 0
| 0
| 0.102238
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.118182
| false
| 0.018182
| 0.081818
| 0
| 0.254545
| 0.109091
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e410307635af99e3b3cc52fdda648a0910806c95
| 1,867
|
py
|
Python
|
unfollower.py
|
Sam-F90/unfollower
|
feee9815f440d3a654f77a21ec84680ac92022c1
|
[
"MIT"
] | null | null | null |
unfollower.py
|
Sam-F90/unfollower
|
feee9815f440d3a654f77a21ec84680ac92022c1
|
[
"MIT"
] | null | null | null |
unfollower.py
|
Sam-F90/unfollower
|
feee9815f440d3a654f77a21ec84680ac92022c1
|
[
"MIT"
] | null | null | null |
import tweepy
import datetime
import os
# get keys from environment variable "TWITTER_KEYS"
TWITTER_API_KEYS = (os.environ.get("TWITTER_KEYS").split(","))
consumer_key,consumer_secret,access_token_key,access_token_secret = TWITTER_API_KEYS
# Authenticate to Twitter
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token_key, access_token_secret)
# establish api
api = tweepy.API(auth, wait_on_rate_limit=True,
wait_on_rate_limit_notify=True)
# verify
try:
api.verify_credentials()
except Exception:
print("Error during authentication")
exit()
# get my id
me = api.me()
# get list of friends (id)
friends = api.friends_ids(me.id)
# get list of followers (id)
follower_ids = []
for follower in tweepy.Cursor(api.followers, me.screen_name).items(api.me().friends_count):
follower_ids.append(follower.id)
# get list of muted friends (id)
muted_friends = api.mutes_ids()
# create list of users who are muted and do not follow you
to_unfollow = []
for friend in friends:
if friend not in follower_ids and friend in muted_friends:
to_unfollow.append(friend)
# create log to record data and string to send to DM
log = [datetime.datetime.now().strftime("%m-%d-%Y %H:%M:%S")]
dm = [datetime.datetime.now().strftime("%m-%d-%Y %H:%M:%S")]
# unfollow users in to_unfollow[] and record them in log[] and dm[]
for user in to_unfollow:
# unfollowed = api.destroy_friendship(user)
unfollowed = api.get_user(user)
log.append('unfollowed ' + unfollowed.screen_name + " [" +str(unfollowed.friends_count) + "," + str(unfollowed.followers_count) + "]")
dm.append("@" + unfollowed.screen_name)
# write info to log
with open("unfollow_log.txt","a") as fp:
for line in log:
fp.write(line + "\n")
fp.write("\n")
api.send_direct_message(api.me().id,"\n".join(dm))
print("finished")
| 27.455882
| 139
| 0.719336
| 285
| 1,867
| 4.536842
| 0.364912
| 0.042537
| 0.020882
| 0.03867
| 0.098995
| 0.098995
| 0.051044
| 0.051044
| 0.051044
| 0.051044
| 0
| 0
| 0.149438
| 1,867
| 68
| 140
| 27.455882
| 0.814232
| 0.225495
| 0
| 0
| 0
| 0
| 0.084438
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4129e9fa1ffc789238869830a16a81f822bb51c
| 2,113
|
py
|
Python
|
alpha/NN/autoencoders/charlie.py
|
DanielBerns/keras-effective-adventure
|
d9bc8c08f769f0c07379d2a3756d040ca14239f2
|
[
"MIT"
] | null | null | null |
alpha/NN/autoencoders/charlie.py
|
DanielBerns/keras-effective-adventure
|
d9bc8c08f769f0c07379d2a3756d040ca14239f2
|
[
"MIT"
] | null | null | null |
alpha/NN/autoencoders/charlie.py
|
DanielBerns/keras-effective-adventure
|
d9bc8c08f769f0c07379d2a3756d040ca14239f2
|
[
"MIT"
] | null | null | null |
# https://medium.com/datadriveninvestor/deep-autoencoder-using-keras-b77cd3e8be95
from keras.datasets import mnist
from keras.layers import Input, Dense
from keras.models import Model
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
(X_train, _), (X_test, _) = mnist.load_data()
X_train = X_train.astype('float32')/255
X_test = X_test.astype('float32')/255
X_train = X_train.reshape(len(X_train), np.prod(X_train.shape[1:]))
X_test = X_test.reshape(len(X_test), np.prod(X_test.shape[1:]))
print(X_train.shape)
print(X_test.shape)
input_img = Input(shape=(784,))
encoded = Dense(units=128, activation='relu')(input_img)
encoded = Dense(units=64, activation='relu')(encoded)
encoded = Dense(units=32, activation='relu')(encoded)
decoded = Dense(units=64, activation='relu')(encoded)
decoded = Dense(units=128, activation='relu')(decoded)
decoded = Dense(units=784, activation='sigmoid')(decoded)
autoencoder=Model(input_img, decoded)
encoder = Model(input_img, encoded)
print('autoencoder')
autoencoder.summary()
print('encoder')
encoder.summary()
autoencoder.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
autoencoder.fit(X_train, X_train,
epochs=50,
batch_size=256,
shuffle=True,
validation_data=(X_test, X_test))
encoded_imgs = encoder.predict(X_test)
predicted = autoencoder.predict(X_test)
plt.figure(figsize=(40, 4))
for i in range(10):
# display original images
ax = plt.subplot(3, 20, i + 1)
plt.imshow(X_test[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display encoded images
ax = plt.subplot(3, 20, i + 1 + 20)
plt.imshow(encoded_imgs[i].reshape(8,4))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display reconstructed images
ax = plt.subplot(3, 20, 2*20 +i+ 1)
plt.imshow(predicted[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
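# A small follow-up sketch (uses X_test and predicted defined above): per-image
# reconstruction error, a quick scalar check to go with the plots.
mse = np.mean((X_test - predicted) ** 2, axis=1)
print('mean reconstruction MSE:', mse.mean())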
| 27.802632
| 87
| 0.69664
| 310
| 2,113
| 4.596774
| 0.329032
| 0.045614
| 0.063158
| 0.025263
| 0.317895
| 0.271579
| 0.178947
| 0.178947
| 0.146667
| 0.146667
| 0
| 0.040851
| 0.154283
| 2,113
| 75
| 88
| 28.173333
| 0.756575
| 0.073355
| 0
| 0.173077
| 0
| 0
| 0.046107
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.115385
| 0
| 0.115385
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4137613cb4a7761df5564e9e723f2867c6f080e
| 5,569
|
py
|
Python
|
tests/pages/alert_box_page.py
|
nairraghav/selenium-example
|
88e4316a75bcd7feced65489c0ffe1b8c2b8487b
|
[
"MIT"
] | null | null | null |
tests/pages/alert_box_page.py
|
nairraghav/selenium-example
|
88e4316a75bcd7feced65489c0ffe1b8c2b8487b
|
[
"MIT"
] | null | null | null |
tests/pages/alert_box_page.py
|
nairraghav/selenium-example
|
88e4316a75bcd7feced65489c0ffe1b8c2b8487b
|
[
"MIT"
] | null | null | null |
class AlertBoxPage:
def __init__(self, driver):
self.driver = driver
self.title_css = "h1"
self.title_text = "Alert Box Examples"
self.explanation_css = "div.explanation > p"
self.explanation_text = (
"There are three main JavaScript methods "
"which show alert dialogs: alert, confirm "
"and prompt. This page has examples of each."
)
self.alert_box_descriptions_css = "div.page-body > p"
self.alert_box_description_text = (
"The following button will display an alert when clicked."
)
self.alert_box_button_id = "alertexamples"
self.alert_box_button_text = "Show alert box"
self.alert_box_text = "I am an alert box!"
self.confirm_box_description_text = (
"The following button will display a confirm dialog when clicked."
)
self.confirm_box_button_id = "confirmexample"
self.confirm_box_button_text = "Show confirm box"
self.confirm_boolean_text_id = "confirmreturn"
self.confirm_boolean_confirm_text = "true"
self.confirm_boolean_cancel_text = "false"
self.confirm_text_id = "confirmexplanation"
self.confirm_text = "You clicked OK, confirm returned true."
self.cancel_text = "You clicked Cancel, confirm returned false."
self.prompt_box_description_text = (
"The following button will display a prompt dialog when clicked."
)
self.prompt_box_button_id = "promptexample"
self.prompt_box_button_text = "Show prompt box"
self.prompt_box_text = "I prompt you"
self.prompt_value_text_id = "promptreturn"
self.prompt_text_id = "promptexplanation"
self.prompt_text_prefix = "You clicked OK. 'prompt' returned "
self.prompt_cancel_text = "You clicked Cancel. 'prompt' returned null"
def is_page_rendered(self):
for element_css in (
self.title_css,
self.explanation_css,
self.alert_box_descriptions_css,
):
found_element = self.driver.find_element_by_css_selector(element_css)
if found_element is None:
return False
for element_id in (
self.alert_box_button_id,
self.confirm_box_button_id,
self.prompt_box_button_id,
):
found_element = self.driver.find_element_by_id(element_id)
if found_element is None:
return False
return True
def validate_text_on_page(self):
for element_css, text in (
(self.title_css, self.title_text),
(self.explanation_css, self.explanation_text),
):
found_element = self.driver.find_element_by_css_selector(element_css)
assert found_element.text == text
for element_id, text in (
(self.alert_box_button_id, self.alert_box_button_text),
(self.confirm_box_button_id, self.confirm_box_button_text),
(self.prompt_box_button_id, self.prompt_box_button_text),
):
found_element = self.driver.find_element_by_id(element_id)
assert found_element.get_attribute("value") == text, (
f"Actual: {found_element.get_attribute('value')}\t\t" "Expected: {text}"
)
descriptions = self.driver.find_elements_by_css_selector(
self.alert_box_descriptions_css
)
assert descriptions[0].text == self.alert_box_description_text
assert descriptions[2].text == self.confirm_box_description_text
assert descriptions[4].text == self.prompt_box_description_text
def interact_with_alert_box(self):
self.driver.find_element_by_id(self.alert_box_button_id).click()
alert = self.driver.switch_to.alert
alert.accept()
def interact_with_confirm_box(self):
self.driver.find_element_by_id(self.confirm_box_button_id).click()
alert = self.driver.switch_to.alert
alert.accept()
assert (
self.driver.find_element_by_id(self.confirm_boolean_text_id).text
== self.confirm_boolean_confirm_text
)
assert (
self.driver.find_element_by_id(self.confirm_text_id).text
== self.confirm_text
)
self.driver.find_element_by_id(self.confirm_box_button_id).click()
alert = self.driver.switch_to.alert
alert.dismiss()
assert (
self.driver.find_element_by_id(self.confirm_boolean_text_id).text
== self.confirm_boolean_cancel_text
)
assert (
self.driver.find_element_by_id(self.confirm_text_id).text
== self.cancel_text
)
def interact_with_prompt_box(self):
self.driver.find_element_by_id(self.prompt_box_button_id).click()
alert = self.driver.switch_to.alert
alert_text = "Testing"
alert.send_keys(alert_text)
alert.accept()
assert (
self.driver.find_element_by_id(self.prompt_value_text_id).text == alert_text
)
assert (
self.driver.find_element_by_id(self.prompt_text_id).text
== f"{self.prompt_text_prefix}{alert_text}"
)
self.driver.find_element_by_id(self.prompt_box_button_id).click()
alert = self.driver.switch_to.alert
alert.dismiss()
assert (
self.driver.find_element_by_id(self.prompt_text_id).text
== self.prompt_cancel_text
)
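# A minimal driver sketch (assumptions: selenium is installed, a chromedriver
# binary is on PATH, and PAGE_URL is a hypothetical placeholder for the
# alert-box example page this page object models):
#
#     from selenium import webdriver
#     driver = webdriver.Chrome()
#     driver.get(PAGE_URL)
#     page = AlertBoxPage(driver)
#     assert page.is_page_rendered()
#     page.validate_text_on_page()
#     page.interact_with_alert_box()
#     driver.quit()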
| 40.355072
| 88
| 0.644101
| 690
| 5,569
| 4.831884
| 0.146377
| 0.071986
| 0.071386
| 0.10078
| 0.64847
| 0.444811
| 0.440912
| 0.377325
| 0.358428
| 0.311638
| 0
| 0.000989
| 0.274017
| 5,569
| 137
| 89
| 40.649635
| 0.823646
| 0
| 0
| 0.309524
| 0
| 0
| 0.147064
| 0.014186
| 0
| 0
| 0
| 0
| 0.095238
| 1
| 0.047619
| false
| 0
| 0
| 0
| 0.079365
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e414c3ce91122f63e50497c6f5b8998f2cc88f9e
| 3,893
|
py
|
Python
|
padmini/prakarana/dvitva.py
|
sanskrit/padmini
|
8e7e8946a7d2df9c941f689ea4bc7b6ebb7ca1d0
|
[
"MIT"
] | 1
|
2022-03-01T05:05:04.000Z
|
2022-03-01T05:05:04.000Z
|
padmini/prakarana/dvitva.py
|
sanskrit/padmini
|
8e7e8946a7d2df9c941f689ea4bc7b6ebb7ca1d0
|
[
"MIT"
] | null | null | null |
padmini/prakarana/dvitva.py
|
sanskrit/padmini
|
8e7e8946a7d2df9c941f689ea4bc7b6ebb7ca1d0
|
[
"MIT"
] | null | null | null |
from padmini import filters as f
from padmini import operations as op
from padmini.constants import Tag as T
from padmini.sounds import s
from padmini.prakriya import Term, Prakriya
from padmini.term_views import TermView
from padmini.prakarana.utils import eka_ac
def _double(rule: str, p: Prakriya, dhatu: Term, i: int) -> Term:
# Special logic for Nic.
if (
dhatu.adi in s("ac")
and dhatu.antya in s("hal")
and p.terms[i + 1].u in {"Ric", "RiN"}
):
ni = p.terms[i + 1]
text = dhatu.text + ni.text
third = Term.make_term(text[1:])
while f.samyogadi(third) and third.adi in {"n", "d", "r"}:
third.text = third.text[1:]
third.u = p.terms[i + 1].u
third.add_tags(T.DHATU)
ni.text = third.text
dhatu.text = dhatu.text[: -len(third.text) + 1]
op.insert_after(rule, p, ni, third)
op.samjna("6.1.4", p, ni, T.ABHYASA)
dhatu.add_tags(T.ABHYASTA)
ni.add_tags(T.ABHYASTA)
third.add_tags(T.ABHYASTA)
p.step("6.1.5")
elif eka_ac(dhatu) or dhatu.adi in s("hal"):
# TODO: correctly double jAgR
abhyasa = Term.make_term(dhatu.text)
op.insert_before(rule, p, dhatu, abhyasa)
op.samjna("6.1.4", p, abhyasa, T.ABHYASA)
abhyasa.add_tags(T.ABHYASTA)
dhatu.add_tags(T.ABHYASTA)
if p.terms[i + 2].u in ("Ric", "RiN"):
p.terms[i + 2].add_tags(T.ABHYASTA)
p.step("6.1.5")
else:
# Create 3 terms:
# 1. the dhatu without the abhyasa
# 2. the abhyasa
# 3. the doubled portion
# 6.1.2 ajAder dvitIyasya
# 6.1.3 na ndrAH saMyogAdayaH
third = Term.make_term(dhatu.text[1:])
while f.samyogadi(third) and third.adi in {"n", "d", "r"}:
third.text = third.text[1:]
third.u = dhatu.u
third.add_tags(T.DHATU)
# Ru -> nu for UrRu
if dhatu.text == "UrRu":
third.text = "nu"
abhyasa = Term.make_term(third.text)
abhyasa.add_tags(T.ABHYASA)
dhatu.text = dhatu.text[: -len(third.text)]
op.insert_after(None, p, dhatu, abhyasa)
op.insert_after(rule, p, abhyasa, third)
op.samjna("6.1.4", p, abhyasa, T.ABHYASA)
dhatu.add_tags(T.ABHYASTA)
third.add_tags(T.ABHYASTA)
abhyasa.add_tags(T.ABHYASTA)
if p.terms[i + 3].u in ("Ric", "RiN"):
p.terms[i + 3].add_tags(T.ABHYASTA)
p.step("6.1.5")
def run_for_each(p: Prakriya, dhatu: Term, i: int):
n = TermView.make_pratyaya(p, i)
if not n:
return
# HACK for Nic + caN
if n.terms[0].u in ("Ric", "RiN"):
n = TermView.make_pratyaya(p, i + 1)
n.u = n.terms[0].u
if dhatu.text in {"jakz", "jAgf", "daridrA", "cakAs", "SAs", "dIDI", "vevI"}:
# These are termed abhyasta, but they can still undergo dvitva because
# the rules below inherit "anabhyAsasya" from 6.1.8.
op.tag("6.1.6", p, dhatu, T.ABHYASTA)
if n.all("li~w"):
# kAshikA:
# dayateḥ iti dīṅo grahaṇaṃ na tu daya dāne ityasya.
# digyādeśena dvirvacanasya bādhanam iṣyate.
if dhatu.u == "de\\N":
op.text("7.4.9", p, dhatu, "digi")
else:
_double("6.1.8", p, dhatu, i)
elif n.u in ("san", "yaN"):
_double("6.1.9", p, dhatu, i)
elif n.terms[0].any(T.SLU):
_double("6.1.10", p, dhatu, i)
elif n.u == "caN":
_double("6.1.11", p, dhatu, i)
def run(p: Prakriya):
i = 0
num_terms = len(p.terms)
while i < num_terms:
c = p.terms[i]
# HACK to avoid doubling the nic / nin
if c.any(T.DHATU) and c.u not in {"Ric", "RiN"}:
run_for_each(p, c, i)
# Skip new terms
i += 1 + (len(p.terms) - num_terms)
num_terms = len(p.terms)
| 31.144
| 81
| 0.554585
| 610
| 3,893
| 3.47541
| 0.247541
| 0.013208
| 0.049057
| 0.075472
| 0.396226
| 0.315094
| 0.242453
| 0.172642
| 0.149057
| 0.062264
| 0
| 0.025173
| 0.295916
| 3,893
| 124
| 82
| 31.395161
| 0.748267
| 0.12972
| 0
| 0.255814
| 0
| 0
| 0.048961
| 0
| 0
| 0
| 0
| 0.008065
| 0
| 1
| 0.034884
| false
| 0
| 0.081395
| 0
| 0.127907
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4160c8bd63d807a761f9c2eb1581d092fef5ff0
| 449
|
py
|
Python
|
modules/dbnd-airflow/src/dbnd_airflow/scheduler/dags/dbnd_dropin_scheduler.py
|
ipattarapong/dbnd
|
7bd65621c46c73e078eb628f994127ad4c7dbd1a
|
[
"Apache-2.0"
] | null | null | null |
modules/dbnd-airflow/src/dbnd_airflow/scheduler/dags/dbnd_dropin_scheduler.py
|
ipattarapong/dbnd
|
7bd65621c46c73e078eb628f994127ad4c7dbd1a
|
[
"Apache-2.0"
] | null | null | null |
modules/dbnd-airflow/src/dbnd_airflow/scheduler/dags/dbnd_dropin_scheduler.py
|
ipattarapong/dbnd
|
7bd65621c46c73e078eb628f994127ad4c7dbd1a
|
[
"Apache-2.0"
] | null | null | null |
import logging
logger = logging.getLogger("dbnd-scheduler")
try:
from dbnd_airflow.scheduler.scheduler_dags_provider import get_dags
# airflow will only scan files containing the text DAG or airflow. This comment performs this function
dags = get_dags()
if dags:
for dag in dags:
globals()[dag.dag_id] = dag
except Exception as e:
logging.exception("Failed to get dags form databand server")
raise e
| 24.944444
| 106
| 0.710468
| 63
| 449
| 4.968254
| 0.634921
| 0.067093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.224944
| 449
| 17
| 107
| 26.411765
| 0.899425
| 0.222717
| 0
| 0
| 0
| 0
| 0.152738
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e41914e68f6a31dadb107fe8bb9eaf841bed6173
| 4,268
|
py
|
Python
|
tanacompendium/utils/modelmanagers.py
|
nkoech/tanacompendium
|
b4fd81b23f2c8263735806765d93eb4a70be8aba
|
[
"MIT"
] | null | null | null |
tanacompendium/utils/modelmanagers.py
|
nkoech/tanacompendium
|
b4fd81b23f2c8263735806765d93eb4a70be8aba
|
[
"MIT"
] | null | null | null |
tanacompendium/utils/modelmanagers.py
|
nkoech/tanacompendium
|
b4fd81b23f2c8263735806765d93eb4a70be8aba
|
[
"MIT"
] | null | null | null |
import datetime
from django.contrib.contenttypes.models import ContentType
from django.db.models import FieldDoesNotExist
from django.db.models.base import ObjectDoesNotExist
def create_model_type(instance, model_type, key, slugify, **kwargs):
"""
Create object by model type
:param instance: Model manager instance
:param model_type: Content/model type
:param key: Primary key or slug
:param slugify: Boolean to indicate availability of a slug or primary key
:param kwargs: Fields to be created
:return: Data object
:rtype: Object
"""
model_qs = ContentType.objects.filter(model=model_type)
if model_qs.exists():
any_model = model_qs.first().model_class()
if slugify:
obj_qs = any_model.objects.filter(slug=key)
else:
obj_qs = any_model.objects.filter(pk=key)
if obj_qs.exists() and obj_qs.count() == 1:
field_values = {
'content_type': model_qs.first(),
'object_id': obj_qs.first().id
}
field_values.update(kwargs)
data_instance = instance.model(**field_values)
data_instance.save()
return data_instance
return None
def model_instance_filter(call_instance, current_instance, model_manager):
"""
Object query based on a model instance
:param call_instance: Instance of the model calling this method
:param current_instance: Instance of the model manager class this method would be called from
:param model_manager: The model manager class
:return: Object due to instantiation of the calling model class
:rtype: Object/record
"""
parent_obj = super(model_manager, current_instance)
content_type = ContentType.objects.get_for_model(call_instance.__class__)
try:
qs = parent_obj.filter(content_type=content_type, object_id=call_instance.id)
except parent_obj.DoesNotExist:
return None
return qs
def model_foreign_key_qs(call_instance, current_instance, model_manager):
"""
Object query based on foreign key
:param call_instance: Instance of the model calling this method
:param current_instance: Instance of the model manager class this method would be called from
:param model_manager: The model manager class
:return: Object query based on foreign key otherwise return none
:rtype: Object/record
"""
model_name = str(call_instance._meta.model_name) # Foreignkey name should be similar to related model name
qs_filter = {model_name: call_instance.id}
obj_qs = super(model_manager, current_instance).filter(**qs_filter)
return obj_qs
def model_type_filter(current_instance, obj_qs, model_manager):
"""
Object query based on a model class
:param current_instance: Instance of the model manager class this method would be called from
:param obj_qs: Initial object query
:param model_manager: The model manager class
:return: Object query based on the model type/class otherwise return none
:rtype: Object/record
"""
if obj_qs.exists():
if model_field_exists(obj_qs, 'content_type'):
for obj in obj_qs.iterator():
try:
qs = super(model_manager, current_instance).filter(content_type=obj.content_type) and obj_qs
return qs
except ObjectDoesNotExist:
return None
return obj_qs
def model_field_exists(instance, field_name):
"""
Check if field exists
:param instance: Instance of the model manager class this method would be called from
:param field_name: Field name to be checked
:return: True if field exists otherwise return false
:rtype: Boolean
"""
try:
instance.model._meta.get_field(field_name)
return True
except FieldDoesNotExist:
return False
def get_year_choices():
"""
Get years as model choices
:return: Years
"""
year_choice = []
for r in range(1950, (datetime.datetime.now().year + 1)):
year_choice.append((r, r))
return year_choice
def get_datetime_now():
"""
Get current year
:return: Current year
"""
return datetime.datetime.now().year
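# A usage sketch (assumption: a hypothetical Django model in the same project):
#
#     from django.db import models
#
#     class Report(models.Model):
#         year = models.IntegerField(choices=get_year_choices(),
#                                    default=get_datetime_now)
#
# Passing the callable get_datetime_now (not its result) lets Django
# re-evaluate the default each time a row is created.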
| 34.419355
| 112
| 0.684161
| 561
| 4,268
| 5.016043
| 0.194296
| 0.072495
| 0.037313
| 0.049751
| 0.366382
| 0.341507
| 0.290334
| 0.261905
| 0.246979
| 0.246979
| 0
| 0.001856
| 0.242502
| 4,268
| 123
| 113
| 34.699187
| 0.868543
| 0.376054
| 0
| 0.172414
| 0
| 0
| 0.013508
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12069
| false
| 0
| 0.068966
| 0
| 0.396552
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4193bf7c1b3cd811dde985083067c06d301bbfb
| 2,588
|
py
|
Python
|
deletion_test.py
|
tjake/cassandra-dtest
|
df49e4f16b2ed8b9c38f767fffd796ae3d9cc6f3
|
[
"Apache-2.0"
] | null | null | null |
deletion_test.py
|
tjake/cassandra-dtest
|
df49e4f16b2ed8b9c38f767fffd796ae3d9cc6f3
|
[
"Apache-2.0"
] | null | null | null |
deletion_test.py
|
tjake/cassandra-dtest
|
df49e4f16b2ed8b9c38f767fffd796ae3d9cc6f3
|
[
"Apache-2.0"
] | null | null | null |
from dtest import Tester
import os, sys, time
from ccmlib.cluster import Cluster
from tools import require, since
from jmxutils import make_mbean, JolokiaAgent
class TestDeletion(Tester):
def gc_test(self):
""" Test that tombstone are fully purge after gc_grace """
cluster = self.cluster
cluster.populate(1).start()
[node1] = cluster.nodelist()
time.sleep(.5)
cursor = self.patient_cql_connection(node1)
self.create_ks(cursor, 'ks', 1)
self.create_cf(cursor, 'cf', gc_grace=0, key_type='int', columns={'c1': 'int'})
cursor.execute('insert into cf (key, c1) values (1,1)')
cursor.execute('insert into cf (key, c1) values (2,1)')
node1.flush()
result = cursor.execute('select * from cf;')
assert len(result) == 2 and len(result[0]) == 2 and len(result[1]) == 2, result
cursor.execute('delete from cf where key=1')
result = cursor.execute('select * from cf;')
if cluster.version() < '1.2': # > 1.2 doesn't show tombstones
assert len(result) == 2 and len(result[0]) == 1 and len(result[1]) == 1, result
node1.flush()
time.sleep(.5)
node1.compact()
time.sleep(.5)
result = cursor.execute('select * from cf;')
assert len(result) == 1 and len(result[0]) == 2, result
@require(9194)
def tombstone_size_test(self):
self.cluster.populate(1).start(wait_for_binary_proto=True)
[node1] = self.cluster.nodelist()
cursor = self.patient_cql_connection(node1)
self.create_ks(cursor, 'ks', 1)
cursor.execute('CREATE TABLE test (i int PRIMARY KEY)')
stmt = cursor.prepare('DELETE FROM test where i = ?')
for i in range(100):
cursor.execute(stmt, [i])
self.assertEqual(memtable_count(node1, 'ks', 'test'), 100)
self.assertGreater(memtable_size(node1, 'ks', 'test'), 0)
def memtable_size(node, keyspace, table):
new_name = node.get_cassandra_version() >= '2.1'
name = 'MemtableLiveDataSize' if new_name else 'MemtableDataSize'
return columnfamily_metric(node, keyspace, table, name)
def memtable_count(node, keyspace, table):
return columnfamily_metric(node, keyspace, table, 'MemtableColumnsCount')
def columnfamily_metric(node, keyspace, table, name):
with JolokiaAgent(node) as jmx:
mbean = make_mbean('metrics', type='ColumnFamily',
name=name, keyspace=keyspace, scope=table)
value = jmx.read_attribute(mbean, 'Value')
return value
| 34.052632
| 91
| 0.632921
| 336
| 2,588
| 4.785714
| 0.324405
| 0.064677
| 0.037313
| 0.046642
| 0.304726
| 0.295398
| 0.198383
| 0.198383
| 0.126866
| 0.069652
| 0
| 0.027806
| 0.235703
| 2,588
| 75
| 92
| 34.506667
| 0.785137
| 0.031685
| 0
| 0.226415
| 0
| 0
| 0.131253
| 0
| 0
| 0
| 0
| 0
| 0.09434
| 1
| 0.09434
| false
| 0
| 0.09434
| 0.018868
| 0.264151
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e41c425d0ed1f3d737beeff6b6c0f31113fafb62
| 768
|
py
|
Python
|
multicasting_test_scripts/sender.py
|
sandwichdoge/libmulticastudp
|
735a3a6242d5444f9a5a070322a7033296707cdf
|
[
"MIT"
] | null | null | null |
multicasting_test_scripts/sender.py
|
sandwichdoge/libmulticastudp
|
735a3a6242d5444f9a5a070322a7033296707cdf
|
[
"MIT"
] | null | null | null |
multicasting_test_scripts/sender.py
|
sandwichdoge/libmulticastudp
|
735a3a6242d5444f9a5a070322a7033296707cdf
|
[
"MIT"
] | null | null | null |
#
# mostly copied from
# http://bioportal.weizmann.ac.il/course/python/PyMOTW/PyMOTW/docs/socket/multicast.html
#
import socket
import struct
import sys
import time
message = 'data worth repeating'
multicast_group = ('226.1.1.1', 4321)
# Create the datagram socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Set a timeout so the socket does not block indefinitely when trying
# to receive data.
sock.settimeout(0.2)
counter = 0
try:
while True:
counter +=1
# Send data to the multicast group
print >>sys.stderr, '%d: sending "%s"' % (counter, message )
sent = sock.sendto(message, multicast_group)
time.sleep( 5 )
finally:
print >>sys.stderr, 'closing socket'
sock.close()
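# A matching receiver sketch (assumption: run on the same LAN segment; the
# group and port mirror the sender above). It joins the multicast group via
# IP_ADD_MEMBERSHIP and prints each datagram:
#
#     import socket, struct
#     sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#     sock.bind(('', 4321))
#     mreq = struct.pack('4sL', socket.inet_aton('226.1.1.1'), socket.INADDR_ANY)
#     sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
#     while True:
#         data, address = sock.recvfrom(1024)
#         print('%s from %s' % (data, address))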
| 20.756757
| 90
| 0.670573
| 105
| 768
| 4.866667
| 0.628571
| 0.082192
| 0.054795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025042
| 0.220052
| 768
| 36
| 91
| 21.333333
| 0.828047
| 0.328125
| 0
| 0
| 0
| 0
| 0.116371
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e42510b046e5ad727d96dec824908363abd5654f
| 852
|
py
|
Python
|
python/chol_factor_test.py
|
davxy/numeric
|
1e8b44a72e1d570433a5ba81ae0795a750ce5921
|
[
"Unlicense"
] | 2
|
2020-05-03T17:02:44.000Z
|
2022-02-21T04:09:34.000Z
|
python/chol_factor_test.py
|
davxy/numeric
|
1e8b44a72e1d570433a5ba81ae0795a750ce5921
|
[
"Unlicense"
] | null | null | null |
python/chol_factor_test.py
|
davxy/numeric
|
1e8b44a72e1d570433a5ba81ae0795a750ce5921
|
[
"Unlicense"
] | null | null | null |
import numpy as np
from chol_factor import chol_factor
from triangular import triangular
# TEST: Cholesky factorization (LL')
# Symmetric positive definite matrix
A = np.matrix('5 1.2 0.3 -0.6;'
'1.2 6 -0.4 0.9;'
'0.3 -0.4 8 1.7;'
'-0.6 0.9 1.7 10')
print('A = \n', A)
# Computation of the L factor
L = chol_factor(A)
print('L = \n', L)
# Check
if not np.allclose(A, np.dot(L, L.transpose())):
raise Exception('Cholesky factorization test failure')
# TEST: System Resolution
# Ax = LL'x = b
b = np.matrix("68; 9; 45; 35")
print('b = \n', b)
# Lk = b
k = triangular(L, b, 1)
print('k = \n', k)
# L'x = k
x = triangular(L.transpose(), k, 0)
print('x = \n', x)
# Check
b1 = np.dot(A, x)
print('b1 = \n', b1)
if not np.allclose(b, b1):
raise Exception('System resolution failure')
| 23.027027
| 53
| 0.580986
| 150
| 852
| 3.28
| 0.373333
| 0.060976
| 0.012195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064815
| 0.239437
| 852
| 36
| 54
| 23.666667
| 0.694444
| 0.190141
| 0
| 0
| 0
| 0
| 0.264706
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.136364
| 0
| 0.136364
| 0.272727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4257523a5f56faf33e09f713fd3a02e93109a4b
| 11,245
|
py
|
Python
|
PSO_system/GUI/gui_root.py
|
daniel4lee/PSO-car-simulator
|
b4aebca0fed614e33acc3e7d665085d55a67b82a
|
[
"MIT"
] | 1
|
2022-03-23T21:51:59.000Z
|
2022-03-23T21:51:59.000Z
|
PSO_system/GUI/gui_root.py
|
daniel4lee/PSO-car-simulator
|
b4aebca0fed614e33acc3e7d665085d55a67b82a
|
[
"MIT"
] | 1
|
2018-10-08T12:53:42.000Z
|
2018-10-08T13:46:13.000Z
|
PSO_system/GUI/gui_root.py
|
daniel4lee/PSO-car-simulator
|
b4aebca0fed614e33acc3e7d665085d55a67b82a
|
[
"MIT"
] | 2
|
2020-04-26T08:22:53.000Z
|
2021-05-18T09:51:24.000Z
|
"""Build the tkinter gui root"""
import math
from PyQt5.QtWidgets import *  # (QWidget, QToolTip, QDesktopWidget, QPushButton, QApplication)
from PyQt5.QtGui import QFont
from PyQt5.QtCore import QCoreApplication, QObject, QRunnable, QThread, QThreadPool, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QIntValidator, QDoubleValidator
import sys
from PSO_system.Counting.plot import PlotCanvas
from PSO_system.Counting.run import CarRunning
from PSO_system.Counting.test_result import TestRunning
THREADS = []
class GuiRoot(QWidget):
"""Root of gui."""
def __init__(self, dataset, training_data):
"""Create GUI root with datasets dict"""
super().__init__()
self.threadpool = QThreadPool()
self.setFixedSize(800, 800)
self.center()
self.setWindowTitle('PSO')
self.show()
#read the map and training data
self.map_datalist = dataset.keys()
self.map_data = dataset
self.training_datalist = training_data.keys()
self.training_data = training_data
#create the file-choosing area
self.file_run_creation(self.map_datalist, self.training_datalist)
self.operation_parameter_creation()
self.output_text_creation()
hbox = QHBoxLayout()
vbox = QVBoxLayout()
vbox.addWidget(self.file_run)
vbox.addWidget(self.operation_type)
vbox.addWidget(self.text_group_box)
hbox.addLayout(vbox)
self.m = PlotCanvas(self.map_data)
hbox.addWidget(self.m)
self.setLayout(hbox)
def file_run_creation(self, datalist, training_data):
self.file_run = QGroupBox("File choose")
layout = QGridLayout()
layout.setSpacing(10)
map_file_label = QLabel("Map file: ")
self.map_file_choose = QComboBox()
for i in datalist:
self.map_file_choose.addItem("{}".format(i))
self.map_file_choose.currentTextChanged.connect(self.file_changed)
training_file_label = QLabel("Training file: ")
self.training_file_choose = QComboBox()
for i in training_data:
self.training_file_choose.addItem("{}".format(i))
self.run_btn = QPushButton("Start", self)
self.run_btn.clicked.connect(self.run)
self.test_btn = QPushButton("Test", self)
self.test_btn.clicked.connect(self.test_rbfn)
layout.addWidget(map_file_label, 1, 0, 1, 1)
layout.addWidget(self.map_file_choose, 1, 1, 1, 3)
layout.addWidget(training_file_label, 2, 0, 1, 1)
layout.addWidget(self.training_file_choose, 2, 1, 1, 3)
layout.addWidget(self.run_btn, 3, 0, 1, 4)
layout.addWidget(self.test_btn, 4, 0, 1, 4)
layout.setVerticalSpacing(0)
layout.setHorizontalSpacing(0)
self.file_run.setLayout(layout)
self.test_parameter = None
def operation_parameter_creation(self):
"""Operation parameter field"""
self.operation_type = QGroupBox("Operation parameter setting")
vbox = QVBoxLayout()
#Set up the operation parameter region: iteration times, swarm size,
#w, phi1, phi2, network j value, maximum SD, and maximum V
iteration_layout = QHBoxLayout()
iteration_setting = QLabel("Iteration times :")
self.iteration_line = QSpinBox()
self.iteration_line.setRange(1, 10000)
self.iteration_line.setValue(60)
self.iteration_line.setMaximumWidth(150)
iteration_layout.addWidget(iteration_setting)
iteration_layout.addWidget(self.iteration_line)
iteration_layout.insertSpacing(-1,100)
swarm_size_layout = QHBoxLayout()
swarm_size_setting = QLabel("Swarm size:")
self.swarm_size_line = QSpinBox()
self.swarm_size_line.setRange(1, 10000)
self.swarm_size_line.setValue(200)
self.swarm_size_line.setMaximumWidth(150)
swarm_size_layout.addWidget(swarm_size_setting)
swarm_size_layout.addWidget(self.swarm_size_line)
swarm_size_layout.insertSpacing(-1,100)
w_layout = QHBoxLayout()
w_setting = QLabel("Robust of w: ")
self.w_line = QDoubleSpinBox()
self.w_line.setRange(0, 10)
self.w_line.setDecimals(2)
self.w_line.setValue(0.8)
self.w_line.setMaximumWidth(150)
w_layout.addWidget(w_setting)
w_layout.addWidget(self.w_line)
w_layout.insertSpacing(-1,100)
# in PSO, φ1 means the parameter multiplied with (pi(t)-x(t))
fai_1_layout = QHBoxLayout()
fai_1_setting = QLabel("Robust of φ1: ")
self.fai_1_line = QDoubleSpinBox()
self.fai_1_line.setValue(1.5)
self.fai_1_line.setRange(0, 10)
self.fai_1_line.setDecimals(2)
self.fai_1_line.setMaximumWidth(150)
fai_1_layout.addWidget(fai_1_setting)
fai_1_layout.addWidget(self.fai_1_line)
fai_1_layout.insertSpacing(-1,100)
fai_2_layout = QHBoxLayout()
fai_2_setting = QLabel("Robust of φ2: ")
self.fai_2_line = QDoubleSpinBox()
self.fai_2_line.setRange(0, 10)
self.fai_2_line.setDecimals(2)
self.fai_2_line.setValue(2.5)
self.fai_2_line.setMaximumWidth(150)
fai_2_layout.addWidget(fai_2_setting)
fai_2_layout.addWidget(self.fai_2_line)
fai_2_layout.insertSpacing(-1,100)
net_j_layout = QHBoxLayout()
net_j_setting = QLabel("Network neuron number j: ")
self.net_j_line = QSpinBox()
self.net_j_line.setRange(1,10)
self.net_j_line.setValue(6)
self.net_j_line.setMaximumWidth(150)
net_j_layout.addWidget(net_j_setting)
net_j_layout.addWidget(self.net_j_line)
net_j_layout.insertSpacing(-1,100)
sd_layout = QHBoxLayout()
sd_setting = QLabel("Maximum SD: ")
self.sd_line = QSpinBox()
self.sd_line.setRange(1,100)
self.sd_line.setValue(10)
self.sd_line.setMaximumWidth(150)
sd_layout.addWidget(sd_setting)
sd_layout.addWidget(self.sd_line)
sd_layout.insertSpacing(-1,100)
v_max_layout = QHBoxLayout()
v_max_setting = QLabel("Maximum V: ")
self.v_max_line = QDoubleSpinBox()
self.v_max_line.setRange(0, 10)
self.v_max_line.setDecimals(2)
self.v_max_line.setValue(4)
self.v_max_line.setMaximumWidth(150)
v_max_layout.addWidget(v_max_setting)
v_max_layout.addWidget(self.v_max_line)
v_max_layout.insertSpacing(-1,100)
vbox.addLayout(iteration_layout)
vbox.addLayout(swarm_size_layout)
vbox.addLayout(w_layout)
vbox.addLayout(fai_1_layout)
vbox.addLayout(fai_2_layout)
vbox.addLayout(net_j_layout)
vbox.addLayout(v_max_layout)
vbox.addLayout(sd_layout)
self.operation_type.setLayout(vbox)
def output_text_creation(self):
self.text_group_box = QGroupBox("Execution log")
layout = QVBoxLayout()
self.console = QTextEdit()
self.console.setReadOnly(True)
layout.addWidget(self.console)
self.text_group_box.setLayout(layout)
def file_changed(self):
"""print map"""
self.m.plot_map(self.map_file_choose.currentText())
self.console.append('Map changed')
def run(self):
self.test_parameter = None
l = []
l.append(self.iteration_line.value())
l.append(self.swarm_size_line.value())
l.append(self.w_line.value())
l.append(self.fai_1_line.value())
l.append(self.fai_2_line.value())
l.append(self.net_j_line.value())
l.append(self.v_max_line.value())
l.append(self.sd_line.value())
# disable the controls to avoid changes while running
self.disable('yes')
# transfer for counting
self.console.append('Start training RBFN with PSO')
car = CarRunning(self.map_data, self.map_file_choose.currentText(), self.training_data, self.training_file_choose.currentText(), l)
car.signals.iteration.connect(self.console_output)
car.signals.result.connect(self.dir_test_rbfn)
self.threadpool.start(car)
def dir_test_rbfn(self, parameters):
# disable the controls to avoid changes while running
self.disable('yes')
# transfer for counting
self.test_parameter = parameters
self.console.append('Start testing result on current map.')
self.console.append("------------------------------------------------------")
test_thread = TestRunning(self.map_data, self.map_file_choose.currentText(), parameters, None)
test_thread.signals.plot.connect(self.plot_output)
self.threadpool.start(test_thread)
def test_rbfn(self):
if self.test_parameter is None:
self.console.append('No RBFN model, please push [Start] button first.')
else:
# disable the controls to avoid changes while running
self.disable('yes')
# transfer for counting
self.console.append('Start testing result on current map.')
self.console.append("------------------------------------------------------")
test_thread = TestRunning(self.map_data, self.map_file_choose.currentText(), None, self.test_parameter)
test_thread.signals.plot.connect(self.plot_output)
self.threadpool.start(test_thread)
def console_output(self, s):
self.console.append(str(s))
def plot_output(self, s):
self.m.plot_car(s)
self.disable('no')
self.console.append('Test is complete; the result is shown in the plot area')
self.console.append("------------------------------------------------------")
def center(self):
"""Place window in the center"""
qr = self.frameGeometry()
central_p = QDesktopWidget().availableGeometry().center()
qr.moveCenter(central_p)
self.move(qr.topLeft())
def disable(self, yes_or_no):
if yes_or_no == 'yes':
self.iteration_line.setDisabled(True)
self.swarm_size_line.setDisabled(True)
self.w_line.setDisabled(True)
self.fai_2_line.setDisabled(True)
self.fai_1_line.setDisabled(True)
self.net_j_line.setDisabled(True)
self.map_file_choose.setDisabled(True)
self.training_file_choose.setDisabled(True)
self.run_btn.setDisabled(True)
self.test_btn.setDisabled(True)
self.v_max_line.setDisabled(True)
self.sd_line.setDisabled(True)
else:
self.iteration_line.setDisabled(False)
self.swarm_size_line.setDisabled(False)
self.w_line.setDisabled(False)
self.fai_2_line.setDisabled(False)
self.fai_1_line.setDisabled(False)
self.net_j_line.setDisabled(False)
self.map_file_choose.setDisabled(False)
self.training_file_choose.setDisabled(False)
self.run_btn.setDisabled(False)
self.test_btn.setDisabled(False)
self.v_max_line.setDisabled(False)
self.sd_line.setDisabled(False)
if __name__ == '__main__':
print("Error: This file can only be imported. Execute 'main.py'")
| 41.494465
| 139
| 0.649355
| 1,396
| 11,245
| 4.984241
| 0.161175
| 0.049583
| 0.035499
| 0.024432
| 0.237281
| 0.136677
| 0.088675
| 0.088675
| 0.08307
| 0.08307
| 0
| 0.020826
| 0.23566
| 11,245
| 270
| 140
| 41.648148
| 0.788714
| 0.052557
| 0
| 0.076596
| 0
| 0
| 0.061251
| 0.015266
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051064
| false
| 0
| 0.042553
| 0
| 0.097872
| 0.004255
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e425b8c86c1c0699016fdb4cfc8b01eea833c4f2
| 2,346
|
py
|
Python
|
qsrlib/src/qsrlib_qsrs/qsr_cardinal_direction.py
|
alexiatoumpa/QSR_Detector
|
ff92a128dddb613690a49a7b4130afeac0dd4381
|
[
"MIT"
] | 15
|
2015-06-15T16:50:37.000Z
|
2022-03-27T09:25:56.000Z
|
qsrlib/src/qsrlib_qsrs/qsr_cardinal_direction.py
|
alexiatoumpa/QSR_Detector
|
ff92a128dddb613690a49a7b4130afeac0dd4381
|
[
"MIT"
] | 205
|
2015-01-22T12:02:59.000Z
|
2022-03-29T11:59:55.000Z
|
qsrlib/src/qsrlib_qsrs/qsr_cardinal_direction.py
|
alexiatoumpa/QSR_Detector
|
ff92a128dddb613690a49a7b4130afeac0dd4381
|
[
"MIT"
] | 16
|
2015-02-04T23:13:18.000Z
|
2022-03-08T13:45:53.000Z
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division
from qsrlib_qsrs.qsr_dyadic_abstractclass import QSR_Dyadic_1t_Abstractclass
import math
class QSR_Cardinal_Direction(QSR_Dyadic_1t_Abstractclass):
"""Cardinal direction relations.
Values of the abstract properties
* **_unique_id** = "cardir"
* **_all_possible_relations** = ("n", "ne", "e", "se", "s", "sw", "w", "nw", "eq")
* **_dtype** = "bounding_boxes_2d"
Some explanation about the QSR or better link to a separate webpage explaining it. Maybe a reference if it exists.
"""
_unique_id = "cardir"
"""str: Unique identifier name of the QSR."""
_all_possible_relations = ("n", "ne", "e", "se", "s", "sw", "w", "nw", "eq")
"""tuple: All possible relations of the QSR."""
_dtype = "bounding_boxes_2d"
"""str: On what kind of data the QSR works with."""
def __init__(self):
"""Constructor."""
super(QSR_Cardinal_Direction, self).__init__()
def _compute_qsr(self, data1, data2, qsr_params, **kwargs):
"""Compute QSR relation.
:param data1: Bounding box.
:type data1: list or tuple of int or floats
:param data2: Bounding box.
:type data2: list or tuple of int or floats
:return: QSR relation.
:rtype: str
"""
# Finds the difference between the centres of each object
dx = ((data2[0]+data2[2])/2.0) - ((data1[0]+data1[2])/2.0)
dy = ((data2[1]+data2[3])/2.0) - ((data1[1]+data1[3])/2.0)
if dx==0 and dy==0:
return 'eq'
# Calculate the angle of the line between the two objects (in degrees)
angle = (math.atan2(dx,dy) * (180/math.pi))+22.5
# If that angle is negative, invert it
if angle < 0.0:
angle = (360.0 + angle)
# Lookup labels and return answer
return self.__direction_switch(math.floor(((angle)/45.0)))
def __direction_switch(self, x):
"""Switch Statement convert number into region label.
:param x:
:type x:
:return: QSR relation.
:rtype: str
"""
return {
0: 's',
1: 'sw',
2: 'w',
3: 'nw',
4: 'n',
5: 'ne',
6: 'e',
7: 'se',
}.get(x)
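# A usage sketch (assumption: boxes are (x1, y1, x2, y2) in screen coordinates,
# as the centre computation in _compute_qsr implies). Note the atan2(dx, dy)
# convention above: for a second box with dx=10, dy=0 relative to the first,
# the angle is 90 + 22.5 = 112.5, and floor(112.5 / 45) == 2 maps to 'w':
#
#     qsr = QSR_Cardinal_Direction()
#     qsr._compute_qsr((0, 0, 2, 2), (10, 0, 12, 2), qsr_params=None)  # -> 'w'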
| 31.28
| 118
| 0.561381
| 307
| 2,346
| 4.123779
| 0.439739
| 0.015798
| 0.047393
| 0.037915
| 0.131122
| 0.091627
| 0.091627
| 0.053712
| 0.053712
| 0.053712
| 0
| 0.036992
| 0.297101
| 2,346
| 74
| 119
| 31.702703
| 0.730746
| 0.373402
| 0
| 0
| 0
| 0
| 0.043257
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.103448
| 0
| 0.448276
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4287373cf648c93ed322e508af33deff1f8e862
| 4,291
|
py
|
Python
|
clustering/GMM.py
|
peasant98/NBA-Stats-Clustering
|
57ff7e70a8cbb0c609d6a6720134a37695e2a860
|
[
"MIT"
] | null | null | null |
clustering/GMM.py
|
peasant98/NBA-Stats-Clustering
|
57ff7e70a8cbb0c609d6a6720134a37695e2a860
|
[
"MIT"
] | null | null | null |
clustering/GMM.py
|
peasant98/NBA-Stats-Clustering
|
57ff7e70a8cbb0c609d6a6720134a37695e2a860
|
[
"MIT"
] | null | null | null |
# NBA Stats Clustering
# Copyright Matthew Strong, 2019
# gaussian mixture models with em algorithm
import numpy as np
from scipy import stats
from clustering.Cluster import NBACluster
# nba gmm class
# gmm from scratch as well, more explained below
class NBAGMM(NBACluster):
def fit(self):
self.method = 'GMM'
# get the points
a, m = self.get_points(self.num_clusters)
# em algorithm for 100 iterations
res = self.em_algorithm(self.num_clusters, m, a)
probs_given_data = res[2]
# probability of each point
# sum of squared distances
# and get assignments by max probability of each point to a certain cluster
l = []
dist = 0
for v in range(len(a)):
selection = np.argmax(probs_given_data[:,v])
dist += self.dist(a[v], res[0][selection])
l.append(selection)
self.ssd = dist
self.labels = l
self.centroids = res[0]
def get_points(self, k):
# select points randomly
a = self.df.values
indices = np.random.choice(list(range(len(a))), k, replace=False)
k_points = a[indices]
return a, k_points
def dist(self, x1, x2):
# euclidean distance
return np.sqrt(np.sum((x1-x2)**2))
# this algorithm was influenced by the gmm in class notebook, as well as my implementation in hw2
# but heavily adapted for n dimensions and varying values of k, now all vectorized, so, more
# dynamic to work with.
def em_algorithm(self, k, m, a):
        # works for n-dimensional data
        # the k random points selected in get_points serve as the initial means
mu = np.zeros((k, a.shape[-1]))
covariances = np.zeros((k, a.shape[-1], a.shape[-1]))
probs = np.zeros(k)
# also p_class_n
# set probabilities of each cluster, or the weight, to all equal
probs.fill(1./k)
        # computing the probability of each mixture given the data requires these matrices
p_given_class = np.zeros((k, len(a)))
p_given_data = np.zeros((k, len(a)))
p_class_data = np.zeros((k, len(a), 1, 1))
n_class = np.zeros(k)
for ind,val in enumerate(mu):
mu[ind] = m[ind]
for ind,val in enumerate(covariances):
# set all covariances of k mixtures to overall covariance of dataset
if ind == 0:
covariances[0] = np.cov(a.T)
else:
covariances[ind] = covariances[0]
for _ in range(100):
# 100 iterations
summation = np.zeros((len(a)))
for i in range(k):
# compute pdf
p_given_class[i] = stats.multivariate_normal.pdf(a, mean=mu[i], cov=covariances[i], allow_singular=True)
p_given_data[i] = p_given_class[i] * probs[i]
summation += p_given_data[i]
length = len(a)
for i in range(k):
# get probabilities of mixtures
p_given_data[i]/=summation
n_class[i] = np.sum(p_given_data[i])
probs[i] = n_class[i]/length
for i in range(k):
means = np.zeros(a.shape[-1])
# get means from data
for j in range(len(means)):
means[j] = (1.0/n_class[i]) * np.sum(p_given_data[i]*a[:,j])
mu[i] = np.array(means)
for i in range(k):
# covariance calculations
covs = []
for p in a:
x_i = p
r = x_i - mu[i]
vec = np.expand_dims(r, axis=0)
cov_i = vec * vec.T
covs.append(cov_i)
# expand dims and use np sum to get results along axis=0
covs = np.array(covs)
temp = np.expand_dims(p_given_data[i], axis=1)
p_class_data[i] = np.expand_dims(temp, axis=1)
covariances[i] = np.sum(p_class_data[i] * covs, axis=0) / n_class[i]
# return means, covariances of cluster, probabilities of points being in certain mixture
# and probabilities of mixtures themselves.
return mu, covariances, p_given_data, probs
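# A minimal standalone sketch (not part of the original class) of the E-step
# normalisation used in em_algorithm above: per-mixture responsibilities are
# weighted pdf values, normalised so that each point's responsibilities sum
# to 1. The data and parameters below are made up purely for illustration.
#
# a = np.array([[0.0, 0.0], [5.0, 5.0], [0.2, -0.1]])
# mu = np.array([[0.0, 0.0], [5.0, 5.0]])
# weights = np.array([0.5, 0.5])
# p = np.array([weights[i] * stats.multivariate_normal.pdf(a, mean=mu[i], cov=np.eye(2))
#               for i in range(2)])
# resp = p / p.sum(axis=0)   # shape (k, n); each column sums to 1
# resp.argmax(axis=0)        # hard assignments: array([0, 1, 0])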
| 40.102804
| 120
| 0.554649
| 597
| 4,291
| 3.891122
| 0.286432
| 0.028412
| 0.034438
| 0.028412
| 0.093844
| 0.066294
| 0.033577
| 0.019802
| 0.019802
| 0
| 0
| 0.014255
| 0.346073
| 4,291
| 106
| 121
| 40.481132
| 0.813614
| 0.271265
| 0
| 0.057143
| 0
| 0
| 0.000969
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.042857
| 0.014286
| 0.157143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e428f454d7dceb480c84f33f264e2ac819a010fd
| 1,484
|
py
|
Python
|
ML/eval.py
|
Data-Science-Community-SRM/Fashion-Generation
|
fa062e2b31b4fba8945820d911dfa41de45b1333
|
[
"MIT"
] | 1
|
2021-04-27T09:13:09.000Z
|
2021-04-27T09:13:09.000Z
|
ML/eval.py
|
Aradhya-Tripathi/Fashion-Generation
|
fa062e2b31b4fba8945820d911dfa41de45b1333
|
[
"MIT"
] | null | null | null |
ML/eval.py
|
Aradhya-Tripathi/Fashion-Generation
|
fa062e2b31b4fba8945820d911dfa41de45b1333
|
[
"MIT"
] | 1
|
2021-03-12T13:15:08.000Z
|
2021-03-12T13:15:08.000Z
|
import torch
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import sys
sys.path.append("./ML")
import Definitions.models as models
from Definitions.dataset import Data
def main(imgpath="Data", noise_dim=100, vec_shape=100, root="./ModelWeights/"):
netG = models.Generator(device="cpu", noise_dim=noise_dim, vec_shape=vec_shape)
netD = models.Discriminator()
netENC = models.ResNetEncoder(vec_shape)
netG.load_state_dict(torch.load(root + "Gen.pt"))
netD.load_state_dict(torch.load(root + "Dis.pt"))
netENC.load_state_dict(torch.load(root + "RES.pt"))
# netG.eval()
# netD.eval()
# netENC.eval()
numrows = 5
d = Data(path=imgpath, batch_size=numrows, size=(64, 64))
d_loaded = DataLoader(d.folderdata, numrows, shuffle=True)
# get one random batch of images
imgs = next(iter(d_loaded))[0]
with torch.no_grad():
vector = netENC(imgs)
fakeImages = netG(vector)
_, ax = plt.subplots(2, numrows, squeeze=False, sharex=True, sharey=True, figsize=(8, 4))
for i in range(numrows):
ax[0, i].imshow((fakeImages[i].permute(1, 2, 0).numpy() + [1, 1, 1]) / [2, 2, 2])
ax[0, i].axis(False)
ax[1, i].imshow((imgs[i].permute(1, 2, 0).numpy() + [1, 1, 1]) / [2, 2, 2])
ax[1, i].axis(False)
plt.subplots_adjust(wspace=0, hspace=0)
plt.show()
if __name__ == "__main__":
main()
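# Note (not in the original script): the `(x + [1, 1, 1]) / [2, 2, 2]` step
# above maps image tensors from the [-1, 1] range (typical of a tanh
# generator output) back into [0, 1] so that matplotlib's imshow renders
# them correctly.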
| 26.981818
| 94
| 0.617925
| 213
| 1,484
| 4.178404
| 0.431925
| 0.035955
| 0.04382
| 0.060674
| 0.141573
| 0.141573
| 0.053933
| 0.053933
| 0.053933
| 0.053933
| 0
| 0.033943
| 0.225741
| 1,484
| 54
| 95
| 27.481481
| 0.740644
| 0.045822
| 0
| 0
| 0
| 0
| 0.038348
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.193548
| 0
| 0.225806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e42935051444daddcd5cee33f9a2daa9cde6e823
| 4,965
|
py
|
Python
|
app/screens/authorize.py
|
jimkutter/rpi_lcars
|
f5ae0891f26d3494ad77f894c4f7733deaf063ee
|
[
"MIT"
] | null | null | null |
app/screens/authorize.py
|
jimkutter/rpi_lcars
|
f5ae0891f26d3494ad77f894c4f7733deaf063ee
|
[
"MIT"
] | null | null | null |
app/screens/authorize.py
|
jimkutter/rpi_lcars
|
f5ae0891f26d3494ad77f894c4f7733deaf063ee
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
import pygame
from pygame.mixer import Sound
from screens.base_screen import BaseScreen
from ui import colours
from ui.widgets.background import LcarsBackgroundImage
from ui.widgets.gifimage import LcarsGifImage
from ui.widgets.lcars_widgets import LcarsButton
from ui.widgets.lcars_widgets import LcarsText
class CodeButton(LcarsButton):
def __init__(self, colour, pos, text, handler=None, rectSize=None):
super().__init__(colour, pos, text, handler, rectSize)
self.code = None
class ScreenAuthorize(BaseScreen):
def __init__(self, app):
super().__init__(app, None, None)
self.login_timeout = None
self.reset_timer()
def setup(self, all_sprites):
all_sprites.add(LcarsBackgroundImage("assets/lcars_screen_2.png"), layer=0)
all_sprites.add(LcarsGifImage("assets/gadgets/stlogorotating.gif", (103, 369), 50), layer=0)
all_sprites.add(LcarsText(colours.ORANGE, (270, -1), "AUTHORIZATION REQUIRED", 2), layer=0)
all_sprites.add(LcarsText(colours.BLUE, (330, -1), "ONLY AUTHORIZED PERSONNEL MAY ACCESS THIS TERMINAL", 1.5),
layer=1)
all_sprites.add(LcarsText(colours.BLUE, (360, -1), "TOUCH TERMINAL TO PROCEED", 1.5), layer=1)
greek_alphabet = [
"alpha",
"beta",
"gamma",
"delta",
"epsilon",
"zeta",
"eta",
"theta",
"iota",
"kappa",
"lambda",
"mu",
"nu",
"xi",
"omicron",
"pi",
"rho",
"sigma",
"tau",
"upsilon",
"phi",
"chi",
"psi",
"omega",
]
x_orig = 127
y_orig = 75
padding = 20
width = 122
height = 44
row = 0
col = 0
for letter in greek_alphabet:
x = x_orig + (col * (width + padding / 2))
y = y_orig + (row * (height + padding / 2))
button = CodeButton(colours.GREY_BLUE, (y, x), letter.upper(), self.button_handler)
button.code = letter
col = col + 1
if col > 3:
row = row + 1
col = 0
all_sprites.add(button, layer=2)
self.layer1 = all_sprites.get_sprites_from_layer(1)
self.layer2 = all_sprites.get_sprites_from_layer(2)
# sounds
if not self.app.is_screen_off:
Sound("assets/audio/panel/215.wav").play()
self.sound_granted = Sound("assets/audio/accessing.wav")
self.sound_beep1 = Sound("assets/audio/panel/201.wav")
self.sound_denied = Sound("assets/audio/access_denied.wav")
self.sound_deny1 = Sound("assets/audio/deny_1.wav")
self.sound_deny2 = Sound("assets/audio/deny_2.wav")
############
# SET PIN CODE WITH THIS VARIABLE
############
self.pin = self.app.config['pin']
############
self.reset()
def reset(self):
# Variables for PIN code verification
self.correct = 0
self.pin_i = 0
self.granted = False
for sprite in self.layer1: sprite.visible = True
for sprite in self.layer2: sprite.visible = False
def screen_update(self):
super().screen_update()
if self.login_timeout:
auth_delta = self.login_timeout - datetime.now()
if int(auth_delta.total_seconds()) == 0:
self.reset()
def handleEvents(self, event, fpsClock):
if event.type == pygame.MOUSEBUTTONDOWN:
# Play sound
self.sound_beep1.play()
self.app.screen_on()
if event.type == pygame.MOUSEBUTTONUP:
if not self.layer2[0].visible:
self.show_login_controls()
elif self.pin_i == len(self.pin):
# Ran out of button presses
if self.correct == len(self.pin):
self.sound_granted.play()
from screens.main import ScreenMain
self.loadScreen(ScreenMain(self.app))
else:
self.sound_deny2.play()
self.sound_denied.play()
self.reset()
return False
def show_login_controls(self):
for sprite in self.layer1: sprite.visible = False
for sprite in self.layer2: sprite.visible = True
Sound("assets/audio/enter_authorization_code.wav").play()
self.reset_timer()
def button_handler(self, item, event, clock):
self.reset_timer()
if self.pin[self.pin_i] == item.code:
self.correct += 1
print(self.correct)
self.pin_i += 1
def reset_timer(self):
self.login_timeout = datetime.now() + timedelta(seconds=self.app.config['login_timeout'])
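# Note (not in the original screen): a code entry only succeeds if every
# press matched in order. button_handler increments `correct` for each
# matching press and `pin_i` for every press, so on the final MOUSEBUTTONUP
# `correct == len(self.pin)` implies the whole sequence was entered right.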
| 31.03125
| 118
| 0.557301
| 568
| 4,965
| 4.723592
| 0.323944
| 0.033545
| 0.041744
| 0.020872
| 0.166232
| 0.135296
| 0.07678
| 0
| 0
| 0
| 0
| 0.023974
| 0.327895
| 4,965
| 159
| 119
| 31.226415
| 0.780042
| 0.022357
| 0
| 0.066116
| 0
| 0
| 0.096841
| 0.052577
| 0
| 0
| 0
| 0
| 0
| 1
| 0.07438
| false
| 0
| 0.082645
| 0
| 0.181818
| 0.008264
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4324e2ffd9d0f0cc445c08f1b32895fbc79b0d2
| 2,178
|
py
|
Python
|
Problems/P0010 - Soma de primos.py
|
clasenback/EulerProject
|
775d9774fcdfbbcc579e3c4ec0bb2d4a941764ad
|
[
"CC0-1.0"
] | null | null | null |
Problems/P0010 - Soma de primos.py
|
clasenback/EulerProject
|
775d9774fcdfbbcc579e3c4ec0bb2d4a941764ad
|
[
"CC0-1.0"
] | null | null | null |
Problems/P0010 - Soma de primos.py
|
clasenback/EulerProject
|
775d9774fcdfbbcc579e3c4ec0bb2d4a941764ad
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 7 17:11:12 2021
@author: User
SUMMATION OF PRIMES
The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
Find the sum of all the primes below two million.
21min19s to find.
"""
from datetime import datetime as date
def nextPrime(n, primes):
isPrime = False
if n % 2 == 0:
        n += 1
else:
if n % primes[-1] == 0:
n += 2
while not isPrime:
for prime in primes:
            # FIRST: check whether n is prime or we should move on to the next
            # candidate. SECOND: it is a waste to try division by a prime
            # whose quotient is less than 3
            if n % prime == 0:
                n += 2
# skipping 5
if str(n)[-1] == "5":
n += 2
break
if prime == primes[-1]:
isPrime = not isPrime
            if n / prime < 3:
isPrime = not isPrime
break
return n
# INPUTS
target = 2000000
primes = [2, 3, 5, 7, 11, 13, 17, 19]
control = target / 10
path = "C:/Users/User/Documents/AA - Pessoal/DataScience/Project Euler/"
file = "primos_ate_" + str(target) + ".csv"
print("INICIANDO BUSCA DOS NÚMEROS PRIMOS MENORES QUE", target)
start = date.now()
# PROCESSING
while primes[-1] < target :
candidate = nextPrime(primes[-1], primes)
if candidate > target :
break
primes.append(candidate)
# CONTROLLING
if candidate >= control:
print("O", len(primes), "º primo é", candidate, "em", date.now() - start)
control += target / 10
# OUTPUT
print("\n")
print("RESULTADOS:")
print("ENCONTRAR OS NÚMEROS PRIMOS MENORES QUE", target)
print("FORAM ENCONTRADOS", len(primes), "NÚMEROS PRIMOS")
print("ÚLTIMO PRIMO DA LISTA:", primes[-1])
print("SOMA DOS PRIMOS ENCONTRADOS:", sum(primes))
print("TEMPO TOTAL DA BUSCA:", date.now() - start)
# TO FILE
f = open(path + file, "w+")
for i in range(len(primes)):
f.write(str(i+1))
f.write("\t") # tab
f.write(str(primes[i]))
f.write("\r") # carriage return
f.close()
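# A quick sanity check (not in the original script), reusing nextPrime with
# the seed list above: advancing past 7 yields 11, and the primes below 10
# (2 + 3 + 5 + 7) sum to 17, matching the docstring's example.
# print(nextPrime(7, [2, 3, 5, 7]))  # -> 11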
| 26.888889
| 82
| 0.539027
| 290
| 2,178
| 4.041379
| 0.451724
| 0.029863
| 0.013652
| 0.006826
| 0.049488
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045706
| 0.337006
| 2,178
| 80
| 83
| 27.225
| 0.765928
| 0.192837
| 0
| 0.16
| 0
| 0
| 0.179024
| 0.031947
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.02
| 0
| 0.06
| 0.18
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4348a8c3eadb9042a4b4b0ebb7cd499d99a7b46
| 1,124
|
py
|
Python
|
l5kit/l5kit/tests/rasterization/render_context_test.py
|
cdicle-motional/l5kit
|
4dc4ee5391479bb71f0b373f39c316f9eef5a961
|
[
"Apache-2.0"
] | 1
|
2021-12-04T17:48:53.000Z
|
2021-12-04T17:48:53.000Z
|
l5kit/l5kit/tests/rasterization/render_context_test.py
|
cdicle-motional/l5kit
|
4dc4ee5391479bb71f0b373f39c316f9eef5a961
|
[
"Apache-2.0"
] | null | null | null |
l5kit/l5kit/tests/rasterization/render_context_test.py
|
cdicle-motional/l5kit
|
4dc4ee5391479bb71f0b373f39c316f9eef5a961
|
[
"Apache-2.0"
] | 1
|
2021-11-19T08:13:46.000Z
|
2021-11-19T08:13:46.000Z
|
import numpy as np
import pytest
from l5kit.geometry import transform_points
from l5kit.rasterization.render_context import RenderContext
@pytest.mark.parametrize("set_origin_to_bottom", [False, True])
def test_transform_points_to_raster(set_origin_to_bottom: bool) -> None:
image_shape_px = np.asarray((200, 200))
center_in_raster_ratio = np.asarray((0.5, 0.5))
pixel_size_m = np.asarray((1.0, 1.0))
center_world = np.asarray((0, -2))
render_context = RenderContext(
raster_size_px=image_shape_px,
pixel_size_m=pixel_size_m,
center_in_raster_ratio=center_in_raster_ratio,
set_origin_to_bottom=set_origin_to_bottom,
)
input_points = np.array([[0, 0], [10, 10], [-10, -10]])
if set_origin_to_bottom:
expected_output_points = np.array([[100, 98], [110, 88], [90, 108]])
else:
expected_output_points = np.array([[100, 102], [110, 112], [90, 92]])
tf = render_context.raster_from_world(center_world, 0.0)
output_points = transform_points(input_points, tf)
np.testing.assert_array_equal(output_points, expected_output_points)
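# Worked check (not in the original test): the raster centre pixel (100, 100)
# corresponds to center_world == (0, -2); the input point (0, 0) therefore
# sits +2 m along y, i.e. row 102 with the default top-left origin, or row
# 98 once set_origin_to_bottom flips the vertical axis (200 - 102 = 98).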
| 35.125
| 77
| 0.715302
| 168
| 1,124
| 4.422619
| 0.375
| 0.060565
| 0.074024
| 0.114401
| 0.080754
| 0.080754
| 0
| 0
| 0
| 0
| 0
| 0.065032
| 0.16548
| 1,124
| 31
| 78
| 36.258065
| 0.727079
| 0
| 0
| 0
| 0
| 0
| 0.017794
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 1
| 0.041667
| false
| 0
| 0.166667
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e434cb20e1bb4b89d1f4687abbe31af32ff3e3b8
| 1,528
|
py
|
Python
|
plugin/fcitx.py
|
bigshans/fcitx.vim
|
228a51c6c95997439feddff6c38d62ce014e6d59
|
[
"MIT"
] | null | null | null |
plugin/fcitx.py
|
bigshans/fcitx.vim
|
228a51c6c95997439feddff6c38d62ce014e6d59
|
[
"MIT"
] | null | null | null |
plugin/fcitx.py
|
bigshans/fcitx.vim
|
228a51c6c95997439feddff6c38d62ce014e6d59
|
[
"MIT"
] | null | null | null |
import vim
import functools
import dbus
class FcitxComm():
def __init__(self):
bus = dbus.SessionBus()
obj = bus.get_object('org.fcitx.Fcitx5', '/controller')
self.fcitx = dbus.Interface(obj, dbus_interface='org.fcitx.Fcitx.Controller1')
def status(self):
return self.fcitx.State() == 2
def activate(self):
self.fcitx.Activate()
def deactivate(self):
self.fcitx.Deactivate()
try:
Fcitx = FcitxComm()
fcitx_loaded = True
except dbus.exceptions.DBusException as e:
if not vim.vars.get('silent_unsupported'):
vim.command('echohl WarningMsg | echom "fcitx.vim not loaded: %s" | echohl NONE' % e)
fcitx_loaded = False
def may_reconnect(func):
@functools.wraps(func)
def wrapped():
global Fcitx
for _ in range(2):
try:
return func()
except Exception as e:
vim.command('echohl WarningMsg | echom "fcitx.vim: %s: %s" | echohl NONE' % (type(e).__name__, e))
Fcitx = FcitxComm()
return wrapped
@may_reconnect
def fcitx2en():
if vim.eval('g:disable_fcitx_toggle_temp') == '1':
return
if Fcitx.status():
vim.command('let b:inputtoggle = 1')
Fcitx.deactivate()
@may_reconnect
def fcitx2zh():
if vim.eval('g:disable_fcitx_toggle_temp') == '1':
vim.command('let g:disable_fcitx_toggle_temp = 0')
return
if vim.eval('exists("b:inputtoggle")') == '1':
if vim.eval('b:inputtoggle') == '1':
Fcitx.activate()
vim.command('let b:inputtoggle = 0')
else:
vim.command('let b:inputtoggle = 0')
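# Note (not in the original plugin): may_reconnect retries the wrapped call
# once after rebuilding the D-Bus connection, so a stale connection left
# over from an fcitx5 restart is healed transparently on the next toggle.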
| 25.466667
| 106
| 0.656414
| 205
| 1,528
| 4.765854
| 0.35122
| 0.061412
| 0.036847
| 0.058342
| 0.249744
| 0.200614
| 0.14739
| 0.067554
| 0.067554
| 0
| 0
| 0.011429
| 0.198298
| 1,528
| 59
| 107
| 25.898305
| 0.786122
| 0
| 0
| 0.24
| 0
| 0
| 0.254581
| 0.085733
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16
| false
| 0
| 0.06
| 0.02
| 0.34
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e435bc6759728f66c9ba58ab0f9f30b4d9e6d31b
| 828
|
py
|
Python
|
avioclient/controller.py
|
HermenegildoK/AvioClient
|
9cad3a89bbf10d7212561cf15b3ad453060c9434
|
[
"MIT"
] | null | null | null |
avioclient/controller.py
|
HermenegildoK/AvioClient
|
9cad3a89bbf10d7212561cf15b3ad453060c9434
|
[
"MIT"
] | null | null | null |
avioclient/controller.py
|
HermenegildoK/AvioClient
|
9cad3a89bbf10d7212561cf15b3ad453060c9434
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from avioclient.send_data import SendControls
from avioclient import config
def send_data():
data_sender = SendControls(config.SERVER_URL)
connections_done = 0
while True:
connections_done += 1
print(
data_sender.get_data(
config.GET_ENDPOINT.format(
connection_id=connections_done
)
)
)
print(
data_sender.post_data(
config.POST_ENDPOINT.format(
connection_id=connections_done
),
data={
"position": "LEFT",
"offset": 180
}
)
)
if connections_done > 100:
break
if __name__ == "__main__":
send_data()
| 23.657143
| 50
| 0.48913
| 71
| 828
| 5.338028
| 0.507042
| 0.197889
| 0.079156
| 0.137203
| 0.216359
| 0.216359
| 0
| 0
| 0
| 0
| 0
| 0.019231
| 0.434783
| 828
| 34
| 51
| 24.352941
| 0.790598
| 0.025362
| 0
| 0.137931
| 0
| 0
| 0.032298
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.068966
| 0
| 0.103448
| 0.068966
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e43c4d5552c855523479c4f6f4237cbc56d53955
| 906
|
py
|
Python
|
tests/test_fitsutils.py
|
lsst-dm/despyfitsutils
|
7fb96869077712eb20a1cb0f5c132e1cc85424ec
|
[
"NCSA"
] | null | null | null |
tests/test_fitsutils.py
|
lsst-dm/despyfitsutils
|
7fb96869077712eb20a1cb0f5c132e1cc85424ec
|
[
"NCSA"
] | null | null | null |
tests/test_fitsutils.py
|
lsst-dm/despyfitsutils
|
7fb96869077712eb20a1cb0f5c132e1cc85424ec
|
[
"NCSA"
] | null | null | null |
import os
import unittest
import despyfitsutils.fitsutils as utils
TESTDIR = os.path.dirname(__file__)
class MefTest(unittest.TestCase):
"""Tests for a MEF object.
"""
def setUp(self):
inputs = [os.path.join(TESTDIR, 'data/input.fits.fz')]
output = os.path.join(TESTDIR, 'data/output.fits.fz')
# Instantiation of the class creates the output file (__init__()
# calls write()) so clobber must be set to True.
self.mef = utils.makeMEF(filenames=inputs, outname=output,
clobber=True)
def tearDown(self):
try:
os.remove(self.mef.outname)
except FileNotFoundError:
pass
def testRead(self):
self.mef.read()
self.assertEqual(len(self.mef.HDU), 1)
def testWrite(self):
self.mef.write()
self.assertTrue(os.path.isfile(self.mef.outname))
| 25.885714
| 72
| 0.611479
| 111
| 906
| 4.918919
| 0.54955
| 0.076923
| 0.03663
| 0.062271
| 0.076923
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00152
| 0.273731
| 906
| 34
| 73
| 26.647059
| 0.828267
| 0.153422
| 0
| 0
| 0
| 0
| 0.048684
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 1
| 0.190476
| false
| 0.047619
| 0.142857
| 0
| 0.380952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e43dacaa5bafcd52f175484e3b1f257816fb14b1
| 4,047
|
py
|
Python
|
applications/MensajeriaMasiva/models/db.py
|
chitohugo/MassiveSMS
|
05b528de146498531c967aff1ee4fe72720febb3
|
[
"BSD-3-Clause"
] | null | null | null |
applications/MensajeriaMasiva/models/db.py
|
chitohugo/MassiveSMS
|
05b528de146498531c967aff1ee4fe72720febb3
|
[
"BSD-3-Clause"
] | null | null | null |
applications/MensajeriaMasiva/models/db.py
|
chitohugo/MassiveSMS
|
05b528de146498531c967aff1ee4fe72720febb3
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from time import gmtime, strftime
from gluon.custom_import import track_changes
track_changes(True)
from gluon import current
from pydal import *
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
if request.global_settings.web2py_version < "2.14.1":
    raise HTTP(500, "Requires web2py 2.14.1 or newer")
from gluon.contrib.appconfig import AppConfig
myconf = AppConfig(reload=True)
uri = "postgres://chito:yndrid@localhost/massivesms"
current.db = DAL(uri,pool_size=1, check_reserved=['all'], lazy_tables=False, migrate=False)
current.db.define_table('municipio',
Field('descripcion', type='string', length=20, required=True, notnull=True,
requires=[IS_NOT_EMPTY(error_message=('Este campo no puede ser vacio'))]),
)
current.db.define_table('cargo',
Field('descripcion', type='string', length=20, required=True, notnull=True,
requires=[IS_NOT_EMPTY(error_message=('Este campo no puede ser vacio'))]),
)
current.db.define_table('mun_cargo',
Field('fk_municipio', 'reference municipio'),
Field('fk_cargo', 'reference cargo'),
primarykey=['fk_municipio','fk_cargo'],
)
current.db.define_table('contacto',
Field('numero', type='string', length=11, required=True, notnull=True,unique=True,
requires=[IS_NOT_EMPTY(error_message=('Este campo no puede ser vacio'))]),
Field('fk_municipio_id', 'reference municipio',required=True),
Field('fk_cargo_id', 'reference cargo',required=True),
)
current.db.define_table('estado_mensaje',
Field('estado', length=1, required=True, notnull=True,default=1),
Field('estado_envio',length=1,required=True, notnull=True,default=1),
Field('fk_municipio_id', 'reference municipio',required=True),
Field('fk_cargo_id', 'reference cargo',required=True),
Field('destino',length=11,required=True, notnull=True),
Field('mensaje',length=160,required=True, notnull=True),
)
# -------------------------------------------------------------------------
response.generic_patterns = ['*'] if request.is_local else []
response.formstyle = myconf.get('forms.formstyle') # or 'bootstrap3_stacked' or 'bootstrap2' or other
response.form_label_separator = myconf.get('forms.separator') or ''
from gluon.tools import Auth, Service, PluginManager
# host names must be a list of allowed host names (glob syntax allowed)
auth = Auth(current.db, host_names=myconf.get('host.names'))
service = Service()
plugins = PluginManager()
# -------------------------------------------------------------------------
# create all tables needed by auth if not custom tables
# -------------------------------------------------------------------------
auth.define_tables(username=True, signature=False)
# -------------------------------------------------------------------------
# configure email
# -------------------------------------------------------------------------
mail = auth.settings.mailer
mail.settings.server = 'logging' if request.is_local else myconf.get('smtp.server')
mail.settings.sender = myconf.get('smtp.sender')
mail.settings.login = myconf.get('smtp.login')
mail.settings.tls = myconf.get('smtp.tls') or False
mail.settings.ssl = myconf.get('smtp.ssl') or False
# -------------------------------------------------------------------------
# configure auth policy
# -------------------------------------------------------------------------
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.settings.reset_password_requires_verification = True
| 45.988636
| 165
| 0.560909
| 420
| 4,047
| 5.280952
| 0.354762
| 0.059513
| 0.059964
| 0.072588
| 0.306132
| 0.288097
| 0.260144
| 0.260144
| 0.260144
| 0.223625
| 0
| 0.010264
| 0.205584
| 4,047
| 87
| 166
| 46.517241
| 0.679627
| 0.190511
| 0
| 0.155172
| 0
| 0
| 0.180564
| 0.013489
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.017241
| 0.12069
| 0
| 0.12069
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e443a35a02a890811a35899fe38cc7d3bb4c7d5c
| 2,155
|
py
|
Python
|
api/resources/resources.py
|
arkhn/fhirball-server
|
b4d1a1c29dfff5ba60bfbb6b291f6bdb6e6ccd6e
|
[
"Apache-2.0"
] | 5
|
2018-12-21T13:20:12.000Z
|
2019-11-20T23:58:06.000Z
|
api/resources/resources.py
|
arkhn/fhir-ball-server
|
b4d1a1c29dfff5ba60bfbb6b291f6bdb6e6ccd6e
|
[
"Apache-2.0"
] | null | null | null |
api/resources/resources.py
|
arkhn/fhir-ball-server
|
b4d1a1c29dfff5ba60bfbb6b291f6bdb6e6ccd6e
|
[
"Apache-2.0"
] | null | null | null |
from flask_restful import Resource
import requests
from api.common.utils import file_response
ENCODING = 'utf-8'
SCHEMA_URL = 'http://127.0.0.1:8422'
STORE_URL = 'http://127.0.0.1:8423'
class FhirDatatypes(Resource):
@staticmethod
def get():
"""Returns CSV list of available database schemas."""
content = requests.get('{}/datatypes.json'.format(
STORE_URL
)).content.decode(ENCODING)
return file_response(content, 'json')
class FhirResources(Resource):
@staticmethod
def get():
"""Returns CSV list of available database schemas."""
content = requests.get('{}/resource_list.json'.format(
STORE_URL
)).content.decode(ENCODING)
return file_response(content, 'json')
class FhirResource(Resource):
@staticmethod
def get(resource_name):
"""Returns CSV list of available database schemas."""
content = requests.get('{}/fhirResources/{}.json'.format(
STORE_URL,
resource_name
)).content.decode(ENCODING)
return file_response(content, 'json')
class Schemas(Resource):
@staticmethod
def get():
"""Returns CSV list of available database schemas."""
content = requests.get('{}/databases.json'.format(
SCHEMA_URL
)).content.decode(ENCODING)
return file_response(content, 'json')
class Schema(Resource):
@staticmethod
def get(database_name, extension):
"""Fetches distant file and parses it according to its extension."""
content = requests.get('{}/{}.{}'.format(
SCHEMA_URL,
database_name,
extension
)).content.decode(ENCODING)
return file_response(content, extension)
class Store(Resource):
@staticmethod
def get(resource_name, extension):
"""Fetches distant file from Store and parses it according to its extension."""
content = requests.get('{}/{}.{}'.format(
STORE_URL,
resource_name,
extension
)).content.decode(ENCODING)
return file_response(content, extension)
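# A possible consolidation sketch (not in the original module): every handler
# above follows the same "fetch <base>/<path>.<ext>, decode, wrap" pattern,
# which a shared helper such as the hypothetical _fetch below would capture.
#
# def _fetch(base_url, path, extension):
#     content = requests.get('{}/{}.{}'.format(base_url, path, extension)) \
#         .content.decode(ENCODING)
#     return file_response(content, extension)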
| 24.770115
| 87
| 0.624594
| 228
| 2,155
| 5.802632
| 0.236842
| 0.063492
| 0.104308
| 0.117914
| 0.773243
| 0.697657
| 0.620559
| 0.620559
| 0.620559
| 0.578987
| 0
| 0.013109
| 0.256613
| 2,155
| 86
| 88
| 25.05814
| 0.812734
| 0.152204
| 0
| 0.54717
| 0
| 0
| 0.087973
| 0.025056
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113208
| false
| 0
| 0.056604
| 0
| 0.396226
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4466c3b9ecc29dbb105b55c4d10907897f3d25c
| 742
|
py
|
Python
|
ArtificialData/RhoAndBeta.py
|
AlfLobos/DSP
|
1e1073c6b0da562b0aea3dec9d62bc563a3b46f5
|
[
"CNRI-Python"
] | null | null | null |
ArtificialData/RhoAndBeta.py
|
AlfLobos/DSP
|
1e1073c6b0da562b0aea3dec9d62bc563a3b46f5
|
[
"CNRI-Python"
] | null | null | null |
ArtificialData/RhoAndBeta.py
|
AlfLobos/DSP
|
1e1073c6b0da562b0aea3dec9d62bc563a3b46f5
|
[
"CNRI-Python"
] | null | null | null |
import numpy as np
def CalcRhoAndBetaVectors(bid_vec, UB_bid, num_edges, index_Imps, adverPerImp, firstPrice):
## I will assume I want to evaluate the full vector.
rhoBetaMat=np.zeros((num_edges,2))
for edge_num,impType in enumerate(index_Imps):
rhoBetaMat[edge_num,:]=RhoBetaValue(bid_vec[edge_num], UB_bid[impType],\
adverPerImp[impType], firstPrice)
return [rhoBetaMat[:,0],rhoBetaMat[:,1]]
def CalcBeta(bid, num_adv, firstPrice):
if firstPrice:
return bid
else:
return (num_adv/(num_adv+1.0)) * bid
def RhoBetaValue(bid, ub, n, firstPrice):
## For rho_beta_Type=0, args[0]=adv
rho = np.power((bid/ub),n)
beta = CalcBeta(bid, n, firstPrice)
return [rho, beta]
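# A small numeric sanity check (not in the original module), under the
# pricing interpretation suggested by the code: with firstPrice=False,
# rho = (bid/ub)**n and beta = (n/(n+1)) * bid.
# RhoBetaValue(bid=0.5, ub=1.0, n=2, firstPrice=False)  # -> [0.25, 0.3333...]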
| 35.333333
| 91
| 0.677898
| 106
| 742
| 4.59434
| 0.433962
| 0.043121
| 0.024641
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011725
| 0.195418
| 742
| 21
| 92
| 35.333333
| 0.80402
| 0.110512
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1875
| false
| 0
| 0.0625
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e44985df33485739c9a738d44c1ed72af3c01cd0
| 3,208
|
py
|
Python
|
src/utils/greedy.py
|
vmgabriel/tabu-base
|
615c45e4d6b6fdb1c85c8fbaa316a1e6ce829fcd
|
[
"Apache-2.0"
] | null | null | null |
src/utils/greedy.py
|
vmgabriel/tabu-base
|
615c45e4d6b6fdb1c85c8fbaa316a1e6ce829fcd
|
[
"Apache-2.0"
] | null | null | null |
src/utils/greedy.py
|
vmgabriel/tabu-base
|
615c45e4d6b6fdb1c85c8fbaa316a1e6ce829fcd
|
[
"Apache-2.0"
] | null | null | null |
"""
Greedy Module Solution for Utils control
"""
# Libraries
from typing import List
from functools import reduce
# Modules
from src.utils.math import (
list_negative,
invert_positions,
evaluate_fo
)
# Constants
COMPARE_VALUE = 99999999
def worst_solution(distance_matrix: List[List[float]]) -> List[int]:
"""This generate the worst solution"""
negative_matrix = list(map(
list_negative,
distance_matrix
))
return neghbord_most_near(negative_matrix)
def neghbord_most_near(
distance_matrix: List[List[float]],
start_city: int = 0
) -> List[int]:
"""
    get the nearest not-yet-used city (by distance) for each city
"""
neghbord_used = [start_city]
def city_most_near(line: int) -> int:
"""
        Get the nearest unused city for the given row of the distance matrix
"""
compare_value = COMPARE_VALUE
most_near = -1
for key, value in enumerate(distance_matrix[line]):
if (
line != key and
value < compare_value and
key not in neghbord_used
):
compare_value = value
most_near = key
neghbord_used.append(most_near)
return most_near
return list(map(
lambda x: city_most_near(x) if x != start_city else start_city,
range(len(distance_matrix))
))
def best_change_not_tabu(
matrix_distance: List[List[float]],
solution: List[int]
) -> (float, tuple):
"""
    find the best position swap according to the objective function
    matrix_distance: List[List[float]] -> matrix of distances
    solution: List[int] -> current solution
    return (float, (posx, posy)) -> best objective value and the swap positions
"""
# fun_before = evaluate_fo(matrix_distance, solution)
best_fo = 1E+100
position = (-1, -1)
tam = len(solution)
for posx in range(tam-1):
for posy in range(posx+1 if posx+1 != tam else tam, tam):
funobj = evaluate_fo(
matrix_distance,
invert_positions(solution, posx, posy)
)
if funobj < best_fo:
best_fo = funobj
position = (posx, posy)
return (best_fo, position)
def generate_local_search(
matrix_distance: List[List[float]],
solution: List[int]
) -> (int, List[int]):
"""
    Run a local search that minimizes the objective function (fo)
matrix_distance: List[List[float]]
"""
counter = 0
manage = True
best_change = best_change_not_tabu(matrix_distance, solution)
prev_change = (1E+100,)
while manage:
if prev_change[0] < best_change[0]:
manage = False
else:
prev_change = best_change
best_change = best_change_not_tabu(matrix_distance, solution)
solution = invert_positions(
solution,
origin=best_change[1][0],
destiny=best_change[1][1]
)
counter += 1
return (
counter,
(
prev_change[0]
if prev_change[0] < best_change[0] and
prev_change[0] != 0
else best_change[0]
),
solution
)
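# A tiny usage sketch (not in the original module) with a made-up symmetric
# 3-city distance matrix: neghbord_most_near keeps the start city fixed and
# maps every other city to its nearest not-yet-used neighbour.
# neghbord_most_near([[0, 1, 2], [1, 0, 3], [2, 3, 0]])  # -> [0, 2, 1]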
| 25.870968
| 73
| 0.5798
| 378
| 3,208
| 4.714286
| 0.246032
| 0.06734
| 0.043771
| 0.049383
| 0.208193
| 0.138608
| 0.129068
| 0.102132
| 0.054994
| 0
| 0
| 0.017193
| 0.329177
| 3,208
| 123
| 74
| 26.081301
| 0.810874
| 0.163653
| 0
| 0.120482
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060241
| false
| 0
| 0.036145
| 0
| 0.156627
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e45010e55211f1d8b353af0fb64ccf62757ae1c3
| 5,649
|
py
|
Python
|
codes/models/modules/Inv_arch.py
|
lin-zhao-resoLve/Symmetric-Enhancement
|
11c1a662020582d1333d11cf5f9c99556ec0f427
|
[
"Apache-2.0"
] | 14
|
2021-09-30T07:05:04.000Z
|
2022-03-31T08:22:39.000Z
|
codes/models/modules/Inv_arch.py
|
lin-zhao-resoLve/Symmetric-Enhancement
|
11c1a662020582d1333d11cf5f9c99556ec0f427
|
[
"Apache-2.0"
] | 3
|
2021-11-09T06:52:13.000Z
|
2021-11-20T08:00:46.000Z
|
codes/models/modules/Inv_arch.py
|
lin-zhao-resoLve/Symmetric-Enhancement
|
11c1a662020582d1333d11cf5f9c99556ec0f427
|
[
"Apache-2.0"
] | null | null | null |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from models.modules.model.vgg16 import Vgg16
import os
vgg = Vgg16()
vgg.load_state_dict(torch.load(os.path.join(os.path.abspath('.'), 'models/modules/model/', 'vgg16.weight')))
params = list(vgg.named_parameters())
encoding1 = params[0][1].data
encoding2 = params[2][1].data
class InvBlockExp(nn.Module):
def __init__(self, subnet_constructor, channel_num, channel_split_num, clamp=1.):
super(InvBlockExp, self).__init__()
self.split_len1 = channel_split_num
self.split_len2 = channel_num - channel_split_num
self.clamp = clamp
self.F = subnet_constructor(self.split_len2, self.split_len1)
self.G = subnet_constructor(self.split_len1, self.split_len2)
self.H = subnet_constructor(self.split_len1, self.split_len2)
def forward(self, x, rev=False):
x1, x2 = (x.narrow(1, 0, self.split_len1), x.narrow(1, self.split_len1, self.split_len2))
if not rev:
y1 = x1 + self.F(x2)
self.s = self.clamp * (torch.sigmoid(self.H(y1)) * 2 - 1)
y2 = x2.mul(torch.exp(self.s)) + self.G(y1)
else:
self.s = self.clamp * (torch.sigmoid(self.H(x1)) * 2 - 1)
y2 = (x2 - self.G(x1)).div(torch.exp(self.s))
y1 = x1 - self.F(y2)
return torch.cat((y1, y2), 1)
def jacobian(self, x, rev=False):
if not rev:
jac = torch.sum(self.s)
else:
jac = -torch.sum(self.s)
return jac / x.shape[0]
# class encoder(nn.Module):
# def __init__(self, in_channels, out_channels, num_features):
# super(encoder, self).__init__()
# stride = 1
# padding = 1
# kernel_size = 3
# self.conv1 = nn.Conv2d(in_channels, 2*num_features, kernel_size, stride=stride, padding=padding)
# self.conv2 = nn.Conv2d(2*num_features, num_features, kernel_size, stride=stride, padding=padding)
# self.conv3 = nn.Conv2d(num_features, out_channels, kernel_size=1, stride=1)
# self.prelu = nn.PReLU(num_parameters=1, init=0.2)
#
# def forward(self, x, rev=False):
# x1 = self.prelu(self.conv1(x))
# x2 = self.prelu(self.conv2(x1))
# x3 = self.prelu(self.conv3(x2))
# return x3
class Downsampling(nn.Module):
def __init__(self, channel_in):
super(Downsampling, self).__init__()
self.channel_in = channel_in
self.haar_weights1 = encoding1
self.haar_weights1 = nn.Parameter(self.haar_weights1)
self.haar_weights1.requires_grad = False
self.haar_weights2 = encoding2
self.haar_weights2 = nn.Parameter(self.haar_weights2)
self.haar_weights2.requires_grad = False
def forward(self, x, rev=False):
if not rev:
out = F.conv2d(x, self.haar_weights1, bias=None, stride=1, padding=1, groups=1)
out = F.conv2d(out, self.haar_weights2, bias=None, stride=1, padding=1, groups=1)
return out
else:
out = F.conv_transpose2d(x, self.haar_weights2, bias=None, stride=1, padding=1, groups=1)
out = F.conv_transpose2d(out, self.haar_weights1, bias=None, stride=1, padding=1, groups=1)
return out
def jacobian(self, x, rev=False):
return self.last_jac
class Upsampling(nn.Module):
def __init__(self, channel_in):
super(Upsampling, self).__init__()
self.channel_in = channel_in
self.haar_weights1 = encoding1
self.haar_weights1 = nn.Parameter(self.haar_weights1)
self.haar_weights1.requires_grad = False
self.haar_weights2 = encoding2
self.haar_weights2 = nn.Parameter(self.haar_weights2)
self.haar_weights2.requires_grad = False
def forward(self, x, rev=False):
if rev:
out = F.conv2d(x, self.haar_weights1, bias=None, stride=1, padding=1, groups=1)
out = F.conv2d(out, self.haar_weights2, bias=None, stride=1, padding=1, groups=1)
return out
else:
out = F.conv_transpose2d(x, self.haar_weights2, bias=None, stride=1, padding=1, groups=1)
out = F.conv_transpose2d(out, self.haar_weights1, bias=None, stride=1, padding=1, groups=1)
return out
def jacobian(self, x, rev=False):
return self.last_jac
class InvRescaleNet(nn.Module):
def __init__(self, channel_in=3, channel_out=3, subnet_constructor=None, block_num=[], down_num=2):
super(InvRescaleNet, self).__init__()
operations = []
current_channel = channel_in
for i in range(down_num):
b = Downsampling(current_channel)
operations.append(b)
current_channel = 64
for j in range(block_num[i]):
b = InvBlockExp(subnet_constructor, current_channel, channel_out)
operations.append(b)
b = Upsampling(current_channel)
operations.append(b)
self.operations = nn.ModuleList(operations)
def forward(self, x, rev=False, cal_jacobian=False):
out = x
jacobian = 0
if not rev:
for op in self.operations:
out = op.forward(out, rev)
if cal_jacobian:
jacobian += op.jacobian(out, rev)
else:
for op in reversed(self.operations):
out = op.forward(out, rev)
if cal_jacobian:
jacobian += op.jacobian(out, rev)
if cal_jacobian:
return out, jacobian
else:
return out
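# Note (not in the original module): InvBlockExp is an affine coupling layer,
# so the reverse branch inverts the forward branch exactly. Forward:
#   y1 = x1 + F(x2),  y2 = x2 * exp(s(y1)) + G(y1)
# which the reverse solves as x2 = (y2 - G(y1)) * exp(-s(y1)), x1 = y1 - F(x2).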
| 36.211538
| 108
| 0.615861
| 761
| 5,649
| 4.388962
| 0.156373
| 0.057485
| 0.057485
| 0.040419
| 0.588922
| 0.530539
| 0.512575
| 0.482036
| 0.417964
| 0.387425
| 0
| 0.034858
| 0.26872
| 5,649
| 155
| 109
| 36.445161
| 0.773663
| 0.127456
| 0
| 0.508929
| 0
| 0
| 0.006923
| 0.004276
| 0
| 0
| 0
| 0
| 0
| 1
| 0.098214
| false
| 0
| 0.0625
| 0.017857
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4520356b6e60cb7ea00f5353a2466e715bcd995
| 1,642
|
py
|
Python
|
py_algo/dynamic_programming/introduction/equal_array.py
|
Sk0uF/Algorithms
|
236cc5b056ce2637d5d947c5fc1e3367cde886bf
|
[
"MIT"
] | 1
|
2021-07-05T15:39:04.000Z
|
2021-07-05T15:39:04.000Z
|
py_algo/dynamic_programming/introduction/equal_array.py
|
Sk0uF/Algorithms
|
236cc5b056ce2637d5d947c5fc1e3367cde886bf
|
[
"MIT"
] | null | null | null |
py_algo/dynamic_programming/introduction/equal_array.py
|
Sk0uF/Algorithms
|
236cc5b056ce2637d5d947c5fc1e3367cde886bf
|
[
"MIT"
] | 1
|
2021-09-02T21:31:34.000Z
|
2021-09-02T21:31:34.000Z
|
"""
Codemonk link: https://www.hackerearth.com/practice/algorithms/dynamic-programming/introduction-to-dynamic-programming-1/practice-problems/algorithm/equal-array-84cf6c5f/
You are given an array A of size N. Find the minimum non-negative number X such that there exists an index j for which,
when you replace Aj by Aj+X, the sums of the elements of the array from index 1 to j and from j+1 to N become equal,
where 1 <= j <= N-1. Assume the array to be 1-indexed. If there is no possible X, print -1 on a separate line.
Input - Output:
The first line contains the number of test cases.
The first line of each test case contains an integer N,which denotes the size of the array.
The second line contains N space-separated integers where the ith integer denotes Ai.
Sample input:
1
5
1 2 3 2 1
Sample Output:
3
"""
"""
We can simply build the partial-sums array, iterate through it, and at each step check for the minimum X
that is required.
Final complexity: O(N)
"""
t = int(input())
for _ in range(t):
n = int(input())
array = list(map(int, input().split()))
partial_sums = [array[0]]
for i in range(1, n):
partial_sums.append(array[i]+partial_sums[i-1])
ans = float("inf")
stop = False
for i in range(n):
if partial_sums[i] < partial_sums[-1] - partial_sums[i]:
val = partial_sums[-1] - 2*partial_sums[i]
ans = min(ans, val)
if partial_sums[i] == partial_sums[-1] - partial_sums[i]:
print(0)
stop = True
break
if not stop:
if ans != float("inf"):
print(ans)
else:
print(-1)
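# Worked example (not part of the original solution) for the sample input:
# array [1, 2, 3, 2, 1] has partial sums [1, 3, 6, 8, 9]; at index i = 1 the
# left sum is 3 and the right sum is 9 - 3 = 6, so X = 9 - 2*3 = 3, matching
# the sample output.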
| 30.407407
| 170
| 0.658343
| 273
| 1,642
| 3.915751
| 0.428571
| 0.12348
| 0.067353
| 0.02058
| 0.071094
| 0.071094
| 0.071094
| 0.071094
| 0.071094
| 0.071094
| 0
| 0.022508
| 0.242387
| 1,642
| 53
| 171
| 30.981132
| 0.836817
| 0.483557
| 0
| 0
| 0
| 0
| 0.008942
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.136364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4526af2d705bb3c47b1ba3a6b79144d1876aeeb
| 1,331
|
py
|
Python
|
model.py
|
mollikka/Penrose
|
6d9870f54e9810f7e2f4ea82bb619424785a65db
|
[
"MIT"
] | 1
|
2019-07-17T02:46:45.000Z
|
2019-07-17T02:46:45.000Z
|
model.py
|
mollikka/Penrose
|
6d9870f54e9810f7e2f4ea82bb619424785a65db
|
[
"MIT"
] | null | null | null |
model.py
|
mollikka/Penrose
|
6d9870f54e9810f7e2f4ea82bb619424785a65db
|
[
"MIT"
] | null | null | null |
from itertools import chain
phi = 1.61803398875
class PenroseModel:
def __init__(self, start_state):
self.tiles = start_state
self.history = []
def split(self):
self.history.append(list(self.tiles))
self.tiles = list(chain(*[tile.split() for tile in self.tiles]))
def desplit(self):
if self.history:
self.tiles = self.history.pop()
def get_tiles(self):
return self.tiles
class HalfDart:
def __init__(self, A,B,C):
self.a = A
self.b = B
self.c = C
def split(self):
a,b,c = self.a,self.b,self.c
ax, ay = self.a
cx, cy = self.c
fx = cx + (ax-cx)*(1/phi)
fy = cy + (ay-cy)*(1/phi)
f = (fx,fy)
return [HalfKite(f, c, b),HalfDart(b, f, a)]
class HalfKite:
def __init__(self, A,B,C):
self.a = A
self.b = B
self.c = C
def split(self):
a,b,c = self.a,self.b,self.c
ax, ay = self.a
bx, by = self.b
cx, cy = self.c
gx = bx + (cx-bx)*(1/phi)
gy = by + (cy-by)*(1/phi)
g = (gx,gy)
fx = bx + (ax-bx)*(1/(phi**2))
fy = by + (ay-by)*(1/(phi**2))
f = (fx,fy)
return [HalfDart(g,f,b),
HalfKite(c,a,g),
HalfKite(f,a,g),]
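# A minimal usage sketch (not in the original module): seed the model with a
# single half-kite (the vertex coordinates here are arbitrary) and subdivide.
# model = PenroseModel([HalfKite((0.0, 0.0), (1.0, 0.0), (0.5, 1.0))])
# model.split()
# len(model.get_tiles())  # -> 3: one HalfDart and two HalfKites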
| 22.183333
| 72
| 0.480841
| 205
| 1,331
| 3.04878
| 0.219512
| 0.08
| 0.0384
| 0.0448
| 0.2272
| 0.2272
| 0.2272
| 0.2272
| 0.2272
| 0.2272
| 0
| 0.023364
| 0.356875
| 1,331
| 59
| 73
| 22.559322
| 0.706776
| 0
| 0
| 0.413043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.173913
| false
| 0
| 0.021739
| 0.021739
| 0.326087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e45397111350f9273e2cc86843e6973c134d6e85
| 1,465
|
py
|
Python
|
src/tests/unittests/configuration_helper/adapters/test_keysight_e8267d_instrument_adapter.py
|
QuTech-Delft/qilib
|
a87892f8a9977ed338c36e8fb1e262b47449cf44
|
[
"MIT"
] | 1
|
2019-02-20T16:56:30.000Z
|
2019-02-20T16:56:30.000Z
|
src/tests/unittests/configuration_helper/adapters/test_keysight_e8267d_instrument_adapter.py
|
QuTech-Delft/qilib
|
a87892f8a9977ed338c36e8fb1e262b47449cf44
|
[
"MIT"
] | 22
|
2019-02-16T06:10:55.000Z
|
2022-02-15T18:52:34.000Z
|
src/tests/unittests/configuration_helper/adapters/test_keysight_e8267d_instrument_adapter.py
|
QuTech-Delft/qilib
|
a87892f8a9977ed338c36e8fb1e262b47449cf44
|
[
"MIT"
] | 2
|
2020-02-04T08:46:21.000Z
|
2020-10-18T16:31:58.000Z
|
import unittest
from unittest.mock import call, patch, Mock, MagicMock
from qilib.configuration_helper import InstrumentAdapterFactory
class TestKeysightE8267DInstrumentAdapter(unittest.TestCase):
def test_read_filter_out_val_mapping(self):
with patch('qilib.configuration_helper.adapters.keysight_e8267d_instrument_adapter.Keysight_E8267D') \
as mock_instrument:
mock_instrument_instance = MagicMock()
mock_instrument.return_value = mock_instrument_instance
mock_instrument_instance.snapshot.return_value = {
'name': 'some_keysight',
'parameters': {
'good_parameter': {'value': 42},
'filtered_parameter_1': {'val_mapping': {1: True, 0: False}, 'value': False},
'filtered_parameter_2': {'on_off_mapping': {1: 'ON', 0: 'OFF'}, 'value': 'OFF'}
}
}
adapter = InstrumentAdapterFactory.get_instrument_adapter('KeysightE8267DInstrumentAdapter', 'fake')
config = adapter.read()
self.assertNotIn('val_mapping', config['filtered_parameter_1'])
self.assertNotIn('on_off_mapping', config['filtered_parameter_2'])
self.assertEqual(42, config['good_parameter']['value'])
self.assertFalse(config['filtered_parameter_1']['value'])
self.assertEqual('OFF', config['filtered_parameter_2']['value'])
adapter.close_instrument()
| 47.258065
| 112
| 0.661433
| 143
| 1,465
| 6.461538
| 0.384615
| 0.11039
| 0.099567
| 0.064935
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026432
| 0.225256
| 1,465
| 30
| 113
| 48.833333
| 0.787665
| 0
| 0
| 0
| 0
| 0
| 0.264344
| 0.079918
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.04
| false
| 0
| 0.12
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e455b64eee36fc129ded8331905ce5976719baa2
| 1,364
|
py
|
Python
|
scripts/mint.py
|
tomazmm/artsyapes-contract
|
95b10e1c73aa4e0712ff8d5162271e84aec91810
|
[
"Apache-2.0"
] | null | null | null |
scripts/mint.py
|
tomazmm/artsyapes-contract
|
95b10e1c73aa4e0712ff8d5162271e84aec91810
|
[
"Apache-2.0"
] | null | null | null |
scripts/mint.py
|
tomazmm/artsyapes-contract
|
95b10e1c73aa4e0712ff8d5162271e84aec91810
|
[
"Apache-2.0"
] | null | null | null |
import json
import pprint
import random
from terra_sdk.core import AccAddress, Coins
from terra_sdk.core.auth import StdFee
from terra_sdk.core.broadcast import BlockTxBroadcastResult
from scripts.deploy import owner, lt
from terra_sdk.core.wasm import MsgExecuteContract
def mint(contract_address: str):
mint_msg = MsgExecuteContract(
owner.key.acc_address,
AccAddress(contract_address),
{
"mint": {
"token_id": str(random.randint(1, 1000000)),
"owner": owner.key.acc_address,
"token_uri": "www.ipfs_link"
}
}
)
mint_tx = owner.create_and_sign_tx(msgs=[mint_msg], fee=StdFee(1000000, Coins(uluna=1000000)))
mint_tx_result = lt.tx.broadcast(mint_tx)
# print_tx_result(mint_tx_result)
def print_tx_result(tx_result: BlockTxBroadcastResult):
print(f"Height: {tx_result.height}")
print(f"TxHash: {tx_result.txhash}")
for event in tx_result.logs[0].events:
print(f"{event['type']} : {pprint.pformat(event['attributes'])}")
def main():
try:
with open("contract.json", "r") as f:
data = json.load(f)
mint(data['contract_address'])
except FileNotFoundError:
print("Contract.json file not found.\nDeploy contract before minting NFTs.")
if __name__ == '__main__':
main()
| 28.416667
| 98
| 0.662023
| 173
| 1,364
| 4.99422
| 0.445087
| 0.074074
| 0.055556
| 0.074074
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021657
| 0.221408
| 1,364
| 47
| 99
| 29.021277
| 0.791902
| 0.022727
| 0
| 0
| 0
| 0
| 0.18858
| 0.027799
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.222222
| 0
| 0.305556
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4589a7ec39dfb446ef1fe4c8fd01bbb42b8704d
| 1,507
|
py
|
Python
|
enbios/processing/indicators/__init__.py
|
ENVIRO-Module/enbios
|
10e93df9a168627833eca6d04e4e2b864de8e8d9
|
[
"BSD-3-Clause"
] | 2
|
2022-01-28T09:38:28.000Z
|
2022-01-28T09:38:32.000Z
|
enbios/processing/indicators/__init__.py
|
ENVIRO-Module/enbios
|
10e93df9a168627833eca6d04e4e2b864de8e8d9
|
[
"BSD-3-Clause"
] | 1
|
2022-01-27T21:42:42.000Z
|
2022-01-27T21:42:42.000Z
|
enbios/processing/indicators/__init__.py
|
ENVIRO-Module/enbios
|
10e93df9a168627833eca6d04e4e2b864de8e8d9
|
[
"BSD-3-Clause"
] | null | null | null |
import math
from nexinfosys.model_services import State
materials = {
"Aluminium",
"Antimony",
"Arsenic",
"Baryte",
"Beryllium",
"Borates",
"Cadmium",
"Cerium",
"Chromium",
"Cobalt",
"Copper",
"Diatomite",
"Dysprosium",
"Europium",
"Fluorspar",
"Gadolinium",
"Gallium",
"Gold",
"Gypsum",
"IronOre",
"KaolinClay",
"Lanthanum",
"Lead",
"Lithium",
"Magnesite",
"Magnesium",
"Manganese",
"Molybdenum",
"NaturalGraphite",
"Neodymium",
"Nickel",
"Palladium",
"Perlite",
"Phosphorus",
"Platinum",
"Praseodymium",
"Rhenium",
"Rhodium",
"Samarium",
"Selenium",
"SiliconMetal",
"Silver",
"Strontium",
"Sulphur",
"Talc",
"Tantalum",
"Tellurium",
"Terbium",
"Tin",
"Titanium",
"Tungsten",
"Vanadium",
"Yttrium",
"Zinc",
"Zirconium"
}
def supply_risk(state: State):
sr = 0
for i in materials:
ri = state.get(i)
if ri is not None:
SRi = state.get(f"sr{i}")
ci = state.get(f"c{i}")
sr += ri*SRi/ci
return sr
def recycling_rate(state: State):
rr_num = 0
rr_denom = 0
for i in materials:
ri = state.get(i)
if ri is not None:
RRi = state.get(f"rr{i}")
rr_num += ri*RRi
rr_denom += ri
if rr_denom != 0.0:
return rr_num / rr_denom
else:
return -1.0
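# A hedged usage sketch (not in the original module): State comes from
# nexinfosys, so a minimal dict-backed stand-in exposing the same .get()
# interface is assumed here, populated for a single material.
# class _FakeState:
#     def __init__(self, d): self._d = d
#     def get(self, k): return self._d.get(k)
#
# s = _FakeState({"Copper": 10.0, "srCopper": 0.2, "cCopper": 4.0, "rrCopper": 0.6})
# supply_risk(s)     # -> 10 * 0.2 / 4 = 0.5
# recycling_rate(s)  # -> (10 * 0.6) / 10 = 0.6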
| 17.125
| 43
| 0.50564
| 153
| 1,507
| 4.915033
| 0.588235
| 0.053191
| 0.035904
| 0.018617
| 0.106383
| 0.106383
| 0.106383
| 0.106383
| 0.106383
| 0.106383
| 0
| 0.007042
| 0.340411
| 1,507
| 87
| 44
| 17.321839
| 0.749497
| 0
| 0
| 0.074074
| 0
| 0
| 0.295289
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024691
| false
| 0
| 0.024691
| 0
| 0.08642
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e45a7bbe70e7b8614eb0c9109018644cf05fb490
| 24,654
|
py
|
Python
|
src/1-topicmodeling.py
|
sofieditmer/topic_modeling
|
edfff3c4d45c932562f796cc81e9ce9fe35f8e4b
|
[
"MIT"
] | null | null | null |
src/1-topicmodeling.py
|
sofieditmer/topic_modeling
|
edfff3c4d45c932562f796cc81e9ce9fe35f8e4b
|
[
"MIT"
] | null | null | null |
src/1-topicmodeling.py
|
sofieditmer/topic_modeling
|
edfff3c4d45c932562f796cc81e9ce9fe35f8e4b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Info: This script performs topic modeling on the clean tweets by Donald Trump. The number of topics is estimated by computing coherence values for different number of topics, and an LDA model is constructed with the number of topics with the highest coherence value. Visualizations of the topics are created relying on pyLDAvis and wordcloud and these visualizations are saved in the output directory.
Parameters:
(optional) input_file: str <name-of-input-file>, default = clean_trump_tweets.csv
(optional) chunk_size: int <size-of-chunks>, default = 10
(optional) passes: int <number-of-passes>, default = 10
(optional) min_count: int <minimum-count-bigrams>, default = 2
(optional) threshold: int <threshold-for-keeping-phrases>, default = 100
(optional) iterations: int <number-of-iterations>, default = 100
(optional) rolling_mean: int <rolling-mean>, default = 50
(optional) step_size: int <size-of-steps>, default = 5
Usage:
$ python 1-topicmodeling.py
Output:
- topics.txt: overview of topics generated by the LDA model
- dominant_topic.csv: table showing the most dominant topics and their associated keywords as well as how much each topic contributes.
- topic_contributions.csv: a dataframe showing the most contributing keywords for each topic.
- topics_over_time.jpg: visualization of the topic contributions over time.
- topic_wordclouds.png: the topics visualized as word clouds.
"""
### DEPENDENCIES ###
# core libraries
import sys
import os
sys.path.append(os.path.join(".."))
# numpy, pandas, pyplot
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
# spaCy
import spacy
nlp = spacy.load("en_core_web_sm", disable=["ner"])
nlp.max_length = 68000000 # increasing maximum length
# pyLDAvis and seaborn for visualizations
import pyLDAvis.gensim
import seaborn as sns
# matplotlib colors
import matplotlib.colors as mcolors
# wordcloud tools
from wordcloud import WordCloud
# LDA tools
import gensim
import gensim.corpora as corpora
from gensim.models import CoherenceModel
from utils import lda_utils
# Ignore warnings
import logging, warnings
warnings.filterwarnings('ignore')
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
# argparse
import argparse
### MAIN FUNCTION ###
def main():
### ARGPARSE ###
# Initialize ArgumentParser class
ap = argparse.ArgumentParser()
# Argument 1: Input file
ap.add_argument("-i", "--input_filename",
type = str,
required = False, # not required argument
help = "Name of input file",
default = "clean_trump_tweets.csv") # default
# Argument 2: Number of passes
ap.add_argument("-p", "--n_passes",
type = int,
required = False, # not required argument
help = "Define the number of passes which is the number of times you want the model to go through the entire corpus.",
default = 10) # default number of passes
# Argument 3: Minimum count for bigrams
ap.add_argument("-m", "--min_count",
type = int,
required = False, # not required argument
help = "Define the minimum count for bigrams to occur to be included",
default = 2) # default minimum count
# Argument 4: Threshold
ap.add_argument("-th", "--threshold",
type = int,
required = False, # not required argument
help = "Define the threshold which determines which phrases to include and which to exlude. The higher the threshold, the fewer the number of phrases are included.",
default = 100) # default threshold
# Argument 5: Iterations
ap.add_argument("-it", "--n_iterations",
type = int,
required = False, # not required argument
help = "Define the number of iterations through each document in the corpus",
default = 100) # default number of iterations
# Argument 6: Rolling mean size
ap.add_argument("-r", "--rolling_mean",
type = int,
required = False, # not required argument
help = "Define the rolling mean which is the number of chunks of tweets to calculate contribution of at a time",
default = 50) # default
# Argument 7: Step size
ap.add_argument("-s", "--step_size",
type = int,
required = False, # not required argument
help = "Define the step size",
default = 5) # default step size
# Argument 8: Chunk size
ap.add_argument("-c", "--chunk_size",
type = int,
required = False, # not required argument
help = "Define the size of the chunks, i.e. how many tweets one chunk should consist of.",
default = 10) # default chunk size
# Parse arguments
args = vars(ap.parse_args())
# Save input parameters
input_file = os.path.join("..", "data", args["input_filename"])
n_passes = args["n_passes"]
min_count = args["min_count"]
threshold = args["threshold"]
n_iterations = args["n_iterations"]
rolling_mean = args["rolling_mean"]
step_size = args["step_size"]
chunk_size = args["chunk_size"]
# Create output directory if it does not already exist
if not os.path.exists(os.path.join("..", "output")):
os.mkdir(os.path.join("..", "output"))
# Start message to user
print("\n[INFO] Initializing topic modeling on all Donald Trump tweets from May 2009 to June 2020...")
# Instantiate the topic modeling class
topic_modeling = Topic_modeling(input_file)
# Load and prepare data
print(f"\n[INFO] Loading '{input_file}'...")
clean_tweets_df = topic_modeling.load_data()
# Chunk data
print(f"\n[INFO] Chunking the data into chunks of {chunk_size}...")
chunks = topic_modeling.chunk_tweets(clean_tweets_df, chunk_size)
# Process data
print("\n[INFO] Creating bigram and trigram models and performing lemmatization and part-of-speech-tagging...")
processed_data = topic_modeling.process_data(chunks, min_count, threshold)
# Create bag of words
print("\n[INFO] Creating dictionary and word corpus...")
id2word, corpus = topic_modeling.create_dict_corpus(processed_data)
# Estimate the optimal number of topics
print("\n[INFO] Finding the optimal number of topics...")
optimal_n_topics = topic_modeling.find_optimal_n_topics(processed_data, corpus, id2word, step_size)
# Print the optimal number of topics to the screen
print(f"\nThe optimal number of topics is {optimal_n_topics}")
# Create LDA model and compute perplexity and coherence scores
print("\n[INFO] Creating LDA model...")
lda_model, perplexity_score, coherence_score = topic_modeling.create_lda(processed_data, id2word, corpus, optimal_n_topics, chunk_size, n_passes, n_iterations)
# Create outputs
print("\n[INFO] Producing outputs and saving to 'output' directory...")
# Output 1
topic_modeling.create_output_1(lda_model, perplexity_score, coherence_score, optimal_n_topics)
print("\n[INFO] A txt-file containing the topics has been saved to output directory...")
# Output 2
df_dominant_topic, df_topic_keywords = topic_modeling.create_output_2(lda_model, corpus, processed_data, optimal_n_topics, )
print("\n[INFO] A dataframe showing the most dominant topic for each chunk has been saved to output directory...")
# Output 3
topic_modeling.create_output_3(df_dominant_topic, lda_model, corpus, processed_data, optimal_n_topics, df_topic_keywords)
print("\n[INFO] A dataframe showing the most contributing keywords for each topic has been saved to output directory...")
# Create visualization: topics over time with rolling mean
print("\n[INFO] Creating visualization of topic contributions over time...")
topic_modeling.visualize_topics(processed_data, rolling_mean, lda_model, corpus)
# Create word clouds of topics
print("\n[INFO] Creating word clouds of topics...")
topic_modeling.create_wordcloud(lda_model, optimal_n_topics)
# User message
print("\n[INFO] Done! You have now performed topic modeling on all of Donald Trump tweets from May 2009 to June 2020. The results have been saved in the 'output' folder.\n")
### TOPIC MODELING ###
# Creating Topic modeling class
class Topic_modeling:
# Initialize the Topic_modeling class
def __init__(self, input_file):
# Receive input
self.input_file = input_file
def load_data(self):
"""
This method loads the preprocessed data from the data folder.
"""
# Load data into dataframe with pandas
clean_tweets_df = pd.read_csv(self.input_file, lineterminator = "\n")
# Take only relevant columns
clean_tweets_df = clean_tweets_df.loc[:, ("id", "date", "clean_tweets")]
# Drop rows with missing values
clean_tweets_df = clean_tweets_df.dropna(subset=['clean_tweets'])
return clean_tweets_df
def chunk_tweets(self, clean_tweets_df, chunk_size):
"""
This method creates chunks of tweets and returns them as a list. Chunking the tweets,
as opposed to modeling individual tweets, is done to ensure that clear topics are found. By chunking the tweets,
the topics become more interpretable.
"""
# Create empty list for chunks of tweets
chunks = []
# Create chunks of tweets
for i in range(0, len(clean_tweets_df["clean_tweets"]), chunk_size):
chunks.append(' '.join(clean_tweets_df["clean_tweets"][i:i+chunk_size]))
return chunks
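# Illustrative sketch (not part of the pipeline): with chunk_size = 2, chunking
# joins consecutive tweets into single documents, e.g.
# ["great rally", "fake news", "big win"] -> ["great rally fake news", "big win"]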
def process_data(self, chunks, min_count, threshold):
"""
This method creates bigram and trigram models, and performs lemmatization and part-of-speech-tagging.
The threshold value determines which phrases to include: the higher the threshold, the fewer phrases are accepted,
because only word pairs whose collocation score exceeds the threshold are joined into phrases. This keeps the most
semantically meaningful phrases and filters out incidental co-occurrences.
The bigrams are created based on the words that appear one after another most frequently, and the bigrams are then
fed into a trigram generator which creates the trigrams based on the bigrams.
The output of this method is a list of the nouns, verbs, and adjectives within the data.
"""
# Create model of bigrams and trigrams
bigram = gensim.models.Phrases(chunks, min_count = min_count, threshold = threshold) # a higher threshold yields fewer phrases; min_count is the minimum number of times a bigram must occur to be included
trigram = gensim.models.Phrases(bigram[chunks], threshold = threshold) # the trigram model is based on the bigram model
# Fit the models to the data
bigram_mod = gensim.models.phrases.Phraser(bigram)
trigram_mod = gensim.models.phrases.Phraser(trigram)
# Lemmatize and part-of-speech tag
processed_data = lda_utils.process_words(chunks,
nlp,
bigram_mod,
trigram_mod,
allowed_postags=["NOUN", "VERB", "ADJ"]) # nouns, verbs, and adjectives
return processed_data
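# Hedged sketch of what the Phrases models do (the example tokens are illustrative):
# if the pair ("white", "house") co-occurs often enough that its collocation score
# exceeds `threshold`, bigram_mod joins it into a single token, e.g.
# bigram_mod[["white", "house", "win"]] -> ["white_house", "win"]
# and trigram_mod can then extend such bigrams into trigrams.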
def create_dict_corpus(self, processed_data):
"""
This method creates the dictionary and corpus. In other words, it creates a representation of the words within a document
in terms of how often an individual word occurs in each document. Hence, the documents are conceptualized as a
bag-of-words model. This means that we are no longer dealing with words as such, but rather with distributions of word
frequencies (i.e., a numerical representation).
The dictionary is created by converting each word into an integer value. The corpus is created by converting the
documents to a "bag of words" model.
"""
# Create dictionary
id2word = corpora.Dictionary(processed_data)
# Create corpus: term-document frequency
corpus = [id2word.doc2bow(text) for text in processed_data]
return id2word, corpus
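# Illustrative example of the bag-of-words conversion (token IDs are made up):
# for a document ["win", "big", "win"], id2word.doc2bow(...) returns pairs of
# (token_id, count), e.g. [(0, 1), (1, 2)] where token id 1 maps to "win".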
def find_optimal_n_topics(self, processed_data, corpus, id2word, step_size):
"""
This method runs the model multiple times with different numbers of topics and finds the optimal number based
on the maximum coherence value. Hence, the number of topics with the highest coherence value is chosen as the
optimal number of topics. A high coherence value ensures that the topics are "coherent", i.e., meaningful.
"""
# Run model multiple times
model_list, coherence_values = lda_utils.compute_coherence_values(texts = processed_data,
corpus = corpus,
dictionary = id2word,
start = 5,
limit = 15,
step = step_size)
# Find the maximum coherence value
max_coherence = np.argmax(coherence_values)
# Find the number of topics corresponding to the maximum coherence value
optimal_n_topics = model_list[max_coherence].num_topics
return optimal_n_topics
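# Sketch of the selection logic, assuming lda_utils.compute_coherence_values
# fits one model per candidate in range(start, limit, step): with start=5,
# limit=15 and the default step_size=5 the candidates are 5 and 10 topics, and
# the model whose coherence value is highest (np.argmax) supplies optimal_n_topics.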
def create_lda(self, processed_data, id2word, corpus, optimal_n_topics, chunk_size, n_passes, n_iterations):
"""
This method builds the LDA model using gensim's multicore function, and computes perplexity and coherence scores.
Perplexity measures how well the model performs, i.e. the amount of error ("surprise"): a model with low
perplexity is less "surprised" when it encounters new data, so the perplexity score should be minimized.
Ideally, the coherence value should be high. A high coherence value means that the topics are coherent,
i.e. that they actually correspond to something meaningful in the data.
"""
# Define and run LDA model
lda_model = gensim.models.LdaMulticore(corpus=corpus, # vectorized corpus (list of lists of tuples)
id2word=id2word, # gensim dictionary (mapping words to IDs)
num_topics=optimal_n_topics, # number of topics
random_state=100, # random state for reproducibility
chunksize=chunk_size, # the number of documents (chunks of tweets) to process in each training batch. Processing batches of 10 documents rather than one at a time is more efficient, and increasing the batch size makes the model train faster.
passes=n_passes, # passes/epochs is the number of times the model should go through the entire corpus.
iterations=n_iterations, # how many times the model iterates over each individual document in the corpus
per_word_topics=True, # define word distributions for greater interpretability
minimum_probability=0.0) # some topics may not appear at all in a given document; rather than excluding them, I keep them by returning a probability of 0 instead of nothing (the default)
# Calculate perplexity score
perplexity_score = lda_model.log_perplexity(corpus)
# Calculate coherence score
coherence_model_lda = CoherenceModel(model=lda_model,
texts=processed_data,
dictionary=id2word,
coherence='c_v')
coherence_score = coherence_model_lda.get_coherence()
return lda_model, perplexity_score, coherence_score
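# Note: gensim's log_perplexity() returns a per-word likelihood bound, where the
# corresponding perplexity is 2 ** (-bound); a higher (less negative) bound
# therefore means lower perplexity, i.e. a better fit on the evaluation corpus.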
def create_output_1(self, lda_model, perplexity_score, coherence_score, optimal_n_topics):
"""
This method creates a txt-file containing the topics, perplexity, and coherence scores and saves it to
the 'output' directory.
"""
# Extract the topics
topics = lda_model.print_topics()
# Define path
out_path = os.path.join("..", "output", "topics.txt")
# Write txt-file containing the topics, perplexity, and coherence scores
with open(out_path, "w+") as f:
# Print how many topics the model has
f.writelines(f"The model has {optimal_n_topics} topics.\n")
# Print perplexity and coherence scores
f.writelines(f"Perplexity score: {perplexity_score}, Coherence score: {coherence_score} \n")
# Print topics
f.writelines(f"\nOverview of topics: \n {topics}")
def create_output_2(self, lda_model, corpus, processed_data, optimal_n_topics):
"""
This method creates a dataframe showing the most dominant topic for each chunk and saves it to the 'output' directory.
"""
# Find keywords for each topic
df_topic_keywords = lda_utils.format_topics_sentences(ldamodel=lda_model,
corpus=corpus,
texts=processed_data)
# Find the most dominant topic per chunk
df_dominant_topic = df_topic_keywords.reset_index()
df_dominant_topic.columns = ['Chunk_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']
# Save dataframe to output folder
output_path = os.path.join("..", "output", "dominant_topic.csv")
df_dominant_topic.to_csv(output_path, index = False)
return df_dominant_topic, df_topic_keywords
def create_output_3(self, df_dominant_topic, lda_model, corpus, processed_data, optimal_n_topics, df_topic_keywords):
"""
This method creates a dataframe showing the most contributing keywords for each topic and saves it to
the 'output' directory.
"""
# Display setting to show more characters in column
pd.options.display.max_colwidth = 100
# Create dataframe
sentence_topics_sorted_df = pd.DataFrame()
# Group keywords by the most dominant topic
sentence_topics_grouped = df_topic_keywords.groupby('Dominant_Topic')
# Compute how much each topic contributes in percentage
for i, grp in sentence_topics_grouped:
sentence_topics_sorted_df = pd.concat([sentence_topics_sorted_df, grp.sort_values(['Perc_Contribution'], ascending=False).head(1)], axis=0)
# Reset index
sentence_topics_sorted_df.reset_index(drop=True, inplace=True)
# Define columns in dataframe
sentence_topics_sorted_df.columns = ['Topic_Num', "Topic_Perc_Contrib", "Keywords", "Representative Text"]
# Save dataframe to output-folder
output_path = os.path.join("..", "output", "topic_contributions.csv")
sentence_topics_sorted_df.to_csv(output_path, index = False)
def visualize_topics(self, processed_data, rolling_mean, lda_model, corpus):
"""
This method creates visualizations using pyLDAvis and seaborn and saves these in the 'output' folder.
"""
# Create viz object
viz = pyLDAvis.gensim.prepare(lda_model,
corpus,
dictionary = lda_model.id2word)
# Save visualization as html-file
out_path = os.path.join("..", "output", "lda_topics.html")
pyLDAvis.save_html(viz, out_path)
# Create list of values. The first entry is the topic, and the second entry is how much it contributes (percentage)
values = list(lda_model.get_document_topics(corpus))
# Split the values and keep only the values per topic
split = []
for entry in values:
topic_prevalence = []
for topic in entry:
topic_prevalence.append(topic[1])
split.append(topic_prevalence)
# Create document-topic matrix
matrix = pd.DataFrame(map(list,zip(*split)))
# Create plot with rolling mean
lineplot = sns.lineplot(data=matrix.T.rolling(rolling_mean).mean())
# Set axes labels
lineplot.set(xlabel="Tweet Chunks/batches", ylabel = "Topic Percentage Contribution")
# Set title of plot
lineplot.set_title('Topic Contribution Over Time', size = 20)
# Set title of legend
lineplot.legend(title='Topic', loc='upper right')
# Get figure to be able to save
fig = lineplot.get_figure()
# Specify output path
out_path = os.path.join("..", "output", "topics_over_time.jpg")
# Save lineplot to output directory
fig.savefig(out_path)
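# Illustrative note: matrix.T has one row per chunk and one column per topic, so
# .rolling(rolling_mean).mean() smooths each topic's contribution over a window
# of `rolling_mean` chunks; the first rolling_mean - 1 rows are NaN and are
# simply not drawn by seaborn.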
def create_wordcloud(self, lda_model, optimal_n_topics):
"""
This method takes the topics and creates word clouds to make the overview of the topics easier.
This method was inspired by the following article, but modified to fit this particular project:
https://www.machinelearningplus.com/nlp/topic-modeling-visualization-how-to-present-results-lda-models/
"""
# Create list of colors from the matplotlib.colors
cols = [color for name, color in mcolors.TABLEAU_COLORS.items()]
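# Note: mcolors.TABLEAU_COLORS provides 10 colors, so cols[i] assumes at most
# 10 topics (with 15 topics it would raise an IndexError). The color_func lambda
# deliberately reads the loop variable `i` defined further below, relying on
# Python's late binding, so each topic's cloud gets its own color.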
# Define word cloud
cloud = WordCloud(background_color='white',
width=2500,
height=2500,
max_words=10,
colormap='tab10',
color_func=lambda *args, **kwargs: cols[i],
prefer_horizontal=1.0)
# LDA topics
topics = lda_model.show_topics(num_topics = optimal_n_topics, formatted=False)
# Define subplots. Since the number of subplots depends on the number of topics found, I use if-statements
if (optimal_n_topics == 5):
fig, axes = plt.subplots(1, 5, figsize=(20,20), sharex=True, sharey=True)
if (optimal_n_topics == 10):
fig, axes = plt.subplots(2, 5, figsize=(20,20), sharex=True, sharey=True)
if (optimal_n_topics == 15):
fig, axes = plt.subplots(3, 5, figsize=(20,20), sharex=True, sharey=True)
# Generate a word cloud for each topic
for i, ax in enumerate(axes.flatten()):
fig.add_subplot(ax)
topic_words = dict(topics[i][1])
cloud.generate_from_frequencies(topic_words, max_font_size=300)
plt.gca().imshow(cloud)
plt.gca().set_title('Topic ' + str(i))
plt.gca().axis('off')
# Additional adjusting
plt.subplots_adjust(wspace=0, hspace=0)
plt.axis('off')
plt.margins(x=0, y=0)
plt.tight_layout()
# Save word clouds to visualization folder
output_path = os.path.join("..", "output", "topic_wordclouds.png")
plt.savefig(output_path)
# Define behaviour when called from command line
if __name__=="__main__":
main()
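# Hypothetical invocation (the script and file names are illustrative, not from the source):
# python topic_modeling.py --input_filename clean_tweets.csv --chunk_size 10 --step_size 5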
| 46.693182
| 402
| 0.626511
| 3,010
| 24,654
| 4.998339
| 0.19701
| 0.014357
| 0.020472
| 0.012762
| 0.235161
| 0.179129
| 0.140844
| 0.119375
| 0.099036
| 0.075773
| 0
| 0.008627
| 0.299465
| 24,654
| 528
| 403
| 46.693182
| 0.862486
| 0.360672
| 0
| 0.098291
| 0
| 0.025641
| 0.17481
| 0.004616
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0.025641
| 0.068376
| 0
| 0.15812
| 0.068376
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e45a8dc57b1450e18797d47ff570959f3d7e2d31
| 15,086
|
py
|
Python
|
EEG_Lightning/dassl/data/datasets/ProcessDataBase_v1.py
|
mcd4874/NeurIPS_competition
|
4df1f222929e9824a55c9c4ae6634743391b0fe9
|
[
"MIT"
] | 23
|
2021-10-14T02:31:06.000Z
|
2022-01-25T16:26:44.000Z
|
EEG_Lightning/dassl/data/datasets/ProcessDataBase_v1.py
|
mcd4874/NeurIPS_competition
|
4df1f222929e9824a55c9c4ae6634743391b0fe9
|
[
"MIT"
] | null | null | null |
EEG_Lightning/dassl/data/datasets/ProcessDataBase_v1.py
|
mcd4874/NeurIPS_competition
|
4df1f222929e9824a55c9c4ae6634743391b0fe9
|
[
"MIT"
] | 1
|
2022-03-05T06:54:11.000Z
|
2022-03-05T06:54:11.000Z
|
"""
William DUong
"""
import os.path as osp
import os
import errno
from .build import DATASET_REGISTRY
from .base_dataset import Datum, DatasetBase,EEGDatum
from scipy.io import loadmat
import numpy as np
from collections import defaultdict
class ProcessDataBase(DatasetBase):
dataset_dir = None
file_name = None
def __init__(self, cfg):
# self.check_dataInfo()
self._n_domain = 0
self.domain_class_weight = None
self.whole_class_weight = None
self.root = osp.abspath(osp.expanduser(cfg.DATASET.ROOT))
self.dataset_dir = self.dataset_dir if not cfg.DATASET.DIR else cfg.DATASET.DIR
self.file_name = self.file_name if not cfg.DATASET.FILENAME else cfg.DATASET.FILENAME
self.cfg = cfg
# self.dataset_dir = osp.join(root, self.dataset_dir)
data_path = osp.join(self.root,self.dataset_dir, self.file_name)
if not osp.isfile(data_path):
raise FileNotFoundError(
errno.ENOENT, os.strerror(errno.ENOENT), data_path)
self.check_dataInfo()
total_data,total_label,test_data,test_lbl = self._read_data(data_path)
train, train_target, val, test = self.process_data_format((total_data,total_label),(test_data,test_lbl),cfg)
print("target domain : ", cfg.DATASET.TARGET_DOMAINS)
super().__init__(train_x=train, val=val, test=test, train_u=train_target)
@property
def data_domains(self):
return self._n_domain
def _read_data(self,data_path):
raise NotImplementedError
def check_dataInfo(self):
return
# def _read_data(self,data_path):
# """
# Process data from .mat file
# Re-implement this function to process new dataset
# Generate train data and test data with shape (subjects,trials,channels,frequency)
# """
# temp = loadmat(data_path)
# total_data = []
# total_label = []
# print(temp.keys())
# for idx in range(len(temp['data'][0])):
# total_data.append(temp['data'][0][idx])
# total_label.append(temp['labels'][0][idx])
# total_data = np.array(total_data) # (subjects,trials,channels,frequency)
# total_label = np.array(total_label)
# total_label = np.squeeze(total_label)
# total_label = total_label.astype(int)
#
# total_test_data = []
# total_test_label = []
# for idx in range(len(temp['testdata'][0])):
# total_test_data.append(temp['testdata'][0][idx])
# total_test_label.append(temp['testlabels'][0][idx])
#
# # test_data = np.array(temp['testdata']) # shape (trials,channels,frequency
# # test_lbl = np.array(temp['testlabels']) # trial,)
# # test_lbl = np.squeeze(test_lbl)
#
# test_data = np.array(total_test_data) # (subjects,trials,channels,frequency)
# test_lbl = np.array(total_test_label)
# test_lbl = np.squeeze(test_lbl)
# test_lbl = test_lbl.astype(int)
#
# print("BCI_IV data shape : ", total_data.shape)
# print("BCI_IV test shape : ", test_data.shape)
#
# return [total_data,total_label,test_data,test_lbl]
# def setup_within_subject_experiment(self,total_data,total_label,test_data,test_lbl,cfg):
# folds = cfg.DATASET.K_FOLD
# valid_fold = cfg.DATASET.VALID_FOLD
# train_data, train_label, valid_data, valid_label = self._pick_train_valid_same_set(total_data, total_label,
# folds=folds,
# valid_fold=valid_fold)
# return train_data, train_label, valid_data, valid_label,test_data,test_lbl
# def setup_cross_subject_experiment(self,total_data,total_label,test_data,test_lbl,cfg):
# cross_subject_seed = cfg.DATASET.RANDOM_SEED
#
# pick_data_subject_ids, pick_test_subject_ids = self._pick_leave_N_out_ids(total_subject=total_data.shape[0],
# seed=cross_subject_seed,
# given_subject_idx=None, num_subjects=3)
# # use the provided train subjects and target subjects id
# if len(cfg.DATASET.SOURCE_DOMAINS) > 0 and len(cfg.DATASET.TARGET_DOMAINS) > 0:
# pick_data_subject_ids = cfg.DATASET.SOURCE_DOMAINS
# pick_test_subject_ids = cfg.DATASET.TARGET_DOMAINS
#
# train_data = total_data[pick_data_subject_ids,]
# train_label = total_label[pick_data_subject_ids,]
# valid_data = test_data[pick_data_subject_ids,]
# valid_label = test_lbl[pick_data_subject_ids,]
# test_data = np.concatenate((total_data[pick_test_subject_ids,], test_data[pick_test_subject_ids,]), axis=1)
# test_lbl = np.concatenate((total_label[pick_test_subject_ids,], test_lbl[pick_test_subject_ids,]), axis=1)
# print("Pick subject to trian/valid : ", pick_data_subject_ids)
# print("Pick subject to test : ", pick_test_subject_ids)
# print("Train data, valid data, test data shape : ", (train_data.shape, valid_data.shape, test_data.shape))
# print("Train label, valid label, test label shape : ", (train_label.shape, valid_label.shape, test_lbl.shape))
# return train_data, train_label, valid_data, valid_label,test_data,test_lbl
def setup_within_subject_experiment(self,total_data,total_label,test_data,test_lbl,cfg):
"""
Split the total dataset into k folds, where each fold contains data from every subject.
Pick one fold to be the validation data.
"""
folds = cfg.DATASET.K_FOLD
valid_fold = cfg.DATASET.VALID_FOLD
train_data, train_label, valid_data, valid_label = self._pick_train_valid_same_set(total_data, total_label,
folds=folds,
valid_fold=valid_fold)
print("train data within subjects shape : {} from k={} split".format(train_data.shape, folds))
print("valid data within subjects shape : {} from k={} split".format(valid_data.shape, folds))
return train_data, train_label, valid_data, valid_label,test_data,test_lbl
def setup_cross_subject_experiment(self,total_data,total_label,test_data,test_lbl,cfg):
"""
Split the total dataset into k folds, where each fold contains a subset of the subjects.
Pick one fold to be the validation data.
"""
folds = cfg.DATASET.K_FOLD
valid_fold = cfg.DATASET.VALID_FOLD
train_data, train_label, valid_data, valid_label = self._pick_train_valid_cross_set(total_data,total_label,folds=folds,valid_fold=valid_fold)
return train_data, train_label, valid_data, valid_label, test_data, test_lbl
def _pick_train_valid_cross_set(self, total_data, total_label, folds, valid_fold):
if valid_fold > folds:
raise ValueError("can not assign fold identity outside of total cv folds")
total_subjects = np.arange(total_data.shape[0])
# total_subjects = [i for i in range(total_data.shape[0])]
split_folds = [list(x) for x in np.array_split(total_subjects, folds)]
pick_valid_subjects = split_folds[valid_fold - 1]
pick_train_subjects = []
for i in range(folds):
if i != valid_fold - 1:
for subject in split_folds[i]:
pick_train_subjects.append(subject)
# subject_train_folds = for subject in [ ]
# print("train subjects : {} from k={} split".format(pick_valid_subjects, folds))
# print("valid subjects : {} from k={} split".format(pick_valid_subjects, folds))
valid_data = total_data[pick_valid_subjects,]
valid_label = total_label[pick_valid_subjects,]
train_data = total_data[pick_train_subjects,]
train_label = total_label[pick_train_subjects,]
return train_data, train_label,pick_train_subjects, valid_data, valid_label,pick_valid_subjects
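# Worked example: with 10 subjects, folds=5 and valid_fold=2, np.array_split
# gives folds [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]], so subjects [2, 3] form
# the validation set and the remaining eight subjects form the training set.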
# valid_mark_start = (valid_fold - 1) * fold_trial
# valid_mark_end = valid_fold * fold_trial
#
# train_data = np.concatenate((data[:, :valid_mark_start, :, :], data[:, valid_mark_end:, :, :]), axis=1)
# train_label = np.concatenate((label[:, :valid_mark_start], label[:, valid_mark_end:]), axis=1)
#
# valid_data = data[:, valid_mark_start:valid_mark_end, :, :]
# valid_label = label[:, valid_mark_start:valid_mark_end]
#
#
# # if len(total_subjects)%folds == 0:
# train_folds = [i for i in range(1,folds+1) if i !=valid_fold]
# subject_split_folds = np.split(total_subjects,folds)
# print("subject splits : ",subject_split_folds)
#
# validation_subject_fold = subject_split_folds[valid_fold-1]
# train_subject_fold = np.concatenate(subject_split_folds[train_folds])
#
#
#
# "still need to implement"
# return None,None,None,None
def generate_class_weight(self,label):
if len(label.shape) == 2:
#label shall have shape (subjects,trials)
label = label.reshape(label.shape[0] * label.shape[1])
#data need to be shape (trials)
total = label.shape[0]
labels = np.unique(label)
list_ratio = []
for current_label in labels:
current_ratio = total / len(np.where(label == current_label)[0])
list_ratio.append(current_ratio)
return list_ratio
def generate_domain_class_weight(self,label):
"""
assume the label has shape (subjects,trials)
"""
if len(label.shape) != 2:
raise ValueError("domain labels does not have correct data format")
domain_class_weight = defaultdict()
for domain in range(label.shape[0]):
current_domain_class_weight = self.generate_class_weight(label[domain])
domain_class_weight[domain] = current_domain_class_weight
return domain_class_weight
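# Illustrative example: for label = np.array([[0, 0, 1, 1], [0, 1, 1, 1]]),
# generate_class_weight yields [2.0, 2.0] for domain 0 (4 trials / 2 per class)
# and [4.0, 1.33] for domain 1, so rarer classes receive larger weights.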
# def _expand_data_dim(self,data):
# i
def process_data_format(self, data,test,cfg):
CROSS_SUBJECTS = cfg.DATASET.CROSS_SUBJECTS
WITHIN_SUBJECTS = cfg.DATASET.WITHIN_SUBJECTS
total_data,total_label = data
test_data,test_lbl = test
if WITHIN_SUBJECTS:
train_data, train_label, valid_data, valid_label,test_data,test_lbl = self.setup_within_subject_experiment(total_data,total_label,test_data,test_lbl,cfg)
elif CROSS_SUBJECTS:
train_data, train_label, valid_data, valid_label, test_data, test_lbl = self.setup_cross_subject_experiment(total_data,total_label,test_data,test_lbl,cfg)
else:
raise ValueError("need to specify to create train/valid for cross subjects or within subject experiments")
"""Create class weight for dataset"""
if cfg.DATASET.DOMAIN_CLASS_WEIGHT:
self.domain_class_weight =self.generate_domain_class_weight(train_label)
if cfg.DATASET.TOTAL_CLASS_WEIGHT:
self.whole_class_weight = self.generate_class_weight(train_label)
# assume the number of subjects represents the number of domains
self._n_domain = train_data.shape[0]
train_data = np.expand_dims(train_data,axis=2)
valid_data = np.expand_dims(valid_data,axis=2)
test_data = np.expand_dims(test_data, axis=2)
train_items = self._generate_datasource(train_data,train_label)
valid_items = self._generate_datasource(valid_data,valid_label)
test_items = self._generate_datasource(test_data,test_lbl)
train_target_items = test_items.copy()
return train_items,train_target_items,valid_items,test_items
@classmethod
def _pick_train_valid_same_set(self,data, label, folds=4, valid_fold=1):
if valid_fold > folds:
raise ValueError("can not assign fold identity outside of total cv folds")
total_trials = data.shape[1]
fold_trial = int(total_trials / folds)
valid_mark_start = (valid_fold - 1) * fold_trial
valid_mark_end = valid_fold * fold_trial
# print("valid mark start : ", valid_mark_start)
# print("valid_mark_end : ", valid_mark_end)
train_data = np.concatenate((data[:, :valid_mark_start, :, :], data[:, valid_mark_end:, :, :]), axis=1)
train_label = np.concatenate((label[:, :valid_mark_start], label[:, valid_mark_end:]), axis=1)
valid_data = data[:, valid_mark_start:valid_mark_end, :, :]
valid_label = label[:, valid_mark_start:valid_mark_end]
# print("train data shape : ", train_data.shape)
# print("valid data shape : ", valid_data.shape)
return train_data, train_label, valid_data, valid_label
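# Worked example: with 100 trials per subject, folds=4 and valid_fold=1,
# fold_trial = 25, so trials 0-24 of every subject become the validation set
# and trials 25-99 remain for training.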
# @classmethod
def _leave_N_out(self,data, label, seed=None, num_subjects=1, given_subject_idx=None):
"""PICK valid num subjects out"""
pick_valid_subjects_idx,pick_train_subjects_idx = self._pick_leave_N_out_ids(data.shape[0], seed, given_subject_idx,num_subjects)
subjects = np.arange(data.shape[0])
pick_train_subjects = subjects[pick_train_subjects_idx]
pick_valid_subjects = subjects[pick_valid_subjects_idx]
train_data = data[pick_train_subjects_idx,]
valid_data = data[pick_valid_subjects_idx,]
train_label = label[pick_train_subjects_idx,]
valid_label = label[pick_valid_subjects_idx,]
return train_data, train_label, pick_train_subjects, valid_data, valid_label, pick_valid_subjects
@classmethod
def _pick_leave_N_out_ids(self,total_subject, seed=None, given_subject_idx=None, num_subjects=1):
# Seed the random number generator so the subject split is reproducible
if seed is None:
np.random.seed(1)
else:
np.random.seed(seed)
subjects_idx = np.arange(total_subject) if given_subject_idx is None else given_subject_idx
pick_subjects_idx = np.random.choice(subjects_idx, num_subjects, replace=False)
pick_subjects_idx = np.sort(pick_subjects_idx)
remain_subjects_idx = subjects_idx[~np.isin(subjects_idx, pick_subjects_idx)]
return pick_subjects_idx, remain_subjects_idx
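# Illustrative example: _pick_leave_N_out_ids(total_subject=5, num_subjects=2)
# might return pick_subjects_idx = [1, 3] and remain_subjects_idx = [0, 2, 4],
# i.e. two randomly chosen subjects are held out and the rest are kept.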
@classmethod
def _generate_datasource(self,data, label, test_data=False):
items = []
total_subjects = 1
if not test_data:
total_subjects = len(data)
for subject in range(total_subjects):
current_subject_data = data[subject]
current_subject_label = label[subject]
domain = subject
for i in range(current_subject_data.shape[0]):
item = EEGDatum(eeg_data=current_subject_data[i], label=int(current_subject_label[i]), domain=domain)
items.append(item)
return items
| 45.032836
| 166
| 0.65173
| 1,963
| 15,086
| 4.67244
| 0.098828
| 0.028783
| 0.022242
| 0.027802
| 0.411252
| 0.322067
| 0.276276
| 0.26232
| 0.248583
| 0.234518
| 0
| 0.004439
| 0.253414
| 15,086
| 334
| 167
| 45.167665
| 0.809909
| 0.353507
| 0
| 0.108974
| 0
| 0
| 0.038194
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089744
| false
| 0
| 0.051282
| 0.012821
| 0.24359
| 0.025641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e45ad99677d6577af2671852ef4f62636067fd15
| 9,321
|
py
|
Python
|
pywolf3d/level_editor/app.py
|
jammers-ach/pywolf3d
|
3e305d7bdb9aa4f38ae5cf460ed22c54efe8980c
|
[
"MIT"
] | null | null | null |
pywolf3d/level_editor/app.py
|
jammers-ach/pywolf3d
|
3e305d7bdb9aa4f38ae5cf460ed22c54efe8980c
|
[
"MIT"
] | null | null | null |
pywolf3d/level_editor/app.py
|
jammers-ach/pywolf3d
|
3e305d7bdb9aa4f38ae5cf460ed22c54efe8980c
|
[
"MIT"
] | null | null | null |
import argparse
import json
from ursina import load_texture, Ursina, Entity, color, camera, Quad, mouse, time, window, invoke, WindowPanel, \
Text, InputField, Space, scene, Button, Draggable, Tooltip, Scrollable
from pywolf3d.games.wolf3d import WALL_DEFS, WallDef, OBJECT_DEFS
Z_GRID = 0
Z_OBJECT = 2
Z_WALL = 3
class LevelEditor():
def __init__(self, fname, path_to_game):
level_data = None
self.fname = fname
with open(fname) as f:
level_data = json.load(f)
w = len(level_data['level'])
h = len(level_data['level'][0])
self.cursor = Grid(self, parent=scene)
self.grid = []
y = 0
for row in level_data['level']:
tile_row = []
x = 0
for wall_code in row:
tile_row.append(Tile(self, position=(x,y), cursor=self.cursor, wall_code=wall_code, parent=scene))
x += 1
self.grid.append(tile_row)
y += 1
self.object_grid = [[None for y in range(y) ] for x in range(x)]
for coord, code in level_data['object_list']:
if code in OBJECT_DEFS:
y, x = coord
self.update_object_grid(x, y, code)
else:
print(f"ignoring object {code} at {coord}")
camera.orthographic = True
camera.fov = 5
camera.position = (w/2,h/2)
def wall_inventory_click(code):
print(f"clicked tile {code}")
self.current_tile = code
self.mode = "tile"
self.wall_holder = Inventory(wall_inventory_click, cursor=self.cursor)
self.wall_holder.add_script(Scrollable())
for _,w in WALL_DEFS.items():
self.wall_holder.append(w)
def object_inventory_click(code):
print(f"clicked object {code}")
self.current_tile = code
self.mode = "object"
self.object_holder = Inventory(object_inventory_click, cursor=self.cursor, visible=False)
self.object_holder.add_script(Scrollable())
for _,w in OBJECT_DEFS.items():
self.object_holder.append(w)
self.current_tile = 1
self.mode = "tile"
def objects(self):
'''switches between tile and object mode'''
if self.mode == "tile":
self.mode = "object"
self.current_tile = 25
self.object_holder.toggle_visibility(True)
self.wall_holder.toggle_visibility(False)
elif self.mode == "object":
self.mode = "tile"
self.current_tile = 1
self.object_holder.toggle_visibility(False)
self.wall_holder.toggle_visibility(True)
def update_object_grid(self, x, y, code):
x,y = int(x), int(y)
if self.object_grid[x][y]:
self.object_grid[x][y].set_obj_tile(OBJECT_DEFS[code])
else:
self.object_grid[x][y] = ObjectTile(self, OBJECT_DEFS[code], position=(x,y), cursor=self.cursor, parent=scene)
def save(self):
json_data = {"object_list": [],
"name": "test level",
"size": []}
level = []
for r in self.grid:
row = []
for col in r:
row.append(col.wall_code)
level.append(row)
json_data["level"] = level
for r in self.object_grid:
row = []
for col in r:
if col:
json_data["object_list"].append([(col.y, col.x), col.obj_tile.code])
with open(self.fname, 'w') as f:
json.dump(json_data, f)
print(f"written to {self.fname}")
class Inventory(Entity):
def __init__(self, make_click, rows=2, cols=5, full_size=120, scrollable=True, **kwargs):
super().__init__(
parent = camera.ui,
model = Quad(radius=.015),
texture = 'white_cube',
texture_scale = (rows,cols),
scale = (.1 * rows, .1 * cols),
origin = (-.5, .5),
position = (-0.9,.5),
color = color.color(0,0,.1,.9),
)
self.make_click = make_click
self.rows = rows
self.cols = cols
self.full_cols = full_size - cols
self.icons = []
for key, value in kwargs.items():
setattr(self, key, value)
self.used_spots = []
def find_free_spot(self):
for y in range(self.cols+self.full_cols):
for x in range(self.rows):
if not (x,y) in self.used_spots:
self.used_spots.append((x,y))
return x, y
raise Exception("No free spots")
def append(self, wall_def, x=0, y=0):
x, y = self.find_free_spot()
def clicked():
self.make_click(wall_def.code)
icon = Button(
parent = self,
model = 'quad',
icon = wall_def.editor_texture,
color = color.white,
scale_x = 1/self.texture_scale[0],
scale_y = 1/self.texture_scale[1],
origin = (-.5,.5),
x = x * 1/self.texture_scale[0],
y = -y * 1/self.texture_scale[1],
z = -.5,
on_click = clicked,
)
icon.tooltip = Tooltip(wall_def.description)
icon.tooltip.background.color = color.color(0,0,0,.8)
self.icons.append(icon)
def item_clicked(self, item):
self.selected.deselect()
self.selected = item
def toggle_visibility(self, visible):
self.visible = visible
self.z = 0 if visible else 10
for x in self.icons:
x.visible = visible
x.disabled = not visible
class Grid(Entity):
fov_step = 20
move_step = 10
hold_step = 20
def __init__(self, editor, **kwargs):
super().__init__()
self.model=Quad(mode='line')
self.color = color.red
self.z = Z_GRID
self.current_tile = 5
self.editor = editor
for key, value in kwargs.items():
setattr(self, key, value)
def update(self):
self.position = mouse.world_point
self.x = round(self.x, 0)
self.y = round(self.y, 0)
def input(self, key):
if key == "up arrow":
camera.y += self.move_step * time.dt
elif key == "down arrow":
camera.y -= self.move_step * time.dt
elif key == "left arrow":
camera.x -= self.move_step * time.dt
elif key == "right arrow":
camera.x += self.move_step * time.dt
elif key == "up arrow hold":
camera.y += self.hold_step * time.dt
elif key == "down arrow hold":
camera.y -= self.hold_step * time.dt
elif key == "left arrow hold":
camera.x -= self.hold_step * time.dt
elif key == "right arrow hold":
camera.x += self.hold_step * time.dt
elif key == "=" or key == "= hold":
camera.fov -= self.fov_step * time.dt
elif key == "-" or key == "- hold":
camera.fov += self.fov_step * time.dt
elif key == "s":
self.editor.save()
elif key == "o":
self.editor.objects()
class Tile(Entity):
def __init__(self, editor, **kwargs):
super().__init__()
self.model='quad'
self.z = Z_WALL
self.collider='box'
self.editor = editor
self.set_texture(kwargs['wall_code'])
for key, value in kwargs.items():
setattr(self, key, value)
def set_texture(self, wall_code):
txt = WALL_DEFS[wall_code].editor_texture
self.wall_code = wall_code
self.texture = txt
def update(self):
if self.hovered:
self.cursor.x = self.position.x
self.cursor.y = self.position.y
self.cursor.z = Z_GRID
def input(self, key):
if key == 'left mouse down' and self.hovered:
print("down", self.x, self.y, ' - ', self.wall_code)
if self.editor.mode == "tile":
self.set_texture(self.editor.current_tile)
elif self.editor.mode == "object":
self.editor.update_object_grid(self.x, self.y, self.editor.current_tile)
class ObjectTile(Entity):
def __init__(self, editor, obj_tile, **kwargs):
super().__init__()
self.model='quad'
self.z = Z_OBJECT
self.collider=None
self.editor = editor
self.texture = obj_tile.editor_texture
self.obj_tile = obj_tile
for key, value in kwargs.items():
setattr(self, key, value)
def set_obj_tile(self, obj_tile):
self.texture = obj_tile.editor_texture
self.obj_tile = obj_tile
def start_editor(fname, path_to_game):
app = Ursina()
editor = LevelEditor(fname, path_to_game)
app.run()
def run():
parser = argparse.ArgumentParser(description='Mapmaker for pywolf3d')
parser.add_argument('level', help='path to level to load')
parser.add_argument('--path', help='path to wolf3d datafiles (default ./wolfdata)',
default="./wolfdata/")
args = parser.parse_args()
start_editor(args.level, args.path)
if __name__ == '__main__':
run()
| 29.590476
| 122
| 0.55037
| 1,190
| 9,321
| 4.137815
| 0.158824
| 0.026401
| 0.020309
| 0.028432
| 0.342201
| 0.248375
| 0.195167
| 0.164703
| 0.164703
| 0.15658
| 0
| 0.010718
| 0.329364
| 9,321
| 314
| 123
| 29.684713
| 0.776996
| 0.00397
| 0
| 0.177686
| 0
| 0
| 0.055615
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095041
| false
| 0
| 0.016529
| 0
| 0.14876
| 0.020661
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e45ba78572ce87d65bc9fa965f1a8af3685baf94
| 3,404
|
py
|
Python
|
code/data_mgmt.py
|
TomDonoghue/EEGparam
|
a3e747094617479122900688643fa396ecbf8bab
|
[
"MIT"
] | 8
|
2021-08-17T05:22:40.000Z
|
2022-03-23T02:03:48.000Z
|
code/data_mgmt.py
|
TomDonoghue/EEGparam
|
a3e747094617479122900688643fa396ecbf8bab
|
[
"MIT"
] | 1
|
2020-12-09T13:22:03.000Z
|
2021-01-27T01:56:09.000Z
|
code/data_mgmt.py
|
TomDonoghue/EEGparam
|
a3e747094617479122900688643fa396ecbf8bab
|
[
"MIT"
] | 4
|
2021-06-20T14:44:38.000Z
|
2021-12-11T11:21:26.000Z
|
"""Functions for loading and data management for EEG-FOOOF."""
from os.path import join as pjoin
import numpy as np
from fooof import FOOOFGroup
from fooof.analysis import get_band_peak_fg
from settings import BANDS, YNG_INDS, OLD_INDS, N_LOADS, N_SUBJS, N_TIMES
###################################################################################################
###################################################################################################
def reshape_data(data):
"""Reshape loaded data objects into subsets for YNG and OLD groups."""
yng_data = np.vstack([data[0, YNG_INDS, :], data[1, YNG_INDS, :], data[2, YNG_INDS, :]])
old_data = np.vstack([data[0, OLD_INDS, :], data[1, OLD_INDS, :], data[2, OLD_INDS, :]])
return yng_data, old_data
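# Illustrative note: `data` is indexed as (load, subject, time), so for three
# loads reshape_data stacks the three load-slices of the YNG subjects into
# yng_data with shape (3 * len(YNG_INDS), n_times), and likewise for OLD.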
def load_fooof_task_md(data_path, side='Contra', folder='FOOOF'):
"""Load task data in for all subjects, selects & return metadata."""
# Collect measures together from FOOOF results into matrices
all_r2s = np.zeros(shape=[N_LOADS, N_SUBJS, N_TIMES])
all_errs = np.zeros(shape=[N_LOADS, N_SUBJS, N_TIMES])
for li, load in enumerate(['Load1', 'Load2', 'Load3']):
pre, early, late = _load_fgs(data_path, folder, side, load)
for ind, fg in enumerate([pre, early, late]):
all_r2s[li, :, ind] = fg.get_params('r_squared')
all_errs[li, :, ind] = fg.get_params('error')
return all_r2s, all_errs
def load_fooof_task_ap(data_path, side='Contra', folder='FOOOF'):
"""Loads task data in for all subjects, selects and return aperiodic FOOOF outputs.
data_path : path to where data
side: 'Ipsi' or 'Contra'
"""
# Collect measures together from FOOOF results into matrices
all_exps = np.zeros(shape=[N_LOADS, N_SUBJS, N_TIMES])
all_offsets = np.zeros(shape=[N_LOADS, N_SUBJS, N_TIMES])
for li, load in enumerate(['Load1', 'Load2', 'Load3']):
pre, early, late = _load_fgs(data_path, folder, side, load)
for ind, fg in enumerate([pre, early, late]):
all_exps[li, :, ind] = fg.get_params('aperiodic_params', 'exponent')
all_offsets[li, :, ind] = fg.get_params('aperiodic_params', 'offset')
return all_offsets, all_exps
def load_fooof_task_pe(data_path, side='Contra', param_ind=1, folder='FOOOF'):
"""Loads task data for all subjects, selects and return periodic FOOOF outputs.
data_path : path to where data
side: 'Ipsi' or 'Contra'
"""
# Collect measures together from FOOOF results into matrices
all_alphas = np.zeros(shape=[N_LOADS, N_SUBJS, N_TIMES])
for li, load in enumerate(['Load1', 'Load2', 'Load3']):
pre, early, late = _load_fgs(data_path, folder, side, load)
for ind, fg in enumerate([pre, early, late]):
temp_alphas = get_band_peak_fg(fg, BANDS.alpha)
all_alphas[li, :, ind] = temp_alphas[:, param_ind]
return all_alphas
def _load_fgs(data_path, folder, side, load):
"""Helper to load FOOOFGroups."""
# Load the FOOOF analyses of the average
pre, early, late = FOOOFGroup(), FOOOFGroup(), FOOOFGroup()
pre.load('Group_' + load + '_' + side + '_Pre', pjoin(data_path, folder))
early.load('Group_' + load + '_' + side + '_Early', pjoin(data_path, folder))
late.load('Group_' + load + '_' + side + '_Late', pjoin(data_path, folder))
return pre, early, late
| 35.831579
| 99
| 0.623384
| 472
| 3,404
| 4.28178
| 0.216102
| 0.047501
| 0.047501
| 0.035626
| 0.549233
| 0.502227
| 0.445324
| 0.369619
| 0.369619
| 0.3429
| 0
| 0.006917
| 0.193008
| 3,404
| 94
| 100
| 36.212766
| 0.728795
| 0.205934
| 0
| 0.219512
| 0
| 0
| 0.071166
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121951
| false
| 0
| 0.121951
| 0
| 0.365854
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e45c0f05cdc7fe7a2e45a2f57230877bc9ba6968
| 413
|
py
|
Python
|
match_shapes.py
|
KyojiOsada/Python-Library
|
b06e50454c56c84c2abb96e6f68d35117ea5f4b5
|
[
"Apache-2.0"
] | null | null | null |
match_shapes.py
|
KyojiOsada/Python-Library
|
b06e50454c56c84c2abb96e6f68d35117ea5f4b5
|
[
"Apache-2.0"
] | null | null | null |
match_shapes.py
|
KyojiOsada/Python-Library
|
b06e50454c56c84c2abb96e6f68d35117ea5f4b5
|
[
"Apache-2.0"
] | null | null | null |
import sys
import cv2
import numpy as np
img1 = cv2.imread('source1.jpg',0)
img2 = cv2.imread('source2.jpg',0)
ret, thresh = cv2.threshold(img1, 127, 255,0)
ret, thresh2 = cv2.threshold(img2, 127, 255,0)
# findContours returns (image, contours, hierarchy) in OpenCV 3.x but only
# (contours, hierarchy) in 2.x/4.x; taking the last two values works for both
contours, hierarchy = cv2.findContours(thresh, 2, 1)[-2:]
cnt1 = contours[0]
contours, hierarchy = cv2.findContours(thresh2, 2, 1)[-2:]
cnt2 = contours[0]
ret = cv2.matchShapes(cnt1,cnt2,1,0.0)
print(ret)
sys.exit()
| 20.65
| 52
| 0.72155
| 70
| 413
| 4.257143
| 0.428571
| 0.040268
| 0.04698
| 0.127517
| 0.228188
| 0.228188
| 0
| 0
| 0
| 0
| 0
| 0.123288
| 0.116223
| 413
| 19
| 53
| 21.736842
| 0.693151
| 0
| 0
| 0
| 0
| 0
| 0.053269
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.214286
| 0
| 0.214286
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e45c3482ede83aa24d104869dacc8d42f601273f
| 25,556
|
py
|
Python
|
SlicerModules/SegmentConnectedParzenPDF/SegmentConnectedParzenPDF.py
|
jcfr/TubeTK
|
3791790e206b5627a35c46f86eeb9671c8d4190f
|
[
"Apache-2.0"
] | 1
|
2019-07-19T09:27:37.000Z
|
2019-07-19T09:27:37.000Z
|
SlicerModules/SegmentConnectedParzenPDF/SegmentConnectedParzenPDF.py
|
jcfr/TubeTK
|
3791790e206b5627a35c46f86eeb9671c8d4190f
|
[
"Apache-2.0"
] | null | null | null |
SlicerModules/SegmentConnectedParzenPDF/SegmentConnectedParzenPDF.py
|
jcfr/TubeTK
|
3791790e206b5627a35c46f86eeb9671c8d4190f
|
[
"Apache-2.0"
] | 1
|
2019-07-19T09:28:56.000Z
|
2019-07-19T09:28:56.000Z
|
import os
from __main__ import vtk, qt, ctk, slicer
import EditorLib
from EditorLib.EditOptions import HelpButton
from EditorLib.EditOptions import EditOptions
from EditorLib import EditUtil
from EditorLib import LabelEffect
class InteractiveConnectedComponentsUsingParzenPDFsOptions(EditorLib.LabelEffectOptions):
""" Editor Effect gui
"""
def __init__(self, parent=0):
super(InteractiveConnectedComponentsUsingParzenPDFsOptions,self).__init__(parent)
self.attributes = ('PaintTool')
self.displayName = 'Interactive Connected Components using Parzen PDFs'
self.undoRedo = EditorLib.EditUtil.UndoRedo()
# Create the normal PDF segmenter node cli if it doesn't exist yet.
# This is because we want the normal module's cli to be selected
# when opening the cli module.
module = slicer.modules.segmentconnectedcomponentsusingparzenpdfs
self.logic.getCLINode(module, module.title)
def __del__(self):
super(InteractiveConnectedComponentsUsingParzenPDFsOptions,self).__del__()
def create(self):
super(InteractiveConnectedComponentsUsingParzenPDFsOptions,self).create()
ioCollapsibleButton = ctk.ctkCollapsibleGroupBox()
ioCollapsibleButton.title = "IO"
ioCollapsibleButton.collapsed = 0
self.frame.layout().addWidget(ioCollapsibleButton)
# Layout within the io collapsible button
ioFormLayout = qt.QFormLayout(ioCollapsibleButton)
self.additionalInputNodeSelectors = []
for i in range(0,2):
self.additionalInputNodeSelectors.append(self.addInputNodeSelector(i, ioFormLayout))
self.additionalInputNodeSelectors[0].toolTip = "Select the 1st additional input volume to be segmented"
self.additionalInputNodeSelectors[1].toolTip = "Select the 2nd additional input volume to be segmented"
# Objects
objectCollapsibleGroupBox = ctk.ctkCollapsibleGroupBox()
objectCollapsibleGroupBox.title = "Objects"
self.frame.layout().addWidget(objectCollapsibleGroupBox)
# Layout within the io collapsible button
objectFormLayout = qt.QFormLayout(objectCollapsibleGroupBox)
foregroundLayout = qt.QHBoxLayout()
foregroundLabel = slicer.qMRMLLabelComboBox()
foregroundLabel.objectName = 'Foreground label'
foregroundLabel.setMRMLScene(slicer.mrmlScene)
foregroundLabel.setMRMLColorNode(self.editUtil.getColorNode())
foregroundLabel.labelValueVisible = True
foregroundLabel.currentColor = 1
self.foregroundLabel = foregroundLabel
self.connections.append( (self.foregroundLabel, 'currentColorChanged(int)', self.updateMRMLFromGUI ) )
foregroundWeightSpinBox = qt.QDoubleSpinBox(foregroundLabel)
self.foregroundWeightSpinBox = foregroundWeightSpinBox
foregroundWeightSpinBox.setRange(0.0, 1.0)
foregroundWeightSpinBox.setSingleStep(0.1)
foregroundWeightSpinBox.value = 1.0
foregroundPopup = ctk.ctkPopupWidget( foregroundWeightSpinBox )
foregroundPopupLayout = qt.QHBoxLayout( foregroundPopup )
foregroundPopupSlider = ctk.ctkDoubleSlider( foregroundPopup )
self.foregroundPopupSlider = foregroundPopupSlider
foregroundPopupSlider.maximum = 1.0
foregroundPopupSlider.minimum = 0.0
foregroundPopupSlider.singleStep = 0.1
foregroundPopupSlider.connect('valueChanged(double)', self.foregroundWeightSpinBox.setValue)
foregroundWeightSpinBox.connect('valueChanged(double)', self.foregroundPopupSlider.setValue)
self.connections.append( (self.foregroundWeightSpinBox, 'valueChanged(double)', self.updateMRMLFromGUI ) )
foregroundLayout.addWidget( foregroundLabel )
foregroundLayout.addWidget( foregroundWeightSpinBox )
foregroundPopupLayout.addWidget( foregroundPopupSlider )
objectFormLayout.addRow("Foreground Label:", foregroundLayout )
self.objectLabel = foregroundLabel
# http://qt-project.org/doc/qt-4.7/qt.html
foregroundPopup.alignment = 0x0082 # Qt::AlignVCenter | Qt::AlignRight
foregroundPopup.horizontalDirection = 0 # Qt::LeftToRight
foregroundPopup.verticalDirection = 0 # Qt::TopToBottom
foregroundPopup.animationEffect = 1 # Qt::ScrollEffect
backgroundLayout = qt.QHBoxLayout()
backgroundLabel = slicer.qMRMLLabelComboBox()
backgroundLabel.objectName = 'Background label'
backgroundLabel.setMRMLScene(slicer.mrmlScene)
backgroundLabel.setMRMLColorNode(self.editUtil.getColorNode())
backgroundLabel.labelValueVisible = True
backgroundLabel.currentColor = 2
self.backgroundLabel = backgroundLabel
self.connections.append( (self.backgroundLabel, 'currentColorChanged(int)', self.updateMRMLFromGUI ) )
backgroundWeightSpinBox = qt.QDoubleSpinBox(backgroundLabel)
self.backgroundWeightSpinBox = backgroundWeightSpinBox
backgroundWeightSpinBox.setRange(0.0, 1.0)
backgroundWeightSpinBox.setSingleStep(0.1)
backgroundWeightSpinBox.value = 1.0
backgroundPopup = ctk.ctkPopupWidget( backgroundWeightSpinBox )
backgroundPopupLayout = qt.QHBoxLayout( backgroundPopup )
backgroundPopupSlider = ctk.ctkDoubleSlider( backgroundPopup )
self.backgroundPopupSlider = backgroundPopupSlider
backgroundPopupSlider.maximum = 1.0
backgroundPopupSlider.minimum = 0.0
backgroundPopupSlider.singleStep = 0.1
backgroundPopupSlider.connect('valueChanged(double)', self.backgroundWeightSpinBox.setValue)
backgroundWeightSpinBox.connect('valueChanged(double)', self.backgroundPopupSlider.setValue)
self.connections.append( (self.backgroundWeightSpinBox, 'valueChanged(double)', self.updateMRMLFromGUI ) )
backgroundLayout.addWidget( backgroundLabel )
backgroundLayout.addWidget( backgroundWeightSpinBox )
backgroundPopupLayout.addWidget( backgroundPopupSlider )
objectFormLayout.addRow("Background Label:", backgroundLayout)
self.backgroundLabel = backgroundLabel
# http://qt-project.org/doc/qt-4.7/qt.html
backgroundPopup.alignment = 0x0082 # Qt::AlignVCenter | Qt::AlignRight
backgroundPopup.horizontalDirection = 0 # Qt::LeftToRight
backgroundPopup.verticalDirection = 0 # Qt::TopToBottom
backgroundPopup.animationEffect = 1 # Qt::ScrollEffect
# Presets
# Placeholder
presetsCollapsibleGroupBox = ctk.ctkCollapsibleGroupBox()
presetsCollapsibleGroupBox.title = "Preset"
self.frame.layout().addWidget(presetsCollapsibleGroupBox)
presetComboBox = slicer.qSlicerPresetComboBox()
# Advanced Parameters
paramsCollapsibleGroupBox = ctk.ctkCollapsibleGroupBox()
paramsCollapsibleGroupBox.title = "Advanced Parameters"
paramsCollapsibleGroupBox.collapsed = 1
self.frame.layout().addWidget(paramsCollapsibleGroupBox)
# Layout within the io collapsible button
paramsFormLayout = qt.QFormLayout(paramsCollapsibleGroupBox)
erosionSpinBox = qt.QSpinBox()
erosionSpinBox.objectName = 'erosionSpinBox'
erosionSpinBox.toolTip = "Set the erosion radius."
erosionSpinBox.setMinimum(0)
erosionSpinBox.setValue(5) # Default
paramsFormLayout.addRow("Erosion Radius:", erosionSpinBox)
self.erosionSpinBox = erosionSpinBox
self.connections.append( (self.erosionSpinBox, "valueChanged(int)", self.updateMRMLFromGUI ) )
holeFillSpinBox = qt.QSpinBox()
holeFillSpinBox.objectName = 'holeFillSpinBox'
holeFillSpinBox.toolTip = "Set the hole fill iterations."
holeFillSpinBox.setMinimum(0)
holeFillSpinBox.setValue(5) #Default
paramsFormLayout.addRow("Hole Fill Iterations:", holeFillSpinBox)
self.holeFillSpinBox = holeFillSpinBox
self.connections.append( (self.holeFillSpinBox, "valueChanged(int)", self.updateMRMLFromGUI ) )
# probabilitySmoothingStandardDeviation spin box
probabilitySmoothingStdDevSpinBox = qt.QDoubleSpinBox()
probabilitySmoothingStdDevSpinBox.objectName = 'probabilitySmoothingStdDevSpinBox'
probabilitySmoothingStdDevSpinBox.toolTip = "Standard deviation of blur applied to probability images prior to computing maximum likelihood of each class at each pixel."
probabilitySmoothingStdDevSpinBox.setMinimum(0.0)
probabilitySmoothingStdDevSpinBox.setValue(1.0) # Default
probabilitySmoothingStdDevSpinBox.setSingleStep(0.5)
paramsFormLayout.addRow("Probability Smoothing Standard Deviation:", probabilitySmoothingStdDevSpinBox)
self.probabilitySmoothingStdDevSpinBox = probabilitySmoothingStdDevSpinBox
self.connections.append( (self.probabilitySmoothingStdDevSpinBox, "valueChanged(double)", self.updateMRMLFromGUI ) )
# histogramSmoothingStandardDeviation spin box
histogramSmoothingStdDevSpinBox = qt.QDoubleSpinBox()
histogramSmoothingStdDevSpinBox.objectName = 'histogramSmoothingStdDevSpinBox'
histogramSmoothingStdDevSpinBox.toolTip = "Standard deviation of blur applied to histograms to convert them to probability density function estimates."
histogramSmoothingStdDevSpinBox.setMinimum(0.0)
histogramSmoothingStdDevSpinBox.setValue(5.0) # Default
histogramSmoothingStdDevSpinBox.setSingleStep(0.5)
paramsFormLayout.addRow("Probability Smoothing Standard Deviation:", histogramSmoothingStdDevSpinBox)
self.histogramSmoothingStdDevSpinBox = histogramSmoothingStdDevSpinBox
self.connections.append( (self.histogramSmoothingStdDevSpinBox, "valueChanged(double)", self.updateMRMLFromGUI ) )
# draft check box
draftCheckBox = qt.QCheckBox()
draftCheckBox.objectName = 'draftCheckBox'
draftCheckBox.toolTip = "Downsamples results by a factor of 4."
paramsFormLayout.addRow("Draft Mode:", draftCheckBox)
self.draftCheckBox = draftCheckBox
self.connections.append( (self.draftCheckBox, "stateChanged(int)", self.updateMRMLFromGUI ) )
# force classification check box
forceClassificationCheckBox = qt.QCheckBox()
forceClassificationCheckBox.objectName = 'forceClassificationCheckBox'
forceClassificationCheckBox.toolTip = "Perform the classification of all voxels?"
forceClassificationCheckBox.setChecked(False)
paramsFormLayout.addRow("Classify all voxels: ", forceClassificationCheckBox)
self.forceClassificationCheckBox = forceClassificationCheckBox
self.connections.append( (self.forceClassificationCheckBox, "stateChanged(int)", self.updateMRMLFromGUI ) )
# dilate first check box
dilateFirstCheckBox = qt.QCheckBox()
dilateFirstCheckBox.objectName = 'dilateFirstCheckBox'
dilateFirstCheckBox.toolTip = "Dilate and then erode so as to fill-in holes?"
dilateFirstCheckBox.setChecked(False)
paramsFormLayout.addRow("Dilate First: ", dilateFirstCheckBox)
self.dilateFirstCheckBox = dilateFirstCheckBox
self.connections.append( (self.dilateFirstCheckBox, "stateChanged(int)", self.updateMRMLFromGUI ) )
self.helpLabel = qt.QLabel("Run the PDF Segmentation on the current label map.", self.frame)
self.frame.layout().addWidget(self.helpLabel)
self.apply = qt.QPushButton("Apply", self.frame)
self.apply.setToolTip("Apply to run segmentation.\nCreates a new label volume using the current volume as input")
self.frame.layout().addWidget(self.apply)
self.widgets.append(self.apply)
EditorLib.HelpButton(self.frame, "Use this tool to apply segmentation using Parzen windowed PDFs.\n\n Select different label colors and paint on the foreground or background voxels using the paint effect.\nTo run segmentation correctly, you need to supply a minimum or two class labels.")
self.connections.append( (self.apply, 'clicked()', self.onApply) )
def destroy(self):
super(InteractiveConnectedComponentsUsingParzenPDFsOptions,self).destroy()
# note: this method needs to be implemented exactly as-is
# in each leaf subclass so that "self" in the observer
# is of the correct type
def updateParameterNode(self, caller, event):
node = EditUtil.EditUtil().getParameterNode()
if node != self.parameterNode:
if self.parameterNode:
node.RemoveObserver(self.parameterNodeTag)
self.parameterNode = node
self.parameterNodeTag = node.AddObserver(vtk.vtkCommand.ModifiedEvent, self.updateGUIFromMRML)
def setMRMLDefaults(self):
super(InteractiveConnectedComponentsUsingParzenPDFsOptions,self).setMRMLDefaults()
disableState = self.parameterNode.GetDisableModifiedEvent()
self.parameterNode.SetDisableModifiedEvent(1)
defaults = [
("outputVolume", "0"),
("labelmap", "0"),
("objectId", "1,2"),
("erodeRadius", "5"),
("holeFillIterations", "5"),
("objectPDFWeight", "1.0,1.0"),
("probImageSmoothingStdDev", "1.0"),
("histogramSmoothingStdDev", "5.0"),
("draft", "0"),
("forceClassification", "0"),
("dilateFirst", "0"),
]
for i in range(0, 2):
defaults.append(("additionalInputVolumeID" + str(i), "0"))
# Set logic here because this function is called before the end
# of the superclass constructor
self.logic = InteractiveConnectedComponentsUsingParzenPDFsLogic(None)
for default in defaults:
pvalue = self.getParameter(default[0])
if pvalue == "":
self.setParameter(default[0], default[1])
self.parameterNode.SetDisableModifiedEvent(disableState)
def updateGUIFromMRML(self,caller,event):
parameters = ["objectId",
"erodeRadius",
"holeFillIterations",
"objectPDFWeight",
"probImageSmoothingStdDev",
"histogramSmoothingStdDev",
"draft",
"forceClassification",
"dilateFirst",
]
for i in range(0, 2):
parameters.append("additionalInputVolumeID" + str(i))
for parameter in parameters:
if self.getParameter(parameter) == "":
# don't update if the parameter node has not got all values yet
return
super(InteractiveConnectedComponentsUsingParzenPDFsOptions,self).updateGUIFromMRML(caller,event)
self.disconnectWidgets()
# Additional inputs
for i in range(0, 2):
self.additionalInputNodeSelectors[i].currentNodeID = self.getParameter("additionalInputVolumeID" + str(i))
# labels
objectIds = self.logic.listFromStringList(self.getParameter("objectId"))
self.foregroundLabel.currentColor = objectIds[0]
self.backgroundLabel.currentColor = objectIds[1]
# Parameters
self.erosionSpinBox.value = int(self.getParameter("erodeRadius"))
self.holeFillSpinBox.value = int(self.getParameter("holeFillIterations"))
self.probabilitySmoothingStdDevSpinBox.value = float(self.getParameter("probImageSmoothingStdDev"))
self.histogramSmoothingStdDevSpinBox.value = float(self.getParameter("histogramSmoothingStdDev"))
self.draftCheckBox.setChecked(int(self.getParameter("draft")))
self.forceClassificationCheckBox.setChecked(int(self.getParameter("forceClassification")))
self.dilateFirstCheckBox.setChecked(int(self.getParameter("dilateFirst")))
self.connectWidgets()
def onApply(self):
self.undoRedo.saveState()
self.logic.applyPDFSegmenter()
def updateMRMLFromGUI(self):
if self.updatingGUI:
return
disableState = self.parameterNode.GetDisableModifiedEvent()
self.parameterNode.SetDisableModifiedEvent(1)
super(InteractiveConnectedComponentsUsingParzenPDFsOptions,self).updateMRMLFromGUI()
# Input
for i in range(0, 2):
self.setParameter("additionalInputVolumeID" + str(i), self.additionalInputNodeSelectors[i].currentNodeID)
# Labels
objectIds = (str(self.foregroundLabel.currentColor) + ","
+ str(self.backgroundLabel.currentColor)
)
self.setParameter("objectId", objectIds)
# Parameters
self.setParameter("erodeRadius", self.erosionSpinBox.text)
self.setParameter("holeFillIterations", self.holeFillSpinBox.text)
self.setParameter("probImageSmoothingStdDev", self.probabilitySmoothingStdDevSpinBox.text)
self.setParameter("histogramSmoothingStdDev", self.histogramSmoothingStdDevSpinBox.text)
self.setParameter("draft", str(int(self.draftCheckBox.isChecked())))
self.setParameter("forceClassification", str(int(self.forceClassificationCheckBox.isChecked())))
self.setParameter("dilateFirst", str(int(self.dilateFirstCheckBox.isChecked())))
self.parameterNode.SetDisableModifiedEvent(disableState)
if not disableState:
self.parameterNode.InvokePendingModifiedEvent()
def addInputNodeSelector(self, index, layout):
inputNodeSelector = slicer.qMRMLNodeComboBox()
inputNodeSelector.objectName = 'additionalInputNodeSelector'+str(index+1)
inputNodeSelector.nodeTypes = ['vtkMRMLScalarVolumeNode']
inputNodeSelector.noneEnabled = True
inputNodeSelector.addEnabled = False
inputNodeSelector.removeEnabled = False
inputNodeSelector.editEnabled = True
inputNodeSelector.enabled = 1
inputNodeSelector.setMRMLScene(slicer.mrmlScene)
layout.addRow("Additional Input Volume "+str(index+1)+":", inputNodeSelector)
self.connections.append( (inputNodeSelector, "currentNodeChanged(vtkMRMLNode*)", self.updateMRMLFromGUI ) )
return inputNodeSelector
def setParameter(self, parameterName, value):
self.logic.setParameter(parameterName, value)
def getParameter(self, parameterName):
return self.logic.getParameter(parameterName)
#
# EditorEffectTemplateTool
#
class InteractiveConnectedComponentsUsingParzenPDFsTool(LabelEffect.LabelEffectTool):
"""
One instance of this will be created per-view when the effect
is selected. It is responsible for implementing feedback and
label map changes in response to user input.
This class observes the editor parameter node to configure itself
and queries the current view for background and label volume
nodes to operate on.
"""
def __init__(self, sliceWidget):
super(InteractiveConnectedComponentsUsingParzenPDFsTool,self).__init__(sliceWidget)
# create a logic instance to do the non-gui work
self.logic = InteractiveConnectedComponentsUsingParzenPDFsLogic(self.sliceWidget.sliceLogic())
def cleanup(self):
super(InteractiveConnectedComponentsUsingParzenPDFsTool,self).cleanup()
def processEvent(self, caller=None, event=None):
"""
handle events from the render window interactor
"""
# let the superclass deal with the event if it wants to
if super(InteractiveConnectedComponentsUsingParzenPDFsTool,self).processEvent(caller,event):
return
if event == "LeftButtonPressEvent":
xy = self.interactor.GetEventPosition()
sliceLogic = self.sliceWidget.sliceLogic()
logic = InteractiveConnectedComponentsUsingParzenPDFsLogic(sliceLogic)
logic.apply(xy)
print("Got a %s at %s in %s", (event,str(xy),self.sliceWidget.sliceLogic().GetSliceNode().GetName()))
self.abortEvent(event)
else:
pass
#if event == "LeftButtonPressEvent"
# self.actionState = "painting"
# if not self.pixelMode:
# self.cursorOff()
# xy = self.interactor.GetEventPosition()
#elif event == "LeftButtonReleaseEvent":
# self.paintApply
# events from the slice node
#if caller and caller.IsA('vtkMRMLSliceNode'):
# here you can respond to pan/zoom or other changes
# to the view
# pass
#
# EditorEffectTemplateLogic
#
class InteractiveConnectedComponentsUsingParzenPDFsLogic(LabelEffect.LabelEffectLogic):
"""
This class contains helper methods for a given effect
type. It can be instanced as needed by an EditorEffectTemplateTool
or EditorEffectTemplateOptions instance in order to compute intermediate
results (say, for user feedback) or to implement the final
segmentation editing operation. This class is split
from the EditorEffectTemplateTool so that the operations can be used
by other code without the need for a view context.
"""
def __init__(self,sliceLogic):
super(InteractiveConnectedComponentsUsingParzenPDFsLogic,self).__init__(sliceLogic)
self.effectName = 'InteractiveConnectedComponentsUsingParzenPDFsOptions'
self.parameterNode = self.editUtil.getParameterNode()
def getCLINode(self, module, nodeName):
cliNode = slicer.mrmlScene.GetFirstNodeByName(nodeName)
# Also check path to make sure the CLI isn't a scripted module
if (cliNode is None) and ("qt-scripted-modules" not in module.path):
cliNode = slicer.cli.createNode(module)
cliNode.SetName(nodeName)
return cliNode
def setParameter(self, parameterName, value):
self.parameterNode.SetParameter(self.getFullParameterName(parameterName), value)
def getParameter(self, parameterName):
return self.parameterNode.GetParameter(self.getFullParameterName(parameterName))
def getFullParameterName(self, parameterName):
return self.effectName + ',' + parameterName
def listFromStringList(self, stringlist):
'''Convert a comma-separated string such as '1.0, 2.0, 3.0' to a list
of numbers such as [1.0, 2.0, 3.0]; integer tokens are parsed as ints,
everything else as floats.'''
values = []
for token in stringlist.split(","):
try:
values.append(int(token))
except ValueError:
values.append(float(token))
return values
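# e.g. listFromStringList("0,1") -> [0, 1]; listFromStringList("0.5, 1.5") -> [0.5, 1.5]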
def apply(self,xy):
pass
def applyPDFSegmenter(self):
#
# Apply PDF segmenter based on the parameter node
#
if not self.sliceLogic:
self.sliceLogic = self.editUtil.getSliceLogic()
cliParameters = {}
# IO
cliParameters["inputVolume1"] = self.editUtil.getBackgroundVolume()
for i in range(0,2):
# Get input nodes by their IDs
nodeID = self.getParameter("additionalInputVolumeID" + str(i))
cliParameters["inputVolume"+str(i+2)] = slicer.mrmlScene.GetNodeByID(nodeID)
cliParameters["labelmap"] = self.editUtil.getLabelVolume()
cliParameters["outputVolume"] = self.editUtil.getLabelVolume()
# Labels
cliParameters["objectId"] = self.listFromStringList(self.getParameter("objectId"))
# Parameters
cliParameters["erodeRadius"] = int(self.getParameter( "erodeRadius"))
cliParameters["holeFillIterations"] = int(self.getParameter("holeFillIterations"))
cliParameters["objectPDFWeight"] = self.listFromStringList(self.getParameter("objectPDFWeight"))
cliParameters["probImageSmoothingStdDev"] = float(self.getParameter("probImageSmoothingStdDev"))
cliParameters["histogramSmoothingStdDev"] = float(self.getParameter("histogramSmoothingStdDev"))
cliParameters["draft"] = int(self.getParameter("draft"))
cliParameters["forceClassification"] = int(self.getParameter("forceClassification"))
cliParameters["dilateFirst"] = int(self.getParameter("dilateFirst"))
module = slicer.modules.segmentconnectedcomponentsusingparzenpdfs
cliNode = self.getCLINode(module, "PDFSegmenterEditorEffect")
slicer.cli.run(module, cliNode, cliParameters)
#
# The InteractiveConnectedComponentsUsingParzenPDFs Template class definition
#
class InteractiveConnectedComponentsUsingParzenPDFsExtension(LabelEffect.LabelEffect):
"""Organizes the Options, Tool, and Logic classes into a single instance
that can be managed by the EditBox
"""
def __init__(self):
# name is used to define the name of the icon image resource (e.g. EditorEffectTemplate.png)
self.name = "InteractiveConnectedComponentsUsingParzenPDFs"
self.title = "InteractiveConnectedComponentsUsingParzenPDFs"
# tool tip is displayed on mouse hover
self.toolTip = "Perform PDF Segmentation"
self.options = InteractiveConnectedComponentsUsingParzenPDFsOptions
self.tool = InteractiveConnectedComponentsUsingParzenPDFsTool
self.logic = InteractiveConnectedComponentsUsingParzenPDFsLogic
#
# EditorEffectTemplate
#
class InteractiveConnectedComponentsUsingParzenPDFs:
"""
This class is the 'hook' for slicer to detect and recognize the extension
as a loadable scripted module
"""
def __init__(self, parent):
parent.title = "Editor InteractiveConnectedComponentsUsingParzenPDFs Effect"
parent.categories = ["Developer Tools.Editor Extensions"]
parent.contributors = ["Danielle Pace (Kitware)",
"Christopher Mullins (Kitware)",
"Stephen Aylward (Kitware)",
"Johan Andruejol (Kitware)",]
parent.helpText = """
The PDF Segmenter is a framework for using connected components in
conjunction with intensity histograms for classifying images in pixel space.
This module is available as an editor tool via the editor module in Slicer.
This module cannot be run as a standard module in Slicer.
"""
parent.acknowledgementText = """
This work is part of the TubeTK project at Kitware.
Module implemented by Danielle Pace. PDF Segmenter implemented by
Stephen Aylward.
"""
# TODO:
# don't show this module - it only appears in the Editor module
#parent.hidden = True
# Add this extension to the editor's list for discovery when the module
# is created. Since this module may be discovered before the Editor itself,
# create the list if it doesn't already exist.
try:
slicer.modules.editorExtensions
except AttributeError:
slicer.modules.editorExtensions = {}
slicer.modules.editorExtensions['InteractiveConnectedComponentsUsingParzenPDFs'] = InteractiveConnectedComponentsUsingParzenPDFsExtension
#
# EditorEffectTemplateWidget
#
class InteractiveConnectedComponentsUsingParzenPDFsWidget:
def __init__(self, parent = None):
self.parent = parent
def setup(self):
# don't display anything for this widget - it will be hidden anyway
pass
def enter(self):
pass
def exit(self):
pass
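#
# A minimal usage sketch (hypothetical; assumes a running Slicer session in
# which the Editor module has already consumed slicer.modules.editorExtensions):
#
# effectClass = slicer.modules.editorExtensions['InteractiveConnectedComponentsUsingParzenPDFs']
# effect = effectClass()  # bundles the Options, Tool and Logic classes
# print(effect.name, effect.toolTip)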
| 44.138169
| 292
| 0.755204
| 2,326
| 25,556
| 8.27687
| 0.236028
| 0.017453
| 0.01418
| 0.015583
| 0.073343
| 0.057345
| 0.03797
| 0.033036
| 0.012466
| 0.003013
| 0
| 0.005943
| 0.157223
| 25,556
| 578
| 293
| 44.214533
| 0.887919
| 0.136211
| 0
| 0.083117
| 0
| 0.002597
| 0.169865
| 0.04121
| 0
| 0
| 0.000548
| 0.00173
| 0
| 1
| 0.075325
| false
| 0.012987
| 0.018182
| 0.007792
| 0.132468
| 0.002597
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4634c0a0adb3cc0d16bbbb61f40f718de94ef2b
| 3,141
|
py
|
Python
|
wind_direction.py
|
simseve/weatherstation
|
68196a032a2cd39062f3924ce6d386f5f54af393
|
[
"MIT"
] | null | null | null |
wind_direction.py
|
simseve/weatherstation
|
68196a032a2cd39062f3924ce6d386f5f54af393
|
[
"MIT"
] | null | null | null |
wind_direction.py
|
simseve/weatherstation
|
68196a032a2cd39062f3924ce6d386f5f54af393
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# wind_direction.py
#
# Copyright 2020 <Simone Severini>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import time
import board
import busio
import adafruit_ads1x15.ads1115 as ADS
from adafruit_ads1x15.analog_in import AnalogIn
class wind_direction():
def __init__(self):
# create i2c bus
self.i2c = busio.I2C(board.SCL, board.SDA)
# Create the ADC object using the I2C bus
self.ads = ADS.ADS1115(self.i2c)
self.ads.gain = 1
def get_wind_dir(self):
# Calculate wind direction based on ADC reading
self.chan = AnalogIn(self.ads, ADS.P0)
self.val = self.chan.value
self.windDir = "Not Connected"
self.windDeg = 999
if 20000 <= self.val <= 20500:
self.windDir = "N"
self.windDeg = 0
if 10000 <= self.val <= 10500:
self.windDir = "NNE"
self.windDeg = 22.5
if 11500 <= self.val <= 12000:
self.windDir = "NE"
self.windDeg = 45
if 2000 <= self.val <= 2250:
self.windDir = "ENE"
self.windDeg = 67.5
if 2300 <= self.val <= 2500:
self.windDir = "E"
self.windDeg = 90
if 1500 <= self.val <= 1950:
self.windDir = "ESE"
self.windDeg = 112.5
if 4500 <= self.val <= 4900:
self.windDir = "SE"
self.windDeg = 135
if 3000 <= self.val <= 3500:
self.windDir = "SSE"
self.windDeg = 157.5
if 7000 <= self.val <= 7500:
self.windDir = "S"
self.windDeg = 180
if 6000 <= self.val <= 6500:
self.windDir = "SSW"
self.windDeg = 202.5
if 16000 <= self.val <= 16500:
self.windDir = "SW"
self.windDeg = 225
if 15000 <= self.val <= 15500:
self.windDir = "WSW"
self.windDeg = 247.5
if 24000 <= self.val <= 24500:
self.windDir = "W"
self.windDeg = 270
if 21000 <= self.val <= 21500:
self.windDir = "WNW"
self.windDeg = 292.5
if 22500 <= self.val <= 23000:
self.windDir = "NW"
self.windDeg = 315
if 17500 <= self.val <= 18500:
self.windDir = "NNW"
self.windDeg = 337.5
return self.windDir, self.windDeg
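# A minimal polling sketch (hypothetical; assumes the wind vane's ADS1115 is
# wired on the default I2C pins, as in __init__ above):
#
# if __name__ == "__main__":
#     sensor = wind_direction()
#     while True:
#         direction, degrees = sensor.get_wind_dir()
#         print(direction, degrees)
#         time.sleep(2)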
| 27.313043
| 71
| 0.561605
| 403
| 3,141
| 4.349876
| 0.478908
| 0.112949
| 0.022248
| 0.032516
| 0.046777
| 0.031945
| 0
| 0
| 0
| 0
| 0
| 0.114217
| 0.339382
| 3,141
| 114
| 72
| 27.552632
| 0.730602
| 0.286533
| 0
| 0
| 0
| 0
| 0.022142
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.078125
| 0
| 0.140625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4639c8948f8a93b0256a4c34b5d407b8adc42bc
| 3,875
|
py
|
Python
|
oswin_tempest_plugin/tests/_mixins/migrate.py
|
openstack/oswin-tempest-plugin
|
59e6a14d01dda304c7d11fda1d35198f25799d6c
|
[
"Apache-2.0"
] | 6
|
2017-10-31T10:40:24.000Z
|
2019-01-28T22:08:15.000Z
|
oswin_tempest_plugin/tests/_mixins/migrate.py
|
openstack/oswin-tempest-plugin
|
59e6a14d01dda304c7d11fda1d35198f25799d6c
|
[
"Apache-2.0"
] | null | null | null |
oswin_tempest_plugin/tests/_mixins/migrate.py
|
openstack/oswin-tempest-plugin
|
59e6a14d01dda304c7d11fda1d35198f25799d6c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common import waiters
import testtools
from oswin_tempest_plugin import config
CONF = config.CONF
class _MigrateMixin(object):
"""Cold migration mixin.
This mixin will add a cold migration test. It will perform the
following operations:
* Spawn instance.
* Cold migrate the instance.
* Check the server connectivity.
"""
def _migrate_server(self, server_tuple):
server = server_tuple.server
self.admin_servers_client.migrate_server(server['id'])
self._wait_for_server_status(server, 'VERIFY_RESIZE')
self.servers_client.confirm_resize_server(server['id'])
@testtools.skipUnless(CONF.compute.min_compute_nodes >= 2,
'Expected at least 2 compute nodes.')
def test_migration(self):
server_tuple = self._create_server()
self._migrate_server(server_tuple)
self._check_scenario(server_tuple)
class _LiveMigrateMixin(object):
"""Live migration mixin.
This mixin will add a live migration test. It will perform the
following operations:
* Spawn instance.
* Live migrate the instance.
* Check the server connectivity.
"""
# TODO(amuresan): Different mixins may be used at the same time.
# Each of them may override some fields such as
# 'max_microversion'. This has to be sorted out.
max_microversion = '2.24'
def _live_migrate_server(self, server_tuple, destination_host=None,
state='ACTIVE', volume_backed=False):
server = server_tuple.server
admin_server = self._get_server_as_admin(server)
current_host = admin_server['OS-EXT-SRV-ATTR:host']
block_migration = (CONF.compute_feature_enabled.
block_migration_for_live_migration
and not volume_backed)
self.admin_servers_client.live_migrate_server(
server['id'],
host=destination_host,
block_migration=block_migration,
disk_over_commit=False)
waiters.wait_for_server_status(self.admin_servers_client, server['id'],
state)
admin_server = self._get_server_as_admin(server)
after_migration_host = admin_server['OS-EXT-SRV-ATTR:host']
migration_list = (self.admin_migrations_client.list_migrations()
['migrations'])
msg = ("Live Migration failed. Migrations list for Instance "
"%s: [" % server['id'])
for live_migration in migration_list:
if live_migration['instance_uuid'] == server['id']:
msg += "\n%s" % live_migration
msg += "]"
if destination_host:
self.assertEqual(destination_host, after_migration_host, msg)
else:
self.assertNotEqual(current_host, after_migration_host, msg)
@testtools.skipUnless(CONF.compute_feature_enabled.live_migration,
'Live migration option enabled.')
def test_live_migration(self):
server_tuple = self._create_server()
self._live_migrate_server(server_tuple)
self._check_scenario(server_tuple)
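# A hypothetical sketch of how these mixins are meant to be combined; the base
# class name below is illustrative and not part of this module:
#
# class TestVmMigration(_MigrateMixin, _LiveMigrateMixin, test_base.TestBase):
#     """Inherits _create_server/_check_scenario from the scenario base."""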
| 35.87963
| 79
| 0.659613
| 462
| 3,875
| 5.298701
| 0.37013
| 0.044935
| 0.02451
| 0.026961
| 0.288399
| 0.245098
| 0.245098
| 0.183824
| 0.09232
| 0.049837
| 0
| 0.00455
| 0.26271
| 3,875
| 107
| 80
| 36.214953
| 0.852293
| 0.303484
| 0
| 0.150943
| 0
| 0
| 0.085431
| 0
| 0
| 0
| 0
| 0.009346
| 0.037736
| 1
| 0.075472
| false
| 0
| 0.056604
| 0
| 0.188679
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e468b2b5e8f04b80c414c4137b991f429ffae653
| 2,508
|
py
|
Python
|
kedro/extras/logging/color_logger.py
|
daniel-falk/kedro
|
19187199339ddc4a757aaaa328f319ec4c1e452a
|
[
"Apache-2.0"
] | 2,047
|
2022-01-10T15:22:12.000Z
|
2022-03-31T13:38:56.000Z
|
kedro/extras/logging/color_logger.py
|
daniel-falk/kedro
|
19187199339ddc4a757aaaa328f319ec4c1e452a
|
[
"Apache-2.0"
] | 170
|
2022-01-10T12:44:31.000Z
|
2022-03-31T17:01:24.000Z
|
kedro/extras/logging/color_logger.py
|
daniel-falk/kedro
|
19187199339ddc4a757aaaa328f319ec4c1e452a
|
[
"Apache-2.0"
] | 112
|
2022-01-10T19:15:24.000Z
|
2022-03-30T11:20:52.000Z
|
"""A logging handler class which produces coloured logs."""
import logging
import click
class ColorHandler(logging.StreamHandler):
"""A color log handler.
You can use this handler by incorporating the example below into your
logging configuration:
``conf/project/logging.yml``:
::
formatters:
simple:
format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
handlers:
console:
class: kedro.extras.logging.ColorHandler
level: INFO
formatter: simple
stream: ext://sys.stdout
# defining colors is optional
colors:
debug: white
info: magenta
warning: yellow
root:
level: INFO
handlers: [console]
The ``colors`` parameter is optional, and you can use any ANSI color.
* Black
* Red
* Green
* Yellow
* Blue
* Magenta
* Cyan
* White
The default colors are:
* debug: magenta
* info: cyan
* warning: yellow
* error: red
* critical: red
"""
def __init__(self, stream=None, colors=None):
logging.StreamHandler.__init__(self, stream)
colors = colors or {}
self.colors = {
"critical": colors.get("critical", "red"),
"error": colors.get("error", "red"),
"warning": colors.get("warning", "yellow"),
"info": colors.get("info", "cyan"),
"debug": colors.get("debug", "magenta"),
}
def _get_color(self, level):
if level >= logging.CRITICAL:
return self.colors["critical"] # pragma: no cover
if level >= logging.ERROR:
return self.colors["error"] # pragma: no cover
if level >= logging.WARNING:
return self.colors["warning"] # pragma: no cover
if level >= logging.INFO:
return self.colors["info"]
if level >= logging.DEBUG: # pragma: no cover
return self.colors["debug"] # pragma: no cover
return None # pragma: no cover
def format(self, record: logging.LogRecord) -> str:
"""The handler formatter.
Args:
record: The record to format.
Returns:
The record formatted as a string.
"""
text = logging.StreamHandler.format(self, record)
color = self._get_color(record.levelno)
return click.style(text, color)
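# A quick programmatic sketch (Kedro normally wires this handler up via
# logging.yml as shown in the class docstring, not in code):
#
# import logging, sys
# logger = logging.getLogger("demo")
# logger.setLevel(logging.INFO)
# logger.addHandler(ColorHandler(stream=sys.stdout, colors={"info": "green"}))
# logger.info("hello in green")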
| 26.125
| 74
| 0.548644
| 261
| 2,508
| 5.226054
| 0.360153
| 0.043988
| 0.057185
| 0.032991
| 0.094575
| 0.059384
| 0
| 0
| 0
| 0
| 0
| 0
| 0.342903
| 2,508
| 95
| 75
| 26.4
| 0.82767
| 0.447767
| 0
| 0
| 0
| 0
| 0.093777
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.068966
| 0
| 0.448276
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e46b6c69ae3a4c3f1fee528d4d729291bff4cf8d
| 1,468
|
py
|
Python
|
qt_figure.py
|
liwenlongonly/LogAnalyzer
|
4981c0673cf0d1a52ad76e473ffc1c30bb6bf22b
|
[
"Apache-2.0"
] | null | null | null |
qt_figure.py
|
liwenlongonly/LogAnalyzer
|
4981c0673cf0d1a52ad76e473ffc1c30bb6bf22b
|
[
"Apache-2.0"
] | null | null | null |
qt_figure.py
|
liwenlongonly/LogAnalyzer
|
4981c0673cf0d1a52ad76e473ffc1c30bb6bf22b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from PyQt5 import QtCore
import numpy as np
from matplotlib.figure import Figure
import time
import matplotlib
matplotlib.use("Qt5Agg") # 声明使用QT5
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
class QtFigure(FigureCanvas):
def __init__(self, width=5, height=4, dpi=100):
# Step 1: create a Figure
self.fig = Figure(figsize=(width, height), dpi=dpi)
# Step 2: activate the Figure window in the parent class
super(QtFigure, self).__init__(self.fig) # this call is required, otherwise nothing is displayed
# Step 3: create a subplot to draw on; 111 is the subplot spec, like MATLAB's subplot(1,1,1)
self.axes = self.fig.add_subplot(111)
self.timer = QtCore.QTimer(self)
self.timer.timeout.connect(self._update_figure)
self.timer.start(1000)
self.date_list = [0, 0, 0, 0] # seed values so _update_figure works even before plot() is called
def plot(self):
self.axes.grid(True, linestyle='--')
self.date_list = [0, 0, 0, 0]
h, = self.axes.plot(self.date_list, [1, 2, 0, 4], color='r', linewidth=0.5)
def _update_figure(self):
# draw 4 random integers in the half-open interval [0, 10)
self.axes.cla()
values = [np.random.randint(0, 10) for _ in range(4)]
self.date_list.pop(0)
self.date_list.append(QtFigure.date_format())
self.axes.grid(True, linestyle='--')
self.axes.plot(self.date_list, values, color='r', linewidth=0.5)
# self.h.set_ydata(l)
self.draw()
@staticmethod
def date_format():
return time.strftime("%H:%M:%S", time.localtime())
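# A minimal embedding sketch (hypothetical; FigureCanvas is itself a QWidget,
# so the canvas can be shown directly or placed in any layout):
#
# if __name__ == "__main__":
#     import sys
#     from PyQt5.QtWidgets import QApplication
#     app = QApplication(sys.argv)
#     canvas = QtFigure(width=5, height=4, dpi=100)
#     canvas.plot()
#     canvas.show()
#     sys.exit(app.exec_())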
| 32.622222
| 83
| 0.636921
| 201
| 1,468
| 4.537313
| 0.462687
| 0.052632
| 0.078947
| 0.035088
| 0.149123
| 0.111842
| 0
| 0
| 0
| 0
| 0
| 0.038529
| 0.222071
| 1,468
| 44
| 84
| 33.363636
| 0.76007
| 0.11921
| 0
| 0.064516
| 0
| 0
| 0.015576
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.129032
| false
| 0
| 0.193548
| 0.032258
| 0.387097
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4705f3acb58336e0e7ad1a046d3910433815d04
| 1,488
|
py
|
Python
|
worldmap/src/worldmap/model/dtm.py
|
expertanalytics/fagkveld
|
96e16b9610e8b60d36425e7bc5435d266de1f8bf
|
[
"BSD-2-Clause"
] | null | null | null |
worldmap/src/worldmap/model/dtm.py
|
expertanalytics/fagkveld
|
96e16b9610e8b60d36425e7bc5435d266de1f8bf
|
[
"BSD-2-Clause"
] | null | null | null |
worldmap/src/worldmap/model/dtm.py
|
expertanalytics/fagkveld
|
96e16b9610e8b60d36425e7bc5435d266de1f8bf
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Digital terrain model (DTM).
Examples::
>>> from worldmap import DTM
>>> dtm = DTM()
>>> print(dtm["NOR"])
Location('Norway')
"""
from typing import Dict, List, Tuple, Set, Optional
from bokeh.models import Model
from bokeh.models import ColumnDataSource, Patches, LabelSet
import logging
import numpy as np
from .location import Location
from .coloring import set_location_colors
from ..utils.data_fetcher import get_world_topology, get_country_polygon
class DTM:
locations: Dict[str, Location] = {}
data = None
def __init__(self):
# add countries:
logging.info("Fetching topological data")
countries = get_world_topology()
for name, country in countries.items():
self.locations[name] = Location(
name=name, long_name=country["name"])
# add country neighbors:
for name, country in countries.items():
self.locations[name].neighbors = {
neighbor: self.locations[neighbor]
for neighbor in country["borders"]
}
# add country colors
self.set_location_colors()
logging.info("Finshed __init__")
def __getitem__(self, item):
return self.locations[item]
def set_location_colors(self):
"""Set color values on all locations and all location children."""
for location in self.locations.values():
if not location.color:
set_location_colors(location)
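# A small traversal sketch building on the docstring example (assumes the
# topology fetch in __init__ succeeds and populates .neighbors):
#
# dtm = DTM()
# norway = dtm["NOR"]
# for neighbor_code in norway.neighbors:
#     print(neighbor_code, dtm[neighbor_code])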
| 25.220339
| 74
| 0.638441
| 169
| 1,488
| 5.455621
| 0.39645
| 0.070499
| 0.073753
| 0.045553
| 0.101952
| 0.101952
| 0.101952
| 0.101952
| 0.101952
| 0
| 0
| 0
| 0.266801
| 1,488
| 58
| 75
| 25.655172
| 0.845096
| 0.174059
| 0
| 0.066667
| 0
| 0
| 0.042869
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.266667
| 0.033333
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e472ad25bd9133e0e1fe623219e0826f24f2f7ef
| 365
|
py
|
Python
|
Mandelbrot fractal/visualize.py
|
TTimerkhanov/parallel_computing
|
75c79a4e50ac2f5f9fab90cd79560cd8e848228e
|
[
"MIT"
] | 8
|
2018-03-21T12:26:44.000Z
|
2019-10-05T08:46:20.000Z
|
Mandelbrot fractal/visualize.py
|
TTimerkhanov/parallel_computing
|
75c79a4e50ac2f5f9fab90cd79560cd8e848228e
|
[
"MIT"
] | null | null | null |
Mandelbrot fractal/visualize.py
|
TTimerkhanov/parallel_computing
|
75c79a4e50ac2f5f9fab90cd79560cd8e848228e
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
def mandelbrot(threshold, density): # note: threshold is accepted but currently unused
atlas = np.empty((density, density))
with open('set', 'r') as f:
for line in f.readlines():
i, j, val = line.split(",")
atlas[int(i), int(j)] = val
plt.imshow(atlas.T, interpolation="nearest")
plt.show()
mandelbrot(120, 6000)
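# The 'set' file is expected to hold one "i,j,value" triple per line, with
# i and j in [0, density), e.g.:
# 0,0,2.0
# 0,1,5.5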
| 22.8125
| 48
| 0.60274
| 51
| 365
| 4.313725
| 0.666667
| 0.036364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025455
| 0.246575
| 365
| 16
| 49
| 22.8125
| 0.774545
| 0
| 0
| 0
| 0
| 0
| 0.032787
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.181818
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e47311462a03c6a7eb9b40addcc16befdf99f631
| 2,133
|
py
|
Python
|
code/venv/lib/python3.8/site-packages/datadog_api_client/v2/model/permission_attributes.py
|
Valisback/hiring-engineers
|
7196915dd5a429ae27c21fa43d527f0332e662ed
|
[
"Apache-2.0"
] | null | null | null |
code/venv/lib/python3.8/site-packages/datadog_api_client/v2/model/permission_attributes.py
|
Valisback/hiring-engineers
|
7196915dd5a429ae27c21fa43d527f0332e662ed
|
[
"Apache-2.0"
] | null | null | null |
code/venv/lib/python3.8/site-packages/datadog_api_client/v2/model/permission_attributes.py
|
Valisback/hiring-engineers
|
7196915dd5a429ae27c21fa43d527f0332e662ed
|
[
"Apache-2.0"
] | null | null | null |
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v2.model_utils import (
ModelNormal,
cached_property,
datetime,
)
class PermissionAttributes(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
validations = {}
@cached_property
def openapi_types():
return {
"created": (datetime,),
"description": (str,),
"display_name": (str,),
"display_type": (str,),
"group_name": (str,),
"name": (str,),
"restricted": (bool,),
}
attribute_map = {
"created": "created",
"description": "description",
"display_name": "display_name",
"display_type": "display_type",
"group_name": "group_name",
"name": "name",
"restricted": "restricted",
}
read_only_vars = {}
def __init__(self, *args, **kwargs):
"""PermissionAttributes - a model defined in OpenAPI
Keyword Args:
created (datetime): [optional] Creation time of the permission.
description (str): [optional] Description of the permission.
display_name (str): [optional] Displayed name for the permission.
display_type (str): [optional] Display type.
group_name (str): [optional] Name of the permission group.
name (str): [optional] Name of the permission.
restricted (bool): [optional] Whether or not the permission is restricted.
"""
super().__init__(kwargs)
self._check_pos_args(args)
@classmethod
def _from_openapi_data(cls, *args, **kwargs):
"""Helper creating a new instance from a response."""
self = super(PermissionAttributes, cls)._from_openapi_data(kwargs)
self._check_pos_args(args)
return self
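# A construction sketch (values are illustrative; the keyword arguments mirror
# the docstring above):
#
# attrs = PermissionAttributes(name="admin", group_name="access", restricted=True)
# print(attrs.name, attrs.restricted)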
| 30.042254
| 108
| 0.61369
| 225
| 2,133
| 5.635556
| 0.453333
| 0.033123
| 0.047319
| 0.031546
| 0.102524
| 0.102524
| 0.061514
| 0.061514
| 0
| 0
| 0
| 0.004516
| 0.273324
| 2,133
| 70
| 109
| 30.471429
| 0.813548
| 0.426629
| 0
| 0.055556
| 0
| 0
| 0.176944
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.027778
| 0.027778
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4813380bf2daa72d111c3321e1a0890661d1b92
| 5,475
|
py
|
Python
|
CodedCaching/Network.py
|
qizhu8/CodedCachingSim
|
84e8f1e58e1c431ee4916525487d4b28f92e629b
|
[
"MIT"
] | null | null | null |
CodedCaching/Network.py
|
qizhu8/CodedCachingSim
|
84e8f1e58e1c431ee4916525487d4b28f92e629b
|
[
"MIT"
] | null | null | null |
CodedCaching/Network.py
|
qizhu8/CodedCachingSim
|
84e8f1e58e1c431ee4916525487d4b28f92e629b
|
[
"MIT"
] | null | null | null |
"""
Network class is in charge of:
1. Storing M - User Cache Size, N - Number of Files, K - Number of Users
2. Storing User instances, Server instance, and attacker instance
"""
import numpy as np
from scipy import special
import itertools
from Server import Server
from User import User
from tabulate import tabulate
T_BE_INTEGER = True
class Network():
def __init__(self, M, N, K, t=None, fileId2Alphabet=False):
self.M = M
self.N = int(N)
self.K = int(K)
if t is None:
self.t = self.M * self.K / self.N
else:
self.t = t
self.M = self.t * self.N/self.K # recompute M from the explicitly given t
if T_BE_INTEGER and self.t != int(self.t):
raise Exception("Make sure t = M*K/N is an integer")
self.numOfSubfile = int(special.comb(self.K, self.t))
self.numOfCodedSubfiles = int(special.comb(self.K, self.t+1))
self.fileId2Alphabet = fileId2Alphabet
self.server = Server(self.M, self.N, self.K, self.t, self.fileId2Alphabet)
self.userset = [User(id, self.M, self.N, self.K, self.t, fileId2Alphabet=fileId2Alphabet) for id in range(self.K)]
self.placementDone = False
def placement(self, isRandom=False, verboseForUser=False, verboseForCache=False):
Z = self.server.generateZ(isRandom=isRandom)
for userId in range(self.K):
self.userset[userId].setZ(Z[userId, :])
if verboseForUser:
# self.userset[userId].printUserDetail(fileId2Alphabet=fileId2Alphabet)
print(self.userset[userId])
if verboseForCache:
self.printCacheContent(Z)
self.placementDone = True
def delivery(self, D=None, verbose=False):
if not self.placementDone:
self.placement(verbose=verbose)
if D is None:
D = [self.userset[id].genD() for id in range(self.K)]
if verbose:
print("D:", D)
X, groupList = self.server.generateX(D)
if verbose:
print("Server Transmission is:")
print(self.printableServerTransmission(X))
return D, X, groupList
def printableServerTransmission(self, X, inList=False, fileId2Alphabet=False):
printoutList = []
totalRow, totalCol = X.shape
for row in range(totalRow):
subfileList = []
for col in range(totalCol):
if X[row][col]:
fileId = int(col / self.numOfSubfile)
subfileId = int(col % self.numOfSubfile)
if fileId2Alphabet:
subfileList.append("{fileIdChr}{subfileId}".format(fileIdChr=chr(65+fileId), subfileId=subfileId+1))
else:
subfileList.append("{fileId}-{subfileId}".format(fileId=fileId, subfileId=subfileId))
if len(subfileList):
printoutList.append(" + ".join(subfileList))
# printoutList.append("{id}:{subfileInfo}".format(id=row, subfileInfo=" + ".join(subfileList)))
if not inList:
return " || ".join(printoutList)
else:
return printoutList
def printCacheContent(self, Z):
if self.fileId2Alphabet:
header = ["UserId"] + [chr(65+fileId)+""+str(subfileId+1) for fileId in range(self.N) for subfileId in range(self.numOfSubfile)]
else:
header = ["UserId"] + [str(fileId)+"-"+str(subfileId) for fileId in range(self.N) for subfileId in range(self.numOfSubfile)]
UserId = np.asarray([range(self.K)]).T
print(tabulate(np.hstack([UserId, Z]), headers=header))
def allD(self):
curD = [0] * self.K
while curD != [self.N-1]*self.K:
yield curD
for checkPos in range(self.K-1, -1, -1):
if curD[checkPos] < self.N-1:
curD[checkPos] += 1
break
else:
curD[checkPos] = 0
yield curD
def __str__(self):
print_template = """M:{M}\nN:{N}\nK:{K}\nt:{t}"""
return print_template.format(M=self.M, N=self.N, K=self.K, t=self.t)
if __name__ == "__main__":
# if t is specified, M is not needed. Currently, I only consider t to be a positive integer.
# M: unified cache size per user (if t is specified, M is not useful anymore)
# N: number of files in the network
# K: number of users in the network
# t: M*K/N,
# M, N, K, t = -1, 3, 3, 1
M, N, K, t = -1, 3, 5, 3
# M, N, K, t = -1, 4, 5, 2
codedCachingNetwork = Network(M=M, N=N, K=K, t=t, fileId2Alphabet=True)
print(codedCachingNetwork)
# codedCachingNetwork.placement(verboseForCache=True, verboseForUser=True, isRandom=True)
codedCachingNetwork.placement(verboseForCache=True, verboseForUser=True, isRandom=False)
X_D_table = []
# for D in itertools.combinations_with_replacement(range(N), K):
for D in codedCachingNetwork.allD():
D, X, groupList = codedCachingNetwork.delivery(verbose=False, D=D) # generate X based on D
D_str = ",".join(list(map(lambda d: chr(65+ d), D)))
X_D_table.append(["["+D_str+"]"] + codedCachingNetwork.printableServerTransmission(X, inList=True, fileId2Alphabet=True))
# header = ["D", "X"]
header = ["D"] + groupList
print(tabulate(X_D_table, headers=header))
| 36.993243
| 140
| 0.592146
| 687
| 5,475
| 4.672489
| 0.213974
| 0.023364
| 0.027414
| 0.004984
| 0.14486
| 0.134579
| 0.120249
| 0.047352
| 0.034891
| 0.034891
| 0
| 0.01125
| 0.285662
| 5,475
| 147
| 141
| 37.244898
| 0.809512
| 0.156164
| 0
| 0.090909
| 0
| 0
| 0.03437
| 0.010442
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070707
| false
| 0
| 0.060606
| 0
| 0.181818
| 0.171717
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e48ba9fc67c09776260edc71cd67600e98eb63a9
| 1,885
|
py
|
Python
|
2017/day07/code.py
|
Fadi88/AoC
|
8b24f2f2cc7b4e1c63758e81e63d8670a261cc7c
|
[
"Unlicense"
] | 12
|
2019-12-15T21:53:19.000Z
|
2021-12-24T17:03:41.000Z
|
2017/day07/code.py
|
Fadi88/adventofcode19
|
dd2456bdd163beb02dbfe9dcea2b021061c7671e
|
[
"Unlicense"
] | 1
|
2021-12-15T20:40:51.000Z
|
2021-12-15T22:19:48.000Z
|
2017/day07/code.py
|
Fadi88/adventofcode19
|
dd2456bdd163beb02dbfe9dcea2b021061c7671e
|
[
"Unlicense"
] | 5
|
2020-12-11T06:00:24.000Z
|
2021-12-20T21:37:46.000Z
|
import time
from collections import defaultdict
def profiler(method):
def wrapper_method(*arg, **kw):
t = time.time()
method(*arg, **kw)
print('Method ' + method.__name__ + ' took : ' +
"{:2.5f}".format(time.time()-t) + ' sec')
return wrapper_method
@profiler
def part1():
towers = defaultdict(list)
for l in open('input.txt'):
l = l.strip()
p = l.split()
if '->' in l:
towers[p[0]] = l[l.find('->') + 3:].split(', ')
else:
towers[p[0]] = []
leaves = []
for ele in towers:
leaves += towers[ele]
for ele in towers:
if ele not in leaves:
print(ele)
break
def get_weight(towers,weights,prog):
if len(towers[prog]) == 0:
return weights[prog]
else:
w = weights[prog]
for sub in towers[prog]:
w += get_weight(towers,weights,sub)
return w
@profiler
def part2():
towers = defaultdict(list)
weights = {}
for l in open('input.txt'):
l = l.strip()
p = l.split()
weights[p[0]] = int(p[1][1:-1])
if '->' in l:
towers[p[0]] = l[l.find('->') + 3:].split(', ')
else:
towers[p[0]] = []
leaves = []
for ele in towers:
leaves += towers[ele]
for ele in towers:
if ele not in leaves:
root = ele
break
diff = 0
while True:
tmp = [get_weight(towers,weights,p) for p in towers[root]]
same = [tmp.count(tmp[i]) > 1 for i in range(len(tmp))]
if not all(same):
root = towers[root][same.index(False)]
diff = tmp[same.index(False)] - tmp[(same.index(False) + 1)% len(tmp)]
else:
print(weights[root] - diff)
break
if __name__ == "__main__":
part1()
part2()
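# input.txt follows the AoC 2017 day 7 format, one program per line, e.g.:
# fwft (72) -> ktlj, cntj, xhth
# qoyq (66)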
| 22.176471
| 83
| 0.490716
| 241
| 1,885
| 3.767635
| 0.26556
| 0.052863
| 0.035242
| 0.061674
| 0.295154
| 0.295154
| 0.295154
| 0.295154
| 0.295154
| 0.295154
| 0
| 0.01642
| 0.353846
| 1,885
| 84
| 84
| 22.440476
| 0.729064
| 0
| 0
| 0.5
| 0
| 0
| 0.033952
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075758
| false
| 0
| 0.030303
| 0
| 0.151515
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e48e53ba04ff99bdd6227e182235f811ae1dc4ee
| 403
|
py
|
Python
|
src/microbit/spi-tof-master.py
|
romilly/multi-VL53L0X
|
80cf0d82d93ceae9c54acb967c24a1bf8deb5e3a
|
[
"MIT"
] | null | null | null |
src/microbit/spi-tof-master.py
|
romilly/multi-VL53L0X
|
80cf0d82d93ceae9c54acb967c24a1bf8deb5e3a
|
[
"MIT"
] | null | null | null |
src/microbit/spi-tof-master.py
|
romilly/multi-VL53L0X
|
80cf0d82d93ceae9c54acb967c24a1bf8deb5e3a
|
[
"MIT"
] | null | null | null |
from microbit import *
import struct
from time import sleep
SENSORS = 2
def spi_read(sensor):
pin16.write_digital(0) # Chip select
ibuffer = struct.pack('<B', sensor)
spi.write(ibuffer)
result = spi.read(1)
pin16.write_digital(1) # Chip select off
return result
spi.init(baudrate=100000)
while True:
for i in range(SENSORS):
print(i, ord(spi_read(i)))
sleep(0.1)
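# Protocol note (inferred from spi_read above; the slave side must mirror it):
# pull pin16 low as chip select, write one byte carrying the sensor index,
# read one byte back as the measurement, then release pin16.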
| 21.210526
| 45
| 0.652605
| 62
| 403
| 4.177419
| 0.564516
| 0.081081
| 0.131274
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058065
| 0.230769
| 403
| 19
| 46
| 21.210526
| 0.777419
| 0.066998
| 0
| 0
| 0
| 0
| 0.005348
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.1875
| 0
| 0.3125
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e48f98c85bda6baa0cc86d71b689b55e8122a390
| 16,653
|
py
|
Python
|
hasher-matcher-actioner/hmalib/models.py
|
isabella232/ThreatExchange
|
0d07a800bbf25d8541f40b828e2dfd377395af9b
|
[
"BSD-3-Clause"
] | null | null | null |
hasher-matcher-actioner/hmalib/models.py
|
isabella232/ThreatExchange
|
0d07a800bbf25d8541f40b828e2dfd377395af9b
|
[
"BSD-3-Clause"
] | 1
|
2021-04-19T10:20:43.000Z
|
2021-04-19T10:20:43.000Z
|
hasher-matcher-actioner/hmalib/models.py
|
isabella232/ThreatExchange
|
0d07a800bbf25d8541f40b828e2dfd377395af9b
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import datetime
import typing as t
import json
from dataclasses import dataclass, field
from mypy_boto3_dynamodb.service_resource import Table
from boto3.dynamodb.conditions import Attr, Key
"""
Data transfer object classes to be used with dynamodbstore
Classes in this module should implement methods `to_dynamodb_item(self)` and
`to_sqs_message(self)`
"""
class DynamoDBItem:
CONTENT_KEY_PREFIX = "c#"
SIGNAL_KEY_PREFIX = "s#"
TYPE_PREFIX = "type#"
def write_to_table(self, table: Table):
table.put_item(Item=self.to_dynamodb_item())
def to_dynamodb_item(self) -> t.Dict:
raise NotImplementedError
@staticmethod
def get_dynamodb_content_key(c_id: str) -> str:
return f"{DynamoDBItem.CONTENT_KEY_PREFIX}{c_id}"
@staticmethod
def get_dynamodb_signal_key(source: str, s_id: t.Union[str, int]) -> str:
return f"{DynamoDBItem.SIGNAL_KEY_PREFIX}{source}#{s_id}"
@staticmethod
def remove_signal_key_prefix(key: str, source: str) -> str:
return key[len(DynamoDBItem.SIGNAL_KEY_PREFIX) + len(source) + 1 :]
@staticmethod
def get_dynamodb_type_key(type: str) -> str:
return f"{DynamoDBItem.TYPE_PREFIX}{type}"
@staticmethod
def remove_content_key_prefix(key: str) -> str:
return key[len(DynamoDBItem.CONTENT_KEY_PREFIX) :]
class SNSMessage:
def to_sns_message(self) -> str:
raise NotImplementedError
@classmethod
def from_sns_message(cls, message: str) -> "SNSMessage":
raise NotImplementedError
@dataclass
class SignalMetadataBase(DynamoDBItem):
"""
Base for signal metadata.
'ds' refers to dataset which for the time being is
equivalent to collab or privacy group (and in the long term could map to bank)
"""
DATASET_PREFIX = "ds#"
signal_id: t.Union[str, int]
ds_id: str
updated_at: datetime.datetime
signal_source: str
signal_hash: str # duplicated with PDQMatchRecord; kept in both for now to help with debugging/testing
tags: t.List[str] = field(default_factory=list)
@staticmethod
def get_dynamodb_ds_key(ds_id: str) -> str:
return f"{SignalMetadataBase.DATASET_PREFIX}{ds_id}"
@dataclass
class PDQSignalMetadata(SignalMetadataBase):
"""
PDQ Signal metadata.
This object is designed to be an ~lookaside on some of the values used by
PDQMatchRecord for easier and more consistent updating by the syncer and UI.
Otherwise updates to a signal's metadata would require updating every
associated PDQMatchRecord. TODO: For now there will be some overlap between
this object and PDQMatchRecord.
"""
SIGNAL_TYPE = "pdq"
def to_dynamodb_item(self) -> dict:
return {
"PK": self.get_dynamodb_signal_key(self.signal_source, self.signal_id),
"SK": self.get_dynamodb_ds_key(self.ds_id),
"SignalHash": self.signal_hash,
"SignalSource": self.signal_source, # defaults to 'te' in the current pipeline
"UpdatedAt": self.updated_at.isoformat(),
"HashType": self.SIGNAL_TYPE,
"Tags": self.tags,
}
@classmethod
def get_from_signal(
cls,
table: Table,
signal_id: t.Union[str, int],
signal_source: str,
) -> t.List["PDQSignalMetadata"]:
items = table.query(
KeyConditionExpression=Key("PK").eq(
cls.get_dynamodb_signal_key(signal_source, signal_id)
)
& Key("SK").begins_with(cls.DATASET_PREFIX),
ProjectionExpression="PK, ContentHash, UpdatedAt, SK, SignalSource, SignalHash, Tags",
FilterExpression=Attr("HashType").eq(cls.SIGNAL_TYPE),
).get("Items", [])
return cls._result_items_to_metadata(items)
@classmethod
def _result_items_to_metadata(
cls,
items: t.List[t.Dict],
) -> t.List["PDQSignalMetadata"]:
return [
PDQSignalMetadata(
signal_id=cls.remove_signal_key_prefix(
item["PK"], item["SignalSource"]
),
ds_id=item["SK"][len(cls.DATASET_PREFIX) :],
updated_at=datetime.datetime.fromisoformat(item["UpdatedAt"]),
signal_source=item["SignalSource"],
signal_hash=item["SignalHash"],
tags=item["Tags"],
)
for item in items
]
@dataclass
class Label:
key: str
value: str
def to_dynamodb_dict(self) -> dict:
return {"K": self.key, "V": self.value}
@classmethod
def from_dynamodb_dict(cls, d: dict) -> "Label":
return cls(d["K"], d["V"])
def __eq__(self, another_label: object) -> bool:
if not isinstance(another_label, Label):
return NotImplemented
return self.key == another_label.key and self.value == another_label.value
@dataclass
class PDQRecordBase(DynamoDBItem):
"""
Abstract base record for PDQ-related items.
"""
SIGNAL_TYPE = "pdq"
content_id: str
content_hash: str
updated_at: datetime.datetime
def to_dynamodb_item(self) -> dict:
raise NotImplementedError
def to_sqs_message(self) -> dict:
raise NotImplementedError
@classmethod
def get_from_time_range(
cls, table: Table, start_time: str = None, end_time: str = None
) -> t.List:
raise NotImplementedError
@dataclass
class PipelinePDQHashRecord(PDQRecordBase):
"""
Successful execution at the hasher produces this record.
"""
quality: int
def to_dynamodb_item(self) -> dict:
return {
"PK": self.get_dynamodb_content_key(self.content_id),
"SK": self.get_dynamodb_type_key(self.SIGNAL_TYPE),
"ContentHash": self.content_hash,
"Quality": self.quality,
"UpdatedAt": self.updated_at.isoformat(),
"HashType": self.SIGNAL_TYPE,
}
def to_sqs_message(self) -> dict:
return {
"hash": self.content_hash,
"type": self.SIGNAL_TYPE,
"key": self.content_id,
}
@classmethod
def get_from_content_id(
cls, table: Table, content_key: str
) -> t.Optional["PipelinePDQHashRecord"]:
items = HashRecordQuery.from_content_key(
table,
cls.get_dynamodb_content_key(content_key),
cls.get_dynamodb_type_key(cls.SIGNAL_TYPE),
)
records = cls._result_items_to_records(items)
return None if not records else records[0]
@classmethod
def get_from_time_range(
cls, table: Table, start_time: str = None, end_time: str = None
) -> t.List["PipelinePDQHashRecord"]:
items = HashRecordQuery.from_time_range(
table, cls.get_dynamodb_type_key(cls.SIGNAL_TYPE), start_time, end_time
)
return cls._result_items_to_records(items)
@classmethod
def _result_items_to_records(
cls,
items: t.List[t.Dict],
) -> t.List["PipelinePDQHashRecord"]:
return [
PipelinePDQHashRecord(
content_id=item["PK"][len(cls.CONTENT_KEY_PREFIX) :],
content_hash=item["ContentHash"],
updated_at=datetime.datetime.fromisoformat(item["UpdatedAt"]),
quality=item["Quality"],
)
for item in items
]
@dataclass
class PDQMatchRecord(PDQRecordBase):
"""
Successful execution at the matcher produces this record.
"""
signal_id: t.Union[str, int]
signal_source: str
signal_hash: str
labels: t.List[Label] = field(default_factory=list)
def to_dynamodb_item(self) -> dict:
return {
"PK": self.get_dynamodb_content_key(self.content_id),
"SK": self.get_dynamodb_signal_key(self.signal_source, self.signal_id),
"ContentHash": self.content_hash,
"UpdatedAt": self.updated_at.isoformat(),
"SignalHash": self.signal_hash,
"SignalSource": self.signal_source,
"GSI1-PK": self.get_dynamodb_signal_key(self.signal_source, self.signal_id),
"GSI1-SK": self.get_dynamodb_content_key(self.content_id),
"HashType": self.SIGNAL_TYPE,
"GSI2-PK": self.get_dynamodb_type_key(self.SIGNAL_TYPE),
"Labels": [x.to_dynamodb_dict() for x in self.labels],
}
def to_sqs_message(self) -> dict:
# TODO add method for when matches are added to a sqs
raise NotImplementedError
@classmethod
def get_from_content_id(
cls, table: Table, content_id: str
) -> t.List["PDQMatchRecord"]:
items = MatchRecordQuery.from_content_key(
table,
cls.get_dynamodb_content_key(content_id),
cls.SIGNAL_KEY_PREFIX,
cls.SIGNAL_TYPE,
)
return cls._result_items_to_records(items)
@classmethod
def get_from_signal(
cls, table: Table, signal_id: t.Union[str, int], signal_source: str
) -> t.List["PDQMatchRecord"]:
items = MatchRecordQuery.from_signal_key(
table,
cls.get_dynamodb_signal_key(signal_source, signal_id),
cls.SIGNAL_TYPE,
)
return cls._result_items_to_records(items)
@classmethod
def get_from_time_range(
cls, table: Table, start_time: str = None, end_time: str = None
) -> t.List["PDQMatchRecord"]:
items = MatchRecordQuery.from_time_range(
table, cls.get_dynamodb_type_key(cls.SIGNAL_TYPE), start_time, end_time
)
return cls._result_items_to_records(items)
@classmethod
def _result_items_to_records(
cls,
items: t.List[t.Dict],
) -> t.List["PDQMatchRecord"]:
return [
PDQMatchRecord(
content_id=cls.remove_content_key_prefix(item["PK"]),
content_hash=item["ContentHash"],
updated_at=datetime.datetime.fromisoformat(item["UpdatedAt"]),
signal_id=cls.remove_signal_key_prefix(
item["SK"], item["SignalSource"]
),
signal_source=item["SignalSource"],
signal_hash=item["SignalHash"],
labels=[Label.from_dynamodb_dict(x) for x in item["Labels"]],
)
for item in items
]
class HashRecordQuery:
DEFAULT_PROJ_EXP = "PK, ContentHash, UpdatedAt, Quality"
@classmethod
def from_content_key(
cls, table: Table, content_key: str, hash_type_key: str = None
) -> t.List[t.Dict]:
"""
Given a content key (and optional hash type), return its content hash (for that type).
Written to be agnostic to hash type so it can be reused by other types of 'HashRecord's.
"""
if hash_type_key is None:
key_con_exp = Key("PK").eq(content_key) & Key("SK").begins_with(
DynamoDBItem.SIGNAL_KEY_PREFIX
)
else:
key_con_exp = Key("PK").eq(content_key) & Key("SK").eq(hash_type_key)
return table.query(
KeyConditionExpression=key_con_exp,
ProjectionExpression=cls.DEFAULT_PROJ_EXP,
).get("Items", [])
@classmethod
def from_time_range(
cls, table: Table, hash_type: str, start_time: str = None, end_time: str = None
) -> t.List[t.Dict]:
"""
Given a hash type and time range, give me all the hashes found for that type and time range
"""
if start_time is None:
start_time = datetime.datetime.min.isoformat()
if end_time is None:
end_time = datetime.datetime.max.isoformat()
return table.scan(
FilterExpression=Key("SK").eq(hash_type)
& Key("UpdatedAt").between(start_time, end_time),
ProjectionExpression=cls.DEFAULT_PROJ_EXP,
).get("Items", [])
class MatchRecordQuery:
"""
Written to be agnostic to hash type so it can be reused by other types of 'MatchRecord's.
"""
DEFAULT_PROJ_EXP = (
"PK, ContentHash, UpdatedAt, SK, SignalSource, SignalHash, Labels"
)
@classmethod
def from_content_key(
cls,
table: Table,
content_key: str,
source_prefix: str = DynamoDBItem.SIGNAL_KEY_PREFIX,
hash_type: str = None,
) -> t.List[t.Dict]:
"""
Given a content key (and optional hash type), give me its content hash (for that type).
"""
filter_exp = None
if hash_type is not None:
filter_exp = Attr("HashType").eq(hash_type)
return table.query(
KeyConditionExpression=Key("PK").eq(content_key)
& Key("SK").begins_with(source_prefix),
ProjectionExpression=cls.DEFAULT_PROJ_EXP,
FilterExpression=filter_exp,
).get("Items", [])
@classmethod
def from_signal_key(
cls,
table: Table,
signal_key: str,
hash_type: str = None,
) -> t.List[t.Dict]:
"""
Given a Signal ID/Key (and optional hash type), give me any content matches found
"""
filter_exp = None
if hash_type is not None:
filter_exp = Attr("HashType").eq(hash_type)
return table.query(
IndexName="GSI-1",
KeyConditionExpression=Key("GSI1-PK").eq(signal_key),
ProjectionExpression=cls.DEFAULT_PROJ_EXP,
FilterExpression=filter_exp,
).get("Items", [])
@classmethod
def from_time_range(
cls, table: Table, hash_type: str, start_time: str = None, end_time: str = None
) -> t.List[t.Dict]:
"""
Given a hash type and time range, give me all the matches found for that type and time range
"""
if start_time is None:
start_time = datetime.datetime.min.isoformat()
if end_time is None:
end_time = datetime.datetime.max.isoformat()
return table.query(
IndexName="GSI-2",
KeyConditionExpression=Key("GSI2-PK").eq(hash_type)
& Key("UpdatedAt").between(start_time, end_time),
ProjectionExpression=cls.DEFAULT_PROJ_EXP,
).get("Items", [])
@dataclass
class MatchMessage(SNSMessage):
"""
Captures a set of matches that will need to be processed. We create one
match message for a single content key. It is possible that a single content
hash matches multiple datasets. When it does, the entire set of matches are
forwarded together so that *one* appropriate action can be taken.
- `content_key`: A way for partners to refer uniquely to content on their
site
- `content_hash`: The hash generated for the content_key
"""
content_key: str
content_hash: str
match_details: t.List["DatasetMatchDetails"] = field(default_factory=list)
def to_sns_message(self) -> str:
return json.dumps(
{
"ContentKey": self.content_key,
"ContentHash": self.content_hash,
"MatchDetails": [x.to_dict() for x in self.match_details],
}
)
@classmethod
def from_sns_message(cls, message: str) -> "MatchMessage":
parsed = json.loads(message)
return cls(
parsed["ContentKey"],
parsed["ContentHash"],
[DatasetMatchDetails.from_dict(d) for d in parsed["MatchDetails"]],
)
@dataclass
class DatasetMatchDetails:
"""
Dataset fields:
- `banked_indicator_id`: Inside the bank, what's a unique way to refer to what
was matched against?
- `bank_id`: [optional][Defaults to 'threatexchange_all_collabs'] Which bank
did we fetch this banked_content from?
- `bank_source`: [optional][Defaults to 'api/threatexchange'] This is
forward looking, but potentially, we could have this be 'local', or
'api/some-other-api'
"""
banked_indicator_id: str
# source information, for now, it's okay to be hardcoded
# to threatexchange
bank_id: str = "threatexchange_all_collabs"
bank_source: str = "api/threatexchange"
def to_dict(self) -> dict:
return {
"BankedIndicatorId": self.banked_indicator_id,
"BankId": self.bank_id,
"BankSource": self.bank_source,
}
@classmethod
def from_dict(cls, d: dict) -> "DatasetMatchDetails":
return cls(
d["BankedIndicatorId"],
d["BankId"],
d["BankSource"],
)
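# A round-trip sketch for the SNS serialization (values are illustrative):
#
# msg = MatchMessage(
#     content_key="images/123.jpg",
#     content_hash="acbd18db4cc2f85cedef654fccc4a4d8",
#     match_details=[DatasetMatchDetails(banked_indicator_id="42")],
# )
# assert MatchMessage.from_sns_message(msg.to_sns_message()) == msg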
| 32.273256
| 110
| 0.623311
| 1,984
| 16,653
| 5.020665
| 0.147177
| 0.030117
| 0.015661
| 0.008031
| 0.52314
| 0.477562
| 0.399157
| 0.384098
| 0.327377
| 0.300371
| 0
| 0.000912
| 0.275986
| 16,653
| 515
| 111
| 32.335922
| 0.825247
| 0.1461
| 0
| 0.527624
| 0
| 0
| 0.085471
| 0.018143
| 0
| 0
| 0
| 0.003884
| 0
| 1
| 0.107735
| false
| 0
| 0.016575
| 0.049724
| 0.328729
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e494747ad6589e1234241f26ac62dacfe6cecd8c
| 998
|
py
|
Python
|
test/test_truss.py
|
deeepeshthakur/ddtruss
|
86aa945d577c6efe752099eee579386762942289
|
[
"MIT"
] | 1
|
2020-01-27T12:03:47.000Z
|
2020-01-27T12:03:47.000Z
|
test/test_truss.py
|
deeepeshthakur/ddtruss
|
86aa945d577c6efe752099eee579386762942289
|
[
"MIT"
] | null | null | null |
test/test_truss.py
|
deeepeshthakur/ddtruss
|
86aa945d577c6efe752099eee579386762942289
|
[
"MIT"
] | null | null | null |
import numpy as np
import pytest
from ddtruss import Truss, DataDrivenSolver
points = np.array([[0, 0], [1, 0], [0.5, 0.5], [2, 1]])
lines = np.array([[0, 2], [1, 2], [1, 3], [2, 3]], dtype=int)
truss = Truss(points, lines)
E = 1.962e11
A = [2e-4, 2e-4, 1e-4, 1e-4]
U_dict = {0: [0, 0], 1: [0, 0]}
F_dict = {3: [0, -9.81e3]}
u_ref = np.array(
[0, 0, 0, 0, 2.65165043e-4, 8.83883476e-5, 3.47902545e-3, -5.60034579e-3]
)
def test_truss():
u, *_ = truss.solve(A=A, E=E, U_dict=U_dict, F_dict=F_dict)
assert np.allclose(u, u_ref)
@pytest.mark.parametrize(
"n_data", [5000, 10000]
)
def test_data_driven_solver(n_data):
ddsolver = DataDrivenSolver(truss)
eps_max = 1.1e-3
eps = np.linspace(-eps_max, eps_max, n_data)
sig = E * eps
material_data = np.hstack([eps.reshape((-1, 1)), sig.reshape((-1, 1))])
ddsolver.load_material_data(material_data)
u, *_ = ddsolver.solve(A=A, U_dict=U_dict, F_dict=F_dict)
assert np.allclose(u, u_ref, rtol=1e-2)
| 24.95
| 77
| 0.621242
| 185
| 998
| 3.194595
| 0.318919
| 0.027073
| 0.060914
| 0.030457
| 0.155668
| 0.138748
| 0.138748
| 0.138748
| 0.138748
| 0.138748
| 0
| 0.129743
| 0.181363
| 998
| 39
| 78
| 25.589744
| 0.593635
| 0
| 0
| 0
| 0
| 0
| 0.006012
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 1
| 0.071429
| false
| 0
| 0.107143
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e49516ca8ad700f85017d9325736d77d5ccd8a3d
| 2,326
|
py
|
Python
|
PTO-yelp/Modules/attention_classifier.py
|
LegendTianjin/Point-Then-Operate
|
a6b0818343bc34c468738ab91ecea89dd03a9535
|
[
"Apache-2.0"
] | 50
|
2019-06-06T05:30:32.000Z
|
2021-11-18T07:24:36.000Z
|
PTO-yelp/Modules/attention_classifier.py
|
lancopku/Point-Then-Operate
|
1c04ec326b52fc65f97f5610a6f16f6e938d583e
|
[
"Apache-2.0"
] | 2
|
2019-08-30T09:49:26.000Z
|
2020-01-17T04:20:53.000Z
|
PTO-yelp/Modules/attention_classifier.py
|
ChenWu98/Point-Then-Operate
|
a6b0818343bc34c468738ab91ecea89dd03a9535
|
[
"Apache-2.0"
] | 7
|
2019-06-17T06:20:47.000Z
|
2020-10-26T03:19:44.000Z
|
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from utils.utils import gpu_wrapper
from Modules.subModules.attention import AttentionUnit
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack
class AttenClassifier(nn.Module):
def __init__(self, emb_dim, dim_h, n_layers, dropout, bi):
super(AttenClassifier, self).__init__()
self.emb_dim = emb_dim
self.n_layers = n_layers
self.dim_h = dim_h
self.dropout = dropout
self.n_dir = 2 if bi else 1
self.Encoder = nn.GRU(input_size=self.emb_dim,
hidden_size=self.dim_h,
num_layers=self.n_layers,
dropout=self.dropout,
bidirectional=bi)
self.Attention = AttentionUnit(query_dim=self.dim_h * self.n_dir,
key_dim=self.dim_h * self.n_dir,
atten_dim=self.dim_h)
self.MLP = nn.Sequential(nn.Linear(self.dim_h * self.n_dir, 1),
nn.Sigmoid())
def forward(self, inp, l, null_mask):
"""
:param inp: shape = (B, T, emb_dim)
:param l: sequence lengths for packing, sorted in decreasing order
:param null_mask: shape = (B, T)
:return:
"""
B = inp.shape[0]
T = inp.shape[1]
inp = inp.transpose(0, 1) # shape = (T, n_batch, emb_dim)
packed_emb = pack(inp, l)
outputs, h_n = self.Encoder(packed_emb) # h_n.shape = (n_layers * n_dir, n_batch, dim_h)
outputs = unpack(outputs, total_length=T)[0] # shape = (T, n_batch, dim_h * n_dir)
h_n = h_n.view(self.n_layers, self.n_dir, B, self.dim_h).transpose(1, 2).transpose(2, 3).contiguous().view(self.n_layers, B, -1)
# shape = (n_layers, n_batch, dim_h * n_dir)
h_n = h_n[-1, :, :] # shape = (n_batch, dim_h * n_dir)
context, att_weight = self.Attention(h_n,
outputs.transpose(0, 1),
null_mask) # (n_batch, dim_h * n_dir), (n_batch, T)
cls = self.MLP(context).squeeze(1) # shape = (n_batch, )
return cls, att_weight
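# A shape sketch (dimensions are illustrative; requires the in-repo
# AttentionUnit import above):
#
# clf = AttenClassifier(emb_dim=64, dim_h=32, n_layers=2, dropout=0.1, bi=True)
# inp = torch.randn(8, 20, 64)       # (B, T, emb_dim)
# l = [20] * 8                       # lengths for pack(), sorted descending
# null_mask = torch.zeros(8, 20)
# cls, att = clf(inp, l, null_mask)  # cls: (8,), att: (8, 20)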
| 41.535714
| 136
| 0.564488
| 328
| 2,326
| 3.759146
| 0.256098
| 0.045418
| 0.045418
| 0.040552
| 0.148418
| 0.136253
| 0.060016
| 0.029197
| 0.029197
| 0.029197
| 0
| 0.014715
| 0.328031
| 2,326
| 55
| 137
| 42.290909
| 0.774152
| 0.141445
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.238095
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4954d56f09841ccf54e7784967df8b418345b0e
| 569
|
py
|
Python
|
minion/parser.py
|
timofurrer/minion-ci
|
411d0ea6638fb37d7e170cc8c8c5815304cc9f5c
|
[
"MIT"
] | 49
|
2016-03-07T06:42:40.000Z
|
2021-03-06T02:43:02.000Z
|
minion/parser.py
|
timofurrer/minion-ci
|
411d0ea6638fb37d7e170cc8c8c5815304cc9f5c
|
[
"MIT"
] | 16
|
2016-03-08T07:20:52.000Z
|
2017-04-21T18:15:12.000Z
|
minion/parser.py
|
timofurrer/minion-ci
|
411d0ea6638fb37d7e170cc8c8c5815304cc9f5c
|
[
"MIT"
] | 9
|
2016-03-29T22:08:52.000Z
|
2021-06-16T16:29:30.000Z
|
"""
`minion-ci` is a minimalist, decentralized, flexible Continuous Integration Server for hackers.
This module contains the parser to parse the `minion.yml` file.
:copyright: (c) by Timo Furrer
:license: MIT, see LICENSE for details
"""
import yaml
from .errors import MinionError
def parse(path):
"""Parse the given minion.yml file"""
try:
with open(path) as minion_file:
config = yaml.safe_load(minion_file) # safe_load: yaml.load without a Loader is deprecated and unsafe
except OSError:
raise MinionError("No minion.yml config file found in repository")
return config
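A short usage sketch; the file path and the "command" key are hypothetical, since parse() returns whatever mapping the minion.yml file contains.

from minion.parser import parse
from minion.errors import MinionError

try:
    config = parse("minion.yml")
    print(config.get("command"))
except MinionError as err:
    print(err)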
| 24.73913
| 99
| 0.681898
| 75
| 569
| 5.146667
| 0.693333
| 0.069948
| 0.067358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.233743
| 569
| 22
| 100
| 25.863636
| 0.885321
| 0.462214
| 0
| 0
| 0
| 0
| 0.162455
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.222222
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e49b044e4f3bdfef09e6426d0ff3c5f755aa63ae
| 1,464
|
py
|
Python
|
bufflog/bufflog.py
|
bufferapp/python-bufflog
|
12d218dfb917419789c720fb1851a35708909810
|
[
"MIT"
] | null | null | null |
bufflog/bufflog.py
|
bufferapp/python-bufflog
|
12d218dfb917419789c720fb1851a35708909810
|
[
"MIT"
] | null | null | null |
bufflog/bufflog.py
|
bufferapp/python-bufflog
|
12d218dfb917419789c720fb1851a35708909810
|
[
"MIT"
] | 1
|
2021-02-08T12:53:43.000Z
|
2021-02-08T12:53:43.000Z
|
import structlog
import logging
import sys
import os
from structlog.processors import JSONRenderer
from structlog.stdlib import filter_by_level
from structlog.stdlib import add_log_level_number
from .datadog import tracer_injection
def rename_message_key(_, __, event_dict):
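    # structlog stores the log text under "event"; republish it as "message"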
event_dict["message"] = event_dict["event"]
event_dict.pop("event", None)
return event_dict
def increase_level_numbers(_, __, event_dict):
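    # republish structlog's level_number, scaled by 10, under the "level" key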
event_dict["level"] = event_dict["level_number"] * 10
event_dict.pop("level_number", None)
return event_dict
LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO")
def get_logger(name=None, datadog=False):
logging.basicConfig(
format="%(message)s",
stream=sys.stdout,
level=LOG_LEVEL,
)
processors = [
filter_by_level,
rename_message_key,
add_log_level_number,
increase_level_numbers,
structlog.stdlib.PositionalArgumentsFormatter(),
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
structlog.processors.UnicodeDecoder(),
JSONRenderer(),
]
if datadog:
processors.insert(0, tracer_injection)
structlog.configure(
context_class=dict,
logger_factory=structlog.stdlib.LoggerFactory(),
wrapper_class=structlog.stdlib.BoundLogger,
cache_logger_on_first_use=True,
processors=processors,
)
return structlog.get_logger(name)
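A brief usage sketch, assuming the module is importable as bufflog.bufflog; the logger name and keyword field are placeholders. get_logger() emits one JSON object per line on stdout, with the event text under "message" and a numeric "level" produced by the processors above.

from bufflog.bufflog import get_logger

logger = get_logger("checkout")
logger.info("order placed", order_id=1234)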
| 24.4
| 57
| 0.705601
| 165
| 1,464
| 5.945455
| 0.363636
| 0.091743
| 0.042813
| 0.050968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00258
| 0.205601
| 1,464
| 59
| 58
| 24.813559
| 0.840929
| 0
| 0
| 0.045455
| 0
| 0
| 0.047814
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068182
| false
| 0
| 0.181818
| 0
| 0.318182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e49cb572bd1c712b03397fca3826c3ed98801ce6
| 990
|
py
|
Python
|
templator.py
|
daren-thomas/template-system-example
|
248d2f78392be826f3223ee27e90c82feb70a17a
|
[
"MIT"
] | null | null | null |
templator.py
|
daren-thomas/template-system-example
|
248d2f78392be826f3223ee27e90c82feb70a17a
|
[
"MIT"
] | null | null | null |
templator.py
|
daren-thomas/template-system-example
|
248d2f78392be826f3223ee27e90c82feb70a17a
|
[
"MIT"
] | null | null | null |
"""
templator.py reads in an excel file and a template and outputs a file for each row
in the excel file, by substituting the template variables with the values in the columns.
This technique uses pandas to read the excel file into a DataFrame and the python format operator ``%``
to apply the values.
"""
import sys
import os
import pandas as pd
def main(template_file, excel_file):
# read in the template and the excel file
    with open(template_file, 'r') as template_handle:
        template = template_handle.read()
    variables = pd.read_excel(excel_file)
# loop over each row (note, one of the columns must be called "filename")
for row_number, values in variables.iterrows():
filename = values['filename']
with open(filename, 'w') as f:
f.write(template % values)
if __name__ == '__main__':
template_file = os.path.join(os.path.dirname(__file__), 'template.txt')
excel_file = os.path.join(os.path.dirname(__file__), 'variables.xls')
main(template_file, excel_file)
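The substitution step in isolation: pandas rows support the mapping protocol, so ``%(column)s`` placeholders pull values by column name. Names and values here are made up for illustration.

import pandas as pd

template = "Dear %(name)s, your score is %(score)s.\n"
row = pd.Series({"name": "Ada", "score": 42})
print(template % row)  # -> Dear Ada, your score is 42.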
| 34.137931
| 104
| 0.706061
| 150
| 990
| 4.486667
| 0.42
| 0.106984
| 0.053492
| 0.062407
| 0.166419
| 0.092125
| 0.092125
| 0.092125
| 0
| 0
| 0
| 0
| 0.19899
| 990
| 29
| 105
| 34.137931
| 0.848676
| 0.417172
| 0
| 0
| 0
| 0
| 0.075571
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.214286
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4a3bd3abdfaed582c987ca4af954c061d659067
| 24,952
|
py
|
Python
|
src/menus/user/Menu.py
|
stregea/TransactionTrackr
|
c38b99d56816becaa47a21400fb20c615d3483ef
|
[
"MIT"
] | 2
|
2021-07-02T19:49:24.000Z
|
2021-07-08T02:59:25.000Z
|
src/menus/user/Menu.py
|
stregea/TransactionTrackr
|
c38b99d56816becaa47a21400fb20c615d3483ef
|
[
"MIT"
] | null | null | null |
src/menus/user/Menu.py
|
stregea/TransactionTrackr
|
c38b99d56816becaa47a21400fb20c615d3483ef
|
[
"MIT"
] | null | null | null |
from objects.user.User import User
from objects.interface.dbconn import DB
from objects.user.Currency import get_currency_symbol
from objects.threads.UploadThread import UploadThread
import utils.globals as _globals
from utils.print import print_message, print_error
from utils.enums import Months, SettingsSelection, is_valid_month, month_string_to_enum
from utils.visualizer import visualizer, visualizer_helper
from utils.builders.folderbuilder import create_user_folder
from utils.exceptions import NoDataFound, NoTotalFound, InvalidMonth, InvalidYear, UserNotFound
from utils.dates.dates import get_dates, subtract_days
from utils.averager.averager import calculate_average
from utils.formatting.formatter import format_date_pretty, format_month_enum_to_string
from utils.generators.csv_generator import generate_transaction_files
from menus.user.Settings import Settings
def user_has_data(user: User) -> bool:
"""
Test to determine if a user has any data
:param user: The user to check.
:return: True if the user has data. False otherwise.
"""
# Determine if the user has any available data.
try:
user.get_earliest_transaction_date()
except Exception: # excepting NoDataFound here does not work for some reason?
print_error("No data is currently available.")
return False
return True
def is_valid_year(year_to_check: str) -> bool:
"""
Determine if the passed in year currently exists within the database.
:param year_to_check: The year to check.
:return: True if the year exists, false otherwise.
"""
year_is_valid = False
db = DB(_globals.DATABASE)
    years = db.fetchall("SELECT DISTINCT strftime('%Y', Date) FROM Transactions;")
db.close()
# search through all the years. If the year that was specified exists, set the flag to true.
for year in years:
if year_to_check == year[0]:
year_is_valid = True
break
return year_is_valid
def get_month_and_year() -> (Months, str):
"""
Prompt a user to enter a month and a year.
:raises InvalidMonth: Exception that is to be raised when user enters an invalid month.
:raises InvalidYear: Exception that is to be raised when user enters an invalid year.
:return: A Month enum and the year the user selected.
"""
month = input("Enter a month:\t")
month_enum = month_string_to_enum(month)
if is_valid_month(month_enum):
year = input("Enter a year:\t")
if is_valid_year(year):
return month_enum, year
else:
raise InvalidYear(year)
else:
raise InvalidMonth(month)
def get_year():
"""
Prompt a user to enter a year.
:raises InvalidYear: Exception that is to be raised if a user enters an invalid year.
:return: The year the user enters.
"""
year = input("Enter a year:\t")
if is_valid_year(year):
return year
raise InvalidYear(year)
def display_monthly_information(user: User, month: Months, year: str, show_console: bool = False,
show_visual: bool = False) -> None:
"""
Display information regarding the total money spent within a month.
:param user: The current user.
:param month: The month to get the information regarding how much was spent.
:param year: The year corresponding to the month.
:param show_console: Boolean to determine whether or not to display the information of the total spent
in a month to the console.
:param show_visual: Boolean to determine whether or not to display a visualization of the total spent in the month.
"""
try:
# Dictionary that contains the information about all of the transactions in a given month.
# The key is the day, the value is the total spent on that day.
transactions_dictionary = visualizer_helper.get_transactions_by_month(month, year, user.id)
# The total amount of money spent during the specified month.
total = visualizer_helper.get_monthly_total(month, year, user.id)
except (NoDataFound, NoTotalFound) as n:
print_error(n.message)
return
# List to hold the dollar values for each day.
dollars = []
# List to hold the labels that correspond to each day in the month that had a transaction.
day_labels = []
    # List to hold the labels that correspond to the dollar values for the transactions.
dollars_labels = []
# The type of currency the current user is using.
currency_symbol = get_currency_symbol(user.currency_id)
# The title to be displayed on the console and/or the visualization
title = f"Total spent in {format_month_enum_to_string(month)} {year}: {currency_symbol}{total:,}"
for date_key in transactions_dictionary:
day_labels.append(date_key)
# Sort the labels (YYYY-MM-DD - End of Month)
day_labels.sort()
# Add the dollar amount to the corresponding day index, then create a label for that day.
for day in day_labels:
value = round(float(transactions_dictionary[day]), 2)
dollars.append(value)
dollars_labels.append(f"{currency_symbol}{value:,}")
# Display each day and then display the total spent for the month
if show_console: # TODO: change to function to prevent duplicated code.
for i, day in enumerate(day_labels):
print_message(f"{day}:\t{dollars_labels[i]}")
print_message(f"{title}")
# Display a visualization of the money spent in the month specified
if show_visual:
visualizer.display_bar_chart(title=title,
list_of_values=dollars,
list_of_labels=day_labels,
currency_labels=dollars_labels)
def display_yearly_information(user: User, year: str, show_console: bool = False, show_visual: bool = False) -> None:
"""
Display information regarding the total money spent within a certain year.
:param user: The current user.
:param year: The year to gather information from.
:param show_console: Boolean to determine whether or not to display the information of the total spent
in a year to the console.
:param show_visual: Boolean to determine whether or not to display a visualization of the total spent in the month.
"""
try:
# Dictionary to contain the total transaction values per month given the year
transactions_dictionary = visualizer_helper.get_transactions_by_year(year, user.id)
# The total amount of money spent during the specified year.
total = visualizer_helper.get_yearly_total(year, user.id)
except (NoDataFound, NoTotalFound) as n:
print_error(n.message)
return
# List to hold the dollar values for each month.
dollars = []
# List to hold the labels that correspond to the total number of transactions in each month.
month_labels = []
    # List to hold the labels that correspond to the dollar values for the transactions.
dollars_labels = []
# The type of currency the current user is using.
currency_symbol = get_currency_symbol(user.currency_id)
# The title to be displayed on the console and/or the visualization
title = f"Total Spent in {year}: {currency_symbol}{total:,}"
for month_name in transactions_dictionary:
value = round(float(transactions_dictionary[month_name]), 2)
dollars.append(value)
dollars_labels.append(f"{currency_symbol}{value:,}")
# Not formatting month name here since the string is already in the key format for the months dictionary.
month_labels.append(_globals.months[month_name])
if show_console:
for i, month in enumerate(month_labels):
print_message(f"{month}: {dollars_labels[i]}")
print_message(f"{title}")
if show_visual:
visualizer.display_bar_chart(title=title,
list_of_values=dollars,
list_of_labels=month_labels,
currency_labels=dollars_labels)
def display_information_all_time(user: User, show_console: bool = False, show_visual: bool = False) -> None:
"""
Display information regarding the total money spent all time.
:param user: The current user.
:param show_console: Boolean to determine whether or not to display the information of the total spent
in a year to the console.
:param show_visual: Boolean to determine whether or not to display a visualization of the total spent in the month.
"""
try:
transactions_dictionary = visualizer_helper.get_transactions_all_time(user.id)
total = visualizer_helper.get_total_all_time(user.id)
except NoDataFound as ndf:
print_error(ndf.message)
return
# List to hold the total dollar values for each available year.
dollars = []
# List to hold the labels that correspond to the total number of transactions in each year.
year_labels = []
    # List to hold the labels that correspond to the dollar values for the transactions.
dollars_labels = []
# The type of currency the current user is using.
currency_symbol = get_currency_symbol(user.currency_id)
# The title to be displayed on the console and/or the visualization
title = f"Total Spent All Time: {currency_symbol}{total:,}"
for year in transactions_dictionary:
value = round(float(transactions_dictionary[year]), 2)
dollars.append(value)
dollars_labels.append(f"{currency_symbol}{value:,}")
year_labels.append(year)
if show_console:
for i, year in enumerate(year_labels):
print_message(f"{year}: {dollars_labels[i]}")
print_message(f"{title}")
if show_visual:
visualizer.display_bar_chart(title=title,
list_of_values=dollars,
list_of_labels=year_labels,
currency_labels=dollars_labels)
class Menu:
"""
This class serves as the main menu for the user.
"""
def __init__(self, user: User, show_console=False, show_visual=False) -> None:
self.user = user
self.show_console = show_console
self.show_visual = show_visual
def display_money_spent_per_month(self) -> None:
"""
Menu option that will display information based on a specified month. (Menu option #1).
"""
# Determine if the user has any available data.
if not user_has_data(self.user):
return
try:
month, year = get_month_and_year()
except (InvalidMonth, InvalidYear) as e:
print_error(e.message)
return
display_monthly_information(user=self.user,
month=month,
year=year,
show_console=self.show_console,
show_visual=self.show_visual)
def display_money_spent_per_year(self) -> None:
"""
Menu option that will display yearly information. (Menu option #2).
"""
# Determine if the user has any available data.
if not user_has_data(self.user):
return
try:
year = get_year()
except InvalidYear as iy:
print_error(iy.message)
return
display_yearly_information(self.user, year, show_console=self.show_console, show_visual=self.show_visual)
def display_money_spent_all_time(self) -> None:
"""
        Menu option that will display the total money spent all time. (Menu option #3).
"""
# Determine if the user has any available data.
if not user_has_data(self.user):
return
display_information_all_time(self.user, show_console=self.show_console, show_visual=self.show_visual)
def display_daily_average_spending_per_month(self) -> None:
"""
Menu option that will display the user's daily average spending over a specified month
as well as a Pie Chart to show where most of the money was spent.
"""
# Determine if the user has any available data.
if not user_has_data(self.user):
return
try:
month, year = get_month_and_year()
except (InvalidMonth, InvalidYear) as e:
print_error(e.message)
return
# Get the starting and end dates for the user-specified month.
dates = get_dates(month, year)
# Get the user's specified currency
currency_symbol = get_currency_symbol(self.user.currency_id)
try:
# get the monthly average spent per day.
monthly_average = calculate_average(start_date=dates[0],
end_date=dates[1],
user_id=self.user.id,
exception_type="monthly average")
except NoDataFound as ndf:
print_message(ndf.message)
return
print_message(
f"Your daily average spending for {format_month_enum_to_string(month)} {year} is: {currency_symbol}{monthly_average:,}")
def display_daily_average_spending_per_year(self) -> None:
"""
Menu option that will display the user's daily average spending over a specified year.
"""
# Determine if the user has any available data.
if not user_has_data(self.user):
return
try:
year = get_year()
except InvalidYear as iy:
print_error(iy.message)
return
# Get the user's specified currency
currency_symbol = get_currency_symbol(self.user.currency_id)
start_date = f"{year}-01-01"
end_date = f"{year}-12-31"
dates = (start_date, end_date)
try:
yearly_average = calculate_average(start_date=dates[0],
end_date=dates[1],
user_id=self.user.id,
exception_type="yearly average")
except NoDataFound as ndf:
print_error(ndf.message)
return
print_message(f"Your daily average spending for {year} is: {currency_symbol}{yearly_average:,}")
def display_daily_average_spending_all_time(self) -> None:
"""
Menu option that will display the user's all-time daily average spending.
"""
# Determine if the user has any available data.
if not user_has_data(self.user):
return
try:
start_date = self.user.get_earliest_transaction_date()
end_date = self.user.get_latest_transaction_date()
all_time_average = calculate_average(start_date=start_date,
end_date=end_date,
user_id=self.user.id,
exception_type="all time average")
except NoDataFound as ndf:
print_error(ndf.message)
return
currency_symbol = get_currency_symbol(self.user.currency_id)
print_message(f"Your all-time daily average spending is: {currency_symbol}{all_time_average:,}")
def display_daily_average_over_n_days(self) -> None:
"""
Menu option that will allow a user to determine the user's daily spending average over a certain number of days.
"""
# Determine if the user has any available data.
if not user_has_data(self.user):
return
number_of_days = input("How many days would you like to go back?\t")
if number_of_days.isdigit():
try:
earliest_date = self.user.get_earliest_transaction_date()
end_date = self.user.get_latest_transaction_date()
start_date = subtract_days(starting_date=end_date, days=int(number_of_days))
# if a user selects a day that is older than their oldest transaction,
# select the date associated with their oldest transaction.
if start_date < earliest_date:
formatted_start_date = format_date_pretty(start_date)
formatted_earliest_date = format_date_pretty(earliest_date)
print_message(
f"The selected date is older than {formatted_earliest_date}, which is the oldest known transaction date.")
print_error(
f"Using '{formatted_earliest_date}' to calculate the average instead of '{formatted_start_date}'...")
start_date = earliest_date
daily_average = calculate_average(start_date=start_date,
end_date=end_date,
user_id=self.user.id,
exception_type="all time average")
except NoDataFound as ndf:
print_error(ndf.message)
return
currency_symbol = get_currency_symbol(self.user.currency_id)
formatted_start_date = format_date_pretty(start_date)
formatted_end_date = format_date_pretty(end_date)
print_message(f"Using the latest data from: {formatted_end_date}...")
print_message(
f"Your daily average spending between {formatted_start_date} - {formatted_end_date} is: {currency_symbol}{daily_average:,}")
elif number_of_days.startswith("-") and number_of_days[1:].isdigit(): # if a negative number was entered.
print_error("Please enter a positive integer.")
else:
print_error("Invalid number of days to go back.")
def display_daily_average_over_period(self):
# Determine if the user has any available data.
if not user_has_data(self.user):
return
try:
earliest_date = self.user.get_earliest_transaction_date()
latest_date = self.user.get_latest_transaction_date()
except NoDataFound as ndf:
print_error(ndf.message)
return
try:
print_message("Select your first month/year:")
month1, year1 = get_month_and_year()
print_message("Select your second month/year:")
month2, year2 = get_month_and_year()
except (InvalidMonth, InvalidYear) as e:
print_error(e.message)
return
starting_date = get_dates(month1, year1)[0]
ending_date = get_dates(month2, year2)[1]
# If the starting date is greater than the ending date, swap the dates by:
# changing the starting_date to the beginning of the second selected month,
# then change the ending_date to the end of the first month.
if starting_date > ending_date:
starting_date = get_dates(month2, year2)[0]
ending_date = get_dates(month1, year1)[1]
# If starting or ending day < earliest date
if starting_date < earliest_date:
starting_date = earliest_date
if ending_date < earliest_date:
ending_date = earliest_date
# If starting date or ending date > latest date, use the latest date and the option
if starting_date > latest_date:
starting_date = latest_date
if ending_date > latest_date:
ending_date = latest_date
try:
average_over_period = calculate_average(start_date=starting_date,
end_date=ending_date,
user_id=self.user.id)
except NoDataFound as ndf:
print(ndf.message)
return
formatted_start_date = format_date_pretty(starting_date)
formatted_end_date = format_date_pretty(ending_date)
currency_symbol = get_currency_symbol(self.user.currency_id)
print_message(
f"Your daily average spending between {formatted_start_date} - {formatted_end_date} is: {currency_symbol}{average_over_period:,}")
def display_total_number_of_transactions(self):
# Determine if the user has any available data.
if not user_has_data(self.user):
return
try:
earliest_date = format_date_pretty(self.user.get_earliest_transaction_date())
latest_date = format_date_pretty(self.user.get_latest_transaction_date())
print_message(
f"You have made {self.user.get_total_transactions()} total transactions between {earliest_date} and {latest_date}")
except NoDataFound as ndf:
print_error(ndf.message)
return
def run(self) -> bool:
"""
Run the driver to this object.
"""
# show where money is most spent + include all time total (merchant) -- maybe implement as pie chart
        # include option to perform queries -- make it dynamic
print_message(f"--- Welcome, {self.user.username}! ---")
print_message("Enter a number for the following prompt:")
running = True
while running:
print_message("1.\tShow money spent per month")
print_message("2.\tShow money spent per year")
print_message("3.\tShow money spent all time")
print_message("4.\tTotal daily average spending per month")
print_message("5.\tTotal daily average spending per year")
print_message("6.\tTotal daily average spending all time")
print_message("7.\tTotal daily average spending over number of days.")
print_message("8.\tTotal daily average spending over period.")
print_message("9.\tDisplay total number of transactions")
print_message("10.\tUpload Data")
print_message("11.\tSettings")
print_message("12.\tSign out")
print_message("13.\tQuit")
response = input()
if response == '1': # display a chart of the total money spent in a given month.
self.display_money_spent_per_month()
elif response == '2': # display a chart of the total money spent in a given year.
self.display_money_spent_per_year()
elif response == '3': # display a chart of all of the money spent.
self.display_money_spent_all_time()
elif response == '4': # display the daily average spending that a user has made.
self.display_daily_average_spending_per_month()
elif response == '5': # display the daily average spending that a user has made.
self.display_daily_average_spending_per_year()
elif response == '6': # display the daily average spending that a user has made.
self.display_daily_average_spending_all_time()
elif response == '7': # display the daily average spending that a user has made.
self.display_daily_average_over_n_days()
elif response == '8': # display the daily average spending that a user has made.
self.display_daily_average_over_period()
elif response == '9': # display the daily average spending that a user has made.
self.display_total_number_of_transactions()
elif response == '10' or response.lower() == 'upload': # upload data to the database from the 'upload' directory.
# UserNotFound Exception should never be caught here, but it is still being handled.
try:
create_user_folder(user=self.user)
UploadThread(self.user).run()
except UserNotFound as unf:
print_error(unf.message)
elif response == '11': # User settings
selection = Settings(self.user).run()
# immediately exit the user menu if the user had selected to delete their account.
if selection.value == SettingsSelection.DELETE_ACCOUNT.value:
running = False
elif response == '12' or response.lower() == 'signout':
self.user.sign_out()
running = False # set running to false, then return to sign in screen.
elif response == '13' or response.lower() == 'quit':
self.user.sign_out()
exit(0) # exit the main program successfully.
else:
print_error("Unknown Command.")
return not running
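A hypothetical wiring sketch; in the real application the sign-in screen supplies the User object, so the constructor call below is a placeholder. run() loops on the console prompt until the user signs out, quits, or deletes the account.

from objects.user.User import User
from menus.user.Menu import Menu

user = User(...)  # placeholder: supplied by the sign-in flow
menu = Menu(user, show_console=True, show_visual=False)
menu.run()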
| 42.726027
| 142
| 0.631412
| 3,134
| 24,952
| 4.835992
| 0.108807
| 0.020586
| 0.03299
| 0.008709
| 0.56763
| 0.512338
| 0.474927
| 0.442531
| 0.424584
| 0.402481
| 0
| 0.00406
| 0.299094
| 24,952
| 583
| 143
| 42.799314
| 0.862542
| 0.27084
| 0
| 0.431755
| 0
| 0
| 0.121734
| 0.036176
| 0
| 0
| 0
| 0.001715
| 0
| 1
| 0.050139
| false
| 0
| 0.041783
| 0
| 0.178273
| 0.147632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4a69e3428e588c7d00739ddb17751edb51f6451
| 1,717
|
py
|
Python
|
website/CookieHelper.py
|
sousic/flask.huny.kr
|
53a8f5af1fa63b290a4e97278a86328758e97d43
|
[
"MIT"
] | null | null | null |
website/CookieHelper.py
|
sousic/flask.huny.kr
|
53a8f5af1fa63b290a4e97278a86328758e97d43
|
[
"MIT"
] | null | null | null |
website/CookieHelper.py
|
sousic/flask.huny.kr
|
53a8f5af1fa63b290a4e97278a86328758e97d43
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
import base64
from functools import wraps
import pyaes
from flask import request
from werkzeug.utils import redirect
from website.domain.UserVO import UserVO
class CookieHelper(object):
def init_app(self, app):
self.app = app
self.app.cookie_helper = self
    def SetCookies(self, response, user):
        aes = pyaes.AESModeOfOperationCTR(self.app.config['SECRET_KEY'])
        ciphertext = aes.encrypt(user.to_json())
        response.set_cookie(self.app.config['COOKIE_NAME'], base64.b16encode(ciphertext))
    def GetCookies(self, request):
        if request is not None:
            encoded = request.cookies.get(self.app.config['COOKIE_NAME'])
            if encoded is not None:
                cookies = base64.b16decode(encoded)
                aes = pyaes.AESModeOfOperationCTR(self.app.config['SECRET_KEY'])
                decrypted = aes.decrypt(cookies)
                return decrypted
            else:
                return None
        else:
            return None
    # extract only the user ID from the cookie payload
def GetUserID(self, request):
c = self.GetCookies(request)
user = UserVO()
if c is not None:
user.to_object(c)
return user.user_id
    # cookie-check decorator
def CheckCookie(self, f):
@wraps(f)
def _check_cookie_(*arg, **kwargs):
cookie = self.GetCookies(request)
if cookie is None:
return redirect('/')
return f(*arg, **kwargs)
return _check_cookie_
    # login check
def IsLogin(self, request):
cookie = self.GetCookies(request)
if cookie is None:
return False
else:
return True
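A hypothetical Flask wiring sketch; the key, cookie name, and route are placeholders. SECRET_KEY must be a bytes key of a length pyaes accepts (16, 24, or 32 bytes).

from flask import Flask, request
from website.CookieHelper import CookieHelper

app = Flask(__name__)
app.config['SECRET_KEY'] = b'0123456789abcdef'  # 16-byte AES key (placeholder)
app.config['COOKIE_NAME'] = 'huny_user'

cookie_helper = CookieHelper()
cookie_helper.init_app(app)

@app.route('/mypage')
@cookie_helper.CheckCookie
def mypage():
    return "user id: %s" % cookie_helper.GetUserID(request)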
| 28.616667
| 90
| 0.594059
| 196
| 1,717
| 5.122449
| 0.367347
| 0.048805
| 0.051793
| 0.065737
| 0.241036
| 0.195219
| 0.195219
| 0.195219
| 0.093626
| 0
| 0
| 0.009362
| 0.315667
| 1,717
| 60
| 91
| 28.616667
| 0.845106
| 0.033197
| 0
| 0.23913
| 0
| 0
| 0.025966
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.152174
| false
| 0
| 0.130435
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4a6e1bb797c7875ed388c77bf15d0c26b3189cb
| 3,652
|
py
|
Python
|
export_resized_ios_assets.py
|
Tubbebubbe/gimp-plugins
|
11221ded072d8d3001202f30fda266e0cccd3a36
|
[
"MIT"
] | 4
|
2016-08-03T18:20:59.000Z
|
2020-05-24T04:38:47.000Z
|
export_resized_ios_assets.py
|
Tubbebubbe/gimp-plugins
|
11221ded072d8d3001202f30fda266e0cccd3a36
|
[
"MIT"
] | null | null | null |
export_resized_ios_assets.py
|
Tubbebubbe/gimp-plugins
|
11221ded072d8d3001202f30fda266e0cccd3a36
|
[
"MIT"
] | 2
|
2017-10-23T08:23:36.000Z
|
2020-05-24T04:38:57.000Z
|
#!/usr/bin/env python
"""
export_resized_ios_assets
Gimp plugin to export image to icon files usable on iOS.
Author:
-------
Tobias Blom, Techne Development AB <tobias.blom@techne-dev.se>
Installation:
-------------
Under Mac OS X, copy this file to ~/Library/Application Support/GIMP/x.x/plug-ins and
make it executable (chmod 755)
Usage:
------
1. Create your image at a resolution four times what you want on a
standard iOS device, twice the size on a retina device.
GIMP image Plug-in output
-------------------------------------------------
80 x 80 @ 144 dpi | Icon 20 x 20 @ 72 dpi
| Icon 40 x 40 @ 144 dpi
| Icon 60 x 60 @ 144 dpi
-------------------------------------------------
120 x 120 @ 144 dpi | Icon 30 x 30 @ 72 dpi
| Icon 60 x 60 @ 144 dpi
| Icon 90 x 90 @ 144 dpi
-------------------------------------------------
2. Run the plug-in (from the File menu) and select the output
directory.
License:
--------
Released under the MIT License
Copyright (c) 2013-2017 Techne Development AB
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from gimpfu import *
import os
def gprint(text):
pdb.gimp_message(text)
return
def resize_and_save_image(timg, tdrawable, scale_factor, dpi, directory, filename):
img = timg.duplicate()
img.merge_visible_layers(0)
width = timg.width * scale_factor
height = timg.height * scale_factor
fullpath = os.path.join(directory, filename)
pdb.gimp_image_merge_visible_layers(img, CLIP_TO_IMAGE)
pdb.gimp_image_scale(img, width, height)
pdb.gimp_image_set_resolution(img, dpi, dpi)
pdb.file_png_save(img, img.layers[0], fullpath, filename, 0, 9, 1, 1, 1, 1, 1)
def plugin_main(img, drawable, directory):
basename = os.path.basename(img.filename[0:-4])
resize_and_save_image(img, drawable, 0.25, 72, directory, basename + ".png")
resize_and_save_image(img, drawable, 0.5, 144, directory, basename + "@2x.png")
resize_and_save_image(img, drawable, 0.75, 144, directory, basename + "@3x.png")
register(
"export_resized_ios_assets",
"Exports iOS assets at 50% and 75% (144 dpi) and 25% (72 dpi) size",
"Exports iOS assets at 50% and 75% (144 dpi) and 25% (72 dpi) size",
"Techne Development AB",
"Copyright (c) 2013-2017 Techne Development AB. Released under MIT License.",
"2017",
"<Image>/File/Export as iOS assets...",
"RGB*, GRAY*",
[
(PF_DIRNAME, "directory", "Output directory", os.path.expanduser("~")),
],
[],
plugin_main)
main()
| 33.504587
| 85
| 0.659639
| 529
| 3,652
| 4.478261
| 0.404537
| 0.020262
| 0.032081
| 0.030393
| 0.124947
| 0.124947
| 0.124947
| 0.065851
| 0.037991
| 0.037991
| 0
| 0.045833
| 0.211391
| 3,652
| 108
| 86
| 33.814815
| 0.776736
| 0.597207
| 0
| 0.057143
| 0
| 0.057143
| 0.235978
| 0.0171
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0
| 0.057143
| 0
| 0.171429
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4a93421928eb84ea60e2492daf9f320c6c9d564
| 8,417
|
py
|
Python
|
site/office/compline.py
|
scottBowles/dailyoffice2019
|
ca750ac77316d247ca7a7a820e085f9968fbc8ff
|
[
"MIT"
] | 19
|
2020-01-12T23:57:22.000Z
|
2022-03-30T16:35:17.000Z
|
site/office/compline.py
|
scottBowles/dailyoffice2019
|
ca750ac77316d247ca7a7a820e085f9968fbc8ff
|
[
"MIT"
] | 59
|
2020-01-13T00:45:27.000Z
|
2022-02-20T04:10:05.000Z
|
site/office/compline.py
|
scottBowles/dailyoffice2019
|
ca750ac77316d247ca7a7a820e085f9968fbc8ff
|
[
"MIT"
] | 7
|
2020-01-21T21:12:03.000Z
|
2021-10-24T01:15:50.000Z
|
import datetime
from django.utils.functional import cached_property
from django.utils.safestring import mark_safe
from office.offices import Office, OfficeSection
from psalter.utils import get_psalms
class Compline(Office):
name = "Compline"
office = "compline"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.description = "Office: {}, Date: {}, Commemoration: {}, Prayer Book: {}".format(
"Compline (Bedtime Prayer)",
self.get_formatted_date_string(),
self.date.primary_evening.name,
"The Book of Common Prayer (2019), Anglican Church in North America",
)
self.start_time = datetime.datetime.combine(self.date.date, datetime.time())
self.start_time = self.start_time.replace(minute=0, hour=20, second=0)
self.end_time = self.start_time.replace(minute=59, hour=23, second=59)
@cached_property
def modules(self):
return [
(ComplineHeading(self.date, self.office_readings), "office/heading.html"),
(ComplineCommemorationListing(self.date, self.office_readings), "office/commemoration_listing.html"),
(ComplineOpening(self.date, self.office_readings), "office/compline_opening.html"),
(ComplineConfession(self.date, self.office_readings), "office/compline_confession.html"),
(ComplineInvitatory(self.date, self.office_readings), "office/compline_invitatory.html"),
(ComplinePsalms(self.date, self.office_readings), "office/minor_office_psalms.html"),
(ComplineScripture(self.date, self.office_readings), "office/minor_office_scripture.html"),
(ComplinePrayers(self.date, self.office_readings), "office/compline_prayers.html"),
(ComplineCanticle(self.date, self.office_readings), "office/compline_canticle.html"),
(ComplineConclusion(self.date, self.office_readings), "office/compline_conclusion.html"),
]
class ComplineHeading(OfficeSection):
@cached_property
def data(self):
return {"heading": mark_safe("Compline"), "calendar_date": self.date}
class ComplineCommemorationListing(OfficeSection):
@cached_property
def data(self):
return {
"day": self.date,
"evening": True,
"heading": "This Nights's Commemoration{}".format("s" if len(self.date.all) > 1 else ""),
"commemorations": self.date.all_evening,
}
class ComplineOpening(OfficeSection):
@cached_property
def data(self):
return {}
class ComplineConfession(OfficeSection):
@cached_property
def data(self):
return {"heading": "Confession of Sin"}
class ComplineInvitatory(OfficeSection):
@cached_property
def data(self):
return {
"alleluia": self.date.evening_season.name != "Lent" and self.date.evening_season.name != "Holy Week",
"heading": "Invitatory",
}
class ComplinePsalms(OfficeSection):
@cached_property
def data(self):
return {"heading": "The Psalms", "psalms": get_psalms("4,31:1-6,91,134")}
class ComplineScripture(OfficeSection):
def get_scripture(self):
if self.date.date.weekday() in [0, 4]:
return {
"sentence": "You, O Lord, are in the midst of us, and we are called by your name; do not leave us.",
"citation": "JEREMIAH 14:9",
}
if self.date.date.weekday() in [1, 5]:
return {
"sentence": "Come to me, all who labor and are heavy laden, and I will give you rest. Take my yoke upon you, and learn from me, for I am gentle and lowly in heart, and you will find rest for your souls. For my yoke is easy, and my burden is light.",
"citation": "MATTHEW 11:28-30",
}
if self.date.date.weekday() in [2, 6]:
return {
"sentence": "Now may the God of peace who brought again from the dead our Lord Jesus, the great shepherd of the sheep, by the blood of the eternal covenant, equip you with everything good that you may do his will, working in us that which is pleasing in his sight, through Jesus Christ, to whom be glory forever and ever. Amen.",
"citation": "HEBREWS 13:20-21",
}
if self.date.date.weekday() in [3]:
return {
"sentence": "Be sober-minded; be watchful. Your adversary the devil prowls around like a roaring lion, seeking someone to devour. Resist him, firm in your faith.",
"citation": "1 PETER 5:8-9",
}
@cached_property
def data(self):
return {"heading": "The Reading", "sentence": self.get_scripture()}
class ComplinePrayers(OfficeSection):
collects = [
(
"A Collect for Evening",
"Visit this place, O Lord, and drive far from it all snares of the enemy; let your holy angels dwell with us to preserve us in peace; and let your blessing be upon us always; through Jesus Christ our Lord.",
),
(
"A Collect for Aid Against Peril",
"Lighten our darkness, we beseech you, O Lord; and by your great mercy defend us from all perils and dangers of this night; for the love of your only Son, our Savior Jesus Christ.",
),
(
"A Collect for Evening",
"Be present, O merciful God, and protect us through the hours of this night, so that we who are wearied by the changes and chances of this life may rest in your eternal changelessness; through Jesus Christ our Lord.",
),
(
"A Collect for Evening",
"Look down, O Lord, from your heavenly throne, illumine this night with your celestial brightness, and from the children of light banish the deeds of darkness; through Jesus Christ our Lord.",
),
(
"A Collect for Saturdays",
"We give you thanks, O God, for revealing your Son Jesus Christ to us by the light of his resurrection: Grant that as we sing your glory at the close of this day, our joy may abound in the morning as we celebrate the Paschal mystery; through Jesus Christ our Lord.",
),
(
"A Collect for Mission",
"Keep watch, dear Lord, with those who work, or watch, or weep this night, and give your angels charge over those who sleep. Tend the sick, Lord Christ; give rest to the weary, bless the dying, soothe the suffering, pity the afflicted, shield the joyous; and all for your love’s sake.",
),
(
"A Collect for Evening",
"O God, your unfailing providence sustains the world we live in and the life we live: Watch over those, both night and day, who work while others sleep, and grant that we may never forget that our common life depends upon each other’s toil; through Jesus Christ our Lord.",
),
]
def get_collects(self):
if self.date.date.weekday() in [6]: # Sunday
return self.collects[0], self.collects[1], self.collects[5]
if self.date.date.weekday() in [0]: # Monday
return self.collects[2], self.collects[3], self.collects[5]
if self.date.date.weekday() in [1]: # Tuesday
return self.collects[0], self.collects[2], self.collects[5]
if self.date.date.weekday() in [2]: # Wednesday
return self.collects[1], self.collects[3], self.collects[6]
if self.date.date.weekday() in [3]: # Thursday
return self.collects[0], self.collects[3], self.collects[5]
if self.date.date.weekday() in [4]: # Friday
return self.collects[1], self.collects[2], self.collects[6]
if self.date.date.weekday() in [5]: # Saturday
return self.collects[2], self.collects[4], self.collects[5]
@cached_property
def data(self):
return {"heading": "The Prayers", "collects": self.get_collects()}
class ComplineCanticle(OfficeSection):
@cached_property
def data(self):
return {
"heading": "Nunc Dimittis",
"subheading": "The Song of Simeon",
"alleluia": self.date.evening_season.name == "Eastertide",
}
class ComplineConclusion(OfficeSection):
@cached_property
def data(self):
return {"alleluia": self.date.evening_season.name == "Eastertide"}
| 43.386598
| 345
| 0.640489
| 1,088
| 8,417
| 4.89614
| 0.305147
| 0.046555
| 0.027032
| 0.028909
| 0.337526
| 0.327389
| 0.267881
| 0.171766
| 0.074338
| 0.047306
| 0
| 0.012604
| 0.255317
| 8,417
| 193
| 346
| 43.611399
| 0.837269
| 0.006653
| 0
| 0.271523
| 0
| 0.072848
| 0.412546
| 0.033042
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092715
| false
| 0
| 0.033113
| 0.072848
| 0.364238
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4a98810c99783995caf35d9ff70ccf375552008
| 1,735
|
py
|
Python
|
src/tide_constituents/water_level_prediction.py
|
slawler/SI_2019_Coastal
|
4064d323bc62ce2f47a7af41b9a11ea5538ad181
|
[
"MIT"
] | 1
|
2020-03-13T07:51:44.000Z
|
2020-03-13T07:51:44.000Z
|
src/tide_constituents/water_level_prediction.py
|
cheginit/SI_2019_Coastal
|
4064d323bc62ce2f47a7af41b9a11ea5538ad181
|
[
"MIT"
] | null | null | null |
src/tide_constituents/water_level_prediction.py
|
cheginit/SI_2019_Coastal
|
4064d323bc62ce2f47a7af41b9a11ea5538ad181
|
[
"MIT"
] | 1
|
2020-03-13T14:44:57.000Z
|
2020-03-13T14:44:57.000Z
|
import tide_constituents as tc
from py_noaa import coops
import pandas as pd
import numpy as np
import tappy
start = '20180201'
end = '20180228'
interval = 1
start = pd.to_datetime(start)
end = pd.to_datetime(end)
d = start
w, t, p, r = [], [], [], []
while d < end:
start_ = d
end_ = start_ + pd.DateOffset(interval)
end_ = end_ if end_ < end else end
water_level, tide = tc.get_water_levels(start_.strftime('%Y%m%d'),
end_.strftime('%Y%m%d'),
-88.2, 30.4)
water_level = water_level.water_level.astype('float')
    prediction = 0.0 if 'Z0' not in tide.speed_dict else tide.speed_dict['Z0']
    # sum_signals is defined in the tappy module, so qualify the call
    prediction += tappy.sum_signals(tide.key_list, tide.dates, tide.speed_dict, tide.r, tide.phase)
    residual = water_level - prediction
    w.append(water_level)
    p.append(prediction)
    r.append(residual)  # keep the residual series alongside the observations
    d = end_
water_level = pd.concat(w).to_frame()
water_level.columns = ['observation']
water_level['prediction'] = np.hstack(p)
data = tc.get_tides('20180101', '20181231', -88.2, 30.4)
wl = data.predicted_wl.copy()
grouped = wl.groupby(pd.Grouper(freq='M'))
def f(group):
return pd.DataFrame({'original': group, 'demeaned': group - group.mean()})
wl_demeaned = grouped.apply(f)
min_month = wl_demeaned.rolling(30).min().groupby(pd.Grouper(freq='M')).last()
max_month = wl_demeaned.rolling(30).max().groupby(pd.Grouper(freq='M')).last()
monthly_minmax = min_month.copy()
monthly_minmax['high'] = max_month['demeaned']
monthly_minmax = monthly_minmax[['demeaned', 'high']]
monthly_minmax.columns = ['low', 'high']
monthly_minmax['range'] = monthly_minmax.high - monthly_minmax.low
monthly_minmax.sort_values('range')
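The monthly demeaning step in isolation, on synthetic data: grouping by a monthly Grouper applies the function once per calendar month, so "demeaned" is each sample minus the mean of its own month.

import numpy as np
import pandas as pd

idx = pd.date_range("2018-01-01", periods=240, freq="6H")
wl = pd.Series(np.sin(np.linspace(0, 12, 240)), index=idx)
demeaned = wl.groupby(pd.Grouper(freq="M")).apply(
    lambda g: pd.DataFrame({"original": g, "demeaned": g - g.mean()})
)
print(demeaned.head())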
| 30.438596
| 93
| 0.663977
| 251
| 1,735
| 4.394422
| 0.386454
| 0.081596
| 0.035358
| 0.054397
| 0.107888
| 0.045331
| 0
| 0
| 0
| 0
| 0
| 0.037324
| 0.181556
| 1,735
| 57
| 94
| 30.438596
| 0.739437
| 0
| 0
| 0
| 0
| 0
| 0.077189
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023256
| false
| 0
| 0.116279
| 0.023256
| 0.162791
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4ae21080507e35b553b7b372118c5c586495e00
| 7,867
|
py
|
Python
|
main/make_gradsamplingbasedexact_mesh.py
|
tttor/nbwpg
|
271718362cf0cd810c7ea0cd9726e77276947e58
|
[
"MIT"
] | null | null | null |
main/make_gradsamplingbasedexact_mesh.py
|
tttor/nbwpg
|
271718362cf0cd810c7ea0cd9726e77276947e58
|
[
"MIT"
] | null | null | null |
main/make_gradsamplingbasedexact_mesh.py
|
tttor/nbwpg
|
271718362cf0cd810c7ea0cd9726e77276947e58
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse, os, sys, pickle
import numpy as np, pathos.multiprocessing as mp, torch
import gym_util.common_util as cou, polnet as pn, util_bwopt as u
from collections import defaultdict
from poleval_pytorch import get_rpi_s, get_Ppi_ss, get_ppisteady_s, get_Qsa
def main():
arg = parse_arg()
cfg = u.load_cfg(arg.cfg, arg)
wxmat, wymat = u.get_wxwymesh(cfg)
print(cfg); print('wxmat.shape', wxmat.shape)
# print('sorted(np.unique(wxmat).tolist())', sorted(np.unique(wxmat).tolist()))
print('making arg_generator...')
nrow, ncol = wxmat.shape
arg_generator = ({**cfg, **{'init_param_x': wxmat[i, j], \
'init_param_y': wymat[i, j], 'ij': (i, j)}} \
for i in range(nrow) for j in range(ncol))
print('making grad_b samplingbased expression exactly mesh...')
log_list = []
if arg.ncpu==1:
for i, cfg_i in enumerate(arg_generator):
# if not(cfg_i['init_param_x']==0. and cfg_i['init_param_y']==5.):
# continue
print('i {}/{} ij {} wx {} wy {}'.format(i+1, nrow*ncol, cfg_i['ij'],
cfg_i['init_param_x'], cfg_i['init_param_y']))
finalinfo = get_gradbias_samplingbased_exactly(cfg_i)
log_list.append(finalinfo)
else:
pool = mp.ProcessingPool(ncpus=cfg['ncpu'])
log_list = pool.map(get_gradbias_samplingbased_exactly, arg_generator)
print('meshdata...')
meshdata = defaultdict(dict)
for log in log_list:
for k,v in log.items():
if k=='ij':
continue
meshdata[k][log['ij']] = v
meshdata['wxmat'] = wxmat; meshdata['wymat'] = wymat; meshdata['cfg'] = cfg
meshdata = dict(meshdata)
print('writing...')
envid_short = u.get_shortenvid(cfg['envid'])
datadir = os.path.join(log_list[0]['assetdir'], envid_short, 'data')
os.makedirs(datadir, exist_ok=True)
tag = ['meshdata_gradbias_samplingbased_exactly',
'res{:.2f}'.format(cfg['resolution']), cfg['polnet']['mode']]
fname = u.make_stamp(tag, cfg['timestamp']) + '.pkl'
with open(os.path.join(datadir, fname), 'wb') as f:
pickle.dump(meshdata, f)
def get_gradbias_samplingbased_exactly(arg):
log = defaultdict(list); log['ij'] = arg['ij']
sfx = arg['polnet']['state_feature_extractor_id']
env = cou.make_single_env(arg['envid'], arg['seed'])
nS, nA = env.nS, env.nA; nA_list = env.nA_list
Psas = torch.tensor(env.get_Psas()).double()
Rsa = torch.tensor(env.get_Rsa()).double()
s0 = env.reset() # assume to be deterministic
allstatefeature = torch.from_numpy(env.get_allstatefeature(sfx))
log['assetdir'] = os.path.join(env.dpath, 'asset')
PolicyNetwork = pn.policynetclass_dict[arg['polnet']['mode']]
pi_net = PolicyNetwork(nA_list); pi_net.double()
n_param = sum([i.numel() for i in pi_net.parameters()])
init_param = {pi_net.weight_x_name: arg['init_param_x'],
pi_net.weight_y_name: arg['init_param_y']}
for n, p in pi_net.named_parameters():
p.data.fill_(init_param[n])
p.data = p.data.double()
# policy evaluation
PI = pn.policy_net2tabular(allstatefeature, pi_net, requires_grad=True)
rpi = get_rpi_s(Rsa, PI); Ppi = get_Ppi_ss(Psas, PI)
ppi_steady = get_ppisteady_s(Ppi, PI)
Ppi_steady = torch.vstack([ppi_steady]*nS) # unichain: same rows
Zpi = torch.inverse(torch.eye(nS) - Ppi + Ppi_steady) # fundamental matrix
Hpi = torch.matmul(Zpi, torch.eye(nS) - Ppi_steady) # deviation matrix
g = torch.dot(ppi_steady, rpi) # gain
b = torch.matmul(Hpi, rpi) # bias
Q = get_Qsa(g, b, Psas, Rsa, nA_list) # action value
# Exact via autograd
grad_b = torch.autograd.grad(b[s0], pi_net.parameters(),
allow_unused=False, create_graph=True, retain_graph=True)
grad_b = torch.vstack(grad_b).squeeze()
grad_b_np = grad_b.detach().numpy()
grad_g = torch.autograd.grad(g, pi_net.parameters(),
allow_unused=False, create_graph=False, retain_graph=True)
grad_g = torch.vstack(grad_g).squeeze()
# premix part
tmix = None; tmix_rtol = env.tmix_cfg['rtol']; tmix_atol = env.tmix_cfg['atol']
premix = torch.zeros(n_param).double() # accumulator for premix terms
for t in range(env.tmax_xep):
Ppi_pwr = torch.matrix_power(Ppi, t)
premix_t = torch.zeros(n_param).double()
for s in range(nS):
for a in range(nA_list[s]):
pi = pi_net(torch.from_numpy(env.get_statefeature([s], sfx)))
logprob_a = pi.log_prob(torch.tensor([a]))
grad_logpi = torch.autograd.grad(logprob_a, pi_net.parameters(),
allow_unused=False, create_graph=False, retain_graph=True)
grad_logpi = torch.vstack(grad_logpi).squeeze(dim=u.feature_dimth)
premix_t += Ppi_pwr[s0, s]*PI[s, a]*Q[s, a]*grad_logpi
        premix_t -= grad_g  # subtract grad_g from each timestep's term!
premix_t_norm = torch.linalg.norm(premix_t, ord=None)
# check here so that we can assert `premix_t_norm`
if torch.allclose(Ppi_steady[s0, :], Ppi_pwr[s0, :], rtol=tmix_rtol, atol=tmix_atol):
tmix = t; log['tmix'] = tmix # specific to s0
# premix diminishing check at mixing
assert torch.allclose(premix_t_norm, torch.zeros(1).double(),
rtol=float(arg['premix_diminishing_rtol']), \
atol=float(arg['premix_diminishing_atol'])), \
premix_t_norm.item()
break
else:
premix += premix_t # premix so far
log['premix_angerr'].append(u.get_angular_err(premix.detach().numpy(), grad_b_np))
log['premix_normerr'].append(torch.linalg.norm(premix - grad_b, ord=None).item())
# print(t, 'premix', premix.data)
assert tmix is not None # ensure env.tmax_xep is long enough, bigger than tmix
# print('tmix', tmix)
# postmix part
postmix = torch.zeros(n_param).double()
for s in range(nS):
for a in range(nA_list[s]):
grad_qsa = torch.autograd.grad(Q[s, a], pi_net.parameters(),
allow_unused=False, create_graph=False, retain_graph=True)
grad_qsa = torch.vstack(grad_qsa).squeeze()
postmix += Ppi_steady[s0, s]*PI[s, a]*grad_qsa
postmix += grad_g # involving: plus grad g!
log['postmix_normerr'] = torch.linalg.norm(postmix - grad_b, ord=None).item()
log['postmix_angerr'] = u.get_angular_err(postmix.detach().numpy(), grad_b_np)
# total
prepostmix = premix + postmix
prepostmix_normerr = torch.linalg.norm(prepostmix - grad_b, ord=None)
prepostmix_angerr = u.get_angular_err(prepostmix.detach().numpy(), grad_b_np)
log['prepostmix_angerr'] = prepostmix_angerr
log['prepostmix_normerr'] = prepostmix_normerr.item()
assert torch.isfinite(prepostmix).all()
# print('postmix', postmix.data)
# print('prepostmix', prepostmix.data)
# print('grad_b', grad_b.data)
assert torch.allclose(grad_b, prepostmix,
rtol=float(arg['gradbias_cmp_rtol']), atol=float(arg['gradbias_cmp_atol'])), \
prepostmix_normerr.data
return dict(log)
def parse_arg():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--cfg', help='cfg filepath', type=str, default=None, required=True)
parser.add_argument('--ncpu', help='number of cpu', type=int, default=None, required=True)
parser.add_argument('--repo', help='repo dirpath list', type=str, nargs='+', default=None, required=True)
parser.add_argument('--custom', help='list of (key: value) items', type=str, nargs='+', default=[])
arg = parser.parse_args()
arg.cfg = arg.cfg.replace('file://','')
return arg
if __name__ == '__main__':
main()
| 45.473988
| 109
| 0.645862
| 1,116
| 7,867
| 4.341398
| 0.253584
| 0.016512
| 0.01548
| 0.010733
| 0.175439
| 0.105263
| 0.096594
| 0.071827
| 0.063158
| 0.063158
| 0
| 0.002568
| 0.208084
| 7,867
| 172
| 110
| 45.738372
| 0.77512
| 0.096479
| 0
| 0.066176
| 0
| 0
| 0.097754
| 0.01568
| 0
| 0
| 0
| 0
| 0.029412
| 1
| 0.022059
| false
| 0
| 0.036765
| 0
| 0.073529
| 0.044118
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4b72c3c2f5a5bbfee4b0bb9f47cf02969cbd82b
| 31,394
|
py
|
Python
|
plotoptix/tkoptix.py
|
robertsulej/plotoptix
|
628694351791c7fb8cd631a6efe6cc0fd7d9f4f8
|
[
"libtiff",
"MIT"
] | 307
|
2019-04-03T10:51:41.000Z
|
2022-03-28T05:35:09.000Z
|
plotoptix/tkoptix.py
|
robertsulej/plotoptix
|
628694351791c7fb8cd631a6efe6cc0fd7d9f4f8
|
[
"libtiff",
"MIT"
] | 27
|
2019-05-11T08:53:32.000Z
|
2022-02-07T22:43:21.000Z
|
plotoptix/tkoptix.py
|
robertsulej/plotoptix
|
628694351791c7fb8cd631a6efe6cc0fd7d9f4f8
|
[
"libtiff",
"MIT"
] | 21
|
2019-08-29T21:50:23.000Z
|
2022-03-03T05:21:15.000Z
|
"""
Tkinter UI for PlotOptiX raytracer.
https://github.com/rnd-team-dev/plotoptix/blob/master/LICENSE.txt
Have a look at examples on GitHub: https://github.com/rnd-team-dev/plotoptix.
"""
import logging
import numpy as np
import tkinter as tk
from PIL import Image, ImageTk
from ctypes import byref, c_float, c_uint
from typing import List, Tuple, Optional, Union
from plotoptix.enums import *
from plotoptix._load_lib import PLATFORM
from plotoptix.npoptix import NpOptiX
class TkOptiX(NpOptiX):
"""Tkinter based UI for PlotOptiX. Derived from :class:`plotoptix.NpOptiX`.
    Summary of mouse and key actions:
- rotate camera eye around the target: hold and drag left mouse button
- rotate camera target around the eye: hold and drag right mouse button
- zoom out/in (change camera field of view): hold shift + left mouse button and drag up/down
- move camera eye backward/forward: hold shift + right mouse button and drag up/down
- change focus distance in "depth of field" cameras: hold ctrl + left mouse button and drag up/down
- change aperture radius in "depth of field" cameras: hold ctrl + right mouse button and drag up/down
- focus at an object: hold ctrl + double-click left mouse button
- select an object: double-click left mouse button (info on terminal output)
Note: functions with the names ``_gui_*`` can be used from the
GUI thread (Tk event loop) only.
Parameters
----------
src : string or dict, optional
Scene description, file name or dictionary. Empty scene is prepared
if the default ``None`` value is used.
on_initialization : callable or list, optional
Callable or list of callables to execute upon starting the raytracing
thread. These callbacks are executed on the main thread.
on_scene_compute : callable or list, optional
Callable or list of callables to execute upon starting the new frame.
Callbacks are executed in a thread parallel to the raytracing.
on_rt_completed : callable or list, optional
Callable or list of callables to execute when the frame raytracing
is completed (execution may be paused with pause_compute() method).
Callbacks are executed in a thread parallel to the raytracing.
on_launch_finished : callable or list, optional
Callable or list of callables to execute when the frame raytracing
is completed. These callbacks are executed on the raytracing thread.
on_rt_accum_done : callable or list, optional
Callable or list of callables to execute when the last accumulation
frame is finished. These callbacks are executed on the raytracing thread.
width : int, optional
Pixel width of the raytracing output. Default value is half of the
screen width.
height : int, optional
Pixel height of the raytracing output. Default value is half of the
screen height.
start_now : bool, optional
Open the GUI window and start raytracing thread immediately. If set
to ``False``, then user should call ``start()`` or ``show()`` method.
Default is ``False``.
devices : list, optional
List of selected devices, with the primary device at index 0. Empty list
is default, resulting with all compatible devices selected for processing.
log_level : int or string, optional
Log output level. Default is ``WARN``.
"""
def __init__(self,
src: Optional[Union[str, dict]] = None,
on_initialization = None,
on_scene_compute = None,
on_rt_completed = None,
on_launch_finished = None,
on_rt_accum_done = None,
width: int = -1,
height: int = -1,
start_now: bool = False,
devices: List = [],
log_level: Union[int, str] = logging.WARN) -> None:
"""TkOptiX constructor
"""
# pass all arguments, except start_now - we'll do that later
super().__init__(
src=src,
on_initialization=on_initialization,
on_scene_compute=on_scene_compute,
on_rt_completed=on_rt_completed,
on_launch_finished=on_launch_finished,
on_rt_accum_done=on_rt_accum_done,
width=width, height=height,
start_now=False, # do not start yet
devices=devices,
log_level=log_level)
# save initial values to set size of Tk window on startup
self._ini_width = width
self._ini_height = height
self._dummy_rgba = np.ascontiguousarray(np.zeros((8, 8, 4), dtype=np.uint8))
if PLATFORM == "Windows":
dpi_scale = self._optix.get_display_scaling()
self._logger.info("DPI scaling: %d", dpi_scale)
if dpi_scale != 1:
self._logger.warn("DPI setting may cause blurred raytracing output, see this answer")
self._logger.warn("for the solution https://stackoverflow.com/a/52599951/10037996:")
self._logger.warn("set python.exe and pythonw.exe files Properties -> Compatibility")
self._logger.warn("-> Change high DPI settings -> check Override high DPI scaling")
self._logger.warn("behaviour, select Application in the drop-down menu.")
if self._is_scene_created and start_now:
self._logger.info("Starting TkOptiX window and raytracing thread.")
self.start()
###############################################################
# For matplotlib users convenience.
def show(self) -> None:
"""Start raytracing thread and open the GUI window.
Convenience method to call :meth:`plotoptix.NpOptiX.start`.
        Actions provided with the ``on_initialization`` parameter of the TkOptiX
        constructor are executed by this method on the main thread, before
        the raytracing thread is started and the GUI window opens.
"""
self.start()
def _run_event_loop(self):
"""Override NpOptiX's method for running the UI event loop.
Configure the GUI window properties and events, prepare image
to display raytracing output.
"""
# setup Tk window #############################################
self._root = tk.Tk()
screen_width = self._root.winfo_screenwidth()
screen_height = self._root.winfo_screenheight()
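        # A non-positive initial size means "use half of the screen"; otherwise it
        # is reset to None so resize() below keeps the size already set in __init__.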
if self._ini_width <= 0: self._ini_width = int(screen_width / 2)
else: self._ini_width = None
if self._ini_height <= 0: self._ini_height = int(screen_height / 2)
else: self._ini_height = None
self.resize(self._ini_width, self._ini_height)
self._mouse_from_x = 0
self._mouse_from_y = 0
self._mouse_to_x = 0
self._mouse_to_y = 0
self._left_mouse = False
self._right_mouse = False
self._any_mouse = False
self._ctrl_key = False
self._shift_key = False
self._any_key = False
self._selection_handle = -1
self._selection_index = -1
self._fixed_size = None
self._image_scale = 1.0
self._image_at = (0, 0)
self._root.title("R&D PlotOptiX")
self._root.protocol("WM_DELETE_WINDOW", self._gui_quit_callback)
self._canvas = tk.Canvas(self._root, width=self._width, height=self._height)
self._canvas.grid(column=0, row=0, columnspan=3, sticky="nsew")
self._canvas.pack_propagate(0)
self._canvas.bind("<Configure>", self._gui_configure)
self._canvas.bind('<Motion>', self._gui_motion)
self._canvas.bind('<B1-Motion>', self._gui_motion_pressed)
self._canvas.bind('<B3-Motion>', self._gui_motion_pressed)
self._canvas.bind("<Button-1>", self._gui_pressed_left)
self._canvas.bind("<Button-3>", self._gui_pressed_right)
self._canvas.bind("<ButtonRelease-1>", self._gui_released_left)
self._canvas.bind("<ButtonRelease-3>", self._gui_released_right)
self._canvas.bind("<Double-Button-1>", self._gui_doubleclick_left)
self._canvas.bind("<Double-Button-3>", self._gui_doubleclick_right)
self._root.bind_all("<KeyPress>", self._gui_key_pressed)
self._root.bind_all("<KeyRelease>", self._gui_key_released)
self._canvas.bind("<<LaunchFinished>>", self._gui_update_content)
self._canvas.bind("<<ApplyUiEdits>>", self._gui_apply_scene_edits)
self._canvas.bind("<<CloseScene>>", self._gui_quit_callback)
self._status_main_text = tk.StringVar(value="Selection: camera")
self._status_main = tk.Label(self._root, textvariable=self._status_main_text, bd=1, relief=tk.SUNKEN, anchor=tk.W)
self._status_main.grid(column=0, row=1, sticky="ew")
self._status_action_text = tk.StringVar(value="")
self._status_action = tk.Label(self._root, textvariable=self._status_action_text, width=70, bd=1, relief=tk.SUNKEN, anchor=tk.W)
self._status_action.grid(column=1, row=1, sticky="ew")
self._status_fps_text = tk.StringVar(value="FPS")
self._status_fps = tk.Label(self._root, textvariable=self._status_fps_text, width=16, bd=1, relief=tk.SUNKEN, anchor=tk.W)
self._status_fps.grid(column=2, row=1, sticky="ew")
self._root.rowconfigure(0, weight=1)
self._root.columnconfigure(0, weight=1)
self._logger.info("Tkinter widgets ready.")
self._logger.info("Couple scene to the output window...")
with self._padlock:
if self._img_rgba is not None:
pil_img = Image.fromarray(self._img_rgba, mode="RGBX")
else:
pil_img = Image.fromarray(self._dummy_rgba, mode="RGBX")
self._tk_img = ImageTk.PhotoImage(pil_img)
self._img_id = self._canvas.create_image(0, 0, image=self._tk_img, anchor=tk.NW)
###############################################################
# start event loop ############################################
self._logger.info("Start UI event loop...")
self._is_started = True
self._update_req = False
self._root.mainloop()
###############################################################
def close(self) -> None:
"""Stop the raytracing thread, release resources.
Raytracing cannot be restarted after this method is called.
See Also
--------
:meth:`plotoptix.NpOptiX.close`
"""
if not self._is_closed:
self._optix.break_launch()
self._canvas.event_generate("<<CloseScene>>", when="head")
else:
self._logger.warn("UI already closed.")
def _gui_quit_callback(self, *args):
super().close()
self._root.quit()
def _get_image_xy(self, wnd_x, wnd_y):
if self._fixed_size is None: return wnd_x, wnd_y
else:
x = int((wnd_x - self._image_at[0]) / self._image_scale)
y = int((wnd_y - self._image_at[1]) / self._image_scale)
return x, y
def _get_hit_at(self, x, y):
c_x = c_float()
c_y = c_float()
c_z = c_float()
c_d = c_float()
if self._optix.get_hit_at(x, y, byref(c_x), byref(c_y), byref(c_z), byref(c_d)):
return c_x.value, c_y.value, c_z.value, c_d.value
else: return 0, 0, 0, 0
def _gui_get_object_at(self, x, y):
c_handle = c_uint()
c_index = c_uint()
c_prim = c_uint()
if self._optix.get_object_at(x, y, byref(c_handle), byref(c_index), byref(c_prim)):
return c_handle.value, c_index.value, c_prim.value
else:
return None, None, None
def _gui_motion(self, event):
if not (self._any_mouse or self._any_key):
x, y = self._get_image_xy(event.x, event.y)
handle, index, prim = self._gui_get_object_at(x, y)
if (handle != 0x3FFFFFFF):
hx, hy, hz, hd = self._get_hit_at(x, y)
if handle in self.geometry_names:
if (prim != 0xFFFFFFFF):
self._status_action_text.set("%s[prim:%d; vtx:%d]: 2D (%d %d), 3D (%f %f %f), at dist.: %f" % (self.geometry_names[handle], prim, index, x, y, hx, hy, hz, hd))
else:
self._status_action_text.set("%s[%d]: 2D (%d %d), 3D (%f %f %f), at dist.: %f" % (self.geometry_names[handle], index, x, y, hx, hy, hz, hd))
else:
lh = self._optix.get_light_handle(handle, index)
if lh in self.light_names:
self._status_action_text.set("%s: 2D (%d %d), 3D (%f %f %f), at dist.: %f" % (self.light_names[lh], x, y, hx, hy, hz, hd))
else:
self._status_action_text.set("unknown: 2D (%d %d), 3D (%f %f %f), at dist.: %f" % (x, y, hx, hy, hz, hd))
else:
self._status_action_text.set("empty area")
def _gui_motion_pressed(self, event):
self._mouse_to_x, self._mouse_to_y = self._get_image_xy(event.x, event.y)
self._optix.break_launch()
def _gui_pressed_left(self, event):
self._mouse_from_x, self._mouse_from_y = self._get_image_xy(event.x, event.y)
self._mouse_to_x = self._mouse_from_x
self._mouse_to_y = self._mouse_from_y
self._left_mouse = True
self._any_mouse = True
def _gui_pressed_right(self, event):
self._mouse_from_x, self._mouse_from_y = self._get_image_xy(event.x, event.y)
self._mouse_to_x = self._mouse_from_x
self._mouse_to_y = self._mouse_from_y
self._right_mouse = True
self._any_mouse = True
def _gui_released_left(self, event):
self._mouse_to_x, self._mouse_to_y = self._get_image_xy(event.x, event.y)
self._mouse_from_x = self._mouse_to_x
self._mouse_from_y = self._mouse_to_y
self._left_mouse = False
self._any_mouse = False
def _gui_released_right(self, event):
self._mouse_to_x, self._mouse_to_y = self._get_image_xy(event.x, event.y)
self._mouse_from_x = self._mouse_to_x
self._mouse_from_y = self._mouse_to_y
self._right_mouse = False
self._any_mouse = False
def _gui_doubleclick_left(self, event):
assert self._is_started, "Raytracing thread not running."
x, y = self._get_image_xy(event.x, event.y)
handle, index, _ = self._gui_get_object_at(x, y)
if (handle != 0xFFFFFFFF):
if handle in self.geometry_names:
# switch selection: primitive / whole geom
if self._ctrl_key or (self._selection_handle == handle and self._selection_index == -1):
self._status_main_text.set("Selection: %s[%d]" % (self.geometry_names[handle], index))
self._selection_index = index
else:
self._status_main_text.set("Selection: %s" % self.geometry_names[handle])
self._selection_handle = handle
self._selection_index = -1
if self._ctrl_key:
hx, hy, hz, hd = self._get_hit_at(x, y)
if hd > 0:
self._status_action_text.set("Focused at (%f %f %f), distance %f" % (hx, hy, hz, hd))
cam_info = self.get_camera()
if "fisheye" in cam_info["RayGeneration"]:
w = np.array([hx, hy, hz], dtype=np.float32) - np.array(cam_info["Eye"], dtype=np.float32)
_ = self._optix.set_camera_focal_length(np.linalg.norm(w))
else:
_ = self._optix.set_camera_focal_length(hd)
return
else:
lh = self._optix.get_light_handle(handle, index)
if lh in self.light_names:
self._status_main_text.set("Selection: %s" % self.light_names[lh])
self._selection_handle = -2
self._selection_index = lh
return
self._status_main_text.set("Selection: camera")
self._selection_handle = -1
self._selection_index = -1
def select(self, name: Optional[str] = None, index: int = -1):
"""Select geometry, light or camera.
        Select an object for manual manipulation (rotations, shifts, etc.). A geometry
        or light is selected by its name. If ``name`` is not provided, the active
        camera is selected. The optional ``index`` selects a single primitive within
        the geometry.
Parameters
----------
name : string, optional
            Name of the geometry or light to select. If ``None``, the active camera is selected.
        index : int, optional
            Primitive index to select. The entire geometry is selected if ``index`` is
            outside the range of primitive indices.
"""
if name is None:
self._status_main_text.set("Selection: camera")
self._selection_handle = -1
self._selection_index = -1
return
if name in self.geometry_data:
self._selection_handle = self.geometry_data[name]._handle
if index >= 0 and index < self.geometry_data[name]._size:
self._status_main_text.set("Selection: %s[%d]" % (name, index))
self._selection_index = index
else:
self._status_main_text.set("Selection: %s" % name)
self._selection_index = -1
return
if name in self.light_handles:
self._status_main_text.set("Selection: %s" % name)
self._selection_handle = -2
self._selection_index = self.light_handles[name]
return
def _gui_doubleclick_right(self, event):
self._status_main_text.set("Selection: camera")
self._selection_handle = -1
self._selection_index = -1
def _gui_key_pressed(self, event):
if event.keysym == "Control_L":
self._ctrl_key = True
self._any_key = True
        elif event.keysym == "Shift_L":
            self._shift_key = True
            self._any_key = True
        else:
            self._any_key = False
def _gui_key_released(self, event):
if event.keysym == "Control_L":
self._ctrl_key = False
elif event.keysym == "Shift_L":
self._shift_key = False
self._any_key = False
def get_rt_size(self) -> Tuple[int, int]:
"""Get size of ray-tracing output image.
Get fixed dimensions of the output image or ``None`` if the
image is fit to the GUI window size.
Returns
-------
out : tuple (int, int)
            Output image size, or ``None`` if auto-fit mode is set.
"""
return self._fixed_size
def set_rt_size(self, size: Tuple[int, int]) -> None:
"""Set fixed / free size of ray-tracing output image.
Set fixed dimensions of the output image or allow automatic fit to the
GUI window size. Fixed size image updates are slower, but allow ray tracing
of any size. Default mode is fit to the GUI window size.
Parameters
----------
size : tuple (int, int)
Output image size or ``None`` to set auto-fit mode.
"""
assert self._is_started, "Raytracing thread not running."
if self._fixed_size == size: return
self._fixed_size = size
with self._padlock:
if self._fixed_size is None:
w, h = self._canvas.winfo_width(), self._canvas.winfo_height()
else:
w, h = self._fixed_size
self.resize(width=w, height=h)
def _gui_internal_image_update(self):
if self._img_rgba is not None:
pil_img = Image.fromarray(self._img_rgba, mode="RGBX")
else:
pil_img = Image.fromarray(self._dummy_rgba, mode="RGBX")
move_to = (0, 0)
self._image_scale = 1.0
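        # Note: Image.ANTIALIAS (used below) was removed in Pillow 10;
        # Image.LANCZOS is the drop-in replacement on newer Pillow versions.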
if self._fixed_size is not None:
wc, hc = self._canvas.winfo_width(), self._canvas.winfo_height()
if self._width / wc > self._height / hc:
self._image_scale = wc / self._width
hnew = int(self._height * self._image_scale)
pil_img = pil_img.resize((wc, hnew), Image.ANTIALIAS)
move_to = (0, (hc - hnew) // 2)
else:
self._image_scale = hc / self._height
wnew = int(self._width * self._image_scale)
pil_img = pil_img.resize((wnew, hc), Image.ANTIALIAS)
move_to = ((wc - wnew) // 2, 0)
tk_img = ImageTk.PhotoImage(pil_img)
# update image on canvas
self._canvas.itemconfig(self._img_id, image=tk_img)
if self._image_at != move_to:
self._canvas.move(self._img_id, -self._image_at[0], -self._image_at[1])
self._canvas.move(self._img_id, move_to[0], move_to[1])
self._image_at = move_to
# swap reference stored in the window instance
self._tk_img = tk_img
# no redraws until the next launch
self._update_req = False
def _gui_configure(self, event):
assert self._is_started, "Raytracing thread not running."
if not self._started_event.is_set():
self._started_event.set()
with self._padlock:
if self._fixed_size is None:
w, h = self._canvas.winfo_width(), self._canvas.winfo_height()
if (w == self._width) and (h == self._height): return
self._logger.info("Resize to: %d x %d", w, h)
self.resize(width=w, height=h)
self._gui_internal_image_update()
###########################################################################
# update raytraced image in Tk window ########
def _gui_update_content(self, *args):
assert self._is_started, "Raytracing thread not running."
if self._update_req:
self._status_fps_text.set("FPS: %.3f" % self._optix.get_fps())
with self._padlock:
self._gui_internal_image_update()
def _launch_finished_callback(self, rt_result: int):
super()._launch_finished_callback(rt_result)
if self._is_started and rt_result < RtResult.NoUpdates.value:
self._update_req = True
self._canvas.event_generate("<<LaunchFinished>>", when="now")
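    # Note: the callback above runs on the raytracing thread; event_generate
    # hands the actual canvas update over to the Tk main loop, the only thread
    # that may safely touch Tk widgets.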
###########################################################################
###########################################################################
# apply manual scene edits made in ui ########
def _gui_apply_scene_edits(self, *args):
if (self._mouse_from_x == self._mouse_to_x) and (self._mouse_from_y == self._mouse_to_y): return
if self._selection_handle == -1:
# manipulate camera:
if self._left_mouse:
if not self._any_key:
self._status_action_text.set("rotate camera eye XZ")
self._optix.rotate_camera_eye(
self._mouse_from_x, self._mouse_from_y,
self._mouse_to_x, self._mouse_to_y)
elif self._ctrl_key:
self._status_action_text.set("change camera focus")
df = 1 + 0.01 * (self._mouse_from_y - self._mouse_to_y)
f = self._optix.get_camera_focal_scale(0) # 0 is current cam
self._optix.set_camera_focal_scale(df * f)
elif self._shift_key:
self._status_action_text.set("change camera FoV")
df = 1 + 0.005 * (self._mouse_from_y - self._mouse_to_y)
f = self._optix.get_camera_fov(0) # 0 is current cam
self._optix.set_camera_fov(df * f)
elif self._right_mouse:
if not self._any_key:
self._status_action_text.set("camera pan/tilt")
self._optix.rotate_camera_tgt(
self._mouse_from_x, self._mouse_from_y,
self._mouse_to_x, self._mouse_to_y)
elif self._ctrl_key:
self._status_action_text.set("change camera aperture")
da = 1 + 0.01 * (self._mouse_from_y - self._mouse_to_y)
a = self._optix.get_camera_aperture(0) # 0 is current cam
self._optix.set_camera_aperture(da * a)
elif self._shift_key:
self._status_action_text.set("camera dolly")
target = np.ascontiguousarray([0, 0, 0], dtype=np.float32)
self._optix.get_camera_target(0, target.ctypes.data) # 0 is current cam
eye = np.ascontiguousarray([0, 0, 0], dtype=np.float32)
self._optix.get_camera_eye(0, eye.ctypes.data) # 0 is current cam
dl = 0.01 * (self._mouse_from_y - self._mouse_to_y)
eye = eye - dl * (target - eye)
self._optix.set_camera_eye(eye.ctypes.data)
elif self._selection_handle == -2:
# manipulate light:
if self._selection_index in self.light_names:
name = self.light_names[self._selection_index]
if self._left_mouse:
if not self._any_key:
rx = np.pi * (self._mouse_to_y - self._mouse_from_y) / self._height
ry = np.pi * (self._mouse_to_x - self._mouse_from_x) / self._width
self._status_action_text.set("rotate light in camera XY")
self._optix.rotate_light_in_view(name, rx, ry, 0)
elif self._ctrl_key and self._shift_key:
s = 1 - (self._mouse_to_y - self._mouse_from_y) / self._height
self._status_action_text.set("scale light")
self._optix.scale_light(name, s)
elif self._ctrl_key:
rx = np.pi * (self._mouse_to_y - self._mouse_from_y) / self._height
rz = np.pi * (self._mouse_from_x - self._mouse_to_x) / self._width
self._status_action_text.set("rotate light in camera XZ")
self._optix.rotate_light_in_view(name, rx, 0, rz)
elif self._shift_key:
dx = (self._mouse_to_x - self._mouse_from_x) / self._width
dy = (self._mouse_from_y - self._mouse_to_y) / self._height
self._status_action_text.set("move light in camera XY")
self._optix.move_light_in_view(name, dx, dy, 0)
elif self._right_mouse:
if not self._any_key:
dx = (self._mouse_to_x - self._mouse_from_x) / self._width
dz = (self._mouse_to_y - self._mouse_from_y) / self._height
self._status_action_text.set("move light in camera XZ")
self._optix.move_light_in_view(name, dx, 0, dz)
elif self._shift_key:
dx = (self._mouse_from_y - self._mouse_to_y) / self._height
self._status_action_text.set("move light in normal direction")
self._optix.dolly_light(name, dx)
else:
            # manipulate selected object
name = self.geometry_names[self._selection_handle]
if self._left_mouse:
if not self._any_key:
rx = np.pi * (self._mouse_to_y - self._mouse_from_y) / self._height
ry = np.pi * (self._mouse_to_x - self._mouse_from_x) / self._width
if self._selection_index == -1:
self._status_action_text.set("rotate geometry in camera XY")
self._optix.rotate_geometry_in_view(name, rx, ry, 0, True)
else:
self._status_action_text.set("rotate primitive in camera XY")
self._optix.rotate_primitive_in_view(name, self._selection_index, rx, ry, 0, True)
elif self._ctrl_key and self._shift_key:
s = 1 - (self._mouse_to_y - self._mouse_from_y) / self._height
if self._selection_index == -1:
self._status_action_text.set("scale geometry")
self._optix.scale_geometry(name, s, True)
else:
self._status_action_text.set("scale primitive")
self._optix.scale_primitive(name, self._selection_index, s, True)
elif self._ctrl_key:
rx = np.pi * (self._mouse_to_y - self._mouse_from_y) / self._height
rz = np.pi * (self._mouse_from_x - self._mouse_to_x) / self._width
if self._selection_index == -1:
self._status_action_text.set("rotate geometry in camera XZ")
self._optix.rotate_geometry_in_view(name, rx, 0, rz, True)
else:
self._status_action_text.set("rotate primitive in camera XY")
self._optix.rotate_primitive_in_view(name, self._selection_index, rx, 0, rz, True)
elif self._shift_key:
dx = (self._mouse_to_x - self._mouse_from_x) / self._width
dy = (self._mouse_from_y - self._mouse_to_y) / self._height
if self._selection_index == -1:
self._status_action_text.set("move geometry in camera XY")
self._optix.move_geometry_in_view(name, dx, dy, 0, True)
else:
self._status_action_text.set("move primitive in camera XY")
self._optix.move_primitive_in_view(name, self._selection_index, dx, dy, 0, True)
elif self._right_mouse:
if not self._any_key:
dx = (self._mouse_to_x - self._mouse_from_x) / self._width
dz = (self._mouse_to_y - self._mouse_from_y) / self._height
if self._selection_index == -1:
self._status_action_text.set("move geometry in camera XZ")
self._optix.move_geometry_in_view(name, dx, 0, dz, True)
else:
self._status_action_text.set("move primitive in camera XZ")
self._optix.move_primitive_in_view(name, self._selection_index, dx, 0, dz, True)
self._mouse_from_x = self._mouse_to_x
self._mouse_from_y = self._mouse_to_y
def _scene_rt_starting_callback(self):
if self._is_started:
super()._scene_rt_starting_callback()
self._canvas.event_generate("<<ApplyUiEdits>>", when="now")
###########################################################################
| 46.099853
| 183
| 0.580334
| 4,036
| 31,394
| 4.198464
| 0.117939
| 0.048864
| 0.03051
| 0.035409
| 0.514724
| 0.47483
| 0.434287
| 0.399056
| 0.336205
| 0.316022
| 0
| 0.008039
| 0.306587
| 31,394
| 680
| 184
| 46.167647
| 0.770362
| 0.168949
| 0
| 0.385965
| 0
| 0.008772
| 0.075247
| 0
| 0
| 0
| 0.001204
| 0
| 0.008772
| 1
| 0.059211
| false
| 0
| 0.019737
| 0
| 0.10307
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4ba683b1acdb8fa2966f9142fd6e41d884299cc
| 4,144
|
py
|
Python
|
app.py
|
apizzo1/Hindsight_2020
|
51a124c7363a80ebd00999a3812a91c0b27f62cd
|
[
"MIT"
] | null | null | null |
app.py
|
apizzo1/Hindsight_2020
|
51a124c7363a80ebd00999a3812a91c0b27f62cd
|
[
"MIT"
] | null | null | null |
app.py
|
apizzo1/Hindsight_2020
|
51a124c7363a80ebd00999a3812a91c0b27f62cd
|
[
"MIT"
] | 1
|
2020-09-30T02:56:29.000Z
|
2020-09-30T02:56:29.000Z
|
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
import os
import requests
import urllib.parse
# API key introduction
# API_KEY = os.environ.get('API_KEY', '')
finnhub_API_Key = os.environ.get('finnhub_API_Key', '')
from flask import Flask, jsonify, render_template, request
db_url = os.environ.get('DATABASE_URL', '')
# create engine
engine = create_engine(db_url)
# reflect DB
Base = automap_base()
Base.prepare(engine, reflect=True)
# Flask init
app = Flask(__name__)
# dict builder: convert a SQL result set into a list of dicts keyed by headers
def dict_creation(response, headers):
    response_list = []
    for item in response:
        item_dict = {}
        for i in range(len(headers)):
            item_dict[headers[i]] = item[i]
        response_list.append(item_dict)
    return response_list
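# Illustrative only: rows like [("2020-01-01", "img.png")] with headers
# ['date', 'img_url'] become [{'date': '2020-01-01', 'img_url': 'img.png'}].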
# home route
@app.route("/")
def welcome():
return render_template("index.html")
@app.route("/api/v1.0/stocks")
def stocks():
# Get the url passed into the route
data = request.args
r = requests.get(urllib.parse.unquote(data["url"]) + finnhub_API_Key)
res = r.json()
return jsonify(res)
@app.route("/api/v1.0/headlines")
def headlines():
# Create our session (link) from Python to the DB
results = engine.execute('SELECT date, img_url, headline, article_url FROM headlines').fetchall()
# dict keys
headers_list=['date', 'img_url', 'headline', 'article_url']
db_response=dict_creation(results,headers_list)
return jsonify(db_response)
@app.route("/api/v1.0/national_mobility")
def national_mobility():
# Create our session (link) from Python to the DB
results = engine.execute('SELECT * FROM national_mobility').fetchall()
# dict keys
headers_list=['year', 'month', 'day', 'retail', 'grocery', 'parks', 'transit', 'work', 'residential', 'away_from_home']
db_response=dict_creation(results,headers_list)
return jsonify(db_response)
@app.route("/api/v1.0/state_mobility")
def state_mobility():
# Create our session (link) from Python to the DB
results = engine.execute('select sm.id, si.state, sm.year, sm.month, sm.day, sm.gps_retail_and_recreation, sm.gps_grocery_and_pharmacy, sm.gps_parks, sm.gps_transit_stations, sm.gps_workplaces, sm.gps_residential, sm.gps_away_from_home from state_mobility as sm inner join state_ids as si on sm.id=si.id').fetchall()
# dict keys
headers_list=['id', 'state', 'year', 'month', 'day', 'retail', 'grocery', 'parks', 'transit', 'work', 'residential', 'away_from_home']
db_response=dict_creation(results,headers_list)
return jsonify(db_response)
@app.route("/api/v1.0/ui_rate")
def ui_rate():
# Create our session (link) from Python to the DB
results = engine.execute('select * from ui_rate').fetchall()
# dict keys
headers_list=['DATE','UNRATE','16-19','over20','AfricanAmer','Latinx','White','Men','Women','no-HS-grad','HS-no-college','Bachelors','Masters','Doctoral']
db_response=dict_creation(results,headers_list)
return jsonify(db_response)
@app.route("/api/v1.0/protest")
def protest():
# Create our session (link) from Python to the DB
results = engine.execute('select * from protest_data').fetchall()
# dict keys
headers_list=['ISO','EVENT_ID_CNTY','EVENT_ID_NO_CNTY','EVENT_DATE','YEAR','TIME_PRECISION','EVENT_TYPE','SUB_EVENT_TYPE','ACTOR1','ASSOC_ACTOR_1','INTER1','ACTOR2','ASSOC_ACTOR_2','INTER2','INTERACTION','REGION','COUNTRY','ADMIN1','ADMIN2','ADMIN3','LOCATION','LATITUDE','LONGITUDE','GEO_PRECISION','SOURCE','SOURCE_SCALE','FATALITIES']
db_response=dict_creation(results,headers_list)
return jsonify(db_response)
@app.route("/api/v1.0/state_ui")
def state_ui():
# Create our session (link) from Python to the DB
results = engine.execute('SELECT * FROM state_ui').fetchall()
# dict keys
headers_list=['State', 'January', 'February', 'March','April', 'May', 'June', 'July', 'August']
db_response=dict_creation(results,headers_list)
return jsonify(db_response)
if __name__ == '__main__':
app.run(debug=True)
| 36.034783
| 341
| 0.707288
| 586
| 4,144
| 4.798635
| 0.290102
| 0.046942
| 0.027383
| 0.032361
| 0.455903
| 0.394737
| 0.354196
| 0.354196
| 0.354196
| 0.354196
| 0
| 0.009017
| 0.143581
| 4,144
| 114
| 342
| 36.350877
| 0.783319
| 0.127172
| 0
| 0.171429
| 0
| 0.014286
| 0.327586
| 0.042547
| 0
| 0
| 0
| 0
| 0
| 1
| 0.128571
| false
| 0
| 0.114286
| 0.014286
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4ba6ef688aa37560a69eb7860a151045a256156
| 1,156
|
py
|
Python
|
Project.py
|
nishitde/Data-Dictionary
|
4da47de16739d3a255c36b1060244d7cb1df6bae
|
[
"MIT"
] | null | null | null |
Project.py
|
nishitde/Data-Dictionary
|
4da47de16739d3a255c36b1060244d7cb1df6bae
|
[
"MIT"
] | null | null | null |
Project.py
|
nishitde/Data-Dictionary
|
4da47de16739d3a255c36b1060244d7cb1df6bae
|
[
"MIT"
] | null | null | null |
import json
from difflib import get_close_matches
data = json.load(open("data.json"))
word = input("Enter a word: ")
try:
def translate(word):
word = word.lower()
if word in data:
return data[word]
elif word.title() in data:
return data[word.title()]
elif word.upper() in data:
return data[word.upper()]
else:
pred = get_close_matches(word, data.keys())[0]
YoN = input("Did you mean '" + str(pred) + "'? [Y/N]: ")
YoN = YoN.upper()
if YoN == "Y":
return data[pred]
elif YoN == "N":
return "The word '" + word + "' does not exist. Please check again."
else:
return "Invalid Entry!"
    output = translate(word)
    if type(output) == list:
        for item in output:
            print(item)
    else:
        print(output)
except (KeyError, IndexError):
    print("The word '" + word + "' does not exist. Please check again.")
| 28.195122
| 85
| 0.50519
| 135
| 1,156
| 4.296296
| 0.392593
| 0.068966
| 0.062069
| 0.082759
| 0.322414
| 0.218966
| 0.218966
| 0.218966
| 0.218966
| 0.151724
| 0
| 0.00137
| 0.368512
| 1,156
| 40
| 86
| 28.9
| 0.793151
| 0
| 0
| 0.151515
| 0
| 0
| 0.182796
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.060606
| 0
| 0.272727
| 0.121212
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4c31aee7bfbc8595d53ad5906b60459c10165ee
| 3,472
|
py
|
Python
|
service/recv_setup.py
|
mikroncoin/mikron_restapi_py
|
79cd47c8f26615ccd27c9764c92299f8cebd578a
|
[
"BSD-2-Clause"
] | null | null | null |
service/recv_setup.py
|
mikroncoin/mikron_restapi_py
|
79cd47c8f26615ccd27c9764c92299f8cebd578a
|
[
"BSD-2-Clause"
] | 6
|
2018-09-27T07:12:28.000Z
|
2019-08-14T10:13:13.000Z
|
service/recv_setup.py
|
mikroncoin/mikron_restapi_py
|
79cd47c8f26615ccd27c9764c92299f8cebd578a
|
[
"BSD-2-Clause"
] | null | null | null |
import config
import recv_db
import node_rpc_helper
import threading
from time import sleep, time
setup_check_background_result = {"msg": "(uninitialized)"}
config = config.readConfig()
last_check_time = time() - 10000
def get_setup_check_background():
global setup_check_background_result
return setup_check_background_result
def setup_check_async():
bg_thread = threading.Thread(target = setup_check_sync)
bg_thread.start()
def setup_check_sync():
global setup_check_background_result
global config
global last_check_time
now = time()
age = now - last_check_time
if (age < 300):
setup_check_background_result["msg"] = "(status check skipped " + str(int(age)) + ")"
else:
setup_check_background_result["msg"] = "(status check scheduled)"
sleep(5)
last_check_time = now
setup_check_background_result["msg"] = "(status check executing)"
status = setup_check(config)
#print("status", status)
setup_check_background_result = status
# Check the receiver accounts; compare accounts in the node wallets and in the DB
def setup_check(config):
print("Receiving accounts:")
# in DB
in_db = {}
a_in_db = recv_db.get_all_accounts()
for a in a_in_db:
in_db[a["rec_acc"]] = {"pool": a["pool_account_id"]}
print("- ", len(in_db), "in DB")
#for a in in_db:
# print(a, in_db[a]["pool"])
# in node wallets -- no RPC for wallet list, take from config
wallets = {}
for pool in config["receiver_service.account"]:
pool_id = config["receiver_service.account"][pool]["id"]
wallet = config["receiver_service.account"][pool]["walletid"]
wallets[pool_id] = wallet
#print(pool_id, wallet)
in_node = {}
for pool in wallets:
wallet = wallets[pool]
print(pool, wallet)
wresp = node_rpc_helper.doAccountList(wallet)
#print(wresp)
if "error" in wresp:
print("Error", wresp)
else:
if "accounts" in wresp:
for a in wresp["accounts"]:
in_node[a] = {"pool": pool, "wallet": wallet}
print("- ", len(in_node), "in node wallets")
#for a in in_node:
# print(a, in_node[a]["pool"])
# find those in DB only
count_in_both = 0
count_in_db_only = 0
for a in in_db:
if (a in in_node) and (in_db[a]["pool"] == in_node[a]["pool"]):
# in both, OK
count_in_both = count_in_both + 1
else:
# in DB, but not in node!
print("Error: acc", a, in_db[a]["pool"], "is in DB but not in Node!")
count_in_db_only = count_in_db_only + 1
print("- ", count_in_both, "in both DB and Node")
print("- ", count_in_db_only, "in DB only")
# find those in Node wallets only
count_in_node_only = 0
for a in in_node:
if a not in in_db:
# in Node, but not in DB!
print("Error: acc", a, in_node[a]["pool"], "is in Node but not in DB!")
count_in_node_only = count_in_node_only + 1
print("- ", count_in_node_only, "in Node only")
status = {
"in_db": len(in_db),
"in_node": len(in_node),
"in_both": count_in_both,
"in_db_only": count_in_db_only,
"in_node_only": count_in_node_only
}
print(status)
return status
#config = config.readConfig()
#msg = setup_check(config)
#print("setup_check", msg)
| 32.754717
| 93
| 0.616647
| 497
| 3,472
| 4.036217
| 0.160966
| 0.053838
| 0.089731
| 0.103689
| 0.367398
| 0.199402
| 0.105683
| 0
| 0
| 0
| 0
| 0.005878
| 0.264977
| 3,472
| 105
| 94
| 33.066667
| 0.780172
| 0.140841
| 0
| 0.064103
| 0
| 0
| 0.154806
| 0.024283
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051282
| false
| 0
| 0.064103
| 0
| 0.141026
| 0.141026
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4c4f544669d2e4b222ccb9bd7786787ddb72fee
| 784
|
py
|
Python
|
2015/day/2/solution.py
|
iangregson/advent-of-code
|
e2a2dde30dcaed027a5ba78f9270f8a1976577f1
|
[
"MIT"
] | null | null | null |
2015/day/2/solution.py
|
iangregson/advent-of-code
|
e2a2dde30dcaed027a5ba78f9270f8a1976577f1
|
[
"MIT"
] | null | null | null |
2015/day/2/solution.py
|
iangregson/advent-of-code
|
e2a2dde30dcaed027a5ba78f9270f8a1976577f1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
file = open(dir_path + "/input.txt", "r")
lines = file.readlines()
lines = [line.strip() for line in lines]
# lines = ["2x3x4", "1x1x10"]
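# Worked example: "2x3x4" has sides 6, 8 and 12, so the paper needed is
# 2*6 + 2*8 + 2*12 + 6 (smallest side as slack) = 58; part 2's ribbon for the
# same box is the smallest perimeter 2*2 + 2*3 = 10 plus the volume 24 = 34.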
total = 0
for line in lines:
l, w, h = [int(v) for v in line.split('x')]
side1 = l*w
side2 = l*h
side3 = w*h
smallest_side = min(side1, side2, side3)
area = 2*side1 + 2*side2 + 2*side3 + smallest_side
total += area
print("Part 1 answer:", total)
total = 0
for line in lines:
l, w, h = [int(v) for v in line.split('x')]
side1 = 2*l + 2*w
side2 = 2*l + 2*h
side3 = 2*w + 2*h
smallest_side = min(side1, side2, side3)
volume = (l*w*h) + smallest_side
total += volume
print("Part 2 answer:", total)
| 21.777778
| 54
| 0.598214
| 134
| 784
| 3.425373
| 0.335821
| 0.017429
| 0.058824
| 0.091503
| 0.344227
| 0.344227
| 0.344227
| 0.20915
| 0.20915
| 0.20915
| 0
| 0.060201
| 0.237245
| 784
| 35
| 55
| 22.4
| 0.707358
| 0.0625
| 0
| 0.32
| 0
| 0
| 0.055935
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.04
| 0
| 0.04
| 0.08
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4c67370682280607f52d85bc867fcf1b22bcd29
| 2,611
|
py
|
Python
|
src/utils/__init__.py
|
ppolewicz/screeps-starter-python
|
dd2f5646a53c9353bf99e976e5f362e297715e96
|
[
"MIT"
] | null | null | null |
src/utils/__init__.py
|
ppolewicz/screeps-starter-python
|
dd2f5646a53c9353bf99e976e5f362e297715e96
|
[
"MIT"
] | null | null | null |
src/utils/__init__.py
|
ppolewicz/screeps-starter-python
|
dd2f5646a53c9353bf99e976e5f362e297715e96
|
[
"MIT"
] | null | null | null |
from creeps.scheduled_action import ScheduledAction
def part_count(creep, of_type):
count = 0
for part in creep.body:
if part['type'] == of_type:
count += 1
return count
def get_first_spawn(room):
for s in room.find(FIND_MY_STRUCTURES):
if s.structureType == STRUCTURE_SPAWN:
return s
for s in room.find(FIND_CONSTRUCTION_SITES):
if s.structureType == STRUCTURE_SPAWN:
return s
#print('WARNING: get_first_spawn returning None for', room)
def get_controller_spawn(room):
# TODO: cache it and drop cache after a spawn is completed
source_filter = lambda s: (
s.structureType == STRUCTURE_SPAWN
)
return room.controller.pos.findClosestByRange(FIND_MY_STRUCTURES, filter=source_filter)
def search_room(room, kind, filter_function=lambda x: True):
result_list = []
for item in room.find(kind):
if filter_function(item):
result_list.append(item)
return result_list
def get_close_structure(pos, _range, structure_type):
for s in pos.findInRange(FIND_STRUCTURES, _range):
if s.structureType != structure_type:
continue
return s
def get_thing_at_coordinates(things, x, y):
for thing in things:
if x == thing.pos.x and y == thing.pos.y:
return thing
class P:
def __init__(self, x, y):
self.x = x
self.y = y
AROUND_OFFSETS = (
(
(-1, -1),
(-1, 0),
(-1, 1),
(0, 1),
(0, -1),
(1, -1),
(1, 0),
(1, 1),
),
(
(1, -2),
(0, -2),
(-1, -2),
(-2, -2),
(-2, -1),
(-2, 0),
(-2, 1),
(-2, 2),
(-1, 2),
(0, 2),
(1, 2),
(2, -2),
(2, -1),
(2, 0),
(2, 1),
(2, 2),
),
)
def around_range(room, x, y, distance, vis=None):
result = []
for x_diff, y_diff in AROUND_OFFSETS[distance-1]:
result.append((x + x_diff, y + y_diff))
if vis is not None:
room.visual.circle(x+x_diff, y+y_diff, {'stroke': vis})
return result
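# e.g. around_range(room, 10, 10, 1) returns the 8 cells adjacent to (10, 10);
# distance=2 returns the 16 cells of the next ring (see AROUND_OFFSETS above).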
def make_transfer_action(creep, target):
amount = min(
target.store.getFreeCapacity(RESOURCE_ENERGY),
creep.store[RESOURCE_ENERGY],
)
if amount >= 1:
return ScheduledAction.transfer(
creep,
target,
RESOURCE_ENERGY,
amount,
)
def points_to_path(points):
return [
__new__(RoomPosition(point.x, point.y, point.roomName)) for point in points
]
| 23.522523
| 91
| 0.550747
| 339
| 2,611
| 4.061947
| 0.289086
| 0.011619
| 0.015251
| 0.011619
| 0.156137
| 0.129267
| 0.076979
| 0.023239
| 0.023239
| 0.023239
| 0
| 0.029512
| 0.325163
| 2,611
| 110
| 92
| 23.736364
| 0.751986
| 0.044044
| 0
| 0.076923
| 0
| 0
| 0.004013
| 0
| 0
| 0
| 0
| 0.009091
| 0
| 1
| 0.10989
| false
| 0
| 0.010989
| 0.010989
| 0.241758
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4c8185a2c9690234bd9c6872e272faa663e2d58
| 2,419
|
py
|
Python
|
src/sidecar/connection.py
|
aldanor/sidecar
|
5353bc4120a01460f6b1e51ea8e1fcafb0524782
|
[
"Apache-2.0"
] | null | null | null |
src/sidecar/connection.py
|
aldanor/sidecar
|
5353bc4120a01460f6b1e51ea8e1fcafb0524782
|
[
"Apache-2.0"
] | null | null | null |
src/sidecar/connection.py
|
aldanor/sidecar
|
5353bc4120a01460f6b1e51ea8e1fcafb0524782
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
import logging
import os
from sockjs.tornado import SockJSRouter, SockJSConnection
from tornado.web import RequestHandler, StaticFileHandler
from tornado.web import Application
from tornado.ioloop import IOLoop
from sidecar.utils import log
class WebHandler(RequestHandler):
def initialize(self, page, **kwargs):
self.page = page
self.kwargs = kwargs
def get(self):
self.render(self.page, **self.kwargs)
class TextHandler(RequestHandler):
def initialize(self, content):
self.content = content
def get(self):
self.finish(self.content)
class FileHandler(StaticFileHandler):
def initialize(self, path):
if path is None:
self.absolute_path = None
else:
path = os.path.join(os.path.dirname(__file__), path)
self.absolute_path = os.path.abspath(os.path.expanduser(path))
self.root, self.filename = os.path.split(self.absolute_path)
def get(self, path=None, include_body=True):
if self.absolute_path is not None:
return super(FileHandler, self).get(self.filename, include_body)
self.finish('')
class Connection(SockJSConnection):
def send_json(self, kind, data=None):
log.debug()
self.send(json.dumps({'kind': kind, 'data': data or {}}))
def on_open(self, info):
log.debug()
self.send_json('ready')
def on_message(self, msg):
msg = json.loads(msg)
log.debug(msg)
def on_close(self):
log.debug()
@classmethod
def tornado_app(cls, ui, title, debug=False):
root = os.path.dirname(__file__)
router = SockJSRouter(cls, '/api')
settings = {
'static_path': os.path.join(root, 'static'),
'template_path': os.path.join(root, 'static'),
'debug': debug
}
handlers = [
('/', WebHandler, {'page': 'index.html', 'title': title}),
('/ui.json', TextHandler, {'content': ui})
]
handlers.extend(router.urls)
return Application(handlers, **settings)
@classmethod
def start(cls, ui, title, debug=False, port=9999):
if debug:
logging.basicConfig(level=logging.DEBUG)
log.debug()
app = cls.tornado_app(ui, title, debug=debug)
app.listen(port)
IOLoop.instance().start()
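# Hedged usage sketch (names and values assumed, not from this repo): 'ui' is
# the JSON string served at /ui.json.
#
#     if __name__ == '__main__':
#         Connection.start(ui='{"widgets": []}', title='Sidecar demo',
#                          debug=True, port=9999)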
| 26.582418
| 76
| 0.613477
| 285
| 2,419
| 5.126316
| 0.319298
| 0.032854
| 0.043806
| 0.028747
| 0.087611
| 0.032854
| 0
| 0
| 0
| 0
| 0
| 0.002795
| 0.260438
| 2,419
| 90
| 77
| 26.877778
| 0.813862
| 0.008681
| 0
| 0.121212
| 0
| 0
| 0.038815
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.121212
| 0
| 0.393939
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4c821a03a137cb22747cf95a778dd8018d7963b
| 1,210
|
py
|
Python
|
1-100/31-40/31-nextPermutation/nextPermutation.py
|
xuychen/Leetcode
|
c8bf33af30569177c5276ffcd72a8d93ba4c402a
|
[
"MIT"
] | null | null | null |
1-100/31-40/31-nextPermutation/nextPermutation.py
|
xuychen/Leetcode
|
c8bf33af30569177c5276ffcd72a8d93ba4c402a
|
[
"MIT"
] | null | null | null |
1-100/31-40/31-nextPermutation/nextPermutation.py
|
xuychen/Leetcode
|
c8bf33af30569177c5276ffcd72a8d93ba4c402a
|
[
"MIT"
] | null | null | null |
class Solution(object):
def nextPermutation(self, nums):
"""
:type nums: List[int]
        :rtype: None Do not return anything, modify nums in-place instead.
"""
flag = False
for i in range(len(nums)-2, -1, -1):
if nums[i] < nums[i+1]:
flag = True
pivot = nums[i]
for j in range(len(nums)-1, i, -1):
if nums[j] > pivot:
nums[i] = nums[j]
nums[j] = pivot
break
nums[i+1:] = nums[:i:-1]
break
if not flag:
nums[:] = nums[::-1]
def nextPermutation2(self, nums):
"""
:type nums: List[int]
:rtype: None Do not return anything, modify nums in-place instead.
"""
length = len(nums)
for i in range(length-2, -1, -1):
if nums[i] < nums[i+1]:
for j in range(length-1, i, -1):
if nums[i] < nums[j]:
nums[i], nums[j] = nums[j], nums[i]
nums[i+1:] = nums[:i:-1]
return
nums[:] = nums[::-1]
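# Worked examples of the standard next-permutation step:
#   [1, 2, 3] -> [1, 3, 2]
#   [3, 2, 1] -> [1, 2, 3]   (fully descending input wraps to ascending)
#   [1, 1, 5] -> [1, 5, 1]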
| 30.25
| 74
| 0.400826
| 145
| 1,210
| 3.344828
| 0.248276
| 0.134021
| 0.11134
| 0.049485
| 0.529897
| 0.470103
| 0.358763
| 0.243299
| 0.243299
| 0
| 0
| 0.029231
| 0.46281
| 1,210
| 40
| 75
| 30.25
| 0.716923
| 0.146281
| 0
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4c9e7db24e7faa2d384c8fed0def5b98126bad5
| 4,584
|
py
|
Python
|
recognizer/sat_plan_recognizer.py
|
RukNdf/MA-Landmark
|
4038ebe7edc9e353e1987479f5f9edc528a4bd2a
|
[
"Unlicense"
] | null | null | null |
recognizer/sat_plan_recognizer.py
|
RukNdf/MA-Landmark
|
4038ebe7edc9e353e1987479f5f9edc528a4bd2a
|
[
"Unlicense"
] | null | null | null |
recognizer/sat_plan_recognizer.py
|
RukNdf/MA-Landmark
|
4038ebe7edc9e353e1987479f5f9edc528a4bd2a
|
[
"Unlicense"
] | null | null | null |
import time
from z3 import Solver, Implies, sat, Const, Function, IntSort, ForAll, DeclareSort
from recognizer.pddl.pddl_planner import applicable
from recognizer.pddl.sat_planner import SATPlanner
from recognizer.plan_recognizer import PlanRecognizer
class SATPlanRecognizer(PlanRecognizer):
name = "sat"
def __init__(self, options=None):
PlanRecognizer.__init__(self, options)
def accept_hypothesis(self, h):
return not h.test_failed and h.cost == self.unique_goal.cost
def add_observation_constraints(self, s, planner, ground_actions, length, observations):
obsSort = DeclareSort('Obs')
orderObs = Function('orderObs', obsSort, IntSort())
orderExec = Function('orderExec', obsSort, IntSort())
obsConsts = []
for i in range(0, len(observations)):
o = Const(str(observations[i]), obsSort)
obsConsts.append(o)
s.add(orderObs(o) == i)
for t in range(0, length):
# forced_obs = []
for action in ground_actions:
index = observations.index_of(action.signature())
if index > -1:
obsC = obsConsts[index]
# forced_obs.append(planner.action_prop_at(action, t))
s.add(Implies(planner.action_prop_at(action, t), orderExec(obsC) == t))
# s.add(Or(*forced_obs))
x = Const('x', obsSort)
y = Const('y', obsSort)
# orderSync = Function('order-sync', BoolSort())
s.add(ForAll([x, y], Implies(orderObs(x) < orderObs(y), orderExec(x) < orderExec(y))))
s.add(ForAll([x, y], Implies(orderObs(x) == orderObs(y), orderExec(x) == orderExec(y))))
s.add(ForAll([x, y], Implies(orderObs(x) > orderObs(y), orderExec(x) > orderExec(y))))
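        # Together, the three ForAll constraints force the execution times of the
        # observed actions (orderExec) to agree exactly with their observation
        # order (orderObs).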
def evaluate_hypothesis(self, index, hypothesis, observations):
hyp_problem = self.options.work_dir+'/'+'hyp_%d_problem.pddl' % index
domain_file = self.options.work_dir+'/'+self.options.domain_name+'.pddl'
# domain_file = 'examples/blocksworld/blocksworld.pddl'
hypothesis.generate_pddl_for_hyp_plan(hyp_problem)
planner = SATPlanner(allow_parallel_actions=True, verbose=True)
planner.max_length = 666
parser = planner.parse(domain_file, hyp_problem)
if applicable(parser.state, parser.positive_goals, parser.negative_goals):
hypothesis.cost = 0
# Grounding process
ground_actions = planner.grounding(parser)
plan = None
total_runtime = 0.0
for length in range(0, planner.max_length):
t0 = time.time()
s = Solver()
planner.props.clear()
planner.action_map.clear()
# if self.options.verbose: print("Encoding domain with length {0}".format(length))
planner.encode_formula(s, ground_actions, parser.state, (parser.positive_goals, parser.negative_goals), length)
# Add the constraints for the observations
self.add_observation_constraints(s, planner, ground_actions, length, observations)
# if self.options.verbose: print(s.to_smt2())
if s.check() == sat:
if self.options.verbose: print("Model found with length {0}".format(length))
plan = planner.extract_plan(s.model(),length)
if self.options.verbose: print("Plan %d is %s"%(len(plan),plan))
hypothesis.cost = len(plan)
break
else:
if self.options.verbose: print("No model found with length {0}".format(length))
tf = time.time()
print('Runtime', tf - t0, 'secs')
total_runtime += tf - t0
return plan, total_runtime
def run_recognizer(self):
total_runtime = 0.0
t0 = time.time()
for i in range(0, len(self.hyps)):
if self.options.verbose: print("Evaluating hypothesis %d: %s"%(i,str(self.hyps[i])))
plan, plan_time = self.evaluate_hypothesis(i, self.hyps[i], self.observations)
total_runtime += plan_time
for h in self.hyps:
if not h.test_failed:
if not self.unique_goal or h.cost < self.unique_goal.cost:
self.unique_goal = h
# Select unique goal (choose the goal with the minimal score)
for h in self.hyps:
if self.accept_hypothesis(h):
self.accepted_hypotheses.append(h)
tf = time.time()
print('Total runtime', tf - t0)
return tf - t0
| 43.245283
| 123
| 0.610384
| 556
| 4,584
| 4.899281
| 0.246403
| 0.04442
| 0.028634
| 0.044053
| 0.290015
| 0.210352
| 0.122981
| 0.098752
| 0.062775
| 0.062775
| 0
| 0.007207
| 0.27356
| 4,584
| 105
| 124
| 43.657143
| 0.810811
| 0.095113
| 0
| 0.101266
| 0
| 0
| 0.041818
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063291
| false
| 0
| 0.063291
| 0.012658
| 0.189873
| 0.075949
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4cbdc38c4ab5669908483516a67bda51e21ba7f
| 1,431
|
py
|
Python
|
somato/06_dipole.py
|
larsoner/beamformer_simulation
|
ebc9cfc8bc73434ecd995c3b85560db962642307
|
[
"BSD-3-Clause"
] | 2
|
2019-06-03T21:09:24.000Z
|
2020-05-29T20:53:22.000Z
|
somato/06_dipole.py
|
larsoner/beamformer_simulation
|
ebc9cfc8bc73434ecd995c3b85560db962642307
|
[
"BSD-3-Clause"
] | null | null | null |
somato/06_dipole.py
|
larsoner/beamformer_simulation
|
ebc9cfc8bc73434ecd995c3b85560db962642307
|
[
"BSD-3-Clause"
] | 4
|
2019-07-14T02:44:40.000Z
|
2020-05-28T18:05:26.000Z
|
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from nilearn.plotting import plot_anat
from config import fname, subject_id, n_jobs
report = mne.open_report(fname.report)
epochs = mne.read_epochs(fname.epochs)
noise_cov = mne.compute_covariance(epochs, tmin=-0.2, tmax=0, method='shrunk', rank='info')
bem = mne.read_bem_solution(fname.bem)
trans = mne.transforms.read_trans(fname.trans)
# Find the slope of the onset
evoked = epochs.average().crop(0.03, 0.05)
_, mag_peak = evoked.get_peak('mag')
_, grad_peak = evoked.get_peak('grad')
peak_time = (mag_peak + grad_peak) / 2
evoked = epochs.average().crop(peak_time - 0.005, peak_time + 0.005)
print(evoked)
dip, res = mne.fit_dipole(evoked, noise_cov, bem, trans, n_jobs=n_jobs, verbose=True)
dip = dip[int(np.argmax(dip.gof))]
dip.save(fname.ecd, overwrite=True)
# Plot the result in 3D brain with the MRI image using Nilearn
mri_pos = mne.head_to_mri(dip.pos, mri_head_t=trans,
subject=subject_id, subjects_dir=fname.subjects_dir)
t1_fname = op.join(fname.subjects_dir, subject_id, 'mri', 'T1.mgz')
fig = plt.figure()
plot_anat(t1_fname, cut_coords=mri_pos[0], title='Dipole loc.', figure=fig)
report.add_figs_to_section(fig, 'ECD source location', 'Source level', replace=True)
report.save(fname.report, overwrite=True, open_browser=False)
report.save(fname.report_html, overwrite=True, open_browser=False)
| 36.692308
| 91
| 0.750524
| 240
| 1,431
| 4.283333
| 0.433333
| 0.026265
| 0.036965
| 0.044747
| 0.05642
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018268
| 0.120196
| 1,431
| 38
| 92
| 37.657895
| 0.798253
| 0.061495
| 0
| 0
| 0
| 0
| 0.050746
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.214286
| 0
| 0.214286
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4cbe718ce99cfda8db68cebd8b2f70d40be2a56
| 299
|
py
|
Python
|
Exercicios/Resposta-EstruturaDeDecisao/Exerc_4.py
|
ThaisAlves7/Exercicios_PythonBrasil
|
3c55f56c44b4da9953a79398859e7c73a155dc0e
|
[
"MIT"
] | null | null | null |
Exercicios/Resposta-EstruturaDeDecisao/Exerc_4.py
|
ThaisAlves7/Exercicios_PythonBrasil
|
3c55f56c44b4da9953a79398859e7c73a155dc0e
|
[
"MIT"
] | null | null | null |
Exercicios/Resposta-EstruturaDeDecisao/Exerc_4.py
|
ThaisAlves7/Exercicios_PythonBrasil
|
3c55f56c44b4da9953a79398859e7c73a155dc0e
|
[
"MIT"
] | null | null | null |
# Write a program that checks whether a typed letter is a vowel or a consonant.
vogais = ['A', 'E', 'I', 'O', 'U']
letra = input('Digite uma letra: ').strip()
letra = letra.capitalize()
if letra in vogais:
print(f'A letra {letra} é uma vogal')
else:
print(f'A letra {letra} é uma consoante')
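# e.g. typing "a" prints "A letra A é uma vogal"; typing "b" prints the
# consoante (consonant) message.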
| 27.181818
| 76
| 0.655518
| 49
| 299
| 4
| 0.571429
| 0.153061
| 0.071429
| 0.122449
| 0.214286
| 0.214286
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0.190635
| 299
| 11
| 77
| 27.181818
| 0.809917
| 0.247492
| 0
| 0
| 0
| 0
| 0.361607
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.285714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4cc48259575ae3ff337d2ba3e8068256382c270
| 21,443
|
py
|
Python
|
summarization_utils.py
|
allenai/advisor
|
6849755042c6dab1488f64cf21bde2322add3cc1
|
[
"Apache-2.0"
] | 5
|
2021-12-13T18:21:35.000Z
|
2022-03-27T17:18:09.000Z
|
summarization_utils.py
|
allenai/advisor
|
6849755042c6dab1488f64cf21bde2322add3cc1
|
[
"Apache-2.0"
] | null | null | null |
summarization_utils.py
|
allenai/advisor
|
6849755042c6dab1488f64cf21bde2322add3cc1
|
[
"Apache-2.0"
] | null | null | null |
import ast
import hashlib
import json
import os
from collections import defaultdict
from typing import Tuple, Sequence, Dict, Optional, Union, Any, Set
import compress_pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas
import pandas as pd
from filelock import FileLock
from allenact.utils.misc_utils import (
bootstrap_max_of_subset_statistic,
expected_max_of_subset_statistic,
all_equal,
)
from minigrid_and_pd_scripts.compute_random_performance_for_task import (
TASK_TO_RANDOM_PERFORMANCE,
)
from projects.advisor.advisor_constants import (
METHOD_ORDER,
METHOD_TO_COLOR,
METHOD_TO_LINE_MARKER,
EXPERIMENT_STR_TO_LABEL_DICT,
)
from projects.advisor.lighthouse_scripts.summarize_pairwise_imitation_data import (
set_size,
)
from projects.advisor.minigrid_constants import ENV_NAMES_TO_TITLE
plt.rc("font", **{"family": "serif", "serif": ["CMU"], "size": 16})
plt.rc("xtick", labelsize=12)
plt.rc("ytick", labelsize=12)
plt.rc("text", usetex=True)
plt.rc("text.latex", preamble=r"\usepackage{amsmath}")
METRIC_TO_LABEL = {
"reward": "Reward",
"rewards": "Reward",
"avg_ep_length": "Avg. Ep. Length",
"success": "Success",
}
def unzip(xs):
a = None
n = None
for x in xs:
if n is None:
n = len(x)
a = [[] for _ in range(n)]
for i, y in enumerate(x):
a[i].append(y)
return a
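# e.g. unzip([(1, 2), (3, 4), (5, 6)]) -> [[1, 3, 5], [2, 4, 6]]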
def add_columns_to_df(df):
keys = ["alpha_start", "alpha_stop", "fixed_alpha", "lr", "tf_ratio"]
for key in keys + ["pretty_label"]:
df[key] = [None] * df.shape[0]
def read_config_kwargs_str(config_kwargs_str):
if config_kwargs_str == "" or config_kwargs_str is None:
return {}
elif isinstance(config_kwargs_str, Dict):
return config_kwargs_str
else:
try:
return json.loads(config_kwargs_str)
except Exception:
return ast.literal_eval(config_kwargs_str)
df.loc[:, "config_kwargs"] = [
read_config_kwargs_str(config_kwargs_str)
for config_kwargs_str in df.loc[:, "config_kwargs_str"]
]
for i in range(df.shape[0]):
row = df.loc[i, :]
config_kwargs: Dict[str, Any] = row["config_kwargs"]
for key in keys:
df.loc[i, key] = config_kwargs.get(key.upper(), None)
for i in range(df.shape[0]):
df.loc[i, "pretty_label"] = run_info_to_pretty_label(dict(df.loc[i, :]))
return df
def plot_max_hp_curves(
x_to_y_list: Sequence[Dict[Union[int, float], float]],
x_to_bootstrap_ys_list: Sequence[Dict[Union[int, float], Sequence[float]]],
method_labels: Sequence[str],
colors: Sequence[Tuple[int, int, int]],
line_styles: Optional[Sequence] = None,
line_markers: Optional[Sequence] = None,
title: str = "",
xlabel: str = "",
ylabel: str = "",
fig_size=(4, 4 * 3.0 / 5.0),
save_path: Optional[str] = None,
put_legend_outside: bool = True,
include_legend: bool = False,
performance_of_random_agent: Optional[float] = None,
best_inds_to_highlight: Optional[Set] = None,
):
"""Plots E[max(metric | n hp runs)] curves.
For more information on studying sensitivity of methods to
hyperparameter tuning, refer to Dodge et al. EMNLP 2019
https://arxiv.org/abs/1909.03004
"""
line_styles = ["solid"] * len(colors) if line_styles is None else line_styles
line_markers = [""] * len(colors) if line_markers is None else line_markers
plt.grid(
b=True,
which="major",
color=np.array([0.93, 0.93, 0.93]),
linestyle="-",
zorder=-2,
)
plt.minorticks_on()
plt.grid(
b=True,
which="minor",
color=np.array([0.97, 0.97, 0.97]),
linestyle="-",
zorder=-2,
)
ax = plt.gca()
ax.set_axisbelow(True)
# Hide the right and top spines
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
if best_inds_to_highlight is None:
best_inds_to_highlight = set(range(len(x_to_y_list)))
xscaled = False
for (
index,
(x_to_y, x_to_bootstrap_ys, method_label, color, line_style, line_marker,),
) in enumerate(
zip(
x_to_y_list,
x_to_bootstrap_ys_list,
method_labels,
colors,
line_styles,
line_markers,
)
):
xvals = list(sorted(x_to_bootstrap_ys.keys()))
points_list = [x_to_bootstrap_ys[x] for x in xvals]
points = [x_to_y[x] for x in xvals]
should_highlight = index in best_inds_to_highlight
if max(xvals) > 1e3:
xscaled = True
xvals = [x / 1e6 for x in xvals]
try:
lower, _, upper = unzip(
[np.percentile(points, [25, 50, 75]) for points in points_list]
)
except Exception as _:
print(
"Could not generate max_hp_curve for {}, too few points".format(
method_label
)
)
continue
if performance_of_random_agent is not None:
xvals = [0] + xvals
points = [performance_of_random_agent] + points
lower = [performance_of_random_agent] + lower
upper = [performance_of_random_agent] + upper
plt.gca().fill_between(
xvals,
lower,
upper,
color=np.array(color + (25 if should_highlight else 0,)) / 255,
zorder=1,
)
plot_kwargs = dict(
lw=2.5,
linestyle=line_style,
marker=line_marker,
markersize=8,
markevery=4 if len(xvals) > 10 else 1,
zorder=2,
)
label = (
r"{}.{}".format(index + 1, "\ \ " if index + 1 < 10 else " ") + method_label
)
color = np.array(color + (255,)) / 255
plt.plot([], [], label=label, color=color, **plot_kwargs) # FOR LEGEND ONLY
if not should_highlight:
color = np.array(color)
color[3] = 0.1
plt.plot(xvals, points, color=color, **plot_kwargs)
plt.title(title)
plt.xlabel(xlabel + (r"(Millions)" if xscaled and len(xlabel) != 0 else r""))
plt.ylabel(ylabel)
plt.ticklabel_format(style="plain")
plt.tight_layout()
if include_legend:
if put_legend_outside:
ax = plt.gca()
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc="center left", bbox_to_anchor=(1, 0.5))
else:
plt.legend()
set_size(*fig_size)
if save_path is None:
plt.show()
else:
plt.savefig(
save_path, bbox_inches="tight",
)
plt.close()
print(f"Figure saved to {save_path}")
def create_comparison_hp_plots_from_tsv(
num_hp_evals_for_steps_plot: int,
tsv_file_path: str,
highlight_best: bool,
overwrite=True,
include_legend: bool = False,
hide_labels: bool = False,
):
assert os.path.exists(tsv_file_path)
file_dir, file_name = os.path.split(tsv_file_path)
with open(tsv_file_path, "r") as f:
tsv_hash = str(hashlib.md5(f.read().encode()).hexdigest())
df = pd.read_csv(tsv_file_path, sep="\t")
df = add_columns_to_df(df)
env_type_key = "env"
assert (
df[env_type_key] == df[env_type_key][0]
).all(), "env must be the same for all elements of df"
task_name = df[env_type_key][0]
del df[env_type_key]
df = df.sort_values(by=["exp_type", "seed"])
group_keys = ["exp_type"]
df_grouped = df.groupby(by=group_keys)
df_grouped_lists = df_grouped.agg(list)
# One sort index, based on the first metric
for metric_key in [
"reward",
# "success",
# "avg_ep_length",
]:
if not os.path.exists(file_dir):
print("IN WRONG DIRECTORY.")
else:
plots_dir = os.path.join(file_dir, "neurips21_plots", task_name)
os.makedirs(plots_dir, exist_ok=True)
box_save_path = os.path.join(
plots_dir,
"{}__box_{}_{}.pdf".format(
file_name.replace(".tsv", ""), task_name, metric_key,
),
)
if (not overwrite) and os.path.exists(box_save_path):
print(
"Plot {} exists and overwrite is `False`, skipping...".format(
box_save_path
)
)
continue
tsv_summary_dir = os.path.join(file_dir, "neurips21_summaries")
os.makedirs(tsv_summary_dir, exist_ok=True)
tsv_summary_save_path = os.path.join(
tsv_summary_dir, f"{metric_key}__all_results.tsv"
)
grouped_df_index = df_grouped_lists.index.to_frame(index=False)
method_keys = list(grouped_df_index["exp_type"])
sort_index = [
ind
for _, ind in sorted(
[
(METHOD_ORDER.index(method_key), sort_ind)
if method_key in METHOD_ORDER
                        else (1e6, sort_ind)  # dead branch given the filter below; kept type-consistent
for sort_ind, method_key in enumerate(method_keys)
if method_key in METHOD_ORDER
]
)
]
colors = [
METHOD_TO_COLOR.get(method_keys[ind], (0, 0, 0),) for ind in sort_index
]
line_styles = None
line_markers = [
METHOD_TO_LINE_MARKER.get(method_keys[ind], "",) for ind in sort_index
]
sorted_multi_index = [
tuple(grouped_df_index.loc[ind, :]) for ind in sort_index
]
sorted_multi_index = [
x if len(x) != 1 else x[0] for x in sorted_multi_index
]
result_lens = {
multi_ind: len(df_grouped_lists.loc[multi_ind, metric_key])
for multi_ind in sorted_multi_index
}
print(result_lens)
print(sum(result_lens.values()))
points_list = [
list(
map(ast.literal_eval, df_grouped_lists.loc[multi_ind, metric_key],)
)
for multi_ind in sorted_multi_index
]
exp_to_ckpt_training_steps_lists = [
df_grouped_lists.loc[multi_ind, "train_steps"]
for multi_ind in sorted_multi_index
]
assert all(all_equal(l) for l in exp_to_ckpt_training_steps_lists)
exp_ind_to_ckpt_training_steps = [
ast.literal_eval(training_steps_list[0])
for training_steps_list in exp_to_ckpt_training_steps_lists
]
pretty_label_lists = [
df_grouped_lists.loc[multi_ind, "pretty_label"]
for multi_ind in sorted_multi_index
]
assert all(all_equal(l) for l in pretty_label_lists)
yticklabels = [l[0] for l in pretty_label_lists]
subset_size_to_bootstrap_points_list = []
subset_size_to_expected_mas_est_list = []
ckpt_to_bootstrap_points_list = []
ckpt_to_expected_mas_est_list = []
print("Starting expected max reward computations")
for i in range(len(points_list)):
print(f"Computing expected max {metric_key} for {yticklabels[i]}")
vals_per_ckpt_mat = np.array(
points_list[i]
) # each col corresponds to a checkpoint
training_steps_inds_to_skip = []
training_steps = exp_ind_to_ckpt_training_steps[i]
cache_path = os.path.join(
plots_dir, "cache", f"{tsv_hash}_{i}_{metric_key}.pkl.gz"
)
os.makedirs(os.path.dirname(cache_path), exist_ok=True)
if os.path.exists(cache_path):
cache = compress_pickle.load(cache_path)
ckpt_to_expected_mas_est_list.append(
cache["ckpt_to_expected_mas_est"]
)
ckpt_to_bootstrap_points_list.append(
cache["ckpt_to_bootstrap_points"]
)
subset_size_to_expected_mas_est_list.append(
cache["subset_size_to_expected_mas_est"]
)
subset_size_to_bootstrap_points_list.append(
cache["subset_size_to_bootstrap_points"]
)
else:
for j in range(len(training_steps) - 1):
# Skip some weird cases where checkpoints were saved too closely
if (training_steps[j + 1] - training_steps[j]) / training_steps[
-1
] < 0.05:
training_steps_inds_to_skip.append(j)
ckpt_to_expected_mas_est_list.append(
    {
        # `step` avoids shadowing the `training_steps` list being enumerated
        step: expected_max_of_subset_statistic(
            vals_per_ckpt_mat[:, j], m=num_hp_evals_for_steps_plot
        )
        for j, step in enumerate(training_steps)
        if j not in training_steps_inds_to_skip
    }
)
ckpt_to_bootstrap_points_list.append(
    {
        step: bootstrap_max_of_subset_statistic(
            vals_per_ckpt_mat[:, j],
            m=num_hp_evals_for_steps_plot,
            reps=500,
            seed=j,
        )
        for j, step in enumerate(training_steps)
        if j not in training_steps_inds_to_skip
    }
)
max_subset_size = len(points_list[i]) + 1 - 5  # subsets of size at most (num runs - 5)
subset_size_to_expected_mas_est_list.append(
{
m: expected_max_of_subset_statistic(
vals_per_ckpt_mat[:, -1], m=m
)
for m in range(1, max_subset_size)
}
)
subset_size_to_bootstrap_points_list.append(
{
m: bootstrap_max_of_subset_statistic(
vals_per_ckpt_mat[:, -1], m=m, reps=500, seed=m
)
for m in range(1, max_subset_size)
}
)
cache = {}
cache["ckpt_to_expected_mas_est"] = ckpt_to_expected_mas_est_list[
-1
]
cache["ckpt_to_bootstrap_points"] = ckpt_to_bootstrap_points_list[
-1
]
cache[
"subset_size_to_expected_mas_est"
] = subset_size_to_expected_mas_est_list[-1]
cache[
"subset_size_to_bootstrap_points"
] = subset_size_to_bootstrap_points_list[-1]
compress_pickle.dump(cache, cache_path)
color_to_best_val_and_index = defaultdict(lambda: (-float("inf"), -1))
color_to_inds = defaultdict(lambda: [])
for ind, c0 in enumerate(colors):
color_to_inds[c0].append(ind)
final_y = list(sorted(ckpt_to_expected_mas_est_list[ind].items()))[-1][
1
]
if final_y > color_to_best_val_and_index[c0][0]:
color_to_best_val_and_index[c0] = (final_y, ind)
best_inds_to_highlight = set(
v[1] for v in color_to_best_val_and_index.values()
)
plot_max_hp_curves(
x_to_y_list=ckpt_to_expected_mas_est_list,
x_to_bootstrap_ys_list=ckpt_to_bootstrap_points_list,
method_labels=yticklabels,
xlabel=("Training Steps" if not hide_labels else ""),
ylabel=(
f"Expected {METRIC_TO_LABEL[metric_key]}" if not hide_labels else ""
),
colors=colors,
line_styles=line_styles,
line_markers=line_markers,
fig_size=(3 * 1.05, 3 * 1.05),
save_path=box_save_path.replace("_box_", "_train_steps_"),
put_legend_outside=True,
include_legend=include_legend,
title=(ENV_NAMES_TO_TITLE[task_name] if not hide_labels else ""),
performance_of_random_agent=TASK_TO_RANDOM_PERFORMANCE.get(
task_name, {}
).get(metric_key, None),
best_inds_to_highlight=best_inds_to_highlight
if highlight_best
else None,
)
def save_expected_rewards_tsv(
task_name: str,
x_to_y_list: Sequence[Dict[Union[int, float], float]],
method_labels: Sequence[str],
save_path: str,
grouped_inds_list: Sequence[Sequence[int]],
):
def all_nearly_equal(seq):
s = seq[0]
return all(abs(s - ss) / min(s, ss) < 0.01 for ss in seq)
with FileLock(save_path + ".lock"):
if os.path.exists(save_path):
df = pandas.read_csv(save_path, sep="\t")
assert list(df["method"]) == method_labels
else:
df = pandas.DataFrame(data={"method": method_labels})
assert all_nearly_equal(
[max(x_to_y.keys()) for x_to_y in x_to_y_list]
)
if task_name in df.columns:
del df[task_name]
values_at_end_of_training = [
x_to_y[max(x_to_y.keys())] for x_to_y in x_to_y_list
]
df[task_name] = values_at_end_of_training
df = df.reindex(["method"] + list(sorted(df.columns[1:])), axis=1)
df.to_csv(save_path, sep="\t", index=False, float_format="%.2f")
save_path = save_path.replace(".tsv", "_group.tsv")
with FileLock(save_path + ".lock"):
grouped_method_labels = [
method_labels[inds[0]] for inds in grouped_inds_list
]
if os.path.exists(save_path):
df = pandas.read_csv(save_path, sep="\t")
assert list(df["method"]) == grouped_method_labels
else:
df = pandas.DataFrame(data={"method": grouped_method_labels})
grouped_values_at_end_of_training = [
max(values_at_end_of_training[i] for i in inds)
for inds in grouped_inds_list
]
df[task_name] = grouped_values_at_end_of_training
df = df.reindex(["method"] + list(sorted(df.columns[1:])), axis=1)
df.to_csv(save_path, sep="\t", index=False, float_format="%.2f")
save_expected_rewards_tsv(
task_name=ENV_NAMES_TO_TITLE[task_name],
x_to_y_list=ckpt_to_expected_mas_est_list,
method_labels=yticklabels,
save_path=tsv_summary_save_path,
grouped_inds_list=[
color_to_inds[k] for k in sorted(color_to_inds.keys())
],
)
plot_max_hp_curves(
x_to_y_list=subset_size_to_expected_mas_est_list,
x_to_bootstrap_ys_list=subset_size_to_bootstrap_points_list,
method_labels=yticklabels,
xlabel=("$N$" if not hide_labels else ""),
ylabel=(
f"\emph{{Robust{METRIC_TO_LABEL[metric_key]}@$N$}}"
if not hide_labels
else ""
),
colors=colors,
line_styles=line_styles,
line_markers=line_markers,
fig_size=(3 * 1.05, 3 * 1.05),
save_path=box_save_path.replace("_box_", "_hpruns_"),
put_legend_outside=False,
include_legend=False,
title=(ENV_NAMES_TO_TITLE[task_name] if not hide_labels else ""),
best_inds_to_highlight=best_inds_to_highlight
if highlight_best
else None,
)
def run_info_to_pretty_label(run_info: Dict[str, Optional[Union[int, str, float]]]):
exp_type = run_info["exp_type"]
return EXPERIMENT_STR_TO_LABEL_DICT[exp_type]
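The plotting code above leans on two helpers, expected_max_of_subset_statistic and bootstrap_max_of_subset_statistic, whose definitions live elsewhere in the repository. A minimal Monte Carlo sketch of what such helpers could look like follows; the function names (suffixed "_sketch") and the sampling approach are assumptions, not the repository's actual implementation.

import numpy as np

def expected_max_of_subset_statistic_sketch(vals, m, n_samples=1000, seed=0):
    # Estimate E[max of a random size-m subset of vals] by Monte Carlo.
    rng = np.random.default_rng(seed)
    vals = np.asarray(vals, dtype=float)
    subsets = rng.choice(vals, size=(n_samples, m), replace=True)
    return float(subsets.max(axis=1).mean())

def bootstrap_max_of_subset_statistic_sketch(vals, m, reps=500, seed=0):
    # Each bootstrap point recomputes the statistic on a resample of vals;
    # the resulting list is what the plots turn into confidence bands.
    rng = np.random.default_rng(seed)
    vals = np.asarray(vals, dtype=float)
    return [
        expected_max_of_subset_statistic_sketch(
            rng.choice(vals, size=len(vals), replace=True), m, seed=rep
        )
        for rep in range(reps)
    ]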
| 36.221284
| 88
| 0.533834
| 2,539
| 21,443
| 4.157936
| 0.155179
| 0.018945
| 0.006062
| 0.024249
| 0.417732
| 0.345458
| 0.275173
| 0.220422
| 0.177039
| 0.162167
| 0
| 0.012613
| 0.375134
| 21,443
| 591
| 89
| 36.282572
| 0.775282
| 0.018934
| 0
| 0.19802
| 0
| 0
| 0.058644
| 0.017136
| 0
| 0
| 0
| 0
| 0.013861
| 1
| 0.015842
| false
| 0
| 0.033663
| 0
| 0.065347
| 0.015842
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4d249940b5e4b94dccb108c35a99b6f1dfb8b25
| 899
|
py
|
Python
|
mylog.py
|
james-prior/python-asyncio-experiments
|
eeda9aafe855f2ef666c694cc6fa85ceef91cfe5
|
[
"MIT"
] | null | null | null |
mylog.py
|
james-prior/python-asyncio-experiments
|
eeda9aafe855f2ef666c694cc6fa85ceef91cfe5
|
[
"MIT"
] | null | null | null |
mylog.py
|
james-prior/python-asyncio-experiments
|
eeda9aafe855f2ef666c694cc6fa85ceef91cfe5
|
[
"MIT"
] | null | null | null |
import datetime
def format_time(t):
return f'{t.seconds:2}.{t.microseconds:06}'
def log(message):
'''
prints a line with:
elapsed time since this function was first called
elapsed time since this function was previously called
message
Elapsed times are shown in seconds with microsecond resolution,
although the actual accuracy is unknown.
'''
global time_of_first_call
global time_of_previous_call
now = datetime.datetime.now()
try:
time_of_first_call
except NameError:
time_of_first_call = now
time_of_previous_call = now
time_since_first_call = now - time_of_first_call
time_since_previous_call = now - time_of_previous_call
print(
format_time(time_since_first_call),
format_time(time_since_previous_call),
message,
)
time_of_previous_call = now
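A hypothetical usage sketch (not part of the original file, and assuming the module is importable as `mylog`); the two printed columns are total elapsed time and time since the previous call:

import time
import mylog

mylog.log('start')         # e.g. " 0.000000  0.000000 start"
time.sleep(0.25)
mylog.log('after sleep')   # e.g. " 0.250310  0.250310 after sleep"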
| 24.972222
| 66
| 0.694105
| 124
| 899
| 4.717742
| 0.403226
| 0.082051
| 0.075214
| 0.102564
| 0.307692
| 0.191453
| 0
| 0
| 0
| 0
| 0
| 0.004464
| 0.252503
| 899
| 35
| 67
| 25.685714
| 0.866071
| 0.285873
| 0
| 0.1
| 0
| 0
| 0.054455
| 0.054455
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.05
| 0.05
| 0.2
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4d4b64f36bac32559212d9f09ad4caa0d9bfce2
| 286
|
py
|
Python
|
tests/superset_test_config.py
|
manueliglesiasgarcia/superset-api-client
|
268b36d3b9694895f0e4a9595af7b592ac7c5b77
|
[
"Apache-2.0"
] | 11
|
2021-05-07T16:34:52.000Z
|
2022-03-17T07:54:56.000Z
|
tests/superset_test_config.py
|
manueliglesiasgarcia/superset-api-client
|
268b36d3b9694895f0e4a9595af7b592ac7c5b77
|
[
"Apache-2.0"
] | 10
|
2021-10-08T20:03:59.000Z
|
2022-03-18T18:28:09.000Z
|
tests/superset_test_config.py
|
manueliglesiasgarcia/superset-api-client
|
268b36d3b9694895f0e4a9595af7b592ac7c5b77
|
[
"Apache-2.0"
] | 6
|
2021-07-09T18:23:09.000Z
|
2022-03-19T09:23:19.000Z
|
"""Configuration for tests"""
import tempfile
DEBUG = True
TESTING = True
WTF_CSRF_ENABLED = False
# APP CONFIG
# Creating a tempfile
SQLALCHEMY_DATABASE_URI = f"sqlite://{tempfile.mkdtemp()}/test.db"
# WEBSERVER
SUPERSET_WEBSERVER_ADDRESS = "0.0.0.0"
SUPERSET_WEBSERVER_PORT = 8080
| 19.066667
| 66
| 0.765734
| 39
| 286
| 5.410256
| 0.769231
| 0.028436
| 0.028436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031873
| 0.122378
| 286
| 14
| 67
| 20.428571
| 0.808765
| 0.227273
| 0
| 0
| 0
| 0
| 0.206573
| 0.173709
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4d7188d1126cf74926c3e66a423812722a2540a
| 22,051
|
py
|
Python
|
special_k/tests/test_signing.py
|
namoopsoo/special_k
|
816ad200e8608d862e20971dc2bc4d2724aaf0bc
|
[
"Apache-2.0"
] | null | null | null |
special_k/tests/test_signing.py
|
namoopsoo/special_k
|
816ad200e8608d862e20971dc2bc4d2724aaf0bc
|
[
"Apache-2.0"
] | null | null | null |
special_k/tests/test_signing.py
|
namoopsoo/special_k
|
816ad200e8608d862e20971dc2bc4d2724aaf0bc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020-present Kensho Technologies, LLC.
from datetime import datetime
import glob
import os
import time
from unittest.mock import Mock
import funcy
import gpg
import pytest
from . import _UNSAFE_KEY_PASSPHRASE, FAKE_KEYS_DIR, TESTING_ENVVAR, TRUSTED_DIR_ENVVAR
from ..check_gpg_keys import (
_verify_trusted_keys_dir,
get_keyname_to_fingerprint,
get_trusted_pub_keys,
)
from ..signing import (
_UNSAFE_KEY_FOR_TESTING_FINGERPRINT,
DAYS_WARNING_FOR_KEY_EXPIRATION,
START_OF_HISTORY,
_is_testing,
add_trusted_keys_to_gpg_home_dir,
get_days_until_expiry,
import_secret_key,
sign_message,
verify_and_extract_message,
warn_for_key_near_expiry,
)
from ..utils import (
get_gpg_homedir_and_context,
get_key_expirations_for_gpg_context,
get_temporary_directory,
)
from .utils import EnvvarCleanupTestCase
# WARNING: rsa1024 is NOT a secure algorithm. That is fine here only because these are tests.
# DO NOT use rsa1024 in practice. It is used here to avoid draining the entropy pool, so the
# tests can run faster.
TEST_KEY_ALGORITHM = "rsa1024"
EXPECTED_EXTRACTED_MESSAGE = b"Test that we can sign models with gpg\n"
# Generated with the following command:
# gpg2 --armor --clearsig --default-key 56BC24E20C87C09D3F8C76A96FD20A3075CFFAF2 my.txt
# Note that `my.txt` is the file that contains the message to be signed,
# and the hex string following `--default-key` is the signing key's fingerprint.
TESTING_KEY_SIGNED_MESSAGE = b"""-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA512
Test that we can sign models with gpg
-----BEGIN PGP SIGNATURE-----
iQGzBAEBCgAdFiEEVrwk4gyHwJ0/jHapb9IKMHXP+vIFAlvY1+wACgkQb9IKMHXP
+vIzbwwAgUloEempNSXkeSG22zz6aCv+VCivj78WERBkCnclFPZzwFTbU0gDRnT0
NwfbUFHuTmu7d8/EDH8I4tCBfJXDg1RNuGXY/GawNqXCQ3oG1h9LP8SR1XTE8G9Y
JMqRZDIo8hBl8PCDdPy0U64h6OzM5tUrHbGMSIAr6tbP1FeqGckpeARgmGr/dwdh
nsKSGzgT9UOJGBRl+SeSgEDzxxxvHSHYGKTxy/0HChnh84+hTrbquwD9VOEPe4f3
SxNNR4LHMx9DfswBq+Jq+rzKQwogQRby/WPkSh1X8b34DeWQyzvpUOg3ubx/meZR
xxQCj7PykbEu3p77HH08w7VoAkMrHN5gr1hkkflJPIo9oJZBhndE7lhua7rrqDyW
ZFOMnTOrnkPIGFfqksv5gNs+zQr2C8g0Zk1UW6BkdABESXPKYQUoGoMdsN/0VcpT
jp3dvpx700gJkSXoWUGpSpBQuZVhT4ZqYJbDG9M51C4oDNaP3SzBzm4AQgg/ccLJ
hH928Z0H
=wYIe
-----END PGP SIGNATURE-----
"""
# Notice, all zeros in one line of the signature
MESSAGE_WITH_INVALID_SIGNATURE = b"""-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA512
Test that we can sign models with gpg
-----BEGIN PGP SIGNATURE-----
iQGzBAEBCgAdFiEEVrwk4gyHwJ0/jHapb9IKMHXP+vIFAlvY1+wACgkQb9IKMHXP
+vIzbwwAgUloEempNSXkeSG22zz6aCv+VCivj78WERBkCnclFPZzwFTbU0gDRnT0
NwfbUFHuTmu7d8/EDH8I4tCBfJXDg1RNuGXY/GawNqXCQ3oG1h9LP8SR1XTE8G9Y
JMqRZDIo8hBl8PCDdPy0U64h6OzM5tUrHbGMSIAr6tbP1FeqGckpeARgmGr/dwdh
0000000000000000000000000000000000000000000000000000000000000000
SxNNR4LHMx9DfswBq+Jq+rzKQwogQRby/WPkSh1X8b34DeWQyzvpUOg3ubx/meZR
xxQCj7PykbEu3p77HH08w7VoAkMrHN5gr1hkkflJPIo9oJZBhndE7lhua7rrqDyW
ZFOMnTOrnkPIGFfqksv5gNs+zQr2C8g0Zk1UW6BkdABESXPKYQUoGoMdsN/0VcpT
jp3dvpx700gJkSXoWUGpSpBQuZVhT4ZqYJbDG9M51C4oDNaP3SzBzm4AQgg/ccLJ
hH928Z0H
=wYIe
-----END PGP SIGNATURE-----
"""
MUTATED_MESSAGE_WITH_MISMATCHED_SIGNATURE = b"""-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA512
Not what the original message contained
-----BEGIN PGP SIGNATURE-----
iQGzBAEBCgAdFiEEVrwk4gyHwJ0/jHapb9IKMHXP+vIFAlvY1+wACgkQb9IKMHXP
+vIzbwwAgUloEempNSXkeSG22zz6aCv+VCivj78WERBkCnclFPZzwFTbU0gDRnT0
NwfbUFHuTmu7d8/EDH8I4tCBfJXDg1RNuGXY/GawNqXCQ3oG1h9LP8SR1XTE8G9Y
JMqRZDIo8hBl8PCDdPy0U64h6OzM5tUrHbGMSIAr6tbP1FeqGckpeARgmGr/dwdh
nsKSGzgT9UOJGBRl+SeSgEDzxxxvHSHYGKTxy/0HChnh84+hTrbquwD9VOEPe4f3
SxNNR4LHMx9DfswBq+Jq+rzKQwogQRby/WPkSh1X8b34DeWQyzvpUOg3ubx/meZR
xxQCj7PykbEu3p77HH08w7VoAkMrHN5gr1hkkflJPIo9oJZBhndE7lhua7rrqDyW
ZFOMnTOrnkPIGFfqksv5gNs+zQr2C8g0Zk1UW6BkdABESXPKYQUoGoMdsN/0VcpT
jp3dvpx700gJkSXoWUGpSpBQuZVhT4ZqYJbDG9M51C4oDNaP3SzBzm4AQgg/ccLJ
hH928Z0H
=wYIe
-----END PGP SIGNATURE-----
"""
def _get_fingerprints_in_trust_db(trustdb_path):
"""Get fingerprints (and associated trust levels) in a trustdb file"""
with open(trustdb_path, "r") as fi:
trustdb = fi.readlines()
fingerprints_in_db = [
entry.strip() # remove comments, whitespace, and newlines from trustdb
for entry in trustdb
if not entry.startswith("#")
]
return fingerprints_in_db
def _write_contents_to_file(filepath, contents):
"""Write contents to a file."""
with open(filepath, "w") as fi:
fi.write(contents)
class TestFakeKeySafety(EnvvarCleanupTestCase):
def test_testing_usage(self):
# test that we can properly verify a message signed by the test directory if we set the
# trusted keys directory to the unsafe one and enable the testing flag
os.environ[TESTING_ENVVAR] = "1"
os.environ[TRUSTED_DIR_ENVVAR] = FAKE_KEYS_DIR
with get_temporary_directory() as gpg_home_dir:
add_trusted_keys_to_gpg_home_dir(gpg_home_dir)
# Sanity-check: ensure that the valid signed message is still accepted and trusted.
self.assertEqual(
EXPECTED_EXTRACTED_MESSAGE,
verify_and_extract_message(gpg_home_dir, TESTING_KEY_SIGNED_MESSAGE),
)
# unset the testing flag, it should now raise RuntimeError
del os.environ[TESTING_ENVVAR]
with get_temporary_directory() as gpg_home_dir:
with self.assertRaises(RuntimeError):
add_trusted_keys_to_gpg_home_dir(gpg_home_dir)
# Now delete the trusted keys dir. We should get a value error when trying to find it
del os.environ[TRUSTED_DIR_ENVVAR]
with get_temporary_directory() as gpg_home_dir:
with self.assertRaises(ValueError):
add_trusted_keys_to_gpg_home_dir(gpg_home_dir)
def test__is_testing(self):
if TESTING_ENVVAR in os.environ:
del os.environ[TESTING_ENVVAR]
self.assertFalse(_is_testing())
os.environ[TESTING_ENVVAR] = "1"
self.assertTrue(_is_testing())
os.environ[TESTING_ENVVAR] = "0"
self.assertFalse(_is_testing())
for bad_val in ("2", "-1", "a string", "1.0", "False", "True"):
os.environ[TESTING_ENVVAR] = bad_val
with self.assertRaises(ValueError):
_is_testing()
class SigningTests(EnvvarCleanupTestCase):
def test_reinitialization_is_safe(self):
with get_temporary_directory() as gpg_home_dir:
# this is now fsync'ed for safety
# Add ultimately trusted key to the home dir twice.
# The second time should have no effect.
add_trusted_keys_to_gpg_home_dir(gpg_home_dir)
add_trusted_keys_to_gpg_home_dir(gpg_home_dir)
# Sanity-check: ensure that the valid signed message is still accepted and trusted.
self.assertEqual(
EXPECTED_EXTRACTED_MESSAGE,
verify_and_extract_message(gpg_home_dir, TESTING_KEY_SIGNED_MESSAGE),
)
def test_sign_and_verify_with_new_key(self):
passphrase = None
with get_gpg_homedir_and_context(passphrase, algorithm=TEST_KEY_ALGORITHM) as (
gpg_home_dir,
new_key,
key_fingerprint,
):
test_message = b"Hello world! This is a test!\n"
signed_data = sign_message(gpg_home_dir, key_fingerprint, test_message)
recovered_message = verify_and_extract_message(gpg_home_dir, signed_data)
self.assertEqual(test_message, recovered_message)
with self.assertRaises(ValueError):
# GPG seems to like adding a newline to the end of the extracted message,
# regardless of whether or not the original message contained a newline.
# For safety, we don't allow messages that do not end in a newline to be signed.
sign_message(
gpg_home_dir, key_fingerprint, b"message that does not end in a newline"
)
# The new GPG home dir does not trust the ultimately trusted key.
# We can use this fact to test that invalid signatures are not respected.
# TODO: Since the signature is from an unknown pubkey, that causes a SEGFAULT
# that crashes the python interpreter, stopping the tests.
# See if anything can be done in this case.
# with self.assertRaises(gpg.errors.VerificationError):
# verify_and_extract_message(gpg_home_dir, MASTER_KEY_SIGNED_MESSAGE)
def test_sign_and_verify_with_key_and_passphrase(self):
passphrase = "test_sign_and_verify_with_key_and_passphrase"
with get_gpg_homedir_and_context(passphrase, algorithm=TEST_KEY_ALGORITHM) as (
gpg_home_dir,
new_key,
key_fingerprint,
):
test_message = b"Hello world! This is a test!\n"
signed_data = sign_message(
gpg_home_dir, key_fingerprint, test_message, passphrase=passphrase
)
recovered_message = verify_and_extract_message(gpg_home_dir, signed_data)
self.assertEqual(test_message, recovered_message)
def test_import_private_key(self):
private_key_path = os.path.join(FAKE_KEYS_DIR, "testing.secret.asc")
with get_temporary_directory() as gpg_home_dir:
import_secret_key(gpg_home_dir, private_key_path, passphrase=_UNSAFE_KEY_PASSPHRASE)
with gpg.Context(home_dir=gpg_home_dir) as ctx:
keys = list(ctx.keylist())
self.assertEqual(len(keys), 1)
self.assertEqual(keys[0].fpr, _UNSAFE_KEY_FOR_TESTING_FINGERPRINT)
@pytest.mark.skip("GPG will segfault if we provide a bad passphrase, and we cannot test that")
def test_attempt_signing_with_bad_passphrase(self):
passphrase = "test_attempt_signing_with_bad_passphrase"
with get_gpg_homedir_and_context(passphrase, algorithm=TEST_KEY_ALGORITHM) as (
gpg_home_dir,
new_key,
key_fingerprint,
):
test_message = b"Hello world! This is a test!\n"
# Using an incorrect passphrase for the key will result in an error.
with self.assertRaises(AssertionError):
sign_message(
gpg_home_dir, key_fingerprint, test_message, passphrase="incorrect passphrase"
)
class ExpiryTests(EnvvarCleanupTestCase):
def test_key_expiry_utils(self):
seconds_in_a_day = 86400
with get_temporary_directory() as gpg_home_dir:
# gpg_home_dir is now fsync'ed
with gpg.Context(
home_dir=gpg_home_dir,
armor=True,
offline=True,
pinentry_mode=gpg.constants.PINENTRY_MODE_LOOPBACK,
) as ctx:
new_key_long_expiry = ctx.create_key(
"test@example.com",
# make the key expire well past the expiration-warning window
# (give an extra 60s because key creation can take more than a second)
expires_in=DAYS_WARNING_FOR_KEY_EXPIRATION * seconds_in_a_day * 2 + 60,
algorithm=TEST_KEY_ALGORITHM,
sign=True,
passphrase=None,
)
key_expirations = list(get_key_expirations_for_gpg_context(ctx).items())
self.assertEqual(len(key_expirations), 1) # there should only be one key
fpr, expiry = key_expirations[0]
self.assertEqual(fpr, new_key_long_expiry.fpr)
day_to_expiry = (expiry - datetime.now()).days
# TODO: Determine why this test fails occasionally with day_to_expiry off by one
self.assertAlmostEqual(day_to_expiry, 2 * DAYS_WARNING_FOR_KEY_EXPIRATION, delta=1)
# now test keys with no expiration
with get_temporary_directory() as gpg_home_dir:
# gpg_home_dir is now fsync'ed
with gpg.Context(
home_dir=gpg_home_dir,
armor=True,
offline=True,
pinentry_mode=gpg.constants.PINENTRY_MODE_LOOPBACK,
) as ctx:
new_key_no_expiry = ctx.create_key(
"test@example.com",
# make a key that never expires
expires=False,
algorithm=TEST_KEY_ALGORITHM,
sign=True,
passphrase=None,
)
key_expirations = list(get_key_expirations_for_gpg_context(ctx).items())
self.assertEqual(len(key_expirations), 1) # there should only be one key
fpr, expiry = key_expirations[0]
self.assertEqual(fpr, new_key_no_expiry.fpr)
self.assertEqual(expiry, START_OF_HISTORY)
days_until_expiry = get_days_until_expiry(ctx)[fpr]
self.assertEqual(days_until_expiry, float("inf"))
def test_expiry_warning(self):
with get_temporary_directory() as gpg_home_dir:
# gpg_home_dir is now fsync'ed
with gpg.Context(
home_dir=gpg_home_dir,
armor=True,
offline=True,
pinentry_mode=gpg.constants.PINENTRY_MODE_LOOPBACK,
) as ctx:
new_key_short_expiry = ctx.create_key(
"test@example.com",
# make the key expire soon enough to trigger the warning
expires_in=60 * 60,  # expires in an hour
algorithm=TEST_KEY_ALGORITHM,
sign=True,
passphrase=None,
)
key_expirations = list(get_key_expirations_for_gpg_context(ctx).items())
self.assertEqual(len(key_expirations), 1) # there should only be one key
fpr, expiry = key_expirations[0]
self.assertEqual(fpr, new_key_short_expiry.fpr)
day_to_expiry = (expiry - datetime.now()).days
self.assertEqual(day_to_expiry, 0)
with self.assertLogs("special_k.signing", level="WARNING"):
warn_for_key_near_expiry(ctx)
def test_contradictory_expiry_info(self):
# Test a key that is marked as expired, despite having an expiration date in the future
with get_temporary_directory() as gpg_home_dir:
with gpg.Context(
home_dir=gpg_home_dir,
armor=True,
offline=True,
pinentry_mode=gpg.constants.PINENTRY_MODE_LOOPBACK,
) as ctx:
ctx.create_key(
"test@example.com",
expires_in=24 * 60 * 60,
algorithm=TEST_KEY_ALGORITHM,
sign=True,
passphrase=None,
)
keylist = list(ctx.keylist())
new_key = keylist[0]
new_key.expired = 1
ctx.get_key = Mock(return_value=new_key)
with self.assertRaisesRegex(
AssertionError, "Key with fingerprint .* is marked as expired"
):
get_key_expirations_for_gpg_context(ctx)
# Test a key that is marked as unexpired, despite having an expiration date in the past
with get_temporary_directory() as gpg_home_dir:
with gpg.Context(
home_dir=gpg_home_dir,
armor=True,
offline=True,
pinentry_mode=gpg.constants.PINENTRY_MODE_LOOPBACK,
) as ctx:
ctx.create_key(
"test@example.com",
expires_in=1,
algorithm=TEST_KEY_ALGORITHM,
sign=True,
passphrase=None,
)
time.sleep(1) # Wait until the key expires
keylist = list(ctx.keylist())
new_key = keylist[0]
new_key.expired = 0
ctx.get_key = Mock(return_value=new_key)
with self.assertRaisesRegex(
AssertionError, "Key with fingerprint .* is marked as not expired"
):
get_key_expirations_for_gpg_context(ctx)
# Test a key that is marked as expired, but never expires
with get_temporary_directory() as gpg_home_dir:
with gpg.Context(
home_dir=gpg_home_dir,
armor=True,
offline=True,
pinentry_mode=gpg.constants.PINENTRY_MODE_LOOPBACK,
) as ctx:
ctx.create_key(
"test@example.com",
expires=False,
algorithm=TEST_KEY_ALGORITHM,
sign=True,
passphrase=None,
)
keylist = list(ctx.keylist())
new_key = keylist[0]
new_key.expired = 1
ctx.get_key = Mock(return_value=new_key)
with self.assertRaisesRegex(
AssertionError, "Key with fingerprint .* has no expiration date"
):
get_key_expirations_for_gpg_context(ctx)
class TestTrustedKeys(EnvvarCleanupTestCase):
def test_checked_in_keys(self):
# test that there is a one-to-one map between checked-in keys and fingerprints
keyname_to_fingerprint = get_keyname_to_fingerprint()
self.assertEqual(set(get_trusted_pub_keys()), set(keyname_to_fingerprint.keys()))
self.assertIn(_UNSAFE_KEY_FOR_TESTING_FINGERPRINT, keyname_to_fingerprint.values())
# Make sure people don't mess with the trusted_keys directory
cur_path = os.path.dirname(os.path.abspath(__file__))
trusted_keys_dir = os.path.join(cur_path, "./fake_keys")
trustdb_path = os.path.join(trusted_keys_dir, "trustdb.txt")
# enumerate all the possible files that might have accidentally ended up in trusted_keys
# If someone has a good reason to add a .py file (other than __init__), they can
# delete that extension from here
file_patterns_to_check = ("*.py", "*.txt", "*.key", "*.pem", "*.pub*", "*.asc")
all_files_in_trusted_keys_dir = funcy.flatten(
glob.glob(os.path.join(trusted_keys_dir, pattern)) for pattern in file_patterns_to_check
)
all_file_names = {  # take basenames and find uniques
os.path.basename(filepath) for filepath in all_files_in_trusted_keys_dir
}
expected_filenames = get_trusted_pub_keys().union(
{"trustdb.txt", "__init__.py", "my.txt.asc", "testing.secret.asc"}
)
# expected_filenames is a frozenset, need to cast to set for nice debugging
self.assertEqual(all_file_names, set(expected_filenames))
# test that only the ultimately trusted key is in the trustdb
fingerprints_in_trust_db = _get_fingerprints_in_trust_db(trustdb_path)
self.assertEqual(
len(fingerprints_in_trust_db),
1,
"Found {} items in trustdb. Expected 1. Someone has added keys to the "
"trustdb but only the ultimately trusted key should be "
"there".format(len(fingerprints_in_trust_db)),
)
expected_entry = "{}:6:".format(_UNSAFE_KEY_FOR_TESTING_FINGERPRINT)
self.assertEqual(
fingerprints_in_trust_db[0],
expected_entry,
"Found a single entry, `{}` in the trustdb but it does not match the "
"ultimately trusted key. Only the ultimately trusted key should live inside the "
"trust db.".format(fingerprints_in_trust_db[0]),
)
def test__verify_trusted_keys_dir(self):
# get everything right
with get_temporary_directory() as trusted_keys_dir:
filepath = os.path.join(trusted_keys_dir, "key1.pub.asc")
_write_contents_to_file(filepath, "this is a key")
filepath = os.path.join(trusted_keys_dir, "trustdb.txt")
_write_contents_to_file(filepath, "this is a trustdb")
filepath = os.path.join(trusted_keys_dir, "keyname-to-fingerprint.json")
_write_contents_to_file(filepath, "this is a json map")
_verify_trusted_keys_dir(trusted_keys_dir)
# no public key
with get_temporary_directory() as trusted_keys_dir:
filepath = os.path.join(trusted_keys_dir, "trustdb.txt")
_write_contents_to_file(filepath, "this is a trustdb")
filepath = os.path.join(trusted_keys_dir, "keyname-to-fingerprint.json")
_write_contents_to_file(filepath, "this is a json map")
with self.assertRaisesRegex(ValueError, "No public keys.*"):
_verify_trusted_keys_dir(trusted_keys_dir)
# no trustdb
with get_temporary_directory() as trusted_keys_dir:
filepath = os.path.join(trusted_keys_dir, "key1.pub.asc")
_write_contents_to_file(filepath, "this is a key")
filepath = os.path.join(trusted_keys_dir, "keyname-to-fingerprint.json")
_write_contents_to_file(filepath, "this is a json map")
with self.assertRaisesRegex(ValueError, "No `trustdb.txt`.*"):
_verify_trusted_keys_dir(trusted_keys_dir)
# keyname to fingerprint
with get_temporary_directory() as trusted_keys_dir:
filepath = os.path.join(trusted_keys_dir, "key1.pub.asc")
_write_contents_to_file(filepath, "this is a key")
filepath = os.path.join(trusted_keys_dir, "trustdb.txt")
_write_contents_to_file(filepath, "this is a trustdb")
with self.assertRaisesRegex(ValueError, "No file `keyname-to-fingerprint.*"):
_verify_trusted_keys_dir(trusted_keys_dir)
| 42.984405
| 100
| 0.655072
| 2,576
| 22,051
| 5.320264
| 0.157997
| 0.027581
| 0.033564
| 0.027362
| 0.63174
| 0.586428
| 0.563809
| 0.539511
| 0.509814
| 0.495586
| 0
| 0.02523
| 0.275634
| 22,051
| 512
| 101
| 43.068359
| 0.83278
| 0.15378
| 0
| 0.584416
| 0
| 0
| 0.196385
| 0.103168
| 0
| 0
| 0
| 0.001953
| 0.101299
| 1
| 0.036364
| false
| 0.049351
| 0.041558
| 0
| 0.090909
| 0.067532
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4d7953efcb7408bce5180ef4d3341f1a6b7b1ad
| 4,603
|
py
|
Python
|
src/models/CORAL-LM/coral/interactive.py
|
behavioral-data/multiverse
|
82b7265de0aa3e9d229ce9f3f86b8b48435ca365
|
[
"MIT"
] | null | null | null |
src/models/CORAL-LM/coral/interactive.py
|
behavioral-data/multiverse
|
82b7265de0aa3e9d229ce9f3f86b8b48435ca365
|
[
"MIT"
] | null | null | null |
src/models/CORAL-LM/coral/interactive.py
|
behavioral-data/multiverse
|
82b7265de0aa3e9d229ce9f3f86b8b48435ca365
|
[
"MIT"
] | 1
|
2021-08-19T15:21:50.000Z
|
2021-08-19T15:21:50.000Z
|
import argparse
from torch.utils.data import DataLoader
from .model import BERT
from .trainer import BERTTrainer
# SNAPVocab is used in the non-markdown branch below; it is assumed to live in .dataset as well
from .dataset import DataReader, UnitedVocab, SNAPVocab, CORALDataset, my_collate, key_lib
import pdb
import os
import json
import torch
class Session():
def __init__(self, dataset,
             model_path,
             vocab_path,
             cuda_devices="1",
             duplicate=1,
             log_freq=10000,
             batch_size=16,
             markdown=True,
             max_graph_num=1000000,
             seq_len=160,
             num_workers=1,
             min_occur=1,
             weak_supervise=True,
             use_sub_token=False,
             adam_beta1=0.9,
             adam_beta2=0.99,
             adam_weight_decay=0.1,
             lr=0.0003,
             hidden=256,
             layers=4,
             attn_heads=4,
             with_cuda=True,
             dropout=0.2):
os.environ['CUDA_VISIBLE_DEVICES'] = cuda_devices
self.dataset = dataset
self.vocab_path = vocab_path
self.model_path = model_path
self.cuda_devices = cuda_devices
self.log_freq = log_freq
self.max_graph_num = max_graph_num
self.seq_len = seq_len
self.min_occur = min_occur
self.weak_supervise = weak_supervise
self.duplicate = duplicate
self.adam_weight_decay = adam_weight_decay
self.adam_beta1 = adam_beta1
self.adam_beta2 = adam_beta2
self.use_sub_token = use_sub_token
self.markdown = markdown
self.batch_size = batch_size
self.num_workers = num_workers
self.with_cuda = with_cuda
self.hidden = hidden
self.attn_heads = attn_heads
self.layers = layers
self.dropout = dropout
self.lr = lr
print("Load Data", self.dataset)
data_reader = DataReader(
self.dataset, use_sub_token=self.use_sub_token, max_graph_num=self.max_graph_num, code_filter=key_lib)
print("Loading Vocab")
if self.markdown:
self.vocab = UnitedVocab(data_reader.graphs, min_occur=self.min_occur,
use_sub_token=self.use_sub_token, path=self.vocab_path)
else:
self.vocab = SNAPVocab(data_reader.graphs, min_occur=self.min_occur,
use_sub_token=self.use_sub_token)
print("Vocab Size: ", len(self.vocab))
print("Loading Train Dataset", self.dataset)
self.train_dataset = CORALDataset(data_reader.graphs[:int(len(data_reader) * 0.8)], self.vocab, seq_len=self.seq_len,
n_neg=self.duplicate, use_sub_token=self.use_sub_token, markdown=self.markdown, masked=True)
print(len(self.train_dataset))
print("Loading Dev Dataset", self.dataset)
self.test_dataset = CORALDataset(data_reader.graphs[int(len(data_reader) * 0.8):], self.vocab, seq_len=self.seq_len,
n_neg=self.duplicate, use_sub_token=self.use_sub_token, markdown=self.markdown, masked=True) # \
print(len(self.test_dataset))
print("Creating Dataloaders")
self.train_data_loader = DataLoader(
self.train_dataset, batch_size=self.batch_size, num_workers=self.num_workers, collate_fn=my_collate)
self.test_data_loader = DataLoader(
self.test_dataset, batch_size=self.batch_size, num_workers=self.num_workers, collate_fn=my_collate) # \
print("Building BERT model")
self.bert = BERT(len(self.vocab), hidden=self.hidden,
n_layers=self.layers, attn_heads=self.attn_heads, dropout=self.dropout)
print("Creating BERT Trainer")
self.trainer = BERTTrainer(self.bert, len(self.vocab), train_dataloader=self.train_data_loader, test_dataloader=self.test_data_loader,
lr=self.lr, betas=(self.adam_beta1, self.adam_beta2),
weight_decay=self.adam_weight_decay,
with_cuda=self.with_cuda, cuda_devices=self.cuda_devices,
log_freq=self.log_freq, pad_index=self.vocab.pad_index, model_path=self.model_path)
print("Trainer Complete")
def main():
pass
if __name__ == '__main__':
main()
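A hypothetical way to construct a Session interactively; the dataset/model/vocab paths below are placeholders, and the BERTTrainer train(epoch) call is assumed from the .trainer import rather than confirmed by this file:

# Hypothetical usage sketch; all paths are placeholders.
session = Session(
    dataset="data/notebooks.jsonl",
    model_path="output/coral.model",
    vocab_path="output/coral.vocab",
    batch_size=16,
    with_cuda=False,
)
session.trainer.train(epoch=0)  # BERTTrainer API assumed from .trainer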
| 38.358333
| 142
| 0.582663
| 543
| 4,603
| 4.637201
| 0.198895
| 0.030977
| 0.056791
| 0.035743
| 0.262907
| 0.245433
| 0.245433
| 0.235107
| 0.235107
| 0.235107
| 0
| 0.016999
| 0.335433
| 4,603
| 120
| 143
| 38.358333
| 0.806146
| 0.000652
| 0
| 0.021053
| 0
| 0
| 0.03893
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021053
| false
| 0.010526
| 0.094737
| 0
| 0.126316
| 0.115789
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4d9a9c56221ae7e0e31c377fd09796258aef2bc
| 532
|
py
|
Python
|
tutorials/conversational_search/Tutorial6_Binary_Passage_Retriever.py
|
giguru/converse
|
bfe5ccc0af50455074abf7926a31145ac96834a5
|
[
"Apache-2.0"
] | 9
|
2020-10-23T14:39:45.000Z
|
2021-11-16T10:37:11.000Z
|
tutorials/conversational_search/Tutorial6_Binary_Passage_Retriever.py
|
giguru/converse
|
bfe5ccc0af50455074abf7926a31145ac96834a5
|
[
"Apache-2.0"
] | 12
|
2020-10-07T08:07:51.000Z
|
2020-10-22T14:20:19.000Z
|
tutorials/conversational_search/Tutorial6_Binary_Passage_Retriever.py
|
giguru/converse
|
bfe5ccc0af50455074abf7926a31145ac96834a5
|
[
"Apache-2.0"
] | null | null | null |
from haystack import Pipeline
from haystack.retriever.anserini import DenseAnseriniRetriever
# LOAD COMPONENTS
retriever = DenseAnseriniRetriever(prebuilt_index_name="wikipedia-bpr-single-nq-hash",
binary=True,
query_encoder="castorini/bpr-nq-question-encoder")
# BUILD PIPELINE
p = Pipeline()
p.add_node(component=retriever, name="Retriever", inputs=["Query"])
# RUN A QUERY
output = p.run(query="When was Elon Musk born?")
print(output['documents'])
exit()
| 31.294118
| 86
| 0.682331
| 60
| 532
| 5.983333
| 0.666667
| 0.066852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.208647
| 532
| 17
| 87
| 31.294118
| 0.852732
| 0.078947
| 0
| 0
| 0
| 0
| 0.221766
| 0.125257
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4da6f2e214530239d7254ffdc625a7c298a5b02
| 854
|
py
|
Python
|
ros_system_ws/src/vector79/scripts/voltage_monitor.py
|
DrClick/ARCRacing
|
4428a244c5a4627f4550eba066657b5a87ff0602
|
[
"MIT"
] | 7
|
2016-12-15T22:24:04.000Z
|
2018-12-27T05:48:45.000Z
|
ros_system_ws/src/vector79/scripts/voltage_monitor.py
|
DrClick/ARCRacing
|
4428a244c5a4627f4550eba066657b5a87ff0602
|
[
"MIT"
] | null | null | null |
ros_system_ws/src/vector79/scripts/voltage_monitor.py
|
DrClick/ARCRacing
|
4428a244c5a4627f4550eba066657b5a87ff0602
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import rospy
from std_msgs.msg import String, Float32
import time
import subprocess
def voltage_monitor():
rospy.init_node('voltage_monitor')
info_pub = rospy.Publisher('bus_comm', String, queue_size=1)
voltage_pub = rospy.Publisher('voltage', Float32, queue_size=1)
while True:
time.sleep(10)
input_voltage = subprocess.check_output(['cat','/sys/bus/i2c/devices/0-0040/iio_device/in_voltage0_input']).rstrip()
voltage = round(float(input_voltage)/1000,2)
msg = "VLT:{}".format(voltage)
rospy.loginfo(rospy.get_caller_id() + '%s', msg)
info_pub.publish(msg)
voltage_pub.publish(voltage)
if voltage < 11.2:
msg = "WRN:LOW-VOLTAGE {}".format(voltage)
info_pub.publish(msg)
if __name__ == '__main__':
voltage_monitor()
| 27.548387
| 124
| 0.661593
| 113
| 854
| 4.743363
| 0.557522
| 0.078358
| 0.063433
| 0.063433
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033824
| 0.203747
| 854
| 30
| 125
| 28.466667
| 0.754412
| 0.023419
| 0
| 0.095238
| 0
| 0
| 0.148014
| 0.067389
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.190476
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4de733e5c0ae2e4678e9cda8d9bbc096dd360a5
| 594
|
py
|
Python
|
server/grpc/pyserver.py
|
Panthereum/DigitalBeing
|
7fda011f34dd62c03d1072035ae0ad2a129281a7
|
[
"MIT"
] | 53
|
2021-07-20T04:01:57.000Z
|
2022-03-13T17:31:08.000Z
|
server/grpc/pyserver.py
|
Panthereum/DigitalBeing
|
7fda011f34dd62c03d1072035ae0ad2a129281a7
|
[
"MIT"
] | 58
|
2021-08-20T02:22:16.000Z
|
2021-12-13T10:38:58.000Z
|
server/grpc/pyserver.py
|
Panthereum/DigitalBeing
|
7fda011f34dd62c03d1072035ae0ad2a129281a7
|
[
"MIT"
] | 13
|
2021-08-23T20:16:14.000Z
|
2022-01-31T23:59:21.000Z
|
import logging
import time
# import the original example.py
from handler import DigitalBeing as DB
logger = logging.getLogger('server_logger')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('grpc_server.log')
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
# create a class to hold the server state (in the original gRPC example this
# derived from example_pb2_grpc.AgentServicer; here it does not)
class Service():
def __init__(self):
self.digital_being = DB()
Service()
while True:
time.sleep(86400)
#except KeyboardInterrupt:
#server.stop(0)
| 22.846154
| 61
| 0.754209
| 79
| 594
| 5.556962
| 0.632911
| 0.068337
| 0.091116
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013972
| 0.156566
| 594
| 26
| 62
| 22.846154
| 0.862275
| 0.355219
| 0
| 0
| 0
| 0
| 0.074271
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.214286
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4debd4280513f51ffae81a74e479547f07c0088
| 2,679
|
py
|
Python
|
app/tg/routes.py
|
EeOneDown/spbu4u
|
2ad01088fb167c80c53b757a0247fc5cde34c20f
|
[
"Apache-2.0"
] | 30
|
2017-09-14T20:25:43.000Z
|
2022-03-12T09:55:35.000Z
|
app/tg/routes.py
|
EeOneDown/spbu4u
|
2ad01088fb167c80c53b757a0247fc5cde34c20f
|
[
"Apache-2.0"
] | 59
|
2018-01-12T18:29:24.000Z
|
2019-03-08T21:08:40.000Z
|
app/tg/routes.py
|
EeOneDown/spbu4u
|
2ad01088fb167c80c53b757a0247fc5cde34c20f
|
[
"Apache-2.0"
] | 8
|
2017-12-01T18:36:04.000Z
|
2020-11-22T00:36:15.000Z
|
import logging
from json import loads
from time import time
from flask import request, abort
from telebot.apihelper import ApiException
from telebot.types import Update
from app import db, new_functions as nf
from app.constants import (
webhook_url_base, webhook_url_path, ids, other_error_answer
)
from app.models import User
from app.tg import bp
from tg_bot import bot
def run_bot(update):
tic = time()
was_error = False
answer = "No error"
try:
bot.process_new_updates([update])
except ApiException as ApiExcept:
was_error = True
json_err = loads(ApiExcept.result.text)
if json_err["description"] == "Forbidden: tg_bot was blocked by " \
"the user":
if update.message:
chat_id = update.message.chat.id
else:
chat_id = update.callback_query.message.chat.id
user = User.query.filter_by(tg_id=chat_id).first()
user.clear_all()
db.session.delete(user)
db.session.commit()
logging.info("USER LEFT {0}".format(
update.message.chat.id))
else:
logging.info("ERROR: {0}".format(
json_err["description"]))
except Exception as err:
answer = other_error_answer
was_error = True
bot.send_message(
chat_id=ids["my"],
text=str(err)
)
finally:
if was_error:
if update.message:
chat_id = update.message.chat.id
else:
chat_id = update.callback_query.message.chat.id
bot.send_message(
chat_id=chat_id,
text=answer,
disable_web_page_preview=True,
parse_mode="HTML"
)
nf.write_log(update, time() - tic, was_error)
@bp.route("/reset_webhook", methods=["GET", "HEAD"])
def reset_webhook():
bot.remove_webhook()
bot.set_webhook(url=webhook_url_base + webhook_url_path)
return "OK", 200
@bp.route(webhook_url_path, methods=["POST"])
def webhook():
if request.headers.get("content-type") == "application/json":
# Run the bot as a background process in RQ?
run_bot(
update=Update.de_json(
json_type=request.get_data().decode("utf-8")
)
)
return "OK", 200
else:
abort(403)
@bp.route("/test_route", methods=["POST"])
def test_route():
json_string = request.get_data().decode("utf-8")
print(json_string)
update = Update.de_json(json_string)
bot.process_new_updates([update])
return "OK", 200
| 28.806452
| 75
| 0.591639
| 333
| 2,679
| 4.564565
| 0.348348
| 0.051316
| 0.076974
| 0.0625
| 0.280921
| 0.176316
| 0.107895
| 0.107895
| 0.107895
| 0.107895
| 0
| 0.008588
| 0.304591
| 2,679
| 92
| 76
| 29.119565
| 0.8073
| 0.014184
| 0
| 0.2375
| 0
| 0
| 0.069723
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.1375
| 0
| 0.225
| 0.0125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4df95cc2cdc1f1463a8b2b8946b91a69dbe5207
| 7,391
|
py
|
Python
|
prysm/segmented.py
|
deisenroth/prysm
|
53a400ef89697041f67192e879e61ad28c451318
|
[
"MIT"
] | 110
|
2017-09-28T05:24:22.000Z
|
2022-03-17T17:34:08.000Z
|
prysm/segmented.py
|
mjhoptics/prysm
|
5dea335e068d04d1006741d8eb02278181751f73
|
[
"MIT"
] | 82
|
2018-01-03T03:52:42.000Z
|
2022-02-02T02:30:19.000Z
|
prysm/segmented.py
|
mjhoptics/prysm
|
5dea335e068d04d1006741d8eb02278181751f73
|
[
"MIT"
] | 28
|
2017-12-28T02:47:55.000Z
|
2022-03-29T02:10:11.000Z
|
"""Tools for working with segmented systems."""
from collections import namedtuple
import numpy as truenp
from .geometry import regular_polygon
from .mathops import np
Hex = namedtuple('Hex', ['q', 'r', 's'])
def add_hex(h1, h2):
"""Add two hex coordinates together."""
q = h1.q + h2.q
r = h1.r + h2.r
s = h1.s + h2.s
return Hex(q, r, s)
def sub_hex(h1, h2):
"""Subtract two hex coordinates."""
q = h1.q - h2.q
r = h1.r - h2.r
s = h1.s - h2.s
return Hex(q, r, s)
def mul_hex(h1, h2):
"""Multiply two hex coordinates."""
q = h1.q * h2.q
r = h1.r * h2.r
s = h1.s * h2.s
return Hex(q, r, s)
# as given
hex_dirs = [
Hex(1, 0, -1), Hex(1, -1, 0), Hex(0, -1, 1),
Hex(-1, 0, 1), Hex(-1, 1, 0), Hex(0, 1, -1)
]
def hex_dir(i):
"""Hex direction associated with a given integer, wrapped at 6."""
return hex_dirs[i % 6] # wrap dirs at 6 (there are only 6)
def hex_neighbor(h, direction):
"""Neighboring hex in a given direction."""
return add_hex(h, hex_dir(direction))
def hex_to_xy(h, radius, rot=90):
"""Convert hexagon coordinate to (x,y), if all hexagons have a given radius and rotation."""
if rot == 90:
x = 3/2 * h.q
y = truenp.sqrt(3)/2 * h.q + truenp.sqrt(3) * h.r
else:
x = truenp.sqrt(3) * h.q + truenp.sqrt(3)/2 * h.r
y = 3/2 * h.r
return x*radius, y*radius
def scale_hex(h, k):
"""Scale a hex coordinate by some constant factor."""
return Hex(h.q * k, h.r * k, h.s * k)
def hex_ring(radius):
"""Compute all hex coordinates in a given ring."""
start = Hex(-radius, radius, 0)
tile = start
results = []
# there are 6*r hexes per ring: the outer loop (i) walks the six sides,
# and the inner loop (j) takes `radius` steps along each side, so the
# travel direction resets every time we reach a 'corner' of the ring.
for i in range(6):
for j in range(radius):
results.append(tile)
tile = hex_neighbor(tile, i)
# rotate one so that the first element is 'north'
for _ in range(radius):
results.append(results.pop(0)) # roll < radius > elements so that the first element is "north"
return results
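# Illustrative example (not part of the original module): hex_ring(1)
# yields the six hexes adjacent to the origin; after the final rotation
# the first element is the "north" neighbor:
#   hex_ring(1)[0] == Hex(q=0, r=1, s=-1)
#   hex_to_xy(hex_ring(1)[0], radius=1)  ->  (0.0, sqrt(3))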
def _local_window(cy, cx, center, dx, samples_per_seg, x, y):
offset_x = cx + int(center[0]/dx) - samples_per_seg
offset_y = cy + int(center[1]/dx) - samples_per_seg
upper_x = offset_x + (2*samples_per_seg)
upper_y = offset_y + (2*samples_per_seg)
# clamp the offsets
if offset_x < 0:
offset_x = 0
if offset_x > x.shape[1]:
offset_x = x.shape[1]
if offset_y < 0:
offset_y = 0
if offset_y > y.shape[0]:
offset_y = y.shape[0]
if upper_x < 0:
upper_x = 0
if upper_x > x.shape[1]:
upper_x = x.shape[1]
if upper_y < 0:
upper_y = 0
if upper_y > y.shape[0]:
upper_y = y.shape[0]
return slice(offset_y, upper_y), slice(offset_x, upper_x)
class CompositeHexagonalAperture:
"""An aperture composed of several hexagonal segments."""
def __init__(self, x, y, rings, segment_diameter, segment_separation, segment_angle=90, exclude=()):
"""Create a new CompositeHexagonalAperture.
Note that __init__ is relatively computationally expensive and hides a lot of work.
Parameters
----------
x : `numpy.ndarray`
array of x sample positions, of shape (m, n)
y : `numpy.ndarray`
array of y sample positions, of shape (m, n)
rings : `int`
number of rings in the structure
segment_diameter : `float`
flat-to-flat diameter of each segment, same units as x
segment_separation : `float`
edge-to-nearest-edge distance between segments, same units as x
segment_angle : `float`, optional, {0, 90}
rotation angle of each segment
exclude : sequence of `int`
which segment numbers to exclude.
defaults to all segments included.
The 0th segment is the center of the array.
Other segments begin from the "up" orientation and count clockwise.
"""
(
self.vtov,
self.all_centers,
self.windows,
self.local_coords,
self.local_masks,
self.segment_ids,
self.amp
) = _composite_hexagonal_aperture(rings, segment_diameter, segment_separation,
x, y, segment_angle, exclude)
self.exclude = exclude
def _composite_hexagonal_aperture(rings, segment_diameter, segment_separation, x, y, segment_angle=90, exclude=(0,)):
if segment_angle not in {0, 90}:
raise ValueError('can only synthesize composite apertures with hexagons along a cartesian axis')
flat_to_flat_to_vertex_vertex = 2 / truenp.sqrt(3)
segment_vtov = segment_diameter * flat_to_flat_to_vertex_vertex
rseg = segment_vtov / 2
# center segment
dx = x[0, 1] - x[0, 0]
samples_per_seg = rseg / dx
# add 1, must avoid error in the case that non-center segments
# fall on a different subpixel and have different rounding
# use rseg since it is what we are directly interested in
samples_per_seg = int(samples_per_seg+1)
# compute the center segment over the entire x, y array
# so that mask covers the entirety of the x/y extent
# this may look out of place/unused, but the window is used when creating
# the 'windows' list
cx = int(np.ceil(x.shape[1]/2))
cy = int(np.ceil(y.shape[0]/2))
center_segment_window = _local_window(cy, cx, (0, 0), dx, samples_per_seg, x, y)
mask = np.zeros(x.shape, dtype=bool)  # np.bool was removed in NumPy 1.24; plain bool is equivalent
all_centers = [(0, 0)]
segment_id = 0
segment_ids = [segment_id]
windows = [center_segment_window]
xx = x[center_segment_window]
yy = y[center_segment_window]
local_coords = [
(xx, yy)
]
center_mask = regular_polygon(6, rseg, xx, yy, center=(0, 0), rotation=segment_angle)
if 0 not in exclude:
mask[center_segment_window] |= center_mask
local_masks = [center_mask]
for i in range(1, rings+1):
hexes = hex_ring(i)
centers = [hex_to_xy(h, rseg+(segment_separation/2), rot=segment_angle) for h in hexes]
ids = np.arange(segment_id+1, segment_id+1+len(centers), dtype=int)
id_mask = ~np.isin(ids, exclude, assume_unique=True)
valid_ids = ids[id_mask]
centers = truenp.array(centers)
centers = centers[id_mask]
all_centers += centers.tolist()
for segment_id, center in zip(valid_ids, centers):
# short circuit: if we do not wish to include a segment,
# do no further work on it
if segment_id in exclude:
continue
segment_ids.append(segment_id)
local_window = _local_window(cy, cx, center, dx, samples_per_seg, x, y)
windows.append(local_window)
xx = x[local_window]
yy = y[local_window]
local_coords.append((xx-center[0], yy-center[1]))
local_mask = regular_polygon(6, rseg, xx, yy, center=center, rotation=segment_angle)
local_masks.append(local_mask)
mask[local_window] |= local_mask
segment_id = ids[-1]
return segment_vtov, all_centers, windows, local_coords, local_masks, segment_ids, mask
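A hypothetical usage sketch of the class above; the grid size and segment geometry below are illustrative values, not taken from the original module:

# Hypothetical usage sketch (illustrative grid and segment parameters).
import numpy as np

xi = np.linspace(-2, 2, 512)
xx, yy = np.meshgrid(xi, xi)
cha = CompositeHexagonalAperture(
    xx, yy, rings=2, segment_diameter=0.5, segment_separation=0.02
)
# cha.amp is the boolean composite-aperture mask over the (512, 512) grid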
| 32.134783
| 117
| 0.613043
| 1,115
| 7,391
| 3.912108
| 0.232287
| 0.022925
| 0.029803
| 0.017194
| 0.222834
| 0.146722
| 0.120816
| 0.107978
| 0.092847
| 0.092847
| 0
| 0.02385
| 0.279529
| 7,391
| 229
| 118
| 32.275109
| 0.795305
| 0.278717
| 0
| 0.022556
| 0
| 0
| 0.016116
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082707
| false
| 0
| 0.030075
| 0
| 0.195489
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4dfb95f04467bee0420411aa09bbdd150a3b575
| 1,156
|
py
|
Python
|
Projects/multiple_args_curvefit/python/src/model.py
|
basavyr/curve-fitting
|
0c7f93b7764d9ddc3e2860e5f20d21bf30256f58
|
[
"MIT"
] | null | null | null |
Projects/multiple_args_curvefit/python/src/model.py
|
basavyr/curve-fitting
|
0c7f93b7764d9ddc3e2860e5f20d21bf30256f58
|
[
"MIT"
] | null | null | null |
Projects/multiple_args_curvefit/python/src/model.py
|
basavyr/curve-fitting
|
0c7f93b7764d9ddc3e2860e5f20d21bf30256f58
|
[
"MIT"
] | null | null | null |
import numpy as np
from matplotlib import pyplot as plt
from scipy.optimize import curve_fit
import random as rd
import plotter
def model_function(X, a, b, c):
"""
- the analytical expression for the model that aims at describing the experimental data
- the X argument is an array of tuples of the form X=[,...,(xi_1,xi_2),...]
"""
nw1, nw2, I = X
f = a * pow(I, 2) * (nw1 + 0.5) + b * I * (nw2 + 0.5) + c
return f
def generate_x_data(size):
spin = lambda x: (2 * x) + 0.5
phonon = lambda: rd.choice([0, 1, 2])
x_data = [(phonon(), phonon(), spin(idx)) for idx in range(size)]
return x_data
def generate_data_from_params(params):
x_data = generate_x_data(30)
p1, p2, p3 = params
y_data_exp = [model_function(x, p1, p2, p3) for x in x_data]
y_data_th = [y + rd.choice([-1, 1]) *
rd.uniform(0.05, 0.08) * y for y in y_data_exp]
return [x_data, y_data_exp, y_data_th]
def main():
x_data = generate_x_data(10)
test_params = [3, 4, 0]
w_data = generate_data_from_params(test_params)
plotter.plot_data(w_data)
if __name__ == '__main__':
main()
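The module imports curve_fit but never calls it; a hypothetical fit against the synthetic data generated above might look like the sketch below (the zip/transpose step and the parameter values are illustrative):

# Hypothetical fit using the module's own generators (illustrative only).
x_data, y_exp, y_th = generate_data_from_params([3, 4, 0])
X = tuple(np.array(col, dtype=float) for col in zip(*x_data))  # (nw1, nw2, I) arrays
popt, pcov = curve_fit(model_function, X, y_th)
# popt should recover parameters close to (3, 4, 0)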
| 23.12
| 91
| 0.624567
| 201
| 1,156
| 3.363184
| 0.393035
| 0.066568
| 0.057692
| 0.065089
| 0.053254
| 0
| 0
| 0
| 0
| 0
| 0
| 0.043628
| 0.24654
| 1,156
| 49
| 92
| 23.591837
| 0.732491
| 0.141003
| 0
| 0
| 0
| 0
| 0.008222
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.178571
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4e259912dc8fab0fc123e2bde02cc765fe32ec1
| 3,740
|
py
|
Python
|
src/text_computation/computeCorrs.py
|
levon003/wiki-ores-feedback
|
29e7f1a41b16a7c57448d5bbc5801653debbc115
|
[
"MIT"
] | 2
|
2022-03-27T19:24:30.000Z
|
2022-03-29T16:15:31.000Z
|
src/text_computation/computeCorrs.py
|
levon003/wiki-ores-feedback
|
29e7f1a41b16a7c57448d5bbc5801653debbc115
|
[
"MIT"
] | 1
|
2021-04-23T21:03:45.000Z
|
2021-04-23T21:03:45.000Z
|
src/text_computation/computeCorrs.py
|
levon003/wiki-ores-feedback
|
29e7f1a41b16a7c57448d5bbc5801653debbc115
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Simple script to compute correlations for inserted and removed tokens
import numpy as np
import pandas as pd
from tqdm import tqdm
import os
import sqlite3
from datetime import datetime
import scipy.stats
import scipy.sparse
def create_array(token_index, db, total=9584147):
token_indicator_arr = np.zeros(total, dtype=bool)
cursor = db.execute('SELECT revision_index FROM inds WHERE token_index = ?', (token_index,))
inds = cursor.fetchall()
if len(inds) > 0:
inds_arr = np.array([ind[0] for ind in inds])
token_indicator_arr[inds_arr] = 1
return token_indicator_arr
def main():
git_root_dir = '/export/scratch2/levon003/repos/wiki-ores-feedback'
raw_data_dir = "/export/scratch2/wiki_data"
derived_data_dir = os.path.join(git_root_dir, "data", "derived")
revision_sample_dir = os.path.join(derived_data_dir, 'revision_sample')
s = datetime.now()
audit_dir = os.path.join(derived_data_dir, 'audit')
merged_preds_df_filepath = os.path.join(audit_dir, 'merged_preds.pkl')
merged_preds_df = pd.read_pickle(merged_preds_df_filepath)
print(f"Preds loaded. {datetime.now() - s}, pred count={len(merged_preds_df)}")
rev_id_list = []
with open(os.path.join(audit_dir, 'rev_id_2020-08-01T05:40:00Z.txt'), 'r') as infile:
for line in infile:
if line.strip() != '':
rev_id = int(line.strip())
rev_id_list.append(rev_id)
print("Loaded rev id list:", len(rev_id_list))
merged_preds_df['raw_misalignment'] = merged_preds_df.damaging_prob_calibrated - merged_preds_df.revert_prob
merged_preds_df['binary_misalignment'] = merged_preds_df.damaging_prob_calibrated - merged_preds_df.is_reverted_1week
rev_id_misalignment_dict = {row.rev_id: row.raw_misalignment for row in tqdm(merged_preds_df.itertuples(), total=len(merged_preds_df), desc='Building misalignment dict')}
rev_id_binary_misalignment_dict = {row.rev_id: row.binary_misalignment for row in tqdm(merged_preds_df.itertuples(), total=len(merged_preds_df), desc='Building binary misalignment dict')}
misalignment = np.zeros(len(rev_id_list), dtype=float)
for i, rev_id in tqdm(enumerate(rev_id_list), total=len(rev_id_list), desc='Building misalignment arr'):
if rev_id in rev_id_misalignment_dict:
misalignment[i] = rev_id_misalignment_dict[rev_id]
binary_misalignment = np.zeros(len(rev_id_list), dtype=float)
for i, rev_id in tqdm(enumerate(rev_id_list), total=len(rev_id_list), desc='Building binary misalignment arr'):
if rev_id in rev_id_binary_misalignment_dict:
binary_misalignment[i] = rev_id_binary_misalignment_dict[rev_id]
db = sqlite3.connect(
os.path.join(audit_dir, 'td_doc_indices.sqlite'),
detect_types=sqlite3.PARSE_DECLTYPES
)
try:
with open(os.path.join(audit_dir, 'doc_corr_2020-08-01T05:40:00Z.csv'), 'w') as outfile:
outfile.write("token_index,token_count,raw_misalignment_r,raw_misalignment_p,binary_misalignment_r,binary_misalignment_p\n")
for i in tqdm(range(50000,150000), desc='Computing corrs'):
token_indicator_arr = create_array(i, db)
token_count = np.sum(token_indicator_arr)
r, p = 0, 0
if token_count > 0:
r, p = scipy.stats.pointbiserialr(token_indicator_arr, misalignment)
r_binary, p_binary = scipy.stats.pointbiserialr(token_indicator_arr, binary_misalignment)
outfile.write(f"{i},{token_count},{r},{p},{r_binary},{p_binary}\n")
finally:
db.close()
if __name__ == "__main__":
main()
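A toy sanity check (illustrative values, not from this script) of the point-biserial correlation used above, which measures association between a boolean indicator and a continuous signal:

# Toy point-biserial check with made-up values.
import numpy as np
import scipy.stats

indicator = np.array([0, 0, 1, 1, 1], dtype=bool)
signal = np.array([0.1, 0.2, 0.8, 0.9, 0.7])
r, p = scipy.stats.pointbiserialr(indicator, signal)
print(r, p)  # r is close to 1 for this strongly separated toy example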
| 46.17284
| 191
| 0.699465
| 542
| 3,740
| 4.51107
| 0.273063
| 0.055215
| 0.074438
| 0.02454
| 0.391411
| 0.343558
| 0.265849
| 0.222495
| 0.198773
| 0.198773
| 0
| 0.02053
| 0.192513
| 3,740
| 80
| 192
| 46.75
| 0.789073
| 0.024332
| 0
| 0
| 0
| 0
| 0.186729
| 0.094598
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.125
| 0
| 0.171875
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4efdb84b5f54430d099fe1b59a9b2291a76ef7a
| 1,400
|
py
|
Python
|
cidc_utils/caching/credential_cache.py
|
CIMAC-CIDC/cidc-utils
|
2f2cf82007a3a67971293752e1dc168a7aad10e3
|
[
"MIT"
] | null | null | null |
cidc_utils/caching/credential_cache.py
|
CIMAC-CIDC/cidc-utils
|
2f2cf82007a3a67971293752e1dc168a7aad10e3
|
[
"MIT"
] | null | null | null |
cidc_utils/caching/credential_cache.py
|
CIMAC-CIDC/cidc-utils
|
2f2cf82007a3a67971293752e1dc168a7aad10e3
|
[
"MIT"
] | null | null | null |
"""
Defines caching before for user preferences
"""
import jwt
import time
from cachetools import TTLCache
from typing import Optional
class CredentialCache(TTLCache):
"""
Subclass of TTLCache that temporarily stores and retreives user login credentials
Arguments:
TTLCache {TTLCache} -- A TTLCache object
Returns:
CredentialCache -- [description]
"""
def cache_key(self, key):
"""
Adds an access key to the cache
Arguments:
key {str} -- Google access token.
"""
self["access_token"] = key
def get_key(self) -> Optional[str]:
"""
Retrieve key from cache.
"""
if "access_token" in self and self["access_token"]:
try:
decode = jwt.decode(self["access_token"], verify=False)
exp = decode["exp"]
if time.time() > exp:
print(
"Your token has expired! Please log in to the web portal and get a new token."
)
self["access_token"] = None
return self["access_token"]
except jwt.exceptions.DecodeError:
print("This token is not a valid JWT!")
if self["access_token"]:
return self["access_token"]
else:
return None
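A hypothetical usage sketch; TTLCache requires maxsize and ttl at construction, and the token string below is a placeholder:

# Hypothetical usage (placeholder token).
cache = CredentialCache(maxsize=1, ttl=3600)
cache.cache_key("<jwt access token>")
token = cache.get_key()  # returns the cached token; None once it has expired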
| 27.45098
| 102
| 0.534286
| 146
| 1,400
| 5.054795
| 0.479452
| 0.134146
| 0.142276
| 0.054201
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.379286
| 1,400
| 50
| 103
| 28
| 0.849252
| 0.24
| 0
| 0.083333
| 0
| 0
| 0.212876
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.416667
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4fbf9c4787bd6c823e79265ebbbdf508f8294f4
| 4,200
|
py
|
Python
|
src/openclimategis/util/ncconv/experimental/ocg_converter/csv_.py
|
Peshal1067/OpenClimateGIS
|
297db6ae1f6dd8459ede6bed905c8d85bd93c5d6
|
[
"BSD-3-Clause"
] | 3
|
2015-04-23T09:09:04.000Z
|
2020-02-26T17:40:19.000Z
|
src/openclimategis/util/ncconv/experimental/ocg_converter/csv_.py
|
arthur-e/OpenClimateGIS
|
297db6ae1f6dd8459ede6bed905c8d85bd93c5d6
|
[
"BSD-3-Clause"
] | null | null | null |
src/openclimategis/util/ncconv/experimental/ocg_converter/csv_.py
|
arthur-e/OpenClimateGIS
|
297db6ae1f6dd8459ede6bed905c8d85bd93c5d6
|
[
"BSD-3-Clause"
] | 2
|
2017-05-30T10:27:36.000Z
|
2020-11-09T13:52:58.000Z
|
import io
import zipfile
import csv

from util.ncconv.experimental.ocg_converter.subocg_converter import SubOcgConverter


class CsvConverter(SubOcgConverter):
    # __headers__ = ['OCGID','GID','TIME','LEVEL','VALUE','AREA_M2','WKT','WKB']

    def __init__(self, *args, **kwds):
        self.as_wkt = kwds.pop('as_wkt', False)
        self.as_wkb = kwds.pop('as_wkb', False)
        self.add_area = kwds.pop('add_area', True)
        ## call the superclass
        super(CsvConverter, self).__init__(*args, **kwds)
        # self.headers = self.get_headers(self.value_table)
        # ## need to extract the time as well
        # if 'TID' in self.headers:
        #     self.headers.insert(self.headers.index('TID')+1,'TIME')
        #
        # codes = [['add_area','AREA_M2'],['as_wkt','WKT'],['as_wkb','WKB']]
        # for code in codes:
        #     if getattr(self,code[0]):
        #         self.headers.append(code[1])

    def get_writer(self, buffer, headers=None):
        writer = csv.writer(buffer)
        if headers is None:
            headers = self.get_headers()
        writer.writerow(headers)
        writer = csv.DictWriter(buffer, headers)
        return writer

    def _convert_(self):
        buffer = io.BytesIO()
        writer = self.get_writer(buffer)
        for attrs in self.get_iter(wkt=self.as_wkt, wkb=self.as_wkb):
            writer.writerow(attrs)
        buffer.flush()
        return buffer.getvalue()


class LinkedCsvConverter(CsvConverter):
    # def __init__(self,*args,**kwds):
    #     self.tables = kwds.pop('tables',None)
    #
    #     super(LinkedCsvConverter,self).__init__(*args,**kwds)
    #
    #     if self.tables is None and self.use_stat:
    #         tables = kwds.pop('tables',['Geometry','Stat'])
    #     elif self.tables is None and not self.use_stat:
    #         tables = kwds.pop('tables',['Geometry','Time','Value'])
    #     self.tables = [getattr(self.db,tbl) for tbl in tables]

    # def _clean_headers_(self,table):
    #     headers = self.get_headers(table)
    #     if self.get_tablename(table) == 'geometry':
    #         codes = [['add_area','AREA_M2'],['as_wkt','WKT'],['as_wkb','WKB']]
    #         for code in codes:
    #             if not getattr(self,code[0]):
    #                 headers.remove(code[1])
    #     return(headers)

    def _convert_(self):
        if self.use_stat:
            itrs = [[self.sub.sub.iter_geom_with_area, 'geometry', {'keep_geom': False}],
                    [self.sub.sub.iter_time, 'time', {}],
                    [self.sub.sub.iter_value_keyed, 'value', {}],
                    [self.sub.iter_stats, 'stat', {'keep_geom': False}]]
        else:
            itrs = [[self.sub.iter_geom_with_area, 'geometry', {'keep_geom': False}],
                    [self.sub.iter_time, 'time', {}],
                    [self.sub.iter_value_keyed, 'value', {}]]
        ## generate the info for writing
        info = []
        for itr in itrs:
            iter = itr[0]
            headers = iter(**itr[2]).next().keys()
            # headers = self._clean_headers_(table)
            # headers = self._clean_headers_([h.upper() for h in table.__mapper__.columns.keys()])
            arcname = '{0}_{1}.csv'.format(self.base_name, itr[1])
            buffer = io.BytesIO()
            writer = self.get_writer(buffer, headers=headers)
            info.append(dict(headers=headers,
                             writer=writer,
                             arcname=arcname,
                             iter=iter(**itr[2]),
                             buffer=buffer))
        ## write the tables
        for i in info:
            ## loop through each database record
            for attrs in i['iter']:
                i['writer'].writerow(attrs)
            i['buffer'].flush()
        return info

    def _response_(self, payload):
        buffer = io.BytesIO()
        zip = zipfile.ZipFile(buffer, 'w', zipfile.ZIP_DEFLATED)
        for info in payload:
            zip.writestr(info['arcname'], info['buffer'].getvalue())
        self.write_meta(zip)
        zip.close()
        buffer.flush()
        zip_stream = buffer.getvalue()
        buffer.close()
        return zip_stream
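The _response_ method above follows a common pattern: several in-memory CSV buffers zipped into one downloadable archive. A self-contained Python 3 sketch of the same idea (the converter classes above are Python 2-era; the names below are illustrative, not part of the OpenClimateGIS API):

import csv
import io
import zipfile

tables = {
    'geometry.csv': [{'gid': 1, 'area_m2': 10.5}, {'gid': 2, 'area_m2': 7.25}],
    'time.csv': [{'tid': 1, 'time': '2000-01-01'}],
}
zip_buffer = io.BytesIO()
with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as archive:
    for arcname, rows in tables.items():
        text = io.StringIO()  # csv needs text streams in Python 3
        writer = csv.DictWriter(text, fieldnames=list(rows[0]))
        writer.writeheader()
        writer.writerows(rows)
        archive.writestr(arcname, text.getvalue())
zip_stream = zip_buffer.getvalue()  # bytes of the finished archive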
| 37.837838
| 97
| 0.553095
| 492
| 4,200
| 4.534553
| 0.247967
| 0.039444
| 0.019722
| 0.028238
| 0.231286
| 0.195876
| 0.15688
| 0.15688
| 0.086957
| 0.086957
| 0
| 0.004759
| 0.299524
| 4,200
| 111
| 98
| 37.837838
| 0.753569
| 0.358095
| 0
| 0.112903
| 0
| 0
| 0.047493
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.080645
| false
| 0
| 0.064516
| 0
| 0.177419
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9003b0f6d049c9acbb898890fc3e7195ecd16b28
| 1,634
|
py
|
Python
|
arcade/gui/examples/anchor_widgets.py
|
akapkotel/arcade
|
6e43ec53e7bfa3dee1aa574404794e3695aad381
|
[
"MIT"
] | null | null | null |
arcade/gui/examples/anchor_widgets.py
|
akapkotel/arcade
|
6e43ec53e7bfa3dee1aa574404794e3695aad381
|
[
"MIT"
] | 1
|
2022-03-21T06:24:29.000Z
|
2022-03-21T06:24:29.000Z
|
arcade/gui/examples/anchor_widgets.py
|
Ibrahim2750mi/arcade
|
bf3229e64117931bffb8e50926a996a7a8fc9b8b
|
[
"MIT"
] | null | null | null |
"""
Example shows how to use UIAnchorWidget to position widgets on screen.
Dummy widgets indicate hovered, pressed and clicked.
"""
import arcade
from arcade.gui import UIManager
from arcade.gui.widgets import UIDummy
from arcade.gui.widgets.layout import UIAnchorLayout
class UIMockup(arcade.Window):
def __init__(self):
super().__init__(800, 600, "UI Mockup", resizable=True)
self.manager = UIManager()
self.manager.enable()
arcade.set_background_color(arcade.color.DARK_BLUE_GRAY)
anchor = self.manager.add(UIAnchorLayout())
anchor.add(
child=UIDummy(color=arcade.color.RED),
anchor_x="center_x",
anchor_y="top",
)
anchor.add(
child=UIDummy(color=arcade.color.BLUE),
anchor_x="right",
anchor_y="center_y",
)
anchor.add(
child=UIDummy(color=arcade.color.GREEN),
anchor_x="center_x",
anchor_y="center_y",
)
anchor.add(
child=UIDummy(color=arcade.color.YELLOW),
anchor_x="left",
anchor_y="bottom",
)
anchor.add(
child=UIDummy(color=arcade.color.ORANGE),
anchor_x="left",
align_x=20,
anchor_y="center_y",
)
anchor.add(
child=UIDummy(color=arcade.color.ORANGE),
anchor_x="right",
align_x=-40,
anchor_y="bottom",
align_y=40,
)
def on_draw(self):
self.clear()
self.manager.draw()
window = UIMockup()
arcade.run()
| 24.38806
| 70
| 0.570991
| 184
| 1,634
| 4.891304
| 0.353261
| 0.085556
| 0.124444
| 0.14
| 0.361111
| 0.361111
| 0.322222
| 0.24
| 0.24
| 0.24
| 0
| 0.010743
| 0.316401
| 1,634
| 66
| 71
| 24.757576
| 0.794987
| 0.075275
| 0
| 0.387755
| 0
| 0
| 0.054558
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040816
| false
| 0
| 0.081633
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
90041b2eae192a57fb04bf6a09bec2f9aae7dce1
| 3,897
|
py
|
Python
|
tools/e2e_inference.py
|
nanit/deep-high-resolution-net.pytorch
|
17226df8effda518c47355e85f4733638c20297a
|
[
"MIT"
] | null | null | null |
tools/e2e_inference.py
|
nanit/deep-high-resolution-net.pytorch
|
17226df8effda518c47355e85f4733638c20297a
|
[
"MIT"
] | 2
|
2021-09-23T12:59:27.000Z
|
2021-11-01T12:21:51.000Z
|
tools/e2e_inference.py
|
nanit/deep-high-resolution-net.pytorch
|
17226df8effda518c47355e85f4733638c20297a
|
[
"MIT"
] | null | null | null |
import os
import glob
import pickle
import sys

import tensorflow as tf
from numba import cuda

from python_tools.OSUtils import ensure_dir
from offline_predict import get_boxes_from_detection_predictions_data, convert_boxes_to_bboxes, predict_on_image_list, load_skeleton_model

DETECTION_RESEARCH_FOLDER = os.path.expanduser('~/nanit/tf-models/research/')
sys.path.append(DETECTION_RESEARCH_FOLDER)
from object_detection.nanit_model_predict import run_on_dataset, label_map_util, extract_labels_names_from_label_map

# input images folder
IMAGES_FOLDER = os.path.expanduser('~/nanit/skeleton_data_phase1/sms_videos/')

# Detection Model
DETECTION_MODEL_DIR = os.path.expanduser('~/nanit/model_train_phase1/export')
DETECTION_LABEL_PATH = os.path.expanduser('~/nanit/detection_unified/detection_unified_label_map.pbtxt')

# Skeleton Model
SKELETON_CONFIG_FILE_NAME = '../experiments/nanit_mpii/hrnet/w32_256x256_nanit_skeleton.yaml'
SKELETON_TORCH_SCRIPT_MODEL_PATH = '../export/skeleton_model_phase2_and_homography_images_plus_aug_dropout.pth'

# Outputs
OUTPUT_FOLDER = os.path.expanduser('~/nanit/skeleton_data/output/sms_videos/')
DETECTION_IMAGES_SAVE_PATH = os.path.join(OUTPUT_FOLDER, 'detection_images')
SKELETON_IMAGES_SAVE_PATH = os.path.join(OUTPUT_FOLDER, 'skeleton_images')
SAVE_DEBUG_IMAGES = False


def detection_inference(image_paths):
    category_index = label_map_util.create_category_index_from_labelmap(DETECTION_LABEL_PATH, use_display_name=True)
    detection_predictions_filepath = os.path.join(OUTPUT_FOLDER, 'detection_predictions.pkl')
    saved_model_path = os.path.join(DETECTION_MODEL_DIR, 'saved_model')
    detection_model = tf.saved_model.load(saved_model_path)
    detection_predictions = run_on_dataset(detection_model,
                                           image_paths,
                                           None,  # test_image_gt
                                           category_index,
                                           False,  # USE_GT
                                           save_flag=SAVE_DEBUG_IMAGES,
                                           save_path=DETECTION_IMAGES_SAVE_PATH)
    with open(detection_predictions_filepath, 'wb') as f:
        pickle.dump(detection_predictions, f)
    print('Detection Predictions saved to: {}'.format(detection_predictions_filepath))
    print('Release GPU Memory (After Detection Predictions)')
    device = cuda.get_current_device()
    device.reset()
    return detection_predictions


def skeleton_inference(detection_predictions, image_paths):
    boxes = get_boxes_from_detection_predictions_data(detection_predictions)
    babies_bboxes, heads_bboxes = convert_boxes_to_bboxes(boxes)
    gt_data = {}
    homography_matrix_data = {}
    pose_model, cfg = load_skeleton_model(SKELETON_CONFIG_FILE_NAME, SKELETON_TORCH_SCRIPT_MODEL_PATH)
    skeleton_predictions, _ = predict_on_image_list(image_paths, babies_bboxes, heads_bboxes, gt_data, homography_matrix_data,
                                                    pose_model, cfg, SKELETON_IMAGES_SAVE_PATH, SAVE_DEBUG_IMAGES)
    skeleton_predictions_filepath = os.path.join(OUTPUT_FOLDER, 'skeleton_predictions.pkl')
    with open(skeleton_predictions_filepath, 'wb') as f:
        pickle.dump(skeleton_predictions, f)
    print('Skeleton Detected {} / {}'.format(len(skeleton_predictions), len(image_paths)))
    return skeleton_predictions


def main():
    image_paths = glob.glob(os.path.join(IMAGES_FOLDER, '**/*'))
    image_paths = [p for p in image_paths if p.endswith('.png') or p.endswith('.jpg')]
    detection_predictions = detection_inference(image_paths)
    skeleton_predictions = skeleton_inference(detection_predictions, image_paths)
    print('Done Skeleton End-to-End Predictions')


if __name__ == '__main__':
    ensure_dir(DETECTION_IMAGES_SAVE_PATH)
    ensure_dir(SKELETON_IMAGES_SAVE_PATH)
    main()
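Structurally, the script is a two-stage cascade: stage-1 detections are checkpointed to disk, then fed to the pose stage. A minimal sketch of that pattern with dummy stand-ins (detect/estimate_pose are hypothetical, not the project's functions):

import pickle

def detect(images):
    return {img: [(10, 10, 50, 50)] for img in images}  # one fake box per image

def estimate_pose(boxes_by_image):
    return {img: [((x1 + x2) // 2, (y1 + y2) // 2) for x1, y1, x2, y2 in boxes]
            for img, boxes in boxes_by_image.items()}   # one fake keypoint per box

images = ['a.png', 'b.png']
boxes = detect(images)
with open('detections.pkl', 'wb') as f:  # checkpoint stage 1, as the script does
    pickle.dump(boxes, f)
print(estimate_pose(boxes))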
| 45.847059
| 138
| 0.744419
| 483
| 3,897
| 5.569358
| 0.271222
| 0.111524
| 0.036431
| 0.039033
| 0.271004
| 0.240149
| 0.139777
| 0.055019
| 0
| 0
| 0
| 0.00342
| 0.17475
| 3,897
| 84
| 139
| 46.392857
| 0.833022
| 0.020272
| 0
| 0
| 0
| 0
| 0.155865
| 0.101023
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048387
| false
| 0
| 0.145161
| 0
| 0.225806
| 0.064516
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9007054fb0674671d547ac9d0adee85e1c24f33c
| 1,234
|
py
|
Python
|
analytics/extract/bare/funds-explorer/scrap_ranking.py
|
vicmattos/data-invest
|
4318a33117583bf492b45c69c957fd0ea2c455e1
|
[
"MIT"
] | null | null | null |
analytics/extract/bare/funds-explorer/scrap_ranking.py
|
vicmattos/data-invest
|
4318a33117583bf492b45c69c957fd0ea2c455e1
|
[
"MIT"
] | null | null | null |
analytics/extract/bare/funds-explorer/scrap_ranking.py
|
vicmattos/data-invest
|
4318a33117583bf492b45c69c957fd0ea2c455e1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import os
import csv
import time
from datetime import datetime

import requests
from bs4 import BeautifulSoup

url = 'https://www.fundsexplorer.com.br/ranking'

# Data Cleansing
#  'R$'  => ''
#  '%'   => ''
#  '.0'  => ''
#  '.'   => ''
#  ','   => '.'
#  'N/A' => ''

print("Starting...{}".format(datetime.now()))
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")

data = []
table = soup.find(id="table-ranking")

table_head = table.find('thead')
rows = table_head.find_all('tr')
for row in rows:
    cols = row.find_all('th')
    colsd = [ele.get_text(separator=" ").strip() for ele in cols]
    data.append([ele for ele in colsd])

table_body = table.find('tbody')
rows = table_body.find_all('tr')
for row in rows:
    cols = row.find_all('td')
    colsd = [ele.text.replace('R$', '').replace('%', '').replace('.0', '').replace('.', '').replace('N/A', '').replace(',', '.').strip() for ele in cols]
    data.append([ele for ele in colsd])

out_dir = 'out/'
os.makedirs(out_dir, exist_ok=False)
file = open(out_dir + "fii.csv", "w")
wtr = csv.writer(file, delimiter=';', lineterminator='\n')
for x in data:
    wtr.writerow(x)
file.close()

print("Finish...{}".format(datetime.now()))
time.sleep(1)
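Note that the order of the replacements matters: thousands separators ('.') are stripped before the decimal comma becomes a dot, so 'R$ 1.234,56' cleans to '1234.56'. A hedged sketch of the same chain as a reusable helper (the function name is illustrative):

def clean_cell(text: str) -> str:
    # Apply the replacements in the same order as the scraper above
    for old, new in [('R$', ''), ('%', ''), ('.0', ''), ('.', ''), ('N/A', ''), (',', '.')]:
        text = text.replace(old, new)
    return text.strip()

assert clean_cell('R$ 1.234,56') == '1234.56'
assert clean_cell('N/A') == ''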
| 20.915254
| 147
| 0.627229
| 177
| 1,234
| 4.299435
| 0.435028
| 0.036794
| 0.04205
| 0.031537
| 0.204993
| 0.204993
| 0.204993
| 0.204993
| 0.204993
| 0.204993
| 0
| 0.003784
| 0.143436
| 1,234
| 58
| 148
| 21.275862
| 0.716178
| 0.077796
| 0
| 0.125
| 0
| 0
| 0.117699
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1875
| 0
| 0.1875
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9009e3424db2d10a8ac51689c842cea2498a6040
| 14,546
|
py
|
Python
|
stentseg/apps/_3DPointSelector.py
|
almarklein/stentseg
|
48255fffdc2394d1dc4ce2208c9a91e1d4c35a46
|
[
"BSD-3-Clause"
] | 1
|
2020-08-28T16:34:10.000Z
|
2020-08-28T16:34:10.000Z
|
stentseg/apps/_3DPointSelector.py
|
almarklein/stentseg
|
48255fffdc2394d1dc4ce2208c9a91e1d4c35a46
|
[
"BSD-3-Clause"
] | null | null | null |
stentseg/apps/_3DPointSelector.py
|
almarklein/stentseg
|
48255fffdc2394d1dc4ce2208c9a91e1d4c35a46
|
[
"BSD-3-Clause"
] | 1
|
2021-04-25T06:59:36.000Z
|
2021-04-25T06:59:36.000Z
|
""" Module 3D Point Selector
Provides functionality view slices and to select points in multiplanar reconstructions.
"""
import os, time, sys
import numpy as np
import visvis as vv
from visvis.utils.pypoints import Point, Pointset, Aarray
import OpenGL.GL as gl
import OpenGL.GLU as glu
class VolViewer:
""" VolViewer. View (CT) volume while scrolling through slices x,y or z depending on the direction chosen
"""
def __init__(self, vol, direction, axes=None, clim=None):
self.direction = direction
# Store vol and init
if self.direction == 0:
self.vol = vol
elif self.direction == 1:
self.vol = np.transpose(vol,(1,0,2))
self.vol.origin = (vol.origin[1],vol.origin[0],vol.origin[2])
self.vol.sampling = (vol.sampling[1],vol.sampling[0],vol.sampling[2])
elif self.direction == 2:
self.vol = np.transpose(vol,(2,0,1))
self.vol.origin = (vol.origin[2],vol.origin[0],vol.origin[1])
self.vol.sampling = (vol.sampling[2],vol.sampling[0],vol.sampling[1])
else:
S('No valid input for direction, only 1,2 or 3 is possible')
self.slice = 0
# Prepare figure and axex
if axes is None:
self.a = vv.gca()
else:
self.a = axes
self.f = vv.gcf()
# Create slice in 2D texture
if clim:
self.t = vv.imshow(self.vol[self.round_slice,:,:],clim = clim, axes=self.a)
else:
self.t = vv.imshow(self.vol[self.round_slice,:,:],axes=self.a)
# Bind
self.a.eventScroll.Bind(self.on_scroll)
self.eventPositionUpdate = vv.events.BaseEvent(self)
axes.eventMouseDown.Bind(self.on_click)
# Fig properties
self.a.bgcolor = [0,0,0]
self.a.axis.visible = False
self.a.showAxis = False
@property
def round_slice(self):
return int(self.slice + 0.5)
def on_scroll(self, event):
self.slice += int(event.verticalSteps)
if self.slice > (self.vol.shape[0]-1):
self.slice = (self.vol.shape[0]-1)
if self.slice < 0:
self.slice = 0
self.show()
return True
def on_click(self, event):
# get current mouse position
self._refpos = [round(event.x2d,1), round(event.y2d,1)]
#print(self._refpos)
self.eventPositionUpdate.Fire()
return self._refpos
def show(self):
self.t.SetData(self.vol[self.round_slice,:,:])
def GetCurrentSlice(self):
ctslice = self.slice
CurrentSlice = round(self.vol.origin[0] + ctslice * self.vol.sampling[0],1)
return CurrentSlice
def SetCurrentSlice(self, slicenr):
ctslice = (slicenr - self.vol.origin[0])/ self.vol.sampling[0]
self.slice = ctslice
self.show()
class PointSelect3D:
""" A helper class for 3d point select. Use the select3dpoint function to
perform manual point selection.
"""
def __init__(self, vol, a_transversal, a_coronal, a_sagittal, a_MIP, a_text, nr_of_stents, clim=None):
self.nr_of_stents = nr_of_stents
self.f = vv.gcf()
self.vol = vol
# Create empty list of endpoints
self.endpoints = []
self.endpoints = ['xx,yy,zz'] * nr_of_stents * 2
self.endpointsindex = 0
# Create text objects
self._labelcurrent = vv.Label(a_text)
self._labelx = vv.Label(a_text)
self._labelxslice = vv.Label(a_text)
self._labely = vv.Label(a_text)
self._labelyslice = vv.Label(a_text)
self._labelz = vv.Label(a_text)
self._labelzslice = vv.Label(a_text)
self._labelcurrent.position = -250,10
self._labelx.position = -250,35
self._labelxslice.position = -200,35
self._labely.position = -250,55
self._labelyslice.position = -200,55
self._labelz.position = -250,75
self._labelzslice.position = -200,75
self._labelendpointstext =[]
self._labelendpointstext.append(vv.Label(a_text))
self._labelendpointstext[0].position = 100,-5
self._labelendpointstext.append(vv.Label(a_text))
self._labelendpointstext[1].position = 230,-5
for i in range(2,self.nr_of_stents+2):
self._labelendpointstext.append(vv.Label(a_text))
self._labelendpointstext[i].position = 40,15+(20*(i-2))
self._labelendpoints = []
for i in range(0,self.nr_of_stents * 2,2):
self._labelendpoints.append(vv.Label(a_text))
self._labelendpoints[i].position = 100,15+(20*(i/2)),50,20
self._labelendpoints.append(vv.Label(a_text))
self._labelendpoints[i+1].position = 230,15+(20*(i/2)),50,20
# Create Select button
self._select = False
self._butselect = vv.PushButton(a_text)
self._butselect.position = -110,150
self._butselect.text = 'Select'
# Create Back button
self._back = False
self._butback = vv.PushButton(a_text)
self._butback.position = 10,150
self._butback.text = 'Back'
# Create Close button
self._finished = False
self._butclose = vv.PushButton(a_text)
self._butclose.position = -50,180
self._butclose.text = 'Finish'
# Get short name for sampling
if isinstance(vol, Aarray):
self._sam = sam = vol.sampling
else:
self._sam = None
sam = (1,1,1)
# Display the slices and 3D MIP
self.b1 = VolViewer(vol, 0, axes=a_transversal, clim=clim)
self.b2 = VolViewer(vol, 1, axes=a_coronal, clim=clim)
self.b3 = VolViewer(vol, 2, axes=a_sagittal, clim=clim)
renderstyle = 'mip'
a_MIP.daspect = 1,1,-1
self.b4 = vv.volshow(vol, clim=(0,2500), renderStyle = renderstyle, axes=a_MIP)
c = vv.ClimEditor(a_MIP)
c.position = (10, 50)
# set axis settings
for a in [a_transversal, a_coronal, a_sagittal, a_MIP]:
a.bgcolor = [0,0,0]
a.axis.visible = False
a.showAxis = True
# get current slice number
Zslice = self.b1.GetCurrentSlice()
Yslice = self.b2.GetCurrentSlice()
Xslice = self.b3.GetCurrentSlice()
size = vol.shape
# create lines for position of x,y and z slices
origin = vol.origin
Zrange = (origin[0], (size[0]*sam[0])+origin[0])
Xrange = (origin[1], (size[1]*sam[1])+origin[1])
Yrange = (origin[2], (size[2]*sam[2])+origin[2])
self.l11 = vv.Line(a_transversal,[(Yslice,Xrange[0]),(Yslice,Xrange[1])])
self.l12 = vv.Line(a_transversal,[(Yrange[0],Xslice),(Yrange[1],Xslice)])
self.l21 = vv.Line(a_coronal,[(Zslice,Zrange[0]),(Zslice,Zrange[1])])
self.l22 = vv.Line(a_coronal,[(Yrange[0],Xslice),(Yrange[1],Xslice)])
self.l31 = vv.Line(a_sagittal, [(Zslice,Zrange[0]),(Zslice,Zrange[1])])
self.l32 = vv.Line(a_sagittal, [(Xrange[0],Yslice),(Xrange[1],Yslice)])
# change color of the lines
for i in [self.l11,self.l12,self.l21,self.l22,self.l31,self.l32]:
i.lc = 'g'
# create a point in the MIP figure for the current position
self.mippoint = vv.Line(a_MIP, [(Zslice,Xslice,Yslice)])
self.mippoint.ms = 'o'
self.mippoint.mw = 5
self.mippoint.mc = 'g'
self.mippoint.alpha = 0.9
# Get list of all range wobjects
self._volviewers = [self.b1, self.b2, self.b3]
# Bind events
fig = a_text.GetFigure()
fig.eventClose.Bind(self._OnFinish)
self._butclose.eventPress.Bind(self._OnFinish)
self._butselect.eventPress.Bind(self._OnSelect)
self._butback.eventPress.Bind(self._OnBack)
for r in self._volviewers:
r.eventPositionUpdate.Bind(self._OnMouseClickAxis)
for s in range(len(self._labelendpoints)):
self._labelendpoints[s].eventMouseDown.Bind(self._OnMouseClickEndpoint)
# Almost done
self._SetTexts()
self.updatePosition()
def _OnMouseClickEndpoint(self,event):
index = self._labelendpoints.index(event.owner)
self.endpointsindex = index
self.updateText()
def _OnMouseClickAxis(self,event):
# Get ranges of wobject that fired the event
rangex, rangey = event.owner._refpos[0], event.owner._refpos[1]
# Update slices in onther wobjects
if event.owner is self.b1:
self.b2.SetCurrentSlice(rangey)
self.b3.SetCurrentSlice(rangex)
elif event.owner is self.b2:
self.b1.SetCurrentSlice(rangey)
self.b3.SetCurrentSlice(rangex)
elif event.owner is self.b3:
self.b1.SetCurrentSlice(rangey)
self.b2.SetCurrentSlice(rangex)
else:
print('unknown owner! %s' % repr(event.owner))
def _SetTexts(self):
# Get short names for labels
lx, ly, lz = self._labelx, self._labely, self._labelz
# Apply texts
self._labelcurrent.text = 'Current Position:'
lx.text = 'X: '
ly.text = 'Y: '
lz.text = 'Z: '
self._labelendpointstext[0].text = 'StartPoints'
self._labelendpointstext[1].text = 'EndPoints'
for i in range(2,(self.nr_of_stents)+2):
self._labelendpointstext[i].text = 'Stent %1d:' % int(i-1)
for i in range(self.nr_of_stents*2):
self._labelendpoints[i].text = self.endpoints[i]
for i in range(self.nr_of_stents*2):
if i == self.endpointsindex:
self._labelendpoints[i].textColor = 'b'
else:
self._labelendpoints[i].textColor = 'k'
def _OnSelect(self, event):
Position = self.updatePosition()
if self.endpointsindex <= len(self.endpoints)-1:
self.endpoints[self.endpointsindex] = Position
self.endpointsindex += 1
self.updateText()
#print(self.endpoints)
#print('Current position = ' + str(Position))
def _OnBack(self, event):
if not(self.endpointsindex <0):
self.endpoints[self.endpointsindex] = 'xx,yy,zz'
self.updateText()
print(self.endpoints)
print('Back Pressed')
def _OnFinish(self, event):
self._finished = True
return self.endpoints
print('Finish Pressed')
def updatePosition(self):
# get current slice numbers
Zslice = self.b1.GetCurrentSlice()
Yslice = self.b2.GetCurrentSlice()
Xslice = self.b3.GetCurrentSlice()
# update lines
self.l11.SetXdata([Xslice,Xslice])
self.l12.SetYdata([Yslice,Yslice])
self.l21.SetXdata([Xslice,Xslice])
self.l22.SetYdata([Zslice,Zslice])
self.l31.SetXdata([Yslice,Yslice])
self.l32.SetYdata([Zslice,Zslice])
# update Point
self.mippoint.SetXdata([Xslice])
self.mippoint.SetYdata([Yslice])
self.mippoint.SetZdata([Zslice])
# update current slice text
self._labelxslice.text = str(Xslice)
self._labelyslice.text = str(Yslice)
self._labelzslice.text = str(Zslice)
# return Position
Position = (Xslice, Yslice, Zslice)
return Position
def updateText(self):
for i in range(self.nr_of_stents*2):
self._labelendpoints[i].text = str(self.endpoints[i])
if i == self.endpointsindex:
self._labelendpoints[i].textColor = 'b'
else:
self._labelendpoints[i].textColor = 'k'
def Run(self):
vv.processEvents()
self.updatePosition()
def select3dpoints(vol, nr_of_stents, fig=None, clim=None):
""" Manually select 3d points in a volume. In the given figure (or a new
figure if None), three axes are created that display the transversal,
sagittal and coronal slices of the volume. The user can then use the mouse
to scroll to the correct slice and select the current position as an
endpoint of a stent.
Input: Number of stents to select start- and endpoints for.
"""
# Create figure
if fig is None:
fig = vv.figure()
figCleanup = True
else:
fig.Clear()
figCleanup = False
# Create four axes and a wibject to attach text labels to
fig.position = 0, 22, 750, 700
fig.title = '3D Point Selector'
a1 = vv.subplot(321)
a2 = vv.subplot(322)
a3 = vv.subplot(323)
a4 = vv.subplot(324)
a5 = vv.Wibject(fig)
# x-richting, y-richting, x-breedte?, y-breedte?
a5.position = 0.5, 0.7, 0.5, 0.5
# Set settings
for a in [a1, a2, a3, a4]:
a.showAxis = False
# Create PointSelect instance
pointselect3d = PointSelect3D(vol, a1, a3, a2, a4, a5, nr_of_stents, clim)
# Enter a mainloop
while not pointselect3d._finished:
pointselect3d.Run()
time.sleep(0.01)
# Clean up figure (close if we opened it)
fig.Clear()
fig.DrawNow()
if figCleanup:
fig.Destroy()
# Done (return points)
Startpoints = []
Endpoints = []
for i in range(nr_of_stents):
if isinstance(pointselect3d.endpoints[i*2],tuple):
Startpoints.append(pointselect3d.endpoints[i*2])
if isinstance(pointselect3d.endpoints[(i*2)+1],tuple):
Endpoints.append(pointselect3d.endpoints[(i*2)+1])
return Startpoints, Endpoints
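A hedged usage sketch of select3dpoints (the synthetic volume and Aarray arguments are illustrative; visvis and a working OpenGL context are required to actually run the selector):

import numpy as np
from visvis.utils.pypoints import Aarray

data = np.random.uniform(0, 2500, (64, 128, 128)).astype('float32')
vol = Aarray(data, sampling=(2.0, 1.0, 1.0), origin=(0.0, 0.0, 0.0))
start_points, end_points = select3dpoints(vol, nr_of_stents=2, clim=(0, 2500))
print('start:', start_points)  # list of (x, y, z) tuples, one per stent
print('end:', end_points)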
| 36.732323
| 110
| 0.564691
| 1,735
| 14,546
| 4.639769
| 0.192507
| 0.016522
| 0.01677
| 0.017888
| 0.257267
| 0.178137
| 0.157516
| 0.136894
| 0.136894
| 0.096398
| 0
| 0.034052
| 0.323663
| 14,546
| 395
| 111
| 36.825316
| 0.784204
| 0.120033
| 0
| 0.173432
| 0
| 0
| 0.01731
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066421
| false
| 0
| 0.02214
| 0.00369
| 0.121771
| 0.01476
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|