| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
hexsha: 6c1ff08cd085c626e8f3f1328f189116ff83820d
size: 4,650
ext: py
lang: Python
max_stars_repo_path: app/main.py
max_stars_repo_name: govdirectory/health-check-service
max_stars_repo_head_hexsha: c32e1055e1c755fdb03e2786dc0a157697250421
max_stars_repo_licenses: ["CC0-1.0"]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2021-09-28T00:09:18.000Z
max_stars_repo_stars_event_max_datetime: 2021-09-28T00:09:18.000Z
max_issues_repo_path: app/main.py
max_issues_repo_name: govdirectory/health-check-service
max_issues_repo_head_hexsha: c32e1055e1c755fdb03e2786dc0a157697250421
max_issues_repo_licenses: ["CC0-1.0"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: app/main.py
max_forks_repo_name: govdirectory/health-check-service
max_forks_repo_head_hexsha: c32e1055e1c755fdb03e2786dc0a157697250421
max_forks_repo_licenses: ["CC0-1.0"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
import requests
from typing import List
from fastapi import FastAPI, Path
from pydantic import BaseModel, HttpUrl
from fastapi.middleware.cors import CORSMiddleware
cors_origins = [
'https://www.govdirectory.org',
'https://www.wikidata.org',
]
user_agent_external = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:90.0) Gecko/20100101 Firefox/90.0 Govdirectory.org account existence checker'
user_agent_wikimedia = 'Wikidata:WikiProject Govdirectory (health check service)'
url_properties = [
{
'name': 'official website',
'prop': 'P856',
},
{
'name': 'URL for citizen\'s initiatives',
'prop': 'P9732',
},
]
platform_properties = [
{
'name': 'Twitter username',
'prop': 'P2002',
'formatter_url': 'https://twitter.com/$1',
},
{
'name': 'YouTube channel ID',
'prop': 'P2397',
'formatter_url': 'https://www.youtube.com/channel/$1',
},
{
'name': 'Facebook ID',
'prop': 'P2013',
'formatter_url': 'https://www.facebook.com/$1',
},
{
'name': 'Instagram username',
'prop': 'P2003',
'formatter_url': 'https://www.instagram.com/$1/',
},
{
'name': 'GitHub username',
'prop': 'P2037',
'formatter_url': 'https://github.com/$1',
},
{
'name': 'Vimeo identifier',
'prop': 'P4015',
'formatter_url': 'https://vimeo.com/$1',
},
{
'name': 'Flickr user ID',
'prop': 'P3267',
'formatter_url': 'https://www.flickr.com/people/$1',
},
{
'name': 'Pinterest username',
'prop': 'P3836',
'formatter_url': 'https://www.pinterest.com/$1/',
},
{
'name': 'Dailymotion channel ID',
'prop': 'P2942',
'formatter_url': 'https://www.dailymotion.com/$1',
},
{
'name': 'TikTok username',
'prop': 'P7085',
'formatter_url': 'https://www.tiktok.com/@$1',
},
{
'name': 'SlideShare username',
'prop': 'P4016',
'formatter_url': 'https://www.slideshare.net/$1',
},
]
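# '$1' in each formatter URL is replaced with the account identifier before checking (see the platform loop in read_item).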
def check_url(url: HttpUrl):
r = requests.head(url, headers={ 'User-Agent': user_agent_external, 'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate, br' })
if r.status_code >= 400:
return r.status_code
return None
app = FastAPI(
title='Govdirectory Health Check Service',
description='Microservice that validates various external identifiers and URLs associated with a given Wikidata identifier.',
version='0.1.0',
docs_url='/',
)
app.add_middleware(
CORSMiddleware,
allow_origins=cors_origins,
allow_credentials=True,
allow_methods=['GET'],
allow_headers=['*'],
)
class Error(BaseModel):
prop: str
prop_name: str
url: HttpUrl
status_code: int
@app.get('/{qid}', response_model=List[Error])
async def read_item(qid: str = Path(..., title='Wikidata identifier', min_length=2, regex=r'^Q\d+$')):
r = requests.get('https://www.wikidata.org/w/api.php?action=wbgetentities&props=claims&utf8=1&format=json&ids=' + qid, headers={ 'User-Agent': user_agent_wikimedia })
item_statements = list(r.json()['entities'][qid]['claims'].items())
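    # Each claim is a (property ID, statements) pair, e.g. ('P856', [...]); one property can hold several statements.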
errors = []
for p in url_properties:
for claim in item_statements:
if not claim[0] == p['prop']:
continue
for statement in claim[1]: # needed in case a prop has several values
url = statement['mainsnak']['datavalue']['value']
negative_status = check_url(url)
if negative_status:
error = {
'prop': p['prop'],
'prop_name': p['name'],
'url': url,
'status_code': negative_status,
}
errors.append(error)
for p in platform_properties:
for claim in item_statements:
if not claim[0] == p['prop']:
continue
for statement in claim[1]: # needed in case a prop has several values
identifier = statement['mainsnak']['datavalue']['value']
url = p['formatter_url'].replace('$1', identifier, 1)
negative_status = check_url(url)
if negative_status:
error = {
'prop': p['prop'],
'prop_name': p['name'],
'url': url,
'status_code': negative_status,
}
errors.append(error)
return errors
avg_line_length: 29.807692
max_line_length: 170
alphanum_fraction: 0.543871
qsc_code_num_words_quality_signal: 494
qsc_code_num_chars_quality_signal: 4,650
qsc_code_mean_word_length_quality_signal: 5.006073
qsc_code_frac_words_unique_quality_signal: 0.350202
qsc_code_frac_chars_top_2grams_quality_signal: 0.058229
qsc_code_frac_chars_top_3grams_quality_signal: 0.075617
qsc_code_frac_chars_top_4grams_quality_signal: 0.064699
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.202992
qsc_code_frac_chars_dupe_6grams_quality_signal: 0.182774
qsc_code_frac_chars_dupe_7grams_quality_signal: 0.182774
qsc_code_frac_chars_dupe_8grams_quality_signal: 0.182774
qsc_code_frac_chars_dupe_9grams_quality_signal: 0.182774
qsc_code_frac_chars_dupe_10grams_quality_signal: 0.182774
qsc_code_frac_chars_replacement_symbols_quality_signal: 0
qsc_code_frac_chars_digital_quality_signal: 0.030546
qsc_code_frac_chars_whitespace_quality_signal: 0.303011
qsc_code_size_file_byte_quality_signal: 4,650
qsc_code_num_lines_quality_signal: 155
qsc_code_num_chars_line_max_quality_signal: 171
qsc_code_num_chars_line_mean_quality_signal: 30
qsc_code_frac_chars_alphabet_quality_signal: 0.73249
qsc_code_frac_chars_comments_quality_signal: 0.017419
qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0.173913
qsc_code_cate_autogen_quality_signal: 0
qsc_code_frac_lines_long_string_quality_signal: 0.007246
qsc_code_frac_chars_string_length_quality_signal: 0.336837
qsc_code_frac_chars_long_word_length_quality_signal: 0
qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0
qsc_code_frac_lines_prompt_comments_quality_signal: 0
qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1
qsc_codepython_frac_lines_func_ratio_quality_signal: 0.007246
qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0
qsc_codepython_frac_lines_import_quality_signal: 0.036232
qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 0.101449
qsc_codepython_frac_lines_print_quality_signal: 0
all qsc_code_* / qsc_codepython_* columns without the _quality_signal suffix: 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat)
effective: 1
hits: 0
hexsha: 6c21574006d86d8b50934be4fe7a2c2e0d87d074
size: 5,933
ext: py
lang: Python
max_stars_repo_path: src/szz/manipulate_sql_tables/add_depth_to_BTE_table.py
max_stars_repo_name: dSar-UVA/repoMiner
max_stars_repo_head_hexsha: 8f75074e388ff13419a0a37b4337c0cdcb459f74
max_stars_repo_licenses: ["BSD-3-Clause"]
max_stars_count: 9
max_stars_repo_stars_event_min_datetime: 2017-10-21T13:29:46.000Z
max_stars_repo_stars_event_max_datetime: 2022-01-10T23:49:54.000Z
max_issues_repo_path: src/szz/manipulate_sql_tables/add_depth_to_BTE_table.py
max_issues_repo_name: dSar-UVA/repoMiner
max_issues_repo_head_hexsha: 8f75074e388ff13419a0a37b4337c0cdcb459f74
max_issues_repo_licenses: ["BSD-3-Clause"]
max_issues_count: 3
max_issues_repo_issues_event_min_datetime: 2018-01-09T11:28:55.000Z
max_issues_repo_issues_event_max_datetime: 2019-01-20T08:45:18.000Z
max_forks_repo_path: src/szz/manipulate_sql_tables/add_depth_to_BTE_table.py
max_forks_repo_name: dSar-UVA/repoMiner
max_forks_repo_head_hexsha: 8f75074e388ff13419a0a37b4337c0cdcb459f74
max_forks_repo_licenses: ["BSD-3-Clause"]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2020-12-29T05:10:31.000Z
max_forks_repo_forks_event_max_datetime: 2020-12-29T05:10:31.000Z
content:
"""
This script adds a specific column to the `bug_type_entropy_projectname_old` tables. The added column contains the nesting depth (>=0) of each line.
"""
import os, sys, psycopg2, ntpath, traceback, subprocess
from pprint import pprint
#--------------------------------------------------------------------------------------------------------------------------
def get_BTE_data(project_name):
BTE_old_table_name = "err_corr_c.bug_type_entropy_" + project_name + "_old"
    BTE_old_table_name = BTE_old_table_name.replace('-', '_')
BTE_data = []
try:
con = psycopg2.connect(database='saheel', user='saheel')
cur = con.cursor()
cur.execute("SELECT file_name, sha, line_num, parents_all FROM " + BTE_old_table_name)
BTE_data = list(cur.fetchall())
except Exception as e:
        traceback.print_exc()
print(str(e))
raise e
if con:
con.close()
# Make it a list of lists instead of list of tuples
for index, BTE_tuple in enumerate(BTE_data):
BTE_data[index] = list(BTE_tuple)
return BTE_data
#--------------------------------------------------------------------------------------------------------------------------
def dump_BTE_prime_table(BTE_data, project_name):
BTE_prime_table_name = "err_corr_c.BTE_prime_" + project_name
BTE_prime_table_name = BTE_prime_table_name.replace('-', '_')
try:
con = psycopg2.connect(database='saheel', user='saheel')
cur = con.cursor()
cur.execute("DROP TABLE IF EXISTS " + BTE_prime_table_name + " ")
query = """
CREATE TABLE """ + BTE_prime_table_name + """ (file_name varchar(100),
sha varchar(42),
line_num integer,
parents_all varchar(144),
depth integer)
"""
cur.execute(query)
query = "INSERT INTO " + BTE_prime_table_name + " (file_name, sha, line_num, parents_all, depth) VALUES (%s, %s, %s, %s, %s)"
cur.executemany(query, BTE_data)
con.commit()
except Exception as e:
        traceback.print_exc()
print(str(e))
raise e
if con:
con.close()
#--------------------------------------------------------------------------------------------------------------------------
def join_BTE_old_and_BTE_prime(project_name):
BTE_old_table_name = "err_corr_c.bug_type_entropy_" + project_name + "_old"
BTE_old_table_name = BTE_old_table_name.replace('-', '_')
BTE_prime_table_name = "err_corr_c.BTE_prime_" + project_name
BTE_prime_table_name = BTE_prime_table_name.replace('-', '_')
BTE_merged_table_name = "err_corr_c.bug_type_entropy_" + project_name + "_old_wd"
BTE_merged_table_name = BTE_merged_table_name.replace('-', '_')
try:
con = psycopg2.connect(database='saheel', user='saheel')
cur = con.cursor()
cur.execute("ALTER TABLE " + BTE_old_table_name + " DROP COLUMN IF EXISTS depth")
query = """
SELECT old.*, prime.depth
INTO """ + BTE_merged_table_name + """
FROM """ + BTE_old_table_name + """ as old
JOIN """ + BTE_prime_table_name + """ as prime
ON (old.file_name = prime.file_name AND
old.sha = prime.sha AND
old.line_num = prime.line_num)
"""
cur.execute(query)
con.commit()
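        # Swap tables: drop the temporary and the original, then rename the merged table to the original name.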
cur.execute("DROP TABLE " + BTE_prime_table_name)
cur.execute("DROP TABLE " + BTE_old_table_name)
cur.execute("ALTER TABLE " + BTE_merged_table_name + " RENAME TO " + BTE_old_table_name.split('.')[1])
con.commit()
except Exception as e:
        traceback.print_exc()
print(str(e))
raise e
if con:
con.close()
#--------------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
if len(sys.argv) != 2:
print("\nUsage: python add_depth_to_BTE_table.py <project_name>")
print("\nSample usage: python add_depth_to_BTE_table.py libgit2")
raise ValueError("Incorrect input arguments. Aborting...")
project_name = sys.argv[1]
# depth_dict = get_depth_data(project_name)
# if not depth_dict:
# raise ValueError("`get_depth_data` returned an empty `depth_dict` dictionary. Aborting...")
print("\nNow fetching BTE_old_data...")
# BTE_data is a list of lists; each element list = [file_name, sha, line_num, parents_all]
BTE_data = get_BTE_data(project_name)
if not BTE_data:
raise ValueError("`get_BTE_data` returned an empty `BTE_data` list. Aborting...")
print("\nNow creating BTE_prime_data, i.e., table with `depth` appended to BTE_old_data...")
# We will add `depth` attribute to each row in BTE_data
error_count = 0
for index, BTE_tuple in enumerate(BTE_data):
# `depth` = number of parents as given in `parents_all` column of BTE table
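        # `parents_all` appears to be a '-'-separated chain of parents, so counting separators (+1) gives the nesting depth; an empty string means depth 0.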
depth = BTE_tuple[3].count('-') + 1
if BTE_tuple[3] == '':
BTE_data[index].append(0)
else:
BTE_data[index].append(depth)
print("\nNow dumping the temporary table BTE_prime. This may take approx. 3-4 min per million LOC...")
dump_BTE_prime_table(BTE_data, project_name)
print("\nNow joining BTE_old and BTE_prime to get desired table. This takes about 2 min per million LOC...")
join_BTE_old_and_BTE_prime(project_name)
#--------------------------------------------------------------------------------------------------------------------------
avg_line_length: 40.917241
max_line_length: 148
alphanum_fraction: 0.539019
qsc_code_num_words_quality_signal: 696
qsc_code_num_chars_quality_signal: 5,933
qsc_code_mean_word_length_quality_signal: 4.268678
qsc_code_frac_words_unique_quality_signal: 0.232759
qsc_code_frac_chars_top_2grams_quality_signal: 0.081791
qsc_code_frac_chars_top_3grams_quality_signal: 0.056883
qsc_code_frac_chars_top_4grams_quality_signal: 0.062942
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.490744
qsc_code_frac_chars_dupe_6grams_quality_signal: 0.396163
qsc_code_frac_chars_dupe_7grams_quality_signal: 0.382026
qsc_code_frac_chars_dupe_8grams_quality_signal: 0.33625
qsc_code_frac_chars_dupe_9grams_quality_signal: 0.271962
qsc_code_frac_chars_dupe_10grams_quality_signal: 0.271962
qsc_code_frac_chars_replacement_symbols_quality_signal: 0
qsc_code_frac_chars_digital_quality_signal: 0.005709
qsc_code_frac_chars_whitespace_quality_signal: 0.261925
qsc_code_size_file_byte_quality_signal: 5,933
qsc_code_num_lines_quality_signal: 144
qsc_code_num_chars_line_max_quality_signal: 149
qsc_code_num_chars_line_mean_quality_signal: 41.201389
qsc_code_frac_chars_alphabet_quality_signal: 0.672756
qsc_code_frac_chars_comments_quality_signal: 0.199393
qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0.44898
qsc_code_cate_autogen_quality_signal: 0
qsc_code_frac_lines_long_string_quality_signal: 0.020408
qsc_code_frac_chars_string_length_quality_signal: 0.354259
qsc_code_frac_chars_long_word_length_quality_signal: 0.037201
qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0
qsc_code_frac_lines_prompt_comments_quality_signal: 0
qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1
qsc_codepython_frac_lines_func_ratio_quality_signal: 0.030612
qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0
qsc_codepython_frac_lines_import_quality_signal: 0.020408
qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 0.061224
qsc_codepython_frac_lines_print_quality_signal: 0.132653
all qsc_code_* / qsc_codepython_* columns without the _quality_signal suffix: 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat)
effective: 1
hits: 0
hexsha: 6c21d95c779b91777f1c1e4c2f9a294fa6bd8d6e
size: 10,862
ext: py
lang: Python
max_stars_repo_path: tools/ProjectionTools/Lidar2RGB/lib/utils.py
max_stars_repo_name: ladt/SeeingThroughFog
max_stars_repo_head_hexsha: c714a4c3e8f8e604494b1db6e9eef529b0326405
max_stars_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tools/ProjectionTools/Lidar2RGB/lib/utils.py
max_issues_repo_name: ladt/SeeingThroughFog
max_issues_repo_head_hexsha: c714a4c3e8f8e604494b1db6e9eef529b0326405
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tools/ProjectionTools/Lidar2RGB/lib/utils.py
max_forks_repo_name: ladt/SeeingThroughFog
max_forks_repo_head_hexsha: c714a4c3e8f8e604494b1db6e9eef529b0326405
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
import matplotlib.pyplot as plt
import numpy as np
import cv2
import scipy.spatial
from sklearn.linear_model import RANSACRegressor
import os
import sys
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import lib.settings
def dense_map(Pts, n, m, grid):
'''
interpolate lidar depth
    :param Pts: num observations of (W, H, D) lidar coordinates (D - depth corresponding to the (W, H) image positions), Pts.shape==(3, num)
:param n: image width
:param m: image height
:param grid: (grid*2+1) is neighborhood size
:return:
'''
ng = 2 * grid + 1
    mX = np.zeros((m, n)) + float("inf")
    mY = np.zeros((m, n)) + float("inf")
mD = np.zeros((m, n))
mX[np.int32(Pts[1]), np.int32(Pts[0])] = Pts[0] - np.round(Pts[0])
mY[np.int32(Pts[1]), np.int32(Pts[0])] = Pts[1] - np.round(Pts[1])
mD[np.int32(Pts[1]), np.int32(Pts[0])] = Pts[2]
KmX = np.zeros((ng, ng, m - ng, n - ng))
KmY = np.zeros((ng, ng, m - ng, n - ng))
KmD = np.zeros((ng, ng, m - ng, n - ng))
for i in range(ng):
for j in range(ng):
KmX[i, j] = mX[i: (m - ng + i), j: (n - ng + j)] - grid - 1 + i
KmY[i, j] = mY[i: (m - ng + i), j: (n - ng + j)] - grid - 1 + i
KmD[i, j] = mD[i: (m - ng + i), j: (n - ng + j)]
S = np.zeros_like(KmD[0, 0])
Y = np.zeros_like(KmD[0, 0])
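    # Inverse-distance weighting over the (ng x ng) neighbourhood: each shifted sample's depth KmD is weighted by 1/distance to the pixel centre.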
for i in range(ng):
for j in range(ng):
s = 1 / np.sqrt(KmX[i, j] * KmX[i, j] + KmY[i, j] * KmY[i, j])
Y = Y + s * KmD[i, j]
S = S + s
S[S == 0] = 1
out = np.zeros((m, n))
out[grid + 1: -grid, grid + 1: -grid] = Y / S
return out
def project_pointcloud(lidar, vtc, velodyne_to_camera, image_shape, init=None, draw_big_circle=False):
def py_func_project_3D_to_2D(points_3D, P):
# Project on image
points_2D = np.matmul(P, np.vstack((points_3D, np.ones([1, np.shape(points_3D)[1]]))))
# scale projected points
points_2D[0][:] = points_2D[0][:] / points_2D[2][:]
points_2D[1][:] = points_2D[1][:] / points_2D[2][:]
points_2D = points_2D[0:2]
return points_2D.transpose()
def py_func_create_lidar_img(lidar_points_2D, lidar_points, img_width=1248, img_height=375, init=None):
# lidar_points_2d shape (19988, 2), each line is 2d image coordinates
# lidar_points shape (3, 19988)
within_image_boarder_width = np.logical_and(img_width > lidar_points_2D[:, 0], lidar_points_2D[:, 0] >= 0)
within_image_boarder_height = np.logical_and(img_height > lidar_points_2D[:, 1], lidar_points_2D[:, 1] >= 0)
valid_points = np.logical_and(within_image_boarder_width, within_image_boarder_height) #(19988,) boolean array
coordinates = np.where(valid_points)[0] #(4222,) - enteries of valid points in lidar_points
values = lidar_points[:, coordinates] #(3, 4222)
if init is None:
image = -120.0 * np.ones((img_width, img_height, 3)) #image.shape==(1920, 1024, 3)
else:
image = init.transpose((1, 0, 2)) #image.shape==(1920, 1024, 3), zeroes
img_coordinates = lidar_points_2D[coordinates, :].astype(dtype=np.int32) #(4222, 2)
final_coordinates = np.concatenate((img_coordinates, values.transpose()[:, 1][:, np.newaxis]), 1).transpose()
inter_image = dense_map(final_coordinates, img_width, img_height, grid=lib.settings.grid_size)
import matplotlib as mpl
import matplotlib.cm as cm
norm = mpl.colors.Normalize(vmin=0, vmax=80)
cmap = cm.jet
m = cm.ScalarMappable(norm, cmap)
depth_map_color = np.copy(inter_image).reshape(-1)
depth_map_color = m.to_rgba(depth_map_color)
depth_map_color = (255 * depth_map_color).astype(dtype=np.uint8)
depth_map_color = np.array(depth_map_color)[:, :3]
depth_map_color = depth_map_color.reshape((inter_image.shape[0], inter_image.shape[1], 3))
inter_image_colormap = depth_map_color
if not draw_big_circle:
image[img_coordinates[:, 0], img_coordinates[:, 1], :] = values.transpose() # image is (1920, 1024, 3), values.transpose() is (4222, 3)
else:
            # Slow elementwise circle drawing through OpenCV
            num_points = img_coordinates.shape[0]  # renamed so the built-in len() is not shadowed
            image = image.transpose([1, 0, 2]).squeeze().copy()
            depth_map_color = values.transpose()[:, 1] #values.transpose() is (4222, 3); [:, 1] is depth (z)
            depth_map_color = m.to_rgba(depth_map_color)
            depth_map_color = (255 * depth_map_color).astype(dtype=np.uint8)
            for idx in range(num_points):
x, y = img_coordinates[idx, :]
value = depth_map_color[idx]
# print value
tupel_value = (int(value[0]), int(value[1]), int(value[2]))
# print tupel_value
cv2.circle(image, (x, y), 1, tupel_value, -1) # TODO was 3
return image, inter_image, inter_image_colormap #(1024, 1920, 3)
return image.transpose([1, 0, 2]).squeeze() #(1024, 1920, 3)
def py_func_lidar_projection(lidar_points_3D, vtc, velodyne_to_camera, shape, init=None): # input):
img_width = shape[1]
img_height = shape[0]
# print img_height, img_width
lidar_points_3D = lidar_points_3D[:, 0:4] #(54837, 4)
        # Filter away all points behind the image plane
min_x = 2.5
valid = lidar_points_3D[:, 0] > min_x
# extend projection matrix to 5d to efficiently parse intensity
lidar_points_3D = lidar_points_3D[np.where(valid)]
lidar_points_3D2 = np.ones((lidar_points_3D.shape[0], lidar_points_3D.shape[1] + 1))
lidar_points_3D2[:, 0:3] = lidar_points_3D[:, 0:3]
lidar_points_3D2[:, 4] = lidar_points_3D[:, 3]
        # Extend the projection matrix to pass the intensities through
velodyne_to_camera2 = np.zeros((5, 5))
velodyne_to_camera2[0:4, 0:4] = velodyne_to_camera
velodyne_to_camera2[4, 4] = 1
lidar_points_2D = py_func_project_3D_to_2D(lidar_points_3D.transpose()[:][0:3], vtc) #lidar_points_2d.shape=(19988, 2)
pts_3D = np.matmul(velodyne_to_camera2, lidar_points_3D2.transpose())
        # delete the placeholder row of ones (row 3)
pts_3D = np.delete(pts_3D, 3, axis=0) #pts_3d.shape==(4, 19988)
pts_3D_yzi = pts_3D[1:, :] #(3, 19988)
return py_func_create_lidar_img(lidar_points_2D, pts_3D_yzi, img_width=img_width,
img_height=img_height, init=init)
# lidar.shape==(54837, 5)
return py_func_lidar_projection(lidar, vtc, velodyne_to_camera, image_shape, init=init)
def find_missing_points(last, strongest):
last_set = set([tuple(x) for x in last])
strong_set = set([tuple(x) for x in strongest])
remaining_last = np.array([x for x in last_set - strong_set])
remaining_strong = np.array([x for x in strong_set - last_set])
return remaining_last, remaining_strong
def transform_coordinates(xyz):
"""
Takes as input a Pointcloud with xyz coordinates and appends spherical coordinates as columns
:param xyz:
:return: Pointcloud with following columns, r, phi, theta, ring, intensity, x, y, z, intensity, ring
"""
ptsnew = np.hstack((np.zeros_like(xyz), xyz))
r_phi = xyz[:, 0] ** 2 + xyz[:, 1] ** 2
ptsnew[:, 0] = np.sqrt(r_phi + xyz[:, 2] ** 2)
ptsnew[:, 2] = np.pi / 2 - np.arctan2(np.sqrt(r_phi), xyz[:, 2]) # for elevation angle defined from Z-axis down
ptsnew[:, 1] = np.arctan2(xyz[:, 1], xyz[:, 0])
ptsnew[:, 3] = xyz[:, 4]
ptsnew[:, 4] = xyz[:, 3]
return ptsnew
def find_closest_neighbors(x, reference):
"""
This function allows you to match strongest and last echos and reason about scattering distributions.
:param x: Pointcloud which should be matched
:param reference: Reference Pointcloud
:return: returns valid matching indexes
"""
tree = scipy.spatial.KDTree(reference[:, 1:4])
distances, indexes = tree.query(x[:, 1:4], p=2)
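    # Nearest-neighbour query using Euclidean distance (p=2); indexes[i] is the reference point closest to x[i].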
print('indexes', indexes)
print('found matches', len(indexes), len(set(indexes)))
# return 0
valid = []
# not matching contains all not explainable scattered mismatching particles
not_matching = []
for idx, i in enumerate(indexes):
delta = reference[i, :] - x[idx, :]
# Laser Ring has to match
if delta[-1] == 0:
            # Follows the assumption that the strongest echo has a higher intensity than the last one, and that the
            # last return is more distant. The sensor can report the strongest echo twice if the strongest and last
            # echoes match; those points are not matched here.
if delta[-2] < 0 and delta[0] > 0:
valid.append((i, idx))
else:
not_matching.append((i, idx))
else:
not_matching.append((i, idx))
return valid
def filter(lidar_data, distance):
"""
    Takes a lidar pointcloud as input and filters out points below the distance threshold
:param lidar_data: Input Pointcloud
:param distance: Minimum distance for filtering
:return: Filtered Pointcloud
"""
r = np.sqrt(lidar_data[:, 0] ** 2 + lidar_data[:, 1] ** 2 + lidar_data[:, 2] ** 2)
true_idx = np.where(r > distance)
lidar_data = lidar_data[true_idx, :]
return lidar_data[0]
def read_split(split):
with open(split, 'r') as f:
entry_ids = f.readlines()
entry_ids = [i.replace('\n', '') for i in entry_ids]
return entry_ids
def filter_below_groundplane(pointcloud, tolerance=1):
valid_loc = (pointcloud[:, 2] < -1.4) & \
(pointcloud[:, 2] > -1.86) & \
(pointcloud[:, 0] > 0) & \
(pointcloud[:, 0] < 40) & \
(pointcloud[:, 1] > -15) & \
(pointcloud[:, 1] < 15)
pc_rect = pointcloud[valid_loc]
print(pc_rect.shape)
if pc_rect.shape[0] <= pc_rect.shape[1]:
w = [0, 0, 1]
h = -1.55
else:
reg = RANSACRegressor().fit(pc_rect[:, [0, 1]], pc_rect[:, 2])
w = np.zeros(3)
w[0] = reg.estimator_.coef_[0]
w[1] = reg.estimator_.coef_[1]
w[2] = 1.0
h = reg.estimator_.intercept_
w = w / np.linalg.norm(w)
print(reg.estimator_.coef_)
print(reg.get_params())
print(w, h)
height_over_ground = np.matmul(pointcloud[:, :3], np.asarray(w))
height_over_ground = height_over_ground.reshape((len(height_over_ground), 1))
above_ground = np.matmul(pointcloud[:, :3], np.asarray(w)) - h > -tolerance
print(above_ground.shape)
return np.hstack((pointcloud[above_ground, :], height_over_ground[above_ground]))
avg_line_length: 39.498182
max_line_length: 147
alphanum_fraction: 0.612042
qsc_code_num_words_quality_signal: 1,595
qsc_code_num_chars_quality_signal: 10,862
qsc_code_mean_word_length_quality_signal: 3.987461
qsc_code_frac_words_unique_quality_signal: 0.201881
qsc_code_frac_chars_top_2grams_quality_signal: 0.050157
qsc_code_frac_chars_top_3grams_quality_signal: 0.032704
qsc_code_frac_chars_top_4grams_quality_signal: 0.00566
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.194497
qsc_code_frac_chars_dupe_6grams_quality_signal: 0.161321
qsc_code_frac_chars_dupe_7grams_quality_signal: 0.114151
qsc_code_frac_chars_dupe_8grams_quality_signal: 0.102516
qsc_code_frac_chars_dupe_9grams_quality_signal: 0.059591
qsc_code_frac_chars_dupe_10grams_quality_signal: 0.038365
qsc_code_frac_chars_replacement_symbols_quality_signal: 0
qsc_code_frac_chars_digital_quality_signal: 0.048682
qsc_code_frac_chars_whitespace_quality_signal: 0.249217
qsc_code_size_file_byte_quality_signal: 10,862
qsc_code_num_lines_quality_signal: 274
qsc_code_num_chars_line_max_quality_signal: 148
qsc_code_num_chars_line_mean_quality_signal: 39.642336
qsc_code_frac_chars_alphabet_quality_signal: 0.731208
qsc_code_frac_chars_comments_quality_signal: 0.194531
qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0.085714
qsc_code_cate_autogen_quality_signal: 0
qsc_code_frac_lines_long_string_quality_signal: 0
qsc_code_frac_chars_string_length_quality_signal: 0.003372
qsc_code_frac_chars_long_word_length_quality_signal: 0
qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0
qsc_code_frac_lines_prompt_comments_quality_signal: 0.00365
qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1
qsc_codepython_frac_lines_func_ratio_quality_signal: 0.062857
qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0
qsc_codepython_frac_lines_import_quality_signal: 0.062857
qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 0.194286
qsc_codepython_frac_lines_print_quality_signal: 0.04
all qsc_code_* / qsc_codepython_* columns without the _quality_signal suffix: 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat)
effective: 1
hits: 0
hexsha: 6c239288cab4c6c0e0a6b09c9cc9d94248d11f8f
size: 20,757
ext: py
lang: Python
max_stars_repo_path: main.py
max_stars_repo_name: AniruddhaGawali/Hand_Cricket
max_stars_repo_head_hexsha: 71fcf5ffa49dbfcfdfceba9784d88c5adfd0fccb
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2020-11-08T07:38:58.000Z
max_stars_repo_stars_event_max_datetime: 2020-11-08T07:38:58.000Z
max_issues_repo_path: main.py
max_issues_repo_name: AniruddhaGawali/Hand_Cricket
max_issues_repo_head_hexsha: 71fcf5ffa49dbfcfdfceba9784d88c5adfd0fccb
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: main.py
max_forks_repo_name: AniruddhaGawali/Hand_Cricket
max_forks_repo_head_hexsha: 71fcf5ffa49dbfcfdfceba9784d88c5adfd0fccb
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# ---------------------------------------------------------------ALL REQUIRED FILES------------------------------------------------------------
from tkinter import *
import tkinter.ttk as ttk
import tkinter.messagebox as msg
import tkinter.filedialog as tf
from ttkthemes import ThemedStyle
from PIL import Image, ImageTk
import random,pickle,os,playsound,datetime
root = Tk()
style = ThemedStyle(root)
root.wm_iconbitmap("data/img/ico/icon.ico")
root.title('Hand Cricket')
if os.path.isfile('data/files/app_data.p'):
f1 = open('data/files/app_data.p','rb')
theme = pickle.load(f1)
else:
theme=2
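    # theme 2 = dark, theme 1 = light (see temp_dark/temp_light below); dark is the default when no saved settings file exists.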
if theme ==2:
bg_color='gray10'
fg_color='dodgerblue'
root.config(bg='gray10')
label_bg_color = 'gray20'
label_fg_color = 'dodgerblue'
elif theme ==1:
bg_color='white'
fg_color='dodgerblue'
root.config(bg='white')
label_bg_color = 'dodgerblue'
label_fg_color = 'white'
style.set_theme("vista")
root.geometry('300x520')
root.maxsize(300,518)
# --------------------------------------------------------------------VARIABLES----------------------------------------------------------------
# n=0
player_run=0
comp_run=0
Total_runs=0
comp_Total_runs=0
player_wicket=0
comp_wicket=0
players_balls=0
comp_balls=0
target=0
Total_overs = 0
Total_wicket =0
who_win = ''
player_bat_choice={}
comp_bat_choice={}
# -------------------------------------------------------------------FUNCTIONS------------------------------------------------------------------
def raise_frame(frame):
frame.tkraise()
def effect(file):
playsound.playsound(file)
def comp_score_board():
comp_score['text']=f'{comp_Total_runs}/{comp_wicket}'
balls_remain['text']=f'Balls : {comp_balls}'
def player_score_board():
score['text']=f'{Total_runs}/{player_wicket}'
balls_remain['text']=f'Balls : {players_balls}'
def overs(o,w):
global players_balls, comp_balls
if int(w) == 0 or int(o) == 0:
pass
else:
global Total_overs, Total_wicket
Total_overs = int(o)
Total_wicket = int(w)
players_balls=Total_overs*6
comp_balls=Total_overs*6
balls_remain['text']=f'Balls : {players_balls}'
over_count['text']=f'Total Overs : {Total_overs}'
def comp_bat():
comp_bat_choice[f"{comp_balls}"] = [player_run,comp_run]
def player_bat():
player_bat_choice[f"{players_balls}"] = [player_run,comp_run]
def player_bat_match_result():
global who_win
if players_balls==0 and comp_balls==0 or comp_wicket==Total_wicket:
for i in range(0,7):
if i==5:
continue
else:
globals()['but%s'%i].config(state='disabled')
if Total_runs > comp_Total_runs:
conc_style.configure('conc.TLabel',background=bg_color,foreground='green')
concustion_label['text']= f'YOU WIN'
who_win = 'p'
            effect('data/sound/win.mp3')
elif Total_runs==comp_Total_runs:
conc_style.configure('conc.TLabel',background=bg_color,foreground='gray40')
concustion_label['text']= f'TIE'
who_win = 't'
effect("data\sound\loss.mp3")
else:
conc_style.configure('conc.TLabel',background=bg_color,foreground='red')
concustion_label['text']= f'YOU LOSS'
who_win= 'c'
effect("data\sound\loss.mp3")
elif players_balls == 0 and Total_runs < comp_Total_runs:
conc_style.configure('conc.TLabel',background=bg_color,foreground='red')
concustion_label['text']= f'YOU LOSS'
who_win = 'c'
for i in range(0,7):
if i==5:
continue
else:
globals()['but%s'%i].config(state='disabled')
effect("data\sound\loss.mp3")
def comp_bat_match_result():
global who_win
if players_balls==0 and comp_balls==0 or player_wicket==Total_wicket:
for i in range(0,7):
if i==5:
continue
else:
globals()['but%s'%i].config(state='disabled')
if Total_runs > comp_Total_runs:
conc_style.configure('conc.TLabel',background=bg_color,foreground='green')
concustion_label['text']= f'YOU WIN'
            effect('data/sound/win.mp3')
who_win = 'p'
elif Total_runs==comp_Total_runs:
conc_style.configure('conc.TLabel',background=bg_color,foreground='gray40')
concustion_label['text']= f'TIE'
who_win='t'
effect("data\sound\loss.mp3")
else:
conc_style.configure('conc.TLabel',background=bg_color,foreground='red')
concustion_label['text']= f'YOU LOSS'
who_win='c'
effect("data\sound\loss.mp3")
elif comp_balls == 0 and Total_runs > comp_Total_runs:
conc_style.configure('conc.TLabel',background=bg_color,foreground='green')
concustion_label['text']= f'YOU WIN'
who_win='p'
for i in range(0,7):
if i==5:
continue
else:
globals()['but%s'%i].config(state='disabled')
        effect('data/sound/win.mp3')
def player_bat_match():
global Total_runs,target,player_wicket,players_balls,comp_balls,comp_wicket,comp_Total_runs
player_select_no['text']=f"{player_run}"
comp_select_no['text']=f"{comp_run}"
if players_balls==0 or player_wicket==Total_wicket:
        who_ball['text']='Bowling : You'
who_bat['text']='Batting : Comp'
players_balls=0
target = Total_runs+1
target_label['text']=f'Target : {target}'
if comp_run == player_run:
comp_wicket +=1
conc_style.configure('conc.TLabel',background=bg_color,foreground='red')
concustion_label['text']= f'Out'
comp_balls -= 1
comp_score_board()
else:
comp_Total_runs+=comp_run
comp_balls -= 1
comp_score_board()
conc_style.configure('conc.TLabel',background=bg_color,foreground='gray40')
concustion_label['text']= f'Continue'
comp_bat()
else:
        who_ball['text']='Bowling : Comp'
who_bat['text']='Batting : You'
if comp_run == player_run:
player_wicket +=1
conc_style.configure('conc.TLabel',background=bg_color,foreground='red')
concustion_label['text']= f'Out'
players_balls -= 1
player_score_board()
else:
Total_runs+=player_run
conc_style.configure('conc.TLabel',background=bg_color,foreground='gray40')
concustion_label['text']= f'Continue'
players_balls -= 1
player_score_board()
player_bat()
player_bat_match_result()
def comp_bat_match():
global Total_runs,target,player_wicket,players_balls,comp_balls,comp_wicket,comp_Total_runs
player_select_no['text']=f"{player_run}"
comp_select_no['text']=f"{comp_run}"
if comp_balls==0 or comp_wicket==Total_wicket:
        who_ball['text']='Bowling : Comp'
who_bat['text']='Batting : You'
comp_balls=0
target = comp_Total_runs+1
target_label['text']=f'Target : {target}'
if comp_run == player_run:
player_wicket +=1
conc_style.configure('conc.TLabel',background=bg_color,foreground='red')
concustion_label['text']= f'Out'
players_balls -= 1
player_score_board()
else:
Total_runs+=player_run
conc_style.configure('conc.TLabel',background=bg_color,foreground='gray40')
concustion_label['text']= f'Continue'
players_balls -= 1
player_score_board()
player_bat()
else:
who_bat['text']='Batting : Comp'
        who_ball['text']='Bowling : You'
if comp_run == player_run:
comp_wicket +=1
conc_style.configure('conc.TLabel',background=bg_color,foreground='red')
concustion_label['text']= f'Out'
comp_balls -= 1
comp_score_board()
# effect(out)
else:
comp_Total_runs+=comp_run
conc_style.configure('conc.TLabel',background=bg_color,foreground='gray40')
concustion_label['text']= f'Continue'
comp_balls -= 1
comp_score_board()
comp_bat()
comp_bat_match_result()
def comp_select():
global comp_run
comp_run = random.choice((0,1,2,3,4,4,6,3,6))
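    # 3, 4 and 6 each appear twice in the tuple, so the computer favours higher runs over 0-2.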
def add_runs(run):
global player_run
global player_wicket,match_is_of
player_run = run
comp_select()
if First_to.get()=='ba':
comp_bat_match()
match_is_of=1
elif First_to.get()=='b':
player_bat_match()
match_is_of=2
def coin_toss(select):
    effect('data/sound/coinflip.mp3')
overs(over.get(),wicket.get())
coin_face = random.choice(('h','t'))
if select== coin_face:
raise_frame(root_frame3)
else:
raise_frame(root_frame2)
First_to.set('ba')
def quitapp():
root.destroy()
def newgame():
    global Total_runs,target,player_wicket,players_balls,comp_balls,comp_wicket,comp_Total_runs,Total_overs,Total_wicket
    global player_run,comp_run,who_win # comp_run and who_win must be declared global here, otherwise a new game would not reset them
player_run=0
comp_run=0
player_wicket=0
comp_wicket=0
Total_runs=0
comp_Total_runs=0
players_balls=0
comp_balls=0
target=0
Total_overs = 0
Total_wicket =0
who_win=''
for i in range(0,7):
if i==5:
continue
else:
globals()['but%s'%i].config(state='normal')
raise_frame(root_frame1)
balls_remain['text']=f'Balls : {comp_balls}'
comp_score['text']=f'{comp_Total_runs}/{comp_wicket}'
conc_style.configure('conc.TLabel',background=bg_color,foreground='white')
concustion_label['text']= f'-'
player_select_no['text']=f"{player_run}"
comp_select_no['text']=f"{comp_run}"
score['text']=f'{Total_runs}/{player_wicket}'
target_label['text']=f'Target : {target}'
comp_bat_choice.clear()
player_bat_choice.clear()
def save_game():
if who_win == '':
msg.showwarning("Warning", 'You have not Played a Game or\nYou have not Completed Your Game\nPlease do it First then only\nYou can save a Game File ')
else:
name = tf.asksaveasfilename(defaultextension=".txt",
filetypes=[("Text files",".txt"),
("Word files",".doc")],
initialdir="dir",
initialfile='game.txt',
title="Save as")
if name != '':
with open(name,'w') as file:
file.write(f'{datetime.datetime.now().strftime("%B %d, %Y")}\n')
file.write(f'{datetime.datetime.now().strftime("%H:%M:%S")}\n\n')
if who_win == 'p':
file.write("PLAYER WINS")
elif who_win == 'c':
file.write("COMP WINS")
elif who_win == 't':
file.write("MATCH TIE")
file.write(f'\nTotal Over : {Total_overs}\t\tTotal Balls : {Total_overs*6}\nTotal Wicket : {Total_wicket}\tTarget : {target}\n\n\n')
if match_is_of == 1:
file.write('First Inning\nBAT : comp , BALL : player\n')
file.write(f'Score : {comp_Total_runs}/{comp_wicket}\n\n')
for k,v in comp_bat_choice.items():
                        file.write(f'player choice : {v[0]} , comp choice : {v[1]} , balls remaining : {k}\n')
file.write('\n\nSecond Inning\nBAT : player , BALL : comp\n')
file.write(f'Score : {Total_runs}/{player_wicket}\n\n')
for k,v in player_bat_choice.items():
                        file.write(f'player choice : {v[0]} , comp choice : {v[1]} , balls remaining : {k}\n')
elif match_is_of == 2:
file.write('First Inning\nBAT : player , BALL : comp\n')
file.write(f'Score : {Total_runs}/{player_wicket}\n\n')
for k,v in player_bat_choice.items():
                        file.write(f'player choice : {v[0]} , comp choice : {v[1]} , balls remaining : {k}\n')
file.write('\n\nSecond Inning\nBAT : comp , BALL : player\n')
file.write(f'Score : {comp_Total_runs}/{comp_wicket}\n\n')
for k,v in comp_bat_choice.items():
                        file.write(f'player choice : {v[0]} , comp choice : {v[1]} , balls remaining : {k}\n')
else:
msg.showwarning("Warn",'You have Not Select or Set the Game File\nSo Game file is Not Save')
# ------------------------------------------------------------FRAMES AND MAIN PROGRAM-----------------------------------------------------------
# ----------------------------------------------------------------------FRAME1------------------------------------------------------------------
root_frame1=Frame(root,bg=bg_color)
root_frame2=Frame(root,bg=bg_color)
root_frame3=Frame(root,bg=bg_color)
for frame in (root_frame1,root_frame2,root_frame3):
frame.grid(row=0,column=0,sticky='news')
raise_frame(root_frame1)
root_frame1_label_style=ttk.Style()
root_frame1_label_style.configure('TLabel',background=bg_color,foreground=fg_color)
over_select_label=ttk.Label(root_frame1,text='Select No. of Overs',font="Helvetica 15 bold",style='TLabel')
over_select_label.config(anchor=CENTER)
over_select_label.pack(padx=(23,0),pady=(20,0))
over=StringVar()
over.set('0')
over_select= ttk.Spinbox(root_frame1,from_=1,to=50,font='Helvetica 15 bold',textvariable=over)
over_select.pack(pady=8,padx=(23,0))
wicket=StringVar()
wicket.set('0')
player_select_label=ttk.Label(root_frame1,text='Select No. of Players',font="Helvetica 15 bold",style='TLabel')
player_select_label.config(anchor=CENTER)
player_select_label.pack(padx=(23,0))
no_of_players=ttk.Spinbox(root_frame1,from_=1,to=50,font='Helvetica 15 bold',textvariable=wicket)
no_of_players.pack(pady=8,padx=(23,0))
style_checkbutton=ttk.Style()
style_checkbutton.configure('TCheckbutton',width=10,height=100,background=bg_color,foreground=fg_color,font='Helvetica 15 bold')
toss_label=ttk.Label(root_frame1,text='Select the Face',font='Helvetica 15 bold',style='TLabel')
toss_label.pack(pady=(10,5))
toss=StringVar()
head=ttk.Checkbutton(root_frame1,text='HEADS',variable=toss,onvalue='h',style='TCheckbutton')
tails=ttk.Checkbutton(root_frame1,text='TAILS',variable=toss,onvalue='t',style='TCheckbutton')
head.pack()
tails.pack()
over_selected=ttk.Button(root_frame1,text='Toss',command=lambda : coin_toss(toss.get()))
over_selected.pack(pady=15,padx=(23,0))
# ----------------------------------------------------------------------FRAME3------------------------------------------------------------------
First_to=StringVar()
label1=Label(root_frame3,text='YOU WIN THE TOSS',background=bg_color,foreground=fg_color,font='Helvetica 15 bold')
label1.pack(padx=(20,0))
bat=ttk.Checkbutton(root_frame3,text='BAT',variable=First_to,onvalue='b',style='TCheckbutton')
ball=ttk.Checkbutton(root_frame3,text='BALL',variable=First_to,onvalue='ba',style='TCheckbutton')
bat.pack(pady=5,padx=(52,0))
ball.pack(pady=5,padx=(52,0))
buttton_of_match=ttk.Button(root_frame3,text="Start",command=lambda : raise_frame(root_frame2))
buttton_of_match.pack(pady=10)
# ----------------------------------------------------------------------FRAME2------------------------------------------------------------------
selected_no_frame=Frame(root_frame2,bg=bg_color)
selected_no_frame.pack()
player_select_no_label=ttk.Label(selected_no_frame,text=' You Select ',font="none 10 bold",style='TLabel')
player_select_no_label.grid(row=0,column=0,padx=(15,5),pady=5)
comp_select_no_label=ttk.Label(selected_no_frame,text='Comp Select',font="none 10 bold",style='TLabel')
comp_select_no_label.grid(row=0,column=1,padx=(40,0),pady=5)
player_select_no=ttk.Label(selected_no_frame,text='-',font='Helvetica 30 bold',style='TLabel')
comp_select_no=ttk.Label(selected_no_frame,text='-',font='Helvetica 30 bold',style='TLabel')
player_select_no.grid(row=1,column=0,padx=(15,5),pady=(5,2))
comp_select_no.grid(row=1,column=1,padx=(40,0),pady=(5,2))
conc_frame=Frame(root_frame2,bg=bg_color, relief=SUNKEN)
conc_frame.pack()
conc_style=ttk.Style()
conc_style.configure('conc.TLabel',background=bg_color,foreground='white')
concustion_label=ttk.Label(conc_frame,text='-',font='Helvetica 15 bold',style='conc.TLabel')
concustion_label.pack(padx=(31,10),pady=(0,15))
button_frame=Frame(root_frame2,bg=bg_color)
button_frame.pack(pady=20)
for i in range(0,7):
if i==5:
continue
else:
globals()['img%s'%i]= ImageTk.PhotoImage(Image.open(f"data/img/hand_numbers/img{i}.png"))
but0=Button(button_frame,text=i,image=img0,borderwidth=2,command= lambda : add_runs(0) )
but1=Button(button_frame,text=i,image=img1,borderwidth=2,command= lambda : add_runs(1) )
but2=Button(button_frame,text=i,image=img2,borderwidth=2,command= lambda : add_runs(2) )
but3=Button(button_frame,text=i,image=img3,borderwidth=2,command= lambda : add_runs(3) )
but4=Button(button_frame,text=i,image=img4,borderwidth=2,command= lambda : add_runs(4) )
but6=Button(button_frame,text=i,image=img6,borderwidth=2,command= lambda : add_runs(6) )
but0.grid(row=0,column=0,padx=(25,6),pady=5)
but1.grid(row=0,column=1,padx=(4,0),pady=5)
but2.grid(row=0,column=2,padx=(10,0),pady=5)
but3.grid(row=1,column=0,padx=(25,6),pady=5)
but4.grid(row=1,column=1,padx=(4,0),pady=5)
but6.grid(row=1,column=2,padx=(10,0),pady=5)
scrore_frame=Frame(root_frame2,bg=bg_color)
scrore_frame.pack(pady=10)
score_name_label=ttk.Label(scrore_frame,text='Your Score : ',font='Helvetica 20 bold')
score_name_label.grid(row=2,column=0,sticky=W,pady=(3,0),padx=(8,0))
score=ttk.Label(scrore_frame,text=f'{Total_runs}/{player_wicket}',font='Helvetica 20 bold')
score.grid(row=2,column=1,sticky=W,pady=(3,0))
comp_score_name_label=ttk.Label(scrore_frame,text='Comp Score : ',font='Helvetica 20 bold')
comp_score_name_label.grid(row=3,column=0,sticky=W,pady=(3,0),padx=(8,0))
comp_score=ttk.Label(scrore_frame,text=f'{comp_Total_runs}/{comp_wicket}',font='Helvetica 20 bold')
comp_score.grid(row=3,column=1,sticky=W,pady=(3,0))
over_count=ttk.Label(scrore_frame,text='Over : 3',font='Helvetica 13 bold')
over_count.grid(row=4,column=0,sticky=W,padx=9)
balls_remain=ttk.Label(scrore_frame,text='Balls : 0',font='Helvetica 13 bold')
balls_remain.grid(row=4,column=1,sticky=W,padx=0)
target_label=ttk.Label(scrore_frame,text=f'Target : {target}',font='Helvetica 13 bold')
target_label.grid(row=5,column=0,sticky=W,padx=8)
who_bat=ttk.Label(scrore_frame,text='Batting : -',font='Helvetica 10 ')
who_ball=ttk.Label(scrore_frame,text='Bowling : -' ,font='Helvetica 10 ')
who_bat.grid(row=6,column=0,sticky=W,padx=(10,0))
who_ball.grid(row=7,column=0,sticky=W,padx=(10,0))
# --------------------------------------------------------------------MENU----------------------------------------------------------------------
mainmenu = Menu(root, activebackground=label_bg_color)
root.config(menu=mainmenu)
m1 = Menu(mainmenu, tearoff=0, bg=bg_color, fg=fg_color,activebackground=label_bg_color, activeforeground=label_fg_color)
m1.add_command(label='New Game',command=newgame)
m1.add_command(label='Save Game',command=save_game)
m1.add_separator()
m1.add_command(label='Exit',command=quitapp)
mainmenu.add_cascade(label='Menu', menu=m1)
def temp_light():
global theme
theme=1
msg.showinfo("RESTART", 'Please Restart the application for apply the Theme')
def temp_dark():
global theme
theme=2
msg.showinfo("RESTART", 'Please Restart the application for apply the Theme')
m2 = Menu(mainmenu, tearoff=0, bg=bg_color, fg=fg_color,activebackground=label_bg_color, activeforeground=label_fg_color)
m2_sub = Menu(m2,tearoff=0, bg=bg_color, fg=fg_color,activebackground=label_bg_color, activeforeground=label_fg_color)
m2_sub.add_command(label='Dark', command=temp_dark)
m2_sub.add_command(label='Light', command=temp_light)
m2.add_cascade(label='Theme',menu=m2_sub)
m2.add_command(label='Help', command=lambda: msg.showinfo('Help', 'We will help you soon'))
m2.add_command(label='More About', command=lambda: msg.showinfo('About', 'This GUI is created by AKG007\n Made in India'))
mainmenu.add_cascade(label='Settings', menu=m2)
root.mainloop()
f1= open('data/files/app_data.p','wb')
pickle.dump(theme,f1)
f1.close()
avg_line_length: 36.803191
max_line_length: 158
alphanum_fraction: 0.620321
qsc_code_num_words_quality_signal: 2,870
qsc_code_num_chars_quality_signal: 20,757
qsc_code_mean_word_length_quality_signal: 4.280836
qsc_code_frac_words_unique_quality_signal: 0.110801
qsc_code_frac_chars_top_2grams_quality_signal: 0.02222
qsc_code_frac_chars_top_3grams_quality_signal: 0.029057
qsc_code_frac_chars_top_4grams_quality_signal: 0.04615
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.657985
qsc_code_frac_chars_dupe_6grams_quality_signal: 0.598405
qsc_code_frac_chars_dupe_7grams_quality_signal: 0.509035
qsc_code_frac_chars_dupe_8grams_quality_signal: 0.42642
qsc_code_frac_chars_dupe_9grams_quality_signal: 0.40827
qsc_code_frac_chars_dupe_10grams_quality_signal: 0.395735
qsc_code_frac_chars_replacement_symbols_quality_signal: 0
qsc_code_frac_chars_digital_quality_signal: 0.025789
qsc_code_frac_chars_whitespace_quality_signal: 0.191116
qsc_code_size_file_byte_quality_signal: 20,757
qsc_code_num_lines_quality_signal: 564
qsc_code_num_chars_line_max_quality_signal: 159
qsc_code_num_chars_line_mean_quality_signal: 36.803191
qsc_code_frac_chars_alphabet_quality_signal: 0.705956
qsc_code_frac_chars_comments_quality_signal: 0.055788
qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0.468182
qsc_code_cate_autogen_quality_signal: 0
qsc_code_frac_lines_long_string_quality_signal: 0.013636
qsc_code_frac_chars_string_length_quality_signal: 0.184389
qsc_code_frac_chars_long_word_length_quality_signal: 0.030783
qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0
qsc_code_frac_lines_prompt_comments_quality_signal: 0
qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1
qsc_codepython_frac_lines_func_ratio_quality_signal: 0.043182
qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0.002273
qsc_codepython_frac_lines_import_quality_signal: 0.015909
qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 0.059091
qsc_codepython_frac_lines_print_quality_signal: 0
all qsc_code_* / qsc_codepython_* columns without the _quality_signal suffix: 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat)
effective: 1
hits: 0
hexsha: 6c26670f1bac191aee07007494c6fa726372a36b
size: 11,451
ext: py
lang: Python
max_stars_repo_path: Game/game_functions.py
max_stars_repo_name: Gabriel-limadev/Alien-Invasion
max_stars_repo_head_hexsha: 1b8b1ad7dfe9cf5cd99ff0595eedf3eb78953eaf
max_stars_repo_licenses: ["MIT"]
max_stars_count: 3
max_stars_repo_stars_event_min_datetime: 2021-09-11T16:35:20.000Z
max_stars_repo_stars_event_max_datetime: 2021-09-25T02:42:04.000Z
max_issues_repo_path: Game/game_functions.py
max_issues_repo_name: Gabriel-limadev/Invasao-Alienigena
max_issues_repo_head_hexsha: 1b8b1ad7dfe9cf5cd99ff0595eedf3eb78953eaf
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Game/game_functions.py
max_forks_repo_name: Gabriel-limadev/Invasao-Alienigena
max_forks_repo_head_hexsha: 1b8b1ad7dfe9cf5cd99ff0595eedf3eb78953eaf
max_forks_repo_licenses: ["MIT"]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2022-02-28T01:06:15.000Z
max_forks_repo_forks_event_max_datetime: 2022-02-28T01:06:15.000Z
content:
import sys # provides functions and variables for manipulating parts of the Python runtime environment
from time import sleep
import pygame
from settings import Settings
from game_stats import GameStats
from bullet import Bullet
from alien import Alien
def check_keydown_events(event, ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Respond to key presses"""
    if event.key == pygame.K_RIGHT or event.key == pygame.K_d:
        # Move the ship to the right
        ship.moving_right = True
    elif event.key == pygame.K_LEFT or event.key == pygame.K_a:
        # Move the ship to the left
        ship.moving_left = True
    elif event.key == pygame.K_UP or event.key == pygame.K_w:
        # Move the ship up
        ship.moving_top = True
    elif event.key == pygame.K_DOWN or event.key == pygame.K_s:
        # Move the ship down
        ship.moving_bottom = True
    elif event.key == pygame.K_SPACE:
        # Create a new bullet and add it to the bullets group
        fire_bullet(ai_settings, screen, ship, bullets)
    elif event.key == pygame.K_ESCAPE:
        # The game quits when the player presses Esc
        sys.exit()
    elif event.key == pygame.K_p and (stats.game_start or stats.game_over):
        start_game(ai_settings, screen, stats, sb, ship, aliens, bullets)
        ai_settings.initialize_dynamic_settings()
def check_keyup_events(event, ship):
    """Respond to key releases"""
    if event.key == pygame.K_RIGHT or event.key == pygame.K_d:
        ship.moving_right = False
    elif event.key == pygame.K_LEFT or event.key == pygame.K_a:
        ship.moving_left = False
    elif event.key == pygame.K_UP or event.key == pygame.K_w:
        ship.moving_top = False
    elif event.key == pygame.K_DOWN or event.key == pygame.K_s:
        ship.moving_bottom = False
def fire_bullet(ai_settings, screen, ship, bullets):
    """Fire a bullet if the limit has not been reached yet"""
    # Create a new bullet and add it to the bullets group
    if len(bullets) < ai_settings.bullets_allowed:
        new_bullet = Bullet(ai_settings, screen, ship)
        bullets.add(new_bullet)
        # Play the laser shot sound
        shoot_sound = pygame.mixer.Sound('Sounds/shoot.wav')
        pygame.mixer.Sound.set_volume(shoot_sound, 0.1)
        shoot_sound.play()
def check_events(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets):
    """Respond to keyboard and mouse events"""
    for event in pygame.event.get():
        # The player clicks the window's close button
        if event.type == pygame.QUIT:
            sys.exit()
        # The player presses a key
        elif event.type == pygame.KEYDOWN:
            check_keydown_events(event, ai_settings, screen, stats, sb, ship, aliens, bullets)
        elif event.type == pygame.KEYUP:
            check_keyup_events(event, ship)
        # elif event.type == pygame.MOUSEBUTTONDOWN:
        # mouse_x, mouse_y = pygame.mouse.get_pos()
        # check_play_button(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets, mouse_x, mouse_y)
def start_game(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Start a new game when the player clicks Play or presses the p key"""
    # Hide the mouse cursor
    # pygame.mouse.set_visible(False)
    # Reset the game statistics and show the game screen
    stats.reset_stats()
    stats.game_active = True
    stats.game_start = False
    # Reset the scoreboard images
    sb.prep_score()
    sb.prep_high_score()
    sb.prep_level()
    sb.prep_ships()
    # Empty the lists of aliens and bullets
    aliens.empty()
    bullets.empty()
    # Create a new fleet and center the ship
    create_fleet(ai_settings, screen, ship, aliens)
    ship.center_ship()
# def check_play_button(ai_settings, screen, stats, play_button, ship, aliens, bullets, mouse_x, mouse_y):
#     """Start a new game when the player clicks Play"""
#     button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)
#     if button_clicked and not stats.game_active:
#         start_game(ai_settings, screen, stats, ship, aliens, bullets)
def update_start(screen, background_start):
    """Start the game with a friendly splash screen"""
    screen.blit(background_start, (0, 0))
    # Update the screen
    pygame.display.flip()
def update_menu(screen, background_menu, play_button):
    """Show the menu screen with the Play button"""
    # Screen-transition delay
    sleep(3)
    screen.blit(background_menu, (0, 0))
    play_button.draw_button()
    # Update the screen
    pygame.display.flip()
def update_game_over(screen, background_game_over, play_button):
    """Show the game-over screen with the Play button"""
    # Screen-transition delay
    sleep(3)
    screen.blit(background_game_over, (0, 0))
    play_button.draw_button()
    # Update the screen
    pygame.display.flip()
def update_screen(ai_settings, screen, stats, sb, ship, aliens, bullets, play_button, background):
    """Update the images on the screen and switch to the new screen"""
    screen.fill(ai_settings.bg_color)
    screen.blit(background, (0, 0))
    # Draw the score information
    sb.show_score()
    # Redraw the screen on each pass through the loop
    for bullet in bullets.sprites():
        bullet.draw_bullet()
    ship.blitme()
    aliens.draw(screen)
    # Update the screen
    pygame.display.flip()
def update_bullets(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Update the bullets' positions and get rid of old bullets"""
    # Update the bullets' positions
    bullets.update()
    # Get rid of bullets that have gone off the top of the screen
    for bullet in bullets.copy():
        if bullet.rect.bottom <= 0:
            bullets.remove(bullet)
    check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets)
def check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Respond to collisions between bullets and aliens."""
    # Remove any bullets and aliens that have collided
    collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)
    if collisions:
        for hit_aliens in collisions.values(): # renamed so the aliens group is not shadowed below
            stats.score += ai_settings.alien_points * len(hit_aliens)
            sb.prep_score()
        check_high_score(stats, sb)
    if len(aliens) == 0:
        # Destroy the existing bullets, speed the game up, create a new fleet and raise the level.
        bullets.empty()
        ai_settings.increase_speed()
        create_fleet(ai_settings, screen, ship, aliens)
        ship.center_ship()
        # Play the level-change sound
        next_level = pygame.mixer.Sound('Sounds/ufo_lowpitch.wav')
        pygame.mixer.Sound.set_volume(next_level, 0.3)
        next_level.play()
        # Raise the level
        stats.level += 1
        sb.prep_level()
def get_number_aliens_x(ai_settings, alien_width):
    """Determine how many aliens fit in a row"""
    available_space_x = ai_settings.screen_width - 2 * alien_width
    number_aliens_x = int(available_space_x / (2 * alien_width))
    return number_aliens_x
def get_number_rows(ai_settings, ship_height, alien_height):
    """Determine how many rows of aliens fit on the screen"""
    available_space_y = (ai_settings.screen_height - (3 * alien_height) - ship_height)
    number_rows = int(available_space_y / (2 * alien_height))
    return number_rows
def create_aliens(ai_settings, screen, aliens, alien_number, row_number):
    """Create an alien and place it on the screen"""
    alien = Alien(ai_settings, screen)
    position_aliens(alien, aliens, alien_number, row_number)
def position_aliens(alien, aliens, alien_number, row_number):
    alien_width = alien.rect.width
    alien.x = alien_width + 2 * alien_width * alien_number
    alien.rect.x = alien.x
    alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number
    aliens.add(alien)
def create_fleet(ai_settings, screen, ship, aliens):
    """Create a full fleet of aliens"""
    # Create an alien and compute the number of aliens in a row
    # The spacing between aliens is one alien width
    alien = Alien(ai_settings, screen)
    number_aliens_x = get_number_aliens_x(ai_settings, alien.rect.width)
    number_rows = get_number_rows(ai_settings, ship.rect.height, alien.rect.height)
    # Create the fleet of aliens
    for row_number in range(number_rows):
        for alien_number in range(number_aliens_x):
            create_aliens(ai_settings, screen, aliens, alien_number, row_number)
def check_fleet_edges(ai_settings, aliens):
    """Respond appropriately if any alien has reached an edge"""
    for alien in aliens.sprites():
        if alien.check_edges():
            change_fleet_direction(ai_settings, aliens)
            break
def change_fleet_direction(ai_settings, aliens):
    """Drop the whole fleet and change its direction"""
    for alien in aliens.sprites():
        alien.rect.y += ai_settings.fleet_drop_speed
    ai_settings.fleet_direction *= -1
def update_aliens(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Check whether the fleet is at an edge,
    then update the positions of all the aliens in the fleet"""
    check_fleet_edges(ai_settings, aliens)
    aliens.update()
    # Check for collisions between aliens and the ship
    if pygame.sprite.spritecollideany(ship, aliens):
        ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)
    # Check for aliens that have reached the bottom of the screen
    check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets)
def ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Respond to the ship being hit by an alien"""
    if stats.ships_left > 1:
        # Decrement ships_left
        stats.ships_left -= 1
        # Update the scoreboard
        sb.prep_ships()
        stats.score -= ai_settings.alien_points * (36 - len(aliens))
        sb.prep_score()
        # Empty the lists of aliens and bullets
        aliens.empty()
        bullets.empty()
        # Create a new fleet and center the ship
        create_fleet(ai_settings, screen, ship, aliens)
        ship.center_ship()
        # Play the ship explosion sound
        explotion_sound = pygame.mixer.Sound('Sounds/explosion.wav')
        pygame.mixer.Sound.set_volume(explotion_sound, 0.1)
        explotion_sound.play()
        # Pause briefly
        sleep(0.5)
    else:
        stats.game_active = False
        stats.game_over = True
def check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Check whether any alien has reached the bottom of the screen"""
    screen_rect = screen.get_rect()
    for alien in aliens.sprites():
        if alien.rect.bottom >= screen_rect.bottom:
            # Treat this the same way as when the ship gets hit
            ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)
            break
def check_high_score(stats, sb):
    """Check whether there is a new high score"""
    if stats.score > stats.high_score:
        stats.high_score = stats.score
        sb.prep_high_score()
avg_line_length: 36.352381
max_line_length: 117
alphanum_fraction: 0.687189
qsc_code_num_words_quality_signal: 1,639
qsc_code_num_chars_quality_signal: 11,451
qsc_code_mean_word_length_quality_signal: 4.632093
qsc_code_frac_words_unique_quality_signal: 0.18975
qsc_code_frac_chars_top_2grams_quality_signal: 0.061907
qsc_code_frac_chars_top_3grams_quality_signal: 0.065332
qsc_code_frac_chars_top_4grams_quality_signal: 0.03754
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.458641
qsc_code_frac_chars_dupe_6grams_quality_signal: 0.392782
qsc_code_frac_chars_dupe_7grams_quality_signal: 0.333114
qsc_code_frac_chars_dupe_8grams_quality_signal: 0.290701
qsc_code_frac_chars_dupe_9grams_quality_signal: 0.258035
qsc_code_frac_chars_dupe_10grams_quality_signal: 0.249473
qsc_code_frac_chars_replacement_symbols_quality_signal: 0
qsc_code_frac_chars_digital_quality_signal: 0.003599
qsc_code_frac_chars_whitespace_quality_signal: 0.223561
qsc_code_size_file_byte_quality_signal: 11,451
qsc_code_num_lines_quality_signal: 314
qsc_code_num_chars_line_max_quality_signal: 118
qsc_code_num_chars_line_mean_quality_signal: 36.468153
qsc_code_frac_chars_alphabet_quality_signal: 0.850298
qsc_code_frac_chars_comments_quality_signal: 0.296306
qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0.276471
qsc_code_cate_autogen_quality_signal: 0
qsc_code_frac_lines_long_string_quality_signal: 0
qsc_code_frac_chars_string_length_quality_signal: 0.007476
qsc_code_frac_chars_long_word_length_quality_signal: 0.002914
qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0
qsc_code_frac_lines_prompt_comments_quality_signal: 0.003185
qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1
qsc_codepython_frac_lines_func_ratio_quality_signal: 0.129412
qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0
qsc_codepython_frac_lines_import_quality_signal: 0.041176
qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 0.182353
qsc_codepython_frac_lines_print_quality_signal: 0
all qsc_code_* / qsc_codepython_* columns without the _quality_signal suffix: 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat)
effective: 1
hits: 0
hexsha: 6c2ea613a50a1e1e9624048d804bb8ed4e0017dd
size: 3,276
ext: py
lang: Python
max_stars_repo_path: herbstluftwm/hl_panel_content.py
max_stars_repo_name: FAUSheppy/config
max_stars_repo_head_hexsha: 998e13f71a4b48c60f645470631cf937586be2fd
max_stars_repo_licenses: ["Unlicense"]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2019-05-07T13:03:10.000Z
max_stars_repo_stars_event_max_datetime: 2019-05-07T13:03:10.000Z
max_issues_repo_path: herbstluftwm/hl_panel_content.py
max_issues_repo_name: FAUSheppy/config
max_issues_repo_head_hexsha: 998e13f71a4b48c60f645470631cf937586be2fd
max_issues_repo_licenses: ["Unlicense"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: herbstluftwm/hl_panel_content.py
max_forks_repo_name: FAUSheppy/config
max_forks_repo_head_hexsha: 998e13f71a4b48c60f645470631cf937586be2fd
max_forks_repo_licenses: ["Unlicense"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
#!/usr/bin/python3
import hl_utils
from hl_constants import *
import string
import re
from datetime import datetime
def guthaben():
guthaben = ''
if hl_utils.is_cip():
raw = ""
with open(hl_utils.hlpath(PRINT_LOG)) as f:
            raw = f.read()
        guthaben = "Druckerguthaben: " + raw + " Euro"  # "Druckerguthaben" = printer credit
        col = hl_utils.get_color(float(raw), 0, COLOR_BORDER)
        guthaben = hl_utils.color_panel(guthaben, col)
    return guthaben
def quota():
q = ''
if not hl_utils.is_cip():
return ''
else:
with open(hl_utils.hlpath("quota.cip")) as f:
return f.read()
def vpn():
vpn = ''
if hl_utils.is_cip():
return ''
else:
try:
with open(hl_utils.hlpath(VPN_LOG)) as f:
                tmp = f.read()
                tmp = ' ' + tmp
                return tmp
except FileNotFoundError:
return hl_utils.color_panel("NO VPN INFORMATION",YELLOW)
def ip():
try:
with open(hl_utils.hlpath(IP_LOG)) as f:
            tmp = f.read()
            if "[" in tmp:
                tmp = hl_utils.color_panel("Public IP: IP6 ", GREEN)
            tmp = ' ' + tmp
            return tmp
except Exception:
return hl_utils.color_panel("Public IP: No Data",YELLOW)
def battery():
if hl_utils.is_laptop():
try:
with open(hl_utils.hlpath(BATTERY_LOG)) as f:
                tmp = f.read()
                tmp = ' ' + tmp
                return tmp
except FileNotFoundError as e:
return hl_utils.color_panel(str(e),RED)
else:
return ""
def date():
return hl_utils.shexec("date +' ^fg(#efefef)%H:%M^fg(#909090), %Y-%m-^fg(#efefef)%d'")
def logins():
try:
with open(hl_utils.hlpath(LOGINS_LOG),'r') as f:
return f.read()
    except Exception:
return ""
def bcw():
try:
with open(hl_utils.hlpath(BC_WORD_LOG),'r') as f:
tmp = int(f.read())
string = "{} words".format(tmp)
return hl_utils.color_panel(string,hl_utils.get_color(tmp,0,7000,reverse=False))
    except Exception:
return ""
def bwp():
tmp = ""
cur = 29
try:
with open(hl_utils.hlpath(BC_PAGE_LOG),'r') as f:
tmp = "{} pages".format(f.read().strip())
    except Exception:
tmp = "{} pages".format(cur)
tmp = hl_utils.color_panel(tmp,hl_utils.get_color(cur,0,44,reverse=False))
return tmp
def countdown():
delta = datetime(year=2018,month=7,day=23,hour=12) - datetime.now()
if delta.total_seconds() < 0:
return hl_utils.color_panel('Have a nice life without me fuckers.',RED)
tmp = "{} days {} hours remaining".format(delta.days,int(delta.seconds/60/60))
tmp = hl_utils.color_panel(tmp,hl_utils.get_color(delta.days,0,180))
return tmp
if __name__ == "__main__":
print(logins(),ip(),vpn(),guthaben(),battery(),date(),sep='',end='')
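# A hedged, self-contained sketch of the pattern this panel script uses: each
# segment is a zero-argument function returning a string (empty when not
# applicable), and the panel line is their plain concatenation. The segment
# names below are illustrative stubs, not part of hl_utils.
def _demo_uptime():
    return ' up 3 days'

def _demo_panel_line():
    segments = [_demo_uptime]
    return ''.join(segment() for segment in segments)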
| 31.2
| 100
| 0.501832
| 396
| 3,276
| 3.989899
| 0.290404
| 0.11962
| 0.068354
| 0.096835
| 0.420253
| 0.275949
| 0.146835
| 0.113924
| 0.113924
| 0.113924
| 0
| 0.017944
| 0.370574
| 3,276
| 104
| 101
| 31.5
| 0.748303
| 0.005189
| 0
| 0.355556
| 0
| 0.011111
| 0.074586
| 0.015654
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.055556
| 0.011111
| 0.377778
| 0.011111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c2eba18adaa6d56cede8b191f75d6ce31f0cf4f
| 1,368
|
py
|
Python
|
views.py
|
vigilcommunity/mega-project
|
09a44c76170c71ee4c1d206fb0942b72c65ff45f
|
[
"MIT"
] | null | null | null |
views.py
|
vigilcommunity/mega-project
|
09a44c76170c71ee4c1d206fb0942b72c65ff45f
|
[
"MIT"
] | null | null | null |
views.py
|
vigilcommunity/mega-project
|
09a44c76170c71ee4c1d206fb0942b72c65ff45f
|
[
"MIT"
] | null | null | null |
import datetime
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from vigil_ctf_app.EmailBackEnd import EmailBackEnd
# Authentication views only
def register(request):
return render(request,'authentication/signup.html')
def show_login(request):
return render(request,'authentication/signin.html')
def doLogin(request):
    if request.method != "POST":
        return HttpResponse("<h2>Method Not Allowed</h2>")
    else:
        user = EmailBackEnd.authenticate(request, username=request.POST.get("email"), password=request.POST.get("password"))
        if user is not None:
            login(request, user)
            if user.user_type == "1":
                return HttpResponseRedirect('admin')
            # Non-admin users previously fell through with no return; send them to the landing page
            return HttpResponseRedirect("/")
        else:
            messages.error(request, "Invalid User Details")
            return HttpResponseRedirect("/")
def GetUserDetails(request):
    # request.user is never None in Django (anonymous visitors get AnonymousUser),
    # so check is_authenticated instead
    if request.user.is_authenticated:
        return HttpResponse("User : " + request.user.email + " usertype : " + str(request.user.user_type))
    else:
        return HttpResponse("Please Login First")
def logout_user(request):
logout(request)
return HttpResponseRedirect("/")
def policy(request):
return render(request,'rules/policy.html')
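# A hedged sketch of how these views might be wired into a URLconf; the URL
# names and paths are assumptions, not taken from the repository.
from django.urls import path

demo_urlpatterns = [
    path('', show_login, name='show_login'),
    path('signup/', register, name='register'),
    path('doLogin/', doLogin, name='do_login'),
    path('logout/', logout_user, name='logout'),
    path('policy/', policy, name='policy'),
]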
| 29.106383
| 120
| 0.710526
| 155
| 1,368
| 6.232258
| 0.380645
| 0.05176
| 0.059006
| 0.080745
| 0.082816
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002681
| 0.182018
| 1,368
| 46
| 121
| 29.73913
| 0.86059
| 0.02193
| 0
| 0.151515
| 0
| 0
| 0.133234
| 0.038922
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0.030303
| 0.212121
| 0.090909
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c2f5071a275540f672417776634a7f3cf12f94d
| 19,223
|
py
|
Python
|
graph-measures/features_algorithms/vertices/motifs.py
|
Unknown-Data/QGCN
|
e074ada31c13b6de6eabba2b2ebce90e88fdfdbf
|
[
"MIT"
] | 3
|
2021-04-21T16:06:51.000Z
|
2022-03-31T12:09:01.000Z
|
graph-measures/features_algorithms/vertices/motifs.py
|
Unknown-Data/QGCN
|
e074ada31c13b6de6eabba2b2ebce90e88fdfdbf
|
[
"MIT"
] | 1
|
2021-02-04T07:48:16.000Z
|
2021-02-24T23:01:41.000Z
|
graph-measures/features_algorithms/vertices/motifs.py
|
Unknown-Data/QGCN
|
e074ada31c13b6de6eabba2b2ebce90e88fdfdbf
|
[
"MIT"
] | null | null | null |
import os
import pickle
from functools import partial
from itertools import permutations, combinations
import networkx as nx
import numpy as np
from bitstring import BitArray
from collections import Counter
try:
from graph_measures.features_infra.feature_calculators import NodeFeatureCalculator, FeatureMeta
except ModuleNotFoundError:
    from features_infra.feature_calculators import NodeFeatureCalculator, FeatureMeta
CUR_PATH = os.path.realpath(__file__)
BASE_PATH = os.path.dirname(os.path.dirname(CUR_PATH))
VERBOSE = False
DEBUG = False
SAVE_COUNTED_MOTIFS = False
interesting_groups = [
sorted([0, 1, 8, 27])
]
class MotifsNodeCalculator(NodeFeatureCalculator):
def __init__(self, *args, level=3, **kwargs):
super(MotifsNodeCalculator, self).__init__(*args, **kwargs)
assert level in [3, 4], "Unsupported motif level %d" % (level,)
self._level = level
self._node_variations = {}
self._all_motifs = None
self._print_name += "_%d" % (self._level,)
self._gnx = self._gnx.copy()
self._load_variations()
self._counted_motifs = set() # Only used if SAVE_COUNTED_MOTIFS is set
self._double_counter = Counter()
def is_relevant(self):
return True
@classmethod
def print_name(cls, level=None):
print_name = super(MotifsNodeCalculator, cls).print_name()
if level is None:
return print_name
return "%s_%d" % (print_name, level)
# name = super(MotifsNodeCalculator, cls).print_name()
# name.split("_")[0]
def _load_variations_file(self):
fname = "%d_%sdirected.pkl" % (self._level, "" if self._gnx.is_directed() else "un")
fpath = os.path.join(BASE_PATH, "motif_variations", fname)
return pickle.load(open(fpath, "rb"))
def _load_variations(self):
self._node_variations = self._load_variations_file()
self._all_motifs = set(self._node_variations.values())
# here we pass on the edges of the sub-graph containing only the bunch nodes
# and calculate the expected index of each edge (with respect to whether the graph is directed on not)
# the formulas were calculated by common reason
# combinations index: sum_0_to_n1-1((n - i) - 1) + n2 - n1 - 1
# permutations index: each set has (n - 1) items, so determining the set is by n1, and inside the set by n2
def _get_group_number_opt1(self, nbunch):
subgnx = self._gnx.subgraph(nbunch)
nodes = {node: i for i, node in enumerate(subgnx)}
n = len(nodes)
if subgnx.is_directed():
def edge_index(n1, n2):
return n1 * (n - 1) + n2 - (1 * (n2 > n1))
else:
def edge_index(n1, n2):
n1, n2 = min(n1, n2), max(n1, n2)
return (n1 / 2) * (2 * n - 3 - n1) + n2 - 1
return sum(2 ** edge_index(nodes[edge[0]], nodes[edge[1]]) for edge in subgnx.edges())
# passing on all:
# * undirected graph: combinations [(n*(n-1)/2) combs - handshake lemma]
# * directed graph: permutations [(n*(n-1) perms - handshake lemma with respect to order]
# checking whether the edge exist in the graph - and construct a bitmask of the existing edges
def _get_group_number(self, nbunch):
func = permutations if self._gnx.is_directed() else combinations
if DEBUG:
pass
return BitArray(self._gnx.has_edge(n1, n2) for n1, n2 in func(nbunch, 2)).uint
# def _get_motif_sub_tree(self, root, length):
# implementing the "Kavosh" algorithm for subgroups of length 3
def _get_motif3_sub_tree(self, root):
visited_vertices = {root: 0}
visited_index = 1
# variation == (1, 1)
first_neighbors = set(nx.all_neighbors(self._gnx, root))
# neighbors, visited_neighbors = tee(first_neighbors)
for n1 in first_neighbors:
visited_vertices[n1] = visited_index
visited_index += 1
for n1 in first_neighbors:
last_neighbors = set(nx.all_neighbors(self._gnx, n1))
for n2 in last_neighbors:
if n2 in visited_vertices:
if visited_vertices[n1] < visited_vertices[n2]:
yield [root, n1, n2]
else:
visited_vertices[n2] = visited_index
visited_index += 1
yield [root, n1, n2]
# variation == (2, 0)
for n1, n2 in combinations(first_neighbors, 2):
if (visited_vertices[n1] < visited_vertices[n2]) and \
not (self._gnx.has_edge(n1, n2) or self._gnx.has_edge(n2, n1)):
yield [root, n1, n2]
# implementing the "Kavosh" algorithm for subgroups of length 4
def _get_motif4_sub_tree(self, root):
visited_vertices = {root: 0}
# visited_index = 1
# variation == (1, 1, 1)
neighbors_first_deg = set(nx.all_neighbors(self._gnx, root))
# neighbors_first_deg, visited_neighbors, len_a = tee(neighbors_first_deg, 3)
neighbors_first_deg = visited_neighbors = list(neighbors_first_deg)
for n1 in visited_neighbors:
visited_vertices[n1] = 1
for n1, n2, n3 in combinations(neighbors_first_deg, 3):
group = [root, n1, n2, n3]
if DEBUG:
if sorted(group) in interesting_groups:
print('An interesting group:', group)
yield group
for n1 in neighbors_first_deg:
if DEBUG:
pass
neighbors_sec_deg = set(nx.all_neighbors(self._gnx, n1))
# neighbors_sec_deg, visited_neighbors, len_b = tee(neighbors_sec_deg, 3)
neighbors_sec_deg = visited_neighbors = list(neighbors_sec_deg)
for n in visited_neighbors:
if n not in visited_vertices:
if DEBUG:
                        if n == 1:  # '==', not 'is': identity checks on int literals are a bug
                            hi = 0.5
visited_vertices[n] = 2
for n2 in neighbors_sec_deg:
for n11 in neighbors_first_deg:
if visited_vertices[n2] == 2 and n1 != n11:
edge_exists = (self._gnx.has_edge(n2, n11) or self._gnx.has_edge(n11, n2))
if (not edge_exists) or (edge_exists and n1 < n11):
group = [root, n1, n11, n2]
if DEBUG:
if sorted(group) in interesting_groups:
print('An interesting group:', group)
yield group
# for n1 in neighbors_first_deg:
# if DEBUG:
# if root is 41:
# print('n1', n1)
# neighbors_sec_deg = set(nx.all_neighbors(self._gnx, n1))
# # neighbors_sec_deg, visited_neighbors, len_b = tee(neighbors_sec_deg, 3)
# neighbors_sec_deg = visited_neighbors = list(neighbors_sec_deg)
for comb in combinations(neighbors_sec_deg, 2):
if DEBUG:
                    if root == 41:
                        hi = 1
if 2 == visited_vertices[comb[0]] and visited_vertices[comb[1]] == 2:
group = [root, n1, comb[0], comb[1]]
if DEBUG:
                        if root == 41:
print('A 41 group:', group)
if sorted(group) in interesting_groups:
print('An interesting group:', group)
yield group
for n1 in neighbors_first_deg:
if DEBUG:
pass
neighbors_sec_deg = set(nx.all_neighbors(self._gnx, n1))
# neighbors_sec_deg, visited_neighbors, len_b = tee(neighbors_sec_deg, 3)
neighbors_sec_deg = visited_neighbors = list(neighbors_sec_deg)
for n2 in neighbors_sec_deg:
if visited_vertices[n2] == 1:
continue
for n3 in set(nx.all_neighbors(self._gnx, n2)):
if DEBUG:
                    if root == 0 and n1 == 27 and n2 == 8 and n3 == 1:
hi = 1.5
if n3 not in visited_vertices:
if DEBUG:
pass
visited_vertices[n3] = 3
if visited_vertices[n2] == 2:
group = [root, n1, n2, n3]
if DEBUG:
if sorted(group) in interesting_groups:
print('An interesting group:', group)
yield group
else:
if visited_vertices[n3] == 1:
continue
if visited_vertices[n3] == 2 and not (self._gnx.has_edge(n1, n3) or self._gnx.has_edge(n3, n1)):
group = [root, n1, n2, n3]
if DEBUG:
if sorted(group) in interesting_groups:
print('An interesting group:', group)
yield group
elif visited_vertices[n3] == 3 and visited_vertices[n2] == 2:
group = [root, n1, n2, n3]
if DEBUG:
if sorted(group) in interesting_groups:
print('An interesting group:', group)
yield group
def _order_by_degree(self, gnx=None):
if gnx is None:
gnx = self._gnx
return sorted(gnx, key=lambda n: len(list(nx.all_neighbors(gnx, n))), reverse=True)
def _calculate_motif(self):
# consider first calculating the nth neighborhood of a node
# and then iterate only over the corresponding graph
motif_func = self._get_motif3_sub_tree if self._level == 3 else self._get_motif4_sub_tree
sorted_nodes = self._order_by_degree()
for node in sorted_nodes:
for group in motif_func(node):
group_num = self._get_group_number(group)
motif_num = self._node_variations[group_num]
yield group, group_num, motif_num
if VERBOSE:
self._logger.debug("Finished node: %s" % node)
self._gnx.remove_node(node)
def _update_nodes_group(self, group, motif_num):
for node in group:
self._features[node][motif_num] += 1
def _calculate(self, include=None):
m_gnx = self._gnx.copy()
motif_counter = {motif_number: 0 for motif_number in self._all_motifs}
self._features = {node: motif_counter.copy() for node in self._gnx}
for i, (group, group_num, motif_num) in enumerate(self._calculate_motif()):
if DEBUG:
                if 21 in group and motif_num == 47:
print('A 21/47 group:', group, motif_num)
pass
if sorted(group) in interesting_groups:
print('An interesting group:', group, motif_num)
if SAVE_COUNTED_MOTIFS:
h = hash(frozenset(group))
# h = frozenset(group)
if h in self._counted_motifs:
print("\033[91m Group {} already counted \033[00m".format(group))
self._double_counter[frozenset(group)] += 1
else:
self._counted_motifs.add(h)
self._update_nodes_group(group, motif_num)
if (i + 1) % 1000 == 0 and VERBOSE:
self._logger.debug("Groups: %d" % i)
# print('Max num of duplicates:', max(self._double_counter.values()))
# print('Number of motifs counted twice:', len(self._double_counter))
self._gnx = m_gnx
def _get_feature(self, element):
all_motifs = self._all_motifs.difference(set([None]))
cur_feature = self._features[element]
return np.array([cur_feature[motif_num] for motif_num in sorted(all_motifs)])
# consider ignoring node's data
class MotifsEdgeCalculator(MotifsNodeCalculator):
def __init__(self, *args, include_nodes=False, **kwargs):
self._edge_variations = {}
self._should_include_nodes = include_nodes
super(MotifsEdgeCalculator, self).__init__(*args, **kwargs)
def is_relevant(self):
# if graph is not directed, there is no use of edge variations
return self._gnx.is_directed()
def _calculate_motif_dictionaries(self):
# calculating the node variations
super(MotifsEdgeCalculator, self)._load_variations_file()
if not self._gnx.is_directed():
# if graph is not directed, there is no use of edge variations
return
motif_edges = list(permutations(range(self._level), 2))
# level * (level - 1) is number of permutations of size 2
num_edges = self._level * (self._level - 1)
for group_num, motif_num in self._node_variations.items():
bin_repr = BitArray(length=num_edges, int=group_num)
self._edge_variations[group_num] = set([edge_type for bit, edge_type in zip(bin_repr, motif_edges) if bit])
# noinspection PyMethodOverriding
def _calculate(self, include=None):
for group, group_num, motif_num in self._calculate_motif():
if self._should_include_nodes:
self._update_nodes_group(group, motif_num)
for edge_type in self._edge_variations[group_num]:
edge = tuple(map(lambda idx: group[idx], edge_type))
if edge not in self._features:
self._features[edge] = {motif_number: 0 for motif_number in self._all_motifs}
self._features[edge][motif_num] += 1
def nth_nodes_motif(motif_level):
return partial(MotifsNodeCalculator, level=motif_level)
def nth_edges_motif(motif_level):
    # The original returned MotifsNodeCalculator here, apparently a copy-paste slip;
    # the edge feature entries below presumably intend the edge calculator.
    return partial(MotifsEdgeCalculator, level=motif_level)
feature_node_entry = {
"motif3": FeatureMeta(nth_nodes_motif(3), {"m3"}),
"motif4": FeatureMeta(nth_nodes_motif(4), {"m4"}),
}
feature_edge_entry = {
"motif3_edge": FeatureMeta(nth_edges_motif(3), {"me3"}),
"motif4_edge": FeatureMeta(nth_edges_motif(4), {"me4"}),
}
if __name__ == "__main__":
from measure_tests.specific_feature_test import test_specific_feature
# Previous version contained a bug while counting twice sub-groups with double edges
# test_specific_feature(nth_edges_motif(3), is_max_connected=True)
test_specific_feature(nth_edges_motif(4), is_max_connected=True)
# def _calculate_motif_dictionaries(self):
# motifs_edges_dict = {}
# motifs_vertices_dict = {}
# motif_edges = list(permutations(range(self._level), 2))
#
# motif_file = pandas.read_csv(self._motif_path(), delimiter="\t")
# if not self._gnx.is_directed():
# motifs_vertices_dict = {BitArray(length=3, int=int(y)).bin: int(x) for i, (x, y) in motif_file.iterrows()}
# else:
# num_edges = len(motif_edges)
# for _, (x, y) in motif_file.iterrows():
# bin_repr = BitArray(length=num_edges, int=int(y))
# motifs_vertices_dict[bin_repr.bin] = int(x)
# motifs_edges_dict[bin_repr.bin] = [edge_type for bit, edge_type in zip(bin_repr, motif_edges) if bit]
#
# return {'v': motifs_vertices_dict, 'e': motifs_edges_dict}
###########################################################################################
# def _calculate(self, include=None):
# all_motifs = set(self._node_variations.values())
# undirected_gnx = self._gnx.to_undirected()
# for node in self._order_by_degree():
# history = set()
# self._features[node] = {motif_number: 0 for motif_number in all_motifs}
# neighbors_gnx = self._gnx.subgraph(self._get_neighborhood(node, self._level, gnx=undirected_gnx))
# for group in self._get_subgroups(node, self._level, gnx=neighbors_gnx):
# group = sorted(group)
# if group in history:
# continue
# history.add(group)
# motif_number = self._get_motif_number(group)
# self._features[node][motif_number] += 1
# self._gnx.remove_node(node)
#
# def _subgroups(self, node, level, gnx=None):
# if gnx is None:
# gnx = self._gnx
# if level == 1:
# return node
#
# def _calculate1(self):
# for node in self._order_by_degree():
# history = {}
# for sub_group in self._subgroups(node, self._level):
# if sub_group in history:
# continue
#
# # this might be more efficient than dijkstra (with cutoff) - a simple BFS
# def _get_neighborhood(self, node, dist, gnx=None):
# dist -= 1
# if gnx is None:
# gnx = self._gnx
# neighborhood = set()
# queue = [(node, 0)]
# while queue:
# cur_node, node_dist = queue.pop(0)
# neighborhood.add(cur_node)
# neighbors = set(nx.all_neighbors(gnx, cur_node)).difference(neighborhood)
# if node_dist >= dist - 1:
# neighborhood.update(neighbors)
# else: # node_dist is lower than (dist - 1)
# queue.extend((n, node_dist + 1) for n in neighbors)
# return neighborhood
#
# # seems more simple - but it's more costly
# def _get_neighborhood_dijkstra(self, node, dist, gnx=None):
# if gnx is None:
# gnx = self._gnx
# return set(nx.single_source_dijkstra_path_length(gnx, node, cutoff=dist))
#
# def _calculate2(self):
# self._undirected_gnx = self._gnx.to_undirected()
# for node in self._order_by_degree(self._undirected_gnx):
# # calculating the nth neighborhood of the node - is working on the neighborhood graph more efficient?
# neighbors_gnx = self._gnx.subgraph(self._get_neighborhood(node, self._level))
# history = {}
# for sub_group in self._subgroups(node, self._level, gnx=neighbors_gnx):
# if sub_group in history:
# continue
# self._gnx.remove_node(node)
# TODO: consider removing
# def _initialize_motif_hist(self):
# length = max(self._node_variations.values()) + 1
# return {n: [0] * length for n in self._gnx}
#
# def _initialize_motif_hist(self):
# node_hist = super(MotifsEdgeCalculator, self)._initialize_motif_hist()
#
# length = max(self._edge_variations.values()) + 1
# edge_hist = {e: [0] * length for e in self._gnx.edges()}
# return {'v': node_hist, 'e': edge_hist}
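# A hedged, self-contained illustration of the bitmask encoding used by
# _get_group_number above: for a directed 3-node group, each ordered pair from
# permutations(nbunch, 2) contributes one bit. The tiny graph is fabricated
# for the demo.
def _demo_group_number():
    import networkx as nx
    from itertools import permutations
    from bitstring import BitArray
    g = nx.DiGraph([(0, 1), (1, 2)])  # edges 0->1 and 1->2 only
    bits = BitArray(g.has_edge(n1, n2) for n1, n2 in permutations([0, 1, 2], 2))
    # pair order (0,1),(0,2),(1,0),(1,2),(2,0),(2,1) gives bits 100100 == 36
    return bits.uint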
| 41.78913
| 121
| 0.572179
| 2,314
| 19,223
| 4.497407
| 0.133535
| 0.026232
| 0.025944
| 0.013068
| 0.438263
| 0.352839
| 0.294417
| 0.232824
| 0.205054
| 0.190257
| 0
| 0.020746
| 0.33049
| 19,223
| 459
| 122
| 41.880174
| 0.787879
| 0.308173
| 0
| 0.306773
| 0
| 0
| 0.028864
| 0
| 0
| 0
| 0
| 0.002179
| 0.003984
| 1
| 0.087649
| false
| 0.01992
| 0.043825
| 0.01992
| 0.195219
| 0.059761
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c31ccf0bc17c144c7bcad4490fb8229ffccbad2
| 4,954
|
py
|
Python
|
sigpy/mri/rf/sim.py
|
jonbmartin/sigpy-rf-staging
|
1be409a1ce0799574f1a979044b02fe21a19bf5d
|
[
"BSD-3-Clause"
] | null | null | null |
sigpy/mri/rf/sim.py
|
jonbmartin/sigpy-rf-staging
|
1be409a1ce0799574f1a979044b02fe21a19bf5d
|
[
"BSD-3-Clause"
] | null | null | null |
sigpy/mri/rf/sim.py
|
jonbmartin/sigpy-rf-staging
|
1be409a1ce0799574f1a979044b02fe21a19bf5d
|
[
"BSD-3-Clause"
] | null | null | null |
"""RF Pulse Simulation Functions.
"""
from sigpy import backend
__all__ = ['abrm', 'abrm_nd', 'abrm_hp']
def abrm(rf, x, balanced=False):
r"""1D RF pulse simulation, with simultaneous RF + gradient rotations.
Args:
rf (array): rf waveform input.
x (array): spatial locations.
balanced (bool): toggles application of rewinder.
Returns:
2-element tuple containing
- **a** (*array*): SLR alpha parameter.
- **b** (*array*): SLR beta parameter.
References:
Pauly, J., Le Roux, Patrick., Nishimura, D., and Macovski, A.(1991).
'Parameter Relations for the Shinnar-LeRoux Selective Excitation
Pulse Design Algorithm'.
IEEE Transactions on Medical Imaging, Vol 10, No 1, 53-65.
"""
device = backend.get_device(rf)
xp = device.xp
with device:
eps = 1e-16
g = xp.ones(xp.size(rf)) * 2 * xp.pi / xp.size(rf)
a = xp.ones(xp.size(x), dtype=complex)
b = xp.zeros(xp.size(x), dtype=complex)
for mm in range(xp.size(rf)):
om = x * g[mm]
phi = xp.sqrt(xp.abs(rf[mm]) ** 2 + om ** 2) + eps
n = xp.column_stack((xp.real(rf[mm]) / phi,
xp.imag(rf[mm]) / phi,
om / phi))
av = xp.cos(phi / 2) - 1j * n[:, 2] * xp.sin(phi / 2)
bv = -1j * (n[:, 0] + 1j * n[:, 1]) * xp.sin(phi / 2)
at = av * a - xp.conj(bv) * b
bt = bv * a + xp.conj(av) * b
a = at
b = bt
if balanced: # apply a rewinder
g = -2 * xp.pi / 2
om = x * g
phi = xp.abs(om) + eps
nz = om / phi
av = xp.cos(phi / 2) - 1j * nz * xp.sin(phi / 2)
a = av * a
b = xp.conj(av) * b
return a, b
def abrm_nd(rf, x, g):
r"""N-dim RF pulse simulation
Assumes that x has inverse spatial units of g, and g has gamma*dt applied.
Assumes dimensions x = [...,Ndim], g = [Ndim,Nt].
Args:
rf (array): rf waveform input.
x (array): spatial locations.
g (array): gradient array.
Returns:
2-element tuple containing
- **a** (*array*): SLR alpha parameter.
- **b** (*array*): SLR beta parameter.
References:
Pauly, J., Le Roux, Patrick., Nishimura, D., and Macovski, A.(1991).
'Parameter Relations for the Shinnar-LeRoux Selective Excitation
Pulse Design Algorithm'.
IEEE Transactions on Medical Imaging, Vol 10, No 1, 53-65.
"""
device = backend.get_device(rf)
xp = device.xp
with device:
eps = 1e-16
a = xp.ones(xp.shape(x)[0], dtype=complex)
b = xp.zeros(xp.shape(x)[0], dtype=complex)
for mm in range(xp.size(rf)):
om = x @ g[mm, :]
phi = xp.sqrt(xp.abs(rf[mm]) ** 2 + om ** 2)
n = xp.column_stack((xp.real(rf[mm]) / (phi + eps),
xp.imag(rf[mm]) / (phi + eps),
om / (phi + eps)))
av = xp.cos(phi / 2) - 1j * n[:, 2] * xp.sin(phi / 2)
bv = -1j * (n[:, 0] + 1j * n[:, 1]) * xp.sin(phi / 2)
at = av * a - xp.conj(bv) * b
bt = bv * a + xp.conj(av) * b
a = at
b = bt
return a, b
def abrm_hp(rf, gamgdt, xx, dom0dt=0):
r"""1D RF pulse simulation, with non-simultaneous RF + gradient rotations.
Args:
rf (array): rf pulse samples in radians.
        gamgdt (array): gradient samples in radians/(units of xx).
xx (array): spatial locations.
dom0dt (float): off-resonance phase in radians.
Returns:
2-element tuple containing
- **a** (*array*): SLR alpha parameter.
- **b** (*array*): SLR beta parameter.
References:
Pauly, J., Le Roux, Patrick., Nishimura, D., and Macovski, A.(1991).
'Parameter Relations for the Shinnar-LeRoux Selective Excitation
Pulse Design Algorithm'.
IEEE Transactions on Medical Imaging, Vol 10, No 1, 53-65.
"""
device = backend.get_device(rf)
xp = device.xp
with device:
Ns = xp.shape(xx)
Ns = Ns[0] # Ns: # of spatial locs
Nt = xp.shape(gamgdt)
Nt = Nt[0] # Nt: # time points
a = xp.ones((Ns,))
b = xp.zeros((Ns,))
for ii in xp.arange(Nt):
            # apply phase accrual
z = xp.exp(-1j * (xx * gamgdt[ii, ] + dom0dt))
b = b * z
# apply rf
C = xp.cos(xp.abs(rf[ii]) / 2)
S = 1j * xp.exp(1j * xp.angle(rf[ii])) * xp.sin(xp.abs(rf[ii]) / 2)
at = a * C - b * xp.conj(S)
bt = a * S + b * C
a = at
b = bt
z = xp.exp(1j / 2 * (xx * xp.sum(gamgdt, axis=0) + Nt * dom0dt))
a = a * z
b = b * z
return a, b
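# A hedged usage sketch for abrm above: simulate a constant ("hard") 90-degree
# pulse on NumPy arrays and check that the Cayley-Klein parameters stay
# unitary, |a|^2 + |b|^2 == 1. The pulse length and spatial grid are arbitrary
# demo values.
def _demo_abrm():
    import numpy as np
    rf = np.ones(100) * (np.pi / 2) / 100   # 90-degree hard pulse in 100 samples
    x = np.linspace(-1, 1, 64)              # spatial locations
    a, b = abrm(rf, x)
    assert np.allclose(np.abs(a) ** 2 + np.abs(b) ** 2, 1.0)
    return a, b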
| 30.207317
| 79
| 0.489705
| 689
| 4,954
| 3.502177
| 0.219158
| 0.013262
| 0.016577
| 0.018649
| 0.668877
| 0.632408
| 0.58765
| 0.58765
| 0.547037
| 0.524658
| 0
| 0.028371
| 0.366774
| 4,954
| 163
| 80
| 30.392638
| 0.740835
| 0.400484
| 0
| 0.415584
| 0
| 0
| 0.006519
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038961
| false
| 0
| 0.012987
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c35a72377a5c784fe27610ba9ce572e4beeb277
| 1,171
|
py
|
Python
|
main.py
|
Yash7689/copy-data-from-1-txt-to-another
|
1ab88c89209088b04c2105c5db4342029a079219
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
Yash7689/copy-data-from-1-txt-to-another
|
1ab88c89209088b04c2105c5db4342029a079219
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
Yash7689/copy-data-from-1-txt-to-another
|
1ab88c89209088b04c2105c5db4342029a079219
|
[
"Apache-2.0"
] | null | null | null |
# @Copyright [2021] [Yash Bajaj]
import fileinput as fi
# This script replaces each space character in the copied file with a newline (see editer() below)
def writer():
with open("c:/PycharmProjects/copy_data_from_1_file_to_another/input.txt", "w") as writer:
data = input("Whatever you will write will be present in input.txt - ")
writer.write(data)
# Prompts the user for text and writes it to input.txt
def copy():
with open("c:/PycharmProjects/copy_data_from_1_file_to_another/input.txt", "r") as f:
with open("c:/PycharmProjects/copy_data_from_1_file_to_another/copyied.txt", "w") as f1:
font = f.read()
f1.write(font)
# Copies the contents of input.txt into copyied.txt
def editer():
with fi.FileInput("c:/PycharmProjects/copy_data_from_1_file_to_another/copyied.txt", inplace=True, backup=".bak") as r:
for line in r:
            # '\n' avoids the stray indentation a multi-line triple-quoted literal would carry
            print(line.replace(' ', '\n'), end='')
# Replaces each space with a newline; also creates a backup copy with the extension .bak
if __name__ == '__main__':
writer()
copy()
editer()
# This will run the code
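# A hedged, self-contained sketch of the same inplace-edit pattern without the
# hard-coded PycharmProjects paths; the temp file is created just for the demo.
def _demo_inplace_replace():
    import tempfile, fileinput, os
    fd, path = tempfile.mkstemp(suffix='.txt')
    os.close(fd)
    with open(path, 'w') as f:
        f.write('a b c')
    with fileinput.FileInput(path, inplace=True, backup='.bak') as r:
        for line in r:
            print(line.replace(' ', '\n'), end='')
    with open(path) as f:
        assert f.read() == 'a\nb\nc'
    os.remove(path)
    os.remove(path + '.bak')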
| 37.774194
| 123
| 0.681469
| 183
| 1,171
| 4.185792
| 0.393443
| 0.052219
| 0.078329
| 0.125326
| 0.29765
| 0.29765
| 0.29765
| 0.29765
| 0.29765
| 0.29765
| 0
| 0.012821
| 0.200683
| 1,171
| 30
| 124
| 39.033333
| 0.805556
| 0.325363
| 0
| 0
| 0
| 0
| 0.408685
| 0.316731
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157895
| false
| 0
| 0.052632
| 0
| 0.210526
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c3666e9b94187f8c2b912f96ab0492447c6ab94
| 16,981
|
py
|
Python
|
torchfurnace/engine.py
|
tianyu-su/torchfurnace
|
2f4a9a0655a8d3c3e231c86611085f834e03c2f8
|
[
"MIT"
] | 8
|
2020-03-20T13:49:30.000Z
|
2021-12-04T07:41:27.000Z
|
torchfurnace/engine.py
|
tianyu-su/torchfurnace
|
2f4a9a0655a8d3c3e231c86611085f834e03c2f8
|
[
"MIT"
] | null | null | null |
torchfurnace/engine.py
|
tianyu-su/torchfurnace
|
2f4a9a0655a8d3c3e231c86611085f834e03c2f8
|
[
"MIT"
] | 1
|
2020-04-01T11:01:09.000Z
|
2020-04-01T11:01:09.000Z
|
# -*- coding: utf-8 -*-
# Date: 2020/3/17 12:16
"""
an engine for deep learning tasks
"""
__author__ = 'tianyu'
import abc
import random
import time
import warnings
import numpy as np
import torch.backends.cudnn
import torch.nn.functional as F
import torch.utils.data
from torch.optim.lr_scheduler import StepLR
from .options import Parser
from .tracer import Tracer
from .utils import tracer_component as tc
from .utils.function import *
class Engine(object, metaclass=abc.ABCMeta):
"""
    Suggested functions to override:
    _on_start_epoch: add your own meters for learning
    _get_lr_scheduler: define your lr scheduler, default StepLR(step=30, gamma=0.1)
    _on_start_batch: define how to unpack a batch into (input, target) and move it to the right device
    _add_on_end_batch_log: add your own log information
    _add_on_end_batch_tb: add your own tensorboard visualizations via add_xxx
    _add_record: record additional information
    _before_evaluate: operations to run before _validate in evaluation mode
    _after_evaluate: operations to run after _validate in evaluation mode
def __init__(self, parser: Parser, experiment_name='exp'):
self._parser = parser
self._switch_training = True
self._meters = self._status_meter()
self._state = {'best_acc1': -1, 'training_iterations': 0, 'iteration': 0}
self._experiment_name = experiment_name
self._init_learning()
def _status_meter(self):
outer = self
class StatusMeter(object):
def __init__(self):
self._training = Chain()
self._validation = Chain()
def __getattr__(self, item):
if outer._switch_training:
return getattr(self._training, item)
else:
return getattr(self._validation, item)
return StatusMeter()
def _close(self):
self._tracer.close()
def _do_args(self):
if self._args.deterministic:
seed = 1541233595
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
torch.set_printoptions(precision=10)
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if self._args.debug:
self._args.workers = 0
self._args.batch_size = 2
if self._args.gpu is not None:
# torch.backends.cudnn.benchmark = True
import os
os.environ['CUDA_VISIBLE_DEVICES'] = str(self._args.gpu)
# assign 0 because if you code os.environ['CUDA_VISIBLE_DEVICES']=xx,
# all gpu device is 0 in pytorch context, otherwise you will get a
# RuntimeError: CUDA error: invalid device ordinal
self._args.gpu = 0
if self._args.evaluate:
self._args.p_bar = True
self._args.no_tb = False
if self._args.p_bar:
self._args.print_freq = 1
def _warp_loader(self, training, dataset):
return torch.utils.data.DataLoader(dataset, batch_size=self._args.batch_size, num_workers=self._args.workers,
pin_memory=True, shuffle=training)
def _init_learning(self):
self._args = self._parser.parse_args()
self._do_args()
self._tracer = \
Tracer(root_dir=Path(self._args.work_dir), work_name=self._parser.work_name, clean_up=self._args.clean_up) \
.tb_switch(self._args.no_tb) \
.debug_switch(self._args.debug or self._args.p_bar) \
.snap_git_switch(self._args.snapgit) \
.attach(experiment_name=self._experiment_name, override=self._args.nowtime_exp,
logger_name=self._args.logger_name)
if self._args.revert_snapgit:
self._tracer.revert(self._args.revert_snapgit)
@property
def tracer(self):
return self._tracer
def _resume(self, model, optimizer):
"""load more than one model and optimizer, for example GAN"""
for pth, m, optim in zip(self._args.resume, [model] if not isinstance(model, list) else model,
[optimizer] if not isinstance(optimizer, list) else optimizer):
ret = self._tracer.load(tc.Model(
pth, {
'model': m,
'optim': optim
}))
self._args.start_epoch = ret['start_epoch']
self._state['best_acc1'] = ret['best_acc1']
self._args.epochs += self._args.start_epoch
@staticmethod
def _get_lr_scheduler(optimizer: torch.optim.Optimizer) -> list:
return [StepLR(optim, 30, gamma=0.1) for optim in ([optimizer] if not isinstance(optimizer, list) else optimizer)]
@staticmethod
def _on_start_epoch():
"""
add your meters by get_meters function
for example : get_meters(['mine1', 'mine2'])
usage: self._meters[mode].{name}.update() detail in : from .meter import AverageMeter
"""
return get_meters([])
def _add_record(self, ret_forward, batch_size):
"""
self._meters.losses.update(ret['loss'], bs)
"""
pass
def _before_evaluate(self, model):
"""
load checkpoint
"""
for pth, m in zip(self._args.evaluate, [model] if not isinstance(model, list) else model):
if os.path.isfile(pth):
log("=> loading checkpoint '{}'".format(pth))
checkpoint = torch.load(pth, map_location='cpu')
m.load_state_dict(checkpoint['state_dict'])
log("=> loaded checkpoint '{}' (epoch {} Acc@1 {})"
.format(pth, checkpoint['epoch'], checkpoint['best_acc1']))
else:
assert False, "=> no checkpoint found at '{}'".format(pth)
def _after_evaluate(self):
"""
execute something after evaluation
"""
pass
def _on_end_epoch(self, model, optimizer, is_best):
"""save more than one model and optimizer, for example GAN"""
postfix = f'_{self._args.extension}'
if self._args.extension == '': postfix = ''
for m, optim in zip([model] if not isinstance(model, list) else model,
[optimizer] if not isinstance(optimizer, list) else optimizer):
self._tracer.store(tc.Model(
f"{model.__class__.__name__}{postfix}.pth.tar",
{
'epoch': self._state['epoch'] + 1,
'arch': str(m),
'state_dict': m.state_dict(),
'best_acc1': self._state['best_acc1'],
'optimizer': optim.state_dict(),
}, is_best))
def _on_start_batch(self, data):
"""override to adapt yourself dataset __getitem__"""
inp, target = data
if self._args.gpu is not None:
return inp.cuda(self._args.gpu), target.cuda(self._args.gpu), target.size(0)
else:
return inp, target, target.size(0)
def _add_on_end_batch_log(self, training):
""" user can add some log information with _on_start_epoch using all kinds of meters in _on_end_batch"""
if training:
pass
else:
pass
return ""
def _add_on_end_batch_tb(self, training):
""" user can add some tensorboard operations with _on_start_epoch using all kinds of meters"""
if training:
pass
else:
pass
def _on_end_batch(self, data_loader, optimizer=None):
""" print log and visualization"""
training_iterations = self._state['training_iterations']
if self._switch_training:
if self._state['iteration'] != 0 and self._state['iteration'] % self._args.print_freq == 0:
print_process_bar = {'p_bar': self._args.p_bar, 'current_batch': self._state['iteration'], 'total_batch': len(data_loader)}
if self._args.p_bar:
prefix_info = "Epoch:[{0}] "
else:
prefix_info = 'Epoch: [{0}][{1}/{2}]\t'
fix_log = prefix_info + 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
'Data {data_time.val:.3f} ({data_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\t' \
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t' \
'Acc@5 {top5.val:.3f} ({top5.avg:.3f})\t'
fix_log = fix_log.format(
self._state['epoch'], self._state['iteration'], len(data_loader), batch_time=self._meters.batch_time,
data_time=self._meters.data_time, loss=self._meters.losses,
top1=self._meters.top1, top5=self._meters.top5)
log(fix_log + self._add_on_end_batch_log(True), **print_process_bar)
if self._args.no_tb:
self._tracer.tb.add_scalars('data/loss', {
'training': self._meters.losses.avg,
}, training_iterations)
self._tracer.tb.add_scalar('data/epochs', self._state['epoch'], training_iterations)
for oi, optim in enumerate([optimizer] if not isinstance(optimizer, list) else optimizer):
self._tracer.tb.add_scalars(f'data/learning_rate', {f'lr_optim_{oi + 1}': optim.param_groups[-1]['lr']}, training_iterations)
self._tracer.tb.add_scalars('data/precision/top1', {
'training': self._meters.top1.avg,
}, training_iterations)
self._tracer.tb.add_scalars('data/precision/top5', {
'training': self._meters.top5.avg
}, training_iterations)
self._tracer.tb.add_scalars('data/runtime', {
'batch_time': self._meters.batch_time.avg,
'data_time': self._meters.data_time.avg
}, training_iterations)
self._add_on_end_batch_tb(True)
elif not self._args.evaluate:
fix_log = ('Testing: Epoch [{0}] Acc@1 {top1.avg:.3f}\tAcc@5 {top5.avg:.3f}\tLoss {loss.avg:.4f}\t[best:{best_acc}]\t'
.format(self._state['epoch'], top1=self._meters.top1, top5=self._meters.top5,
loss=self._meters.losses, best_acc=self._state['best_acc1']))
log(fix_log + self._add_on_end_batch_log(False), color="green")
if self._args.no_tb:
self._tracer.tb.add_scalars('data/loss', {
'validation': self._meters.losses.avg,
}, training_iterations)
self._tracer.tb.add_scalars('data/precision/top1', {
'validation': self._meters.top1.avg,
}, training_iterations)
self._tracer.tb.add_scalars('data/precision/top5', {
'validation': self._meters.top5.avg
}, training_iterations)
self._add_on_end_batch_tb(False)
@staticmethod
@abc.abstractmethod
def _on_forward(training, model, inp, target, optimizer=None) -> dict:
"""
implement training and validation code here
:param training: bool -> training validation
:param model: one or list
:param inp: batch data
:param target: batch target
:param optimizer: one or list
:return:
"""
""" for example """
# ret can expand but DONT Shrink
ret = {'loss': object, 'acc1': object, 'acc5': object}
# do something
output = model(inp)
loss = F.cross_entropy(output, target)
# compute acc1 acc5
acc1, acc5 = accuracy(output, target, topk=(1, 5))
if training:
optimizer.zero_grad()
loss.backward()
optimizer.step()
ret['loss'] = loss.item()
ret['acc1'] = acc1.item()
ret['acc5'] = acc5.item()
return ret
def _train(self, model, train_loader, optimizer, epoch):
self._switch_training = True
# setup model
[m.train() for m in (model if isinstance(model, list) else [model])]
self._meters.merge(get_meters(['batch_time', 'data_time', 'losses', 'top1', 'top5']))
self._meters.merge(self._on_start_epoch())
end = time.time()
for i, batch in enumerate(train_loader):
self._state['training_iterations'] += 1
self._state['iteration'] = i
self._state['epoch'] = epoch
# measure data loading time
self._meters.data_time.update(time.time() - end)
inp, target, bs = self._on_start_batch(batch)
# compute output
ret = self._on_forward(True, model, inp, target, optimizer)
# record indicators
self._meters.losses.update(ret['loss'], bs)
self._meters.top1.update(ret['acc1'], bs)
self._meters.top5.update(ret['acc5'], bs)
self._add_record(ret, bs)
# measure elapsed time
self._meters.batch_time.update(time.time() - end)
end = time.time()
self._on_end_batch(train_loader, optimizer)
def _validate(self, model, val_loader):
self._switch_training = False
# setup model
[m.eval() for m in (model if isinstance(model, list) else [model])]
self._meters.merge(get_meters(['batch_time', 'losses', 'top1', 'top5']))
self._meters.merge(self._on_start_epoch())
end = time.time()
with torch.no_grad():
for i, batch in enumerate(val_loader):
self._state['iteration'] = i
inp, target, bs = self._on_start_batch(batch)
# compute output
ret = self._on_forward(False, model, inp, target)
# record indicators
self._meters.losses.update(ret['loss'], bs)
self._meters.top1.update(ret['acc1'], bs)
self._meters.top5.update(ret['acc5'], bs)
self._add_record(ret, bs)
# measure elapsed time
self._meters.batch_time.update(time.time() - end)
end = time.time()
self._on_end_batch(val_loader)
return self._meters.top1.avg
def learning(self, model, optimizer, train_dataset, val_dataset):
"""
Core function of engine to organize training process
        :param train_dataset: training dataset
        :param val_dataset: validation dataset
:param model: one or list
:param optimizer: one or list
"""
# save config
cfg = {f"optimizer{i + 1}": optim for i, optim in enumerate([optimizer] if not isinstance(optimizer, list) else optimizer)}
self._tracer.store(tc.Config({**cfg, **vars(self._args)}))
train_loader = self._warp_loader(True, train_dataset)
val_loader = self._warp_loader(False, val_dataset)
log('==> Start ...', color="red")
if self._args.resume:
self._resume(model, optimizer)
# cuda setup
if self._args.gpu is not None:
[m.cuda(self._args.gpu) for m in (model if isinstance(model, list) else [model])]
if self._args.evaluate:
self._before_evaluate(model)
self._validate(model, val_loader)
self._after_evaluate()
else:
ajlr = None
if self._args.adjust_lr:
ajlr = self._get_lr_scheduler(optimizer)
for epoch in range(self._args.start_epoch, self._args.epochs):
# train for one epoch
self._train(model, train_loader, optimizer, epoch)
# evaluate on validation set
acc1 = self._validate(model, val_loader)
# remember best acc@1 and save checkpoint
is_best = acc1 > self._state['best_acc1']
self._state['best_acc1'] = max(acc1, self._state['best_acc1'])
self._on_end_epoch(model, optimizer, is_best)
if self._args.adjust_lr:
[lr.step() for lr in ajlr]
print(f"Best Acc1:{self._state['best_acc1']}")
self._close()
return self._state['best_acc1']
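# A hedged sketch of a concrete Engine subclass: _on_forward is the only
# abstract method, so a minimal classification engine can reuse the example
# body shown in the abstract method above almost verbatim. How Parser is
# constructed is an assumption; see torchfurnace's options module.
class _DemoEngine(Engine):
    @staticmethod
    def _on_forward(training, model, inp, target, optimizer=None) -> dict:
        output = model(inp)
        loss = F.cross_entropy(output, target)
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        if training:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        return {'loss': loss.item(), 'acc1': acc1.item(), 'acc5': acc5.item()}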
| 39.955294
| 149
| 0.572876
| 1,993
| 16,981
| 4.636227
| 0.168088
| 0.04329
| 0.017316
| 0.016558
| 0.338203
| 0.277381
| 0.247619
| 0.227814
| 0.210498
| 0.173593
| 0
| 0.011981
| 0.316766
| 16,981
| 424
| 150
| 40.049528
| 0.784434
| 0.136741
| 0
| 0.241877
| 0
| 0.00722
| 0.104505
| 0.01427
| 0
| 0
| 0
| 0
| 0.00361
| 1
| 0.086643
| false
| 0.021661
| 0.050542
| 0.01083
| 0.191336
| 0.021661
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c36c7337778993804185f55e34f582ccb3e038c
| 3,736
|
py
|
Python
|
tests/test_ninjadog.py
|
knowsuchagency/ninjadog
|
54f0c98da1006d97b6e39d39d0e4e056288f52d0
|
[
"MIT"
] | 26
|
2017-06-23T02:18:54.000Z
|
2022-02-19T08:45:11.000Z
|
tests/test_ninjadog.py
|
knowsuchagency/ninjadog
|
54f0c98da1006d97b6e39d39d0e4e056288f52d0
|
[
"MIT"
] | 21
|
2017-06-22T07:30:20.000Z
|
2022-03-26T02:23:24.000Z
|
tests/test_ninjadog.py
|
knowsuchagency/ninjadog
|
54f0c98da1006d97b6e39d39d0e4e056288f52d0
|
[
"MIT"
] | 2
|
2018-06-20T01:16:27.000Z
|
2020-07-14T19:55:27.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `ninjadog` package."""
# TODO: test raises ValueError when pug cli can't be found and not passed explicitly to renderer
def test_npm_installed():
from subprocess import Popen
assert Popen(('which', 'npm')).wait() == 0, 'npm must be installed'
def test_pug_cli_exists():
from pathlib import Path
from ninjadog.constants import PUG_CLI_PATH
assert Path(PUG_CLI_PATH).exists()
def test_hello_world():
from ninjadog import render
assert render('h1 hello world') == '<h1>hello world</h1>'
def test_pug_variable():
from ninjadog import render
assert render('h1= title', context={'title': 'hello world'}) == '<h1>hello world</h1>'
def test_jinja2_variable():
from ninjadog import render
assert render('h1 {{ title }}', context={'title': 'hello world'}, with_jinja=True) == '<h1>hello world</h1>'
def test_context():
import ninjadog
context = {'name': 'Derp'}
assert ninjadog.render('h1 hello #{ name }', context=context) == '<h1>hello Derp</h1>'
assert ninjadog.render("h1= name", context=context) == '<h1>Derp</h1>'
def test_conditional():
from textwrap import dedent
import ninjadog
string = dedent("""
if name == 'sam'
h1 hello #{ name }
""")
assert ninjadog.render(string, context={'name': 'sam'}) == '<h1>hello sam</h1>'
string = dedent("""
if person.name == 'sam'
h1 hello #{ person.name }
""")
assert ninjadog.render(string, context={'person': {'name': 'sam'}}) == '<h1>hello sam</h1>'
def test_render_no_string_argument():
from tempfile import NamedTemporaryFile
import ninjadog
string = 'h1 hello'
with NamedTemporaryFile('w+') as tempfile:
tempfile.write(string)
tempfile.seek(0)
assert ninjadog.render(file=tempfile.name) == ninjadog.render(string) == '<h1>hello</h1>'
def test_with_pug_with_jinja2():
from textwrap import dedent
from ninjadog import render
string = dedent("""
if person.name == "Bob"
h1 Hello Bob
else
h1 My name is #{ person.name }
p The persons's uppercase name is {{ person.get('name').upper() }}
p The person's name is #{ person.name }
if animal
h1 This should not output
else
p animal value is false
""").strip()
context = {'person': {'name': 'Bob'}, 'animal': None}
expected_output = dedent("""
<h1>Hello Bob</h1>
<p>The persons's uppercase name is BOB</p>
<p>The person's name is Bob</p>
<p>animal value is false</p>
""").strip()
actual_output = render(string, context=context, pretty=True, with_jinja=True).strip()
assert expected_output == actual_output
def test_cli_string():
from ninjadog.cli import main
from ninjadog.utils import jsonify
context = jsonify({'title': 'hello, world'})
assert main(('string', 'h1= title', '-c', context)) == '<h1>hello, world</h1>'
def test_extends():
from tempfile import gettempdir
from textwrap import dedent
from pathlib import Path
from ninjadog import render
parent_string = dedent("""
h1 Title
block content
""")
child_string = dedent("""
extends parent
block content
h2 Subtitle
""")
parent_path = Path(gettempdir(), 'parent.pug')
child_path = Path(gettempdir(), 'child.pug')
with parent_path.open('w+') as parent, child_path.open('w+') as child:
parent.write(parent_string)
parent.seek(0)
child.write(child_string)
child.seek(0)
assert render(file=child_path) == '<h1>Title</h1><h2>Subtitle</h2>'
assert render(file=str(child_path)) == '<h1>Title</h1><h2>Subtitle</h2>'
| 27.470588
| 112
| 0.635974
| 490
| 3,736
| 4.759184
| 0.222449
| 0.045026
| 0.027015
| 0.051458
| 0.328473
| 0.259005
| 0.150086
| 0.110635
| 0.062607
| 0.062607
| 0
| 0.016775
| 0.218148
| 3,736
| 135
| 113
| 27.674074
| 0.781582
| 0.0447
| 0
| 0.276596
| 0
| 0
| 0.318169
| 0.024712
| 0
| 0
| 0
| 0.007407
| 0.148936
| 1
| 0.117021
| false
| 0
| 0.202128
| 0
| 0.319149
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c3ca74700c452639c1abd59ef05386a970cf094
| 1,095
|
py
|
Python
|
src/detect_utils.py
|
iglaweb/HippoYD
|
da2c40be8017c43a7b7b6c029e2df30cf7d54932
|
[
"Apache-2.0"
] | 7
|
2021-07-02T03:57:20.000Z
|
2022-03-20T13:23:32.000Z
|
src/detect_utils.py
|
filipul1s/HippoYD
|
da2c40be8017c43a7b7b6c029e2df30cf7d54932
|
[
"Apache-2.0"
] | null | null | null |
src/detect_utils.py
|
filipul1s/HippoYD
|
da2c40be8017c43a7b7b6c029e2df30cf7d54932
|
[
"Apache-2.0"
] | 3
|
2021-07-02T16:07:28.000Z
|
2022-03-20T13:23:33.000Z
|
import cv2
from scipy.spatial import distance as dist
def mouth_aspect_ratio(mouth) -> float:
# compute the euclidean distances between the two sets of
# vertical mouth landmarks (x, y)-coordinates
A = dist.euclidean(mouth[2], mouth[10]) # 51, 59
B = dist.euclidean(mouth[4], mouth[8]) # 53, 57
# compute the euclidean distance between the horizontal
# mouth landmark (x, y)-coordinates
C = dist.euclidean(mouth[0], mouth[6]) # 49, 55
# compute the mouth aspect ratio
mar = (A + B) / (2.0 * C)
return mar
def resize_img(frame_crop, max_width, max_height):
height, width = frame_crop.shape[:2]
# only shrink if img is bigger than required
if max_height < height or max_width < width:
# get scaling factor
scaling_factor = max_height / float(height)
if max_width / float(width) < scaling_factor:
scaling_factor = max_width / float(width)
# resize image
frame_crop = cv2.resize(frame_crop, None, fx=scaling_factor, fy=scaling_factor, interpolation=cv2.INTER_AREA)
return frame_crop
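# A self-contained check of mouth_aspect_ratio on fabricated landmark points:
# the indexed pairs below give A = B = 2 and C = 4, so
# MAR = (A + B) / (2 * C) = 0.5. Coordinates are invented for the demo.
def _demo_mouth_aspect_ratio():
    import numpy as np
    mouth = np.zeros((12, 2))
    mouth[2], mouth[10] = (1.0, 1.0), (1.0, -1.0)   # vertical pair A
    mouth[4], mouth[8] = (3.0, 1.0), (3.0, -1.0)    # vertical pair B
    mouth[0], mouth[6] = (0.0, 0.0), (4.0, 0.0)     # horizontal pair C
    assert abs(mouth_aspect_ratio(mouth) - 0.5) < 1e-9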
| 36.5
| 117
| 0.675799
| 157
| 1,095
| 4.579618
| 0.452229
| 0.108484
| 0.075104
| 0.072323
| 0.080668
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029656
| 0.230137
| 1,095
| 29
| 118
| 37.758621
| 0.82325
| 0.286758
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c3cce245cb8dd51640bae04fe6b64d1a7249903
| 3,626
|
py
|
Python
|
rna_format.py
|
thedinak/Genetics-to-Therapuetics
|
f38cc76ceb8b9217b3f4b19f985a255c1c1dd98d
|
[
"MIT"
] | null | null | null |
rna_format.py
|
thedinak/Genetics-to-Therapuetics
|
f38cc76ceb8b9217b3f4b19f985a255c1c1dd98d
|
[
"MIT"
] | null | null | null |
rna_format.py
|
thedinak/Genetics-to-Therapuetics
|
f38cc76ceb8b9217b3f4b19f985a255c1c1dd98d
|
[
"MIT"
] | null | null | null |
import pandas as pd
import os
import tarfile
import glob
import json
def unzip_rna_seq_data(file_name, desired_folder_name):
    ''' Downloaded RNA files are tarfiles; this unzips them '''
if 'tar' in file_name:
open_tar = tarfile.open(file_name)
open_tar.extractall(f'{desired_folder_name}')
open_tar.close()
else:
print('Not a tarfile')
def unzip_individual_rna_seq_files(root_dir):
    ''' Tarfile extraction yields .gz files, which need to be further unzipped '''
files_to_unpack = []
dfs = []
meta_data_file = ''.join(glob.glob('**/**metadata.cart**', recursive=True))
with open(meta_data_file, 'r') as f:
meta_data = json.load(f)
convert_filename_caseuuid = {meta_data[i]['file_id']:
meta_data[i]['associated_entities'][0]
['case_id'] for i in range(0, len(meta_data))}
# dictionary of file_id:case_id
for directory in os.listdir(root_dir):
try:
for filename in os.listdir(os.path.join(root_dir, directory)):
if ".gz" in filename:
files_to_unpack.append(os.path.join(root_dir,
directory, filename))
except NotADirectoryError:
continue
for file in files_to_unpack:
dfs.append(pd.read_csv
(file, compression='gzip', sep="\t", names=['gene',
convert_filename_caseuuid[os.path.split(os.path.dirname
(file))[1]]],
index_col='gene'))
# these dfs already have the correct case id name
return files_to_unpack, dfs, convert_filename_caseuuid
def concat_all_rna_seq(dfs):
''' Takes each individual rna seq file and concatenates them into one '''
rna_seq_data = pd.concat(dfs, join="outer", axis=1).T
if type(rna_seq_data.index[0]) == str:
rna_seq_data.reset_index(inplace=True)
return rna_seq_data
def convert_ensg_to_gene_name(dataframe_with_genes):
    '''TCGA data is listed with Ensembl gene IDs; this converts them to gene
    names for greater readability '''
change_name_file = 'mart_export.txt'
gene_names = {}
with open(change_name_file) as fh:
for line in fh:
ensg, gene_name = line.split(',', 1)
gene_names[gene_name.split('.')[0]] = ensg
dataframe = (dataframe_with_genes.rename
(columns=lambda x: x.split('.')[0]).rename(
columns=gene_names))
genes = dataframe.columns[1:-1].tolist()
return dataframe, genes, gene_names
def concat_rna_to_clinical_data(clinical_dataframe, rna_dataframe):
''' Combines clinical data and the rna seq data. Clinical dataframe should
have bcr_patient_uuid as the index. '''
full_data = pd.merge(rna_dataframe, clinical_dataframe,
how='right', left_on=['index'],
right_on=['bcr_patient_uuid'])
return full_data
def limit_full_data_for_pca(full_data, genes):
''' Removes rna seq files where there is no drug name available and limits
columns to rna seq data, drug name and vital status '''
limit_full_data = (full_data.loc[(full_data.standard_drugs != '')
& (full_data.standard_drugs != '[not available]')
& (full_data.standard_drugs != '[unknown]')].copy())
limit_full_data.dropna(subset=['index'], inplace=True)
columns_needed = genes+['standard_drugs', 'vital_status']
return limit_full_data.loc[:, columns_needed]
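# A hedged end-to-end sketch chaining the helpers above. The archive name and
# extraction folder are placeholders, and mart_export.txt must already exist,
# as convert_ensg_to_gene_name expects.
def _demo_pipeline(clinical_dataframe):
    unzip_rna_seq_data('gdc_download.tar.gz', 'rna_seq')          # placeholder archive
    _, dfs, _ = unzip_individual_rna_seq_files('rna_seq')
    rna_seq_data = concat_all_rna_seq(dfs)
    rna_seq_data, genes, _ = convert_ensg_to_gene_name(rna_seq_data)
    full_data = concat_rna_to_clinical_data(clinical_dataframe, rna_seq_data)
    return limit_full_data_for_pca(full_data, genes)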
| 39.413043
| 79
| 0.619967
| 473
| 3,626
| 4.501057
| 0.346723
| 0.031
| 0.032879
| 0.022546
| 0.024425
| 0.024425
| 0
| 0
| 0
| 0
| 0
| 0.003795
| 0.273304
| 3,626
| 91
| 80
| 39.846154
| 0.804175
| 0.163265
| 0
| 0
| 0
| 0
| 0.071022
| 0.007035
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092308
| false
| 0
| 0.076923
| 0
| 0.246154
| 0.015385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c3cdcc2642ae1e7ae2f269889189d138f16d4af
| 7,268
|
py
|
Python
|
fasturl/fasturl.py
|
evite/django-fasturls
|
52e397c5f4b4b2b7d6c5cd2bf9cc8cac1b4efa9b
|
[
"MIT"
] | null | null | null |
fasturl/fasturl.py
|
evite/django-fasturls
|
52e397c5f4b4b2b7d6c5cd2bf9cc8cac1b4efa9b
|
[
"MIT"
] | null | null | null |
fasturl/fasturl.py
|
evite/django-fasturls
|
52e397c5f4b4b2b7d6c5cd2bf9cc8cac1b4efa9b
|
[
"MIT"
] | null | null | null |
import re
from collections import OrderedDict
from django.conf.urls import url as django_url, include
from django.core.urlresolvers import RegexURLResolver, RegexURLPattern
from django.utils.encoding import force_text
import logging
# Using FastUrl has a couple of caveats:
# 1. FastUrl tries to keep the resolution order the same as declared, but we cannot guarantee that the order will
# be exactly the same which could cause the wrong view to be returned if you have urlpatterns that overlap.
# 2. Detection of regexes within urlpatterns is very ad hoc; it would be easy to deliberately cause it to fail, but
# in practice it should cover most cases. Any errors should occur during url building rather than at resolution time.
# Usage:
# Build your urlpatterns using 'FastUrl' instead of 'url' and then rebuild your urlpatterns with
# urlpatterns = render_fast_urls(urlpatterns)
class StartsWithResolver(RegexURLResolver):
"""
Python regexs are pretty slow, so this class checks if the string looks like it matches before
passing it through to the regular resolver class
"""
def __init__(self, regex, view, kwargs=None):
urlconf_module, app_name, namespace = view
super(StartsWithResolver, self).__init__(regex, urlconf_module, kwargs, app_name=app_name, namespace=namespace)
self.pattern = regex
if self.pattern[0] == "^":
self.pattern = self.pattern[1:]
self.passthrough = False
for char in "$()[]<>*?\\":
if char in self.pattern:
self.passthrough = True
else:
self.passthrough = True
def resolve(self, path):
if not self.passthrough:
path = force_text(path) # path may be a reverse_lazy object
if not path.startswith(self.pattern):
return False
return super(StartsWithResolver, self).resolve(path)
class FastUrl(object):
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
def add_to_tree(self, tree):
        # This does some fairly ad hoc detection of regex patterns and tries to re-join any regexes that
        # were split in the middle
words = re.split('/', self._args[0])
for i in range(len(words) - 2, 0, -1):
if words[i] and words[i + 1] and (words[i][-1] == "^" or words[i + 1][0] == "?"):
words = words[:i] + [words[i] + "/" + words[i + 1]] + words[i + 2:]
new_words = []
parens_index = -1
parens = 0
for i, word in enumerate(words):
if "(" in words[i]:
if parens == 0:
parens_index = i
parens += word.count('(')
if "[" in words[i]:
if parens == 0:
parens_index = i
parens += word.count('[')
if ")" in words[i]:
parens -= word.count(')')
if "]" in words[i]:
parens -= word.count(']')
if parens_index < 0:
new_words.append(word)
elif parens == 0:
new_words.append('/'.join(words[parens_index:i+1]))
parens_index = -1
if parens_index != -1:
raise RuntimeError("Mismatched parentheses in urlpattern {}".format(self._args[0]))
words = new_words
if words[-1] in ("?", "?$", "$"):
words = words[:-2] + [words[-2] + "/" + words[-1]]
entry = tree
for word in words[:-1]:
if not entry.get(word):
entry[word] = OrderedDict()
entry = entry[word]
processed_include = False
# For include(...) processing. we add the urls to the tree instead of instantiating a RegexURLResolver
if isinstance(self._args[1], (list, tuple)):
urlconf_module, app_name, namespace = self._args[1]
if not app_name and not namespace:
processed_include = True
word = words[-1]
if not entry.get(word):
entry[word] = OrderedDict()
for url in urlconf_module.urlpatterns:
_add_url_to_tree(entry, url)
if not processed_include:
if words[-1] in entry:
logging.error("Duplicate entry for urlpattern {}".format(self._args[0]))
entry[words[-1]] = (self._args, self._kwargs)
def _is_django_regex(ob):
if isinstance(ob, RegexURLPattern) or isinstance(ob, RegexURLResolver):
return True
return False
def _add_url_to_tree(tree, url):
if isinstance(url, FastUrl):
url.add_to_tree(tree)
if _is_django_regex(url):
tree[('djangourl', _add_url_to_tree.django_urls)] = url
_add_url_to_tree.django_urls += 1
_add_url_to_tree.django_urls = 0 # counter for django only urls
merged_count = 0
def _merge_single_children(tree):
if not isinstance(tree, dict):
return tree
new_tree = OrderedDict()
for path, param in tree.items():
if isinstance(param, dict):
child = _merge_single_children(param)
if isinstance(child, dict) and len(child) == 1:
                new_tree[path + '/' + list(child.keys())[0]] = list(child.values())[0]  # list() for Python 3 dict views
_merge_single_children.count += 1
else:
new_tree[path] = _merge_single_children(param)
else:
new_tree[path] = param
return new_tree
_merge_single_children.count = 0
def render_fast_urls(urls, debug=False):
url_tree = OrderedDict()
# Expand the url list into the tree structure
for url in urls:
_add_url_to_tree(url_tree, url)
# Merge any entries with only a single child
url_tree = _merge_single_children(url_tree)
# Render the tree back into a list
def render_tree(tree):
new_urls = []
for path, param in tree.items():
if _is_django_regex(param):
new_urls.append(param)
else:
                if path and path[0] != "^":
path = "^" + path
if not path:
path = "^$"
if isinstance(param, dict):
new_urls.append(StartsWithResolver(path + "/", include(render_tree(param))))
else:
p = (path,) + param[0][1:]
new_urls.append(django_url(*p, **param[1]))
return new_urls
urlpatterns = render_tree(url_tree)
if debug:
_print_tree(url_tree, 0)
print ("FastUrl generated {} top level url patterns from {} total urls".format(len(urlpatterns), _count_tree(url_tree)))
print ("There were {} normal django urls.".format(_add_url_to_tree.django_urls))
print ("{} branches were merged".format(_merge_single_children.count))
return urlpatterns
def _print_tree(tree, indent=0):
if not isinstance(tree, dict):
return
for key in tree.keys():
print (" " * indent + str(key))
        _print_tree(tree[key], indent + 2)
def _count_tree(tree):
if not isinstance(tree, dict):
return 1
total = 0
for key in tree.keys():
total += _count_tree(tree[key])
return total
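# --- Hypothetical usage sketch (not part of the original module) ---
# Roughly how FastUrl and render_fast_urls might be wired into a Django
# urls.py; the view names and patterns below are illustrative assumptions,
# not taken from any real project.
#
# from myapp import views  # hypothetical app
#
# urlpatterns = render_fast_urls([
#     FastUrl(r'^accounts/login/$', views.login_view, name='login'),
#     FastUrl(r'^accounts/profile/(?P<pk>\d+)/$', views.profile, name='profile'),
#     FastUrl(r'^blog/', include('blog.urls')),
# ], debug=True)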
| 35.627451
| 128
| 0.589158
| 918
| 7,268
| 4.498911
| 0.230937
| 0.017433
| 0.013559
| 0.020339
| 0.146005
| 0.112107
| 0.083777
| 0.05569
| 0.05569
| 0.05569
| 0
| 0.010342
| 0.3082
| 7,268
| 203
| 129
| 35.802956
| 0.811058
| 0.164007
| 0
| 0.189189
| 0
| 0
| 0.039066
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074324
| false
| 0.027027
| 0.040541
| 0
| 0.202703
| 0.047297
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c3d59a46c15d1afca1d52fd4d95d34b6fd700b1
| 6,679
|
py
|
Python
|
experiments/2_training.py
|
helenacuesta/multif0-estimation-polyvocals
|
4960f5415f8a170f2ff8d5b776bfd4cb5576d3ba
|
[
"MIT"
] | 36
|
2020-09-13T12:30:41.000Z
|
2022-02-15T08:52:58.000Z
|
experiments/2_training.py
|
helenacuesta/multif0-estimation-polyvocals
|
4960f5415f8a170f2ff8d5b776bfd4cb5576d3ba
|
[
"MIT"
] | 6
|
2020-09-04T11:14:14.000Z
|
2022-02-09T23:49:59.000Z
|
experiments/2_training.py
|
helenacuesta/multif0-estimation-polyvocals
|
4960f5415f8a170f2ff8d5b776bfd4cb5576d3ba
|
[
"MIT"
] | null | null | null |
import os
import json
import keras
import numpy as np
import csv
from experiments import config
import utils
import utils_train
import models
import argparse
class Data(object):
"""Class that deals with all the data mess
"""
def __init__(self, data_splits_path, data_path, input_patch_size, batch_size,
active_str, muxrate):
self.data_splits_path = data_splits_path
self.input_patch_size = input_patch_size
self.data_path = data_path
(self.train_set,
self.validation_set,
self.test_set) = self.load_data_splits()
self.train_files = utils_train.get_file_paths(self.train_set, self.data_path)
self.validation_files = utils_train.get_file_paths(
self.validation_set, self.data_path
)
self.test_files = utils_train.get_file_paths(self.test_set, self.data_path)
self.batch_size = batch_size
self.active_str = active_str
self.muxrate = muxrate
def load_data_splits(self):
with open(self.data_splits_path, 'r') as fhandle:
data_splits = json.load(fhandle)
return data_splits['train'], data_splits['validate'], data_splits['test']
def get_train_generator(self):
"""return a training data generator
"""
return utils_train.keras_generator(
self.train_files, self.input_patch_size,
self.batch_size, self.active_str, self.muxrate
)
def get_validation_generator(self):
"""return a validation data generator
"""
return utils_train.keras_generator(
self.validation_files, self.input_patch_size,
self.batch_size, self.active_str, self.muxrate
)
def get_test_generator(self):
"""return a test data generator
"""
return utils_train.keras_generator(
self.test_files, self.input_patch_size,
self.batch_size, self.active_str, self.muxrate
)
def load_data(load_path):
with open(load_path, 'r') as fp:
data = json.load(fp)
return data
def create_data_splits(path_to_metadata_file, exper_dir):
metadata = load_data(path_to_metadata_file)
utils.create_data_split(metadata,
os.path.join(exper_dir, 'data_splits.json'))
def train(model, model_save_path, data_splits_file, batch_size, active_str, muxrate):
#data_path = utils.data_path_multif0()
data_path = config.data_save_folder
input_patch_size = (360, 50)
data_splits_path = os.path.join(config.data_save_folder, data_splits_file)
## DATA MESS SETUP
dat = Data(
data_splits_path, data_path, input_patch_size,
batch_size, active_str, muxrate
)
# instantiate train and validation generators
train_generator = dat.get_train_generator()
validation_generator = dat.get_validation_generator()
model.compile(
loss=utils_train.bkld,
metrics=['mse', utils_train.soft_binary_accuracy],
optimizer='adam'
)
print(model.summary(line_length=80))
# hopefully fit model
history = model.fit_generator(
train_generator, config.SAMPLES_PER_EPOCH, epochs=config.NB_EPOCHS, verbose=1,
validation_data=validation_generator, validation_steps=config.NB_VAL_SAMPLES,
callbacks=[
keras.callbacks.ModelCheckpoint(
model_save_path, save_best_only=True, verbose=1),
keras.callbacks.ReduceLROnPlateau(patience=5, verbose=1),
keras.callbacks.EarlyStopping(patience=25, verbose=1)
]
)
model.load_weights(model_save_path)
return model, history, dat
def run_evaluation(exper_dir, save_key, history, dat, model):
(save_path, _, plot_save_path,
model_scores_path, _, _
) = utils_train.get_paths(exper_dir, save_key)
## Results plots
print("plotting results...")
utils_train.plot_metrics_epochs(history, plot_save_path)
## Evaluate
print("getting model metrics...")
utils_train.get_model_metrics(dat, model, model_scores_path)
print("getting best threshold...")
thresh = utils_train.get_best_thresh(dat, model)
print("scoring multif0 metrics on test sets...")
utils_train.score_on_test_set(model, save_path, dat, thresh)
def experiment(save_key, model, data_splits_file, batch_size, active_str, muxrate):
"""
This should be common code for all experiments
"""
exper_dir = config.exper_output
(save_path, _, plot_save_path,
model_scores_path, _, _
) = utils_train.get_paths(exper_dir, save_key)
model_save_path = '/scratch/hc2945/data/models/'
if not os.path.exists(model_save_path):
os.mkdir(model_save_path)
model_save_path = os.path.join(model_save_path, "{}.pkl".format(save_key))
'''
    # create data splits file if it doesn't exist
if not os.path.exists(
os.path.join(exper_dir, 'data_splits.json')):
create_data_splits(path_to_metadata_file='./mtracks_info.json', exper_dir=exper_dir)
'''
model, history, dat = train(model, model_save_path, data_splits_file,
batch_size, active_str, muxrate)
run_evaluation(exper_dir, save_key, history, dat, model)
print("Done! Results saved to {}".format(save_path))
def main(args):
batch_size = 32
active_str = 100
muxrate = 32
save_key = args.save_key
data_splits_file = args.data_splits_file
if args.model_name == 'model1':
model = models.build_model1()
elif args.model_name == 'model2':
model = models.build_model2()
elif args.model_name == 'model3':
model = models.build_model3()
else:
print("Specified model does not exist. Please choose an valid model: model1, model2 or model3.")
return
experiment(save_key, model, data_splits_file, batch_size, active_str, muxrate)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Train specified model with training set.")
parser.add_argument("--model",
dest='model_name',
type=str,
help="Name of the model you want to train.")
parser.add_argument("--save_key",
dest='save_key',
type=str,
help="String to save model-related data.")
parser.add_argument("--data_splits_file",
dest='data_splits_file',
type=str,
help="Filename of the data splits file to use in the experiment.")
main(parser.parse_args())
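# Hypothetical invocation (model, save key and file names are illustrative only):
#   python experiments/2_training.py --model model1 --save_key exp1_model1 \
#       --data_splits_file data_splits.json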
| 28.421277
| 104
| 0.658332
| 855
| 6,679
| 4.818713
| 0.214035
| 0.065534
| 0.034709
| 0.026214
| 0.313107
| 0.279369
| 0.279369
| 0.240291
| 0.190534
| 0.16966
| 0
| 0.007176
| 0.24884
| 6,679
| 234
| 105
| 28.542735
| 0.814032
| 0.053301
| 0
| 0.107914
| 0
| 0
| 0.092308
| 0.004632
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079137
| false
| 0
| 0.071942
| 0
| 0.208633
| 0.05036
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c3f1a1b4560f11557e8a7fa31b050b56c6becc0
| 6,666
|
py
|
Python
|
backend/validators/models.py
|
Cryptorubic/rubic-validator
|
88fd90d15da1fad538667c375189e2625d045ab0
|
[
"MIT"
] | null | null | null |
backend/validators/models.py
|
Cryptorubic/rubic-validator
|
88fd90d15da1fad538667c375189e2625d045ab0
|
[
"MIT"
] | null | null | null |
backend/validators/models.py
|
Cryptorubic/rubic-validator
|
88fd90d15da1fad538667c375189e2625d045ab0
|
[
"MIT"
] | null | null | null |
from logging import exception, info
from requests import post as request_post
from requests.exceptions import RequestException
from typing import Union
from uuid import UUID
from django.conf import settings
from django.db.models import (
CASCADE,
CharField,
ForeignKey,
OneToOneField,
)
from web3.types import HexBytes
from base.models import AbstractBaseModel
from base.support_functions.base import bytes_to_base58
from contracts.models import Contract
from backend.consts import DEFAULT_CRYPTO_ADDRESS, NETWORK_NAMES
from networks.models import (
Transaction,
CustomRpcProvider,
NearRpcProvider,
)
from networks.types import HASH_LIKE
class ValidatorSwap(AbstractBaseModel):
"""
    ValidatorSwap model, used for creating and
    sending signatures to the relayer.
    - contract - Contract instance on which the transaction was found
    - transaction - Transaction instance of the transaction found while scanning
    - signature - hashed params signed with the Validator private key
    - status - current status of the swap
"""
STATUS_CREATED = 'created'
STATUS_WAITING_FOR_DATA = 'waiting for data'
STATUS_SIGNATURE_CREATED = 'signature created'
STATUS_SIGNATURE_SEND = 'signature send'
STATUS_SUCCESS = 'success'
_STATUSES = (
(STATUS_CREATED, STATUS_CREATED.upper()),
(STATUS_WAITING_FOR_DATA, STATUS_WAITING_FOR_DATA.upper()),
(STATUS_SIGNATURE_CREATED, STATUS_SIGNATURE_CREATED.upper()),
(STATUS_SIGNATURE_SEND, STATUS_SIGNATURE_SEND.upper()),
(STATUS_SUCCESS, STATUS_SUCCESS.upper()),
)
contract = ForeignKey(
to=Contract,
on_delete=CASCADE,
related_name='contract_validator_swaps',
verbose_name='Contract',
)
transaction = OneToOneField(
to=Transaction,
on_delete=CASCADE,
related_name='validator_swap_transaction',
verbose_name='Transaction',
)
signature = CharField(
max_length=255,
blank=True,
default='',
verbose_name='Signature',
)
status = CharField(
max_length=255,
choices=_STATUSES,
default=STATUS_CREATED,
verbose_name='Status',
)
class Meta:
db_table = 'validator_swaps'
ordering = '-_created_at',
def __str__(self) -> str:
return (
f'Validator swap with transaction hash \"{self.transaction.hash}\"'
)
def send_signature_to_relayer(self):
"""
Sends created by Validator signature.
"""
params = {
'password': settings.PRIVATE_PASSWORD_FOR_SIGNATURE_API,
}
payload = {
'validatorName': settings.VALIDATOR_NAME,
'signature': self.signature,
'fromContractNum': self.contract.blockchain_id,
'fromTxHash': self.transaction.hash,
'eventName': self.transaction.event_data.get('event', ''),
}
try:
response = request_post(
url=f"{settings.RELAYER_URL}/api/trades/signatures/",
params=params,
json=payload,
)
if response.status_code != 200:
exception("Could not send signature to relayer")
return
self.status = self.STATUS_SIGNATURE_SEND
self.save()
message = (
f'Signature \"{self.signature}\" of validator '
f'\"{settings.VALIDATOR_NAME}\" send to '
f'{settings.RELAYER_URL}'
)
info(message)
        except RequestException as exception_error:
            exception(exception_error)
@classmethod
def get_swap_by_transaction_id(cls, transaction_id: UUID):
return cls.objects.filter(transaction__id=transaction_id).first()
@classmethod
def create_swap(
cls,
rpc_provider: Union[CustomRpcProvider, NearRpcProvider],
contract: Contract,
txn_hash: HASH_LIKE,
event: dict,
):
"""
Save ValidatorSwap instance in DataBase
:param rpc_provider: custom rpc provider of source network
:param contract: Contract object of source network
:param txn_hash: hash of the found transaction
:param event: event data of transaction
"""
if isinstance(txn_hash, HexBytes):
txn_hash = txn_hash.hex()
source_transaction = Transaction.get_transaction(
network_id=contract.network.id,
txn_hash=txn_hash,
)
info(source_transaction)
to_contract = Contract.get_contract_by_blockchain_id(
blockchain_id=source_transaction.data.get('params')[0],
)
if contract.network.title != NETWORK_NAMES['near']:
event_data = contract.get_event(event)
source_transaction.event_data = event_data
if to_contract.network.title in (
NETWORK_NAMES['solana'],
):
transaction_params = list(source_transaction.data['params'])
transaction_params[6] = bytes_to_base58(
string=transaction_params[6]
)
second_path = list(transaction_params[3])
for i in range(len(second_path)):
second_path[i] = bytes_to_base58(
string=second_path[i],
)
transaction_params[3] = second_path
source_transaction.data['params'] = transaction_params
elif to_contract.network.title in (
NETWORK_NAMES['near'],
):
transaction_params = list(source_transaction.data['params'])
transaction_params[6] = DEFAULT_CRYPTO_ADDRESS
# second_path = list(transaction_params[3])
#
# for i in range(len(second_path)):
# second_path[i] = bytes_to_base58(
# string=second_path[i],
# )
#
# transaction_params[3] = second_path
#
# source_transaction.data['params'] = transaction_params
source_transaction.save(
update_fields=(
'event_data',
'data',
'_created_at',
)
)
validator_swap = ValidatorSwap.get_swap_by_transaction_id(
transaction_id=source_transaction.id
)
if not validator_swap:
validator_swap = ValidatorSwap.objects.create(
contract=contract,
transaction=source_transaction,
)
return validator_swap
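# --- Hypothetical usage sketch (not part of the original module) ---
# Roughly how a scanner loop might hand a found event to create_swap and push
# the resulting signature out; the provider, contract and event values below
# are illustrative assumptions.
#
# swap = ValidatorSwap.create_swap(
#     rpc_provider=rpc_provider,          # CustomRpcProvider of the source network
#     contract=contract,                  # Contract the event was found on
#     txn_hash=event['transactionHash'],
#     event=event,
# )
# if swap.signature:
#     swap.send_signature_to_relayer()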
| 29.495575
| 79
| 0.611461
| 661
| 6,666
| 5.912254
| 0.252648
| 0.047851
| 0.026868
| 0.027636
| 0.163255
| 0.13869
| 0.13869
| 0.120266
| 0.120266
| 0.120266
| 0
| 0.005635
| 0.307831
| 6,666
| 225
| 80
| 29.626667
| 0.841352
| 0.124062
| 0
| 0.070064
| 0
| 0
| 0.096571
| 0.030266
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025478
| false
| 0.012739
| 0.089172
| 0.012739
| 0.216561
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c40a91da29b8a959cf350b71661cacacc596d6d
| 494
|
py
|
Python
|
practise/remove_zero.py
|
mengyangbai/leetcode
|
e7a6906ecc5bce665dec5d0f057b302a64d50f40
|
[
"MIT"
] | null | null | null |
practise/remove_zero.py
|
mengyangbai/leetcode
|
e7a6906ecc5bce665dec5d0f057b302a64d50f40
|
[
"MIT"
] | null | null | null |
practise/remove_zero.py
|
mengyangbai/leetcode
|
e7a6906ecc5bce665dec5d0f057b302a64d50f40
|
[
"MIT"
] | null | null | null |
class Solution(object):
def moveZeroes(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
        # Two-pointer sweep: 'pos' marks where the next non-zero element
        # belongs, so non-zeros keep their relative order and all zeros end
        # up at the back. (The original index arithmetic could read out of
        # range and did not preserve order.)
        pos = 0
        for i in range(len(nums)):
            if nums[i] != 0:
                nums[i], nums[pos] = nums[pos], nums[i]
                pos += 1
if __name__ == "__main__":
a = Solution()
nums = [0]
a.moveZeroes(nums)
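    # Quick sanity check of the fix above (illustrative input): zeros move to
    # the back while non-zero elements keep their relative order.
    nums2 = [0, 1, 0, 3, 12]
    a.moveZeroes(nums2)
    assert nums2 == [1, 3, 12, 0, 0], nums2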
| 27.444444
| 74
| 0.465587
| 72
| 494
| 3.083333
| 0.472222
| 0.036036
| 0.054054
| 0.094595
| 0.081081
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029316
| 0.378543
| 494
| 18
| 75
| 27.444444
| 0.693811
| 0.178138
| 0
| 0
| 0
| 0
| 0.021333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c42601ba0916dd0c025e30a21fda4322eb4b154
| 2,838
|
py
|
Python
|
scripts/train_agent.py
|
weepingwillowben/reward-surfaces
|
f27211faf3784df3305972b7cad65002fd57d7bf
|
[
"MIT"
] | null | null | null |
scripts/train_agent.py
|
weepingwillowben/reward-surfaces
|
f27211faf3784df3305972b7cad65002fd57d7bf
|
[
"MIT"
] | null | null | null |
scripts/train_agent.py
|
weepingwillowben/reward-surfaces
|
f27211faf3784df3305972b7cad65002fd57d7bf
|
[
"MIT"
] | 2
|
2021-10-03T14:51:38.000Z
|
2021-11-10T02:54:26.000Z
|
import argparse
from reward_surfaces.agents.make_agent import make_agent
import torch
import json
import os
from glob import glob
def main():
parser = argparse.ArgumentParser(description='Train an agent and keep track of important information.')
parser.add_argument('save_dir', type=str, help="Directory where checkpoints will be saved")
parser.add_argument('agent_name', type=str, help="One of 'rainbow', 'SB3_OFF', 'SB3_ON', or 'SB3_HER'")
parser.add_argument('env', type=str, help="Environment name")
parser.add_argument('device', type=str, help="Device used for training ('cpu' or 'cuda')")
parser.add_argument('hyperparameters', type=str, help="Dictionary of hyperparameters for training. Should include the intended training algorithm (E.g. {'ALGO': 'PPO'})")
parser.add_argument('--save_freq', type=int, default=10000, help="Training steps between each saved checkpoint.")
parser.add_argument('--resume', action='store_true', help="Continue training from last checkpoint")
args = parser.parse_args()
assert args.agent_name in ['rainbow', 'SB3_OFF', 'SB3_ON', 'SB3_HER'], "Name must be one of 'rainbow', 'SB3_OFF', 'SB3_ON', or 'SB3_HER'"
torch.set_num_threads(1)
zip_path = ""
timesteps = 0
pretraining = None
if args.resume:
subdirs = glob(args.save_dir+"/*/")
for i, subdir in enumerate(subdirs):
parts = subdir.split("/")
subdirs[i] = ""
for part in parts:
if part.isdigit():
subdirs[i] = int(part)
subdirs = sorted(list(filter(lambda a: a != "", subdirs)))
latest_checkpoint = subdirs.pop()
timesteps = int(latest_checkpoint)
zip_path = args.save_dir + "/" + latest_checkpoint + "/checkpoint.zip"
best_path = args.save_dir + "/best/checkpoint.zip"
pretraining = {
"latest": zip_path,
"best": best_path,
"trained_steps": timesteps,
}
print(zip_path)
# trainer = SB3HerPolicyTrainer(robo_env_fn,HER("MlpPolicy",robo_env_fn(),model_class=TD3,device="cpu",max_episode_length=100))
print(args.resume)
agent, steps = make_agent(args.agent_name, args.env, args.save_dir, json.loads(args.hyperparameters),
pretraining=pretraining, device=args.device)
os.makedirs(args.save_dir, exist_ok=True)
hyperparams = json.loads(args.hyperparameters)
run_info = {
"agent_name": args.agent_name,
"env": args.env,
"hyperparameters": hyperparams,
}
run_info_fname = os.path.join(args.save_dir, "info.json")
with open(run_info_fname, 'w') as file:
file.write(json.dumps(run_info, indent=4))
agent.train(steps, args.save_dir, save_freq=args.save_freq)
if __name__ == "__main__":
main()
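# Hypothetical invocation (directory, env and hyperparameters are illustrative):
#   python scripts/train_agent.py ./runs/ppo_cartpole SB3_ON CartPole-v1 cpu \
#       '{"ALGO": "PPO"}' --save_freq 10000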
| 41.130435
| 174
| 0.65821
| 368
| 2,838
| 4.880435
| 0.388587
| 0.03118
| 0.066258
| 0.026726
| 0.044543
| 0.034521
| 0.034521
| 0.034521
| 0.034521
| 0.034521
| 0
| 0.009821
| 0.210712
| 2,838
| 68
| 175
| 41.735294
| 0.791964
| 0.044045
| 0
| 0
| 0
| 0.017857
| 0.247879
| 0
| 0
| 0
| 0
| 0
| 0.017857
| 1
| 0.017857
| false
| 0
| 0.125
| 0
| 0.142857
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c44a6f087fd346f5832a3d385363862360f4ae8
| 447
|
py
|
Python
|
opencypher/tests/ast/test_ordering.py
|
globality-corp/opencypher
|
b60bf526fb6d5ea6c731aab867f714f3e10f629b
|
[
"Apache-2.0"
] | 6
|
2019-01-31T18:55:46.000Z
|
2020-12-02T14:53:45.000Z
|
opencypher/tests/ast/test_ordering.py
|
globality-corp/opencypher
|
b60bf526fb6d5ea6c731aab867f714f3e10f629b
|
[
"Apache-2.0"
] | 1
|
2020-12-04T00:18:20.000Z
|
2020-12-04T00:18:20.000Z
|
opencypher/tests/ast/test_ordering.py
|
globality-corp/opencypher
|
b60bf526fb6d5ea6c731aab867f714f3e10f629b
|
[
"Apache-2.0"
] | 1
|
2019-03-17T03:46:26.000Z
|
2019-03-17T03:46:26.000Z
|
from hamcrest import assert_that, equal_to, is_
from opencypher.ast import Expression, NonEmptySequence, Order, SortItem, SortOrder
def test_order():
ast = Order(
items=NonEmptySequence[SortItem](
SortItem(
expression=Expression("foo"),
order=SortOrder.DESCENDING,
),
),
)
assert_that(
str(ast),
is_(equal_to("ORDER BY foo DESCENDING")),
)
| 23.526316
| 83
| 0.590604
| 43
| 447
| 5.976744
| 0.511628
| 0.077821
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.313199
| 447
| 18
| 84
| 24.833333
| 0.837134
| 0
| 0
| 0.133333
| 0
| 0
| 0.058166
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c489fd8b4623ac06e1c59f92467d3fce08e9f03
| 1,742
|
py
|
Python
|
cricdb_data.py
|
ravi2013167/coursera-site
|
e78f10c9fa941a834f83853479ea3ee67eeacc64
|
[
"MIT"
] | null | null | null |
cricdb_data.py
|
ravi2013167/coursera-site
|
e78f10c9fa941a834f83853479ea3ee67eeacc64
|
[
"MIT"
] | null | null | null |
cricdb_data.py
|
ravi2013167/coursera-site
|
e78f10c9fa941a834f83853479ea3ee67eeacc64
|
[
"MIT"
] | null | null | null |
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from cricdb_setup import Team, Player, Base, Batsman, Bowler, Fielder, PlayerStrength, PlayerWeakness, PlayerMoment, Video
engine = create_engine('sqlite:///cricdb.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
Team1 = Team(id = 1, name='India')
session.add(Team1)
session.commit()
# Create dummy player
Player1 = Player(id = 1, team_id = 1, name="Virat Kohli", country="India", info='Born Nov 05, 1988 (28 years) Birth Place Delhi Nickname Kohli Height 5 ft 9 in (175 cm) Role Batsman Batting Style Right Handed Bat Bowling Style Right-arm medium', career='blank', batting_style='blank', bowling_style='blank',
picture='vk.jpg')
session.add(Player1)
session.commit()
# Create dummy batsman record
Batsman1 = Batsman(id=1, stance_type="front on", foot_position="front foot", shot="straight drive")
session.add(Batsman1)
session.commit()
Video1 = Video(id=1, video_type='batsman', video_name='front on front foot straight drive', video_url='google.com')
session.add(Video1)
session.commit()
print ("added menu items!")
| 40.511628
| 308
| 0.74225
| 253
| 1,742
| 5.067194
| 0.545455
| 0.050702
| 0.028081
| 0.017161
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017808
| 0.161883
| 1,742
| 42
| 309
| 41.47619
| 0.860274
| 0.32434
| 0
| 0.190476
| 0
| 0.047619
| 0.287622
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c4921ee958b3c93f23ee76186c1ec8331428083
| 1,006
|
py
|
Python
|
catalog/bindings/gmd/dq_evaluation_method_type_code_property_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/gmd/dq_evaluation_method_type_code_property_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/gmd/dq_evaluation_method_type_code_property_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass, field
from typing import Optional, Union
from bindings.gmd.dq_evaluation_method_type_code import DqEvaluationMethodTypeCode
from bindings.gmd.nil_reason_enumeration_value import NilReasonEnumerationValue
__NAMESPACE__ = "http://www.isotc211.org/2005/gmd"
@dataclass
class DqEvaluationMethodTypeCodePropertyType:
class Meta:
name = "DQ_EvaluationMethodTypeCode_PropertyType"
dq_evaluation_method_type_code: Optional[DqEvaluationMethodTypeCode] = field(
default=None,
metadata={
"name": "DQ_EvaluationMethodTypeCode",
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
nil_reason: Optional[Union[str, NilReasonEnumerationValue]] = field(
default=None,
metadata={
"name": "nilReason",
"type": "Attribute",
"namespace": "http://www.isotc211.org/2005/gco",
"pattern": r"other:\w{2,}",
},
)
| 32.451613
| 82
| 0.667992
| 94
| 1,006
| 6.946809
| 0.489362
| 0.059724
| 0.073507
| 0.11026
| 0.316998
| 0.151608
| 0.104135
| 0
| 0
| 0
| 0
| 0.028133
| 0.222664
| 1,006
| 30
| 83
| 33.533333
| 0.806905
| 0
| 0
| 0.153846
| 0
| 0
| 0.239563
| 0.0666
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c4c00831838cc942a656d3b8ca70c1fdf886a13
| 3,964
|
py
|
Python
|
spark/ReqTwisted.py
|
wensheng/spark
|
ab47107d000f0670f4cfe131637f72471a04cfb2
|
[
"MIT"
] | null | null | null |
spark/ReqTwisted.py
|
wensheng/spark
|
ab47107d000f0670f4cfe131637f72471a04cfb2
|
[
"MIT"
] | null | null | null |
spark/ReqTwisted.py
|
wensheng/spark
|
ab47107d000f0670f4cfe131637f72471a04cfb2
|
[
"MIT"
] | null | null | null |
#import time
from spark.ReqBase import ReqBase
class ReqTwisted(ReqBase):
""" specialized on Twisted requests """
def __init__(self, req, reactor, properties={}):
self.twistedreq = req
self.http_accept_language = self.twistedreq.getHeader('Accept-Language')
        # cookies gave me major problems!
self.saved_cookies={}
cookietxt = self.twistedreq.getHeader("cookie")
if cookietxt:
for c in cookietxt.split(';'):
cook = c.lstrip()
eqs=cook.find('=')
k=cook[0:eqs]
v=cook[eqs+1:]
self.saved_cookies[k] = v
self.reactor = reactor
self._have_ct = 0
self._have_status = 0
self.server_protocol = self.twistedreq.clientproto
self.server_name = self.twistedreq.getRequestHostname().split(':')[0]
self.server_port = str(self.twistedreq.getHost()[2])
self.is_ssl = self.twistedreq.isSecure()
if self.server_port != ('80', '443')[self.is_ssl]:
self.http_host = self.server_name + ':' + self.server_port
else:
self.http_host = self.server_name
#self.script_name = [v for v in self.twistedreq.prepath[:-1] if v != '']
self.script_name = [v for v in self.twistedreq.prepath if v != '']
self.path_info = [v for v in self.twistedreq.postpath if v != '']
self.request_method = self.twistedreq.method
self.remote_host = self.twistedreq.getClient()
self.remote_addr = self.twistedreq.getClientIP()
self.http_user_agent = self.twistedreq.getHeader('User-Agent')
self.request_uri = self.twistedreq.uri
self.url = self.http_host + self.request_uri # was: self.server_name + self.request_uri
qindex = self.request_uri.find('?')
if qindex != -1:
            self.query_string = self.request_uri[qindex+1:]
else:
self.query_string = ''
ReqBase.__init__(self)
def run(self):
ReqBase.run(self)
def get_form(self):
args = {}
for key,values in self.twistedreq.args.items():
if isinstance(values, list) and len(values)==1:
values = values[0]
args[key] = values
return args
def get_vars(self):
pass
def read(self, n=None):
""" Read from input stream.
"""
self.twistedreq.content.seek(0, 0)
if n is None:
rd = self.twistedreq.content.read()
else:
rd = self.twistedreq.content.read(n)
#print "request.RequestTwisted.read: data=\n" + str(rd)
return rd
def write(self, data):
for piece in data:
self.twistedreq.write(piece)
#if self.header_type == 'html':
# self.twistedreq.write(str(time.time()-self.pagestart_time))
def finish(self):
self.twistedreq.finish()
# Headers ----------------------------------------------------------
def appendHttpHeader(self, header):
self.user_headers.append(header)
def __setHttpHeader(self, header):
#if type(header) is unicode:
# header = header.encode('ascii')
key, value = header.split(':',1)
value = value.lstrip()
self.twistedreq.setHeader(key, value)
def xml_headers(self, more_headers=[]):
if getattr(self, 'sent_headers', None):
return
self.sent_headers = 1
self.__setHttpHeader("Content-type: application/rss+xml;charset=utf-8")
def http_headers(self, more_headers=[]):
if getattr(self, 'sent_headers', None):
return
self.sent_headers = 1
have_ct = 0
# set http headers
for header in more_headers + getattr(self, 'user_headers', []):
if header.lower().startswith("content-type:"):
# don't send content-type multiple times!
if have_ct: continue
have_ct = 1
self.__setHttpHeader(header)
if not have_ct:
self.__setHttpHeader("Content-type: text/html;charset=utf-8")
def redirect(self, addr):
if isinstance(addr, unicode):
addr = addr.encode('ascii')
self.twistedreq.redirect(addr)
def setResponseCode(self, code, message=None):
self.twistedreq.setResponseCode(code, message)
def get_cookie(self, coname):
return self.saved_cookies.get(coname,'')
def set_cookie(self, coname, codata, expires=None):
if expires:
self.twistedreq.addCookie(coname, codata, expires)
else:
self.twistedreq.addCookie(coname, codata)
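# --- Illustrative sketch (not part of the original class) ---
# The hand-rolled cookie parsing in __init__ behaves like the standard
# library's SimpleCookie for simple "k=v; k2=v2" headers; a standalone
# comparison under that assumption:
#
# from Cookie import SimpleCookie  # Python 2, matching this module's era
#
# cookietxt = "session=abc123; theme=dark"
# saved = {}
# for c in cookietxt.split(';'):
#     cook = c.lstrip()
#     eqs = cook.find('=')
#     saved[cook[0:eqs]] = cook[eqs + 1:]
# parsed = SimpleCookie(cookietxt)
# assert saved == dict((k, v.value) for k, v in parsed.items())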
| 29.362963
| 89
| 0.688951
| 546
| 3,964
| 4.8663
| 0.274725
| 0.142266
| 0.026346
| 0.027098
| 0.16146
| 0.114791
| 0.106887
| 0.085811
| 0.085811
| 0.085811
| 0
| 0.007494
| 0.158426
| 3,964
| 134
| 90
| 29.58209
| 0.788969
| 0.139506
| 0
| 0.102041
| 0
| 0
| 0.052926
| 0.016466
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153061
| false
| 0.010204
| 0.010204
| 0.010204
| 0.22449
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c4cbca2cb07bcccddf7a558df7b93567d90c79c
| 11,093
|
py
|
Python
|
alpha_transform/AlphaTransformUtility.py
|
michaelriedl/alpha-transform
|
add5818b168551cb0c2138c65101c9cdac2bf3d9
|
[
"MIT"
] | 13
|
2016-12-21T03:25:57.000Z
|
2022-03-15T03:25:04.000Z
|
alpha_transform/AlphaTransformUtility.py
|
michaelriedl/alpha-transform
|
add5818b168551cb0c2138c65101c9cdac2bf3d9
|
[
"MIT"
] | 4
|
2020-07-11T09:49:51.000Z
|
2021-12-03T07:07:34.000Z
|
alpha_transform/AlphaTransformUtility.py
|
michaelriedl/alpha-transform
|
add5818b168551cb0c2138c65101c9cdac2bf3d9
|
[
"MIT"
] | 7
|
2018-09-23T10:58:24.000Z
|
2021-09-05T01:13:57.000Z
|
r"""
This module contains several utility functions which can be used e.g.
for thresholding the alpha-shearlet coefficients or for using the
alpha-shearlet transform for denoising.
Finally, it also contains the functions :func:`my_ravel` and :func:`my_unravel`
which can be used to convert the alpha-shearlet coefficients into a
1-dimensional vector and back. This is in particular convenient for the
subsampled transform, where this conversion is not entirely trivial, since the
different "coefficient images" have varying dimensions.
"""
import os.path
import math
import numpy as np
import numexpr as ne
import scipy.ndimage
def find_free_file(file_template):
r"""
This function finds the first nonexistent ("free") file obtained by
"counting upwards" using the passed template/pattern.
**Required Parameter**
:param string file_template:
This should be a string whose ``format()`` method can be called
using only an integer argument, e.g. ``'/home/test_{0:0>2d}.txt'``,
which would result in ``find_free_file`` consecutively checking
the following files for existence:
`/home/test_00.txt,`
`/home/test_01.txt, ...`
**Return value**
:return:
``file_template.format(i)`` for the first value of ``i`` for which
the corresponding file does not yet exist.
"""
i = 0
while os.path.isfile(file_template.format(i)):
i += 1
return file_template.format(i)
def threshold(coeffs, thresh_value, mode):
r"""
Given a set of coefficients, this function performs a thresholding
procedure, i.e., either soft or hard thresholding.
**Required parameters**
:param coeffs:
The coefficients to be thresholded.
Either a three-dimensional :class:`numpy.ndarray` or a generator
producing two dimensional :class:`numpy.ndarray` objects.
:param float thresh_value:
The thresholding cutoff :math:`c` for the coefficients, see also
``mode`` for more details.
:param string mode:
Either ``'hard'`` or ``'soft'``. This parameter determines whether
the hard thresholding operator
.. math::
\Lambda_cx
=\begin{cases}
x, & \text{if }|x|\geq c,\\
0, & \text{if }|x|<c,
\end{cases}
or the soft thresholding operator
.. math::
\Lambda_cx
=\begin{cases}
x\cdot \frac{|x|-c}{|x|}, & \text{if }|x|\geq c,\\
0, & \text{if }|x|<c
\end{cases}
is applied to each entry of the coefficients.
**Return value**
:return:
A generator producing the thresholded coefficients. Each
thresholded "coefficient image", i.e., each thresholded
2-dimensional array, is produced in turn.
"""
if mode == 'hard':
for coeff in coeffs:
ev_string = 'coeff * (real(abs(coeff)) >= thresh_value)'
yield ne.evaluate(ev_string)
# yield coeff * (np.abs(coeff) >= thresh_value)
elif mode == 'soft':
for coeff in coeffs:
ev_string = ('(real(abs(coeff)) - thresh_value) * '
'(real(abs(coeff)) >= thresh_value)')
large_values = ne.evaluate(ev_string)
# large_values = np.maximum(np.abs(coeff) - thresh_value, 0)
ev_str_2 = 'coeff * large_values / (large_values + thresh_value)'
yield ne.evaluate(ev_str_2)
# yield coeff * large_values / (large_values + thresh_value)
else:
raise ValueError("'mode' must be 'hard' or 'soft'")
def scale_gen(trafo):
r"""
**Required parameter**
:param trafo:
An object of class :class:`AlphaTransform.AlphaShearletTransform`.
**Return value**
:return:
A generator producing integers. The i-th produced integer
is the *scale* (starting from -1 for the low-pass part) of the i-th
alpha-shearlet associated to ``trafo``.
Hence, if ``coeff = trafo.transform(im)``, then the following iteration
produces the associated scale to each "coefficient image"::
for scale, c in zip(scale_gen(trafo), coeff):
...
"""
indices_gen = iter(trafo.indices)
next(indices_gen)
yield -1
for index in indices_gen:
yield index[0]
def denoise(img, trafo, noise_lvl, multipliers=None):
r"""
Given a noisy image :math:`\tilde f`, this function performs a denoising
procedure based on shearlet thresholding. More precisely:
#. A scale dependent threshold parameter :math:`c=(c_j)_j` is calculated
according to :math:`c_j=m_j\cdot \lambda / \sqrt{N_1\cdot N_2}`, where
:math:`m_j` is a multiplier for the jth scale, :math:`\lambda` is the
noise level present in the image :math:`\tilde f` and
:math:`N_1\times N_2` are its dimensions.
#. The alpha-shearlet transform of :math:`\tilde f` is calculated
using ``trafo``.
#. Hard thesholding with threshold parameter (cutoff) :math:`c` is
performed on alpha-shearlet coefficients, i.e., for each scale ``j``,
each of the coefficients belonging to the jth scale is set to zero if
its absolute value is smaller than :math:`c_j` and otherwise it is
left unchanged.
#. The (pseudo)-inverse of the alpha-shearlet transform is applied to the
thresholded coefficients and this reconstruction is the return value
of the function.
**Required parameters**
:param numpy.ndarray img:
The “image” (2 dimensional array) that should be denoised.
:param trafo:
An object of class :class:`AlphaTransform.AlphaShearletTransform`.
This object is used to calculate the (inverse) alpha-shearlet
transform during the denoising procedure.
The dimension of the transform and of ``img`` need to coincide.
:param float noise_lvl:
The (presumed) noise level present in ``img``.
If ``img = img_clean + noise``, then ``noise_lvl`` should be
approximately equal to the :math:`\ell^2` norm of ``noise``.
In particular, if ``im`` is obtained by adding Gaussian noise with
standard deviation :math:`\sigma` (in each entry) to a noise free
image :math:`f`, then the noise level :math:`\lambda` is given by
:math:`\lambda= \sigma\cdot \sqrt{N_1\cdot N_2}`; see also
:func:`AdaptiveAlpha.optimize_denoising`.
**Keyword parameter**
:param list multipliers:
A list of multipliers (floats) for each scale. ``multipliers[j]``
determines the value of :math:`m_j` and thus of the cutoff
:math:`c_j = m_j \cdot \lambda / \sqrt{N_1 \cdot N_2}` for scale ``j``.
In particular, ``len(multipliers)`` needs
to be equal to the number of the scales of ``trafo``.
**Return value**
:return:
The denoised image, i.e., the result of the denoising procedure
described above.
"""
coeff_gen = trafo.transform_generator(img, do_norm=True)
if multipliers is None:
# multipliers = [1] + ([2.5] * (trafo.num_scales - 1)) + [5]
multipliers = [3] * trafo.num_scales + [4]
width = trafo.width
height = trafo.height
thresh_lvls = [multi * noise_lvl / math.sqrt(width * height)
for multi in multipliers]
thresh_coeff = (coeff * (np.abs(coeff) >= thresh_lvls[scale + 1])
for (coeff, scale) in zip(coeff_gen, scale_gen(trafo)))
recon = trafo.inverse_transform(thresh_coeff, real=True, do_norm=True)
return recon
def image_load(path):
r"""
Given a '.npy' or '.png' file, this function loads the file and returns
its content as a two-dimensional :class:`numpy.ndarray` of :class:`float`
values.
For '.png' images, the pixel values are normalized to be between 0 and 1
(instead of between 0 and 255) and color images are converted to
grey-scale.
**Required parameter**
:param string path:
Path to the image to be converted, either of a '.png' or '.npy' file.
**Return value**
:return:
The loaded image as a two-dimensional :class:`numpy.ndarray`.
"""
image_extension = path[path.rfind('.'):]
if image_extension == '.npy':
return np.array(np.load(path), dtype='float64')
elif image_extension == '.png':
return np.array(scipy.ndimage.imread(path, flatten=True) / 255.0,
dtype='float64')
else:
raise ValueError("This function can only load .png or .npy files.")
def _print_listlist(listlist):
for front, back, l in zip(['['] + ([' '] * (len(listlist) - 1)),
([''] * (len(listlist) - 1)) + [']'],
listlist):
print(front + str(l) + back)
def my_ravel(coeff):
r"""
The subsampled alpha-shearlet transform returns a list of differently
sized(!) two-dimensional arrays. Likewise, the fully sampled transform
yields a three dimensional numpy array containing the coefficients.
The present function can be used (in both cases) to convert this list into
a single *one-dimensional* numpy array.
.. note::
In order to invert this conversion to a one-dimensional array,
use the associated function :func:`my_unravel`. Precisely,
:func:`my_unravel` satisfies
``my_unravel(my_trafo, my_ravel(coeff)) == coeff``,
if coeff is obtained from calling ``my_trafo.transform(im)``
for some image ``im``.
The preceding equality holds at least up to (negligible)
differences (the left-hand side is a generator while the
right-hand side could also be a list).
**Required parameter**
:param list coeff:
A list (or a generator) containing/producing two-dimensional
numpy arrays.
**Return value**
:return:
A one-dimensional :class:`numpy.ndarray` from which **coeff** can
be reconstructed.
"""
return np.concatenate([c.ravel() for c in coeff])
def my_unravel(trafo, coeff):
r"""
This method is a companion method to :func:`my_ravel`.
See the documentation of that function for more details.
**Required parameters**
:param trafo:
An object of class :class:`AlphaTransform.AlphaShearletTransform`.
:param numpy.ndarray coeff:
A one-dimensional numpy array, obtained via
``my_ravel(coeff_unrav)``, where ``coeff_unrav`` is of the same
dimensions as the output of ``trafo.transform(im)``, where
``im`` is an image.
**Return value**
:return:
A generator producing the same values as ``coeff_unrav``, i.e.,
an "unravelled" version of ``coeff``.
"""
coeff_sizes = [spec.shape for spec in trafo.spectrograms]
split_points = np.cumsum([spec.size for spec in trafo.spectrograms])
return (c.reshape(size)
for size, c in zip(coeff_sizes, np.split(coeff, split_points)))
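if __name__ == '__main__':
    # Minimal self-contained round-trip check for my_ravel / my_unravel.
    # '_FakeTrafo' is a stand-in defined only for this demo; a real call
    # would use an AlphaTransform.AlphaShearletTransform instance.
    class _FakeTrafo:
        def __init__(self, shapes):
            self.spectrograms = [np.zeros(shape) for shape in shapes]

    demo_trafo = _FakeTrafo([(4, 4), (2, 8), (3, 3)])
    demo_coeff = [np.full(s.shape, i, dtype='float64')
                  for i, s in enumerate(demo_trafo.spectrograms)]
    roundtrip = my_unravel(demo_trafo, my_ravel(demo_coeff))
    assert all(np.array_equal(a, b) for a, b in zip(roundtrip, demo_coeff))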
| 34.557632
| 79
| 0.63166
| 1,474
| 11,093
| 4.683853
| 0.244912
| 0.007242
| 0.017236
| 0.020278
| 0.148175
| 0.11066
| 0.090238
| 0.058082
| 0.045771
| 0.017092
| 0
| 0.006473
| 0.261877
| 11,093
| 320
| 80
| 34.665625
| 0.836712
| 0.694041
| 0
| 0.056338
| 0
| 0
| 0.102374
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.112676
| false
| 0
| 0.070423
| 0
| 0.267606
| 0.028169
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c50ce676f3a6dc75c4d1900f6d996ce7fd69ed7
| 2,692
|
py
|
Python
|
tests/provider/dwd/radar/test_api_latest.py
|
waltherg/wetterdienst
|
3c5c63b5b8d3e19511ad789bb499bdaa9b1976d9
|
[
"MIT"
] | 1
|
2021-09-01T12:53:09.000Z
|
2021-09-01T12:53:09.000Z
|
tests/provider/dwd/radar/test_api_latest.py
|
waltherg/wetterdienst
|
3c5c63b5b8d3e19511ad789bb499bdaa9b1976d9
|
[
"MIT"
] | null | null | null |
tests/provider/dwd/radar/test_api_latest.py
|
waltherg/wetterdienst
|
3c5c63b5b8d3e19511ad789bb499bdaa9b1976d9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
import re
from datetime import datetime
import pytest
from tests.provider.dwd.radar import station_reference_pattern_unsorted
from wetterdienst.provider.dwd.radar import DwdRadarValues
from wetterdienst.provider.dwd.radar.metadata import DwdRadarDate, DwdRadarParameter
from wetterdienst.provider.dwd.radar.sites import DwdRadarSite
from wetterdienst.util.datetime import round_minutes
@pytest.mark.xfail(reason="Out of service", strict=True)
@pytest.mark.remote
def test_radar_request_composite_latest_rx_reflectivity():
"""
Example for testing radar COMPOSITES latest.
"""
request = DwdRadarValues(
parameter=DwdRadarParameter.RX_REFLECTIVITY,
start_date=DwdRadarDate.LATEST,
)
buffer = next(request.query())[1]
payload = buffer.getvalue()
month_year = datetime.utcnow().strftime("%m%y")
header = (
f"RX......10000{month_year}BY 8101..VS 3SW ......PR E\\+00INT 5GP 900x 900MS " # noqa:E501,B950
f"..<{station_reference_pattern_unsorted}>" # noqa:E501,B950
)
assert re.match(bytes(header, encoding="ascii"), payload[:160])
@pytest.mark.remote
def test_radar_request_composite_latest_rw_reflectivity():
"""
Example for testing radar COMPOSITES (RADOLAN) latest.
"""
request = DwdRadarValues(
parameter=DwdRadarParameter.RW_REFLECTIVITY,
start_date=DwdRadarDate.LATEST,
)
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
buffer = results[0][1]
payload = buffer.getvalue()
month_year = datetime.utcnow().strftime("%m%y")
header = (
f"RW......10000{month_year}"
f"BY16201..VS 3SW ......PR E-01INT 60GP 900x 900MF 00000001MS "
f"..<{station_reference_pattern_unsorted}>"
)
assert re.match(bytes(header, encoding="ascii"), payload[:160])
@pytest.mark.remote
def test_radar_request_site_latest_dx_reflectivity():
"""
Example for testing radar SITES latest.
"""
request = DwdRadarValues(
parameter=DwdRadarParameter.DX_REFLECTIVITY,
start_date=DwdRadarDate.LATEST,
site=DwdRadarSite.BOO,
)
buffer = next(request.query())[1]
payload = buffer.getvalue()
timestamp_aligned = round_minutes(datetime.utcnow(), 5)
month_year = timestamp_aligned.strftime("%m%y")
header = f"DX......10132{month_year}BY.....VS 2CO0CD4CS0EP0.80.80.80.80.80.80.80.8MS" # noqa:E501,B950
assert re.match(bytes(header, encoding="ascii"), payload[:160])
| 30.247191
| 108
| 0.69688
| 324
| 2,692
| 5.657407
| 0.385802
| 0.02946
| 0.016367
| 0.017458
| 0.588652
| 0.322968
| 0.267321
| 0.267321
| 0.231315
| 0.195854
| 0
| 0.051942
| 0.177563
| 2,692
| 88
| 109
| 30.590909
| 0.775971
| 0.120357
| 0
| 0.425926
| 0
| 0.018519
| 0.16789
| 0.088045
| 0
| 0
| 0
| 0
| 0.055556
| 1
| 0.055556
| false
| 0
| 0.148148
| 0
| 0.203704
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c5274b4da8bf2db8410e4efcd81dcd874ad4000
| 710
|
py
|
Python
|
tests/conftest.py
|
transferwise/cloudflare-exporter
|
d5efd4e9068bf9896a16ec6913d3345e3754d7c8
|
[
"Apache-2.0"
] | 1
|
2021-08-06T15:09:26.000Z
|
2021-08-06T15:09:26.000Z
|
tests/conftest.py
|
transferwise/cloudflare-exporter
|
d5efd4e9068bf9896a16ec6913d3345e3754d7c8
|
[
"Apache-2.0"
] | 16
|
2021-09-20T04:10:29.000Z
|
2022-03-14T04:26:01.000Z
|
tests/conftest.py
|
transferwise/cloudflare-exporter
|
d5efd4e9068bf9896a16ec6913d3345e3754d7c8
|
[
"Apache-2.0"
] | 2
|
2021-08-21T18:48:15.000Z
|
2021-11-19T16:52:25.000Z
|
# -*- coding: utf-8 -*-
import pytest
import json
from pathlib import Path
@pytest.fixture(scope="session")
def accounts_httpRequests1hGroupsFixture():
with open("tests/data/accounts/httpRequests1hGroups.json") as data:
res = json.load(data)
return res
@pytest.fixture(scope="session")
def zones_httpRequests1hGroupsFixture():
with open("tests/data/zones/httpRequests1hGroups.json") as data:
res = json.load(data)
return res
@pytest.fixture(scope="session")
def test_fixture():
with open("cloudflare_exporter/gql/accounts.httpRequests1hGroups.graphql") as data:
# query = data.read()
query = "".join(line.rstrip().lstrip() for line in data)
return query
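# Hypothetical example of consuming one of the fixtures above in a test
# (illustrative only; the assertion depends on the JSON under tests/data):
#
# def test_accounts_payload(accounts_httpRequests1hGroupsFixture):
#     assert accounts_httpRequests1hGroupsFixture  # parsed JSON is non-empty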
| 26.296296
| 87
| 0.712676
| 86
| 710
| 5.837209
| 0.44186
| 0.077689
| 0.095618
| 0.119522
| 0.501992
| 0.501992
| 0.501992
| 0.278884
| 0.278884
| 0.278884
| 0
| 0.010152
| 0.167606
| 710
| 26
| 88
| 27.307692
| 0.839256
| 0.057746
| 0
| 0.388889
| 0
| 0
| 0.253754
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c5353e05ae0337f97754129d22ee251e890227f
| 4,529
|
py
|
Python
|
scripts/delay_analysis.py
|
welvin21/pysimt
|
6250b33dc518b3195da4fc9cc8d32ba7ada958c0
|
[
"MIT"
] | 34
|
2020-09-21T10:49:57.000Z
|
2022-01-08T04:50:42.000Z
|
scripts/delay_analysis.py
|
welvin21/pysimt
|
6250b33dc518b3195da4fc9cc8d32ba7ada958c0
|
[
"MIT"
] | 2
|
2021-01-08T03:52:51.000Z
|
2021-09-10T07:45:05.000Z
|
scripts/delay_analysis.py
|
welvin21/pysimt
|
6250b33dc518b3195da4fc9cc8d32ba7ada958c0
|
[
"MIT"
] | 5
|
2021-04-23T09:30:51.000Z
|
2022-01-09T08:40:45.000Z
|
#!/usr/bin/env python
import os
import sys
import glob
import argparse
from pathlib import Path
from collections import defaultdict
from hashlib import sha1
import numpy as np
import sacrebleu
import tabulate
from pysimt.metrics.simnmt import AVPScorer, AVLScorer, CWMScorer, CWXScorer
"""This script should be run from within the parent folder where each pysimt
experiment resides."""
def read_lines_from_file(fname):
lines = []
with open(fname) as f:
for line in f:
lines.append(line.strip())
return lines
def compute_bleu(fname, refs):
hyps = open(fname).read()
hashsum = sha1(hyps.encode('utf-8')).hexdigest()
parent = fname.parent
cached_bleu = parent / f'.{fname.name}__{hashsum}'
if os.path.exists(cached_bleu):
return float(open(cached_bleu).read().strip().split()[2])
else:
bleu = sacrebleu.corpus_bleu(
hyps.strip().split('\n'), refs, tokenize='none')
with open(cached_bleu, 'w') as f:
f.write(bleu.format() + '\n')
return float(bleu.format().split()[2])
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog='delay-analysis',
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Compute delay metrics for multiple runs",
argument_default=argparse.SUPPRESS)
parser.add_argument('-r', '--ref-file', required=True, type=str,
help='The reference file for BLEU evaluation.')
parser.add_argument('act_files', nargs='+',
help='List of action files')
args = parser.parse_args()
refs = [read_lines_from_file(args.ref_file)]
test_set = Path(args.ref_file).name.split('.')[0]
results = {}
# Automatically fetch .acts files
acts = [Path(p) for p in args.act_files]
# unique experiments i.e. nmt and mmt for example
exps = set([p.parent for p in acts])
scorers = [
AVPScorer(add_trg_eos=False),
AVLScorer(add_trg_eos=False),
#CWMScorer(add_trg_eos=False),
#CWXScorer(add_trg_eos=False),
]
for exp in exps:
# get actions for this experiment
exp_acts = [p for p in acts if p.parent == exp]
parts = [p.name.split('.') for p in exp_acts]
# different run prefixes
runs = list(set([p[0] for p in parts]))
    # type of decodings, i.e. wait-if-diff, waitk, etc.
types = list(set([p[2] for p in parts]))
# Evaluate baseline consecutive systems as well
baseline_bleus = []
for run in runs:
hyp_fname = f'{exp}/{run}.{test_set}.gs'
if os.path.exists(hyp_fname):
bleu = compute_bleu(Path(hyp_fname), refs)
baseline_bleus.append(bleu)
else:
baseline_bleus.append(-1)
results[exp.name] = {m.name: '0' for m in scorers}
results[exp.name]['Q2AVP'] = '0'
baseline_bleus = np.array(baseline_bleus)
results[exp.name]['BLEU'] = f'{baseline_bleus.mean():2.2f} ({baseline_bleus.std():.4f})'
# Evaluate each decoding type and keep multiple run scores
for typ in types:
scores = defaultdict(list)
for run in runs:
act_fname = f'{exp}/{run}.{test_set}.{typ}.acts'
hyp_fname = f'{exp}/{run}.{test_set}.{typ}.gs'
# Compute BLEU
bleu = compute_bleu(Path(hyp_fname), refs)
scores['BLEU'].append(bleu)
if os.path.exists(act_fname):
# Compute delay metrics
run_scores = [s.compute_from_file(act_fname) for s in scorers]
for sc in run_scores:
scores[sc.name].append(sc.score)
                    scores['Q2AVP'].append(bleu / scores['AVP'][-1])
# aggregate
scores = {k: np.array(v) for k, v in scores.items()}
means = {k: v.mean() for k, v in scores.items()}
sdevs = {k: v.std() for k, v in scores.items()}
str_scores = {m: f'{means[m]:4.2f} ({sdevs[m]:.2f})' for m in scores.keys()}
results[f'{exp.name}_{typ}'] = str_scores
headers = ['Name'] + [sc.name for sc in scorers] + ['BLEU', 'Q2AVP']
results = [[name, *[scores[key] for key in headers[1:]]] for name, scores in results.items()]
# alphabetical sort
results = sorted(results, key=lambda x: x[0].rsplit('_', 1)[-1])
# print
print(tabulate.tabulate(results, headers=headers))
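# Hypothetical invocation (reference file and action-file globs are illustrative):
#   python scripts/delay_analysis.py -r data/test2017.de nmt/*.acts mmt/*.acts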
| 34.310606
| 97
| 0.59108
| 600
| 4,529
| 4.345
| 0.31
| 0.034906
| 0.013809
| 0.021481
| 0.070963
| 0.070963
| 0.050249
| 0
| 0
| 0
| 0
| 0.007643
| 0.277766
| 4,529
| 131
| 98
| 34.572519
| 0.789361
| 0.095827
| 0
| 0.068182
| 0
| 0
| 0.103743
| 0.042452
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022727
| false
| 0
| 0.125
| 0
| 0.181818
| 0.011364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c56a8517956b8fdd74335b60fe24a921ed77b5c
| 3,713
|
py
|
Python
|
canvas_course_site_wizard/views.py
|
Harvard-University-iCommons/django-canvas-course-site-wizard
|
0210849e959407e5a850188f50756eb69b9a4dc2
|
[
"MIT"
] | null | null | null |
canvas_course_site_wizard/views.py
|
Harvard-University-iCommons/django-canvas-course-site-wizard
|
0210849e959407e5a850188f50756eb69b9a4dc2
|
[
"MIT"
] | 5
|
2018-05-10T19:49:43.000Z
|
2021-01-29T19:39:34.000Z
|
canvas_course_site_wizard/views.py
|
Harvard-University-iCommons/django-canvas-course-site-wizard
|
0210849e959407e5a850188f50756eb69b9a4dc2
|
[
"MIT"
] | null | null | null |
import logging
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView
from django.shortcuts import redirect
from .controller import (
create_canvas_course,
start_course_template_copy,
finalize_new_canvas_course,
get_canvas_course_url
)
from .mixins import CourseSiteCreationAllowedMixin
from icommons_ui.mixins import CustomErrorPageMixin
from .exceptions import NoTemplateExistsForSchool
from .models import CanvasCourseGenerationJob
from braces.views import LoginRequiredMixin
logger = logging.getLogger(__name__)
class CanvasCourseSiteCreateView(LoginRequiredMixin, CourseSiteCreationAllowedMixin, CustomErrorPageMixin, TemplateView):
"""
Serves up the canvas course site creation wizard on GET and creates the
course site on POST.
"""
template_name = "canvas_course_site_wizard/canvas_wizard.html"
# This is currently the project-level 500 error page, which has RenderableException logic
custom_error_template_name = "500.html"
def post(self, request, *args, **kwargs):
sis_course_id = self.object.pk
sis_user_id = 'sis_user_id:%s' % request.user.username
# we modified create_canvas_course to return two params when it's called as part of
# the single course creation. This is so we can keep track of the job_id
        # for the newly created job record. There's probably a better way to
        # handle this, but for now it works
course, course_job_id = create_canvas_course(sis_course_id, request.user.username)
try:
course_generation_job = start_course_template_copy(self.object, course['id'],
request.user.username, course_job_id=course_job_id)
return redirect('ccsw-status', course_generation_job.pk)
except NoTemplateExistsForSchool:
# If there is no template to copy, immediately finalize the new course
# (i.e. run through remaining post-async job steps)
course_url = finalize_new_canvas_course(course['id'], sis_course_id, sis_user_id)
job = CanvasCourseGenerationJob.objects.get(pk=course_job_id)
job.update_workflow_state(CanvasCourseGenerationJob.STATUS_FINALIZED)
return redirect(course_url)
class CanvasCourseSiteStatusView(LoginRequiredMixin, DetailView):
""" Displays status of course creation job, including progress and result of template copy and finalization """
template_name = "canvas_course_site_wizard/status.html"
model = CanvasCourseGenerationJob
context_object_name = 'content_migration_job'
def get_context_data(self, **kwargs):
"""
get_context_data allows us to pass additional values to the view. In this case we are passing in:
- the canvas course url for a successfully completed job (or None if it hasn't successfully completed)
- simplified job progress status indicators for the template to display success/failure messages
"""
context = super(CanvasCourseSiteStatusView, self).get_context_data(**kwargs)
logger.debug('Rendering status page for course generation job %s' % self.object)
context['canvas_course_url'] = get_canvas_course_url(canvas_course_id=self.object.canvas_course_id)
context['job_failed'] = self.object.workflow_state in [
CanvasCourseGenerationJob.STATUS_FAILED,
CanvasCourseGenerationJob.STATUS_SETUP_FAILED,
CanvasCourseGenerationJob.STATUS_FINALIZE_FAILED
]
context['job_succeeded'] = self.object.workflow_state in [CanvasCourseGenerationJob.STATUS_FINALIZED]
return context
| 51.569444
| 121
| 0.736062
| 447
| 3,713
| 5.897092
| 0.369128
| 0.063733
| 0.022762
| 0.016692
| 0.088771
| 0.068285
| 0.042489
| 0
| 0
| 0
| 0
| 0.002028
| 0.20307
| 3,713
| 71
| 122
| 52.295775
| 0.888814
| 0.25882
| 0
| 0
| 0
| 0
| 0.085416
| 0.038046
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042553
| false
| 0
| 0.212766
| 0
| 0.468085
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c58884fde7690dcd1123dcef567073872ba2ad9
| 7,389
|
py
|
Python
|
brie/utils/count.py
|
huangyh09/brie
|
59563baafcdb95d1d75a81203e5cc29983f66c2f
|
[
"Apache-2.0"
] | 38
|
2017-01-06T00:18:46.000Z
|
2022-01-25T19:44:10.000Z
|
brie/utils/count.py
|
huangyh09/brie
|
59563baafcdb95d1d75a81203e5cc29983f66c2f
|
[
"Apache-2.0"
] | 28
|
2017-01-11T09:12:57.000Z
|
2022-02-14T14:53:48.000Z
|
brie/utils/count.py
|
huangyh09/brie
|
59563baafcdb95d1d75a81203e5cc29983f66c2f
|
[
"Apache-2.0"
] | 12
|
2018-02-13T20:23:00.000Z
|
2022-01-05T18:39:19.000Z
|
import sys
import numpy as np
from .sam_utils import load_samfile, fetch_reads
def _check_SE_event(gene):
"""Check SE event"""
if (len(gene.trans) != 2 or
gene.trans[0].exons.shape[0] != 3 or
gene.trans[1].exons.shape[0] != 2 or
np.mean(gene.trans[0].exons[[0, 2], :] ==
gene.trans[1].exons) != 1):
return False
else:
return True
def _get_segment(exons, read):
"""Get the length of segments by devidinig a read into exons.
The segments include one for each exon and two edges.
"""
if read is None:
return None
_seglens = [0] * (exons.shape[0] + 2)
_seglens[0] = np.sum(read.positions < exons[0, 0])
_seglens[-1] = np.sum(read.positions > exons[-1, -1])
for i in range(exons.shape[0]):
_seglens[i + 1] = np.sum(
(read.positions >= exons[i, 0]) * (read.positions <= exons[i, 1]))
return _seglens
def check_reads_compatible(transcript, reads, edge_hang=10, junc_hang=2):
"""Check if reads are compatible with a transcript
"""
is_compatible = [True] * len(reads)
for i in range(len(reads)):
_segs = _get_segment(transcript.exons, reads[i])
# check mismatch to regions not in this transcript
if len(reads[i].positions) - sum(_segs) >= junc_hang:
is_compatible[i] = False
continue
# check if edge hang is too short
if (_segs[0] > 0 or _segs[-1] > 0) and sum(_segs[1:-1]) < edge_hang:
is_compatible[i] = False
continue
# check if exon has been skipped
if len(_segs) > 4:
for j in range(2, len(_segs) - 2):
if (_segs[j-1] >= junc_hang and _segs[j+1] >= junc_hang and
transcript.exons[j-1, 1] - transcript.exons[j-1, 0] -
_segs[j] >= junc_hang):
is_compatible[i] = False
break
return np.array(is_compatible)
def SE_reads_count(gene, samFile, edge_hang=10, junc_hang=2, **kwargs):
"""Count the categorical reads mapped to a splicing event
rm_duplicate=True, inner_only=True,
mapq_min=0, mismatch_max=5, rlen_min=1, is_mated=True
"""
# Check SE event
    if not _check_SE_event(gene):
        print("This is not an exon-skipping event!")
        exit()
# Fetch reads (TODO: customise fetch_reads function, e.g., FLAG)
reads = fetch_reads(samFile, gene.chrom, gene.start, gene.stop, **kwargs)
# Check reads compatible
is_isoform1 = check_reads_compatible(gene.trans[0], reads["reads1"])
is_isoform2 = check_reads_compatible(gene.trans[1], reads["reads1"])
if len(reads["reads2"]) > 0:
is_isoform1 *= check_reads_compatible(gene.trans[0], reads["reads2"])
is_isoform2 *= check_reads_compatible(gene.trans[1], reads["reads2"])
is_isoform1 = np.append(is_isoform1,
check_reads_compatible(gene.trans[0], reads["reads1u"]))
is_isoform2 = np.append(is_isoform2,
check_reads_compatible(gene.trans[1], reads["reads1u"]))
is_isoform1 = np.append(is_isoform1,
check_reads_compatible(gene.trans[0], reads["reads2u"]))
is_isoform2 = np.append(is_isoform2,
check_reads_compatible(gene.trans[1], reads["reads2u"]))
# return Reads matrix
Rmat = np.zeros((len(is_isoform1), 2), dtype=bool)
Rmat[:, 0] = is_isoform1
Rmat[:, 1] = is_isoform2
return Rmat
def get_count_matrix(genes, sam_file, sam_num, edge_hang=10, junc_hang=2):
samFile = load_samfile(sam_file)
RV = []
for g in range(len(genes)):
        _Rmat = SE_reads_count(genes[g], samFile, edge_hang=edge_hang,
            junc_hang=junc_hang, rm_duplicate=True, inner_only=False,
            mapq_min=0, mismatch_max=5, rlen_min=1, is_mated=True)
if _Rmat.shape[0] == 0:
continue
K = 2**(np.arange(_Rmat.shape[1]))
code_id, code_cnt = np.unique(np.dot(_Rmat, K), return_counts=True)
count_dict = {}
for i in range(len(code_id)):
count_dict["%d" %(code_id[i])] = code_cnt[i]
RV.append("%d\t%d\t%s" %(sam_num + 1, g + 1, str(count_dict)))
RV_line = ""
if len(RV) > 0:
RV_line = "\n".join(RV) + "\n"
return RV_line
def SE_probability(gene, rlen=75, edge_hang=10, junc_hang=2):
"""Get read categorical probability of each isoform.
    In an exon-skipping (SE) event, there are two isoforms:
    isoform1 for exon inclusion and isoform2 for exon exclusion.
    Here, we only treat single-end reads. For paired-end reads,
    we treat them as single-end by using only the most informative
    mate, namely the mate mapped to the least number of isoform(s).
isoform1: l1 + l2 + l3 + rlen - 2 * edge_hang
p1: l2 + rlen - 2 * junc_hang
p3: l1 + l3 - 2 * edge_hang + 2 * junc_hang
isoform2: l1 + l3 + rlen - 2 * edge_hang
p1: rlen - 2 * junc_hang
p3: l1 + l3 - 2 * edge_hang + 2 * junc_hang
"""
# check SE event
    if not _check_SE_event(gene):
        print("This is not an exon-skipping event: %s!" % gene.geneID)
        exit()
l1, l2, l3 = gene.trans[0].exons[:, 1] - gene.trans[0].exons[:, 0]
prob_mat = np.zeros((2, 3))
# Isoform 1
len_isoform1 = l1 + l2 + l3 + rlen - 2 * edge_hang
prob_mat[0, 0] = (l2 + rlen - 2 * junc_hang) / len_isoform1
prob_mat[0, 2] = (l1 + l3 - 2 * edge_hang + 2 * junc_hang) / len_isoform1
# Isoform 2
len_isoform2 = l1 + l3 + rlen - 2 * edge_hang
prob_mat[1, 1] = (rlen - 2 * junc_hang) / len_isoform2
prob_mat[1, 2] = (l1 + l3 - 2 * edge_hang + 2 * junc_hang) / len_isoform2
return prob_mat
def SE_effLen(gene, rlen=75, edge_hang=10, junc_hang=2):
"""Get effective length matrix for three read categories from two isoforms.
    In an exon-skipping (SE) event, there are two isoforms:
    isoform1 for exon inclusion and isoform2 for exon exclusion,
    and three read groups:
        group1: uniquely from isoform1
        group2: uniquely from isoform2
        group3: ambiguous identity
    Here, we only treat single-end reads. For paired-end reads,
    we treat them as single-end by using only the most informative
    mate, namely the mate mapped to the least number of isoform(s).
isoform1: l1 + l2 + l3 + rlen - 2 * edge_hang
read group1: l2 + rlen - 2 * junc_hang
read group3: l1 + l3 - 2 * edge_hang + 2 * junc_hang
isoform2: l1 + l3 + rlen - 2 * edge_hang
read group2: rlen - 2 * junc_hang
read group3: l1 + l3 - 2 * edge_hang + 2 * junc_hang
"""
# check SE event
    if not _check_SE_event(gene):
        print("This is not an exon-skipping event: %s!" % gene.geneID)
        exit()
l1, l2, l3 = gene.trans[0].exons[:, 1] - gene.trans[0].exons[:, 0]
isoLen_mat = np.zeros((2, 3))
# isoform length
len_isoform1 = l1 + l2 + l3 + rlen - 2 * edge_hang
len_isoform2 = l1 + l3 + rlen - 2 * edge_hang
# segments
isoLen_mat[0, 0] = l2 + rlen - 2 * junc_hang
isoLen_mat[1, 1] = rlen - 2 * junc_hang
isoLen_mat[0, 2] = l1 + l3 - 2 * edge_hang + 2 * junc_hang
isoLen_mat[1, 2] = l1 + l3 - 2 * edge_hang + 2 * junc_hang
# prob_mat = isoLen_mat / isoLen_mat.sum(1, keepdims=True)
return isoLen_mat
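A quick way to sanity-check the SE_probability formulas above is to recompute them for made-up exon lengths and confirm that each isoform's three category probabilities sum to one. The lengths below are hypothetical, chosen only for illustration:

import numpy as np

# Hypothetical exon lengths and read-length parameters for illustration only.
l1, l2, l3 = 120, 90, 150
rlen, edge_hang, junc_hang = 75, 10, 2

len_iso1 = l1 + l2 + l3 + rlen - 2 * edge_hang
len_iso2 = l1 + l3 + rlen - 2 * edge_hang

prob_mat = np.zeros((2, 3))
prob_mat[0, 0] = (l2 + rlen - 2 * junc_hang) / len_iso1
prob_mat[0, 2] = (l1 + l3 - 2 * edge_hang + 2 * junc_hang) / len_iso1
prob_mat[1, 1] = (rlen - 2 * junc_hang) / len_iso2
prob_mat[1, 2] = (l1 + l3 - 2 * edge_hang + 2 * junc_hang) / len_iso2

# Each isoform's category probabilities should sum to 1.
assert np.allclose(prob_mat.sum(axis=1), 1.0)
print(prob_mat)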
| 35.354067
| 79
| 0.603194
| 1,100
| 7,389
| 3.881818
| 0.167273
| 0.048712
| 0.033724
| 0.044965
| 0.568852
| 0.532084
| 0.48103
| 0.458548
| 0.418735
| 0.362529
| 0
| 0.046395
| 0.27365
| 7,389
| 208
| 80
| 35.524038
| 0.749208
| 0.278116
| 0
| 0.224299
| 0
| 0
| 0.040522
| 0
| 0
| 0
| 0
| 0.004808
| 0
| 1
| 0.065421
| false
| 0
| 0.028037
| 0
| 0.17757
| 0.028037
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c5c369d85c41ace1c62ddc67471055b462a3df1
| 1,527
|
py
|
Python
|
ledshimdemo/display_options.py
|
RatJuggler/led-shim-effects
|
3c63f5f2ce3f35f52e784489deb9212757c18cd2
|
[
"MIT"
] | 1
|
2021-04-17T16:18:14.000Z
|
2021-04-17T16:18:14.000Z
|
ledshimdemo/display_options.py
|
RatJuggler/led-shim-effects
|
3c63f5f2ce3f35f52e784489deb9212757c18cd2
|
[
"MIT"
] | 12
|
2019-07-26T18:01:56.000Z
|
2019-08-31T15:35:17.000Z
|
ledshimdemo/display_options.py
|
RatJuggler/led-shim-demo
|
3c63f5f2ce3f35f52e784489deb9212757c18cd2
|
[
"MIT"
] | null | null | null |
import click
from typing import Callable, List
from .effect_parade import AbstractEffectParade
DISPLAY_OPTIONS = [
click.option('-p', '--parade', type=click.Choice(AbstractEffectParade.get_parade_options()),
help="How the effects are displayed.", default=AbstractEffectParade.get_default_option(),
show_default=True),
click.option('-d', '--duration', type=click.IntRange(1, 180),
help="How long to display each effect for, in seconds (1-180).", default=10, show_default=True),
click.option('-r', '--repeat', type=click.IntRange(1, 240),
help="How many times to run the effects before stopping (1-240).", default=1, show_default=True),
click.option('-b', '--brightness', type=click.IntRange(1, 10),
help="How bright the effects will be (1-10).", default=8, show_default=True),
click.option('-i', '--invert', is_flag=True, help="Change the display orientation.")
]
def add_options(options: List[Callable]) -> Callable:
"""
Create a decorator to apply Click options to a function.
:param options: Click options to be applied
:return: Decorator function
"""
def _add_options(func: Callable):
"""
Apply click options to the supplied function.
:param func: To add click options to.
:return: The function with the click options added.
"""
for option in reversed(options):
func = option(func)
return func
return _add_options
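A minimal sketch of how the decorator is meant to be used in this module; the command name demo and its body are hypothetical:

@click.command()
@add_options(DISPLAY_OPTIONS)
def demo(parade, duration, repeat, brightness, invert):
    # Click injects each declared option as a keyword argument.
    click.echo("parade=%s duration=%d repeat=%d brightness=%d invert=%s"
               % (parade, duration, repeat, brightness, invert))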
| 41.27027
| 114
| 0.64833
| 194
| 1,527
| 5.020619
| 0.371134
| 0.067762
| 0.061602
| 0.082136
| 0.106776
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022053
| 0.227898
| 1,527
| 36
| 115
| 42.416667
| 0.804071
| 0.172888
| 0
| 0
| 0
| 0
| 0.224167
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.142857
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c5e382a6852be827146dfca1422cff18cd4ad2e
| 587
|
py
|
Python
|
download_data_folder.py
|
MelvinYin/Defined_Proteins
|
75da20be82a47d85d27176db29580ab87d52b670
|
[
"BSD-3-Clause"
] | 2
|
2021-01-05T02:55:57.000Z
|
2021-04-16T15:49:08.000Z
|
download_data_folder.py
|
MelvinYin/Defined_Proteins
|
75da20be82a47d85d27176db29580ab87d52b670
|
[
"BSD-3-Clause"
] | null | null | null |
download_data_folder.py
|
MelvinYin/Defined_Proteins
|
75da20be82a47d85d27176db29580ab87d52b670
|
[
"BSD-3-Clause"
] | 1
|
2021-01-05T08:12:38.000Z
|
2021-01-05T08:12:38.000Z
|
import boto3
import os
import tarfile
if __name__ == "__main__":
s3 = boto3.client('s3', aws_access_key_id="AKIAY6UR252SQUQ3OSWZ",
aws_secret_access_key="08LQj"
"+ryk9SMojG18vERXKKzhNSYk5pLhAjrIAVX")
output_path = "./data.tar.gz"
with open(output_path, 'wb') as f:
s3.download_fileobj('definedproteins', "data.tar.gz", f)
assert os.path.isfile(output_path)
print("Download succeeded")
tar = tarfile.open(output_path, "r:gz")
tar.extractall()
tar.close()
os.remove(output_path)
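Hard-coding AWS keys as above leaks credentials with the source. A safer sketch (bucket and object names reused from the script; everything else is standard boto3 behaviour) lets boto3 resolve credentials from its usual chain:

import boto3

# With no explicit keys, boto3 falls back to the AWS_ACCESS_KEY_ID /
# AWS_SECRET_ACCESS_KEY environment variables, the shared credentials
# file (~/.aws/credentials), or an attached IAM role.
s3 = boto3.client('s3')
with open("./data.tar.gz", 'wb') as f:
    s3.download_fileobj('definedproteins', "data.tar.gz", f)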
| 34.529412
| 82
| 0.626917
| 68
| 587
| 5.117647
| 0.558824
| 0.143678
| 0.051724
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036446
| 0.252129
| 587
| 17
| 83
| 34.529412
| 0.756264
| 0
| 0
| 0
| 0
| 0
| 0.22619
| 0.059524
| 0
| 0
| 0
| 0
| 0.0625
| 1
| 0
| false
| 0
| 0.1875
| 0
| 0.1875
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c600ba2b9e8dfbbc98654347a117e7d18a03ded
| 8,247
|
py
|
Python
|
splotch/utils_visium.py
|
adaly/cSplotch
|
c79a5cbd155f2cd5bcc1d8b04b1824923feb1442
|
[
"BSD-3-Clause"
] | 1
|
2021-12-20T16:13:16.000Z
|
2021-12-20T16:13:16.000Z
|
splotch/utils_visium.py
|
adaly/cSplotch
|
c79a5cbd155f2cd5bcc1d8b04b1824923feb1442
|
[
"BSD-3-Clause"
] | null | null | null |
splotch/utils_visium.py
|
adaly/cSplotch
|
c79a5cbd155f2cd5bcc1d8b04b1824923feb1442
|
[
"BSD-3-Clause"
] | null | null | null |
import os, sys
import logging
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy.ndimage import label
from .utils import watershed_tissue_sections, get_spot_adjacency_matrix
# Read in a series of Loupe annotation files and return the set of all unique categories.
# NOTE: "Undefined"
def unique_annots_loupe(loupe_files):
all_annots = []
for fh in loupe_files:
df = pd.read_csv(fh, header=0, sep=",")
for a in df.iloc[:,1].values:
if isinstance(a,str) and len(a)>0 and a.lower() != "undefined":
all_annots.append(a)
return sorted(list(set(all_annots)))
# Annotation matrix from a Loupe annotation file
def read_annot_matrix_loupe(loupe_file, position_file, unique_annots):
annots = pd.read_csv(loupe_file, header=0, sep=",")
positions = pd.read_csv(position_file, index_col=0, header=None,
names=["in_tissue", "array_row", "array_col", "pixel_row", "pixel_col"])
annot_matrix = np.zeros((len(unique_annots), len(annots['Barcode'])), dtype=int)
positions_list = []
for i,b in enumerate(annots['Barcode']):
xcoor = positions.loc[b,'array_col']
ycoor = positions.loc[b,'array_row']
positions_list.append('%d_%d' % (xcoor, ycoor))
if annots.iloc[i,1] in unique_annots:
annot_matrix[unique_annots.index(annots.iloc[i,1]),i] = 1
annot_frame = pd.DataFrame(annot_matrix, index=unique_annots, columns=positions_list)
return annot_frame
# Converts from pseudo-hex indexing of Visium (in which xdim is doubled and odd-indexed
# rows are offset by 1) to standard array indexing with odd rows implicitly shifted.
def pseudo_hex_to_oddr(c):
x,y = c
if int(np.rint(y)) % 2 == 1:
x -= 1
return [int(np.rint(x//2)),int(np.rint(y))]
# Converts from pseudo-hex indexing of Visium (in which xdim is doubled and odd-indexed
# rows are offset by 1) to Cartesian coordinates where neighbors are separated by unit distance.
def pseudo_hex_to_true_hex(c):
x_arr, y_arr = pseudo_hex_to_oddr(c)
x = x_arr
y = y_arr * np.sqrt(3)/2
if y_arr % 2 == 1:
x += 0.5
return [x,y]
''' Determines connected components by recursively checking neighbors in a hex grid.
bin_oddr_matrix - binary odd-right indexed matrix where 1 indicates annotated spot.
'''
def connected_components_hex(bin_oddr_matrix):
lmat = np.zeros_like(bin_oddr_matrix)
lmax = 0
# Returns immediate neighbors of a coordinate in an odd-right hex grid index.
def neighbors(cor):
N = []
# Spots on even-numbered rows have the following adjacency:
# [[1,1,0],[1,1,1],[1,1,0]]
if cor[1] % 2 == 0:
offsets = [[-1,-1],[0,-1],[-1,0],[1,0],[-1,1],[0,1]]
# Spots on odd-numbered rows have the following adjacency:
# [[0,1,1],[1,1,1],[0,1,1]]
else:
offsets = [[0,-1],[1,-1],[-1,0],[1,0],[0,1],[1,1]]
# Find all valid neighbors (within image bounds and present in binary array).
for o in offsets:
q = np.array(cor) + np.array(o)
if q[0]>=0 and q[1]>=0 and q[0]<bin_oddr_matrix.shape[1] and q[1]<bin_oddr_matrix.shape[0]:
if bin_oddr_matrix[q[1],q[0]] == 1:
N.append(q)
return N
# Find set of all spots connected to a given coordinate.
def neighborhood(cor, nmat):
nmat[cor[1],cor[0]] = True
N = neighbors(cor)
if len(N)==0:
return nmat
for q in N:
if not nmat[q[1],q[0]]:
neighborhood(q, nmat)
return nmat
    # The default recursion limit is 1000 -- if there are more than 1k spots on
    # the grid we want to allow all of them to be traversed (never lower the limit).
    sys.setrecursionlimit(max(1000, int(np.sum(bin_oddr_matrix))))
    # Determine neighborhood of each unlabeled spot, assign a label, and proceed.
for y in range(bin_oddr_matrix.shape[0]):
for x in range(bin_oddr_matrix.shape[1]):
if bin_oddr_matrix[y,x]==1 and lmat[y,x]==0:
nmat = neighborhood([x,y], np.zeros_like(bin_oddr_matrix, dtype=bool))
lmax += 1
lmat[nmat] = lmax
return lmat, lmax
''' Analog of detect_tissue_sections for hexagonally packed ST grids (Visium)
'''
def detect_tissue_sections_hex(coordinates, check_overlap=False, threshold=120):
# Convert from spatial hexagonal coordinates to odd-right indexing:
oddr_indices = np.array(list(map(pseudo_hex_to_oddr, coordinates)))
xdim, ydim = 64, 78 # Visium arrays have 78 rows of 64 spots each
bin_oddr_matrix = np.zeros((ydim,xdim))
for ind in oddr_indices:
bin_oddr_matrix[ind[1],ind[0]]=1
labels, n_labels = connected_components_hex(bin_oddr_matrix)
''' From here on, copy-pasta from utils.detect_tissue_section for removing small components
and detecting overlap.
'''
# get the labels of original spots (before dilation)
unique_labels,unique_labels_counts = np.unique(labels*bin_oddr_matrix,return_counts=True)
logging.info('Found %d candidate tissue sections'%(unique_labels.max()+1))
# this is used to label new tissue sections obtained by watershedding
max_label = unique_labels.max()+1
# let us see if there are any tissue sections with unexpected many spots
if check_overlap:
for unique_label,unique_label_counts in zip(unique_labels,unique_labels_counts):
# skip background
if unique_label == 0:
continue
# most likely two tissue sections are slightly overlapping
elif unique_label_counts >= threshold:
logging.warning('Tissue section has %d spots. Let us try to break the tissue section into two.'%(unique_label_counts))
labels = watershed_tissue_sections(unique_label,labels,max_label)
max_label = max_label + 1
unique_labels,unique_labels_counts = np.unique(labels*bin_oddr_matrix,return_counts=True)
# discard tissue sections with less than 10 spots
for idx in range(0,len(unique_labels_counts)):
if unique_labels_counts[idx] < 10:
labels[labels == unique_labels[idx]] = 0
spots_labeled = labels*bin_oddr_matrix
# get labels of detected tissue sections
    # and skip the background class
unique_labels = np.unique(spots_labeled)
unique_labels = unique_labels[unique_labels > 0]
logging.info('Keeping %d tissue sections'%(len(unique_labels)))
return unique_labels, spots_labeled
''' Create a boolean vector indicating which spots from the coordinate list belong to the
tissue section being considered (tissue_idx, spots_tissue_section_labeled obtained by
connected component analysis in detect_tissue_sections_hex).
'''
def get_tissue_section_spots_hex(tissue_idx, array_coordinates_float, spots_tissue_section_labeled):
tissue_section_spots = np.zeros(array_coordinates_float.shape[0],dtype=bool)
for n, chex in enumerate(array_coordinates_float):
cor = pseudo_hex_to_oddr(chex)
if spots_tissue_section_labeled[cor[1],cor[0]] == tissue_idx:
tissue_section_spots[n] = True
return tissue_section_spots
''' Return spot adjacency matrix given a list of coordinates in pseudo-hex:
'''
def get_spot_adjacency_matrix_hex(coordinates):
cartesian_coords = np.array(list(map(pseudo_hex_to_true_hex, coordinates)))
return get_spot_adjacency_matrix(cartesian_coords)
from scipy.ndimage.measurements import label
from splotch.utils import read_array, filter_arrays, detect_tissue_sections, get_tissue_section_spots
import glob
if __name__ == "__main__":
annot_files = glob.glob('../data/Visium_test/*.csv')
aars = unique_annots_loupe(annot_files)
loupe_file = '../data/Visium_test/V014-CGND-MA-00765-A_loupe_AARs.csv'
position_file = '../data/Visium_test/V014-CGND-MA-00765-A/outs/spatial/tissue_positions_list.csv'
annot_frame = read_annot_matrix_loupe(loupe_file, position_file, aars)
array_coordinates_float = np.array([list(map(float, c.split("_"))) for c in annot_frame.columns.values])
unique_labels, spots_labeled = detect_tissue_sections_hex(array_coordinates_float, True, 600)
plt.figure()
plt.imshow(spots_labeled)
plt.show()
for tissue_idx in unique_labels:
tissue_section_spots = get_tissue_section_spots_hex(tissue_idx,array_coordinates_float,
spots_labeled)
tissue_section_coordinates_float = array_coordinates_float[tissue_section_spots]
tissue_section_coordinates_string = ["%.2f_%.2f" % (c[0],c[1]) for c in tissue_section_coordinates_float]
tissue_section_W = get_spot_adjacency_matrix_hex(tissue_section_coordinates_float)
print(np.sum(tissue_section_W))
df = pd.DataFrame(tissue_section_W, index=tissue_section_coordinates_string,
columns=tissue_section_coordinates_string)
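A quick check of the coordinate conversions defined above, on hypothetical pseudo-hex spot coordinates (x doubled; odd rows offset by one):

for c in [(4, 2), (5, 3)]:
    print(c, '->', pseudo_hex_to_oddr(list(c)), pseudo_hex_to_true_hex(list(c)))
# expected: (4, 2) -> [2, 2] and [2.0, 1.732...]
#           (5, 3) -> [2, 3] and [2.5, 2.598...]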
| 36.0131
| 122
| 0.751789
| 1,328
| 8,247
| 4.447289
| 0.222139
| 0.052828
| 0.03742
| 0.004064
| 0.186082
| 0.160515
| 0.110735
| 0.099898
| 0.086014
| 0.074501
| 0
| 0.020147
| 0.139323
| 8,247
| 228
| 123
| 36.171053
| 0.811919
| 0.191464
| 0
| 0.030075
| 0
| 0.007519
| 0.068472
| 0.02675
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075188
| false
| 0
| 0.075188
| 0
| 0.233083
| 0.007519
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c609ad8257f94c3be0be69725b48962c792c7f1
| 1,729
|
py
|
Python
|
floa/routes.py
|
rsutton/loa
|
31ca8cc3f7be011b21f22ed2ce509d135a4b866b
|
[
"MIT"
] | null | null | null |
floa/routes.py
|
rsutton/loa
|
31ca8cc3f7be011b21f22ed2ce509d135a4b866b
|
[
"MIT"
] | null | null | null |
floa/routes.py
|
rsutton/loa
|
31ca8cc3f7be011b21f22ed2ce509d135a4b866b
|
[
"MIT"
] | null | null | null |
from flask import (
Blueprint,
render_template,
request,
session,
current_app as app
)
from flask_login import current_user
from floa.extensions import loa
from floa.models.library import Library
bp = Blueprint(
name='home',
import_name=__name__,
url_prefix="/"
)
@app.errorhandler(404)
def handle_404(err):
return render_template('404.html'), 404
@app.errorhandler(500)
def handle_500(err):
return render_template('500.html'), 500
@app.context_processor
def context_process():
last_update = loa.last_update
catalog_count = len(loa.catalog)
return dict(
last_update=last_update,
catalog_count=catalog_count,
session_library=session['LIBRARY'])
@bp.route("/")
def home():
if current_user.is_authenticated:
if loa.time_for_update():
session['LIBRARY'] = Library(library=current_user.library.library)\
.update(loa.catalog).library
else:
session['LIBRARY'] = current_user.library.library
else:
if 'LIBRARY' not in session:
session['LIBRARY'] = Library().update(loa.catalog).library
return render_template(
'home.html',
data=dict(catalog=loa.catalog)
)
@bp.route("/_update/item", methods=["POST"])
def update_book_status():
# create library list from the session object
library = Library(library=session['LIBRARY'])
library.set_status(
id=request.json['id'],
status=request.json['status']
)
# save updated library to session
session['LIBRARY'] = library.library
if current_user.is_authenticated:
current_user.library = library
current_user.save()
return "OK"
| 24.7
| 79
| 0.657606
| 206
| 1,729
| 5.330097
| 0.320388
| 0.140255
| 0.076503
| 0.068306
| 0.163934
| 0.067395
| 0
| 0
| 0
| 0
| 0
| 0.018018
| 0.229612
| 1,729
| 69
| 80
| 25.057971
| 0.806306
| 0.043378
| 0
| 0.071429
| 0
| 0
| 0.064809
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089286
| false
| 0
| 0.089286
| 0.035714
| 0.267857
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c619fe8bbdf105e5a1586be4e70bb3d3697916a
| 3,496
|
py
|
Python
|
api/async/__init__.py
|
lampwins/orangengine-ui
|
8c864cd297176aa0ff9ead9682f2085f9fd3f1c0
|
[
"MIT"
] | 1
|
2017-10-28T00:21:43.000Z
|
2017-10-28T00:21:43.000Z
|
api/async/__init__.py
|
lampwins/orangengine-ui
|
8c864cd297176aa0ff9ead9682f2085f9fd3f1c0
|
[
"MIT"
] | null | null | null |
api/async/__init__.py
|
lampwins/orangengine-ui
|
8c864cd297176aa0ff9ead9682f2085f9fd3f1c0
|
[
"MIT"
] | 4
|
2017-01-26T23:31:32.000Z
|
2019-04-17T14:02:00.000Z
|
import logging
import orangengine
from api.models import Device as DeviceModel
from celery.utils.log import get_task_logger
from api import debug
celery_logger = get_task_logger(__name__)
if debug:
celery_logger.setLevel(logging.DEBUG)
celery_logger.debug('Enabled Debug mode')
class OEDeviceFactory(object):
"""Device Factory for the orangengine device instances
    The factory is responsible for maintaining singleton instances for each device.
It contains public methods for updating (refreshing) the api device models from
the database, and the respective orangengine device instances.
"""
def __init__(self):
self._devices = {}
self._device_models = {}
self._refresh_all_device_models()
celery_logger.debug("*****************************: %s", self)
@staticmethod
def _dispatch_device(device_model):
"""use the device model to dispatch an orangengine device and store it"""
if device_model:
conn_params = {
'host': device_model.hostname,
'username': device_model.username,
'password': device_model.password,
'device_type': device_model.driver,
'apikey': device_model.apikey,
}
celery_logger.info("Dispatching device: %s", device_model.hostname)
return orangengine.dispatch(**conn_params)
def _refresh_device_model(self, hostname):
"""load and override the device model from the database for the given hostname"""
device_model = DeviceModel.query.filter_by(deleted=False, hostname=hostname).first()
if device_model:
self._device_models[hostname] = device_model
return device_model
def _refresh_all_device_models(self):
"""replace all device models and refrsh them"""
self._device_models = {}
device_models = DeviceModel.query.filter_by(deleted=False).all()
if device_models:
for device_model in device_models:
self._device_models[device_model.hostname] = device_model
def _init_device(self, hostname):
celery_logger.debug("init %s", hostname)
device_model = self._device_models.get(hostname)
if device_model is None:
device_model = self._refresh_device_model(hostname)
device = self._dispatch_device(device_model)
self._devices[hostname] = device
return device
def get_device(self, hostname, refresh_none=True):
"""Return the orangengine device singlton instance for the given hostname.
Optionally (by default) refresh the device (and model) if it is not found
"""
celery_logger.debug("getting device %s", hostname)
device = self._devices.get(hostname)
if not device and refresh_none:
device = self._init_device(hostname)
return device
def get_all_device_models(self):
"""Return a list of all device models currently stored
"""
return self._device_models.values()
def get_device_model(self, hostname):
"""Return the device model for a given hostname
"""
return self._device_models.get(hostname)
def delete_device(self, hostname, include_model=True):
"""Delete the orangengine device instance and optionally the model
"""
self._devices.pop(hostname)
if include_model:
self._device_models.pop(hostname)
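A minimal usage sketch for the factory, assuming an application context in which the DeviceModel queries can run; the hostname is hypothetical:

factory = OEDeviceFactory()
# First access dispatches and caches the orangengine device instance.
device = factory.get_device('fw01.example.com')
for model in factory.get_all_device_models():
    celery_logger.info("known device: %s", model.hostname)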
| 33.615385
| 92
| 0.663043
| 410
| 3,496
| 5.412195
| 0.239024
| 0.12393
| 0.057684
| 0.028391
| 0.073907
| 0.032447
| 0
| 0
| 0
| 0
| 0
| 0
| 0.251144
| 3,496
| 103
| 93
| 33.941748
| 0.847594
| 0.224828
| 0
| 0.1
| 0
| 0
| 0.051048
| 0.011429
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0.016667
| 0.083333
| 0
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c6938ad771712cddf43056b1ad20a6d5a62ca66
| 4,240
|
py
|
Python
|
yolov3/utils/checkpoint.py
|
hysts/pytorch_yolov3
|
6d4c7a1e42d366894effac8ca52f7116f891b5ab
|
[
"MIT"
] | 13
|
2019-03-22T15:22:22.000Z
|
2021-09-30T21:15:37.000Z
|
yolov3/utils/checkpoint.py
|
hysts/pytorch_yolov3
|
6d4c7a1e42d366894effac8ca52f7116f891b5ab
|
[
"MIT"
] | null | null | null |
yolov3/utils/checkpoint.py
|
hysts/pytorch_yolov3
|
6d4c7a1e42d366894effac8ca52f7116f891b5ab
|
[
"MIT"
] | null | null | null |
import copy
import logging
import pathlib
import torch
import torch.nn as nn
from yolov3.config import get_default_config
from yolov3.utils.config_node import ConfigNode
class CheckPointer:
def __init__(
self,
model,
optimizer=None,
scheduler=None,
checkpoint_dir=None,
logger=None,
distributed_rank=0,
):
self.model = model
self.optimizer = optimizer
self.scheduler = scheduler
self.checkpoint_dir = pathlib.Path(
checkpoint_dir) if checkpoint_dir is not None else None
if logger is None:
logger = logging.getLogger(__name__)
self.logger = logger
self.distributed_rank = distributed_rank
def save(self, name, **kwargs):
if self.checkpoint_dir is None or self.distributed_rank != 0:
return
checkpoint = copy.deepcopy(kwargs)
if isinstance(self.model,
(nn.DataParallel, nn.parallel.DistributedDataParallel)):
checkpoint['model'] = self.model.module.state_dict()
else:
checkpoint['model'] = self.model.state_dict()
if self.optimizer is not None:
checkpoint['optimizer'] = self.optimizer.state_dict()
if self.scheduler is not None:
checkpoint['scheduler'] = self.scheduler.state_dict()
outpath = self.checkpoint_dir / f'{name}.pth'
self.logger.info(f'Saving checkpoint to {outpath.as_posix()}')
torch.save(checkpoint, outpath)
self.tag_last_checkpoint(outpath)
def load(self, path=None, backbone=False):
if path is None and self.has_checkpoint():
path = self.get_checkpoint_filepath()
if isinstance(path, str):
path = pathlib.Path(path)
if path is None or not path.exists():
raise RuntimeError('Checkpoint not found.')
self.logger.info(f'Loading checkpoint from {path.as_posix()}')
checkpoint = self._load_checkpoint(path)
self.load_checkpoint(checkpoint, backbone)
if 'optimizer' in checkpoint.keys() and self.optimizer is not None:
self.logger.info(f'Loading optimizer from {path.as_posix()}')
self.optimizer.load_state_dict(checkpoint['optimizer'])
if 'scheduler' in checkpoint.keys() and self.scheduler is not None:
self.logger.info(f'Loading scheduler from {path.as_posix()}')
self.scheduler.load_state_dict(checkpoint['scheduler'])
default_config = get_default_config()
if 'config' in checkpoint.keys():
config = ConfigNode(checkpoint['config'])
else:
config = default_config
return config, checkpoint.get('iteration', 0)
def has_checkpoint(self):
if self.checkpoint_dir is None:
return False
checkpoint_file = self.checkpoint_dir / 'last_checkpoint'
return checkpoint_file.exists()
def get_checkpoint_filepath(self):
checkpoint_file = self.checkpoint_dir / 'last_checkpoint'
try:
with open(checkpoint_file, 'r') as fin:
last_saved = fin.read()
last_saved = last_saved.strip()
last_saved = self.checkpoint_dir / last_saved
except IOError:
last_saved = None
return last_saved
def tag_last_checkpoint(self, last_filepath):
outfile = self.checkpoint_dir / 'last_checkpoint'
with open(outfile, 'w') as fout:
fout.write(last_filepath.name)
@staticmethod
def _load_checkpoint(path):
return torch.load(path, map_location='cpu')
def load_checkpoint(self, checkpoint, backbone):
if isinstance(self.model,
(nn.DataParallel, nn.parallel.DistributedDataParallel)):
if not backbone:
self.model.module.load_state_dict(checkpoint['model'])
else:
self.model.module.backbone.load_state_dict(checkpoint['model'])
else:
if not backbone:
self.model.load_state_dict(checkpoint['model'])
else:
self.model.backbone.load_state_dict(checkpoint['model'])
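A minimal save/load round trip with the class above; the tiny linear model and SGD optimizer are stand-ins (any nn.Module works), and the checkpoint directory is arbitrary:

import pathlib
import torch.optim as optim

ckpt_dir = pathlib.Path('/tmp/ckpt_demo')
ckpt_dir.mkdir(parents=True, exist_ok=True)

model = nn.Linear(4, 2)
optimizer = optim.SGD(model.parameters(), lr=0.1)
checkpointer = CheckPointer(model, optimizer=optimizer, checkpoint_dir=ckpt_dir)
checkpointer.save('step_0001', iteration=1)    # also tags 'last_checkpoint'
config, iteration = checkpointer.load()        # resumes from the tagged file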
| 36.551724
| 79
| 0.624764
| 479
| 4,240
| 5.356994
| 0.192067
| 0.055729
| 0.053001
| 0.05378
| 0.287997
| 0.193297
| 0.144193
| 0.109119
| 0.053001
| 0
| 0
| 0.001645
| 0.283019
| 4,240
| 115
| 80
| 36.869565
| 0.842434
| 0
| 0
| 0.131313
| 0
| 0
| 0.082075
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.080808
| false
| 0
| 0.070707
| 0.010101
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c6f498aea5f5f14a181bf4e682dea6414249ebe
| 1,749
|
py
|
Python
|
gaussian_filter.py
|
baiching/Paper-Implementations
|
56136a88a64885270adbefd6999815a1ad6f56a2
|
[
"MIT"
] | null | null | null |
gaussian_filter.py
|
baiching/Paper-Implementations
|
56136a88a64885270adbefd6999815a1ad6f56a2
|
[
"MIT"
] | null | null | null |
gaussian_filter.py
|
baiching/Paper-Implementations
|
56136a88a64885270adbefd6999815a1ad6f56a2
|
[
"MIT"
] | null | null | null |
import math
import numbers
import torch
from torch import nn
from torch.nn import functional as F
def gaussian_filter(in_channel, out_channel, kernel_size=15, sigma=3):
"""
    This method returns a 2d Gaussian filter as a fixed-weight depthwise Conv2d.
    input :
        in_channel : Number of input channels
        out_channel : Expected number of output channels (must equal in_channel,
                      since the kernel is repeated once per input channel)
        kernel_size : size of the filter (H x H)
        sigma : standard deviation of the Gaussian
    output:
        returns : gaussian_filter (nn.Conv2d with requires_grad disabled)
"""
# Create a x, y coordinate grid of shape (kernel_size, kernel_size, 2)
x_cord = torch.arange(kernel_size)
x_grid = x_cord.repeat(kernel_size).view(kernel_size, kernel_size)
y_grid = x_grid.t()
xy_grid = torch.stack([x_grid, y_grid], dim=-1)
mean = (kernel_size - 1)/2.
variance = sigma**2.
# Calculate the 2-dimensional gaussian kernel which is
# the product of two gaussian distributions for two different
# variables (in this case called x and y)
gaussian_kernel = (1./(2.*math.pi*variance)) *\
torch.exp(
-torch.sum((xy_grid - mean)**2., dim=-1) /\
(2*variance)
)
# Make sure sum of values in gaussian kernel equals 1.
gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel)
# Reshape to 2d depthwise convolutional weight
gaussian_kernel = gaussian_kernel.view(1, 1, kernel_size, kernel_size)
gaussian_kernel = gaussian_kernel.repeat(in_channel, 1, 1, 1)
gaussian_filter = nn.Conv2d(in_channels=in_channel, out_channels=out_channel,
kernel_size=kernel_size, groups=in_channel, bias=False)
gaussian_filter.weight.data = gaussian_kernel
gaussian_filter.weight.requires_grad = False
return gaussian_filter
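A short usage sketch. Because the kernel tensor is repeated once per input channel and groups=in_channel, the layer behaves as a depthwise filter, so out_channel must equal in_channel here; with no padding, each spatial dimension shrinks by kernel_size - 1:

blur = gaussian_filter(in_channel=3, out_channel=3, kernel_size=15, sigma=3)
x = torch.randn(1, 3, 64, 64)
y = blur(x)
print(y.shape)  # torch.Size([1, 3, 50, 50])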
| 34.98
| 83
| 0.675815
| 243
| 1,749
| 4.666667
| 0.345679
| 0.114638
| 0.056437
| 0.070547
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017306
| 0.240137
| 1,749
| 50
| 84
| 34.98
| 0.835967
| 0.311607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.2
| 0
| 0.28
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c72586f407f6e08ecae9c71f47245060e33b3dd
| 28,356
|
py
|
Python
|
widgets/RichTextCtrl.py
|
iubica/wx-portfolio
|
12101986db72bcaffd9b744d514d6f9f651ad5a1
|
[
"MIT"
] | 3
|
2018-03-19T07:57:10.000Z
|
2021-07-05T08:55:14.000Z
|
widgets/RichTextCtrl.py
|
iubica/wx-portfolio
|
12101986db72bcaffd9b744d514d6f9f651ad5a1
|
[
"MIT"
] | 6
|
2020-03-24T15:40:18.000Z
|
2021-12-13T19:46:09.000Z
|
widgets/RichTextCtrl.py
|
iubica/wx-portfolio
|
12101986db72bcaffd9b744d514d6f9f651ad5a1
|
[
"MIT"
] | 4
|
2018-03-29T21:59:55.000Z
|
2019-12-16T14:56:38.000Z
|
#!/usr/bin/env python
from six import BytesIO
import wx
import wx.richtext as rt
import images
#----------------------------------------------------------------------
class RichTextFrame(wx.Frame):
def __init__(self, *args, **kw):
wx.Frame.__init__(self, *args, **kw)
self.MakeMenuBar()
self.MakeToolBar()
self.CreateStatusBar()
self.SetStatusText("Welcome to wx.richtext.RichTextCtrl!")
        self.rtc = rt.RichTextCtrl(self, style=wx.VSCROLL|wx.HSCROLL|wx.NO_BORDER)
wx.CallAfter(self.rtc.SetFocus)
self.rtc.Freeze()
self.rtc.BeginSuppressUndo()
self.rtc.BeginParagraphSpacing(0, 20)
self.rtc.BeginAlignment(wx.TEXT_ALIGNMENT_CENTRE)
self.rtc.BeginBold()
self.rtc.BeginFontSize(14)
self.rtc.WriteText("Welcome to wxRichTextCtrl, a wxWidgets control for editing and presenting styled text and images")
self.rtc.EndFontSize()
self.rtc.Newline()
self.rtc.BeginItalic()
self.rtc.WriteText("by Julian Smart")
self.rtc.EndItalic()
self.rtc.EndBold()
self.rtc.Newline()
self.rtc.WriteImage(images._rt_zebra.GetImage())
self.rtc.EndAlignment()
self.rtc.Newline()
self.rtc.Newline()
self.rtc.WriteText("What can you do with this thing? ")
self.rtc.WriteImage(images._rt_smiley.GetImage())
self.rtc.WriteText(" Well, you can change text ")
self.rtc.BeginTextColour((255, 0, 0))
self.rtc.WriteText("colour, like this red bit.")
self.rtc.EndTextColour()
self.rtc.BeginTextColour((0, 0, 255))
self.rtc.WriteText(" And this blue bit.")
self.rtc.EndTextColour()
self.rtc.WriteText(" Naturally you can make things ")
self.rtc.BeginBold()
self.rtc.WriteText("bold ")
self.rtc.EndBold()
self.rtc.BeginItalic()
self.rtc.WriteText("or italic ")
self.rtc.EndItalic()
self.rtc.BeginUnderline()
self.rtc.WriteText("or underlined.")
self.rtc.EndUnderline()
self.rtc.BeginFontSize(14)
self.rtc.WriteText(" Different font sizes on the same line is allowed, too.")
self.rtc.EndFontSize()
self.rtc.WriteText(" Next we'll show an indented paragraph.")
self.rtc.BeginLeftIndent(60)
self.rtc.Newline()
self.rtc.WriteText("It was in January, the most down-trodden month of an Edinburgh winter. An attractive woman came into the cafe, which is nothing remarkable.")
self.rtc.EndLeftIndent()
self.rtc.Newline()
self.rtc.WriteText("Next, we'll show a first-line indent, achieved using BeginLeftIndent(100, -40).")
self.rtc.BeginLeftIndent(100, -40)
self.rtc.Newline()
self.rtc.WriteText("It was in January, the most down-trodden month of an Edinburgh winter. An attractive woman came into the cafe, which is nothing remarkable.")
self.rtc.EndLeftIndent()
self.rtc.Newline()
self.rtc.WriteText("Numbered bullets are possible, again using sub-indents:")
self.rtc.BeginNumberedBullet(1, 100, 60)
self.rtc.Newline()
self.rtc.WriteText("This is my first item. Note that wxRichTextCtrl doesn't automatically do numbering, but this will be added later.")
self.rtc.EndNumberedBullet()
self.rtc.BeginNumberedBullet(2, 100, 60)
self.rtc.Newline()
self.rtc.WriteText("This is my second item.")
self.rtc.EndNumberedBullet()
self.rtc.Newline()
self.rtc.WriteText("The following paragraph is right-indented:")
self.rtc.BeginRightIndent(200)
self.rtc.Newline()
self.rtc.WriteText("It was in January, the most down-trodden month of an Edinburgh winter. An attractive woman came into the cafe, which is nothing remarkable.")
self.rtc.EndRightIndent()
self.rtc.Newline()
self.rtc.WriteText("The following paragraph is right-aligned with 1.5 line spacing:")
self.rtc.BeginAlignment(wx.TEXT_ALIGNMENT_RIGHT)
self.rtc.BeginLineSpacing(wx.TEXT_ATTR_LINE_SPACING_HALF)
self.rtc.Newline()
self.rtc.WriteText("It was in January, the most down-trodden month of an Edinburgh winter. An attractive woman came into the cafe, which is nothing remarkable.")
self.rtc.EndLineSpacing()
self.rtc.EndAlignment()
self.rtc.Newline()
self.rtc.WriteText("Other notable features of wxRichTextCtrl include:")
self.rtc.BeginSymbolBullet('*', 100, 60)
self.rtc.Newline()
self.rtc.WriteText("Compatibility with wxTextCtrl API")
self.rtc.EndSymbolBullet()
self.rtc.BeginSymbolBullet('*', 100, 60)
self.rtc.Newline()
self.rtc.WriteText("Easy stack-based BeginXXX()...EndXXX() style setting in addition to SetStyle()")
self.rtc.EndSymbolBullet()
self.rtc.BeginSymbolBullet('*', 100, 60)
self.rtc.Newline()
self.rtc.WriteText("XML loading and saving")
self.rtc.EndSymbolBullet()
self.rtc.BeginSymbolBullet('*', 100, 60)
self.rtc.Newline()
self.rtc.WriteText("Undo/Redo, with batching option and Undo suppressing")
self.rtc.EndSymbolBullet()
self.rtc.BeginSymbolBullet('*', 100, 60)
self.rtc.Newline()
self.rtc.WriteText("Clipboard copy and paste")
self.rtc.EndSymbolBullet()
self.rtc.BeginSymbolBullet('*', 100, 60)
self.rtc.Newline()
self.rtc.WriteText("wxRichTextStyleSheet with named character and paragraph styles, and control for applying named styles")
self.rtc.EndSymbolBullet()
self.rtc.BeginSymbolBullet('*', 100, 60)
self.rtc.Newline()
self.rtc.WriteText("A design that can easily be extended to other content types, ultimately with text boxes, tables, controls, and so on")
self.rtc.EndSymbolBullet()
self.rtc.Newline()
self.rtc.WriteText("Note: this sample content was generated programmatically from within the MyFrame constructor in the demo. The images were loaded from inline XPMs. Enjoy wxRichTextCtrl!")
self.rtc.Newline()
self.rtc.Newline()
self.rtc.BeginFontSize(12)
self.rtc.BeginBold()
self.rtc.WriteText("Additional comments by David Woods:")
self.rtc.EndBold()
self.rtc.EndFontSize()
self.rtc.Newline()
self.rtc.WriteText("I find some of the RichTextCtrl method names, as used above, to be misleading. Some character styles are stacked in the RichTextCtrl, and they are removed in the reverse order from how they are added, regardless of the method called. Allow me to demonstrate what I mean.")
self.rtc.Newline()
self.rtc.WriteText('Start with plain text. ')
self.rtc.BeginBold()
self.rtc.WriteText('BeginBold() makes it bold. ')
self.rtc.BeginItalic()
self.rtc.WriteText('BeginItalic() makes it bold-italic. ')
self.rtc.EndBold()
self.rtc.WriteText('EndBold() should make it italic but instead makes it bold. ')
self.rtc.EndItalic()
self.rtc.WriteText('EndItalic() takes us back to plain text. ')
self.rtc.Newline()
self.rtc.WriteText('Start with plain text. ')
self.rtc.BeginBold()
self.rtc.WriteText('BeginBold() makes it bold. ')
self.rtc.BeginUnderline()
self.rtc.WriteText('BeginUnderline() makes it bold-underline. ')
self.rtc.EndBold()
self.rtc.WriteText('EndBold() should make it underline but instead makes it bold. ')
self.rtc.EndUnderline()
self.rtc.WriteText('EndUnderline() takes us back to plain text. ')
self.rtc.Newline()
self.rtc.WriteText('According to Julian, this functions "as expected" because of the way the RichTextCtrl is written. I wrote the SetFontStyle() method here to demonstrate a way to work with overlapping styles that solves this problem.')
self.rtc.Newline()
# Create and initialize text attributes
self.textAttr = rt.RichTextAttr()
self.SetFontStyle(fontColor=wx.Colour(0, 0, 0), fontBgColor=wx.Colour(255, 255, 255), fontFace='Times New Roman', fontSize=10, fontBold=False, fontItalic=False, fontUnderline=False)
self.rtc.WriteText('Start with plain text. ')
self.SetFontStyle(fontBold=True)
self.rtc.WriteText('Bold. ')
self.SetFontStyle(fontItalic=True)
self.rtc.WriteText('Bold-italic. ')
self.SetFontStyle(fontBold=False)
self.rtc.WriteText('Italic. ')
self.SetFontStyle(fontItalic=False)
self.rtc.WriteText('Back to plain text. ')
self.rtc.Newline()
self.rtc.WriteText('Start with plain text. ')
self.SetFontStyle(fontBold=True)
self.rtc.WriteText('Bold. ')
self.SetFontStyle(fontUnderline=True)
self.rtc.WriteText('Bold-Underline. ')
self.SetFontStyle(fontBold=False)
self.rtc.WriteText('Underline. ')
self.SetFontStyle(fontUnderline=False)
self.rtc.WriteText('Back to plain text. ')
self.rtc.Newline()
self.rtc.EndParagraphSpacing()
self.rtc.EndSuppressUndo()
self.rtc.Thaw()
def SetFontStyle(self, fontColor = None, fontBgColor = None, fontFace = None, fontSize = None,
fontBold = None, fontItalic = None, fontUnderline = None):
if fontColor:
self.textAttr.SetTextColour(fontColor)
if fontBgColor:
self.textAttr.SetBackgroundColour(fontBgColor)
if fontFace:
self.textAttr.SetFontFaceName(fontFace)
if fontSize:
self.textAttr.SetFontSize(fontSize)
if fontBold != None:
if fontBold:
self.textAttr.SetFontWeight(wx.FONTWEIGHT_BOLD)
else:
self.textAttr.SetFontWeight(wx.FONTWEIGHT_NORMAL)
if fontItalic != None:
if fontItalic:
self.textAttr.SetFontStyle(wx.FONTSTYLE_ITALIC)
else:
self.textAttr.SetFontStyle(wx.FONTSTYLE_NORMAL)
if fontUnderline != None:
if fontUnderline:
self.textAttr.SetFontUnderlined(True)
else:
self.textAttr.SetFontUnderlined(False)
self.rtc.SetDefaultStyle(self.textAttr)
def OnURL(self, evt):
wx.MessageBox(evt.GetString(), "URL Clicked")
def OnFileOpen(self, evt):
# This gives us a string suitable for the file dialog based on
# the file handlers that are loaded
wildcard, types = rt.RichTextBuffer.GetExtWildcard(save=False)
dlg = wx.FileDialog(self, "Choose a filename",
wildcard=wildcard,
style=wx.FD_OPEN)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
if path:
fileType = types[dlg.GetFilterIndex()]
self.rtc.LoadFile(path, fileType)
dlg.Destroy()
def OnFileSave(self, evt):
if not self.rtc.GetFilename():
self.OnFileSaveAs(evt)
return
self.rtc.SaveFile()
def OnFileSaveAs(self, evt):
wildcard, types = rt.RichTextBuffer.GetExtWildcard(save=True)
dlg = wx.FileDialog(self, "Choose a filename",
wildcard=wildcard,
style=wx.FD_SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
if path:
fileType = types[dlg.GetFilterIndex()]
ext = rt.RichTextBuffer.FindHandlerByType(fileType).GetExtension()
if not path.endswith(ext):
path += '.' + ext
self.rtc.SaveFile(path, fileType)
dlg.Destroy()
def OnFileViewHTML(self, evt):
# Get an instance of the html file handler, use it to save the
# document to a StringIO stream, and then display the
# resulting html text in a dialog with a HtmlWindow.
handler = rt.RichTextHTMLHandler()
handler.SetFlags(rt.RICHTEXT_HANDLER_SAVE_IMAGES_TO_MEMORY)
handler.SetFontSizeMapping([7,9,11,12,14,22,100])
stream = BytesIO()
if not handler.SaveStream(self.rtc.GetBuffer(), stream):
return
import wx.html
dlg = wx.Dialog(self, title="HTML", style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)
html = wx.html.HtmlWindow(dlg, size=(500,400), style=wx.BORDER_SUNKEN)
html.SetPage(stream.getvalue())
btn = wx.Button(dlg, wx.ID_CANCEL)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(html, 1, wx.ALL|wx.EXPAND, 5)
sizer.Add(btn, 0, wx.ALL|wx.CENTER, 10)
dlg.SetSizer(sizer)
sizer.Fit(dlg)
dlg.ShowModal()
handler.DeleteTemporaryImages()
def OnFileExit(self, evt):
self.Close(True)
def OnBold(self, evt):
self.rtc.ApplyBoldToSelection()
def OnItalic(self, evt):
self.rtc.ApplyItalicToSelection()
def OnUnderline(self, evt):
self.rtc.ApplyUnderlineToSelection()
def OnAlignLeft(self, evt):
self.rtc.ApplyAlignmentToSelection(wx.TEXT_ALIGNMENT_LEFT)
def OnAlignRight(self, evt):
self.rtc.ApplyAlignmentToSelection(wx.TEXT_ALIGNMENT_RIGHT)
def OnAlignCenter(self, evt):
self.rtc.ApplyAlignmentToSelection(wx.TEXT_ALIGNMENT_CENTRE)
def OnIndentMore(self, evt):
attr = wx.TextAttr()
attr.SetFlags(wx.TEXT_ATTR_LEFT_INDENT)
ip = self.rtc.GetInsertionPoint()
if self.rtc.GetStyle(ip, attr):
r = rt.RichTextRange(ip, ip)
if self.rtc.HasSelection():
r = self.rtc.GetSelectionRange()
attr.SetLeftIndent(attr.GetLeftIndent() + 100)
attr.SetFlags(wx.TEXT_ATTR_LEFT_INDENT)
self.rtc.SetStyle(r, attr)
def OnIndentLess(self, evt):
attr = wx.TextAttr()
attr.SetFlags(wx.TEXT_ATTR_LEFT_INDENT)
ip = self.rtc.GetInsertionPoint()
if self.rtc.GetStyle(ip, attr):
r = rt.RichTextRange(ip, ip)
if self.rtc.HasSelection():
r = self.rtc.GetSelectionRange()
if attr.GetLeftIndent() >= 100:
attr.SetLeftIndent(attr.GetLeftIndent() - 100)
attr.SetFlags(wx.TEXT_ATTR_LEFT_INDENT)
self.rtc.SetStyle(r, attr)
def OnParagraphSpacingMore(self, evt):
attr = wx.TextAttr()
attr.SetFlags(wx.TEXT_ATTR_PARA_SPACING_AFTER)
ip = self.rtc.GetInsertionPoint()
if self.rtc.GetStyle(ip, attr):
r = rt.RichTextRange(ip, ip)
if self.rtc.HasSelection():
r = self.rtc.GetSelectionRange()
            attr.SetParagraphSpacingAfter(attr.GetParagraphSpacingAfter() + 20)
attr.SetFlags(wx.TEXT_ATTR_PARA_SPACING_AFTER)
self.rtc.SetStyle(r, attr)
def OnParagraphSpacingLess(self, evt):
attr = wx.TextAttr()
attr.SetFlags(wx.TEXT_ATTR_PARA_SPACING_AFTER)
ip = self.rtc.GetInsertionPoint()
if self.rtc.GetStyle(ip, attr):
r = rt.RichTextRange(ip, ip)
if self.rtc.HasSelection():
r = self.rtc.GetSelectionRange()
if attr.GetParagraphSpacingAfter() >= 20:
                attr.SetParagraphSpacingAfter(attr.GetParagraphSpacingAfter() - 20)
attr.SetFlags(wx.TEXT_ATTR_PARA_SPACING_AFTER)
self.rtc.SetStyle(r, attr)
def OnLineSpacingSingle(self, evt):
attr = wx.TextAttr()
attr.SetFlags(wx.TEXT_ATTR_LINE_SPACING)
ip = self.rtc.GetInsertionPoint()
if self.rtc.GetStyle(ip, attr):
r = rt.RichTextRange(ip, ip)
if self.rtc.HasSelection():
r = self.rtc.GetSelectionRange()
attr.SetFlags(wx.TEXT_ATTR_LINE_SPACING)
attr.SetLineSpacing(10)
self.rtc.SetStyle(r, attr)
def OnLineSpacingHalf(self, evt):
attr = wx.TextAttr()
attr.SetFlags(wx.TEXT_ATTR_LINE_SPACING)
ip = self.rtc.GetInsertionPoint()
if self.rtc.GetStyle(ip, attr):
r = rt.RichTextRange(ip, ip)
if self.rtc.HasSelection():
r = self.rtc.GetSelectionRange()
attr.SetFlags(wx.TEXT_ATTR_LINE_SPACING)
attr.SetLineSpacing(15)
self.rtc.SetStyle(r, attr)
def OnLineSpacingDouble(self, evt):
attr = wx.TextAttr()
attr.SetFlags(wx.TEXT_ATTR_LINE_SPACING)
ip = self.rtc.GetInsertionPoint()
if self.rtc.GetStyle(ip, attr):
r = rt.RichTextRange(ip, ip)
if self.rtc.HasSelection():
r = self.rtc.GetSelectionRange()
attr.SetFlags(wx.TEXT_ATTR_LINE_SPACING)
attr.SetLineSpacing(20)
self.rtc.SetStyle(r, attr)
def OnFont(self, evt):
if not self.rtc.HasSelection():
return
r = self.rtc.GetSelectionRange()
fontData = wx.FontData()
fontData.EnableEffects(False)
attr = wx.TextAttr()
attr.SetFlags(wx.TEXT_ATTR_FONT)
if self.rtc.GetStyle(self.rtc.GetInsertionPoint(), attr):
fontData.SetInitialFont(attr.GetFont())
dlg = wx.FontDialog(self, fontData)
if dlg.ShowModal() == wx.ID_OK:
fontData = dlg.GetFontData()
font = fontData.GetChosenFont()
if font:
attr.SetFlags(wx.TEXT_ATTR_FONT)
attr.SetFont(font)
self.rtc.SetStyle(r, attr)
dlg.Destroy()
def OnColour(self, evt):
colourData = wx.ColourData()
attr = wx.TextAttr()
attr.SetFlags(wx.TEXT_ATTR_TEXT_COLOUR)
if self.rtc.GetStyle(self.rtc.GetInsertionPoint(), attr):
colourData.SetColour(attr.GetTextColour())
dlg = wx.ColourDialog(self, colourData)
if dlg.ShowModal() == wx.ID_OK:
colourData = dlg.GetColourData()
colour = colourData.GetColour()
if colour:
if not self.rtc.HasSelection():
self.rtc.BeginTextColour(colour)
else:
r = self.rtc.GetSelectionRange()
attr.SetFlags(wx.TEXT_ATTR_TEXT_COLOUR)
attr.SetTextColour(colour)
self.rtc.SetStyle(r, attr)
dlg.Destroy()
def OnUpdateBold(self, evt):
evt.Check(self.rtc.IsSelectionBold())
def OnUpdateItalic(self, evt):
evt.Check(self.rtc.IsSelectionItalics())
def OnUpdateUnderline(self, evt):
evt.Check(self.rtc.IsSelectionUnderlined())
def OnUpdateAlignLeft(self, evt):
evt.Check(self.rtc.IsSelectionAligned(wx.TEXT_ALIGNMENT_LEFT))
def OnUpdateAlignCenter(self, evt):
evt.Check(self.rtc.IsSelectionAligned(wx.TEXT_ALIGNMENT_CENTRE))
def OnUpdateAlignRight(self, evt):
evt.Check(self.rtc.IsSelectionAligned(wx.TEXT_ALIGNMENT_RIGHT))
def ForwardEvent(self, evt):
# The RichTextCtrl can handle menu and update events for undo,
# redo, cut, copy, paste, delete, and select all, so just
# forward the event to it.
self.rtc.ProcessEvent(evt)
def MakeMenuBar(self):
def doBind(item, handler, updateUI=None):
self.Bind(wx.EVT_MENU, handler, item)
if updateUI is not None:
self.Bind(wx.EVT_UPDATE_UI, updateUI, item)
fileMenu = wx.Menu()
doBind( fileMenu.Append(-1, "&Open\tCtrl+O", "Open a file"),
self.OnFileOpen )
doBind( fileMenu.Append(-1, "&Save\tCtrl+S", "Save a file"),
self.OnFileSave )
doBind( fileMenu.Append(-1, "&Save As...\tF12", "Save to a new file"),
self.OnFileSaveAs )
fileMenu.AppendSeparator()
doBind( fileMenu.Append(-1, "&View as HTML", "View HTML"),
self.OnFileViewHTML)
fileMenu.AppendSeparator()
doBind( fileMenu.Append(-1, "E&xit\tCtrl+Q", "Quit this program"),
self.OnFileExit )
editMenu = wx.Menu()
doBind( editMenu.Append(wx.ID_UNDO, "&Undo\tCtrl+Z"),
self.ForwardEvent, self.ForwardEvent)
doBind( editMenu.Append(wx.ID_REDO, "&Redo\tCtrl+Y"),
self.ForwardEvent, self.ForwardEvent )
editMenu.AppendSeparator()
doBind( editMenu.Append(wx.ID_CUT, "Cu&t\tCtrl+X"),
self.ForwardEvent, self.ForwardEvent )
doBind( editMenu.Append(wx.ID_COPY, "&Copy\tCtrl+C"),
self.ForwardEvent, self.ForwardEvent)
doBind( editMenu.Append(wx.ID_PASTE, "&Paste\tCtrl+V"),
self.ForwardEvent, self.ForwardEvent)
doBind( editMenu.Append(wx.ID_CLEAR, "&Delete\tDel"),
self.ForwardEvent, self.ForwardEvent)
editMenu.AppendSeparator()
doBind( editMenu.Append(wx.ID_SELECTALL, "Select A&ll\tCtrl+A"),
self.ForwardEvent, self.ForwardEvent )
#doBind( editMenu.AppendSeparator(), )
#doBind( editMenu.Append(-1, "&Find...\tCtrl+F"), )
#doBind( editMenu.Append(-1, "&Replace...\tCtrl+R"), )
formatMenu = wx.Menu()
doBind( formatMenu.AppendCheckItem(-1, "&Bold\tCtrl+B"),
self.OnBold, self.OnUpdateBold)
doBind( formatMenu.AppendCheckItem(-1, "&Italic\tCtrl+I"),
self.OnItalic, self.OnUpdateItalic)
doBind( formatMenu.AppendCheckItem(-1, "&Underline\tCtrl+U"),
self.OnUnderline, self.OnUpdateUnderline)
formatMenu.AppendSeparator()
doBind( formatMenu.AppendCheckItem(-1, "L&eft Align"),
self.OnAlignLeft, self.OnUpdateAlignLeft)
doBind( formatMenu.AppendCheckItem(-1, "&Centre"),
self.OnAlignCenter, self.OnUpdateAlignCenter)
doBind( formatMenu.AppendCheckItem(-1, "&Right Align"),
self.OnAlignRight, self.OnUpdateAlignRight)
formatMenu.AppendSeparator()
doBind( formatMenu.Append(-1, "Indent &More"), self.OnIndentMore)
doBind( formatMenu.Append(-1, "Indent &Less"), self.OnIndentLess)
formatMenu.AppendSeparator()
doBind( formatMenu.Append(-1, "Increase Paragraph &Spacing"), self.OnParagraphSpacingMore)
doBind( formatMenu.Append(-1, "Decrease &Paragraph Spacing"), self.OnParagraphSpacingLess)
formatMenu.AppendSeparator()
doBind( formatMenu.Append(-1, "Normal Line Spacing"), self.OnLineSpacingSingle)
doBind( formatMenu.Append(-1, "1.5 Line Spacing"), self.OnLineSpacingHalf)
doBind( formatMenu.Append(-1, "Double Line Spacing"), self.OnLineSpacingDouble)
formatMenu.AppendSeparator()
doBind( formatMenu.Append(-1, "&Font..."), self.OnFont)
mb = wx.MenuBar()
mb.Append(fileMenu, "&File")
mb.Append(editMenu, "&Edit")
mb.Append(formatMenu, "F&ormat")
self.SetMenuBar(mb)
def MakeToolBar(self):
def doBind(item, handler, updateUI=None):
self.Bind(wx.EVT_TOOL, handler, item)
if updateUI is not None:
self.Bind(wx.EVT_UPDATE_UI, updateUI, item)
tbar = self.CreateToolBar()
doBind( tbar.AddTool(-1, '', images._rt_open.GetBitmap(),
shortHelp="Open"), self.OnFileOpen)
doBind( tbar.AddTool(-1, '', images._rt_save.GetBitmap(),
shortHelp="Save"), self.OnFileSave)
tbar.AddSeparator()
doBind( tbar.AddTool(wx.ID_CUT, '', images._rt_cut.GetBitmap(),
shortHelp="Cut"), self.ForwardEvent, self.ForwardEvent)
doBind( tbar.AddTool(wx.ID_COPY, '', images._rt_copy.GetBitmap(),
shortHelp="Copy"), self.ForwardEvent, self.ForwardEvent)
doBind( tbar.AddTool(wx.ID_PASTE, '', images._rt_paste.GetBitmap(),
shortHelp="Paste"), self.ForwardEvent, self.ForwardEvent)
tbar.AddSeparator()
doBind( tbar.AddTool(wx.ID_UNDO, '', images._rt_undo.GetBitmap(),
shortHelp="Undo"), self.ForwardEvent, self.ForwardEvent)
doBind( tbar.AddTool(wx.ID_REDO, '', images._rt_redo.GetBitmap(),
shortHelp="Redo"), self.ForwardEvent, self.ForwardEvent)
tbar.AddSeparator()
doBind( tbar.AddCheckTool(-1, '', images._rt_bold.GetBitmap(),
shortHelp="Bold"), self.OnBold, self.OnUpdateBold)
doBind( tbar.AddCheckTool(-1, '', images._rt_italic.GetBitmap(),
shortHelp="Italic"), self.OnItalic, self.OnUpdateItalic)
doBind( tbar.AddCheckTool(-1, '', images._rt_underline.GetBitmap(),
shortHelp="Underline"), self.OnUnderline, self.OnUpdateUnderline)
tbar.AddSeparator()
doBind( tbar.AddCheckTool(-1, '', images._rt_alignleft.GetBitmap(),
shortHelp="Align Left"), self.OnAlignLeft, self.OnUpdateAlignLeft)
doBind( tbar.AddCheckTool(-1, '', images._rt_centre.GetBitmap(),
shortHelp="Center"), self.OnAlignCenter, self.OnUpdateAlignCenter)
doBind( tbar.AddCheckTool(-1, '', images._rt_alignright.GetBitmap(),
shortHelp="Align Right"), self.OnAlignRight, self.OnUpdateAlignRight)
tbar.AddSeparator()
doBind( tbar.AddTool(-1, '', images._rt_indentless.GetBitmap(),
shortHelp="Indent Less"), self.OnIndentLess)
doBind( tbar.AddTool(-1, '', images._rt_indentmore.GetBitmap(),
shortHelp="Indent More"), self.OnIndentMore)
tbar.AddSeparator()
doBind( tbar.AddTool(-1, '', images._rt_font.GetBitmap(),
shortHelp="Font"), self.OnFont)
doBind( tbar.AddTool(-1, '', images._rt_colour.GetBitmap(),
shortHelp="Font Colour"), self.OnColour)
tbar.Realize()
#----------------------------------------------------------------------
class TestPanel(wx.Panel):
def __init__(self, parent, log):
self.log = log
wx.Panel.__init__(self, parent, -1)
b = wx.Button(self, -1, "Show the RichTextCtrl sample", (50,50))
self.Bind(wx.EVT_BUTTON, self.OnButton, b)
self.AddRTCHandlers()
def AddRTCHandlers(self):
# make sure we haven't already added them.
if rt.RichTextBuffer.FindHandlerByType(rt.RICHTEXT_TYPE_HTML) is not None:
return
# This would normally go in your app's OnInit method. I'm
# not sure why these file handlers are not loaded by
# default by the C++ richtext code, I guess it's so you
# can change the name or extension if you wanted...
rt.RichTextBuffer.AddHandler(rt.RichTextHTMLHandler())
rt.RichTextBuffer.AddHandler(rt.RichTextXMLHandler())
# ...like this
rt.RichTextBuffer.AddHandler(rt.RichTextXMLHandler(name="Other XML",
ext="ox",
type=99))
# This is needed for the view as HTML option since we tell it
# to store the images in the memory file system.
wx.FileSystem.AddHandler(wx.MemoryFSHandler())
def OnButton(self, evt):
win = RichTextFrame(self, -1, "wx.richtext.RichTextCtrl",
size=(700, 500),
style = wx.DEFAULT_FRAME_STYLE)
win.Show(True)
# give easy access to the demo's PyShell if it's running
self.rtfrm = win
self.rtc = win.rtc
#----------------------------------------------------------------------
def runTest(frame, nb, log):
win = TestPanel(nb, log)
return win
#----------------------------------------------------------------------
overview = """<html><body>
<h2><center>wx.richtext.RichTextCtrl</center></h2>
</body></html>
"""
if __name__ == '__main__':
import sys,os
import run
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
| 38.684857
| 302
| 0.608513
| 3,125
| 28,356
| 5.46336
| 0.1888
| 0.091021
| 0.050606
| 0.032683
| 0.522052
| 0.446729
| 0.370644
| 0.351198
| 0.300533
| 0.275347
| 0
| 0.009543
| 0.268268
| 28,356
| 732
| 303
| 38.737705
| 0.813292
| 0.046198
| 0
| 0.417582
| 0
| 0.018315
| 0.141752
| 0.004441
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069597
| false
| 0
| 0.012821
| 0
| 0.095238
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c768de90390e5fd0ea2640bab37a8869d234309
| 1,784
|
py
|
Python
|
lcd/nodemcu_gpio_lcd_test.py
|
petrkr/python_lcd
|
92e5d0211e5cef4dcc9078905f4bd53dc2cc78b4
|
[
"MIT"
] | 237
|
2015-07-19T21:33:01.000Z
|
2022-03-30T00:19:46.000Z
|
lcd/nodemcu_gpio_lcd_test.py
|
petrkr/python_lcd
|
92e5d0211e5cef4dcc9078905f4bd53dc2cc78b4
|
[
"MIT"
] | 25
|
2015-07-19T20:44:31.000Z
|
2022-01-26T10:42:07.000Z
|
lcd/nodemcu_gpio_lcd_test.py
|
petrkr/python_lcd
|
92e5d0211e5cef4dcc9078905f4bd53dc2cc78b4
|
[
"MIT"
] | 107
|
2015-09-05T12:54:55.000Z
|
2022-03-28T15:36:13.000Z
|
"""Implements a HD44780 character LCD connected via NodeMCU GPIO pins."""
from machine import Pin
from utime import sleep, ticks_ms
from nodemcu_gpio_lcd import GpioLcd
# Wiring used for this example:
#
# 1 - Vss (aka Ground) - Connect to one of the ground pins on your NodeMCU board.
# 2 - VDD - Connect to 3V
# 3 - VE (Contrast voltage) - I'll discuss this below
# 4 - RS (Register Select) connect to D0 (as per call to GpioLcd)
# 5 - RW (Read/Write) - connect to ground
# 6 - EN (Enable) connect to D1 (as per call to GpioLcd)
# 7 - D0 - leave unconnected
# 8 - D1 - leave unconnected
# 9 - D2 - leave unconnected
# 10 - D3 - leave unconnected
# 11 - D4 - connect to D2 (as per call to GpioLcd)
# 12 - D5 - connect to D3 (as per call to GpioLcd)
# 13 - D6 - connect to D4 (as per call to GpioLcd)
# 14 - D7 - connect to D5 (as per call to GpioLcd)
# 15 - A (BackLight Anode) - Connect to 3V
# 16 - K (Backlight Cathode) - Connect to Ground
#
# On 14-pin LCDs, there is no backlight, so pins 15 & 16 don't exist.
#
# The Contrast line (pin 3) typically connects to the center tap of a
# 10K potentiometer, and the other 2 legs of the 10K potentiometer are
# connected to pins 1 and 2 (Ground and VDD)
def test_main():
    """Test function for verifying basic functionality."""
    print("Running test_main")
    lcd = GpioLcd(rs_pin=Pin(16),
                  enable_pin=Pin(5),
                  d4_pin=Pin(4),
                  d5_pin=Pin(0),
                  d6_pin=Pin(2),
                  d7_pin=Pin(14),
                  num_lines=2, num_columns=20)
    lcd.putstr("It Works!\nSecond Line")
    sleep(3)
    lcd.clear()
    count = 0
    while True:
        lcd.move_to(0, 0)
        lcd.putstr("%7d" % (ticks_ms() // 1000))
        sleep(1)
        count += 1
| 34.307692
| 81
| 0.627242
| 283
| 1,784
| 3.90106
| 0.45583
| 0.089674
| 0.048913
| 0.059783
| 0.097826
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064566
| 0.27074
| 1,784
| 51
| 82
| 34.980392
| 0.784012
| 0.622758
| 0
| 0
| 0
| 0
| 0.065728
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.142857
| 0
| 0.190476
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c77d3d22c710ab0e8e3582be4b79df9edb68531
| 11,579
|
py
|
Python
|
apps/life_sci/examples/reaction_prediction/rexgen_direct/utils.py
|
LunaBlack/dgl
|
bd1e48a51e348b0e8e25622325adeb5ddea1c0ea
|
[
"Apache-2.0"
] | 2
|
2021-12-09T12:36:13.000Z
|
2022-03-01T21:22:36.000Z
|
apps/life_sci/examples/reaction_prediction/rexgen_direct/utils.py
|
sherry-1001/dgl
|
60d2e7d3c928d43bbb18e7ab17c066451c49f649
|
[
"Apache-2.0"
] | null | null | null |
apps/life_sci/examples/reaction_prediction/rexgen_direct/utils.py
|
sherry-1001/dgl
|
60d2e7d3c928d43bbb18e7ab17c066451c49f649
|
[
"Apache-2.0"
] | 2
|
2020-12-07T09:34:01.000Z
|
2020-12-13T06:18:58.000Z
|
import dgl
import errno
import numpy as np
import os
import random
import torch
from collections import defaultdict
from rdkit import Chem
def mkdir_p(path):
"""Create a folder for the given path.
Parameters
----------
path: str
Folder to create
"""
try:
os.makedirs(path)
print('Created directory {}'.format(path))
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
print('Directory {} already exists.'.format(path))
else:
raise
def setup(args, seed=0):
"""Setup for the experiment:
1. Decide whether to use CPU or GPU for training
2. Fix random seed for python, NumPy and PyTorch.
Parameters
----------
seed : int
Random seed to use.
Returns
-------
args
Updated configuration
"""
assert args['max_k'] >= max(args['top_ks']), \
'Expect max_k to be no smaller than the possible options ' \
'of top_ks, got {:d} and {:d}'.format(args['max_k'], max(args['top_ks']))
if torch.cuda.is_available():
args['device'] = 'cuda:0'
else:
args['device'] = 'cpu'
# Set random seed
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
mkdir_p(args['result_path'])
return args
def collate(data):
"""Collate multiple datapoints
Parameters
----------
data : list of 7-tuples
Each tuple is for a single datapoint, consisting of
a reaction, graph edits in the reaction, an RDKit molecule instance for all reactants,
a DGLGraph for all reactants, a complete graph for all reactants, the features for each
pair of atoms and the labels for each pair of atoms.
Returns
-------
reactions : list of str
List of reactions.
graph_edits : list of str
List of graph edits in the reactions.
mols : list of rdkit.Chem.rdchem.Mol
List of RDKit molecule instances for the reactants.
batch_mol_graphs : DGLGraph
DGLGraph for a batch of molecular graphs.
batch_complete_graphs : DGLGraph
DGLGraph for a batch of complete graphs.
batch_atom_pair_labels : float32 tensor of shape (V, 10)
Labels of atom pairs in the batch of graphs.
"""
reactions, graph_edits, mols, mol_graphs, complete_graphs, \
atom_pair_feats, atom_pair_labels = map(list, zip(*data))
batch_mol_graphs = dgl.batch(mol_graphs)
batch_mol_graphs.set_n_initializer(dgl.init.zero_initializer)
batch_mol_graphs.set_e_initializer(dgl.init.zero_initializer)
batch_complete_graphs = dgl.batch(complete_graphs)
batch_complete_graphs.set_n_initializer(dgl.init.zero_initializer)
batch_complete_graphs.set_e_initializer(dgl.init.zero_initializer)
batch_complete_graphs.edata['feats'] = torch.cat(atom_pair_feats, dim=0)
batch_atom_pair_labels = torch.cat(atom_pair_labels, dim=0)
return reactions, graph_edits, mols, batch_mol_graphs, \
batch_complete_graphs, batch_atom_pair_labels
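# Typical wiring for this collate function (a sketch; `dataset` is an
# assumption, not defined in this module):
# loader = torch.utils.data.DataLoader(dataset, batch_size=20, collate_fn=collate)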
def reaction_center_prediction(device, model, mol_graphs, complete_graphs):
"""Perform a soft prediction on reaction center.
Parameters
----------
device : str
Device to use for computation, e.g. 'cpu', 'cuda:0'
model : nn.Module
Model for prediction.
mol_graphs : DGLGraph
DGLGraph for a batch of molecular graphs
complete_graphs : DGLGraph
DGLGraph for a batch of complete graphs
Returns
-------
scores : float32 tensor of shape (E_full, 5)
Predicted scores for each pair of atoms performing one of the following
5 actions in the reaction:
* The bond between them gets broken
* Forming a single bond
* Forming a double bond
* Forming a triple bond
* Forming an aromatic bond
biased_scores : float32 tensor of shape (E_full, 5)
Compared to scores, a bias is added if the pair involves the same atom.
"""
node_feats = mol_graphs.ndata.pop('hv').to(device)
edge_feats = mol_graphs.edata.pop('he').to(device)
node_pair_feats = complete_graphs.edata.pop('feats').to(device)
return model(mol_graphs, complete_graphs, node_feats, edge_feats, node_pair_feats)
def rough_eval(complete_graphs, preds, labels, num_correct):
batch_size = complete_graphs.batch_size
start = 0
for i in range(batch_size):
end = start + complete_graphs.batch_num_edges[i]
preds_i = preds[start:end, :].flatten()
labels_i = labels[start:end, :].flatten()
for k in num_correct.keys():
topk_values, topk_indices = torch.topk(preds_i, k)
is_correct = labels_i[topk_indices].sum() == labels_i.sum().float().cpu().data.item()
num_correct[k].append(is_correct)
start = end
def rough_eval_on_a_loader(args, model, data_loader):
"""A rough evaluation of model performance in the middle of training.
For final evaluation, we will eliminate some possibilities based on prior knowledge.
Parameters
----------
args : dict
Configurations for the experiment.
model : nn.Module
Model for reaction center prediction.
data_loader : torch.utils.data.DataLoader
Loader for fetching and batching data.
Returns
-------
str
Message summarizing the evaluation result.
"""
model.eval()
num_correct = {k: [] for k in args['top_ks']}
for batch_id, batch_data in enumerate(data_loader):
batch_reactions, batch_graph_edits, batch_mols, batch_mol_graphs, \
batch_complete_graphs, batch_atom_pair_labels = batch_data
with torch.no_grad():
pred, biased_pred = reaction_center_prediction(
args['device'], model, batch_mol_graphs, batch_complete_graphs)
rough_eval(batch_complete_graphs, biased_pred, batch_atom_pair_labels, num_correct)
msg = '|'
for k, correct_count in num_correct.items():
msg += ' acc@{:d} {:.4f} |'.format(k, np.mean(correct_count))
return msg
def eval(complete_graphs, preds, reactions, graph_edits, num_correct, max_k, easy):
"""Evaluate top-k accuracies for reaction center prediction.
Parameters
----------
complete_graphs : DGLGraph
DGLGraph for a batch of complete graphs
preds : float32 tensor of shape (E_full, 5)
Soft predictions for reaction center, E_full being the number of possible
atom-pairs and 5 being the number of possible bond changes
reactions : list of str
List of reactions.
graph_edits : list of str
List of graph edits in the reactions.
num_correct : dict
Counting, for each k, the number of datapoints whose gold bond changes all appear in the top-k predictions.
max_k : int
Maximum number of atom pairs to be selected. This is intended to be larger
than max(num_correct.keys()) as we will filter out many atom pairs due to
considerations such as avoiding duplicates.
easy : bool
If True, reactants not contributing atoms to the product will be excluded in
top-k atom pair selection, which will make the task easier.
"""
# 0 for losing the bond
# 1, 2, 3, 1.5 respectively for forming a single, double, triple or aromatic bond.
bond_change_to_id = {0.0: 0, 1.0: 1, 2.0: 2, 3.0: 3, 1.5: 4}
id_to_bond_change = {v: k for k, v in bond_change_to_id.items()}
num_change_types = len(bond_change_to_id)
batch_size = complete_graphs.batch_size
start = 0
for i in range(batch_size):
# Decide which atom-pairs will be considered.
reaction_i = reactions[i]
reaction_atoms_i = []
reaction_bonds_i = defaultdict(bool)
reactants_i, _, product_i = reaction_i.split('>')
product_mol_i = Chem.MolFromSmiles(product_i)
product_atoms_i = set([atom.GetAtomMapNum() for atom in product_mol_i.GetAtoms()])
for reactant in reactants_i.split('.'):
reactant_mol = Chem.MolFromSmiles(reactant)
reactant_atoms = [atom.GetAtomMapNum() for atom in reactant_mol.GetAtoms()]
if (len(set(reactant_atoms) & product_atoms_i) > 0) or (not easy):
reaction_atoms_i.extend(reactant_atoms)
for bond in reactant_mol.GetBonds():
end_atoms = sorted([bond.GetBeginAtom().GetAtomMapNum(),
bond.GetEndAtom().GetAtomMapNum()])
bond = tuple(end_atoms + [bond.GetBondTypeAsDouble()])
reaction_bonds_i[bond] = True
num_nodes = complete_graphs.batch_num_nodes[i]
end = start + complete_graphs.batch_num_edges[i]
preds_i = preds[start:end, :].flatten()
candidate_bonds = []
topk_values, topk_indices = torch.topk(preds_i, max_k)
for j in range(max_k):
preds_i_j = topk_indices[j].cpu().item()
# A bond change can be either losing the bond or forming a
# single, double, triple or aromatic bond
change_id = preds_i_j % num_change_types
change_type = id_to_bond_change[change_id]
pair_id = preds_i_j // num_change_types
atom1 = pair_id // num_nodes + 1
atom2 = pair_id % num_nodes + 1
# Avoid duplicates and an atom cannot form a bond with itself
if atom1 >= atom2:
continue
if atom1 not in reaction_atoms_i:
continue
if atom2 not in reaction_atoms_i:
continue
candidate = (int(atom1), int(atom2), float(change_type))
if reaction_bonds_i[candidate]:
continue
candidate_bonds.append(candidate)
gold_bonds = []
gold_edits = graph_edits[i]
for edit in gold_edits.split(';'):
atom1, atom2, change_type = edit.split('-')
atom1, atom2 = int(atom1), int(atom2)
gold_bonds.append((min(atom1, atom2), max(atom1, atom2), float(change_type)))
for k in num_correct.keys():
if set(gold_bonds) <= set(candidate_bonds[:k]):
num_correct[k] += 1
start = end
def reaction_center_final_eval(args, model, data_loader, easy):
"""Final evaluation of model performance.
Parameters
----------
args : dict
Configurations for the experiment.
model : nn.Module
Model for reaction center prediction.
data_loader : torch.utils.data.DataLoader
Loader for fetching and batching data.
easy : bool
If True, reactants not contributing atoms to the product will be excluded in
top-k atom pair selection, which will make the task easier.
Returns
-------
msg : str
Summary of the top-k evaluation.
"""
model.eval()
num_correct = {k: 0 for k in args['top_ks']}
for batch_id, batch_data in enumerate(data_loader):
batch_reactions, batch_graph_edits, batch_mols, batch_mol_graphs, \
batch_complete_graphs, batch_atom_pair_labels = batch_data
with torch.no_grad():
pred, biased_pred = reaction_center_prediction(
args['device'], model, batch_mol_graphs, batch_complete_graphs)
eval(batch_complete_graphs, biased_pred, batch_reactions,
batch_graph_edits, num_correct, args['max_k'], easy)
msg = '|'
for k, correct_count in num_correct.items():
msg += ' acc@{:d} {:.4f} |'.format(k, correct_count / len(data_loader.dataset))
return msg
| 37.112179
| 97
| 0.648847
| 1,566
| 11,579
| 4.597063
| 0.192848
| 0.056397
| 0.03431
| 0.024309
| 0.415197
| 0.363939
| 0.348243
| 0.322128
| 0.291013
| 0.277677
| 0
| 0.007952
| 0.261508
| 11,579
| 311
| 98
| 37.231511
| 0.833938
| 0.350289
| 0
| 0.287671
| 0
| 0
| 0.038527
| 0
| 0
| 0
| 0
| 0
| 0.006849
| 1
| 0.054795
| false
| 0
| 0.054795
| 0
| 0.143836
| 0.013699
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c79a93effba00b7b6196ac9c718d0c037c656b9
| 5,168
|
py
|
Python
|
src/figures/violin_plot_sp_performance.py
|
espottesmith/hydrobench
|
e117774c94cff11debd764d231757174ec211e99
|
[
"MIT"
] | 1
|
2022-03-16T19:19:15.000Z
|
2022-03-16T19:19:15.000Z
|
src/figures/violin_plot_sp_performance.py
|
espottesmith/hydrobench
|
e117774c94cff11debd764d231757174ec211e99
|
[
"MIT"
] | null | null | null |
src/figures/violin_plot_sp_performance.py
|
espottesmith/hydrobench
|
e117774c94cff11debd764d231757174ec211e99
|
[
"MIT"
] | null | null | null |
import csv
import os
import difflib
import statistics
import numpy as np
import matplotlib.pyplot as plt
SMALL_SIZE = 12
MEDIUM_SIZE = 14
LARGE_SIZE = 18
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
# plt.rc('title', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=LARGE_SIZE, titlesize=LARGE_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
def adjacent_values(vals, q1, q3):
    upper_adjacent_value = q3 + (q3 - q1) * 1.5
    upper_adjacent_value = np.clip(upper_adjacent_value, q3, vals[-1])
    lower_adjacent_value = q1 - (q3 - q1) * 1.5
    lower_adjacent_value = np.clip(lower_adjacent_value, vals[0], q1)
    return lower_adjacent_value, upper_adjacent_value
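# A quick self-check of adjacent_values on made-up numbers (not part of the
# original figure script): with q1=2 and q3=4 the IQR is 2, so the raw
# whisker limits are -1 and 7, which clip against the sorted data to (1, 7).
assert adjacent_values(np.array([1, 2, 3, 4, 10]), 2, 4) == (1, 7)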
base_dir = "/Users/ewcss/data/ssbt/20220211_benchmark"
methods = {"GGA": ["PBE", "PBE-D3(BJ)", "BLYP", "BLYP-D3(BJ)", "B97-D", "B97-D3", "mPW91", "mPW91-D3(BJ)", "VV10", "rVV10"],
"meta-GGA": ["M06-L", "M06-L-D3(0)", "SCAN", "SCAN-D3(BJ)", "TPSS", "TPSS-D3(BJ)", "MN12-L", "MN12-L-D3(BJ)", "B97M-rV"],
"hybrid GGA": ["PBE0", "PBE0-D3(BJ)", "B3LYP", "B3LYP-D3(BJ)", "CAM-B3LYP", "CAM-B3LYP-D3(0)", "mPW1PW91", "mPW1PW91-D3(BJ)", "wB97X", "wB97XD", "wB97XD3", "wB97XV"],
"hybrid meta-GGA": ["M06-2X", "M06-2X-D3(0)", "M06-HF", "M08-SO", "M11", "MN15", "BMK", "BMK-D3(BJ)", "TPSSh", "TPSSh-D3(BJ)", "SCAN0", "mPWB1K", "mPWB1K-D3(BJ)", "wB97M-V"]}
vac_mae = {x: dict() for x in methods}
vac_rel = {x: dict() for x in methods}
pcm_mae = {x: dict() for x in methods}
pcm_rel = {x: dict() for x in methods}
with open(os.path.join(base_dir, "abserrs_vacuum.csv")) as file:
reader = csv.reader(file)
for i, row in enumerate(reader):
if i == 0:
continue
elif row[0].lower() == "average" or "3c" in row[0].lower():
continue
funct = row[0]
# if funct == "M06-HF":
# continue
avg = float(row[-1])
for group, functs in methods.items():
if funct in functs:
vac_mae[group][funct] = avg
with open(os.path.join(base_dir, "abserrs_rel_vacuum.csv")) as file:
reader = csv.reader(file)
for i, row in enumerate(reader):
if i == 0:
continue
elif row[0].lower() == "average" or "3c" in row[0].lower():
continue
funct = row[0]
avg = float(row[-1])
# if funct == "M06-HF":
# continue
for group, functs in methods.items():
if funct in functs:
vac_rel[group][funct] = avg
# with open(os.path.join(base_dir, "abserrs_IEF-PCM.csv")) as file:
# reader = csv.reader(file)
# for i, row in enumerate(reader):
# if i == 0:
# continue
# elif row[0].lower() == "average" or "3c" in row[0].lower():
# continue
# funct = row[0]
# avg = float(row[-1])
#
# # if funct == "M06-HF":
# # continue
#
# for group, functs in methods.items():
# if funct in functs:
# pcm_mae[group][funct] = avg
#
# with open(os.path.join(base_dir, "abserrs_rel_IEF-PCM.csv")) as file:
# reader = csv.reader(file)
# for i, row in enumerate(reader):
# if i == 0:
# continue
# elif row[0].lower() == "average" or "3c" in row[0].lower():
# continue
# funct = row[0]
# avg = float(row[-1])
#
# # if funct == "M06-HF":
# # continue
#
# for group, functs in methods.items():
# if funct in functs:
# pcm_rel[group][funct] = avg
fig, axs = plt.subplots(2, 1, figsize=(14, 6), sharex=True)
for i, dset in enumerate([vac_mae, vac_rel]):
ax = axs[i]
if i == 0:
ax.set_ylabel("MAE (eV)")
else:
ax.set_ylabel("MRAE (unitless)")
xs = ["GGA", "meta-GGA", "hybrid GGA", "hybrid meta-GGA"]
avgs = list()
lowlims = list()
uplims = list()
data = list()
for group in xs:
data.append(np.array(sorted(list(dset[group].values()))))
ax.violinplot(data, [1,2,3,4], showmeans=False, showmedians=False, showextrema=False)
quartile1 = np.zeros(4)
medians = np.zeros(4)
quartile3 = np.zeros(4)
for i, d in enumerate(data):
q1, m, q3 = np.percentile(d, [25, 50, 75])
quartile1[i] = q1
medians[i] = m
quartile3[i] = q3
whiskers = np.array([adjacent_values(sorted_array, q1, q3)
for sorted_array, q1, q3 in zip(data, quartile1, quartile3)])
whiskers_min, whiskers_max = whiskers[:, 0], whiskers[:, 1]
inds = np.arange(1, len(medians) + 1)
ax.scatter(inds, medians, marker='o', color='white', s=30, zorder=3)
ax.vlines(inds, quartile1, quartile3, color='k', linestyle='-', lw=5)
ax.vlines(inds, whiskers_min, whiskers_max, color='k', linestyle='-', lw=1)
ax.set_xticks([1, 2, 3, 4])
ax.set_xticklabels(xs)
plt.tight_layout()
fig.savefig("sp_performance_violin.png", dpi=150)
plt.show()
| 32.917197
| 186
| 0.572175
| 746
| 5,168
| 3.880697
| 0.270777
| 0.01658
| 0.02487
| 0.023489
| 0.402073
| 0.387219
| 0.387219
| 0.356131
| 0.316062
| 0.316062
| 0
| 0.050547
| 0.257353
| 5,168
| 157
| 187
| 32.917197
| 0.703752
| 0.239358
| 0
| 0.241379
| 0
| 0
| 0.150618
| 0.022657
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011494
| false
| 0
| 0.068966
| 0
| 0.091954
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c7aa53b02ade1969b440eeb2dca4bdd3802359c
| 205
|
py
|
Python
|
submissions/abc083/a.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | 1
|
2021-05-10T01:16:28.000Z
|
2021-05-10T01:16:28.000Z
|
submissions/abc083/a.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | 3
|
2021-05-11T06:14:15.000Z
|
2021-06-19T08:18:36.000Z
|
submissions/abc083/a.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | null | null | null |
# sys.stdin.readline()
import sys
input = sys.stdin.readline
a, b, c, d = map(int, input().split())
if a+b > c+d:
    ans = 'Left'
elif a+b == c+d:
    ans = 'Balanced'
else:
    ans = 'Right'
print(ans)
| 15.769231
| 38
| 0.57561
| 36
| 205
| 3.277778
| 0.555556
| 0.050847
| 0.076271
| 0.101695
| 0.118644
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.22439
| 205
| 12
| 39
| 17.083333
| 0.742138
| 0.097561
| 0
| 0
| 0
| 0
| 0.092896
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c7b5575e035c24915e3b04e46105e06901e65b5
| 255
|
py
|
Python
|
tensorstream/helpers/flatten.py
|
clems4ever/tensorstream
|
61bff14f65f71bdd4ab58aefbd6eda79ec5863cb
|
[
"Apache-2.0"
] | 5
|
2019-04-10T03:51:13.000Z
|
2020-07-12T10:50:24.000Z
|
tensorstream/helpers/flatten.py
|
clems4ever/tensorstream
|
61bff14f65f71bdd4ab58aefbd6eda79ec5863cb
|
[
"Apache-2.0"
] | null | null | null |
tensorstream/helpers/flatten.py
|
clems4ever/tensorstream
|
61bff14f65f71bdd4ab58aefbd6eda79ec5863cb
|
[
"Apache-2.0"
] | null | null | null |
def flatten(elems):
    stack = []
    if isinstance(elems, (list, tuple)):
        for x in elems:
            stack += flatten(x)
    elif isinstance(elems, dict):
        for x in elems.values():
            stack += flatten(x)
    else:
        stack.append(elems)
    return stack
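# Example usage (hypothetical nested structure, not part of this module):
# flatten recurses through lists, tuples and dict values and collects the
# leaves in order.
if __name__ == '__main__':
    nested = [1, (2, 3), {'a': 4, 'b': [5, 6]}]
    assert flatten(nested) == [1, 2, 3, 4, 5, 6]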
| 19.615385
| 38
| 0.603922
| 34
| 255
| 4.529412
| 0.5
| 0.12987
| 0.077922
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.254902
| 255
| 12
| 39
| 21.25
| 0.810526
| 0
| 0
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c7c6ab3c977d309a6e23ab36c08b279c63de1a3
| 3,822
|
py
|
Python
|
src/po_utils/common_actions/element_interactions.py
|
matthew-bahloul/browser-utils
|
22372d1a6718d8a7fd4eebf116c728aaa06e68ee
|
[
"MIT"
] | null | null | null |
src/po_utils/common_actions/element_interactions.py
|
matthew-bahloul/browser-utils
|
22372d1a6718d8a7fd4eebf116c728aaa06e68ee
|
[
"MIT"
] | null | null | null |
src/po_utils/common_actions/element_interactions.py
|
matthew-bahloul/browser-utils
|
22372d1a6718d8a7fd4eebf116c728aaa06e68ee
|
[
"MIT"
] | null | null | null |
"""
by_locator : tuple --> (<selenium By object>, <selector string>)
x_offset : int --> integer value of x offset in pixels
y_offset : int --> integer value of y offset in pixels
x_destination : int --> integer value of x location on page
y_destination : int --> integer value of y location on page
by_locator_source : tuple --> (<selenium By object>, <selector string>)
by_locator_target : tuple --> (<selenium By object>, <selector string>)
clear_first : bool --> toggle for clearing input field before writing text to it
press_enter : bool --> toggle for sending the ENTER key to an input field after writing to it
"""
from selenium.webdriver import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from po_utils.common_actions.waits import wait_for_page_to_load, wait_until_displayed, wait_until_not_displayed
@wait_until_displayed
@wait_for_page_to_load
def click_element(self, by_locator:tuple, x_offset:int=0, y_offset:int=0) -> None:
    # I hate clicking
    element = WebDriverWait(self._driver, self._driver_wait_time).until(EC.visibility_of_element_located(by_locator))
    scroll_height = self._driver.execute_script('return document.body.scrollHeight')
    window_size = self._driver.get_window_size()['height']
    # scroll so the element ends up in a clickable spot: pin to the bottom of
    # the document, pin to the top, or centre it vertically in the viewport
    if element.location['y'] > (scroll_height - .5 * window_size):
        self._driver.execute_script('window.scrollTo(0, document.body.scrollHeight)')
    elif element.location['y'] < (.5 * window_size):
        self._driver.execute_script('window.scrollTo(0, 0)')
    else:
        self._driver.execute_script(f"window.scrollTo({element.location['x']}, {element.location['y'] - .5 * window_size});")
    if x_offset == 0 and y_offset == 0:
        try:
            WebDriverWait(self._driver, self._driver_wait_time).until(EC.element_to_be_clickable(by_locator)).click()
        except Exception:
            # retry once; the first click can fail intermittently (e.g. an
            # element going stale mid-scroll)
            WebDriverWait(self._driver, self._driver_wait_time).until(EC.element_to_be_clickable(by_locator)).click()
    else:
        ActionChains(self._driver).move_to_element_with_offset(WebDriverWait(self._driver, self._driver_wait_time).until(EC.visibility_of_element_located(by_locator)), x_offset, y_offset).click().perform()
@wait_until_displayed
@wait_for_page_to_load
def click_and_drag_element_by_offset(self, by_locator:tuple, x_destination:int, y_destination:int) -> None:
    element = WebDriverWait(self._driver, self._driver_wait_time).until(EC.visibility_of_element_located(by_locator))
    ActionChains(self._driver).drag_and_drop_by_offset(element, x_destination, y_destination).perform()
@wait_until_displayed
@wait_for_page_to_load
def click_and_drag_element(self, by_locator_source:tuple, by_locator_target:tuple) -> None:
    source = WebDriverWait(self._driver, self._driver_wait_time).until(EC.visibility_of_element_located(by_locator_source))
    target = WebDriverWait(self._driver, self._driver_wait_time).until(EC.visibility_of_element_located(by_locator_target))
    ActionChains(self._driver).drag_and_drop(source, target).perform()
@wait_until_displayed
@wait_for_page_to_load
def send_text_to_element(self, by_locator:tuple, text:str, clear_first:bool=True, press_enter:bool=False) -> None:
    if clear_first:
        self._driver.find_element(*by_locator).clear()
    self._driver.find_element(*by_locator).send_keys(text)
    if press_enter:
        self._driver.find_element(*by_locator).send_keys(Keys.ENTER)
@wait_until_displayed
@wait_for_page_to_load
def hover_over_element(self, by_locator:tuple) -> None:
    element = self._driver.find_element(*by_locator)
    ActionChains(self._driver).move_to_element(element).perform()
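# A minimal usage sketch (hypothetical, not part of this module): these
# helpers expect to be bound to a page-object class that provides
# self._driver and self._driver_wait_time.
from selenium import webdriver
from selenium.webdriver.common.by import By

class ExamplePage:  # hypothetical page object
    _driver_wait_time = 10
    click_element = click_element
    send_text_to_element = send_text_to_element

    def __init__(self, driver):
        self._driver = driver

# page = ExamplePage(webdriver.Chrome())             # needs a local browser
# page.send_text_to_element((By.NAME, 'q'), 'hello')
# page.click_element((By.ID, 'submit'))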
| 53.083333
| 205
| 0.75641
| 541
| 3,822
| 4.985213
| 0.207024
| 0.100111
| 0.059696
| 0.070078
| 0.578791
| 0.506118
| 0.378569
| 0.378569
| 0.350389
| 0.3363
| 0
| 0.003033
| 0.137363
| 3,822
| 71
| 206
| 53.830986
| 0.814983
| 0.179487
| 0
| 0.333333
| 0
| 0.020833
| 0.062419
| 0.037193
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104167
| false
| 0
| 0.104167
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c7e366d11f836cc2b4028018db9d96639fae992
| 174
|
py
|
Python
|
Topics/Custom generators/Even numbers/main.py
|
valenciarichards/hypernews-portal
|
0b6c4d8aefe4f8fc7dc90d6542716e98f52515b3
|
[
"MIT"
] | 1
|
2021-07-26T03:06:14.000Z
|
2021-07-26T03:06:14.000Z
|
Topics/Custom generators/Even numbers/main.py
|
valenciarichards/hypernews-portal
|
0b6c4d8aefe4f8fc7dc90d6542716e98f52515b3
|
[
"MIT"
] | null | null | null |
Topics/Custom generators/Even numbers/main.py
|
valenciarichards/hypernews-portal
|
0b6c4d8aefe4f8fc7dc90d6542716e98f52515b3
|
[
"MIT"
] | null | null | null |
n = int(input())
def even(x):
    yield x * 2
for number in range(n):
    print(next(even(number)))
# Don't forget to print out the first n numbers one by one here
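# An equivalent approach (a sketch, not the checked solution): keep a single
# infinite generator and slice off the first n values.
from itertools import count, islice

def evens():
    """Yield 0, 2, 4, ... indefinitely."""
    return (2 * i for i in count())

# for value in islice(evens(), n):
#     print(value)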
| 13.384615
| 63
| 0.62069
| 32
| 174
| 3.375
| 0.78125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007813
| 0.264368
| 174
| 12
| 64
| 14.5
| 0.835938
| 0.350575
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0
| 0.2
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c7f914b76e891552a3b496827a2a433ae7084c1
| 2,096
|
py
|
Python
|
cronman/cron_jobs/run_cron_tasks.py
|
ryancheley/django-cronman
|
5be5d9d5eecba0f110808c9e7a97ef89ef620ade
|
[
"BSD-3-Clause"
] | 17
|
2018-09-25T16:28:36.000Z
|
2022-01-31T14:43:24.000Z
|
cronman/cron_jobs/run_cron_tasks.py
|
ryancheley/django-cronman
|
5be5d9d5eecba0f110808c9e7a97ef89ef620ade
|
[
"BSD-3-Clause"
] | 14
|
2018-11-04T14:45:14.000Z
|
2022-02-01T04:02:47.000Z
|
cronman/cron_jobs/run_cron_tasks.py
|
ryancheley/django-cronman
|
5be5d9d5eecba0f110808c9e7a97ef89ef620ade
|
[
"BSD-3-Clause"
] | 3
|
2018-09-25T16:28:44.000Z
|
2022-02-01T04:08:23.000Z
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
from __future__ import unicode_literals
from django.db import connections
from django.utils import timezone
from django.utils.functional import cached_property
from cronman.config import app_settings
from cronman.job import BaseCronJob
from cronman.models import CronTask
from cronman.spawner import CronSpawner
from cronman.utils import cron_jobs_module_config
class RunCronTasks(BaseCronJob):
"""Starts worker processes for cron jobs requested to run in Admin
via CronTask model.
"""
lock_ignore_errors = True
cronitor_id = app_settings.CRONMAN_RUN_CRON_TASKS_CRONITOR_ID
@cached_property
def cron_spawner(self):
"""Cron Spawner instance"""
return CronSpawner(logger=self.logger)
def run(self):
"""Main logic"""
cron_tasks = self.get_pending_cron_tasks()
num_cron_tasks = len(cron_tasks)
num_started = 0
for i, cron_task in enumerate(cron_tasks, 1):
self.logger.info(
"Starting worker for CronTask {} ({}/{})".format(
cron_task, i, num_cron_tasks
)
)
pid = self.start_cron_task(cron_task)
if pid is not None:
num_started += 1
if num_started:
status_message = "Started {} CronTask(s).".format(num_started)
else:
status_message = "No CronTasks started."
self.logger.info(status_message)
def get_pending_cron_tasks(self):
"""Retrieve pending CronTasks"""
allowed_tasks = cron_jobs_module_config(
"ALLOWED_CRON_TASKS", default=()
)
cron_tasks = list(
CronTask.objects.pending()
.filter(start_at__lte=timezone.now())
.filter(cron_job__in=allowed_tasks)
)
connections.close_all() # close db connections
return cron_tasks
def start_cron_task(self, cron_task):
"""Starts worker for given CronTask"""
return self.cron_spawner.start_worker(cron_task.job_spec())
| 31.757576
| 74
| 0.648378
| 257
| 2,096
| 5.019455
| 0.392996
| 0.076744
| 0.023256
| 0.031008
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004545
| 0.265267
| 2,096
| 65
| 75
| 32.246154
| 0.833117
| 0.116889
| 0
| 0
| 0
| 0
| 0.055678
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.195652
| 0
| 0.413043
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c8301238acf3bc4525ac9e26175e629b0f3e112
| 2,893
|
py
|
Python
|
day23.py
|
alexa-infra/advent-of-code-2018
|
f14e8c87b655c479097ae713572bb0260ec993fc
|
[
"MIT"
] | null | null | null |
day23.py
|
alexa-infra/advent-of-code-2018
|
f14e8c87b655c479097ae713572bb0260ec993fc
|
[
"MIT"
] | null | null | null |
day23.py
|
alexa-infra/advent-of-code-2018
|
f14e8c87b655c479097ae713572bb0260ec993fc
|
[
"MIT"
] | null | null | null |
import re
parse_re = re.compile(
    r'pos\=\<(?P<x>-?\d+),(?P<y>-?\d+),(?P<z>-?\d+)\>, r\=(?P<r>\d+)'
)
def parse(text):
    m = parse_re.match(text)
    d = m.groupdict()
    pos = int(d['x']), int(d['y']), int(d['z'])
    r = int(d['r'])
    return pos, r
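# For example (using a line from test1 below):
assert parse("pos=<1,3,1>, r=1") == ((1, 3, 1), 1)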
def dist(a, b):
    return sum(abs(x1-x2) for x1, x2 in zip(a, b))
def getinrange(data):
    mmax = max(data, key=lambda d: d[1])
    maxp, maxr = mmax
    pp = [(p, r) for p, r in data if dist(p, maxp) <= maxr]
    return len(pp)
def solve(data):
    pp = [p for p, r in data]
    xs = [x for x, y, z in pp]
    ys = [y for x, y, z in pp]
    zs = [z for x, y, z in pp]
    xmin, xmax = min(xs), max(xs)
    ymin, ymax = min(ys), max(ys)
    zmin, zmax = min(zs), max(zs)
    dd = 1
    zero = (0, 0, 0)
    while dd < xmax - xmin:
        dd *= 2
    while True:
        tcount = 0
        best = None
        bestVal = None
        for x in range(xmin, xmax+1, dd):
            for y in range(ymin, ymax+1, dd):
                for z in range(zmin, zmax+1, dd):
                    pos = (x, y, z)
                    count = 0
                    for p, r in data:
                        if (dist(pos, p) - r) / dd <= 0:
                            count += 1
                    if count > tcount:
                        tcount = count
                        best = pos
                        bestVal = dist(best, zero)
                    elif count == tcount:
                        if best is None or dist(pos, zero) < bestVal:
                            best = pos
                            bestVal = dist(best, zero)
        if dd > 1:
            x, y, z = best
            xx = (x - dd, x + dd)
            yy = (y - dd, y + dd)
            zz = (z - dd, z + dd)
            xmin, xmax = min(xx), max(xx)
            ymin, ymax = min(yy), max(yy)
            zmin, zmax = min(zz), max(zz)
            dd = dd // 2
        else:
            return best, bestVal
def test1():
    data = [
        "pos=<0,0,0>, r=4",
        "pos=<1,0,0>, r=1",
        "pos=<4,0,0>, r=3",
        "pos=<0,2,0>, r=1",
        "pos=<0,5,0>, r=3",
        "pos=<0,0,3>, r=1",
        "pos=<1,1,1>, r=1",
        "pos=<1,1,2>, r=1",
        "pos=<1,3,1>, r=1",
    ]
    data = [parse(d) for d in data]
    assert getinrange(data) == 7
def test2():
    data = [
        "pos=<10,12,12>, r=2",
        "pos=<12,14,12>, r=2",
        "pos=<16,12,12>, r=4",
        "pos=<14,14,14>, r=6",
        "pos=<50,50,50>, r=200",
        "pos=<10,10,10>, r=5",
    ]
    data = [parse(d) for d in data]
    best, bestVal = solve(data)
    assert bestVal == 36
def main():
    with open('day23.txt', 'r') as f:
        data = f.readlines()
    data = [parse(d) for d in data]
    print('Part 1:', getinrange(data))
    best, bestVal = solve(data)
    print('Part 2:', bestVal)
if __name__ == '__main__':
    test1()
    test2()
    main()
| 27.037383
| 69
| 0.412375
| 446
| 2,893
| 2.652466
| 0.215247
| 0.011834
| 0.01268
| 0.017751
| 0.218935
| 0.148774
| 0.079459
| 0
| 0
| 0
| 0
| 0.063879
| 0.40477
| 2,893
| 106
| 70
| 27.292453
| 0.623113
| 0
| 0
| 0.113402
| 0
| 0.010309
| 0.123747
| 0.016592
| 0
| 0
| 0
| 0
| 0.020619
| 1
| 0.072165
| false
| 0
| 0.010309
| 0.010309
| 0.123711
| 0.020619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c85751be92171445c98d3494d9be709e143efc5
| 1,526
|
py
|
Python
|
examples/intro-example/dags/tutorial.py
|
rfim/QoalaMoviesKaggle
|
3cf5486f012487c5585bbe86d3a2bc1c58979bac
|
[
"MIT"
] | null | null | null |
examples/intro-example/dags/tutorial.py
|
rfim/QoalaMoviesKaggle
|
3cf5486f012487c5585bbe86d3a2bc1c58979bac
|
[
"MIT"
] | null | null | null |
examples/intro-example/dags/tutorial.py
|
rfim/QoalaMoviesKaggle
|
3cf5486f012487c5585bbe86d3a2bc1c58979bac
|
[
"MIT"
] | null | null | null |
# note: this fragment assumes `os`, `json`, `file_name` and `pg_hook` are
# defined earlier in the DAG file (stdlib imports plus an Airflow Postgres hook)
tot_name = os.path.join(os.path.dirname(__file__), 'src/data', file_name)
# open the json datafile and read it in
with open(tot_name, 'r') as inputfile:
    doc = json.load(inputfile)
# transform the data to the correct types
id_movie = int(doc['id'])
movie_name = str(doc['original_title'])
year = str(doc['production_companies']['production_countries']['release date'])
country_origin = str(doc['production_companies']['origin_country'])
category_1 = str(doc['genres']['name'])
category_2 = str(doc['genres']['name'])
movie_rating = float(doc['popularity'])
avg_rating = float(doc['production_companies']['production_countries']['vote_average'])
total_clicks = float(doc['production_companies']['production_countries']['vote_count'])
# check for nan's in the numeric values and then enter into the database
valid_data = True
#for valid in np.isnan([lat, lon, humid, press, min_temp, max_temp, temp]):
# if valid is False:
# valid_data = False
# break;
row = (id_movie, movie_name, year, country_origin, category_1, category_2, movie_rating,
avg_rating, total_clicks)
insert_cmd = """INSERT INTO movies
(id_movie, movie_name, year,
country_origin, category_1, category_2,
movie_rating, avg_rating, total_clicks)
VALUES
(%s, %s, %s, %s, %s, %s, %s, %s, %s);"""
print(insert_cmd,row)
if valid_data is True:
    pg_hook.run(insert_cmd, parameters=row)
| 40.157895
| 92
| 0.671035
| 215
| 1,526
| 4.525581
| 0.418605
| 0.018499
| 0.024666
| 0.028777
| 0.323741
| 0.281603
| 0.281603
| 0.178828
| 0.178828
| 0.168551
| 0
| 0.004878
| 0.193971
| 1,526
| 37
| 93
| 41.243243
| 0.786179
| 0.194626
| 0
| 0
| 0
| 0.041667
| 0.408681
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c8601c43b4ff494fe3c99410a606a7250f4d9f9
| 20,189
|
py
|
Python
|
hclf/multiclass.py
|
tfmortie/hclf
|
68bdb61c12c4b8fefbb94f1ac8aa30baed8077c5
|
[
"MIT"
] | null | null | null |
hclf/multiclass.py
|
tfmortie/hclf
|
68bdb61c12c4b8fefbb94f1ac8aa30baed8077c5
|
[
"MIT"
] | null | null | null |
hclf/multiclass.py
|
tfmortie/hclf
|
68bdb61c12c4b8fefbb94f1ac8aa30baed8077c5
|
[
"MIT"
] | null | null | null |
"""
Code for hierarchical multi-class classifiers.
Author: Thomas Mortier
Date: Feb. 2021
TODO:
* Add option for set-valued prediction
* Feature: allow tree structures with non-unique node labels (currently, warning is thrown)
"""
import time
import warnings
import numpy as np
from .utils import HLabelEncoder, PriorityQueue
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.utils import _message_with_time
from sklearn.utils.validation import check_X_y, check_array, check_random_state
from sklearn.exceptions import NotFittedError, FitFailedWarning
from sklearn.metrics import accuracy_score
from joblib import Parallel, delayed, parallel_backend
from collections import ChainMap
class LCPN(BaseEstimator, ClassifierMixin):
"""Local classifier per parent node (LCPN) classifier.
Parameters
----------
estimator : scikit-learn base estimator
Represents the base estimator for the classification task in each node.
sep : str, default=';'
Path separator used for processing the hierarchical labels. If set to None,
a random hierarchy is created and the provided flat labels are converted
accordingly.
k : tuple of int, default=(2,2)
Min and max number of children a node can have in the randomly generated tree. Is ignored when
sep is not set to None.
n_jobs : int, default=None
The number of jobs to run in parallel. Currently this applies to fit,
and predict.
random_state : RandomState or an int seed, default=None
A random number generator instance to define the state of the
random permutations generator.
verbose : int, default=0
Controls the verbosity: the higher, the more messages
Examples
--------
>>> from hclf.multiclass import LCPN
>>> from sklearn.linear_model import LogisticRegression
>>>
>>> clf = LCPN(LogisticRegression(random_state=0),
>>> sep=";",
>>> n_jobs=4,
>>> random_state=0,
>>> verbose=1)
>>> clf.fit(X, y)
>>> clf.score(X, y)
"""
def __init__(self, estimator, sep=';', k=(2,2), n_jobs=None, random_state=None, verbose=0):
self.estimator = clone(estimator)
self.sep = sep
self.k = k
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.tree = {}
def _add_path(self, path):
current_node = path[0]
add_node = path[1]
# check if add_node is already registered
if add_node not in self.tree:
# check if add_node is terminal
if len(path) > 2:
# register add_node to the tree
self.tree[add_node] = {
"lbl": add_node,
"estimator": None,
"children": [],
"parent": current_node}
# add add_node to current_node's children (if not yet in list of children)
if add_node not in self.tree[current_node]["children"]:
self.tree[current_node]["children"].append(add_node)
# set estimator when num. of children for current_node is higher than 1 and if not yet set
if len(self.tree[current_node]["children"]) > 1 and self.tree[current_node]["estimator"] is None:
self.tree[current_node]["estimator"] = clone(self.estimator)
else:
# check for duplicate node labels
if self.tree[add_node]["parent"] != current_node:
warnings.warn("Duplicate node label {0} detected in hierarchy with parents {1}, {2}!".format(add_node, self.tree[add_node]["parent"], current_node), FitFailedWarning)
# process next couple of nodes in path
if len(path) > 2:
path = path[1:]
self._add_path(path)
def _fit_node(self, node):
# check if node has estimator
if node["estimator"] is not None:
# transform data for node
y_transform = []
sel_ind = []
for i,y in enumerate(self.y_):
if node["lbl"] in y.split(self.sep):
# need to include current label and sample (as long as it's "complete")
y_split = y.split(self.sep)
if y_split.index(node["lbl"]) < len(y_split)-1:
y_transform.append(y_split[y_split.index(node["lbl"])+1])
sel_ind.append(i)
X_transform = self.X_[sel_ind,:]
node["estimator"].fit(X_transform, y_transform)
if self.verbose >= 2:
print("Model {0} fitted!".format(node["lbl"]))
# now make sure that the order of labels correspond to the order of children
node["children"] = node["estimator"].classes_
return {node["lbl"]: node}
def fit(self, X, y):
"""Implementation of the fitting function for the LCPN classifier.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,) or (n_samples, n_outputs)
The class labels
Returns
-------
self : object
Returns self.
"""
self.random_state_ = check_random_state(self.random_state)
# need to make sure that X and y have the correct shape
X, y = check_X_y(X, y, multi_output=False) # multi-output not supported (yet)
# check if n_jobs is integer
if not self.n_jobs is None:
if not isinstance(self.n_jobs, int):
raise TypeError("Parameter n_jobs must be of type int.")
# store number of outputs and complete data seen during fit
self.n_outputs_ = 1
self.X_ = X
self.y_ = y
# store label of root node
self.rlbl = self.y_[0].split(self.sep)[0]
# init tree
self.tree = {self.rlbl: {
"lbl": self.rlbl,
"estimator": None,
"children": [],
"parent": None}}
# check if sep is None or str
if type(self.sep) != str and self.sep is not None:
raise TypeError("Parameter sep must be of type str or None.")
# init and fit the hierarchical model
start_time = time.time()
# first init the tree
try:
if self.sep is None:
# transform labels to labels in some random hierarchy
self.sep = ';'
self.label_encoder_ = HLabelEncoder(k=self.k,random_state=self.random_state_)
self.y_ = self.label_encoder_.fit_transform(self.y_)
else:
self.label_encoder_ = None
for lbl in self.y_:
self._add_path(lbl.split(self.sep))
# now proceed to fitting
with parallel_backend("loky"):
fitted_tree = Parallel(n_jobs=self.n_jobs)(delayed(self._fit_node)(self.tree[node]) for node in self.tree)
self.tree = {k: v for d in fitted_tree for k, v in d.items()}
except NotFittedError as e:
raise NotFittedError("Tree fitting failed! Make sure that the provided data is in the correct format.")
# now store classes (leaf nodes) seen during fit
cls = []
nodes_to_visit = [self.tree[self.rlbl]]
while len(nodes_to_visit) > 0:
curr_node = nodes_to_visit.pop()
for c in curr_node["children"]:
# check if child is leaf node
if c not in self.tree:
cls.append(c)
else:
# add child to nodes_to_visit
nodes_to_visit.append(self.tree[c])
self.classes_ = cls
# make sure that classes_ are in same format of original labels
if self.label_encoder_ is not None:
self.classes_ = self.label_encoder_.inverse_transform(self.classes_)
else:
# construct dict with leaf node lbls -> path mappings
lbl_to_path = {yi.split(self.sep)[-1]: yi for yi in self.y_}
self.classes_ = [lbl_to_path[cls] for cls in self.classes_]
stop_time = time.time()
if self.verbose >= 1:
print(_message_with_time("LCPN", "fitting", stop_time-start_time))
return self
def _predict_nbop(self, i, X):
preds = []
# run over all samples
for x in X:
x = x.reshape(1,-1)
pred = self.rlbl
pred_path = [pred]
while pred in self.tree:
curr_node = self.tree[pred]
# check if we have a node with single path
if curr_node["estimator"] is not None:
pred = curr_node["estimator"].predict(x)[0]
else:
pred = curr_node["children"][0]
pred_path.append(pred)
preds.append(self.sep.join(pred_path))
return {i: preds}
def _predict_bop(self, i, X, scores):
preds = []
# run over all samples
for x in X:
x = x.reshape(1,-1)
nodes_to_visit = PriorityQueue()
nodes_to_visit.push(1.,self.rlbl)
pred = None
while not nodes_to_visit.is_empty():
curr_node_prob, curr_node = nodes_to_visit.pop()
curr_node_lbl = curr_node.split(self.sep)[-1]
curr_node_prob = 1-curr_node_prob
# check if we are at a leaf node
if curr_node_lbl not in self.tree:
pred = curr_node
break
else:
curr_node_v = self.tree[curr_node_lbl]
# check if we have a node with single path
if curr_node_v["estimator"] is not None:
# get probabilities
curr_node_ch_probs = self._predict_proba(curr_node_v["estimator"], x, scores)
# apply chain rule of probability
curr_node_ch_probs = curr_node_ch_probs*curr_node_prob
# add children to queue
for j,c in enumerate(curr_node_v["children"]):
prob_child = curr_node_ch_probs[:,j][0]
nodes_to_visit.push(prob_child, curr_node+self.sep+c)
else:
c = curr_node_v["children"][0]
nodes_to_visit.push(curr_node_prob,curr_node+self.sep+c)
preds.append(pred)
return {i: preds}
def predict(self, X, bop=False):
"""Return class predictions.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input samples.
bop : boolean, default=False
Returns Bayes-optimal solution when set to True. Returns
solution by following the path of maximum probability in each node, otherwise.
Returns
-------
preds : ndarray
Returns an array of predicted class labels.
"""
# check input
X = check_array(X)
scores = False
preds = []
start_time = time.time()
# check whether the base estimator supports probabilities
if not hasattr(self.estimator, 'predict_proba'):
# check whether the base estimator supports class scores
if not hasattr(self.estimator, 'decision_function'):
raise NotFittedError("{0} does not support \
probabilistic predictions nor scores.".format(self.estimator))
else:
scores = True
try:
# now proceed to predicting
with parallel_backend("loky"):
if not bop:
d_preds = Parallel(n_jobs=self.n_jobs)(delayed(self._predict_nbop)(i,X[ind]) for i,ind in enumerate(np.array_split(range(X.shape[0]), self.n_jobs)))
else:
d_preds = Parallel(n_jobs=self.n_jobs)(delayed(self._predict_bop)(i,X[ind],scores) for i,ind in enumerate(np.array_split(range(X.shape[0]), self.n_jobs)))
# collect predictions
preds_dict = dict(ChainMap(*d_preds))
for k in np.sort(list(preds_dict.keys())):
preds.extend(preds_dict[k])
# in case of no predefined hierarchy, backtransform to original labels
if self.label_encoder_ is not None:
preds = self.label_encoder_.inverse_transform([p.split(self.sep)[-1] for p in preds])
except NotFittedError as e:
raise NotFittedError("This model is not fitted yet. Call 'fit' \
with appropriate arguments before using this \
method.")
stop_time = time.time()
if self.verbose >= 1:
print(_message_with_time("LCPN", "predicting", stop_time-start_time))
return preds
def _predict_proba(self, estimator, X, scores=False):
if not scores:
return estimator.predict_proba(X)
else:
# get scores
scores = estimator.decision_function(X)
scores = np.exp(scores)
# check if we only have one score (ie, when K=2)
if len(scores.shape) == 2:
# softmax evaluation
scores = scores/np.sum(scores,axis=1).reshape(scores.shape[0],1)
else:
# sigmoid evaluation
scores = 1/(1+np.exp(-scores))
scores = scores.reshape(-1,1)
scores = np.hstack([1-scores,scores])
return scores
def predict_proba(self, X):
"""Return probability estimates.
Important: the returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input samples.
Returns
-------
probs : ndarray
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in self.classes_.
"""
# check input
X = check_array(X)
scores = False
probs = []
start_time = time.time()
# check whether the base estimator supports probabilities
if not hasattr(self.estimator, 'predict_proba'):
# check whether the base estimator supports class scores
if not hasattr(self.estimator, 'decision_function'):
raise NotFittedError("{0} does not support \
probabilistic predictions nor scores.".format(self.estimator))
else:
scores = True
try:
nodes_to_visit = [(self.tree[self.rlbl], np.ones((X.shape[0],1)))]
while len(nodes_to_visit) > 0:
curr_node, parent_prob = nodes_to_visit.pop()
# check if we have a node with single path
if curr_node["estimator"] is not None:
# get probabilities
curr_node_probs = self._predict_proba(curr_node["estimator"], X, scores)
# apply chain rule of probability
curr_node_probs = curr_node_probs*parent_prob
for i,c in enumerate(curr_node["children"]):
# check if child is leaf node
prob_child = curr_node_probs[:,i].reshape(-1,1)
if c not in self.tree:
probs.append(prob_child)
else:
# add child to nodes_to_visit
nodes_to_visit.append((self.tree[c],prob_child))
else:
c = curr_node["children"][0]
# check if child is leaf node
if c not in self.tree:
probs.append(parent_prob)
else:
# add child to nodes_to_visit
nodes_to_visit.append((self.tree[c],parent_prob))
except NotFittedError as e:
raise NotFittedError("This model is not fitted yet. Call 'fit' \
with appropriate arguments before using this \
method.")
stop_time = time.time()
if self.verbose >= 1:
print(_message_with_time("LCPN", "predicting probabilities", stop_time-start_time))
return np.hstack(probs)
def score(self, X, y):
"""Return mean accuracy score.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True labels for X.
Returns
-------
score : float
Mean accuracy of self.predict(X) wrt. y.
"""
# check input and outputs
X, y = check_X_y(X, y, multi_output=False)
start_time = time.time()
try:
preds = self.predict(X)
except NotFittedError as e:
raise NotFittedError("This model is not fitted yet. Call 'fit' \
with appropriate arguments before using this \
method.")
stop_time = time.time()
if self.verbose >= 1:
print(_message_with_time("LCPN", "calculating score", stop_time-start_time))
score = accuracy_score(y, preds)
return score
def score_nodes(self, X, y):
"""Return mean accuracy score for each node in the hierarchy.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True labels for X.
Returns
-------
score_dict : dict
Mean accuracy of self.predict(X) wrt. y for each node in the hierarchy.
"""
# check input and outputs
X, y = check_X_y(X, y, multi_output=False)
start_time = time.time()
score_dict = {}
try:
# transform the flat labels, in case of no predefined hierarchy
if self.label_encoder_ is not None:
y = self.label_encoder_.transform(y)
for node in self.tree:
node = self.tree[node]
# check if node has estimator
if node["estimator"] is not None:
# transform data for node
y_transform = []
sel_ind = []
for i, yi in enumerate(y):
if node["lbl"] in yi.split(self.sep):
# need to include current label and sample (as long as it's "complete")
y_split = yi.split(self.sep)
if y_split.index(node["lbl"]) < len(y_split)-1:
y_transform.append(y_split[y_split.index(node["lbl"])+1])
sel_ind.append(i)
X_transform = X[sel_ind,:]
if len(sel_ind) != 0:
# obtain predictions
node_preds = node["estimator"].predict(X_transform)
acc = accuracy_score(y_transform, node_preds)
score_dict[node["lbl"]] = acc
except NotFittedError as e:
raise NotFittedError("This model is not fitted yet. Call 'fit' \
with appropriate arguments before using this \
method.")
stop_time = time.time()
if self.verbose >= 1:
print(_message_with_time("LCPN", "calculating node scores", stop_time-start_time))
return score_dict
| 42.864119
| 182
| 0.553668
| 2,460
| 20,189
| 4.393902
| 0.14187
| 0.027385
| 0.019983
| 0.007216
| 0.450365
| 0.395504
| 0.369969
| 0.346933
| 0.314368
| 0.301786
| 0
| 0.005626
| 0.357323
| 20,189
| 470
| 183
| 42.955319
| 0.827437
| 0.272871
| 0
| 0.392727
| 0
| 0
| 0.051273
| 0
| 0
| 0
| 0
| 0.002128
| 0
| 1
| 0.04
| false
| 0
| 0.04
| 0
| 0.12
| 0.021818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c86a8871548627b9a0755d57d564bc3d174dbdd
| 2,649
|
py
|
Python
|
imports/language_check.py
|
ahmed-amr1/schtabtag
|
d5f1e550fccaf58cbcf9fac39528b921659cec7c
|
[
"MIT"
] | null | null | null |
imports/language_check.py
|
ahmed-amr1/schtabtag
|
d5f1e550fccaf58cbcf9fac39528b921659cec7c
|
[
"MIT"
] | null | null | null |
imports/language_check.py
|
ahmed-amr1/schtabtag
|
d5f1e550fccaf58cbcf9fac39528b921659cec7c
|
[
"MIT"
] | null | null | null |
def Check(src):
    lang = None
    if src == "auto":
        lang = "Auto detect language"
    if src == "en":
        lang = "English - English"
    if src == "de":
        lang = "German - Deutsch"
    if src == "ar":
        lang = "Arabic - عربي"
    if src == "es":
        lang = "Spanish - español, castellano"
    if src == "ru":
        lang = "Russian - русский"
    if src == "pl":
        lang = "Polish - Polski"
    if src == "it":
        lang = "Italian - Italiano"
    if src == "ja":
        lang = "Japanese - 日本語"
    if src == "ga":
        lang = "Irish - Gaeilge"
    if src == "hi":
        lang = "Hindi - हिन्दी, हिंदी"
    if src == "he":
        lang = "Hebrew - עברית"
    if src == "fr":
        lang = "French - Français"
    if src == "nl":
        lang = "Dutch - Nederlands"
    if src == "cs":
        lang = "Czech - česky, čeština"
    if src == "da":
        lang = "Danish - Dansk"
    if src == "zh":
        lang = "Chinese - 中文, Zhōngwén"
    if src == "fa":
        lang = "Persian - فارسی"
    return lang
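# A more compact alternative sketch (same behavior, not the version used
# above): a single lookup table instead of the chain of ifs.
LANGUAGES = {
    "auto": "Auto detect language",
    "en": "English - English",
    "de": "German - Deutsch",
    "ar": "Arabic - عربي",
    "fr": "French - Français",
    # ... remaining codes as in Check() above
}

def check_via_table(src):
    """Return the display name for a language code, or None if unknown."""
    return LANGUAGES.get(src)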
"""
if src == "auto":
src = "Auto detect language"
if src == "en":
src = "English - English"
if src == "de":
src = "German - Deutsch"
if src == "ar":
src = "Arabic - عربي"
if src == "es":
src = "Spanish - español, castellano"
if src == "ru":
src = "Russian - русский"
if src == "pl":
src = "Polish - Polski"
if src == "it":
src = "Italian - Italiano"
if src == "ja":
src = "Japanese - 日本語"
if src == "ga":
src = "Irish - Gaeilge"
if src == "hi":
src = "Hindi - हिन्दी, हिंदी"
if src == "he":
src = "Hebrew - עברית"
if src == "fr":
src = "French - Français"
if src == "nl":
src = "Dutch - Nederlands"
if src == "cs":
src = "Czech - česky, čeština"
if src == "da":
src = "Danish - Dansk"
if src == "zh":
src = "Chinese - 中文, Zhōngwén"
if src == "fa":
src = "Persian - فارسی"
if dst == "en":
dst = "English - English"
if dst == "de":
dst = "German - Deutsch"
if dst == "ar":
dst = "Arabic - عربي"
if dst == "es":
dst = "Spanish - español, castellano"
if dst == "ru":
dst = "Russian - русский"
if dst == "pl":
dst = "Polish - Polski"
if dst == "it":
dst = "Italian - Italiano"
if dst == "ja":
dst = "Japanese - 日本語"
if dst == "ga":
dst = "Irish - Gaeilge"
if dst == "hi":
dst = "Hindi - हिन्दी, हिंदी"
if dst == "he":
dst = "Hebrew - עברית"
if dst == "fr":
dst = "French - Français"
if dst == "nl":
dst = "Dutch - Nederlands"
if dst == "cs":
dst = "Czech - česky, čeština"
if dst == "da":
dst = "Danish - Dansk"
if dst == "zh":
dst = "Chinese - 中文, Zhōngwén"
if dst == "fa":
dst = "Persian - فارسی"
"""
| 23.442478
| 42
| 0.495659
| 363
| 2,649
| 3.666667
| 0.192837
| 0.135237
| 0.036063
| 0.058603
| 0.558978
| 0.204358
| 0.048084
| 0.048084
| 0.048084
| 0.048084
| 0
| 0
| 0.309928
| 2,649
| 113
| 43
| 23.442478
| 0.718271
| 0
| 0
| 0
| 0
| 0
| 0.377258
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0
| 0
| 0
| 0.051282
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c86adac816e4b256e05f833e885292823f8146c
| 1,003
|
py
|
Python
|
puppo/decorator_functions/display_decorators.py
|
JHowell45/Pupper
|
5c863eba8651a5b1130c04321cc6cefacb71c7b2
|
[
"MIT"
] | null | null | null |
puppo/decorator_functions/display_decorators.py
|
JHowell45/Pupper
|
5c863eba8651a5b1130c04321cc6cefacb71c7b2
|
[
"MIT"
] | 1
|
2021-06-01T21:54:15.000Z
|
2021-06-01T21:54:15.000Z
|
puppo/decorator_functions/display_decorators.py
|
JHowell45/Pupper
|
5c863eba8651a5b1130c04321cc6cefacb71c7b2
|
[
"MIT"
] | null | null | null |
"""Decorator unctions for displaying commands."""
from functools import wraps
from shutil import get_terminal_size
import click
def command_handler(command_title, colour='green'):
"""Use this decorator for surrounding the functions with banners."""
def decorator(function):
"""Nested decorator function."""
terminal_width = int(get_terminal_size()[0])
title = ' {} '.format(command_title)
banner_length = int((terminal_width - len(title)) / 2)
banner = '-' * banner_length
command_banner = '|{0}{1}{0}|'.format(
banner, title.title())
lower_banner = '|{}|'.format('-' * int(len(command_banner) - 2))
@wraps(function)
def wrapper(*args, **kwargs):
"""Nested wrapper function."""
click.secho(command_banner, fg=colour)
result = function(*args, **kwargs)
click.secho(lower_banner, fg=colour)
return result
return wrapper
return decorator
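# Example usage (hypothetical command, not part of this module): the wrapped
# function's output is framed by a coloured title banner.
@command_handler('fetch', colour='blue')
def fetch():
    click.echo('fetching...')

# fetch() then prints roughly:
# |----------------------------- Fetch -----------------------------|
# fetching...
# |------------------------------------------------------------------|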
| 34.586207
| 72
| 0.617149
| 109
| 1,003
| 5.53211
| 0.422018
| 0.064677
| 0.049751
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008
| 0.252243
| 1,003
| 28
| 73
| 35.821429
| 0.796
| 0.157527
| 0
| 0
| 0
| 0
| 0.031553
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0
| 0.15
| 0
| 0.45
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c8c154f105569426c30727bc7ab8defbef28f73
| 1,051
|
py
|
Python
|
scripts/undeploy_service.py
|
Suremaker/consul-deployment-agent
|
466c36d3fcb9f8bfa144299dde7cb94f4341907b
|
[
"Apache-2.0"
] | 6
|
2016-10-10T09:26:07.000Z
|
2018-09-20T08:59:42.000Z
|
scripts/undeploy_service.py
|
Suremaker/consul-deployment-agent
|
466c36d3fcb9f8bfa144299dde7cb94f4341907b
|
[
"Apache-2.0"
] | 11
|
2016-10-10T12:11:07.000Z
|
2018-05-09T22:11:02.000Z
|
scripts/undeploy_service.py
|
Suremaker/consul-deployment-agent
|
466c36d3fcb9f8bfa144299dde7cb94f4341907b
|
[
"Apache-2.0"
] | 16
|
2016-09-28T16:00:58.000Z
|
2019-02-25T16:52:12.000Z
|
#!/usr/bin/env python
import argparse
import consulate
class Options(object):
    pass
options = Options()
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--name', required=True, help='service name')
parser.add_argument('-s', '--slice', help='slice name (optional)')
parser.add_argument('-r', '--role', required=True, help='server role name')
parser.add_argument('-e', '--environment', required=True, help='environment name')
args = parser.parse_args(namespace=options)
print('[Initiating service removal]')
print(' Service: %s' % args.name)
print(' Slice: %s' % args.slice)
print(' Role: %s' % args.role)
print(' Environment: %s' % args.environment)
consul_session = consulate.Consul()
if args.slice is None:
    deployment_key = 'enviroments/{0}/roles/{1}/services/{2}'.format(args.environment, args.role, args.name)
else:
    deployment_key = 'enviroments/{0}/roles/{1}/services/{2}/{3}'.format(args.environment, args.role, args.name, args.slice)
del consul_session.kv[deployment_key]
print('Service removal triggered.')
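# Example invocation (hypothetical names):
#   python undeploy_service.py -n payments -r web -e production
# which deletes the Consul key:
#   enviroments/production/roles/web/services/payments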
| 30.911765
| 124
| 0.713606
| 140
| 1,051
| 5.285714
| 0.385714
| 0.048649
| 0.091892
| 0.056757
| 0.208108
| 0.208108
| 0.208108
| 0.108108
| 0
| 0
| 0
| 0.007447
| 0.105614
| 1,051
| 33
| 125
| 31.848485
| 0.779787
| 0.01903
| 0
| 0
| 0
| 0
| 0.281553
| 0.07767
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.043478
| 0.086957
| 0
| 0.130435
| 0.26087
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c8e315e18d51be8398247d53085f6019815be6e
| 2,717
|
py
|
Python
|
tests/functional/conftest.py
|
charmed-kubernetes/ceph-csi-operator
|
06a6a9fed6055e3f0e0bfde835d7f607febcf6ea
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/conftest.py
|
charmed-kubernetes/ceph-csi-operator
|
06a6a9fed6055e3f0e0bfde835d7f607febcf6ea
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/conftest.py
|
charmed-kubernetes/ceph-csi-operator
|
06a6a9fed6055e3f0e0bfde835d7f607febcf6ea
|
[
"Apache-2.0"
] | 1
|
2022-03-24T19:17:47.000Z
|
2022-03-24T19:17:47.000Z
|
# Copyright 2021 Martin Kalcok
# See LICENSE file for licensing details.
"""Pytest fixtures for functional tests."""
# pylint: disable=W0621
import logging
import tempfile
from pathlib import Path
import pytest
from kubernetes import client, config
from pytest_operator.plugin import OpsTest
logger = logging.getLogger(__name__)
@pytest.fixture(scope="session")
def namespace() -> str:
"""Return namespace used for functional tests."""
return "default"
@pytest.fixture(scope="module")
async def kube_config(ops_test: OpsTest) -> Path:
"""Return path to the kube config of the tested Kubernetes cluster.
    Config file is fetched from the kubernetes-master unit and stored in a temporary file.
"""
k8s_master = ops_test.model.applications["kubernetes-master"].units[0]
with tempfile.TemporaryDirectory() as tmp_dir:
kube_config_file = Path(tmp_dir).joinpath("kube_config")
# This split is needed because `model_name` gets reported in format "<controller>:<model>"
model_name = ops_test.model_name.split(":", maxsplit=1)[-1]
cmd = "juju scp -m {} {}:config {}".format(
model_name, k8s_master.name, kube_config_file
).split()
return_code, _, std_err = await ops_test.run(*cmd)
assert return_code == 0, std_err
yield kube_config_file
@pytest.fixture()
async def cleanup_k8s(kube_config, namespace: str):
"""Cleanup kubernetes resources created during test."""
yield # act only on teardown
config.load_kube_config(str(kube_config))
pod_prefixes = ["read-test-ceph-", "write-test-ceph"]
pvc_prefix = "pvc-test-"
core_api = client.CoreV1Api()
for pod in core_api.list_namespaced_pod(namespace).items:
pod_name = pod.metadata.name
if any(pod_name.startswith(prefix) for prefix in pod_prefixes):
try:
logger.info("Removing Pod %s", pod_name)
core_api.delete_namespaced_pod(pod_name, namespace)
except client.ApiException as exc:
if exc.status != 404:
raise exc
logger.debug("Pod %s is already removed", pod_name)
for pvc in core_api.list_namespaced_persistent_volume_claim(namespace).items:
pvc_name = pvc.metadata.name
if pvc_name.startswith(pvc_prefix):
try:
logger.info("Removing PersistentVolumeClaim %s", pvc_name)
core_api.delete_namespaced_persistent_volume_claim(pvc_name, namespace)
except client.ApiException as exc:
if exc.status != 404:
raise exc
logger.debug("PersistentVolumeClaim %s is already removed.", pvc_name)
| 36.226667
| 98
| 0.670593
| 344
| 2,717
| 5.104651
| 0.392442
| 0.051253
| 0.023918
| 0.014806
| 0.142369
| 0.085421
| 0.085421
| 0.085421
| 0.085421
| 0.085421
| 0
| 0.010572
| 0.234082
| 2,717
| 74
| 99
| 36.716216
| 0.833253
| 0.104527
| 0
| 0.163265
| 0
| 0
| 0.105551
| 0.019108
| 0
| 0
| 0
| 0
| 0.020408
| 1
| 0.020408
| false
| 0
| 0.122449
| 0
| 0.163265
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6657771a019db8ff3764b551b4d27a9c8de3eee0
| 3,922
|
py
|
Python
|
caronte/allauth/utils.py
|
simodalla/django-caronte
|
e47175849605924c26441c3a3d6d94f4340b9df7
|
[
"BSD-3-Clause"
] | null | null | null |
caronte/allauth/utils.py
|
simodalla/django-caronte
|
e47175849605924c26441c3a3d6d94f4340b9df7
|
[
"BSD-3-Clause"
] | null | null | null |
caronte/allauth/utils.py
|
simodalla/django-caronte
|
e47175849605924c26441c3a3d6d94f4340b9df7
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.contrib.auth import get_user_model
from django.contrib.admin.templatetags.admin_urls import admin_urlname
from django.core.mail import mail_admins
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.template import loader, Context
from django.utils.html import strip_tags
from allauth.exceptions import ImmediateHttpResponse
from ..models import LoginAuthorization, LogUnauthorizedLogin, AuthorizedDomain
User = get_user_model()
class AuthorizationService:
def __init__(self, user=None):
self._user = user
@property
def user(self):
return self._user
@user.setter
def user(self, user):
self._user = user
@property
def login_authorization(self):
try:
return LoginAuthorization.objects.get(
username=getattr(self.user, User.USERNAME_FIELD, ''))
except LoginAuthorization.DoesNotExist as exc:
raise exc
def make_unathorized_login(self, reason):
try:
LogUnauthorizedLogin.objects.create(
username=self.user.get_username(), reason=reason)
except Exception:
pass
return ImmediateHttpResponse(
redirect(reverse('caronte:unauthorized_login')))
def is_email_in_authorized_domain(self):
email = self.user.email
if not email or '@' not in email:
return False
domain = email.split('@')[1]
try:
AuthorizedDomain.objects.get(domain=domain)
return True
except AuthorizedDomain.DoesNotExist:
return False
def set_fields_from_authorized(self, authorized_user, fields=None):
if authorized_user:
fields = fields or ['is_staff', 'is_superuser']
for field in fields:
setattr(self.user,
field,
getattr(authorized_user, field, False))
return True
return False
def copy_fields(self, source_user, fields=None, dest_update=True):
"""
        Copy each field listed in 'fields' from 'source_user' onto self.user,
        saving self.user when anything changed and 'dest_update' is True.
"""
fields = fields or []
changed = False
for field in fields:
social_field = getattr(source_user, field)
if not (getattr(self.user, field) == social_field):
setattr(self.user, field, social_field)
changed = True
if changed and dest_update:
self.user.save()
return changed
@staticmethod
def _email_for_sociallogin(subject, template, context=None):
context = context or {}
message = loader.get_template(template).render(Context(context))
mail_admins(subject,
strip_tags(message).lstrip('\n'),
fail_silently=True,
html_message=message)
def email_new_sociallogin(self, request):
email = self.user.email
context = {'email': email,
'user_url': request.build_absolute_uri(
reverse(admin_urlname(self.user._meta, 'changelist')))
+ '?email={}'.format(email)}
subject = 'Nuovo socialaccount di {}'.format(email)
return self._email_for_sociallogin(
subject, "custom_email_user/email/new_sociallogin.html", context)
def email_link_sociallogin(self, request):
email = self.user.email
context = {'email': email,
'user_url': request.build_absolute_uri(
self.user.get_absolute_url())}
subject = 'Collegamento socialaccount di {}'.format(email)
return self._email_for_sociallogin(
subject, "custom_email_user/email/link_sociallogin.html", context)
| 35.017857
| 79
| 0.624936
| 418
| 3,922
| 5.665072
| 0.289474
| 0.054054
| 0.02027
| 0.022804
| 0.182432
| 0.142736
| 0.142736
| 0.142736
| 0.142736
| 0.142736
| 0
| 0.000718
| 0.289648
| 3,922
| 112
| 80
| 35.017857
| 0.849246
| 0.027027
| 0
| 0.255556
| 0
| 0
| 0.063672
| 0.030383
| 0
| 0
| 0
| 0
| 0
| 1
| 0.122222
| false
| 0.011111
| 0.111111
| 0.011111
| 0.366667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
665885ddd8b1d1e99097726c1613e0a5986ad3d5
| 15,918
|
py
|
Python
|
Task/data.py
|
sndnyang/GMMC
|
e9cd85c9d55a7de411daad490c8db84dfe9c0455
|
[
"Apache-2.0"
] | 4
|
2021-05-09T16:00:12.000Z
|
2021-12-16T12:31:25.000Z
|
Task/data.py
|
sndnyang/GMMC
|
e9cd85c9d55a7de411daad490c8db84dfe9c0455
|
[
"Apache-2.0"
] | null | null | null |
Task/data.py
|
sndnyang/GMMC
|
e9cd85c9d55a7de411daad490c8db84dfe9c0455
|
[
"Apache-2.0"
] | null | null | null |
from tensorflow.python.platform import flags
from tensorflow.contrib.data.python.ops import batching
import tensorflow as tf
import json
from torch.utils.data import Dataset
import pickle
import os.path as osp
import os
import numpy as np
import time
from scipy.misc import imread, imresize
from torchvision.datasets import CIFAR10, MNIST, SVHN, CIFAR100, ImageFolder
from torchvision import transforms
import torch
import torchvision
FLAGS = flags.FLAGS
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Dataset Options
flags.DEFINE_string('dsprites_path',
'/root/data/dsprites-dataset/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz',
'path to dsprites characters')
flags.DEFINE_string('imagenet_datadir', '/root/imagenet_big', 'whether cutoff should always in image')
flags.DEFINE_bool('dshape_only', False, 'fix all factors except for shapes')
flags.DEFINE_bool('dpos_only', False, 'fix all factors except for positions of shapes')
flags.DEFINE_bool('dsize_only', False, 'fix all factors except for size of objects')
flags.DEFINE_bool('drot_only', False, 'fix all factors except for rotation of objects')
flags.DEFINE_bool('dsprites_restrict', False, 'fix all factors except for rotation of objects')
flags.DEFINE_string('imagenet_path', '/root/imagenet', 'path to imagenet images')
flags.DEFINE_string('load_path', '/root/imagenet', 'path to imagenet images')
flags.DEFINE_string('load_type', 'npy', 'npy or png')
flags.DEFINE_bool('single', False, 'single ')
flags.DEFINE_string('datasource', 'random', 'default or noise or negative or single')
# Data augmentation options
# flags.DEFINE_bool('cutout_inside', False, 'whether cutoff should always in image')
# flags.DEFINE_float('cutout_prob', 1.0, 'probability of using cutout')
# flags.DEFINE_integer('cutout_mask_size', 16, 'size of cutout')
# flags.DEFINE_bool('cutout', False, 'whether to add cutout regularizer to data')
flags.DEFINE_string('eval', '', '')
flags.DEFINE_string('init', '', '')
flags.DEFINE_string('norm', '', '')
flags.DEFINE_string('n_steps', '', '')
flags.DEFINE_string('reinit_freq', '', '')
flags.DEFINE_string('print_every', '', '')
flags.DEFINE_string('n_sample_steps', '', '')
flags.DEFINE_integer('gpu-id', 0, '')
def cutout(mask_color=(0, 0, 0)):
mask_size_half = FLAGS.cutout_mask_size // 2
offset = 1 if FLAGS.cutout_mask_size % 2 == 0 else 0
def _cutout(image):
image = np.asarray(image).copy()
if np.random.random() > FLAGS.cutout_prob:
return image
h, w = image.shape[:2]
if FLAGS.cutout_inside:
cxmin, cxmax = mask_size_half, w + offset - mask_size_half
cymin, cymax = mask_size_half, h + offset - mask_size_half
else:
cxmin, cxmax = 0, w + offset
cymin, cymax = 0, h + offset
cx = np.random.randint(cxmin, cxmax)
cy = np.random.randint(cymin, cymax)
xmin = cx - mask_size_half
ymin = cy - mask_size_half
xmax = xmin + FLAGS.cutout_mask_size
ymax = ymin + FLAGS.cutout_mask_size
xmin = max(0, xmin)
ymin = max(0, ymin)
xmax = min(w, xmax)
ymax = min(h, ymax)
image[:, ymin:ymax, xmin:xmax] = np.array(mask_color)[:, None, None]
return image
return _cutout
class CelebA(Dataset):
def __init__(self):
self.path = "/root/data/img_align_celeba"
self.ims = os.listdir(self.path)
self.ims = [osp.join(self.path, im) for im in self.ims]
def __len__(self):
return len(self.ims)
def __getitem__(self, index):
label = 1
if FLAGS.single:
index = 0
path = self.ims[index]
im = imread(path)
im = imresize(im, (32, 32))
image_size = 32
im = im / 255.
if FLAGS.datasource == 'default':
im_corrupt = im + 0.3 * np.random.randn(image_size, image_size, 3)
elif FLAGS.datasource == 'random':
im_corrupt = np.random.uniform(
0, 1, size=(image_size, image_size, 3))
return im_corrupt, im, label
class Cifar10(Dataset):
def __init__(
self, FLAGS,
train=True,
full=False,
augment=False,
noise=True,
rescale=1.0):
if augment:
transform_list = [
torchvision.transforms.RandomCrop(32, padding=4),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
]
# if FLAGS.cutout:
# transform_list.append(cutout())
transform = transforms.Compose(transform_list)
else:
transform = transforms.ToTensor()
self.FLAGS = FLAGS
self.full = full
self.data = CIFAR10(
"../data/dataset/cifar10",
transform=transform,
train=train,
download=True)
self.test_data = CIFAR10(
"../data/dataset/cifar10",
transform=transform,
train=False,
download=True)
self.one_hot_map = np.eye(10)
self.noise = noise
self.rescale = rescale
def __len__(self):
if self.full:
return len(self.data) + len(self.test_data)
else:
return len(self.data)
def __getitem__(self, index):
FLAGS = self.FLAGS
FLAGS.single = False
if not FLAGS.single:
if self.full:
if index >= len(self.data):
im, label = self.test_data[index - len(self.data)]
else:
im, label = self.data[index]
else:
im, label = self.data[index]
else:
im, label = self.data[0]
im = np.transpose(im, (1, 2, 0)).numpy()
image_size = 32
label = self.one_hot_map[label]
im = im * 255 / 256
if self.noise:
im = im * self.rescale + \
np.random.uniform(0, self.rescale * 1 / 256., im.shape)
np.random.seed((index + int(time.time() * 1e7)) % 2**32)
FLAGS.datasource = 'random'
if FLAGS.datasource == 'default':
im_corrupt = im + 0.3 * np.random.randn(image_size, image_size, 3)
elif FLAGS.datasource == 'random':
im_corrupt = np.random.uniform(
0.0, self.rescale, (image_size, image_size, 3))
return im_corrupt, im, label
class Cifar100(Dataset):
def __init__(self, train=True, augment=False):
if augment:
transform_list = [
torchvision.transforms.RandomCrop(32, padding=4),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
]
if FLAGS.cutout:
transform_list.append(cutout())
transform = transforms.Compose(transform_list)
else:
transform = transforms.ToTensor()
self.data = CIFAR100(
"/root/cifar100",
transform=transform,
train=train,
download=True)
self.one_hot_map = np.eye(100)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
if not FLAGS.single:
im, label = self.data[index]
else:
im, label = self.data[0]
im = np.transpose(im, (1, 2, 0)).numpy()
image_size = 32
label = self.one_hot_map[label]
im = im + np.random.uniform(-1 / 512, 1 / 512, im.shape)
np.random.seed((index + int(time.time() * 1e7)) % 2**32)
if FLAGS.datasource == 'default':
im_corrupt = im + 0.3 * np.random.randn(image_size, image_size, 3)
elif FLAGS.datasource == 'random':
im_corrupt = np.random.uniform(
0.0, 1.0, (image_size, image_size, 3))
return im_corrupt, im, label
class Svhn(Dataset):
def __init__(self, train=True, augment=False):
transform = transforms.ToTensor()
self.data = SVHN("/root/svhn", transform=transform, download=True)
self.one_hot_map = np.eye(10)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
if not FLAGS.single:
im, label = self.data[index]
else:
            im, label = self.data[0]
im = np.transpose(im, (1, 2, 0)).numpy()
image_size = 32
label = self.one_hot_map[label]
im = im + np.random.uniform(-1 / 512, 1 / 512, im.shape)
np.random.seed((index + int(time.time() * 1e7)) % 2**32)
if FLAGS.datasource == 'default':
im_corrupt = im + 0.3 * np.random.randn(image_size, image_size, 3)
elif FLAGS.datasource == 'random':
im_corrupt = np.random.uniform(
0.0, 1.0, (image_size, image_size, 3))
return im_corrupt, im, label
class Mnist(Dataset):
def __init__(self, train=True, rescale=1.0):
self.data = MNIST(
"/root/mnist",
transform=transforms.ToTensor(),
download=True, train=train)
self.labels = np.eye(10)
self.rescale = rescale
def __len__(self):
return len(self.data)
def __getitem__(self, index):
im, label = self.data[index]
label = self.labels[label]
im = im.squeeze()
# im = im.numpy() / 2 + np.random.uniform(0, 0.5, (28, 28))
# im = im.numpy() / 2 + 0.2
im = im.numpy() / 256 * 255 + np.random.uniform(0, 1. / 256, (28, 28))
im = im * self.rescale
image_size = 28
if FLAGS.datasource == 'default':
im_corrupt = im + 0.3 * np.random.randn(image_size, image_size)
elif FLAGS.datasource == 'random':
im_corrupt = np.random.uniform(0, self.rescale, (28, 28))
return im_corrupt, im, label
class DSprites(Dataset):
def __init__(
self,
cond_size=False,
cond_shape=False,
cond_pos=False,
cond_rot=False):
dat = np.load(FLAGS.dsprites_path)
if FLAGS.dshape_only:
l = dat['latents_values']
mask = (l[:, 4] == 16 / 31) & (l[:, 5] == 16 /
31) & (l[:, 2] == 0.5) & (l[:, 3] == 30 * np.pi / 39)
self.data = np.tile(dat['imgs'][mask], (10000, 1, 1))
self.label = np.tile(dat['latents_values'][mask], (10000, 1))
self.label = self.label[:, 1:2]
elif FLAGS.dpos_only:
l = dat['latents_values']
# mask = (l[:, 1] == 1) & (l[:, 2] == 0.5) & (l[:, 3] == 30 * np.pi / 39)
mask = (l[:, 1] == 1) & (
l[:, 3] == 30 * np.pi / 39) & (l[:, 2] == 0.5)
self.data = np.tile(dat['imgs'][mask], (100, 1, 1))
self.label = np.tile(dat['latents_values'][mask], (100, 1))
self.label = self.label[:, 4:] + 0.5
elif FLAGS.dsize_only:
l = dat['latents_values']
# mask = (l[:, 1] == 1) & (l[:, 2] == 0.5) & (l[:, 3] == 30 * np.pi / 39)
mask = (l[:, 3] == 30 * np.pi / 39) & (l[:, 4] == 16 /
31) & (l[:, 5] == 16 / 31) & (l[:, 1] == 1)
self.data = np.tile(dat['imgs'][mask], (10000, 1, 1))
self.label = np.tile(dat['latents_values'][mask], (10000, 1))
self.label = (self.label[:, 2:3])
elif FLAGS.drot_only:
l = dat['latents_values']
mask = (l[:, 2] == 0.5) & (l[:, 4] == 16 /
31) & (l[:, 5] == 16 / 31) & (l[:, 1] == 1)
self.data = np.tile(dat['imgs'][mask], (100, 1, 1))
self.label = np.tile(dat['latents_values'][mask], (100, 1))
self.label = (self.label[:, 3:4])
self.label = np.concatenate(
[np.cos(self.label), np.sin(self.label)], axis=1)
elif FLAGS.dsprites_restrict:
l = dat['latents_values']
mask = (l[:, 1] == 1) & (l[:, 3] == 0 * np.pi / 39)
self.data = dat['imgs'][mask]
self.label = dat['latents_values'][mask]
else:
self.data = dat['imgs']
self.label = dat['latents_values']
if cond_size:
self.label = self.label[:, 2:3]
elif cond_shape:
self.label = self.label[:, 1:2]
elif cond_pos:
self.label = self.label[:, 4:]
elif cond_rot:
self.label = self.label[:, 3:4]
self.label = np.concatenate(
[np.cos(self.label), np.sin(self.label)], axis=1)
else:
self.label = self.label[:, 1:2]
self.identity = np.eye(3)
def __len__(self):
return self.data.shape[0]
def __getitem__(self, index):
im = self.data[index]
image_size = 64
if not (
FLAGS.dpos_only or FLAGS.dsize_only) and (
not FLAGS.cond_size) and (
not FLAGS.cond_pos) and (
not FLAGS.cond_rot) and (
not FLAGS.drot_only):
label = self.identity[self.label[index].astype(
np.int32) - 1].squeeze()
else:
label = self.label[index]
if FLAGS.datasource == 'default':
im_corrupt = im + 0.3 * np.random.randn(image_size, image_size)
elif FLAGS.datasource == 'random':
im_corrupt = 0.5 + 0.5 * np.random.randn(image_size, image_size)
return im_corrupt, im, label
class Imagenet(Dataset):
def __init__(self, train=True, augment=False):
if train:
for i in range(1, 11):
f = pickle.load(
open(
osp.join(
FLAGS.imagenet_path,
'train_data_batch_{}'.format(i)),
'rb'))
if i == 1:
labels = f['labels']
data = f['data']
else:
labels.extend(f['labels'])
data = np.vstack((data, f['data']))
else:
f = pickle.load(
open(
osp.join(
FLAGS.imagenet_path,
'val_data'),
'rb'))
labels = f['labels']
data = f['data']
self.labels = labels
self.data = data
self.one_hot_map = np.eye(1000)
def __len__(self):
return self.data.shape[0]
def __getitem__(self, index):
if not FLAGS.single:
im, label = self.data[index], self.labels[index]
else:
im, label = self.data[0], self.labels[0]
label -= 1
im = im.reshape((3, 32, 32)) / 255
im = im.transpose((1, 2, 0))
image_size = 32
label = self.one_hot_map[label]
im = im + np.random.uniform(-1 / 512, 1 / 512, im.shape)
np.random.seed((index + int(time.time() * 1e7)) % 2**32)
if FLAGS.datasource == 'default':
im_corrupt = im + 0.3 * np.random.randn(image_size, image_size, 3)
elif FLAGS.datasource == 'random':
im_corrupt = np.random.uniform(
0.0, 1.0, (image_size, image_size, 3))
return im_corrupt, im, label
class Textures(Dataset):
def __init__(self, train=True, augment=False):
self.dataset = ImageFolder("/mnt/nfs/yilundu/data/dtd/images")
def __len__(self):
return 2 * len(self.dataset)
def __getitem__(self, index):
idx = index % (len(self.dataset))
im, label = self.dataset[idx]
im = np.array(im)[:32, :32] / 255
im = im + np.random.uniform(-1 / 512, 1 / 512, im.shape)
return im, im, label
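A hedged usage sketch for the datasets above; FLAGS must be initialised before __getitem__ runs, and the batch size is illustrative:

# Hypothetical consumer: each dataset yields (im_corrupt, im, label) triples,
# so it drops straight into a PyTorch DataLoader.
from torch.utils.data import DataLoader

dataset = Cifar10(FLAGS, train=True, augment=True, noise=True)
loader = DataLoader(dataset, batch_size=128, shuffle=True)
im_corrupt, im, label = next(iter(loader))
# im_corrupt/im collate to (128, 32, 32, 3); label to (128, 10) one-hot rows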
| 33.441176
| 102
| 0.537379
| 1,982
| 15,918
| 4.168012
| 0.12109
| 0.035952
| 0.018642
| 0.028326
| 0.581165
| 0.548602
| 0.515555
| 0.475245
| 0.434814
| 0.403462
| 0
| 0.040527
| 0.32724
| 15,918
| 475
| 103
| 33.511579
| 0.730881
| 0.038824
| 0
| 0.479893
| 0
| 0
| 0.078302
| 0.011709
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069705
| false
| 0
| 0.040214
| 0.018767
| 0.184987
| 0.002681
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
66595693a0bfed64682ff38551b196526a22e500
| 981
|
py
|
Python
|
Collection/ms/01 Arrays/07_valid_paranthesis.py
|
kmanadkat/leetcode-101
|
8a9db22d98692d634a497ba76c7e9f792bb1f1bc
|
[
"MIT"
] | null | null | null |
Collection/ms/01 Arrays/07_valid_paranthesis.py
|
kmanadkat/leetcode-101
|
8a9db22d98692d634a497ba76c7e9f792bb1f1bc
|
[
"MIT"
] | null | null | null |
Collection/ms/01 Arrays/07_valid_paranthesis.py
|
kmanadkat/leetcode-101
|
8a9db22d98692d634a497ba76c7e9f792bb1f1bc
|
[
"MIT"
] | 1
|
2021-09-15T11:17:36.000Z
|
2021-09-15T11:17:36.000Z
|
class Solution:
def isValid(self, s: str) -> bool:
# String should have even length
if len(s) % 2 != 0:
return False
# Use list as Stack DS
bracStack = []
# If Bracket open add in Stack else Pop & Check
for ele in s:
if ele in ['(', '{', '[']:
bracStack.append(ele)
elif ele in [')', '}', ']']:
                # Stack must have at least one opening bracket for each closing bracket
if len(bracStack) == 0:
return False
topBracStack = bracStack.pop()
if ele == ')' and topBracStack != '(':
return False
elif ele == '}' and topBracStack != '{':
return False
elif ele == ']' and topBracStack != '[':
return False
# Stack Length Should Be Zero at End
        return len(bracStack) == 0
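Quick sanity checks for the matcher above, with expected results taken from the LeetCode #20 problem statement:

# Sanity checks for Solution.isValid (illustrative).
solver = Solution()
assert solver.isValid("()[]{}") is True
assert solver.isValid("([)]") is False   # interleaved pairs
assert solver.isValid("((") is False     # unmatched openings
assert solver.isValid("{[]}") is True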
| 29.727273
| 86
| 0.459735
| 100
| 981
| 4.51
| 0.49
| 0.121951
| 0.119734
| 0.159645
| 0.210643
| 0.210643
| 0.210643
| 0.210643
| 0.210643
| 0.210643
| 0
| 0.009058
| 0.437309
| 981
| 32
| 87
| 30.65625
| 0.807971
| 0.204893
| 0
| 0.263158
| 0
| 0
| 0.015504
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0
| 0
| 0.421053
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
665bab55df7c6bcde1b85c9c43014205b79501eb
| 2,984
|
py
|
Python
|
pybf/image_settings.py
|
Sergio5714/pybf
|
bf56b353cd715c1bdb16d6cbb79aef44e3ef49bc
|
[
"Apache-2.0"
] | 1
|
2021-11-02T09:54:41.000Z
|
2021-11-02T09:54:41.000Z
|
pybf/image_settings.py
|
Sergio5714/pybf
|
bf56b353cd715c1bdb16d6cbb79aef44e3ef49bc
|
[
"Apache-2.0"
] | null | null | null |
pybf/image_settings.py
|
Sergio5714/pybf
|
bf56b353cd715c1bdb16d6cbb79aef44e3ef49bc
|
[
"Apache-2.0"
] | 2
|
2020-04-17T10:50:06.000Z
|
2021-11-02T09:54:47.000Z
|
"""
Copyright (C) 2020 ETH Zurich. All rights reserved.
Author: Sergei Vostrikov, ETH Zurich
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
class ImageSettings:
def __init__(self,
image_size_x_0,
image_size_x_1,
image_size_z_0,
image_size_z_1,
lateral_pixel_density,
transducer_obj):
# Copy transducers params
self._transducer = transducer_obj
# Copy image params
self._image_size_x_0 = image_size_x_0
self._image_size_z_0 = image_size_z_0
self._image_size_x_1 = image_size_x_1
self._image_size_z_1 = image_size_z_1
self._image_size_x = abs(image_size_x_1 - image_size_x_0)
self._image_size_z = abs(image_size_z_1 - image_size_z_0)
# Number of pixels per distance between transducers
self._lat_pixel_density = lateral_pixel_density
self._calc_min_axial_resolution()
# Calculate high resolution for images
self._calc_high_res()
return
def _calc_min_axial_resolution(self):
self._axial_res_min = 1 / self._transducer.bandwidth_hz * self._transducer.speed_of_sound
return
def _calc_high_res(self):
# Calculate number of x pixels
n_x = np.round(self._image_size_x / self._transducer._x_pitch * self._lat_pixel_density)
n_x = n_x.astype(np.int).item()
# Calculate number of z pixels
n_z = np.round(self._image_size_z / self._axial_res_min)
n_z = n_z.astype(np.int).item()
self._high_resolution = (n_x, n_z)
print('The highest resolution for the system is: ', self._high_resolution)
return
def get_pixels_coords(self, x_res=None, z_res=None):
        if x_res is not None:
n_x = x_res
else:
n_x = self._high_resolution[0]
        if z_res is not None:
n_z = z_res
else:
n_z = self._high_resolution[1]
# Calculate positions
x_coords = np.linspace(self._image_size_x_0, self._image_size_x_1, n_x)
x_coords = x_coords.reshape(-1,)
# Calculate positions
z_coords = np.linspace(self._image_size_z_0, self._image_size_z_1, n_z)
z_coords = z_coords.reshape(-1,)
self._pixels_coords = np.transpose(np.dstack(np.meshgrid(x_coords, z_coords)).reshape(-1, 2))
return self._pixels_coords
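A hedged usage sketch; the fake transducer exposes just the three attributes the class reads, with illustrative ultrasound values. Note that _calc_high_res calls np.int, which NumPy removed in 1.24, so this runs as written only on older NumPy versions:

# Hypothetical usage of ImageSettings (all values illustrative).
class _FakeTransducer:
    bandwidth_hz = 5e6        # 5 MHz transducer bandwidth
    speed_of_sound = 1540.0   # m/s, typical soft tissue
    _x_pitch = 3e-4           # 0.3 mm element pitch

settings = ImageSettings(image_size_x_0=-0.02, image_size_x_1=0.02,
                         image_size_z_0=0.005, image_size_z_1=0.045,
                         lateral_pixel_density=2,
                         transducer_obj=_FakeTransducer())
pixels = settings.get_pixels_coords()  # shape (2, n_x * n_z): x row and z row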
| 31.410526
| 101
| 0.655831
| 438
| 2,984
| 4.086758
| 0.296804
| 0.12067
| 0.094413
| 0.054749
| 0.212849
| 0.163128
| 0.139106
| 0.050279
| 0
| 0
| 0
| 0.016136
| 0.273123
| 2,984
| 95
| 102
| 31.410526
| 0.809129
| 0.282172
| 0
| 0.108696
| 0
| 0
| 0.020048
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.021739
| 0
| 0.217391
| 0.021739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
665e40e33fdd973b30b29de0d4999dd092a29402
| 681
|
py
|
Python
|
calc.py
|
fja05680/calc
|
6959bdd740722c7e3024f4e5a9a21607ad5ffccf
|
[
"MIT"
] | null | null | null |
calc.py
|
fja05680/calc
|
6959bdd740722c7e3024f4e5a9a21607ad5ffccf
|
[
"MIT"
] | null | null | null |
calc.py
|
fja05680/calc
|
6959bdd740722c7e3024f4e5a9a21607ad5ffccf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import calc
def main():
try:
while True:
try:
expression = input('calc> ')
# Parse the expression.
lexer = calc.Lexer(expression)
tokens = lexer.parse()
print(tokens)
parser = calc.Parser(tokens)
tree = parser.parse()
# Evaluate the expression.
if tree:
value = tree.evaluate()
print(f'{tree} = {value}')
except Exception as e:
print(e)
except KeyboardInterrupt:
print()
if __name__ == '__main__':
main()
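A hypothetical session with the REPL above; the exact token list and tree repr depend on the calc module, which is not shown here:

# Hypothetical session (outputs depend on calc.Lexer / calc.Parser):
#   $ ./calc.py
#   calc> 1 + 2 * 3
#   [<tokens for "1 + 2 * 3">]
#   <parse tree> = 7
#   calc> <Ctrl-C>   -> KeyboardInterrupt ends the loop via print()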
| 23.482759
| 46
| 0.444934
| 58
| 681
| 5.086207
| 0.5
| 0.088136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00271
| 0.45815
| 681
| 28
| 47
| 24.321429
| 0.796748
| 0.093979
| 0
| 0.1
| 0
| 0
| 0.04886
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.1
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
666446282fcb45a4a20b926c54fc47be65a01ac8
| 8,534
|
py
|
Python
|
aiida_environ/workflows/pw/grandcanonical.py
|
environ-developers/aiida-environ
|
c39ac70227a41e084b74df630c3cb4b4caa27094
|
[
"MIT"
] | null | null | null |
aiida_environ/workflows/pw/grandcanonical.py
|
environ-developers/aiida-environ
|
c39ac70227a41e084b74df630c3cb4b4caa27094
|
[
"MIT"
] | 1
|
2021-12-07T17:03:44.000Z
|
2021-12-07T17:03:44.000Z
|
aiida_environ/workflows/pw/grandcanonical.py
|
environ-developers/aiida-environ
|
c39ac70227a41e084b74df630c3cb4b4caa27094
|
[
"MIT"
] | null | null | null |
import numpy as np
from aiida.common import AttributeDict
from aiida.engine import WorkChain, append_
from aiida.orm import Dict, List, StructureData
from aiida.orm.nodes.data.upf import get_pseudos_from_structure
from aiida.orm.utils import load_node
from aiida.plugins import WorkflowFactory
from aiida_quantumespresso.utils.mapping import prepare_process_inputs
from aiida_environ.calculations.adsorbate.gen_supercell import (
adsorbate_gen_supercell,
gen_hydrogen,
)
from aiida_environ.calculations.adsorbate.post_supercell import adsorbate_post_supercell
from aiida_environ.data.charge import EnvironChargeData
from aiida_environ.utils.charge import get_charge_range
from aiida_environ.utils.vector import get_struct_bounds
EnvPwBaseWorkChain = WorkflowFactory("environ.pw.base")
PwBaseWorkChain = WorkflowFactory("quantumespresso.pw.base")
class AdsorbateGrandCanonical(WorkChain):
@classmethod
def define(cls, spec):
super().define(spec)
spec.expose_inputs(
EnvPwBaseWorkChain,
namespace="base",
namespace_options={"help": "Inputs for the `EnvPwBaseWorkChain`."},
exclude=("pw.structure", "pw.external_charges"),
)
spec.input("vacancies", valid_type=List)
spec.input("bulk_structure", valid_type=StructureData)
spec.input("mono_structure", valid_type=StructureData)
spec.input("calculation_parameters", valid_type=Dict)
spec.outline(
cls.setup,
cls.selection,
cls.simulate,
# cls.postprocessing
)
def setup(self):
self.ctx.environ_parameters = self.inputs.base.pw.environ_parameters
self.ctx.calculation_details = {}
calculation_parameters = self.inputs.calculation_parameters.get_dict()
calculation_parameters.setdefault("charge_distance", 5.0)
calculation_parameters.setdefault("charge_max", 1.0)
calculation_parameters.setdefault("charge_min", -1.0)
calculation_parameters.setdefault("charge_increment", 0.2)
calculation_parameters.setdefault("charge_spread", 0.5)
calculation_parameters.setdefault("system_axis", 3)
calculation_parameters.setdefault("cell_shape_x", 2)
calculation_parameters.setdefault("cell_shape_y", 2)
calculation_parameters.setdefault("reflect_vacancies", True)
self.ctx.calculation_parameters = Dict(dict=calculation_parameters)
# TODO: check sanity of inputs
def selection(self):
d = adsorbate_gen_supercell(
self.ctx.calculation_parameters,
self.inputs.mono_structure,
self.inputs.vacancies,
)
self.ctx.struct_list = d["output_structs"]
self.ctx.num_adsorbate = d["num_adsorbate"]
self.report(f"struct_list written: {self.ctx.struct_list}")
self.report(f"num_adsorbate written: {self.ctx.num_adsorbate}")
def simulate(self):
distance = self.ctx.calculation_parameters["charge_distance"]
axis = self.ctx.calculation_parameters["system_axis"]
charge_max = self.ctx.calculation_parameters["charge_max"]
charge_inc = self.ctx.calculation_parameters["charge_increment"]
charge_spread = self.ctx.calculation_parameters["charge_spread"]
charge_range = get_charge_range(charge_max, charge_inc)
# TODO: maybe do this at setup and change the cell if it's too big?
cpos1, cpos2 = get_struct_bounds(self.inputs.mono_structure, axis)
# change by 5 angstrom
cpos1 -= distance
cpos2 += distance
npcpos1 = np.zeros(3)
npcpos2 = np.zeros(3)
npcpos1[axis - 1] = cpos1
npcpos2[axis - 1] = cpos2
nsims = (len(charge_range) * (len(self.ctx.struct_list) + 1)) + 1
self.report(f"number of simulations to run = {nsims}")
for i, charge_amt in enumerate(charge_range):
self.ctx.calculation_details[charge_amt] = {}
# loop over charges
charges = EnvironChargeData()
# get position of charge
charges.append_charge(
-charge_amt / 2, tuple(npcpos1), charge_spread, 2, axis
)
charges.append_charge(
-charge_amt / 2, tuple(npcpos2), charge_spread, 2, axis
)
for j, structure_pk in enumerate(self.ctx.struct_list):
# regular monolayer simulation with adsorbate/charge
inputs = AttributeDict(
self.exposed_inputs(EnvPwBaseWorkChain, namespace="base")
)
inputs.pw.parameters = inputs.pw.parameters.get_dict()
structure = load_node(structure_pk)
self.report(f"{structure}")
inputs.pw.structure = structure
inputs.pw.parameters["SYSTEM"]["tot_charge"] = charge_amt
inputs.pw.parameters["ELECTRONS"]["mixing_mode"] = "local-TF"
inputs.pw.external_charges = charges
inputs.pw.pseudos = get_pseudos_from_structure(structure, "SSSPe")
inputs.metadata.call_link_label = f"s{j}_c{i}"
inputs = prepare_process_inputs(EnvPwBaseWorkChain, inputs)
running = self.submit(EnvPwBaseWorkChain, **inputs)
self.report(f"<s{j}_c{i}> launching EnvPwBaseWorkChain<{running.pk}>")
self.ctx.calculation_details[charge_amt][structure_pk] = running.pk
self.to_context(workchains=append_(running))
# base monolayer simulation
inputs = AttributeDict(
self.exposed_inputs(EnvPwBaseWorkChain, namespace="base")
)
structure = self.inputs.mono_structure
self.report(f"{structure}")
inputs.pw.structure = structure
inputs.pw.external_charges = charges
inputs.pw.pseudos = get_pseudos_from_structure(structure, "SSSPe")
inputs.metadata.call_link_label = f"smono_c{i}"
inputs = prepare_process_inputs(EnvPwBaseWorkChain, inputs)
running = self.submit(EnvPwBaseWorkChain, **inputs)
self.report(f"<smono_c{i}> launching EnvPwBaseWorkChain<{running.pk}>")
self.ctx.calculation_details[charge_amt]["mono"] = running.pk
self.to_context(workchains=append_(running))
# bulk simulation
inputs = AttributeDict(
self.exposed_inputs(EnvPwBaseWorkChain, namespace="base")
)
structure = self.inputs.bulk_structure
self.report(f"{structure}")
inputs.pw.structure = structure
inputs.pw.pseudos = get_pseudos_from_structure(structure, "SSSPe")
inputs.metadata.call_link_label = "sbulk"
inputs.pw.metadata.options.parser_name = "quantumespresso.pw"
delattr(inputs.pw.metadata.options, "debug_filename")
delattr(inputs.pw, "environ_parameters")
inputs = prepare_process_inputs(PwBaseWorkChain, inputs)
running = self.submit(PwBaseWorkChain, **inputs)
self.report(f"<sbulk> launching PwBaseWorkChain<{running.pk}>")
self.ctx.calculation_details["bulk"] = running.pk
self.to_context(workchains=append_(running))
# hydrogen simulation
inputs = AttributeDict(
self.exposed_inputs(EnvPwBaseWorkChain, namespace="base")
)
structure = gen_hydrogen()
self.report(f"{structure}")
inputs.pw.pseudos = get_pseudos_from_structure(structure, "SSSPe")
inputs.pw.structure = structure
inputs.metadata.call_link_label = "sads_neutral"
inputs.pw.metadata.options.parser_name = "quantumespresso.pw"
inputs = prepare_process_inputs(PwBaseWorkChain, inputs)
delattr(inputs.pw.metadata.options, "debug_filename")
delattr(inputs.pw, "environ_parameters")
running = self.submit(PwBaseWorkChain, **inputs)
self.report(f"<sads_neutral> launching PwBaseWorkChain<{running.pk}>")
self.ctx.calculation_details["adsorbate"] = running.pk
self.to_context(workchains=append_(running))
self.report(f"calc_details written: {self.ctx.calculation_details}")
def postprocessing(self):
adsorbate_post_supercell(
            self.inputs.mono_structure,  # matches spec.input("mono_structure")
            self.inputs.bulk_structure,  # matches spec.input("bulk_structure")
self.ctx.calculation_parameters,
self.ctx.calculation_details,
self.ctx.struct_list,
self.ctx.num_adsorbate,
)
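A hedged submission sketch using the standard AiiDA builder pattern; the structures, vacancy indices and nested base.pw inputs are placeholders that a real run must provide:

# Hypothetical driver snippet (all values are placeholders).
from aiida.engine import submit
from aiida.orm import Dict, List, StructureData

builder = AdsorbateGrandCanonical.get_builder()
builder.bulk_structure = StructureData()        # placeholder: real bulk cell required
builder.mono_structure = StructureData()        # placeholder: real monolayer required
builder.vacancies = List(list=[0, 1])           # illustrative vacancy site indices
builder.calculation_parameters = Dict(dict={})  # defaults are filled in setup()
# builder.base.pw.* (code, parameters, environ_parameters, ...) must also be set.
node = submit(builder)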
| 44.915789
| 88
| 0.664284
| 923
| 8,534
| 5.937161
| 0.192849
| 0.031934
| 0.052555
| 0.036496
| 0.511679
| 0.408029
| 0.353832
| 0.341423
| 0.239051
| 0.239051
| 0
| 0.005537
| 0.238106
| 8,534
| 189
| 89
| 45.153439
| 0.837281
| 0.033747
| 0
| 0.275
| 0
| 0
| 0.12714
| 0.032058
| 0
| 0
| 0
| 0.005291
| 0
| 1
| 0.03125
| false
| 0
| 0.08125
| 0
| 0.11875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6664aaeb4a16b83003b59cd285e9bdc4f631fdb5
| 6,481
|
py
|
Python
|
tabnet/utils.py
|
huangyz0918/tabnet
|
a93d52c6f33e9ea8ad0f152cdaf5a0cabec8e6d4
|
[
"MIT"
] | 1
|
2021-06-17T04:47:41.000Z
|
2021-06-17T04:47:41.000Z
|
tabnet/utils.py
|
huangyz0918/tabnet
|
a93d52c6f33e9ea8ad0f152cdaf5a0cabec8e6d4
|
[
"MIT"
] | null | null | null |
tabnet/utils.py
|
huangyz0918/tabnet
|
a93d52c6f33e9ea8ad0f152cdaf5a0cabec8e6d4
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
import pandas as pd
from collections import OrderedDict
class TrainingDataset(torch.utils.data.Dataset):
is_categorical = False
def __init__(
self,
X,
y,
output_mapping=None,
categorical_mapping=None,
columns=None,
device=None,
):
self.columns = columns
self.device = device
# Preprocess categoricals
if categorical_mapping:
X_slices = OrderedDict()
for key, val in sorted(
categorical_mapping.items(), key=lambda k: k[1]["idx"]
):
X_slices[val["idx"]] = map_categoricals_to_ordinals(
X[:, val["idx"]], val["map"]
).to(self.device)
idx_slice = sorted([val["idx"] for key, val in categorical_mapping.items()])
X_continuous = (
torch.from_numpy(np.delete(X, idx_slice, -1).astype(float))
.float()
.to(self.device)
)
self.X = (X_continuous, X_slices)
else:
self.X = (torch.from_numpy(X).float().to(self.device), OrderedDict())
# Preprocess targets
if output_mapping:
self.y = map_categoricals_to_ordinals(y, output_mapping).to(self.device)
self.n_output_dims = len(output_mapping.keys())
else:
self.y = torch.from_numpy(y.astype(float)).float()
if len(self.y.size()) == 1:
self.y = self.y.unsqueeze(-1).to(self.device)
self.n_output_dims = list(self.y.size())[-1]
def __len__(self):
return len(self.X[0])
def __getitem__(self, index):
return (
self.X[0][index, ...],
OrderedDict({key: self.X[1][key][index, ...] for key in self.X[1]}),
self.y[index, ...],
)
def random_batch(self, n_samples):
"""Generates a random batch of `n_samples` with replacement."""
        random_idx = np.random.randint(0, self.__len__(), size=n_samples)  # high bound is exclusive
return self.__getitem__(random_idx)
class InferenceDataset(torch.utils.data.Dataset):
"""Creates a PyTorch Dataset object for a set of points for inference."""
def __init__(self, X, categorical_mapping=None, columns=None, device=None):
self.columns = columns
self.device = device
# Preprocess categoricals
if categorical_mapping:
X_slices = OrderedDict()
for key, val in sorted(
categorical_mapping.items(), key=lambda k: k[1]["idx"]
):
X_slices[val["idx"]] = map_categoricals_to_ordinals(
X[:, val["idx"]], val["map"]
).to(self.device)
idx_slice = sorted([val["idx"] for key, val in categorical_mapping.items()])
X_continuous = torch.from_numpy(
np.delete(X, idx_slice, -1).astype(float)
).float()
self.X = (X_continuous.to(self.device), X_slices)
else:
self.X = (torch.from_numpy(X).float().to(self.device), OrderedDict())
def __len__(self):
return len(self.X[0])
def __getitem__(self, index):
return (
self.X[0][index, ...],
OrderedDict({key: self.X[1][key][index, ...] for key in self.X[1]}),
)
class EarlyStopping(object):
"""
Implemented from: https://gist.github.com/stefanonardo/693d96ceb2f531fa05db530f3e21517d
"""
def __init__(self, mode="min", min_delta=0, patience=10, percentage=False):
self.mode = mode
self.min_delta = min_delta
self.patience = patience
self.best = None
self.num_bad_epochs = 0
self.is_better = None
self._init_is_better(mode, min_delta, percentage)
if patience == 0:
self.is_better = lambda a, b: True
self.step = lambda a: False
def step(self, metrics):
if self.best is None:
self.best = metrics
return False
if torch.isnan(metrics):
return True
if self.is_better(metrics, self.best):
self.num_bad_epochs = 0
self.best = metrics
else:
self.num_bad_epochs += 1
if self.num_bad_epochs >= self.patience:
return True
return False
def _init_is_better(self, mode, min_delta, percentage):
if mode not in {"min", "max"}:
raise ValueError("mode " + mode + " is unknown!")
if not percentage:
if mode == "min":
self.is_better = lambda a, best: a < best - min_delta
if mode == "max":
self.is_better = lambda a, best: a > best + min_delta
else:
if mode == "min":
self.is_better = lambda a, best: a < best - (best * min_delta / 100)
if mode == "max":
self.is_better = lambda a, best: a > best + (best * min_delta / 100)
def generate_categorical_to_ordinal_map(inputs):
if isinstance(inputs, pd.Series):
inputs = inputs.values
uq_inputs = np.unique(inputs)
return dict(zip(list(uq_inputs), list(range(len(uq_inputs)))))
def map_categoricals_to_ordinals(categoricals, mapping):
unmapped_targets = set(np.unique(categoricals).flatten()) - set(mapping.keys())
if len(unmapped_targets) > 0:
raise ValueError(
"Mapping missing the following keys: {}".format(unmapped_targets)
)
return torch.from_numpy(
np.vectorize(mapping.get)(categoricals).astype(float)
).long()
def map_categoricals_to_one_hot(categoricals, mapping):
unmapped_elements = set(np.unique(categoricals).flatten()) - set(mapping.keys())
if len(unmapped_elements) > 0:
raise ValueError(
"Mapping missing the following keys: {}".format(unmapped_elements)
)
return torch.from_numpy(
np.squeeze(
np.eye(len(mapping.keys()))[
np.vectorize(mapping.get)(categoricals).reshape(-1)
]
).astype(float)
).long()
def map_ordinals_to_categoricals(ordinals, mapping):
if isinstance(ordinals, torch.Tensor):
ordinals = ordinals.detach().cpu().numpy()
elif isinstance(ordinals, list):
ordinals = np.array(ordinals)
inv_target_mapping = {v: k for k, v in mapping.items()}
return np.vectorize(inv_target_mapping.get)(ordinals).squeeze()
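A small round-trip check for the mapping helpers above, using the module's own numpy import; the labels are illustrative:

# Round-trip sanity check for the categorical mapping helpers.
y = np.array(["cat", "dog", "cat", "bird"])
mapping = generate_categorical_to_ordinal_map(y)     # {'bird': 0, 'cat': 1, 'dog': 2}
ordinals = map_categoricals_to_ordinals(y, mapping)  # tensor([1, 2, 1, 0])
labels = map_ordinals_to_categoricals(ordinals, mapping)
assert list(labels) == list(y)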
| 33.755208
| 91
| 0.577843
| 779
| 6,481
| 4.620026
| 0.183569
| 0.01945
| 0.026674
| 0.025007
| 0.500417
| 0.440122
| 0.428452
| 0.413448
| 0.413448
| 0.413448
| 0
| 0.011677
| 0.299645
| 6,481
| 191
| 92
| 33.931937
| 0.781229
| 0.043358
| 0
| 0.394737
| 0
| 0
| 0.023331
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092105
| false
| 0
| 0.026316
| 0.026316
| 0.230263
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
66654d5cfc565e697020cd64524f69662efe7ca5
| 312
|
py
|
Python
|
urls.py
|
stephenmcd/gamblor
|
a12f43339e2a6d34e4ed5ea3d02a3629ed5b8616
|
[
"BSD-2-Clause"
] | 12
|
2015-06-09T02:31:43.000Z
|
2021-12-11T21:35:38.000Z
|
urls.py
|
binarygrrl/gamblor
|
a12f43339e2a6d34e4ed5ea3d02a3629ed5b8616
|
[
"BSD-2-Clause"
] | null | null | null |
urls.py
|
binarygrrl/gamblor
|
a12f43339e2a6d34e4ed5ea3d02a3629ed5b8616
|
[
"BSD-2-Clause"
] | 9
|
2016-11-14T23:56:51.000Z
|
2021-04-14T07:47:44.000Z
|
from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin
from core import game
admin.autodiscover()
game.autodiscover()
urlpatterns = patterns("",
("^admin/", include(admin.site.urls)),
url("", include("social_auth.urls")),
url("", include("core.urls")),
)
| 18.352941
| 60
| 0.692308
| 38
| 312
| 5.657895
| 0.473684
| 0.093023
| 0.130233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141026
| 312
| 16
| 61
| 19.5
| 0.802239
| 0
| 0
| 0
| 0
| 0
| 0.102894
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.3
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
666552755de681921ce121bf7878b38237804c08
| 3,258
|
py
|
Python
|
DCGAN/train.py
|
drone911/Mnist-GANs
|
6b5ffc6ecf5070522ebcb6a41374cfffd674b684
|
[
"MIT"
] | null | null | null |
DCGAN/train.py
|
drone911/Mnist-GANs
|
6b5ffc6ecf5070522ebcb6a41374cfffd674b684
|
[
"MIT"
] | null | null | null |
DCGAN/train.py
|
drone911/Mnist-GANs
|
6b5ffc6ecf5070522ebcb6a41374cfffd674b684
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 13 20:11:22 2019
@author: drone911
"""
from helper import *
from models import *
import numpy as np
from keras.datasets import mnist
from tqdm import tqdm
import warnings
def train(train_images, generator, discriminator, gan, num_classes=120, random_dim=128, epochs=100, batch_size=128):
num_train_images=train_images.shape[0]
num_batches=int(num_train_images/batch_size)
hist_disc_avg, hist_gen_avg=[], []
for e in range(epochs):
fake_img_y=np.zeros((batch_size, 1))
fake_img_y[:]=0
real_img_y=np.zeros((batch_size, 1))
real_img_y[:]=0.9
gan_y=np.ones((batch_size, 1))
hist_disc, hist_gen=[], []
iterator=tqdm(range(num_batches))
try:
for i in iterator:
sampled_noise = generate_inputs(random_dim, batch_size)
real_img_x=train_images[np.random.randint(0,train_images.shape[0],size=batch_size)]
fake_img_x=generator.predict(sampled_noise)
train_disc_x=np.concatenate((real_img_x, fake_img_x), axis=0)
train_disc_y=np.concatenate((real_img_y, fake_img_y), axis=0)
discriminator.trainable=True
hist_disc.append(discriminator.train_on_batch(train_disc_x, train_disc_y))
noise=generate_inputs(random_dim, batch_size)
discriminator.trainable=False
hist_gen.append(gan.train_on_batch(noise, gan_y))
hist_disc_avg.append(np.mean(hist_disc[0:num_batches]))
hist_gen_avg.append(np.mean(hist_gen[0:num_batches]))
print("-------------------------------------------------------")
print("discriminator loss at epoch {}:{}".format(e, hist_disc_avg[-1]))
print("generator loss at epoch {}:{}".format(e, hist_gen_avg[-1]))
print("-------------------------------------------------------")
plot_generated_images(e, generator, random_dim=random_dim)
plot_loss(hist_disc, hist_gen)
if e % 10 == 0:
discriminator.save_weights("models\\disc_v1_epoch_{}.h5".format(e))
generator.save_weights("models\\gen_v1_epoch_{}.h5".format(e))
except KeyboardInterrupt:
iterator.close()
print("Interrupted")
break
if __name__=="__main__":
warnings.filterwarnings("ignore")
(train_images, train_labels), (test_images, test_labels)=mnist.load_data()
random_dim=100
batch_size=128
lr=0.0002
beta_1=0.5
train_images=np.concatenate((train_images, test_images), axis=0)
train_images=train_images.reshape(-1,28,28,1)
train_images=(train_images.astype(np.float32) - 127.5) / 127.5
generator=get_gen_nn(random_dim=random_dim, lr=lr, beta_1=beta_1,verbose=False)
discriminator=get_disc_nn(lr=lr, beta_1=beta_1,verbose=False)
gan=create_gan(discriminator, generator, random_dim=random_dim, lr=lr, beta_1=beta_1,verbose=False)
train(train_images, generator, discriminator, gan, random_dim=random_dim, epochs=50, batch_size=128)
| 41.240506
| 116
| 0.612339
| 430
| 3,258
| 4.32093
| 0.276744
| 0.082885
| 0.034446
| 0.038751
| 0.248654
| 0.191604
| 0.123789
| 0.061356
| 0.047363
| 0.047363
| 0
| 0.037338
| 0.243708
| 3,258
| 78
| 117
| 41.769231
| 0.716721
| 0.023634
| 0
| 0.033898
| 0
| 0
| 0.07879
| 0.051371
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016949
| false
| 0
| 0.101695
| 0
| 0.118644
| 0.084746
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6667684709a7e3192cfea4fd79e3ee7e997e694d
| 2,418
|
py
|
Python
|
Model/predictor-dl-model/tests/experiments/7day_variance_uckey_weight_in_slotid.py
|
rangaswamymr/blue-marlin
|
2ab39a6af01e14f40386f640fe087aeb284b5524
|
[
"Apache-2.0"
] | null | null | null |
Model/predictor-dl-model/tests/experiments/7day_variance_uckey_weight_in_slotid.py
|
rangaswamymr/blue-marlin
|
2ab39a6af01e14f40386f640fe087aeb284b5524
|
[
"Apache-2.0"
] | null | null | null |
Model/predictor-dl-model/tests/experiments/7day_variance_uckey_weight_in_slotid.py
|
rangaswamymr/blue-marlin
|
2ab39a6af01e14f40386f640fe087aeb284b5524
|
[
"Apache-2.0"
] | null | null | null |
from pyspark import SparkContext, SparkConf, SQLContext
from pyspark.sql.functions import count, lit, col, udf, expr, collect_list, explode
from pyspark.sql.types import IntegerType, StringType, MapType, ArrayType, BooleanType, FloatType
from pyspark.sql import HiveContext
from datetime import datetime, timedelta
from pyspark.sql.functions import broadcast
def _list_to_map(count_array):
count_map = {}
for item in count_array:
key_value = item.split(':')
count_map[key_value[0]] = key_value[1]
return count_map
def add_count_map(df):
# Convert count_array to count_map
list_to_map_udf = udf(_list_to_map, MapType(
StringType(), StringType(), False))
df = df.withColumn('count_map', list_to_map_udf(df.count_array))
return df
def variance(plist):
l = len(plist)
ex = sum(plist)/l
ex2 = sum([i*i for i in plist])/l
return ex2-ex*ex
query = "select count_array,day,uckey from factdata where day in ('2020-05-15','2020-05-14','2020-05-13','2020-05-12','2020-05-11','2020-05-10','2020-05-09')"
sc = SparkContext()
hive_context = HiveContext(sc)
df = hive_context.sql(query)
df = add_count_map(df)
df = df.select('uckey', 'day', explode(df.count_map)).withColumnRenamed("value", "impr_count")
df = df.withColumn('impr_count', udf(lambda x: int(x), IntegerType())(df.impr_count))
df = df.groupBy('uckey', 'day').sum('impr_count').withColumnRenamed("sum(impr_count)", 'impr_count')
split_uckey_udf = udf(lambda x: x.split(","), ArrayType(StringType()))
df = df.withColumn('col', split_uckey_udf(df.uckey))
df = df.select('uckey', 'impr_count', 'day', df.col[1]).withColumnRenamed("col[1]", 'slot_id')
df_slot = df.select('slot_id', 'impr_count', 'day')
df_slot = df_slot.groupBy('slot_id', 'day').sum('impr_count').withColumnRenamed("sum(impr_count)", "impr_total")
bc_df_slot = broadcast(df_slot)
df_new = df.join(bc_df_slot, on=["slot_id", 'day'], how="inner")
df_new = df_new.withColumn('percent', udf(lambda x, y: (x*100)/y, FloatType())(df_new.impr_count, df_new.impr_total))
df2 = df_new.groupBy("uckey").agg(collect_list('percent').alias('percent'))
df2 = df2.withColumn('var', udf(lambda x: variance(x), FloatType())(df2.percent))
df2.select("uckey", "var").orderBy(["var"], ascending=False).show(300, truncate=False)
df2.cache()
print("% uckeys having varience > 0.01 ", df2.filter((df2.var <= 0.01)).count()*100/df2.count())
| 37.78125
| 158
| 0.706369
| 377
| 2,418
| 4.34748
| 0.278515
| 0.060403
| 0.034167
| 0.028066
| 0.118365
| 0.082977
| 0.058572
| 0.058572
| 0.058572
| 0
| 0
| 0.040414
| 0.119934
| 2,418
| 63
| 159
| 38.380952
| 0.729793
| 0.013234
| 0
| 0
| 0
| 0.023256
| 0.177013
| 0.047399
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069767
| false
| 0
| 0.139535
| 0
| 0.27907
| 0.023256
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
66698ee5453f94b084a237ee9ea9e607d1b0395c
| 9,922
|
py
|
Python
|
main_fed.py
|
berserkersss/FL_CNN_Diff_Acc
|
f78651b426ff700108b62f2afbd99134b30af1e6
|
[
"MIT"
] | null | null | null |
main_fed.py
|
berserkersss/FL_CNN_Diff_Acc
|
f78651b426ff700108b62f2afbd99134b30af1e6
|
[
"MIT"
] | null | null | null |
main_fed.py
|
berserkersss/FL_CNN_Diff_Acc
|
f78651b426ff700108b62f2afbd99134b30af1e6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import copy
import numpy as np
from torchvision import datasets, transforms
import torch
import math
from utils.sampling import mnist_iid, mnist_noniid, cifar_iid
from utils.options import args_parser
from models.Update import LocalUpdate
from models.Update import CLUpdate
from models.Nets import MLP, CNNMnist, CNNCifar
from models.Fed import FedAvg
from models.test import test_img
if __name__ == '__main__':
# parse args
args = args_parser()
args.device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')
# load dataset and split users
if args.dataset == 'mnist':
trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
dataset_train = datasets.MNIST('../data/mnist/', train=True, download=True, transform=trans_mnist)
dataset_test = datasets.MNIST('../data/mnist/', train=False, download=True, transform=trans_mnist)
# sample users
#if args.iid:
dict_users_iid_temp = mnist_iid(dataset_train, args.num_users)
#else:
dict_users = mnist_noniid(dataset_train, args.num_users)
#dict_users_iid_temp = dict_users
elif args.dataset == 'cifar':
trans_cifar = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
dataset_train = datasets.CIFAR10('../data/cifar', train=True, download=True, transform=trans_cifar)
dataset_test = datasets.CIFAR10('../data/cifar', train=False, download=True, transform=trans_cifar)
if args.iid:
dict_users = cifar_iid(dataset_train, args.num_users)
else:
exit('Error: only consider IID setting in CIFAR10')
else:
exit('Error: unrecognized dataset')
img_size = dataset_train[0][0].shape
#print('img_size=',img_size)
# build model
if args.model == 'cnn' and args.dataset == 'cifar':
net_glob = CNNCifar(args=args).to(args.device)
elif args.model == 'cnn' and args.dataset == 'mnist':
net_glob = CNNMnist(args=args).to(args.device)
elif args.model == 'mlp':
len_in = 1
for x in img_size:
len_in *= x
net_glob_fl = MLP(dim_in=len_in, dim_hidden=64, dim_out=args.num_classes).to(args.device)
net_glob_cl = MLP(dim_in=len_in, dim_hidden=64, dim_out=args.num_classes).to(args.device)
else:
exit('Error: unrecognized model')
net_glob_fl.train()
net_glob_cl.train()
# copy weights
w_glob_fl = net_glob_fl.state_dict()
w_glob_cl = net_glob_cl.state_dict()
# training
eta = 0.01
Nepoch = 5 # num of epoch
loss_train_fl, loss_train_cl = [], []
cv_loss, cv_acc = [], []
val_loss_pre, counter = 0, 0
net_best = None
best_loss = None
val_acc_list, net_list = [], []
para_g = []
loss_grad = []
delta_batch_loss_list = []
beta_list = []
count_list = np.zeros(256).tolist()
line1_iter_list = []
line2_iter_list = []
wgfed_list = []
wgcl_list = []
w_locals, loss_locals = [], []
w0_locals,loss0_locals =[], []
weight_div_list = []
para_cl = []
para_fl = []
beta_locals, mu_locals, sigma_locals = [],[],[]
x_stat_loacals, pxm_locals =[],[]
data_locals = [[] for i in range(args.epochs)]
w_fl_iter,w_cl_iter = [], []
beta_max_his, mu_max_his, sigma_max_his = [], [], []
acc_train_cl_his, acc_train_fl_his = [], []
net_glob_fl.eval()
acc_train_cl, loss_train_clxx = test_img(net_glob_cl, dataset_train, args)
acc_test_cl, loss_test_clxx = test_img(net_glob_cl, dataset_test, args)
acc_train_cl_his.append(acc_test_cl)
acc_train_fl_his.append(acc_test_cl)
print("Training accuracy: {:.2f}".format(acc_train_cl))
print("Testing accuracy: {:.2f}".format(acc_test_cl))
dict_users_iid = []
for iter in range(args.num_users):
dict_users_iid.extend(dict_users_iid_temp[iter])
# Centralized learning
for iter in range(args.epochs):
w_locals, loss_locals = [], []
glob_cl = CLUpdate(args=args, dataset=dataset_train, idxs=dict_users_iid)
w_cl, loss_cl = glob_cl.cltrain(net=copy.deepcopy(net_glob_cl).to(args.device))
w_cl_iter.append(copy.deepcopy(w_cl))
net_glob_cl.load_state_dict(w_cl)
loss_train_cl.append(loss_cl) # loss of CL
print('cl,iter = ', iter, 'loss=', loss_cl)
# testing
acc_train_cl, loss_train_clxx = test_img(net_glob_cl, dataset_train, args)
acc_test_cl, loss_test_clxx = test_img(net_glob_cl, dataset_test, args)
print("Training accuracy: {:.2f}".format(acc_train_cl))
print("Testing accuracy: {:.2f}".format(acc_test_cl))
acc_train_cl_his.append(acc_test_cl.item())
# FL setting
for iter in range(args.epochs): # num of iterations
w_locals, loss_locals, d_locals = [], [], []
beta_locals, mu_locals, sigma_locals = [], [], []
x_stat_loacals, pxm_locals =[],[]
# M clients local update
m = max(int(args.frac * args.num_users), 1) # num of selected users
idxs_users = np.random.choice(range(args.num_users), m, replace=False) # select randomly m clients
for idx in idxs_users:
local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx]) # data select
w, loss, delta_bloss, beta, x_stat, d_local = local.train(net=copy.deepcopy(net_glob_fl).to(args.device))
x_value, count = np.unique(x_stat,return_counts=True) # compute the P(Xm)
            w_locals.append(copy.deepcopy(w))  # collect local model
            loss_locals.append(copy.deepcopy(loss))  # collect local loss function
            d_locals.extend(d_local)  # collect the indices of local training data in FL
            beta_locals.append(np.max(beta))  # beta value
            mu_locals.append(np.max(delta_bloss))  # mu value
            sigma_locals.append(np.std(delta_bloss))  # sigma value
            x_stat_loacals.append(x_stat)  # Xm
            pxm_locals.append(np.array(count / (np.sum(count))))  # P(Xm)
        data_locals[iter] = d_locals  # collect data
        w_glob_fl = FedAvg(w_locals)  # update the global model
        net_glob_fl.load_state_dict(w_glob_fl)  # copy weight to net_glob
w_fl_iter.append(copy.deepcopy(w_glob_fl))
loss_fl = sum(loss_locals) / len(loss_locals)
loss_train_fl.append(loss_fl) # loss of FL
# compute P(Xg)
xg_value, xg_count = np.unique(x_stat_loacals,return_counts=True)
xg_count = np.array(xg_count)/(np.sum(xg_count))
print('fl,iter = ',iter,'loss=',loss_fl)
# compute beta, mu, sigma
beta_max = (np.max(beta_locals))
mu_max = (np.max(mu_locals))
sigma_max = (np.max(sigma_locals))
beta_max_his.append(np.max(beta_locals))
mu_max_his.append(np.max(mu_locals))
sigma_max_his.append(np.max(sigma_locals))
# print('beta=', beta_max)
# print('mu=', mu_max)
# print('sigma=', sigma_max)
# testing
net_glob_fl.eval()
acc_train_fl, loss_train_flxx = test_img(net_glob_fl, dataset_train, args)
acc_test_fl, loss_test_flxx = test_img(net_glob_fl, dataset_test, args)
print("Training accuracy: {:.2f}".format(acc_train_fl))
print("Testing accuracy: {:.2f}".format(acc_test_fl))
line1_list=[]
# the weight divergence of numerical line
for j in range(len(pxm_locals)):
lditem1 = sigma_max*(np.sqrt(2/(np.pi*50*(iter+1)))+np.sqrt(2/(np.pi*50*m*(iter+1))))
lditem2 = mu_max*(np.abs(pxm_locals[j]-xg_count))
lditem3= 50*(iter+1)*(((1+eta*beta_max)**((iter+1)*Nepoch))-1)/(50*m*(iter+1)*beta_max) # 50 is batch size (10)* num of epoch (5)
line1 = lditem3*(lditem1+lditem2)
line1_list.append(line1) # m clients
line1_iter_list.append(np.sum(line1_list)) # iter elements
acc_train_fl_his.append(acc_test_fl.item())
# weight divergence of the simulation
for i in range(len(w_cl_iter)):
    para_cl = w_cl_iter[i]['layer_input.weight']
    para_fl = w_fl_iter[i]['layer_input.weight']
    line2 = torch.norm(para_cl - para_fl)
    print(torch.norm(para_cl - para_fl) / torch.norm(para_cl))  # relative divergence
    line2_iter_list.append(line2.item())
print('y_line1 =', line1_iter_list)  # numerical bound
print('y_line2 =', line2_iter_list)  # simulation
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(line2_iter_list, c="red")
plt.xlabel('Iterations')
plt.ylabel('Difference')
plt.savefig('Figure/difference.png')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(beta_max_his, c="red")
plt.xlabel('Iterations')
plt.ylabel('Beta_max')
plt.savefig('Figure/beta_max.png')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(sigma_max_his, c="red")
plt.xlabel('Iterations')
plt.ylabel('Sigma_max')
plt.savefig('Figure/sigma_max.png')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(mu_max_his, c="red")
plt.xlabel('Iterations')
plt.ylabel('Mu_max')
plt.savefig('Figure/mu_max.png')
colors = ["blue", "red"]
labels = ["non-iid", "iid"]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(acc_train_fl_his, c=colors[0], label=labels[0])
ax.plot(acc_train_cl_his, c=colors[1], label=labels[1])
ax.legend()
plt.xlabel('Iterations')
plt.ylabel('Accuracy')
plt.savefig('Figure/Accuracy_non_iid2_temp.png')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(line1_iter_list, c=colors[0])
plt.xlabel('Local_Iterations')
plt.ylabel('Grad')
plt.savefig('Figure/numerical_temp.png')
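
# For reference: `FedAvg` above is assumed to be the standard federated-
# averaging helper (an element-wise mean over the clients' state dicts). A
# minimal sketch of such a helper, under a hypothetical name so it does not
# shadow the imported one:
def fed_avg_sketch(w_locals):
    """Element-wise average of a list of PyTorch state_dicts."""
    w_avg = copy.deepcopy(w_locals[0])
    for key in w_avg.keys():
        for w in w_locals[1:]:
            w_avg[key] += w[key]
        w_avg[key] = torch.div(w_avg[key], len(w_locals))
    return w_avg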
| 36.884758
| 141
| 0.651078
| 1,469
| 9,922
| 4.121852
| 0.167461
| 0.024277
| 0.013377
| 0.013873
| 0.402312
| 0.329315
| 0.252519
| 0.196862
| 0.163501
| 0.144674
| 0
| 0.016144
| 0.213364
| 9,922
| 268
| 142
| 37.022388
| 0.759641
| 0.089397
| 0
| 0.192893
| 0
| 0
| 0.078268
| 0.003674
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071066
| 0
| 0.071066
| 0.055838
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6670c507913d776c7f3759690ef2c0ab2aa02880
| 591
|
py
|
Python
|
ex078.py
|
raquelEllem/exerciciosPython
|
489c2360de84c69dbe9da7710660fb064cd605fa
|
[
"MIT"
] | null | null | null |
ex078.py
|
raquelEllem/exerciciosPython
|
489c2360de84c69dbe9da7710660fb064cd605fa
|
[
"MIT"
] | null | null | null |
ex078.py
|
raquelEllem/exerciciosPython
|
489c2360de84c69dbe9da7710660fb064cd605fa
|
[
"MIT"
] | null | null | null |
lista = []
for n in range(0, 5):
    lista.append(int(input(f'Enter a value for position {n}: ')))
print('=-=' * 10)
print(f'You entered the values {lista}')
maior = lista[0]
menor = lista[0]
for n in lista:
    if maior < n:
        maior = n
    if menor > n:
        menor = n
print(f'The largest value entered was {maior} at positions ', end='')
for i, v in enumerate(lista):
    if v == maior:
        print(f'{i}...', end='')
print()
print(f'The smallest value entered was {menor} at positions ', end='')
for i, v in enumerate(lista):
    if v == menor:
        print(f'{i}...', end='')
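
# A more idiomatic sketch of the same result using built-ins (the exercise
# itself practices the manual scan above):
maior, menor = max(lista), min(lista)
pos_maior = [i for i, v in enumerate(lista) if v == maior]
pos_menor = [i for i, v in enumerate(lista) if v == menor]
print(f'The largest value {maior} appears at positions {pos_maior}')
print(f'The smallest value {menor} appears at positions {pos_menor}')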
| 26.863636
| 69
| 0.575296
| 97
| 591
| 3.505155
| 0.360825
| 0.088235
| 0.035294
| 0.1
| 0.223529
| 0.223529
| 0.223529
| 0.223529
| 0.223529
| 0.223529
| 0
| 0.013393
| 0.241963
| 591
| 22
| 70
| 26.863636
| 0.745536
| 0
| 0
| 0.190476
| 0
| 0
| 0.300676
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6674228e20201842275a8416c646d65895ba336f
| 6,461
|
py
|
Python
|
chb/x86/opcodes/X86RotateLeftCF.py
|
kestreltechnology/CodeHawk-Binary
|
aa0b2534e0318e5fb3770ec7b4d78feb0feb2394
|
[
"MIT"
] | null | null | null |
chb/x86/opcodes/X86RotateLeftCF.py
|
kestreltechnology/CodeHawk-Binary
|
aa0b2534e0318e5fb3770ec7b4d78feb0feb2394
|
[
"MIT"
] | null | null | null |
chb/x86/opcodes/X86RotateLeftCF.py
|
kestreltechnology/CodeHawk-Binary
|
aa0b2534e0318e5fb3770ec7b4d78feb0feb2394
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
# Copyright (c) 2020 Henny Sipma
# Copyright (c) 2021 Aarno Labs LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
from typing import cast, List, Sequence, TYPE_CHECKING
from chb.app.InstrXData import InstrXData
from chb.invariants.XVariable import XVariable
from chb.invariants.XXpr import XXpr
import chb.simulation.SimUtil as SU
import chb.simulation.SimValue as SV
import chb.util.fileutil as UF
from chb.util.IndexedTable import IndexedTableValue
from chb.x86.X86DictionaryRecord import x86registry
from chb.x86.X86Opcode import X86Opcode
from chb.x86.X86Operand import X86Operand
if TYPE_CHECKING:
    from chb.x86.X86Dictionary import X86Dictionary
    from chb.x86.simulation.X86SimulationState import X86SimulationState


@x86registry.register_tag("rcl", X86Opcode)
class X86RotateLeftCF(X86Opcode):
    """RCL dst, src

    args[0]: index of dst in x86dictionary
    args[1]: index of src in x86dictionary
    """

    def __init__(
            self,
            x86d: "X86Dictionary",
            ixval: IndexedTableValue) -> None:
        X86Opcode.__init__(self, x86d, ixval)

    @property
    def dst_operand(self) -> X86Operand:
        return self.x86d.operand(self.args[0])

    @property
    def src_operand(self) -> X86Operand:
        return self.x86d.operand(self.args[1])

    @property
    def operands(self) -> Sequence[X86Operand]:
        return [self.dst_operand, self.src_operand]

    def annotation(self, xdata: InstrXData) -> str:
        """data format: a:vxx

        vars[0]: dst
        xprs[0]: src (number of bits to rotate)
        xprs[1]: dst-rhs (value to rotate)
        """
        lhs = str(xdata.vars[0])
        rhs1 = str(xdata.xprs[0])
        rhs2 = str(xdata.xprs[2])
        return lhs + ' = ' + rhs2 + ' rotate-left-by ' + rhs1 + ' CF'

    def lhs(self, xdata: InstrXData) -> List[XVariable]:
        return xdata.vars

    def rhs(self, xdata: InstrXData) -> List[XXpr]:
        return xdata.xprs
    # --------------------------------------------------------------------------
    # Rotates the bits of the first operand (destination operand) the number of
    # bit positions specified in the second operand (count operand) and stores
    # the result in the destination operand. The count operand is an unsigned
    # integer that can be an immediate or a value in the CL register. In legacy
    # and compatibility mode, the processor restricts the count to a number
    # between 0 and 31 by masking all the bits in the count operand except
    # the 5 least-significant bits.
    #
    # The rotate through carry left (RCL) instruction shifts all the bits
    # toward more-significant bit positions, except for the most-significant
    # bit, which is rotated to the least-significant bit location.
    #
    # The RCL instruction includes the CF flag in the rotation. The RCL
    # instruction shifts the CF flag into the least-significant bit and shifts
    # the most-significant bit into the CF flag.
    #
    # The OF flag is defined only for the 1-bit rotates; it is undefined in all
    # other cases (except that a zero-bit rotate does nothing, that is, it
    # affects no flags). For left rotates, the OF flag is set to the exclusive
    # OR of the CF bit (after the rotate) and the most-significant bit of the
    # result.
    #
    # CASE size:
    #   8:  tempcount = (count & 31) % 9
    #   16: tempcount = (count & 31) % 17
    #   32: tempcount = (count & 31)
    # WHILE tempcount != 0 DO:
    #   tempCF = msb(dest)
    #   dest = (dest * 2) + CF
    #   CF = tempCF
    #   tempcount = tempcount - 1
    # IF count == 1:
    #   OF = msb(dest) xor CF
    # ELSE:
    #   OF is undefined
    #
    # Flags affected:
    # The CF flag contains the value of the bit shifted into it. The OF flag
    # is affected only for single-bit rotates; it is undefined for multi-bit
    # rotates. The SF, ZF, AF, and PF flags are not affected.
    # --------------------------------------------------------------------------
    def simulate(self, iaddr: str, simstate: "X86SimulationState") -> None:
        srcop = self.src_operand
        dstop = self.dst_operand
        srcval = simstate.get_rhs(iaddr, srcop)
        dstval = simstate.get_rhs(iaddr, dstop)
        cflag = simstate.get_flag_value(iaddr, 'CF')
        if cflag is None:
            simstate.set(iaddr, dstop, SV.mk_undefined_simvalue(dstop.size))
        elif (dstval.is_literal
              and dstval.is_defined
              and srcval.is_literal
              and srcval.is_defined):
            dstval = cast(SV.SimLiteralValue, dstval)
            srcval = cast(SV.SimLiteralValue, srcval)
            (cflag, result) = dstval.bitwise_rcl(srcval, cflag)
            simstate.set(iaddr, dstop, result)
            if srcval.value > 0:
                simstate.update_flag(iaddr, 'CF', cflag == 1)
                if srcval.value == 1:
                    oflag = result.msb ^ cflag
                    simstate.update_flag(iaddr, 'OF', oflag == 1)
                else:
                    simstate.update_flag(iaddr, "OF", None)
| 40.130435
| 80
| 0.629005
| 829
| 6,461
| 4.864897
| 0.332931
| 0.015621
| 0.012398
| 0.015621
| 0.048599
| 0.024795
| 0.024795
| 0.024795
| 0.024795
| 0
| 0
| 0.024113
| 0.236186
| 6,461
| 160
| 81
| 40.38125
| 0.79311
| 0.531806
| 0
| 0.048387
| 0
| 0
| 0.021717
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.129032
| false
| 0
| 0.209677
| 0.080645
| 0.451613
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
667689203557923536a76893ffda9eef2e58e85a
| 2,135
|
py
|
Python
|
test_challenges.py
|
UPstartDeveloper/Graph-Applications
|
45a3fa83f9e3fff243be35dd169edfcfd020f1a1
|
[
"MIT"
] | null | null | null |
test_challenges.py
|
UPstartDeveloper/Graph-Applications
|
45a3fa83f9e3fff243be35dd169edfcfd020f1a1
|
[
"MIT"
] | null | null | null |
test_challenges.py
|
UPstartDeveloper/Graph-Applications
|
45a3fa83f9e3fff243be35dd169edfcfd020f1a1
|
[
"MIT"
] | null | null | null |
import challenges
import unittest
class RottingOrangesTests(unittest.TestCase):
    def test_time_to_rot(self):
        """
        Graph BFS problem. Returns the time taken for all oranges to rot.
        Test cases from LeetCode.
        """
        # Test Cases
        oranges1 = [
            [2, 1, 1],
            [1, 1, 0],
            [0, 1, 1]
        ]
        assert challenges.time_to_rot(oranges1) == 4
        oranges2 = [
            [2, 1, 1],
            [0, 1, 1],
            [1, 0, 1]
        ]
        assert challenges.time_to_rot(oranges2) == -1
        oranges3 = [
            [0, 2]
        ]
        assert challenges.time_to_rot(oranges3) == 0


class NumIslandsTests(unittest.TestCase):
    def test_num_islands(self):
        '''Returns the number of distinct land masses from a 2D grid.'''
        # Test Cases
        map1 = [
            [1, 1, 1, 1, 0],
            [1, 1, 0, 1, 0],
            [1, 1, 0, 0, 0],
            [0, 0, 0, 0, 0]
        ]
        assert challenges.num_islands(map1) == 1
        map2 = [
            [1, 1, 0, 0, 0],
            [1, 1, 0, 0, 0],
            [0, 0, 1, 0, 0],
            [0, 0, 0, 1, 1]
        ]
        assert challenges.num_islands(map2) == 3


class ClassSchedulingTests(unittest.TestCase):
    def test_course_order(self):
        """Returns the order in which courses must be taken,
        in order to meet prerequisites.
        """
        courses1 = [[1, 0]]
        assert challenges.course_order(2, courses1) == [0, 1]
        courses2 = [[1, 0], [2, 0], [3, 1], [3, 2]]
        possibleSchedules = [[0, 1, 2, 3], [0, 2, 1, 3]]
        assert challenges.course_order(4, courses2) in possibleSchedules


class WordLadderTests(unittest.TestCase):
    def test_word_ladder_length(self):
        """Returns the minimum number of 1-letter transformations to change
        one word to another.
        """
        beginWord = "hit"
        endWord = "cog"
        wordList = ["hot", "dot", "dog", "lot", "log", "cog"]
        assert challenges.word_ladder_length(beginWord, endWord, wordList) == 5


if __name__ == '__main__':
    unittest.main()
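
# Illustrative sketch of the multi-source BFS that `challenges.time_to_rot` is
# expected to implement (the real solution lives in challenges.py): rot spreads
# from every initially rotten orange (2) to 4-neighbouring fresh oranges (1),
# one minute per BFS layer; return -1 if any fresh orange is unreachable.
from collections import deque

def time_to_rot_sketch(grid):
    rows, cols = len(grid), len(grid[0])
    queue = deque((r, c, 0) for r in range(rows) for c in range(cols) if grid[r][c] == 2)
    fresh = sum(row.count(1) for row in grid)
    minutes = 0
    while queue:
        r, c, minutes = queue.popleft()
        for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nr, nc = r + dr, c + dc
            if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] == 1:
                grid[nr][nc] = 2  # mark as rotten so it is visited once
                fresh -= 1
                queue.append((nr, nc, minutes + 1))
    return -1 if fresh else minutes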
| 26.6875
| 79
| 0.516628
| 261
| 2,135
| 4.111111
| 0.325671
| 0.033551
| 0.036347
| 0.033551
| 0.136999
| 0.091333
| 0.024231
| 0.014911
| 0
| 0
| 0
| 0.075199
| 0.352225
| 2,135
| 80
| 80
| 26.6875
| 0.700651
| 0.158782
| 0
| 0.098039
| 0
| 0
| 0.018835
| 0
| 0
| 0
| 0
| 0
| 0.156863
| 1
| 0.078431
| false
| 0
| 0.039216
| 0
| 0.196078
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
66769c379769d62d8db4f6ca3c7ed84d674f3460
| 1,293
|
py
|
Python
|
2020-08-month-long-challenge/day06.py
|
jkbockstael/leetcode
|
8ef5c907fb153c37dc97f6524493ceca2044ea38
|
[
"Unlicense"
] | null | null | null |
2020-08-month-long-challenge/day06.py
|
jkbockstael/leetcode
|
8ef5c907fb153c37dc97f6524493ceca2044ea38
|
[
"Unlicense"
] | null | null | null |
2020-08-month-long-challenge/day06.py
|
jkbockstael/leetcode
|
8ef5c907fb153c37dc97f6524493ceca2044ea38
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
# Day 6: Find All Duplicates in an Array
#
# Given an array of integers, 1 ≤ a[i] ≤ n (n = size of array), some elements
# appear twice and others appear once.
# Find all the elements that appear twice in this array.
# Could you do it without extra space and in O(n) runtime?
class Solution:
    def findDuplicates(self, nums: [int]) -> [int]:
        # We have an array of length N that contains values from 1 to n, n ≤ N.
        # We need to keep track of the numbers we've already seen; for that we
        # would need a list of m elements, m ≤ n ≤ N.
        # This means we can actually use the input array, as it is large enough;
        # given that all values are positive, we can flip them to negative to
        # encode the seen values.
        duplicates = []
        for number in nums:
            value = abs(number)  # maybe this position has been used as a marker
            seen = abs(number) - 1  # indices start at 0, values at 1
            if nums[seen] < 0:
                # We already found this number before
                duplicates.append(value)
            else:
                # Mark the array for this number
                nums[seen] *= -1
        return duplicates
# Test
assert Solution().findDuplicates([4,3,2,7,8,2,3,1]) == [2,3]
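
# Worked trace of the marking trick on [4, 3, 2, 7, 8, 2, 3, 1]: 4 negates
# index 3, 3 negates index 2, 2 negates index 1, 7 negates index 6, 8 negates
# index 7; the second 2 then finds nums[1] already negative (duplicate), the
# second 3 finds nums[2] already negative (duplicate), and 1 negates index 0,
# hence the result [2, 3].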
| 40.40625
| 79
| 0.608662
| 207
| 1,293
| 3.826087
| 0.512077
| 0.010101
| 0.022727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021372
| 0.312452
| 1,293
| 31
| 80
| 41.709677
| 0.863892
| 0.600928
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6683c0d1956dae22490efd4a21cbb16c9e118a7c
| 339
|
py
|
Python
|
tf_prac.py
|
akapoorx00/machinelearning-stuff
|
53184019b77d3387fd15b13d3bfa75529b8ed003
|
[
"Apache-2.0"
] | null | null | null |
tf_prac.py
|
akapoorx00/machinelearning-stuff
|
53184019b77d3387fd15b13d3bfa75529b8ed003
|
[
"Apache-2.0"
] | null | null | null |
tf_prac.py
|
akapoorx00/machinelearning-stuff
|
53184019b77d3387fd15b13d3bfa75529b8ed003
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf

# TensorFlow 1.x graph-mode API: build the graph first, then run it in a Session.
x = tf.constant(35, name='x')
print(x)
y = tf.Variable(x + 5, name='y')
with tf.Session() as session:
    merged = tf.summary.merge_all()  # returns None here: no summaries were defined
    writer = tf.summary.FileWriter("output", session.graph)
    model = tf.global_variables_initializer()
    session.run(model)
    print(session.run(y))
    writer.close()
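
# Note: the snippet above uses the TensorFlow 1.x graph/session API. Under
# TensorFlow 2.x (assumption: TF >= 2.0) the same computation runs eagerly:
#
#   import tensorflow as tf
#   x = tf.constant(35, name='x')
#   y = tf.Variable(x + 5, name='y')
#   print(y.numpy())  # 40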
| 21.1875
| 59
| 0.672566
| 50
| 339
| 4.5
| 0.56
| 0.08
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010714
| 0.174041
| 339
| 15
| 60
| 22.6
| 0.792857
| 0
| 0
| 0
| 0
| 0
| 0.023669
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0.181818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
668cea27bdbc4f6209d2380260dbf5312ca4bad1
| 2,944
|
py
|
Python
|
Dorta/sales_modification/wizard/sale_order_popup.py
|
aaparicio87/Odoo12
|
25cfc349b2e85fa1b5f5846ffe693029f77b3b7d
|
[
"MIT"
] | null | null | null |
Dorta/sales_modification/wizard/sale_order_popup.py
|
aaparicio87/Odoo12
|
25cfc349b2e85fa1b5f5846ffe693029f77b3b7d
|
[
"MIT"
] | null | null | null |
Dorta/sales_modification/wizard/sale_order_popup.py
|
aaparicio87/Odoo12
|
25cfc349b2e85fa1b5f5846ffe693029f77b3b7d
|
[
"MIT"
] | null | null | null |
from odoo import fields, models, api, _
from odoo.exceptions import UserError


class SaleOrderPopup(models.TransientModel):
    _name = 'sale.order.popup'

    @api.multi
    def popup_button(self):
        for rec in self.env['sale.order'].browse(self._context.get('active_id')):
            if rec._get_forbidden_state_confirm() & set(rec.mapped('state')):
                raise UserError(_(
                    'It is not allowed to confirm an order in the following states: %s'
                ) % (', '.join(rec._get_forbidden_state_confirm())))
            for order in rec.filtered(lambda order: order.partner_id not in order.message_partner_ids):
                order.message_subscribe([order.partner_id.id])
            rec.write({
                'state': 'sale',
                'confirmation_date': fields.Datetime.now()
            })
            rec._action_confirm()
            if self.env['ir.config_parameter'].sudo().get_param('sale.auto_done_setting'):
                rec.action_done()
        return True


class Quotation_Send_Popup(models.TransientModel):
    _name = 'quotation.send.popup'

    @api.multi
    def action_quotation_send_popup(self):
        for rec in self.env['sale.order'].browse(self._context.get('active_id')):
            ir_model_data = self.env['ir.model.data']
            try:
                template_id = ir_model_data.get_object_reference('sale', 'email_template_edi_sale')[1]
            except ValueError:
                template_id = False
            try:
                compose_form_id = ir_model_data.get_object_reference('mail', 'email_compose_message_wizard_form')[1]
            except ValueError:
                compose_form_id = False
            lang = rec.env.context.get('lang')
            template = template_id and self.env['mail.template'].browse(template_id)
            if template and template.lang:
                lang = template._render_template(template.lang, 'sale.order', rec.ids[0])
            ctx = {
                'default_model': 'sale.order',
                'default_res_id': rec.ids[0],
                'default_use_template': bool(template_id),
                'default_template_id': template_id,
                'default_composition_mode': 'comment',
                'mark_so_as_sent': True,
                'model_description': rec.with_context(lang=lang).type_name,
                'custom_layout': "mail.mail_notification_paynow",
                'proforma': rec.env.context.get('proforma', False),
                'force_email': True
            }
            return {
                'type': 'ir.actions.act_window',
                'view_type': 'form',
                'view_mode': 'form',
                'res_model': 'mail.compose.message',
                'views': [(compose_form_id, 'form')],
                'view_id': compose_form_id,
                'target': 'new',
                'context': ctx,
            }
        return True
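
# Hypothetical usage sketch (not part of this module): both wizards expect the
# sale order's id as `active_id` in the context, e.g. from a button action:
#
#   self.env['sale.order.popup'].with_context(active_id=order.id).create({}).popup_button()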
| 42.057143
| 116
| 0.567935
| 326
| 2,944
| 4.846626
| 0.361963
| 0.044304
| 0.027848
| 0.024684
| 0.144304
| 0.110127
| 0.110127
| 0.070886
| 0.070886
| 0.070886
| 0
| 0.001986
| 0.315897
| 2,944
| 70
| 117
| 42.057143
| 0.782522
| 0
| 0
| 0.16129
| 0
| 0
| 0.216299
| 0.051613
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.032258
| 0
| 0.177419
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
668da6a3dfe98b38ca927b8c9945a7980761c6b8
| 830
|
py
|
Python
|
tyson-py/udp-echo.py
|
asheraryam/tyson
|
44317a4e3367ef4958c3bb8d3ad538a3908a4566
|
[
"MIT"
] | null | null | null |
tyson-py/udp-echo.py
|
asheraryam/tyson
|
44317a4e3367ef4958c3bb8d3ad538a3908a4566
|
[
"MIT"
] | null | null | null |
tyson-py/udp-echo.py
|
asheraryam/tyson
|
44317a4e3367ef4958c3bb8d3ad538a3908a4566
|
[
"MIT"
] | null | null | null |
"""UDP hole punching server."""
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
import sys
DEFAULT_PORT = 4000
class ServerProtocol(DatagramProtocol):
def datagramReceived(self, datagram, address):
"""Handle incoming datagram messages."""
print(datagram)
# data_string = datagram.decode("utf-8")
# msg_type = data_string[:2]
ip, port = address
for i in range(0, 3):
self.transport.write(bytes(str(port)), address, int(port) +i)
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: ./server.py PORT")
port = DEFAULT_PORT
# sys.exit(1)
else:
port = int(sys.argv[1])
reactor.listenUDP(port, ServerProtocol())
print('Listening on *:%d' % (port))
reactor.run()
| 28.62069
| 73
| 0.631325
| 100
| 830
| 5.11
| 0.61
| 0.043053
| 0.074364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017378
| 0.237349
| 830
| 29
| 74
| 28.62069
| 0.789889
| 0.16747
| 0
| 0
| 0
| 0
| 0.070692
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.157895
| 0
| 0.263158
| 0.157895
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
668e417b3a6306ecd6bbd0fcf013eefd855c3921
| 12,972
|
py
|
Python
|
src/fhir_types/FHIR_StructureMap_Source.py
|
anthem-ai/fhir-types
|
42348655fb3a9b3f131b911d6bc0782da8c14ce4
|
[
"Apache-2.0"
] | 2
|
2022-02-03T00:51:30.000Z
|
2022-02-03T18:42:43.000Z
|
src/fhir_types/FHIR_StructureMap_Source.py
|
anthem-ai/fhir-types
|
42348655fb3a9b3f131b911d6bc0782da8c14ce4
|
[
"Apache-2.0"
] | null | null | null |
src/fhir_types/FHIR_StructureMap_Source.py
|
anthem-ai/fhir-types
|
42348655fb3a9b3f131b911d6bc0782da8c14ce4
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any, List, Literal, TypedDict
from .FHIR_Address import FHIR_Address
from .FHIR_Age import FHIR_Age
from .FHIR_Annotation import FHIR_Annotation
from .FHIR_Attachment import FHIR_Attachment
from .FHIR_CodeableConcept import FHIR_CodeableConcept
from .FHIR_Coding import FHIR_Coding
from .FHIR_ContactDetail import FHIR_ContactDetail
from .FHIR_ContactPoint import FHIR_ContactPoint
from .FHIR_Contributor import FHIR_Contributor
from .FHIR_Count import FHIR_Count
from .FHIR_DataRequirement import FHIR_DataRequirement
from .FHIR_Distance import FHIR_Distance
from .FHIR_Dosage import FHIR_Dosage
from .FHIR_Duration import FHIR_Duration
from .FHIR_Element import FHIR_Element
from .FHIR_Expression import FHIR_Expression
from .FHIR_HumanName import FHIR_HumanName
from .FHIR_id import FHIR_id
from .FHIR_Identifier import FHIR_Identifier
from .FHIR_integer import FHIR_integer
from .FHIR_Meta import FHIR_Meta
from .FHIR_Money import FHIR_Money
from .FHIR_ParameterDefinition import FHIR_ParameterDefinition
from .FHIR_Period import FHIR_Period
from .FHIR_Quantity import FHIR_Quantity
from .FHIR_Range import FHIR_Range
from .FHIR_Ratio import FHIR_Ratio
from .FHIR_Reference import FHIR_Reference
from .FHIR_RelatedArtifact import FHIR_RelatedArtifact
from .FHIR_SampledData import FHIR_SampledData
from .FHIR_Signature import FHIR_Signature
from .FHIR_string import FHIR_string
from .FHIR_Timing import FHIR_Timing
from .FHIR_TriggerDefinition import FHIR_TriggerDefinition
from .FHIR_UsageContext import FHIR_UsageContext
# A Map of relationships between 2 structures that can be used to transform data.
FHIR_StructureMap_Source = TypedDict(
"FHIR_StructureMap_Source",
{
# Unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces.
"id": FHIR_string,
# May be used to represent additional information that is not part of the basic definition of the element. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension.
"extension": List[Any],
# May be used to represent additional information that is not part of the basic definition of the element and that modifies the understanding of the element in which it is contained and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions.Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself).
"modifierExtension": List[Any],
# Type or variable this rule applies to.
"context": FHIR_id,
# Extensions for context
"_context": FHIR_Element,
# Specified minimum cardinality for the element. This is optional; if present, it acts as an implicit check on the input content.
"min": FHIR_integer,
# Extensions for min
"_min": FHIR_Element,
# Specified maximum cardinality for the element - a number or a "*". This is optional; if present, it acts as an implicit check on the input content (* just serves as documentation; it's the default value).
"max": FHIR_string,
# Extensions for max
"_max": FHIR_Element,
# Specified type for the element. This works as a condition on the mapping - use for polymorphic elements.
"type": FHIR_string,
# Extensions for type
"_type": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueBase64Binary": str,
# Extensions for defaultValueBase64Binary
"_defaultValueBase64Binary": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueBoolean": bool,
# Extensions for defaultValueBoolean
"_defaultValueBoolean": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueCanonical": str,
# Extensions for defaultValueCanonical
"_defaultValueCanonical": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueCode": str,
# Extensions for defaultValueCode
"_defaultValueCode": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueDate": str,
# Extensions for defaultValueDate
"_defaultValueDate": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueDateTime": str,
# Extensions for defaultValueDateTime
"_defaultValueDateTime": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueDecimal": float,
# Extensions for defaultValueDecimal
"_defaultValueDecimal": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueId": str,
# Extensions for defaultValueId
"_defaultValueId": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueInstant": str,
# Extensions for defaultValueInstant
"_defaultValueInstant": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueInteger": float,
# Extensions for defaultValueInteger
"_defaultValueInteger": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueMarkdown": str,
# Extensions for defaultValueMarkdown
"_defaultValueMarkdown": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueOid": str,
# Extensions for defaultValueOid
"_defaultValueOid": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValuePositiveInt": float,
# Extensions for defaultValuePositiveInt
"_defaultValuePositiveInt": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueString": str,
# Extensions for defaultValueString
"_defaultValueString": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueTime": str,
# Extensions for defaultValueTime
"_defaultValueTime": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueUnsignedInt": float,
# Extensions for defaultValueUnsignedInt
"_defaultValueUnsignedInt": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueUri": str,
# Extensions for defaultValueUri
"_defaultValueUri": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueUrl": str,
# Extensions for defaultValueUrl
"_defaultValueUrl": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueUuid": str,
# Extensions for defaultValueUuid
"_defaultValueUuid": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueAddress": FHIR_Address,
# A value to use if there is no existing value in the source object.
"defaultValueAge": FHIR_Age,
# A value to use if there is no existing value in the source object.
"defaultValueAnnotation": FHIR_Annotation,
# A value to use if there is no existing value in the source object.
"defaultValueAttachment": FHIR_Attachment,
# A value to use if there is no existing value in the source object.
"defaultValueCodeableConcept": FHIR_CodeableConcept,
# A value to use if there is no existing value in the source object.
"defaultValueCoding": FHIR_Coding,
# A value to use if there is no existing value in the source object.
"defaultValueContactPoint": FHIR_ContactPoint,
# A value to use if there is no existing value in the source object.
"defaultValueCount": FHIR_Count,
# A value to use if there is no existing value in the source object.
"defaultValueDistance": FHIR_Distance,
# A value to use if there is no existing value in the source object.
"defaultValueDuration": FHIR_Duration,
# A value to use if there is no existing value in the source object.
"defaultValueHumanName": FHIR_HumanName,
# A value to use if there is no existing value in the source object.
"defaultValueIdentifier": FHIR_Identifier,
# A value to use if there is no existing value in the source object.
"defaultValueMoney": FHIR_Money,
# A value to use if there is no existing value in the source object.
"defaultValuePeriod": FHIR_Period,
# A value to use if there is no existing value in the source object.
"defaultValueQuantity": FHIR_Quantity,
# A value to use if there is no existing value in the source object.
"defaultValueRange": FHIR_Range,
# A value to use if there is no existing value in the source object.
"defaultValueRatio": FHIR_Ratio,
# A value to use if there is no existing value in the source object.
"defaultValueReference": FHIR_Reference,
# A value to use if there is no existing value in the source object.
"defaultValueSampledData": FHIR_SampledData,
# A value to use if there is no existing value in the source object.
"defaultValueSignature": FHIR_Signature,
# A value to use if there is no existing value in the source object.
"defaultValueTiming": FHIR_Timing,
# A value to use if there is no existing value in the source object.
"defaultValueContactDetail": FHIR_ContactDetail,
# A value to use if there is no existing value in the source object.
"defaultValueContributor": FHIR_Contributor,
# A value to use if there is no existing value in the source object.
"defaultValueDataRequirement": FHIR_DataRequirement,
# A value to use if there is no existing value in the source object.
"defaultValueExpression": FHIR_Expression,
# A value to use if there is no existing value in the source object.
"defaultValueParameterDefinition": FHIR_ParameterDefinition,
# A value to use if there is no existing value in the source object.
"defaultValueRelatedArtifact": FHIR_RelatedArtifact,
# A value to use if there is no existing value in the source object.
"defaultValueTriggerDefinition": FHIR_TriggerDefinition,
# A value to use if there is no existing value in the source object.
"defaultValueUsageContext": FHIR_UsageContext,
# A value to use if there is no existing value in the source object.
"defaultValueDosage": FHIR_Dosage,
# A value to use if there is no existing value in the source object.
"defaultValueMeta": FHIR_Meta,
# Optional field for this source.
"element": FHIR_string,
# Extensions for element
"_element": FHIR_Element,
# How to handle the list mode for this element.
"listMode": Literal["first", "not_first", "last", "not_last", "only_one"],
# Extensions for listMode
"_listMode": FHIR_Element,
# Named context for field, if a field is specified.
"variable": FHIR_id,
# Extensions for variable
"_variable": FHIR_Element,
# FHIRPath expression - must be true or the rule does not apply.
"condition": FHIR_string,
# Extensions for condition
"_condition": FHIR_Element,
# FHIRPath expression - must be true or the mapping engine throws an error instead of completing.
"check": FHIR_string,
# Extensions for check
"_check": FHIR_Element,
# A FHIRPath expression which specifies a message to put in the transform log when content matching the source rule is found.
"logMessage": FHIR_string,
# Extensions for logMessage
"_logMessage": FHIR_Element,
},
total=False,
)
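
# Illustrative only: the TypedDict is declared with total=False, so every field
# is optional and a sparsely populated source element type-checks. (Field
# values below are hypothetical; FHIR_id and FHIR_string are assumed to be
# str-based aliases.)
example_source: FHIR_StructureMap_Source = {
    "context": "src",
    "variable": "v",
}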
| 56.4
| 836
| 0.712458
| 1,657
| 12,972
| 5.471334
| 0.145444
| 0.041694
| 0.044121
| 0.060666
| 0.402603
| 0.402603
| 0.402603
| 0.402603
| 0.402603
| 0.392897
| 0
| 0.000709
| 0.238822
| 12,972
| 229
| 837
| 56.646288
| 0.91746
| 0.503315
| 0
| 0
| 0
| 0
| 0.252008
| 0.104426
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.268657
| 0
| 0.268657
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
668f3e390bdd48e5a8dc955598a92ec70a35392d
| 2,484
|
py
|
Python
|
ip/ip/ecommerce/views.py
|
SuryaVamsiKrishna/Inner-Pieces
|
deb9e83af891dac58966230446a5a32fe10e86f2
|
[
"MIT"
] | 1
|
2021-02-17T06:06:50.000Z
|
2021-02-17T06:06:50.000Z
|
ip/ip/ecommerce/views.py
|
SuryaVamsiKrishna/Inner-Pieces
|
deb9e83af891dac58966230446a5a32fe10e86f2
|
[
"MIT"
] | null | null | null |
ip/ip/ecommerce/views.py
|
SuryaVamsiKrishna/Inner-Pieces
|
deb9e83af891dac58966230446a5a32fe10e86f2
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from .models import *
from .forms import address_form
from django.http import JsonResponse
from .utils import cartData, guestobj
import json
import datetime


def store(request):
    items = item.objects.all()
    data = cartData(request)
    cart_quantity = data['cart_quantity']
    context = {'items': items, 'cart_quantity': cart_quantity}
    return render(request, 'ecom.html', context)


def cart_page(request):
    data = cartData(request)
    items = data['items']
    order = data['order']
    cart_quantity = data['cart_quantity']
    context = {'items': items, 'order': order, 'cart_quantity': cart_quantity}
    return render(request, 'cart.html', context)


def checkout(request):
    data = cartData(request)
    items = data['items']
    order = data['order']
    cart_quantity = data['cart_quantity']
    context = {'items': items, 'order': order, 'cart_quantity': cart_quantity}
    return render(request, 'checkout.html', context)


def updateitem(request):
    data = json.loads(request.body)
    itemName = data['itemName']
    action = data['action']
    user = request.user
    Item = item.objects.get(name=itemName)
    order, added = cart.objects.get_or_create(user=user, complete=False)
    order_item, created = cart_item.objects.get_or_create(order=order, item=Item)
    if action == 'add':
        order_item.quantity = order_item.quantity + 1
    elif action == 'remove':
        order_item.quantity = order_item.quantity - 1
    order_item.save()
    if order_item.quantity <= 0:
        order_item.delete()
    return JsonResponse('Item was added', safe=False)


def processOrder(request):
    transactionId = datetime.datetime.now().timestamp()
    data = json.loads(request.body)
    if request.user.is_authenticated:
        user = request.user
        order, added = cart.objects.get_or_create(user=user, complete=False)
    else:
        user, order = guestobj(request, data)
    total = float(data['form']['total'])
    order.transaction_id = transactionId
    if total == order.total_bill:
        order.complete = True
    order.save()
    address.objects.create(
        user=user,
        order=order,
        address=data['shipping']['address'],
        city=data['shipping']['city'],
        state=data['shipping']['state'],
        pincode=data['shipping']['zipcode'],
    )
    return JsonResponse('Payment Complete', safe=False)
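
# Review note (not in the original): comparing float totals with `==` is
# fragile for money values; computing the total server-side with Decimal and
# comparing within a small tolerance would be safer.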
| 28.883721
| 84
| 0.67029
| 298
| 2,484
| 5.47651
| 0.271812
| 0.088235
| 0.052083
| 0.036765
| 0.377451
| 0.348039
| 0.348039
| 0.278799
| 0.251225
| 0.251225
| 0
| 0.001513
| 0.201691
| 2,484
| 86
| 85
| 28.883721
| 0.821483
| 0
| 0
| 0.276923
| 0
| 0
| 0.109054
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.107692
| 0
| 0.261538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
66941e3ed65b1efe5312473285b552d665a56ecc
| 29,897
|
py
|
Python
|
lpjguesstools/lgt_createinput/main.py
|
lukasbaumbach/lpjguesstools
|
f7cc14c2931b4ac9a3b8dddc89c469b8fedd42e3
|
[
"BSD-3-Clause"
] | 2
|
2020-08-03T11:33:00.000Z
|
2021-07-05T21:00:46.000Z
|
lpjguesstools/lgt_createinput/main.py
|
lukasbaumbach/lpjguesstools
|
f7cc14c2931b4ac9a3b8dddc89c469b8fedd42e3
|
[
"BSD-3-Clause"
] | 8
|
2020-08-03T12:45:31.000Z
|
2021-02-23T19:51:32.000Z
|
lpjguesstools/lgt_createinput/main.py
|
lukasbaumbach/lpjguesstools
|
f7cc14c2931b4ac9a3b8dddc89c469b8fedd42e3
|
[
"BSD-3-Clause"
] | 2
|
2020-08-03T12:11:43.000Z
|
2022-01-29T10:59:00.000Z
|
"""FILE lgt_createinput.main.py
This script creates condensed LPJ netcdf files
for landforms and soil properties
landforms.nc:
- lfcnt (landid) number of landforms in cell
- frac (landid, lfid/ standid) area fraction this landform represents
- slope (landid, lfid/ standid)
- elevation (landid, lfid/ standid) avg. elevation in this landform
- soildepth (landid, lfid/ standid) [implemented later const in model for now]
sites.nc:
- soildepth
- clay
- silt
- sand
- totc
- elevation (reference elevation for grid, 0.5deg)
Christian Werner, SENCKENBERG Biodiversity and Climate Research Centre (BiK-F)
email: christian.werner@senkenberg.de
2017/02/07
"""
from collections import OrderedDict
import datetime
import glob
import logging
import math
import numpy as np
import os
import pandas as pd
import string
import time
import xarray as xr
from ._geoprocessing import analyze_filename_dem, \
classify_aspect, \
classify_landform, \
calculate_asp_slope, \
compute_spatial_dataset
from ._srtm1 import split_srtm1_dataset
__version__ = "0.0.2"
log = logging.getLogger(__name__)
# import constants
from . import NODATA
from . import ENCODING
# quick helpers
# TODO: move to a dedicated file later
def time_dec(func):
"""A decorator to measure execution time of function"""
def wrapper(*arg, **kwargs):
t = time.time()
res = func(*arg, **kwargs)
log.debug('DURATION: <%s> : ' % func.__name__ + str(time.time()-t))
return res
return wrapper
varSoil = {'TOTC': ('soc', 'Soil Organic Carbon', 'soc', 'percent', 0.1),
'SDTO': ('sand', 'Sand', 'sand', 'percent', 1.0),
'STPC': ('silt', 'Silt', 'silt', 'percent', 1.0),
'CLPC': ('clay', 'Clay', 'clay', 'percent', 1.0)}
varLF = {'lfcnt': ('lfcnt', 'Number of landforms', 'lfcnt', '-', 1.0),
'slope': ('slope', 'Slope', 'slope', 'deg', 1.0),
'aspect': ('aspect', 'Aspect', 'aspect', 'deg', 1.0),
'asp_slope': ('asp_slope', 'Aspect-corrected Slope', 'asp_slope', 'deg', 1.0),
'fraction': ('fraction', 'Landform Fraction', 'fraction', '1/1', 1.0),
'elevation': ('elevation', 'Elevation', 'elevation', 'm', 1.0),
'soildepth': ('soildepth', 'Soil Depth', 'soildepth', 'm', 1.0)
}
soil_vars = sorted(varSoil.keys())
lf_vars = sorted(varLF.keys())
def convert_float_coord_to_string(coord, p=2):
"""Convert a (lon,lat) coord to string."""
lon, lat = round(coord[0], p), round(coord[1], p)
LA, LO = 'n', 'e'
if lat < 0: LA = 's'
if lon < 0: LO = 'w'
lat_s = "%.2f" % round(abs(lat),2)
lon_s = "%.2f" % round(abs(lon),2)
coord_s = '%s%s%s%s' % (LA, lat_s.zfill(p+3), LO, lon_s.zfill(p+4))
return coord_s
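# Example: convert_float_coord_to_string((-71.25, -33.45)) returns
# 's33.45w071.25' (lat zero-padded to 5 characters, lon to 6).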
def has_significant_land(ds, min_frac=0.01):
"""Test if land fraction in tile is significant."""
# min_frac in percent; default: 0.01 %
if (ds['mask'].values.sum() / float(len(ds.lat.values) * len(ds.lon.values))) * 100 > min_frac:
return True
return False
def define_landform_classes(step, limit, TYPE='SIMPLE'):
"""Define the landform classes."""
# Parameters:
# - step: elevation interval for landform groups (def: 400m )
# - limit: elevation limit [inclusive, in m]
ele_breaks = [-1000] + list(range(step, limit, step)) + [10000]
ele_cnt = range(1, len(ele_breaks))
# code system [code position 2 & 3, 1= elevations_tep]
# code: [slopeid<1..6>][aspectid<0,1..4>]
#
# slope:
#
# Name SIMPLE WEISS
#
# hilltop 1 1
# upper slope 2*
# mid slope 3* 3*
# flats 4 4
# lower slope 5*
# valley 6 6
#
#
# aspect:
#
# Name SIMPLE WEISS
#
# north 1 1
# east 2 2
# south 3 3
# west 4 4
if TYPE == 'WEISS':
lf_set = [10,21,22,23,24,31,32,33,34,40,51,52,53,54,60]
lf_full_set = []
for e in ele_cnt:
lf_full_set += [x+(100*e) for x in lf_set]
elif TYPE == 'SIMPLE':
# TYPE: SIMPLE (1:hilltop, 3:midslope, 4:flat, 6:valley)
lf_set = [10,31,32,33,34,40,60]
lf_full_set = []
for e in ele_cnt:
lf_full_set += [x+(100*e) for x in lf_set]
else:
log.error('Currently only the classification schemes WEISS and SIMPLE are supported.')
return (lf_full_set, ele_breaks)
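# Example: define_landform_classes(400, 6000, TYPE='SIMPLE') yields elevation
# breaks [-1000, 400, 800, ..., 5600, 10000]; each landform code is then
# slope/aspect code + 100 * elevation band, e.g. 333 = band 3, mid slope,
# south aspect.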
def tiles_already_processed(TILESTORE_PATH):
"""Check if the tile exists."""
existing_tiles = glob.glob(os.path.join(TILESTORE_PATH, '*.nc'))
#existing_tiles = [os.path.basename(x) for x in glob.glob(glob_string)]
processed_tiles = []
for existing_tile in existing_tiles:
with xr.open_dataset(existing_tile) as ds:
source = ds.tile.get('source')
if source is not None:
processed_tiles.append(source)
else:
log.warn('Source attr not set in file %s.' % existing_tile)
return processed_tiles
def match_watermask_shpfile(glob_string):
"""Check if the generated shp glob_string exists."""
found=False
if len(glob.glob(glob_string)) == 0:
shp = None
elif len(glob.glob(glob_string)) == 1:
shp = glob.glob(glob_string)[0]
found = True
else:
log.error("Too many shape files.")
exit()
# second try: look for zip file
if found is False:
shp = glob_string.replace(".shp", ".zip")
if len(glob.glob(shp)) == 0:
shp = None
elif len(glob.glob(shp)) == 1:
shp = glob.glob(shp)[0]
else:
log.error("Too many shape files.")
exit()
return shp
def get_tile_summary(ds, cutoff=0):
"""Compute the fractional cover of the landforms in this tile."""
unique, counts = np.unique(ds['landform_class'].to_masked_array(), return_counts=True)
counts = np.ma.masked_array(counts, mask=unique.mask)
unique = np.ma.compressed(unique)
counts = np.ma.compressed(counts)
total_valid = float(np.sum(counts))
df = pd.DataFrame({'lf_id': unique.astype('int'), 'cells': counts})
df['frac'] = (df['cells'] / df['cells'].sum())*100
df = df[df['frac'] >= cutoff]
df['frac_scaled'] = (df['cells'] / df['cells'].sum())*100
# also get lf-avg of elevation and slope
df['elevation'] = -1
df['slope'] = -1
df['asp_slope'] = -1
df['aspect'] = -1
if 'soildepth' in ds.data_vars:
df['soildepth'] = -1
a_lf = ds['landform_class'].to_masked_array()
# average aspect angles
def avg_aspect(a):
x = 0
y = 0
for v in a.ravel():
x += math.sin(math.radians(v))
y += math.cos(math.radians(v))
avg = math.degrees(math.atan2(x, y))
if avg < 0:
avg += 360
return avg
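# (avg_aspect computes the circular mean: each angle is turned into a unit
# vector, the vectors are summed, and atan2 recovers the mean direction, so
# e.g. 350 and 10 degrees average to 0 rather than 180.)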
# calculate the avg. elevation and slope in landforms
for i, r in df.iterrows():
ix = a_lf == int(r['lf_id'])
lf_slope = ds['slope'].values[ix].mean()
lf_asp_slope = ds['asp_slope'].values[ix].mean()
lf_elevation = ds['elevation'].values[ix].mean()
lf_aspect = avg_aspect(ds['aspect'].values[ix])
if 'soildepth' in ds.data_vars:
lf_soildepth = ds['soildepth'].values[ix].mean()
df.loc[i, 'soildepth'] = lf_soildepth
df.loc[i, 'slope'] = lf_slope
df.loc[i, 'asp_slope'] = lf_asp_slope
df.loc[i, 'elevation'] = lf_elevation
df.loc[i, 'aspect'] = lf_aspect
if 'soildepth' in ds.data_vars:
df.loc[i, 'soildepth'] = lf_soildepth
return df
def tile_files_compatible(files):
"""Get global attribute from all tile netcdf files and check
they were created with an identical elevation step.
"""
fingerprints = []
for file in files:
with xr.open_dataset(file) as ds:
fingerprint = (ds.tile.get('elevation_step'), ds.tile.get('classification'))
fingerprints.append(fingerprint)
# check if elements are equal
if all(x==fingerprints[0] for x in fingerprints):
# check if there are Nones' in any fingerprint
if not all(fingerprints):
return False
return True
return False
def create_stats_table(df, var):
"""Create a landform info table for all coords and given var."""
df_ = df[var].unstack(level=-1, fill_value=NODATA)
# rename columns and split coord tuple col to lon and lat col
df_.columns = ['lf' + str(col) for col in df_.columns]
if 'lf0' in df_.columns:
del df_['lf0']
df_ = df_.reset_index()
df_[['lon', 'lat', 'lf_cnt']] = df_['coord'].apply(pd.Series)
df_['lf_cnt'] = df_['lf_cnt'].astype(int)
# cleanup (move lon, lat to front, drop coord col)
df_.drop('coord', axis=1, inplace=True)
latloncnt_cols = ['lon', 'lat', 'lf_cnt']
new_col_order = latloncnt_cols + \
[x for x in df_.columns.tolist() if x not in latloncnt_cols]
return df_[new_col_order]
@time_dec
def convert_dem_files(cfg, lf_ele_levels):
"""Compute landform units based on elevation, slope, aspect and tpi classes."""
if cfg.SRTMSTORE_PATH is not None:
# if glob_string is a directory, add wildcard for globbing
glob_string = cfg.SRTMSTORE_PATH
if os.path.isdir(cfg.SRTMSTORE_PATH):
glob_string = os.path.join(cfg.SRTMSTORE_PATH, '*')
dem_files = sorted(glob.glob(glob_string))
existing_tiles = tiles_already_processed(cfg.TILESTORE_PATH)
for dem_file in dem_files:
fname = os.path.basename(dem_file)
fdir = os.path.dirname(dem_file)
# SRTM1 default naming convention
str_lat = fname[:3]
str_lon = fname[3:7]
# if tiles don't exist process them
process_tiles = True
if cfg.OVERWRITE:
process_tiles = True
else:
_, source_name = analyze_filename_dem(fname)
if source_name in existing_tiles:
process_tiles = False
if process_tiles:
log.info('processing: %s (%s)' % (dem_file, datetime.datetime.now()))
shp_glob_string = os.path.join(cfg.WATERMASKSTORE_PATH, str_lon + str_lat + '*.shp')
matched_shp_file = match_watermask_shpfile(shp_glob_string.lower())
ds_srtm1 = compute_spatial_dataset(dem_file, fname_shp=matched_shp_file)
tiles = split_srtm1_dataset(ds_srtm1)
for i, tile in enumerate(tiles):
# reclass
if tile != None and has_significant_land(tile):
log.debug("Valid tile %d in file %s." % (i+1, dem_file))
classify_aspect(tile)
classify_landform(tile, elevation_levels=lf_ele_levels, TYPE=cfg.CLASSIFICATION)
calculate_asp_slope(tile)
# store file in tilestore
# get tile center coordinate and name
lon, lat = tile.geo.center()
lonlat_string = convert_float_coord_to_string((lon,lat))
tile_name = "srtm1_processed_%s.nc" % lonlat_string
tile.to_netcdf(os.path.join(cfg.TILESTORE_PATH, tile_name), \
format='NETCDF4_CLASSIC')
else:
log.debug("Empty tile %d in file %s ignored." % (i+1, dem_file))
@time_dec
def compute_statistics(cfg):
"""Extract landform statistics from tiles in tilestore."""
available_tiles = glob.glob(os.path.join(cfg.TILESTORE_PATH, '*.nc'))
log.debug('Number of tiles found: %d' % len(available_tiles))
if len(available_tiles) == 0:
log.error('No processed tiles available in directory "%s"' % cfg.TILESTORE_PATH)
exit()
tiles = sorted(available_tiles)
if not tile_files_compatible(tiles):
log.error('Tile files in %s are not compatible.' % cfg.TILESTORE_PATH)
exit()
tiles_stats = []
for tile in tiles:
log.debug('Computing statistics for tile %s' % tile)
with xr.open_dataset(tile) as ds:
lf_stats = get_tile_summary(ds, cutoff=cfg.CUTOFF)
lf_stats.reset_index(inplace=True)
number_of_ids = len(lf_stats)
lon, lat = ds.geo.center()
coord_tuple = (round(lon,2),round(lat,2), int(number_of_ids))
lf_stats['coord'] = pd.Series([coord_tuple for _ in range(len(lf_stats))])
lf_stats.set_index(['coord', 'lf_id'], inplace=True)
tiles_stats.append( lf_stats )
df = pd.concat(tiles_stats)
frac_lf = create_stats_table(df, 'frac_scaled')
elev_lf = create_stats_table(df, 'elevation')
slope_lf = create_stats_table(df, 'slope')
asp_slope_lf = create_stats_table(df, 'asp_slope')
aspect_lf = create_stats_table(df, 'aspect')
return (frac_lf, elev_lf, slope_lf, asp_slope_lf, aspect_lf)
def is_3d(ds, v):
"""Check if xr.DataArray has 3 dimensions."""
dims = ds[v].dims
if len(dims) == 3:
return True
return False
def assign_to_dataarray(data, df, lf_full_set, refdata=False):
"""Place value into correct location of data array."""
if refdata==True:
data[:] = NODATA
else:
data[:] = np.nan
for _, r in df.iterrows():
if refdata:
data.loc[r.lat, r.lon] = r.lf_cnt
else:
for lf in r.index[3:]:
if r[lf] > NODATA:
lf_id = int(lf[2:])
lf_pos = lf_full_set.index(lf_id)
data.loc[dict(lf_id=lf_id, lat=r.lat, lon=r.lon)] = r[lf]
return data
def spatialclip_df(df, extent):
"""Clip dataframe wit lat lon columns by extent."""
if any(e is None for e in extent):
log.warn("SpatialClip: extent passed is None.")
lon1, lat1, lon2, lat2 = extent
if ('lon' not in df.columns) or ('lat' not in df.columns):
log.warn("SpatialClip: lat/ lon cloumn missing in df.")
return df[((df.lon >= lon1) & (df.lon <= lon2)) &
((df.lat >= lat1) & (df.lat <= lat2))]
def build_site_netcdf(soilref, elevref, extent=None):
"""Build the site netcdf file."""
# extent: (x1, y1, x2, y2)
ds_soil_orig = xr.open_dataset(soilref)
ds_ele_orig = xr.open_dataset(elevref)
if extent is not None:
lat_min, lat_max = extent[1], extent[3]
lon_min, lon_max = extent[0], extent[2]
# slice simulation domain
ds_soil = ds_soil_orig.where((ds_soil_orig.lon >= lon_min) & (ds_soil_orig.lon <= lon_max) &
(ds_soil_orig.lat >= lat_min) & (ds_soil_orig.lat <= lat_max) &
(ds_soil_orig.lev==1.0), drop=True).squeeze(drop=True)
ds_ele = ds_ele_orig.where((ds_ele_orig.longitude >= lon_min) & (ds_ele_orig.longitude <= lon_max) &
(ds_ele_orig.latitude >= lat_min) & (ds_ele_orig.latitude <= lat_max), drop=True).squeeze(drop=True)
else:
ds_soil = ds_soil_orig.sel(lev=1.0).squeeze(drop=True)
ds_ele = ds_ele_orig.squeeze(drop=True)
del ds_soil['lev']
# identify locations that need filling and use left neighbor
smask = np.where(ds_soil['TOTC'].to_masked_array().mask, 1, 0)
emask = np.where(ds_ele['data'].to_masked_array().mask, 1, 0)
# no soil data but elevation: gap-fill with neighbors
missing = np.where((smask == 1) & (emask == 0), 1, 0)
ix, jx = np.where(missing == 1)
if len(ix) > 0:
log.debug('Cells with elevation but no soil data [BEFORE GF: %d].' % len(ix))
for i, j in zip(ix, jx):
for v in soil_vars:
if (j > 0) and np.isfinite(ds_soil[v][i, j-1]):
ds_soil[v][i, j] = ds_soil[v][i, j-1].copy(deep=True)
elif (j < ds_soil[v].shape[1]-1) and np.isfinite(ds_soil[v][i, j+1]):
ds_soil[v][i, j] = ds_soil[v][i, j+1].copy(deep=True)
else:
log.warn('neighbours have nodata!')
x = ds_soil[v][i, j].to_masked_array()
smask2 = np.where(ds_soil['TOTC'].to_masked_array().mask, 1, 0)
missing = np.where((smask2 == 1) & (emask == 0), 1, 0)
ix, jx = np.where(missing == 1)
log.debug('Cells with elevation but no soil data [AFTER GF: %d].' % len(ix))
dsout = xr.Dataset()
# soil vars
for v in soil_vars:
conv = varSoil[v][-1]
da = ds_soil[v].copy(deep=True) * conv
da.name = varSoil[v][0]
vattr = {'name': varSoil[v][0],
'long_name': varSoil[v][1],
'standard_name': varSoil[v][2],
'units': varSoil[v][3],
'coordinates': "lat lon"}
da.tile.update_attrs(vattr)
da.tile.update_encoding(ENCODING)
da[:] = np.ma.masked_where(emask, da.to_masked_array())
dsout[da.name] = da
# ele var
da = xr.full_like(da.copy(deep=True), np.nan)
da.name = 'elevation'
vattr = {'name': 'elevation', 'long_name': 'Elevation',
'units': 'meters', 'standard_name': 'elevation'}
da.tile.update_attrs(vattr)
da.tile.update_encoding(ENCODING)
da[:] = ds_ele['data'].to_masked_array()
dsout[da.name] = da
return dsout
@time_dec
def build_landform_netcdf(lf_full_set, df_dict, cfg, elevation_levels, refnc=None):
"""Build landform netcdf based on refnc dims and datatables."""
def has_soildepth():
if 'soildepth_lf' in df_dict:
return True
else:
return False
dsout = xr.Dataset()
COORDS = [('lf_id', lf_full_set), ('lat', refnc.lat), ('lon', refnc.lon)]
SHAPE = tuple([len(x) for _, x in COORDS])
# initiate data arrays
_blank = np.empty(SHAPE)
da_lfcnt = xr.DataArray(_blank.copy()[0,:,:].astype(int), name='lfcnt',
coords=COORDS[1:])
da_frac = xr.DataArray(_blank.copy(), name='fraction', coords=COORDS)
da_slope = xr.DataArray(_blank.copy(), name='slope', coords=COORDS)
da_asp_slope = xr.DataArray(_blank.copy(), name='asp_slope', coords=COORDS)
da_elev = xr.DataArray(_blank.copy(), name='elevation', coords=COORDS)
da_aspect = xr.DataArray(_blank.copy(), name='aspect', coords=COORDS)
if has_soildepth(): da_soildepth = xr.DataArray(_blank.copy(), name='soildepth', coords=COORDS)
frac_lf = df_dict['frac_lf']
slope_lf = df_dict['slope_lf']
asp_slope_lf = df_dict['asp_slope_lf']
elev_lf = df_dict['elev_lf']
aspect_lf = df_dict['aspect_lf']
if has_soildepth(): soildepth_lf = df_dict['soildepth_lf']
# check that landform coordinates are in refnc
df_extent = [frac_lf.lon.min(), frac_lf.lat.min(), frac_lf.lon.max(), frac_lf.lat.max()]
log.debug('df_extent: %s' % str(df_extent))
log.debug('contains: %s' % str(refnc.geo.contains(df_extent)))
if refnc.geo.contains(df_extent) == False:
frac_lf = spatialclip_df(frac_lf, refnc.geo.extent)
slope_lf = spatialclip_df(slope_lf, refnc.geo.extent)
asp_slope_lf = spatialclip_df(asp_slope_lf, refnc.geo.extent)
elev_lf = spatialclip_df(elev_lf, refnc.geo.extent)
aspect_lf = spatialclip_df(aspect_lf, refnc.geo.extent)
if has_soildepth(): spatialclip_df(soildepth_lf, refnc.geo.extent)
# dump files
frac_lf.to_csv(os.path.join(cfg.OUTDIR, 'df_frac.csv'), index=False)
slope_lf.to_csv(os.path.join(cfg.OUTDIR, 'df_slope.csv'), index=False)
asp_slope_lf.to_csv(os.path.join(cfg.OUTDIR, 'df_asp_slope.csv'), index=False)
elev_lf.to_csv(os.path.join(cfg.OUTDIR, 'df_elev.csv'), index=False)
aspect_lf.to_csv(os.path.join(cfg.OUTDIR, 'df_aspect.csv'), index=False)
if has_soildepth(): soildepth_lf.to_csv(os.path.join(cfg.OUTDIR, 'df_soildepth.csv'), index=False)
# assign dataframe data to arrays
da_lfcnt = assign_to_dataarray(da_lfcnt, frac_lf, lf_full_set, refdata=True)
da_frac = assign_to_dataarray(da_frac, frac_lf, lf_full_set)
da_slope = assign_to_dataarray(da_slope, slope_lf, lf_full_set)
da_asp_slope = assign_to_dataarray(da_asp_slope, asp_slope_lf, lf_full_set)
da_elev = assign_to_dataarray(da_elev, elev_lf, lf_full_set)
da_aspect = assign_to_dataarray(da_aspect, aspect_lf, lf_full_set)
if has_soildepth(): da_soildepth = assign_to_dataarray(da_soildepth, soildepth_lf, lf_full_set)
# store arrays in dataset
dsout[da_lfcnt.name] = da_lfcnt
dsout[da_frac.name] = da_frac
dsout[da_slope.name] = da_slope
dsout[da_asp_slope.name] = da_asp_slope
dsout[da_elev.name] = da_elev
dsout[da_aspect.name] = da_aspect
if has_soildepth(): dsout[da_soildepth.name] = da_soildepth
for v in dsout.data_vars:
vattr = {}
if v in lf_vars:
vattr = {'name': varLF[v][0],
'long_name': varLF[v][1],
'standard_name': varLF[v][2],
'units': varLF[v][3],
'coordinates': "lat lon"}
dsout[v].tile.update_attrs(vattr)
dsout[v].tile.update_encoding(ENCODING)
dsout['lat'].tile.update_attrs(dict(standard_name='latitude',
long_name='latitude',
units='degrees_north'))
dsout['lon'].tile.update_attrs(dict(standard_name='longitude',
long_name='longitude',
units='degrees_east'))
dsout['lf_id'].tile.update_attrs(dict(standard_name='lf_id',
long_name='lf_id',
units='-'))
for dv in dsout.data_vars:
dsout[dv].tile.update_encoding(ENCODING)
# register the specific landform properties (elevation steps, classification)
dsout.tile.set('elevation_step', elevation_levels[1])
dsout.tile.set('classification', cfg.CLASSIFICATION.lower())
return dsout
def build_compressed(ds):
"""Build LPJ-Guess 4.0 compatible compressed netcdf file."""
# identify landforms netcdf
if 'lfcnt' in ds.data_vars:
v = 'lfcnt'
elif 'elevation' in ds.data_vars:
v = 'elevation'
else:
log.error("Not a valid xr.Dataset (landforms or site only).")
# create id position dataarray
da_ids = xr.ones_like(ds[v]) * NODATA
latL = []
lonL = []
d = ds[v].to_masked_array()
# REVIEW: why is 'to_masked_array()' not working here?
d = np.ma.masked_where(d == NODATA, d)
land_id = 0
D_ids = OrderedDict()
for j in reversed(range(len(d))):
for i in range(len(d[0])):
if d[j, i] is not np.ma.masked:
lat = float(ds['lat'][j].values)
lon = float(ds['lon'][i].values)
latL.append(lat)
lonL.append(lon)
da_ids.loc[lat, lon] = land_id
D_ids[(lat, lon)] = land_id
land_id += 1
LFIDS = range(land_id)
# create coordinate variables
_blank = np.zeros(len(LFIDS))
lats = xr.DataArray(latL, name='lat', coords=[('land_id', LFIDS)])
lons = xr.DataArray(lonL, name='lon', coords=[('land_id', LFIDS)])
lats.tile.update_attrs(dict(standard_name='latitude',
long_name='latitude',
units='degrees_north'))
lons.tile.update_attrs(dict(standard_name='longitude',
long_name='longitude',
units='degrees_east'))
# create land_id reference array
# TODO: clip land_id array to Chile country extent?
da_ids.tile.update_encoding(ENCODING)
ds_ids = da_ids.to_dataset(name='land_id')
# create xr.Dataset
dsout = xr.Dataset()
dsout[lats.name] = lats
dsout[lons.name] = lons
# walk through variables, get lat/lon cells' data
for v in ds.data_vars:
if is_3d(ds, v):
_shape = (len(LFIDS), len(ds[ds[v].dims[0]]))
COORDS = [('land_id', LFIDS), ('lf_id', ds['lf_id'])]
else:
_shape = (len(LFIDS),)
COORDS = [('land_id', LFIDS)]
_blank = np.ones( _shape )
_da = xr.DataArray(_blank[:], name=v, coords=COORDS)
for lat, lon in zip(latL, lonL):
land_id = D_ids[(lat, lon)]
vals = ds[v].sel(lat=lat, lon=lon).to_masked_array()
_da.loc[land_id] = vals
_da.tile.update_attrs(ds[v].attrs)
_da.tile.update_encoding(ENCODING)
dsout[_da.name] = _da
if is_3d(ds, v):
dsout['lf_id'].tile.update_attrs(dict(standard_name='lf_id',
long_name='lf_id',
units='-'))
# copy lgt attributes from src to dst
dsout.tile.copy_attrs(ds)
return (ds_ids, dsout)
def mask_dataset(ds, valid):
"""Mask all values that are not valid/ 1 (2d or 3d)."""
for v in ds.data_vars:
dims = ds[v].dims
if len(dims) > len(valid.shape):
z = len(ds[v].values)
valid = np.array(z*[valid])
ds[v].values = np.ma.masked_where(valid == 0, ds[v].values).filled(NODATA)
return ds
def create_gridlist(ds):
"""Create LPJ-Guess 4.0 gridlist file."""
outL = []
for j in reversed(range(len(ds['land_id']))):
for i in range(len(ds['land_id'][0])):
x = ds['land_id'][j, i].values
if x != NODATA:
lat = float(ds['lat'][j].values)
lon = float(ds['lon'][i].values)
land_id = int(ds['land_id'].sel(lat=lat, lon=lon).values)
outS = "%3.2f %3.2f %d" % (lat, lon, land_id)
outL.append(outS)
return '\n'.join(outL) + '\n'
def main(cfg):
"""Main Script."""
# default soil and elevation data (contained in package)
import pkg_resources
SOIL_NC = pkg_resources.resource_filename(__name__, '../data/GLOBAL_WISESOIL_DOM_05deg.nc')
ELEVATION_NC = pkg_resources.resource_filename(__name__, '../data/GLOBAL_ELEVATION_05deg.nc')
log.info("Converting DEM files and computing landform stats")
# define the final landform classes (now with elevation brackets)
lf_classes, lf_ele_levels = define_landform_classes(200, 6000, TYPE=cfg.CLASSIFICATION)
# process dem files to tiles (if not already processed)
convert_dem_files(cfg, lf_ele_levels)
#sitenc = build_site_netcdf(SOIL_NC, ELEVATION_NC, extent=cfg.REGION)
# compute stats from tiles
df_frac, df_elev, df_slope, df_asp_slope, df_aspect = compute_statistics(cfg)
#print 'reading files'
#df_frac = pd.read_csv('lfdata.cutoff_1.0p/df_frac.csv')
#df_asp_slope = pd.read_csv('lfdata.cutoff_1.0p/df_asp_slope.csv')
#df_slope = pd.read_csv('lfdata.cutoff_1.0p/df_slope.csv')
#df_aspect = pd.read_csv('lfdata.cutoff_1.0p/df_aspect.csv')
#df_elev = pd.read_csv('lfdata.cutoff_1.0p/df_elev.csv')
# build netcdfs
log.info("Building 2D netCDF files")
sitenc = build_site_netcdf(SOIL_NC, ELEVATION_NC, extent=cfg.REGION)
df_dict = dict(frac_lf=df_frac, elev_lf=df_elev, slope_lf=df_slope,
asp_slope_lf=df_asp_slope, aspect_lf=df_aspect)
landformnc = build_landform_netcdf(lf_classes, df_dict, cfg, lf_ele_levels, refnc=sitenc)
# clip to joined mask
#elev_mask = np.where(sitenc['elevation'].values == NODATA, 0, 1)
#landform_mask = np.where(landformnc['lfcnt'].values == NODATA, 0, 1)
#valid_mask = elev_mask * landform_mask
elev_mask = ~np.ma.getmaskarray(sitenc['elevation'].to_masked_array())
sand_mask = ~np.ma.getmaskarray(sitenc['sand'].to_masked_array())
land_mask = ~np.ma.getmaskarray(landformnc['lfcnt'].to_masked_array())
valid_mask = elev_mask * sand_mask * land_mask
sitenc = mask_dataset(sitenc, valid_mask)
landformnc = mask_dataset(landformnc, valid_mask)
landform_mask = np.where(landformnc['lfcnt'].values == NODATA, np.nan, 1)
#landform_mask = np.where(landform_mask == True, np.nan, 1)
for v in sitenc.data_vars:
sitenc[v][:] = sitenc[v].values * landform_mask
# write 2d/ 3d netcdf files
sitenc.to_netcdf(os.path.join(cfg.OUTDIR, 'sites_2d.nc'),
format='NETCDF4_CLASSIC')
landformnc.to_netcdf(os.path.join(cfg.OUTDIR, 'landforms_2d.nc'),
format='NETCDF4_CLASSIC')
# convert to compressed netcdf format
log.info("Building compressed format netCDF files")
ids_2d, comp_sitenc = build_compressed(sitenc)
ids_2db, comp_landformnc = build_compressed(landformnc)
# write netcdf files
ids_2d.to_netcdf(os.path.join(cfg.OUTDIR, "land_ids_2d.nc"),
format='NETCDF4_CLASSIC')
ids_2db.to_netcdf(os.path.join(cfg.OUTDIR, "land_ids_2db.nc"),
format='NETCDF4_CLASSIC')
comp_landformnc.to_netcdf(os.path.join(cfg.OUTDIR, "landform_data.nc"),
format='NETCDF4_CLASSIC')
comp_sitenc.to_netcdf(os.path.join(cfg.OUTDIR, "site_data.nc"),
format='NETCDF4_CLASSIC')
# gridlist file
log.info("Creating gridlist file")
gridlist = create_gridlist(ids_2d)
with open(os.path.join(cfg.OUTDIR, cfg.GRIDLIST_TXT), 'w') as f:
f.write(gridlist)
log.info("Done")
| 36.282767
| 135
| 0.596247
| 4,142
| 29,897
| 4.104056
| 0.138098
| 0.014589
| 0.010589
| 0.013001
| 0.259133
| 0.164245
| 0.128949
| 0.109771
| 0.084476
| 0.065651
| 0
| 0.015101
| 0.271265
| 29,897
| 823
| 136
| 36.326853
| 0.765135
| 0.155835
| 0
| 0.179389
| 0
| 0
| 0.104742
| 0.003595
| 0
| 0
| 0
| 0.00243
| 0
| 1
| 0.043893
| false
| 0.001908
| 0.030534
| 0
| 0.124046
| 0.009542
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
66942000229050463aff5906c4c70265c74740a1
| 4,379
|
py
|
Python
|
html_parsing/www_dns_shop_ru/check_update_price_date__QWebEnginePage_bs4.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 117
|
2015-12-18T07:18:27.000Z
|
2022-03-28T00:25:54.000Z
|
html_parsing/www_dns_shop_ru/check_update_price_date__QWebEnginePage_bs4.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 8
|
2018-10-03T09:38:46.000Z
|
2021-12-13T19:51:09.000Z
|
html_parsing/www_dns_shop_ru/check_update_price_date__QWebEnginePage_bs4.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 28
|
2016-08-02T17:43:47.000Z
|
2022-03-21T08:31:12.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
"""Скрипт проверяет дату обновления прайса на сайте http://www.dns-shop.ru/"""
# # Based on http://stackoverflow.com/a/37755811/5909792
# def get_html(url, check_content_func=None):
# # from PyQt5.QtCore import QUrl
# # from PyQt5.QtWidgets import QApplication
# # from PyQt5.QtWebEngineWidgets import QWebEnginePage
#
# from PyQt4.QtCore import QUrl
# from PyQt4.QtGui import QApplication
# from PyQt4.QtWebKit import QWebPage as QWebEnginePage
#
# class ExtractorHtml:
# def __init__(self, url):
# self.html = None
#
# _app = QApplication([])
# self._page = QWebEnginePage()
# self._page.mainFrame().load(QUrl(url))
# # self._page.load(QUrl(url))
# self._page.loadFinished.connect(self._load_finished_handler)
#
# # Wait for the page to load and fetch its contents
# # This loop makes the asynchronous code synchronous
# while self.html is None:
# _app.processEvents()
#
# _app.quit()
#
# self._page = None
#
# def _callable(self, data):
# if check_content_func:
# if check_content_func(data):
# self.html = data
#
# else:
# self.html = data
#
# def _load_finished_handler(self):
# # self._page.toHtml(self._callable)
# self.html = self._page.mainFrame().toHtml()
#
# return ExtractorHtml(url).html
#
#
# class UpdateDateTextNotFound(Exception):
# pass
#
#
# import os
#
#
# def download_price():
# url = 'http://www.dns-shop.ru/'
#
# html = get_html(url, lambda html: 'price-list-downloader' in html)
#
# from bs4 import BeautifulSoup
# root = BeautifulSoup(html, 'lxml')
#
# for a in root.select('#price-list-downloader a'):
# href = a['href']
#
# if href.endswith('.xls'):
# from urllib.parse import urljoin
# file_url = urljoin(url, href)
# # print(file_url)
#
# update_date_text = a.next_sibling.strip()
#
# import re
# match = re.search(r'\d{,2}.\d{,2}.\d{4}', update_date_text)
# if match is None:
# raise UpdateDateTextNotFound()
#
# date_string = match.group()
# # print(date_string)
#
# # from datetime import datetime
# # print(datetime.strptime(date_string, '%d.%m.%Y'))
#
# file_name = os.path.basename(href)
# file_name = date_string + '_' + file_name
#
# if os.path.exists(file_name):
# return file_name
#
# from urllib.request import urlretrieve
# urlretrieve(file_url, file_name)
#
# return file_name
#
# return
#
#
# while True:
# file_name = download_price()
# print(file_name)
#
# import time
# # time.sleep(10 * 60 * 60)
# time.sleep(60)
from PyQt5.QtCore import QUrl, QTimer
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebEngineWidgets import QWebEnginePage
def _callable(html):
if 'price-list-downloader' not in html:
return
from bs4 import BeautifulSoup
root = BeautifulSoup(html, 'lxml')
for a in root.select('#price-list-downloader a'):
href = a['href']
if href.endswith('.xls'):
from urllib.parse import urljoin
file_url = urljoin(url, href)
update_date_text = a.next_sibling.strip()
import re
match = re.search(r'\d{1,2}\.\d{1,2}\.\d{4}', update_date_text)
if match is None:
return
date_string = match.group()
import os
file_name = os.path.basename(href)
file_name = date_string + '_' + file_name
from datetime import datetime
print(datetime.today().date(), file_name, file_url)
url = 'http://www.dns-shop.ru/'
app = QApplication([])
page = QWebEnginePage()
page.load(QUrl(url))
page.loadFinished.connect(lambda x=None: page.toHtml(_callable))
# Schedule the page load to be triggered every 10 hours
timer = QTimer()
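# QTimer takes milliseconds: 10 * 60 * 60 * 1000 ms == 10 hours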
timer.setInterval(10 * 60 * 60 * 1000)
timer.timeout.connect(lambda x=None: page.load(QUrl(url)))
timer.start()
app.exec()
| 26.70122
| 78
| 0.58575
| 503
| 4,379
| 4.952286
| 0.308151
| 0.04175
| 0.017664
| 0.016861
| 0.431152
| 0.354075
| 0.307507
| 0.307507
| 0.307507
| 0.307507
| 0
| 0.017425
| 0.292304
| 4,379
| 163
| 79
| 26.865031
| 0.786383
| 0.633021
| 0
| 0.057143
| 0
| 0
| 0.076596
| 0.030496
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0
| 0.228571
| 0
| 0.314286
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6696f698bff747564601f269987739a28d5abfe1
| 12,918
|
py
|
Python
|
tests/test_adapters.py
|
Shelestova-Anastasia/cutadapt
|
6e239b3b8e20d17fdec041dc1d967ec2a3cfe770
|
[
"MIT"
] | null | null | null |
tests/test_adapters.py
|
Shelestova-Anastasia/cutadapt
|
6e239b3b8e20d17fdec041dc1d967ec2a3cfe770
|
[
"MIT"
] | null | null | null |
tests/test_adapters.py
|
Shelestova-Anastasia/cutadapt
|
6e239b3b8e20d17fdec041dc1d967ec2a3cfe770
|
[
"MIT"
] | null | null | null |
import pytest
from dnaio import Sequence
from cutadapt.adapters import (
RemoveAfterMatch,
RemoveBeforeMatch,
FrontAdapter,
BackAdapter,
PrefixAdapter,
SuffixAdapter,
LinkedAdapter,
MultipleAdapters,
IndexedPrefixAdapters,
IndexedSuffixAdapters,
)
def test_back_adapter_absolute_number_of_errors():
adapter = BackAdapter(
sequence="GATCGGAAGA",
max_errors=1,
min_overlap=3,
)
assert adapter.max_error_rate == 1 / 10
def test_back_adapter_absolute_number_of_errors_with_wildcards():
adapter = BackAdapter(
sequence="NNNNNNNNNNGATCGGAAGA",
max_errors=1,
)
assert adapter.max_error_rate == 1 / 10
def test_front_adapter_partial_occurrence_in_back():
adapter = FrontAdapter("CTGAATT", max_errors=0, min_overlap=4)
assert adapter.match_to("GGGGGCTGAA") is None
def test_back_adapter_partial_occurrence_in_front():
adapter = BackAdapter("CTGAATT", max_errors=0, min_overlap=4)
assert adapter.match_to("AATTGGGGGGG") is None
def test_issue_52():
adapter = BackAdapter(
sequence="GAACTCCAGTCACNNNNN",
max_errors=0.12,
min_overlap=5,
read_wildcards=False,
adapter_wildcards=True,
)
sequence = "CCCCAGAACTACAGTCCCGGC"
am = RemoveAfterMatch(
astart=0,
astop=17,
rstart=5,
rstop=21,
score=15,
errors=2,
adapter=adapter,
sequence=sequence,
)
assert am.wildcards() == "GGC"
"""
The result above should actually be 'CGGC' since the correct
alignment is this one:
adapter GAACTCCAGTCACNNNNN
mismatches X X
read CCCCAGAACTACAGTC-CCGGC
Since we do not keep the alignment, guessing 'GGC' is the best we
can currently do.
"""
def test_issue_80():
# This issue was at the time not considered to be an actual issue with the alignment
# algorithm. The following alignment with three errors was found because it had more
# matches than the 'obvious' one:
#
# TCGTATGCCGTCTTC
# =========X==XX=
# TCGTATGCCCTC--C
#
# The alignment algorithm has since been changed so that not the number of matches
# is relevant, but a score that penalizes indels. Now, the resulting alignment
# should be this one (with only two errors):
#
# TCGTATGCCGTCTTC
# =========X==X
# TCGTATGCCCTCC
adapter = BackAdapter(
sequence="TCGTATGCCGTCTTC",
max_errors=0.2,
min_overlap=3,
read_wildcards=False,
adapter_wildcards=False,
)
result = adapter.match_to("TCGTATGCCCTCC")
assert result.errors == 2, result
assert result.astart == 0, result
assert result.astop == 13, result
def test_back_adapter_indel_and_exact_occurrence():
adapter = BackAdapter(
sequence="GATCGGAAGA",
max_errors=0.1,
min_overlap=3,
)
match = adapter.match_to("GATCGTGAAGAGATCGGAAGA")
# We want the leftmost match of these two possible ones:
# GATCGTGAAGAGATCGGAAGA
# GATCG-GAAGA
# GATCGGAAGA
assert match.astart == 0
assert match.astop == 10
assert match.rstart == 0
assert match.rstop == 11
assert match.errors == 1
assert match.score == 8
def test_back_adapter_indel_and_mismatch_occurrence():
adapter = BackAdapter(
sequence="GATCGGAAGA",
max_errors=0.1,
min_overlap=3,
)
match = adapter.match_to("CTGGATCGGAGAGCCGTAGATCGGGAGAGGC")
# CTGGATCGGA-GAGCCGTAGATCGGGAGAGGC
# ||||||| || ||||||X|||
# GATCGGAAGA GATCGGAAGA
assert match.astart == 0
assert match.astop == 10
assert match.rstart == 3
assert match.rstop == 12
assert match.score == 7
assert match.errors == 1
def test_str():
a = BackAdapter("ACGT", max_errors=0.1)
str(a)
str(a.match_to("TTACGT"))
def test_prefix_with_indels_one_mismatch():
a = PrefixAdapter(
sequence="GCACATCT",
max_errors=0.15,
min_overlap=1,
read_wildcards=False,
adapter_wildcards=False,
indels=True,
)
# GCACATCGGAA
# |||||||X
# GCACATCT
result = a.match_to("GCACATCGGAA")
assert result.astart == 0
assert result.astop == 8
assert result.rstart == 0
assert result.rstop == 8
assert result.score == 6 # 7 matches, 1 mismatch
assert result.errors == 1
def test_prefix_with_indels_two_mismatches():
a = PrefixAdapter(
sequence="GCACATTT",
max_errors=0.3,
min_overlap=1,
read_wildcards=False,
adapter_wildcards=False,
indels=True,
)
result = a.match_to("GCACATCGGAA")
# GCACATCGGAA
# ||||||XX
# GCACATTT
assert result.astart == 0
assert result.astop == 8
assert result.rstart == 0
assert result.rstop == 8
assert result.score == 4
assert result.errors == 2
def test_linked_adapter():
front_adapter = PrefixAdapter("AAAA", min_overlap=4)
back_adapter = BackAdapter("TTTT", min_overlap=3)
linked_adapter = LinkedAdapter(
front_adapter,
back_adapter,
front_required=True,
back_required=False,
name="name",
)
assert linked_adapter.front_adapter.min_overlap == 4
assert linked_adapter.back_adapter.min_overlap == 3
read = Sequence(name="seq", sequence="AAAACCCCCTTTT")
trimmed = linked_adapter.match_to(read.sequence).trimmed(read)
assert trimmed.name == "seq"
assert trimmed.sequence == "CCCCC"
def test_info_record():
adapter = BackAdapter(
sequence="GAACTCCAGTCACNNNNN",
max_errors=0.12,
min_overlap=5,
read_wildcards=False,
adapter_wildcards=True,
name="Foo",
)
read = Sequence(name="abc", sequence="CCCCAGAACTACAGTCCCGGC")
am = RemoveAfterMatch(
astart=0,
astop=17,
rstart=5,
rstop=21,
score=15,
errors=2,
adapter=adapter,
sequence=read.sequence,
)
assert am.get_info_records(read) == [
[
"",
2,
5,
21,
"CCCCA",
"GAACTACAGTCCCGGC",
"",
"Foo",
"",
"",
"",
]
]
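# random_match_probabilities(gc) presumably yields, for each adapter prefix length,
# the chance that a random sequence with the given GC content matches by accident;
# with gc=0.5 each base matches with probability 0.25, with gc=0.2 A/T match with 0.4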
def test_random_match_probabilities():
a = BackAdapter("A", max_errors=0.1).create_statistics()
assert a.end.random_match_probabilities(0.5) == [1, 0.25]
assert a.end.random_match_probabilities(0.2) == [1, 0.4]
for s in ("ACTG", "XMWH"):
a = BackAdapter(s, max_errors=0.1).create_statistics()
assert a.end.random_match_probabilities(0.5) == [
1,
0.25,
0.25**2,
0.25**3,
0.25**4,
]
assert a.end.random_match_probabilities(0.2) == [
1,
0.4,
0.4 * 0.1,
0.4 * 0.1 * 0.4,
0.4 * 0.1 * 0.4 * 0.1,
]
a = FrontAdapter("GTCA", max_errors=0.1).create_statistics()
assert a.end.random_match_probabilities(0.5) == [
1,
0.25,
0.25**2,
0.25**3,
0.25**4,
]
assert a.end.random_match_probabilities(0.2) == [
1,
0.4,
0.4 * 0.1,
0.4 * 0.1 * 0.4,
0.4 * 0.1 * 0.4 * 0.1,
]
def test_add_adapter_statistics():
stats = BackAdapter("A", name="name", max_errors=0.1).create_statistics()
end_stats = stats.end
end_stats.adjacent_bases["A"] = 7
end_stats.adjacent_bases["C"] = 19
end_stats.adjacent_bases["G"] = 23
end_stats.adjacent_bases["T"] = 42
end_stats.adjacent_bases[""] = 45
end_stats.errors[10][0] = 100
end_stats.errors[10][1] = 11
end_stats.errors[10][2] = 3
end_stats.errors[20][0] = 600
end_stats.errors[20][1] = 66
end_stats.errors[20][2] = 6
stats2 = BackAdapter("A", name="name", max_errors=0.1).create_statistics()
end_stats2 = stats2.end
end_stats2.adjacent_bases["A"] = 43
end_stats2.adjacent_bases["C"] = 31
end_stats2.adjacent_bases["G"] = 27
end_stats2.adjacent_bases["T"] = 8
end_stats2.adjacent_bases[""] = 5
end_stats2.errors[10][0] = 234
end_stats2.errors[10][1] = 14
end_stats2.errors[10][3] = 5
end_stats2.errors[15][0] = 90
end_stats2.errors[15][1] = 17
end_stats2.errors[15][2] = 2
stats += stats2
r = stats.end
assert r.adjacent_bases == {"A": 50, "C": 50, "G": 50, "T": 50, "": 50}
assert r.errors == {
10: {0: 334, 1: 25, 2: 3, 3: 5},
15: {0: 90, 1: 17, 2: 2},
20: {0: 600, 1: 66, 2: 6},
}
def test_linked_matches_property():
"""Accessing matches property of non-anchored linked adapters"""
# Issue #265
front_adapter = FrontAdapter("GGG")
back_adapter = BackAdapter("TTT")
la = LinkedAdapter(
front_adapter,
back_adapter,
front_required=False,
back_required=False,
name="name",
)
assert la.match_to("AAAATTTT").score == 3
@pytest.mark.parametrize("adapter_class", [PrefixAdapter, SuffixAdapter])
def test_no_indels_empty_read(adapter_class):
# Issue #376
adapter = adapter_class("ACGT", indels=False)
adapter.match_to("")
def test_prefix_match_with_n_wildcard_in_read():
adapter = PrefixAdapter("NNNACGT", indels=False)
match = adapter.match_to("TTTACGTAAAA")
assert match is not None and (0, 7) == (match.rstart, match.rstop)
match = adapter.match_to("NTTACGTAAAA")
assert match is not None and (0, 7) == (match.rstart, match.rstop)
def test_suffix_match_with_n_wildcard_in_read():
adapter = SuffixAdapter("ACGTNNN", indels=False)
match = adapter.match_to("TTTTACGTTTT")
assert match is not None and (4, 11) == (match.rstart, match.rstop)
match = adapter.match_to("TTTTACGTCNC")
assert match is not None and (4, 11) == (match.rstart, match.rstop)
def test_multiple_adapters():
a1 = BackAdapter("GTAGTCCCGC")
a2 = BackAdapter("GTAGTCCCCC")
ma = MultipleAdapters([a1, a2])
match = ma.match_to("ATACCCCTGTAGTCCCC")
assert match.adapter is a2
def test_indexed_prefix_adapters():
adapters = [
PrefixAdapter("GAAC", indels=False),
PrefixAdapter("TGCT", indels=False),
]
ma = IndexedPrefixAdapters(adapters)
match = ma.match_to("GAACTT")
assert match.adapter is adapters[0]
match = ma.match_to("TGCTAA")
assert match.adapter is adapters[1]
assert ma.match_to("GGGGGGG") is None
def test_indexed_prefix_adapters_incorrect_type():
with pytest.raises(ValueError):
IndexedPrefixAdapters(
[
PrefixAdapter("GAAC", indels=False),
SuffixAdapter("TGCT", indels=False),
]
)
def test_indexed_very_similar(caplog):
IndexedPrefixAdapters(
[
PrefixAdapter("GAAC", max_errors=1, indels=False),
PrefixAdapter("GAAG", max_errors=1, indels=False),
]
)
assert "cannot be assigned uniquely" in caplog.text
def test_indexed_too_high_k():
with pytest.raises(ValueError) as e:
IndexedPrefixAdapters(
[
PrefixAdapter("ACGTACGT", max_errors=3, indels=False),
PrefixAdapter("AAGGTTCC", max_errors=2, indels=False),
]
)
assert "Error rate too high" in e.value.args[0]
def test_indexed_suffix_adapters():
adapters = [
SuffixAdapter("GAAC", indels=False),
SuffixAdapter("TGCT", indels=False),
]
ma = IndexedSuffixAdapters(adapters)
match = ma.match_to("TTGAAC")
assert match.adapter is adapters[0]
match = ma.match_to("AATGCT")
assert match.adapter is adapters[1]
def test_indexed_suffix_adapters_incorrect_type():
with pytest.raises(ValueError):
IndexedSuffixAdapters(
[
SuffixAdapter("GAAC", indels=False),
PrefixAdapter("TGCT", indels=False),
]
)
def test_multi_prefix_adapter_with_indels():
adapters = [
PrefixAdapter("GTAC", max_errors=1, indels=True),
PrefixAdapter("TGCT", max_errors=1, indels=True),
]
ma = IndexedPrefixAdapters(adapters)
match = ma.match_to("GATACGGG")
assert match.adapter is adapters[0]
match = ma.match_to("TAGCTAA")
assert match.adapter is adapters[1]
def test_indexed_prefix_adapters_with_n_wildcard():
sequence = "GGTCCAGA"
ma = IndexedPrefixAdapters([PrefixAdapter(sequence, max_errors=1, indels=False)])
for i in range(len(sequence)):
# N in the read should be counted as mismatch
t = sequence[:i] + "N" + sequence[i + 1 :] + "TGCT"
result = ma.match_to(t)
assert isinstance(result, RemoveBeforeMatch)
assert (result.rstart, result.rstop) == (0, 8)
assert result.errors == 1
assert result.score == 6
| 27.780645
| 88
| 0.618517
| 1,562
| 12,918
| 4.941741
| 0.179898
| 0.025392
| 0.019433
| 0.0114
| 0.432439
| 0.389558
| 0.350564
| 0.283716
| 0.269335
| 0.249126
| 0
| 0.040202
| 0.264437
| 12,918
| 464
| 89
| 27.840517
| 0.772153
| 0.06967
| 0
| 0.402817
| 0
| 0
| 0.059459
| 0.008053
| 0
| 0
| 0
| 0
| 0.180282
| 1
| 0.078873
| false
| 0
| 0.008451
| 0
| 0.087324
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
66992cf30daf9b3de5a678f20db0b9dc5b3fafdf
| 7,561
|
py
|
Python
|
archABM/event_model.py
|
vishalbelsare/ArchABM
|
4a5ed9506ba96c38e1f3d7f53d6e469f28fe6873
|
[
"MIT"
] | 8
|
2021-07-19T11:54:00.000Z
|
2022-03-29T01:45:07.000Z
|
archABM/event_model.py
|
vishalbelsare/ArchABM
|
4a5ed9506ba96c38e1f3d7f53d6e469f28fe6873
|
[
"MIT"
] | null | null | null |
archABM/event_model.py
|
vishalbelsare/ArchABM
|
4a5ed9506ba96c38e1f3d7f53d6e469f28fe6873
|
[
"MIT"
] | 1
|
2021-08-19T23:56:56.000Z
|
2021-08-19T23:56:56.000Z
|
import copy
import random
from .parameters import Parameters
class EventModel:
"""Defines an event model, also called "activity"
An event model is defined by these parameters:
* Activity name: :obj:`str`
* Schedule: :obj:`list` of :obj:`tuple` (in minutes :obj:`int`)
* Repetitions range: minimum (:obj:`int`) and maximum (:obj:`int`)
* Duration range: minimum (:obj:`int`) and maximum (:obj:`int`) in minutes
* Other parameters:
* mask efficiency ratio: :obj:`float`
* collective event: :obj:`bool`
* shared event: :obj:`bool`
The schedule defines the allowed periods of time in which an activity can happen.
For example, ``schedule=[(120,180),(240,300)]`` allows people to carry out this activity from
the time ``120`` to ``180`` and also from time ``240`` until ``300``.
Notice that the schedule units are in minutes.
Each activity is limited to a certain duration, and its priority follows
a piecewise linear function, parametrized by:
* ``r``: repeat\ :sub:`min`
* ``R``: repeat\ :sub:`max`
* ``e``: event count
.. math::
Priority(e) =
\\left\{\\begin{matrix}
1-(1-\\alpha)\\cfrac{e}{r}\,,\quad 0 \leq e < r \\\\
\\alpha\\cfrac{R-e}{R-r}\,,\quad r \leq e < R \\
\end{matrix}\\right.
.. tikz:: Priority piecewise linear function
\pgfmathsetmacro{\\N}{10};
\pgfmathsetmacro{\\M}{6};
\pgfmathsetmacro{\\NN}{\\N-1};
\pgfmathsetmacro{\\MM}{\\M-1};
\pgfmathsetmacro{\\repmin}{2.25};
\pgfmathsetmacro{\\repmax}{8.5};
\pgfmathsetmacro{\\a}{2};
\coordinate (A) at (0,\\MM);
\coordinate (B) at (\\NN,0);
\coordinate (C) at (\\repmin, \\a);
\coordinate (D) at (\\repmax, 0);
\coordinate (E) at (\\repmin, 0);
\coordinate (F) at (0, \\a);
\draw[stepx=1,thin, black!20] (0,0) grid (\\N,\\M);
\draw[->, very thick] (0,0) to (\\N,0) node[right] {Event count};
\draw[->, very thick] (0,0) to (0,\\M) node[above] {Priority};
\draw (0.1,0) -- (-0.1, 0) node[anchor=east] {0};
\draw (0, 0.1) -- (0, -0.1);
\draw (\\repmin,0.1) -- (\\repmin,-0.1) node[anchor=north] {$repeat_{min}$};
\draw (\\repmax,0.1) -- (\\repmax,-0.1) node[anchor=north] {$repeat_{max}$};
\draw[ultra thick] (0.1, \\MM) -- (-0.1, \\MM) node[left] {1};
\draw[very thick, black!50, dashed] (C) -- (F) node[left] {$\\alpha$};
\draw[very thick, black!50, dashed] (C) -- (E);
\draw[ultra thick, red] (A) -- (C);
\draw[ultra thick, red] (C) -- (D);
:xscale: 80
:align: left
"""
id: int = -1
params: Parameters
count: int
noise: int
def __init__(self, params: Parameters) -> None:
self.next()
self.id = EventModel.id
self.params = params
self.count = 0
self.noise = None
@classmethod
def reset(cls) -> None:
"""Resets :class:`~archABM.event_model.EventModel` ID."""
EventModel.id = -1
@staticmethod
def next() -> None:
"""Increments one unit the :class:`~archABM.event_model.EventModel` ID."""
EventModel.id += 1
def get_noise(self) -> int:
"""Generates random noise
Returns:
int: noise amount in minutes
"""
if self.noise is None:
m = 15 # minutes # TODO: review hardcoded value
if m == 0:
self.noise = 0
else:
self.noise = random.randrange(m) # minutes
return self.noise
def new(self):
"""Generates a :class:`~archABM.event_model.EventModel` copy, with reset count and noise
Returns:
EventModel: cloned instance
"""
self.count = 0
self.noise = None
return copy.copy(self)
def duration(self, now) -> int:
"""Generates a random duration between :attr:`duration_min` and :attr:`duration_max`.
.. note::
If the generated duration, together with the current timestamp,
exceeds the allowed schedule, the duration is limited to finish
at the scheduled time interval.
The :attr:`noise` attribute is used to model the schedule's time tolerance.
Args:
now (int): current timestamp in minutes
Returns:
int: event duration in minutes
"""
duration = random.randint(self.params.duration_min, self.params.duration_max)
estimated = now + duration
noise = self.get_noise() # minutes
for interval in self.params.schedule:
a, b = interval
if a - noise <= now <= b + noise < estimated:
duration = b + noise - now + 1
break
return duration
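# Illustrative: with schedule [(120, 180)], noise 5 and now 170, any draw ending
# past 185 is clipped to duration = 180 + 5 - 170 + 1 = 16 minutes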
def priority(self) -> float:
"""Computes the priority of a certain event.
The priority function follows a piecewise linear function, parametrized by:
* ``r``: repeat\ :sub:`min`
* ``R``: repeat\ :sub:`max`
* ``e``: event count
.. math::
Priority(e) =
\\left\{\\begin{matrix}
1-(1-\\alpha)\\cfrac{e}{r}\,,\quad 0 \leq e < r \\\\
\\alpha\\cfrac{R-e}{R-r}\,,\quad r \leq e < R \\
\end{matrix}\\right.
Returns:
float: priority value [0-1]
"""
alpha = 0.5 # TODO: review hardcoded value
if self.params.repeat_max is None:
return random.uniform(0.0, 1.0)
if self.count == self.params.repeat_max:
return 0.0
if self.count < self.params.repeat_min:
return 1 - (1 - alpha) * self.count / self.params.repeat_min
if self.params.repeat_min == self.params.repeat_max:
return alpha
return alpha * (self.params.repeat_max - self.count) / (self.params.repeat_max - self.params.repeat_min)
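# Illustrative values for repeat_min=2, repeat_max=8, alpha=0.5:
# count=0 -> 1.0, count=1 -> 0.75, count=2 -> 0.5, count=5 -> 0.25, count=8 -> 0.0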
def probability(self, now: int) -> float:
"""Wrapper to call the priority function
If the event :attr:`count` is equal to the :attr:`repeat_max` parameters,
it yields a ``0`` probability. Otherwise, it computes the :meth:`priority` function
described above.
Args:
now (int): current timestamp in minutes
Returns:
float: event probability [0-1]
"""
p = 0.0
if self.count == self.params.repeat_max:
return p
noise = self.get_noise() # minutes
for interval in self.params.schedule:
a, b = interval
if a - noise <= now <= b + noise:
p = self.priority()
break
return p
def valid(self) -> bool:
"""Computes whether the event count has reached the :attr:`repeat_max` limit.
It yields ``True``
if :attr:`repeat_max` is ``undefined`` or
if the event :attr:`count` is less than :attr:`repeat_max`.
Otherwise, it yields ``False``.
Returns:
bool: valid event
"""
if self.params.repeat_max is None:
return True
return self.count < self.params.repeat_max
def consume(self) -> None:
"""Increments one unit the event count"""
self.count += 1
# logging.info("Event %s repeated %d out of %d" % (self.name, self.count, self.target))
def supply(self) -> None:
"""Decrements one unit the event count"""
self.count -= 1
| 33.455752
| 112
| 0.547943
| 947
| 7,561
| 4.341077
| 0.232313
| 0.043785
| 0.046704
| 0.036974
| 0.35782
| 0.314522
| 0.252493
| 0.239358
| 0.164437
| 0.123571
| 0
| 0.022554
| 0.308028
| 7,561
| 225
| 113
| 33.604444
| 0.763188
| 0.578098
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008889
| 0
| 1
| 0.152778
| false
| 0
| 0.041667
| 0
| 0.430556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
669c4ded1d39066ae7e38bea807e79c4ad3272ab
| 2,764
|
py
|
Python
|
parse_json_script/lib_parse_json.py
|
amane-uehara/fitbit-fetcher
|
2a949016933dbcac5f949c8b552c7998b2aadd8c
|
[
"MIT"
] | null | null | null |
parse_json_script/lib_parse_json.py
|
amane-uehara/fitbit-fetcher
|
2a949016933dbcac5f949c8b552c7998b2aadd8c
|
[
"MIT"
] | null | null | null |
parse_json_script/lib_parse_json.py
|
amane-uehara/fitbit-fetcher
|
2a949016933dbcac5f949c8b552c7998b2aadd8c
|
[
"MIT"
] | null | null | null |
import os
import sys
import json
def detail_base_item_list():
return [
'distance',
'elevation',
'floors',
'heart',
'minutesFairlyActive',
'minutesLightlyActive',
'minutesSedentary',
'minutesVeryActive',
'steps'
]
def read_json_file(filename):
if not os.path.isfile(filename):
sys.exit('Error: file:`' + filename + '` not found')
raw_file = open(filename, 'r')
data = json.load(raw_file)
raw_file.close()
return data
def parse_item(raw_root, item, grad, yyyymmdd):
filename = os.path.join(raw_root, item + '_' + grad, yyyymmdd + '.json')
data = read_json_file(filename)
tmp = data['activities-' + item + '-intraday']['dataset']
result = {}
for d in tmp:
hhmm = d['time'].replace(':', '')[0:4]
result[hhmm] = d
del result[hhmm]['time']
return result
def parse_sleep(raw_root, grad, yyyymmdd):
filename = os.path.join(raw_root, 'sleep_' + grad, yyyymmdd + '.json')
data = read_json_file(filename)
tmp = []
sleep = data['sleep']
for part in sleep:
tmp = tmp + part['minuteData']
result = {}
for d in tmp:
hhmm = d['dateTime'].replace(':', '')[0:4]
result[hhmm] = d
del result[hhmm]['dateTime']
return result
def hhmm_list(grad):
result = []
if grad == '1m':
for h in range(24):
for m in range(60):
result.append('%02d%02d' % (h,m))
elif grad == '15m':
for h in range(24):
for m15 in range(4):
result.append('%02d%02d' % (h,m15*15))
return result
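# e.g. hhmm_list('15m') -> ['0000', '0015', '0030', ..., '2345'] (96 entries)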
def item_join(item_dict, grad):
result = {}
for hhmm in hhmm_list(grad):
tmp = {}
for key in item_dict.keys():
if hhmm in item_dict[key].keys():
tmp[key] = item_dict[key][hhmm]
else:
tmp[key] = {}
result[hhmm] = tmp
return result
def simplify(joined_dict, grad, yyyymmdd):
item_list = detail_base_item_list()
item_list.append('sleep')
item_list.remove('distance')
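# 'distance' (float) and the three 'calories' fields need special casting, so they
# are filled separately below instead of via the generic int() loop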
result = []
for hhmm in hhmm_list(grad):
d = joined_dict[hhmm]
tmp = {}
tmp['dt'] = yyyymmdd + hhmm + '00'
for item in item_list:
tmp[item] = int(d[item]['value']) if ('value' in d[item].keys()) else ''
tmp['distance'] = float(d['distance']['value']) if ('value' in d['distance'].keys()) else ''
tmp['calories_level'] = int( d['calories']['level']) if ('level' in d['calories'].keys()) else ''
tmp['calories_mets' ] = int( d['calories']['mets' ]) if ('mets' in d['calories'].keys()) else ''
tmp['calories_value'] = float(d['calories']['value']) if ('value' in d['calories'].keys()) else ''
# mile to meter
tmp['distance'] = round(tmp['distance'] * 1609.344, 4)
tmp['calories_value'] = round(tmp['calories_value'], 4)
result.append(tmp)
return result
| 23.827586
| 102
| 0.599132
| 378
| 2,764
| 4.269841
| 0.240741
| 0.02974
| 0.037175
| 0.037175
| 0.325898
| 0.255266
| 0.23544
| 0.140025
| 0.094176
| 0
| 0
| 0.018131
| 0.22178
| 2,764
| 115
| 103
| 24.034783
| 0.732218
| 0.004703
| 0
| 0.255814
| 0
| 0
| 0.166606
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081395
| false
| 0
| 0.034884
| 0.011628
| 0.197674
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
669d3d5f4966f2fc9848beb0d7bd023a928904e0
| 4,251
|
py
|
Python
|
utils/tfds_preprocess.py
|
chansoopark98/tf_keras-Unknown-grasping
|
be0f68280ba0b293940a08732fd4a31e89a272cd
|
[
"MIT"
] | null | null | null |
utils/tfds_preprocess.py
|
chansoopark98/tf_keras-Unknown-grasping
|
be0f68280ba0b293940a08732fd4a31e89a272cd
|
[
"MIT"
] | null | null | null |
utils/tfds_preprocess.py
|
chansoopark98/tf_keras-Unknown-grasping
|
be0f68280ba0b293940a08732fd4a31e89a272cd
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
import random
from utils.dataset_processing import grasp, image
import matplotlib.pyplot as plt
dataset_path = './tfds/'
train_data, meta = tfds.load('Jacquard', split='train', with_info=True, shuffle_files=False)
BATCH_SIZE = 1
number_train = meta.splits['train'].num_examples
output_size = 300
def preprocess(sample):
tfds_rgb = sample['rgb']
tfds_depth = sample['depth']
tfds_box = sample['box']
return (tfds_rgb, tfds_depth, tfds_box)
def augment(tfds_rgb, tfds_depth, tfds_box):
# get center
c = output_size // 2
# rotate box
rotations = [0, np.pi / 2, 2 * np.pi / 2, 3 * np.pi / 2]
rot = random.choice(rotations)
zoom_factor = np.random.uniform(0.5, 1.0)
# zoom box
tfds_box = grasp.GraspRectangles.load_from_tensor(tfds_box)
tfds_box.to_array()
tfds_box.rotate(rot, (c, c))
tfds_box.zoom(zoom_factor, (c, c))
pos_img, ang_img, width_img = tfds_box.draw((output_size, output_size))
width_img = np.clip(width_img, 0.0, output_size /2 ) / (output_size / 2)
cos = np.cos(2 * ang_img)
sin = np.sin(2 * ang_img)
pos_img = tf.expand_dims(pos_img, axis=-1)
cos = tf.expand_dims(cos, axis=-1)
sin = tf.expand_dims(sin, axis=-1)
width_img = tf.expand_dims(width_img, axis=-1)
output = tf.concat([pos_img, cos, sin, width_img], axis=-1)
# input data
rgb_img = image.Image.from_tensor(tfds_rgb)
rgb_img.rotate(rot)
rgb_img.zoom(zoom_factor)
rgb_img.resize((output_size, output_size))
rgb_img.normalise()
# Depth
depth_img = image.DepthImage.from_tensor(tfds_depth)
depth_img.rotate(rot)
depth_img.normalise()
depth_img.zoom(zoom_factor)
depth_img.resize((output_size, output_size))
input = tf.concat([rgb_img, depth_img], axis=-1)
input = tf.cast(input, tf.float64)
return (input, output)
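# The label tensor stacks four maps as channels: grasp quality (pos), cos(2*angle),
# sin(2*angle) and normalised gripper width; the input is a 4-channel RGB-D image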
train_data = train_data.map(preprocess)
# train_data = train_data.map(augment)
train_data = train_data.map(lambda tfds_rgb, tfds_depth, tfds_box: tf.py_function(augment, [tfds_rgb, tfds_depth, tfds_box], [tf.float64, tf.float64]))  # augment returns (input, output), so Tout needs both dtypes
rows=1
cols=6  # six panels (subplot indices 1-6) are drawn below
train_data = train_data.take(100)
for input, output in train_data:
# pos_img = label[0]
# cos = label[1]
# sin = label[2]
# width_img = label[3]
fig = plt.figure()
ax0 = fig.add_subplot(rows, cols, 1)
ax0.imshow(output[0][:, :, 0])
ax0.set_title('pos_img')
ax0.axis("off")
ax1 = fig.add_subplot(rows, cols, 2)
ax1.imshow(output[0][:, :, 1])
ax1.set_title('cos')
ax1.axis("off")
ax1 = fig.add_subplot(rows, cols, 3)
ax1.imshow(output[0][:, :, 2])
ax1.set_title('sin')
ax1.axis("off")
ax1 = fig.add_subplot(rows, cols, 4)
ax1.imshow(output[0][:, :, 3])
ax1.set_title('width')
ax1.axis("off")
ax2 = fig.add_subplot(rows, cols, 5)
ax2.imshow(input[0][:, :, :3])
ax2.set_title('rgb')
ax2.axis("off")
ax3 = fig.add_subplot(rows, cols, 6)
ax3.imshow(input[0][:, :, 3:])
ax3.set_title('depth')
ax3.axis("off")
# q_img, ang_img, width_img = post_processing(q_img=pos_img,
# cos_img=cos,
# sin_img=sin,
# width_img=width_img)
# ax3 = fig.add_subplot(rows, cols, 9)
# ax3.imshow(q_img)
# ax3.set_title('q_img')
# ax3.axis("off")
# ax3 = fig.add_subplot(rows, cols, 10)
# ax3.imshow(ang_img)
# ax3.set_title('ang_img')
# ax3.axis("off")
# ax3 = fig.add_subplot(rows, cols, 11)
# ax3.imshow(width_img)
# ax3.set_title('width_img')
# ax3.axis("off")
# ax3 = fig.add_subplot(rows, cols, 12)
# ax3.imshow(inpaint_depth)
# ax3.set_title('from_pcd_inpaint')
# ax3.axis("off")
# s = evaluation.calculate_iou_match(grasp_q = q_img,
# grasp_angle = ang_img,
# ground_truth_bbs = gtbbs,
# no_grasps = 3,
# grasp_width = width_img,
# threshold=0.25)
# print('iou results', s)
plt.show()
| 26.735849
| 139
| 0.604799
| 623
| 4,251
| 3.895666
| 0.216693
| 0.046148
| 0.053564
| 0.070045
| 0.254223
| 0.196951
| 0.153688
| 0.117841
| 0.073754
| 0.045735
| 0
| 0.03432
| 0.252882
| 4,251
| 159
| 140
| 26.735849
| 0.729849
| 0.256881
| 0
| 0.037975
| 0
| 0
| 0.026906
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025316
| false
| 0
| 0.075949
| 0
| 0.126582
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
669ffe2b5e6215275de00b66a4a28e352cc9a091
| 2,063
|
py
|
Python
|
ch16_ex.py
|
DexHunter/Think-Python-book-exercise-solutions
|
d0abae261eda1dca99043e17e8a1e614caad2140
|
[
"CC-BY-4.0"
] | 24
|
2019-05-07T15:11:28.000Z
|
2022-03-02T04:50:28.000Z
|
ch16_ex.py
|
Dekzu/Think-Python-book-exercise-solutions
|
d0abae261eda1dca99043e17e8a1e614caad2140
|
[
"CC-BY-4.0"
] | null | null | null |
ch16_ex.py
|
Dekzu/Think-Python-book-exercise-solutions
|
d0abae261eda1dca99043e17e8a1e614caad2140
|
[
"CC-BY-4.0"
] | 19
|
2019-08-05T20:59:04.000Z
|
2022-03-07T05:13:32.000Z
|
class Time:
'''Represents the time of day.
attributes: hour, minute, second
'''
def print_time(t):
print ('(%.2d:%.2d:%.2d)' % (t.hour, t.minute, t.second))
def is_after(t1, t2):
return (t1.hour, t1.minute, t1.second) > (t2.hour, t2.minute, t2.second)
def mul_time(t, n):
'''Multiply time t by n
n: int
Returns a time tr
'''
return int_to_time(time_to_int(t) * n)
def add_time(t1, t2):
sum = Time()
sum.hour = t1.hour + t2.hour
sum.minute = t1.minute + t2.minute
sum.second = t1.second + t2.second
while sum.second >= 60:
sum.second -= 60
sum.minute += 1
while sum.minute >= 60:
sum.minute -= 60
sum.hour += 1
return sum
def increment(t, sec):
'''Write an increment function that does not contain any loops.
For the second exercise (a pure function), you can simply create a new object
with copy.deepcopy(t) and modify the copy; the approach is otherwise the same,
so it is skipped here.
idea: use divmod
sec: seconds to add (int)
'''
t.second += sec
inc_min, t.second = divmod(t.second, 60)
t.minute += inc_min
inc_hour, t.minute = divmod(t.minute, 60)
t.hour += inc_hour
return t
def int_to_time(seconds):
"""Makes a new Time object.
seconds: int seconds since midnight.
"""
time = Time()
minutes, time.second = divmod(seconds, 60)
time.hour, time.minute = divmod(minutes, 60)
return time
def time_to_int(time):
"""Computes the number of seconds since midnight.
time: Time object.
"""
minutes = time.hour * 60 + time.minute
seconds = minutes * 60 + time.second
return seconds
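# e.g. 17:43:06 -> 17 * 3600 + 43 * 60 + 6 = 63786 seconds since midnight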
if __name__ == '__main__':
t = Time()
t.hour = 17
t.minute = 43
t.second = 6
print_time(mul_time(t, 3))
t2 = Time()
t2.hour = 17
t2.minute = 44
t2.second = 5
print_time(t)
start = Time()
start.hour = 9
start.minute = 45
start.second = 0
duration = Time()
duration.hour = 1
duration.minute = 35
duration.second = 0
done = add_time(start, duration)
print_time(done)
print( is_after(t, t2) )
| 20.838384
| 248
| 0.652448
| 344
| 2,063
| 3.825581
| 0.296512
| 0.022796
| 0.015198
| 0.021277
| 0.042553
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040398
| 0.220068
| 2,063
| 99
| 249
| 20.838384
| 0.777502
| 0.27872
| 0
| 0
| 0
| 0
| 0.016795
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.122807
| false
| 0
| 0
| 0.017544
| 0.245614
| 0.105263
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
66a1405cb275e20463fb6f972194333959f1c8d7
| 1,449
|
py
|
Python
|
src/DataParser/odmdata/variable.py
|
UCHIC/iUTAHData
|
4ffab29ad6b3313416bb2a8b98acf0b2e02c8cab
|
[
"Unlicense"
] | 2
|
2015-02-25T01:12:51.000Z
|
2017-02-08T22:54:41.000Z
|
src/DataParser/odmdata/variable.py
|
UCHIC/iUTAHData
|
4ffab29ad6b3313416bb2a8b98acf0b2e02c8cab
|
[
"Unlicense"
] | 48
|
2015-01-12T18:01:56.000Z
|
2021-06-10T20:05:26.000Z
|
src/DataParser/odmdata/variable.py
|
UCHIC/iUTAHData
|
4ffab29ad6b3313416bb2a8b98acf0b2e02c8cab
|
[
"Unlicense"
] | null | null | null |
from sqlalchemy import *
from sqlalchemy.orm import relationship
from base import Base
from unit import Unit
class Variable(Base):
__tablename__ = 'Variables'
id = Column('VariableID', Integer, primary_key=True)
code = Column('VariableCode', String(255), nullable=False)
name = Column('VariableName', String(255), nullable=False)
speciation = Column('Speciation', String(255), nullable=False)
variable_unit_id = Column('VariableUnitsID', Integer, ForeignKey('Units.UnitsID'), nullable=False)
sample_medium = Column('SampleMedium', String(255), nullable=False)
value_type = Column('ValueType', String(255), nullable=False)
is_regular = Column('IsRegular', Boolean, nullable=False)
time_support = Column('TimeSupport', Float, nullable=False)
time_unit_id = Column('TimeUnitsID', Integer, ForeignKey('Units.UnitsID'), nullable=False)
data_type = Column('DataType', String(255), nullable=False)
general_category = Column('GeneralCategory', String(255), nullable=False)
no_data_value = Column('NoDataValue', Float, nullable=False)
# relationships
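# Both relationships target Units.UnitsID, so SQLAlchemy cannot infer the join
# condition on its own; the explicit primaryjoin strings disambiguate the two FKs.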
variable_unit = relationship(Unit, primaryjoin=(
"Unit.id==Variable.variable_unit_id")) # <-- Uses class attribute names, not table column names
time_unit = relationship(Unit, primaryjoin=("Unit.id==Variable.time_unit_id"))
def __repr__(self):
return "<Variable('%s', '%s', '%s')>" % (self.id, self.code, self.name)
| 45.28125
| 102
| 0.718427
| 170
| 1,449
| 5.964706
| 0.382353
| 0.153846
| 0.117357
| 0.151874
| 0.171598
| 0.171598
| 0.088757
| 0
| 0
| 0
| 0
| 0.016949
| 0.144928
| 1,449
| 31
| 103
| 46.741935
| 0.801453
| 0.046929
| 0
| 0
| 0
| 0
| 0.197388
| 0.046444
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.166667
| 0.041667
| 0.958333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
66a4535ff16536c58c62bd0252d04c6087d6613d
| 7,751
|
py
|
Python
|
pandas/pandastypes.py
|
pyxll/pyxll-examples
|
e8a1cba1ffdb346191f0c80bea6877cbe0291957
|
[
"Unlicense"
] | 93
|
2015-04-27T14:44:02.000Z
|
2022-03-03T13:14:49.000Z
|
pandas/pandastypes.py
|
samuelpedrini/pyxll-examples
|
ce7f839b4ff4f4032b78dffff2357f3feaadc3a1
|
[
"Unlicense"
] | 4
|
2019-12-13T11:32:17.000Z
|
2022-03-03T14:07:02.000Z
|
pandas/pandastypes.py
|
samuelpedrini/pyxll-examples
|
ce7f839b4ff4f4032b78dffff2357f3feaadc3a1
|
[
"Unlicense"
] | 53
|
2015-04-27T14:44:14.000Z
|
2022-01-23T05:26:52.000Z
|
"""
Custom excel types for pandas objects (eg dataframes).
For information about custom types in PyXLL see:
https://www.pyxll.com/docs/udfs.html#custom-types
For information about pandas see:
http://pandas.pydata.org/
Including this module in your pyxll config adds the following custom types that can
be used as return and argument types to your pyxll functions:
- dataframe
- series
- series_t
Dataframes with multi-index indexes or columns will be returned with the columns and
index values in the resulting array. For normal indexes, the index will only be
returned as part of the resulting array if the index is named.
eg::
from pyxll import xl_func
import pandas as pa
@xl_func("int rows, int cols, float value: dataframe")
def make_empty_dataframe(rows, cols, value):
# create an empty dataframe
df = pa.DataFrame({chr(c + ord('A')) : value for c in range(cols)}, index=range(rows))
# return it. The custom type will convert this to a 2d array that
# excel will understand when this function is called as an array
# function.
return df
@xl_func("dataframe df, string col: float")
def sum_column(df, col):
return df[col].sum()
In excel (use Ctrl+Shift+Enter to enter an array formula)::
=make_empty_dataframe(3, 3, 100)
>> A B C
>> 100 100 100
>> 100 100 100
>> 100 100 100
=sum_column(A1:C4, "A")
>> 300
"""
from pyxll import xl_return_type, xl_arg_type
import datetime as dt
import pandas as pa
import numpy as np
import pytz
try:
import pywintypes
except ImportError:
pywintypes = None
@xl_return_type("dataframe", "var")
def _dataframe_to_var(df):
"""return a list of lists that excel can understand"""
if not isinstance(df, pa.DataFrame):
return df
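# convert any NaNs to exceptions so they appear as errors in Excel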
df = df.applymap(lambda x: RuntimeError() if isinstance(x, float) and np.isnan(x) else x)
index_header = [str(df.index.name)] if df.index.name is not None else []
if isinstance(df.index, pa.MultiIndex):
index_header = [str(x) or "" for x in df.index.names]
if isinstance(df.columns, pa.MultiIndex):
result = [([""] * len(index_header)) + list(z) for z in zip(*list(df.columns))]
for header in result:
for i in range(1, len(header) - 1):
if header[-i] == header[-i-1]:
header[-i] = ""
if index_header:
column_names = [x or "" for x in df.columns.names]
for i, col_name in enumerate(column_names):
result[i][len(index_header)-1] = col_name
if column_names[-1]:
index_header[-1] += (" \ " if index_header[-1] else "") + str(column_names[-1])
num_levels = len(df.columns.levels)
result[num_levels-1][:len(index_header)] = index_header
else:
if index_header and df.columns.name:
index_header[-1] += (" \ " if index_header[-1] else "") + str(df.columns.name)
result = [index_header + list(df.columns)]
if isinstance(df.index, pa.MultiIndex):
prev_ix = None
for ix, row in df.iterrows():
header = list(ix)
if prev_ix:
header = [x if x != px else "" for (x, px) in zip(ix, prev_ix)]
result.append(header + list(row))
prev_ix = ix
elif index_header:
for ix, row in df.iterrows():
result.append([ix] + list(row))
else:
for ix, row in df.iterrows():
result.append(list(row))
return _normalize_dates(result)
@xl_return_type("series", "var")
def _series_to_var(s):
"""return a list of lists that excel can understand"""
if not isinstance(s, pa.Series):
return s
# convert any errors to exceptions so they appear correctly in Excel
s = s.apply(lambda x: RuntimeError() if isinstance(x, float) and np.isnan(x) else x)
result = list(map(list, zip(s.index, s)))
return _normalize_dates(result)
@xl_return_type("series_t", "var")
def _series_to_var_transform(s):
"""return a list of lists that excel can understand"""
if not isinstance(s, pa.Series):
return s
# convert any errors to exceptions so they appear correctly in Excel
s = s.apply(lambda x: RuntimeError() if isinstance(x, float) and np.isnan(x) else x)
result = list(map(list, zip(*zip(s.index, s))))
return _normalize_dates(result)
@xl_arg_type("dataframe", "var")
def _var_to_dataframe(x):
"""return a pandas DataFrame from a list of lists"""
if not isinstance(x, (list, tuple)):
raise TypeError("Expected a list of lists")
x = _fix_pywintypes(x)
columns = x[0]
rows = x[1:]
return pa.DataFrame(list(rows), columns=columns)
@xl_arg_type("series", "var")
def _var_to_series(s):
"""return a pandas Series from a list of lists (arranged vertically)"""
if not isinstance(s, (list, tuple)):
raise TypeError("Expected a list of lists")
s = _fix_pywintypes(s)
keys, values = [], []
for row in s:
if not isinstance(row, (list, tuple)):
raise TypeError("Expected a list of lists")
if len(row) < 2:
raise RuntimeError("Expected rows of length 2 to convert to a pandas Series")
key, value = row[:2]
# skip any empty rows
if key is None and value is None:
continue
keys.append(key)
values.append(value)
return pa.Series(values, index=keys)
@xl_arg_type("series_t", "var")
def _var_to_series_t(s):
"""return a pandas Series from a list of lists (arranged horizontally)"""
if not isinstance(s, (list, tuple)):
raise TypeError("Expected a list of lists")
s = _fix_pywintypes(s)
keys, values = [], []
for row in zip(*s):
if not isinstance(row, (list, tuple)):
raise TypeError("Expected a list of lists")
if len(row) < 2:
raise RuntimeError("Expected rows of length 2 to convert to a pandas Series")
key, value = row[:2]
# skip any empty rows
if key is None and value is None:
continue
keys.append(key)
values.append(value)
return pa.Series(values, index=keys)
def _normalize_dates(data):
"""
Ensure all date types returns are standard datetimes with a timezone.
pythoncom will fail to convert datetimes to Windows dates without tzinfo.
This is useful if using these functions to convert a dataframe to native
python types for setting to a Range using COM. If only passing objects
to/from python using PyXLL functions then this isn't necessary (but
isn't harmful either).
"""
def normalize_date(x):
if isinstance(x, pa.tslib.NaTType):
return ValueError()
elif isinstance(x, pa.tslib.Timestamp) or isinstance(x, dt.datetime):
return dt.datetime(*x.timetuple()[:6], tzinfo=x.tzinfo or pytz.utc)
elif isinstance(x, dt.date):
return dt.datetime(*x.timetuple()[:3], tzinfo=pytz.utc)
return x
return [[normalize_date(c) for c in r] for r in data]
def _fix_pywintypes(data):
"""
Converts any pywintypes.TimeType instances passed in to the
conversion functions into datetime types.
This is useful when using these functions to convert an Excel Range of
values to a pandas type, as pandas will crash if called with the
pywintypes.TimeType.
"""
if pywintypes is None:
return data
def fix_pywintypes(c):
if isinstance(c, pywintypes.TimeType):
return dt.datetime(*c.timetuple()[:6])
return c
return [[fix_pywintypes(c) for c in r] for r in data]
| 31.897119
| 95
| 0.632047
| 1,134
| 7,751
| 4.237213
| 0.193122
| 0.03205
| 0.016025
| 0.027471
| 0.418522
| 0.387305
| 0.365661
| 0.365661
| 0.334235
| 0.288866
| 0
| 0.010513
| 0.263708
| 7,751
| 242
| 96
| 32.028926
| 0.831435
| 0.338021
| 0
| 0.362903
| 0
| 0
| 0.059713
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.080645
| false
| 0
| 0.056452
| 0
| 0.282258
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
66a463bd296e2375b0d9a6abd3ff5e747d929dcd
| 10,912
|
py
|
Python
|
liveDataApp/views.py
|
subahanii/COVID19-tracker
|
b7d30ff996974755e78393f0777d6cf623c4d654
|
[
"MIT"
] | 7
|
2020-04-28T12:34:42.000Z
|
2021-05-17T06:20:51.000Z
|
liveDataApp/views.py
|
subahanii/COVID19-tracker
|
b7d30ff996974755e78393f0777d6cf623c4d654
|
[
"MIT"
] | 1
|
2020-07-09T18:17:32.000Z
|
2020-07-10T13:56:01.000Z
|
liveDataApp/views.py
|
subahanii/COVID19-tracker
|
b7d30ff996974755e78393f0777d6cf623c4d654
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
import requests
from bs4 import BeautifulSoup
import re
from collections import defaultdict as dfd
from .models import *
from datetime import date
from datetime import timedelta
from django.db.models import Sum
from django.db.models import Count
from django.db.models.functions import ExtractDay,ExtractMonth,ExtractYear
today = date.today()
yesterday = today - timedelta(days = 1)
colorList = {
1:"#FF0000",
2:"#FF4040",
3:"#FF4040",
4:"#FF4040",
5:"#FF7474",
6:"#FF7474",
7:"#FF7474",
8:"#FF7474",
9:"#FF7474",
10:"#FF7474",
11:"#FF7474",
12:"#FF7474",
13:"#FF8787",
14:"#FF8787",
15:"#FF8787",
16:"#FF8787",
17:"#FF8787",
18:"#FF8787",
19:"#FF8787",
20:"#FFB3B3",
21:"#FFB3B3",
22:"#FFB3B3",
23:"#FFB3B3",
24:"#FFB3B3",
25:"#FFB3B3",
26:"#FFECEC",
27:"#FFECEC",
28:"#FFECEC",
29:"#FFECEC",
30:"#FFE0E0",
31:"#FFE0E0",
32:"#FFE0E0",
33:"#FFE0E0",
34:"#FFE0E0",
35:"#FFE0E0",
}
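# rank-indexed fill colours for the map: key 1 (darkest red) goes to the state with
# the most confirmed cases, fading towards rank 35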
stateCode = {
'Andaman and Nicobar Islands': "AN" ,
'Andhra Pradesh': "AP",
'Arunachal Pradesh': "AR",
'Assam': "AS" ,
'Bihar':"BR" ,
'Chandigarh':"CT" ,
'Chhattisgarh': "CH",
'Delhi':"DL" ,
'Dadara & Nagar Havelli': "DN",
'Goa':"GA" ,
'Gujarat': "GJ",
'Haryana': "HR",
'Himachal Pradesh': "HP",
'Jammu and Kashmir': "JK" ,
'Jharkhand': "JH",
'Karnataka': "KA",
'Kerala': "KL",
'Ladakh': "LK",
'Lakshadweep': "LD",
'Madhya Pradesh': "MP",
'Maharashtra':"MH" ,
'Manipur':"MN" ,
'Meghalaya': "ML",
'Mizoram': "MZ",
'Nagaland': "NL",
'Odisha': "OD",
'Puducherry': "PY",
'Punjab': "PB",
'Rajasthan': "RJ",
'Sikkim': "SK",
'Tamil Nadu':"TN" ,
'Telengana': "TS",
'Tripura':"TR" ,
'Uttarakhand': "UK",
'Uttar Pradesh':"UP" ,
'West Bengal':"WB"
}
# Create your views here.
def filter_integer(x):
array = re.findall(r'[0-9]+', x)
return ''.join(array)
def getData():
# get data directly by scraping the site
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# URL = 'https://www.mohfw.gov.in/'
# page = requests.get(URL)
# soup = BeautifulSoup(page.content, 'html.parser')
# tableData = soup.findAll('div', attrs={'class':'data-table table-responsive'})
# tableData = tableData[0].find('tbody')
# dataList=[]
# for i in tableData.findAll('tr'):
# data=[]
# for j,vlu in enumerate(i.findAll('td')):
# if j==1:
# data.append(vlu.text)
# elif j>1:
# data.append(filter_integer(vlu.text))
# if len(data)>2:
# dataList.append(data)
# total = ['Total number of confirmed cases in India']
# for vlu in dataList[-1]:
# total.append(filter_integer(vlu))
# print(total)
# del dataList[-1]
# #dataList[-1]=total
# for i in range(len(dataList)):
# dataList[i].insert(0, i+1)
# print(dataList)
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#get data from database
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
dataList = []
tconfirmCases,tcuredCases,tdeathCases=0,0,0
updateDate=0
for i,vlu in enumerate(dailyData.objects.filter(when__date=date.today()) ):
dataList.append([i+1, vlu.stateName, vlu.confirmedCases, vlu.curedCases, vlu.deathCases])
updateDate = vlu.when
tconfirmCases+=int(vlu.confirmedCases)
tcuredCases+= int(vlu.curedCases)
tdeathCases+= int(vlu.deathCases)
total = ['Total number of confirmed cases in India',tconfirmCases,tcuredCases,tdeathCases]
#print('databse')
#print(total, dataList)
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
confirmCases = dfd(list)
for i in dataList:
try:
confirmCases[ stateCode[i[1]] ].append(int(i[2]))
confirmCases[ stateCode[i[1]] ].append(i[1])
confirmCases[ stateCode[i[1]] ].append(stateCode[i[1]])
except (KeyError, ValueError):
print("Except from getData()")
sortedConfirmedCases = sorted(confirmCases.items(), key=lambda x: x[1] , reverse=True)
#print(sortedConfirmedCases)
sortedConfirmedCasesList = []
colorData = dict()
colorFill=dict()
c=0
c2=255
radius=32
colorCode=1
for i in sortedConfirmedCases:
sortedConfirmedCasesList.append({
'centered': i[1][2],
'fillKey': i[1][2],
'radius': radius+((i[1][0])//2400)*2,
'state': i[1][1]+","+str(i[1][0])
})
#colorFill[ i[1][2] ] = "rgb("+str(c2)+","+ str(0)+","+str(c) +")"
colorFill[ i[1][2] ] = colorList[colorCode]
colorCode+=1
#print(colorCode)
colorData[ i[1][2] ]={ 'fillKey': i[1][2] }
c+=(i[1][0])//200
radius-=1
colorFill['defaultFill'] = '#dddddd'
return dataList, total,sortedConfirmedCasesList,colorData,colorFill, updateDate
def tripleGraph(data):
dataPoint1,dataPoint2,dataPoint3= [],[],[]
#print(data)
for i in data:
dataPoint1.append({ 'y': int(i[2]), 'label': i[1] ,'indexLabel': i[2] ,'indexLabelFontSize': 10})
dataPoint2.append({ 'y': int(i[3]), 'label': i[1] ,'indexLabel': i[3] ,'indexLabelFontSize': 10})
dataPoint3.append({ 'y': int(i[4]), 'label': i[1] ,'indexLabel': i[4] ,'indexLabelFontSize': 10})
#print(dataPoint1)
#print(dataPoint2)
#print(dataPoint3)
return dataPoint1,dataPoint2,dataPoint3
def getPieData(data):
confirmedPie,curedPie,deathPie = [], [], []
for i in data:
if i[0]==1:
confirmedPie.append({ 'y': i[2], 'name': i[1], 'exploded': 'true' })
curedPie.append({ 'y': i[3], 'name': i[1], 'exploded': 'true' })
deathPie.append({ 'y': i[4], 'name': i[1], 'exploded': 'true' })
else:
confirmedPie.append({ 'y': i[2], 'name': i[1]})
curedPie.append({ 'y': i[2], 'name': i[1]})
deathPie.append({ 'y': i[2], 'name': i[1]})
return confirmedPie,curedPie,deathPie
def findNewCases():
todayDataDB = dailyData.objects.filter(when__date=date.today())
yesterdayDataDB = dailyData.objects.filter(when__date=( date.today() - timedelta(days = 1) ))
todayConfirmedData =0
todayCuredData = 0
todayDeathData = 0
yesterdayConfirmedData =0
yesterdayCuredData = 0
yesterdayDeathData = 0
for vlu in todayDataDB:
todayConfirmedData+= int(vlu.confirmedCases)
todayCuredData+= int(vlu.curedCases)
todayDeathData+= int(vlu.deathCases)
for vlu in yesterdayDataDB:
yesterdayConfirmedData+= int(vlu.confirmedCases)
yesterdayCuredData+= int(vlu.curedCases)
yesterdayDeathData+= int(vlu.deathCases)
return (todayConfirmedData - yesterdayConfirmedData),(todayCuredData - yesterdayCuredData),(todayDeathData - yesterdayDeathData)
def getIncrementedData():
dataFromDM = dailyData.objects.values( day=ExtractDay('when'),
month=ExtractMonth('when'),
year = ExtractYear('when') ).annotate(Sum('confirmedCases'),
Sum('curedCases'),
Sum('deathCases'))
dataFromDM= dataFromDM.order_by('month')
#print(dataFromDM)
#print(len(dataFromDM))
incrementedConfirmedCases,incrementedCuredCases, incrementedDeathCases = dfd(int), dfd(int), dfd(int)
    temp1, temp2, temp3 = 25435, 5000, 800  # assumed baseline totals preceding the first stored day
for i in dataFromDM:
d='{}/{}/{}'.format(i['day'],i['month'],i['year'])
incrementedConfirmedCases[d]=(i['confirmedCases__sum'] - temp1)
incrementedCuredCases[d]=(i['curedCases__sum'] - temp2)
incrementedDeathCases[d]=(i['deathCases__sum'] - temp3)
temp1 = i['confirmedCases__sum']
temp2 = i['curedCases__sum']
temp3 = i['deathCases__sum']
#print(i['confirmedCases__sum'],d)
#print(incrementedConfirmedCases)
#print(incrementedCuredCases)
#print(incrementedDeathCases)
dateOfCnfInc ,dataOfCnfInc = list(incrementedConfirmedCases.keys()), list(incrementedConfirmedCases.values())
dateOfCurInc ,dataOfCurInc = list(incrementedCuredCases.keys()), list(incrementedCuredCases.values())
dateOfDthInc ,dataOfDthInc = list(incrementedDeathCases.keys()), list(incrementedDeathCases.values())
return dateOfCnfInc ,dataOfCnfInc,dateOfCurInc ,dataOfCurInc,dateOfDthInc ,dataOfDthInc
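# Illustrative sketch (not part of the original module): the increment logic in
# getIncrementedData() is just a first difference over cumulative daily totals,
# seeded with an assumed baseline. A minimal standalone version, with
# hypothetical numbers:
#
#   cumulative = {'1/7/2020': 25989, '2/7/2020': 26541, '3/7/2020': 27114}
#   prev = 25435  # assumed baseline, analogous to temp1 above
#   increments = {}
#   for day, total in cumulative.items():
#       increments[day] = total - prev
#       prev = total
#   # increments == {'1/7/2020': 554, '2/7/2020': 552, '3/7/2020': 573}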
def getIncrementedTestData():
todayTests = 1000000
incTestCount = 100000
yesterdayTests = 900000
testIncreamentData = []
try:
todayTests = TestCounter.objects.get( when__date=date.today() )
        yesterdayTests = TestCounter.objects.get(when__date=( date.today() - timedelta(days = 1) ))
todayTests = todayTests.tests
#print('---> ',yesterdayTests.tests)
yesterdayTests = yesterdayTests.tests
#print("dhdh")
incTestCount = todayTests - yesterdayTests
    except TestCounter.DoesNotExist:
        print("Except from getIncrementedTestData(): missing today/yesterday test counts")
    temp = 1199081  # assumed test-count baseline preceding the first stored day
for i in TestCounter.objects.all():
#print(i.tests,str(i.when)[:10] )
testIncreamentData.append({ 'y': i.tests-temp, 'label': str(i.when)[:10] })
temp = i.tests
#print(testIncreamentData)
return testIncreamentData, todayTests, incTestCount
def home(request):
data,total,sortedConfirmedCasesList,colorData,colorFill ,updateDate = getData()
sortedData = sorted(data,key= lambda x: int(x[2]))
#print("sorted data",sortedData)
dataPoint1,dataPoint2,dataPoint3 = tripleGraph(sortedData[12:])
confirmedPie,curedPie,deathPie = getPieData(sortedData[12:])
#print(total)
newConfirmedCases,newCuredCases, newDeathCases = findNewCases()
dateOfCnfInc ,dataOfCnfInc,dateOfCurInc ,dataOfCurInc,dateOfDthInc ,dataOfDthInc = getIncrementedData()
testIncreamentData, todayTests, incTestCount = getIncrementedTestData()
#getIncrementedTestData
visiting = Counter(count1=1)
visiting.save()
visited = Counter.objects.all().count()
# totalTests = TestCounter.objects.get( when__date=date.today() )
# totalTests = totalTests.tests
context= {
'data':data,
'total':total,
'sortedConfirmedCasesList':sortedConfirmedCasesList,
'colorData':colorData,
"totalConf":total[1],
"totalCure":total[2],
"totalDeath":total[3],
'colorFill':colorFill,
'dataPoint1':dataPoint1,
'dataPoint2':dataPoint2,
'dataPoint3':dataPoint3,
'totalAffected':len(data),
'updateDate':updateDate,
'visited':visited,
'confirmedPie':confirmedPie,
'curedPie':curedPie,
'deathPie':deathPie,
'newConfirmedCases':newConfirmedCases,
'newCuredCases':newCuredCases,
'newDeathCases':newDeathCases,
'confirmDataOfLineGraph':[{ 'label': i[0], 'y': i[1] } for i in zip(dateOfCnfInc,dataOfCnfInc)] ,
'curedDataOfLineGraph':[{ 'label': i[0], 'y': i[1] } for i in zip(dateOfCurInc,dataOfCurInc)] ,
'deathDataOfLineGraph':[{ 'label': i[0], 'y': i[1] } for i in zip(dateOfDthInc,dataOfDthInc)] ,
'todayTests':todayTests,
'testIncreamentData':testIncreamentData,
'incTestCount':incTestCount
}
#print(dailyData.objects.filter(when__date=yesterday) )
#print([{ 'label': i[0], 'y': i[1] } for i in zip(dateOfCnfInc,dataOfCnfInc)])
#print('today',today)
return render(request,'home.html',context)
# --- src/observers/simple_observer.py | ChenyangTang/bark-ml @ 1d2ab1957bf49929e27d718dd4bd3912162197b8 | MIT | 2,437 bytes ---
from gym import spaces
import numpy as np
from bark.models.dynamic import StateDefinition
from modules.runtime.commons.parameters import ParameterServer
import math
import operator
from src.commons.spaces import BoundedContinuous, Discrete
from src.observers.observer import StateObserver
class SimpleObserver(StateObserver):
def __init__(self,
params=ParameterServer()):
StateObserver.__init__(self, params)
self._state_definition = [int(StateDefinition.X_POSITION),
int(StateDefinition.Y_POSITION),
int(StateDefinition.THETA_POSITION),
int(StateDefinition.VEL_POSITION)]
self._observation_len = \
self._max_num_vehicles*self._len_state
def observe(self, world, agents_to_observe):
"""see base class
"""
concatenated_state = np.zeros(self._observation_len, dtype=np.float32)
    for i, (_, agent) in enumerate(world.agents.items()):
      # stop before writing past the end of the fixed-size observation
      if i >= self._max_num_vehicles:
        break
      normalized_state = self._normalize(agent.state)
      reduced_state = self._select_state_by_index(normalized_state)
      starts_id = i*self._len_state
      concatenated_state[starts_id:starts_id+self._len_state] = reduced_state
return concatenated_state
  def _norm(self, agent_state, position, value_range):
    # min-max normalize a single state entry into [0, 1]
    agent_state[int(position)] = \
      (agent_state[int(position)] - value_range[0])/(value_range[1]-value_range[0])
    return agent_state
def _normalize(self, agent_state):
agent_state = \
self._norm(agent_state,
StateDefinition.X_POSITION,
self._world_x_range)
agent_state = \
self._norm(agent_state,
StateDefinition.Y_POSITION,
self._world_y_range)
agent_state = \
self._norm(agent_state,
StateDefinition.THETA_POSITION,
self._theta_range)
agent_state = \
self._norm(agent_state,
StateDefinition.VEL_POSITION,
self._velocity_range)
return agent_state
def reset(self, world, agents_to_observe):
super(SimpleObserver, self).reset(world, agents_to_observe)
return world
@property
def observation_space(self):
return spaces.Box(
low=np.zeros(self._observation_len),
high=np.ones(self._observation_len))
@property
def _len_state(self):
return len(self._state_definition)
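# A minimal usage sketch (assumes an already-configured bark world object,
# which is outside this file):
#
#   observer = SimpleObserver()
#   observer.reset(world, agents_to_observe=[0])
#   state = observer.observe(world, agents_to_observe=[0])
#   assert state.shape == (observer._observation_len,)
#
# Each observed agent contributes one 4-value slice (x, y, theta, v),
# min-max normalized into [0, 1] by _norm.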
# --- cosilico/base/scatter.py | cosilico/cosilico @ 983373139aeaf459271c559a47a6439939ec93a5 | MIT | 9,610 bytes ---
import altair as alt
import pandas as pd
def scatterplot(x, y, data, hue=None, color=None, opacity=1.,
x_autoscale=True, y_autoscale=True):
"""Display a basic scatterplot.
Parameters
----------
x : str
Column in data to be used for x-axis
y : str
Column in data to be used for y-axis
data : pandas.DataFrame
Dataframe holding x and y
hue : str, None
Column in data used to color the points
color : str, None
What color to display the points as
        If hue is not None, then color will be overridden by hue
opacity : float
Opacity of the points in the plot
x_autoscale : bool
Scale the x-axis to fit the data,
otherwise axis starts at zero
y_autoscale : bool
Scale the y-axis to fit the data,
otherwise axis starts at zero
Example
-------
>>> import cosilico.base as base
>>> import seaborn as sns
>>>
>>> iris = sns.load_dataset('iris')
>>>
>>> base.scatterplot('sepal_length', 'sepal_width', iris, hue='species')
Returns
-------
altair.Chart
.. output::
https://static.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp
height: 600px
"""
mark_kwargs = {
'opacity': opacity
}
if color is not None and hue is None:
mark_kwargs['color'] = color
encode_kwargs = {}
if hue is not None: encode_kwargs['color'] = f'{hue}:N'
chart = alt.Chart(data).mark_point(**mark_kwargs).encode(
x=alt.X(f'{x}:Q',
scale=alt.Scale(zero=not x_autoscale)
),
y=alt.Y(f'{y}:Q',
scale=alt.Scale(zero=not y_autoscale)
),
**encode_kwargs
)
return chart
def jointplot(x, y, data, hue=None, color=None, show_x=True,
show_y=True, opacity=.6, padding_scalar=.05, maxbins=30,
hist_height=50):
"""Display a scatterplot with axes histograms.
Parameters
----------
x : str
Column in data to be used for x-axis
y : str
Column in data to be used for y-axis
data : pandas.DataFrame
Dataframe holding x and y
hue : str, None
Column in data used to color the points
color : str, None
What color to display the points as
        If hue is not None, then color will be overridden by hue
    show_x : bool
Show the distribution for the x-axis values
show_y : bool
Show the distribution for the y-axis values
opacity : float
Opacity of the histograms in the plot
maxbins : int
Max bins for the histograms
hist_height : int
Height of histograms
Example
-------
>>> import cosilico.base as base
>>>
>>> import seaborn as sns
>>> iris = sns.load_dataset('iris')
>>>
>>> base.jointplot('sepal_length', 'sepal_width', iris, hue='species')
Returns
-------
altair.Chart
.. output::
https://static.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp
height: 600px
"""
chart = alt.Chart(data)
x_diff = max(data[x]) - min(data[x])
y_diff = max(data[y]) - min(data[y])
xscale = alt.Scale(domain=(min(data[x]) - (x_diff * padding_scalar),
max(data[x]) + (x_diff * padding_scalar)))
yscale = alt.Scale(domain=(min(data[y]) - (y_diff * padding_scalar),
max(data[y]) + (y_diff * padding_scalar)))
area_kwargs = {'opacity': opacity, 'interpolate': 'step'}
mark_kwargs = {}
if hue is not None:
mark_kwargs['color'] = f'{hue}:N'
points = chart.mark_circle().encode(
alt.X(x, scale=xscale),
alt.Y(y, scale=yscale),
**mark_kwargs
)
encode_kwargs = {}
if hue is not None:
encode_kwargs['color'] = f'{hue}:N'
top_hist = chart.mark_area(**area_kwargs).encode(
alt.X(f'{x}:Q',
# when using bins, the axis scale is set through
# the bin extent, so we do not specify the scale here
# (which would be ignored anyway)
bin=alt.Bin(maxbins=maxbins, extent=xscale.domain),
stack=None,
title='',
axis=alt.Axis(labels=False, tickOpacity=0.)
),
alt.Y('count()', stack=None, title=''),
**encode_kwargs
).properties(height=hist_height)
right_hist = chart.mark_area(**area_kwargs).encode(
alt.Y(f'{y}:Q',
bin=alt.Bin(maxbins=maxbins, extent=yscale.domain),
stack=None,
title='',
axis=alt.Axis(labels=False, tickOpacity=0.)
),
alt.X('count()', stack=None, title=''),
**encode_kwargs
).properties(width=hist_height)
if show_x and show_y:
return top_hist & (points | right_hist)
if show_x and not show_y:
return top_hist & points
if not show_x and show_y:
return points | right_hist
return points
def clean_jointplot(x, y, data, hue=None, show_x=True,
show_y=True, opacity=.6, padding_scalar=.2, bandwidth_scalar=10,
line_height=50, top_spacing=-40, right_spacing=0,
apply_configure_view=True):
"""Display a clean scatterplot with axes distribution lines.
Parameters
----------
x : str
Column in data to be used for x-axis
y : str
Column in data to be used for y-axis
data : pandas.DataFrame
Dataframe holding x and y
hue : str, None
        Column in data used to color the points
    show_x : bool
Show the line distribution for the x-axis values
show_y : bool
Show the line distribution for the y-axis values
opacity : float
Opacity of the histograms in the plot
bandwidth_scalar : float, int
Sets bandwidth for the density estimation.
Bandwidth = value_range / bandwidth_scalar
line_height : int
Height of the distribution lines
top_spacing : int
Amount of spacing between top distribution line and scatter
right_spacing : int
Amount of spacing between right distribution line and scatter
apply_configure_view : bool
        Whether to apply strokeWidth=0 via the configure_view function.
        Note that once this is applied you can't later combine this chart
        with another chart. To combine this chart with another chart,
        set apply_configure_view to False and then reapply
        .configure_view on the combined chart to make the stray axis
        borders go away
Example
-------
>>> import cosilico.base as base
>>>
>>> import seaborn as sns
>>> iris = sns.load_dataset('iris')
>>>
>>> base.clean_jointplot('sepal_length', 'sepal_width', iris, hue='species')
Returns
-------
altair.Chart
.. output::
https://static.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp
height: 600px
"""
chart = alt.Chart(data)
x_diff = max(data[x]) - min(data[x])
y_diff = max(data[y]) - min(data[y])
xscale = alt.Scale(domain=(min(data[x]) - (x_diff * padding_scalar),
max(data[x]) + (x_diff * padding_scalar)))
yscale = alt.Scale(domain=(min(data[y]) - (y_diff * padding_scalar),
max(data[y]) + (y_diff * padding_scalar)))
area_kwargs = {'opacity': opacity, 'interpolate': 'step'}
mark_kwargs = {}
if hue is not None:
mark_kwargs['color'] = f'{hue}:N'
points = chart.mark_circle().encode(
alt.X(x, scale=xscale),
alt.Y(y, scale=yscale),
**mark_kwargs
)
encode_kwargs = {}
if hue is not None:
encode_kwargs['color'] = f'{hue}:N'
transform_kwargs = {}
if hue is not None:
transform_kwargs['groupby'] = [hue]
line_axis_kwargs = {'labels': False, 'tickOpacity': 0., 'domain': False,
'grid': False}
top_line = chart.transform_density(
density=x,
bandwidth=x_diff / bandwidth_scalar,
counts=True,
extent=xscale.domain,
steps=200,
**transform_kwargs
).mark_line(
opacity=opacity
).encode(
        x=alt.X('value:Q',
scale=xscale,
title='',
axis=alt.Axis(**line_axis_kwargs)
),
y=alt.Y('density:Q',
title='',
axis=alt.Axis(**line_axis_kwargs)
),
**encode_kwargs
).properties(height=line_height)
right_line = chart.transform_density(
density=y,
bandwidth=y_diff / bandwidth_scalar,
counts=True,
extent=yscale.domain,
steps=200,
**transform_kwargs
).mark_line(
opacity=opacity
).encode(
        y=alt.X('value:Q',
scale=yscale,
title='',
axis=alt.Axis(**line_axis_kwargs)
),
x=alt.Y('density:Q',
title='',
axis=alt.Axis(**line_axis_kwargs)
),
order='value:Q',
**encode_kwargs
).properties(width=line_height)
if show_x and show_y:
combined = alt.vconcat(top_line,
alt.hconcat(points, right_line, spacing=right_spacing),
spacing=top_spacing)
if show_x and not show_y:
combined = alt.vconcat(top_line, points, spacing=top_spacing)
if not show_x and show_y:
combined = alt.hconcat(points, right_line, spacing=right_spacing)
if not show_x and not show_y:
combined = points
if apply_configure_view:
combined = combined.configure_view(strokeWidth=0)
return combined
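# Combination sketch (hypothetical charts; this just follows the
# apply_configure_view note in the docstring above):
#
#   left = clean_jointplot('sepal_length', 'sepal_width', iris,
#                          apply_configure_view=False)
#   right = clean_jointplot('petal_length', 'petal_width', iris,
#                           apply_configure_view=False)
#   combined = alt.hconcat(left, right).configure_view(strokeWidth=0)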
# --- flappy_env.py | timlaroche/FlapPyBird @ cffc7bb76daad67957a8b5778c1f2c7d82da1514 | MIT | 12,103 bytes ---
import gym
from gym import spaces
from itertools import cycle
import random
import sys
import os
import pygame
from pygame.locals import *
import flappy
import numpy as np
import cv2
# GLOBALS
FPS = 30
SCREENWIDTH = 288
SCREENHEIGHT = 512
PIPEGAPSIZE = 100 # gap between upper and lower part of pipe
BASEY = SCREENHEIGHT * 0.79
PLAYERS_FILES = ('assets/sprites/redbird-upflap.png', 'assets/sprites/redbird-midflap.png', 'assets/sprites/redbird-downflap.png')
BACKGROUND_FILE= 'assets/sprites/background-day.png'
PIPES_LIST = 'assets/sprites/pipe-green.png'
IMAGES, SOUNDS, HITMASKS = {}, {}, {}
try:
xrange
except NameError:
xrange = range
class FlappyEnv(gym.Env):
"""Custom Environment that follows gym interface"""
metadata = {'render.modes': ['human']}
def __init__(self, server):
super(FlappyEnv, self).__init__()
        if server:
os.environ["SDL_VIDEODRIVER"] = "dummy"
self.action_space = spaces.Discrete(10) # Weight the flap such that 1/10 action is to flap.
self.observation_space = spaces.Box(low = 0, high = 255, shape = (80, 80, 1), dtype=np.uint8)
pygame.init()
self.FPSCLOCK = pygame.time.Clock()
self.SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))
pygame.display.set_caption('Flappy Bird')
# numbers sprites for score display
# image, sound and hitmask dicts
IMAGES['numbers'] = (
pygame.image.load('assets/sprites/0.png').convert_alpha(),
pygame.image.load('assets/sprites/1.png').convert_alpha(),
pygame.image.load('assets/sprites/2.png').convert_alpha(),
pygame.image.load('assets/sprites/3.png').convert_alpha(),
pygame.image.load('assets/sprites/4.png').convert_alpha(),
pygame.image.load('assets/sprites/5.png').convert_alpha(),
pygame.image.load('assets/sprites/6.png').convert_alpha(),
pygame.image.load('assets/sprites/7.png').convert_alpha(),
pygame.image.load('assets/sprites/8.png').convert_alpha(),
pygame.image.load('assets/sprites/9.png').convert_alpha()
)
IMAGES['player'] = (
pygame.image.load(PLAYERS_FILES[0]).convert_alpha(),
pygame.image.load(PLAYERS_FILES[1]).convert_alpha(),
pygame.image.load(PLAYERS_FILES[2]).convert_alpha(),
)
IMAGES['pipe'] = (
pygame.transform.flip(
pygame.image.load(PIPES_LIST).convert_alpha(), False, True),
pygame.image.load(PIPES_LIST).convert_alpha(),
)
# game over sprite
IMAGES['gameover'] = pygame.image.load('assets/sprites/gameover.png').convert_alpha()
# message sprite for welcome screen
IMAGES['message'] = pygame.image.load('assets/sprites/message.png').convert_alpha()
# base (ground) sprite
IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha()
IMAGES['background'] = pygame.image.load(BACKGROUND_FILE).convert()
# Sounds
if 'win' in sys.platform:
soundExt = '.wav'
else:
soundExt = '.ogg'
SOUNDS['die'] = pygame.mixer.Sound('assets/audio/die' + soundExt)
SOUNDS['hit'] = pygame.mixer.Sound('assets/audio/hit' + soundExt)
SOUNDS['point'] = pygame.mixer.Sound('assets/audio/point' + soundExt)
SOUNDS['swoosh'] = pygame.mixer.Sound('assets/audio/swoosh' + soundExt)
SOUNDS['wing'] = pygame.mixer.Sound('assets/audio/wing' + soundExt)
# Hitmasks for pipes
HITMASKS['pipe'] = (
self.getHitmask(IMAGES['pipe'][0]),
self.getHitmask(IMAGES['pipe'][1]),
)
# hitmask for player
HITMASKS['player'] = (
self.getHitmask(IMAGES['player'][0]),
self.getHitmask(IMAGES['player'][1]),
self.getHitmask(IMAGES['player'][2]),
)
self.SCREEN.blit(IMAGES['background'], (0,0))
pygame.display.update()
# Game Settings
self.playerIndexGen = cycle([0, 1, 2, 1])
self.basex = 0
self.playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2) + 0
self.playerx = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2) + 0
self.playerIndex = 0
self.score = 0
self.loopIter = 0
self.pipeVelX = -4
self.baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width()
self.playerHeight = IMAGES['player'][self.playerIndex].get_height()
        # player velocity, max velocity, downward acceleration, acceleration on flap
self.playerVelY = -9 # player's velocity along Y, default same as playerFlapped
self.playerMaxVelY = 10 # max vel along Y, max descend speed
self.playerMinVelY = -8 # min vel along Y, max ascend speed
        self.playerAccY = 1 # player's downward acceleration
self.playerRot = 45 # player's rotation
self.playerVelRot = 3 # angular speed
self.playerRotThr = 20 # rotation threshold
self.playerFlapAcc = -9 # players speed on flapping
self.playerFlapped = False # True when player flaps
self.running = True
self.upperPipes = []
self.lowerPipes = []
def step(self, action):
basex = self.basex
reward = 0.0
obs = list()
if action == 1:
if self.playery > -2 * IMAGES['player'][0].get_height():
self.playerVelY = self.playerFlapAcc
self.playerFlapped = True
SOUNDS['wing'].play()
# check for crash here
crashTest = self.checkCrash({'x': self.playerx, 'y': self.playery, 'index': self.playerIndex},
self.upperPipes, self.lowerPipes)
if crashTest[0]:
self.running = False
reward -= 100
else:
reward += 0.1 # Little bit of reward for surviving
# check for score
playerMidPos = self.playerx + IMAGES['player'][0].get_width() / 2
for pipe in self.upperPipes:
pipeMidPos = pipe['x'] + IMAGES['pipe'][0].get_width() / 2
if pipeMidPos <= playerMidPos < pipeMidPos + 4:
self.score += 1
reward += 1
SOUNDS['point'].play()
# playerIndex basex change
if (self.loopIter + 1) % 3 == 0:
self.playerIndex = next(self.playerIndexGen)
self.loopIter = (self.loopIter + 1) % 30
basex = -((-basex + 100) % self.baseShift)
# rotate the player
if self.playerRot > -90:
self.playerRot -= self.playerVelRot
# player's movement
if self.playerVelY < self.playerMaxVelY and not self.playerFlapped:
self.playerVelY += self.playerAccY
if self.playerFlapped:
self.playerFlapped = False
# more rotation to cover the threshold (calculated in visible rotation)
self.playerRot = 45
self.playerHeight = IMAGES['player'][self.playerIndex].get_height()
self.playery += min(self.playerVelY, BASEY - self.playery - self.playerHeight)
# move pipes to left
for uPipe, lPipe in zip(self.upperPipes, self.lowerPipes):
uPipe['x'] += self.pipeVelX
lPipe['x'] += self.pipeVelX
# add new pipe when first pipe is about to touch left of screen
if len(self.upperPipes) > 0 and 0 < self.upperPipes[0]['x'] < 5:
newPipe = self.getRandomPipe()
self.upperPipes.append(newPipe[0])
self.lowerPipes.append(newPipe[1])
        # remove first pipe if it's out of the screen
if len(self.upperPipes) > 0 and self.upperPipes[0]['x'] < -IMAGES['pipe'][0].get_width():
self.upperPipes.pop(0)
self.lowerPipes.pop(0)
# draw sprites
self.SCREEN.blit(IMAGES['background'], (0,0))
for i, (uPipe, lPipe) in enumerate(zip(self.upperPipes, self.lowerPipes)):
if i == 0:
obs.insert(1, uPipe['x'])
obs.insert(2, uPipe['y'])
obs.insert(3, lPipe['x'])
obs.insert(4, lPipe['y'])
self.SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y']))
self.SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))
self.SCREEN.blit(IMAGES['base'], (basex, BASEY))
# print score so player overlaps the score
self.showScore(self.score)
# Player rotation has a threshold
visibleRot = self.playerRotThr
if self.playerRot <= self.playerRotThr:
visibleRot = self.playerRot
playerSurface = pygame.transform.rotate(IMAGES['player'][self.playerIndex], visibleRot)
self.SCREEN.blit(playerSurface, (self.playerx, self.playery))
return self.get_observation(), reward, not self.running, {} # obs, reward, done, info
def get_observation(self):
surf = pygame.surfarray.array3d(pygame.display.get_surface())
x = cv2.resize(surf, (80, 80)) # resize to 80x80
x = np.array(x, dtype=np.uint8)
x = cv2.cvtColor(x, cv2.COLOR_BGR2GRAY)
x = np.reshape(x, (80, 80, 1))
return x
def reset(self):
self.playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2) + 0
self.playerx = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2) - 200
self.basex = 0
self.playerIndex = 0
self.playerIndexGen = cycle([0, 1, 2, 1])
self.score = 0
self.running = True
obs = [0, 0, 0, 0, 0]
baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width()
# get 2 new pipes to add to upperPipes lowerPipes list
newPipe1 = self.getRandomPipe()
newPipe2 = self.getRandomPipe()
# list of upper pipes
self.upperPipes = [
{'x': SCREENWIDTH + 200, 'y': newPipe1[0]['y']},
{'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[0]['y']},
]
# list of lowerpipe
self.lowerPipes = [
{'x': SCREENWIDTH + 200, 'y': newPipe1[1]['y']},
{'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[1]['y']},
]
return self.get_observation()
def render(self, mode='human'):
pygame.display.update()
self.FPSCLOCK.tick(FPS)
# Helper functions
def getRandomPipe(self):
"""returns a randomly generated pipe"""
# y of gap between upper and lower pipe
gapY = random.randrange(0, int(BASEY * 0.6 - PIPEGAPSIZE))
gapY += int(BASEY * 0.2)
pipeHeight = IMAGES['pipe'][0].get_height()
pipeX = SCREENWIDTH + 10
return [
{'x': pipeX, 'y': gapY - pipeHeight}, # upper pipe
{'x': pipeX, 'y': gapY + PIPEGAPSIZE}, # lower pipe
]
def showScore(self, score):
"""displays score in center of screen"""
scoreDigits = [int(x) for x in list(str(score))]
totalWidth = 0 # total width of all numbers to be printed
for digit in scoreDigits:
totalWidth += IMAGES['numbers'][digit].get_width()
Xoffset = (SCREENWIDTH - totalWidth) / 2
for digit in scoreDigits:
self.SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREENHEIGHT * 0.1))
Xoffset += IMAGES['numbers'][digit].get_width()
def checkCrash(self, player, upperPipes, lowerPipes):
"""returns True if player collders with base or pipes."""
pi = player['index']
player['w'] = IMAGES['player'][0].get_width()
player['h'] = IMAGES['player'][0].get_height()
# if player crashes into ground
if player['y'] + player['h'] >= BASEY - 1:
return [True, True]
else:
playerRect = pygame.Rect(player['x'], player['y'],
player['w'], player['h'])
pipeW = IMAGES['pipe'][0].get_width()
pipeH = IMAGES['pipe'][0].get_height()
for uPipe, lPipe in zip(upperPipes, lowerPipes):
# upper and lower pipe rects
uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH)
lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH)
# player and upper/lower pipe hitmasks
pHitMask = HITMASKS['player'][pi]
uHitmask = HITMASKS['pipe'][0]
lHitmask = HITMASKS['pipe'][1]
# if bird collided with upipe or lpipe
uCollide = self.pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask)
lCollide = self.pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask)
if uCollide or lCollide:
return [True, False]
return [False, False]
def pixelCollision(self, rect1, rect2, hitmask1, hitmask2):
"""Checks if two objects collide and not just their rects"""
rect = rect1.clip(rect2)
if rect.width == 0 or rect.height == 0:
return False
x1, y1 = rect.x - rect1.x, rect.y - rect1.y
x2, y2 = rect.x - rect2.x, rect.y - rect2.y
for x in xrange(rect.width):
for y in xrange(rect.height):
if hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]:
return True
return False
def getHitmask(self, image):
"""returns a hitmask using an image's alpha."""
mask = []
for x in xrange(image.get_width()):
mask.append([])
for y in xrange(image.get_height()):
mask[x].append(bool(image.get_at((x,y))[3]))
return mask
def get_actions(self):
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):
return 1
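# Standalone sketch of the pixelCollision idea above (hypothetical 2x2 masks,
# not the original sprite assets): the two rects are clipped to their overlap,
# then the per-pixel alpha masks are compared inside that overlap only.
#
#   r1 = pygame.Rect(0, 0, 2, 2); m1 = [[True, False], [False, True]]
#   r2 = pygame.Rect(1, 1, 2, 2); m2 = [[True, True], [True, True]]
#   clip = r1.clip(r2)                      # Rect(1, 1, 1, 1)
#   # offsets into each mask: (1-0, 1-0) for m1 and (1-1, 1-1) for m2,
#   # so m1[1][1] and m2[0][0] are compared -> both True -> collision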
# --- vkmini/group/group_longpoll.py | Elchinchel/vkmini @ 378ee3893c5826563a19198fd532df47aaa03350 | MIT | 4,587 bytes | 2 stars ---
from typing import AsyncGenerator, List, Union, Any
from aiohttp.client import ClientSession
from vkmini.utils import AbstractLogger
from vkmini.request import longpoll_get, default_session
from vkmini.exceptions import TokenInvalid
from vkmini import VkApi
class Update:
    # TODO: no comment needed here, really
class _Message:
date: int
from_id: int
id: int
out: int
peer_id: int
text: str
conversation_message_id: int
fwd_messages: list
important: bool
attachments: list
is_hidden: bool
client_info: dict
reply_message: dict = None
def __init__(self, object):
self.__dict__.update(object)
type: str
object: dict
message: _Message
vk: VkApi
def __init__(self, update, vk):
self.vk = vk
self.type = update['type']
self.object = update['object']
if self.type == 'message_new':
self.message = self._Message(self.object['message'])
def __getitem__(self, key):
return self.object[key]
async def reply_to_peer(self, message, **kwargs):
return await self.vk.msg_op(1, self.message.peer_id, message, **kwargs)
class GroupLP:
    # TODO: this is getting seriously stale
wrap_events: bool
group_id: int
server: str
wait: int
key: str
ts: int
time: float
vk: VkApi
_session: Union[ClientSession, None]
__session_owner: bool = False
def __init__(self, vk: VkApi, group_id: int, wait: int = 25,
logger: AbstractLogger = None,
session: ClientSession = default_session) -> None:
"""
Параметр `wait` описан в документации
(https://vk.com/dev/bots_longpoll)
`logger` -- любой объект, имеющий атрибуты info, debug и warning,
по умолчанию None, то есть логирование не ведется
`session` -- экземпляр aiohttp.ClientSession, который будет
использоваться при выполнении запросов к LongPoll серверу
(при использовании класса в контексте, будет создана автоматически,
иначе будет использоваться стандартная общая сессия,
см. vkmini.set_default)
Возвращает "сырой" класс, для подготовки к работе, нужно использовать
его в контексте или вызвать метод `start`
Пример с контекстом:
```
async with GroupLP(vk, group_id) as lp:
print(await lp.check())
```
Пример без контекста:
```
lp = GroupLP(vk, group_id)
await lp.start()
print(await lp.check())
```
"""
self._vk = vk
self.wait = wait
self.group_id = group_id
self.logger = logger or vk.logger
self._session = session
@property
async def check(self) -> List[Union[Update, List[Any]]]:
        'Returns the list of events (updates)'
data = await longpoll_get(
f"{self.server}?act=a_check&key={self.key}" +
f"&ts={self.ts}&wait={self.wait}",
self._session
)
if 'failed' in data:
if data['failed'] == 1:
self.ts = data['ts']
elif data['failed'] == 2:
await self.get_longpoll_data(False)
else:
await self.get_longpoll_data(True)
return []
else:
self.ts = data['ts']
# if self.wrap_events:
# return [Update(update, self.vk) for update in data['updates']]
# else:
return data['updates']
async def get_longpoll_data(self, new_ts: bool) -> None:
data = await self._vk._method(
'groups.getLongPollServer', group_id=self.group_id
)
if not self._vk.excepts:
if data.get('error', {}).get('error_code') == 5:
raise TokenInvalid(data['error'])
self.server = data['server']
self.key = data['key']
if new_ts:
self.ts = data['ts']
async def start(self) -> None:
await self.get_longpoll_data(True)
async def __aenter__(self) -> "GroupLP":
if self._session is None:
self._session = ClientSession()
self.__session_owner = True
await self.get_longpoll_data(True)
return self
async def __aexit__(self, *_) -> None:
if self.__session_owner:
await self._session.close()
async def listen(self) -> AsyncGenerator[Update, None]:
while True:
for update in await self.check:
yield update
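# A minimal consumption sketch (assumes a ready VkApi instance `vk` and a real
# group id, both external to this module):
#
#   async def main():
#       async with GroupLP(vk, group_id=1) as lp:
#           async for update in lp.listen():
#               print(update['type'])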
# --- aswan/tests/unit/test_migrations.py | papsebestyen/aswan @ ed1b2a3dae6a8b7de355edd75de8d4ad577c97cd | MIT | 1,162 bytes | 1 star ---
import tarfile
import pandas as pd
import sqlalchemy as db
from aswan import AswanConfig, ProdConfig, Project
from aswan.migrate import pull, push
from aswan.models import Base
from aswan.object_store import get_object_store
def test_push_pull(tmp_path):
conf = ProdConfig.from_dir(tmp_path / "cfg")
Base.metadata.create_all(db.create_engine(conf.db))
ostore = get_object_store(conf.object_store)
remote = tmp_path / "remote"
df1 = pd.DataFrame([{"A": 10}])
df2 = pd.DataFrame([{"B": 10}])
tabfp = conf.t2_path / "tab"
df1.to_parquet(tabfp)
ostore.dump_str("YAAAY", "fing")
push(conf, str(remote))
df2.to_parquet(tabfp)
tfile = next(remote.glob("**/*.tgz"))
with tarfile.open(tfile, "r:gz") as tar:
names = tar.getnames()
assert "fing" in names
assert not pd.read_parquet(tabfp).equals(df1)
pull(conf, str(remote))
assert pd.read_parquet(tabfp).equals(df1)
def test_project_push_pull(tmp_path):
aconf = AswanConfig.default_from_dir(
tmp_path / "cfg", remote_root=str(tmp_path / "remote")
)
project = Project(aconf)
project.push()
project.pull()
# --- booking.py | kurkurzz/AdminDashboard-BookingWithTimeslot @ aa34fef7bc0e1f8cabb602adc6d69af925436e5d | MIT | 1,055 bytes ---
import datetime as dt
class Booking:
def __init__(self):
self.user_id = ''
        self.name = ''
self.pet_name = ''
self.time_slot = 0
self.id = ''
self.phone_number = ''
def to_dict(self):
return {
'name' : self.name,
'petname' : self.pet_name,
'timeslot' : self.time_slot,
'userid' : self.user_id,
'phonenumber' : self.phone_number
}
    # convert a record from the DB into a Booking instance
    def from_dict(self, data, doc_id):
        self.name = data['name']
        self.pet_name = data['petname']
        self.time_slot = int(data['timeslot'])
        self.user_id = data['userid']
        self.phone_number = data['phonenumber']
        self.id = doc_id
        self.datetime = dt.datetime.fromtimestamp(self.time_slot)
        return self
def __str__(self):
time_string = self.datetime.strftime("%I:%M %p")
return f'Name: {self.name}\nPhone Number: {self.phone_number}\nPet Name: {self.pet_name}\nTime: {time_string}'
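# Usage sketch (hypothetical record, mirroring the DB dict shape this class
# expects):
#
#   record = {'name': 'Ann', 'petname': 'Rex', 'timeslot': 1617184800,
#             'userid': 'u1', 'phonenumber': '0123456789'}
#   booking = Booking().from_dict(record, 'doc1')
#   print(booking)                      # formatted via __str__
#   assert booking.to_dict() == record  # round trip preserves the fields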
# --- launch_notebooks.py | srivnamrata/openvino @ aea76984a731fa3e81be9633dc8ffc702fb4e207 | Apache-2.0 | 1,927 bytes ---
#!/usr/bin/env python3
import subprocess
import sys
from pathlib import Path
import os
pythonpath = sys.executable
curdir = Path(__file__).parent.resolve()
parentdir = curdir.parent
# If openvino_env is already activated, launch jupyter lab.
# This will also start if openvino_env_2 is activated instead of openvino_env;
# the assumption is that this is usually intended.
if "openvino_env" in pythonpath:
subprocess.run([pythonpath, "-m", "jupyterlab", "notebooks"])
else:
if sys.platform == "win32":
scripts_dir = "Scripts"
else:
scripts_dir = "bin"
# If openvino_env is not activated, search for the openvino_env folder in the
# current and parent directory and launch the notebooks
try:
pythonpath = os.path.normpath(
os.path.join(curdir, f"openvino_env/{scripts_dir}/python")
)
subprocess.run([pythonpath, "-m", "jupyterlab", "notebooks"])
    except FileNotFoundError:
try:
pythonpath = os.path.normpath(
os.path.join(parentdir, f"openvino_env/{scripts_dir}/python")
)
subprocess.run([pythonpath, "-m", "jupyterlab", "notebooks"])
        except FileNotFoundError:
print(pythonpath)
print(
"openvino_env could not be found in the current or parent "
"directory, or the installation is not complete. Please follow "
"the instructions on "
"https://github.com/openvinotoolkit/openvino_notebooks to "
"install the notebook requirements in a virtual environment.\n\n"
"After installation, you can also launch the notebooks by "
"activating the virtual environment manually (see the README "
"on GitHub, linked above) and typing `jupyter lab notebooks`.\n\n"
f"Current directory: {curdir}"
f"Python executable: {sys.executable}"
)
# --- update_last_date.py | ankschoubey/testblog @ f74e93f0f85edaee9c5adbe402e8e4a5252cc64d | Apache-2.0 | 1,143 bytes | 1 star ---
import os.path, os, time
from datetime import datetime
def getLastUpdatedTime(file: str):
return datetime.fromtimestamp(os.path.getmtime(file)).isoformat()
def updatePost(postUrl: str) -> None:
lastUpdatedTime = getLastUpdatedTime(postUrl)
prefix = "last_modified_at"
string = f"{prefix}: {lastUpdatedTime}"
with open(postUrl, "r", encoding="utf8") as file:
lines = file.readlines()
    for index, line in enumerate(lines[1:], start=1):
        #print(index, line)
        if line.startswith("---"):
            # closing front-matter fence reached without finding the key:
            # insert the fresh last_modified_at line just above it
            lines.insert(index, string + '\n')
            #print("found break")
            break
        if line.startswith(prefix):
            if line.startswith(string[:28]):
                return
            lines[index] = string + '\n'
            break
with open(postUrl, "w", encoding="utf8") as file:
file.writelines(lines)
from os import listdir
from os.path import isfile, join
path = "_posts"
onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
#print(onlyfiles)
for i in onlyfiles:
completePath = f"{path}/{i}"
updatePost(completePath)
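# Example of the effect (hypothetical post): for a file in _posts whose front
# matter is delimited by '---', the script inserts or refreshes a line such as
#
#   last_modified_at: 2021-05-08T19:59:02.358291
#
# using the file's mtime, so the blog can show an accurate "last updated".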
# --- PythonCodes/ScientificPlotting/FigGen_Py_wolfel/Fig3.py | Nicolucas/C-Scripts @ 2608df5c2e635ad16f422877ff440af69f98f960 | MIT | 4,578 bytes ---
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('science')
import os, sys, time
sys.path.insert(0,"/import/freenas-m-03-geodynamics/jhayek/petsc-3.12.5/lib/petsc/bin/")
sys.path.insert(0,"/import/freenas-m-03-geodynamics/jhayek/TEAR/se2wave/utils/python")
sys.path.insert(0,"/import/freenas-m-03-geodynamics/jhayek/TEAR/processing/TEAR/PythonCodes/")
from se2waveload import *
from Lib_GeneralFunctions import *
from GeneratePaperFigs import *
from ModelIllustration import *
SMALL_SIZE = 14
MEDIUM_SIZE = 16
BIGGER_SIZE = 20
FontSizeControlFreak(SMALL_SIZE,MEDIUM_SIZE,BIGGER_SIZE)
from palettable.colorbrewer.diverging import PuOr_11_r as FieldColor
cmap = FieldColor.mpl_colormap
from matplotlib.colors import ListedColormap
import matplotlib.lines as mlines
from palettable.cartocolors.qualitative import Safe_5 as LineColor
cmapProf = ListedColormap(LineColor.mpl_colors[:])
###################################################################
###################### Reference solution
###################################################################
pathRef = "/import/freenas-m-03-geodynamics/jhayek/SharedWolfel/PaperData/References/"
# Reference saved into a list of objects
RefList = [SSCreference(pathRef + "Kostrov/Kos_sem2dpack-{}-receiver-0.txt", "0km"),
SSCreference(pathRef + "Kostrov/Kos_sem2dpack-{}-receiver-1.txt", "2km"),
SSCreference(pathRef + "Kostrov/Kos_sem2dpack-{}-receiver-2.txt", "4km"),
SSCreference(pathRef + "Kostrov/Kos_sem2dpack-{}-receiver-3.txt", "6km"),
SSCreference(pathRef + "Kostrov/Kos_sem2dpack-{}-receiver-4.txt", "8km"),
]
# Reference saved into a list of objects
RefListTPV = [TPV3reference(pathRef + "TPV3/TPV_sem2dpack-{}-receiver-0.0e+00.txt", "0km"),
TPV3reference(pathRef + "TPV3/TPV_sem2dpack-{}-receiver-2.0e+03.txt", "2km"),
TPV3reference(pathRef + "TPV3/TPV_sem2dpack-{}-receiver-4.0e+03.txt", "4km"),
TPV3reference(pathRef + "TPV3/TPV_sem2dpack-{}-receiver-6.0e+03.txt", "6km"),
TPV3reference(pathRef + "TPV3/TPV_sem2dpack-{}-receiver-8.0e+03.txt", "8km"),
]
###################################################################
###################### Figure 3
###################################################################
# Figure 3
start_time = time.time()
fname = "step-{timestep:04}_wavefield.pbin"
path = "/import/freenas-m-03-geodynamics/jhayek/TEAR/Results/T2/Runs/TEAR46_Kos_T20_P3_025x025_A12phi65_Delta2.5_4s/"
i=4630
FieldFilename = os.path.join(path,fname.format(timestep=i))
MeshFilename = os.path.join(path, "default_mesh_coor.pbin")
se2_coor = se2wave_load_coordinates(MeshFilename)
FileList = glob(os.path.join(path,"step-{timestep}_wavefield.pbin".format(timestep="*")))
l = [i.replace(os.path.join(path,'step-'),'').replace('_wavefield.pbin','') for i in FileList]
TimeStepVal, LCoorX, LCoorY, LFieldX, LFieldY, LFieldvelX, LFieldvelY = ExtractFields(FieldFilename, se2_coor)
FolderProfilesPath = "/import/freenas-m-03-geodynamics/jhayek/SharedWolfel/PaperData/CorrectedSimulations/20220325/"
DataProfile = LoadPickleFile(Filename = "TEAR46_Kos_T20_P3_025x025_A12phi65_Delta2.5_4s-Tilt20.0-P3-TPList_t4630_d62.5.pickle",FolderPath = FolderProfilesPath)
x0,y0 = 7350,2675
InsetAxis = [x0-200,x0+200,y0-200,y0+200]
F1, ax = Plot4KomaSetup(LCoorX, LCoorY, LFieldX, LFieldvelX,
["X-Component Displacement ", "X-Component Displacement [m]"],
TimeStepVal,InsetAxis,
cmap=cmap, rasterized=True)
del x0,y0,InsetAxis
# Tilted case plotting
iidx = 0
for iidx,Test1 in enumerate(DataProfile):
ax[0].plot(Test1.Time, Test1.DispX, color= cmapProf.colors[iidx], linewidth=1.5, zorder=iidx)
ax[1].plot(Test1.Time, Test1.VelX, color= cmapProf.colors[iidx], linewidth=1.5, zorder=iidx)
ax[0].set_xlabel("time [s]")
#F1.suptitle("Tilting (20deg) Kostrov simulation")
[item.PlotReference(ax[0], "Slip", filtering=False) for item in RefList]
[item.PlotReference(ax[1], "SlipRate", filtering=False) for item in RefList]
Format_LabelsOnFig_formatAxis(F1, ax[:2],inverted=True, ncols = 3, HeightBbox=1.2)
LabelizeAxisList(ax,Pos=[0.9, 0.9],fontsize=BIGGER_SIZE)
print("Saving Figure...")
OutFile = "/import/freenas-m-03-geodynamics/jhayek/SharedWolfel/Works/se2dr_Paper/Illustrations/FinalFigures/F{}.pdf"
F1.savefig(OutFile.format("3"))
OutFile = "/import/freenas-m-03-geodynamics/jhayek/SharedWolfel/Works/se2dr_Paper/Illustrations/FinalFigures/F{}.png"
F1.savefig(OutFile.format("3"))
# --- setup.py | fkie/rosrepo @ 13cdf89e32f0c370d106a61540b0cd102675daf9 | Apache-2.0 | 3,560 bytes | 5 stars ---
# coding=utf-8
#
# ROSREPO
# Manage ROS workspaces with multiple Gitlab repositories
#
# Author: Timo Röhling
#
# Copyright 2016 Fraunhofer FKIE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import fastentrypoints
from setuptools import setup, __version__ as setuptools_version
import os
import sys
srcdir = os.path.normpath(os.path.join(os.path.dirname(__file__), "src"))
if os.path.isfile(os.path.join(srcdir, "rosrepo", "__init__.py")) and os.path.isfile(os.path.join(srcdir, "rosrepo", "main.py")):
sys.path.insert(0, srcdir)
else:
sys.stderr.write("This script is supposed to run from the rosrepo source tree")
sys.exit(1)
from rosrepo import __version__ as rosrepo_version
install_requires = ["catkin_pkg", "catkin_tools", "python-dateutil", "pygit2", "requests", "rosdep", "pyyaml"]
extras_require = {}
# The following code is a somewhat barbaric attempt to get conditional
# dependencies that works on setuptools versions before 18.0 as well:
if int(setuptools_version.split(".", 1)[0]) < 18:
if sys.version_info[0] < 3:
install_requires.append("futures")
if sys.version_info[:2] < (3, 5):
install_requires.append("scandir")
# Unfortunately, the fake conditional dependencies do not work with
# the caching mechanism of bdist_wheel, so if you want to create wheels,
# use at least setuptools version 18
assert "bdist_wheel" not in sys.argv
else:
# We have a reasonably modern setuptools version
from distutils.version import StrictVersion as Version
if Version(setuptools_version) >= Version("36.2"):
# Starting with setuptools 36.2, we can do proper conditional
# dependencies "PEP 508 style", the way God intended
install_requires.append("futures ; python_version<'3'")
install_requires.append("scandir ; python_version<'3.5'")
else:
# No proper conditional dependencies, but we can resort to some
# trickery and get the job done nevertheless
extras_require[":python_version<'3'"] = ["futures"]
extras_require[":python_version<'3.5'"] = ["scandir"]
setup(
name = "rosrepo",
description = "Manage ROS workspaces with multiple Gitlab repositories",
author = "Timo Röhling",
author_email = "timo.roehling@fkie.fraunhofer.de",
license = "Apache Software License",
keywords = ["catkin", "ROS", "Git"],
packages = ["rosrepo"],
package_dir = {"": "src"},
data_files = [("share/bash-completion/completions", ["bash/rosrepo"])],
version = rosrepo_version,
install_requires = install_requires,
extras_require = extras_require,
test_suite = "nose.collector",
entry_points = {
"console_scripts": ["rosrepo = rosrepo.main:main"]
},
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"Topic :: Software Development :: Version Control",
"Programming Language :: Python",
]
)
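# For reference, the PEP 508 marker syntax used above is "<name> ; <marker>",
# evaluated by pip at install time against the running interpreter, e.g.:
#
#   install_requires = ["scandir ; python_version<'3.5'"]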
# --- structural/decorator_and_proxy/example/proxy.py | BruceWW/python_designer_pattern @ c5f8b5ee32c8984401b4a217fa35364170331063 | Apache-2.0 | 2,948 bytes | 1 star ---
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Date : 2020/8/30
# @Author : Bruce Liu /Lin Luo
# @Mail : 15869300264@163.com
class Card(object):
"""
    Bank card class
"""
def __init__(self, name: str, limited: bool = False, limited_num: int = 100000, surplus: int = 0):
"""
        Initialize a card
        :param name: card name
        :param limited: whether the card has a transfer limit
        :param limited_num: the limit amount
        :param surplus: the balance
"""
self.name = name
        # whether the card has a transfer limit
self.limited = limited
        # total limit amount
self.limited_num = limited_num
        # balance
self.surplus = surplus
        # amount involved in the current operation
self.operator_num = 0
def __add__(self, other) -> bool:
"""
        Move the amount of other's pending operation into self,
        i.e. transfer part of the money from other to this card
:param other:
        :return: True if the transfer succeeded
"""
        # check whether the transfer is allowed
if (
self.limited and self.surplus + other.operator_num > self.limited_num) or other.surplus - other.operator_num < 0:
return False
else:
            # transfer allowed; move the money
self.surplus += other.operator_num
other.surplus -= other.operator_num
other.operator_num = 0
return True
def __sub__(self, other) -> bool:
"""
        Transfer part of this card's money into other
:param other:
        :return: True if the transfer succeeded
"""
        # check whether the transfer is allowed
if self.surplus - self.operator_num >= 0 and (
not other.limited or other.surplus + self.operator_num <= other.limited_num):
self.surplus -= self.operator_num
other.surplus += self.operator_num
self.operator_num = 0
return True
else:
return False
def trans(source_card: Card, target_card: Card, trans_num: int):
"""
    Execute a transfer
    :param source_card: the card to transfer from
    :param target_card: the card to transfer to
    :param trans_num: the transfer amount
:return:
"""
    print(f'trans {trans_num} from {source_card.name} to {target_card.name}')
print(f'surplus of source_card: {source_card.name} before trans: {source_card.surplus}')
print(f'surplus of target_card: {target_card.name} before trans: {target_card.surplus}')
source_card.operator_num = trans_num
res = target_card + source_card
print(f'transfer result: {res}')
print(f'surplus of source_card: {source_card.name} after trans: {source_card.surplus}')
print(f'surplus of target_card: {target_card.name} after trans: {target_card.surplus}')
if __name__ == '__main__':
    # instantiate three cards
    # card 1: no limit, balance 10000
    card_1 = Card('card_1', False, 100000, 10000)
    # card 2: limit 1000, balance 0
    card_2 = Card('card_2', True, 1000, 0)
    # card 3: limit 10000, balance 100
    card_3 = Card('card_3', True, 10000, 100)
    # transfer 100 from card 2 to card 1
    trans(card_2, card_1, 100)
    print()
    # transfer 2000 from card 1 to card 3
    trans(card_1, card_3, 2000)
    print()
    # transfer 999 from card 1 to card 2
    trans(card_1, card_2, 999)
    print()
    # transfer 2 from card 1 to card 2
    trans(card_1, card_2, 2)
    print()
    # transfer 10000 from card 3 to card 1
    trans(card_3, card_1, 10000)
# --- macro_benchmark/SegLink/seglink/unit_tests.py | songhappy/ai-matrix @ 901078e480c094235c721c49f8141aec7a84e70e | Apache-2.0 | 13,057 bytes | 180 stars ---
import math
import os
import tensorflow as tf
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import ops
import utils
import model_fctd
import data
import config
import visualizations as vis
FLAGS = tf.app.flags.FLAGS
def test_encode_decode_synth_data():
batch_size = 50
n_gt_max = 4
image_h = 150
image_w = 300
image_size = [image_h, image_w]
map_sizes = [[19, 38], [10, 19], [5, 10], [3, 5], [2, 3], [1, 1]]
n_stages = len(map_sizes)
# region_sizes = 300. * np.minimum(FLAGS.region_size_alpha / np.asarray([38, 19, 10, 5, 3, 1]), 0.95)
region_sizes = [11.84210526, 23.68421053, 45., 90., 150., 285.]
print(region_sizes)
pos_thresh = 1.5
neg_thresh = 2.0
def _generate_random_gt(batch_size, n_gt_max):
gt_cx = image_w * np.random.uniform(low=0.2, high=0.8, size=[batch_size, n_gt_max, 1])
gt_cy = image_h * np.random.uniform(low=0.2, high=0.8, size=[batch_size, n_gt_max, 1])
gt_w = image_w * np.random.uniform(low=0.2, high=1, size=[batch_size, n_gt_max, 1])
gt_h = image_h * np.random.uniform(low=0.05, high=0.5, size=[batch_size, n_gt_max, 1])
gt_theta = np.random.uniform(low=-0.5, high=0.5, size=[batch_size, n_gt_max, 1])
gt_rboxes = np.concatenate([gt_cx, gt_cy, gt_w, gt_h, gt_theta], axis=2)
return gt_rboxes
def _visualize(ax, match_status, local_gt, gt_rboxes, gt_counts,
decoded_pred, decoded_counts, link_status=None):
"""
Visualize encoded groundtruth
ARGS
ax: pyplot axis
match_status: int [map_h, map_w] match status
local_gt: [map_h, map_w, rbox_dim] encoded groundtruths
gt_rboxes: [5]
gt_counts: []
decoded_pred: [n_decoded_pred_max, 5]
decoded_counts: int []
link_status: int [map_h, map_w, 8] link status
"""
map_h, map_w, _ = local_gt.shape
step_x = float(image_w) / map_w
step_y = float(image_h) / map_h
# visualize regions
region_bboxes = []
for p in range(map_h * map_w):
px = p % map_w
py = int(math.floor(p / map_w))
grid_cx = (0.5 + px) * step_x
grid_cy = (0.5 + py) * step_y
region_bboxes.append([grid_cx, grid_cy, region_size, region_size, 0])
region_bboxes = np.asarray(region_bboxes)
# utils.visualize_rboxes(ax, region_bboxes, edgecolor='pink', facecolor='pink', alpha=0.5)
# visualize groundtruth
vis.visualize_rboxes(ax, gt_rboxes[:gt_counts, :],
verbose=False, edgecolor='green', facecolor='none', linewidth=2)
# visualize grid
for p in range(map_h * map_w):
px = p % map_w
py = p // map_w
grid_cx = (0.5 + px) * step_x
grid_cy = (0.5 + py) * step_y
match_status_p = match_status[py, px]
# draw grid center point as a circle
if match_status_p == 1: # positive
circle_color = 'red'
elif match_status_p == 0: # ignore
circle_color = 'yellow'
else: # negative
circle_color = 'blue'
circle = plt.Circle((grid_cx, grid_cy), 2, color=circle_color)
ax.add_artist(circle)
# # visualize decoded predictions
# utils.visualize_rboxes(ax, decoded_pred[:decoded_counts, :],
# edgecolor='green', facecolor='green', alpha=0.5)
if link_status is not None:
# visualize link status
for p in range(map_h * map_w):
px = p % map_w
py = int(math.floor(p / map_w))
grid_cx = (0.5 + px) * step_x
grid_cy = (0.5 + py) * step_y
link_status_p = link_status[py, px, :]
idx = 0
for ny in [py - 1, py, py + 1]:
for nx in [px - 1, px, px + 1]:
if ny == py and nx == px:
# skip self link
continue
if link_status_p[idx] != -1:
nb_cx = (0.5 + nx) * step_x
nb_cy = (0.5 + ny) * step_y
if link_status_p[idx] == 1:
link_color = 'red'
elif link_status_p[idx] == 0:
link_color = 'yellow'
else:
raise ValueError('Internal error')
ax.plot((grid_cx, nb_cx), (grid_cy, nb_cy),
color=link_color, alpha=0.5, linewidth=2)
idx += 1
# generate random number of random groundtruths
gt_rboxes = _generate_random_gt(batch_size, n_gt_max)
gt_counts = np.random.randint(low=1, high=n_gt_max, size=[batch_size])
node_status_below = [[[]]]
match_indices_below = [[[]]]
# fetch encoding & decoding results on all stages
fetches = {}
for i in range(n_stages):
map_size = map_sizes[i]
region_size = region_sizes[i]
match_status, link_status, local_gt, match_indices = ops.encode_groundtruth(
gt_rboxes, gt_counts, map_size, image_size,
node_status_below, match_indices_below,
region_size=region_size,
pos_scale_diff_thresh=pos_thresh,
neg_scale_diff_thresh=neg_thresh,
cross_links=False)
decoded_pred, decoded_counts = ops.decode_prediction(
match_status, local_gt, image_size, region_size=region_size)
fetches['match_status_%d' % i] = match_status
fetches['link_status_%d' % i] = link_status
fetches['local_gt_%d' % i] = local_gt
fetches['decoded_pred_%d' % i] = decoded_pred
fetches['decoded_counts_%d' % i] = decoded_counts
with tf.Session() as sess:
sess_outputs = sess.run(fetches)
fig = plt.figure()
for i in range(batch_size):
fig.clear()
for j in range(n_stages):
ax = fig.add_subplot(2, 3, j+1)
ax.invert_yaxis()
_visualize(ax,
sess_outputs['match_status_%d' % j][i],
sess_outputs['local_gt_%d' % j][i],
gt_rboxes[i],
gt_counts[i],
sess_outputs['decoded_pred_%d' % j][i],
sess_outputs['decoded_counts_%d' % j][i],
# link_status=None)
link_status=sess_outputs['link_status_%d' % j][i])
ax.set_xlim(0, image_w)
ax.set_ylim(0, image_h)
ax.set_aspect('equal')
save_path = os.path.join('../vis', 'local_gt_%d.png' % i)
plt.savefig(save_path, dpi=200)
print('Visualization saved to %s' % save_path)
def test_encode_decode_real_data():
save_dir = '../vis/gt_link_node/'
utils.mkdir_if_not_exist(save_dir)
batch_size = 233
streams = data.input_stream(FLAGS.train_record_path)
pstreams = data.train_preprocess(streams)
batch = tf.train.batch(pstreams, batch_size, num_threads=1, capacity=100)
image_h = tf.shape(batch['image'])[1]
image_w = tf.shape(batch['image'])[2]
image_size = tf.pack([image_h, image_w])
detector = model_fctd.FctdDetector()
all_maps = detector.build_model(batch['image'])
det_layers = ['det_conv4_3', 'det_fc7', 'det_conv6',
'det_conv7', 'det_conv8', 'det_pool6']
fetches = {}
fetches['images'] = batch['image']
fetches['image_size'] = image_size
for i, det_layer in enumerate(det_layers):
cls_maps, lnk_maps, reg_maps = all_maps[i]
map_h, map_w = tf.shape(cls_maps)[1], tf.shape(cls_maps)[2]
map_size = tf.pack([map_h, map_w])
node_status_below = tf.constant([[[0]]], dtype=tf.int32)
match_indices_below = tf.constant([[[0]]], dtype=tf.int32)
cross_links = False # FIXME
node_status, link_status, local_gt, match_indices = ops.encode_groundtruth(
batch['rboxes'],
batch['count'],
map_size,
image_size,
node_status_below,
match_indices_below,
region_size=detector.region_sizes[i],
pos_scale_diff_thresh=FLAGS.pos_scale_diff_threshold,
neg_scale_diff_thresh=FLAGS.neg_scale_diff_threshold,
cross_links=cross_links)
fetches['node_status_%d' % i] = node_status
fetches['link_status_%d' % i] = link_status
fetches['local_gt_%d' % i] = local_gt
def _visualize_nodes_links(ax, image, node_status, link_status, image_size):
"""
Visualize nodes and links of one example.
ARGS
`node_status`: int [map_h, map_w]
`link_status`: int [map_h, map_w, n_links]
`image_size`: int [2]
"""
ax.clear()
image_display = vis.convert_image_for_visualization(
image, mean_subtracted=True)
ax.imshow(image_display)
vis.visualize_nodes(ax, node_status, image_size)
vis.visualize_links(ax, link_status, image_size)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
tf.train.start_queue_runners(sess=sess)
sess_outputs = sess.run(fetches)
fig = plt.figure()
for i in range(batch_size):
fig.clear()
for j, det_layer in enumerate(det_layers):
ax = fig.add_subplot(2, 3, j+1)
_visualize_nodes_links(ax,
sess_outputs['images'][i],
sess_outputs['node_status_%d' % j][i],
sess_outputs['link_status_%d' % j][i],
sess_outputs['image_size'])
save_path = os.path.join(save_dir, 'gt_node_link_%04d.jpg' % i)
plt.savefig(save_path, dpi=200)
print('Visualization saved to %s' % save_path)
def test_clip_rboxes():
def _generate_random_rboxes(n_rboxes):
rboxes = np.zeros((n_rboxes, 5))
rboxes[:,0] = np.random.uniform(low=0.0, high=1.0, size=[n_rboxes]) # cx
rboxes[:,1] = np.random.uniform(low=0.0, high=1.0, size=[n_rboxes]) # cy
rboxes[:,2] = np.random.uniform(low=0.2, high=0.8, size=[n_rboxes]) # width
rboxes[:,3] = np.random.uniform(low=0.0, high=0.3, size=[n_rboxes]) # height
rboxes[:,4] = np.random.uniform(low=-1.0, high=1.0, size=[n_rboxes]) # theta
return rboxes
n_rboxes = 5
rboxes = tf.constant(_generate_random_rboxes(n_rboxes), tf.float32)
crop_bbox = tf.constant([0, 0, 1, 1], tf.float32)
clipped_rboxes = ops.clip_rboxes(rboxes, crop_bbox)
with tf.Session() as sess:
fetches = {'rboxes': rboxes, 'clipped_rboxes': clipped_rboxes}
sess_outputs = sess.run(fetches)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.invert_yaxis() # left-top is the origin
ax.set_aspect('equal')
ax.clear()
# plot rboxes before & after clipping
vis.visualize_rboxes(ax, sess_outputs['rboxes'],
edgecolor='blue', facecolor='none', verbose=True)
vis.visualize_rboxes(ax, sess_outputs['clipped_rboxes'],
edgecolor='green', facecolor='none', verbose=True)
save_path = os.path.join('../vis', 'clipped_rboxes.png')
plt.savefig(save_path)
print('Visualization saved to %s' % save_path)
def test_data_loading_and_preprocess():
fig = plt.figure()
ax = fig.add_subplot(111)
def _visualize_example(save_path, image, gt_rboxes, mean_subtracted=True):
ax.clear()
# convert image
image_display = vis.convert_image_for_visualization(
image, mean_subtracted=mean_subtracted)
# draw image
ax.imshow(image_display)
# draw groundtruths
image_h = image_display.shape[0]
image_w = image_display.shape[1]
vis.visualize_rboxes(ax, gt_rboxes,
edgecolor='yellow', facecolor='none', verbose=False)
# save plot
plt.savefig(save_path)
n_batches = 10
batch_size = 32
save_dir = '../vis/example'
utils.mkdir_if_not_exist(save_dir)
streams = data.input_stream('../data/synthtext_train.tf')
pstreams = data.train_preprocess(streams)
batches = tf.train.shuffle_batch(pstreams, batch_size, capacity=2000, min_after_dequeue=20,
num_threads=1)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
tf.train.start_queue_runners(sess=sess)
for i in range(n_batches):
fetches = {'images': batches['image'],
'gt_rboxes': batches['rboxes'],
'gt_counts': batches['count']}
sess_outputs = sess.run(fetches)
for j in range(batch_size):
save_path = os.path.join(save_dir, '%04d_%d.jpg' % (i, j))
gt_count = sess_outputs['gt_counts'][j]
_visualize_example(save_path,
sess_outputs['images'][j],
sess_outputs['gt_rboxes'][j, :gt_count],
mean_subtracted=True)
print('Visualization saved to %s' % save_path)
def test_max_pool_on_odd_sized_maps():
size = 5
x = np.random.rand(size, size).reshape(1,size,size,1).astype(np.float32)
print(x[0,:,:,0])
with tf.Session() as sess:
y = tf.nn.max_pool(x, [1,2,2,1], [1,2,2,1], 'SAME')
print(y.eval()[0,:,:,0])
def test_decode_combine_rboxes():
x = [np.random.rand(4,4).astype(np.float32),
np.random.rand(5,5).astype(np.float32),
np.random.rand(6,6).astype(np.float32)]
y, _ = ops.decode_combine_rboxes(x, x, x, [100, 100],
region_size=10, cell_size=10)
import ipdb; ipdb.set_trace()
with tf.Session() as sess:
y.eval()
pass
if __name__ == '__main__':
# test_encode_decode_synth_data()
test_encode_decode_real_data()
# test_clip_rboxes()
# test_data_loading_and_preprocess()
# test_max_pool_on_odd_sized_maps()
# test_decode_combine_rboxes()
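# The tests above repeatedly map a feature-map cell (px, py) to its center in
# image coordinates via (0.5 + px) * step_x. A minimal NumPy sketch of that
# mapping, independent of TensorFlow (the function name is hypothetical):
import numpy as np

def grid_centers(map_h, map_w, image_h, image_w):
    # centers of all cells of a map_h x map_w feature map, in image space
    step_x = float(image_w) / map_w
    step_y = float(image_h) / map_h
    px, py = np.meshgrid(np.arange(map_w), np.arange(map_h))
    return (0.5 + px) * step_x, (0.5 + py) * step_y

cx, cy = grid_centers(3, 5, 150, 300)
print(cx[0])  # [ 30.  90. 150. 210. 270.] for a 5-wide map on a 300-wide image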
| 34.726064
| 103
| 0.635138
| 1,943
| 13,057
| 3.978899
| 0.156459
| 0.02975
| 0.00996
| 0.011383
| 0.372914
| 0.328289
| 0.266848
| 0.220929
| 0.194671
| 0.170871
| 0
| 0.027797
| 0.234051
| 13,057
| 375
| 104
| 34.818667
| 0.745225
| 0.10324
| 0
| 0.249097
| 0
| 0
| 0.06608
| 0.004055
| 0
| 0
| 0
| 0.002667
| 0
| 1
| 0.039711
| false
| 0.00361
| 0.046931
| 0
| 0.093863
| 0.025271
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
66bff38e64bc42b7572591b13e17cd3a431e4073
| 1,007
|
py
|
Python
|
SoftLayer/CLI/file/duplicate_convert_status.py
|
ko101/softlayer-python
|
f4cc9fa2eb01d97c0e890907ef6735390f1a5b10
|
[
"MIT"
] | null | null | null |
SoftLayer/CLI/file/duplicate_convert_status.py
|
ko101/softlayer-python
|
f4cc9fa2eb01d97c0e890907ef6735390f1a5b10
|
[
"MIT"
] | null | null | null |
SoftLayer/CLI/file/duplicate_convert_status.py
|
ko101/softlayer-python
|
f4cc9fa2eb01d97c0e890907ef6735390f1a5b10
|
[
"MIT"
] | null | null | null |
"""Get status for split or move completed percentage of a given file duplicate volume."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
@click.command(cls=SoftLayer.CLI.command.SLCommand,
epilog="""Get status for split or move completed percentage of a given file duplicate volume.""")
@click.argument('volume-id')
@environment.pass_env
def cli(env, volume_id):
"""Get status for split or move completed percentage of a given file duplicate volume."""
table = formatting.Table(['Username', 'Active Conversion Start Timestamp', 'Completed Percentage'])
file_manager = SoftLayer.FileStorageManager(env.client)
value = file_manager.convert_dupe_status(volume_id)
table.add_row(
[
value['volumeUsername'],
value['activeConversionStartTime'],
value['deDuplicateConversionPercentage']
]
)
env.fout(table)
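# A hedged sketch of calling the same manager method directly, outside the CLI
# (assumes API credentials are already configured; the volume id is hypothetical):
import SoftLayer

client = SoftLayer.create_client_from_env()
file_manager = SoftLayer.FileStorageManager(client)
value = file_manager.convert_dupe_status(12345)  # 12345: hypothetical volume id
print(value['volumeUsername'], value['deDuplicateConversionPercentage'])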
| 32.483871
| 112
| 0.713009
| 119
| 1,007
| 5.966387
| 0.445378
| 0.107042
| 0.050704
| 0.071831
| 0.291549
| 0.291549
| 0.291549
| 0.291549
| 0.291549
| 0.291549
| 0
| 0
| 0.191658
| 1,007
| 30
| 113
| 33.566667
| 0.872236
| 0.211519
| 0
| 0
| 0
| 0
| 0.284802
| 0.07152
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0.05
| 0.2
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
66c4abe639069bea0f557f4dba81d69a1839cf18
| 392
|
py
|
Python
|
apps/saas/forms.py
|
lucaslucyk/sigec
|
cdf65868e2f8ead35b005603611fcd20446633c7
|
[
"MIT"
] | null | null | null |
apps/saas/forms.py
|
lucaslucyk/sigec
|
cdf65868e2f8ead35b005603611fcd20446633c7
|
[
"MIT"
] | 7
|
2020-02-12T03:10:01.000Z
|
2021-06-10T19:30:50.000Z
|
apps/saas/forms.py
|
lucaslucyk/sigec
|
cdf65868e2f8ead35b005603611fcd20446633c7
|
[
"MIT"
] | null | null | null |
from django import forms
#from pagedown.widgets import PagedownWidget
from apps.saas.models import Offer
class OfferForm(forms.ModelForm):
#content= forms.CharField(widget=PagedownWidget(show_preview=False))
#publish= forms.DateField(widget=forms.SelectDateWidget)
class Meta:
model = Offer
fields= [
"tipo_venta",
"financing",
"hardware",
"empleados",
"modulos",
]
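# A minimal sketch of wiring OfferForm into a view, following the standard
# Django ModelForm pattern (the view, URL name, and template path are hypothetical):
from django.shortcuts import redirect, render

from apps.saas.forms import OfferForm

def offer_create(request):
    # bind POST data when present, otherwise render an empty form
    form = OfferForm(request.POST or None)
    if form.is_valid():
        form.save()
        return redirect('saas:offer_list')  # hypothetical URL name
    return render(request, 'saas/offer_form.html', {'form': form})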
| 21.777778
| 69
| 0.742347
| 44
| 392
| 6.568182
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145408
| 392
| 18
| 70
| 21.777778
| 0.862687
| 0.420918
| 0
| 0
| 0
| 0
| 0.191111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
66c7e494275971e9a3a3aa777ced7402edea752a
| 1,237
|
py
|
Python
|
src/test.py
|
williamyang1991/TET-GAN
|
bdfca141fc14c5917fd9be8d2bc23870f9ad3288
|
[
"MIT"
] | 86
|
2019-01-02T06:20:09.000Z
|
2022-03-23T01:16:32.000Z
|
src/test.py
|
williamyang1991/TET-GAN
|
bdfca141fc14c5917fd9be8d2bc23870f9ad3288
|
[
"MIT"
] | 5
|
2019-01-22T06:18:26.000Z
|
2021-12-16T02:01:34.000Z
|
src/test.py
|
williamyang1991/TET-GAN
|
bdfca141fc14c5917fd9be8d2bc23870f9ad3288
|
[
"MIT"
] | 24
|
2019-01-03T09:36:54.000Z
|
2021-12-14T10:04:11.000Z
|
from options import TestOptions
import torch
from models import TETGAN
from utils import load_image, to_data, to_var, visualize, save_image
import os
#os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def main():
# parse options
parser = TestOptions()
opts = parser.parse()
# data loader
print('--- load data ---')
style = load_image(opts.style_name)
if opts.gpu != 0:
style = to_var(style)
if opts.c2s == 1:
content = load_image(opts.content_name, opts.content_type)
if opts.gpu != 0:
content = to_var(content)
# model
print('--- load model ---')
tetGAN = TETGAN()
tetGAN.load_state_dict(torch.load(opts.model))
if opts.gpu != 0:
tetGAN.cuda()
tetGAN.eval()
print('--- testing ---')
if opts.c2s == 1:
result = tetGAN(content, style)
else:
result = tetGAN.desty_forward(style)
if opts.gpu != 0:
result = to_data(result)
print('--- save ---')
# directory
result_filename = os.path.join(opts.result_dir, opts.name)
if not os.path.exists(opts.result_dir):
os.mkdir(opts.result_dir)
save_image(result[0], result_filename)
if __name__ == '__main__':
main()
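# A hedged sketch of the CPU (opts.gpu == 0) destylization path from main()
# above, with hypothetical file paths; c2s == 0 takes the desty_forward branch:
import torch
from models import TETGAN
from utils import load_image, save_image

tetgan = TETGAN()
tetgan.load_state_dict(torch.load('../save/tetgan.ckpt'))  # hypothetical checkpoint
tetgan.eval()
style = load_image('../data/style/1.png')  # hypothetical style image
result = tetgan.desty_forward(style)       # remove the text effect
save_image(result[0], '../output/result.png')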
| 25.770833
| 68
| 0.609539
| 161
| 1,237
| 4.490683
| 0.329193
| 0.049793
| 0.049793
| 0.055325
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010881
| 0.257074
| 1,237
| 47
| 69
| 26.319149
| 0.775843
| 0.066289
| 0
| 0.166667
| 0
| 0
| 0.06087
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0.138889
| 0
| 0.166667
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
66ce81273371c8d4fdeb7dac39c7d81c55ecac89
| 5,962
|
py
|
Python
|
EQUATIONS/FOR_RESOLUTION_STUDY/BuoyancyResolutionStudy.py
|
mmicromegas/ransX
|
2faaa786e00cfd14dce0e18f0793cd0252428d2a
|
[
"BSD-2-Clause"
] | 4
|
2019-04-22T11:43:47.000Z
|
2020-09-16T00:28:15.000Z
|
EQUATIONS/FOR_RESOLUTION_STUDY/BuoyancyResolutionStudy.py
|
mmicromegas/ransX
|
2faaa786e00cfd14dce0e18f0793cd0252428d2a
|
[
"BSD-2-Clause"
] | 34
|
2019-07-01T09:11:00.000Z
|
2022-03-30T13:35:43.000Z
|
EQUATIONS/FOR_RESOLUTION_STUDY/BuoyancyResolutionStudy.py
|
mmicromegas/ransX
|
2faaa786e00cfd14dce0e18f0793cd0252428d2a
|
[
"BSD-2-Clause"
] | 1
|
2020-09-16T00:28:17.000Z
|
2020-09-16T00:28:17.000Z
|
import numpy as np
from scipy import integrate
import matplotlib.pyplot as plt
from UTILS.Calculus import Calculus
from UTILS.SetAxisLimit import SetAxisLimit
from UTILS.Tools import Tools
from UTILS.Errors import Errors
import sys
# Theoretical background: https://arxiv.org/abs/1401.5176
# Mocak, Meakin, Viallet, Arnett, 2014, Compressible Hydrodynamic Mean-Field
# Equations in Spherical Geometry and their Application to Turbulent Stellar
# Convection Data
class BuoyancyResolutionStudy(Calculus, SetAxisLimit, Tools, Errors, object):
def __init__(self, filename, ig, ieos, intc, data_prefix):
super(BuoyancyResolutionStudy, self).__init__(ig)
# load data to list of structured arrays
eht = []
for ffile in filename:
eht.append(self.customLoad(ffile))
# declare data lists
xzn0, nx, ny, nz, xznr, xznl = [], [], [], [], [], []
dd, pp, gg, gamma1, gamma2 = [], [], [], [], []
dlnrhodr, dlnpdr, dlnrhodrs, nsq, br, dx = [], [], [], [], [], []
for i in range(len(filename)):
# load grid
xzn0.append(np.asarray(eht[i].item().get('xzn0')))
xznl.append(np.asarray(eht[i].item().get('xznl')))
xznr.append(np.asarray(eht[i].item().get('xznr')))
nx.append(np.asarray(eht[i].item().get('nx')))
ny.append(np.asarray(eht[i].item().get('ny')))
nz.append(np.asarray(eht[i].item().get('nz')))
# pick specific Reynolds-averaged mean fields according to:
# https://github.com/mmicromegas/ransX/blob/master/DOCS/ransXimplementationGuide.pdf
dd.append(np.asarray(eht[i].item().get('dd')[intc]))
pp.append(np.asarray(eht[i].item().get('pp')[intc]))
gg.append(np.asarray(eht[i].item().get('gg')[intc]))
# override gamma for ideal gas eos (need to be fixed in PROMPI later)
if ieos == 1:
cp = self.getRAdata(eht[i], 'cp')[intc]
cv = self.getRAdata(eht[i], 'cv')[intc]
gamma1.append(cp / cv) # gamma1,gamma2,gamma3 = gamma = cp/cv Cox & Giuli 2nd Ed. page 230, Eq.9.110
gamma2.append(cp / cv) # gamma1,gamma2,gamma3 = gamma = cp/cv Cox & Giuli 2nd Ed. page 230, Eq.9.110
else:
gamma1.append(np.asarray(eht[i].item().get('gamma1')[intc]))
gamma2.append(np.asarray(eht[i].item().get('gamma2')[intc]))
dlnrhodr.append(self.deriv(np.log(dd[i]), xzn0[i]))
dlnpdr.append(self.deriv(np.log(pp[i]), xzn0[i]))
dlnrhodrs.append((1. / gamma1[i]) * dlnpdr[i])
nsq.append(gg[i] * (dlnrhodr[i] - dlnrhodrs[i]))
dx.append(xznr[i] - xznl[i])
b = []
# print(nsq[0],nx[0],int(nx[0]))
for i in range(len(filename)):
br = np.zeros(int(nx[i]))
for ii in range(0, int(nx[i])):
nsqf = nsq[i]
dxf = dx[i]
br[ii] = br[ii - 1] + nsqf[ii] * dxf[ii]
# print(i,ii)
b.append(br)
# share data globally
self.data_prefix = data_prefix
self.xzn0 = xzn0
self.nx = nx
self.ny = ny
self.nz = nz
self.b = b
self.ig = ig
def plot_buoyancy(self, LAXIS, xbl, xbr, ybu, ybd, ilg):
"""Plot buoyancy in the model"""
if (LAXIS != 2):
print("ERROR(BuoyancyResolutionStudy.py): Only LAXIS=2 is supported.")
sys.exit()
# load x GRID
grd = self.xzn0
# load DATA to plot
plt1 = self.b
nx = self.nx
ny = self.ny
nz = self.nz
# find maximum resolution data
grd_maxres = self.maxresdata(grd)
plt1_maxres = self.maxresdata(plt1)
plt_interp = []
for i in range(len(grd)):
plt_interp.append(np.interp(grd_maxres, grd[i], plt1[i]))
# create FIGURE
plt.figure(figsize=(7, 6))
# format AXIS, make sure it is exponential
plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
plt10_tmp = plt1[0]
plt11_tmp = plt1[0]
plt1_foraxislimit = plt1[0]
plt1max = np.max(plt1[0])
for plt1i in plt1:
if np.max(plt1i) > plt1max:
plt1max = np.max(plt1i)
plt1_foraxislimit = plt1i
# set plot boundaries
to_plot = [plt1_foraxislimit]
self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
# plot DATA
plt.title('Buoyancy')
for i in range(len(grd)):
plt.plot(grd[i], plt1[i], label=str(self.nx[i]) + ' x ' + str(self.ny[i]) + ' x ' + str(self.nz[i]))
print("[WARNING] (BuoyancyResolutionStudy.py): convective boundary markers taken from 256c run, tavg = 1500 secs")
# taken from 256cubed, tavg 1500 sec
bconv = 4.1e8
tconv = 9.7e8
# convective boundary markers
plt.axvline(bconv, linestyle='--', linewidth=0.7, color='k')
plt.axvline(tconv, linestyle='--', linewidth=0.7, color='k')
# define and show x/y LABELS
if self.ig == 1:
setxlabel = r"x (cm)"
setylabel = r"$buoyancy$"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
elif self.ig == 2:
setxlabel = r"r (cm)"
setylabel = r"$buoyancy$"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
# show LEGEND
plt.legend(loc=ilg, prop={'size': 18})
# display PLOT
plt.show(block=False)
# save PLOT
plt.savefig('RESULTS/' + self.data_prefix + 'mean_buoyancy.png')
# find data with maximum resolution
def maxresdata(self, data):
tmp = 0
for idata in data:
if idata.shape[0] > tmp:
data_maxres = idata
else:
tmp = idata.shape[0]
return data_maxres
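# The buoyancy profile built in __init__ is a running integral of N^2 over the
# radial cells: br[ii] = br[ii-1] + nsq[ii] * dx[ii]. A minimal sketch with
# synthetic inputs, showing the loop is equivalent to a cumulative sum:
import numpy as np

nsq = np.random.uniform(-1e-6, 1e-6, size=64)  # synthetic N^2 profile (1/s^2)
dx = np.full(64, 1e7)                          # synthetic cell widths (cm)
b_loop = np.zeros(64)
for ii in range(64):
    b_loop[ii] = b_loop[ii - 1] + nsq[ii] * dx[ii]
b_vec = np.cumsum(nsq * dx)
assert np.allclose(b_loop, b_vec)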
| 32.939227
| 122
| 0.548306
| 767
| 5,962
| 4.219035
| 0.324641
| 0.016069
| 0.050989
| 0.061187
| 0.226823
| 0.203956
| 0.174289
| 0.073548
| 0.073548
| 0.073548
| 0
| 0.028919
| 0.309795
| 5,962
| 180
| 123
| 33.122222
| 0.757473
| 0.176954
| 0
| 0.110092
| 0
| 0
| 0.058944
| 0.012939
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027523
| false
| 0
| 0.073395
| 0
| 0.119266
| 0.018349
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
66d42f1fdcd91d122cd938babcc3fe924510d04e
| 2,147
|
py
|
Python
|
src/admin/godmode/actions/base.py
|
aimanow/sft
|
dce87ffe395ae4bd08b47f28e07594e1889da819
|
[
"Apache-2.0"
] | 280
|
2016-07-19T09:59:02.000Z
|
2022-03-05T19:02:48.000Z
|
godmode/actions/base.py
|
YAR-SEN/GodMode2
|
d8a79b45c6d8b94f3d2af3113428a87d148d20d0
|
[
"WTFPL"
] | 3
|
2016-07-20T05:36:49.000Z
|
2018-12-10T16:16:19.000Z
|
godmode/actions/base.py
|
YAR-SEN/GodMode2
|
d8a79b45c6d8b94f3d2af3113428a87d148d20d0
|
[
"WTFPL"
] | 20
|
2016-07-20T10:51:34.000Z
|
2022-01-12T23:15:22.000Z
|
import json
from flask import g, request, render_template
from flask.views import View
from godmode import logging
from godmode.acl import ACL
from godmode.audit_log import audit_log
from godmode.exceptions import AccessDenied
log = logging.getLogger(__name__)
class BaseAction(View):
name = None
title = None
acl = ACL.ADMIN
enable_log = True
style = ""
policy = None
stay_on_page = False
item_limit = None
def __init__(self, app, model=None, view=None):
log.info("Init action: {}".format(self.__class__.__name__))
self.app = app
self.model = model
self.view = view
self.policy = "{}.{}".format(self.view.policy, self.name)
log.info(self.policy)
def url(self):
return
def dispatch_request(self, *args, **kwargs):
has_access = ACL.has_access(g.user, self)
if not has_access:
raise AccessDenied(message="You don't have access to this page.")
audit_log(
user=g.user,
model=self.model,
ids=kwargs.get("id") or request.args.get("ids"),
action=self.name
)
return self.run(*args, **kwargs)
def run(self, *args, **kwargs):
item_id = kwargs.get("id", None)
if item_id:
return self.do_item_action(*args, **kwargs)
ids = request.args.get("ids")
if not ids:
return json.dumps({
"remove_rows": False
})
id_list = ids.split(",")
if self.item_limit:
id_list = id_list[:self.item_limit]
for item_id in id_list:
try:
item_id = int(item_id)
except (ValueError, TypeError):
continue
kwargs["id"] = item_id
self.do_item_action(*args, **kwargs)
return json.dumps({
"remove_rows": True
})
def render_form(self, *args, **kwargs):
return render_template("actions/button_action.html", url=self.name, button_label="Submit")
def do_item_action(self, *args, **kwargs):
raise NotImplementedError()
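# A hedged sketch of a concrete action built on BaseAction; do_item_action is
# the only required override (the action and its model API are hypothetical):
class BanUserAction(BaseAction):
    name = "ban"
    title = "Ban"
    acl = ACL.ADMIN
    item_limit = 50  # cap batch size, honoured by run() above

    def do_item_action(self, *args, **kwargs):
        item_id = kwargs.get("id")
        # hypothetical model API; the real lookup depends on the admin's models
        item = self.model.get(item_id)
        item.update(is_banned=True)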
| 26.182927
| 98
| 0.583605
| 268
| 2,147
| 4.481343
| 0.324627
| 0.058285
| 0.046628
| 0.02831
| 0.084929
| 0.043297
| 0
| 0
| 0
| 0
| 0
| 0
| 0.305077
| 2,147
| 81
| 99
| 26.506173
| 0.80496
| 0
| 0
| 0.063492
| 0
| 0
| 0.058221
| 0.01211
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.111111
| 0.031746
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
66d880a9b64fd73b407a720c9fa6817d2609e5bf
| 16,001
|
py
|
Python
|
forever/Warframe.py
|
dss285/4ever
|
bd6f70f92d76d43342da401562f2c504adaf3867
|
[
"MIT"
] | null | null | null |
forever/Warframe.py
|
dss285/4ever
|
bd6f70f92d76d43342da401562f2c504adaf3867
|
[
"MIT"
] | null | null | null |
forever/Warframe.py
|
dss285/4ever
|
bd6f70f92d76d43342da401562f2c504adaf3867
|
[
"MIT"
] | null | null | null |
import discord
import asyncio
import time
import aiohttp
import re
import pathlib
import os
import json
from bs4 import BeautifulSoup
from datetime import datetime
from models.UpdatedMessage import UpdatedMessage
from models.EmbedTemplate import EmbedTemplate
from models.BotMention import BotMention
from forever import Utilities
class SolSystem():
class SolPlanet:
def __init__(self, id, name):
self.id = id
self.name = name
class SolNode:
def __init__(self, id, name, planet):
self.id = id
self.name = name
self.planet = planet
class DropTables():
def __init__(self) -> None:
self.data = {}
self.time_updated = 0
self.interval = 86400
self.session = None
async def getData(self,):
xx = time.time()
if xx - self.time_updated > self.interval: # 24h
self.time_updated = time.time()
if self.session:
async with self.session.get("https://n8k6e2y6.ssl.hwcdn.net/repos/hnfvc0o3jnfvc873njb03enrf56.html") as r:
if r.status==200:
parsing = await r.text()
reg = re.findall("<h3 id=\"(\w+)\">(.*?)<\/h3>\s*<table>([\s\S]*?)<\/table>", parsing, re.MULTILINE|re.DOTALL)
for i in reg:
parser = BeautifulSoup(i[2], 'html.parser')
table_rows = parser.find_all('tr')
self.data[i[0]] = {}
self.data[i[0]]["title"] = i[1].replace(":", "")
self.data[i[0]]["data"] = []
tmp = {}
if i[0] == "missionRewards" or i[0] == "keyRewards" or i[0] == "transientRewards":
tmp_mission = None
tmp_rotation = None
for x in table_rows:
text = x.get_text()
if x.select('th') and "Rotation" not in text:
tmp_mission = text
tmp_rotation = None
tmp[tmp_mission] = {}
elif "Rotation" in text:
tmp_rotation = text
tmp[tmp_mission][tmp_rotation] = []
else:
if tmp_rotation:
tmp[tmp_mission][tmp_rotation].append(text)
elif "data" in tmp[tmp_mission]:
tmp[tmp_mission]["data"].append(text)
else:
tmp[tmp_mission]["data"] = []
tmp[tmp_mission]["data"].append(text)
self.data[i[0]]["data"] = tmp
elif i[0] == "relicRewards":
relicname = None
rarity = None
for x in table_rows:
text = x.get_text()
if "Relic" in text:
relic_match = re.match("((?:Axi|Neo|Meso|Lith|Requiem)\s\w{0,3}\d{0,2}\s?Relic)\s\((Radiant|Exceptional|Flawless|Intact)\)", text)
if relic_match.group(1) in tmp:
if relic_match.group(2) not in tmp[relic_match.group(1)]:
tmp[relic_match.group(1)][relic_match.group(2)] = []
rarity = relic_match.group(2)
else:
tmp[relic_match.group(1)] = {}
tmp[relic_match.group(1)][relic_match.group(2)] = []
rarity = relic_match.group(2)
relicname = relic_match.group(1)
else:
tmp[relicname][rarity].append(text)
elif i[0] == "sortieRewards":
tmp = []
for x in table_rows:
text = x.get_text()
if not x.select('th'):
tmp.append(text)
elif i[0] == "cetusRewards" or i[0] == "solarisRewards" or i[0] == "deimosRewards":
tmp = {}
bounty = None
stage = None
rotation = None
for x in table_rows:
text = x.get_text()
if x.select('th'):
if "Bounty" in text:
bounty = text
tmp[bounty] = {}
elif "Rotation" in text:
rotation = text
tmp[bounty][rotation] = {}
elif "Stage" in text:
stage = text
tmp[bounty][rotation][stage] = []
else:
tmp[bounty][rotation][stage].append(text)
elif i[0] in set("modByAvatar", "blueprintByAvatar", "resourceByAvatar", "sigilByAvatar", "additionalItemByAvatar"):
drop = None
for x in table_rows:
text = x.get_text()
itemtitles = re.match(r"^([\s\S]+?)(?:Additional Item|Mod|Resource|Blueprint\/Item|Sigil) Drop Chance: (\d{0,3}\.\d{0,3})\%$", text)
if itemtitles:
drop = itemtitles.group(1)
tmp[drop] = {}
tmp[drop]["chance"] = itemtitles.group(2)
tmp[drop]["data"] = []
else:
tmp[drop]["data"].append(text)
elif i[0] in set("modByDrop", "blueprintByDrop", "resourceByDrop"):
drop = None
for x in table_rows:
text = x.get_text()
if x.select('th'):
if "Source" not in text:
drop = text
tmp[drop] = []
else:
tmp[drop].append(text)
self.data[i[0]]["data"] = tmp
def searchKey(self, key, searched_value):
vals = []
for i in self.data[key]["data"]:
if i.lower().startswith(searched_value.lower()):
vals.append(i)
return vals
def relicSearch(self, searched_value):
vals = self.searchKey("relicRewards", searched_value)
if len(vals) == 1:
em = EmbedTemplate(title=self.data["relicRewards"]["title"], description=vals[0])
for i, j in self.data["relicRewards"]["data"][vals[0]].items():
em.add_field(name=i, value="\n".join(j))
return em
else:
return EmbedTemplate(title=self.data["relicRewards"]["title"], description="\n".join(vals))
class CetusStatus:
def __init__(self, expiry):
self.expiry = expiry
self.start = self.expiry-150*60
def isNight(self,):
if self.minutes_left() <= 50:
return True
else:
return False
def seconds_left(self):
return self.expiry-time.time()
def minutes_left(self,):
return self.seconds_left()//60
def __str__(self,):
return "Night" if self.isNight() else "Day"
class CetusMessage(UpdatedMessage):
def __init__(self, message, mention, client):
self.mention = mention
self.notify_message = None
self.lock = False
self.client = client
super().__init__(message, "poe")
async def refresh(self, cetus):
em = EmbedTemplate(title="Plains of Eidolon", timestamp=datetime.utcnow())
em.add_field(name="Status", value=str(cetus))
em.add_field(name="Time until new rotation", value=f"{cetus.minutes_left() if cetus else 0.00:.0f} min")
await self.message.edit(embed=em)
if not self.lock:
if cetus.isNight() and self.mention:
self.lock = True
self.notify_message = await self.message.channel.send(f"{self.mention.name} - {self.mention.role.mention}")
self.client.loop.call_later(cetus.seconds_left()+60, self.callback)
def callback(self,):
self.client.loop.create_task(self.remove_message())
self.lock = False
async def remove_message(self,):
await self.notify_message.delete()
self.notify_message = None
class FissureItem:
def __init__(self, oid, start_time, expiry_time, mission_type, node, era):
self.start_time = start_time
self.expiry_time = expiry_time
self.mission_type = mission_type
self.node = node
self.era = era
def expiresIn(self,):
return self.expiry_time-time.time()
def __str__(self,):
if type(self.node) == str:
tmp = f"{self.node.title()}, {self.node.title()}"
return f"{tmp}\n{(f'Expires on {Utilities.ts2string(self.expiry_time)}')}\nExpires in {self.expiresIn()//60:.0f} min"
tmp = self.node.planet.name.title()+", "+self.node.name.title()
return f"{tmp}\n{(f'Expires on {Utilities.ts2string(self.expiry_time)}')}\nExpires in {self.expiresIn()//60:.0f} min"
class FissureMessage(UpdatedMessage):
def __init__(self, message, mentions):
super().__init__(message, "fissures")
self.mentions = mentions
async def refresh(self, fissures):
em = EmbedTemplate(title="Fissures", timestamp=datetime.utcnow())
for i in fissures:
em.add_field(name=f"{i.era} {i.mission_type}", value=str(i))
await self.message.edit(embed=em)
class InvasionItem:
def __init__(self, attacker, defender, node, starttime, status):
self.attacker = attacker
self.defender = defender
self.start_time = starttime
self.node = node
self.status = status
class InvasionOpp:
#0 DEFENDING
#1 ATTACKING
def __init__(self, faction, rewards):
self.faction = faction
self.rewards = rewards
class InvasionMessage(UpdatedMessage):
def __init__(self, message, mentions):
super().__init__(message, "invasions")
self.mentions = mentions
async def refresh(self, invasions):
em = EmbedTemplate(title="Invasions", timestamp=datetime.utcnow())
for i in invasions:
vals = []
if type(i.node) == str:
vals.append(f"{i.node.title()}, {i.node.title()}")
else:
vals.append(f"{i.node.planet.name.title()}, {i.node.name.title()}")
vals.append(i.start_time)
vals.append(f"{i.defender.faction} vs {i.attacker.faction}"),
vals.append(i.status)
em.add_field(
name=f"{i.defender.rewards} vs {i.attacker.rewards}",
value=f"{vals[0]}\n{vals[1]}\n{vals[2]}\n{vals[3]}\n\u200b")
await self.message.edit(embed=em)
class NightwaveItem:
def __init__(self, start_time, expiry_time, name, daily=False):
self.start_time = start_time
self.expiry_time = expiry_time
self.name = name
self.daily = daily
class NightwaveMessage(UpdatedMessage):
def __init__(self, message):
super().__init__(message, "nightwave")
async def refresh(self, nightwave_data):
em = EmbedTemplate(title="Nightwave", timestamp=datetime.utcnow())
for i in nightwave_data:
em.add_field(name=i.name, value=(Utilities.ts2string(i.start_time+(60*120))+"\n\n"))
await self.message.edit(embed=em)
class Sorties:
class SortieItem:
def __init__(self, start_time, expiry_time, missions):
self.start_time = start_time
self.expiry_time = expiry_time
self.missions = missions
class SortieMission:
def __init__(self, missionType, node, modifier):
self.mission_type = missionType
self.node = node
self.modifier = modifier
def __str__(self,):
if type(self.node) == str:
return f"{self.mission_type}\n{self.node}\n{self.modifier}"
return f"{self.mission_type}\n{(f'{self.node.name.title()}, {self.node.planet.name.title()}')}\n{self.modifier}"
class SortieMessage(UpdatedMessage):
def __init__(self, message):
super().__init__(message, "sorties")
async def refresh(self, sortie):
em = EmbedTemplate(title="Sorties", timestamp=datetime.utcnow())
count = 1
for i in sortie.missions:
em.add_field(name=f"Mission {count}", value=str(i))
count+=1
await self.message.edit(embed=em)
class Worldstate():
def __init__(self,):
self.runtime = {}
self.fissure_eras = {
"VoidT1" : ["Lith", 1],
"VoidT2" : ["Meso", 2],
"VoidT3" : ["Neo", 3],
"VoidT4" : ["Axi", 4],
"VoidT5" : ["Requiem", 5]
}
self.session = None
self.initRuntime()
def initRuntime(self,):
self.runtime.clear()
self.runtime["invasions"] = []
self.runtime["nightwave"] = []
self.runtime["fissures"] = []
self.runtime["sorties"] = None
self.runtime["poe"] = None
def getInvasions(self, parsing, data_runtime):
for invasion in parsing["Invasions"]:
if not invasion["Completed"]:
start_time = int(invasion["Activation"]["$date"]["$numberLong"])//1000
node = next((x for x in data_runtime["warframe"]["translate"]["solsystem"]["nodes"] if x.id == invasion["Node"]), invasion["Node"])
attack_reward = "N/A"
defender_reward = "N/A"
reward_item = invasion["DefenderReward"]["countedItems"][0]["ItemType"]
translate = data_runtime["warframe"]["translate"]["items"]
defender_reward = f"{invasion['DefenderReward']['countedItems'][0]['ItemCount']}x {translate[reward_item] if reward_item in translate else reward_item}"
if invasion["AttackerReward"]:
reward_item = invasion["AttackerReward"]["countedItems"][0]["ItemType"]
attack_reward = f"{invasion['AttackerReward']['countedItems'][0]['ItemCount']}x { translate[reward_item] if reward_item in translate else reward_item}"
attack_faction = invasion["AttackerMissionInfo"]["faction"].strip("FC_")
defender_faction = invasion["DefenderMissionInfo"]["faction"].strip("FC_")
goal = invasion["Goal"]*2
current = invasion["Count"]+invasion["Goal"]
fraction_attacker = round(current/goal*100,1)
fraction_defender = round((goal-current)/goal*100,1)
attacker = InvasionOpp(attack_faction, attack_reward)
defender = InvasionOpp(defender_faction, defender_reward)
self.runtime["invasions"].append(InvasionItem(attacker, defender, node, Utilities.ts2string(start_time), f"{fraction_defender}% vs {fraction_attacker}%"))
def getNightwave(self, parsing, data_runtime):
translate = data_runtime["warframe"]["translate"]
for nightwave in parsing["SeasonInfo"]["ActiveChallenges"]:
challenge = translate["nightwave"][nightwave["Challenge"]] if nightwave["Challenge"] in translate["nightwave"] else nightwave["Challenge"]
daily = nightwave["Daily"] if "Daily" in nightwave else False
start_time = int(nightwave["Activation"]["$date"]["$numberLong"])//1000
expiry_time = int(nightwave["Expiry"]["$date"]["$numberLong"])//1000
self.runtime["nightwave"].append(NightwaveItem(start_time, expiry_time, challenge, daily))
def getFissure(self, parsing, data_runtime):
translate = data_runtime["warframe"]["translate"]
for fissure in sorted(parsing["ActiveMissions"], key=lambda item: self.fissure_eras[item["Modifier"]][1]):
oid = fissure["_id"]["$oid"]
start_time = int(fissure["Activation"]["$date"]["$numberLong"])//1000
expiry_time = int(fissure["Expiry"]["$date"]["$numberLong"])//1000
mission_type = translate["missions"][fissure["MissionType"]].title() if fissure["MissionType"] in translate["missions"] else fissure["MissionType"]
node = next((x for x in translate["solsystem"]["nodes"] if x.id == fissure["Node"]), fissure["Node"])
era = self.fissure_eras[fissure["Modifier"]][0]
self.runtime["fissures"].append(FissureItem(oid, start_time, expiry_time, mission_type, node, era))
def getSorties(self, parsing, data_runtime):
if parsing["Sorties"]:
start_time = int(parsing["Sorties"][0]["Activation"]["$date"]["$numberLong"])//1000
expiry_time = int(parsing["Sorties"][0]["Expiry"]["$date"]["$numberLong"])//1000
missionsParse = parsing["Sorties"][0]["Variants"]
missions = []
translate = data_runtime["warframe"]["translate"]
for i in missionsParse:
mission_type = translate["missions"][i["missionType"]].title() if i["missionType"] in translate["missions"] else i["missionType"].title()
node = next((x for x in translate["solsystem"]["nodes"] if x.id == i["node"]), i["node"])
modifier = translate["sorties"][i["modifierType"]].title() if i["modifierType"] in translate["sorties"] else i["modifierType"]
missions.append(Sorties.SortieMission(mission_type, node, modifier))
self.runtime["sorties"] = Sorties.SortieItem(start_time, expiry_time, missions)
def getCetus(self, parsing, data_runtime):
expiry_time = next(((int(x["Expiry"]["$date"]["$numberLong"])//1000) for x in parsing["SyndicateMissions"] if x["Tag"] == "CetusSyndicate"), None)
if expiry_time:
self.runtime["poe"] = CetusStatus(expiry_time)
async def get_data(self, data_runtime):
self.initRuntime()
if self.session:
async with self.session.get("http://content.warframe.com/dynamic/worldState.php") as r:
if r.status==200:
parsing = await r.text()
parsing = json.loads(parsing)
if "Invasions" in parsing:
self.getInvasions(parsing, data_runtime)
if "SeasonInfo" in parsing and parsing["SeasonInfo"]:
self.getNightwave(parsing, data_runtime)
if "ActiveMissions" in parsing:
self.getFissure(parsing, data_runtime)
if "Sorties" in parsing:
self.getSorties(parsing, data_runtime)
if "SyndicateMissions" in parsing:
self.getCetus(parsing, data_runtime)
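# A hedged sketch of driving Worldstate from an asyncio loop. The translate
# tables in data_runtime are stubbed empty here, so untranslated ids fall
# through as-is (the real tables are loaded elsewhere in the bot):
import asyncio
import aiohttp

async def demo():
    ws = Worldstate()
    data_runtime = {"warframe": {"translate": {
        "solsystem": {"nodes": []}, "items": {}, "nightwave": {},
        "missions": {}, "sorties": {},
    }}}
    async with aiohttp.ClientSession() as session:
        ws.session = session
        await ws.get_data(data_runtime)
    print(len(ws.runtime["fissures"]), "active fissures")

asyncio.run(demo())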
| 41.778068
| 159
| 0.645272
| 2,026
| 16,001
| 4.965943
| 0.15153
| 0.021867
| 0.017493
| 0.009741
| 0.302356
| 0.258026
| 0.215088
| 0.161515
| 0.134082
| 0.114999
| 0
| 0.013417
| 0.1988
| 16,001
| 382
| 160
| 41.887435
| 0.771373
| 0.001562
| 0
| 0.244624
| 0
| 0.021505
| 0.194086
| 0.055481
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086022
| false
| 0
| 0.037634
| 0.010753
| 0.206989
| 0.008065
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|