ec7e716c12e1ea7abf3037994eb6b405105174c5 | 3,168 | py | Python | templates/scrape_mars.py | eilishlboyd/web-scraping-challenge | 9159d1fb3a8b1f2c9cbf6b82827e0eb5c6394992 | ["ADSL"] | stars: null | issues: null | forks: null
#!/usr/bin/env python
# coding: utf-8
# In[97]:
#Imports & Dependencies
get_ipython().system('pip install selenium')
get_ipython().system('pip install splinter')
from splinter import Browser
from bs4 import BeautifulSoup
executable_path = {"executable_path": "/usr/local/bin/chromedriver"}
browser = Browser("chrome", **executable_path, headless=False)
# In[98]:
url = "https://mars.nasa.gov/news/"
browser.visit(url)
# In[99]:
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
# In[100]:
article = soup.find("div", class_="list_text")
news_p = article.find("div", class_="article_teaser_body").text
news_title = article.find("div", class_="content_title").text
news_date = article.find("div", class_="list_date").text
print(news_date)
print(news_title)
print(news_p)
# In[106]:
url2 = "https://jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser.visit(url2)
# Scrape the browser into soup and use soup to find the image of mars
# Save the image url to a variable called `img_url`
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
image = soup.find("img", class_="thumb")["src"]
img_url = "https://jpl.nasa.gov"+image
featured_image_url = img_url
# Use the requests library to download and save the image from the `img_url` above
import requests
import shutil
response = requests.get(img_url, stream=True)
with open('img.jpg', 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
# Display the image with IPython.display
from IPython.display import Image
Image(url='img.jpg')
# In[107]:
import tweepy
# Twitter API Keys
from key_vault import (consumer_key,
consumer_secret,
access_token,
access_token_secret)
# Setup Tweepy API Authentication
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, parser=tweepy.parsers.JSONParser())
target_user = "marswxreport"
full_tweet = api.user_timeline(target_user , count = 1)
mars_weather=full_tweet[0]['text']
mars_weather
# In[108]:
url3 = "http://space-facts.com/mars/"
browser.visit(url3)
# In[109]:
import pandas as pd
facts_url = "https://space-facts.com/mars/"
browser.visit(facts_url)
mars_data = pd.read_html(facts_url)
mars_data = pd.DataFrame(mars_data[0])
mars_facts = mars_data.to_html(header = False, index = False)
print(mars_facts)
# In[110]:
url4 = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(url4)
# In[111]:
import time
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
mars_hemis=[]
# In[112]:
for i in range (4):
time.sleep(5)
images = browser.find_by_tag('h3')
images[i].click()
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
partial = soup.find("img", class_="wide-image")["src"]
img_title = soup.find("h2",class_="title").text
img_url = 'https://astrogeology.usgs.gov'+ partial
dictionary={"title":img_title,"img_url":img_url}
mars_hemis.append(dictionary)
browser.back()
# In[113]:
print(mars_hemis)
# In[ ]:
# In[ ]:
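# Illustrative addition (not part of the original notebook export): the values scraped
# above could be collected into a single dict, e.g. for returning from a Flask route
# (this scraping-challenge project appears to feed a Flask app, but that is an assumption).
# Only names already defined in this file are used below.
mars_info = {
    "news_title": news_title,
    "news_p": news_p,
    "featured_image_url": featured_image_url,
    "mars_weather": mars_weather,
    "mars_facts": mars_facts,
    "hemispheres": mars_hemis,
}
print(mars_info)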
ec863bf797d4f68a78c20b0aaf1e7087f38bcc26 | 13,679 | py | Python | scripts/download-data.py | tamslo/koala | 9f8bb0e201bd9a773752f1fd70ecbfc2fe98eb5c | ["MIT"] | stars: null | issues: null | forks: null
#!/usr/bin/env python3
import sys, os, shutil, json, yaml
from time import localtime
ONLY_SIMULATED = False
ONLY_GIAB = True
# Make import from parent directory possible
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import modules.file_utils as file_utils
with open("constants.yml", "r") as constants_file:
constants = yaml.load(constants_file)
# Directory Paths
reference_directory = "data/references/"
datasets_directory = "data/datasets/"
def log_task_start(item, path):
started_tasks.append(path)
print("Downloading {}...".format(item), flush=True)
def log_task_end(item, path):
finished_tasks.append(path)
print("Downloaded {}".format(item), flush=True)
def log_data_present(item):
print("{} already present".format(item), flush=True)
####################
# REFERENCE GENOMES
####################
# Add new tools needed to download reference genomes here
tools = ["twoBitToFa"]
# Constants
fasta_file_ending = ".fa"
fastq_file_ending = ".fastq"
rsync_uri = "rsync://hgdownload.soe.ucsc.edu/genome/admin/exe/linux.x86_64/"
started_tasks = []
finished_tasks = []
def get_human_genome(genome_id, file_path):
url = "http://hgdownload.soe.ucsc.edu/goldenPath/"
url += "{0}/bigZips/{0}.2bit".format(genome_id)
two_bit_path = file_path + ".2bit"
started_tasks.append(two_bit_path)
file_utils.download(url, two_bit_path)
finished_tasks.append(two_bit_path)
# Convert .2bit file to .fa
print("Extracting {} from 2bit file...".format(genome_id), flush=True)
os.system("chmod +x {0}twoBitToFa && {0}twoBitToFa {1} {2}".format(
reference_directory,
two_bit_path,
file_path
))
file_utils.delete(two_bit_path)
def get_p_falciparum(genome_id, file_path):
url = "http://bp1.s3.amazonaws.com/malaria.tar.bz2"
download_path = reference_directory + "malaria.tar.bz2"
file_utils.download(url, download_path)
print("Unzipping {}...".format(genome_id), flush=True)
unzipped_directory = file_utils.unzip(download_path)
os.rename(unzipped_directory + "/genome_sequence_pfal.fa", file_path)
file_utils.delete(download_path)
file_utils.delete(unzipped_directory)
# Add new reference genomes with options here
genomes = {
"hg19": {
"getter": get_human_genome,
"name": "Human (hg19)",
"source": "http://hgdownload.cse.ucsc.edu/downloads.html#human"
},
"hg38": {
"getter": get_human_genome,
"name": "Human (hg38)",
"source": "http://hgdownload.cse.ucsc.edu/downloads.html#human"
},
"pfal": {
"getter": get_p_falciparum,
"name": "Malaria",
"source": "http://bioinf.itmat.upenn.edu/BEERS/bp1/datasets.php"
}
}
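# Illustrative (hypothetical) example of adding a further reference genome to the dict
# above; "mm10" is a UCSC assembly for which the same 2bit download pattern used by
# get_human_genome should apply, but the entry is only a sketch:
#
# "mm10": {
#     "getter": get_human_genome,
#     "name": "Mouse (mm10)",
#     "source": "http://hgdownload.cse.ucsc.edu/downloads.html#mouse"
# },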
def get_tools():
for tool_name in tools:
tool_path = reference_directory + tool_name
if not os.path.exists(tool_path):
log_task_start(tool_name, tool_path)
tool_uri = rsync_uri + tool_name
os.system("rsync -aPq {} {}".format(tool_uri, tool_path))
log_task_end(tool_name, tool_path)
else:
log_data_present(tool_name)
def remove_tools():
for tool_name in tools:
tool_path = reference_directory + tool_name
file_utils.delete(tool_path)
def genome_path(genome_id):
return reference_directory + genome_id + fasta_file_ending
def get_genomes():
genome_infos_path = os.path.join(reference_directory, "references.json")
genome_infos = []
if os.path.exists(genome_infos_path):
with open(genome_infos_path, "r") as genome_infos_file:
genome_infos = json.load(genome_infos_file)
for genome_id, genome_specification in genomes.items():
if ONLY_SIMULATED and genome_id not in ["hg19", "pfal"]:
print("Skipping {} (only simulated)".format(genome_specification["name"]))
continue
if ONLY_GIAB and genome_id not in ["hg38"]:
print("Skipping {} (only giab)".format(genome_specification["name"]))
continue
file_path = genome_path(genome_id)
info_path = file_path.split(fasta_file_ending)[0] + ".yml"
genome_getter = genome_specification["getter"]
if not os.path.exists(file_path):
log_task_start(genome_id, file_path)
genome_getter(genome_id, file_path)
genome_infos.append({
"id": genome_id,
"name": genome_specification["name"],
"source": genome_specification["source"]
})
with open(genome_infos_path, "w") as genome_infos_file:
genome_infos_file.write(json.dumps(genome_infos))
log_task_end(genome_id, file_path)
else:
log_data_present(genome_id)
###################
# RNASEQ DATA SETS
###################
def write_dataset_json(info):
dataset_info_path = datasets_directory + info["id"] + ".json"
info["method"] = constants["dataset"]["FILE"]
info["layout"] = constants["dataset"]["PAIRED"]
info["created"] = localtime()
info["error"] = False
with open(dataset_info_path, "w") as dataset_info_file:
json.dump(info, dataset_info_file)
def get_baruzzo(dataset, directory):
zip_name = "{}.tar.bz2".format(dataset["file_name"])
url = "http://bp1.s3.amazonaws.com/{}".format(zip_name)
download_path = directory + "/" + zip_name
file_utils.download(url, download_path)
print("Unzipping {}...".format(dataset["name"]), flush=True)
file_utils.unzip(download_path)
# Move files to /beers directory
beers_directory = directory + "/beers/"
file_utils.create_directory(beers_directory)
for file_name in os.listdir(directory):
file_path = directory + "/" + file_name
if not os.path.isdir(file_path) and not file_path == download_path:
shutil.move(file_path, beers_directory + file_name)
# Move FASTQ files to root and rename
def setup_file(direction):
file_name = "{}.{}.fa".format(dataset["id"], direction)
file_origin = beers_directory + file_name
file_destination = "{}/{}{}".format(directory, direction, fastq_file_ending)
os.rename(file_origin, file_destination)
return file_name, file_destination
forward_file_name, forward_file_path = setup_file(constants["dataset"]["FORWARD"])
reverse_file_name, reverse_file_path = setup_file(constants["dataset"]["REVERSE"])
# Move CIG file to root and rename
truth_file_name = "{}.cig".format(dataset["id"])
truth_file_path = directory + "/truth.cig"
os.rename(beers_directory + truth_file_name, truth_file_path)
file_utils.delete(download_path)
file_utils.delete(beers_directory)
write_dataset_json({
"id": dataset["id"],
"name": dataset["name"],
"readLength": "100",
"data": {
constants["dataset"]["FORWARD"]: {
"name": forward_file_name,
"path": forward_file_path,
},
constants["dataset"]["REVERSE"]: {
"name": reverse_file_name,
"path": reverse_file_path,
}
},
"evaluation": {
"type": "beers",
"truth_file": {
"name": truth_file_name,
"path": truth_file_path
}
}
})
def get_from_encode(dataset, directory):
dataset_info = {
"id": dataset["id"],
"name": dataset["name"],
"readLength": "76",
"data": {
constants["dataset"]["FORWARD"]: {},
constants["dataset"]["REVERSE"]: {}
},
"evaluation": dataset["evaluation"]
}
def get_file(file_id, direction, directory):
print("Downloading {} file...".format(direction), flush=True)
zip_name = "{}.fastq.gz".format(file_id)
url = "https://www.encodeproject.org/files/{}/@@download/{}".format(
file_id,
zip_name
)
download_path = directory + "/" + zip_name
file_utils.download(url, download_path)
print("Unzipping {} file...".format(direction), flush=True)
file_utils.unzip(download_path)
file_utils.delete(download_path)
original_name = "{}.fastq".format(file_id)
file_origin = "{}/{}".format(directory, original_name)
file_destination = "{}/{}{}".format(directory, direction, fastq_file_ending)
os.rename(file_origin, file_destination)
return original_name, file_destination
for direction, file_id in dataset["files"].items():
original_name, file_destination = get_file(file_id, direction, directory)
dataset_info["data"][direction]["name"] = original_name
dataset_info["data"][direction]["path"] = file_destination
write_dataset_json(dataset_info)
# Baruzzo Data Sets
# * id is prefix of unzipped FASTA files
# * file_name is zip name given in download url
rna_seq_data = [
{
"id": "GM12878",
"name": "GIAB Pilot Genome",
"getter": get_from_encode,
"files": {
constants["dataset"]["FORWARD"]: "ENCFF000EWJ",
constants["dataset"]["REVERSE"]: "ENCFF000EWX"
},
"evaluation": { "type": "giab" }
},
{
"id": "simulated_reads_HG19t1r1",
"getter": get_baruzzo,
"file_name": "human_t1r1",
"name": "Simulated Human T1R1"
},
# {
# "id": "simulated_reads_HG19t1r2",
# "getter": get_baruzzo,
# "file_name": "human_t1r2",
# "name": "Simulated Human T1R2"
# },
# {
# "id": "simulated_reads_HG19t1r3",
# "getter": get_baruzzo,
# "file_name": "human_t1r3",
# "name": "Simulated Human T1R3"
# },
{
"id": "simulated_reads_HG19t2r1",
"getter": get_baruzzo,
"file_name": "human_t2r1",
"name": "Simulated Human T2R1"
},
# {
# "id": "simulated_reads_HG19t2r2",
# "getter": get_baruzzo,
# "file_name": "human_t2r2",
# "name": "Simulated Human T2R2"
# },
# {
# "id": "simulated_reads_HG19t2r3",
# "getter": get_baruzzo,
# "file_name": "human_t2r3",
# "name": "Simulated Human T2R3"
# },
{
"id": "simulated_reads_HG19t3r1",
"getter": get_baruzzo,
"file_name": "human_t3r1",
"name": "Simulated Human T3R1"
},
# {
# "id": "simulated_reads_HG19t3r2",
# "getter": get_baruzzo,
# "file_name": "human_t3r2",
# "name": "Simulated Human T3R2"
# },
# {
# "id": "simulated_reads_HG19t3r3",
# "getter": get_baruzzo,
# "file_name": "human_t3r3",
# "name": "Simulated Human T3R3"
# },
{
"id": "simulated_reads_PFALt1r1",
"getter": get_baruzzo,
"file_name": "malaria_t1r1",
"name": "Simulated Malaria T1R1"
},
# {
# "id": "simulated_reads_PFALt1r2",
# "getter": get_baruzzo,
# "file_name": "malaria_t1r2",
# "name": "Simulated Malaria T1R2"
# },
# {
# "id": "simulated_reads_PFALt1r3",
# "getter": get_baruzzo,
# "file_name": "malaria_t1r3",
# "name": "Simulated Malaria T1R3"
# },
{
"id": "simulated_reads_PFALt2r1",
"getter": get_baruzzo,
"file_name": "malaria_t2r1",
"name": "Simulated Malaria T2R1"
},
# {
# "id": "simulated_reads_PFALt2r2",
# "getter": get_baruzzo,
# "file_name": "malaria_t2r2",
# "name": "Simulated Malaria T2R2"
# },
# {
# "id": "simulated_reads_PFALt2r3",
# "getter": get_baruzzo,
# "file_name": "malaria_t2r3",
# "name": "Simulated Malaria T2R3"
# },
{
"id": "simulated_reads_PFALt3r1",
"getter": get_baruzzo,
"file_name": "malaria_t3r1",
"name": "Simulated Malaria T3R1"
},
# {
# "id": "simulated_reads_PFALt3r2",
# "getter": get_baruzzo,
# "file_name": "malaria_t3r2",
# "name": "Simulated Malaria T3R2"
# },
# {
# "id": "simulated_reads_PFALt3r3",
# "getter": get_baruzzo,
# "file_name": "malaria_t3r3",
# "name": "Simulated Malaria T3R3"
# }
]
def get_datasets():
for dataset in rna_seq_data:
if ONLY_SIMULATED and not dataset["id"].startswith("simulated"):
print("Skipping {} (only simulated)".format(dataset["name"]))
continue
if ONLY_GIAB and dataset["id"] != "GM12878":
print("Skipping {} (only giab)".format(dataset["name"]))
continue
dataset_directory = datasets_directory + dataset["id"]
dataset_getter = dataset["getter"]
if not os.path.isdir(dataset_directory):
file_utils.create_directory(dataset_directory)
log_task_start(dataset["name"], dataset_directory)
dataset_getter(dataset, dataset_directory)
log_task_end(dataset["name"], dataset_directory)
else:
log_data_present(dataset["name"])
###################
# SCRIPT EXECUTION
###################
print("", flush=True)
print("Downloading data", flush=True)
print("", flush=True)
file_utils.create_directory(reference_directory)
file_utils.create_directory(datasets_directory)
try:
get_tools()
get_genomes()
get_datasets()
remove_tools()
finally:
for path in started_tasks:
if not path in finished_tasks:
print("An error occured, deleting {}".format(path))
file_utils.delete(path)
ec87b2a8d66b90a05ef4708e84704798ddc9d79f | 3,512 | py | Python | src/loadgen/locustfile.py | vlesierse/awsdemo-abshop | a0390e88c0bbb5a9f2da950c9bfed88986826911 | ["MIT-0"] | stars: null | issues: null | forks: null
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import random
from bs4 import BeautifulSoup
from locust import HttpLocust, TaskSet, task, between
def is_static_file(f):
if "/images" in f:
return True
else:
return False
def fetch_static_assets(session, response):
resource_urls = set()
soup = BeautifulSoup(response.text, "html.parser")
for res in soup.find_all(src=True):
url = res['src']
if is_static_file(url):
resource_urls.add(url)
for url in resource_urls:
session.client.get(url)
class UserBehavior(TaskSet):
def on_start(self):
self.products = [
"0983976883313",
"1051094507639",
"3103748076140",
"3377807835348",
"3480077496703",
"4618701513994",
"5147991444866",
"6392888360364",
"6464865908071",
"0000000000000", # dummy id
]
self.versions = ["v2a", "v2b", "v2b"] # Add an additional v2b to favor this version over v2a
self.client.headers = { "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.55 Safari/537.36 (LocustIO)"}
@task(2)
def index(self):
response = self.client.get("/")
fetch_static_assets(self, response)
@task(1)
def browseProduct(self):
response = self.client.get("/product/" + random.choice(self.products))
fetch_static_assets(self, response)
@task
def viewCart(self):
self.client.get("/cart")
@task
def addToCart(self):
product = random.choice(self.products)
oneclick = bool(random.getrandbits(1))
self.client.get("/product/" + product)
self.client.post("/cart", {
'product_id': product,
'type': 'oneclick' if oneclick else ''
}, cookies={"app_version": random.choice(self.versions) if oneclick else 'v1'})
@task
def checkout(self):
self.addToCart();
self.client.post("/cart/checkout", {
'name': 'Demo User',
'email': 'demo-user@example.com',
'address': '123 Road',
'zip': '80807',
'city': 'Munich',
'state': 'BY',
'country': 'Germany',
'paymentMethod': 'AmazonPay'
})
self.client.get('/cart/checkout')
self.client.post("/cart/order")
self.client.get("/cart/order")
class WebsiteUser(HttpLocust):
task_set = UserBehavior
wait_time = between(2, 10)
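# Illustrative note (not in the original file): with the pre-1.0 Locust API used here
# (HttpLocust/TaskSet), a run is typically started with something like
#   locust -f locustfile.py --host=https://<target-host>
# and then controlled from the Locust web UI (by default on http://localhost:8089).
# The exact CLI flags depend on the installed Locust version.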
ec88372c8535e15168d52d6a53fd1a5e9d041265 | 7,918 | py | Python | tests/test_templatetags.py | mixxorz/slippers | af8b8b168653b379efe78654f6801b3af317c44e | ["MIT"] | stars: 175 (2021-07-11 .. 2022-03-21) | issues: 6 (2021-08-17 .. 2022-01-25) | forks: 6 (2021-09-14 .. 2022-01-07)
from django.template import Context, Template, TemplateSyntaxError
from django.test import TestCase, override_settings
class ComponentTest(TestCase):
def test_render_inline_component(self):
template = """
{% avatar user="mixxorz" %}
"""
expected = """
<div>I am avatar for mixxorz</div>
"""
self.assertHTMLEqual(expected, Template(template).render(Context()))
def test_render_block_component(self):
template = """
{% #button %}I am button{% /button %}
"""
expected = """
<button>I am button</button>
"""
self.assertHTMLEqual(expected, Template(template).render(Context()))
def test_render_without_children(self):
template = """
{% icon_button icon="envelope" %}
{% #icon_button icon="envelope" %}Submit{% /icon_button %}
"""
expected = """
<button class="icon-button envelope"></button>
<button class="icon-button envelope">Submit</button>
"""
self.assertHTMLEqual(expected, Template(template).render(Context()))
def test_render_nested(self):
template = """
{% #card heading="I am heading" %}
{% #button %}I am button{% /button %}
{% /card %}
"""
expected = """
<div class="card">
<div class="card__header">I am heading</div>
<div class="card__body">
<button>I am button</button>
</div>
</div>
"""
self.assertHTMLEqual(expected, Template(template).render(Context()))
def test_kwargs_with_filters(self):
template = """
{% #card heading="I am heading"|upper %}
{% #button %}I am button{% /button %}
{% /card %}
"""
expected = """
<div class="card">
<div class="card__header">I AM HEADING</div>
<div class="card__body">
<button>I am button</button>
</div>
</div>
"""
self.assertHTMLEqual(expected, Template(template).render(Context()))
def test_render_as_variable(self):
template = """
{% avatar user="mixxorz" as my_avatar %}
{% #button as my_button %}I am button{% /button %}
<div>
{{ my_avatar }}
{{ my_button }}
</div>
"""
expected = """
<div>
<div>I am avatar for mixxorz</div>
<button>I am button</button>
</div>
"""
self.assertHTMLEqual(expected, Template(template).render(Context()))
def test_pass_boolean_flags(self):
template = """
{% #button disabled %}I am button{% /button %}
"""
expected = """
<button disabled>I am button</button>
"""
self.assertHTMLEqual(expected, Template(template).render(Context()))
class AttrsTagTest(TestCase):
def test_basic(self):
context = Context(
{
"type": "text",
"id": "the_id",
"name": "the_name",
}
)
template = """
<input {% attrs type id name %}>
"""
expected = """
<input type="text" id="the_id" name="the_name">
"""
self.assertHTMLEqual(expected, Template(template).render(context))
def test_boolean_values(self):
context = Context(
{
"autofocus": False,
"disabled": True,
}
)
template = """
<button {% attrs autofocus disabled %}>Click me</button>
"""
expected = """
<button disabled>Click me</button>
"""
self.assertHTMLEqual(expected, Template(template).render(context))
def test_source_name(self):
context = Context(
{
"input_type": "text",
"id": "the_id",
"name": "the_name",
}
)
template = """
<input {% attrs type=input_type id name %}>
"""
expected = """
<input type="text" id="the_id" name="the_name">
"""
self.assertHTMLEqual(expected, Template(template).render(context))
class VarTagTest(TestCase):
def test_basic(self):
template = """
{% var foo="Hello, World!" %}
<div>{{ foo }}</div>
"""
expected = """
<div>Hello, World!</div>
"""
self.assertHTMLEqual(expected, Template(template).render(Context()))
def test_value_filter(self):
template = """
{% var foo=foo|default:"Default value" %}
{% var bar="Hello, World!"|upper %}
<div>{{ foo }}</div>
<div>{{ bar }}</div>
"""
expected = """
<div>Default value</div>
<div>HELLO, WORLD!</div>
"""
self.assertHTMLEqual(expected, Template(template).render(Context()))
class MatchFilterTest(TestCase):
def test_basic(self):
context = Context({"first": "outline", "second": "ghost", "third": "square"})
template = """
<button class="{{ first|match:"outline:btn-outline,ghost:btn-ghost" }}">Click me</button>
<button class="{{ second|match:"outline:btn-outline,ghost:btn-ghost" }}">Click me</button>
<button class="{{ third|match:"outline:btn-outline,ghost:btn-ghost" }}">Click me</button>
"""
expected = """
<button class="btn-outline">Click me</button>
<button class="btn-ghost">Click me</button>
<button class="">Click me</button>
"""
self.assertHTMLEqual(expected, Template(template).render(context))
@override_settings(DEBUG=True)
def test_syntax_error(self):
template = """
<button class="{{ "foo"|match:"outline:btn-outline,foo:bar:baz,,:apple,:orange" }}">Click me</button>
"""
with self.assertRaises(TemplateSyntaxError):
Template(template).render(Context())
def test_ignore_spaces(self):
context = Context({"variant": "ghost"})
template = """
<button class="{{ variant|match:"outline:btn-outline, ghost:btn-ghost" }}">Click me</button>
"""
expected = """
<button class="btn-ghost">Click me</button>
"""
self.assertHTMLEqual(expected, Template(template).render(context))
class FragmentTagTest(TestCase):
def test_basic(self):
context = Context({})
template = """
{% fragment as my_fragment %}
<p>Hello, World</p>
{% endfragment %}
Text coming after:
{{ my_fragment }}
"""
expected = """
Text coming after:
<p>Hello, World</p>
"""
self.assertHTMLEqual(expected, Template(template).render(context))
@override_settings(DEBUG=True)
def test_syntax_error(self):
template = """
{% fragment %}
<p>Hello, World</p>
{% endfragment %}
"""
with self.assertRaises(TemplateSyntaxError):
Template(template).render(Context())
def test_with_variables(self):
context = Context({"name": "jonathan wells"})
template = """
{% fragment as my_fragment %}
<p>Hello, {{ name|title }}</p>
{% endfragment %}
Text coming after:
{{ my_fragment }}
"""
expected = """
Text coming after:
<p>Hello, Jonathan Wells</p>
"""
self.assertHTMLEqual(expected, Template(template).render(context))
ec8a2f8960c6c38997c2788b7d41e2a8915b8efa | 5,599 | py | Python | Application/index.py | mohammadn/Monte_Carlo | d490e5ae82eb134bec59953c697d3fe36ff28d8f | ["MIT"] | stars: null | issues: null | forks: null
import os
import logging
from flask import Flask, request, render_template
app = Flask(__name__)
def doRender(tname, values={}):
if not os.path.isfile( os.path.join(os.getcwd(), 'templates/'+tname) ):
return render_template('index.htm')
return render_template(tname, **values)
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def mainPage(path):
return doRender(path)
@app.route('/terminate')
def terminate():
os.environ['AWS_SHARED_CREDENTIALS_FILE']='./cred'
import sys
import boto3
ids = []
ec2 = boto3.resource('ec2', region_name='us-east-1')
instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])
for instance in instances:
ids.append(instance.id)
if (ids != []):
ec2.instances.filter(InstanceIds=ids).stop()
ec2.instances.filter(InstanceIds=ids).terminate()
return doRender( 'index.htm', {})
@app.route('/calculate', methods=['POST'])
def calculate():
#!/usr/bin/env python3
import queue
import threading
import math
import json
import http.client
# Modified from: http://www.ibm.com/developerworks/aix/library/au-threadingpython/
# and fixed with try-except around urllib call
service = request.form.get('service')
shots = int(request.form.get('shots'))
rate = request.form.get('rate')
digits = int(request.form.get('digits'))-1
runs = int(request.form.get('resources'))
eachInstanceShots = shots/runs
count = 0
queue = queue.Queue()
if (service == 'lambda'):
class ThreadUrl(threading.Thread):
def __init__(self, queue, task_id):
threading.Thread.__init__(self)
self.queue = queue
self.task_id = task_id
self.incircles = []
self.results = []
self.resourceId = []
self.runningTime = []
def run(self):
count = self.queue.get()
host = "jy6u38g96k.execute-api.us-east-1.amazonaws.com"
try:
c = http.client.HTTPSConnection(host)
jsons= '{ "key1": "'+str(int(eachInstanceShots))+'", "key2": "'+rate+'", "key3": "'+str(digits)+'"}'
c.request("POST", "/default/test", jsons)
response = c.getresponse()
data = response.read().decode('utf-8')
data = json.loads(data)
self.incircles.extend(data[0])
self.results.extend(data[1])
self.runningTime.append(data[2])
self.resourceId.append(self.task_id)
except IOError:
print( 'Failed to open ' , host )
self.queue.task_done()
def parallel_run():
threads=[]
for i in range(0, runs):
t = ThreadUrl(queue, i)
threads.append(t)
t.setDaemon(True)
t.start()
for x in range(0, runs):
queue.put(count)
queue.join()
incircles = [t.incircles for t in threads]
results = [t.results for t in threads]
resourceId = [t.resourceId for t in threads]
runningTime = [t.runningTime for t in threads]
return incircles, results, resourceId, runningTime
mergedIncircles = []
mergedResults = []
stringedResults = ''
mergedResourceId = []
pi = int(math.pi*(10**digits))/10**digits
piValues = ''
matched = 0
roundNum = 9
sumTime = 0
for a in range(0,9):
incircles, results, resourceId, runningTime = parallel_run()
sumResults = 0
# merging results arrays
for i in range(0, len(results)):
for j in range(0,len(results[i])):
mergedResults.append(results[i][j])
# merging incircles arrays
for i in range(0, len(incircles)):
mergedIncircles.append(incircles[i])
for i in range(0, len(resourceId)):
mergedResourceId.append(resourceId[i])
# Adding up results
for i in range(0, len(mergedResults)):
sumResults = sumResults + mergedResults[i]
# Adding up runningTime
for i in range(0, len(runningTime)):
for j in range(0,len(runningTime[i])):
sumTime = sumTime + runningTime[i][j]
# Final estimation
finalResult = int(sumResults/len(mergedResults)*(10**digits))/10**digits
if( pi == finalResult):
matched = 1
roundNum = a+1
break
# transform results to string
for i in range(0,len(mergedResults)):
stringedResults = stringedResults + str(mergedResults[i]) + ','
stringedResults = stringedResults[:-1]
for i in range(0,len(mergedResults)):
piValues = piValues + str(pi) + ','
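# Illustrative comment (not in the original source): the constants below appear to
# encode AWS Lambda pricing at the time of writing -- roughly $0.0000166667 per
# GB-second (about 8.3e-9 per millisecond at 512 MB) plus $0.20 per million requests --
# but treat the exact figures as an assumption.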
comCost = sumTime*512/1024*0.0000000083
reqCost = roundNum*runs*0.2/10**6
finalCost = comCost + reqCost
finalCost = f'{finalCost:.12f}'
comCost = f'{comCost:.12f}'
reqCost = f'{reqCost:.12f}'
return doRender( 'result.htm', {'stringedResults': piValues + '|' + stringedResults, 'incircles': mergedIncircles, 'resourceId': mergedResourceId, 'rate': int(rate), 'roundNum': roundNum, 'matched': matched, 'finalResult': finalResult, 'pi': pi, 'finalCost': finalCost, 'shots': shots, 'rate': rate, 'resources': runs, 'digits': digits+1, 'reqCost': reqCost,'comCost': comCost})
else:
#running ec2 instances
os.environ['AWS_SHARED_CREDENTIALS_FILE']='./cred'
import sys
import boto3
ec2 = boto3.resource('ec2', region_name='us-east-1')
dnss = []
instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])
for instance in instances:
dnss.append(instance.public_dns_name)
if (dnss == []):
instances = ec2.create_instances(
ImageId='ami-0147982d8de757491',
MinCount=1,
MaxCount=runs,
InstanceType='t2.micro',)
return doRender( 'result.htm', {})
@app.errorhandler(500)
def server_error(e):
logging.exception('ERROR!')
return """
An error occurred: <pre>{}</pre>
""".format(e), 500
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8080, debug=True)
ec8b6f80bc96cced1d927f6a24327bd17b6dab7b | 8,344 | py | Python | src/gocept/amqprun/testing.py | NativeInstruments/gocept.amqprun | 1f2d959f18617d46d0c3dc8512a910fe302ca384 | ["ZPL-2.1"] | stars: 2 (2020-01-29 .. 2020-02-25) | issues: 14 (2020-03-20 .. 2020-10-06) | forks: null (forks repo: gocept/gocept.amqprun)
import amqp
import datetime
import email.utils
import gocept.amqprun
import gocept.amqprun.interfaces
import gocept.amqprun.main
import gocept.amqprun.worker
import logging
import os
import pkg_resources
import plone.testing
import plone.testing.zca
import signal
import string
import subprocess
import sys
import tempfile
import time
import unittest
class ZCASandbox(plone.testing.Layer):
defaultBases = [plone.testing.zca.LAYER_CLEANUP]
def testSetUp(self):
plone.testing.zca.pushGlobalRegistry()
def testTearDown(self):
plone.testing.zca.popGlobalRegistry()
ZCA_LAYER = ZCASandbox()
class QueueLayer(plone.testing.Layer):
defaultBases = [ZCA_LAYER]
RABBITMQCTL_COMMAND = os.environ.get(
'AMQP_RABBITMQCTL', 'sudo rabbitmqctl')
def setUp(self):
self['amqp-hostname'] = os.environ.get('AMQP_HOSTNAME', 'localhost')
self['amqp-username'] = os.environ.get('AMQP_USERNAME', 'guest')
self['amqp-password'] = os.environ.get('AMQP_PASSWORD', 'guest')
self['amqp-virtualhost'] = os.environ.get('AMQP_VIRTUALHOST', None)
if self['amqp-virtualhost'] is None:
self['amqp-virtualhost'] = '/test.%f' % time.time()
self.rabbitmqctl('add_vhost %s' % self['amqp-virtualhost'])
self.rabbitmqctl(
'set_permissions -p %s %s ".*" ".*" ".*"'
% (self['amqp-virtualhost'], self['amqp-username']))
self['amqp-virtualhost-created'] = True
self['amqp-connection'] = amqp.Connection(
host=self['amqp-hostname'],
userid=self['amqp-username'],
password=self['amqp-password'],
virtual_host=self['amqp-virtualhost'])
self['amqp-connection'].connect()
self['amqp-channel'] = self['amqp-connection'].channel()
def tearDown(self):
self['amqp-channel'].close()
del self['amqp-channel']
self['amqp-connection'].close()
del self['amqp-connection']
if 'amqp-virtualhost-created' in self:
self.rabbitmqctl('delete_vhost %s' % self['amqp-virtualhost'])
del self['amqp-virtualhost-created']
def rabbitmqctl(self, parameter):
command = f'{self.RABBITMQCTL_COMMAND} {parameter}'
stdout = subprocess.check_output(
'LANG=C %s' % command, stderr=subprocess.STDOUT, shell=True)
if b'Error' in stdout:
raise RuntimeError(
f'{command} failed:\n{stdout}') # pragma: no cover
QUEUE_LAYER = QueueLayer()
class QueueTestCase(unittest.TestCase):
layer = QUEUE_LAYER
def setUp(self):
super().setUp()
self._queue_prefix = 'test.%f.' % time.time()
self._queues = []
self.connection = self.layer['amqp-connection']
self.channel = self.layer['amqp-channel']
self.receive_queue = self.get_queue_name('receive')
self.channel.queue_declare(queue=self.receive_queue)
self._queues.append(self.receive_queue)
def tearDown(self):
for queue_name in self._queues:
# NOTE: we seem to need a new channel for each delete;
# trying to use self.channel for all queues results in its
# closing after the first delete
with self.connection.channel() as channel:
channel.queue_delete(queue_name)
super().tearDown()
def get_queue_name(self, suffix):
queue_name = self._queue_prefix + suffix
self._queues.append(queue_name)
return queue_name
def send_message(self, body, routing_key='', headers=None, **kw):
self.channel.basic_publish(
amqp.Message(
body,
timestamp=int(
time.mktime(datetime.datetime.now().timetuple())),
application_headers=headers or {},
msgid=email.utils.make_msgid('gocept.amqprun.test'),
**kw),
'amq.topic', routing_key=routing_key)
time.sleep(0.1)
def expect_message_on(self, routing_key):
self.channel.queue_bind(
self.receive_queue, 'amq.topic', routing_key=routing_key)
# BBB
expect_response_on = expect_message_on
def wait_for_message(self, timeout=10):
"""Wait for a response on `self.receive_queue`.
timeout ... wait for n seconds.
"""
for i in range(timeout):
message = self.channel.basic_get(self.receive_queue, no_ack=True)
if message:
break
time.sleep(1)
else:
raise RuntimeError('No message received')
return message
def create_server(self, **kw):
import gocept.amqprun.server
params = dict(hostname=self.layer['amqp-hostname'],
username=self.layer['amqp-username'],
password=self.layer['amqp-password'],
virtual_host=self.layer['amqp-virtualhost'])
# XXX not DRY, the default value is declared in Server.__init__()
setup_handlers = kw.pop('setup_handlers', True)
params.update(kw)
return gocept.amqprun.server.Server(
params, setup_handlers=setup_handlers)
class MainTestCase(QueueTestCase):
def setUp(self):
super().setUp()
plone.testing.zca.pushGlobalRegistry()
def tearDown(self):
super().tearDown()
plone.testing.zca.popGlobalRegistry()
# heuristic to avoid accreting more and more debug log output handlers
if logging.root.handlers:
handler = logging.root.handlers[-1]
if isinstance(handler, logging.StreamHandler):
logging.root.handlers.pop()
def start_server(self):
self.server = gocept.amqprun.main.create_configured_server(
self.config.name)
self.server.connect()
def start_server_in_subprocess(self, *args, **kwargs):
script = tempfile.NamedTemporaryFile(
mode='w', suffix='.py', encoding='utf-8')
module = kwargs.pop('module', 'gocept.amqprun.main')
config = [self.config.name]
config.extend(args)
script.write("""
import sys
sys.path[:] = %(path)r
import %(module)s
%(module)s.main%(config)r
""" % dict(path=sys.path, config=tuple(config), module=module))
script.flush()
self.stdout = tempfile.TemporaryFile(mode='w+', encoding='utf-8')
process = subprocess.Popen(
[sys.executable, script.name],
stdout=self.stdout, stderr=subprocess.STDOUT)
time.sleep(1)
self.pid = process.pid
def stop_server_in_subprocess(self):
os.kill(self.pid, signal.SIGINT)
self.wait_for_subprocess_exit()
self.pid = None
def wait_for_subprocess_exit(self, timeout=30):
for i in range(timeout):
pid, status = os.waitpid(self.pid, os.WNOHANG)
if (pid, status) != (0, 0):
return status
time.sleep(0.5)
else: # pragma: no cover
os.kill(self.pid, signal.SIGKILL)
self.stdout.seek(0)
self.fail('Child process did not exit\n' + self.stdout.read())
def make_config(self, package, name, mapping=None):
zcml_base = string.Template(
# pkg_resources.resource_string actually provides bytes *sigh*
str(pkg_resources.resource_string(package, '%s.zcml' % name),
'utf8'))
self.zcml = tempfile.NamedTemporaryFile()
self.zcml.write(zcml_base.substitute(mapping).encode('utf8'))
self.zcml.flush()
sub = dict(
site_zcml=self.zcml.name,
amqp_hostname=self.layer['amqp-hostname'],
amqp_username=self.layer['amqp-username'],
amqp_password=self.layer['amqp-password'],
amqp_virtualhost=self.layer['amqp-virtualhost'],
)
if mapping:
sub.update(mapping)
base = string.Template(
# pkg_resources.resource_string actually provides bytes *sigh*
str(pkg_resources.resource_string(package, '%s.conf' % name),
'utf8'))
self.config = tempfile.NamedTemporaryFile()
self.config.write(base.substitute(sub).encode('utf8'))
self.config.flush()
return self.config.name
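# Illustrative sketch (added for clarity, not part of the original module): a concrete
# test would typically subclass MainTestCase, render its config from package data and
# then start the server, roughly like this (package and template names are placeholders):
#
# class MyHandlerTest(MainTestCase):
#     def test_message_roundtrip(self):
#         self.make_config('my.package', 'my_handler')
#         self.start_server()
#         self.send_message('payload', routing_key='test.routing.key')
#         ...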
ec918eada4dea8a6caff0286e38ae25cdcefa19d | 1,133 | py | Python | examples/timeseries/example_acf.py | Hadrien-Montanelli/learnpy | b9fedb903cfe8c2fff8d7706667f17c51fb3a34f | ["MIT"] | stars: 1 (2020-10-19) | issues: 21 (2020-10-30 .. 2020-11-25) | forks: null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 2 16:17:42 2020
Copyright 2020 by Hadrien Montanelli.
"""
# %% Imports.
# Standard library imports:
import matplotlib.pyplot as plt
import numpy as np
from statsmodels.tsa.stattools import acf as acf2
# Learnpy imports:
from learnpy.misc import csv_to_array
from learnpy.timeseries import acf
# %% Examples.
# AR(1)-type time series:
series = csv_to_array('../../datasets/time_series_ar1.csv')
sample_acf = acf(series)
plt.plot(sample_acf[1:], '-')
sample_acf_statsmodels = acf2(series, nlags=len(series), fft=False)
plt.plot(sample_acf_statsmodels[1:], '--')
error = np.linalg.norm(sample_acf - sample_acf_statsmodels)
print('Error: ', error) # compare with statsmodels' acf
# MA(1)-type time series:
series = csv_to_array('../../datasets/time_series_ma1.csv')
sample_acf = acf(series)
plt.figure()
plt.plot(sample_acf[1:], '-')
sample_acf_statsmodels = acf2(series, nlags=len(series), fft=False)
plt.plot(sample_acf_statsmodels[1:], '--')
error = np.linalg.norm(sample_acf - sample_acf_statsmodels)
print('Error: ', error) # compare with statsmodels' acf
ec93172931a6512e00485bff696dbae2c5038576 | 772 | py | Python | Python/problem014.py | emergent/ProjectEuler | ec1c92cc47fde80efddeb0346d9b0fa511df1f00 | ["Unlicense"] | stars: null | issues: null | forks: null
#! /usr/bin/env python3
'''
Problem 14 - Project Euler
http://projecteuler.net/index.php?section=problems&id=014
'''
chains = {}
def getCollatzChainLength(n):
chain = []
extra = 0
while n != 1:
if n in chains:
extra = chains[n]
break
else:
chain.append(n)
if (n % 2 == 0): # even
n = n // 2
else: # odd
n = n * 3 + 1
else:
chain.append(n)
for i, item in enumerate(chain):
if item not in chains:
chains[item] = len(chain[i:]) + extra
return len(chain) + extra
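# Illustrative check (added for clarity, not in the original solution): the chain
# starting at 13 is 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1, i.e. 10 terms,
# so getCollatzChainLength(13) should return 10, matching the Project Euler statement.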
if __name__ == '__main__':
print(max([(x, getCollatzChainLength(x)) for x in range(1, 1_000_000)],
key=(lambda x: x[1])))
ec931bc4df07b3df0aaf9c0ed905fa654356c04b | 354 | py | Python | buttonStyle.py | rafael-rfzorzi/estiloWidgets | d9e47b9b570b6806ffb334878ee0dd466b391d10 | ["Unlicense"] | stars: null | issues: null | forks: null
from tkinter import *
class ButtonGlac(Button):
def __init__(self, master=None):
super().__init__(master)
self.configure(
bd=1,
bg='#49708D',
fg= 'white',
font=('Aharoni','10','bold'),
activebackground = '#278ab9',
activeforeground= "lightgray"
)
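# Minimal usage sketch (added for illustration; assumes a normal Tk event loop):
if __name__ == '__main__':
    root = Tk()
    button = ButtonGlac(root)
    button.configure(text='Click me')
    button.pack(padx=20, pady=20)
    root.mainloop()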
ec93f76feaf219cc796dc101bd2540c75272c895 | 1,558 | py | Python | codewars/7 kyu/growth-of-a-population.py | sirken/coding-practice | 9c5e23b2c24f525a89a5e1d15ce3aec3ad1a01ab | ["MIT"] | stars: null | issues: null | forks: null
from Test import Test, Test as test
'''
In a small town the population is p0 = 1000 at the beginning of a year. The population regularly increases by 2 percent per year and moreover 50 new inhabitants per year come to live in the town. How many years does the town need to see its population greater or equal to p = 1200 inhabitants?
At the end of the first year there will be:
1000 + 1000 * 0.02 + 50 => 1070 inhabitants
At the end of the 2nd year there will be:
1070 + 1070 * 0.02 + 50 => 1141 inhabitants (number of inhabitants is an integer)
At the end of the 3rd year there will be:
1141 + 1141 * 0.02 + 50 => 1213
It will need 3 entire years.
More generally given parameters:
p0, percent, aug (inhabitants coming or leaving each year), p (population to surpass)
the function nb_year should return n number of entire years needed to get a population greater or equal to p.
aug is an integer, percent a positive or null number, p0 and p are positive integers (> 0)
Examples:
nb_year(1500, 5, 100, 5000) -> 15
nb_year(1500000, 2.5, 10000, 2000000) -> 10
Note: Don't forget to convert the percent parameter as a percentage in the body of your function: if the parameter percent is 2 you have to convert it to 0.02.
'''
def nb_year(p0, percent, aug, p):
years = 0
while p0 < p:
p0 = p0 + (p0 * percent / 100) + aug
years += 1
return years
Test.assert_equals(nb_year(1500, 5, 100, 5000), 15)
Test.assert_equals(nb_year(1500000, 2.5, 10000, 2000000), 10)
Test.assert_equals(nb_year(1500000, 0.25, 1000, 2000000), 94)
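# Worked example from the problem statement above (added for illustration):
# starting from p0 = 1000 with 2 percent growth and 50 newcomers per year,
# the population reaches 1070, then 1141, then 1213 >= 1200, so 3 years are needed.
Test.assert_equals(nb_year(1000, 2, 50, 1200), 3)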
ec96d028c0ae17832cf014fcffe7d77910c0fb12 | 13,593 | py | Python | lib/rabbitdnssec.py | jderuiter/SURFdnssec | 5416441a516c689f113684e4e5cb76272c092f9b | ["BSD-2-Clause", "BSD-3-Clause"] | stars: null | issues: 1 (2021-09-27) | forks: 2 (2021-02-03 .. 2021-09-27)
# rabbitdnssec.py -- DNSSEC management through a RabbitMQ cluster
#
# These routines can be used somewhat generally within a cluster of
# DNSSEC signers as we are using at SURFnet.
#
# From: Rick van Rein <rick@openfortress.nl>
import sys
import socket
import time
import os.path
import importlib
import ssl
import json
import syslog
import atexit
import configparser
import pika
import pika.spec
import pika.credentials
# Setup configuration, such as settings and application name
#
homedir = os.path.expanduser ('~')
appdir = homedir + '/ods-amqp'
appname = os.path.basename (sys.argv [0])
appcfg = configparser.ConfigParser ()
appcfg.read ([appdir + '/config', '/etc/opendnssec/ods-amqp.config'])
# Recreate the prefix from sys.argv [0] and add to to $PATH
#
prefix = os.path.dirname (sys.argv [0])
os.environ ['PATH'] = prefix + ':' + os.environ.get ('PATH')
# Open syslog, using standard settings
#
def cleanup_syslog ():
syslog.syslog (syslog.LOG_INFO, 'Program exiting')
syslog.closelog ()
syslog.openlog (appname,
(syslog.LOG_PERROR if sys.stderr.isatty () else 0) |
syslog.LOG_PID,
syslog.LOG_USER)
syslog.syslog (syslog.LOG_INFO, 'Program starting')
atexit.register (cleanup_syslog)
# Setup the RabbitMQ client
#
this_machine = socket.gethostname ().split ('.') [0]
this_port = int (appcfg ['rabbitmq'] ['port'])
vhost = appcfg ['rabbitmq'] ['vhost']
signer_cluster = appcfg ['rabbitmq'] ['signer_cluster']
signer_machines = appcfg ['rabbitmq'] ['signer_machines'].split ()
backup_machines = appcfg ['rabbitmq'] ['backup_machines'].split ()
plugindir = appcfg ['rabbitmq'] ['plugindir']
ca_certs = appcfg ['rabbitmq'] ['ca_certs']
backend = appcfg ['rabbitmq'] ['backend']
#
assert ((this_machine in signer_machines) or (this_machine in backup_machines))
assert (len (signer_machines) >= 2)
# Setup for TLS
#
wrap_tls = True
conf_tls = {
'ssl_version': ssl.PROTOCOL_TLSv1_2,
'ca_certs': ca_certs,
'certfile': appdir + '/ssl/certs/' + this_machine + '.pem',
'keyfile': appdir + '/ssl/private/' + this_machine + '.pem',
'server_side': False,
}
# Setup PKCS #11
#
pkcs11_libfile = appcfg ['pkcs11'] ['libfile']
pkcs11_token_label = appcfg ['pkcs11'] ['token_label']
pkcs11_pinfile_path = appcfg ['pkcs11'] ['pinfile']
pkcs11_curve_name = appcfg ['pkcs11'] ['curve_name']
# Send messages at various levels to syslog
#
def log_debug (msg, *args):
	for a in args:
		msg = msg + ' ' + str (a)
	msg = msg.encode ('ascii', 'replace').decode ('ascii')
	syslog.syslog (syslog.LOG_DEBUG, msg)
def log_info (msg, *args):
	for a in args:
		msg = msg + ' ' + str (a)
	msg = msg.encode ('ascii', 'replace').decode ('ascii')
	syslog.syslog (syslog.LOG_INFO, msg)
def log_notice (msg, *args):
	for a in args:
		msg = msg + ' ' + str (a)
	msg = msg.encode ('ascii', 'replace').decode ('ascii')
	syslog.syslog (syslog.LOG_NOTICE, msg)
def log_warning (msg, *args):
	for a in args:
		msg = msg + ' ' + str (a)
	msg = msg.encode ('ascii', 'replace').decode ('ascii')
	syslog.syslog (syslog.LOG_WARNING, msg)
def log_error (msg, *args):
	for a in args:
		msg = msg + ' ' + str (a)
	msg = msg.encode ('ascii', 'replace').decode ('ascii')
	syslog.syslog (syslog.LOG_ERR, msg)
def log_critical (msg, *args):
	for a in args:
		msg = msg + ' ' + str (a)
	msg = msg.encode ('ascii', 'replace').decode ('ascii')
	syslog.syslog (syslog.LOG_CRIT, msg)
# Return the name of a queue on the current machine (prefixed by the hostname)
#
def my_queue (queue):
return this_machine + '_' + queue
# Return the name of an exchange on the current machine (prefixed by the hostname)
#
def my_exchange (exchange='signer'):
return this_machine + '_' + exchange
# Return configuration dict for the current app from config section [APPNAME]
# (Use ovr_appname to override the application name to something else)
#
def my_config (ovr_appname=None):
global appcfg, appname
assert (ovr_appname != 'accounts')
if ovr_appname is None:
ovr_appname = appname
return appcfg [ovr_appname]
# Return the backend module name used for signing DNS zone data.
#
def my_backend ():
return backend
# Return the plugin directory for this program.
#
def my_plugindir (ovr_appname=None):
return plugindir + '/' + (ovr_appname or appname)
# Return the backend module used for signing DNS zone data.
# By default, a possible loading location is the plugin directory's
# subdirectory named by sys.argv [0], but ovr_appname can be used to
# override this default name for the application subdirectory under
# the plugin directory.
#
def my_backendmod (modname_prefix, modname_postfix='', ovr_appname=None):
sys.path.append (my_plugindir (ovr_appname=ovr_appname))
backendmod = importlib.import_module (
modname_prefix + backend + modname_postfix )
sys.path.pop ()
return backendmod
# Retrieve a PlainCredentials object based on the current appname.
# Overrides exist for appname and username.
#
def my_credentials (ovr_appname=None, ovr_username=None):
global appcfg, appname
if ovr_username is None:
username = appcfg [ovr_appname or appname] ['username']
else:
username = ovr_username
password = appcfg ['accounts'] [username]
return pika.PlainCredentials (username, password)
# Retrieve a ConnectionParameters object.  This is based on settings
# in the [rabbitmq] configuration section, which applies to all appnames
# under this UNIX account, except for the credentials which can be
# supplied here as a parameter, and may well be derived with
# my_credentials().
#
def my_connectionparameters (my_creds, host=this_machine, port=this_port, **params):
return pika.ConnectionParameters (
host,
port,
virtual_host=vhost,
ssl=wrap_tls,
ssl_options=conf_tls,
credentials=my_creds,
**params)
# Construct a BasicProperties object, based on standard available
# information and optional headers. There are options for overriding
# the username.
#
def my_basicproperties (headers=None, ovr_appname=None, ovr_username=None):
return pika.spec.BasicProperties (
timestamp=time.time (),
user_id=(ovr_username or appcfg [
ovr_appname or appname] ['username']),
cluster_id=signer_cluster,
headers=headers)
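# Illustrative sketch (editor's addition, not part of the original module): how the
# helpers above fit together for a simple publisher.  The routing key and payload are
# placeholders; credentials and connection settings come from the configuration
# loaded at import time.
def _example_publish ():
	creds = my_credentials ()
	cnxparm = my_connectionparameters (creds)
	cnx = pika.BlockingConnection (cnxparm)
	chan = cnx.channel ()
	chan.basic_publish (exchange=my_exchange (),
			routing_key='example_routing_key',
			body='example payload',
			properties=my_basicproperties ())
	cnx.close ()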
def pkcs11_pin ():
"""Load the PKCS #11 PIN from the OpenDNSSEC configuration.
"""
return open (pkcs11_pinfile_path).read ().strip ()
def pkcs11_pinfile ():
"""Return the PKCS #11 PIN file from the OpenDNSSEC configuration.
"""
return pkcs11_pinfile_path
class MessageCollector (object):
"""MessageCollector synchronously loads at least one message,
but more when they are immediately available. This helps
to speed up operations when work accumulates and batch-mode
operation is possible. At the same time, it does not slow
down operations when messages drip in one at a time.
This is probably best combined with transactions, as in
chan.tx_select ()
clx = MessageCollector (chan)
clx.collect ()
...
for m in clx.messages ():
...inner.loop...
...
if ...we are happy...:
clx.ack ()
else:
clx.nack ()
chan.tx_commit ()
"""
def __init__ (self, chan, queue=None):
self.chan = chan
self.queue = queue
self.msgtags = []
self.msglist = []
self.gotempty = False
def messages (self):
"""Return the list of messages collected.
"""
return self.msglist
def count (self):
"""Return the number of messages collected.
"""
return len (self.msglist)
def ack (self):
"""Send a basic_ack() on all collected messages.
"""
for tag in self.msgtags:
self.chan.basic_ack (delivery_tag=tag)
self.msgtags = []
def nack (self, requeue=True):
"""Send a basic_nack() on all collected messages.
"""
for tag in self.msgtags:
self.chan.basic_nack (delivery_tag=tag, requeue=requeue)
self.msgtags = []
def more_to_collect (self):
"""Call this to see if we should proceed; it means that
we collected at least one message, and nothing more
is available for immediate processing.
"""
# return len (self.msglist) == 0 or not self.empty
#FAIL# print 'Length of collected messages:', len (self.msglist)
#FAIL# print 'Number of waiting messages:', self.chan.get_waiting_message_count ()
qhdl = self.chan.queue_declare (queue=self.queue, passive=True)
# print 'qhdl.method.message_count =', qhdl.method.message_count
#FAIL# return len (self.msglist) == 0 or self.chan.get_waiting_message_count () > 0
return len (self.msglist) == 0 or qhdl.method.message_count > 0
def collect (self, queue=None):
"""Collect at least one message; if more can be collected
without waiting, then do so. This method is not
re-entrant. The queue defaults to the value that was
optionally set when this object was instantiated.
"""
regcb = False
self.empty = False
tout = None
while self.more_to_collect ():
# print 'There is more to collect...'
# Note: self.chan is an instance of
# pika.adapters.blocking_connection.BlockingChannel
# which returns (None,None,None) for an empty queue
# or (mth,props,body) otherwise
#FAIL# (mth, props, body) = self.chan.consume (
#FAIL# queue=(queue or self.queue),
#FAIL# inactivity_timeout=tout)
(mth,props,body) = self.chan.basic_get (
queue=(queue or self.queue))
# print 'Class MTH =', type (mth)
			#TODO# No timeout... and bad responses when empty!
if type (mth) != pika.spec.Basic.GetOk:
#TODO# raise Exception ('Unexpectedly found empty queue "' + (queue or self.queue) + '"')
# print 'Unexpectedly found empty queue "' + (queue or self.queue) + '"'
time.sleep (60)
continue
self.msgtags.append (mth.delivery_tag)
self.msglist.append (body)
# The next looping is not blocking
tout = 10
#TODO#FROMHERE#
#TODO# self.callback_GetOk (self, self.chan, mth, props, body)
#DROP# self.chan.basic_get (callback=self.callback_GetOk,
#DROP# queue=(queue or self.queue))
#DROP# if not regcb:
#DROP# self.chan.add_callback (clx.callback_GetEmpty,
#DROP# pika.spec.Basic.GetEmpty,
#DROP# one_shot=True)
#DROP# regcb = True
pass # print 'There is nothing more to collect'
def callback_GetEmpty (self, frame):
"""Take note that no messages are currently available.
"""
self.gotempty = True
def callback_GetOk (self, chan, mth, props, body):
"""Take note of a new message. Store its delivery_tag
for future use with self.ack() or self.nack().
"""
self.msgtags.append (mth.delivery_tag)
self.msglist.append (body)
def open_client_connection (username=None, hostname='localhost'):
"""Return a connection as an AMQP client, with the given
username. A password is determined locally. When
no username is provided, guest / guest will be used.
The default host to connect to is localhost, but
another value may be passed in.
The returned value is a connection, to be used as in
cnx = open_client_connection (...)
chan = cnx.channel ()
...
cnx.close ()
Exceptions that might be raised include
pika.exceptions.AMQPChannelError
pika.exceptions.AMQPError
See amqp_client_channel() for a "with" form.
"""
if username is not None:
password = appcfg ['accounts'] [username]
creds = pika.PlainCredentials (username, password)
else:
# Construct ConnectionParameters for guest / guest
creds = None
cnxparm = pika.ConnectionParameters (
host=hostname,
port=this_port,
virtual_host=vhost,
ssl=wrap_tls,
ssl_options=conf_tls,
credentials=creds
)
cnx = pika.BlockingConnection (cnxparm)
return cnx
class amqp_client_channel ():
"""Use this class in the "with" form:
with amqp_client_channel (...) as chan:
chan.basic_publish (...)
Set username to login in another way than guest / guest.
Set hostname to connect to another host than localhost.
Set transactional to request transactional behaviour.
Any AMQP exceptions will be caught, printed and fatally exited.
In the transactional variety, the channel is setup accordingly
and calls to tx_commit() and/or tx_rollback() are supported.
When normally ending the "with" clause, any remaining work will
be committed, and any failure to that end will be reported along
with the AMQP exceptions. When the "with" clause is left early
	due to an exception, then the transaction will be rolled back.
"""
def __init__ (self, username=None, hostname='localhost', transactional=False):
self.username = username
self.hostname = hostname
self.transact = transactional
def __enter__ (self):
self.cnx = open_client_connection (self.username, self.hostname)
self.chan = self.cnx.channel ()
if self.transact:
self.chan.tx_select ()
return self.chan
def __exit__ (self, typ, val, tbk):
txfail = False
if self.transact:
if val is not None:
self.chan.tx_rollback ()
else:
frame_method = self.chan.tx_commit ()
txfail = type (frame_method.method) != pika.spec.Tx.CommitOk
self.cnx.close ()
if isinstance (val, pika.exceptions.AMQPChannelError):
log_error ('AMQP Channel Error:', val)
sys.exit (1)
if isinstance (val, pika.exceptions.AMQPConnectionError):
log_error ('AMQP Connection Error:', val)
sys.exit (1)
if isinstance (val, pika.exceptions.AMQPError):
log_error ('AMQP Error:', val)
sys.exit (1)
if self.transact:
if txfail:
log_error ('AMQP Transaction Failure')
sys.exit (1)
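# Illustrative sketch (editor's addition, not part of the original module): typical use
# of the "with" form above.  The username, routing key and payload are placeholders.
def _example_with_channel ():
	with amqp_client_channel (username='example-app', transactional=True) as chan:
		chan.basic_publish (exchange=my_exchange (),
				routing_key='example_routing_key',
				body='example payload')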
| 30.963554 | 93 | 0.703156 | 1,897 | 13,593 | 4.937269 | 0.231945 | 0.016229 | 0.011745 | 0.017937 | 0.20158 | 0.171151 | 0.126735 | 0.126735 | 0.117553 | 0.104847 | 0 | 0.005223 | 0.183109 | 13,593 | 438 | 94 | 31.034247 | 0.838256 | 0.439123 | 0 | 0.213636 | 0 | 0 | 0.082344 | 0.004137 | 0 | 0 | 0 | 0.002283 | 0.013636 | 1 | 0.140909 | false | 0.027273 | 0.063636 | 0.027273 | 0.286364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eca028126d38ff6361e05ae287f82997405bb260 | 4,173 | py | Python | froide/foirequest/delivery.py | AleksiKnuutila/tietopyynto | 4c7438499002e521114daf07f561fd140a21dfbb | [
"MIT"
] | null | null | null | froide/foirequest/delivery.py | AleksiKnuutila/tietopyynto | 4c7438499002e521114daf07f561fd140a21dfbb | [
"MIT"
] | 2 | 2020-06-05T16:43:43.000Z | 2022-02-10T15:47:12.000Z | froide/foirequest/delivery.py | AleksiKnuutila/tietopyynto | 4c7438499002e521114daf07f561fd140a21dfbb | [
"MIT"
] | null | null | null | from collections import defaultdict, namedtuple
from datetime import datetime
import importlib
import logging
import re
import os
import pytz
def get_delivery_report(sender, recipient, timestamp):
from django.conf import settings
reporter_path = settings.FROIDE_CONFIG.get('delivery_reporter', None)
if not reporter_path:
return
module, klass = reporter_path.rsplit('.', 1)
module = importlib.import_module(module)
reporter_klass = getattr(module, klass)
reporter = reporter_klass(time_zone=settings.TIME_ZONE)
return reporter.find(sender, recipient, timestamp)
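# Illustrative sketch (editor's addition, not part of the original module): the settings
# entry that get_delivery_report() expects -- a dotted path to a reporter class such as
# the PostfixDeliveryReporter defined below.  The exact settings layout is an assumption.
#
# FROIDE_CONFIG = {
#     'delivery_reporter': 'froide.foirequest.delivery.PostfixDeliveryReporter',
# }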
DeliveryReport = namedtuple('DeliveryReport', ['log', 'time_diff',
'status', 'message_id'])
class PostfixDeliveryReporter(object):
SENDER_RE = r'\s(?P<mail_id>\w+): from=<{sender}'
MESSAGE_ID_RE = r'{mail_id}: message-id=<(?P<message_id>[^>]+)>'
ALL_RE = r' {mail_id}: '
RECIPIENT_RE = r'{mail_id}: to=<{recipient}'
STATUS_RE = re.compile(r'status=(\w+)')
TIMESTAMP_RE = re.compile(r'\w{3}\s+\d+\s+\d+:\d+:\d+')
TIME_PARSE_STR = '%b %d %H:%M:%S'
LOG_FILES = [
'/var/log/mail.log',
'/var/log/mail.log.1'
]
def __init__(self, time_zone=None):
self.timezone = pytz.timezone(time_zone)
def find(self, sender, recipient, timestamp):
for filename in self.LOG_FILES:
if not os.path.exists(filename):
continue
try:
with open(filename) as fp:
result = self.search_log(fp, sender, recipient, timestamp)
if result:
return result
except IOError as e:
logging.exception(e)
pass
def search_log(self, fp, sender, recipient, timestamp):
sender_re = re.compile(self.SENDER_RE.format(sender=sender))
mail_ids = set()
for line in fp:
match = sender_re.search(line)
if match:
mail_ids.add(match.group('mail_id'))
fp.seek(0)
mail_id_res = [re.compile(self.ALL_RE.format(mail_id=mail_id))
for mail_id in mail_ids]
lines = defaultdict(list)
for line in fp:
for mail_id, mail_id_re in zip(mail_ids, mail_id_res):
if mail_id_re.search(line) is not None:
lines[mail_id].append(line)
candidates = []
for mail_id in mail_ids:
candidate = self.extract(
lines[mail_id], mail_id, sender_re, recipient, timestamp)
if candidate is not None:
candidates.append(candidate)
if not candidates:
return None
if len(candidates) == 1:
return candidates[0]
candidates = sorted(candidates, key=lambda x: abs(x.time_diff))
return candidates[0]
def extract(self, lines, mail_id, sender_re, recipient, timestamp):
text = ''.join(lines)
recipient_re = re.compile(self.RECIPIENT_RE.format(
mail_id=mail_id, recipient=recipient))
match = recipient_re.search(text)
if match is None:
return
log_timestamp = self.get_timestamp(text, timestamp)
time_diff = (log_timestamp - timestamp).total_seconds()
if time_diff < -5:
# Log can't be before sending timestamp, allow for some overlap
return
message_id_re = re.compile(self.MESSAGE_ID_RE.format(mail_id=mail_id))
match = self.STATUS_RE.findall(text)
status = None
if match:
# find last status
status = match[-1]
match = message_id_re.search(text)
message_id = None
if match:
message_id = match.group('message_id')
return DeliveryReport(text, time_diff, status, message_id)
def get_timestamp(self, text, timestamp):
match = self.TIMESTAMP_RE.search(text)
date_str = match.group(0)
date = datetime.strptime(date_str, self.TIME_PARSE_STR)
date = date.replace(year=timestamp.year)
return self.timezone.localize(date)
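# Illustrative sketch (editor's addition, not part of the original module): calling the
# reporter directly.  Addresses, time zone and timestamp are placeholders; the timestamp
# must be timezone-aware so it can be compared against the localized log time.
def _example_report():
    reporter = PostfixDeliveryReporter(time_zone='Europe/Berlin')
    sent_at = pytz.timezone('Europe/Berlin').localize(datetime(2021, 3, 1, 12, 0, 0))
    return reporter.find('request@example.org', 'authority@example.net', sent_at)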
| 33.926829 | 78 | 0.601006 | 522 | 4,173 | 4.614943 | 0.243295 | 0.054795 | 0.049813 | 0.024907 | 0.085513 | 0.066418 | 0 | 0 | 0 | 0 | 0 | 0.003398 | 0.294752 | 4,173 | 122 | 79 | 34.204918 | 0.815155 | 0.018692 | 0 | 0.10101 | 0 | 0 | 0.068671 | 0.014418 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0.010101 | 0.090909 | 0 | 0.343434 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eca18d4e3d6f7699e830daef606d6fa6213aa3b2 | 2,664 | py | Python | cogs/admin.py | Mr-Owllers/owll-v2 | 73ec36b8275166f405224ba0e27e37e7390c104a | [
"MIT"
] | 2 | 2021-12-20T06:25:42.000Z | 2022-01-12T17:08:32.000Z | cogs/admin.py | Mr-Owllers/owll-v2 | 73ec36b8275166f405224ba0e27e37e7390c104a | [
"MIT"
] | null | null | null | cogs/admin.py | Mr-Owllers/owll-v2 | 73ec36b8275166f405224ba0e27e37e7390c104a | [
"MIT"
] | null | null | null | import nextcord
from nextcord.ext import commands
class Admin(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(help= "delete messages in bulk", aliases=["purge", "c"])
@commands.has_permissions(manage_messages=True)
async def clear(self, ctx, amount = 5):
await ctx.channel.purge(limit = amount + 1)
await ctx.send(f"{amount} messages deleted" , delete_after = 5)
@commands.command(help= "kick a member", aliases=["k"])
@commands.has_permissions(kick_members=True)
async def kick(self, ctx, member: nextcord.Member, *, reason=None):
author = ctx.author
if member == author:
await ctx.send("you can't kick yourself")
else:
try:
await member.kick(reason=reason)
await member.send(f"```\nyou were kicked from {ctx.guild.name}\nreason={reason}\n```")
await ctx.send(f"```\n{member} was kicked by {ctx.author.name}\nreason={reason}\n```")
except:
await member.kick(reason=reason)
await ctx.send(f"```\n{member} was kicked by {ctx.author.name}\nreason={reason}\n```")
@commands.command(help= "ban a member", aliases=["b"])
@commands.has_permissions(ban_members=True)
async def ban(self, ctx, member: nextcord.Member, *, reason=None):
author = ctx.author
if member == author:
await ctx.send("you can't ban yourself")
else:
try:
await member.ban(reason=reason)
await member.send(f"```\nyou were banned from {ctx.guild.name}\nreason={reason}\n```")
await ctx.send(f"```\n{member} was banned by {ctx.author.name}\nreason={reason}\n```")
except:
await member.ban(reason=reason)
await ctx.send(f"```\n{member} was banned by {ctx.author.name}\nreason={reason}\n```")
@commands.command(help = "see how many ppl you banned")
@commands.has_permissions(ban_members=True)
async def bans(self, ctx):
banned = await ctx.guild.bans()
has_bans = banned != []
if not has_bans:
await ctx.send("no bans :)")
else:
await ctx.send(f"```py\n{banned}\n```")
@commands.command(help="unban a member")
@commands.has_permissions(ban_members=True)
async def unban(self, ctx, *, member):
banned = await ctx.guild.bans()
member_name, member_discrim = member.split("#")
for ban_entry in banned:
user = ban_entry.user
if (user.name, user.discriminator) == (member_name, member_discrim):
await ctx.guild.unban(user)
await ctx.send(f"```\n{user.name}#{user.discriminator} was unbanned by {ctx.author.name}\n```")
return
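# Illustrative sketch (editor's addition, not part of the original cog): the bot entry
# point loads this module as an extension, which in turn calls setup() below.  The
# extension path and command prefix are assumptions about the surrounding project.
#
#     bot = commands.Bot(command_prefix="!")
#     bot.load_extension("cogs.admin")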
def setup(client):
client.add_cog(Admin(client)) | 36.493151 | 103 | 0.646021 | 371 | 2,664 | 4.574124 | 0.229111 | 0.065999 | 0.070713 | 0.053624 | 0.523276 | 0.470242 | 0.449028 | 0.449028 | 0.328816 | 0.314673 | 0 | 0.001401 | 0.195946 | 2,664 | 73 | 104 | 36.493151 | 0.79085 | 0 | 0 | 0.4 | 0 | 0.083333 | 0.251407 | 0.109193 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.033333 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eca2ad1491618b91b100bae9f35aedb4453e20e3 | 7,862 | py | Python | server/src/dashboard/helpers.py | openml/openml.org | dadc4f79c159058776500b204977a1062b927d4c | [
"BSD-3-Clause"
] | 16 | 2018-10-17T19:35:11.000Z | 2022-03-31T23:37:00.000Z | server/src/dashboard/helpers.py | PortML/openml.org | b526fae6c0ba2df0ccebf60f1dd703368ed394ec | [
"BSD-3-Clause"
] | 192 | 2018-10-17T17:31:03.000Z | 2022-03-27T23:55:51.000Z | server/src/dashboard/helpers.py | PortML/openml.org | b526fae6c0ba2df0ccebf60f1dd703368ed394ec | [
"BSD-3-Clause"
] | 8 | 2019-04-15T11:47:32.000Z | 2021-12-15T13:23:54.000Z | import logging
import time
from contextlib import contextmanager
import numpy as np
import pandas as pd
import scipy.stats
from openml import datasets, runs
from sklearn.model_selection import train_test_split
logger = logging.getLogger("dashboard")
logger.setLevel(logging.DEBUG)
def get_run_df(run_id: int):
run = runs.get_run(int(run_id), ignore_cache=True)
df = pd.DataFrame(run.fold_evaluations.items(), columns=["evaluations", "results"])
# Evaluations table
result_list = []
result_string = []
for result in df["results"]:
k_folds = list(result[0].values())
mean = str(np.round(np.mean(np.array(k_folds)), 3))
std = str(np.round(np.std(np.array(k_folds)), 3))
result_list.append(k_folds)
result_string.append(mean + " \u00B1 " + std)
df.drop(["results"], axis=1, inplace=True)
df["results"] = result_list
df["values"] = result_string
# Add some more rows indicating output prediction file name
df2 = pd.DataFrame(run.output_files.items(), columns=["evaluations", "results"])
df2["values"] = ""
df3 = pd.DataFrame(
{"task_type": run.task_type}.items(), columns=["evaluations", "results"]
)
df2["values"] = ""
df = df.append(df2)
df = df.append(df3)
df.to_pickle("cache/run" + str(run_id) + ".pkl")
return run, df
def clean_dataset(df):
df = df.loc[:, df.isnull().mean() < 0.8]
out = df.fillna(df.mode().iloc[0])
return out
def get_metadata(data_id: int):
data = datasets.get_dataset(data_id, download_data=False)
features = pd.DataFrame(
[vars(data.features[i]) for i in range(0, len(data.features))]
)
is_target = [
"true" if name == data.default_target_attribute else "false"
for name in features["name"]
]
features["Target"] = is_target
# Extract #categories
size = [
str(len(value)) if value is not None else " "
for value in features["nominal_values"]
]
features["nominal_values"].replace({None: " "}, inplace=True)
features["# categories"] = size
# choose features to be displayed
meta_features = features[
["name", "data_type", "number_missing_values", "# categories", "Target"]
]
meta_features.rename(
columns={
"name": "Attribute",
"data_type": "DataType",
"number_missing_values": "Missing values",
},
inplace=True,
)
meta_features.sort_values(by="Target", ascending=False, inplace=True)
if meta_features.shape[0] > 1000:
meta_features = meta_features[:1000]
return meta_features, data, (vars(data)["name"])
def get_data_metadata(data_id):
"""Download the dataset and get metadata
:param data_id: ID of the OpenML dataset
:return:
"""
# Get data in pandas df format
import time
start = time.time()
meta_features, data, _ = get_metadata(data_id)
x, y, categorical, attribute_names = data.get_data()
df = pd.DataFrame(x, columns=attribute_names)
if x.shape[0] < 50000:
df.to_pickle("cache/df" + str(data_id) + ".pkl")
else:
# create a subsample of data for large datasets
try:
target_feat = meta_features[meta_features["Target"] == "true"][
"Attribute"
].values[0]
except IndexError:
target_feat = None
pass
if x.shape[0] >= 50000 and target_feat:
df = clean_dataset(df)
if x.shape[0] < 100000:
sample_size = 0.5
elif 100000 <= x.shape[0] < 500000:
sample_size = 0.25
elif 500000 <= x.shape[0] < 1e6:
sample_size = 0.1
else:
sample_size = 0.05
x = df.drop(target_feat, axis=1)
y = df[target_feat]
try:
X_train, X_test, y_train, y_test = train_test_split(
x, y, stratify=y, test_size=sample_size
)
except ValueError:
X_train, X_test, y_train, y_test = train_test_split(
x, y, stratify=None, test_size=sample_size
)
x = X_test
x[target_feat] = y_test
df = pd.DataFrame(x, columns=attribute_names)
df.to_pickle("cache/df" + str(data_id) + ".pkl")
else:
df.to_pickle("cache/df" + str(data_id) + ".pkl")
meta_features = meta_features[
meta_features["Attribute"].isin(pd.Series(df.columns))
]
# Add entropy
numerical_features = list(
meta_features["Attribute"][meta_features["DataType"] == "numeric"]
)
nominal_features = list(
meta_features["Attribute"][meta_features["DataType"] == "nominal"]
)
entropy = []
for column in meta_features["Attribute"]:
if column in nominal_features:
count = df[column].value_counts()
ent = round(scipy.stats.entropy(count), 2)
entropy.append(ent)
else:
entropy.append(" ")
meta_features["Entropy"] = entropy
meta_features["Target"].replace({"false": " "}, inplace=True)
end = time.time()
logger.debug("time taken download data and find entropy " + str(end - start))
return df, meta_features, numerical_features, nominal_features
def get_highest_rank(df, leaderboard):
df.sort_values(by=["upload_time"], inplace=True)
scores = []
# highest_rank = {}
highest_score = {}
setup_ids = []
for index, row in df.iterrows():
users = list(highest_score.keys())
new_user = row["uploader_name"] not in users
if row["setup_id"] not in setup_ids or new_user:
setup_ids.append(row["setup_id"])
score = row["value"]
if new_user or (score not in scores):
scores.append(score)
scores.sort(reverse=True)
# rank = scores.index(score) + 1
if new_user or (highest_score[row["uploader_name"]] < score):
# highest_rank[row['uploader_name']] = rank
highest_score[row["uploader_name"]] = score
# if highest_rank[row['uploader_name']] > row['Rank']:
# highest_rank[row['uploader_name']] = row['Rank']
# leaderboard['highest_rank'] = list(highest_rank.values())
leaderboard["Top Score"] = list(highest_score.values())
return leaderboard
def splitDataFrameList(df, target_column):
"""df = dataframe to split,
target_column = the column containing the values to split
returns: a dataframe with each entry for the target column separated,
with each element moved into a new row.
The values in the other columns are duplicated across the newly divided rows.
"""
def splitListToRows(row, row_accumulator, target_column):
split_row = row[target_column]
for s in split_row:
new_row = row.to_dict()
new_row[target_column] = s
row_accumulator.append(new_row)
new_rows = []
df.apply(splitListToRows, axis=1, args=(new_rows, target_column))
new_df = pd.DataFrame(new_rows)
return new_df
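# Illustrative sketch (editor's addition, not part of the original module): exploding a
# column that holds lists into one row per element.  The column names are placeholders.
def _example_split():
    df = pd.DataFrame({"run_id": [1, 2], "flows": [["a", "b"], ["c"]]})
    return splitDataFrameList(df, "flows")  # one row per flow, run_id duplicated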
@contextmanager
def print_duration(name: str):
start = time.time()
yield
print(f"{name}: {time.time() - start:.3f}s")
def bin_numeric(df, column_name, output_name):
df[output_name] = pd.cut(df[column_name], 1000).astype(str)
cat = df[output_name].str.extract(r"\((.*),", expand=False).astype(float)
df["bin"] = pd.Series(cat)
df.sort_values(by="bin", inplace=True)
    # literal (non-regex) replacements; "(" would otherwise be parsed as a regex pattern
    df[output_name] = df[output_name].str.replace(",", " -", regex=False)
    df[output_name] = df[output_name].str.replace("(", "", regex=False)
    df[output_name] = df[output_name].str.replace("]", "", regex=False)
return df
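# Illustrative sketch (editor's addition, not part of the original module): binning a
# numeric column into interval labels with the helper above.  Column names are placeholders.
def _example_bin():
    df = pd.DataFrame({"NumberOfInstances": np.random.randint(10, 10000, size=5000)})
    return bin_numeric(df, "NumberOfInstances", "instances_bin")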
| 33.313559 | 87 | 0.60608 | 1,001 | 7,862 | 4.58042 | 0.226773 | 0.054962 | 0.020938 | 0.013086 | 0.169466 | 0.146783 | 0.115812 | 0.08615 | 0.063032 | 0.056707 | 0 | 0.01507 | 0.265708 | 7,862 | 235 | 88 | 33.455319 | 0.779144 | 0.111549 | 0 | 0.107955 | 0 | 0 | 0.091723 | 0.006067 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051136 | false | 0.005682 | 0.051136 | 0 | 0.142045 | 0.011364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eca4811c42e1ebee944b531840d43c283210f993 | 414 | py | Python | clinicInformation/serializers.py | MyMedicalAssistant/MyMedicalAssistant | e03758109167cef13efed7ee1d450dbd18a1fed7 | [
"MIT"
] | null | null | null | clinicInformation/serializers.py | MyMedicalAssistant/MyMedicalAssistant | e03758109167cef13efed7ee1d450dbd18a1fed7 | [
"MIT"
] | 1 | 2020-08-05T22:58:28.000Z | 2020-08-05T22:58:28.000Z | clinicInformation/serializers.py | MyMedicalAssistant/MyMedicalAssistant | e03758109167cef13efed7ee1d450dbd18a1fed7 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from .models import DoctorClinic
class DoctorClinicSerializer(serializers.ModelSerializer):
class Meta:
model = DoctorClinic
fields = (
'id',
'user',
'doctor_name',
'specialty',
'clinic_name',
'clinic_street',
'clinic_city',
'clinic_state',
'clinic_country',
'clinic_zipcode',
'doctor_id'
) | 19.714286 | 58 | 0.623188 | 37 | 414 | 6.72973 | 0.648649 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.275362 | 414 | 21 | 59 | 19.714286 | 0.83 | 0 | 0 | 0 | 0 | 0 | 0.26506 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eca4f5aa27488a24c929f36854a3145b768fa867 | 3,266 | py | Python | benwaonline/auth/core.py | goosechooser/benwaonline | e2879412aa6c3c230d25cd60072445165517b6b6 | [
"MIT"
] | null | null | null | benwaonline/auth/core.py | goosechooser/benwaonline | e2879412aa6c3c230d25cd60072445165517b6b6 | [
"MIT"
] | 16 | 2017-09-13T10:21:40.000Z | 2020-06-01T04:32:22.000Z | benwaonline/auth/core.py | goosechooser/benwaonline | e2879412aa6c3c230d25cd60072445165517b6b6 | [
"MIT"
] | null | null | null | import os
import requests
from flask import current_app
from jose import jwt, exceptions
from benwaonline.cache import cache
from benwaonline.exceptions import BenwaOnlineAuthError
ALGORITHMS = ['RS256']
def verify_token(token):
unverified_header = jwt.get_unverified_header(token)
rsa_key = match_key_id(unverified_header)
try:
payload = jwt.decode(
token,
rsa_key,
algorithms=ALGORITHMS,
audience=current_app.config['API_AUDIENCE'],
issuer=current_app.config['ISSUER']
)
except jwt.ExpiredSignatureError as err:
handle_expired_signature(err)
except jwt.JWTClaimsError as err:
handle_claims(err)
except exceptions.JWTError as err:
handle_jwt(err)
except Exception as err:
handle_non_jwt()
return payload
def match_key_id(unverified_header):
"""Checks if the RSA key id given in the header exists in the JWKS."""
jwks = get_jwks()
rsa_keys = [
rsa_from_jwks(key)
for key in jwks["keys"]
if key["kid"] == unverified_header["kid"]
]
try:
return rsa_keys[0]
except IndexError:
return None
def rsa_from_jwks(key):
return {
"kty": key["kty"],
"kid": key["kid"],
"use": key["use"],
"n": key["n"],
"e": key["e"]
}
def handle_claims(err):
"""Handles tokens with invalid claims"""
raise BenwaOnlineAuthError(
detail='{0}'.format(err),
title='invalid claim',
status=401
)
def handle_expired_signature(err):
"""Handles tokens with expired signatures."""
raise err
def handle_jwt(err):
"""Handles tokens with other jwt-related issues."""
raise BenwaOnlineAuthError(
detail='{0}'.format(err),
title='invalid signature',
status=401
)
def handle_non_jwt():
"""Handles everything else."""
raise BenwaOnlineAuthError(
title='invalid header',
detail='unable to parse authentication token'
)
@cache.cached(timeout=48 * 3600, key_prefix='jwks')
def get_jwks():
try:
msg = 'JWKS not cached - requesting from {}'.format(current_app.config['JWKS_URL'])
current_app.logger.debug(msg)
jwksurl = requests.get(current_app.config['JWKS_URL'], timeout=5)
except requests.exceptions.Timeout:
raise BenwaOnlineAuthError(
title='JWKS Request Timed Out',
detail='the authentication server is unavailable, or another issue has occured',
status=500
)
return jwksurl.json()
def has_scope(scope, token):
unverified_claims = jwt.get_unverified_claims(token)
token_scopes = unverified_claims['scope'].split()
    return scope in token_scopes
def refresh_token_request(client, refresh_token):
data = {
'grant_type': 'refresh_token',
'refresh_token': refresh_token,
'client_id': client.consumer_key,
'client_secret': client.consumer_secret
}
msg = 'Attempting to refresh token at {}'.format(client.base_url + client.access_token_url)
current_app.logger.debug(msg)
resp = requests.post(client.base_url + client.access_token_url, data=data)
return resp.json()
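# Illustrative sketch (editor's addition, not part of the original module): guarding a
# request with the helpers above.  The scope name and the header parsing are assumptions;
# the token is expected as a "Bearer" value in the Authorization header.
def _example_guard(request, scope='read:posts'):
    token = request.headers.get('Authorization', '').replace('Bearer ', '', 1)
    payload = verify_token(token)
    if not has_scope(scope, token):
        raise BenwaOnlineAuthError(title='insufficient scope', status=403)
    return payload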
| 27.91453 | 95 | 0.649418 | 393 | 3,266 | 5.223919 | 0.323155 | 0.034096 | 0.031174 | 0.029226 | 0.154895 | 0.108622 | 0.08378 | 0.051632 | 0 | 0 | 0 | 0.008918 | 0.244642 | 3,266 | 116 | 96 | 28.155172 | 0.823267 | 0.064299 | 0 | 0.139785 | 0 | 0 | 0.127063 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107527 | false | 0 | 0.064516 | 0.010753 | 0.247312 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eca718b2290753661c89ffe0f3b12919b0789cc0 | 3,430 | py | Python | scripts/commands/SynthSeg_predict.py | hvgazula/SynthSeg | cd597b080eb11bdd54e4e75b28b79b41b322c0c8 | [
"Apache-2.0"
] | 98 | 2020-03-03T20:54:34.000Z | 2022-03-28T17:40:30.000Z | scripts/commands/SynthSeg_predict.py | hvgazula/SynthSeg | cd597b080eb11bdd54e4e75b28b79b41b322c0c8 | [
"Apache-2.0"
] | 29 | 2020-07-02T10:03:48.000Z | 2022-03-31T16:48:24.000Z | scripts/commands/SynthSeg_predict.py | hvgazula/SynthSeg | cd597b080eb11bdd54e4e75b28b79b41b322c0c8 | [
"Apache-2.0"
] | 21 | 2020-05-18T14:27:20.000Z | 2022-03-31T08:27:43.000Z | """
If you use this code, please cite one of the SynthSeg papers:
https://github.com/BBillot/SynthSeg/blob/master/bibtex.bib
Copyright 2020 Benjamin Billot
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is
distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing permissions and limitations under the
License.
"""
"""This script enables to launch predictions with SynthSeg from the terminal."""
# print information
print('\n')
print('SynthSeg prediction')
print('\n')
# python imports
import os
import sys
from argparse import ArgumentParser
# add main folder to python path and import ./SynthSeg/predict.py
synthseg_home = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))))
sys.path.append(synthseg_home)
from SynthSeg.predict import predict
# parse arguments
parser = ArgumentParser()
# input/outputs
parser.add_argument("--i", type=str, dest='path_images',
help="Image(s) to segment. Can be a path to an image or to a folder.")
parser.add_argument("--o", type=str, dest="path_segmentations",
help="Segmentation output(s). Must be a folder if --i designates a folder.")
parser.add_argument("--post", type=str, default=None, dest="path_posteriors",
help="(optional) Posteriors output(s). Must be a folder if --i designates a folder.")
parser.add_argument("--resample", type=str, default=None, dest="path_resampled",
help="(optional) Resampled image(s). Must be a folder if --i designates a folder.")
parser.add_argument("--vol", type=str, default=None, dest="path_volumes",
help="(optional) Output CSV file with volumes for all structures and subjects.")
# parameters
parser.add_argument("--crop", nargs='+', type=int, default=192, dest="cropping",
help="(optional) Size of 3D patches to analyse. Default is 192.")
parser.add_argument("--threads", type=int, default=1, dest="threads",
help="(optional) Number of cores to be used. Default is 1.")
parser.add_argument("--cpu", action="store_true", help="(optional) Enforce running with CPU rather than GPU.")
# parse commandline
args = vars(parser.parse_args())
# enforce CPU processing if necessary
if args['cpu']:
print('using CPU, hiding all CUDA_VISIBLE_DEVICES')
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
del args['cpu']
# limit the number of threads to be used if running on CPU
import tensorflow as tf
tf.config.threading.set_intra_op_parallelism_threads(args['threads'])
del args['threads']
# default parameters
args['segmentation_labels'] = os.path.join(synthseg_home, 'data/labels_classes_priors/segmentation_labels.npy')
args['n_neutral_labels'] = 18
args['segmentation_label_names'] = os.path.join(synthseg_home, 'data/labels_classes_priors/segmentation_names.npy')
args['topology_classes'] = os.path.join(synthseg_home, 'data/labels_classes_priors/topological_classes.npy')
args['path_model'] = os.path.join(synthseg_home, 'models/SynthSeg.h5')
args['padding'] = args['cropping']
# call predict
predict(**args)
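# Illustrative usage (editor's addition, not part of the original script), matching the
# arguments defined above; the input and output paths are placeholders:
#
# python SynthSeg_predict.py --i /data/t1_scans --o /data/segmentations --threads 4 --cpu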
| 42.345679 | 115 | 0.730904 | 498 | 3,430 | 4.943775 | 0.409639 | 0.019496 | 0.05524 | 0.025995 | 0.200244 | 0.18156 | 0.149878 | 0.149878 | 0.1316 | 0.113323 | 0 | 0.007529 | 0.148105 | 3,430 | 80 | 116 | 42.875 | 0.835044 | 0.277843 | 0 | 0.05 | 0 | 0 | 0.433796 | 0.07272 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eca7aaf626d3547ce8f5558fdf53e3a5737336a0 | 6,238 | py | Python | src/bitcoin/tx.py | trevormcguire/pybitcoin | 32ff859f4e51e5349fb70ca3c8a8782fd8cad25f | [
"MIT"
] | 1 | 2022-02-09T16:06:49.000Z | 2022-02-09T16:06:49.000Z | src/bitcoin/tx.py | trevormcguire/pybitcoin | 32ff859f4e51e5349fb70ca3c8a8782fd8cad25f | [
"MIT"
] | 2 | 2022-02-09T17:59:57.000Z | 2022-02-09T18:00:27.000Z | src/bitcoin/tx.py | trevormcguire/pybitcoin | 32ff859f4e51e5349fb70ca3c8a8782fd8cad25f | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import *
from .utils import encode_int, encode_varint, decode_int, decode_varint, ensure_stream, base58, hash256
from .script import Script
from io import BytesIO
from .keys import PublicKey
from .ecdsa import Signature, validate_signature
def get_tx_idx(wallet, prev_tx):
pkhash = base58.decode(wallet) #get the pkhash of the wallet
for idx in range(len(prev_tx.outputs)):
out = prev_tx.outputs[idx]
out_pkhash = [x for x in out["script_pubkey"].commands if not type(x) is int]
if out_pkhash:
match = list(filter(lambda x: x, [x == pkhash for x in out_pkhash]))
if match:
return idx
return None
def validate_tx(utxo: Tx,
tx: Tx,
message: bytes,
public_key: PublicKey) -> bool:
"""
-----------
Verify a p2pkh Transaction
-----------
"""
prev_idx = tx.inputs[0]["prev_idx"] #index of the UTXO spent
script_sig = tx.inputs[0]["script_sig"].commands #ScriptSig -> <der_sig> <sec_pubkey>
script_pubkey = utxo.outputs[prev_idx]["script_pubkey"].commands #"locking" script of UTXO
input_amt = utxo.outputs[prev_idx]["amount"] #UTXO amount
output_amt = sum([out["amount"] for out in tx.outputs])
if output_amt > input_amt: #ensure no new bitcoins are created
return False
pkhash = public_key.encode(compressed=True, hash_160=True) #hash of the sender's public key
#remember we are using p2pkh, so our pkhash is the third element
if pkhash != script_pubkey[2]: #==OP_EQUALVERIFY
return False
sig, pk = Signature.decode(script_sig[0]), PublicKey.decode(script_sig[1])
if not validate_signature(p=pk,
message=message,
sig=sig): #==OP_CHECKSIG
return False
#To do: hook into UTXO set to check if Tx is unspent
return True
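# Illustrative sketch (editor's addition, not part of the original module): how the two
# helpers above might be combined.  The wallet address, previous transaction, spending
# transaction, signed message and public key are assumed to be supplied by the caller.
def _example_verify_spend(wallet, prev_tx, tx, message, public_key):
    idx = get_tx_idx(wallet, prev_tx)  # which output of prev_tx pays the wallet
    if idx is None:
        return False
    return validate_tx(utxo=prev_tx, tx=tx, message=message, public_key=public_key)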
class Tx(object):
"""
Object Representing a Bitcoin Transaction
"""
def __init__(self,
version: int,
inputs: List[dict],
outputs: List[dict],
locktime: int = 0):
self.version = version
self.inputs = inputs
self.outputs = outputs
self.locktime = locktime
def __repr__(self):
s = f"Version: {self.version}\nNum Inputs: {len(self.inputs)}\nInputs:\n"""
for i in self.inputs:
s += f'{i["prev_tx"].hex()} - {i["script_sig"]}\n'
s += f'Index: {i["prev_idx"]}\n'
s += f"Num Outputs: {len(self.outputs)}\nOutputs:\n"
for o in self.outputs:
s += f'{o["amount"]} SAT - {o["script_pubkey"]}\n'
s += f'Locktime: {self.locktime}'
return s
def encode(self, sig_idx: int = -1):
#version
out = [encode_int(self.version, 4)] #4 byte little-endian
#encode inputs
out += [encode_varint(len(self.inputs))]
out += [self.encode_inputs(sig_idx=sig_idx)]
#encode outputs
out += [encode_varint(len(self.outputs))]
out += [self.encode_outputs()]
#locktime and SIGHASH
out += [encode_int(self.locktime, 4)]
out += [encode_int(1, 4) if sig_idx != -1 else b""] #SIGHASH_ALL
return b"".join(out)
def encode_inputs(self, sig_idx: int = -1):
"""
prev_tx is encoded to be little endian
prev_idx, seq are 4 byte little endian encoded integers
script_sig uses Script encoding
"""
out = []
for idx in range(len(self.inputs)):
inp = self.inputs[idx]
if sig_idx == -1 or sig_idx == idx:
script_sig = inp["script_sig"].encode()
else:
script_sig = Script([]).encode()
out += [
inp["prev_tx"][::-1], #reverse bytes
encode_int(inp["prev_idx"], 4),
script_sig,
encode_int(inp["seq"], 4)
]
return b"".join(out)
def encode_outputs(self):
out = []
for o in self.outputs:
encoded = [
encode_int(o["amount"], 8),
o["script_pubkey"].encode()
]
out += encoded
return b"".join(out)
def get_id(self):
return hash256(self.encode())[::-1].hex() #little-endian, hexadecimal
@classmethod
def decode(cls, b: Union[bytes, BytesIO]) -> Tx:
"""
Decodes the raw bytes of a transaction into a Tx object
"""
b = ensure_stream(b)
segwit, witness = False, []
version = decode_int(b, 4)
num_inputs = decode_varint(b)
if num_inputs == 0:
assert b.read(1) == b"\x01" #segwit marker -- need to read one more
num_inputs = decode_varint(b)
segwit = True
inputs = []
for n in range(num_inputs):
prev_tx = b.read(32)[::-1] #little to big endian
prev_idx = decode_int(b, 4)
script_sig = Script.decode(b)
seq = decode_int(b, 4)
inputs.append({"prev_tx": prev_tx,
"prev_idx": prev_idx,
"script_sig": script_sig,
"seq": seq})
num_outputs = decode_varint(b)
outputs = []
for n in range(num_outputs):
amt = decode_int(b, 8)
script_pubkey = Script.decode(b)
outputs.append({"amount": amt,
"script_pubkey": script_pubkey})
if segwit:
for i in inputs:
num_items = decode_varint(b)
items = []
for _ in range(num_items):
item_len = decode_varint(b)
if item_len == 0:
items.append(0)
else:
items.append(b.read(item_len))
witness.append(items)
locktime = decode_int(b, 4)
return cls(version, inputs, outputs, locktime) #can include segwit, witness here
| 34.655556 | 103 | 0.534146 | 765 | 6,238 | 4.197386 | 0.223529 | 0.036437 | 0.015571 | 0.013703 | 0.08502 | 0.014326 | 0 | 0 | 0 | 0 | 0 | 0.012358 | 0.351395 | 6,238 | 179 | 104 | 34.849162 | 0.781265 | 0.131292 | 0 | 0.106061 | 0 | 0 | 0.074962 | 0.015483 | 0 | 0 | 0 | 0 | 0.007576 | 1 | 0.068182 | false | 0 | 0.05303 | 0.007576 | 0.219697 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eca7bb7d5f66ac5cb221611f5b01d3bad9a853f9 | 637 | py | Python | src/cogs/utils/sendEmbed.py | kugiyasan/discordBot | 647fbcaa8686e99774eddeb57359730196a4f65f | [
"MIT"
] | 3 | 2020-07-05T21:37:07.000Z | 2021-09-21T11:11:45.000Z | src/cogs/utils/sendEmbed.py | kugiyasan/discordBot | 647fbcaa8686e99774eddeb57359730196a4f65f | [
"MIT"
] | null | null | null | src/cogs/utils/sendEmbed.py | kugiyasan/discordBot | 647fbcaa8686e99774eddeb57359730196a4f65f | [
"MIT"
] | null | null | null | from typing import Any
import discord
from discord.ext import commands
from ..mofupoints import incrementEmbedCounter
async def sendEmbed(
ctx: commands.Context, url: str, localImageFile: discord.File = None, **kwargs: Any
) -> None:
print(url)
if hasattr(ctx, "author"):
incrementEmbedCounter(ctx.author)
embed = discord.Embed(color=discord.Colour.gold(), **kwargs)
embed.set_image(url=url)
try:
await ctx.send(embed=embed, file=localImageFile)
except discord.Forbidden: # we don't have permission to send embed
await ctx.send(url, file=localImageFile)
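# Illustrative sketch (editor's addition, not part of the original module): a hypothetical
# caller inside a command.  The URL is a placeholder; extra keyword arguments are forwarded
# to discord.Embed (e.g. title=).
async def _example_command(ctx: commands.Context) -> None:
    await sendEmbed(ctx, "https://example.com/fox.png", title="mofu")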
| 26.541667 | 88 | 0.681319 | 77 | 637 | 5.623377 | 0.532468 | 0.04157 | 0.055427 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.21978 | 637 | 23 | 89 | 27.695652 | 0.871227 | 0.059655 | 0 | 0 | 0 | 0 | 0.010453 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecaa497a69908d5f08791b6cdfaef3362767f993 | 11,449 | py | Python | lib/ithor_env.py | XiaoLiSean/Cognitive-Map | 6b2019e5b3a46902b06c8d5d1e86b39425042de9 | [
"MIT"
] | null | null | null | lib/ithor_env.py | XiaoLiSean/Cognitive-Map | 6b2019e5b3a46902b06c8d5d1e86b39425042de9 | [
"MIT"
] | null | null | null | lib/ithor_env.py | XiaoLiSean/Cognitive-Map | 6b2019e5b3a46902b06c8d5d1e86b39425042de9 | [
"MIT"
] | 1 | 2021-11-04T06:25:31.000Z | 2021-11-04T06:25:31.000Z | # Module for iTHOR env set up and simple navigation
from ai2thor.controller import Controller
from termcolor import colored
from dijkstar import Graph, find_path
from lib.params import SIM_WINDOW_HEIGHT, SIM_WINDOW_WIDTH, VISBILITY_DISTANCE, FIELD_OF_VIEW
import matplotlib.pyplot as plt
import numpy as np
import time, copy, sys
class Agent_Sim():
def __init__(self, scene_type='Kitchen', scene_num=1, scene_name=None, grid_size=0.25, rotation_step=10, sleep_time=0.05, ToggleMapView=False):
self._scene_type = scene_type
self._scene_num = scene_num
self._grid_size = grid_size
self._rotation_step = rotation_step
self._sleep_time = sleep_time
# Kitchens: FloorPlan1 - FloorPlan30
# Living rooms: FloorPlan201 - FloorPlan230
# Bedrooms: FloorPlan301 - FloorPlan330
        # Bathrooms: FloorPlan401 - FloorPlan430
if (scene_num<1) or (scene_num>30):
sys.stderr.write(colored('ERROR: ','red')
+ "Expect scene_num within [1,30] while get '{}'\n".format(scene_num))
if scene_type == 'Kitchen':
add_on = 0
elif scene_type == 'Living room':
add_on = 200
elif scene_type == 'Bedroom':
add_on = 300
elif scene_type == 'Bathroom':
add_on = 400
else:
sys.stderr.write(colored('ERROR: ','red')
+ "Expect scene_type 'Kitchen', 'Living room', 'Bedroom' or 'Bathroom' while get '{}'\n".format(scene_type))
sys.exit(1)
if scene_name is None:
self._scene_name = 'FloorPlan' + str(add_on + self._scene_num)
else:
self._scene_name = scene_name
self._controller = Controller(scene=self._scene_name, gridSize=self._grid_size, visibilityDistance=VISBILITY_DISTANCE, fieldOfView=FIELD_OF_VIEW)
self._controller.step('ChangeResolution', x=SIM_WINDOW_WIDTH, y=SIM_WINDOW_HEIGHT) # Change simulation window size
        if ToggleMapView: # Top view of the map to see the objects layout. Issue: no SG can be generated
self._controller.step({"action": "ToggleMapView"})
self._event = self._controller.step('Pass')
self._start_time = time.time()
self._action_type = {'MOVE_FORWARD': 1, 'STAY_IDLE' :2, 'TURN_RIGHT' :3, 'TURN_LEFT': 4}
def update_event(self):
self._event = self._controller.step('Pass')
def get_agent_position(self):
self.update_event()
return self._event.metadata['agent']['position']
def get_agent_rotation(self):
self.update_event()
return self._event.metadata['agent']['rotation']
def get_reachable_coordinate(self):
self._event = self._controller.step(action='GetReachablePositions')
return self._event.metadata['actionReturn']
def get_object(self):
self.update_event()
return self._event.metadata['objects']
def unit_move(self):
self._event = self._controller.step(action='MoveAhead')
return 'MOVE_FORWARD'
def unit_rotate(self, degree):
if np.abs(degree) < 2:
print(colored('INFO: ','blue') + 'Robot rotate for {} degree which is less than 2 deg'.format(degree))
return None
degree_corrected = degree
while degree_corrected > 180:
degree_corrected -= 360
while degree_corrected < -180:
degree_corrected += 360
if degree > 0:
self._event = self._controller.step(action='RotateRight', degrees=np.abs(degree_corrected))
return 'TURN_RIGHT'
else:
self._event = self._controller.step(action='RotateLeft', degrees=np.abs(degree_corrected))
return 'TURN_LEFT'
# Assume goal is {'position': position, 'rotation': rotation} where position and rotation are dict or list
def move_towards(self, goal):
self.update_event()
agent_position = self.get_agent_position()
agent_rotation = self.get_agent_rotation()
agent_position = list(agent_position.values())
agent_rotation = list(agent_rotation.values())
goal_position = goal['position']
goal_rotation = goal['rotation']
if isinstance(goal_position, dict):
goal_position = list(goal_position.values())
goal_rotation = list(goal_rotation.values())
heading_angle = np.arctan2((goal_position[0] - agent_position[0]), (goal_position[2] - agent_position[2])) * 180 / np.pi
heading_angle_list = copy.deepcopy(agent_rotation)
heading_angle_list[1] = heading_angle
position_error = list(map(lambda x, y: np.abs(x - y), goal_position, agent_position))
rotation_error = list(map(lambda x, y: x - y, heading_angle_list, agent_rotation))
rotation_error_abs = list(map(lambda x: np.abs(x), rotation_error))
rotation_error_corrected = rotation_error[rotation_error_abs.index(max(rotation_error_abs))]
while rotation_error_corrected > 180:
rotation_error_corrected -= 360
while rotation_error_corrected < -180:
rotation_error_corrected += 360
if np.linalg.norm(np.array(position_error)) > self._grid_size * 1.10:
sys.stderr.write(colored('ERROR: ','red')
+ 'Moving step {} greater than grid size {}'.format(position_error, self._grid_size))
sys.exit(1)
elif np.linalg.norm(np.array(position_error)) < self._grid_size * 0.10:
sys.stderr.write(colored('ERROR: ','red')
+ 'Moving distance {} too small'.format(position_error))
sys.exit(1)
rotate_steps = round(np.abs(rotation_error_corrected / self._rotation_step))
for _ in range(rotate_steps):
time.sleep(self._sleep_time)
action = self.unit_rotate(self._rotation_step * np.sign(rotation_error_corrected))
action = self.unit_rotate((rotation_error_corrected - rotate_steps * self._rotation_step * np.sign(rotation_error_corrected)))
time.sleep(self._sleep_time)
action = self.unit_move()
class Dumb_Navigetor():
def __init__(self, agent_sim):
self._map = {}
self._point_list = []
self._grid_size = agent_sim._grid_size
self._point_num = 0
self._agent_sim = agent_sim
self._starting_point = self._agent_sim.get_agent_position()
self._coordinate_dict = self._agent_sim.get_reachable_coordinate()
self._map_searched = [True] * len(self._coordinate_dict)
self._build_map()
def _build_map(self):
self._point_list.append(list(self._starting_point.values()))
self._map[self._point_num] = []
self._map_searched[self._point_num] = True
self._point_num += 1
for point_adding in self._coordinate_dict:
if self._starting_point == point_adding:
continue
self._point_list.append(list(point_adding.values()))
self._point_num += 1
self._map[self._point_num - 1] = []
for point_added_index in range(self._point_num - 1):
point_added = self._point_list[point_added_index]
distance = np.linalg.norm(np.array(list(map(lambda x, y: x - y, point_added, self._point_list[self._point_num - 1]))))
if distance < self._grid_size + 0.03 * self._grid_size:
self._map[self._point_num - 1].append(point_added_index)
self._map[point_added_index].append(self._point_num - 1)
return
# Assume goal_position is dict
def dumb_navigate(self, goal_position, server=None, comfirmed=None):
print(colored('Dumb navigate to: {}','cyan').format(goal_position))
#self._controller.step(action='TeleportFull', x=0.999, y=1.01, z=-0.3541, rotation=dict(x=0.0, y=90.0, z=0.0), horizon=30.0)
        # if server and comfirmed are not None --> this function is used as a server node
graph = Graph()
nav_starting_point = self._agent_sim.get_agent_position()
print(nav_starting_point)
nav_starting_point = list(nav_starting_point.values())
for point in self._point_list:
if np.linalg.norm(np.array(list(map(lambda x, y: x - y, point, nav_starting_point)))) < 0.25 * self._grid_size:
nav_starting_point_index = self._point_list.index(point)
break
# nav_starting_point_index = self._point_list.index(nav_starting_point)
if isinstance(goal_position, dict):
goal_point = list(goal_position.values())
goal_point_index = None
for point in self._point_list:
if np.linalg.norm(np.array(list(map(lambda x, y: x - y, point, goal_point)))) < 0.25 * self._grid_size:
goal_point_index = self._point_list.index(point)
break
if goal_point_index is None or nav_starting_point_index is None:
sys.stderr.write(colored('ERROR: ','red') + 'No matching point in map' + '\n')
return
connected_point_index = self._map[goal_point_index]
nearest_reachable_index = None
goal_in_existing_map = False
if self._map_searched[goal_point_index]:
nearest_reachable_index = goal_point_index
goal_in_existing_map = True
else:
for index in connected_point_index:
if self._map_searched[index]:
nearest_reachable_index = index
break
if nearest_reachable_index is None:
sys.stderr.write(colored('ERROR: ','red') + 'Can not reach the point by existing map' + '\n')
return
for index in range(len(self._map)):
for connected_index in range(len(self._map[index])):
if self._map_searched[self._map[index][connected_index]]:
graph.add_edge(index, self._map[index][connected_index], 1)
result = find_path(graph, nav_starting_point_index, nearest_reachable_index)
path = result.nodes
for mid_point_index in range(1, len(path)):
            # This navigator serves as a server node if server is not None
if server is not None:
objs = [obj for obj in self._agent_sim._event.metadata['objects'] if obj['visible']]
server.send(objs)
print(colored('Server: ','cyan') + 'Sent Data from navigator at mid_point_index {}'.format(mid_point_index))
while True: # Waiting for client to confirm
if comfirmed.value:
break
comfirmed.value = 0 # Turn off the switch
# Action
mid_point_pose = {'position': [], 'rotation': []}
mid_point_pose['position'] = copy.deepcopy(self._point_list[path[mid_point_index]])
mid_point_pose['rotation'] = [0, 0, 0]
self._agent_sim.move_towards(mid_point_pose)
# Terminate the service by sending 'END'
if server is not None:
server.send('END')
print(colored('Server: ','cyan') + 'END')
if not goal_in_existing_map:
self._agent_sim.move_towards({'position': copy.deepcopy(self._point_list[goal_point_index]), 'rotation': [0, 0, 0]})
self._map_searched[goal_point_index] = True
return
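# Illustrative sketch (editor's addition, not part of the original module): spawning the
# simulator and navigating to one of the reachable grid points.  The scene and the goal
# index are placeholders.
def _example_navigation():
    agent = Agent_Sim(scene_type='Kitchen', scene_num=1)
    navigator = Dumb_Navigetor(agent)
    goal = agent.get_reachable_coordinate()[10]  # dict with 'x', 'y', 'z'
    navigator.dumb_navigate(goal)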
| 44.548638 | 153 | 0.633418 | 1,450 | 11,449 | 4.698621 | 0.177931 | 0.029062 | 0.022897 | 0.013357 | 0.349479 | 0.28064 | 0.199031 | 0.162777 | 0.079994 | 0.038309 | 0 | 0.017043 | 0.262032 | 11,449 | 256 | 154 | 44.722656 | 0.789324 | 0.075552 | 0 | 0.176768 | 0 | 0 | 0.077986 | 0.001988 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0.010101 | 0.035354 | 0 | 0.166667 | 0.025253 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecac3170910e2522fe20eaaf7efb5d1875ecf0af | 12,033 | py | Python | tests/test_sendgrid_inbound.py | tiltec/django-anymail | 508a3a073f1b51c453bade2532627a72e204520b | [
"BSD-3-Clause"
] | null | null | null | tests/test_sendgrid_inbound.py | tiltec/django-anymail | 508a3a073f1b51c453bade2532627a72e204520b | [
"BSD-3-Clause"
] | null | null | null | tests/test_sendgrid_inbound.py | tiltec/django-anymail | 508a3a073f1b51c453bade2532627a72e204520b | [
"BSD-3-Clause"
] | null | null | null | import json
from io import BytesIO
from textwrap import dedent
from django.test import tag
from mock import ANY
from anymail.inbound import AnymailInboundMessage
from anymail.signals import AnymailInboundEvent
from anymail.webhooks.sendgrid import SendGridInboundWebhookView
from .utils import dedent_bytes, sample_image_content, sample_email_content
from .webhook_cases import WebhookTestCase
@tag('sendgrid')
class SendgridInboundTestCase(WebhookTestCase):
def test_inbound_basics(self):
raw_event = {
'headers': dedent("""\
Received: from mail.example.org by mx987654321.sendgrid.net ...
Received: by mail.example.org for <test@inbound.example.com> ...
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=example.org; ...
MIME-Version: 1.0
Received: by 10.10.1.71 with HTTP; Wed, 11 Oct 2017 18:31:04 -0700 (PDT)
From: "Displayed From" <from+test@example.org>
Date: Wed, 11 Oct 2017 18:31:04 -0700
Message-ID: <CAEPk3R+4Zr@mail.example.org>
Subject: Test subject
To: "Test Inbound" <test@inbound.example.com>, other@example.com
Cc: cc@example.com
Content-Type: multipart/mixed; boundary="94eb2c115edcf35387055b61f849"
"""),
'from': 'Displayed From <from+test@example.org>',
'to': 'Test Inbound <test@inbound.example.com>, other@example.com',
'subject': "Test subject",
'text': "Test body plain",
'html': "<div>Test body html</div>",
'attachments': "0",
'charsets': '{"to":"UTF-8","html":"UTF-8","subject":"UTF-8","from":"UTF-8","text":"UTF-8"}',
'envelope': '{"to":["test@inbound.example.com"],"from":"envelope-from@example.org"}',
'sender_ip': "10.10.1.71",
'dkim': "{@example.org : pass}", # yep, SendGrid uses not-exactly-json for this field
'SPF': "pass",
'spam_score': "1.7",
'spam_report': 'Spam detection software, running on the system "mx987654321.sendgrid.net", '
'has identified this incoming email as possible spam...',
}
response = self.client.post('/anymail/sendgrid/inbound/', data=raw_event)
self.assertEqual(response.status_code, 200)
kwargs = self.assert_handler_called_once_with(self.inbound_handler, sender=SendGridInboundWebhookView,
event=ANY, esp_name='SendGrid')
# AnymailInboundEvent
event = kwargs['event']
self.assertIsInstance(event, AnymailInboundEvent)
self.assertEqual(event.event_type, 'inbound')
self.assertIsNone(event.timestamp)
self.assertIsNone(event.event_id)
self.assertIsInstance(event.message, AnymailInboundMessage)
self.assertEqual(event.esp_event.POST.dict(), raw_event) # esp_event is a Django HttpRequest
# AnymailInboundMessage - convenience properties
message = event.message
self.assertEqual(message.from_email.display_name, 'Displayed From')
self.assertEqual(message.from_email.addr_spec, 'from+test@example.org')
self.assertEqual([str(e) for e in message.to],
['Test Inbound <test@inbound.example.com>', 'other@example.com'])
self.assertEqual([str(e) for e in message.cc],
['cc@example.com'])
self.assertEqual(message.subject, 'Test subject')
self.assertEqual(message.date.isoformat(" "), "2017-10-11 18:31:04-07:00")
self.assertEqual(message.text, 'Test body plain')
self.assertEqual(message.html, '<div>Test body html</div>')
self.assertEqual(message.envelope_sender, 'envelope-from@example.org')
self.assertEqual(message.envelope_recipient, 'test@inbound.example.com')
self.assertIsNone(message.stripped_text)
self.assertIsNone(message.stripped_html)
self.assertIsNone(message.spam_detected) # SendGrid doesn't give a simple yes/no; check the score yourself
self.assertEqual(message.spam_score, 1.7)
# AnymailInboundMessage - other headers
self.assertEqual(message['Message-ID'], "<CAEPk3R+4Zr@mail.example.org>")
self.assertEqual(message.get_all('Received'), [
"from mail.example.org by mx987654321.sendgrid.net ...",
"by mail.example.org for <test@inbound.example.com> ...",
"by 10.10.1.71 with HTTP; Wed, 11 Oct 2017 18:31:04 -0700 (PDT)",
])
def test_attachments(self):
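        # Simulates SendGrid's multipart form post with three attachments: a plain-text
        # file, an inline PNG referenced by content-id, and a nested message/rfc822 part.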
att1 = BytesIO('test attachment'.encode('utf-8'))
att1.name = 'test.txt'
image_content = sample_image_content()
att2 = BytesIO(image_content)
att2.name = 'image.png'
email_content = sample_email_content()
att3 = BytesIO(email_content)
att3.content_type = 'message/rfc822; charset="us-ascii"'
raw_event = {
'headers': '',
'attachments': '3',
'attachment-info': json.dumps({
"attachment3": {"filename": "", "name": "", "charset": "US-ASCII", "type": "message/rfc822"},
"attachment2": {"filename": "image.png", "name": "image.png", "type": "image/png",
"content-id": "abc123"},
"attachment1": {"filename": "test.txt", "name": "test.txt", "type": "text/plain"},
}),
'content-ids': '{"abc123": "attachment2"}',
'attachment1': att1,
'attachment2': att2, # inline
'attachment3': att3,
}
response = self.client.post('/anymail/sendgrid/inbound/', data=raw_event)
self.assertEqual(response.status_code, 200)
kwargs = self.assert_handler_called_once_with(self.inbound_handler, sender=SendGridInboundWebhookView,
event=ANY, esp_name='SendGrid')
event = kwargs['event']
message = event.message
attachments = message.attachments # AnymailInboundMessage convenience accessor
self.assertEqual(len(attachments), 2)
self.assertEqual(attachments[0].get_filename(), 'test.txt')
self.assertEqual(attachments[0].get_content_type(), 'text/plain')
self.assertEqual(attachments[0].get_content_text(), 'test attachment')
self.assertEqual(attachments[1].get_content_type(), 'message/rfc822')
self.assertEqualIgnoringHeaderFolding(attachments[1].get_content_bytes(), email_content)
inlines = message.inline_attachments
self.assertEqual(len(inlines), 1)
inline = inlines['abc123']
self.assertEqual(inline.get_filename(), 'image.png')
self.assertEqual(inline.get_content_type(), 'image/png')
self.assertEqual(inline.get_content_bytes(), image_content)
def test_inbound_mime(self):
# SendGrid has an option to send the full, raw MIME message
raw_event = {
'email': dedent("""\
From: A tester <test@example.org>
Date: Thu, 12 Oct 2017 18:03:30 -0700
Message-ID: <CAEPk3RKEx@mail.example.org>
Subject: Raw MIME test
To: test@inbound.example.com
MIME-Version: 1.0
Content-Type: multipart/alternative; boundary="94eb2c05e174adb140055b6339c5"
--94eb2c05e174adb140055b6339c5
Content-Type: text/plain; charset="UTF-8"
Content-Transfer-Encoding: quoted-printable
It's a body=E2=80=A6
--94eb2c05e174adb140055b6339c5
Content-Type: text/html; charset="UTF-8"
Content-Transfer-Encoding: quoted-printable
<div dir=3D"ltr">It's a body=E2=80=A6</div>
--94eb2c05e174adb140055b6339c5--
"""),
'from': 'A tester <test@example.org>',
'to': 'test@inbound.example.com',
'subject': "Raw MIME test",
'charsets': '{"to":"UTF-8","subject":"UTF-8","from":"UTF-8"}',
'envelope': '{"to":["test@inbound.example.com"],"from":"envelope-from@example.org"}',
'sender_ip': "10.10.1.71",
'dkim': "{@example.org : pass}", # yep, SendGrid uses not-exactly-json for this field
'SPF': "pass",
'spam_score': "1.7",
'spam_report': 'Spam detection software, running on the system "mx987654321.sendgrid.net", '
'has identified this incoming email as possible spam...',
}
response = self.client.post('/anymail/sendgrid/inbound/', data=raw_event)
self.assertEqual(response.status_code, 200)
kwargs = self.assert_handler_called_once_with(self.inbound_handler, sender=SendGridInboundWebhookView,
event=ANY, esp_name='SendGrid')
event = kwargs['event']
message = event.message
self.assertEqual(message.envelope_sender, 'envelope-from@example.org')
self.assertEqual(message.envelope_recipient, 'test@inbound.example.com')
self.assertEqual(message.subject, 'Raw MIME test')
self.assertEqual(message.text, "It's a body\N{HORIZONTAL ELLIPSIS}\n")
self.assertEqual(message.html, """<div dir="ltr">It's a body\N{HORIZONTAL ELLIPSIS}</div>\n""")
def test_inbound_charsets(self):
# Captured (sanitized) from actual SendGrid inbound webhook payload 7/2020,
# using a test message constructed with a variety of charsets:
raw_post = dedent_bytes(b"""\
--xYzZY
Content-Disposition: form-data; name="headers"
Date: Fri, 24 Jul 2020 16:43:46 UTC
To: =?utf-8?q?R=C3=A9cipiendaire_pr=C3=A9cieux?= <inbound@sg.example.com>
From: =?utf-8?q?Op=C3=A9rateur?= de test <sender@example.com>
Subject: =?cp850?q?Como_usted_pidi=A2?=
--xYzZY
Content-Disposition: form-data; name="subject"
Como usted pidi\xa2
--xYzZY
Content-Disposition: form-data; name="to"
R\xc3\xa9cipiendaire pr\xc3\xa9cieux <inbound@sg.example.com>
--xYzZY
Content-Disposition: form-data; name="html"
<p>\xbfEsto se ve como esperabas?</p>
--xYzZY
Content-Disposition: form-data; name="from"
Op\xc3\xa9rateur de test <sender@example.com>
--xYzZY
Content-Disposition: form-data; name="text"
Test the ESP\x92s inbound charset handling\x85
--xYzZY
Content-Disposition: form-data; name="charsets"
{"to":"UTF-8","cc":"UTF-8","html":"iso-8859-1","subject":"cp850","from":"UTF-8","text":"windows-1252"}
--xYzZY--
""").replace(b"\n", b"\r\n")
response = self.client.post('/anymail/sendgrid/inbound/', data=raw_post,
content_type="multipart/form-data; boundary=xYzZY")
self.assertEqual(response.status_code, 200)
kwargs = self.assert_handler_called_once_with(self.inbound_handler, sender=SendGridInboundWebhookView,
event=ANY, esp_name='SendGrid')
event = kwargs['event']
message = event.message
self.assertEqual(message.from_email.display_name, "Opérateur de test")
self.assertEqual(message.from_email.addr_spec, "sender@example.com")
self.assertEqual(len(message.to), 1)
self.assertEqual(message.to[0].display_name, "Récipiendaire précieux")
self.assertEqual(message.to[0].addr_spec, "inbound@sg.example.com")
self.assertEqual(message.subject, "Como usted pidió")
self.assertEqual(message.text, "Test the ESP’s inbound charset handling…")
self.assertEqual(message.html, "<p>¿Esto se ve como esperabas?</p>")
| 49.72314 | 115 | 0.605668 | 1,350 | 12,033 | 5.32 | 0.207407 | 0.085631 | 0.070454 | 0.032164 | 0.511417 | 0.466583 | 0.402673 | 0.350599 | 0.3066 | 0.274436 | 0 | 0.044272 | 0.262279 | 12,033 | 241 | 116 | 49.929461 | 0.764335 | 0.045458 | 0 | 0.287805 | 0 | 0.04878 | 0.461431 | 0.123769 | 0 | 0 | 0 | 0 | 0.258537 | 1 | 0.019512 | false | 0.019512 | 0.04878 | 0 | 0.073171 | 0.009756 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecad91fbf1d2b45d2f19da435e1adb82e6ade7a6 | 233 | py | Python | scripts/randomizer.py | cmuell89/DS-A | a408252e1993423c510c3cbe49501acc6916e91f | [
"MIT"
] | null | null | null | scripts/randomizer.py | cmuell89/DS-A | a408252e1993423c510c3cbe49501acc6916e91f | [
"MIT"
] | null | null | null | scripts/randomizer.py | cmuell89/DS-A | a408252e1993423c510c3cbe49501acc6916e91f | [
"MIT"
] | null | null | null | # coding=utf-8
from random import shuffle
with open("../data/words.txt") as f:
words = f.read().splitlines()
shuffle(words)
with open("../data/random_words.txt", "w") as f:
for item in words:
f.write("%s\n" % item) | 21.181818 | 48 | 0.622318 | 38 | 233 | 3.789474 | 0.605263 | 0.111111 | 0.166667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005291 | 0.188841 | 233 | 11 | 49 | 21.181818 | 0.756614 | 0.051502 | 0 | 0 | 0 | 0 | 0.209091 | 0.109091 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecaf99cf8d95777800b2e98c8ecd1054419889eb | 1,839 | py | Python | tests/Commands/test_SystemCommands.py | AndroidKitKat/TerminusBrowser | 18d358d7d14bea00b538f9b2d8b9cf0f063d8b5e | [
"BSD-3-Clause"
] | null | null | null | tests/Commands/test_SystemCommands.py | AndroidKitKat/TerminusBrowser | 18d358d7d14bea00b538f9b2d8b9cf0f063d8b5e | [
"BSD-3-Clause"
] | null | null | null | tests/Commands/test_SystemCommands.py | AndroidKitKat/TerminusBrowser | 18d358d7d14bea00b538f9b2d8b9cf0f063d8b5e | [
"BSD-3-Clause"
] | null | null | null | from commandChanVim import urwidView
from Commands.SystemCommands import systemCommands
from Frames.reddit.indexFrame import RedditIndexFrame
from Frames.fchan.indexFrame import IndexFrame
from Frames.defaultFrame import DefaultFrame
from customeTypes import SITE
import pytest
@pytest.fixture
def view():
view = urwidView(True)
return view
test_boards = [
('add 4chan /r9k/', ['/r9k/']),
('add 4chan /r9k/ /s4s/', ['/r9k/', '/s4s/'])
]
@pytest.mark.parametrize("test_input, expected", test_boards)
def test_addChan(test_input, expected, view):
systemCommands(test_input, view)
result = all(ex in view.cfg.deep_get(SITE.FCHAN, 'boards') for ex in expected)
assert result
test_set = [
('set test ahoy'),
('set REDDIT username test')
]
@pytest.mark.parametrize("test_input", test_set)
def test_setCommand(test_input, view):
systemCommands(test_input, view)
cmd = test_input.split()
if len(cmd) == 3:
assert view.cfg.get(cmd[1]) == cmd[2]
else:
assert view.cfg.deep_get(cmd[1], cmd[2]) == cmd[3]
test_subs = [
('add reddit linuxgaming', ['linuxgaming']),
('add reddit linuxgaming sysadmin', ['linuxgaming', 'sysadmin'])
]
@pytest.mark.parametrize("test_input, expected", test_subs)
def test_addReddit(test_input, expected, view):
systemCommands(test_input, view)
result = all(ex in view.cfg.deep_get(SITE.REDDIT, 'boards') for ex in expected)
assert result
test_views = [
('view reddit', [SITE.REDDIT, RedditIndexFrame]),
('view 4chan', [SITE.FCHAN, IndexFrame]),
('view too long', [None, DefaultFrame])
]
@pytest.mark.parametrize("test_input, expected", test_views)
def test_view(test_input, expected, view):
systemCommands(test_input, view)
assert type(view.currFocusView.frame) == expected[1]
| 29.190476 | 83 | 0.693855 | 234 | 1,839 | 5.333333 | 0.269231 | 0.09375 | 0.081731 | 0.080128 | 0.391827 | 0.325321 | 0.325321 | 0.224359 | 0.126603 | 0.126603 | 0 | 0.010499 | 0.171289 | 1,839 | 62 | 84 | 29.66129 | 0.808399 | 0 | 0 | 0.12 | 0 | 0 | 0.156063 | 0 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.1 | false | 0 | 0.14 | 0 | 0.26 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecb02d026ee6cbb6ddb8ef5430a7f94d901364f7 | 5,312 | py | Python | ABCer.py | xl0418/ABCer | 50f8976f00c555c2face1f451a17142e33df7856 | [
"MIT"
] | 2 | 2020-03-25T17:07:59.000Z | 2020-04-01T12:03:45.000Z | ABCer.py | xl0418/ABCer | 50f8976f00c555c2face1f451a17142e33df7856 | [
"MIT"
] | null | null | null | ABCer.py | xl0418/ABCer | 50f8976f00c555c2face1f451a17142e33df7856 | [
"MIT"
] | null | null | null | #%%
import numpy as np
from itertools import repeat
from itertools import starmap
from scipy.stats import norm
class ABCer:
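    """Sequential ABC (approximate Bayesian computation) sampler that evolves a
    weighted particle population over `iterations` rounds to fit `model` to the
    supplied `observations`."""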
def __init__(self, iterations, particles, observations):
self.iterations = iterations
self.particles = particles
self.observations = observations
def initialize_model(self, model):
self.model = model
def initialize_parameters(self, paras):
self.parameters = paras
return self.parameters
def normalized_norm(self, x):
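        # Relative error of each simulated series against the observations,
        # rescaled by the worst particle so values fall in [0, 1]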
diff_norm = np.linalg.norm(x / self.observations - 1, axis=1)
max_err = np.nanmax(diff_norm)
return diff_norm / max_err
def purterbation(self, index, weight, para):
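        # Resample the surviving particles by weight, perturb them with a Gaussian
        # kernel whose variance is twice the weighted variance of the previous
        # population, and recompute the importance weights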
para_last_iteration = para[index]
weight_update = weight[index] / sum(weight[index])
mean_para_last = np.sum(para_last_iteration * weight_update)
var_para_last = np.sum(
(para_last_iteration - mean_para_last)**2 * weight_update)
sample_index = np.random.choice(index, self.particles, p=weight_update)
mean_sample_para = para[sample_index]
propose_para = np.random.normal(mean_sample_para,
np.sqrt(2 * var_para_last))
evolve_weight = weight_update[index.searchsorted(sample_index)]
evolve_weight_denominator = np.sum(evolve_weight * norm.pdf(
propose_para, mean_sample_para, np.sqrt(2 * var_para_last)))
evolve_weight_numerator = norm.pdf(propose_para, mean_para_last,
np.sqrt(2 * var_para_last))
evolve_weight = evolve_weight_numerator / evolve_weight_denominator
evolve_weight = evolve_weight / sum(evolve_weight)
return evolve_weight, propose_para
def ABC(self, prior_paras):
# initialize the first iteration
number_parameters = len(self.parameters)
if len(prior_paras) != number_parameters * 2:
return print(
"Provide the corresponding length of the prior information of the parameters!"
)
para_each_iteration = np.tile(self.parameters, (self.particles, 1))
for i in range(number_parameters):
para_each_iteration[:, i] = np.random.uniform(
prior_paras[2 * i], prior_paras[2 * i + 1],
para_each_iteration.shape[0])
# store parameter evolution
disct_parameters = dict.fromkeys(range(number_parameters), [])
for key, value in disct_parameters.items():
l = np.zeros(shape=(self.iterations + 1, self.particles))
l[0,:] = para_each_iteration[:,key]
disct_parameters[key] = l
# fitness
fitness = np.zeros(shape=(self.iterations, self.particles))
# weights
disct_parameter_weights = dict.fromkeys(range(number_parameters), [])
for key, value in disct_parameter_weights.items():
l = np.zeros(self.particles)
l.fill(1 / self.particles)
disct_parameter_weights[key] = l
for g in range(self.iterations):
packed_para = [[para_each_iteration[i, :]]
for i in range(para_each_iteration.shape[0])]
simulation_each_iter_list = list(starmap(self.model, packed_para))
distance = self.normalized_norm(simulation_each_iter_list)
fitness[g, :] = 1 - distance
q5 = np.argsort(
fitness[g, :])[-int(self.particles // 4)] # best 25%
fit_index = np.where(fitness[g, :] > fitness[g, q5])[0]
print('Mean estimates: parameters: %.3e ; %.3e ' %
(np.mean(para_each_iteration[fit_index, 0]),
np.mean(para_each_iteration[fit_index, 1])))
for i in range(number_parameters):
disct_parameter_weights[i], disct_parameters[i][
g + 1, :] = self.purterbation(fit_index,
disct_parameter_weights[i],
disct_parameters[i][g, :])
para_each_iteration[:, i] = disct_parameters[i][g+1,:]
disct_parameters['fitness'] = fitness
# np.save(output, para_data)
return disct_parameters
# test
#%%
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Example
def model_test(para, time_survey=np.arange(18)):
# time_survey = np.arange(18)
y = para[0] * np.exp(para[1] * time_survey)
return y
y = model_test([1, 2])
observations=np.array([1.0, 7.0,10.0,24.0,38.0,82.0,128.0,188.0,265.0,321.0,382.0,503.0,614.0,804.0,959.0,1135.0,1413.0,1705.0])
time = np.arange(len(observations))
test_ABC = ABCer(100, 10000, observations=observations)
test_ABC.initialize_model(model_test)
test_ABC.initialize_parameters([0.0, 1.0])
test_list = test_ABC.ABC(prior_paras=[0.0, 1.0, 1.0, 2.0])
# %%
plt.plot(time,observations, 'o')
para_inferred = []
para_inferred.append(np.mean(test_list[0][20,:]))
para_inferred.append(np.mean(test_list[1][20,:]))
extend_time = np.arange(21)
y_inferred = model_test(para_inferred, np.arange(21))
plt.plot(extend_time,y_inferred,'x',color = 'r')
# %%
| 37.146853 | 132 | 0.609375 | 667 | 5,312 | 4.616192 | 0.229385 | 0.042871 | 0.049691 | 0.009743 | 0.239688 | 0.181877 | 0.175382 | 0.096785 | 0.061708 | 0.061708 | 0 | 0.034978 | 0.278803 | 5,312 | 142 | 133 | 37.408451 | 0.768729 | 0.03012 | 0 | 0.041237 | 0 | 0 | 0.02608 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072165 | false | 0 | 0.051546 | 0 | 0.195876 | 0.020619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecb1f36fb203b8d051edf6d6cfdeae2ca3d1d0e9 | 1,405 | py | Python | seahub/share/urls.py | saukrIppl/newsea | 0fd5ab2ade9a8fb16b1e7b43ba13dac32eb39603 | [
"Apache-2.0"
] | 2 | 2017-06-21T09:46:55.000Z | 2018-05-30T10:07:32.000Z | seahub/share/urls.py | saukrIppl/newsea | 0fd5ab2ade9a8fb16b1e7b43ba13dac32eb39603 | [
"Apache-2.0"
] | null | null | null | seahub/share/urls.py | saukrIppl/newsea | 0fd5ab2ade9a8fb16b1e7b43ba13dac32eb39603 | [
"Apache-2.0"
] | 1 | 2020-10-01T04:11:41.000Z | 2020-10-01T04:11:41.000Z | from django.conf.urls import patterns, url
from views import *
urlpatterns = patterns('',
url(r'^$', list_shared_repos, name='share_admin'),
url(r'^links/$', list_shared_links, name='list_shared_links'),
url(r'^folders/$', list_priv_shared_folders, name='list_priv_shared_folders'),
url(r'^add/$', share_repo, name='share_repo'),
url(r'^remove/$', repo_remove_share, name='repo_remove_share'),
url(r'^ajax/link/remove/$', ajax_remove_shared_link, name='ajax_remove_shared_link'),
url(r'^link/send/$', send_shared_link, name='send_shared_link'),
url(r'^link/save/$', save_shared_link, name='save_shared_link'),
url(r'^ajax/upload_link/remove/$', ajax_remove_shared_upload_link, name='ajax_remove_shared_upload_link'),
url(r'^upload_link/send/$', send_shared_upload_link, name='send_shared_upload_link'),
url(r'^permission_admin/$', share_permission_admin, name='share_permission_admin'),
url(r'^ajax/repo_remove_share/$', ajax_repo_remove_share, name='ajax_repo_remove_share'),
url(r'^ajax/get-download-link/$', ajax_get_download_link, name='ajax_get_download_link'),
url(r'^ajax/get-upload-link/$', ajax_get_upload_link, name='ajax_get_upload_link'),
url(r'^ajax/private-share-dir/$', ajax_private_share_dir, name='ajax_private_share_dir'),
url(r'^ajax/get-link-audit-code/$', ajax_get_link_audit_code, name='ajax_get_link_audit_code'),
)
| 61.086957 | 110 | 0.740214 | 216 | 1,405 | 4.407407 | 0.157407 | 0.067227 | 0.058824 | 0.044118 | 0.289916 | 0.048319 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090391 | 1,405 | 22 | 111 | 63.863636 | 0.744914 | 0 | 0 | 0 | 0 | 0 | 0.417082 | 0.258363 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecb94bd0a8608cae96300c0ab8bd777428b90695 | 3,388 | py | Python | plot_movie.py | diogoff/plasma-tomography | 8798351beaa9b069128fffd606587e74d30cb0e1 | [
"MIT"
] | 5 | 2019-02-08T18:34:45.000Z | 2020-05-30T17:42:14.000Z | plot_movie.py | diogoff/plasma-tomography | 8798351beaa9b069128fffd606587e74d30cb0e1 | [
"MIT"
] | null | null | null | plot_movie.py | diogoff/plasma-tomography | 8798351beaa9b069128fffd606587e74d30cb0e1 | [
"MIT"
] | 1 | 2018-07-18T12:50:03.000Z | 2018-07-18T12:50:03.000Z |
import os
import sys
import h5py
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as ani
from cmap import *
from tensorflow.keras.models import load_model
# ----------------------------------------------------------------------
if len(sys.argv) < 6:
print('Usage: %s pulse t0 t1 dt vmax' % sys.argv[0])
print('Example: %s 92213 48.0 54.0 0.01 1.0' % sys.argv[0])
exit()
# ----------------------------------------------------------------------
pulse = int(sys.argv[1])
print('pulse:', pulse)
t0 = float(sys.argv[2])
print('t0:', t0)
t1 = float(sys.argv[3])
print('t1:', t1)
dt = float(sys.argv[4])
print('dt:', dt)
digits = len(str(dt).split('.')[-1])
vmax = float(sys.argv[5])
print('vmax:', vmax)
fps = 15
# ----------------------------------------------------------------------
fname = 'bolo_data.h5'
print('Reading:', fname)
f = h5py.File(fname, 'r')
g = f[str(pulse)]
tomo = np.clip(g['tomo'][:], 0., None)/1e6
tomo_t = g['tomo_t'][:]
print('%-10s %-10s %-20s %-10s' % (pulse, 'tomo', tomo.shape, tomo.dtype))
print('%-10s %-10s %-20s %-10s' % (pulse, 'tomo_t', tomo_t.shape, tomo_t.dtype))
f.close()
# ----------------------------------------------------------------------
if t0 < tomo_t[0]:
t0 = tomo_t[0]
print('t0:', t0, '(overwrite)')
if t1 > tomo_t[-1]:
t1 = tomo_t[-1]
print('t1:', t1, '(overwrite)')
# ----------------------------------------------------------------------
frames = []
frames_t = []
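# For each requested time step, pick the tomogram frame closest in time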
for t in np.arange(t0, t1, dt):
i = np.argmin(np.fabs(tomo_t - t))
frames.append(tomo[i])
frames_t.append(tomo_t[i])
frames = np.array(frames)
frames_t = np.array(frames_t)
print('%-10s %-10s %-20s %-10s' % (pulse, 'frames', frames.shape, frames.dtype))
print('%-10s %-10s %-20s %-10s' % (pulse, 'frames_t', frames_t.shape, frames_t.dtype))
# ----------------------------------------------------------------------
path = 'movies'
if not os.path.exists(path):
os.makedirs(path)
# ----------------------------------------------------------------------
fontsize = 'small'
R0 = 1.708 - 2*0.02
R1 = 3.988 + 3*0.02
Z0 = -1.77 - 2*0.02
Z1 = +2.13 + 2*0.02
im = plt.imshow(frames[0], cmap=get_cmap(),
vmin=0., vmax=vmax,
extent=[R0, R1, Z0, Z1],
interpolation='bilinear',
animated=True)
ticks = np.linspace(0., vmax, num=5)
labels = ['%.2f' % t for t in ticks]
labels[-1] = r'$\geq$' + labels[-1]
cb = plt.colorbar(im, fraction=0.26, ticks=ticks)
cb.ax.set_yticklabels(labels, fontsize=fontsize)
cb.ax.set_ylabel('MW/m3', fontsize=fontsize)
fig = plt.gcf()
ax = plt.gca()
title = 'Pulse %s t=%.*fs' % (pulse, digits, frames_t[0])
ax.set_title(title, fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
ax.set_xlabel('R (m)', fontsize=fontsize)
ax.set_ylabel('Z (m)', fontsize=fontsize)
ax.set_xlim([R0, R1])
ax.set_ylim([Z0, Z1])
plt.setp(ax.spines.values(), linewidth=0.1)
plt.tight_layout()
def animate(k):
title = 'Pulse %s t=%.*fs' % (pulse, digits, frames_t[k])
ax.set_title(title, fontsize=fontsize)
im.set_data(frames[k])
animation = ani.FuncAnimation(fig, animate, frames=range(frames.shape[0]))
fname = '%s/%s_%.*f_%.*f.mp4' % (path, pulse, digits, frames_t[0], digits, frames_t[-1])
print('Writing:', fname)
animation.save(fname, fps=fps, extra_args=['-vcodec', 'libx264'])
| 25.666667 | 90 | 0.527745 | 482 | 3,388 | 3.628631 | 0.313278 | 0.031447 | 0.027444 | 0.032018 | 0.177244 | 0.140652 | 0.105203 | 0.036592 | 0.036592 | 0 | 0 | 0.051957 | 0.147875 | 3,388 | 131 | 91 | 25.862595 | 0.553862 | 0.146399 | 0 | 0.022989 | 0 | 0 | 0.131113 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011494 | false | 0 | 0.091954 | 0 | 0.103448 | 0.172414 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecba0c87c633e44ca11824d2967de64d71f156ed | 2,940 | py | Python | main.py | William-Wang1988/gitblog | 5d064f69755992fa8e85fd53bc9a8d3589d97971 | [
"MIT"
] | null | null | null | main.py | William-Wang1988/gitblog | 5d064f69755992fa8e85fd53bc9a8d3589d97971 | [
"MIT"
] | 10 | 2020-08-31T08:17:26.000Z | 2020-09-21T03:39:12.000Z | main.py | William-Wang1988/gitblog | 5d064f69755992fa8e85fd53bc9a8d3589d97971 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from github import Github
from github.Issue import Issue
import argparse
MD_HEAD = """## Gitblog
My personal blog using issues and GitHub Action
"""
ME_GITHUB_NAME = "gatsby101"
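# Number of issues listed per label before the rest are collapsed into a <details> block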
ANCHOR_NUMBER = 5
TOP_ISSUES_LABELS = [
"Top",
]
def isMe(issue):
return issue.user.login == ME_GITHUB_NAME
def format_time(time):
return str(time)[:10]
def login(token):
return Github(token)
def get_repo(user: Github, repo: str):
return user.get_repo(repo)
def get_top_issues(repo):
return repo.get_issues(labels=TOP_ISSUES_LABELS)
def get_repo_labels(repo):
return [l for l in repo.get_labels()]
def get_issues_from_label(repo, label):
return repo.get_issues(labels=(label,))
def add_issue_info(issue, md):
time = format_time(issue.created_at)
md.write(f"- [{issue.title}]({issue.html_url})--{time}\n")
def add_md_top(repo, md):
if not TOP_ISSUES_LABELS:
return
top_issues = get_top_issues(repo)
with open(md, "a+", encoding="utf-8") as md:
md.write("## TOP\n")
for issue in top_issues:
if isMe(issue):
add_issue_info(issue, md)
def add_md_recent(repo, md):
new_five_issues = repo.get_issues()[:5]
with open(md, "a+", encoding="utf-8") as md:
md.write("## Recently updated\n")
for issue in new_five_issues:
if isMe(issue):
add_issue_info(issue, md)
def add_md_header(md):
with open(md, "w", encoding="utf-8") as md:
md.write(MD_HEAD)
def add_md_label(repo, md):
labels = get_repo_labels(repo)
with open(md, "a+", encoding="utf-8") as md:
for label in labels:
            # we don't need to add the top label again
if label.name in TOP_ISSUES_LABELS:
continue
issues = get_issues_from_label(repo, label)
if issues.totalCount:
md.write("## " + label.name + "\n")
issues = sorted(issues, key=lambda x: x.created_at, reverse=True)
i = 0
for issue in issues:
if not issue:
continue
if isMe(issue):
if i == ANCHOR_NUMBER:
md.write("<details><summary>More</summary>\n")
md.write("\n")
add_issue_info(issue, md)
i += 1
if i > ANCHOR_NUMBER:
md.write("</details>\n")
md.write("\n")
def main(token):
user = login(token)
repo = get_repo(user, "gatsby101/gitblog")
get_top_issues(repo)
add_md_header("README.md")
add_md_top(repo, "README.md")
add_md_recent(repo, "README.md")
add_md_label(repo, "README.md")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("github_token", help="github_token")
options = parser.parse_args()
main(options.github_token) | 25.128205 | 81 | 0.591156 | 408 | 2,940 | 4.044118 | 0.230392 | 0.049091 | 0.036364 | 0.041212 | 0.271515 | 0.197576 | 0.164848 | 0.115758 | 0.115758 | 0.115758 | 0 | 0.008072 | 0.283673 | 2,940 | 117 | 82 | 25.128205 | 0.775404 | 0.018707 | 0 | 0.156627 | 0 | 0 | 0.108221 | 0.026708 | 0 | 0 | 0 | 0 | 0 | 1 | 0.156627 | false | 0 | 0.036145 | 0.084337 | 0.289157 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecba4755ffe4747535ec4eeeb0e07ad5f74903d6 | 2,687 | py | Python | week-03/lab_03/lab_03_code.py | andrewn488/OMSBA-5067 | ab2f9e9a3c7dcb88f838ce8e40eb3bca142d059a | [
"MIT"
] | null | null | null | week-03/lab_03/lab_03_code.py | andrewn488/OMSBA-5067 | ab2f9e9a3c7dcb88f838ce8e40eb3bca142d059a | [
"MIT"
] | null | null | null | week-03/lab_03/lab_03_code.py | andrewn488/OMSBA-5067 | ab2f9e9a3c7dcb88f838ce8e40eb3bca142d059a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 15 22:15:28 2021
@author: ANalundasan
For: OMSBA 5067, Lab 3
"""
import numpy as np
import matplotlib.pyplot as plt
import math
#################### STEP 1 - KNN Classifier #################################
data = np.array([ [1, 1,1,1,1, 3],
[2, 1,1,1,2, 2],
[3, 1,1,2,1, 3],
[4, 1,1,2,2, 1],
[5, 1,2,1,1, 3],
[6, 1,2,1,2, 2],
[7, 1,2,2,1, 3],
[8, 1,2,2,2, 1],
[9, 2,1,1,1, 3],
[10, 2,1,1,2, 2],
[11, 2,1,2,1, 3],
[12, 2,1,2,2, 1],
[13, 2,2,1,1, 3],
[14, 2,2,1,2, 2],
[15, 2,2,2,1, 3],
[16, 2,2,2,2, 3],
[17, 3,1,1,1, 3],
[18, 3,1,1,2, 3],
[19, 3,1,2,1, 3],
[20, 3,1,2,2, 1],
[21, 3,2,1,1, 3],
[22, 3,2,1,2, 2],
[23, 3,2,2,1, 3],
[24, 3,2,2,2, 3]])
# 4 columns in the middle for Features
trainX = data[:, 1:5]
# first 19 rows for training data
trainY = data[0:19, :]
# last 5 rows for test data
testX = data[19:24, :]
# L1: Manhattan distance between two feature vectors a and b
def manhattan_distance(a, b):
    distance_L1 = 0
    for ai, bi in zip(a, b):
        distance_L1 += abs(ai - bi)
    return distance_L1
# L2: Euclidean distance
def euclidean_distance(a, b):
    distance_L2 = 0
    for ai, bi in zip(a, b):
        distance_L2 += (ai - bi) ** 2
    return math.sqrt(distance_L2)
# L3: Chebyshev distance
def chebyshev_distance(a, b):
    return max(abs(ai - bi) for ai, bi in zip(a, b))
def myKNN(trainX, trainY, testX, distance, K):
""" trainX <- training input features
trainY <- training labels
testX <- test dataset
    distance determines the distance metric and can be 1, 2, or 3
    (3 for Chebyshev). Also, K is the KNN parameter. """
trainX = data[:, 1:5]
trainY = data[0:19, :]
testX = data[19:24, :]
# # distance equations
# L1_manhattan = math.sum((abs(ai - bi)))
# L2_euclidean = math.sum((ai - bi)**2)
# L_chelyshev = math.max(abs(ai - bi))
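    # Hypothetical completion (not part of the original lab code): classify each test
    # row by majority vote over its K nearest training rows, using the distance
    # helpers defined above. Assumes rows are laid out as [id, f1..f4, label].
    predictions = []
    for test_row in testX:
        dists = []
        for train_row in trainY:
            a, b = test_row[1:5], train_row[1:5]
            if distance == 1:
                d = manhattan_distance(a, b)
            elif distance == 2:
                d = euclidean_distance(a, b)
            else:
                d = chebyshev_distance(a, b)
            dists.append((d, train_row[5]))
        dists.sort(key=lambda pair: pair[0])
        top_labels = [label for _, label in dists[:K]]
        predictions.append(max(set(top_labels), key=top_labels.count))
    return predictions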
#################### STEP 2 - Decision Tree Toy Example ######################
# from sklearn.tree import DecisionTreeClassifier
# X = [[0, 0], [1, 1], [0, 1], [2, 2]]
# Y = [0, 1, 0, 1]
# clf = DecisionTreeClassifier()
# clf = clf.fit(X, Y)
# clf.predict([[1, 2]])
################# STEP 3 - Decision Tree With Larger Dataset #################
| 28.284211 | 79 | 0.426498 | 384 | 2,687 | 2.958333 | 0.291667 | 0.038732 | 0.029049 | 0.014085 | 0.024648 | 0 | 0 | 0 | 0 | 0 | 0 | 0.139466 | 0.372907 | 2,687 | 95 | 80 | 28.284211 | 0.534718 | 0.333457 | 0 | 0.133333 | 0 | 0 | 0.030565 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022222 | false | 0 | 0.066667 | 0 | 0.088889 | 0.044444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecbdfa159cc6c94130e99feee6fc9442f8f7112a | 4,440 | py | Python | nxsdk_modules_ncl/epl/data/gen_wgts_for_inference.py | biagiom/models | 79489a3c429b3027dd420840bbccfee5e8c9a879 | [
"BSD-3-Clause"
] | 54 | 2020-03-04T17:37:17.000Z | 2022-02-22T13:16:10.000Z | nxsdk_modules_ncl/epl/data/gen_wgts_for_inference.py | biagiom/models | 79489a3c429b3027dd420840bbccfee5e8c9a879 | [
"BSD-3-Clause"
] | 9 | 2020-08-26T13:17:54.000Z | 2021-11-09T09:02:00.000Z | nxsdk_modules_ncl/epl/data/gen_wgts_for_inference.py | biagiom/models | 79489a3c429b3027dd420840bbccfee5e8c9a879 | [
"BSD-3-Clause"
] | 26 | 2020-03-18T17:09:34.000Z | 2021-11-22T16:23:14.000Z | # Copyright(c) 2019-2020 Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# pylint: disable-all
import csv
import numpy as np
import os
class MCGCWeightsGen:
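    """Reads the exported excitatory-to-inhibitory and inhibitory-to-excitatory
    connection weight/delay text files and saves them as dense numpy arrays
    (.npy) for use at inference time."""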
def __init__(self, inhToExcWgtsFile="inhToExcWgtsFile.txt",
excToInhWgtsFile="excToInhWgtsFile.txt",
numCores=72, numGCPerCore=46, numMCPerCore=1, numDelays=4,
minDelay=16):
dir_path = os.path.dirname(os.path.abspath(__file__))
self.i2eWgtsFile = os.path.join(dir_path, inhToExcWgtsFile)
self.e2iWgtsFile = os.path.join(dir_path, excToInhWgtsFile)
self.numCores = numCores
self.numGCPerCore = numGCPerCore
self.numMCPerCore = numMCPerCore
self.numDelays = numDelays
self.minDelay = minDelay
self.saveWgtsE2I()
self.saveWgtsAndDelaysI2E()
@property
def numGC(self):
return self.numCores * self.numGCPerCore
@property
def numMC(self):
return self.numCores * self.numMCPerCore
def saveWgtsE2I(self):
e2iWgtMat = np.zeros((self.numDelays, self.numGC, self.numMC))
print(e2iWgtMat.shape)
with open(self.e2iWgtsFile) as e2iFile:
csvReader = csv.reader(e2iFile, delimiter=',')
for row in csvReader:
int_row = [int(item) for item in row]
coreIdx, gcIdx, mcIdx, wgt, dly = tuple(int_row)
gcIdx = coreIdx * self.numGCPerCore + gcIdx
mcIdx = self.numMCPerCore * mcIdx
dlyIdx = dly - self.minDelay
if e2iWgtMat[dlyIdx, gcIdx, mcIdx] != 0:
raise ValueError("Duplicate weights")
if wgt != 0:
e2iWgtMat[dlyIdx, gcIdx, mcIdx] = wgt
#print(np.where(e2iWgtMat > 0))
np.save("e2iWgtMat", e2iWgtMat)
def saveWgtsAndDelaysI2E(self):
i2eWgtMat = np.zeros((2, self.numCores, self.numMCPerCore,
self.numGCPerCore))
i2eDlyMat = np.zeros(i2eWgtMat.shape)
print(i2eWgtMat.shape)
with open(self.i2eWgtsFile) as i2eFile:
csvReader = csv.reader(i2eFile, delimiter=',')
for row in csvReader:
int_row = [int(item) for item in row]
coreIdx, gcIdx, mcIdx, wgt, dly = tuple(int_row)
boxIdx = 0 if wgt > 0 else 1
if i2eWgtMat[boxIdx, coreIdx, mcIdx, gcIdx] != 0:
raise ValueError("Duplicate weights for core, gc , mc",
coreIdx, gcIdx, mcIdx, i2eWgtMat[coreIdx, mcIdx, gcIdx])
if wgt != 0:
i2eWgtMat[boxIdx, coreIdx, mcIdx, gcIdx] = wgt
i2eDlyMat[boxIdx, coreIdx, mcIdx, gcIdx] = dly
#print(np.where(i2eWgtMat > 0))
np.save("i2eWgtMat", i2eWgtMat)
np.save("i2eDlyMat", i2eDlyMat)
if __name__ == '__main__':
wgen = MCGCWeightsGen()
| 40.363636 | 93 | 0.647973 | 516 | 4,440 | 5.531008 | 0.387597 | 0.021023 | 0.023826 | 0.024177 | 0.199019 | 0.107218 | 0.107218 | 0.107218 | 0.107218 | 0.107218 | 0 | 0.017985 | 0.273649 | 4,440 | 109 | 94 | 40.733945 | 0.866977 | 0.357658 | 0 | 0.163934 | 0 | 0 | 0.045712 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081967 | false | 0 | 0.04918 | 0.032787 | 0.180328 | 0.032787 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecc0a2e456508d8dfa56614ecca79ec800ea879d | 3,363 | py | Python | server/permissions/users.py | dragorhast/server | a5ad238e630c3b575e3bc3c51718e7ebfff1e4d1 | [
"MIT"
] | 5 | 2018-11-28T11:33:25.000Z | 2022-03-27T12:50:02.000Z | server/permissions/users.py | dragorhast/server | a5ad238e630c3b575e3bc3c51718e7ebfff1e4d1 | [
"MIT"
] | 124 | 2018-10-07T21:31:02.000Z | 2019-03-26T11:51:00.000Z | server/permissions/users.py | dragorhast/server | a5ad238e630c3b575e3bc3c51718e7ebfff1e4d1 | [
"MIT"
] | null | null | null | from aiohttp.web_urldispatcher import View
from server.models import User, Bike, Reservation
from server.models.user import UserType
from server.permissions.permission import RoutePermissionError, Permission
from server.service.access.users import get_user
from server.service.verify_token import verify_token, TokenVerificationError
class UserIsAdmin(Permission):
"""Asserts that a given user is an admin."""
async def __call__(self, view: View, user: User = None, **kwargs):
if user is None:
raise RoutePermissionError("User does not exist.")
if "token" not in view.request:
raise RoutePermissionError("No admin firebase jwt was included in the Authorization header.")
if not view.request["token"] == user.firebase_id:
# an admin is fetching a user's details; we need to get the admin's details
user = await get_user(firebase_id=view.request["token"])
            if user is None or user.type is UserType.USER:
raise RoutePermissionError("The supplied token doesn't have admin rights.")
@property
def openapi_security(self):
return [{"FirebaseToken": ["admin"]}]
class UserOwnsReservation(Permission):
"""Assert that a user owns the given reservation."""
async def __call__(self, view: View, user: User = None, reservation: Reservation = None, **kwargs):
if not reservation.user_id == user.id:
raise RoutePermissionError("The supplied token did not make this reservation.")
class UserIsRentingBike(Permission):
"""Asserts that the given user is renting the given bike."""
async def __call__(self, view: View, user: User = None, bike: Bike = None, **kwargs):
if not view.rental_manager.is_renting(user.id, bike.id):
raise RoutePermissionError("The supplied token does not have an active rental for this bike.")
@property
def openapi_security(self):
return [{"FirebaseToken": ["renting_user"]}]
class UserMatchesToken(Permission):
"""Asserts that the given user matches the firebase id."""
async def __call__(self, view: View, user: User = None, **kwargs):
if "token" not in view.request:
raise RoutePermissionError("No firebase jwt was included in the Authorization header.")
else:
token = view.request["token"]
if not user.firebase_id == token:
raise RoutePermissionError("The supplied token doesn't have access to this resource.")
@property
def openapi_security(self):
return [{"FirebaseToken": ["user"]}]
class UserCanPay(Permission):
"""Asserts that the given user matches the firebase id."""
async def __call__(self, view: View, user: User = None, **kwargs):
if not user.can_pay:
raise RoutePermissionError("User does not have any payment details associated with their account.")
@property
def openapi_security(self):
return [{"FirebaseToken": ["user"]}]
class ValidToken(Permission):
"""Asserts that the request has a valid firebase token."""
async def __call__(self, view: View, **kwargs):
try:
token = verify_token(view.request)
except TokenVerificationError as error:
raise RoutePermissionError(error.message)
else:
view.request["token"] = token
| 36.956044 | 111 | 0.680345 | 412 | 3,363 | 5.451456 | 0.254854 | 0.100178 | 0.032057 | 0.042743 | 0.459038 | 0.426981 | 0.363313 | 0.319679 | 0.233304 | 0.105966 | 0 | 0 | 0.223907 | 3,363 | 90 | 112 | 37.366667 | 0.860536 | 0.11121 | 0 | 0.303571 | 0 | 0 | 0.179175 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.107143 | 0.071429 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecc3b237f1452c998b773c172662c9bd6ab6636e | 315 | py | Python | Two-pointer/Move elements in the array/Remove elements #27.py | Awesomeyaya/Leetcode-Two-pointer | 15cd0a73f5abc4d0d19d18c231750d31dc839dbe | [
"MIT"
] | null | null | null | Two-pointer/Move elements in the array/Remove elements #27.py | Awesomeyaya/Leetcode-Two-pointer | 15cd0a73f5abc4d0d19d18c231750d31dc839dbe | [
"MIT"
] | null | null | null | Two-pointer/Move elements in the array/Remove elements #27.py | Awesomeyaya/Leetcode-Two-pointer | 15cd0a73f5abc4d0d19d18c231750d31dc839dbe | [
"MIT"
] | 1 | 2018-10-29T17:33:52.000Z | 2018-10-29T17:33:52.000Z | def removeElement(self, nums, val):
"""
:type nums: List[int]
:type val: int
:rtype: int
"""
zero = 0
for n in range(len(nums)):
if nums[n] != val:
nums[zero] = nums[n]
zero = zero + 1
return zero
| 22.5 | 36 | 0.396825 | 35 | 315 | 3.571429 | 0.542857 | 0.08 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012346 | 0.485714 | 315 | 13 | 37 | 24.230769 | 0.759259 | 0.152381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecd274b39828a2cbb60621e83b0c76718302d58c | 620 | py | Python | visual-option-chain/tasks.py | WillWcchan/Visual-Option-Chain-Graph | f9bfec4a262c2c3c3212e38113af4ad59517c52c | [
"MIT"
] | 1 | 2021-11-13T20:03:17.000Z | 2021-11-13T20:03:17.000Z | visual-option-chain/tasks.py | WillWcchan/Visual-Option-Chain-Graph | f9bfec4a262c2c3c3212e38113af4ad59517c52c | [
"MIT"
] | 5 | 2021-04-08T21:58:58.000Z | 2021-10-31T00:55:39.000Z | visual-option-chain/tasks.py | WillWcchan/Visual-Option-Chain-Graph | f9bfec4a262c2c3c3212e38113af4ad59517c52c | [
"MIT"
] | null | null | null | from celery import shared_task
from django.core.mail import send_mail
from datetime import datetime
from time import sleep
# Start the worker process from within the visual-option-chain directory: celery -A visual-option-chain worker -l info -E
@shared_task
def send_email_task(subject, message, from_email, recipient_list):
send_mail(
subject=subject,
message=message,
from_email=from_email,
recipient_list=recipient_list,
fail_silently=False,
)
return None
@shared_task
def display_time():
print("The time is %s :" % str(datetime.now()))
return True
| 26.956522 | 126 | 0.722581 | 89 | 620 | 4.865169 | 0.52809 | 0.069284 | 0.078522 | 0.101617 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.203226 | 620 | 22 | 127 | 28.181818 | 0.876518 | 0.2 | 0 | 0.111111 | 0 | 0 | 0.032389 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.444444 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecd534abd7f8a9a4fecc3c6b9c2956ced9c8bbfb | 4,631 | py | Python | model/predict_from_file.py | elephantum/image-tools | 38d704a1ade32ed1f6ae8c652d257c15b7cc2740 | [
"MIT"
] | null | null | null | model/predict_from_file.py | elephantum/image-tools | 38d704a1ade32ed1f6ae8c652d257c15b7cc2740 | [
"MIT"
] | null | null | null | model/predict_from_file.py | elephantum/image-tools | 38d704a1ade32ed1f6ae8c652d257c15b7cc2740 | [
"MIT"
] | null | null | null | """
Given a csv or txt file and a Tensorflow 1.15 SavedModel file, run image classification on the urls
and write the predicted label and confidence back to the file
"""
import argparse
import os
from io import BytesIO
import requests
import pandas as pd
from csv import writer as csv_writer
from tqdm import tqdm
from model.model import ImageClassification
from PIL import Image
from concurrent.futures import ThreadPoolExecutor
def predict_dataset(filepath, model_dir, url_col=None, progress_hook=None):
"""
    Given a file with urls to images, run the given SavedModel on each image and write the
    predicted label and confidence back to the file.
:param filepath: path to a valid txt or csv file with image urls to download.
:param model_dir: path to the Lobe Tensorflow SavedModel export.
:param url_col: if this is a csv, the column header name for the urls to download.
:param progress_hook: an optional function that will be run with progress_hook(currentProgress, totalProgress) when progress updates.
"""
print(f"Predicting {filepath}")
filepath = os.path.abspath(filepath)
filename, ext = _name_and_extension(filepath)
# read the file
# if this a .txt file, don't treat the first row as a header. Otherwise, use the first row for header column names.
if ext != '.xlsx':
csv = pd.read_csv(filepath, header=None if ext == '.txt' else 0)
else:
csv = pd.read_excel(filepath, header=0)
if ext in ['.csv', '.xlsx'] and not url_col:
raise ValueError(f"Please specify an image url column for the csv.")
url_col_idx = 0
if url_col:
try:
url_col_idx = list(csv.columns).index(url_col)
except ValueError:
raise ValueError(f"Image url column {url_col} not found in csv headers {csv.columns}")
num_items = len(csv)
print(f"Predicting {num_items} items...")
# load the model
print("Loading model...")
model = ImageClassification(model_dir=model_dir)
model.load()
print("Model loaded!")
# create our output csv
fname, ext = os.path.splitext(filepath)
out_file = f"{fname}_predictions.csv"
with open(out_file, 'w', encoding="utf-8", newline='') as f:
# our header names from the pandas columns
writer = csv_writer(f)
writer.writerow([*[str(col) if not pd.isna(col) else '' for col in csv.columns], 'label', 'confidence'])
# iterate over the rows and predict the label
with tqdm(total=len(csv)) as pbar:
with ThreadPoolExecutor() as executor:
model_futures = []
# make our prediction jobs
for i, row in enumerate(csv.itertuples(index=False)):
url = row[url_col_idx]
model_futures.append(executor.submit(predict_image, url=url, model=model, row=row))
# write the results from the predict (this should go in order of the futures)
for future in model_futures:
label, confidence, row = future.result()
with open(out_file, 'a', encoding="utf-8", newline='') as f:
writer = csv_writer(f)
writer.writerow([*[str(col) if not pd.isna(col) else '' for col in row], label, confidence])
pbar.update(1)
if progress_hook:
progress_hook(i+1, len(csv))
def predict_image(url, model, row):
label, confidence = '', ''
try:
response = requests.get(url, timeout=30)
if response.ok:
image = Image.open(BytesIO(response.content))
predictions = model.predict(image)
predictions.sort(key=lambda x: x[1], reverse=True)
label, confidence = predictions[0]
except Exception:
pass
return label, confidence, row
def _name_and_extension(filepath):
# returns a tuple of the filename and the extension, ignoring any other prefixes in the filepath
# raises if not a file
fpath = os.path.abspath(filepath)
if not os.path.isfile(fpath):
raise ValueError(f"File {filepath} doesn't exist.")
filename = os.path.split(fpath)[-1]
name, ext = os.path.splitext(filename)
return name, str.lower(ext)
def _valid_file(filepath):
# file must exist and have a valid extension
valid_extensions = ['.txt', '.csv', '.xlsx']
_, extension = _name_and_extension(filepath)
if extension not in valid_extensions:
raise ValueError(f"File {filepath} doesn't have one of the valid extensions: {valid_extensions}")
# good to go
return filepath
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Label an image dataset from csv or txt file.')
parser.add_argument('file', help='Path to your csv or txt file.')
parser.add_argument('model_dir', help='Path to your SavedModel from Lobe.')
parser.add_argument('--url', help='If this is a csv with column headers, the column that contains the image urls to download.')
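    # Example invocation (hypothetical paths):
    #   python predict_from_file.py images.csv path/to/saved_model --url image_url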
args = parser.parse_args()
predict_dataset(filepath=args.file, model_dir=args.model_dir, url_col=args.url)
| 37.346774 | 134 | 0.734183 | 723 | 4,631 | 4.605809 | 0.276625 | 0.018018 | 0.019219 | 0.010811 | 0.095496 | 0.088288 | 0.075075 | 0.037237 | 0.037237 | 0.037237 | 0 | 0.00386 | 0.160872 | 4,631 | 123 | 135 | 37.650407 | 0.853062 | 0.253077 | 0 | 0.048193 | 0 | 0 | 0.176007 | 0.006713 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048193 | false | 0.012048 | 0.120482 | 0 | 0.204819 | 0.048193 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecd5add47e791d4dc866d0ec2a94e998ea16a8cb | 2,403 | py | Python | line_art/line_art.py | Ravenlocke/PythonArt | 30de6117f7639313344c9938087399acfc93ba80 | [
"MIT"
] | null | null | null | line_art/line_art.py | Ravenlocke/PythonArt | 30de6117f7639313344c9938087399acfc93ba80 | [
"MIT"
] | null | null | null | line_art/line_art.py | Ravenlocke/PythonArt | 30de6117f7639313344c9938087399acfc93ba80 | [
"MIT"
] | null | null | null | import click
import matplotlib.pyplot as plt
import numpy as np
from loguru import logger
from matplotlib.collections import LineCollection
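# Map each digit 0-9 to a unit step whose direction is spaced 36 degrees around the circle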
number_to_degrees = {
k: (np.cos(np.pi * 2 / 10 * k), np.sin(np.pi * 2 / 10 * k))
for k in range(10)
}
@click.command()
@click.option(
"--seed", "-s", default=0, help="Seed for random number generation."
)
@click.option(
"--length",
"-l",
default=10_000,
help="Number of random numbers to generate.",
)
@click.option(
"--cmap", "-c", default="RdYlGn", help="The matplotlib colour map to use."
)
@click.option(
"--outfile",
default=None,
help="The outfile to save the image to (default None displays the image).",
)
@click.option(
"--dpi",
default=None,
type=int,
help="The DPI for the output image -- set for high-quality images if using a format like PNG.",
)
@click.option(
"--linewidth",
default=1.0,
help="The width of the plotted lines -- consider decreasing for large lengths.",
)
def run(seed, length, cmap, outfile=None, dpi=None, linewidth=1.0):
logger.info(f"Running with seed = {seed}")
np.random.seed(seed)
logger.info("Generating sequence of random numbers")
seq = np.random.randint(0, 10, length)
logger.info("Generating line coordinates")
current_point = (0, 0)
lines = []
for number in seq:
previous_x, previous_y = current_point
delta_x, delta_y = number_to_degrees[number]
new_point = [previous_x + delta_x, previous_y + delta_y]
lines.append([current_point, new_point])
current_point = new_point
logger.info("Generating colours for lines")
color_map = plt.get_cmap(cmap)
colors = [color_map(i) for i in np.linspace(0, 1, length)]
logger.info("Creating line collection")
line_collection = LineCollection(
lines, colors=colors, capstyle="round", linewidth=linewidth
)
# Plot
logger.info("Plotting")
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
ax1.add_collection(line_collection)
ax1.autoscale()
plt.gca().set_aspect("equal", adjustable="box")
plt.axis("off")
plt.tight_layout()
if outfile and outfile.endswith(".png") and dpi:
plt.savefig(outfile, dpi=dpi)
elif outfile:
plt.savefig(outfile)
else:
plt.show()
logger.success("Complete!")
if __name__ == "__main__":
run()
| 26.119565 | 99 | 0.648772 | 331 | 2,403 | 4.598187 | 0.398792 | 0.043364 | 0.039422 | 0.009198 | 0.010512 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016454 | 0.21598 | 2,403 | 91 | 100 | 26.406593 | 0.791401 | 0.001665 | 0 | 0.105263 | 0 | 0 | 0.239466 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013158 | false | 0 | 0.065789 | 0 | 0.078947 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecd6a85eca2c9043b2128295fe6e487ccb69bc48 | 1,277 | py | Python | src/utils/reddit.py | mzone242/Loth-Bot | 830b3659c86f36e84c12e13796f74c607ff2868e | [
"MIT"
] | null | null | null | src/utils/reddit.py | mzone242/Loth-Bot | 830b3659c86f36e84c12e13796f74c607ff2868e | [
"MIT"
] | null | null | null | src/utils/reddit.py | mzone242/Loth-Bot | 830b3659c86f36e84c12e13796f74c607ff2868e | [
"MIT"
] | null | null | null | import datetime
import logging
logger = logging.getLogger("utils.reddit")
subreddit = None
over_threshold = []
def load_subreddit(_subreddit):
global subreddit
subreddit = _subreddit
return
def fetch_posts(_limit):
try:
logger.info(f"Fetching top {_limit} posts from {subreddit.display_name}")
top = subreddit.top("day", limit=_limit)
over_threshold.clear()
count = 0
total = 0
time = int(datetime.datetime.now().timestamp())
for post in top:
if post is None:
logger.info(f"None post encountered from praw")
continue
if not post.author:
name = '[deleted]'
else:
name = post.author.name
_post = (post.id, post.score, int(post.created_utc), False, name, post.url, post.title)
total += 1
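            # Keep posts with at least 100 upvotes created within the last 24 hours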
if _post[1] >= 100 and time - _post[2] < 86400:
over_threshold.append(_post)
count += 1
logger.info(f'{str(count)} posts found over 100 upvotes out of {str(total)} posts')
logger.info(f'Current time is {time}')
except Exception as exception:
logger.exception(exception)
return None
return over_threshold
| 28.377778 | 99 | 0.584182 | 151 | 1,277 | 4.827815 | 0.456954 | 0.071331 | 0.060357 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019495 | 0.31715 | 1,277 | 44 | 100 | 29.022727 | 0.816514 | 0 | 0 | 0 | 0 | 0 | 0.1574 | 0.018794 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.055556 | 0 | 0.194444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecd8a0ddb4c88f7b415905177b5939de99f001aa | 1,220 | py | Python | mfcc-features.py | gitikadaswani/Audio-Genre-Classification | 5702ed067d41982018a3e166dcdcd7b8e8cc6fa9 | [
"MIT"
] | 20 | 2017-12-09T02:41:46.000Z | 2020-08-29T06:26:57.000Z | mfcc-features.py | debeat/Audio-Genre-Classification | 5702ed067d41982018a3e166dcdcd7b8e8cc6fa9 | [
"MIT"
] | 2 | 2018-04-20T14:13:05.000Z | 2020-01-21T17:14:46.000Z | mfcc-features.py | debeat/Audio-Genre-Classification | 5702ed067d41982018a3e166dcdcd7b8e8cc6fa9 | [
"MIT"
] | 11 | 2018-05-17T07:32:51.000Z | 2021-11-11T23:51:36.000Z | #from scikits.talkbox.features import mfcc
import scipy.io.wavfile
import numpy as np
import sys
import os
import glob
from utils1 import GENRE_DIR, GENRE_LIST
from python_speech_features import mfcc
#from librosa.feature import mfcc
# Given a wavfile, computes mfcc and saves mfcc data
def create_ceps(wavfile):
sampling_rate, song_array = scipy.io.wavfile.read(wavfile)
print(sampling_rate)
"""Get MFCC
ceps : ndarray of MFCC
mspec : ndarray of log-spectrum in the mel-domain
spec : spectrum magnitude
"""
ceps=mfcc(song_array)
#ceps, mspec, spec= mfcc(song_array)
print(ceps.shape)
#this is done in order to replace NaN and infinite value in array
bad_indices = np.where(np.isnan(ceps))
b=np.where(np.isinf(ceps))
ceps[bad_indices]=0
ceps[b]=0
write_ceps(ceps, wavfile)
# Saves mfcc data
def write_ceps(ceps, wavfile):
base_wav, ext = os.path.splitext(wavfile)
data_wav = base_wav + ".ceps"
np.save(data_wav, ceps)
def main():
for label, genre in enumerate(GENRE_LIST):
for fn in glob.glob(os.path.join(GENRE_DIR, genre)):
for wavfile in os.listdir(fn):
if wavfile.endswith("wav"):
create_ceps(os.path.join(GENRE_DIR, genre,wavfile))
if __name__ == "__main__":
main() | 25.416667 | 66 | 0.741803 | 199 | 1,220 | 4.396985 | 0.422111 | 0.034286 | 0.044571 | 0.036571 | 0.052571 | 0.052571 | 0 | 0 | 0 | 0 | 0 | 0.002899 | 0.151639 | 1,220 | 48 | 67 | 25.416667 | 0.842512 | 0.196721 | 0 | 0 | 0 | 0 | 0.018735 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0.241379 | 0 | 0.344828 | 0.068966 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecdc6ba10786df9e7a33a53fdc2a2e452c9d0c0f | 1,551 | py | Python | 2022/fix_user_varbinaries_T298565.py | wikimedia/operations-software-schema-changes | d5fc0442126c9e4280b90302f675889724b085db | [
"Apache-2.0"
] | 1 | 2022-03-25T06:59:37.000Z | 2022-03-25T06:59:37.000Z | 2022/fix_user_varbinaries_T298565.py | wikimedia/operations-software-schema-changes | d5fc0442126c9e4280b90302f675889724b085db | [
"Apache-2.0"
] | null | null | null | 2022/fix_user_varbinaries_T298565.py | wikimedia/operations-software-schema-changes | d5fc0442126c9e4280b90302f675889724b085db | [
"Apache-2.0"
] | null | null | null | from auto_schema.schema_change import SchemaChange
# Copy this file and make adjustments
# Set to None or 0 to skip downtiming
downtime_hours = 6
ticket = 'T298565'
fields = {
'user_newpass_time': 'BINARY(14) DEFAULT NULL',
'user_email_authenticated': 'BINARY(14) DEFAULT NULL',
'user_email_token': 'BINARY(32) DEFAULT NULL',
'user_email_token_expires': 'BINARY(14) DEFAULT NULL',
'user_touched': 'BINARY(14) NOT NULL',
'user_token': 'BINARY(32) DEFAULT \'\' NOT NULL',
'user_registration': 'BINARY(14) DEFAULT NULL'
}
# Set this to false if you don't want to run on all dbs
# In that case, you have to specify the db in the command and check function.
all_dbs = True
# DO NOT FORGET to set the right port if it's not 3306
# Use None instead of [] to get all direct replicas of master of active dc
replicas = None
section = 's7'
# The check function must return true if schema change is applied
# or not needed, False otherwise.
for field in fields:
def check(db):
query_res = db.run_sql('desc user;')
if not query_res:
# Dry run
return True
field_def = query_res.split(field)[1].split('\n')[0]
return 'varbinary' not in field_def.lower()
schema_change = SchemaChange(
replicas=replicas,
section=section,
all_dbs=all_dbs,
check=check,
command='ALTER TABLE /*_*/user CHANGE {} {} {};'.format(field, field, fields[field]),
ticket=ticket,
downtime_hours=downtime_hours
)
schema_change.run()
| 31.653061 | 93 | 0.668601 | 225 | 1,551 | 4.48 | 0.448889 | 0.047619 | 0.059524 | 0.075397 | 0.108135 | 0.055556 | 0 | 0 | 0 | 0 | 0 | 0.024247 | 0.228885 | 1,551 | 48 | 94 | 32.3125 | 0.818562 | 0.277885 | 0 | 0 | 0 | 0 | 0.316216 | 0.043243 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0.03125 | 0.03125 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecdfc10584ead3508610d287f02bad431a223015 | 1,069 | py | Python | controllers/messagelog.py | uezo/linebot-project-template | 294d40f5c50a3bbee346314107b60e98f4e07bf0 | [
"MIT"
] | 14 | 2019-08-05T22:54:59.000Z | 2021-12-21T00:29:22.000Z | controllers/messagelog.py | whitecat-22/linebot-project-template | 294d40f5c50a3bbee346314107b60e98f4e07bf0 | [
"MIT"
] | 1 | 2021-06-17T09:30:33.000Z | 2021-06-18T07:16:37.000Z | controllers/messagelog.py | whitecat-22/linebot-project-template | 294d40f5c50a3bbee346314107b60e98f4e07bf0 | [
"MIT"
] | 5 | 2019-09-03T06:51:44.000Z | 2021-06-17T09:40:42.000Z | from flask import (
Blueprint,
current_app,
request,
abort,
render_template
)
from minette.serializer import loads
# Blueprint definition imported by the main application
bp = Blueprint("messagelog", __name__)
# Message log handler
@bp.route("/messagelog", methods=["GET"])
def messagelog():
    # Get the bot instance
bot = current_app.line_adapter.bot
    # Check the password
if request.args.get("key", "") != bot.config.get("messagelog_password"):
abort(401)
    # Fetch and display the message log (quick and dirty; for production, tune the query and tables properly)
with bot.connection_provider.get_connection() as connection:
cursor = connection.cursor()
cursor.execute("select * from messagelog order by id desc limit 50")
ml = []
for r in cursor.fetchall():
d = dict(zip([column[0] for column in cursor.description], r))
d["request_json"] = loads(d["request_json"])
d["context_json"] = loads(d["context_json"])
d["response_json"] = loads(d["response_json"])
ml.append(d)
return render_template("messagelog.html", ml=ml)
| 30.542857 | 76 | 0.651076 | 120 | 1,069 | 5.65 | 0.541667 | 0.039823 | 0.044248 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00722 | 0.222638 | 1,069 | 34 | 77 | 31.441176 | 0.808664 | 0.108513 | 0 | 0 | 0 | 0 | 0.195354 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0.04 | 0.08 | 0 | 0.16 | 0.08 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ece0d4591ac1dbef0075df8821e6f6390b749839 | 5,015 | py | Python | model/MolecularVAE_TF.py | DexiongYung/molecular-vae | ca0e5a58abfc89e693d06331f93e5a23b948b683 | [
"MIT"
] | null | null | null | model/MolecularVAE_TF.py | DexiongYung/molecular-vae | ca0e5a58abfc89e693d06331f93e5a23b948b683 | [
"MIT"
] | null | null | null | model/MolecularVAE_TF.py | DexiongYung/molecular-vae | ca0e5a58abfc89e693d06331f93e5a23b948b683 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from utilities import DEVICE
def vae_loss(x_decoded_mean, x, z_mean, z_sd):
bce_loss = F.binary_cross_entropy(x_decoded_mean, x, reduction='sum')
kl_loss = -0.5 * torch.sum(1 + z_sd - z_mean.pow(2) - z_sd)
return bce_loss + kl_loss
class MolecularVAE(nn.Module):
'''
MolecularVAE with teacher forcing
'''
def __init__(self, vocab: dict, sos_idx: int, pad_idx: int, args):
super(MolecularVAE, self).__init__()
self.max_name_len = args.max_name_length
self.encoder_mlp_size = args.mlp_encode
self.latent_size = args.latent
self.num_layers = args.num_layers
self.embed_dim = args.word_embed
self.conv_in_c = args.conv_in_sz
self.conv_out_c = args.conv_out_sz
self.conv_kernals = args.conv_kernals
self.vocab_size = len(vocab)
self.eps = args.eps
self.conv_1 = nn.Conv1d(self.max_name_len, self.conv_out_c[
0], kernel_size=self.conv_kernals[0])
self.conv_2 = nn.Conv1d(self.conv_in_c[0], self.conv_out_c[
1], kernel_size=self.conv_kernals[1])
self.conv_3 = nn.Conv1d(self.conv_in_c[1], self.conv_out_c[
2], kernel_size=self.conv_kernals[2])
c1_out_sz = self.vocab_size-(self.conv_kernals[0]) + 1
c2_out_sz = c1_out_sz-(self.conv_kernals[1]) + 1
c3_out_sz = self.conv_out_c[2] * \
((c2_out_sz-(self.conv_kernals[2])) + 1)
self.encoder_layer = nn.Linear(c3_out_sz, self.encoder_mlp_size)
self.mean_layer = nn.Linear(self.encoder_mlp_size, self.latent_size)
self.sd_layer = nn.Linear(self.encoder_mlp_size, self.latent_size)
self.decoder_layer_start = nn.Linear(
self.latent_size, self.latent_size)
self.gru = nn.LSTM(args.latent,
args.rnn_hidd, args.num_layers, batch_first=True)
self.gru_last = nn.LSTM(args.rnn_hidd + self.embed_dim,
args.rnn_hidd, 1, batch_first=True)
self.decode_layer_final = nn.Linear(args.rnn_hidd, self.vocab_size)
self.sos_idx = sos_idx
self.pad_idx = pad_idx
self.char_embedder = nn.Embedding(
num_embeddings=self.vocab_size,
embedding_dim=self.embed_dim,
padding_idx=pad_idx
)
self.selu = nn.SELU()
self.softmax = nn.Softmax()
nn.init.xavier_normal_(self.encoder_layer.weight)
nn.init.xavier_normal_(self.mean_layer.weight)
nn.init.xavier_normal_(self.sd_layer.weight)
nn.init.xavier_normal_(self.decoder_layer_start.weight)
nn.init.xavier_normal_(self.decode_layer_final.weight)
def encode(self, x):
x0 = self.selu(self.conv_1(x))
x1 = self.selu(self.conv_2(x0))
x2 = self.selu(self.conv_3(x1))
x3 = x2.view(x.size(0), -1)
x4 = F.selu(self.encoder_layer(x3))
return self.mean_layer(x4), F.softplus(self.sd_layer(x4))
def sampling(self, z_mean, z_sd):
epsilon = self.eps * torch.randn_like(z_sd)
return z_sd * epsilon + z_mean
def decode(self, z, idx_tensor: torch.Tensor = None):
z = F.selu(self.decoder_layer_start(z))
z = z.view(z.size(0), 1, z.size(-1)).repeat(1, self.max_name_len, 1)
output, _ = self.gru(z)
if idx_tensor is not None:
x_embed = self.char_embedder(idx_tensor)
tf_input = torch.cat((output, x_embed), dim=2)
all_outs, _ = self.gru_last(tf_input)
out_reshape = all_outs.contiguous().view(-1, output.size(-1))
y0 = F.softmax(self.decode_layer_final(out_reshape), dim=1)
y = y0.contiguous().view(all_outs.size(0), -1, y0.size(-1))
else:
batch_sz = z.shape[0]
char_inputs = torch.LongTensor(
[self.sos_idx] * batch_sz).to(DEVICE)
embed_char = self.char_embedder(char_inputs)
y = None
for i in range(self.max_name_len):
input = torch.cat((output[:, i, :], embed_char), dim=1)
if i == 0:
out, hn = self.gru_last(input.unsqueeze(1))
else:
out, hn = self.gru_last(input.unsqueeze(1), hn)
sm_out = F.softmax(self.decode_layer_final(out), dim=1)
samples = torch.distributions.Categorical(
sm_out).sample()
if i == 0:
y = sm_out
else:
y = torch.cat((y, sm_out), dim=1)
embed_char = self.char_embedder(samples)
return y
def forward(self, x, idx_tensor: torch.Tensor = None):
z_mean, z_sd = self.encode(x)
z = self.sampling(z_mean, z_sd)
return self.decode(z, idx_tensor), z_mean, z_sd
| 38.875969 | 76 | 0.592223 | 734 | 5,015 | 3.768392 | 0.192098 | 0.060738 | 0.037961 | 0.014461 | 0.271511 | 0.15799 | 0.116052 | 0.057845 | 0.03543 | 0.03543 | 0 | 0.020011 | 0.292522 | 5,015 | 128 | 77 | 39.179688 | 0.759583 | 0.00658 | 0 | 0.048544 | 0 | 0 | 0.000605 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058252 | false | 0 | 0.038835 | 0 | 0.15534 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ece2b7f45bad32bc48fb693a6dbc789112e48513 | 2,391 | py | Python | gdmtl/datasets/sampler.py | binshengliu/gdmtl | fb8bfe0e87bbd6d8535cc8449012fb4119430d4c | [
"MIT"
] | null | null | null | gdmtl/datasets/sampler.py | binshengliu/gdmtl | fb8bfe0e87bbd6d8535cc8449012fb4119430d4c | [
"MIT"
] | null | null | null | gdmtl/datasets/sampler.py | binshengliu/gdmtl | fb8bfe0e87bbd6d8535cc8449012fb4119430d4c | [
"MIT"
] | 1 | 2022-02-26T00:49:03.000Z | 2022-02-26T00:49:03.000Z | from __future__ import annotations
from typing import Iterator, List
import numpy as np
from more_itertools import chunked
from torch.utils.data import Sampler
from .rank_dataset import TokenCountDataset
class DynamicBatchSampler(Sampler): # type:ignore
def __init__(
self,
dataset: TokenCountDataset,
batch_tokens: int,
mod: int,
shuffle: bool = True,
):
self.dataset = dataset
self.batch_tokens = batch_tokens
self.batches: List[List[int]] = []
self.mod = mod
batch: List[int] = [0]
size = self.dataset.estimate_tokens(0)
total_size = size
for idx in range(1, len(self.dataset)):
if total_size + size > self.batch_tokens:
bsz = len(batch) // self.mod * self.mod
self.batches.append(batch[:bsz])
batch = batch[bsz:] + [idx]
# Only estimate the size for the first example of a batch. Following
# will be padded to the same size.
size = self.dataset.estimate_tokens(batch[0])
total_size = size * len(batch)
else:
batch.append(idx)
total_size += size
if batch:
self.batches.append(batch)
if shuffle:
np.random.shuffle(self.batches)
def __iter__(self) -> Iterator[List[int]]:
return iter(self.batches)
def __len__(self) -> int:
return len(self.batches)
def avg_bsz(self) -> float:
size: float = np.mean([len(x) for x in self.batches])
return size
class FixedBatchSampler(Sampler): # type:ignore
"""This batch sample is identical to the default pytorch BatchSampler except that it
supports post-sampling shuffle. This is useful in the case that the dataset needs to
be sorted by length and thus the shuffling must be delayed here.
"""
def __init__(
self, dataset: TokenCountDataset, batch_size: int, shuffle: bool = True
):
self.dataset = dataset
self.batch_size = batch_size
self.batches = list(chunked(list(range(len(self.dataset))), batch_size))
if shuffle:
np.random.shuffle(self.batches)
def __iter__(self) -> Iterator[List[int]]:
return iter(self.batches)
def __len__(self) -> int:
return len(self.batches)
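# Hedged usage sketch (not part of the original module): both samplers are meant to be
# passed to a torch DataLoader through its batch_sampler argument. The dataset and
# collate function below are hypothetical; only the TokenCountDataset interface is assumed.
#
# from torch.utils.data import DataLoader
#
# dataset = ...  # a TokenCountDataset, sorted by length if desired
# batch_sampler = DynamicBatchSampler(dataset, batch_tokens=4096, mod=8, shuffle=True)
# loader = DataLoader(dataset, batch_sampler=batch_sampler, collate_fn=my_collate_fn)
# print(f"average batch size: {batch_sampler.avg_bsz():.1f}")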
| 30.653846 | 88 | 0.608532 | 294 | 2,391 | 4.795918 | 0.319728 | 0.085816 | 0.049645 | 0.025532 | 0.324823 | 0.283688 | 0.22695 | 0.22695 | 0.22695 | 0.163121 | 0 | 0.002392 | 0.300711 | 2,391 | 77 | 89 | 31.051948 | 0.840909 | 0.148892 | 0 | 0.327273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.127273 | false | 0 | 0.109091 | 0.072727 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ece5b98101c0b7084789fda8de5123934565565b | 15,973 | py | Python | tamarco/core/microservice.py | System73/tamarco | c85bec267d39057a4cd5f1c9854d5e2840cebb1e | [
"MIT"
] | 9 | 2019-09-06T10:57:36.000Z | 2019-10-14T07:24:02.000Z | tamarco/core/microservice.py | System73/tamarco | c85bec267d39057a4cd5f1c9854d5e2840cebb1e | [
"MIT"
] | 4 | 2020-04-06T15:52:40.000Z | 2021-06-02T00:27:33.000Z | tamarco/core/microservice.py | System73/tamarco | c85bec267d39057a4cd5f1c9854d5e2840cebb1e | [
"MIT"
] | 3 | 2019-09-06T10:52:05.000Z | 2019-10-10T07:45:26.000Z | import asyncio
import logging
import sys
import time
import uuid
from collections import OrderedDict
from collections.abc import Callable
from functools import partial
from threading import Thread
from typing import Coroutine, Union
from tamarco.core.dependency_resolver import CantSolveDependencies, resolve_dependency_order
from tamarco.core.logging.logging import Logging
from tamarco.core.patterns import Singleton
from tamarco.core.settings.settings import Settings, SettingsView
from tamarco.core.signals import SignalsManager
from tamarco.core.tasks import TasksManager, get_task_wrapper, get_thread_wrapper
from tamarco.core.utils import Informer, ROOT_SETTINGS, get_fn_full_signature
from tamarco.resources.bases import BaseResource
from tamarco.resources.basic.metrics.resource import MetricsResource
from tamarco.resources.basic.registry.resource import Registry
from tamarco.resources.basic.status.resource import StatusResource
from tamarco.resources.debug.profiler import ProfilerResource
from tamarco.resources.io.http.resource import HTTPServerResource
logger = logging.getLogger("tamarco")
class MicroserviceBase(metaclass=Singleton):
# Name of the microservice; used by the resources
# to report the service name.
name = None
# Instance id of the microservice; the name is shared
# among instances but the instance id is unique.
instance_id = uuid.uuid4()
# Name of the deployment; used by the resources
# to report a deploy name. It is loaded from settings.
deploy_name = None
# Loggers to be added by the application code.
extra_loggers_names = []
# Main event loop.
loop = asyncio.get_event_loop()
# Manager for task.
tasks_manager = TasksManager()
# Settings manager.
settings = Settings()
# Logging manager.
logging = Logging()
@property
def loggers_names(self):
"""All loggers used by the framework.
Returns:
list: list of loggers names used by the microservice.
"""
loggers = {"tamarco", "tamarco.tasks", "tamarco.settings", "asyncio"}
for resource in self.resources.values():
loggers.update(resource.loggers_names)
loggers.update(self.extra_loggers_names)
loggers.update({self.name})
return loggers
def __new__(cls, *args, **kwargs):
cls.resources = OrderedDict()
dependency_graph = {
attr_name: getattr(cls, attr_name).depends_on
for attr_name in dir(cls)
if isinstance(getattr(cls, attr_name), BaseResource)
}
try:
resources_dep_ordered = resolve_dependency_order(dependency_graph)
except CantSolveDependencies as e:
print(e, file=sys.stderr)
exit(12)
else:
for name in resources_dep_ordered:
cls.resources[name] = getattr(cls, name)
return super().__new__(cls, *args, **kwargs)
def __init__(self):
assert self.name is not None, "Error, name should be defined in your microservice class"
self.logger = None
self._configure_provisional_logger()
def _configure_provisional_logger(self):
"""Provisional logging used before be able to read the final configuration from the settings."""
self.logger = logging.getLogger(self.name)
stdout_handler = logging.StreamHandler(sys.stdout)
print(f"Configuring logger provisional logger of {self.name} to INFO and stdout")
self.logger.setLevel(logging.INFO)
self.logger.addHandler(stdout_handler)
self.logger.info(f"Configured {self.name} logger")
async def bind(self):
"""Call the bind function of all the resources.
It binds the resources to the microservice, allowing the resources to identify their microservice.
"""
self.logger.info(f"Binding to microservice the resources: {list(self.resources.keys())}")
await self.settings.bind(self.loop)
for name, resource in self.resources.items():
try:
await resource.bind(self, name)
except Exception:
self.logger.exception(f"Unexpected exception binding the resource {resource}")
exit(11)
async def run_in_all_resources(self, method):
"""Run the method name in all the resources.
Args:
method (str): Method name to run in all the resources.
"""
for resource in self.resources.values():
self.logger.debug(f"Calling {method} of resource {resource.name}")
try:
await getattr(resource, method)()
except Exception:
self.logger.exception(f"Error in {method} of resource {resource}")
else:
if method == "start":
self.logger.info(f"Started {resource.name} from {self.name}")
async def start_logging(self):
"""Initializes the logging of the microservice."""
self.logger.info(f"Starting logging in microservice {self.name} with loggers: {self.loggers_names}")
await self.logging.start(
loggers=self.loggers_names, microservice_name=self.name, deploy_name=self.deploy_name, loop=self.loop
)
Informer.log_all_info(self.logger)
async def stop_settings(self):
"""Stops the settings of the microservice."""
self.logger.info("Stopping microservice settings")
await self.settings.stop()
async def start_settings(self):
"""Initializes the settings of the microservice."""
self.logger.info("Starting microservice settings")
await self.settings.start()
self.deploy_name = await self.settings.get(f"{ROOT_SETTINGS}.deploy_name")
await self._configure_logging_settings()
await self._configure_resource_settings()
async def _configure_logging_settings(self):
self.logger.info("Configuring logging settings")
self.logging.configure_settings(SettingsView(self.settings, f"{ROOT_SETTINGS}.logging", self.name))
async def _configure_resource_settings(self):
self.logger.info("Configuring resources settings")
for resource in self.resources.values():
await resource.configure_settings(
SettingsView(self.settings, f"{ROOT_SETTINGS}.resources.{resource.name}", self.name)
)
def _collect_tasks(self):
for attr_name in dir(self):
attr = getattr(self, attr_name)
if hasattr(attr, "_mark_task"):
self.tasks_manager.register_task(attr._name, attr)
elif hasattr(attr, "_mark_thread"):
self.tasks_manager.register_thread(attr._name, attr)
class MicroserviceContext(MicroserviceBase):
""""This class is used to use tamarco resources without using a full microservice,
for example a script.
"""
name = "microservice_context"
async def start(self):
self.tasks_manager.set_loop(self.loop)
await self.bind()
await self.start_settings()
await self.start_logging()
await self.run_in_all_resources("pre_start")
await self.run_in_all_resources("start")
await self.run_in_all_resources("post_start")
self._collect_tasks()
self.tasks_manager.start_all()
async def stop(self):
self.tasks_manager.stop_all()
await self.stop_settings()
await self.run_in_all_resources("stop")
await self.run_in_all_resources("post_stop")
class Microservice(MicroserviceBase):
"""Main class of a microservice.
This class is responsible for controlling the lifecycle of the microservice; it also
builds and provides the necessary elements that a resource needs to work.
The resources of a microservice should be declared in this class. The microservice automatically takes the ownership
of all the declared resources.
"""
# The signals manager is responsible for handling the system signals, providing a graceful stop of
# the service when necessary.
signals_manager = SignalsManager()
# Default http server resource. It is used by the metrics and status resource to expose information.
tamarco_http_report_server = HTTPServerResource()
# Default metric resource.
metrics = MetricsResource()
# Default status resource. It is responsible for applying the restart policies and exposing the status of the
# resources over an HTTP API.
status = StatusResource()
# Default profiler resource. It is responsible for profiling the application when the setting is activated.
profiler = ProfilerResource()
# Default registry resource. It is responsible for maintaining an etcd registry with all the live microservice
# instances and their IPs, to be used by a discovery system.
registry = Registry()
def __init__(self):
super().__init__()
self.tasks_manager.set_loop(self.loop)
self.signals_manager.set_loop(self.loop)
async def pre_start(self):
"""Pre start stage of lifecycle.
This method can be overwritten by the user to add some logic in the start.
"""
self.logger.info("============ Pre Starting ============")
await self.run_in_all_resources("pre_start")
async def start(self):
"""Start stage of lifecycle.
This method can be overwritten by the user to add some logic in the start.
"""
self.logger.info("============ Starting ============")
await self.run_in_all_resources("start")
self._collect_tasks()
self.tasks_manager.start_all()
async def post_start(self):
"""Post start stage of lifecycle.
This method can be overwritten by the user to add some logic in the start.
"""
self.logger.info("============ Post Starting ============")
await self.run_in_all_resources("post_start")
async def stop(self):
"""Stop stage of the lifecycle.
This method can be overwritten by the user to add some logic to the shut down.
This method should close all the I/O operations opened by the resources.
"""
self.logger.info("============ Stopping ============")
await self.run_in_all_resources("stop")
await self.stop_settings()
self.tasks_manager.stop_all()
async def post_stop(self):
"""Post stop stage of the lifecycle.
This method can be overwritten by the user to add some logic to the shut down.
"""
self.logger.info("============ Post Stopping ============")
await self.run_in_all_resources("post_stop")
async def _setup(self):
await self.bind()
await self.start_settings()
await self.start_logging()
await self.pre_start()
await self.start()
await self.post_start()
def run(self):
"""Run a microservice.
It initializes the main asyncio event loop, so this function only returns when the microservice
ends its life cycle.
"""
self.logger.info(f"Running microservice {self.name}. Calling setup method")
try:
self.loop.run_until_complete(self._setup())
self.loop.run_forever()
except Exception:
self.logger.critical(
"Unexpected exception in the setup or during the run of the loop, stopping the " "microservice",
exc_info=True,
)
self.loop.run_until_complete(self.stop_gracefully())
async def stop_gracefully(self):
"""Stop the microservice gracefully.
Shut down the microservice. If the microservice has not closed gracefully after 30 seconds, it forces an exit.
"""
thread = Thread(target=self._wait_and_force_exit)
thread.start()
await self.stop()
await self.post_stop()
if self.loop.is_running():
self.loop.stop()
def _wait_and_force_exit(self):
time.sleep(30)
self.logger.critical("Error stopping all the resources. Forcing exit.")
exit(1)
def task(name_or_fn):
"""Decorator to convert a method of a microservice in a asyncio task.
The task is started and stopped when the microservice starts and stops respectively.
Args:
name_or_fn: Name of the task or function. If function the task name is the declared name of the function.
"""
def decorator(name, fn):
wrapper = get_task_wrapper(fn, name)
wrapper._mark_task = True
wrapper._name = name
return wrapper
if name_or_fn is str:
name = name_or_fn
return partial(decorator, name)
elif callable(name_or_fn):
if not asyncio.iscoroutinefunction(name_or_fn):
raise Exception(f"Tamarco {name_or_fn} task not created! The function is not asynchronous")
fn = name_or_fn
name = get_fn_full_signature(fn)
return decorator(name, fn)
else:
raise Exception("task decorator should be used with a parameter (name) that is a str or without parameter")
def thread(name_or_fn):
"""Decorator to convert a method of a microservice in a thread.
The thread is started and stopped when the microservice starts and stops respectively.
Args:
name_or_fn: Name of the thread or function. If function the thread name is the declared name of the function.
"""
def decorator(name: str, fn: Callable):
wrapper = get_thread_wrapper(fn, name)
wrapper._mark_thread = True
wrapper._name = name
return wrapper
if name_or_fn is str:
name = name_or_fn
return partial(decorator, name)
elif callable(name_or_fn):
fn = name_or_fn
name = get_fn_full_signature(fn)
return decorator(name, fn)
else:
raise Exception("task decorator should be used with a parameter (name) that is a str or without parameter")
def task_timer(interval=1000, one_shot=False, autostart=False) -> Union[Callable, Coroutine]:
"""Decorator to declare a task that should repeated in time intervals.
Examples:
>>> @task_timer()
>>> async def execute(*arg,**kwargs)
>>> print('tick')
>>> @task_timer(interval=1000, oneshot=True, autostart=True)
>>> async def execute(*args,**kwargs)
>>> print('tick')
Args:
interval (int): Interval in milliseconds when the task is repeated.
one_shot (bool): Only runs the task once.
autostart (bool): Task is automatically initialized with the microservice.
"""
def wrapper_task_timer(fn: Union[str, Callable]) -> Union[Callable, Coroutine]:
"""Function that adds timer functionality"""
async def fn_with_sleep(*args, **kwargs):
try:
# Convert the interval from milliseconds to seconds (float)
interval_seconds = interval / 1000
# If one_shot is set or autostart is disabled, sleep first, then execute and finish
execute_task = autostart and not one_shot
while True:
if execute_task:
logger.debug(
f"Executing task timer {fn.__name__} with the params: interval = {interval}, "
f"one_shot = {one_shot}, autostart = {autostart}"
)
await fn(*args, **kwargs)
if one_shot and execute_task:
break
execute_task = True
await asyncio.sleep(interval_seconds)
except Exception:
logger.exception(f"Unexpected exception running task timer {fn.__name__}. Timer will not recover")
# Give the timer function the original task's name
fn_with_sleep.__name__ = fn.__name__
return task(fn_with_sleep)
return wrapper_task_timer
| 38.121718 | 120 | 0.65617 | 1,984 | 15,973 | 5.139113 | 0.169355 | 0.026481 | 0.019223 | 0.018341 | 0.298254 | 0.251177 | 0.219106 | 0.203511 | 0.161044 | 0.153393 | 0 | 0.001856 | 0.257998 | 15,973 | 418 | 121 | 38.212919 | 0.858421 | 0.191824 | 0 | 0.274194 | 0 | 0 | 0.148099 | 0.010585 | 0 | 0 | 0 | 0 | 0.004032 | 1 | 0.056452 | false | 0 | 0.092742 | 0 | 0.262097 | 0.008065 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ece6b2f23aba6f4dbaac2a541f07ef48b3ac7e3b | 4,990 | py | Python | napari/utils/colormaps/_tests/test_categorical_colormap.py | MaksHess/napari | 64a144607342c02177fc62fa83a3442ace0a98e7 | [
"BSD-3-Clause"
] | 1,345 | 2019-03-03T21:14:14.000Z | 2022-03-31T19:46:39.000Z | napari/utils/colormaps/_tests/test_categorical_colormap.py | MaksHess/napari | 64a144607342c02177fc62fa83a3442ace0a98e7 | [
"BSD-3-Clause"
] | 3,904 | 2019-03-02T01:30:24.000Z | 2022-03-31T20:17:27.000Z | napari/utils/colormaps/_tests/test_categorical_colormap.py | MaksHess/napari | 64a144607342c02177fc62fa83a3442ace0a98e7 | [
"BSD-3-Clause"
] | 306 | 2019-03-29T17:09:10.000Z | 2022-03-30T09:54:11.000Z | import json
from itertools import cycle
import numpy as np
import pytest
from napari.utils.colormaps.categorical_colormap import CategoricalColormap
def test_default_categorical_colormap():
cmap = CategoricalColormap()
assert cmap.colormap == {}
color_cycle = cmap.fallback_color
np.testing.assert_almost_equal(color_cycle.values, [[1, 1, 1, 1]])
np.testing.assert_almost_equal(next(color_cycle.cycle), [1, 1, 1, 1])
def test_categorical_colormap_direct():
"""Test a categorical colormap with a provided mapping"""
colormap = {'hi': np.array([1, 1, 1, 1]), 'hello': np.array([0, 0, 0, 0])}
cmap = CategoricalColormap(colormap=colormap)
color = cmap.map(['hi'])
np.testing.assert_allclose(color, [[1, 1, 1, 1]])
color = cmap.map(['hello'])
np.testing.assert_allclose(color, [[0, 0, 0, 0]])
# test that the default fallback color (white) is applied
new_color_0 = cmap.map(['not a key'])
np.testing.assert_almost_equal(new_color_0, [[1, 1, 1, 1]])
new_cmap = cmap.colormap
np.testing.assert_almost_equal(new_cmap['not a key'], [1, 1, 1, 1])
# set a cycle of fallback colors
new_fallback_colors = [[1, 0, 0, 1], [0, 1, 0, 1]]
cmap.fallback_color = new_fallback_colors
new_color_1 = cmap.map(['new_prop 1'])
np.testing.assert_almost_equal(
np.squeeze(new_color_1), new_fallback_colors[0]
)
new_color_2 = cmap.map(['new_prop 2'])
np.testing.assert_almost_equal(
np.squeeze(new_color_2), new_fallback_colors[1]
)
def test_categorical_colormap_cycle():
color_cycle = [[1, 1, 1, 1], [1, 0, 0, 1]]
cmap = CategoricalColormap(fallback_color=color_cycle)
# verify that no mapping between prop value and color has been set
assert cmap.colormap == {}
# the values used to create the color cycle can be accessed via fallback color
np.testing.assert_almost_equal(cmap.fallback_color.values, color_cycle)
# map 2 colors, verify their colors are returned in order
colors = cmap.map(['hi', 'hello'])
np.testing.assert_almost_equal(colors, color_cycle)
# map a third color and verify the colors wrap around
third_color = cmap.map(['bonjour'])
np.testing.assert_almost_equal(np.squeeze(third_color), color_cycle[0])
def test_categorical_colormap_cycle_as_dict():
color_values = np.array([[1, 1, 1, 1], [1, 0, 0, 1]])
color_cycle = cycle(color_values)
fallback_color = {'values': color_values, 'cycle': color_cycle}
cmap = CategoricalColormap(fallback_color=fallback_color)
# verify that no mapping between prop value and color has been set
assert cmap.colormap == {}
# the values used to create the color cycle can be accessed via fallback color
np.testing.assert_almost_equal(cmap.fallback_color.values, color_values)
np.testing.assert_almost_equal(
next(cmap.fallback_color.cycle), color_values[0]
)
fallback_colors = np.array([[1, 0, 0, 1], [0, 1, 0, 1]])
def test_categorical_colormap_from_array():
cmap = CategoricalColormap.from_array(fallback_colors)
np.testing.assert_almost_equal(cmap.fallback_color.values, fallback_colors)
color_mapping = {
'typeA': np.array([1, 1, 1, 1]),
'typeB': np.array([1, 0, 0, 1]),
}
default_fallback_color = np.array([[1, 1, 1, 1]])
@pytest.mark.parametrize(
'params,expected',
[
({'colormap': color_mapping}, (color_mapping, default_fallback_color)),
(
{'colormap': color_mapping, 'fallback_color': fallback_colors},
(color_mapping, fallback_colors),
),
({'fallback_color': fallback_colors}, ({}, fallback_colors)),
(color_mapping, (color_mapping, default_fallback_color)),
],
)
def test_categorical_colormap_from_dict(params, expected):
cmap = CategoricalColormap.from_dict(params)
np.testing.assert_equal(cmap.colormap, expected[0])
np.testing.assert_almost_equal(cmap.fallback_color.values, expected[1])
def test_categorical_colormap_equality():
color_cycle = [[1, 1, 1, 1], [1, 0, 0, 1]]
cmap_1 = CategoricalColormap(fallback_color=color_cycle)
cmap_2 = CategoricalColormap(fallback_color=color_cycle)
cmap_3 = CategoricalColormap(fallback_color=[[1, 1, 1, 1], [1, 1, 0, 1]])
cmap_4 = CategoricalColormap(
colormap={0: np.array([0, 0, 0, 1])}, fallback_color=color_cycle
)
assert cmap_1 == cmap_2
assert cmap_1 != cmap_3
assert cmap_1 != cmap_4
# test equality against a different type
assert cmap_1 != color_cycle
@pytest.mark.parametrize(
'params',
[
{'colormap': color_mapping},
{'colormap': color_mapping, 'fallback_color': fallback_colors},
{'fallback_color': fallback_colors},
],
)
def test_categorical_colormap_serialization(params):
cmap_1 = CategoricalColormap(**params)
cmap_json = cmap_1.json()
json_dict = json.loads(cmap_json)
cmap_2 = CategoricalColormap(**json_dict)
assert cmap_1 == cmap_2
| 33.716216 | 82 | 0.690982 | 695 | 4,990 | 4.717986 | 0.130935 | 0.025008 | 0.026532 | 0.020738 | 0.501067 | 0.374504 | 0.269594 | 0.191522 | 0.184203 | 0.128088 | 0 | 0.031411 | 0.183367 | 4,990 | 147 | 83 | 33.945578 | 0.773252 | 0.114228 | 0 | 0.156863 | 0 | 0 | 0.044485 | 0 | 0 | 0 | 0 | 0 | 0.235294 | 1 | 0.078431 | false | 0 | 0.04902 | 0 | 0.127451 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecf1ce20b4fefaf58c8c7d08d20616ef3be895a2 | 375 | py | Python | src/utils/utils.py | andrelopez/cron-parser-cli | 35df5d5d8113dde99bee95f834c63248ce459194 | [
"MIT"
] | null | null | null | src/utils/utils.py | andrelopez/cron-parser-cli | 35df5d5d8113dde99bee95f834c63248ce459194 | [
"MIT"
] | null | null | null | src/utils/utils.py | andrelopez/cron-parser-cli | 35df5d5d8113dde99bee95f834c63248ce459194 | [
"MIT"
] | null | null | null | from columnar import columnar
def print_table(minute: str, hour: str, day: str, month: str, week: str, cmd: str) -> str:
table = [
['minute', minute],
['hour', hour],
['day of month', day],
['month', month],
['day of week', week],
['command', cmd]
]
table = columnar(table, no_borders=True)
return str(table) | 25 | 90 | 0.541333 | 46 | 375 | 4.369565 | 0.413043 | 0.109453 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.296 | 375 | 15 | 91 | 25 | 0.761364 | 0 | 0 | 0 | 0 | 0 | 0.119681 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.25 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecf35ef4c074dccc46a80355a4787bf3619f13a3 | 778 | py | Python | tests/utils/test_download.py | openghg/gather | 0096cfe66b0093cdd294fa2a67c060d7fc28d2fa | [
"Apache-2.0"
] | null | null | null | tests/utils/test_download.py | openghg/gather | 0096cfe66b0093cdd294fa2a67c060d7fc28d2fa | [
"Apache-2.0"
] | null | null | null | tests/utils/test_download.py | openghg/gather | 0096cfe66b0093cdd294fa2a67c060d7fc28d2fa | [
"Apache-2.0"
] | null | null | null | from gather.utils import download
import hashlib
from helpers import get_datapath
def test_download(requests_mock):
beaco2n_data = get_datapath(filename="test_data.csv", network="beaco2n")
binary_data = beaco2n_data.read_bytes()
mock_url = "https://example.com/some_csv.txt"
requests_mock.get(
mock_url,
content=binary_data,
status_code=200,
)
data = download(url=mock_url)
md5 = hashlib.md5(data).hexdigest()
assert md5 == "b62cdb3234e6afb87fc3de8605ae1b09"
requests_mock.get(
mock_url,
status_code=404,
)
data = download(url=mock_url)
assert not data
requests_mock.get(
mock_url,
status_code=666,
)
data = download(url=mock_url)
assert not data
| 19.948718 | 76 | 0.667095 | 97 | 778 | 5.103093 | 0.391753 | 0.09899 | 0.090909 | 0.115152 | 0.359596 | 0.270707 | 0.270707 | 0.141414 | 0 | 0 | 0 | 0.054237 | 0.241645 | 778 | 38 | 77 | 20.473684 | 0.784746 | 0 | 0 | 0.407407 | 0 | 0 | 0.107969 | 0.041131 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0.037037 | false | 0 | 0.111111 | 0 | 0.148148 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecf408df66ed468cf11584dcfbbc451fc2a54142 | 917 | py | Python | LeNet/LeNet_pt.py | MuhammedAshraf2020/implementation-of-CNN-Models | 52be2b8d27c2340d30b6cab1883fffdb2f656343 | [
"MIT"
] | 2 | 2021-02-09T16:14:50.000Z | 2021-08-03T13:33:47.000Z | LeNet/LeNet_pt.py | MuhammedAshraf2020/implementation-of-CNN-Models | 52be2b8d27c2340d30b6cab1883fffdb2f656343 | [
"MIT"
] | null | null | null | LeNet/LeNet_pt.py | MuhammedAshraf2020/implementation-of-CNN-Models | 52be2b8d27c2340d30b6cab1883fffdb2f656343 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.optim as optim
# Create Model
class LeNet_pt(nn.Module):
def __init__(self):
super(LeNet_pt , self).__init__()
self.ConvModel = nn.Sequential(
nn.Conv2d(in_channels = 3 , out_channels = 6 , kernel_size = (5 , 5) , padding = (0 , 0) , stride = (1 , 1)),
nn.ReLU() ,
nn.AvgPool2d(kernel_size = (2 , 2) , stride = (2 , 2)) ,
nn.Conv2d(in_channels = 6 , out_channels = 16 , kernel_size = (5 , 5) , padding = (0 , 0) , stride = (1 , 1)),
nn.ReLU() ,
nn.AvgPool2d(kernel_size = (2 , 2) , stride = (2 , 2)) ,
nn.Conv2d(in_channels = 16 , out_channels = 120 , kernel_size = (5 , 5) , padding = (0 , 0) , stride = (1 , 1)) )
self.DenseModel = nn.Sequential(
nn.Linear(120 , 84),
nn.Linear(84 , 10))
def forward(self , x):
y = self.ConvModel(x)
y = y.reshape(y.shape[0] , -1)
y = self.DenseModel(y)
return y
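# Hedged shape check (not part of the original module): with the 5x5 convolutions and
# 2x2 average pooling above, a 3x32x32 input (CIFAR-sized, assumed here) is reduced to a
# 120-dimensional feature vector before the dense layers.
#
# model = LeNet_pt()
# x = torch.randn(4, 3, 32, 32)
# logits = model(x)  # expected shape: (4, 10)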
| 35.269231 | 118 | 0.586696 | 140 | 917 | 3.692857 | 0.321429 | 0.096712 | 0.058027 | 0.104449 | 0.381044 | 0.381044 | 0.381044 | 0.381044 | 0.381044 | 0.381044 | 0 | 0.075036 | 0.244275 | 917 | 25 | 119 | 36.68 | 0.670996 | 0.013086 | 0 | 0.181818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.136364 | 0 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
019e55ec1c50df9500d86cf12839f5b296083659 | 4,782 | py | Python | ELA_Training_Module_Final.py | Sawera557/PhotoChamp-Image-Forensic-Tool | e7550a97d33cdf58a66ea0efcc451178bfd88a8d | [
"MIT"
] | null | null | null | ELA_Training_Module_Final.py | Sawera557/PhotoChamp-Image-Forensic-Tool | e7550a97d33cdf58a66ea0efcc451178bfd88a8d | [
"MIT"
] | null | null | null | ELA_Training_Module_Final.py | Sawera557/PhotoChamp-Image-Forensic-Tool | e7550a97d33cdf58a66ea0efcc451178bfd88a8d | [
"MIT"
] | null | null | null | import itertools
import os
import random as random1
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PIL import Image, ImageChops, ImageEnhance
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.models import Sequential
from keras.optimizers import RMSprop
from keras.utils.np_utils import to_categorical
from pylab import *
from sklearn import *
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
def train_Ela_Model(csv_file , lr , ep):
def convert_to_ela_image(path, quality):
filename = path
resaved_filename = filename.split('.')[0] + '.resaved.jpg'
im = Image.open(filename).convert('RGB')
im.save(resaved_filename, 'JPEG', quality=quality)
resaved_im = Image.open(resaved_filename)
ela_im = ImageChops.difference(im, resaved_im)
extrema = ela_im.getextrema()
max_diff = max([ex[1] for ex in extrema])
if max_diff == 0:
max_diff = 1
scale = 255.0 / max_diff
ela_im = ImageEnhance.Brightness(ela_im).enhance(scale)
return ela_im
dataset = pd.read_csv(csv_file)
X = []
Y = []
for index, row in dataset.iterrows():
X.append(array(convert_to_ela_image(row[0], 90).resize((128, 128))).flatten() / 255.0)
Y.append(row[1])
X = np.array(X)
Y = to_categorical(Y, 2)
X = X.reshape(-1, 128, 128, 3)
X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=0.1, random_state=5, shuffle=True)
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='valid', activation='relu', input_shape=(128, 128, 3)))
model.add(Conv2D(filters=32, kernel_size=(5, 5), strides=(2, 2), padding='valid', activation='relu'))
model.add(MaxPool2D(pool_size=2, strides=None, padding='valid', data_format='channels_last'))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.50))
model.add(Dense(2, activation="softmax"))
model.summary()
optimizer = RMSprop(lr=lr, rho=0.9, epsilon=1e-08, decay=0.0)
model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
#early_stopping = EarlyStopping(monitor='val_acc', min_delta=0, patience=2, verbose=0, mode='auto')
epochs = ep
batch_size = 5
history = model.fit(X_train, Y_train, batch_size=batch_size, epochs=epochs, validation_data=(X_val, Y_val), verbose=2)
fig, ax = plt.subplots(3, 1)
ax[0].plot(history.history['loss'], color='b', label="Training loss")
ax[0].plot(history.history['val_loss'], color='r', label="validation loss", axes=ax[0])
legend = ax[0].legend(loc='best', shadow=True)
ax[1].plot(history.history['accuracy'], color='b', label="Training accuracy")
ax[1].plot(history.history['val_accuracy'], color='r', label="Validation accuracy")
legend_ = ax[1].legend(loc='best', shadow=True)
def plot_confusion_matrix(cm_, classes, normalize=False, title_='Confusion matrix', cmap=cm.get_cmap("Spectral")):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm_, interpolation='nearest', cmap=cmap)
plt.title(title_)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes)
plt.yticks(tick_marks, classes)
if normalize:
cm_ = cm_.astype('float') / cm_.sum(axis=1)[:, np.newaxis]
thresh = cm_.max() / 2.
for i, j in itertools.product(range(cm_.shape[0]), range(cm_.shape[1])):
plt.text(j, i, cm_[i, j],
horizontalalignment="center",
color="white" if cm_[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
Y_pred = model.predict(X_val)
Y_pred_classes = np.argmax(Y_pred, axis=1)
Y_true = np.argmax(Y_val, axis=1)
confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
plot_confusion_matrix(confusion_mtx, classes=range(2))
#plt.show()
image_path = os.getcwd()+"\\Figures"
Models_path = os.getcwd()+"\\Re_Traind_Models"
file_number =random1.randint(1, 1000000)
plot_Name = image_path+"\\ELA_"+str(file_number)+".png"
Model_Name = Models_path+"\\ELA_"+str(file_number)+".h5"
plt.savefig(plot_Name , transparent =True , bbox_inches="tight" , pad_inches = 2 , dpi = 50)
model.save(Model_Name)
return plot_Name , Model_Name
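# Hedged usage sketch (hypothetical path and hyperparameters, not from the original):
# the CSV is expected to hold an image path in the first column and a 0/1 label in the
# second, as consumed by dataset.iterrows() above.
#
# plot_path, model_path = train_Ela_Model("dataset.csv", lr=1e-4, ep=10)
# print(plot_path, model_path)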
| 37.952381 | 123 | 0.645128 | 663 | 4,782 | 4.481146 | 0.351433 | 0.021542 | 0.024234 | 0.011444 | 0.080781 | 0.023561 | 0.023561 | 0.023561 | 0.023561 | 0 | 0 | 0.029098 | 0.216646 | 4,782 | 125 | 124 | 38.256 | 0.764015 | 0.045797 | 0 | 0 | 0 | 0 | 0.076067 | 0.00545 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032609 | false | 0 | 0.163043 | 0 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01a154d7d3f987732b81523447ce1b96222a9132 | 3,140 | py | Python | natural/bank.py | Foxlik/natural | 0e8b15b200525ee147579b025a787646f7534890 | [
"MIT"
] | 21 | 2015-03-02T15:41:25.000Z | 2020-05-20T12:46:03.000Z | natural/bank.py | Foxlik/natural | 0e8b15b200525ee147579b025a787646f7534890 | [
"MIT"
] | 12 | 2015-10-29T18:02:00.000Z | 2021-11-10T21:49:40.000Z | natural/bank.py | Foxlik/natural | 0e8b15b200525ee147579b025a787646f7534890 | [
"MIT"
] | 8 | 2015-10-29T17:50:13.000Z | 2020-01-16T09:40:53.000Z | from natural.constant import _, IBAN_ALPHABET
from natural.constant import BBAN_RULES, BBAN_PATTERN, BBAN_MAP
import re
def bban_compact(number):
'''
Printable compacted Basic Bank Account Number. Removes all the padding
characters.
:param number: string
>>> bban_compact('1234.56.78.90')
'1234567890'
>>> bban_compact('068-9999995-01')
'068999999501'
'''
return re.sub(r'[-. ]', '', str(number))
def bban_base10(number):
'''
Printable Basic Bank Account Number in base-10.
:param number: string
>>> bban_base10('01234567')
'45670123'
>>> bban_base10('ABCD')
'10111213'
'''
number = bban_compact(number)
number = number[4:] + number[:4]
return ''.join([str(IBAN_ALPHABET.index(char)) for char in number])
def _bban_regex(structure):
return re.compile(
r'^%s$' % BBAN_PATTERN.sub(
lambda m: '%s{%s}' % (BBAN_MAP[m.group(2)], m.group(1)),
structure,
)
)
def bban(value, country=None, validate=False):
'''
Printable Basic Bank Account Number (BBAN) for the given country code. The
``country`` must be a valid ISO 3166-1 alpha-2 country code.
:param value: string or int
:param country: string
>>> bban('068-9999995-01', 'BE')
'068999999501'
>>> bban('555', 'NL')
'555'
>>> bban('555', 'NL', validate=True)
Traceback (most recent call last):
...
ValueError: Invalid BBAN, number does not match specification
>>> bban('123', 'XY', validate=True)
Traceback (most recent call last):
...
ValueError: Invalid BBAN, country unknown
'''
value = bban_compact(value)
if validate:
country = country.upper()
try:
rules = BBAN_RULES[country]
except KeyError:
raise ValueError(_('Invalid BBAN, country unknown'))
regex = _bban_regex(rules['bban'])
if not regex.match(value):
raise ValueError(
_('Invalid BBAN, number does not match specification')
)
return value
def iban(number, validate=False):
'''
Printable International Bank Account Number (IBAN) as specified in ISO
13616.
:param number: string
>>> iban('BE43068999999501')
'BE43 0689 9999 9501'
>>> iban('XY32012341234123', validate=True)
Traceback (most recent call last):
...
ValueError: Invalid IBAN, country unknown
>>> iban('BE43068999999502', validate=True)
Traceback (most recent call last):
...
ValueError: Invalid IBAN, digits check failed
'''
number = bban_compact(number)
if validate:
country = number[:2]
if country not in BBAN_RULES:
raise ValueError(_('Invalid IBAN, country unknown'))
# Do the 10-mod-97 check
digits = bban_base10(number)
if int(digits) % 97 != 1:
raise ValueError(_('Invalid IBAN, digits check failed'))
# Check BBAN for country
bban(number[4:], country, validate=True)
groups = [number[x:x + 4] for x in range(0, len(number), 4)]
return ' '.join(groups)
| 25.528455 | 78 | 0.606369 | 369 | 3,140 | 5.084011 | 0.327913 | 0.072495 | 0.036247 | 0.053305 | 0.28678 | 0.201493 | 0.172175 | 0.172175 | 0.127932 | 0.127932 | 0 | 0.08518 | 0.267197 | 3,140 | 122 | 79 | 25.737705 | 0.730117 | 0.424204 | 0 | 0.095238 | 0 | 0 | 0.100439 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.119048 | false | 0 | 0.071429 | 0.02381 | 0.309524 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01a26e38bfd6477fceda79c2ab6901ad81a89ddd | 6,435 | py | Python | menu.py | ThinkDownstairs/coder | 7a42a9bac941039b96ccf2430e560cc60e2159df | [
"WTFPL"
] | 1 | 2018-03-20T06:01:17.000Z | 2018-03-20T06:01:17.000Z | menu.py | ThinkDownstairs/coder | 7a42a9bac941039b96ccf2430e560cc60e2159df | [
"WTFPL"
] | 19 | 2018-03-20T23:11:38.000Z | 2018-04-01T17:39:10.000Z | menu.py | ThinkDownstairs/coder | 7a42a9bac941039b96ccf2430e560cc60e2159df | [
"WTFPL"
] | null | null | null |
from collections import namedtuple
import state_manager
import background
import sound
import quit_
import howto
import locations
import pygame
import consts
class MenuEntry(object):
def __init__(self, name: str, typ) -> None:
super().__init__()
self._name = name
self._typ = typ
self._surface = None
self._surface_selected = None
self._x = 0
self._y = 0
self._h = 0
self._w = 0
def prepare(self, font: pygame.font.Font, font_selected: pygame.font.Font) -> None:
surface = font.render(self._name, True, (255, 255, 255), None)
surface_selected = font_selected.render(self._name, True, (255, 255, 255), None)
self._w = max(surface.get_width(), surface_selected.get_width()) + 20
self._h = max(surface.get_height(), surface_selected.get_height()) + 10
self._surface = pygame.Surface((self._w, self._h), pygame.SRCALPHA, None)
self._surface.blit(surface, (self._w // 2 - surface.get_width() // 2, self._h // 2 - surface.get_height() // 2))
self._surface_selected = pygame.Surface((self._w, self._h), pygame.SRCALPHA, None)
self._surface_selected.fill((0, 0, 0))
self._surface_selected.blit(surface_selected, (self._w // 2 - surface_selected.get_width() // 2, self._h // 2 - surface_selected.get_height() // 2))
def contains_pos(self, x: int, y: int) -> bool:
return (self._x < x < (self._x + self._surface.get_width())) and (self._y < y < (self._y + self._surface.get_height()))
def set_pos(self, x: int, y: int):
self._x = x
self._y = y
def render(self, target: pygame.Surface, selected: bool):
s = self._surface_selected if selected else self._surface
target.blit(s, (self._x, self._y))
if selected:
pygame.draw.rect(target, (255, 255, 255), (self._x, self._y, s.get_width(), s.get_height()), 3)
def __hash__(self):
return hash(self._name)
def __eq__(self, other):
return hash(self) == hash(other)
height = property(lambda s: s._h)
width = property(lambda s: s._w)
typ = property(lambda s: s._typ)
class Menu(state_manager.State):
def __init__(self) -> None:
super().__init__()
self._menu_entries = []
self._initialized = False
self._mouse = (consts.SCREEN_W + 1, consts.SCREEN_H + 1)
self._idx = 0
self._background = None
self._font = pygame.font.Font(locations.font('DejaVuSansMono.ttf'), 24)
self._font_selected = pygame.font.Font(locations.font('DejaVuSansMono-Bold.ttf'), 28)
self._sound = sound.Sound()
def add(self, menu_entry: MenuEntry):
menu_entry.prepare(self._font, self._font_selected)
self._menu_entries.append(menu_entry)
def select(self, idx: int) -> None:
if idx != self._idx:
self._idx = idx % len(self._menu_entries)
self._sound.play(sound.Sounds.MENU_HOVER)
def render(self) -> None:
self.screen.fill((0, 0, 0))
self._background.render(self.screen)
for i in range(len(self._menu_entries)):
self._menu_entries[i].render(self.screen, self._idx == i)
pygame.draw.line(self.screen, (255, 255, 255), (self._mouse[0], self._mouse[1] - 10), (self._mouse[0], self._mouse[1] + 10), 2)
pygame.draw.line(self.screen, (255, 255, 255), (self._mouse[0] - 10, self._mouse[1]), (self._mouse[0] + 10, self._mouse[1]), 2)
#pygame.draw.rect(self.screen, (255, 255, 255), (*self._mouse, 10, 10))
pygame.display.flip()
def input(self) -> None:
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.state_manager.change_state(quit_.Quit)
elif event.type == pygame.KEYDOWN:
i = 0
if event.key in (pygame.K_UP, pygame.K_LEFT):
i = -1
elif event.key in (pygame.K_DOWN, pygame.K_RIGHT):
i = 1
if i != 0:
self.select(self._idx + i)
continue
if event.key in (pygame.K_RETURN, pygame.K_SPACE):
me = self._menu_entries[self._idx]
self._sound.play(sound.Sounds.MENU_SELECT)
self.state_manager.change_state(me.typ)
continue
elif event.type == pygame.MOUSEMOTION:
self._mouse = event.pos
for i in range(len(self._menu_entries)):
if self._menu_entries[i].contains_pos(*self._mouse):
self.select(i)
elif event.type == pygame.MOUSEBUTTONDOWN:
l, m, r = pygame.mouse.get_pressed()
if l == 1:
me = self._menu_entries[self._idx]
if me.contains_pos(*self._mouse):
self._sound.play(sound.Sounds.MENU_SELECT)
self.state_manager.change_state(me.typ)
def update(self, delta: int, fps: float):
self._background.update(delta)
def enter(self, prev_: state_manager.StateType) -> None:
self._background = background.FloatingEditors(consts.SCREEN_W, consts.SCREEN_H)
self._idx = 0
if not self._initialized:
h = sum([me.height + 10 for me in self._menu_entries])
w = max([me.width for me in self._menu_entries])
offset = h // len(self._menu_entries)
t = consts.SCREEN_H // 2 - h // 2
for i in range(len(self._menu_entries)):
self._menu_entries[i].set_pos(consts.SCREEN_W // 2 - self._menu_entries[i].width // 2, t)
t += offset
self._initialized = True
if __name__ == '__main__':
import game
import about
def init():
pygame.init()
screen = pygame.display.set_mode((consts.SCREEN_W, consts.SCREEN_H))
pygame.display.set_caption('Coder')
pygame.mouse.set_visible(0)
return screen
sm = state_manager.StateManager(init())
m = Menu()
m.add(MenuEntry('Start', game.Game))
m.add(MenuEntry('HowTo', howto.HowTo))
m.add(MenuEntry('About', about.About))
m.add(MenuEntry('Quit', None))
sm.add_state(m)
sm.add_state(game.Game())
sm.add_state(howto.HowTo())
sm.add_state(about.About())
sm.change_state(Menu)
sm.main_loop()
| 39 | 156 | 0.591142 | 856 | 6,435 | 4.191589 | 0.160047 | 0.035674 | 0.062709 | 0.031773 | 0.323579 | 0.268952 | 0.179208 | 0.14019 | 0.114827 | 0.114827 | 0 | 0.025558 | 0.276457 | 6,435 | 164 | 157 | 39.237805 | 0.74506 | 0.010878 | 0 | 0.108696 | 0 | 0 | 0.011474 | 0.003615 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0 | 0.07971 | 0.021739 | 0.253623 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01a35af78a818794456f060908d249837cf70d44 | 5,576 | py | Python | cpepgen/chemical_linkages.py | hjuinj/cpepgen | 965f84148f783bd1d19aec4c9b86841a598d4a9b | [
"MIT"
] | null | null | null | cpepgen/chemical_linkages.py | hjuinj/cpepgen | 965f84148f783bd1d19aec4c9b86841a598d4a9b | [
"MIT"
] | null | null | null | cpepgen/chemical_linkages.py | hjuinj/cpepgen | 965f84148f783bd1d19aec4c9b86841a598d4a9b | [
"MIT"
] | null | null | null | """
This file contains several reactions for RDKIT
NOTES:
- Check what happens with ASP ASP ester bond formation? need more specifications to do that side targets?
- check_reaction_mols, also checking how many of the reaction sites are present in one molecule? - (more specific smart: make_aa_backbone_bond = [N][C][O:5]=[C:2][OH:1].[N:3][CH2:4][C][=O][C]>>[O:5]=[C:2][N:3][CH2:4].[OH2:1] "
- Check in unittest also fail states
authors: Shuzhe Wang & Benjamin Schroeder
"""
from rdkit import Chem
from rdkit.Chem import AllChem
import typing as t
#GERNEAL Functions:
def check_reaction_mols(reactants: list, reaction_smarts: str) -> bool:
"""
This function checks if all requirements on the educts are fulfilled for the reaction
:param reactants: list of Chem.Mol used as reactants
:param reaction_smarts: str defining the reaction
:return: boolean, True if everything is fulfilled
"""
educts = [educt for educt in reaction_smarts.split(">>")[0].split(".")]
educts_mol = [AllChem.MolFromSmarts(educt) for educt in educts]
#TODO check Mol is None
if (len(reactants) > len(educts)):
raise ValueError("There are more rectants than expected for this reaction.\n Given reactants: "+str(len(reactants))+" expected reactants: "+str(len(educts)))
elif (len(reactants) < len(educts)):
raise ValueError("There are more "+str(len(educts))+" rectants expected for this reaction.\n But only "+str(len(reactants))+" reactants were given")
else:
for ind, reactant in enumerate(reactants):
if not (reactant.HasSubstructMatch(educts_mol[ind])):
raise ValueError("Reactant " + str(ind) + " with smile " + Chem.MolToSmiles(
reactants[ind]) + " has not the required reaction pattern: " + str(educts[ind]))
return True
return False
def do_reaction(reactants: list, reaction_smarts: str) -> t.List[Chem.Mol]:
"""
This function is executed to perform a reaction
:param reactants:
:param reaction_smarts:
:return: list[Chem.Mol] products
"""
try:
rxn = AllChem.ReactionFromSmarts(reaction_smarts)
except Exception as err:
raise ValueError("Could not generate reaction object in do_reaction function:\n\t"+str("\t".join(err.args)))
# reac1
ps = rxn.RunReactants(tuple(reactants))
if (ps == None):
raise Exception("Reaction did not happen.")
return ps[0]
def perform_generic_reaction(reactants:t.List[Chem.Mol], reaction_smarts:str):
"""
This wrapper performs a reaction with the given reactants and reaction SMARTS.
:param reactants:
:param reaction_smarts:
:return:
"""
try:
check_reaction_mols(reactants=reactants, reaction_smarts=reaction_smarts)
products = do_reaction(reactants=reactants,
reaction_smarts=reaction_smarts)
except ValueError as err:
raise ValueError("Could not perform Reaction.\n Failed in the molecule checking step:\n\t" + str("\t".join(err.args)))
except Exception as err:
raise ValueError("Could not perform Reaction.\n Failed in the reaction step:\n\t" + str("\t".join(err.args)))
return products
#REACTIONS:
def make_custom_bond(reactant1, reactant2, reaction_smarts):
products = perform_generic_reaction(reactants=[reactant1, reactant2], reaction_smarts=reaction_smarts)
return products
def make_peptide_bond(carboxylGroupContaining: Chem.Mol, nitrogenGroupContaining: Chem.Mol) -> (t.List[Chem.Mol], str):
reaction_smarts = {
"educts" : [
"[C:1](=[O])[OH:2]",
"[NH2,NH1,NH3+1:3]-[CH2,CH1:4]" ,
],
"products" : [
"[C:1](=[O])[N:3]-[C:4]",
"[OH2:2]",
],
}
reaction_smarts = [".".join(reaction_smarts[i]) for i in reaction_smarts]
reaction_smarts = ">>".join(reaction_smarts)
# reaction_smarts = '[O:5]=[C:2][OH:1].[N:3][CH2,CH1:4]>>[O:5]=[C:2][N:3][CH2:4].[OH2:1]'
products = perform_generic_reaction(reactants=[carboxylGroupContaining, nitrogenGroupContaining], reaction_smarts=reaction_smarts)
# return products, reaction_smarts
return products
def make_amide_bond(carboxylGroupContaining: Chem.Mol, nitrogenGroupContaining: Chem.Mol) -> (t.List[Chem.Mol], str):
return make_peptide_bond(carboxylGroupContaining, nitrogenGroupContaining)
def make_ester_bond(carboxylGroupContaining: Chem.Mol, alcoholGroupContaining: Chem.Mol) -> (t.List[Chem.Mol], str):
reaction_smarts = '[O:5]=[C:2][OH:1].[HO:3][CH2:4]>>[O:5]=[C:2][O:3][CH2:4].[OH2:1]'
products = perform_generic_reaction(reactants=[carboxylGroupContaining, alcoholGroupContaining], reaction_smarts=reaction_smarts)
# return products, reaction_smarts
return products
def make_disulfide_bond(thiolGroupContaining1: Chem.Mol, thiolGroupContaining2: Chem.Mol) -> (t.List[Chem.Mol], str):
reaction_smarts = '[C:1][SH:2].[HS:3][C:4]>>[C:1][S:2][S:3][C:4]'
products = perform_generic_reaction(reactants=[thiolGroupContaining1, thiolGroupContaining2], reaction_smarts=reaction_smarts)
# return products, reaction_smarts
return products
def make_disulphide_bond(thiolGroupContaining1: Chem.Mol, thiolGroupContaining2: Chem.Mol) -> (t.List[Chem.Mol], str):
return make_disulfide_bond(thiolGroupContaining1, thiolGroupContaining2)
a = Chem.MolFromSequence("A")
b = Chem.MolFromSequence("C")
# print(Chem.MolToPDBBlock(b))
out= make_amide_bond(a,b)
mol = out[0]
# Chem.CalcExplicitValence(mol)
mol.UpdatePropertyCache(strict = False)
print(Chem.MolToPDBBlock(mol), Chem.MolToSmiles(mol))
| 43.224806 | 230 | 0.695301 | 729 | 5,576 | 5.215364 | 0.245542 | 0.121515 | 0.047344 | 0.058916 | 0.430563 | 0.362704 | 0.308785 | 0.304314 | 0.287743 | 0.234087 | 0 | 0.017133 | 0.173063 | 5,576 | 128 | 231 | 43.5625 | 0.807417 | 0.222382 | 0 | 0.152778 | 0 | 0.027778 | 0.15715 | 0.037754 | 0 | 0 | 0 | 0.007813 | 0 | 1 | 0.125 | false | 0 | 0.041667 | 0.027778 | 0.305556 | 0.013889 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01a431b7bea5dccade1e08d80166cc10db69bf34 | 1,105 | py | Python | config.py | cclauss/MAX-Object-Detector | 4f38f7c1ba65cc39baa4a0f466618b91a65f46a1 | [
"Apache-2.0"
] | null | null | null | config.py | cclauss/MAX-Object-Detector | 4f38f7c1ba65cc39baa4a0f466618b91a65f46a1 | [
"Apache-2.0"
] | null | null | null | config.py | cclauss/MAX-Object-Detector | 4f38f7c1ba65cc39baa4a0f466618b91a65f46a1 | [
"Apache-2.0"
] | null | null | null | import os
# Flask settings
DEBUG=True
# Flask-restplus settings
RESTPLUS_MASK_SWAGGER=False
# Application settings
# API metadata
API_TITLE = 'Model Asset Exchange Server'
API_DESC = 'An API for serving models'
API_VERSION = '0.1'
# default model
# name of model to download
MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'
DEFAULT_MODEL_PATH = 'assets'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
# Note: This needs to be downloaded and/or compiled into pb format.
PATH_TO_CKPT = '{}/{}/frozen_inference_graph.pb'.format(DEFAULT_MODEL_PATH, MODEL_NAME)
PATH_TO_LABELS = '{}/{}/mscoco_label_map.pbtxt'.format(DEFAULT_MODEL_PATH, 'data')
NUM_CLASSES = 90
# for image models, may not be required
MODEL_INPUT_IMG_SIZE = (299, 299)
MODEL_LICENSE = 'ApacheV2'
MODEL_META_DATA = {
'id': '{}-tf-mobilenet'.format(MODEL_NAME.lower()),
'name': '{} TensorFlow Model'.format(MODEL_NAME),
'description': '{} TensorFlow model trained on MobileNet'.format(MODEL_NAME),
'type': 'object_detection',
'license': '{}'.format(MODEL_LICENSE)
}
| 29.864865 | 97 | 0.744796 | 159 | 1,105 | 4.930818 | 0.566038 | 0.068878 | 0.061224 | 0.056122 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021008 | 0.138462 | 1,105 | 36 | 98 | 30.694444 | 0.802521 | 0.284163 | 0 | 0 | 0 | 0 | 0.363636 | 0.116517 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.05 | 0 | 0.05 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01a49750a5f79f46de5800bcca67ff60a286ab7c | 921 | py | Python | check_pharmacist.py | 136s/check_pharmacist | 02f60355523b5c4890fd314d6d000bfe54226db8 | [
"CC0-1.0"
] | null | null | null | check_pharmacist.py | 136s/check_pharmacist | 02f60355523b5c4890fd314d6d000bfe54226db8 | [
"CC0-1.0"
] | null | null | null | check_pharmacist.py | 136s/check_pharmacist | 02f60355523b5c4890fd314d6d000bfe54226db8 | [
"CC0-1.0"
] | null | null | null | import time
import pandas as pd
from selenium import webdriver
SEARCH_URL = "https://licenseif.mhlw.go.jp/search_iyaku/top.jsp"
SLEEP_SEC = 3
IN_CSV_NAME = "./list.csv"
OUT_CSV_NAME = "./output.csv"
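# Expected layout of list.csv (assumption based on the code below): a header row with
# "name" and "years" columns, one pharmacist name per row; "years" is filled in here.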
# Given a name, query the search site and return the list of registration years
def get_years(name):
driver.get(SEARCH_URL)
time.sleep(SLEEP_SEC)
search_box = driver.find_element_by_name("name")
search_box.send_keys(name)
search_box.submit()
regi = driver.find_elements_by_class_name('REGISTRATION_TD')
years = []
for r in regi:
years.append(r.text)
return years
# The CSV consists of name and years columns (with a header row)
df = pd.read_csv(IN_CSV_NAME)
df["years"] = df["years"].astype(str)
driver = webdriver.Chrome()
for i, _ in df.iterrows():
result = get_years(df.at[i, "name"])
df.at[i, "years"] = " ".join(result) # スペース区切りで格納
driver.quit()
df.to_csv(open(OUT_CSV_NAME, "w", encoding="utf_8_sig", newline=""), index=False)
| 26.314286 | 81 | 0.689468 | 147 | 921 | 4.102041 | 0.537415 | 0.046434 | 0.029851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003896 | 0.163952 | 921 | 34 | 82 | 27.088235 | 0.779221 | 0.082519 | 0 | 0 | 0 | 0 | 0.142687 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.115385 | 0 | 0.192308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01a82caa0086ae5bd27db7ac800143f2aa34c7d3 | 3,983 | py | Python | Lessons/source/search.py | daisukiyo/cs-1.3 | e23056c509f429d83cb8be94714205c78a76549f | [
"MIT"
] | null | null | null | Lessons/source/search.py | daisukiyo/cs-1.3 | e23056c509f429d83cb8be94714205c78a76549f | [
"MIT"
] | 2 | 2019-04-25T00:49:24.000Z | 2019-05-15T23:22:36.000Z | Lessons/source/search.py | daisukiyo/cs-1.3 | e23056c509f429d83cb8be94714205c78a76549f | [
"MIT"
] | null | null | null | #!python
def linear_search(array, item):
"""return the first index of item in array or None if item is not found"""
# implement linear_search_iterative and linear_search_recursive below, then
# change this to call your implementation to verify it passes all tests
# return linear_search_iterative(array, item)
return linear_search_recursive(array, item)
def linear_search_iterative(array, item):
# loop over all array values until item is found
for index, value in enumerate(array):
if item == value:
return index # found
return None # not found
def linear_search_recursive(array, item, index=0):
    # linear search, implemented recursively
    # check if index is out of bounds
if index >= len(array):
return None
# if the value at a specific index of array is equivalent to the target value...
if array[index] == item:
# return the first index of item in array
return index
# recursively call the function while incrementing the index value
return linear_search_recursive(array, item, index + 1)
# once implemented, change linear_search to call linear_search_recursive
# to verify that your recursive implementation passes all tests
def binary_search(array, item):
"""return the index of item in sorted array or None if item is not found"""
# implement binary_search_iterative and binary_search_recursive below, then
# change this to call your implementation to verify it passes all tests
# return binary_search_iterative(array, item)
return binary_search_recursive(array, item)
def binary_search_iterative(array, item):
    # binary search, implemented iteratively
# initialize the lower and upper bound for the array
left = 0 # lower
right = len(array) - 1 # upper
while left <= right:
# find the rounded middle point via floor division
mid = (left + right) // 2
# if the value at a specific index of array is equivalent to the target value...
if item == array[mid]:
# return the first index of item in array
return(mid)
# if the targeted item is less than the middle point...
elif item < array[mid]:
# shift the upper bound to the middle point
right = mid - 1
# the targeted item is greater than the middle point
else:
# shift the lower bound to the middle point
left = mid + 1
return None
# once implemented, change binary_search to call binary_search_iterative
# to verify that your iterative implementation passes all tests
def binary_search_recursive(array, item, left=0, right=None):
    # binary search, implemented recursively
# initialize the lower and upper bound for the array
# if left == None:
# left = 0
    if right is None:
        right = len(array) - 1
# check edge case where array is non-existent (lower bound cannot be greater than upper bound)
if left > right:
return None
else:
# find the rounded middle point via floor division
mid = (left + right) // 2
# if the value at a specific index of array is equivalent to the target value...
if item == array[mid]:
# return the first index of item in array
return(mid)
# if the targeted item is less than the middle point...
elif item < array[mid]:
# recursively calls itself with a new upper bound
return binary_search_recursive(array, item, left, mid-1)
# the targeted item is greater than the middle point
else:
# recursively calls itself with a new lower bound
return binary_search_recursive(array, item, mid+1, right)
# once implemented, change binary_search to call binary_search_recursive
# to verify that your recursive implementation passes all tests | 37.575472 | 99 | 0.667085 | 549 | 3,983 | 4.765027 | 0.176685 | 0.068807 | 0.053517 | 0.06422 | 0.700688 | 0.625382 | 0.535933 | 0.482416 | 0.482416 | 0.390291 | 0 | 0.004517 | 0.277429 | 3,983 | 106 | 100 | 37.575472 | 0.904448 | 0.583731 | 0 | 0.475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009434 | 0 | 1 | 0.15 | false | 0 | 0 | 0 | 0.425 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
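# A minimal usage sketch (not part of the original lesson) exercising both searches:
if __name__ == '__main__':
    data = [1, 3, 5, 7, 9, 11]
    assert linear_search(data, 7) == 3
    assert binary_search(data, 7) == 3
    assert binary_search(data, 4) is None
    print('search sanity checks passed')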
01a966a9092da2d27ed743864a64cbfc07fefe41 | 2,560 | py | Python | tests/generators.py | jlausuch/pcw | df4bf3d071024f894169163e2f0ad756c0c944bd | [
"Apache-2.0"
] | null | null | null | tests/generators.py | jlausuch/pcw | df4bf3d071024f894169163e2f0ad756c0c944bd | [
"Apache-2.0"
] | 35 | 2020-11-11T11:14:36.000Z | 2022-03-28T17:06:01.000Z | tests/generators.py | jlausuch/pcw | df4bf3d071024f894169163e2f0ad756c0c944bd | [
"Apache-2.0"
] | 3 | 2020-11-12T10:39:07.000Z | 2020-12-22T09:51:38.000Z | from faker import Faker
from datetime import datetime
fake = Faker()
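# Default feature-property values returned by mock_get_feature_property() below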
min_image_age_hours = 7
max_images_per_flavor = 1
max_image_age_hours = 20
azure_storage_resourcegroup = 'openqa'
ec2_max_snapshot_age_days = 1
ec2_max_volumes_age_days = 5
class MockImage:
def __init__(self, name, last_modified=None):
self.name = name
self.last_modified = last_modified
def mock_get_feature_property(feature: str, property: str, namespace: str = None):
if property == 'min-image-age-hours':
return min_image_age_hours
elif property == 'max-images-per-flavor':
return max_images_per_flavor
elif property == 'max-image-age-hours':
return max_image_age_hours
elif property == 'azure-storage-resourcegroup':
return azure_storage_resourcegroup
elif property == 'ec2-max-snapshot-age-days':
return ec2_max_snapshot_age_days
elif property == 'ec2-max-volumes-age-days':
return ec2_max_volumes_age_days
class ec2_meta_mock:
def __init__(self):
self.data = fake.uuid4()
class ec2_image_mock:
def __init__(self):
self.image_id = fake.uuid4()
self.meta = ec2_meta_mock()
self.name = fake.uuid4()
def ec2_tags_mock(tags=None):
    # avoid a shared mutable default argument; build fresh fake tags for each call
    if tags is None:
        tags = {fake.uuid4(): fake.uuid4()}
    return [{'Key': key, 'Value': tags[key]} for key in tags]
class ec2_instance_mock:
def __init__(self, **kwargs):
self.state = {'Name': fake.uuid4()}
self.instance_id = fake.uuid4()
self.image_id = fake.uuid4()
self.instance_lifecycle = fake.uuid4()
self.instance_type = fake.uuid4()
self.kernel_id = fake.uuid4()
self.launch_time = datetime.now()
self.public_ip_address = fake.uuid4()
self.security_groups = [{'GroupName': fake.uuid4()}, {'GroupName': fake.uuid4()}]
self.sriov_net_support = fake.uuid4()
self.tags = ec2_tags_mock(**kwargs)
self.state_reason = {'Message': fake.uuid4()}
self.image = ec2_image_mock()
class azure_instance_mock:
def __init__(self):
self.tags = fake.uuid4()
self.name = fake.uuid4()
self.id = fake.uuid4()
self.type = fake.uuid4()
self.location = fake.uuid4()
def gce_instance_mock():
return {
'name': fake.uuid4(),
'id': fake.uuid4(),
'machineType': fake.uuid4() + '/qq',
'zone': fake.uuid4() + '/qq',
'status': fake.uuid4(),
'creationTimestamp': datetime.now(),
'metadata': fake.uuid4(),
'tags': {'sshKeys': fake.uuid4()}
}
| 29.767442 | 89 | 0.639844 | 329 | 2,560 | 4.68997 | 0.246201 | 0.163318 | 0.126377 | 0.048607 | 0.205444 | 0.031108 | 0 | 0 | 0 | 0 | 0 | 0.023846 | 0.230078 | 2,560 | 85 | 90 | 30.117647 | 0.759006 | 0 | 0 | 0.101449 | 0 | 0 | 0.096484 | 0.037891 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115942 | false | 0 | 0.028986 | 0.028986 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01ae25899453c6d21187150fc54bebe1d1afb72a | 1,744 | py | Python | tests/builders/test_header_builder.py | pershinaM/openapi3-parser | 957c86727d6d4119e98a7bc6aa260adc9fa22477 | [
"MIT"
] | 4 | 2021-01-12T12:44:20.000Z | 2022-03-20T07:38:46.000Z | tests/builders/test_header_builder.py | pershinaM/openapi3-parser | 957c86727d6d4119e98a7bc6aa260adc9fa22477 | [
"MIT"
] | 17 | 2021-01-08T18:36:34.000Z | 2022-02-16T08:21:21.000Z | tests/builders/test_header_builder.py | pershinaM/openapi3-parser | 957c86727d6d4119e98a7bc6aa260adc9fa22477 | [
"MIT"
] | 5 | 2021-05-27T19:46:49.000Z | 2022-03-05T00:14:45.000Z | from unittest.mock import MagicMock
import pytest
from openapi_parser.builders import HeaderBuilder, SchemaFactory
from openapi_parser.enumeration import DataType
from openapi_parser.specification import Header, Integer, Schema, String
def _get_schema_factory_mock(expected_value: Schema) -> SchemaFactory:
mock_object = MagicMock()
mock_object.create.return_value = expected_value
return mock_object
string_schema = String(type=DataType.STRING)
integer_schema = Integer(type=DataType.INTEGER)
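# Each case pairs a raw OpenAPI header mapping with the Header objects the builder
# should produce, plus a schema factory stubbed to return the matching Schema.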
collection_data_provider = (
(
{
"X-Header": {
"schema": {
"type": "string"
}
}
},
[
Header(schema=string_schema, name="X-Header"),
],
_get_schema_factory_mock(string_schema)
),
(
{
"X-Header": {
"description": "The number of allowed requests in the current period",
"required": True,
"deprecated": True,
"schema": {
"type": "integer",
},
}
},
[
Header(
name="X-Header",
required=True,
description="The number of allowed requests in the current period",
deprecated=True,
schema=integer_schema
)
],
_get_schema_factory_mock(integer_schema)
),
)
@pytest.mark.parametrize(['data', 'expected', 'schema_factory'], collection_data_provider)
def test_build_collection(data: dict, expected: Header, schema_factory: SchemaFactory):
builder = HeaderBuilder(schema_factory)
assert expected == builder.build_list(data)
| 27.25 | 90 | 0.583142 | 161 | 1,744 | 6.086957 | 0.322981 | 0.079592 | 0.052041 | 0.061224 | 0.112245 | 0.112245 | 0.112245 | 0.112245 | 0.112245 | 0.112245 | 0 | 0 | 0.325115 | 1,744 | 63 | 91 | 27.68254 | 0.832625 | 0 | 0 | 0.153846 | 0 | 0 | 0.12844 | 0 | 0 | 0 | 0 | 0 | 0.019231 | 1 | 0.038462 | false | 0 | 0.096154 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01ae618867a8af68ce2b3329a0cb4a362b7294f8 | 808 | py | Python | src/model.py | senadkurtisi/Univariate-Time-Series-Forecasting | 6eb4bacae6d0fb5708e1b661a6b72cbc3c3d07a6 | [
"MIT"
] | null | null | null | src/model.py | senadkurtisi/Univariate-Time-Series-Forecasting | 6eb4bacae6d0fb5708e1b661a6b72cbc3c3d07a6 | [
"MIT"
] | null | null | null | src/model.py | senadkurtisi/Univariate-Time-Series-Forecasting | 6eb4bacae6d0fb5708e1b661a6b72cbc3c3d07a6 | [
"MIT"
] | null | null | null | import torch.nn as nn
class SeqForecast(nn.Module):
def __init__(self, input_dim, hidden_dim, num_layers=2):
super().__init__()
self.lstm = nn.LSTM(input_dim, hidden_dim,
num_layers, batch_first=True)
self.fc = nn.Linear(hidden_dim, 1)
# Use He(uniform) initialization for linear layer
for name, param in self.fc.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0.0)
elif 'weight' in name:
nn.init.kaiming_uniform_(param)
def forward(self, input):
        # Propagate the input through the LSTM
_, (hidden, _) = self.lstm(input)
# Get the prediction for the next time step
out = self.fc(hidden[-1, :, :])
return out.view(-1, 1)
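# A minimal shape sanity check (assumption: input shaped [batch, seq_len, input_dim]):
if __name__ == '__main__':
    import torch
    model = SeqForecast(input_dim=1, hidden_dim=32)
    x = torch.randn(16, 24, 1)   # 16 windows of 24 time steps, one feature
    print(model(x).shape)        # expected: torch.Size([16, 1])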
| 31.076923 | 60 | 0.57797 | 106 | 808 | 4.198113 | 0.509434 | 0.060674 | 0.062921 | 0.076404 | 0.116854 | 0.116854 | 0 | 0 | 0 | 0 | 0 | 0.012635 | 0.314356 | 808 | 25 | 61 | 32.32 | 0.790614 | 0.149752 | 0 | 0 | 0 | 0 | 0.014641 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.0625 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01af7bcc534be18e709a82c37c35fb57c5674b95 | 1,025 | py | Python | time_tools/countdown2.py | HideKobayashi/python_base | 9334b83bcf003978bcfda3dbd35f83fc3a6926aa | [
"MIT"
] | null | null | null | time_tools/countdown2.py | HideKobayashi/python_base | 9334b83bcf003978bcfda3dbd35f83fc3a6926aa | [
"MIT"
] | null | null | null | time_tools/countdown2.py | HideKobayashi/python_base | 9334b83bcf003978bcfda3dbd35f83fc3a6926aa | [
"MIT"
] | null | null | null | from time import sleep
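# Simple terminal countdown timer: prints HH:MM:SS in place once per second.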
def countdown(when_to_stop: int):
while when_to_stop > 0:
try:
m, s = divmod(when_to_stop, 60)
h, m = divmod(m, 60)
time_left = str(h).zfill(2) + ":" + str(m).zfill(2) + ":" + str(s).zfill(2)
print(time_left, end="\r")
sleep(1)
when_to_stop -= 1
except KeyboardInterrupt:
print(f"KeyboardInterrupt at {time_left}")
break
except Exception as e:
print("Exception: ", e)
print()
def countdown_cui():
print("Enter 'q' or type Ctrl-c to terminate.")
while True:
inp_s = input("Specify a number in seconds >> ")
if inp_s == "q":
break
try:
when_to_stop = int(inp_s)
except KeyboardInterrupt:
print("KeyboardInterrupt")
break
        except ValueError:
            print("Not a number!")
continue
countdown(when_to_stop)
if __name__ == "__main__":
countdown_cui() | 26.973684 | 87 | 0.520976 | 124 | 1,025 | 4.080645 | 0.443548 | 0.071146 | 0.118577 | 0.075099 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015337 | 0.363902 | 1,025 | 38 | 88 | 26.973684 | 0.760736 | 0 | 0 | 0.212121 | 0 | 0 | 0.151072 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0 | 0.030303 | 0 | 0.090909 | 0.212121 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01b034cce020fd91ff4ffe28a880e004894a3959 | 2,146 | py | Python | source/util.py | gilbertHuang/CG-diskusage | be448bb76419b43fb43c790836f9182a7773f8ff | [
"MIT"
] | null | null | null | source/util.py | gilbertHuang/CG-diskusage | be448bb76419b43fb43c790836f9182a7773f8ff | [
"MIT"
] | null | null | null | source/util.py | gilbertHuang/CG-diskusage | be448bb76419b43fb43c790836f9182a7773f8ff | [
"MIT"
] | null | null | null | import os
import constant
def human_size(number):
current_idx = 0
result = float(number)
while result > constant.size_diff:
        if current_idx >= len(constant.size_unit) - 1:
            break
result = result / constant.size_diff
current_idx += 1
return '{} {}'.format(round(result, constant.size_round), constant.size_unit[current_idx])
def classify_path(path, split_max, current_split=0):
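    # Yields every ordered selection of `split_max` path components from `path`
    # (components keep their original order but need not be adjacent).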
if current_split == split_max:
yield list()
else:
split_path = path.split(os.path.sep)
current_max_len = len(split_path) - split_max + current_split + 1
for idx in range(current_max_len):
next_values = classify_path(os.path.sep.join(split_path[idx + 1:]),
current_split=current_split + 1,
split_max=split_max)
for next_value in next_values:
yield [split_path[idx]] + next_value
def classify_possible_path(path):
split_path_len = len(path.split(os.path.sep))
for split_max in range(1, split_path_len + 1):
for sort_key in classify_path(path=path, split_max=split_max):
yield os.path.sep.join(sort_key)
def make_dictionary_by_classification(iterable, ignore_list=None):
result = {}
for child in iterable:
child_path = child.part_path
for sort_key in classify_possible_path(child_path):
if ignore_list and sort_key in ignore_list:
continue
sort_value = result.setdefault(sort_key, dict())
sort_value.setdefault(constant.sort_size_name, 0)
sort_value[constant.sort_size_name] += child.total_size
sort_children = sort_value.setdefault(constant.sort_children_name, list())
sort_children.append(child)
return result
def sorted_dictionary(dictionary, max_key=10):
if max_key:
return sorted(dictionary.items(), key=lambda item: item[1][constant.sort_size_name], reverse=True)[:max_key]
else:
return sorted(dictionary.items(), key=lambda item: item[1][constant.sort_size_name], reverse=True)
| 35.766667 | 116 | 0.651911 | 286 | 2,146 | 4.608392 | 0.223776 | 0.048558 | 0.039454 | 0.060698 | 0.256449 | 0.115326 | 0.115326 | 0.115326 | 0.115326 | 0.115326 | 0 | 0.008135 | 0.255359 | 2,146 | 59 | 117 | 36.372881 | 0.816646 | 0 | 0 | 0.043478 | 0 | 0 | 0.00233 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0 | 0.043478 | 0 | 0.23913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01b0c7510a6ee5fc7d436bfbdd1231f6a49a1e9b | 1,639 | py | Python | helper/sqlitehelper.py | fidele000/SQLite-Helper | d848197705d0291370cbcc83cc8aadfd2eed884b | [
"MIT"
] | 1 | 2021-08-14T07:41:40.000Z | 2021-08-14T07:41:40.000Z | helper/sqlitehelper.py | fidele000/SQLite-Helper | d848197705d0291370cbcc83cc8aadfd2eed884b | [
"MIT"
] | null | null | null | helper/sqlitehelper.py | fidele000/SQLite-Helper | d848197705d0291370cbcc83cc8aadfd2eed884b | [
"MIT"
] | null | null | null | import sqlite3
class SQLiteHelper(object):
    def __init__(self, db_name):
self.db_name = db_name
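        # Note: the connection and cursor are created in create_table(), so it must be
        # called before using the query helpers below.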
def create_table(self,table_name,columns):
name=str(self.db_name)
self.conn = sqlite3.connect(name+'.db')
self.c=self.conn.cursor()
query='('
query+='id INTEGER PRIMARY KEY'
for key, value in columns.items():
query+=','+key+' '+value
query+=')'
#print(query)
self.c.execute("CREATE TABLE IF NOT EXISTS " + table_name+' '+query)
self.conn.commit()
#print("created")
def selectAll(self,table_name):
query="SELECT * FROM "+table_name
return self.c.execute(query).fetchall()
def getColumns(self,table_name):
cursor = self.c.execute('select * from '+table_name)
return list(map(lambda x: x[0], cursor.description))
def selectWhereId(self,table_name,id):
query="SELECT * FROM "+table_name
query+=' where id='+str(id)
cursor=self.c.execute(query).fetchone()
columns=self.getColumns(table_name)
data={}
for i in range(0,len(columns)):
data[columns[i]]=cursor[i]
return data
def insert(self,table_name,values):
query='('
query+='id'
values_="("
values_+='NULL'
for key,value in values.items():
values_+=",'"+str(value)+"'"
query+=','+key
query+=')'
values_+=")"
#print(values_)
self.c.execute("INSERT INTO "+table_name+query+"VALUES"+values_)
self.conn.commit()
#print("inserted") | 29.8 | 76 | 0.560098 | 194 | 1,639 | 4.597938 | 0.298969 | 0.110987 | 0.07287 | 0.063901 | 0.088565 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003431 | 0.288591 | 1,639 | 55 | 77 | 29.8 | 0.761578 | 0.035998 | 0 | 0.190476 | 0 | 0 | 0.08941 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.02381 | 0 | 0.261905 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01b574a3a7d65deeb5fcdead6f4877e21a54e31f | 50,116 | py | Python | histo_GUI.py | pwilmart/PAW_pipeline | 73cf90ac4f316f48131956de3a6d82fdedfd1149 | [
"MIT"
] | 17 | 2018-09-06T14:04:27.000Z | 2022-03-03T11:13:15.000Z | histo_GUI.py | pwilmart/PAW_pipeline | 73cf90ac4f316f48131956de3a6d82fdedfd1149 | [
"MIT"
] | 3 | 2019-05-09T10:01:59.000Z | 2022-02-28T16:32:59.000Z | histo_GUI.py | pwilmart/PAW_pipeline | 73cf90ac4f316f48131956de3a6d82fdedfd1149 | [
"MIT"
] | 6 | 2019-03-18T12:35:55.000Z | 2022-01-07T13:28:53.000Z | """histo_GUI.py: Written by Billy Rathje, OHSU, 2014.
Also Phil Wilmarth, OHSU.
Library of support functions and classes for PAW pipeline programs.
The MIT License (MIT)
Copyright (c) 2017 Phillip A. Wilmarth and OHSU
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Direct questions to:
Technology & Research Collaborations, Oregon Health & Science University,
Ph: 503-494-8200, FAX: 503-494-4729, Email: techmgmt@ohsu.edu.
"""
###############################
###############################
# converting to Python 3 -PW 9/16/2017
from tkinter import *
import tkinter.ttk as ttk
from tkinter import filedialog
from tkinter import messagebox
import os
import sys
import numpy as np
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from PAW_lib import FigureGenerator
from PAW_lib import DataInfoAndFilter
from PAW_lib import Threshold
import PAW_lib
import pickle
class Histogram:
''' A histogram object tracks information needed to plot histogram data, including
    the text string to display, the matplotlib plot itself, the plot widget, x ranges, the placement of
the vertical bar, the placement of curve fitting information, and a pointer to the original object
storing numpy data (histogram) '''
def __init__(self, h, notebook):
self.ntt = h.ntt
self.z = h.z
self.histogram = h # Pointer to data object
self.plot = None # The figure to plot
self.xmin = -6 # position of x axis
self.xmax = 12
self.vertLine = None # green tracking bar
self.vertLineThresholdSet = None # location of black dotted line
self.textString = '' # The string to plot
self.headerString = '' # first header line about text view
self.FDRThreshold = 1.0 # Flag for initial FDR threshold 'guess' value
        self.sparseCutoff = 50 # Cutoff for number of Forward - Reverse spectra qualifying as sparse
        self.histogram.sparseData = False # Does the current histogram contain sparse data? Used in filtering step
self.ln = self.initLn() # Placement of the line in the text view
self.notebook = notebook # Pointer to parent notebook
# Gui pointers
self.can = None # Pointer to canvas widget for this plot
# Builds text string and plot
self.makeTextString()
self.makePlot()
self.checkSparse() # Remove lines from sparse figures
def checkSparse(self):
if(self.histogram.sparseData):
self.vertLine.set_visible(False)
self.vertLineThresholdSet.set_visible(False)
def initLn(self):
''' Sets the initial value of the line in the table view to the first case of 1% FDR,
or determines if the data is sparse and then sets the sparseData flag to True.
'''
# print('Z, ntt, mod:', self.z, self.ntt, self.histogram.mod)
x = self.histogram.histo[self.histogram.histo.FDR.astype(float) <= self.FDRThreshold]
# print('length of x:', len(x))
# test = x[x['DiscScore'] > 2]
test = self.histogram.histo[self.histogram.histo['DiscScore'] > 2]
# print('length of test:', len(test))
if len(test.index) > 0:
# Sparse data - set discScore to 100.0
if(test[test.index == test.index[0]].RRForward.item() - test[test.index == test.index[0]].RRReverse.item() <= self.sparseCutoff):
self.histogram.sparseData = True
# not sure about the next line
self.zeroData = True
print('Warning, sparse data frame...', ' ntt: ', self.ntt, ' z:', self.z, ' mod:', self.histogram.mod)
if len(x.index) > 0:
# Normal data, find first FDR <= self.FDRThreshold
self.histogram.threshold = x.index[0]
return x.index[0] + 1 # Off by one, I think because of removing header line
# Frame is empty (no points where FDR is below cutoff)
else:
print('Warning, zero data frame...', ' ntt: ', self.ntt, ' z:', self.z, ' mod:', self.histogram.mod)
self.histogram.zeroData = True
self.histogram.sparseData = True
return 0
def setLn(self, val):
'''
Sets the position of the line. This is called by the gui widget's callbacks.
'''
if(self.histogram.sparseData):
self.vertLine.set_visible(True) # Make green line visible
self.ln = val
#self.histogram.threshold = self.ln - 1
def updateVerticalLine(self):
'''
Updates the position of the green vertical line. Has to look for the discscore value
corresponding to the current line in the text file. Ln in the text file corresponds to an
index in the discscore column. This is called by the gui widget's callbacks.
'''
xval = self.histogram.histo['DiscScore'][self.ln]
self.vertLine.set_xdata((xval, xval))
def setThresh(self, event):
'''
This updates the black dotted line and sets the plot's score threshold to the value at the
current location in the table/plot. This is called from one of the gui widget's callbacks.
'''
if(self.histogram.sparseData): # Manually override sparse data cutoff
self.histogram.sparseData = False
self.vertLineThresholdSet.set_visible(True) # Make black dotted line visible
self.histogram.threshold = self.ln - 1
xval = self.histogram.histo['DiscScore'][self.ln]
self.vertLineThresholdSet.set_xdata((xval, xval))
self.can.draw()
def makeTextString(self):
'''
Construct the text string
'''
        # A StringIO buffer is used to speed up construction of the string. The buffer is passed
        # into pandas' to_string method, which takes a 'buf' param
from io import StringIO
output = StringIO()
# Make the text string, but don't display the index values.
self.textString = self.histogram.histo.to_string(buf = output, index=False, col_space=12)
self.textString = output.getvalue()
output.close() # Close the buffer
splitString = self.textString.split('\n')
self.textString = '\n'.join(splitString[1:])
        self.headerString = splitString[0] # Make a separate header line for the titles
def makePlot(self):
'''
Construct a matplotlib plot for this histogram.
'''
self.plot = Figure(figsize=(5,3), dpi=100)
target = self.plot.add_subplot(111)
decoy = self.plot.add_subplot(111)
# center is used for the x axis, and is calculated from the bins computed with linspace.
mybins = np.linspace(-8, 12, 301)
center = (mybins[:-1] + mybins[1:]) / 2
if(self.notebook.plotType.get() == 'Smoothed Plot'):
target.fill(center, self.histogram.Smforward, color='b')
decoy.fill(center, self.histogram.Smreverse, color='r')
else:
target.fill(center, self.histogram.forward, color='b')
decoy.fill(center, self.histogram.reverse, color='r')
# Adds the green vertical line. L is a tuple returned by the plot call that can be used to update the position later
# with set_xdata.
greenVerticalLine = self.plot.add_subplot(111)
l = greenVerticalLine.plot((0.0, 0.0), (target.axis()[2], target.axis()[3]), '-g', linewidth = 2.0)
self.vertLine = l[0]
# The black dotted line. Set the placement of it to the 1% FDR position.
vertLineThresholdSet = self.plot.add_subplot(111)
xval = self.histogram.histo['DiscScore'][self.ln]
l = vertLineThresholdSet.plot((xval, xval), (target.axis()[2], target.axis()[3]), '--k', linewidth = 2.0)
self.vertLineThresholdSet = l[0]
# If there are xmin or max values supplied, set them.
if self.xmin and self.xmax:
target.set_xlim(self.xmin, self.xmax)
decoy.set_xlim(self.xmin, self.xmax)
# set up labels
target.set_title('Z=%s, NTT=%s, %s' % (str(self.z), str(self.ntt), self.histogram.mod))
target.set_xlabel("Disc Score")
target.set_ylabel("Counts")
# makes sure the bottom axis is fully visible
self.plot.tight_layout()
def makePlotWidget(self, frcanvas):
''' Generate a tkinter canvas widget from the matplotlib plot. This is called
from setup() in the BRNotebook class, and frcanvas is the gui frame into which the widget
should be inserted. It should have already been constructed by the BRNotebook instance.
'''
canvas = BRCanvas(self.plot, master=frcanvas, gui=self.notebook.gui)
canvas.draw()
canvas.get_tk_widget().pack(side=LEFT, fill=BOTH, expand=YES)
canvas.get_tk_widget().bind('<Button-1>', canvas.focusCanvas) # These callbacks need to be set here because
canvas.get_tk_widget().bind('<Return>', self.setThresh) # they are handled differently for deltamass plots.
frcanvas.pack(side=TOP, expand=YES, fill=BOTH) # Other callbacks are set in the BRCanvas class that don't vary.
canvas.histogram = self # Retain reference to self # Allow access to this class from the canvas widget.
self.can = canvas # Keep track of the canvas widget in an instance variable.
class DeltaMassHistogram(Histogram):
''' A DeltaMassHistogram object tracks information needed to plot deltamasshistogram data, including
    the text string to display, the matplotlib plot itself, the plot widget, x ranges, the placement of
the vertical bar, the placement of curve fitting information, and a pointer to the original object
storing numpy data (histogram).
It is a subclass of histogram, so any functions not present here will default to histogram's functions. Most
    functions required custom implementations. Subclassing allows for function overriding, however, which makes for
easier calls to things like setting the placement of the vertical selection bar from outside of the class (the function
implementation will vary based on whether the histogram is a score or mass histogram, but outside the interface the call is the same)'''
def __init__(self, h, notebook, FULL_RANGE = False):
self.histogram = h # Original histogram data
self.notebook = notebook # The notebook the plot resides in
self.ln = self.initLn() # The current line in the text file
self.z = h.z
self.dm = h.dm
self.plot = None # The figure to plot
self.target = None
self.decoy = None # Currently tracking target and decoy plots to get data for zoom
self.xmin = None # For plotting x axis
self.xmax = None # For plotting x axis
self.vertLine = None # The moving green vertical line
self.textString = '' # The string to plot
self.headerString = '' # The first title line above the large string
self.FULL_RANGE = FULL_RANGE # Full range plot
self.zoomFactor = 2 # Factor by which to zoom plots
# 0 = low or left DM, 1 = high or right DM
self.currentDM = 0 # Tracks the current threshold being modified (left or right)
self.histogram.sparseData = False # for histogram/dmhistogram issues, remove later
self.makeTextString()
self.makePlot()
def makeTextString(self):
'''
Builds the text string to display as data
'''
from io import StringIO
output = StringIO()
self.textString = self.histogram.histo.to_string(buf=output, index=False, col_space=12)
self.textString = output.getvalue()
output.close()
splitString = self.textString.split('\n')
self.textString = '\n'.join(splitString[1:])
self.headerString = splitString[0]
def makePlot(self):
'''
Builds the dm plots
'''
self.plot = Figure(figsize=(5,3), dpi=100)
target = self.plot.add_subplot(111)
decoy = self.plot.add_subplot(111)
if self.dm == 0:
target.set_xlim(-0.05, 0.05)
decoy.set_xlim(-0.05, 0.05)
self.xmin = -0.05
self.xmax = 0.05
Forward = self.histogram.forwardDeltaMassZero
Reverse = self.histogram.reverseDeltaMassZero
smForward = self.histogram.smForwardDeltaMassZero
smReverse = self.histogram.smReverseDeltaMassZero
mybins = np.linspace(-0.05, 0.05, 201)
elif self.dm == 1:
target.set_xlim(0.9, 1.1)
decoy.set_xlim(0.9, 1.1)
self.xmin = 0.9
self.xmax = 1.1
Forward = self.histogram.forwardDeltaMassOne
Reverse = self.histogram.reverseDeltaMassOne
smForward = self.histogram.smForwardDeltaMassOne
smReverse = self.histogram.smReverseDeltaMassOne
mybins = np.linspace(0.90, 1.10, 401)
elif self.dm == 'ALL':
self.xmin = -self.histogram.dmRange
self.xmax = self.histogram.dmRange
Forward = self.histogram.forwardDeltaMass
Reverse = self.histogram.reverseDeltaMass
smForward = self.histogram.smForwardDeltaMass
smReverse = self.histogram.smReverseDeltaMass
mybins = self.histogram.mybins
elif self.dm == 2:
self.xmin = -self.histogram.dmRange
self.xmax = self.histogram.dmRange
Forward = self.histogram.forwardDeltaMass
Reverse = self.histogram.reverseDeltaMass
smForward = self.histogram.smForwardDeltaMass
smReverse = self.histogram.smReverseDeltaMass
mybins = self.histogram.mybins
center = (mybins[:-1] + mybins[1:]) / 2
#print('dm:', self.dm)
#print('len y:', len(smForward))
#print('len bins:', len(mybins))
#print('len x:', len(center))
if(self.notebook.plotType.get() == 'Smoothed Plot'):
#target.fill(center, smForward, color='b')
#decoy.fill(center, smReverse, color='r')
target.plot(center, smForward, color='b')
decoy.plot(center, smReverse, color='r')
else:
#target.fill(center, Forward, color='b')
#decoy.fill(center, Reverse, color='r')
target.plot(center, Forward, color='b')
decoy.plot(center, Reverse, color='r')
# Sets up the vertical line to display. Initializes it at the low threshold position.
# Target.axis()[2] and [3] correspond to the current placements of the y-axis, which the line should mirror
greenLine = self.plot.add_subplot(111)
l = greenLine.plot((self.histogram.thresholdLow, self.histogram.thresholdLow), (target.axis()[2], target.axis()[3]), '-g', linewidth = 2.0)
self.vertLine = l[0]
# Sets up the low and high threshold black dotted lines
# Ignores if no low/high thresholds present, like the out region that's not displayed.
if(self.histogram.thresholdLow and self.histogram.thresholdHigh):
low = self.plot.add_subplot(111)
l= low.plot((self.histogram.thresholdLow, self.histogram.thresholdLow), (target.axis()[2], target.axis()[3]), '--k', linewidth = 2.0)
high = self.plot.add_subplot(111)
h = high.plot((self.histogram.thresholdHigh, self.histogram.thresholdHigh), (target.axis()[2], target.axis()[3]), '--k', linewidth = 2.0)
self.low = l[0]
self.high = h[0]
# Set title and axis labels
if(self.dm == 0 or self.dm == 1 or self.dm == 2):
if self.dm != 2:
target.set_title(str(self.dm) + ' Da Delta Mass')
else:
target.set_title('Full Range Delta Mass')
else:
target.set_title('Full range Delta Mass')
target.set_xlabel("Deltamass (Da)")
target.set_ylabel("Counts")
target.set_xlim((self.xmin, self.xmax))
self.plot.tight_layout()
#track these for zooming
self.target = target
self.decoy = decoy
def makePlotWidget(self, frcanvas):
'''
Build the actual tkinter widget and assign callbacks
'''
canvas = BRCanvas(self.plot, master=frcanvas, gui=self.notebook.gui)
canvas.draw()
canvas.get_tk_widget().pack(side=LEFT, fill=BOTH, expand=YES)
canvas.get_tk_widget().bind('<Button-1>', canvas.focusCanvas)
canvas.histogram = self # Retain reference to self
self.can = canvas
canvas.get_tk_widget().bind('<Double-Button-1>', self.zoom)
canvas.get_tk_widget().bind('<Double-Button-3>', self.unZoom)
canvas.get_tk_widget().bind('<Return>', self.setThresh)
canvas.get_tk_widget().bind('<Left>', self.goToLeft)
canvas.get_tk_widget().bind('<Right>', self.goToRight)
def setThresh(self, event):
'''
Sets the threshold when the enter key is pressed depending on whether the left
or right threshold is selected.
'''
if self.currentDM == 0:
self.setLeftDM()
else:
self.setRightDM()
def setLeftDM(self):
'''
Helper method for setThresh. Sets the threshold attribute for left (low) threshold.
'''
self.histogram.thresholdLow = self.histogram.histo['deltaMass'][self.ln-1]
self.low.set_xdata((self.histogram.thresholdLow, self.histogram.thresholdLow))
self.can.draw()
def setRightDM(self):
'''
Helper method for set Thresh. Sets the threshold attribute for right (high) threshold.
'''
self.histogram.thresholdHigh = self.histogram.histo['deltaMass'][self.ln-1]
self.high.set_xdata((self.histogram.thresholdHigh, self.histogram.thresholdHigh))
self.can.draw()
def goToLeft(self, event):
'''
Sets left threshold as active when left arrow key is pressed, and jumps green display line to
that threshold.
'''
self.currentDM = 0
self.vertLine.set_xdata((self.histogram.thresholdLow, self.histogram.thresholdLow))
self.can.draw()
xval = self.histogram.histo[abs(self.histogram.histo['deltaMass'] - self.histogram.thresholdLow) < .0005]
self.ln = xval.index[0]
def goToRight(self, event):
'''
Sets right threshold as active when right arrow key is pressed, and jumps green display line to
that threshold.
'''
self.currentDM = 1
self.vertLine.set_xdata((self.histogram.thresholdHigh, self.histogram.thresholdHigh))
self.can.draw()
xval = self.histogram.histo[abs(self.histogram.histo['deltaMass'] - self.histogram.thresholdHigh) < .0005]
self.ln = xval.index[0]
def zoom(self, event):
ymin, ymax = self.target.get_ylim()
self.target.set_ylim(0, ymax - ymax/(self.zoomFactor))
self.can.draw()
def unZoom(self, event):
ymin, ymax = self.target.get_ylim()
self.target.set_ylim(0, ymax + ymax*(self.zoomFactor))
self.can.draw()
def updateVerticalLine(self):
#print('vertical line method')
#print('len frame:', len(self.histogram.histo))
#print('line:', self.ln)
if self.ln > 1:
xval = self.histogram.histo['deltaMass'][self.ln-1]
else:
xval = self.histogram.histo['deltaMass'][1]
#print('xval:', xval)
self.vertLine.set_xdata((xval, xval))
def setLn(self, val):
'''
Setter for current line position in table
'''
self.ln = val
def initLn(self):
if(self.histogram.thresholdLow and self.histogram.thresholdHigh):
# Setting the deltamass location in the table to the low threshold calculated will fail because the calculated
# number is to a different level of float accuracy, so make sure they're within less than five thousandths of each other.
xval = self.histogram.histo[abs(self.histogram.histo['deltaMass'] - self.histogram.thresholdLow) < .0005]
if xval.empty: # If there's no location for low threshold, just set to 1. Probably will happen for all
return 1 # low mass data
else:
return xval.index[0]
else:
return 1
class BRNotebook(ttk.Notebook):
''' BRNotebook manages a ttk notebook object'''
def __init__(self, gui=None, container = [], plotType = '', **args):
ttk.Notebook.__init__(self, **args)
self.histograms = {} # Dictionary of histograms
self.deltaMassHistograms = {} # Dictionary of histograms
self.fr = None # main frame
self.gui = gui # reference to main GUI object
self.container = container.container # container of histogram data to process
        self.containerDeltaMass = container.dmContainer # container of deltamass histograms
self.containerStats = container.globalStats
        self.rawContainer = container # This is just a pointer back to the main container - it has list and flag information
#self.txtStats = container.txtStats
self.plotType = plotType # Smoothed or not
self.setup_deltaMass() # First setup deltamass plots by default.
#self.setup() # initialization function to set up view...
#self.setup_stats()
def saveDMFigures(self):
'''
Save dm pdf figures
'''
sqt_container = os.path.dirname(os.getcwd()) # assumes we have set location to the folder with SQT files
filter_container = os.path.join(sqt_container, 'filtered_files') # put the threshold figures in the filtered_files folder
self.fig_container = os.path.join(filter_container, 'ThresholdFigures') # put the threshold figures in the filtered_files folder
if not os.path.exists(filter_container):
os.mkdir(filter_container)
if not os.path.exists(self.fig_container):
os.mkdir(self.fig_container)
for figs in self.deltaMassHistograms.values():
for fig in figs: # using relative folder paths
fig_file_name = os.path.join(self.fig_container, 'Mass_Figure_dm=' + str(fig.dm) + '_z=' + str(fig.z) + '.pdf')
fig.plot.savefig(fig_file_name)
def saveScoreFigures(self):
'''
Save score pdf figures (we have already set up folder for DM figures)
'''
for figs in self.histograms.values():
for fig in figs:
fig_file_name = os.path.join(self.fig_container, 'Score_Figure_dm=' + str(fig.histogram.dm) + '_z=' + str(fig.z) +
'_ntt=' + str(fig.ntt) + '_mod=' + str(fig.histogram.mod) + '.pdf')
fig.plot.savefig(fig_file_name)
def setup(self):
'''
Setup score view
'''
# remove deltamass tabs from window
for tab in self.tabs():
self.forget(tab)
# Create names for dm windows to reference when building tabs
if self.rawContainer.accurateMass:
daWindow = {0: '0 Da', 1: '1 Da', 2: 'out'}
else:
daWindow = {0: 'All'}
# Loop through score data and set up the canvases
for dm, dmFig in enumerate(self.container):
for f, fig in enumerate(dmFig):
theZ = self.rawContainer.zList[f] # get z value for place in list
# set up canvases
self.fr = Frame(self.gui.root) # self.fr is a large canvas for the whole tab
frcanvas = Frame(self.fr) # frcanvas is a frame for the entire mod notebook on top of the textview
nb = ttk.Notebook(frcanvas) # notebook to hold each mod
frames = {}
for mod in self.rawContainer.modList:
frames[mod] = Frame() # Make a frame for each mod
for fig in fig: # Loops over ntt
for fig in fig: # loops over mod
h = Histogram(fig, self)
h.makePlotWidget(frames[fig.mod]) # add plot to frame for specific mod
if f not in self.histograms:
self.histograms[f] = [h]
else:
self.histograms[f].append(h)
for frcan in frames:
string = frcan
if frcan == ' ': #
string = 'Unmod' # I think this if is no longer needed - unmod is set in loading_TXT_files...
                    nb.add(frames[frcan], text = string) # add each mod frame to notebook as separate tab
nb.pack(side=TOP, expand=YES, fill=BOTH)
frcanvas.pack(side=TOP, expand=YES, fill=BOTH)
self.add(self.fr, text = (theZ, '+', '_', str(daWindow[dm]))) # Add the whole frame with textview and canvases to main notebook
histPointer = h
# Now set up the text view
# Get just header line
headerString = histPointer.headerString
# set up header text view
frtext = Frame(self.fr) # Add text view as a frame in the frame holding frcanvas (the canvases notebook) and the text view
text = Text(frtext, relief=SUNKEN)
text.insert('1.0', headerString)
text.pack(side=TOP, expand=NO, fill=X)
text.config(width=1, height=1)
text['wrap'] = 'none'
text['state'] = 'disabled'
# Set up main text view
textString = histPointer.textString
text = BRText(frtext, relief=SUNKEN, notebook=self, gui=self.gui)
text.insert('1.0', textString)
text.focus()
text.bind('<Button-1>', text.select)
text.bind('<Up>', text.upKey)
text.bind('<Down>', text.downKey)
text.bind('<Return>', text.setThresh)
text.pack(side=LEFT, expand=YES, fill=BOTH)
text['state'] = 'disabled' # prevents text editing
text['wrap'] = 'none'
sbar = Scrollbar(frtext)
sbar.config(command=text.yview)
text.config(yscrollcommand=sbar.set)
sbar.pack(side=RIGHT, fill=Y)
hbar = Scrollbar(frtext, orient='horizontal')
hbar.config(command=text.xview)
text.config(xscrollcommand=hbar.set)
hbar.pack(side=BOTTOM, fill=X)
frtext.pack(side=BOTTOM, expand=YES, fill=BOTH)
for histo in self.histograms[f]:
if histo.histogram.dm == daWindow[dm]:
histo.can.text = text # Add pointer to current text view in canvas
text.canvas = histo.can # Keep pointer to canvas in text view
text.see("%d.0" % text.canvas.histogram.ln) # Go to current set line
text.refreshView()
def setup_deltaMass(self):
import pprint
for f in range(len(self.containerDeltaMass[0])):
self.fr = Frame(self.gui.root)
plotsContainer = Frame(self.fr)
plotsContainer.pack(side=TOP, expand=YES, fill=BOTH)
frcanvas = Frame(plotsContainer)
bottom_fr = Frame(plotsContainer)
frcanvas.pack(side=TOP, expand=YES, fill=X)
bottom_fr.pack(side=BOTTOM, expand=YES, fill=X)
for x, fig in enumerate(self.containerDeltaMass):
# set up canvases
# Since there's nothing in the container for full mass range, at the end of the list,
# make an extra plot for full mass range ONLY if data has accurate mass. The low mass
# container will only have 1 plot, the full range plot, so no need in that case.
if self.gui.ACCURATE_MASS and x == (len(self.containerDeltaMass) - 1):
h = DeltaMassHistogram(fig[f], self, FULL_RANGE=True)
h.makePlotWidget(frcanvas)
else:
h = DeltaMassHistogram(fig[f], self)
h.makePlotWidget(bottom_fr)
if f not in self.deltaMassHistograms:
self.deltaMassHistograms[f] = [h]
else:
self.deltaMassHistograms[f].append(h)
# Get just header line
headerString = self.deltaMassHistograms[f][len(self.deltaMassHistograms[f])-1].headerString
# set up related text view
frtext = Frame(self.fr)
text = Text(frtext, relief=SUNKEN)
text.insert('1.0', headerString)
text.pack(side=TOP, expand=NO, fill=X)
text.config(width=1, height=1)
text['wrap'] = 'none'
text['state'] = 'disabled'
textString = self.deltaMassHistograms[f][len(self.deltaMassHistograms[f])-1].textString
text = BRText(frtext, relief=SUNKEN, notebook=self, gui=self.gui)
text.insert('1.0', textString)
#text.tag_add(SEL, '1.0', '1.200')
text.focus()
text.bind('<Button-1>', text.select)
text.bind('<Up>', text.upKey)
text.bind('<Down>', text.downKey)
text.bind('<Return>', text.setThresh)
text.bind('<Left>', text.goToLeft)
text.bind('<Right>', text.goToRight)
text.pack(side=LEFT, expand=YES, fill=BOTH)
text['wrap'] = 'none'
text['state'] = 'disabled' # prevents text editing
sbar = Scrollbar(frtext)
sbar.config(command=text.yview)
text.config(yscrollcommand=sbar.set)
sbar.pack(side=RIGHT, fill=Y)
hbar = Scrollbar(frtext, orient='horizontal')
hbar.config(command=text.xview)
text.config(xscrollcommand=hbar.set)
hbar.pack(side=BOTTOM, fill=X)
frtext.pack(side=BOTTOM, expand=YES, fill=BOTH)
for histo in self.deltaMassHistograms[f]:
histo.can.text = text
text.canvas = histo.can
#if not text.canvas.histogram.ln:
# continue
text.see("%d.0" % text.canvas.histogram.ln)
text.refreshView()
theZ = self.rawContainer.zList[f]
self.add(self.fr, text = (theZ, '+_DM'))
f += 1
def setup_stats(self):
self.fr = Frame(self.gui.root)
statsContainer = Frame(self.fr)
Label(statsContainer, text = '1+ Target\t').grid(column = 1, row = 0)
Label(statsContainer, text = '1+ Decoy\t').grid(column = 2, row = 0)
Label(statsContainer, text = '2+\t').grid(column = 3, row = 0)
Label(statsContainer, text ='2+\t').grid(column = 4, row = 0)
Label(statsContainer, text ='3+\t').grid(column = 5, row = 0)
Label(statsContainer, text ='3+\t').grid(column = 6, row = 0)
Label(statsContainer, text ='4+\t').grid(column = 7, row = 0)
Label(statsContainer, text ='4+\t').grid(column = 8, row = 0)
Label(statsContainer, text ='Unmod').grid(column = 0, row = 1)
Label(statsContainer, text ='Full').grid(column = 0, row = 2)
DM = len(self.containerDeltaMass) - 1
NTT = len(self.container[0][0])
if(NTT == 3):
for z in range(len(self.containerDeltaMass[0])):
Label(statsContainer, text = str(self.containerStats.target_subclass[DM][z][0][0])).grid(column = z+1, row = 2)
Label(statsContainer, text = str(self.containerStats.decoy_subclass[DM][z][0][0])).grid(column = z+2, row = 2)
Label(statsContainer, text ='Semi').grid(column = 0, row = 3)
for z in range(len(self.containerDeltaMass[0])):
Label(statsContainer, text = str(self.containerStats.target_subclass[DM][z][1][0])).grid(column = z+1, row = 3)
Label(statsContainer, text = str(self.containerStats.decoy_subclass[DM][z][1][0])).grid(column = z+2, row = 3)
Label(statsContainer, text ='Non').grid(column = 0, row = 4)
for z in range(len(self.containerDeltaMass[0])):
Label(statsContainer, text = str(self.containerStats.target_subclass[DM][z][2][0])).grid(column = z+1, row = 4)
Label(statsContainer, text = str(self.containerStats.decoy_subclass[DM][z][2][0])).grid(column = z+2, row = 4)
if(NTT < 3):
for z in range(len(self.containerDeltaMass[0])):
Label(statsContainer, text = str(self.containerStats.target_subclass[DM][z][0][0])).grid(column = z+1, row = 2)
Label(statsContainer, text = str(self.containerStats.decoy_subclass[DM][z][0][0])).grid(column = z+2, row = 2)
Label(statsContainer, text ='Semi').grid(column = 0, row = 3)
for z in range(len(self.containerDeltaMass[0])):
Label(statsContainer, text = str(self.containerStats.target_subclass[DM][z][1][0])).grid(column = z+1, row = 3)
Label(statsContainer, text = str(self.containerStats.decoy_subclass[DM][z][1][0])).grid(column = z+2, row = 3)
Label(statsContainer, text ='Non').grid(column = 0, row = 4)
for z in range(len(self.containerDeltaMass[0])):
Label(statsContainer, text = "----------").grid(column = z+1, row = 4)
Label(statsContainer, text = "----------").grid(column = z+2, row = 4)
Label(statsContainer, text ='Totals').grid(column = 0, row = 5)
Label(statsContainer, text = self.containerStats.target_filtered).grid(column = 1, row = 5)
Label(statsContainer, text = self.containerStats.decoy_filtered).grid(column = 3, row = 5)
statsContainer.pack()
self.add(self.fr, text = 'Stats')
class BRCanvas(FigureCanvasTkAgg):
''' Manages a FigureCanvasTkAgg object '''
def __init__(self, parent=None, gui=None, **args):
FigureCanvasTkAgg.__init__(self, parent, **args)
self.text = None # Associated text view
self.ntt = 0 # Ntt for canvas
self.charge = 0 # Charge for canvas
self.gui = gui # Reference to main gui object
self.histogram = None
def focusCanvas(self, event):
''' Sets focus to current canvas and updates text view '''
self.text.canvas = self
self.text['state'] = 'normal'
self.text.focus()
self.text.delete('0.0', END)
self.text.insert('1.0', self.histogram.textString)
self.text.see("%d.0" % self.text.canvas.histogram.ln)
#self.text.tag_remove(SEL, '0.0', END)
self.text.tag_remove('highlight', '0.0', END)
self.text.tag_add('highlight', "%d.0" % self.text.canvas.histogram.ln, "%d.0" % (self.text.canvas.histogram.ln + 1))
self.text.tag_configure('highlight', background = 'sky blue')
#self.text.tag_add(SEL, "%d.0" % self.text.canvas.histogram.ln, "%d.0" % (self.text.canvas.histogram.ln + 1))
self.text['state'] = 'disabled'
self.get_tk_widget().focus_set()
return 'break'
class BRText(Text):
''' Manages a text view object '''
def __init__(self, parent=None, notebook=None, gui=None, **args):
Text.__init__(self, parent, takefocus=0, **args)
self.canvas = None # Associated canvas
self.notebook = notebook # Reference to notebook textview is in
self.gui = gui # Reference to main GUI object
def refreshView(self):
''' Helper method, updates figure for text view and redraws canvas '''
#self.tag_remove(SEL, '0.0', END)
#self.tag_add(SEL, "%d.0" % self.canvas.histogram.ln, "%d.0" % (self.canvas.histogram.ln + 1))
self.tag_remove('highlight', '0.0', END)
self.tag_add('highlight', "%d.0" % self.canvas.histogram.ln, "%d.0" % (self.canvas.histogram.ln + 1))
self.tag_configure('highlight', background = 'sky blue')
self.focus()
# Updates differently depending on whether class is deltamass or score histogram
self.canvas.histogram.updateVerticalLine()
self.canvas.draw()
def select(self, event):
''' Callback for selection of textview line with mouse'''
self.canvas.histogram.setLn(int(self.index(CURRENT).split('.')[0])) # Gets just the line number
self.refreshView()
return 'break' # 'break' overrides widget's default behavior
def upKey(self, event):
''' Callback for selection of textview line with upKey'''
self.canvas.histogram.setLn(int(self.canvas.histogram.ln) - 1)
self.see("%d.0" % self.canvas.histogram.ln)
self.refreshView()
return 'break' # 'break' overrides widget's default behavior
def downKey(self, event):
''' Callback for selection of textview line with downKey'''
self.canvas.histogram.setLn(int(self.canvas.histogram.ln) + 1)
self.see("%d.0" % self.canvas.histogram.ln)
self.refreshView()
return 'break' # 'break' overrides widget's default behavior
def setThresh(self, event):
self.canvas.histogram.setThresh(event)
return 'break'
def goToRight(self, event):
self.canvas.histogram.goToRight(event)
return 'break'
def goToLeft(self, event):
self.canvas.histogram.goToLeft(event)
return 'break'
class GUI:
''' Manages the main GUI window.
Also starts parsing in files, loading pandas structures'''
def __init__(self, folder=None):
self.root = Tk()
self.root.title('PAW Histogram GUI')
if not folder:
folder = os.getcwd()
self.folder = folder
# this is the starter window
self.modal = Toplevel(self.root)
self.modal.geometry("%dx%d%+d%+d" % (300, 200, 250, 125))
self.modal.title('PAW Set Up Dialog')
ttk.Button(self.modal, text="Select Top Hit Summary Files", command=self.select_files).pack(pady=5)
variable = StringVar(self.modal)
variable.set("Plot") # default value
ttk.OptionMenu(self.modal, variable, "Standard Plots", "Smoothed Plots").pack(pady=5)
self.massAccuracy = StringVar(self.modal)
self.massAccuracy.set("High") # default value (gets over-written during file loading)
ttk.OptionMenu(self.modal, self.massAccuracy, "High Resolution", "Low Resolution").pack(pady=5)
ttk.Button(self.modal, text="Load and Plot Histograms", command=self.exit_modal).pack(pady=5)
self.root.protocol('WM_DELETE_WINDOW', self.onExit) # handle exit
self.modal.protocol('WM_DELETE_WINDOW', self.exit_modal) # cannot get setup window to delete on mac
self.root.withdraw()
self.modal.attributes('-topmost', 1)
# self.modal.attributes('-topmost', 0)
self.root.wait_window(self.modal) # this waits for the user to set the files, resolution, etc.
self.root.deiconify()
# when we get here, we are starting the histogramming
# Setup flags
self.sparseDiscScore = 100.0
self.ACCURATE_MASS = True
self.SMOOTHED = True
if self.massAccuracy.get() == 'Low Resolution':
self.ACCURATE_MASS = False
if variable.get() == 'Plot':
self.SMOOTHED = False
# Make histograms
self.container = FigureGenerator(files=self.txt_files, accurateMass=self.ACCURATE_MASS, smoothed=self.SMOOTHED)
print('Generating GUI...')
# Main frames
self.buttonFrame = Frame(self.root)
self.buttonFrame.pack(side=TOP, fill=X)
self.computeScoreHistograms = ttk.Button(self.buttonFrame, text = 'Compute Score Histograms', takefocus=0, command=self.compute_score_histograms)
self.computeScoreHistograms.pack(side=LEFT, padx=5, pady=2)
ttk.Button(self.buttonFrame, text = 'Show mass windows', takefocus=0, command=self.get_masses).pack(side=LEFT, padx=5, pady=2)
self.notebook = BRNotebook(gui=self, container=self.container, plotType = variable, takefocus=0)
self.notebook.pack(side=BOTTOM)
#f = Frame(self.root)
#Label(f, text = 'Sidebar: Additional widgets here...').pack()
#f.pack(side=BOTTOM)
mainloop()
def onExit(self):
        """Properly closes down the GUI program."""
self.root.withdraw()
self.root.update_idletasks()
self.root.destroy()
sys.exit()
def pickleDeltaMass(self):
with open("output.pkl", "wb") as fout:
pickle.dump(self.container, fout)
def pickleScore(self):
with open("output_scores.pkl", "wb") as fout:
pickle.dump(self.container, fout)
def exit_modal(self):
self.modal.withdraw()
self.modal.update_idletasks()
self.modal.destroy()
def select_files(self):
self.txt_files = PAW_lib.get_files(self.folder, [('Text files', '*.txt'), ('PAW Text files', '*.PAW.txt')],
'Select the Top-hit TXT files') # returns full paths
if not self.txt_files: sys.exit() # cancel button response
self.folder = os.path.dirname(self.txt_files[0])
os.chdir(self.folder)
def get_scores(self):
s = ''
for score in self.container.container:
for score in score:
for score in score:
for score in score:
s += (str(score.dm) + " , "
+ str(score.z) + " +, "
+ str(score.ntt) + " tt, "
+ str(score.mod) + " mod: ")
if(score.sparseData):
s += str(100) + '\n'
else:
s += ('%0.4f' % score.histo.DiscScore[score.threshold]) + '\n'
messagebox.showinfo(title = "Scores", message = s)
#self.get_stats()
def get_masses(self):
s = ''
for dm in range(len(self.container.dmList)):
for z in range(len(self.container.zList)):
if dm == 2:
s += '\n' + ('%d' % self.container.dmContainer[dm][z].z) + "+ , " + " Outside"
s += "\n\tLow: " + ('%0.4f' % -self.container.dmContainer[dm][z].dmRange) + " \n\tHigh: " + ('%0.4f' % self.container.dmContainer[dm][z].dmRange)
elif dm == 0 or dm == 1:
s += '\n' + ('%d' % self.container.dmContainer[dm][z].z) + "+ , " + ('%d' % self.container.dmContainer[dm][z].dm) + " Da"
s += "\n\tLow: " + ('%0.4f' % self.container.dmContainer[dm][z].thresholdLow) + " \n\tHigh: " + ('%0.4f' % self.container.dmContainer[dm][z].thresholdHigh)
messagebox.showinfo(title = "Mass Thresholds", message = s)
def compute_score_histograms(self):
self.container.regenerateScorePlots()
self.notebook.setup()
ttk.Button(self.buttonFrame, text = 'Show score thresholds', takefocus=0, command=self.get_scores).pack(side=LEFT, padx=5, pady=2)
ttk.Button(self.buttonFrame, text = 'Filter Data', takefocus=0, command=self.exportToFilterer).pack(side=LEFT, padx=5, pady=2)
self.computeScoreHistograms['state'] = 'disabled'
# Save figures
self.notebook.saveDMFigures()
def get_stats(self):
self.container.get_stats_helper()
def exportToFilterer(self):
import time
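        # Gather the mass windows and score thresholds computed above, run the filter
        # with stats, then copy the parameter files, close the log, and save the figures.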
filterer = DataInfoAndFilter(self.folder, self.container.f.getFrame(), self.container.txtObjects,
self.container.dmList, self.container.zList, self.container.nttList, self.container.specialCharsList,
self.container.minLength, self.container.maxMods, self.container.peptideMassTol)
filterer.get_pre_stats()
masses = [[Threshold() for dm in self.container.dmList] for z in self.container.zList]
for dm in range(len(self.container.dmList)):
for z in range(len(self.container.zList)):
if(dm == 2):
masses[z][dm].low = -1 * self.container.dmContainer[dm][z].dmRange
masses[z][dm].high = self.container.dmContainer[dm][z].dmRange
else:
masses[z][dm].low = self.container.dmContainer[dm][z].thresholdLow
masses[z][dm].high = self.container.dmContainer[dm][z].thresholdHigh
scores = [[[[100.0 for mod in self.container.specialCharsList] for ntt in self.container.nttList] for z in self.container.zList] for dm in self.container.dmList]
for dm in range(len(self.container.dmList)):
for z in range(len(self.container.zList)):
for ntt in range(len(self.container.nttList)):
for mod in range(len(self.container.specialCharsList)):
ref = self.container.container[dm][z][ntt][mod]
if ref.sparseData:
scores[dm][z][ntt][mod] = self.sparseDiscScore
else:
scores[dm][z][ntt][mod] = ref.histo.DiscScore[ref.threshold]
filter_frame = filterer.filter_with_stats(masses, scores) # probably do not need to have a returned dataframe
filterer.copy_params_files()
for obj in filterer.write:
print('\n...finished.', time.asctime(), file=obj)
filterer.log_file.close()
# Save figures
self.notebook.saveScoreFigures()
def _quit(self):
        self.root.quit()     # stops mainloop
        self.root.destroy()  # this is necessary on Windows to prevent
# Fatal Python Error: PyEval_RestoreThread: NULL tstate
#########################
# default folder location
folder = os.getcwd() # this is a safe default
folder = "E:" # or set to something useful for your system
#########################
gui = GUI(folder)
| 49.424063 | 176 | 0.572412 | 5,897 | 50,116 | 4.826692 | 0.14075 | 0.040649 | 0.023434 | 0.007378 | 0.43741 | 0.390121 | 0.342269 | 0.308787 | 0.273548 | 0.229526 | 0 | 0.013532 | 0.31876 | 50,116 | 1,013 | 177 | 49.472853 | 0.820158 | 0.236591 | 0 | 0.408683 | 0 | 0 | 0.043517 | 0 | 0.001497 | 0 | 0 | 0 | 0 | 1 | 0.076347 | false | 0 | 0.026946 | 0 | 0.13024 | 0.007485 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01b6d2a73e93d2c6c35cc68f44da69c9de7a5da2 | 851 | py | Python | app/email.py | ta4tsering/pyrrha-bo | d5afbe4b37d4d2ad5b5bb4129b1dccaeb50c9b17 | [
"MIT"
] | 1 | 2020-08-30T04:36:25.000Z | 2020-08-30T04:36:25.000Z | app/email.py | ta4tsering/pyrrha-bo | d5afbe4b37d4d2ad5b5bb4129b1dccaeb50c9b17 | [
"MIT"
] | null | null | null | app/email.py | ta4tsering/pyrrha-bo | d5afbe4b37d4d2ad5b5bb4129b1dccaeb50c9b17 | [
"MIT"
] | 1 | 2020-08-30T04:33:07.000Z | 2020-08-30T04:33:07.000Z |
from flask import render_template
from flask_mail import Message
from smtplib import SMTPDataError
from threading import Thread
from app import mail
import logging
logger = logging.getLogger(__name__)
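# Worker-thread target: send the message inside the app context and log
# (rather than raise) SMTP data errors so a bad message cannot kill the thread.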
def _async(app, msg):
with app.app_context():
try:
mail.send(msg)
except SMTPDataError as e:
logger.warning(str(e))
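# Render the plain-text and HTML bodies from the given template base name and
# dispatch the message on a background thread.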
def send_email_async(app, recipient, subject, template, bcc=None, **kwargs):
if not isinstance(recipient, list):
recipient = [recipient]
msg = Message(
app.config['EMAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['EMAIL_SENDER'],
recipients=recipient, bcc=bcc)
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
Thread(target=_async, args=(app, msg)).start()
| 26.59375 | 76 | 0.673325 | 104 | 851 | 5.355769 | 0.461538 | 0.075404 | 0.050269 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.213866 | 851 | 31 | 77 | 27.451613 | 0.832586 | 0 | 0 | 0 | 0 | 0 | 0.049412 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.26087 | 0 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01b8932f958e37ec8c50e7edf71a0763c83642f8 | 2,821 | py | Python | example/runCtaTrading.py | WongLynn/vnpy_Amerlin-1.1.20 | d701d8f12c29cc33f58ea025920b0c7240f74f82 | [
"MIT"
] | 11 | 2019-10-28T13:01:48.000Z | 2021-06-20T03:38:09.000Z | example/runCtaTrading.py | Rayshawn8/vnpy_Amerlin-1.1.20 | d701d8f12c29cc33f58ea025920b0c7240f74f82 | [
"MIT"
] | null | null | null | example/runCtaTrading.py | Rayshawn8/vnpy_Amerlin-1.1.20 | d701d8f12c29cc33f58ea025920b0c7240f74f82 | [
"MIT"
] | 6 | 2019-10-28T13:16:13.000Z | 2020-09-08T08:03:41.000Z | import multiprocessing
import os
from time import sleep
from datetime import datetime, time
from vnpy.event import EventEngine2
from vnpy.trader.vtEvent import EVENT_LOG, EVENT_ERROR
from vnpy.trader.vtEngine import MainEngine, LogEngine
from vnpy.trader.gateway import okexGateway
from vnpy.trader.app import ctaStrategy
from vnpy.trader.app.ctaStrategy.ctaBase import EVENT_CTA_LOG
def findConnectKey():
files=os.listdir(".")
for file in files:
if file.find("_connect.json")>=0:
return file.replace("_connect.json","")
#----------------------------------------------------------------------
def runChildProcess():
    """Entry point for the child process that runs the CTA strategies."""
    print('-'*30)
    # Create the log engine
    le = LogEngine()
    le.setLogLevel(le.LEVEL_INFO)
    # le.addConsoleHandler()
    # le.addFileHandler()
    le.info(u'CTA strategy child process started')
ee = EventEngine2()
    le.info(u'Event engine created')
me = MainEngine(ee)
me.addGateway(okexGateway)
me.addApp(ctaStrategy)
    le.info(u'Main engine created')
ee.register(EVENT_LOG, le.processLogEvent)
ee.register(EVENT_CTA_LOG, le.processLogEvent)
    le.info(u'Log event listeners registered')
KEY = findConnectKey()
me.connect(KEY)
    le.info(u'Connected to the market data and trading gateway')
    sleep(5)                          # wait for the gateway to initialize
    me.dataEngine.saveContracts()     # save contract details to file
cta = me.getApp(ctaStrategy.appName)
cta.loadSetting()
cta.initAll()
cta.startAll()
while True:
sleep(1)
#----------------------------------------------------------------------
def runParentProcess():
    """Entry point for the parent (watchdog) process."""
    # Create the log engine
le = LogEngine()
le.setLogLevel(le.LEVEL_INFO)
le.addConsoleHandler()
le.addFileHandler()
    le.info(u'CTA strategy watchdog parent process started')
    DAY_START = time(8, 45)      # day session start/stop times
    DAY_END = time(15, 30)
    NIGHT_START = time(20, 45)   # night session start/stop times
    NIGHT_END = time(2, 45)
    p = None                     # handle of the child process
while True:
currentTime = datetime.now().time()
recording = False
        # determine which trading session (if any) we are currently in
if ((currentTime >= DAY_START and currentTime <= DAY_END) or
(currentTime >= NIGHT_START) or
(currentTime <= NIGHT_END)):
recording = True
        # during trading hours the child process must be running
if recording and p is None:
            le.info(u'Starting child process')
p = multiprocessing.Process(target=runChildProcess)
p.start()
            le.info(u'Child process started successfully')
        # outside trading hours the child process should be shut down
if not recording and p is not None:
            le.info(u'Stopping child process')
p.terminate()
p.join()
p = None
            le.info(u'Child process stopped successfully')
sleep(5)
if __name__ == '__main__':
    runChildProcess()    # run 24/7 with no scheduled downtime
    # Even though this also runs unattended, a manual check at each day's start-up is
    # strongly recommended; you are responsible for your own PnL.
#runParentProcess() | 25.645455 | 71 | 0.561503 | 293 | 2,821 | 5.313993 | 0.40273 | 0.038536 | 0.044958 | 0.021195 | 0.106615 | 0.106615 | 0.106615 | 0.106615 | 0.106615 | 0.106615 | 0 | 0.012425 | 0.286778 | 2,821 | 110 | 72 | 25.645455 | 0.761431 | 0.127969 | 0 | 0.138889 | 0 | 0 | 0.047658 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.138889 | 0 | 0.194444 | 0.013889 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01b946264ae1c245632a78a4b8778f16966ec15a | 881 | py | Python | promesque/lib/exporter_logger.py | croesnick/prometheus_elasticsearch | f7cfc838b5cae5f3cbe2c4df53f3bfa60f0c5373 | [
"MIT"
] | 1 | 2019-04-17T20:12:23.000Z | 2019-04-17T20:12:23.000Z | promesque/lib/exporter_logger.py | croesnick/promesque | f7cfc838b5cae5f3cbe2c4df53f3bfa60f0c5373 | [
"MIT"
] | null | null | null | promesque/lib/exporter_logger.py | croesnick/promesque | f7cfc838b5cae5f3cbe2c4df53f3bfa60f0c5373 | [
"MIT"
] | null | null | null | import logging
import sys
LOG_LEVEL_MAP = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
class ExporterError(Exception):
pass
class ExporterLogger(logging.Logger):
def __init__(self, name, path=None, level='error', fmt='%(asctime)s [%(levelname)-5.5s]: %(message)s'):
self._path = path
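        # self.level here still resolves to the classmethod below, because
        # logging.Logger.__init__ (which sets the integer .level attribute) has not run yet.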
self._level = self.level(level)
super(ExporterLogger, self).__init__(name, self._level)
self._formatter = logging.Formatter(fmt)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(self._formatter)
self.addHandler(stream_handler)
@classmethod
def level(cls, level):
return LOG_LEVEL_MAP[level.lower()]
@property
def formatter(self):
return self._formatter
| 23.810811 | 108 | 0.664018 | 99 | 881 | 5.69697 | 0.434343 | 0.047872 | 0.039007 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002878 | 0.211124 | 881 | 36 | 109 | 24.472222 | 0.808633 | 0 | 0 | 0 | 0 | 0 | 0.089671 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0.038462 | 0.076923 | 0.076923 | 0.346154 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01ba7e0e7bfae1bd8cee7ccbdab4b4b865b274ba | 1,181 | py | Python | 2_ProcessSRA_hpcc-batch_runcc.py | ShiuLab/RNAseq_pipeline | e5e91fb5a5c257e9df67089bafd55045f6fa5049 | [
"MIT"
] | 4 | 2020-03-04T16:51:37.000Z | 2021-04-19T15:46:00.000Z | 2_ProcessSRA_hpcc-batch_runcc.py | ShiuLab/RNAseq_pipeline | e5e91fb5a5c257e9df67089bafd55045f6fa5049 | [
"MIT"
] | null | null | null | 2_ProcessSRA_hpcc-batch_runcc.py | ShiuLab/RNAseq_pipeline | e5e91fb5a5c257e9df67089bafd55045f6fa5049 | [
"MIT"
] | 7 | 2018-06-04T20:58:01.000Z | 2021-09-08T00:31:33.000Z | # IMPORT
import os,sys
# MAIN
print('''
inp1 = file with list of SRA files
inp2 = bowtie index base (full path)
inp3 = SE (0) or PE (1) or paired processed as single (2)
inp4 and on:
Any additional parameters for ProcessSRA_hpcc2.py
These will be appended exactly as they appear
''')
files = sys.argv[1]
bowtie_index = sys.argv[2]
SE = sys.argv[3]
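# Template command: load the required HPC modules, then run ProcessSRA_hpcc2.py
# with (sra_file, bowtie_index, SE) substituted for each input file below.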
out_cmd = "module load SRAToolkit; module load FastQC; module load Trimmomatic; \
module load TopHat2; module load Boost; module load SAMtools; module load python; \
python /mnt/home/john3784/Github/RNAseq_pipeline/\
ProcessSRA_hpcc2.py %s %s %s"
if len(sys.argv) > 4:
additional_commands = " ".join(sys.argv[4:])
out_cmd = out_cmd+" "+additional_commands
file_list = [f.strip() for f in open(files,"r").readlines()]
output = open(files+".runcc","w")
for file in file_list:
output.write(out_cmd %(file, bowtie_index, SE)+"\n")
# out_commands = ["module load SRAToolkit; module load Trimmomatic; \
# module load TopHat2; module load Boost; python /mnt/home/lloydjo1/\
# Projects/7_intergenic_transcription_poaceae/_scripts/ProcessSRA_hpcc.py\ " + \
# f + " -genome " + bowtie_index + "\n" for f in file_list]
output.close()
| 33.742857 | 83 | 0.724809 | 184 | 1,181 | 4.538043 | 0.494565 | 0.131737 | 0.040719 | 0.062275 | 0.186826 | 0.126946 | 0.126946 | 0.126946 | 0.126946 | 0 | 0 | 0.021782 | 0.144793 | 1,181 | 34 | 84 | 34.735294 | 0.804951 | 0.240474 | 0 | 0 | 0 | 0 | 0.28764 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.041667 | 0 | 0.041667 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01bcb1ac1ff2e44f3b34c8c2ddb2306aa7a0a5f8 | 19,976 | py | Python | src/beam/views.py | django-beam/django-beam | cba5874bfef414e65051c2534cf03c772a4da98c | [
"BSD-3-Clause"
] | 5 | 2018-05-27T08:15:06.000Z | 2020-11-10T20:38:56.000Z | src/beam/views.py | django-beam/django-beam | cba5874bfef414e65051c2534cf03c772a4da98c | [
"BSD-3-Clause"
] | 68 | 2018-05-26T19:41:57.000Z | 2022-01-26T14:46:46.000Z | src/beam/views.py | django-beam/django-beam | cba5874bfef414e65051c2534cf03c772a4da98c | [
"BSD-3-Clause"
] | 1 | 2020-06-24T03:58:47.000Z | 2020-06-24T03:58:47.000Z | from typing import List, Optional, Type
from beam.registry import default_registry, register
from django.apps import apps
from django.contrib import messages
from django.contrib.admin.utils import NestedObjects
from django.core.exceptions import FieldDoesNotExist, PermissionDenied
from django.db import router
from django.forms import all_valid
from django.http import HttpResponse, HttpResponseForbidden
from django.shortcuts import redirect
from django.utils.html import escape
from django.utils.translation import gettext as _
from django.views import generic
from django.views.generic.base import ContextMixin, TemplateView
from django_filters.filterset import filterset_factory
from extra_views import SearchableListMixin
from .actions import Action
from .components import Component, ListComponent
from .inlines import RelatedInline
class ComponentMixin(ContextMixin):
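    # Glue between a beam Component and Django's generic views: model, queryset,
    # form class, fields and inline classes are all taken from the component, and the
    # component's permission check is enforced in dispatch().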
component: Optional[Component] = None
viewset = None
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["viewset"] = self.viewset
context["component"] = self.component
context["popup"] = self.request.GET.get("_popup")
return context
@property
def model(self):
return self.component.model
def get_queryset(self):
return self.component.queryset
def get_form_class(self):
if self.component.form_class:
return self.component.form_class
return super().get_form_class()
@property
def fields(self):
if self.component.fields:
return self.component.fields
return super().fields
def get_inline_classes(self):
return self.component.inline_classes
def has_perm(self):
try:
obj = self.get_object()
except AttributeError:
obj = None
return self.component.has_perm(self.request.user, obj)
def handle_no_permission(self):
if self.request.user.is_authenticated:
raise PermissionDenied("You shall not pass")
# Inner import to prevent django.contrib.admin (app) from
# importing django.contrib.auth.models.User (unrelated model).
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(self.request.get_full_path())
def dispatch(self, request, *args, **kwargs):
if not self.has_perm():
return self.handle_no_permission()
return super().dispatch(request, *args, **kwargs)
class InlinesMixin(ContextMixin):
inline_classes: List[Type[RelatedInline]] = []
def get_inline_classes(self):
return self.inline_classes
def get_inlines(self, object=None):
inlines = []
for inline_class in self.get_inline_classes():
inlines.append(
inline_class(
parent_instance=object if object is not None else self.object,
parent_model=self.model,
request=self.request,
)
)
return inlines
def get_context_data(self, **kwargs):
if "inlines" not in kwargs:
kwargs["inlines"] = self.get_inlines()
return super().get_context_data(**kwargs)
class CreateWithInlinesMixin(InlinesMixin):
def post(self, request, *args, **kwargs):
self.object = None
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
# we have to make sure that the same instance is used for form and inlines
inlines = self.get_inlines(object=form.save(commit=False))
else:
inlines = self.get_inlines()
if all_valid(inline.formset for inline in inlines) and form.is_valid():
return self.form_valid(form, inlines)
return self.form_invalid(form, inlines)
def form_valid(self, form, inlines):
self.object = form.save()
for inline in inlines:
inline.formset.save()
return redirect(self.get_success_url())
def form_invalid(self, form, inlines):
return self.render_to_response(
self.get_context_data(form=form, inlines=inlines)
)
class UpdateWithInlinesMixin(InlinesMixin):
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form_class = self.get_form_class()
form = self.get_form(form_class)
inlines = self.get_inlines()
if form.is_valid() and all_valid(inline.formset for inline in inlines):
return self.form_valid(form, inlines)
return self.form_invalid(form, inlines)
def form_valid(self, form, inlines):
self.object = form.save()
for inline in inlines:
inline.formset.save()
return redirect(self.get_success_url())
def form_invalid(self, form, inlines):
return self.render_to_response(
self.get_context_data(form=form, inlines=inlines)
)
class CreateView(ComponentMixin, CreateWithInlinesMixin, generic.CreateView):
def get_template_names(self):
return super().get_template_names() + ["beam/create.html"]
def get_success_url(self):
return self.viewset.links["detail"].reverse(
obj=self.object, request=self.request
)
def get_success_message(self):
return _('The {model} "{name}" was added successfully.').format(
model=self.model._meta.verbose_name,
name=str(self.object),
)
def form_valid(self, form, inlines):
response = super().form_valid(form, inlines)
success_message = self.get_success_message()
if success_message:
messages.success(self.request, success_message)
if self.request.GET.get("_popup"):
return self.popup_response()
return response
def popup_response(self):
return HttpResponse(
"<script>"
"window.opener.postMessage("
'{{id: "{id}", result: "created", source: "{source}", text: "{text}"}}, '
"document.origin"
");"
"window.close()"
"</script>".format(
id=escape(self.object.pk),
source=escape(self.request.GET["_popup"]),
text=escape(str(self.object)),
)
)
class UpdateView(ComponentMixin, UpdateWithInlinesMixin, generic.UpdateView):
def get_template_names(self):
return super().get_template_names() + ["beam/update.html"]
def get_success_message(self):
return _('The {model} "{name}" was changed successfully.').format(
model=self.model._meta.verbose_name,
name=str(self.object),
)
def form_valid(self, form, inlines):
response = super().form_valid(form, inlines)
success_message = self.get_success_message()
if success_message:
messages.success(self.request, success_message)
return response
def get_success_url(self):
if self.request.POST.get("submit", None) == "save_and_continue_editing":
return self.request.get_full_path()
return self.viewset.links["detail"].reverse(
obj=self.object, request=self.request
)
class SortableListMixin(ComponentMixin):
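    # Parses "?o=field1,-field2"-style ordering from the query string; only fields that
    # resolve to a model column (or have an explicit column override) can be sorted on.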
sort_param = "o"
sort_separator = ","
def get_sort_fields(self):
if self.component.list_sort_fields is None:
return [
# cast to string to support virtual fields
str(field)
for field in self.component.fields
if self.get_sort_column_for_field(str(field))
]
for field in self.component.list_sort_fields:
if self.get_sort_column_for_field(field) is None:
raise Exception(
"Unable to determine sort column for explicit sort field {} on {}".format(
field, self.viewset
)
)
return self.component.list_sort_fields
def get_sort_fields_columns(self):
return self.component.list_sort_fields_columns or {}
def get_sort_column_for_field(self, field_name):
explicit = self.get_sort_fields_columns()
if field_name in explicit:
return explicit[field_name]
try:
field = self.model._meta.get_field(field_name)
return field.name
except FieldDoesNotExist:
return None
def get_sort_fields_from_request(self) -> List[str]:
fields = []
sort_fields = set(self.get_sort_fields())
for field in self.request.GET.get(self.sort_param, "").split(
self.sort_separator
):
if field.startswith("-"):
sort_field = field[1:]
else:
sort_field = field
if sort_field in sort_fields:
fields.append(field)
return fields
def get_sort_columns(self, fields):
columns = []
for field in fields:
if field.startswith("-"):
descending = True
field = field[1:]
else:
descending = False
column = self.get_sort_column_for_field(field)
if not column:
continue
columns.append("-" + column if descending else column)
return columns
def sort_queryset(self, qs):
current_sort_fields = self.get_sort_fields_from_request()
current_sort_columns = self.get_sort_columns(current_sort_fields)
if current_sort_columns:
qs = qs.order_by(*current_sort_columns)
return qs
def get_queryset(self):
qs = super().get_queryset()
return self.sort_queryset(qs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["sortable_fields"] = set(self.get_sort_fields())
context["sorted_fields"] = self.get_sort_fields_from_request()
return context
class FiltersetMixin(ComponentMixin):
filterset_class = None
filterset_fields = None
filterset = None
def get_filterset_fields(self):
if self.filterset_fields is not None:
return self.filterset_fields
return self.component.list_filterset_fields
def get_filterset_class(self):
if self.filterset_class:
return self.filterset_class
if self.component.list_filterset_class:
return self.component.list_filterset_class
elif self.component.list_filterset_fields:
return filterset_factory(
model=self.model, fields=self.get_filterset_fields()
)
return None
def get_filterset_kwargs(self):
"""
Returns the keyword arguments for instantiating the filterset.
"""
kwargs = {
"data": self.request.GET or None,
"request": self.request,
"prefix": "filter",
"queryset": self.component.queryset,
}
return kwargs
def get_filterset(self):
filterset_class = self.get_filterset_class()
if not filterset_class:
return None
return filterset_class(**self.get_filterset_kwargs())
def get_queryset(self):
qs = super().get_queryset()
if self.filterset and self.filterset.is_bound and self.filterset.is_valid():
qs = self.filterset.filter_queryset(qs)
return qs
def dispatch(self, request, *args, **kwargs):
self.filterset = self.get_filterset()
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["filterset"] = self.filterset
return context
class InlineActionMixin(InlinesMixin):
def get_action_qs(self, inline):
ids = self.request.POST.getlist("_action_select[]")
select_across = self.request.POST.get("_action_select_across") == "all"
objects = inline.get_queryset()
if not select_across:
objects = objects.filter(pk__in=ids)
if not select_across and len(objects) != len(ids):
messages.error(
self.request,
_(
"There was an error finding the objects you selected. "
"This could be caused by another user changing them concurrently. "
"Please try again."
),
)
return objects.none()
return objects
def get_action(self):
for inline in self.get_inlines(self.get_object()):
action = inline.get_action()
if action and action.is_bound:
return inline, action
return None, None
def handle_action(self, inline, action):
form = action.get_form()
if form and not form.is_valid():
return None
result: Optional[HttpResponse] = action.apply(
queryset=self.get_action_qs(inline)
)
success_message: str = action.get_success_message()
if success_message:
messages.success(self.request, success_message)
if result:
return result
return redirect(self.request.get_full_path())
def post(self, request, *args, **kwargs):
inline, action = self.get_action()
if action:
response = self.handle_action(inline, action)
if response:
return response
return self.get(request, *args, **kwargs)
class ListActionsMixin(ComponentMixin):
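    # Binds the component's list actions to the current request and applies the selected
    # action to the chosen rows (or to the full queryset when "select across" is used).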
component: ListComponent
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["actions"] = self.actions
return context
def get_action_qs(self):
ids = self.request.POST.getlist("_action_select[]")
select_across = self.request.POST.get("_action_select_across") == "all"
objects = self.get_queryset()
if not select_across:
objects = objects.filter(pk__in=ids)
if not select_across and len(objects) != len(ids):
messages.error(
self.request,
_(
"There was an error finding the objects you selected. "
"This could be caused by another user changing them concurrently. "
"Please try again."
),
)
return objects.none()
return objects
def get_actions(self):
selected_action = self.request.POST.get("_action_choice")
actions = []
action_class: Type[Action]
for index, action_class in enumerate(self.component.list_actions_classes):
action_id = "{}-{}".format(index, action_class.name)
action = action_class(
data=self.request.POST if action_id == selected_action else None,
model=self.model,
id=action_id,
request=self.request,
)
if action.has_perm(self.request.user):
actions.append(action)
return actions
def get_action(self):
for action in self.actions:
if action.is_bound:
return action
return None
def handle_action(self, action):
form = action.get_form()
if form and not form.is_valid():
return None
result: Optional[HttpResponse] = action.apply(queryset=self.get_action_qs())
success_message: str = action.get_success_message()
if success_message:
messages.success(self.request, success_message)
if result:
return result
return redirect(self.request.get_full_path())
def dispatch(self, request, *args, **kwargs):
self.actions = self.get_actions()
return super().dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
action = self.get_action()
if action:
response = self.handle_action(action)
if response:
return response
return self.get(request, *args, **kwargs)
class ListView(
ListActionsMixin,
FiltersetMixin,
SearchableListMixin,
SortableListMixin,
ComponentMixin,
generic.ListView,
):
@property
def search_fields(self):
return self.component.list_search_fields
def get_paginate_by(self, queryset):
return self.component.list_paginate_by
def get_search_query(self):
if not self.search_fields:
return ""
return super().get_search_query()
def get_template_names(self):
return super().get_template_names() + ["beam/list.html"]
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["search_query"] = self.get_search_query()
context["list_item_link_layout"] = self.component.list_item_link_layout
return context
class DetailView(InlineActionMixin, ComponentMixin, InlinesMixin, generic.DetailView):
def get_template_names(self):
return super().get_template_names() + ["beam/detail.html"]
class DeleteView(ComponentMixin, InlinesMixin, generic.DeleteView):
def get_template_names(self):
return super().get_template_names() + ["beam/delete.html"]
def get_success_url(self):
return self.viewset.links["list"].reverse(request=self.request)
def get_success_message(self):
return _('The {model} "{name}" was deleted successfully.').format(
model=self.model._meta.verbose_name, name=str(self.object)
)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
nested, protected = self.get_nested_objects(self.object)
if protected:
return HttpResponseForbidden()
success_message = self.get_success_message()
response = self.delete(request, *args, **kwargs)
if success_message:
messages.success(request, success_message)
return response
@classmethod
def get_nested_objects(cls, obj):
using = router.db_for_write(cls.model)
collector = NestedObjects(using=using)
collector.collect([obj])
nested = collector.nested(cls._format_obj)
return nested, list(map(cls._format_obj, collector.protected))
@staticmethod
def _format_obj(obj):
return '%s "%s"' % (obj._meta.verbose_name, str(obj))
def get_context_data(self, **kwargs):
context = super(DeleteView, self).get_context_data(**kwargs)
nested, protected = self.get_nested_objects(self.get_object())
context.update(
{
"object": self.object,
"object_name": self._format_obj(self.object),
"nested_objects": nested,
"protected_objects": protected,
}
)
return context
class DashboardView(TemplateView):
template_name = "beam/dashboard.html"
viewsets = None
registry = default_registry
def build_registry(self, viewsets):
registry = {}
for viewset in viewsets:
register(registry, viewset)
return registry
def get_registry(self):
if self.viewsets:
return self.build_registry(self.viewsets)
else:
return self.registry
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
grouped = []
for app_label, viewsets_dict in self.get_registry().items():
group = {
"app_label": app_label,
"app_config": apps.get_app_config(app_label),
"viewsets": viewsets_dict.values(),
}
grouped.append(group)
context["grouped_by_app"] = grouped
return context
| 32.064205 | 94 | 0.621195 | 2,241 | 19,976 | 5.338242 | 0.119589 | 0.028087 | 0.021065 | 0.011368 | 0.473711 | 0.40876 | 0.381844 | 0.346401 | 0.325086 | 0.319903 | 0 | 0.00014 | 0.284842 | 19,976 | 622 | 95 | 32.115756 | 0.837253 | 0.014718 | 0 | 0.36701 | 0 | 0.002062 | 0.057899 | 0.0058 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142268 | false | 0.002062 | 0.041237 | 0.043299 | 0.437113 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01bd5639140a6a95c9baa58e324b49f893790914 | 4,557 | py | Python | e3nn/tensor/fourier_tensor.py | mister-bailey/e3nn | 43d4b12f5ba5947583feb35f4e0662b73aae5618 | [
"MIT"
] | null | null | null | e3nn/tensor/fourier_tensor.py | mister-bailey/e3nn | 43d4b12f5ba5947583feb35f4e0662b73aae5618 | [
"MIT"
] | null | null | null | e3nn/tensor/fourier_tensor.py | mister-bailey/e3nn | 43d4b12f5ba5947583feb35f4e0662b73aae5618 | [
"MIT"
] | null | null | null | # pylint: disable=not-callable, no-member, invalid-name, line-too-long, missing-docstring, arguments-differ
import numpy as np
import torch
from e3nn import rs
from e3nn.kernel_mod import FrozenKernel
from e3nn.tensor.spherical_tensor import projection
class FourierTensor:
def __init__(self, signal, mul, lmax, p_val=0, p_arg=0):
"""
f: s2 x r -> R^N
Rotations
[D(g) f](x) = f(g^{-1} x)
Parity
[P f](x) = p_val f(p_arg x)
f(x) = sum F^l . Y^l(x)
This class contains the F^l
Rotations
[D(g) f](x) = sum [D^l(g) F^l] . Y^l(x) (using equiv. of Y and orthogonality of D)
Parity
[P f](x) = sum [p_val p_arg^l F^l] . Y^l(x) (using parity of Y)
"""
if signal.shape[-1] != mul * (lmax + 1)**2:
raise ValueError(
"Last tensor dimension and Rs do not have same dimension.")
self.signal = signal
self.lmax = lmax
self.mul = mul
self.Rs = rs.convention([(mul, l, p_val * p_arg**l)
for l in range(lmax + 1)])
self.radial_model = None
@classmethod
def from_geometry(cls, vectors, radial_model, lmax, sum_points=True):
"""
:param vectors: tensor of shape [..., xyz]
:param radial_model: function of signature R+ -> R^mul
:param lmax: maximal order of the signal
"""
size = vectors.shape[:-1]
vectors = vectors.reshape(-1, 3) # [N, 3]
radii = vectors.norm(2, -1)
radial_functions = radial_model(radii)
*_size, R = radial_functions.shape
Rs = [(R, L) for L in range(lmax + 1)]
mul_map = rs.map_mul_to_Rs(Rs)
radial_functions = torch.einsum('nr,dr->nd',
radial_functions.repeat(1, lmax + 1),
mul_map) # [N, signal]
Ys = projection(vectors / radii.unsqueeze(-1), lmax) # [N, l * m]
irrep_map = rs.map_irrep_to_Rs(Rs)
Ys = torch.einsum('nc,dc->nd', Ys, irrep_map) # [N, l * mul * m]
signal = Ys * radial_functions # [N, l * mul * m]
if sum_points:
signal = signal.sum(0)
else:
signal = signal.reshape(*size, -1)
new_cls = cls(signal, R, lmax)
new_cls.radial_model = radial_model
return new_cls
def plot(self, box_length, center=None, n=30,
radial_model=None, relu=True):
muls, _ls, _ps = zip(*self.Rs)
# We assume radial functions are repeated across L's
assert len(set(muls)) == 1
num_L = len(self.Rs)
if radial_model is None:
radial_model = self.radial_model
def new_radial(x):
return radial_model(x).repeat(1, num_L) # Repeat along filter dim
r, f = plot_on_grid(box_length, new_radial, self.Rs, n=n)
# Multiply coefficients
f = torch.einsum('xd,d->x', f, self.signal)
f = f.relu() if relu else f
if center is not None:
r += center.unsqueeze(0)
return r, f
def change_lmax(self, lmax):
new_Rs = [(self.mul, l) for l in range(lmax + 1)]
if self.lmax == lmax:
return self
elif self.lmax > lmax:
new_signal = self.signal[:rs.dim(new_Rs)]
return FourierTensor(new_signal, self.mul, lmax)
elif self.lmax < lmax:
new_signal = torch.zeros(rs.dim(new_Rs))
new_signal[:rs.dim(self.Rs)] = self.signal
return FourierTensor(new_signal, self.mul, lmax)
def __add__(self, other):
if self.mul != other.mul:
raise ValueError("Multiplicities do not match.")
lmax = max(self.lmax, other.lmax)
new_self = self.change_lmax(lmax)
new_other = other.change_lmax(lmax)
return FourierTensor(new_self.signal + new_other.signal, self.mul, self.lmax)
def plot_on_grid(box_length, radial_model, Rs, n=30):
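    # Evaluate the kernel basis on an n x n x n Cartesian grid spanning a cube of side
    # box_length centred at the origin; returns the grid points and the basis values.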
l_to_index = {}
set_of_l = set([l for mul, l, p in Rs])
start = 0
for l in set_of_l:
l_to_index[l] = [start, start + 2 * l + 1]
start += 2 * l + 1
r = np.mgrid[-1:1:n * 1j, -1:1:n * 1j, -1:1:n * 1j].reshape(3, -1)
r = r.transpose(1, 0)
r *= box_length / 2.
r = torch.from_numpy(r)
Rs_in = [(1, 0)]
Rs_out = Rs
def radial_lambda(_ignored):
return radial_model
grid = FrozenKernel(Rs_in, Rs_out, radial_lambda, r)
f = grid()
f = f[..., 0]
return r, f
| 32.55 | 107 | 0.553434 | 673 | 4,557 | 3.601783 | 0.23477 | 0.058993 | 0.019802 | 0.004951 | 0.123762 | 0.088284 | 0.059406 | 0.006188 | 0 | 0 | 0 | 0.017147 | 0.321703 | 4,557 | 139 | 108 | 32.784173 | 0.767066 | 0.159754 | 0 | 0.043956 | 0 | 0 | 0.029636 | 0 | 0 | 0 | 0 | 0 | 0.010989 | 1 | 0.087912 | false | 0 | 0.054945 | 0.021978 | 0.252747 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01bf2602a5233dc77af2a63254f7948ab7be45bf | 1,189 | py | Python | utils/validations.py | WycliffeMuchumi/Stream-101-API | 9892685c37ff6f3e1e9017bfa5321968a5255c9e | [
"MIT"
] | null | null | null | utils/validations.py | WycliffeMuchumi/Stream-101-API | 9892685c37ff6f3e1e9017bfa5321968a5255c9e | [
"MIT"
] | 1 | 2021-06-04T09:45:05.000Z | 2021-06-04T09:45:05.000Z | utils/validations.py | muchumi/Stream-101-API | 9892685c37ff6f3e1e9017bfa5321968a5255c9e | [
"MIT"
] | 1 | 2021-06-04T09:43:58.000Z | 2021-06-04T09:43:58.000Z | import re
from flask import make_response, jsonify
"""
Validates key-value pairs of request dictionary body.
"""
def validate_users_key_pair_values(request):
keys = ['firstName','lastName','userName','email','phoneNumber','password']
errors = []
for key in keys:
if key not in request.json:
errors.append(key)
return errors
"""
Validates key-value pairs of request dictionary body.
"""
def validate_videos_key_pair_values(request):
keys = ['title','description','video_content']
errors = []
for key in keys:
if key not in request.json:
errors.append(key)
return errors
def check_for_blanks(data):
blanks = []
for key, value in data.items():
if value == "":
blanks.append(key)
return blanks
def check_for_non_strings(data):
non_strings = []
for key, value in data.items():
if key != 'id' and not isinstance(value, str):
non_strings.append(key)
return non_strings
def check_for_non_ints(data):
non_ints = []
for key, value in data.items():
if not isinstance(value, int):
non_ints.append(key)
return non_ints | 24.770833 | 79 | 0.634987 | 156 | 1,189 | 4.685897 | 0.333333 | 0.05472 | 0.102599 | 0.053352 | 0.497948 | 0.432285 | 0.432285 | 0.333789 | 0.333789 | 0.333789 | 0 | 0 | 0.254836 | 1,189 | 48 | 80 | 24.770833 | 0.825056 | 0 | 0 | 0.382353 | 0 | 0 | 0.075472 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147059 | false | 0.029412 | 0.058824 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01c21e6d6040d019aab4c4a5d4b11c12c95b4826 | 3,395 | py | Python | agents/guiPlayerAgent.py | Interpause/not-just-gomoku | fc327b2e37f6c0ee8ef4e5e2ee65309c6c9a39be | [
"MIT"
] | 1 | 2018-08-19T14:06:10.000Z | 2018-08-19T14:06:10.000Z | agents/guiPlayerAgent.py | Interpause/not-just-gomoku | fc327b2e37f6c0ee8ef4e5e2ee65309c6c9a39be | [
"MIT"
] | null | null | null | agents/guiPlayerAgent.py | Interpause/not-just-gomoku | fc327b2e37f6c0ee8ef4e5e2ee65309c6c9a39be | [
"MIT"
] | null | null | null | from tkinter import *
from agents.baseAgent import baseAgent
class guiPlayerAgent(baseAgent):
'''Extends baseAgent to provide a GUI for the player to use to play.'''
def __init__(self,*args,**kwargs):
super().__init__(*args,**kwargs)
#Window initialization
self.window = Tk()
self.window.resizable(0,0)
self.window.title("5 Line Game V2.2 - guiPlayerAgent")
self.frame=Frame(self.window)
Grid.rowconfigure(self.window, 0, weight=1)
Grid.columnconfigure(self.window, 0, weight=1)
self.frame.grid(row=0, column=0, sticky=N+S+E+W)
Grid.rowconfigure(self.frame, 0, weight=1)
Grid.columnconfigure(self.frame, 0, weight=1)
#actual extensions
self.input = None
self.btns = []
maxSize = 600
unit = maxSize/3
xSize = self.state.length()*unit
ySize = self.state.height()*unit
while(xSize > maxSize or ySize > maxSize):
unit -= 1
xSize = self.state.length()*unit
ySize = self.state.height()*unit
self.window.geometry("%dx%d"%(xSize,ySize))
return
#helper functions
#enables all buttons
def enable(self):
'''Enables all grid buttons.'''
for y in range(self.state.height()):
for x in range(self.state.length()):
self.btns[y][x].configure(state=NORMAL)
return
#disables all buttons
def disable(self):
'''Disables all grid buttons.'''
for y in range(self.state.height()):
for x in range(self.state.length()):
self.btns[y][x].configure(state=DISABLED)
return
#changes/setups button texts
def btnupdate(self):
'''Updates grid buttons to represent current self.state.'''
if self.btns == []:
self.btninit()
grid = self.state.grid()
for y in range(self.state.height()):
for x in range(self.state.length()):
self.btns[y][x].configure(text=grid[y][x])
return
#handler for buttons
def passInput(self,coord):
'''Click handler for grid buttons.'''
self.input = coord
return
#setup buttons in grid and gives them above handler
def btninit(self):
'''Creates grid of buttons.'''
for y in range(self.state.height()):
row = []
for x in range(self.state.length()):
btn = Button(self.frame,text=" ",height=1,width=1,command=lambda coord=(x,y): self.passInput(coord))
btn.grid(column=x, row=y, sticky=N+S+E+W)
btn.configure(state=DISABLED)
row.append(btn)
self.btns.append(row)
for x in range(self.state.length()):
Grid.columnconfigure(self.frame, x, weight=1)
for y in range(self.state.height()):
Grid.rowconfigure(self.frame, y, weight=1)
return
#extendables
#setter
def update(self,state,curPiece):
super().update(state,curPiece)
self.btnupdate()
self.window.update()
return
#getter
def getMove(self,state):
super().getMove(state)
self.input = None
self.enable()
while(self.input == None):
self.window.update()
self.disable()
return self.input
| 30.044248 | 116 | 0.568778 | 416 | 3,395 | 4.622596 | 0.276442 | 0.084243 | 0.057202 | 0.083203 | 0.307852 | 0.273531 | 0.24129 | 0.227769 | 0.180447 | 0.180447 | 0 | 0.010161 | 0.304271 | 3,395 | 112 | 117 | 30.3125 | 0.80398 | 0.130191 | 0 | 0.337838 | 0 | 0 | 0.013393 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108108 | false | 0.027027 | 0.027027 | 0 | 0.256757 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01c28000c41ee4fb2f2523d9f0173a71adcfa81a | 1,802 | py | Python | airflow/utils/dag_backup_helper.py | harishjami1382/test2 | f778cc7290904a84bed06f65fa5dbb49a63639f0 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | airflow/utils/dag_backup_helper.py | harishjami1382/test2 | f778cc7290904a84bed06f65fa5dbb49a63639f0 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | airflow/utils/dag_backup_helper.py | harishjami1382/test2 | f778cc7290904a84bed06f65fa5dbb49a63639f0 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | import os
import subprocess
from airflow.exceptions import AirflowException
from airflow import configuration as conf
def backup_folder_exists():
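    # Returns True when the remote dags_backup prefix already contains objects,
    # i.e. `s3cmd ls ... | wc -l` reports a non-zero line count.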
import commands
remote_base_path = conf.get('core', 'REMOTE_BASE_LOG_FOLDER')
if not remote_base_path.startswith('s3://'):
raise AirflowException("There seems to be some problem with your defloc location")
remote_loc = remote_base_path + '/dags_backup/'
execute_cmd = 's3cmd ls -c /usr/lib/hustler/s3cfg {} | wc -l'.format(remote_loc)
resp = commands.getoutput(execute_cmd)
if resp != '0':
return True
return False
def untar_and_save_dags(tempDir):
# import tarfile
import commands
airflow_home = os.environ['AIRFLOW_HOME']
if not os.path.exists(airflow_home + "/dags"):
os.makedirs(airflow_home + "/dags")
cmd = "tar -vxzf {}/dags.tar.gz -C {}/dags/".format(tempDir, airflow_home)
commands.getoutput(cmd=cmd)
# tar = tarfile.open(tempDir + '/dags.tar.gz')
# tar.extractall(path = airflow_home + "/dags/")
# for member in tar.getmembers():
# tar.extract(member, airflow_home + '/dags/')
def pull_from_s3(remote_loc, localDir):
execute_cmd = ['s3cmd', 'get', '-c', '/usr/lib/hustler/s3cfg']
execute_cmd.extend([remote_loc, localDir + '/', '--force'])
process = subprocess.Popen(execute_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.wait()
def push_to_s3(dags_folder):
remote_base_path = conf.get('core', 'REMOTE_BASE_LOG_FOLDER')
remote_loc = remote_base_path + '/dags_backup/'
execute_cmd = ['s3cmd', 'put', '-c', '/usr/lib/hustler/s3cfg']
execute_cmd.extend(['dags.tar.gz', remote_loc])
process = subprocess.Popen(execute_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.wait()
| 36.04 | 91 | 0.690899 | 241 | 1,802 | 4.962656 | 0.356846 | 0.06689 | 0.058528 | 0.035117 | 0.367057 | 0.351171 | 0.351171 | 0.351171 | 0.292642 | 0.292642 | 0 | 0.006684 | 0.169811 | 1,802 | 49 | 92 | 36.77551 | 0.792781 | 0.103774 | 0 | 0.294118 | 0 | 0 | 0.20261 | 0.068365 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.176471 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01c6baf0c32d93767acdfe8d0d19a3e357b410d4 | 3,223 | py | Python | FigureGeneration/makeFigure1.py | federatedcloud/Lake_Problem_DPS | 07600c49ed543165ccdc642c1097b3bed87c28f0 | [
"BSD-3-Clause"
] | null | null | null | FigureGeneration/makeFigure1.py | federatedcloud/Lake_Problem_DPS | 07600c49ed543165ccdc642c1097b3bed87c28f0 | [
"BSD-3-Clause"
] | 3 | 2018-10-03T21:12:42.000Z | 2019-07-08T21:32:43.000Z | FigureGeneration/makeFigure1.py | federatedcloud/Lake_Problem_DPS | 07600c49ed543165ccdc642c1097b3bed87c28f0 | [
"BSD-3-Clause"
] | 2 | 2020-06-29T17:30:42.000Z | 2020-06-30T22:01:49.000Z | import matplotlib.pyplot as plt
from scipy.optimize import root
import matplotlib
import numpy as np
def makeFigure1():
def fun(x):
return [(x[0]**qq)/(1+x[0]**qq) - bb*x[0]]
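    # fun() closes over bb and qq, which are reassigned below before every call to
    # root(), so the same callable is reused for each (b, q) combination.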
b = [0.4,0.3,0.2,0.1]
q = [2.5,3,3.5,4]
x = np.arange(0,2.6,0.1)
y = np.zeros(len(x))
cmap = matplotlib.cm.get_cmap('RdYlGn')
fig = plt.figure()
ax = fig.add_subplot(2,1,1)
# for b = 0.4, plot different values of q
colors = []
for i in range(len(b)):
colors.append(cmap(0.25*(i+1)))
lines=[]
line1, = ax.plot(x,b[0]*x,c='k', linewidth=2)
lines.append(line1)
for j in range(len(q)):
for i in range(len(x)):
y[i] = x[i]**q[j]/(1+x[i]**q[j])
line1, = ax.plot(x, y, c = colors[j], linewidth=2)
lines.append(line1)
bb = b[0]
qq = q[j]
soln = root(fun,1.0)
lines.append(ax.scatter(soln.x,b[0]*soln.x,facecolor='none',edgecolor='k',s=30))
soln = root(fun,2.5)
lines.append(ax.scatter(soln.x,b[0]*soln.x,facecolor='k',edgecolor='k',s=30))
lines.append(ax.scatter(0,0,facecolor='k',edgecolor='k',s=30))
legend1 = plt.legend([lines[0], lines[1], lines[4], lines[7], lines[10]],\
['b = 0.4', 'q = 2.5', 'q = 3.0', 'q = 3.5', 'q = 4.0'], loc='lower right')
plt.setp(legend1.get_title(),fontsize=14)
plt.gca().add_artist(legend1)
plt.legend([lines[3], lines[2]],['Stable Equilibria','Unstable Equilibria'],loc='upper left')
ax.set_ylabel('Fluxes of P',fontsize=16)
ax.tick_params(axis='y',labelsize=14)
ax.set_xlim(0,2.5)
ax.set_ylim(0,1)
ax.set_title('a) Effect of q on Lake Dynamics',loc='left')
ax = fig.add_subplot(2,1,2)
colors = []
for i in range(len(b)):
colors.append(cmap(1-(0.25*i)))
#for q = 2.5, plot different values of b
for i in range(len(x)):
y[i] = x[i]**q[0]/(1+x[i]**q[0])
lines = []
line1, = ax.plot(x,y,c='k',label='q = ' + str(q[0]),linewidth=2)
lines.append(line1)
for i in range(len(b)):
line1, = ax.plot(x,b[i]*x,c=colors[i],label='b = ' + str(b[i]),linewidth=2)
lines.append(line1)
bb = b[i]
qq = q[0]
soln = root(fun,1.0)
lines.append(ax.scatter(soln.x,b[i]*soln.x,facecolor='none',edgecolor='k',s=30))
soln = root(fun,2.5)
lines.append(ax.scatter(soln.x,b[i]*soln.x,facecolor='k',edgecolor='k',s=30))
lines.append(ax.scatter(0,0,facecolor='k',edgecolor='k',s=30))
ax.legend([lines[0], lines[1], lines[4], lines[7], lines[10]],\
['q = 2.5', 'b = 0.4', 'b = 0.3', 'b = 0.2', 'b = 0.1'],\
scatterpoints = 1, loc='upper left')
ax.set_xlabel('Lake P Concentration,$X_t$',fontsize=16)
ax.set_ylabel('Fluxes of P',fontsize=16)
ax.tick_params(axis='both',labelsize=14)
ax.set_xlim(0,2.5)
ax.set_ylim(0,1)
ax.set_title('b) Effect of b on Lake Dynamics',loc='left')
fig.set_size_inches([8,11.85])
fig.savefig('Figure1.pdf')
fig.clf()
return None
makeFigure1() | 35.417582 | 98 | 0.531492 | 552 | 3,223 | 3.068841 | 0.206522 | 0.012987 | 0.035419 | 0.070838 | 0.609799 | 0.546635 | 0.468713 | 0.434475 | 0.434475 | 0.434475 | 0 | 0.065071 | 0.260937 | 3,223 | 91 | 99 | 35.417582 | 0.646096 | 0.024201 | 0 | 0.333333 | 0 | 0 | 0.099902 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026667 | false | 0 | 0.053333 | 0.013333 | 0.106667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01c6c6c8a827d3cf331a524a92625ca459d8fdce | 4,882 | py | Python | shutdown/start_clouds/gcp_node_scenarios.py | RH-ematysek/svt | 3c4f99d453c6956b434f1a90e0658a95f3fda0a4 | [
"Apache-2.0"
] | 115 | 2016-07-15T12:24:42.000Z | 2022-02-21T20:40:09.000Z | shutdown/start_clouds/gcp_node_scenarios.py | RH-ematysek/svt | 3c4f99d453c6956b434f1a90e0658a95f3fda0a4 | [
"Apache-2.0"
] | 452 | 2016-05-19T13:55:19.000Z | 2022-03-24T11:25:20.000Z | shutdown/start_clouds/gcp_node_scenarios.py | RH-ematysek/svt | 3c4f99d453c6956b434f1a90e0658a95f3fda0a4 | [
"Apache-2.0"
] | 112 | 2016-05-16T08:48:55.000Z | 2022-01-12T13:13:37.000Z | import sys
import time
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
import logging
class gcp_node_scenarios():
def __init__(self, project):
self.project = project
logging.info("project " + str(self.project) + "!")
credentials = GoogleCredentials.get_application_default()
self.client = discovery.build('compute', 'v1', credentials=credentials,
cache_discovery=False)
# Node scenario to stop the node
def node_stop_scenario(self, node):
logging.info('stop scenario')
try:
logging.info("Starting node_stop_scenario injection")
instance_id, zone = self.get_instance_id(node)
logging.info("Stopping the node %s with instance ID: %s " % (node, instance_id))
self.stop_instances(zone, instance_id)
self.wait_until_stopped(zone, instance_id, 80)
logging.info("Node with instance ID: %s is in stopped state" % instance_id)
except Exception as e:
logging.error("Failed to stop node instance. Encountered following exception: %s. "
"Test Failed" % (e))
logging.error("node_stop_scenario injection failed!")
sys.exit(1)
# Node scenario to start the node
def node_start_scenario(self, node):
try:
logging.info("Starting node_start_scenario injection")
instance_id, zone = self.get_instance_id(node)
logging.info("Starting the node %s with instance ID: %s " % (node, instance_id))
self.start_instances(zone, instance_id)
self.wait_until_running(zone, instance_id, 80)
logging.info("Node with instance ID: %s is in running state" % instance_id)
logging.info("node_start_scenario has been successfully injected!")
except Exception as e:
logging.error("Failed to start node instance. Encountered following "
"exception: %s. Test Failed" % (e))
logging.error("node_start_scenario injection failed!")
sys.exit(1)
# Get the instance ID of the node
def get_instance_id(self, node):
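        # Page through every zone in the project and every instance in each zone until
        # an instance whose name appears in the node's hostname is found.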
zone_request = self.client.zones().list(project=self.project)
while zone_request is not None:
zone_response = zone_request.execute()
for zone in zone_response['items']:
instances_request = self.client.instances().list(project=self.project,
zone=zone['name'])
while instances_request is not None:
instance_response = instances_request.execute()
if "items" in instance_response.keys():
for instance in instance_response['items']:
if instance['name'] in node:
return instance['name'], zone['name']
                    instances_request = self.client.instances().list_next(
                        previous_request=instances_request,
                        previous_response=instance_response)
zone_request = self.client.zones().list_next(previous_request=zone_request,
previous_response=zone_response)
        logging.info("No matching instance found for node %s" % node)
# Start the node instance
def start_instances(self, zone, instance_id):
self.client.instances().start(project=self.project, zone=zone, instance=instance_id) \
.execute()
# Stop the node instance
def stop_instances(self, zone, instance_id):
self.client.instances().stop(project=self.project, zone=zone, instance=instance_id) \
.execute()
# Get instance status
def get_instance_status(self, zone, instance_id, expected_status, timeout):
# statuses: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING,
# and TERMINATED.
i = 0
sleeper = 5
while i <= timeout:
instStatus = self.client.instances().get(project=self.project, zone=zone,
instance=instance_id).execute()
logging.info("Status of vm " + str(instStatus['status']))
if instStatus['status'] == expected_status:
return True
time.sleep(sleeper)
i += sleeper
        logging.info("Status of instance %s did not reach %s within %s seconds" % (instance_id, expected_status, timeout))
# Wait until the node instance is running
def wait_until_running(self, zone, instance_id, timeout):
self.get_instance_status(zone, instance_id, 'RUNNING', timeout)
# Wait until the node instance is stopped
def wait_until_stopped(self, zone, instance_id, timeout):
self.get_instance_status(zone, instance_id, 'TERMINATED', timeout) | 47.862745 | 95 | 0.607743 | 544 | 4,882 | 5.284926 | 0.194853 | 0.097391 | 0.053565 | 0.031304 | 0.443478 | 0.416348 | 0.36487 | 0.339826 | 0.250087 | 0.19687 | 0 | 0.002939 | 0.30295 | 4,882 | 102 | 96 | 47.862745 | 0.841904 | 0.070258 | 0 | 0.123457 | 0 | 0 | 0.14904 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.061728 | 0 | 0.209877 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01c8616bda4dba690dc0fe4b0df6b2da85332a5b | 6,004 | py | Python | balanced_treatment_within_subject/__init__.py | UMBEE/modified-otree-snippets | 23b5baa28edc04b5a8c8d7607567ef29383b6777 | [
"MIT"
] | null | null | null | balanced_treatment_within_subject/__init__.py | UMBEE/modified-otree-snippets | 23b5baa28edc04b5a8c8d7607567ef29383b6777 | [
"MIT"
] | 1 | 2022-02-03T18:27:00.000Z | 2022-02-03T20:01:50.000Z | balanced_treatment_within_subject/__init__.py | UMBEE/modified-otree-snippets | 23b5baa28edc04b5a8c8d7607567ef29383b6777 | [
"MIT"
] | null | null | null | from otree.api import *
import itertools
doc = """
Within-subject design, with three treatment conditions. Orders of the treatment
will be balanced as long as the subjects arrive in multiples of 6.
"""
class C(BaseConstants):
NAME_IN_URL = 'balanced_treatment_within_subject'
PLAYERS_PER_GROUP = None
# On treatment and how they repeat
NUM_Repeated_Rounds_Treatment_Condition = 10
NUM_Treatment_Conditions = 3
# On what to show before/after the rounds
NUM_LeaderPage = 1
NUM_FinishPage = 1
# Welcome page + ROUNDS + Results page
NUM_ROUNDS = (
NUM_LeaderPage # Start page
+ NUM_Treatment_Conditions #Intro for each treatment
+ NUM_Repeated_Rounds_Treatment_Condition * NUM_Treatment_Conditions
+ NUM_FinishPage # Finish page
)
ROUNDS_for_TreatmentIntro = [
# Here, name the rounds in which TreatmentIntro needs to be displayed
(NUM_LeaderPage + 1),
# Then,
(
NUM_LeaderPage + 1 + NUM_Repeated_Rounds_Treatment_Condition
+ 1),
(
NUM_LeaderPage + 1 + NUM_Repeated_Rounds_Treatment_Condition
+ 1 + NUM_Repeated_Rounds_Treatment_Condition + 1)
]
FULL_list_of_rounds = list(range(1,NUM_ROUNDS + 1))
ROUNDS_for_T1 = FULL_list_of_rounds[ROUNDS_for_TreatmentIntro[0] : ROUNDS_for_TreatmentIntro[0] + NUM_Repeated_Rounds_Treatment_Condition]
ROUNDS_for_T2 = FULL_list_of_rounds[ROUNDS_for_TreatmentIntro[1] : ROUNDS_for_TreatmentIntro[1] + NUM_Repeated_Rounds_Treatment_Condition]
ROUNDS_for_T3 = FULL_list_of_rounds[ROUNDS_for_TreatmentIntro[2] : ROUNDS_for_TreatmentIntro[2] + NUM_Repeated_Rounds_Treatment_Condition]
ROUND_for_FinishPage = NUM_ROUNDS
# Parameters of the simple demand function
# print(ROUNDS_for_TreatmentIntro)
# print(ROUNDS_for_T1)
# print(ROUNDS_for_T2)
# print(ROUNDS_for_T3)
class Subsession(BaseSubsession):
pass
def creating_session(subsession: Subsession):
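    # Cycle through all 3! = 6 orderings of the three treatments so the ordering stays
    # balanced whenever participants arrive in multiples of six.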
treatments = itertools.cycle(
itertools.permutations([1,2,3])
)
if subsession.round_number == 1:
for p in subsession.get_players():
treatment = next(treatments)
# print('treatment is', treatment)
p.participant.treatment_sequence = treatment # Log the assigned treatment ordering
class Group(BaseGroup):
pass
class Player(BasePlayer):
treatment_sequence = models.StringField()
worker_page_choice = models.BooleanField(initial=0)
# PAGES
class WelcomePage(Page):
def vars_for_template(player):
return dict(
num_of_treatments = C.NUM_Treatment_Conditions,
num_of_repeats = C.NUM_Repeated_Rounds_Treatment_Condition,
total_rounds = C.NUM_ROUNDS
)
@staticmethod
def is_displayed(player: Player):
return player.round_number <= C.NUM_LeaderPage
def before_next_page(player : Player, timeout_happened):
player.treatment_sequence = "-".join([str(T) for T in player.participant.treatment_sequence])
class ExpConditionIntro(Page):
def vars_for_template(player):
# Decide current treatment by treatment_sequence and round_number
# This could have been a player method
if player.round_number == C.ROUNDS_for_TreatmentIntro[0]:
treatment_for_round = player.participant.treatment_sequence[0]
elif player.round_number == C.ROUNDS_for_TreatmentIntro[1]:
treatment_for_round = player.participant.treatment_sequence[1]
elif player.round_number == C.ROUNDS_for_TreatmentIntro[2]:
treatment_for_round = player.participant.treatment_sequence[2]
return dict(
treatment_for_round = treatment_for_round,
sequence = player.participant.treatment_sequence # for diagnostics only. Remove for production.
)
pass
@staticmethod
def is_displayed(player: Player):
if player.round_number in C.ROUNDS_for_TreatmentIntro:
return True
class WorkerPage(Page):
def vars_for_template(player):
# Decide current treatment by treatment_sequence and round_number
# This could have been a player method
if player.round_number in C.ROUNDS_for_T1:
rounds_under_same_treatment = C.ROUNDS_for_T1
treatment_for_round = player.participant.treatment_sequence[0]
elif player.round_number in C.ROUNDS_for_T2:
rounds_under_same_treatment = C.ROUNDS_for_T2
treatment_for_round = player.participant.treatment_sequence[1]
print("Found you")
elif player.round_number in C.ROUNDS_for_T3:
rounds_under_same_treatment = C.ROUNDS_for_T3
treatment_for_round = player.participant.treatment_sequence[2]
else:
print(player.round_number)
print(C.ROUNDS_for_T2)
# Then, report the current page number, as a progress indicator
round_count_out_of_Total = rounds_under_same_treatment.index(player.round_number) + 1
return dict(
treatment_for_round = treatment_for_round,
round_count_out_of_Total = round_count_out_of_Total
)
@staticmethod
def is_displayed(player: Player):
if player.round_number in C.ROUNDS_for_T1:
return True
elif player.round_number in C.ROUNDS_for_T2:
return True
elif player.round_number in C.ROUNDS_for_T3:
return True
class Results(Page):
@staticmethod
def is_displayed(player: Player):
if player.round_number == C.ROUND_for_FinishPage:
return True
page_sequence = [WelcomePage, ExpConditionIntro, WorkerPage, Results]
| 35.738095 | 142 | 0.669887 | 703 | 6,004 | 5.391181 | 0.217639 | 0.066491 | 0.062797 | 0.061741 | 0.490765 | 0.437731 | 0.420053 | 0.324011 | 0.227704 | 0.209499 | 0 | 0.011615 | 0.268654 | 6,004 | 167 | 143 | 35.952096 | 0.851514 | 0.12525 | 0 | 0.342342 | 0 | 0 | 0.036527 | 0.006311 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0.027027 | 0.018018 | 0.018018 | 0.387387 | 0.027027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01cc57f987fa0e55546aee7eb123287a97e6bf0f | 2,085 | py | Python | math_utils/discretize.py | RaczeQ/naive-bayes-classifier | c8adc960885118a13677e3c5ec4039b976810bee | [
"MIT"
] | null | null | null | math_utils/discretize.py | RaczeQ/naive-bayes-classifier | c8adc960885118a13677e3c5ec4039b976810bee | [
"MIT"
] | null | null | null | math_utils/discretize.py | RaczeQ/naive-bayes-classifier | c8adc960885118a13677e3c5ec4039b976810bee | [
"MIT"
] | null | null | null | import numpy as np
from sklearn.cluster import KMeans
class DiscretizeParam(object):
feature_name = None
discretize_function = None
buckets_amount = None
def __init__(self, feature_name, discretize_function, buckets_amount):
self.feature_name = feature_name
self.discretize_function = discretize_function
self.buckets_amount = buckets_amount
def __repr__(self):
return "DP<{}, {}, {}>".format(self.feature_name, self.discretize_function.__name__, self.buckets_amount)
class Discretizer(object):
bucket_models = {}
frequency_models = {}
kmean_models = {}
def bucket_discretize(dataset_name, feature_name, values, current_value, buckets):
key = (dataset_name, feature_name, buckets)
if not key in Discretizer.bucket_models.keys():
min_value = min(values)
max_value = max(values)
Discretizer.bucket_models[key] = np.linspace(min_value, max_value, buckets)
bins = Discretizer.bucket_models[key]
idx = np.digitize(current_value, bins)
return idx
def frequency_discretize(dataset_name, feature_name, values, current_value, buckets):
key = (dataset_name, feature_name, buckets)
if not key in Discretizer.frequency_models.keys():
split = np.array_split(np.sort(values), buckets)
cutoffs = [x[-1] for x in split]
cutoffs = cutoffs[:-1]
Discretizer.frequency_models[key] = cutoffs
cutoffs = Discretizer.frequency_models[key]
idx = np.digitize(current_value, cutoffs, right=True)
return idx
def kbins_discretize(dataset_name, feature_name, values, current_value, buckets):
key = (dataset_name, feature_name, buckets)
if not key in Discretizer.kmean_models.keys():
values2D = np.array([[v, 0] for v in values])
Discretizer.kmean_models[key] = KMeans(n_clusters=buckets, random_state=0).fit(values2D)
kmeans = Discretizer.kmean_models[key]
curr_val2D = np.array([[current_value, 0]])
val = kmeans.predict(curr_val2D)
return val[0]
| 40.096154 | 114 | 0.692086 | 257 | 2,085 | 5.346304 | 0.2607 | 0.088064 | 0.076419 | 0.09607 | 0.3377 | 0.289665 | 0.289665 | 0.240175 | 0.240175 | 0.240175 | 0 | 0.006072 | 0.210072 | 2,085 | 51 | 115 | 40.882353 | 0.828172 | 0 | 0 | 0.113636 | 0 | 0 | 0.006883 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113636 | false | 0 | 0.045455 | 0.022727 | 0.431818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01cc779657651e4920bdc21cc6224a00f4c92b64 | 3,652 | py | Python | magenta/models/polyamp/instrument_family_mappings.py | Jss7268/magenta | 10e0b2c50baaa01a9c942ed3334b5b2cca761bef | [
"Apache-2.0"
] | null | null | null | magenta/models/polyamp/instrument_family_mappings.py | Jss7268/magenta | 10e0b2c50baaa01a9c942ed3334b5b2cca761bef | [
"Apache-2.0"
] | 1 | 2020-03-01T16:02:10.000Z | 2020-03-01T16:02:10.000Z | magenta/models/polyamp/instrument_family_mappings.py | Jss7268/magenta | 10e0b2c50baaa01a9c942ed3334b5b2cca761bef | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Jack Spencer Smith.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from enum import Enum
class Family(Enum):
BASS = 0
BRASS = 1
FLUTE = 2
GUITAR = 3
KEYBOARD = 4
MALLET = 5
ORGAN = 6
REED = 7
STRING = 8
SYNTH_LEAD = 9
VOCAL = 10
OTHER = 11
DRUMS = 12
midi_instrument_to_family = collections.defaultdict(lambda: Family.OTHER)
midi_instrument_to_family.update({
0: Family.KEYBOARD,
1: Family.KEYBOARD,
2: Family.KEYBOARD,
3: Family.KEYBOARD,
4: Family.KEYBOARD,
5: Family.KEYBOARD,
6: Family.KEYBOARD,
7: Family.KEYBOARD,
8: Family.MALLET,
9: Family.MALLET,
10: Family.MALLET,
11: Family.MALLET,
12: Family.MALLET,
13: Family.MALLET,
14: Family.MALLET,
15: Family.MALLET,
16: Family.ORGAN,
17: Family.ORGAN,
18: Family.ORGAN,
19: Family.ORGAN,
20: Family.ORGAN,
21: Family.ORGAN,
22: Family.ORGAN,
23: Family.ORGAN,
24: Family.GUITAR,
25: Family.GUITAR,
26: Family.GUITAR,
27: Family.GUITAR,
28: Family.GUITAR,
29: Family.GUITAR,
30: Family.GUITAR,
31: Family.GUITAR,
32: Family.BASS,
33: Family.BASS,
34: Family.BASS,
35: Family.BASS,
36: Family.BASS,
37: Family.BASS,
38: Family.BASS,
39: Family.BASS,
40: Family.STRING,
41: Family.STRING,
42: Family.STRING,
43: Family.STRING,
44: Family.STRING,
45: Family.STRING,
46: Family.STRING,
47: Family.STRING, # TIMPANI?
48: Family.STRING,
49: Family.STRING,
50: Family.STRING,
51: Family.STRING,
52: Family.VOCAL,
53: Family.VOCAL,
54: Family.VOCAL,
55: Family.STRING, # orch hit
56: Family.BRASS,
57: Family.BRASS,
58: Family.BRASS,
59: Family.BRASS,
60: Family.BRASS,
61: Family.BRASS,
62: Family.BRASS,
63: Family.BRASS,
64: Family.REED,
65: Family.REED,
66: Family.REED,
67: Family.REED,
68: Family.REED,
69: Family.REED,
70: Family.REED,
71: Family.REED,
72: Family.FLUTE,
73: Family.FLUTE,
74: Family.FLUTE,
75: Family.FLUTE,
76: Family.FLUTE,
77: Family.FLUTE,
78: Family.FLUTE,
79: Family.FLUTE,
80: Family.SYNTH_LEAD,
81: Family.SYNTH_LEAD,
82: Family.SYNTH_LEAD,
83: Family.SYNTH_LEAD,
84: Family.SYNTH_LEAD,
85: Family.VOCAL,
86: Family.SYNTH_LEAD,
87: Family.SYNTH_LEAD,
105: Family.GUITAR,
106: Family.GUITAR,
107: Family.GUITAR,
108: Family.GUITAR,
109: Family.MALLET,
110: Family.REED,
111: Family.STRING,
112: Family.REED,
113: Family.MALLET,
114: Family.MALLET,
})
family_to_midi_instrument = {
0: 33, # Acoustic Bass
1: 57, # Trumpet
2: 74, # Flute
3: 25, # Acoustic Nylon Guitar
4: 1, # keyboard / Acoustic Grand Piano
5: 9, # mallet / Celesta
6: 17, # organ / Drawbar Organ
7: 66, # reed / Alto Sax
8: 49, # string / String Ensemble
9: 83, # synth lead / Square
10: 54, # vocal / Voice Oohs
11: 118,
12: 119, # TODO actual percussion
}
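# Lookup examples (derived from the tables above):
#
#     midi_instrument_to_family[40]                 # -> Family.STRING
#     midi_instrument_to_family[120]                # -> Family.OTHER (defaultdict fallback)
#     family_to_midi_instrument[Family.BASS.value]  # -> 33 (Acoustic Bass)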
| 24.026316 | 74 | 0.627601 | 506 | 3,652 | 4.496047 | 0.407115 | 0.073846 | 0.046154 | 0.014066 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.096965 | 0.260131 | 3,652 | 151 | 75 | 24.18543 | 0.745004 | 0.21851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006623 | 0 | 1 | 0 | false | 0 | 0.015152 | 0 | 0.121212 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01cec5edb4d937a0934dcf6391bcc265af95690c | 1,123 | py | Python | test/group-test.py | Afsio/sd-groupcast | 65df8d308280cc2096449a1bc6431eae38c54f5a | [
"MIT"
] | null | null | null | test/group-test.py | Afsio/sd-groupcast | 65df8d308280cc2096449a1bc6431eae38c54f5a | [
"MIT"
] | null | null | null | test/group-test.py | Afsio/sd-groupcast | 65df8d308280cc2096449a1bc6431eae38c54f5a | [
"MIT"
] | null | null | null | import unittest
from project import group_s
from project import printer_s
from project import member_s
from pyactor.context import set_context, create_host, shutdown, sleep
class TestGroup(unittest.TestCase):
def setUp(self):
# Gets executed before every test
set_context()
self.h = create_host()
self.p = self.h.spawn('printer', printer_s.Printer)
self.g = self.h.spawn('group', group_s.Group, [self.p])
def tearDown(self):
# Gets executed after every test
shutdown()
def test_members_leave(self):
# Test if members join and leave correctly if not announced
m1 = self.h.spawn('m1', member_s.Member, [self.p, self.g])
self.g.join(m1)
self.g.init_start()
self.assertTrue(len(self.g.get_members()) == 1)
sleep(12)
#Test that if no announces are made by the peer, it gets kicked out
self.assertTrue(len(self.g.get_members()) == 0)
if __name__ == '__main__':
    suite = unittest.TestLoader().loadTestsFromTestCase(TestGroup)
    unittest.TextTestRunner(verbosity=2).run(suite)
| 30.351351 | 75 | 0.668744 | 155 | 1,123 | 4.696774 | 0.451613 | 0.041209 | 0.070055 | 0.049451 | 0.087912 | 0.087912 | 0.087912 | 0 | 0 | 0 | 0 | 0.009185 | 0.224399 | 1,123 | 36 | 76 | 31.194444 | 0.826636 | 0.165628 | 0 | 0 | 0 | 0 | 0.023605 | 0 | 0 | 0 | 0 | 0 | 0.086957 | 1 | 0.130435 | false | 0 | 0.217391 | 0 | 0.391304 | 0.086957 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01d2669a411e95e812fc676660fcb5c7124775a0 | 1,458 | py | Python | charlie/Include/filelogger.py | V-Perotto/Projekt-Charlie | da27d28b1194c999d17431aa990706482d7bb1a1 | [
"CC0-1.0"
] | 1 | 2021-03-20T02:03:55.000Z | 2021-03-20T02:03:55.000Z | charlie/Include/filelogger.py | V-Perotto/Projekt-Charlie | da27d28b1194c999d17431aa990706482d7bb1a1 | [
"CC0-1.0"
] | 1 | 2021-04-06T04:48:01.000Z | 2021-04-06T04:48:01.000Z | charlie/Include/filelogger.py | V-Perotto/Projekt-Charlie | da27d28b1194c999d17431aa990706482d7bb1a1 | [
"CC0-1.0"
] | null | null | null |
from os import getcwd
from os.path import isfile
from os.path import join
from os import listdir
from datetime import datetime
# Thanks Fabrício for showing me how to create a log in Python and for being
# an amazing colleague :)
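# Example calls (illustrative messages; they append to the dated log files under
# results/log_py/... or results/log_rf/):
#
#     filelog("PY_S", "test suite finished without errors")
#     filelog("RF", "Robot Framework run started")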
def filelog(name, desc):
# print("ENTROU")
if name == "PY_F":
path_way = getcwd()
path_logs = path_way + "/results/log_py/ERROR"
if name == "PY_S":
path_way = getcwd()
path_logs = path_way + "/results/log_py/SUCCESS"
if name == "RF":
path_way = getcwd()
path_logs = path_way + "/results/log_rf/"
    # Get the current date and time
data = datetime.now().strftime("%d-%m-%Y")
data_hora = datetime.now().strftime("%d-%m-%Y__%H:%M:%S")
if (len(listdir(path_logs)) != 0):
onlyfiles = [f for f in listdir(path_logs) if isfile(join(path_logs, f))]
if (True in [True for x in onlyfiles if (data in x)]):
with open(join(path_logs,"CHARLIE_"+ data + ".txt"), "a") as file:
file.write(data_hora+" : " + desc + '\n')
return
with open(join(path_logs,"CHARLIE_"+ data + ".txt"), "a") as file:
file.write("|===================================================|\n" +
"|\t\t\t\t PROJEKT CHARLIE \t\t\t\t\t|\n" + # pov: cursed python
"|===================================================|\n\n\n" +
data_hora + " : " + desc + '\n')
return | 36.45 | 88 | 0.521262 | 199 | 1,458 | 3.688442 | 0.356784 | 0.087193 | 0.020436 | 0.069482 | 0.408719 | 0.356948 | 0.297003 | 0.297003 | 0.297003 | 0.245232 | 0 | 0.000935 | 0.266804 | 1,458 | 40 | 89 | 36.45 | 0.685688 | 0.104938 | 0 | 0.241379 | 0 | 0.034483 | 0.220939 | 0.121632 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.172414 | 0 | 0.275862 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01d3fee0f84d79a6c7dc8e8254bea86a141e910d | 2,631 | py | Python | django_project/work_evid/models.py | grumpa/work_evid | 4798dc6ffde232981981f40c962a922321fcda3a | [
"BSD-3-Clause"
] | null | null | null | django_project/work_evid/models.py | grumpa/work_evid | 4798dc6ffde232981981f40c962a922321fcda3a | [
"BSD-3-Clause"
] | 1 | 2015-01-02T07:30:40.000Z | 2015-01-02T07:30:40.000Z | django_project/work_evid/models.py | grumpa/work_evid | 4798dc6ffde232981981f40c962a922321fcda3a | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
from django.utils.translation import ugettext as _
from django.db import models
from django.core.urlresolvers import reverse
from django.utils import timezone
class Firm(models.Model):
"Simple firm database."
name = models.CharField(max_length=60, verbose_name=_('firm name'))
periode = models.IntegerField(default=1,
verbose_name=_('periode [months]'),
help_text=_('How often we make an invoice.'))
from_date = models.DateField(default=timezone.now, verbose_name=_('from date'))
description = models.TextField(blank=True, verbose_name=_('description'))
show_in_list = models.BooleanField(default=True, verbose_name=_('show in list'))
def get_absolute_url(self):
return reverse('work_evid:firm_detail', kwargs={'pk': self.pk})
class Meta:
ordering = ['name']
verbose_name = _('firm')
def __str__(self):
return self.name
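# Hedged usage sketch (e.g. from a Django shell; names and numbers are made up):
#
#     from decimal import Decimal
#     firm = Firm.objects.create(name='ACME', periode=3)
#     job = Work.objects.create(firm=firm, item_price=Decimal('450.00'),
#                               items=Decimal('2'), what_brief='Website maintenance')
#     job.full_price    # -> Decimal('900.00')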
class Work(models.Model):
"Work evidence model."
firm = models.ForeignKey(Firm, verbose_name=_('firm'))
date = models.DateField(default=timezone.now, verbose_name=_('work date'))
item_price = models.DecimalField(max_digits=15,
decimal_places=2,
verbose_name=_('price for item'))
items = models.DecimalField(max_digits=10,
decimal_places=2,
default=1,
verbose_name=_('ammount of items'))
what_brief = models.CharField(max_length=80, verbose_name=_('what (briefly)'))
what_detailed = models.TextField(blank=True, verbose_name=_('describe detailed'))
@property
def full_price(self):
"Returns item price multiplied by ammount."
return self.items * self.item_price
#full_price = property(_get_full_price)
def get_absolute_url(self):
return reverse('work_evid:work_detail', kwargs={'pk': self.pk})
class Meta:
ordering = ['-date']
class Todo(models.Model):
firm = models.ForeignKey(Firm, verbose_name=_('firm'))
date = models.DateField(default=timezone.now, verbose_name=_('created'))
todo = models.TextField(blank=True, verbose_name=_('todo'))
finished = models.BooleanField(default=False, verbose_name=_('finished'))
    def __str__(self):
        return '{0} {1} {2}'.format(self.date, self.firm.name[:12], self.todo[:40])
def get_absolute_url(self):
return reverse('work_evid:todo_detail', kwargs={'pk': self.pk})
class Meta:
ordering = ['finished', '-date']
| 36.541667 | 85 | 0.633979 | 309 | 2,631 | 5.161812 | 0.320388 | 0.110345 | 0.037618 | 0.048903 | 0.359875 | 0.359875 | 0.294044 | 0.294044 | 0.194357 | 0.115361 | 0 | 0.010015 | 0.240973 | 2,631 | 71 | 86 | 37.056338 | 0.788683 | 0.052071 | 0 | 0.230769 | 0 | 0 | 0.143966 | 0.024447 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.076923 | 0.096154 | 0.711538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01d934196d480dae649338a69187b0e002359237 | 1,638 | py | Python | cadence/tests/test_itask.py | simkimsia/temporal-python-sdk | 6b35da3eb0d3da87d61c1ce0ff8b33f08e8c3263 | [
"MIT"
] | 141 | 2019-05-01T00:19:22.000Z | 2022-03-29T13:30:31.000Z | cadence/tests/test_itask.py | simkimsia/temporal-python-sdk | 6b35da3eb0d3da87d61c1ce0ff8b33f08e8c3263 | [
"MIT"
] | 19 | 2019-08-10T08:19:30.000Z | 2021-05-26T01:38:39.000Z | cadence/tests/test_itask.py | simkimsia/temporal-python-sdk | 6b35da3eb0d3da87d61c1ce0ff8b33f08e8c3263 | [
"MIT"
] | 29 | 2019-05-15T03:44:09.000Z | 2022-03-29T21:36:17.000Z | import asyncio
from asyncio.events import AbstractEventLoop
from unittest import TestCase
from unittest.mock import Mock, MagicMock
from cadence.decision_loop import ReplayDecider, ITask
from cadence.tests.test_decision_context import run_once
class TestAwaitTill(TestCase):
def setUp(self) -> None:
self.event_loop: AbstractEventLoop = asyncio.get_event_loop()
self.decider: ReplayDecider = Mock()
self.decider.get_and_increment_next_id = MagicMock(return_value="0")
self.decider.event_loop = Mock()
self.future = self.event_loop.create_future()
self.decider.event_loop.create_future = MagicMock(return_value=self.future)
self.itask = ITask(decider=self.decider)
def tearDown(self) -> None:
self.task.cancel()
def test_await_till(self):
self.task = self.event_loop.create_task(self.itask.await_till(lambda *args: None))
run_once(self.event_loop)
assert self.itask.awaited
def test_await_till_no_progress(self):
self.task = self.event_loop.create_task(self.itask.await_till(lambda *args: None))
run_once(self.event_loop)
assert self.itask.awaited
run_once(self.event_loop)
assert self.itask.awaited
def test_unblock(self):
blocked = True
def check_blocked():
nonlocal blocked
return not blocked
self.task = self.event_loop.create_task(self.itask.await_till(check_blocked))
run_once(self.event_loop)
blocked = False
self.itask.unblock()
run_once(self.event_loop)
assert not self.itask.awaited
| 33.428571 | 90 | 0.698413 | 213 | 1,638 | 5.15493 | 0.258216 | 0.106557 | 0.118397 | 0.07286 | 0.336066 | 0.317851 | 0.294171 | 0.294171 | 0.294171 | 0.294171 | 0 | 0.000777 | 0.214286 | 1,638 | 48 | 91 | 34.125 | 0.85237 | 0 | 0 | 0.263158 | 0 | 0 | 0.000611 | 0 | 0 | 0 | 0 | 0 | 0.105263 | 1 | 0.157895 | false | 0 | 0.157895 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01de93cc01e93ba0250a3e2becff8b0167586038 | 820 | py | Python | utils/decompress_gz_to_json.py | zahidayar/BRON | 585b365ec081eae758c7c7e7160ceca3ac9c2f6f | [
"MIT"
] | 23 | 2020-10-02T12:59:19.000Z | 2022-03-07T17:53:25.000Z | utils/decompress_gz_to_json.py | zahidayar/BRON | 585b365ec081eae758c7c7e7160ceca3ac9c2f6f | [
"MIT"
] | 9 | 2020-09-30T18:47:39.000Z | 2022-03-08T17:21:41.000Z | utils/decompress_gz_to_json.py | zahidayar/BRON | 585b365ec081eae758c7c7e7160ceca3ac9c2f6f | [
"MIT"
] | 11 | 2020-12-30T19:21:52.000Z | 2022-03-25T03:00:42.000Z | import json
import gzip
import argparse
"""
Decompresses GZ file to JSON file
"""
def decompress_gz_to_json(gz_path, save_path):
with gzip.open(gz_path, "rt", encoding="utf-8") as f:
decompressed = json.load(f)
with open(save_path, 'w') as f:
f.write(json.dumps(decompressed, indent=4, sort_keys=True))
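# Example (hypothetical file names):
#
#     decompress_gz_to_json("BRON.json.gz", "BRON.json")
#
# or, from the command line:
#
#     python decompress_gz_to_json.py --gz_path BRON.json.gz --save_path BRON.json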
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Decompress GZ file to JSON file")
parser.add_argument(
"--gz_path", type=str, required=True, help="Location of .gz file to be decompressed"
)
parser.add_argument(
"--save_path", type=str, required=True, help="Location to save file as .json"
)
args = parser.parse_args()
gz_path = args.gz_path
save_path = args.save_path
decompress_gz_to_json(gz_path, save_path)
| 27.333333 | 92 | 0.684146 | 121 | 820 | 4.38843 | 0.38843 | 0.067797 | 0.045198 | 0.079096 | 0.312618 | 0.252354 | 0.252354 | 0.120527 | 0 | 0 | 0 | 0.00304 | 0.197561 | 820 | 29 | 93 | 28.275862 | 0.803951 | 0 | 0 | 0.1 | 0 | 0 | 0.174583 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.15 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01ded03c96a46ab8c9a1fc28b70d091d4b45aa01 | 2,981 | py | Python | tests/tangelo-watch.py | movermeyer/tangelo | 470034ee9b3d7a01becc1ce5fddc7adc1d5263ef | [
"Apache-2.0"
] | 40 | 2015-01-09T02:56:33.000Z | 2019-03-01T05:34:13.000Z | tests/tangelo-watch.py | movermeyer/tangelo | 470034ee9b3d7a01becc1ce5fddc7adc1d5263ef | [
"Apache-2.0"
] | 98 | 2015-01-05T12:51:29.000Z | 2019-01-23T20:16:48.000Z | tests/tangelo-watch.py | movermeyer/tangelo | 470034ee9b3d7a01becc1ce5fddc7adc1d5263ef | [
"Apache-2.0"
] | 21 | 2015-01-05T19:11:49.000Z | 2020-08-19T04:16:16.000Z | import fixture
import nose
import requests
import os
import pprint
import time
def get_times(response):
"""
Parse a response from a watch script to get the reported times.
:param response: the response from a requests.get call.
:returns: a dictionary of parsed times.
"""
times = {}
for part in response.content.split("Watch ")[1:]:
name = part.split(" [")[0]
timestamp = part.split(" [")[1].split("]")[0]
times[name] = timestamp
pprint.pprint(times)
return times
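# For example, a response body containing "Watch A [1425] ... Watch B [1431]" is parsed
# into {"A": "1425", "B": "1431"} (the timestamp values are illustrative).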
def start_tangelo():
"""Start tangelo with the watch plugin."""
return fixture.start_tangelo("--watch")
def touch_file(path):
"""
Use os.utime to touch a file, but add a delay to make sure things have a
chance to change.
:param path: path to touch.
"""
time.sleep(2)
os.utime(path, None)
@nose.with_setup(start_tangelo, fixture.stop_tangelo)
def test_watch_plugin():
times = []
# Check the original time
response = requests.get(fixture.url("watch_a"))
assert "Watch A" in response.content
times.append(get_times(response))
# Calling this again shouldn't change any import time.
response = requests.get(fixture.url("watch_a"))
times.append(get_times(response))
assert times[-2] == times[-1]
# Touch script A and check that we now get a new time for A, but not for
# the sub scripts.
touch_file("tests/web/watch_a.py")
response = requests.get(fixture.url("watch_a"))
times.append(get_times(response))
assert times[-2]["A"] != times[-1]["A"]
assert times[-2]["B"] == times[-1]["B"]
assert times[-2]["C"] == times[-1]["C"]
assert times[-2]["D"] == times[-1]["D"]
# Touch script B and check that script A updates with that, too.
touch_file("tests/web/watch_b.py")
response = requests.get(fixture.url("watch_a"))
times.append(get_times(response))
assert times[-2]["A"] != times[-1]["A"]
assert times[-2]["B"] != times[-1]["B"]
assert times[-2]["C"] == times[-1]["C"]
assert times[-2]["D"] == times[-1]["D"]
# And again with script D which is several layers in
touch_file("tests/web/watch_d.py")
response = requests.get(fixture.url("watch_a"))
times.append(get_times(response))
assert times[-2]["A"] != times[-1]["A"]
assert times[-2]["B"] != times[-1]["B"]
assert times[-2]["C"] != times[-1]["C"]
assert times[-2]["D"] != times[-1]["D"]
# Touching script C and then loading E should show a new C time
touch_file("tests/web/watch_c.py")
response = requests.get(fixture.url("watch_e"))
times.append(get_times(response))
assert times[-2]["C"] != times[-1]["C"]
assert times[-2]["D"] == times[-1]["D"]
# Touch script B. Calling E should not show any difference in times.
touch_file("tests/web/watch_b.py")
response = requests.get(fixture.url("watch_e"))
times.append(get_times(response))
assert times[-2] == times[-1]
# All done
| 31.052083 | 76 | 0.628313 | 448 | 2,981 | 4.107143 | 0.220982 | 0.095652 | 0.104348 | 0.098913 | 0.491304 | 0.440761 | 0.440761 | 0.440761 | 0.417391 | 0.417391 | 0 | 0.015553 | 0.201946 | 2,981 | 95 | 77 | 31.378947 | 0.757881 | 0.246897 | 0 | 0.517241 | 0 | 0 | 0.092449 | 0 | 0 | 0 | 0 | 0 | 0.293103 | 1 | 0.068966 | false | 0 | 0.103448 | 0 | 0.206897 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01e1988dca92df66d159a4de20a9672aa6ee93e3 | 1,394 | py | Python | dfs/path-sum-II.py | windowssocket/py_leetcode | 241dbf8d7dab7db5215c2526321fcdb378b45492 | [
"Apache-2.0"
] | 3 | 2018-05-29T02:29:40.000Z | 2020-02-05T03:28:16.000Z | dfs/path-sum-II.py | xidongc/py_leetcode | 241dbf8d7dab7db5215c2526321fcdb378b45492 | [
"Apache-2.0"
] | 1 | 2019-03-08T13:22:32.000Z | 2019-03-08T13:22:32.000Z | dfs/path-sum-II.py | xidongc/py_leetcode | 241dbf8d7dab7db5215c2526321fcdb378b45492 | [
"Apache-2.0"
] | 3 | 2018-05-29T11:50:24.000Z | 2018-11-27T12:31:01.000Z | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def pathSum(self, root, sum):
"""
:type root: TreeNode
:type sum: int
        :rtype: List[List[int]]
"""
ret = []
if not root:
return ret
curr = [root.val]
def dfs(root, sum):
if sum == 0 and root.left is None and root.right is None:
print(curr)
ret.append(curr[:])
for n in [root.left, root.right]:
if n is not None:
curr.append(n.val)
dfs(n, sum-n.val)
curr.pop()
dfs(root, sum-root.val)
return ret
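# Worked example (the classic LeetCode case): given the tree
#
#           5
#          / \
#         4   8
#        /   / \
#       11  13  4
#      /  \    / \
#     7    2  5   1
#
# and sum = 22, both Solution variants in this file return [[5, 4, 11, 2], [5, 8, 4, 5]].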
class Solution:
def pathSum(self, root, sum):
"""
:type root: TreeNode
:type sum: int
:rtype: List[List[int]]
"""
if not root:
return []
Solution.res = []
def helper(root, sum, path):
if not root.left and not root.right and sum == root.val:
Solution.res.append(path+[root.val])
return
helper(root.left, sum-root.val, path+[root.val])
helper(root.right, sum-root.val, path+[root.val])
helper(root,sum,[])
return Solution.res | 25.345455 | 69 | 0.472023 | 168 | 1,394 | 3.892857 | 0.25 | 0.085627 | 0.061162 | 0.070336 | 0.293578 | 0.293578 | 0.293578 | 0.293578 | 0.198777 | 0.198777 | 0 | 0.00122 | 0.411765 | 1,394 | 55 | 70 | 25.345455 | 0.796341 | 0.185796 | 0 | 0.266667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0 | 0 | 0.366667 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01e1c6dfef1b13a42336acc4b9cb04fd28520995 | 4,405 | py | Python | train.py | matt-quant-heads-io/pcgil2 | acb32c5766fca5be580f4aa56b1b7c660c39048d | [
"MIT"
] | null | null | null | train.py | matt-quant-heads-io/pcgil2 | acb32c5766fca5be580f4aa56b1b7c660c39048d | [
"MIT"
] | null | null | null | train.py | matt-quant-heads-io/pcgil2 | acb32c5766fca5be580f4aa56b1b7c660c39048d | [
"MIT"
] | null | null | null | #pip install tensorflow==1.15
#Install stable-baselines as described in the documentation
import sys
import model
from model import FullyConvPolicyBigMap, FullyConvPolicySmallMap, CustomPolicyBigMap, CustomPolicySmallMap
from utils import get_exp_name, max_exp_idx, load_model, make_vec_envs
from stable_baselines import PPO2
from stable_baselines.results_plotter import load_results, ts2xy
import tensorflow as tf
import numpy as np
import os
n_steps = 0
log_dir = 'runs'
best_mean_reward, n_steps = -np.inf, 0
def callback(_locals, _globals):
"""
Callback called at each step (for DQN an others) or after n steps (see ACER or PPO2)
:param _locals: (dict)
:param _globals: (dict)
"""
global n_steps, best_mean_reward
# Print stats every 1000 calls
if (n_steps + 1) % 10 == 0:
x, y = ts2xy(load_results(log_dir), 'timesteps')
# print(f"len(x) is {len(x)}")
if len(x) > 100:
#pdb.set_trace()
mean_reward = np.mean(y[-100:])
# print(x[-1], 'timesteps')
print("Best mean reward: {:.2f} - Last mean reward per episode: {:.2f}".format(best_mean_reward, mean_reward))
# New best model, we save the agent here
if mean_reward > best_mean_reward:
best_mean_reward = mean_reward
# Example for saving best model
print(f"Saving new best model: idx {n_steps}")
_locals['self'].model.save(os.path.join(log_dir, f'best_model.pkl'))
else:
_locals['self'].model.save(os.path.join(log_dir, 'latest_model.pkl'))
else:
# print('{} monitor entries'.format(len(x)))
pass
n_steps += 1
# Returning False will stop training early
return True
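# As the docstring above notes, PPO2 invokes this callback once per n-step rollout; it is
# wired up in main() via model.learn(..., callback=callback) and reads back the episode
# rewards that are (presumably) written into log_dir by a Monitor wrapper.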
def main(game, representation, experiment, steps, n_cpu, render, logging, **kwargs):
env_name = '{}-{}-v0'.format(game, representation)
exp_name = get_exp_name(game, representation, experiment, **kwargs)
resume = kwargs.get('resume', False)
if representation == 'wide':
policy = FullyConvPolicyBigMap
if game == "sokoban":
policy = FullyConvPolicySmallMap
else:
policy = CustomPolicyBigMap
if game == "sokoban":
policy = CustomPolicySmallMap
if game == "binary":
kwargs['cropped_size'] = 28
elif game == "zelda":
kwargs['cropped_size'] = 22
elif game == "sokoban":
kwargs['cropped_size'] = 10
n = max_exp_idx(exp_name)
global log_dir
if not resume:
n = n + 1
log_dir = 'runs/{}_{}_{}'.format(exp_name, n, 'log')
# os.mkdir(log_dir)
if not resume:
os.mkdir(log_dir)
else:
model = load_model(log_dir)
kwargs = {
**kwargs,
'render_rank': 0,
'render': render,
}
used_dir = log_dir
if not logging:
used_dir = None
env = make_vec_envs(env_name, representation, log_dir, n_cpu, **kwargs)
# print(f"\nenv from make_vec_envs: {env}\n")
#if not resume or model is None:
if not resume:
model = PPO2(policy, env, verbose=1, n_steps=16, tensorboard_log="./runs")
# print(f"policy: {policy}")
# print(f"\nmake_vec_envs params: \n"
# f"env_name: {env_name}\n"
# f"representation: {representation},\n"
# f"log_dir: {log_dir}, \n"
# f"n_cpu: {n_cpu}\n"
# f"**kwargs: {kwargs}")
else:
if 'orderless' in env_name:
model = PPO2.load("/Users/matt/pcgil2/pcgil2/runs/zeldaorderless_wide_zeldaorderless_7_log/best_model.pkl")
else:
model = PPO2.load("/Users/matt/pcgil2/pcgil2/runs/zeldaham_wide_zeldahamm_4_log/best_model.pkl")
model.set_env(env)
if not logging:
model.learn(total_timesteps=int(steps), tb_log_name=exp_name)
else:
model.learn(total_timesteps=int(steps), tb_log_name=exp_name, callback=callback)
################################## MAIN ########################################
#game = 'zeldaham'
game = 'zeldaorderless'
representation = 'wide'
experiment = 'zeldaorderless'
# experiment = 'zeldahamm'
#steps = 1e8
steps = 1e6
render = False
logging = True
# n_cpu = 50
n_cpu = 8
kwargs = {
'resume': True
}
if __name__ == '__main__':
main(game, representation, experiment, steps, n_cpu, render, logging, **kwargs)
| 33.625954 | 122 | 0.614075 | 565 | 4,405 | 4.580531 | 0.286726 | 0.030139 | 0.032458 | 0.012751 | 0.180835 | 0.139876 | 0.139876 | 0.139876 | 0.11051 | 0.083462 | 0 | 0.016697 | 0.252213 | 4,405 | 130 | 123 | 33.884615 | 0.768974 | 0.195233 | 0 | 0.179775 | 0 | 0 | 0.1434 | 0.047021 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022472 | false | 0.011236 | 0.101124 | 0 | 0.134831 | 0.022472 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01e37e55485ff713b5ef926c51471d0b3aa60ce8 | 5,752 | py | Python | run_scripts/lfd_upper_bound_exp_script.py | yifan-you-37/rl_swiss | 8b0ee7caa5c1fa93860916004cf4fd970667764f | [
"MIT"
] | 56 | 2019-10-20T03:09:02.000Z | 2022-03-25T09:21:40.000Z | run_scripts/lfd_upper_bound_exp_script.py | yifan-you-37/rl_swiss | 8b0ee7caa5c1fa93860916004cf4fd970667764f | [
"MIT"
] | 3 | 2020-10-01T07:33:51.000Z | 2021-05-12T03:40:57.000Z | run_scripts/lfd_upper_bound_exp_script.py | yifan-you-37/rl_swiss | 8b0ee7caa5c1fa93860916004cf4fd970667764f | [
"MIT"
] | 10 | 2019-11-04T16:56:09.000Z | 2022-03-25T09:21:41.000Z | import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
from copy import deepcopy
from gym.spaces import Dict
from rllab.misc.instrument import VariantGenerator
import rlkit.torch.pytorch_util as ptu
from rlkit.envs.wrappers import NormalizedBoxEnv
from rlkit.launchers.launcher_util import setup_logger, set_seed
from rlkit.envs import get_meta_env, get_meta_env_params_iters
from rlkit.envs.wrappers import ScaledMetaEnv
from rlkit.torch.sac.policies import ReparamTanhMultivariateGaussianPolicy
from rlkit.torch.networks import Mlp
from rlkit.torch.irl.few_shot_LfD_upper_bound import UpperBound
from rlkit.torch.irl.encoders.mlp_encoder import TimestepBasedEncoder, WeightShareTimestepBasedEncoder
from rlkit.torch.irl.encoders.conv_seq_encoder import ConvTrajEncoder, R2ZMap, Dc2RMap, NPEncoder
import yaml
import argparse
import importlib
import psutil
import os
from os import path
import joblib
from time import sleep
EXPERT_LISTING_YAML_PATH = '/h/kamyar/oorl_rlkit/rlkit/torch/irl/experts.yaml'
def experiment(variant):
with open(EXPERT_LISTING_YAML_PATH, 'r') as f:
listings = yaml.load(f.read())
expert_dir = listings[variant['expert_name']]['exp_dir']
specific_run = listings[variant['expert_name']]['seed_runs'][variant['expert_seed_run_idx']]
file_to_load = path.join(expert_dir, specific_run, 'extra_data.pkl')
extra_data = joblib.load(file_to_load)
# this script is for the non-meta-learning airl
train_context_buffer, train_test_buffer = extra_data['meta_train']['context'], extra_data['meta_train']['test']
test_context_buffer, test_test_buffer = extra_data['meta_test']['context'], extra_data['meta_test']['test']
# set up the envs
env_specs = variant['env_specs']
meta_train_env, meta_test_env = get_meta_env(env_specs)
# set up the policy and training algorithm
obs_dim = int(np.prod(meta_train_env.observation_space.spaces['obs'].shape))
action_dim = int(np.prod(meta_train_env.action_space.shape))
print('obs dim: %d' % obs_dim)
print('act dim: %d' % action_dim)
sleep(3)
# make the disc model
z_dim = variant['algo_params']['z_dim']
# make the MLP
hidden_sizes = [variant['algo_params']['mlp_hid_dim']] * variant['algo_params']['mlp_layers']
obs_task_params_dim = int(np.prod(meta_train_env.observation_space.spaces['obs_task_params'].shape))
mlp = Mlp(
hidden_sizes,
output_size=obs_task_params_dim if variant['algo_params']['training_regression'] else 1,
input_size=z_dim if variant['algo_params']['training_regression'] else z_dim + 2*obs_task_params_dim,
batch_norm=variant['algo_params']['mlp_use_bn']
)
# Make the encoder
encoder = TimestepBasedEncoder(
2*obs_dim + action_dim, #(s,a,s')
variant['algo_params']['r_dim'],
variant['algo_params']['z_dim'],
variant['algo_params']['enc_hid_dim'],
variant['algo_params']['r2z_hid_dim'],
variant['algo_params']['num_enc_layer_blocks'],
hid_act='relu',
use_bn=True,
within_traj_agg=variant['algo_params']['within_traj_agg']
)
# ---------------
# encoder = WeightShareTimestepBasedEncoder(
# obs_dim,
# action_dim,
# 64,
# variant['algo_params']['r_dim'],
# variant['algo_params']['z_dim'],
# variant['algo_params']['enc_hid_dim'],
# variant['algo_params']['r2z_hid_dim'],
# variant['algo_params']['num_enc_layer_blocks'],
# hid_act='relu',
# use_bn=True,
# within_traj_agg=variant['algo_params']['within_traj_agg']
# )
# ---------------
# traj_enc = ConvTrajEncoder(
# variant['algo_params']['np_params']['traj_enc_params']['num_conv_layers'],
# # obs_dim + action_dim,
# obs_dim + action_dim + obs_dim,
# variant['algo_params']['np_params']['traj_enc_params']['channels'],
# variant['algo_params']['np_params']['traj_enc_params']['kernel'],
# variant['algo_params']['np_params']['traj_enc_params']['stride'],
# )
# Dc2R_map = Dc2RMap(
# variant['algo_params']['np_params']['Dc2r_params']['agg_type'],
# traj_enc,
# state_only=False
# )
# r2z_map = R2ZMap(
# variant['algo_params']['np_params']['r2z_params']['num_layers'],
# variant['algo_params']['np_params']['traj_enc_params']['channels'],
# variant['algo_params']['np_params']['r2z_params']['hid_dim'],
# variant['algo_params']['z_dim']
# )
# encoder = NPEncoder(
# Dc2R_map,
# r2z_map,
# )
train_task_params_sampler, test_task_params_sampler = get_meta_env_params_iters(env_specs)
algorithm = UpperBound(
meta_train_env,
train_context_buffer,
train_test_buffer,
test_context_buffer,
test_test_buffer,
mlp,
encoder,
**variant['algo_params']
)
if ptu.gpu_enabled():
algorithm.cuda()
algorithm.train()
return 1
if __name__ == '__main__':
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--experiment', help='experiment specification file')
args = parser.parse_args()
with open(args.experiment, 'r') as spec_file:
spec_string = spec_file.read()
exp_specs = yaml.load(spec_string)
if exp_specs['use_gpu']:
print('\n\nUSING GPU\n\n')
ptu.set_gpu_mode(True)
exp_id = exp_specs['exp_id']
exp_prefix = exp_specs['exp_name']
seed = exp_specs['seed']
set_seed(seed)
setup_logger(exp_prefix=exp_prefix, exp_id=exp_id, variant=exp_specs)
experiment(exp_specs)
| 34.035503 | 115 | 0.677156 | 758 | 5,752 | 4.791557 | 0.255937 | 0.084802 | 0.131057 | 0.066079 | 0.35793 | 0.296256 | 0.237885 | 0.219438 | 0.170705 | 0.170705 | 0 | 0.004296 | 0.190716 | 5,752 | 168 | 116 | 34.238095 | 0.77594 | 0.246349 | 0 | 0.021277 | 0 | 0 | 0.142757 | 0.01143 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010638 | false | 0 | 0.276596 | 0 | 0.297872 | 0.031915 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01e3a685fc855932c7cffb03a56fb066ee005d8b | 2,535 | py | Python | tldap/helpers.py | Karaage-Cluster/python-tldap-debian | 9d3d7a28df61d9cf97c0c0f62a5eea9d5767c213 | [
"BSD-3-Clause"
] | null | null | null | tldap/helpers.py | Karaage-Cluster/python-tldap-debian | 9d3d7a28df61d9cf97c0c0f62a5eea9d5767c213 | [
"BSD-3-Clause"
] | null | null | null | tldap/helpers.py | Karaage-Cluster/python-tldap-debian | 9d3d7a28df61d9cf97c0c0f62a5eea9d5767c213 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2012-2014 Brian May
#
# This file is part of python-tldap.
#
# python-tldap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-tldap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-tldap If not, see <http://www.gnu.org/licenses/>.
import six
import copy
class CaseInsensitiveDict(dict):
"""
Case insensitve dictionary for searches however preserves the case for
retrieval.
"""
def __init__(self, d={}):
self.lc = {}
for k, v in six.iteritems(d):
self.lc[k.lower()] = k
super(CaseInsensitiveDict, self).__init__(d)
def __setitem__(self, key, value):
try:
old_key = self.lc[key.lower()]
except KeyError:
pass
else:
if key != old_key:
super(CaseInsensitiveDict, self).__delitem__(old_key)
self.lc[key.lower()] = key
super(CaseInsensitiveDict, self).__setitem__(key, value)
def __delitem__(self, key):
key = self.lc[key.lower()]
del self.lc[key.lower()]
super(CaseInsensitiveDict, self).__delitem__(key)
def __getitem__(self, key):
key = self.lc[key.lower()]
return super(CaseInsensitiveDict, self).__getitem__(key)
def __contains__(self, key):
try:
key = self.lc[key.lower()]
except KeyError:
return False
else:
return super(CaseInsensitiveDict, self).__contains__(key)
def get(self, key, default=None):
try:
key = self.lc[key.lower()]
except KeyError:
return default
else:
return super(CaseInsensitiveDict, self).get(key, default)
def get_correct_key(self, key):
return self.lc[key.lower()]
def __copy__(self):
clone = self.__class__()
for k, v in six.iteritems(self):
clone[k] = v
return clone
def __deepcopy__(self, memo):
clone = self.__class__()
for k, v in six.iteritems(self):
clone[k] = copy.deepcopy(v, memo)
return clone
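# Usage example (illustrative values):
#
#     d = CaseInsensitiveDict({'givenName': 'Brian'})
#     d['givenname']                    # -> 'Brian' (lookup ignores case)
#     'GIVENNAME' in d                  # -> True
#     d.get_correct_key('GIVENNAME')    # -> 'givenName' (original case preserved)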
| 30.178571 | 74 | 0.624458 | 324 | 2,535 | 4.685185 | 0.361111 | 0.039526 | 0.047431 | 0.073781 | 0.293149 | 0.225955 | 0.161397 | 0.109354 | 0.109354 | 0.056653 | 0 | 0.004913 | 0.277318 | 2,535 | 83 | 75 | 30.542169 | 0.82369 | 0.298225 | 0 | 0.372549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.176471 | false | 0.019608 | 0.039216 | 0.019608 | 0.392157 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01e4a93d122c374bc7b77d38adfb718aea0aeaed | 899 | py | Python | vmraid/patches/v5_0/bookmarks_to_stars.py | sowrisurya/vmraid | f833e00978019dad87af80b41279c0146c063ed5 | [
"MIT"
] | null | null | null | vmraid/patches/v5_0/bookmarks_to_stars.py | sowrisurya/vmraid | f833e00978019dad87af80b41279c0146c063ed5 | [
"MIT"
] | null | null | null | vmraid/patches/v5_0/bookmarks_to_stars.py | sowrisurya/vmraid | f833e00978019dad87af80b41279c0146c063ed5 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import json
import vmraid
import vmraid.defaults
from vmraid.desk.like import _toggle_like
from six import string_types
def execute():
for user in vmraid.get_all("User"):
username = user["name"]
bookmarks = vmraid.db.get_default("_bookmarks", username)
if not bookmarks:
continue
if isinstance(bookmarks, string_types):
bookmarks = json.loads(bookmarks)
for opts in bookmarks:
route = (opts.get("route") or "").strip("#/ ")
if route and route.startswith("Form"):
try:
view, doctype, docname = opts["route"].split("/")
except ValueError:
continue
if vmraid.db.exists(doctype, docname):
if (doctype=="DocType"
or int(vmraid.db.get_value("DocType", doctype, "issingle") or 0)
or not vmraid.db.table_exists(doctype)):
continue
_toggle_like(doctype, docname, add="Yes", user=username)
| 26.441176 | 70 | 0.696329 | 118 | 899 | 5.169492 | 0.449153 | 0.052459 | 0.036066 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001357 | 0.1802 | 899 | 33 | 71 | 27.242424 | 0.826323 | 0 | 0 | 0.111111 | 0 | 0 | 0.067853 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.222222 | 0 | 0.259259 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01e4d64999a4e0f305613c36fd0071cb9705dd30 | 4,061 | py | Python | data_create.py | Primtee/triplet-loss-train-for-speaker-recognition | 8d0f405eddbbb129bd7bf60565390cdaca0a8aa8 | [
"MIT"
] | 13 | 2019-04-01T02:38:59.000Z | 2022-03-02T20:18:13.000Z | data_create.py | Primtee/triplet-loss-train-for-speaker-recognition | 8d0f405eddbbb129bd7bf60565390cdaca0a8aa8 | [
"MIT"
] | 1 | 2019-07-22T02:33:57.000Z | 2019-07-22T02:33:57.000Z | data_create.py | Primtee/triplet-loss-train-for-speaker-recognition | 8d0f405eddbbb129bd7bf60565390cdaca0a8aa8 | [
"MIT"
] | 8 | 2019-04-02T01:49:19.000Z | 2021-04-23T09:22:20.000Z | # coding=utf-8
__author__ = 'NXG'
import os, wave
import contextlib
import collections
from math import ceil
from dataprovider.create.data_management import mik_dir
saved_original_voice_path = '/data/validation_clip/'
def read_wave(path):
with contextlib.closing(wave.open(path, 'rb')) as wf:
"""
wave file basic info:
_wave_params:
nchannels=1,
sampwidth=2,
framerate=8000,
nframes=1088000,
comptype='NONE',
compname='not compressed'
"""
num_channels = wf.getnchannels()
print('voice channel is:', num_channels)
assert num_channels == 1
sample_width = wf.getsampwidth()
# assert sample_width == 2
sample_rate = wf.getframerate()
assert sample_rate in (8000, 16000, 32000) #
pcm_data = wf.readframes(wf.getnframes()) # nframs is: 1088000 read all data one time
# note: len of pcm_data is 2176000
print('the voice length is:{} and sample_rate is:{}'.format(len(pcm_data), sample_rate))
return pcm_data, sample_rate # return row data & sample rate
def write_wave(write_path, audio, sample_rate):
print('write path:', (write_path, sample_rate))
wf = wave.open(write_path, 'wb') # mik_dir
wf.setnchannels(1)
wf.setsampwidth(2)
wf.setframerate(sample_rate)
wf.writeframes(audio)
wf.close()
class Frame(object):
def __init__(self, bytes, timestamp, duration):
self.bytes = bytes
self.timestamp = timestamp
self.duration = duration
def frame_collect(frame_duration_ms, audio, sample_rate):
# audio: all the data
frame_segment = []
n = int(sample_rate * (frame_duration_ms / 1000.0) * 2) # 30ms 30ms / 1000ms 2 <-> s bytes
offset = 0
timestamp = 0.0
duration = (float(n) / sample_rate) / 2.0 # sub second
while offset + n < len(audio): # if 1s n = 8000*2
"""
0-8000*2 0.0 1
8000*2- 8000*2+8000 1, 1
8000*2+8000- 8000*2+8000+8000 2 1
"""
frame_segment.append(Frame(audio[offset:offset + n], timestamp, duration))
timestamp += duration
offset += n
    print('collect all frames:', len(frame_segment))
return frame_segment # 4533
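# Worked example of the frame maths above, assuming 8000 Hz, 16-bit mono audio:
# one 30 ms frame spans n = 8000 * (30 / 1000) * 2 = 480 bytes and lasts
# duration = (480 / 8000) / 2 = 0.03 s, so a 1-second clip (16000 bytes) yields ~33 frames.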
def vad_check(sample_rate, frame_duration_ms, padding_duration_ms, frames, write_path):
num_padding_frames = int(padding_duration_ms // frame_duration_ms) # 3000 /30 100 frame
ring_buffer = collections.deque(maxlen=num_padding_frames)
for index_, frame in enumerate(frames):
ring_buffer.append(frame)
human_voiced = b''.join([seg.bytes for seg in ring_buffer])
human_voiced_len = len(human_voiced)
if human_voiced_len < 16000: # human voice length less than 0.5s
ring_buffer.clear()
return False # not human voice
else:
if human_voiced_len < 16000 * 6: # human voice length in [0.5s, 1s]
full_human_voice_length = 16000 * 6
copy_num = ceil(full_human_voice_length / human_voiced_len)
for copy_step in range(0, copy_num, 1):
human_voiced = human_voiced.__add__(human_voiced) # Modify here
write_wave(write_path, human_voiced, sample_rate)
return True
def check(*path):
audio, sample_rate = read_wave(path[1]) # read the wav format voice data
frames = frame_collect(30, audio, sample_rate)
frames = list(frames)
segments = vad_check(sample_rate, 30, len(frames) * 30, frames, path[2])
print('segments:', segments)
return segments
# Is the voice stream shorter than 3 seconds?
if __name__ == '__main__':
path = 'D:/save'
save_path_root = 'D"/enrance_voice'
dir_path = os.listdir(path)
for cur_path in dir_path:
name = os.listdir(os.path.join(path, cur_path))
for cur_name in name:
            cur_name = os.path.join(path, cur_path, cur_name)  # full path of the audio file
save_name = os.path.join(save_path_root, cur_path, cur_name)
check(3, cur_name, save_name)
| 35.008621 | 98 | 0.638513 | 551 | 4,061 | 4.46461 | 0.30127 | 0.069106 | 0.02439 | 0.013821 | 0.08252 | 0.017073 | 0 | 0 | 0 | 0 | 0 | 0.055925 | 0.260281 | 4,061 | 115 | 99 | 35.313043 | 0.762983 | 0.099729 | 0 | 0 | 0 | 0 | 0.04912 | 0.006796 | 0 | 0 | 0 | 0 | 0.025641 | 1 | 0.076923 | false | 0 | 0.064103 | 0 | 0.217949 | 0.064103 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01ee6ced64104669ebecf6c3325e44ad9e03b6ac | 5,344 | py | Python | saving.py | pabilbado/IVanalyzer | 4ebb5333508906328c9b7df5311ea0616ba9344f | [
"MIT"
] | null | null | null | saving.py | pabilbado/IVanalyzer | 4ebb5333508906328c9b7df5311ea0616ba9344f | [
"MIT"
] | null | null | null | saving.py | pabilbado/IVanalyzer | 4ebb5333508906328c9b7df5311ea0616ba9344f | [
"MIT"
] | null | null | null |
import pandas as pd
import matplotlib.pyplot as plt
import os
import time
intro= """\
\\documentclass[a4paper]{article}
%% Language and font encodings
\\usepackage[english]{babel}
\\usepackage[utf8x]{inputenc}
\\usepackage[T1]{fontenc}
%% Sets page size and margins
\\usepackage[a4paper,top=3cm,bottom=2cm,left=3cm,right=3cm,marginparwidth=1.75cm]{geometry}
%% Useful packages
\\usepackage{amsmath}
\\usepackage{graphicx}
\\usepackage[colorinlistoftodos]{todonotes}
\\usepackage[colorlinks=true, allcolors=blue]{hyperref}
\\title{Your Paper}
\\author{You}
\\begin{document}
\\maketitle
\\newpage
"""
banner= """\
___ _ ___ _ _
/ __|___ ___ _ _ __ _ ___ __ _ _ _ __| | | _ \__ _| |__| |___ ___
| (_ / -_) _ \ '_/ _` / -_) / _` | ' \/ _` | | _/ _` | '_ \ / _ (_-<
\___\___\___/_| \__, \___| \__,_|_||_\__,_| |_| \__,_|_.__/_\___/__/
|___/
___ _
/ __|___ __| |___
| (__/ _ \/ _` / -_)
\___\___/\__,_\___|
###################################################################################
"""
def writehdf5(dic,parameters,fig,fig1,fig2,localtime=""):
if localtime=="":
localtime = str(time.asctime( time.localtime(time.time())))
datafile=localtime.replace(" ","")
os.system("mkdir results/Plots/"+datafile)
fig.savefig("results/Plots/"+datafile+"/IV.png")
fig1.savefig("results/Plots/"+datafile+"/Cond.png")
fig2.savefig("results/Plots/"+datafile+"/Resistance.png")
hdf = pd.HDFStore("results/data/{0}.h5".format(localtime))
data = pd.DataFrame(data=dic)
hdf.put(value=data, key='df', format='table', data_columns=True)
parametersdic={'Minimum_current':[parameters[0]],'Maximum_current':[parameters[1]],'Step_current':[parameters[2]], 'Amplitude_oscillation':[parameters[3]], 'Frequency_oscillation':[parameters[4]], 'Lengt_wire':[parameters[5]], 'Diameter_wire':[parameters[6]], 'Ac_Gain':[parameters[8]],'Dc_gain': [parameters[9]]}
dp = pd.DataFrame(data=parametersdic)
hdf.put(value=dp, key='dp', format='table', data_columns=True)
hdf.close()
return localtime
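# Hedged usage sketch (the figure objects come from matplotlib; the names are illustrative):
#
#     stamp = writehdf5(measurements, params, fig_iv, fig_cond, fig_res)
#     # saves the plots under results/Plots/<timestamp>/ and the data under results/data/<timestamp>.h5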
def readhdf5():
hdf = pd.HDFStore("results/data.h5")
return hdf['{0}'.format(input("Name of the dataframe to load within the file:"))]
def Latex():
datafiles = os.listdir("results/data")
with open('results/Log.tex','w') as log:
log.write(intro)
os.system("mkdir results/Plots/")
for datafile in datafiles:
print(datafile)
# fig, ax= plt.subplots()
# ax.grid(color='b', linestyle='--', linewidth=0.5)
# fig.suptitle('V-I Plot', fontsize=10)
# plt.ylabel('Voltage (V)')
# plt.xlabel('Current (A)')
# ax.errorbar(dic['I'], dic['V'], yerr=dic['eV'], color='b', fmt='o' ,capthick=2 )
#
# fig1, bx = plt.subplots()
# fig1.suptitle('Differential conductance - Current Plot', fontsize=10)
# plt.xlabel('Current (A)')
# plt.ylabel('Dif Conductance (A/V)')
# bx.grid(color='b', linestyle='--', linewidth=0.5)
# bx.errorbar(dic['I'], dic['dC'], yerr=dic['edC'], color='r', fmt='o' ,capthick=2 )
#
#
# fig2, cx = plt.subplots()
# fig1.suptitle('Differential Resistance - Current Plot', fontsize=10)
# plt.xlabel('Current (A)')
# plt.ylabel('Dif Resistance (V/A)')
# cx.grid(color='b', linestyle='--', linewidth=0.5)
# cx.errorbar(dic['I'], dic['dR'], yerr=dic['edR'], color='g', fmt='o', capthick=2)
print(datafile)
try:
hdf = pd.HDFStore("results/data/"+datafile)
datafile = datafile[:-3].replace(" ","")
parameters=hdf['dp']
log.write("\\section{"+datafile+"}\n")
os.system("clear")
log.write("\\begin{figure}[h] \n\\centering \n")
log.write("\\includegraphics[width=0.4\\textwidth]{Plots/"+datafile+"/IV.png}\n")
log.write("\\includegraphics[width=0.4\\textwidth]{Plots/"+datafile+"/Cond.png}\n")
log.write("\\includegraphics[width=0.4\\textwidth]{Plots/"+datafile+"/Resistance.png}\n")
log.write("\\end{figure}\\\\\\\\\n\n")
os.system("clear")
log.write("All values are given in SI units\\\\ \n")
for parameter in parameters.keys():
log.write(parameter+": $"+str(parameters[parameter][0])+"$\\\\ \n")
log.write("File data stored in results/data/"+datafile+".h5")
os.system("clear")
hdf.close()
except:
log.write("\\section{"+datafile+"}\n")
log.write("Data corrupt or format not expected. File should be checked manually.")
log.write("\\newpage\n")
os.system("clear")
print(banner)
print("Generating Latex class:")
print(datafile +"Added")
log.write("\\end{document}")
os.system("clear")
print("All data has been saved")
return
Latex()
| 35.865772 | 317 | 0.537051 | 552 | 5,344 | 4.949275 | 0.375 | 0.040996 | 0.019766 | 0.029649 | 0.252562 | 0.146047 | 0.129941 | 0.096999 | 0.096999 | 0.096999 | 0 | 0.014755 | 0.264409 | 5,344 | 148 | 318 | 36.108108 | 0.680234 | 0.154004 | 0 | 0.139785 | 0 | 0.043011 | 0.448866 | 0.140952 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.043011 | 0 | 0.107527 | 0.064516 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01f0b340264833a77f844be5ad80f6b639b8a21f | 1,896 | py | Python | connect4_driver.py | YeungJonathan/ConnectFourGame | a9fc8c063a6484fb9a5cbfea788b9db6c99bc43a | [
"MIT"
] | null | null | null | connect4_driver.py | YeungJonathan/ConnectFourGame | a9fc8c063a6484fb9a5cbfea788b9db6c99bc43a | [
"MIT"
] | null | null | null | connect4_driver.py | YeungJonathan/ConnectFourGame | a9fc8c063a6484fb9a5cbfea788b9db6c99bc43a | [
"MIT"
] | 2 | 2018-10-18T20:55:29.000Z | 2019-05-05T22:20:08.000Z | from connect4_board import Board
from connect4_board import Player
import random
class Driver:
def __init__(self):
playerOne = input('Input Player 1 name: ')
playerTwo = input('Input Player 2 name: ')
self.p1 = Player(1,'X', playerOne)
self.p2 = Player(2,'O', playerTwo)
self.board = Board()
def prompt(self, name):
while True:
try:
userInput = int(input(name+' Insert Column (1-7): '))
if not (1 <= userInput <= 7):
raise ValueError
if self.board.board[0][userInput-1]!='.':
raise Exception
except ValueError:
print('Please enter an integer from 0-7')
continue
except Exception:
print('Column reached maximum. ',end = '')
continue
return userInput-1
def decideStart(self):
#player 1 start if random integer is even
#player 2 if odd
if (random.randint(0,9) % 2 == 0):
print(self.p1.getPlayerName(),'Start!')
return self.p1, self.p2
else:
print(self.p2.getPlayerName(),'Start!')
return self.p2, self.p1
def printBoard(self):
print(self.board, end = '')
print('-------------')
print('1 2 3 4 5 6 7')
def playerInsert(self, player):
        '''
        Insert the given player's move, update the board,
        and return True when that move wins the game.
        '''
userinput = self.prompt(player.getPlayerName())
row = self.board.findRow(userinput, player)
player.addMoves(row, userinput)
self.printBoard()
if player.checkWin():
print(player.getPlayerName(),'Won')
return True
return False
def tick(self):
starter, second = self.decideStart()
endGame = False
self.printBoard()
while not endGame:
#starter insert
endGame = self.playerInsert(starter) or self.board.isFull()
if endGame is True:
break
#second insert
endGame = self.playerInsert(second) or self.board.isFull()
if self.board.isFull():
print('The game is tied!')
if __name__ == '__main__':
main = Driver()
main.tick() | 24.947368 | 62 | 0.658755 | 254 | 1,896 | 4.862205 | 0.330709 | 0.051012 | 0.036437 | 0.037247 | 0.030769 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023873 | 0.204641 | 1,896 | 76 | 63 | 24.947368 | 0.795093 | 0.077004 | 0 | 0.067797 | 0 | 0 | 0.108997 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101695 | false | 0 | 0.050847 | 0 | 0.254237 | 0.20339 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01f46813c24eb72d17e7473237ce843aed94ffa2 | 13,244 | py | Python | main.py | fab-jul/ppfin | f3e51583d42590eceb6d3920a351f8f2639792c1 | [
"MIT"
] | null | null | null | main.py | fab-jul/ppfin | f3e51583d42590eceb6d3920a351f8f2639792c1 | [
"MIT"
] | null | null | null | main.py | fab-jul/ppfin | f3e51583d42590eceb6d3920a351f8f2639792c1 | [
"MIT"
] | null | null | null | import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('otp.log')
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
import argparse
import urwid
import data_controller
import symbol_values
_BACKGROUND = urwid.SolidFill(u'\N{MEDIUM SHADE}')
_BASE_CURRENCY = 'CHF'
_main_event_loop = urwid.AsyncioEventLoop()
_PALETTE = [
('brand', 'bold,underline,dark blue', ''),
('underline', 'underline', ''),
('bold', 'bold', ''),
('err', 'dark red,bold', ''),
('reversed', 'standout', ''),
('up', 'dark green', ''),
('upbold', 'dark green,bold', ''),
('neutral', '', ''),
('neutralbold', 'bold', ''),
('down', 'dark red', ''),
('downbold', 'dark red,bold', ''),
]
_STYLES = {palette_entry[0] for palette_entry in _PALETTE}
_BOLD_MAP = {key: key + 'bold'
for key in _STYLES if key in _STYLES and key + 'bold' in _STYLES}
class Controller:
def __init__(self):
self.stack = [_BACKGROUND]
self.view = urwid.Padding(self.stack[-1], left=1, right=1)
def unhandled_input(self, key):
try:
self.stack[-1].unhandled_input(key)
except AttributeError:
pass
def _update(self):
self.view.original_widget = self.stack[-1]
def push(self, w):
self.stack.append(w)
self._update()
def pop(self):
self.stack.pop()
try:
self.stack[-1].refresh()
except AttributeError:
pass
self._update()
def make_button(title, callback_fn):
button = urwid.Button(title)
urwid.connect_signal(button, 'click', callback_fn)
return urwid.AttrMap(button, None, focus_map='reversed')
def boldify(w):
return urwid.AttrMap(w, attr_map=_BOLD_MAP)
def on_main(fn):
def callback():
_main_event_loop.alarm(0, lambda: fn())
return callback
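# Usage note (hedged): on_main wraps a zero-argument function so the returned callback can be
# fired from anywhere and the wrapped function still runs on the urwid asyncio event loop, e.g.
#
#     refresh_on_main = on_main(summary_view.refresh)   # summary_view is illustrative
#     refresh_on_main()   # schedules summary_view.refresh() via _main_event_loop.alarm(0, ...)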
class Header(urwid.WidgetWrap):
_ALIGNS = {'l': 'left', 'r': 'right'}
def __init__(self, *titles, aligns=None):
titles = [('underline', title) if not isinstance(title, tuple) else title
for title in titles]
if not aligns:
aligns = ''.join('l' for _ in titles)
aligns = [Header._ALIGNS[align] for align in aligns]
if len(aligns) != len(titles):
raise ValueError
super().__init__(
urwid.Columns([urwid.Text(title, align=align)
for title, align in zip(titles, aligns)]))
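# Example (this is how SummaryView uses it below):
#
#     Header('Account', 'Diff', 'Balance', aligns='lrr')
#
# renders three underlined column titles, aligned left/right/right.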
class SummaryView(urwid.WidgetWrap):
def __init__(self, dc: data_controller.DataController, controller: Controller):
self.dc = dc
self.controller = controller
self.focus_walker = None
self._last_focus = None
symbol_values.Ticker.register_callback(
'SummaryView',
on_main(self.refresh))
# lambda: controller.main_loop.event_loop.alarm(0, lambda *_: self.refresh()))
with self.dc.connect():
super(SummaryView, self).__init__(self._get_menu())
def unhandled_input(self, key):
if key == 'r':
self.refresh()
def refresh(self):
logger.info('***\nREFRESH\n***')
with self.dc.connect():
self._set_w(self._get_menu())
def __del__(self):
symbol_values.Ticker.remove_callback('SummaryView')
def _get_menu(self):
body = [urwid.Text(('brand', 'ppfin')), urwid.Divider()]
# Normal (category-0) Accounts
accs = self.dc.get_all_accounts(category=0)
body += [Header('Account', 'Diff', 'Balance', aligns='lrr')]
for acc in accs:
body.append(urwid.Columns([
make_button(acc.name, lambda btn: self._show_account(btn.get_label())),
urwid.Text(acc.get_diff_to_last().attr_str(), align='right'),
urwid.Text(str(acc.get_balance()), align='right')]))
total_diff = sum(acc.get_diff_to_last() for acc in accs).attr_str()
total = sum(acc.get_balance() for acc in accs)
# Special (category-1) Accounts
accs = self.dc.get_all_accounts(category=1)
if accs:
for acc in accs:
body.append(urwid.Columns([
make_button(acc.name, lambda btn: self._show_account(btn.get_label())),
urwid.Text(''),
urwid.Text(str(acc.get_balance()), align='right')]))
total += acc.get_balance()
body += [urwid.Columns([
urwid.Text(('bold', 'Total')),
boldify(urwid.Text(total_diff, align='right')),
urwid.Text(('bold', str(total)), align='right')])]
body += [urwid.Divider(),
make_button('Update Balances', self._update_balances),
make_button('Add Account', self._add_account),
urwid.Divider()]
# Shares
symbol_overviews = self.dc.get_all_symbol_overviews()
if not symbol_overviews:
body += [urwid.Text('No Shares!')]
else:
body += [Header('Symbol', 'Shares', 'Gain', 'Possession', aligns='lrrr')]
for so in symbol_overviews:
body.append(urwid.Columns([
make_button(so.symbol, self._update_share),
urwid.Text(str(so.quantity), align='right'),
urwid.Text(so.get_current_total_gain().attr_str(),
align='right'),
urwid.Text(str(so.get_current_total_value()),
align='right')]))
total_gain = sum(
so.get_current_total_gain(currency=_BASE_CURRENCY)
for so in symbol_overviews)
total_share_value = sum(
so.get_current_total_value(currency=_BASE_CURRENCY)
for so in symbol_overviews)
body += [
urwid.Columns([
urwid.Text(('bold', 'Total')),
urwid.Text(''),
urwid.Text(('bold', str(total_gain)), align='right'),
urwid.Text(('bold', str(total_share_value)), align='right'),
])
]
body += [urwid.Divider(),
make_button('Update Shares', self._update_shares),
make_button('Add Share', self._add_share),
urwid.Divider()]
self.focus_walker = urwid.SimpleFocusListWalker(body)
urwid.connect_signal(self.focus_walker, 'modified',
lambda: self._cache_focus_value())
if self._last_focus is not None:
self.focus_walker.set_focus(self._last_focus)
return urwid.ListBox(self.focus_walker)
def _show_account(self, account_name):
self.controller.push(AccountDetailView(
self.dc, self.controller, account_name))
def _cache_focus_value(self):
self._last_focus = self.focus_walker.focus
def _update_share(self, k):
raise ValueError(k.get_label())
def _update_shares(self, _):
pass
def _add_share(self, _):
def done(_):
name = name_edit.get_edit_text()
currency = cur_edit.get_edit_text()
try:
self.dc.add_stock_symbol(name, currency)
except data_controller.SymbolExistsException:
pass # TODO: maybe handle
self.controller.pop()
header = urwid.Text('Add Share')
name_edit = urwid.Edit("Symbol: ")
cur_edit = urwid.Edit("Currency: ")
widget = urwid.Pile([
header,
name_edit,
cur_edit,
make_button('Done', done),
make_button('Cancel', lambda _: self.controller.pop()),
])
self.controller.push(urwid.Filler(widget, 'top'))
def _update_balances(self, _):
self.controller.push(UpdateView(self.dc, self.controller))
def _add_account(self, _):
def done(_):
name, _ = name_edit.get_text()
name = name.replace('Name: ', '')
self.dc.create_account(name, _BASE_CURRENCY) # TODO
self.controller.pop()
name_edit = urwid.Edit("Name: ")
header = urwid.Text('Add Account')
widget = urwid.Pile([
header,
name_edit,
make_button('Done', done),
make_button('Cancel', lambda _: self.controller.pop()),
])
self.controller.push(urwid.Filler(widget, 'top'))
class AccountDetailView(urwid.WidgetWrap):
def __init__(self,
dc: data_controller.DataController,
controller: Controller,
account_name: str):
self.dc = dc
self.controller = controller
self.account_name = account_name
super().__init__(self._get())
def _get(self):
transactions = self.dc.get_account_transactions(self.account_name)
body = [Header('Date', 'Info', 'Amount', aligns='llr')]
for t in transactions:
body.append(urwid.Columns([
urwid.Text(t.date),
urwid.Text(t.info),
urwid.Text(t.value.attr_str(), align='right'),
]))
body += [
urwid.Divider(),
make_button('Done', lambda _: self.controller.pop())]
return urwid.ListBox(urwid.SimpleFocusListWalker(body))
class UpdateView(urwid.WidgetWrap):
def __init__(self,
dc: data_controller.DataController,
controller: Controller):
self.dc = dc
self.controller = controller
self.done_button: urwid.AttrMap = None
self.focus_walker: urwid.SimpleFocusListWalker = None
self.accs = None
super(UpdateView, self).__init__(self._get_menu())
def refresh(self):
self._set_w(self._get_menu())
def unhandled_input(self, key):
if key == 'enter':
# is_ok = self._validate()
current_idx = self.focus_walker.focus
# current_widget = self.focus_walker[current_idx]
next_position = self.focus_walker.next_position(current_idx)
if isinstance(self.focus_walker[next_position], urwid.Divider):
next_position += 1
# if not isinstance(current_widget, urwid.Edit):
# return
self.focus_walker.set_focus(next_position)
def _get_menu(self):
body = [urwid.Text('Update'), urwid.Divider()]
self.accs = self.dc.get_all_accounts(category=0)
if not self.accs:
            raise NotImplementedError('UpdateView requires at least one category-0 account')
indent = max(len(acc.name) for acc in self.accs) + 5
for acc in self.accs:
label = acc.name + ':'
indent_acc = (indent - len(label)) * ' '
body.append(urwid.Edit(f"{label}{indent_acc}"))
# make_button(acc.name, lambda _:...),
# urwid.Text(acc.get_formatted_balance(), align='right')]))
def done(_):
all_ok = self._validate()
if all_ok:
self._commit()
self.controller.pop()
self.done_button = make_button('Done', done)
body += [urwid.Divider(),
self.done_button,
make_button('Cancel', lambda _: self.controller.pop()),
]
self.focus_walker = urwid.SimpleFocusListWalker(body)
urwid.connect_signal(self.focus_walker, 'modified',
lambda: self._validate())
return urwid.ListBox(self.focus_walker)
def _commit(self):
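        """For every non-empty edit field, add a transaction for the difference between the
        entered balance and the account's current balance."""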
edit_fields = [e for e in self.focus_walker
if isinstance(e, urwid.Edit)]
assert len(edit_fields) == len(self.accs)
with self.dc.connect():
for e, acc in zip(edit_fields, self.accs):
assert acc.name in e.caption
value = e.get_edit_text()
if not value:
continue
value = float(value)
diff = value - acc.get_balance()
self.dc.add_transaction(acc.name, diff)
def _validate(self):
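        """Check that every non-empty edit parses as a float. Invalid fields get the ':' in
        their caption turned into '!' and the 'err' style; the Done button label and attribute
        reflect the overall state. Returns True when all entered values are valid floats."""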
all_ok = True
for i, e in enumerate(self.focus_walker):
if not isinstance(e, urwid.Edit):
continue
value = e.get_edit_text()
if not value:
continue
try:
float(value)
is_ok = True
except ValueError:
is_ok = False
caption = e.caption
if is_ok and '!' in caption:
caption = caption.replace('!', ':')
e.set_caption(caption)
if not is_ok and '!' not in caption:
caption = caption.replace(':', '!')
e.set_caption(('err', caption))
all_ok = all_ok and is_ok
if not all_ok:
self.done_button.set_attr_map({None: 'err'})
self.done_button.original_widget.set_label(
'Errors: All values must be floats!')
else:
self.done_button.set_attr_map({None: None})
self.done_button.original_widget.set_label(
'Done')
return all_ok
class MainWindow:
def __init__(self, dc: data_controller.DataController):
self.dc = dc
self.controller = Controller()
self.controller.push(SummaryView(dc, self.controller))
self.main_loop = None
def make_main_loop(self):
self.main_loop = urwid.MainLoop(self.draw(),
palette=_PALETTE,
unhandled_input=self.controller.unhandled_input,
event_loop=_main_event_loop)
return self.main_loop
def draw(self):
top = urwid.Overlay(self.controller.view, _BACKGROUND,
align='center', width=('relative', 80),
valign='middle', height=('relative', 80),
min_width=20, min_height=9)
return top
def item_chosen(button, choice):
raise urwid.ExitMainLoop()
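    # NOTE: the lines below are unreachable because of the raise above; they appear to be
    # leftovers from the urwid tutorial and refer to a `main` widget not defined in this module.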
response = urwid.Text([u'You chose ', choice, u'\n'])
done = urwid.Button(u'Ok')
urwid.connect_signal(done, 'click', exit_program)
main.original_widget = urwid.Filler(urwid.Pile([response,
urwid.AttrMap(done, None, focus_map='reversed')]))
def exit_program(button):
raise urwid.ExitMainLoop()
def main():
p = argparse.ArgumentParser()
p.add_argument('--database', '-db', required=True)
flags = p.parse_args()
dc = data_controller.DataController(flags.database)
mw = MainWindow(dc)
loop = mw.make_main_loop()
loop.run()
if __name__ == '__main__':
main()
| 30.168565 | 100 | 0.627001 | 1,641 | 13,244 | 4.828154 | 0.155393 | 0.028398 | 0.032185 | 0.018932 | 0.35378 | 0.291809 | 0.274391 | 0.190963 | 0.140098 | 0.121166 | 0 | 0.00227 | 0.234899 | 13,244 | 438 | 101 | 30.237443 | 0.779631 | 0.029447 | 0 | 0.284483 | 0 | 0 | 0.059813 | 0 | 0 | 0 | 0 | 0.002283 | 0.005747 | 1 | 0.112069 | false | 0.011494 | 0.014368 | 0.002874 | 0.172414 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01f72bd21f2a381c2c81de43a8ad15b68badbae6 | 4,917 | py | Python | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/module_utils/cloudscale.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/module_utils/cloudscale.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/module_utils/cloudscale.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# (c) 2017, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from copy import deepcopy
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_text
API_URL = 'https://api.cloudscale.ch/v1/'
def cloudscale_argument_spec():
return dict(
api_token=dict(fallback=(env_fallback, ['CLOUDSCALE_API_TOKEN']),
no_log=True,
required=True,
type='str'),
api_timeout=dict(default=30, type='int'),
)
class AnsibleCloudscaleBase(object):
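    """Shared base for cloudscale.ch Ansible modules: wraps bearer-token authenticated
    GET/POST/PATCH/DELETE calls against the cloudscale.ch REST API and tracks the
    changed/diff result dictionary."""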
def __init__(self, module):
self._module = module
self._auth_header = {'Authorization': 'Bearer %s' % module.params['api_token']}
self._result = {
'changed': False,
'diff': dict(before=dict(), after=dict()),
}
def _get(self, api_call):
resp, info = fetch_url(self._module, API_URL + api_call,
headers=self._auth_header,
timeout=self._module.params['api_timeout'])
if info['status'] == 200:
return self._module.from_json(to_text(resp.read(), errors='surrogate_or_strict'))
elif info['status'] == 404:
return None
else:
self._module.fail_json(msg='Failure while calling the cloudscale.ch API with GET for '
'"%s".' % api_call, fetch_url_info=info)
def _post_or_patch(self, api_call, method, data):
# This helps with tags when we have the full API resource href to update.
if API_URL not in api_call:
api_endpoint = API_URL + api_call
else:
api_endpoint = api_call
headers = self._auth_header.copy()
if data is not None:
# Sanitize data dictionary
# Deepcopy: Duplicate the data object for iteration, because
# iterating an object and changing it at the same time is insecure
for k, v in deepcopy(data).items():
if v is None:
del data[k]
data = self._module.jsonify(data)
headers['Content-type'] = 'application/json'
resp, info = fetch_url(self._module,
api_endpoint,
headers=headers,
method=method,
data=data,
timeout=self._module.params['api_timeout'])
if info['status'] in (200, 201):
return self._module.from_json(to_text(resp.read(), errors='surrogate_or_strict'))
elif info['status'] == 204:
return None
else:
self._module.fail_json(msg='Failure while calling the cloudscale.ch API with %s for '
'"%s".' % (method, api_call), fetch_url_info=info)
def _post(self, api_call, data=None):
return self._post_or_patch(api_call, 'POST', data)
def _patch(self, api_call, data=None):
return self._post_or_patch(api_call, 'PATCH', data)
def _delete(self, api_call):
resp, info = fetch_url(self._module,
API_URL + api_call,
headers=self._auth_header,
method='DELETE',
timeout=self._module.params['api_timeout'])
if info['status'] == 204:
return None
else:
self._module.fail_json(msg='Failure while calling the cloudscale.ch API with DELETE for '
'"%s".' % api_call, fetch_url_info=info)
def _param_updated(self, key, resource):
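        """Compare the module parameter `key` with its value in `resource`; if it differs,
        record the diff and (outside check mode) PATCH the resource via its href.
        Returns True when an update was needed."""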
param = self._module.params.get(key)
if param is None:
return False
if resource and key in resource:
if param != resource[key]:
self._result['changed'] = True
patch_data = {
key: param
}
self._result['diff']['before'].update({key: resource[key]})
self._result['diff']['after'].update(patch_data)
if not self._module.check_mode:
href = resource.get('href')
if not href:
self._module.fail_json(msg='Unable to update %s, no href found.' % key)
self._patch(href, patch_data)
return True
return False
def get_result(self, resource):
if resource:
for k, v in resource.items():
self._result[k] = v
return self._result
| 36.969925 | 106 | 0.548709 | 563 | 4,917 | 4.563055 | 0.268206 | 0.066174 | 0.021409 | 0.028026 | 0.360841 | 0.352666 | 0.341767 | 0.330479 | 0.317244 | 0.245232 | 0 | 0.008469 | 0.351637 | 4,917 | 132 | 107 | 37.25 | 0.797365 | 0.082774 | 0 | 0.204082 | 0 | 0 | 0.109753 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.091837 | false | 0 | 0.05102 | 0.030612 | 0.27551 | 0.010204 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01fa9fa16e7ec6eb680b54dc81280b527fab92e8 | 751 | py | Python | lesson-2/task2.py | GintoGloss/GeekUniversity-Python | b30da872bd5c68905ab66485ca06bdf3008b3995 | [
"Unlicense"
] | null | null | null | lesson-2/task2.py | GintoGloss/GeekUniversity-Python | b30da872bd5c68905ab66485ca06bdf3008b3995 | [
"Unlicense"
] | null | null | null | lesson-2/task2.py | GintoGloss/GeekUniversity-Python | b30da872bd5c68905ab66485ca06bdf3008b3995 | [
"Unlicense"
] | null | null | null | # 2. Для списка реализовать обмен значений соседних элементов, т.е. Значениями обмениваются элементы с индексами 0 и
# 1, 2 и 3 и т.д. При нечетном количестве элементов последний сохранить на своем месте. Для заполнения списка
# элементов необходимо использовать функцию input().
my_list = []
list_len = input("Сколько элементов хотите ввести? ")
while (not list_len.isdecimal()) or int(list_len) == 0:
list_len = input("Нужно ввести натуральное число! ")
list_len = int(list_len)
while len(my_list) < list_len:
my_list.append(input("#"))
print(my_list)
for index, elem in enumerate(my_list):
if index % 2 == 0 and index < len(my_list) - 1:
my_list[index], my_list[index + 1] = my_list[index + 1], my_list[index]
print(my_list)
| 39.526316 | 116 | 0.723036 | 119 | 751 | 4.411765 | 0.470588 | 0.125714 | 0.08381 | 0.068571 | 0.066667 | 0.066667 | 0.066667 | 0 | 0 | 0 | 0 | 0.017572 | 0.166445 | 751 | 18 | 117 | 41.722222 | 0.821086 | 0.363515 | 0 | 0.166667 | 0 | 0 | 0.139241 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01fde946914f4c6f922ba5d95ad8427ac784b8c5 | 1,759 | py | Python | omniinsight/objs.py | omnibuildplatform/omni-insight | 8a83ac5742a41c07c7ee3f442c2e104b026aa484 | [
"MulanPSL-1.0"
] | null | null | null | omniinsight/objs.py | omnibuildplatform/omni-insight | 8a83ac5742a41c07c7ee3f442c2e104b026aa484 | [
"MulanPSL-1.0"
] | null | null | null | omniinsight/objs.py | omnibuildplatform/omni-insight | 8a83ac5742a41c07c7ee3f442c2e104b026aa484 | [
"MulanPSL-1.0"
] | null | null | null | import os
import yaml
class ProjectData:
def __init__(self, name, sig):
self.name = name
self.sig = sig
class RpmData:
def __init__(self, name):
self.name = name
self.id = ''
self.short_name = ''
self.arch = ''
self.group = ''
self.description = ''
self.requires = []
self.provides = []
self.oe_release = ''
self.sig = ''
self.project = ''
def to_dict(self):
rpm_dict = {
'name': self.name,
'short_name': self.short_name,
'arch': self.arch,
'group': self.group,
'description': self.description,
'requires': self.requires,
'provides': self.provides,
'oe_release': self.oe_release,
'sig': self.sig,
'project': self.project
}
return rpm_dict
class SigData:
def __init__(self, name):
self.name = name
self.mentors = []
self.maintainers = []
self.committers = []
self.description = ''
def to_dict(self):
sig_dict = {
'name': self.name,
'mentors': self.mentors,
'maintainers': self.maintainers,
'committers': self.committers,
'description': self.description
}
return sig_dict
def parse_sig_yaml(self, file_path):
with open(file_path, 'r') as sig_yaml:
yaml_data = yaml.load(sig_yaml, Loader=yaml.SafeLoader)
self.description = yaml_data.get('description')
self.mentors = yaml_data.get('mentors')
self.maintainers = yaml_data.get('maintainers')
self.committers = yaml_data.get('committers')
| 25.867647 | 67 | 0.528709 | 180 | 1,759 | 4.972222 | 0.227778 | 0.080447 | 0.053631 | 0.050279 | 0.069274 | 0.069274 | 0.069274 | 0.069274 | 0 | 0 | 0 | 0 | 0.351904 | 1,759 | 67 | 68 | 26.253731 | 0.785088 | 0 | 0 | 0.196429 | 0 | 0 | 0.086981 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.035714 | 0 | 0.232143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01fe3b13e6d2f50b47795caf8363733e1ca35753 | 812 | py | Python | main/coin-change/coin-change-cache.py | EliahKagan/old-practice-snapshot | 1b53897eac6902f8d867c8f154ce2a489abb8133 | [
"0BSD"
] | null | null | null | main/coin-change/coin-change-cache.py | EliahKagan/old-practice-snapshot | 1b53897eac6902f8d867c8f154ce2a489abb8133 | [
"0BSD"
] | null | null | null | main/coin-change/coin-change-cache.py | EliahKagan/old-practice-snapshot | 1b53897eac6902f8d867c8f154ce2a489abb8133 | [
"0BSD"
] | null | null | null | #!/usr/bin/env python3
import functools
def count_ways(coins, total):
length = len(coins)
@functools.lru_cache(maxsize=None)
def _solve(index, subtot):
value = coins[index]
return sum(solve(index + 1, next_subtot)
for next_subtot in range(subtot, -1, -value))
def solve(index, subtot):
if subtot == 0:
return 1
if index == length:
return 0
return _solve(index, subtot)
return solve(0, total)
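# Worked example (illustration only, not part of the original solution):
# count_ways([1, 2, 5], 5) == 4, since 5 = 5 = 2+2+1 = 2+1+1+1 = 1+1+1+1+1.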
def read_record(length):
ret = list(map(int, input().split()))
if len(ret) != length:
raise ValueError('wrong record length')
return ret
def run():
total, length = read_record(2)
coins = read_record(length)
print(count_ways(coins, total))
if __name__ == '__main__':
run()
| 21.945946 | 64 | 0.598522 | 104 | 812 | 4.5 | 0.442308 | 0.08547 | 0.102564 | 0.081197 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013722 | 0.28202 | 812 | 36 | 65 | 22.555556 | 0.789022 | 0.025862 | 0 | 0 | 0 | 0 | 0.034177 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.192308 | false | 0 | 0.038462 | 0 | 0.461538 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01fe4da65b5536a35e63367696f21e9c251539ba | 4,686 | py | Python | WebCrawler/WebCrawler.py | chenyz2000/MiniProjects | 33182e6b98190cd72aed228ab6c4b64a8ea4ebdb | [
"MIT"
] | null | null | null | WebCrawler/WebCrawler.py | chenyz2000/MiniProjects | 33182e6b98190cd72aed228ab6c4b64a8ea4ebdb | [
"MIT"
] | null | null | null | WebCrawler/WebCrawler.py | chenyz2000/MiniProjects | 33182e6b98190cd72aed228ab6c4b64a8ea4ebdb | [
"MIT"
] | null | null | null | import math
import random
import time
import re
from queue import Queue
import urllib.request
import urllib.error
import jieba
from bs4 import BeautifulSoup
urlSet = set()
urlList = []
doc = 0
que = Queue()
user_agents = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',
'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0'
]
ipList = ["112.85.129.100:9999", "112.85.175.4:9999", "112.87.70.92:9999"]
# proxy_support = urllib.request.ProxyHandler({"http": random.choice(ipList)})
# opener = urllib.request.build_opener(proxy_support)
# urllib.request.install_opener(opener)
def get_html(url):
req = urllib.request.Request(url=url, headers={'User-Agent': random.choice(user_agents)})
link = urllib.request.urlopen(req, timeout=1)
return link.read()
def getSave(url):
    soup = BeautifulSoup(get_html(url), 'html.parser')  # initialize BeautifulSoup with the html.parser backend
    # Extract hyperlinks
    for a in soup.findAll('a', href=True):
        u = a.get("href")
        if u and ('@suda.edu.cn' not in u) and ("javascript" not in u):
            if u[0:4] == "http" and "suda" not in u:
                continue  # skip external (non-suda) links; 'break' here would drop the rest of the page's links
if u[0:4] != "http":
if u[0] == '/':
u = re.findall("http.*edu.cn", url)[0]+u
else:
site = re.findall("http.*/", url)[0]
if site[-2] == '/':
site = re.findall("http.*/", url+'/')[0]
u = site+u
if u[-1] == '/':
u = u[0:len(u)-1]
if u not in urlSet:
que.put(u)
urlSet.add(u)
    # Extract the main body text
[script.extract() for script in soup.findAll('script')]
[style.extract() for style in soup.findAll('style')]
soup.prettify()
content = re.sub("<[^>]*>", '', soup.prettify())
    content = re.sub(r"\s{2,}", "\n", content)
with open("{}".format(doc), "w", encoding='utf-8') as f:
f.write(content)
def search():
    query = input("Crawling finished, please enter your query: ").split()  # read the query
    queryDict = {}  # number of occurrences of each term in the query
    for i in query:
        if i in queryDict:
            queryDict[i] += 1
        else:
            queryDict[i] = 1
    queryDf = {w: 0 for w in queryDict}  # document frequency (df) of each query term, 0 by default
    fenciDict = []  # per-document word counts from jieba word segmentation
    for i in range(len(urlList)):
        with open("{}".format(i), "r", encoding='utf-8') as f:
            s = f.read()
        fenci = jieba.lcut_for_search(s)
        fenciSet = set(fenci)
        fenciDict.append({w: fenci.count(w) for w in fenciSet})
        # analogous to the query handling above
        for word in queryDf:
            if word in fenciDict[i]:
                queryDf[word] += 1
                # df is incremented when the keyword appears in the document
    similarList = []
    for i in range(len(urlList)):
        sum_qd = 0.0  # numerator
        sum_q2 = 0.0
        sum_d2 = 0.0  # sqrt(sum_q2 * sum_d2) is the denominator
        for word in queryDict:
            w_query = 1.0 + math.log10(queryDict[word])  # log-tf weight of word in the query
            w_doc = 0  # tf-idf weight of word in document i
            if word in fenciDict[i]:
                # 10000 is used as a nominal corpus size for the idf term
                w_doc = (1.0 + math.log10(fenciDict[i][word])) * math.log10(10000.0 / queryDf[word])
            sum_qd += w_query * w_doc
            sum_q2 += w_query ** 2
            sum_d2 += w_doc ** 2
        similar = 0.0  # cosine similarity
        len_q2d2 = math.sqrt(sum_q2 * sum_d2)
        if math.fabs(len_q2d2) > 1e-5:
            similar = sum_qd / len_q2d2
        similarList.append((i, similar))  # (document id, cosine similarity) tuple
    similarList.sort(key=lambda x: x[1], reverse=True)
    for i in range(min(10, len(similarList))):
        d = similarList[i][0]
        print(urlList[d], similarList[i][1])
if __name__ == "__main__":
    que.put("http://www.suda.edu.cn")
    # while not que.empty():
    for i in range(100):  # a fixed iteration count can be used instead of draining the queue, for testing
        url = que.get()
        # print(url)  # print the URL being visited
        flag = False
        for attempt in range(3):  # give up on a URL after three failed attempts
            try:
                getSave(url)
                flag = True
                break
            except:
                pass
        if flag:
            # only record successfully fetched URLs, so the document ids read by
            # search() line up with the files written by getSave()
            urlList.append(url)
            doc += 1
        else:
            # print("false")  # shows which sites failed to load
            pass
        # throttle the crawl rate; adjustable
        time.sleep(0.2)
        if doc % 10 == 0:
            time.sleep(1.5)
    search()
| 33.234043 | 119 | 0.532224 | 634 | 4,686 | 3.87224 | 0.317035 | 0.010998 | 0.019552 | 0.022403 | 0.123829 | 0.062729 | 0.028513 | 0.020367 | 0 | 0 | 0 | 0.067317 | 0.321596 | 4,686 | 140 | 120 | 33.471429 | 0.704939 | 0.109902 | 0 | 0.096491 | 0 | 0.04386 | 0.155888 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0.017544 | 0.078947 | 0 | 0.114035 | 0.008772 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01ff0fb86e36d267d898975db026d9e74086232c | 7,398 | py | Python | dags/dop/airflow_module/operator/dbt_k8_operator.py | bytecodeio/google_data_services_template | 08b64972e9899971d5c4f892480aa0c067b53c3b | [
"MIT"
] | 63 | 2021-03-30T12:09:40.000Z | 2022-03-04T14:30:11.000Z | dags/dop/airflow_module/operator/dbt_k8_operator.py | bytecodeio/google_data_services_template | 08b64972e9899971d5c4f892480aa0c067b53c3b | [
"MIT"
] | null | null | null | dags/dop/airflow_module/operator/dbt_k8_operator.py | bytecodeio/google_data_services_template | 08b64972e9899971d5c4f892480aa0c067b53c3b | [
"MIT"
] | 8 | 2021-03-30T12:15:55.000Z | 2021-08-22T14:25:30.000Z | import logging
import os
from typing import List, Dict
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
from airflow.sensors.base_sensor_operator import apply_defaults
from dop.component.configuration.env import env_config
from dop.airflow_module.operator import dbt_operator_helper
# List of files generated by dbt docs generate
# https://docs.getdbt.com/reference/commands/cmd-docs
DBT_DOC_FILES = ["index.html", "manifest.json", "catalog.json"]
DBT_DOC_FOLDER = "target"
DBT_USER = "dbtuser"
DBT_RUN_RESULTS_PATH = "target/run_results.json"
# See: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
node_pool_affinity = {
"nodeAffinity": {
# requiredDuringSchedulingIgnoredDuringExecution means in order
# for a pod to be scheduled on a node, the node must have the
# specified labels. However, if labels on a node change at
# runtime such that the affinity rules on a pod are no longer
# met, the pod will still continue to run on the node.
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{
# When nodepools are created in Google Kubernetes
# Engine, the nodes inside of that nodepool are
# automatically assigned the label
# 'cloud.google.com/gke-nodepool' with the value of
# the nodepool's name.
"key": "cloud.google.com/gke-nodepool",
"operator": "In",
"values": ["kubernetes-task-pool"],
}
]
}
]
}
}
}
def retrieve_commit_hash():
with open(
os.path.sep.join([env_config.service_project_path, ".commit-hash"])
) as fp:
return fp.read()
class DbtK8Operator(KubernetesPodOperator):
template_fields = (
"action",
"target",
"dbt_project_name",
"image_tag",
"dbt_arguments",
"gcr_pull_secret_name",
"arguments",
)
ui_color = "#FF694B"
@apply_defaults
def __init__(
self,
dbt_project_name: str,
dbt_version: str,
dbt_arguments: List[Dict],
*args,
**kwargs,
):
"""
        :param dbt_project_name: the dbt project name, matching what is defined in `.dbt-project-repos.json`
:param dbt_version: Not used
:param args:
:param kwargs: must contain the Task entity
"""
task = kwargs["task"]
self.dbt_project_name = dbt_project_name
self.dbt_version = "N/A, this is fixed in the docker image"
self.action = task.kind.action
self.target = task.kind.target
self.dbt_arguments = dbt_arguments
self.gcr_pull_secret_name = env_config.gcr_pull_secret_name
self.image_tag = retrieve_commit_hash()
self._full_refresh = (
False # used to trigger DBT full refresh, modified via execute() override
)
self.arguments = [self.parse_bash_command()]
super(DbtK8Operator, self).__init__(
name=kwargs["task_id"],
cmds=["/bin/bash", "-c"],
arguments=self.arguments,
get_logs=True,
namespace="default",
image=f"eu.gcr.io/{env_config.infra_project_id}/dop-dbt:{self.image_tag}",
is_delete_operator_pod=True,
env_vars={
"DOP_PROJECT_ID": env_config.project_id,
"DOP_LOCATION": env_config.location,
},
image_pull_secrets=self.gcr_pull_secret_name,
affinity=node_pool_affinity,
*args,
**kwargs,
)
def execute(self, context):
"""
Override the parent method to ingest required contexts
"""
dag_run_conf = context["dag_run"].conf if context["dag_run"].conf else {}
full_refresh = dag_run_conf.get("full_refresh", False)
self._full_refresh = full_refresh
logging.info(f"### IS FULL REFRESH ENABLED: {self._full_refresh}")
self.arguments = [self.parse_bash_command(context=context)]
logging.info(f"### Updated arguments: {self.arguments}")
super(DbtK8Operator, self).execute(context=context)
def parse_bash_command(self, context=None):
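        """
        Build the shell command executed in the pod: a pipenv-wrapped dbt invocation for the
        configured target (with optional --full-refresh and extra arguments), followed by a
        gsutil copy of run_results.json to GCS; for `docs generate`, doc files are copied too.
        """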
full_refresh_cmd = ""
if self.target != "run":
full_refresh_cmd = ""
elif self.dbt_arguments:
if self._full_refresh and "--full-refresh" not in [
arg.get("option") for arg in self.dbt_arguments
]:
full_refresh_cmd = "--full-refresh"
elif self._full_refresh:
full_refresh_cmd = "--full-refresh"
cmd_for_additional_arguments = ""
# docs arguments are only used to copy files to GCS, not in the task execution
if self.dbt_arguments and self.target != "docs generate":
cmd_for_additional_arguments = dbt_operator_helper.implode_arguments(
dbt_arguments=self.dbt_arguments
)
cmd_to_run_dbt = (
f"pipenv run dbt --no-use-colors {self.target} --project-dir ./{self.dbt_project_name}"
f" --vars {dbt_operator_helper.parsed_cmd_airflow_context_vars(context=context)}"
f" {cmd_for_additional_arguments}"
f" {full_refresh_cmd};"
f" gsutil cp /home/{DBT_USER}/{self.dbt_project_name}/{DBT_RUN_RESULTS_PATH} gs://{os.getenv('GCS_BUCKET')}/dbt/{DBT_RUN_RESULTS_PATH}"
)
if self.target == "docs generate":
command = self.copy_docs_to_gcs_command()
if command:
cmd_to_run_dbt += f"; {command}"
return cmd_to_run_dbt
def copy_docs_to_gcs_command(self):
"""
Generate gsutil command line to copy doc files generated with dbt docs generate to GCS
"""
command = []
gcs_bucket = dbt_operator_helper.extract_argument(
self.dbt_arguments, "--bucket"
)
if not gcs_bucket:
logging.warning("No bucket argument provided. Skipping copy to GCS")
return ""
gcs_path = dbt_operator_helper.extract_argument(
self.dbt_arguments, "--bucket-path", ""
)
for doc_file in DBT_DOC_FILES:
doc_file_path = (
f"/home/{DBT_USER}/{self.dbt_project_name}/{DBT_DOC_FOLDER}/{doc_file}"
)
logging.info(f"Copying {doc_file} to gs://{gcs_bucket}/{gcs_path}")
command.append(
f"gsutil cp {doc_file_path} gs://{gcs_bucket}/{gcs_path}/{doc_file}"
)
return ";".join(command)
def post_execute(self, context, result=None):
"""
This hook is triggered right after self.execute() is called.
It is passed the execution context and any results returned by the
operator.
"""
dbt_operator_helper.save_run_results_in_bq(
env_config.project_id,
self.dbt_project_name,
f"gs://{os.getenv('GCS_BUCKET')}/dbt/{DBT_RUN_RESULTS_PATH}",
)
| 36.264706 | 147 | 0.598946 | 865 | 7,398 | 4.865896 | 0.285549 | 0.047042 | 0.033262 | 0.025659 | 0.156332 | 0.075077 | 0.059397 | 0.059397 | 0.044191 | 0.018532 | 0 | 0.001165 | 0.303731 | 7,398 | 203 | 148 | 36.44335 | 0.815958 | 0.177751 | 0 | 0.055556 | 0 | 0.013889 | 0.226285 | 0.100927 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.048611 | 0 | 0.138889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
01ff66c7fe5c77fb55b20587a4618914f352a1af | 3,471 | py | Python | birthday.py | jordanvtskier12/Birthday-quiz | 8eb6cfda35ae9e7b3a0b2b7fe9d12d851e778d53 | [
"MIT"
] | null | null | null | birthday.py | jordanvtskier12/Birthday-quiz | 8eb6cfda35ae9e7b3a0b2b7fe9d12d851e778d53 | [
"MIT"
] | null | null | null | birthday.py | jordanvtskier12/Birthday-quiz | 8eb6cfda35ae9e7b3a0b2b7fe9d12d851e778d53 | [
"MIT"
] | null | null | null | """
birthday.py
Author: Jordan
Credit: none
Assignment:
Your program will ask the user the following questions, in this order:
1. Their name.
2. The name of the month they were born in (e.g. "September").
3. The year they were born in (e.g. "1962").
4. The day they were born on (e.g. "11").
If the user's birthday fell on October 31, then respond with:
You were born on Halloween!
If the user's birthday fell on today's date, then respond with:
Happy birthday!
Otherwise respond with a statement like this:
Peter, you are a winter baby of the nineties.
Example Session
Hello, what is your name? Eric
Hi Eric, what was the name of the month you were born in? September
And what year were you born in, Eric? 1972
And the day? 11
Eric, you are a fall baby of the stone age.
"""
name=str(input("Hello, what is your name? "))
month=str(input("Hi "+name+", what was the name of the month you were born in? "))
year=int(input("And what year were you born in, "+name+"? "))
day=int(input("And the day? "))
months = ["", "January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]  # index 0 left empty so month numbers map directly
winter=["December","January","February"]
spring=['March','May','April']
summer=['June','July','August']
fall=['September','October','November']
from datetime import datetime
from calendar import month_name
todaymonth = datetime.today().month
todaydate = datetime.today().day
birthmonth = months[todaymonth]
if month==birthmonth and day==todaydate:
print("Happy birthday!")
elif month=="October" and day==31:
print("You were born on Halloween!")
elif month in winter and year>=2000:
print(str(name + ", you are a winter baby of the two thousands."))
elif month in spring and year>=2000:
print(str(name + ", you are a spring baby of the two thousands."))
elif month in summer and year>=2000:
print(str(name +", you are a summer baby of the two thousands."))
elif month in fall and year>=2000:
print(str(name + ", you are a fall baby of the two thousands."))
elif month in winter and year>=1990 and year<= 2000:
print(str(name + ", you are a winter baby of the nineties."))
elif month in spring and year>=1990 and year<= 2000:
print(str(name + ", you are a spring baby of the nineties."))
elif month in summer and year>=1990 and year<=2000:
print(str(name + ", you are a summer baby of the nineties."))
elif month in fall and year>=1990 and year<=2000:
print(str(name + ", you are a fall baby of the nineties."))
elif month in winter and year>=1980 and year<= 1990:
print(str(name + ", you are a winter baby of the eighties."))
elif month in spring and year>=1980 and year<= 1990:
print(str(name + ", you are a spring baby of the eighties."))
elif month in summer and year>=1980 and year<=1990:
print(str(name + ", you are a summer baby of the eighties."))
elif month in fall and year>=1980 and year<=1990:
print(str(name + ", you are a fall baby of the eighties."))
elif month in winter and year<=1980:
print(str(name + ", you are a winter baby of the Stone Age."))
elif month in spring and year<=1980:
print(str(name + ", you are a spring baby of the Stone Age."))
elif month in summer and year<=1980:
print(str(name +", you are a summer baby of the Stone Age."))
elif month in fall and year<=1980:
print(str(name + ", you are a fall baby of the Stone Age."))
| 33.057143 | 139 | 0.673869 | 579 | 3,471 | 4.037997 | 0.177893 | 0.071856 | 0.053892 | 0.102652 | 0.670231 | 0.627887 | 0.590676 | 0.469204 | 0.389222 | 0.355004 | 0 | 0.041862 | 0.201671 | 3,471 | 105 | 140 | 33.057143 | 0.801877 | 0.228464 | 0 | 0 | 0 | 0 | 0.366904 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.04 | 0 | 0.04 | 0.36 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bf00295358fe63143dc1908bc85278216ed3616f | 5,093 | py | Python | models/dyn_model.py | zhouxian/GNS-PyTorch | c2401e11cfaee06c2108369dc55e15d8a2b52a7c | [
"MIT"
] | 1 | 2022-03-24T14:15:11.000Z | 2022-03-24T14:15:11.000Z | models/dyn_model.py | zhouxian/GNS-PyTorch | c2401e11cfaee06c2108369dc55e15d8a2b52a7c | [
"MIT"
] | null | null | null | models/dyn_model.py | zhouxian/GNS-PyTorch | c2401e11cfaee06c2108369dc55e15d8a2b52a7c | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from config import _C as C
from models.layers.GNN_dmwater import GraphNet
from scipy import spatial
import numpy as np
import utils
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.node_dim_in = C.NET.NODE_FEAT_DIM_IN
self.edge_dim_in = C.NET.EDGE_FEAT_DIM_IN
self.hidden_size = C.NET.HIDDEN_SIZE
self.out_size = C.NET.OUT_SIZE
num_layers = C.NET.GNN_LAYER
self.particle_emb = nn.Embedding(C.NUM_PARTICLE_TYPES, C.NET.PARTICLE_EMB_SIZE)
self.node_encoder = nn.Sequential(
nn.Linear(self.node_dim_in, self.hidden_size),
nn.ReLU(),
nn.Linear(self.hidden_size, self.hidden_size),
nn.ReLU(),
nn.Linear(self.hidden_size, self.hidden_size),
nn.LayerNorm(self.hidden_size)
)
self.edge_encoder = nn.Sequential(
nn.Linear(self.edge_dim_in, self.hidden_size),
nn.ReLU(),
nn.Linear(self.hidden_size, self.hidden_size),
nn.ReLU(),
nn.Linear(self.hidden_size, self.hidden_size),
nn.LayerNorm(self.hidden_size)
)
self.graph = GraphNet(layers=num_layers)
self.decoder = nn.Sequential(
nn.Linear(self.hidden_size, self.hidden_size),
nn.ReLU(),
nn.Linear(self.hidden_size, self.hidden_size),
nn.ReLU(),
nn.Linear(self.hidden_size, self.out_size),
)
def _construct_graph_nodes(self, poss, particle_type, metadata):
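        """Build per-particle node features: the normalized velocity history (finite
        differences of positions), distances to the domain walls clipped to [-1, 1] in units
        of the connectivity radius, and a learned particle-type embedding."""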
vels = utils.time_diff(poss)
vels = (vels - metadata['vel_mean'])/metadata['vel_std']
n_vel, d_vel = vels.shape[1], vels.shape[2]
assert n_vel == C.N_HIS - 1
vels = vels.reshape([-1, n_vel*d_vel])
pos_last = poss[:, -1]
dist_to_walls = torch.cat(
[pos_last - metadata['bounds'][:, 0],
-pos_last + metadata['bounds'][:, 1]], 1)
dist_to_walls = torch.clip(dist_to_walls/C.NET.RADIUS, -1, 1)
type_emb = self.particle_emb(particle_type)
node_attr = torch.cat([vels,
dist_to_walls,
type_emb], axis=1)
return node_attr
def _construct_graph_edges(self, pos):
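        """Build radius-graph edges with a KD-tree: particle pairs within C.NET.RADIUS become
        edges in both directions (plus optional self-edges). Edge features are the normalized
        offset vector and its norm; `collapsed` flags a blown-up edge count during rollout."""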
device = pos.device
collapsed = False
n_particles = pos.shape[0]
# Calculate undirected edge list using KDTree
point_tree = spatial.cKDTree(pos.detach().cpu().numpy())
undirected_pairs = np.array(list(point_tree.query_pairs(C.NET.RADIUS, p=2))).T
undirected_pairs = torch.from_numpy(undirected_pairs).to(device)
pairs = torch.cat([undirected_pairs, torch.flip(undirected_pairs, dims=(0,))], dim=1).long()
if C.NET.SELF_EDGE:
self_pairs = torch.stack([torch.arange(n_particles, device=device),
torch.arange(n_particles, device=device)])
pairs = torch.cat([pairs, self_pairs], dim=1)
# check if prediction collapsed in long term unrolling
if pairs.shape[1] > C.NET.MAX_EDGE_PER_PARTICLE * n_particles:
collapsed = True
senders = pairs[0]
receivers = pairs[1]
# Calculate corresponding relative edge attributes (distance vector + magnitude)
dist_vec = (pos[senders] - pos[receivers])
dist_vec = dist_vec / C.NET.RADIUS
dist = torch.linalg.norm(dist_vec, dim=1, keepdims=True)
edges = torch.cat([dist_vec, dist], dim=1)
return edges, senders, receivers, collapsed
def forward(self, poss, particle_type, metadata, nonk_mask, tgt_poss, num_rollouts=10, phase='train'):
pred_accns = []
pred_poss = []
for i in range(num_rollouts):
nodes = self._construct_graph_nodes(poss, particle_type, metadata)
edges, senders, receivers, collapsed = self._construct_graph_edges(poss[:, -1])
nodes = self.node_encoder(nodes)
edges = self.edge_encoder(edges)
nodes, edges = self.graph(nodes, edges, senders, receivers)
pred_accn = self.decoder(nodes)
pred_acc = pred_accn * metadata['acc_std'] + metadata['acc_mean']
pred_accns.append(pred_accn)
prev_vel = poss[:, -1] - poss[:, -2]
pred_pos = poss[:, -1] + prev_vel + pred_acc
# replace kinematic nodes
pred_pos = torch.where(nonk_mask[:, None].bool(), pred_pos, tgt_poss[:, i])
poss = torch.cat([poss[:, 1:], pred_pos[:, None]], dim=1)
pred_poss.append(pred_pos)
if collapsed:
break
pred_accns = torch.stack(pred_accns).permute(1, 0, 2)
pred_poss = torch.stack(pred_poss).permute(1, 0, 2)
outputs = {
'pred_accns': pred_accns,
'pred_poss': pred_poss,
'pred_collaposed': collapsed
}
return outputs
| 36.378571 | 106 | 0.591793 | 653 | 5,093 | 4.376723 | 0.229709 | 0.06648 | 0.088174 | 0.056683 | 0.227782 | 0.175647 | 0.130861 | 0.130861 | 0.130861 | 0.130861 | 0 | 0.009744 | 0.294718 | 5,093 | 140 | 107 | 36.378571 | 0.785913 | 0.039073 | 0 | 0.133333 | 0 | 0 | 0.016564 | 0 | 0 | 0 | 0 | 0 | 0.009524 | 1 | 0.038095 | false | 0 | 0.07619 | 0 | 0.152381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bf02ba4cab7446a1e4a8e195ce66a394e9365faf | 1,224 | py | Python | reuse_model_layer.py | MorvanZhou/Tensorflow2-Tutorial | 871c627786d557f04db2dc5334da664a314d85f7 | [
"Apache-2.0"
] | 193 | 2019-10-22T07:15:34.000Z | 2022-03-30T12:45:55.000Z | reuse_model_layer.py | LAJsisyphean/Tensorflow2-Tutorial | 871c627786d557f04db2dc5334da664a314d85f7 | [
"Apache-2.0"
] | null | null | null | reuse_model_layer.py | LAJsisyphean/Tensorflow2-Tutorial | 871c627786d557f04db2dc5334da664a314d85f7 | [
"Apache-2.0"
] | 51 | 2019-11-06T12:52:41.000Z | 2022-03-30T07:31:45.000Z | from tensorflow import keras
import numpy as np
data_x = np.random.normal(size=[1000, 1])
noise = np.random.normal(size=[1000, 1]) * 0.2
data_y = data_x * 3. + 2. + noise
train_x, train_y = data_x[:900], data_y[:900]
test_x, test_y = data_x[900:], data_y[900:]
# define your reusable layers in here
l1 = keras.layers.Dense(10, activation=keras.activations.relu)
class Model(keras.Model):
def __init__(self):
super(Model, self).__init__()
self.l1 = l1 # this is a reusable layer
self.l2 = keras.layers.Dense(1) # this is NOT a reusable layer
def call(self, x, training=None, mask=None):
x = self.l1(x)
x = self.l2(x)
return x
model1 = Model()
model2 = Model()
model1.build((None, 1))
model2.build((None, 1))
model1.compile(
optimizer=keras.optimizers.SGD(0.01),
loss=keras.losses.MeanSquaredError(),
metrics=[keras.metrics.MeanSquaredError()],
)
# train model1 for a while
model1.fit(train_x, train_y, batch_size=32, epochs=3, validation_split=0.2, shuffle=True)
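# Both models wrap the very same `l1` object, so training model1 also updates model2's l1
# and the first check prints True; each model builds its own `l2`, so the second check
# normally prints False (the two Dense(1) layers get independent random initializations).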
print("l1 is reused: ", np.all(model1.l1.get_weights()[0] == model2.l1.get_weights()[0]))
print("l2 is reused: ", np.all(model1.l2.get_weights()[0] == model2.l2.get_weights()[0])) | 28.465116 | 89 | 0.669935 | 198 | 1,224 | 4.005051 | 0.393939 | 0.025221 | 0.055486 | 0.045397 | 0.148802 | 0.100883 | 0.042875 | 0 | 0 | 0 | 0 | 0.065025 | 0.170752 | 1,224 | 43 | 90 | 28.465116 | 0.716256 | 0.093137 | 0 | 0 | 0 | 0 | 0.025316 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.068966 | 0 | 0.206897 | 0.068966 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bf0473077d1f391cb46585a278833cc7fc757836 | 1,932 | py | Python | usage/python/tools/source/globus/usage/cwscorev2packet.py | jtfrey/globus-toolkit | ee55e99c6d6a6dd2dbd4246c0537e0b083069a5d | [
"Apache-2.0"
] | 44 | 2015-02-04T22:01:05.000Z | 2021-01-27T21:18:47.000Z | usage/python/tools/source/globus/usage/cwscorev2packet.py | jtfrey/globus-toolkit | ee55e99c6d6a6dd2dbd4246c0537e0b083069a5d | [
"Apache-2.0"
] | 69 | 2015-04-07T16:07:26.000Z | 2020-06-17T20:00:34.000Z | usage/python/tools/source/globus/usage/cwscorev2packet.py | ellert/globus-toolkit | 14761278bf048b0d9bd3d46ab4c3c987b968f2d3 | [
"Apache-2.0"
] | 51 | 2015-04-07T14:29:47.000Z | 2021-09-23T08:44:18.000Z | # Copyright 1999-2009 University of Chicago
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Object definition for processing C WS Core (version 2) usage packets.
"""
from globus.usage.cwscorev1packet import CWSCoreV1Packet
class CWSCoreV2Packet(CWSCoreV1Packet):
"""
C WS Core Usage Packet (version 2). Adds a container id, start/stop events
and service list
"""
insert_statement = '''
INSERT INTO c_ws_core_packets(
component_code,
version_code,
send_time,
ip_address,
container_id,
event_type,
service_list)
VALUES (%s, %s, %s, %s, %s, %s, %s)'''
def values(self, dbclass):
"""
Return a values tuple which matches the parameters in the class's
insert_statement.
Arguments:
self -- A CWSCoreV2Packet object
dbclass -- Database driver module for driver-specific type bindings
Returns:
Tuple containing
(component_code, version_code, send_time, ip_address,
container_id, event_type, service_list)
"""
return (
self.component_code,
self.packet_version,
dbclass.Timestamp(*self.send_time),
self.ip_address,
self.data.get('ID'),
self.data.get('EVENT'),
self.data.get('SERVICES'))
| 32.745763 | 78 | 0.628882 | 235 | 1,932 | 5.07234 | 0.506383 | 0.050336 | 0.012584 | 0.013423 | 0.126678 | 0.126678 | 0.120805 | 0.120805 | 0.120805 | 0.120805 | 0 | 0.013879 | 0.291408 | 1,932 | 58 | 79 | 33.310345 | 0.85683 | 0.544513 | 0 | 0 | 0 | 0.047619 | 0.412467 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.047619 | 0 | 0.238095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bf06d9a54893683667a74e4a18de2351b9f5889b | 1,101 | py | Python | looker_prometheus_exporter/tests/test_metric_fetcher.py | nested-tech/looker-prometheus-exporter | 7352ea9ea6e5aab7049b39882c7b3832baafc18b | [
"MIT"
] | null | null | null | looker_prometheus_exporter/tests/test_metric_fetcher.py | nested-tech/looker-prometheus-exporter | 7352ea9ea6e5aab7049b39882c7b3832baafc18b | [
"MIT"
] | null | null | null | looker_prometheus_exporter/tests/test_metric_fetcher.py | nested-tech/looker-prometheus-exporter | 7352ea9ea6e5aab7049b39882c7b3832baafc18b | [
"MIT"
] | null | null | null | from unittest import TestCase
from unittest.mock import patch, MagicMock
from requests import Response
from looker_prometheus_exporter.looker_metric_fetcher import LookerMetricFetcher
from looker_prometheus_exporter.looker_auth import LookerAuthenticationError
class TestMetricFetcher(TestCase):
@patch("requests.post")
@patch("looker_prometheus_exporter.looker_metric_fetcher.LookerAuth.get_token", return_value="i_r_bad_token")
def test_raises_auth_error_appropriately(self, mocked_token_getter, mocked_post):
metric_fetcher = LookerMetricFetcher(
client_id="i_r_id", client_secret="i_r_secret", looker_base_url="https://example.com", dashboard_id=42
)
mock_response = MagicMock(Response)
mocked_post.return_value = mock_response
mock_response.status_code = 401
mock_response.json.return_value = {
"message": "Requires authentication.",
"documentation_url": "http://docs.looker.com/"
}
with self.assertRaises(LookerAuthenticationError):
metric_fetcher._fetch_metrics()
| 39.321429 | 114 | 0.749319 | 124 | 1,101 | 6.290323 | 0.483871 | 0.066667 | 0.092308 | 0.115385 | 0.158974 | 0.110256 | 0 | 0 | 0 | 0 | 0 | 0.005482 | 0.171662 | 1,101 | 27 | 115 | 40.777778 | 0.849781 | 0 | 0 | 0 | 0 | 0 | 0.182561 | 0.06267 | 0 | 0 | 0 | 0 | 0.047619 | 1 | 0.047619 | false | 0 | 0.238095 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bf07234d71c7b574935a5582101336be79944027 | 5,801 | py | Python | src/baseline/exnn/exnn/xnn.py | fau-is/gam_comparison | c47e8f8ced281e0a71b7959a211cb5b289ac7606 | [
"MIT"
] | 1 | 2022-03-24T11:26:56.000Z | 2022-03-24T11:26:56.000Z | src/baseline/exnn/exnn/xnn.py | fau-is/gam_comparison | c47e8f8ced281e0a71b7959a211cb5b289ac7606 | [
"MIT"
] | null | null | null | src/baseline/exnn/exnn/xnn.py | fau-is/gam_comparison | c47e8f8ced281e0a71b7959a211cb5b289ac7606 | [
"MIT"
] | null | null | null | import tensorflow as tf
from .base import BaseNet
class xNN(BaseNet):
"""
Explainable neural network (xNN).
xNN is based on the Explainable neural network (Joel et al. 2018) with the following implementation details:
1. Categorical variables should be first converted by one-hot encoding, and we directly link each of the dummy variables as a bias term to final output.
2. The projection layer weights are initialized with univariate coefficient or combination of coefficients, considering the number of subnetworks. See the projection_layer function for details.
3. We train the network and early stop if no improvement occurs in certain epochs.
4. The subnetworks whose scaling factors are close to zero are pruned for parsimony consideration.
5. The pruned network will then be fine-tuned.
Parameters
----------
:type subnet_num: int
:param subnet_num: the number of subnetworks.
:type meta_info: dict
:param meta_info: the meta information of the dataset.
:type subnet_arch: list
:param subnet_arch: optional, default=(10, 6).
The architecture of each subnetworks, the ith element represents the number of neurons in the ith layer.
:type task_type: string
:param task_type: optional, one of {"Regression", "Classification"}, default="Regression". Only support binary classification at current version.
:type batch_size: int
:param batch_size: optional, default=1000, size of minibatches for stochastic optimizers.
:type training_epochs: int
:param training_epochs: optional, default=10000, maximum number of training epochs.
:type activation: tf object
:param activation: optional, default=tf.tanh, activation function for the hidden layer of subnetworks. It can be any tensorflow activation function object.
:type lr_bp: float
:param lr_bp: optional, default=0.001, learning rate for weight updates.
:type beta_threshold: float
:param beta_threshold: optional, default=0.01, percentage threshold for pruning the subnetworks, which means the subnetworks that sum up to 95% of the total sclae will be kept.
:type tuning_epochs: int
:param tuning_epochs: optional, default=500, the number of tunning epochs.
:type l1_proj: float
:param l1_proj: optional, default=0.001, the strength of L1 penalty for projection layer.
:type l1_subnet: float
:param l1_subnet: optional, default=0.001, the strength of L1 penalty for scaling layer.
:type verbose: bool
:param verbose: optional, default=False. If True, detailed messages will be printed.
:type val_ratio: float
:param val_ratio: optional, default=0.2. The proportion of training data to set aside as validation set for early stopping. Must be between 0 and 1.
:type early_stop_thres: int
:param early_stop_thres: optional, default=1000. Maximum number of epochs if no improvement occurs.
:type random_state: int
:param random_state: optional, default=0, the random seed.
References
----------
.. J. Vaughan, A. Sudjianto, E. Brahimi, J. Chen, and V. N. Nair, "Explainable neural networks based on additive index models," The RMA Journal, pp. 40-49, October 2018.
"""
def __init__(self, subnet_num, meta_info, subnet_arch=[10, 6], task_type="Regression",
activation_func=tf.tanh, batch_size=1000, training_epochs=10000, lr_bp=0.001,
beta_threshold=0.05, tuning_epochs=500, l1_proj=0.001, l1_subnet=0.001,
verbose=False, val_ratio=0.2, early_stop_thres=1000, random_state=0):
super(xNN, self).__init__(meta_info=meta_info,
subnet_num=subnet_num,
subnet_arch=subnet_arch,
task_type=task_type,
proj_method="random",
activation_func=activation_func,
bn_flag=False,
lr_bp=lr_bp,
l1_proj=l1_proj,
l1_subnet=l1_subnet,
l2_smooth=0,
batch_size=batch_size,
training_epochs=training_epochs,
tuning_epochs=tuning_epochs,
beta_threshold=beta_threshold,
verbose=verbose,
val_ratio=val_ratio,
early_stop_thres=early_stop_thres,
random_state=random_state)
@tf.function
def train_step_init(self, inputs, labels):
with tf.GradientTape() as tape:
pred = self.__call__(inputs, training=True)
pred_loss = self.loss_fn(labels, pred)
regularization_loss = tf.math.add_n(self.proj_layer.losses + self.output_layer.losses)
total_loss = pred_loss + regularization_loss
grads = tape.gradient(total_loss, self.trainable_weights)
self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
@tf.function
def train_step_finetune(self, inputs, labels):
with tf.GradientTape() as tape:
pred = self.__call__(inputs, training=True)
pred_loss = self.loss_fn(labels, pred)
total_loss = pred_loss
train_weights_list = []
trainable_weights_names = [self.trainable_weights[j].name for j in range(len(self.trainable_weights))]
for i in range(len(self.trainable_weights)):
if self.trainable_weights[i].name != self.proj_layer.weights[0].name:
train_weights_list.append(self.trainable_weights[i])
grads = tape.gradient(total_loss, train_weights_list)
self.optimizer.apply_gradients(zip(grads, train_weights_list))
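# Minimal usage sketch (illustration only, not part of the original module); `meta_info`
# and the mini-batch iterator are placeholders that must follow the exnn package's formats.
#
#   model = xNN(subnet_num=5, meta_info=meta_info, task_type="Regression")
#   for inputs, labels in minibatches:          # hypothetical mini-batch iterator
#       model.train_step_init(inputs, labels)   # one gradient step of the initial training stage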
| 44.282443 | 197 | 0.666264 | 757 | 5,801 | 4.924703 | 0.326288 | 0.052307 | 0.037554 | 0.01529 | 0.136803 | 0.111052 | 0.07618 | 0.07618 | 0.07618 | 0.07618 | 0 | 0.025605 | 0.259438 | 5,801 | 130 | 198 | 44.623077 | 0.842179 | 0.513705 | 0 | 0.166667 | 0 | 0 | 0.006088 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.041667 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |