hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a88165daff3e7a8ccb7f735405f607962cc1a6d2 | 483 | py | Python | example/google_drive/views.py | msi007/google-auth | 3406894a452356d6015ec5725f3824631aacf369 | [
"MIT"
] | null | null | null | example/google_drive/views.py | msi007/google-auth | 3406894a452356d6015ec5725f3824631aacf369 | [
"MIT"
] | null | null | null | example/google_drive/views.py | msi007/google-auth | 3406894a452356d6015ec5725f3824631aacf369 | [
"MIT"
] | null | null | null | from requests import get
from django.http import HttpResponse
from google_auth.views import get_token
from google_auth.decorators import google_auth_required
@google_auth_required
def google_drive(request):
    """Django view: list the authenticated user's Google Drive files.

    The decorator guarantees a valid Google OAuth session; the raw JSON
    response from the Drive API is proxied straight back to the client.
    """
    token = get_token(request)
    response = get(
        'https://www.googleapis.com/drive/v3/files',
        headers={
            'authorization': f'Bearer {token}',
            'content-type': 'application/json',
        },
    )
    return HttpResponse(response)
| 26.833333 | 55 | 0.730849 | 61 | 483 | 5.606557 | 0.540984 | 0.116959 | 0.081871 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002513 | 0.175983 | 483 | 17 | 56 | 28.411765 | 0.856784 | 0 | 0 | 0 | 0 | 0 | 0.184265 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.285714 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a882b77ced94cf4b7d61d840fce415a14637157e | 1,809 | py | Python | biliup/plugins/huya.py | The-D66/biliup | d1e1eb7327b1e06c36892309c337bf6453acfbbc | [
"MIT"
] | 2 | 2022-03-21T13:36:43.000Z | 2022-03-21T13:37:00.000Z | biliup/plugins/huya.py | The-D66/biliup | d1e1eb7327b1e06c36892309c337bf6453acfbbc | [
"MIT"
] | null | null | null | biliup/plugins/huya.py | The-D66/biliup | d1e1eb7327b1e06c36892309c337bf6453acfbbc | [
"MIT"
] | null | null | null | import base64
import html
import json
import requests
from .. import config
from ..engine.decorators import Plugin
from ..plugins import match1, logger, fake_headers
from ..engine.download import DownloadBase
@Plugin.download(regexp=r'(?:https?://)?(?:(?:www|m)\.)?huya\.com')
class Huya(DownloadBase):
    """Huya live-stream downloader plugin.

    Scrapes the room page for the base64-encoded ``"stream"`` JSON blob
    and builds the raw FLV stream URL from it.
    """

    def __init__(self, fname, url, suffix='flv'):
        super().__init__(fname, url, suffix)

    def check_stream(self):
        """Probe the room page; when live, set ``self.raw_stream_url`` and
        ``self.room_title`` and return True.

        Returns None when the page carries no stream blob (room offline,
        or the page layout changed).
        """
        logger.debug(self.fname)
        res = requests.get(self.url, timeout=5, headers=fake_headers)
        res.close()
        huya = match1(res.text, '"stream": "([a-zA-Z0-9+=/]+)"')
        if huya:
            huyacdn = config.get('huyacdn') if config.get('huyacdn') else 'AL'
            # Decode the base64 JSON payload once instead of three times.
            stream_info = json.loads(base64.b64decode(huya).decode())
            huyajson1 = stream_info['data'][0]['gameStreamInfoList']
            huyajson2 = stream_info['vMultiStreamInfo']
            ratio = huyajson2[0]['iBitRate']
            ibitrate_list = []
            sdisplayname_list = []
            for key in huyajson2:
                ibitrate_list.append(key['iBitRate'])
                sdisplayname_list.append(key['sDisplayName'])
            # Duplicate display names make the first entry ambiguous;
            # fall back to the highest advertised bitrate.
            if len(sdisplayname_list) > len(set(sdisplayname_list)):
                ratio = max(ibitrate_list)
            # Prefer the user-configured CDN; else keep the first entry.
            huyajson = huyajson1[0]
            for cdn in huyajson1:
                if cdn['sCdnType'] == huyacdn:
                    huyajson = cdn
            absurl = f'{huyajson["sFlvUrl"]}/{huyajson["sStreamName"]}.{huyajson["sFlvUrlSuffix"]}?' \
                     f'{huyajson["sFlvAntiCode"]}'
            self.raw_stream_url = html.unescape(absurl) + "&ratio=" + str(ratio)
            self.room_title = stream_info['data'][0]['gameLiveInfo']['roomName']
            return True
| 41.113636 | 112 | 0.594251 | 194 | 1,809 | 5.43299 | 0.453608 | 0.060721 | 0.042695 | 0.068311 | 0.106262 | 0.106262 | 0.074004 | 0.074004 | 0 | 0 | 0 | 0.021513 | 0.254837 | 1,809 | 43 | 113 | 42.069767 | 0.760386 | 0 | 0 | 0 | 0 | 0 | 0.162521 | 0.077944 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.210526 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a883f82a41139726fae8e50c7709bab6bdae4431 | 2,011 | py | Python | pycofe/proc/analyse_ensemble.py | ekr-ccp4/jsCoFE | b9424733fb567938927509bc667ef24ed60ddd8c | [
"MIT"
] | null | null | null | pycofe/proc/analyse_ensemble.py | ekr-ccp4/jsCoFE | b9424733fb567938927509bc667ef24ed60ddd8c | [
"MIT"
] | null | null | null | pycofe/proc/analyse_ensemble.py | ekr-ccp4/jsCoFE | b9424733fb567938927509bc667ef24ed60ddd8c | [
"MIT"
] | 1 | 2021-02-25T06:54:15.000Z | 2021-02-25T06:54:15.000Z | ##!/usr/bin/python
#
# ============================================================================
#
# 05.07.17 <-- Date of Last Modification.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ----------------------------------------------------------------------------
#
# COORINATE ENSEMBLE ANALYSIS
#
# Makes structural alignment of an ensemble with Gesamt, reports all
# Gesamt's scores etc. and puts export data widget
#
# Copyright (C) Eugene Krissinel, Andrey Lebedev 2017
#
# ============================================================================
#
# python native imports
import os
import sys
# ccp4-python imports
import pyrvapi
# application imports
#from dtypes import dtype_xyz
from varut import command
# ============================================================================
# import coordinate files function
def gesamt_xyz():
    """Name of the aligned-coordinates PDB file produced by Gesamt."""
    return "gesamt.pdb"
def run ( body, panelId, ensemble ): # body is reference to the main Import class
    """Structurally align a multi-model ensemble with Gesamt and report scores.

    body     -- the main Import task object (supplies outputDir(), runApp()
                and the RVAPI report-document helpers)
    panelId  -- RVAPI panel id to report into
    ensemble -- ensemble data object; nModels, meta and rmsd are set here
    """
    ensemble.nModels = len(ensemble.xyzmeta["xyz"])
    if ensemble.nModels > 1:
        # make command-line parameters for Gesamt
        ensFileName = os.path.join ( body.outputDir(),ensemble.files[0] )
        cmd = []
        for model in ensemble.xyzmeta["xyz"]:
            # one "-s /<model-no>" selector per model in the same file
            cmd += [ ensFileName, "-s", "/" + str(model["model"]) ]
        cmd += [ "-o",gesamt_xyz(),"-o-cs" ]
        if ensemble.nModels==2:
            # pairwise alignment: also request domain decomposition
            cmd += ["-domains"]
        body.storeReportDocument ( panelId )
        cmd += [ "--rvapi-rdoc",body.reportDocumentName() ]
        # run gesamt
        body.runApp ( "gesamt",cmd )
        meta = body.restoreReportDocument()
        try:
            # NOTE(review): eval() assumes the restored report document is
            # trusted tool output; unsafe if it could be user-supplied.
            ensemble.meta = eval(meta)
            ensemble.rmsd = ensemble.meta["rmsd"]
        except:
            # any parse failure simply leaves the ensemble without scores
            ensemble.meta = None
    else:
        body.putMessage1 ( panelId,"Single-chain ensemble, " + \
                           str(ensemble.xyzmeta["xyz"][0]["chains"][0]["size"]) +\
                           " residues",0,1 )
    return
| 27.547945 | 82 | 0.486325 | 185 | 2,011 | 5.27027 | 0.572973 | 0.046154 | 0.055385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012492 | 0.24366 | 2,011 | 72 | 83 | 27.930556 | 0.628534 | 0.416211 | 0 | 0 | 0 | 0 | 0.092415 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.137931 | 0.034483 | 0.241379 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a88c3c8df96aca1812496bb187762068936a4c17 | 480 | py | Python | main.py | brightmaraba/yt_downloader | 77af689657d2055eff581ca019034aab7db2cf6c | [
"MIT"
] | null | null | null | main.py | brightmaraba/yt_downloader | 77af689657d2055eff581ca019034aab7db2cf6c | [
"MIT"
] | null | null | null | main.py | brightmaraba/yt_downloader | 77af689657d2055eff581ca019034aab7db2cf6c | [
"MIT"
] | null | null | null | from pytube import YouTube
def download_video(url):
    """Interactively download one stream of the YouTube video at *url*.

    Lists every available stream with its index, asks the user to pick
    one on stdin, then downloads it to the current directory.
    """
    streams = YouTube(url).streams.all()
    for entry in enumerate(streams):
        # each entry prints as "(index, <Stream ...>)"
        print(entry)
    print("Enter the format you want to download: ")
    choice = int(input("Enter the format number: "))
    streams[choice].download()
    print("Downloaded Succesfully")
if __name__ == "__main__":
print("Enter the URL of the video: ")
url = input()
download_video(url) | 26.666667 | 61 | 0.660417 | 63 | 480 | 4.84127 | 0.52381 | 0.078689 | 0.104918 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.222917 | 480 | 18 | 62 | 26.666667 | 0.817694 | 0 | 0 | 0 | 0 | 0 | 0.253638 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.133333 | 0.266667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a88f1850dd35f86be244a233f4c30786ae93a037 | 786 | py | Python | colorific/workers.py | stkrizh/colorific | 2b49e9598523bfcf12b0195f4718341bd09af901 | [
"MIT"
] | 1 | 2020-07-19T15:04:40.000Z | 2020-07-19T15:04:40.000Z | colorific/workers.py | stkrizh/colorific | 2b49e9598523bfcf12b0195f4718341bd09af901 | [
"MIT"
] | 7 | 2020-07-19T13:45:52.000Z | 2022-03-02T09:26:18.000Z | colorific/workers.py | stkrizh/colorific | 2b49e9598523bfcf12b0195f4718341bd09af901 | [
"MIT"
] | null | null | null | import asyncio
from concurrent.futures import ProcessPoolExecutor
from typing import Coroutine, List
from aiohttp.web import Application
from .settings import config
async def setup(app: Application):
    """aiohttp cleanup context: run startup before the app serves
    requests, teardown after it stops (code after ``yield``)."""
    await on_startup(app)
    yield
    await on_cleanup(app)
async def on_startup(app: Application):
    """Create the process pool and pre-fork every worker.

    Submitting one no-op task per worker forces the pool to spawn all of
    its processes up front, so the first real request pays no start-up
    cost.
    """
    pool_size: int = config.colorific.pool_exec_size
    executor = ProcessPoolExecutor(max_workers=pool_size)
    app["executor"] = executor
    loop = asyncio.get_running_loop()
    warmups = [
        loop.run_in_executor(executor, warm_up) for _ in range(pool_size)
    ]
    await asyncio.wait(warmups)
async def on_cleanup(app: Application):
    """Shut the process pool down, waiting for in-flight tasks."""
    app["executor"].shutdown(wait=True)
def warm_up():
    """No-op task submitted once per process to force the pool to fork."""
| 21.243243 | 80 | 0.711196 | 100 | 786 | 5.42 | 0.44 | 0.04428 | 0.04428 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.19084 | 786 | 36 | 81 | 21.833333 | 0.852201 | 0 | 0 | 0 | 0 | 0 | 0.03183 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.227273 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a88fb684348ff346fc0607e97ede6ffceb4ff5c4 | 2,851 | py | Python | logrec/dataprep/lang/param_finder.py | hlibbabii/log-recommender | 03c975da4029676acb2c29f5915e30b2b29fce6c | [
"MIT"
] | 2 | 2019-04-02T13:46:55.000Z | 2022-03-18T02:33:51.000Z | logrec/dataprep/lang/param_finder.py | hlibbabii/log-recommender | 03c975da4029676acb2c29f5915e30b2b29fce6c | [
"MIT"
] | 119 | 2018-09-10T13:45:41.000Z | 2022-03-11T23:55:07.000Z | logrec/dataprep/lang/param_finder.py | hlibbabii/log-recommender | 03c975da4029676acb2c29f5915e30b2b29fce6c | [
"MIT"
] | 1 | 2019-04-02T14:29:24.000Z | 2019-04-02T14:29:24.000Z | import logging
from logrec.dataprep import base_project_dir
from logrec.dataprep.lang.param_mutator import ParamMutator
file160 = f'{base_project_dir}/160.json'
import json
logger = logging.getLogger(__name__)
def metric(code_percent, code_non_eng, code_non_eng_uq, code_str_percent, code_str_non_eng, code_str_non_eng_uq):
    """Score one threshold combination against the hand-tagged files.

    A file is flagged when either all of its code metrics or all of its
    code-string metrics reach their thresholds.  Returns a tuple of
    (score, precision, recall) where score is the unweighted mean of
    precision and recall.  Relies on the module-level ``tagged_files``
    and ``noneng`` globals set up in the __main__ block.
    """
    true_pos = 0
    false_pos = 0
    for item in tagged_files:
        code_hit = (item['code_percent'] >= code_percent
                    and item['code_non_eng'] >= code_non_eng
                    and item['code_non_eng_uq'] >= code_non_eng_uq)
        string_hit = (item['code_str_percent'] >= code_str_percent
                      and item['code_str_non_eng'] >= code_str_non_eng
                      and item['code_str_non_eng_uq'] >= code_str_non_eng_uq)
        if code_hit or string_hit:
            if item['noneng']:
                true_pos += 1
            else:
                false_pos += 1
    flagged = true_pos + false_pos
    precision = float(true_pos) / flagged if flagged > 0 else 0
    recall = float(true_pos) / noneng
    return precision * 5 / 10 + recall * 5 / 10, precision, recall
if __name__ == '__main__':
    with open(file160) as f:
        # hand-tagged sample: one record per file, with quality signals
        # and a boolean 'noneng' ground-truth label
        tagged_files = json.load(f)
    total = len(tagged_files)
    noneng = sum([i['noneng'] for i in tagged_files])
    eng = total - noneng
    # Generate up to 2M random 6-parameter threshold combinations over
    # the declared start/end ranges (additive or multiplicative steps).
    possibel_var_values, (keys, mutations) = ParamMutator(
        [{'name': 'code_percent', 'start': 0.005, 'end': 0.05, 'plus_or_mult': 'plus', 'koef': 0.0005},
         {'name': 'code_non_eng', 'start': 2.01, 'end': 60, 'plus_or_mult': 'mult', 'koef': 1.4},
         {'name': 'code_non_eng_uq', 'start': 2.01, 'end': 30, 'plus_or_mult': 'mult', 'koef': 1.5},
         {'name': 'code_str_percent', 'start': 0.005, 'end': 0.05, 'plus_or_mult': 'plus', 'koef': 0.0005},
         {'name': 'code_str_non_eng', 'start': 2.01, 'end': 60, 'plus_or_mult': 'mult', 'koef': 1.4},
         {'name': 'code_str_non_eng_uq', 'start': 2.01, 'end': 30, 'plus_or_mult': 'mult', 'koef': 1.4}]) \
        .mutate(2000000, 5)
    results = []
    for mutation in mutations:
        val, prec, recall = metric(*mutation)
        results.append({'params': mutation, 'metric': val, 'prec': prec, 'recall': recall})
    # Show the 100 best-scoring threshold combinations.
    sorted_results = sorted(results, key=lambda x: x['metric'], reverse=True)
    for sorted_result in sorted_results[:100]:
        print(sorted_result)

# Results of a few runs
# {'params': (0.006, 2.01, 2.01, 0.019, 3.939599999999999, 2.01), 'metric': 0.933664996420902, 'prec': 0.84251968503937, 'recall': 0.9727272727272728}
# {'params': (0.0075, 2.8139999999999996, 4.522499999999999, 0.0195, 2.01, 2.01), 'metric': 0.9158249158249158, 'prec': 0.9259259259259259, 'recall': 0.9090909090909091}
# {'params': (0.007, 5.515439999999998, 4.522499999999999, 0.0185, 2.01, 2.8139999999999996), 'metric': 0.9175084175084174, 'prec': 0.9259259259259259, 'recall': 0.9090909090909091}
| 47.516667 | 185 | 0.640828 | 409 | 2,851 | 4.200489 | 0.278729 | 0.055879 | 0.046566 | 0.060536 | 0.337602 | 0.284051 | 0.19383 | 0.193248 | 0.16298 | 0.16298 | 0 | 0.164917 | 0.198176 | 2,851 | 59 | 186 | 48.322034 | 0.586614 | 0.181691 | 0 | 0 | 0 | 0 | 0.19201 | 0.011598 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0 | 0.095238 | 0 | 0.142857 | 0.02381 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a894f6affe55625105069cd6c85f6128ac5c3a46 | 2,604 | py | Python | openimages/demo.py | lehy/shufflenet-v2-chainer | 1ac313328f30b12d61243daf9e6e5a5fffe82c0d | [
"MIT"
] | null | null | null | openimages/demo.py | lehy/shufflenet-v2-chainer | 1ac313328f30b12d61243daf9e6e5a5fffe82c0d | [
"MIT"
] | null | null | null | openimages/demo.py | lehy/shufflenet-v2-chainer | 1ac313328f30b12d61243daf9e6e5a5fffe82c0d | [
"MIT"
] | null | null | null | import chainer
import shufflenet_v2
import chainertools
import cv2
import numpy as np
import time
def main(args):
    """Live webcam demo: multi-label classify frames with ShuffleNetV2.

    Loads model weights from args.snapshot, then loops forever reading
    frames from the default camera, printing the OpenImages labels whose
    sigmoid score exceeds 0.5 plus a smoothed FPS figure.
    """
    with chainer.using_config('train', False):
        with chainer.using_config('enable_backprop', False):
            snapshot_file = args.snapshot
            label_encoder = chainertools.openimages.openimages_label_encoder(
                ".")
            # Network width multiplier k is inferred from the snapshot name.
            k = shufflenet_v2.guess_k(snapshot_file)
            net = shufflenet_v2.ShuffleNetV2(k, label_encoder.num_classes())
            chainer.serializers.load_npz(
                snapshot_file, net, "updater/model:main/predictor/")
            if args.gpu >= 0:
                net.to_gpu(args.gpu)
            camera_id = -1  # -1 = first available camera
            camera = cv2.VideoCapture(camera_id)
            dt_filtered = 0.
            alpha = 0.1  # smoothing factor for the latency estimate
            while True:
                success, frame = camera.read()
                if not success:
                    raise RuntimeError("could not read frame from camera")
                t0 = time.time()
                # Resize to the 224x224 network input, BGR->RGB, HWC->CHW.
                frame_small_orig = cv2.resize(frame, (224, 224))
                frame_small = cv2.cvtColor(frame_small_orig, cv2.COLOR_BGR2RGB)
                frame_small = np.transpose(frame_small, (2, 0, 1))
                input = net.xp.asarray([frame_small], dtype=np.float32)
                # print(input.shape, input)
                output = net(input)
                # Multi-label head: independent sigmoid per class.
                output = chainer.functions.sigmoid(output)
                output = chainer.cuda.to_cpu(output.data[0])
                t1 = time.time()
                labels_idx = np.where(output > 0.5)[0]
                readable_labels = [label_encoder.readable_label_of_encoded_label(
                    lab) for lab in labels_idx]
                dt = t1 - t0
                # Exponential moving average of per-frame inference time.
                dt_filtered = alpha * dt + (1 - alpha) * dt_filtered
                fps = 1. / dt_filtered
                print("{:.2f} fps".format(fps))
                print(list(zip(readable_labels, output[labels_idx])))
                cv2.imshow(snapshot_file, frame)
                cv2.waitKey(1)
def parse_command_line():
    """Parse CLI options for the demo.

    --gpu       GPU device id (-1 = CPU, the default)
    --snapshot  path to the trained-model snapshot file

    Returns the parsed argparse.Namespace.
    """
    # Imported locally: module level only imports argparse under the
    # __main__ guard, so this function would raise NameError when the
    # module is imported and the function called from elsewhere.
    import argparse
    parser = argparse.ArgumentParser(
        description="Demonstration of multilabel classification with Shufflenet v2.")
    parser.add_argument(
        '--gpu', help='Run on gpu (integer id starting at 0) or cpu (-1)', type=int, default=-1)
    parser.add_argument('--snapshot', help='Model snapshot file',
                        default="shufflenet-v2-snapshots/x1/snapshot_iter_335305")
    return parser.parse_args()
if __name__ == '__main__':
    # argparse is only needed when running as a script.
    import argparse
    args = parse_command_line()
    main(args)
| 37.73913 | 96 | 0.577573 | 296 | 2,604 | 4.875 | 0.429054 | 0.04158 | 0.022176 | 0.030492 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029495 | 0.322965 | 2,604 | 68 | 97 | 38.294118 | 0.788996 | 0.009601 | 0 | 0 | 0 | 0 | 0.11331 | 0.029492 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035088 | false | 0 | 0.122807 | 0 | 0.175439 | 0.035088 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a89537fb75c8fd9c57e46a877a19d0c0d1fd8490 | 1,157 | py | Python | 08/solution2.py | k0mmsussert0d/aoc2020 | a3fa08828833c3f60be3e47767e58fe691a7c070 | [
"MIT"
] | null | null | null | 08/solution2.py | k0mmsussert0d/aoc2020 | a3fa08828833c3f60be3e47767e58fe691a7c070 | [
"MIT"
] | null | null | null | 08/solution2.py | k0mmsussert0d/aoc2020 | a3fa08828833c3f60be3e47767e58fe691a7c070 | [
"MIT"
def move(lines, p):
    """Run the hand-held-console program *lines* (list of [op, arg] pairs).

    Executes until the instruction pointer runs past the end (normal
    termination) or an instruction is about to repeat (infinite loop).

    Returns the accumulator on normal termination.  On a detected loop:
    the accumulator when *p* is truthy, else False.
    """
    i = 0
    acc = 0
    seen = set()
    while True:
        if i >= len(lines):
            # Ran off the end: program terminated normally.
            return acc
        if i in seen:
            # About to re-execute an instruction: infinite loop.
            return acc if p else False
        seen.add(i)
        op = lines[i][0]
        if op == 'acc':
            acc += lines[i][1]
            i += 1
        elif op == 'jmp':
            i += lines[i][1]
        elif op == 'nop':
            i += 1
def permut(lines):
    """Try flipping each single jmp<->nop instruction; return the
    accumulator of the first variant that terminates, or None if none
    does.

    *lines* is probed by temporary in-place mutation but is always
    restored before returning.
    """
    for idx, line in enumerate(lines):
        if line[0] not in ('jmp', 'nop'):
            continue
        original_op = line[0]
        lines[idx][0] = 'nop' if original_op == 'jmp' else 'jmp'
        result = move(lines, False)
        lines[idx][0] = original_op
        # BUG FIX: the original tested truthiness (``if acc := ...``),
        # which wrongly discards a successful run whose accumulator is
        # 0.  move() returns False only on an infinite loop, so compare
        # identity with False instead.
        if result is not False:
            return result
if __name__ == '__main__':
    # Parse the puzzle input: one "<op> <signed-int>" instruction per line.
    lines = []
    with open('input1', 'r') as file:
        for i in file.readlines():
            parts = i.split(' ')
            lines.append([parts[0], int(parts[1].strip())])
    # Part 1: accumulator value when the loop is first detected.
    print(move(lines, True))
    # Part 2: accumulator after fixing the single corrupted jmp/nop.
    print(permut(lines))
| 25.152174 | 59 | 0.429559 | 148 | 1,157 | 3.304054 | 0.297297 | 0.06135 | 0.07362 | 0.04499 | 0.294479 | 0.294479 | 0.167689 | 0.167689 | 0.167689 | 0.167689 | 0 | 0.029499 | 0.414002 | 1,157 | 45 | 60 | 25.711111 | 0.69174 | 0 | 0 | 0.268293 | 0 | 0 | 0.031979 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04878 | false | 0 | 0 | 0 | 0.146341 | 0.04878 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8958fdfb1fa1541b812e3886953f59e0a1690b1 | 19,069 | py | Python | common/lang.py | LordKBX/EbookCollection | 3e6ba33fb012b1dbb371704094b02cece66a7e80 | [
"MIT"
] | 1 | 2021-06-03T01:44:50.000Z | 2021-06-03T01:44:50.000Z | common/lang.py | LordKBX/eBookCollection | 3e6ba33fb012b1dbb371704094b02cece66a7e80 | [
"MIT"
] | null | null | null | common/lang.py | LordKBX/eBookCollection | 3e6ba33fb012b1dbb371704094b02cece66a7e80 | [
"MIT"
] | null | null | null | import os
import re
import locale
import json.decoder
import traceback
import common.vars
from jsonschema import validate
from common.json_shema import JSONSchemaGenerator
def trim_lang(text: str = None) -> str:
    """Substitute the {APP_NAME} placeholder in *text*; None passes through."""
    if text is None:
        return None
    return text.replace('{APP_NAME}', common.vars.app_name)
class Dictionary:
    """Read-only wrapper around a nested translation dict.

    Lookup via ``d[key]`` returns None for a missing key, wraps nested
    dicts in another Dictionary, and runs string values through
    trim_lang() so the {APP_NAME} placeholder is substituted.
    """

    def __init__(self, data: dict):
        self.data = data
        # Kept for symmetry with Lang; not consulted by __getitem__.
        self.language = locale.getdefaultlocale()[0]

    def __getitem__(self, value: str) -> any:
        """Return the entry for *value*, or None when absent or on error."""
        try:
            if value not in self.data:
                return None
            item = self.data[value]
            # BUG FIX: the original compared type(item) against the
            # *strings* "dict"/"str", which is always False, so dicts
            # were never wrapped and strings never trimmed.  Use
            # isinstance instead.
            if isinstance(item, dict):
                return Dictionary(item)
            if isinstance(item, str):
                return trim_lang(item)
            return item
        except Exception:
            traceback.print_exc()
            return None
class Lang:
default_language = 'en_US'
language = None
translations = dict()
translations[default_language] = {
"Label": "English (U.S.A)",
"NotImplemented": "Not implemented",
"Global": {
"ArchiverErrorTitle": "Attention",
"ArchiverErrorText": "Archiver folder not defined"
},
"Library": {
"WindowTitle": "EbookCollection - Ebook manager",
"AddBookWindowTitle": "EbookCollection - New books selection",
"HeaderBlockBtnAddBook": "Add Ebook",
"HeaderBlockBtnCreateBook": "Create Empty Ebook",
"HeaderBlockBtnDelBook": "Delete Ebook",
"HeaderBlockBtnSettings": "Settings",
"SortingBlockTreeAll": "All",
"SortingBlockTreeSeries": "Series",
"SortingBlockTreeAuthors": "Authors",
"SortingBlockSearchLabel": "Filter",
"CentralBlockTableTitle": "Title",
"CentralBlockTableAuthors": "Author",
"CentralBlockTableSeries": "Serie",
"CentralBlockTableTags": "Tags",
"CentralBlockTableModified": "Modified",
"CentralBlockTableAdded": "Imported",
"CentralBlockTableContextMenu": {
"EditMetadata": "Edit Metadata",
"EditBook": "Edit eBook"
},
"InfoBlockTitleLabel": "Title",
"InfoBlockSerieLabel": "Serie",
"InfoBlockAuthorsLabel": "Author(s)",
"InfoBlockFileFormatsLabel": "Format(s)",
"InfoBlockSizeLabel": "Size",
"InfoBlockSynopsisLabel": "Synopsis",
"InfoBlockLinkContestMenu": {
"open": "Open file with default application",
"edit": "Edit file",
"delete": "Delete file",
"deleteBook": "Delete book"
},
"DialogConfirmDeleteBookWindowTitle": "Delete Ebook",
"DialogConfirmDeleteBookWindowTitle2": "Delete Ebook file",
"DialogConfirmDeleteBookWindowText": "Confirm Ebook remove ?",
"DialogConfirmDeleteBookWindowText2": "Confirm file remove ?",
"DialogConfirmDeleteBookBtnYes": "Yes",
"DialogConfirmDeleteBookBtnNo": "No",
"blockHeaderTitle": " Toolbar",
"blockSortTitle": " Filters",
"blockInfoTitle": " Infos",
"emptyBooks": {
"WindowTitle": "Add empty eBook",
"Number": "Quantity",
"Authors": "Authors",
"Series": "Series",
"SeriesVolume": "Volume number",
"Name": "eBook(s) title",
"Format": "File format"
},
"emptyBookCreation": {
"Cover": "Cover",
"Chapter1": "Chapters 1",
"Author": "Author:",
"Authors": "Authors:"
},
"Metadata": {
"WindowTitle": "Editing Metadata"
}
},
"Generic": {
"DialogBtnOk": "Ok",
"DialogBtnSave": "Save",
"DialogBtnYes": "Yes",
"DialogBtnNo": "No",
"DialogBtnCancel": "Cancel"
},
"Reader": {
"WindowTitle": "EbookCollection: Reader",
"DialogInfoNoFileWindowTitle": "File Error",
"DialogInfoNoFileWindowText": "File Path not given",
"DialogInfoBadFileWindowTitle": "File Error",
"DialogInfoBadFileWindowText": "Invalid file format",
"ContentTableHeader": "Content Table",
"ContentTableTxtCover": "Cover",
"ContentTableTxtEnd": "End",
"ContentTableTxtPageX": "Page {}",
"ContentTableTxtChapterX": "Chapter {}: {}",
"InfoBlockHeader": "Informations",
"InfoBlockText": "File: {FILE}\n\nTitle: {TITLE}\n\nSeries: {SERIES}\n\nAuthors: {AUTHORS}\n\nFormat: {FORMAT}\n\nSize: {SIZE}",
"ContextMenuInfo": "Show eBook informations",
"ContextMenuCT": "Show Content Table",
"ContextMenuCopyText": "Copy text",
"ContextMenuCopyHTML": "Copy HTML code"
},
"Editor": {
"WindowTitle": "EbookCollection: Editor",
"DialogInfoNoFileWindowTitle": "File Error",
"DialogInfoNoFileWindowText": "File Path not given",
"DialogInfoBadFileWindowTitle": "File Error",
"DialogInfoBadFileWindowText": "Invalid file format",
"BlockToolbar": {
"Header": "Toolbar",
"Save": "Save eBook",
"CheckPointLoad": "Load session checkpoint",
"CheckPointCreate": "Create session checkpoint",
"FileManager": "File Management",
"EditContentTable": "Edit Content Table"
},
"BlockFileListHeader": "Files explorer",
"BlockContentTableHeader": "Content table",
"BlockPreviewHeader": "Preview",
"CentralZoneEmpty": "Please double click a file in the file explorer or an index in the content table for opening it in the editing zone",
"ContentTableHeader": "Content Table",
"FileTableHeader": "File Explorer",
"WebViewDefaultPageContent": [
"<?xml version=\"1.0\" encoding=\"utf-8\"?><html xmlns=\"http://www.w3.org/1999/xhtml\" lang=\"fr\">",
"<head><title>Live Preview</title></head>",
"<body><h3>Live Preview</h3>",
"<p>You could see a here the live preview of the HTML file being edited. The preview will update automatically as you make your changes.</p>",
"<p style=\"font-size:x-small;\">Note that this is a quick preview only, this is not intended to simulate a real digital book reader. Some aspects of your eBook will not work, such as page breaks and page margins.</p>",
"</body></html>"
],
"DialogConfirmSaveWindowTitle": "Save File",
"DialogConfirmSaveWindowText": "Do you confirm saving the file changes ?",
"DialogCreateCheckpointWindowTitle": "Create session checkpoint",
"DialogCreateCheckpointWindowText": "Checkpoint {} successfuly created",
"ChechpointWindow": {
"WindowTitle": "Load Checkpoint",
"btnOk": "Ok",
"btnCancel": "Cancel"
},
"LinkWindow": {
"WindowTitle": "Add/modify link",
"labelUrl": "Link URL",
"labelText": "Link text",
"btnOk": "Ok",
"btnCancel": "Cancel"
},
"ImgWindow": {
"WindowTitle": "Add/modify image",
"labelUrl": "Image URL",
"labelText": "Image alternate texte",
"btnOk": "Ok",
"btnCancel": "Cancel"
},
"FilesWindow": {
"WindowTitle": "Files manager",
"ImportWindowTitle": "Import file",
"FileNameWindowTitle": "Input name",
"FileNameWindowLabel": "Name",
"btnOk": "Ok",
"btnCancel": "Cancel"
},
"ContentTableWindow": {
"WindowTitle": "Content Table Editor",
"ListLabel": "Content Table",
"AddIndexLabel": "Insert new index",
"AddIndexPlaceholder": "Index name",
"ModifyIndexLabel": "Modify index",
"BtnRename": "Rename index",
"BtnDelete": "Delete index",
"NameWindowTitle": "Input name",
"NameWindowLabel": "Name",
"btnOk": "Ok",
"btnCancel": "Cancel"
},
"EditPane": {
"Save": "Save File in session",
"Undo": "Undo",
"Redo": "Redo",
"Cut": "Cut",
"Copy": "Copy",
"Paste": "Paste",
"Debug": "Debug document",
"Comment": "Comment selection",
"Prettify": "Prettify File",
"Bold": "Bold",
"Italic": "Italic",
"Underline": "Underline",
"Strikethrough": "Strikethrough",
"Sub": "Put text in sub line",
"Sup": "Put text in super line",
"TextColor": "Text Color",
"BackColor": "Back Color",
"AlignLeft": "Align Left",
"AlignCenter": "Align Center",
"AlignRight": "Align Right",
"Align Justify": "Align Justify",
"List": "List",
"NumericList": "Numeric List",
"Link": "Link",
"Image": "Image"
},
"ColorPicker": {
"WindowTitle": "Color Picker",
"Palette": "Color palette",
"ChromaGraph": "Chroma Graph",
"RgbBox": "RGB",
"RgbR": "R",
"RgbG": "G",
"RgbB": "B",
"RgbHexa": "Hexa",
"Preview": "Color Preview"
}
},
"Time": {
"template": {
"numeric_date": "%m/%d/%Y",
"numeric_datetime": "%m/%d/%Y %H:%M",
"textual_date": "$month %d %Y",
"textual_datetime": "$month %d %Y at %H:%M"
},
"months_short": ["jan.", "feb.", "march", "april", "may", "june", "july.", "aug.", "sept.", "oct.", "nov.",
"dec."],
"months_full": [
"January", "February", "March", "April", "May", "June", "July", "August",
"September", "October", "November", "December"
]
},
"Settings": {
"WindowTitle": "Settings",
"TabGlobalTitle": "Global",
"TabMetadataTitle": "Metadata",
"TabPluginsTitle": "Plugins",
"TabAboutTitle": "About",
"TabSyncTitle": "Sync",
"LanguageGroupTitle": "Language",
"LanguageAutomatic": "< System defined >",
"LanguageImportTitle": "Import translation file",
"Import": "Import",
"ImportErrorTitle": "ERROR",
"ImportErrorFileType": "File type invalid",
"ImportErrorFileCorrupted": "File corrupted",
"StyleGroupTitle": "Style",
"StyleLight": "Light",
"StyleDark": "Dark",
"StyleImportTitle": "Import style file",
"LibraryGroupTitle": "Library",
"LibraryFolder": "Library storage folder",
"LibraryFolderBrowse": "Browse...",
"ArchiverGroupTitle": "Archiver",
"ArchiverGroupTitleNT": "7zip",
"ArchiverFolder": "Folder Path",
"ArchiverFolderTest": "Test",
"ArchiverFolderBrowse": "...",
"DefaultCoverGroupTitle": "Default Cover Style",
"DefaultCoverBackground": "Background Color",
"DefaultCoverPattern": "Pattern",
"DefaultCoverPatternColor": "Pattern Color",
"DefaultCoverTitle": "Title Color",
"DefaultCoverSeries": "Series Color",
"DefaultCoverAuthors": "Authors Color",
"eBookImportGroupTitle": "eBook Import settings",
"eBookImportFilenameTpl": "File name parse template",
"eBookImportFilenameTplSeparator": "File name parse separator",
"AboutLabel": "{APP_NAME}\n\n License MIT\n\n Copyright (c) 2020-2021 Boulain Kévin",
"AboutBtnLicense": "License",
"AboutBtnWebsite": "Web Site",
"pluginsSettingsButton": "Settings",
"pluginsUninstallButton": "Uninstall",
"pluginsSettingsTitle": "Settings of plugin-in '{}'",
"pluginsForApp": "For App {}",
"pluginsArchetype": "Type {}",
"DialogConfirmDeletePluginWindowTitle": "Uninstall Plug-in",
"DialogConfirmDeletePluginWindowText": "Confirm Plug-in Uninstall ?",
"DialogConfirmDeletePluginBtnYes": "Yes",
"DialogConfirmDeletePluginBtnNo": "No",
"SyncInterfaceGroupTitle": "Listening interface",
"SyncInterfaceIP": "IP",
"SyncInterfacePort": "Port",
"SyncInterfaceProtocol": "Protocol",
"SyncIdentificationGroupTitle": "Identification",
"SyncIdentificationUser": "User",
"SyncIdentificationPassword": "Password"
}
}
def __init__(self):
    """Initialise the active language from the system locale and load all translation files."""
    system_code = locale.getdefaultlocale()[0]
    self.set_lang(system_code)
    self.__load_langs()
def __getitem__(self, value: str) -> any:
    """
    Get a translation by key.

    A key containing '/' is treated as a nested path and delegated to
    ``get``; otherwise the key is looked up at the top level of the
    current language's table.

    :param value: translation key, or a '/'-separated nested path
    :return: the translation (wrapped dict, trimmed str, or raw value),
        or None when the key is missing or any error occurs
    """
    ln = self.test_lang(self.language)
    try:
        if '/' in value:
            return self.get(value, ln)
        if value not in self.translations[ln]:
            return None
        entry = self.translations[ln][value]
        # BUG FIX: the original compared type(entry) against the *strings*
        # "dict"/"str", which is always False, so these branches were dead.
        # Use isinstance(), consistent with the sibling get() method.
        if isinstance(entry, dict):
            return Dictionary(entry)
        if isinstance(entry, str):
            return trim_lang(entry)
        return entry
    except Exception:
        return None
def get(self, path: str, lang: str = None, compress: bool = True) -> str:
    """
    Resolve a '/'-separated key path inside the translation tree.

    :param path: nested key path, e.g. "Group/SubKey"
    :param lang: language code; resolved via test_lang() when None
    :param compress: when the leaf is a list, join its parts and trim
        them into a single string instead of returning the raw list
    :return: the resolved translation, or None when the language or any
        path component is unknown
    """
    lang = self.test_lang(lang)
    if lang not in self.translations:
        return None
    path_tab = path.split('/')
    try:
        base = self.translations[lang]
        for obj in path_tab:
            if obj in base:
                if isinstance(base[obj], dict) is True:
                    # Descend one level and continue with the next part.
                    base = base[obj]
                else:
                    if isinstance(base[obj], list) is True:
                        if compress is True:
                            # Lists hold fragments of one long string.
                            return trim_lang("".join(base[obj]))
                        else:
                            return base[obj]
                    if isinstance(base[obj], str) is True:
                        return trim_lang(base[obj])
                    else:
                        return base[obj]
            else:
                return None
        # NOTE(review): a path ending on a dict node falls through here and
        # implicitly returns None — presumably intentional; confirm.
    except Exception:
        traceback.print_exc()
        return None
def test_lang(self, language_code: str = None) -> str:
if language_code is None:
language_code = self.language
if language_code == 'auto':
language_code = locale.getdefaultlocale()[0]
if language_code not in self.translations:
passed = False
tl = locale.getdefaultlocale()[0].split('_')
for lc in self.translations:
tlc = lc.split('_')
if tl[0] == tlc[0]:
language_code = lc
passed = True
break
if passed is False:
language_code = self.default_language
return language_code
def __load_langs(self) -> None:
    """
    Scan the application and user 'langs' directories and (re)load every
    valid translation file into self.translations.

    Each *.json file is decoded, validated against a JSON schema generated
    from the default language's table, and stored under its file name
    (minus the .json extension).
    """
    directory = common.vars.app_directory + os.sep + "ressources" + os.sep + "langs"
    directory2 = common.vars.app_user_directory + os.sep + "imports" + os.sep + "langs"
    jssgenerator = JSONSchemaGenerator()
    encoder = json.encoder.JSONEncoder()
    print(self.translations)
    # Build the schema from the reference (default) language.
    tab = encoder.encode(self.translations[self.default_language])
    jssgenerator.load(tab)
    schema = jssgenerator.generate()
    try:
        if common.vars.debug is True:
            # In debug mode, also dump the generated schema into the docs.
            with open(common.vars.app_directory + os.sep + "doc" + os.sep + "packages" + os.sep + "lang" + os.sep + "lang.json_schema", 'wt', encoding='utf8') as file:
                file.write(json.dumps(schema, indent=4))
    except Exception:
        traceback.print_exc()
    ext = "json"
    for lang_dir in [directory, directory2]:  # renamed: `dir` shadowed the builtin
        try:
            print(lang_dir)
            for root, directories, files in os.walk(lang_dir, topdown=False):
                for name in files:
                    if re.search("\\.({})$".format(ext), name) is None:
                        continue
                    try:
                        nm = name.replace(".json", "")
                        # BUG FIX: use a context manager so the handle is
                        # closed even when decoding/validation fails.
                        with open(lang_dir + os.sep + name, "r", encoding="utf8") as fp:
                            content = fp.read()
                        # test JSON validity
                        decoder = json.decoder.JSONDecoder()
                        tab = decoder.decode(content)
                        # test package JSON schema
                        validate(instance=tab, schema=schema)
                        print('lang ' + nm + ' OK')
                        # SECURITY/BUG FIX: the original ran eval() on the
                        # raw file content; reuse the already-decoded JSON.
                        self.translations[nm] = tab
                    except Exception:
                        traceback.print_exc()
        except Exception:
            pass
def refresh(self) -> None:
    """Reload every translation file from disk."""
    self.__load_langs()
def set_lang(self, lang: str) -> bool:
    """
    Select the active language.

    :param lang: a loaded language code, or 'auto' for the system locale
    :return: True when the code was accepted, False for an unknown code
    """
    if lang == "auto" or lang in self.translations:
        self.language = lang
        return True
    return False
def get_langs(self) -> list:
    """Return one {'code', 'name'} entry per loaded language (name = its 'Label')."""
    return [
        {"code": code, "name": table['Label']}
        for code, table in self.translations.items()
    ]
| 39.644491 | 235 | 0.501442 | 1,465 | 19,069 | 6.477133 | 0.377474 | 0.028665 | 0.011382 | 0.011592 | 0.115713 | 0.081568 | 0.048688 | 0.039836 | 0.033512 | 0.033512 | 0 | 0.002861 | 0.376842 | 19,069 | 480 | 236 | 39.727083 | 0.795674 | 0.010331 | 0 | 0.143192 | 0 | 0.011737 | 0.371348 | 0.079613 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025822 | false | 0.011737 | 0.046948 | 0 | 0.140845 | 0.016432 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a896475cbbfcc93f2a5dd59f79b84d8e9bd32ed4 | 2,000 | py | Python | job_web/handlers/event.py | csudragonzl/master | e5ea6418ff9676b8c68bf4bbe14cdf06ccae27d8 | [
"MIT"
] | null | null | null | job_web/handlers/event.py | csudragonzl/master | e5ea6418ff9676b8c68bf4bbe14cdf06ccae27d8 | [
"MIT"
] | null | null | null | job_web/handlers/event.py | csudragonzl/master | e5ea6418ff9676b8c68bf4bbe14cdf06ccae27d8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from flask import Blueprint, render_template, abort,\
current_app, request
from flask_login import current_user
from ..models import Event
from sqlalchemy import func,extract
from ..forms import EXP
import pymysql
import pandas as pd
# Blueprint grouping all event-related views, mounted under /event.
event = Blueprint('event', __name__, url_prefix='/event')
@event.route('/')
def index():
    """
    List enabled events, optionally filtered by a keyword.

    Query args:
        page:  1-based page number
        kw:    substring filter on Event.name
        fenxi: when non-empty, render the analysis template instead
    """
    page = request.args.get('page', default=1, type=int)
    kw = request.args.get('kw')
    fenxi = request.args.get('fenxi')
    # BUG FIX: the original executed `Event.is_enable = True`, clobbering
    # the mapped column on the model class, and then filtered with the
    # Python `is` operator (which yields a plain bool, not a SQL
    # expression). Build a real SQLAlchemy criterion instead.
    flt = {Event.is_enable.is_(True)}
    if kw is not None and kw != '':
        flt.update({Event.name.like('%{}%'.format(kw))})
    pagination = Event.query.filter(*flt).paginate(
        page=page,
        per_page=current_app.config['EVENT_INDEX_PER_PAGE'],
        error_out=False
    )
    if fenxi is not None and fenxi != '':
        return render_template('event/index.html', pagination=pagination, kw=kw, active='event')
    return render_template('event/all_event.html', pagination=pagination,
                           kw=kw, filter=EXP, active='event')
@event.route('/<string:event_id>')
def detail(event_id):
    """Render the detail page of one event; 404 when missing or disabled."""
    event_obj = Event.query.get_or_404(event_id)
    if not event_obj.is_enable:
        abort(404)
    # if request.args.get('job'):
    #     page = request.args.get('page', default=1, type=int)
    #     pagination = event_obj.enabled_jobs().order_by(Event.updated_at.desc()).paginate(
    #         page=page, per_page=current_app.config['COMPANY_DETAIL_PER_PAGE'], error_out=False)
    #     return render_template('event/detail.html', pagination=pagination, panel='jobs', company=event_obj)
    return render_template('event/detail.html', event=event_obj, panel='about', active='detail')
@event.route("/event_address", methods=['GET', 'POST'])
def event_address():
return render_template('event/event_address.html')
# return render_template('event/event2.html')
| 36.363636 | 114 | 0.6495 | 261 | 2,000 | 4.796935 | 0.329502 | 0.078275 | 0.095847 | 0.119808 | 0.253994 | 0.177316 | 0.121406 | 0.121406 | 0.059105 | 0 | 0 | 0.006337 | 0.211 | 2,000 | 54 | 115 | 37.037037 | 0.787072 | 0.224 | 0 | 0 | 0 | 0 | 0.12349 | 0.016107 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.194444 | 0.027778 | 0.388889 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a896f0f803f92c7322f0270186c9fa435a022ae1 | 4,469 | py | Python | sampling/randm.py | ShivanganaRawat/ALPO_Segmentation | 14b75d1dce39dd1d308128d978a6c5ed0dc39442 | [
"MIT"
] | 2 | 2021-08-10T14:18:26.000Z | 2022-01-15T04:58:35.000Z | sampling/randm.py | ShivanganaRawat/ALPO_Segmentation | 14b75d1dce39dd1d308128d978a6c5ed0dc39442 | [
"MIT"
] | null | null | null | sampling/randm.py | ShivanganaRawat/ALPO_Segmentation | 14b75d1dce39dd1d308128d978a6c5ed0dc39442 | [
"MIT"
] | 1 | 2022-02-15T09:44:47.000Z | 2022-02-15T09:44:47.000Z | import os
import sys
import json
import csv
import random
import argparse
import torch
import dataloaders
import models
import inspect
import math
from datetime import datetime
from utils import losses
from utils import Logger
from utils.torchsummary import summary
from trainer import Trainer
from torchvision import transforms
from tqdm import tqdm
import torch.nn.functional as F
import numpy as np
import wandb
from wandb import AlertLevel
class Random_Sampling():
def __init__(self):
pass
def get_instance(self, module, name, config, *args):
# GET THE CORRESPONDING CLASS / FCT
return getattr(module, config[name]['type'])(*args, **config[name]['args'])
def create_episodedir(self, cfg, episode):
episode_dir = os.path.join(cfg['exp_dir'], "episode"+str(episode))
if not os.path.exists(episode_dir):
os.mkdir(episode_dir)
else:
print("=============================")
print("Episode directory already exists: {}. Reusing it may lead to loss of old data in the directory.".format(episode_dir))
print("=============================")
cfg['episode'] = episode
cfg['episode_dir'] = episode_dir
cfg['trainer']['save_dir'] = os.path.join(episode_dir,cfg['trainer']['original_save_dir'])
cfg['trainer']['log_dir'] = os.path.join(episode_dir,cfg['trainer']['original_log_dir'])
cfg['labeled_loader']['args']['load_from'] = os.path.join(episode_dir, "labeled.txt")
cfg['unlabeled_loader']['args']['load_from'] = os.path.join(episode_dir, "unlabeled.txt")
return cfg
def random_sample(self, args, config):
#create the train image set
unlabeled_file = os.path.join(config["episode_dir"],"unlabeled.txt")
unlabeled_reader = csv.reader(open(unlabeled_file, 'rt'))
unlabeled_image_set = [r[0] for r in unlabeled_reader]
#create initial labeled and unlabeled image set
new_batch = random.sample(unlabeled_image_set, args.batch_size)
labeled = os.path.join(config['episode_dir'],"labeled.txt")
labeled_reader = csv.reader(open(labeled, 'rt'))
labeled_image_set = [r[0] for r in labeled_reader]
new_labeled = labeled_image_set + new_batch
new_labeled.sort()
unlabeled = os.path.join(config['episode_dir'],"unlabeled.txt")
unlabeled_reader = csv.reader(open(unlabeled, 'rt'))
unlabeled_image_set = [r[0] for r in unlabeled_reader]
new_unlabeled = list(set(unlabeled_image_set) - set(new_batch))
new_unlabeled.sort()
return new_labeled, new_unlabeled
def train_model(self, args, config):
train_logger = Logger()
# DATA LOADERS
labeled_loader = self.get_instance(dataloaders, 'labeled_loader', config)
val_loader = self.get_instance(dataloaders, 'val_loader', config)
test_loader = self.get_instance(dataloaders, 'test_loader', config)
# MODEL
model = self.get_instance(models, 'arch', config, labeled_loader.dataset.num_classes)
#print(f'\n{model}\n')
# LOSS
loss = getattr(losses, config['loss'])(ignore_index = config['ignore_index'])
# TRAINING
trainer = Trainer(
model=model,
loss=loss,
resume=args.resume,
config=config,
train_loader=labeled_loader,
val_loader=val_loader,
test_loader=test_loader,
train_logger=train_logger)
trainer.train()
config['checkpoint_dir'] = trainer._get_checkpoint_dir()
config_save_path = os.path.join(config['checkpoint_dir'], 'updated_config.json')
with open(config_save_path, 'w') as handle:
json.dump(config, handle, indent=4, sort_keys=True)
return config
def update_pools(self, args, config, episode):
new_labeled, new_unlabeled = self.random_sample(args, config)
config = self.create_episodedir(config, episode+1)
with open(os.path.join(config['episode_dir'], "labeled.txt"), 'w') as f:
writer = csv.writer(f)
for image in new_labeled:
writer.writerow([image])
with open(os.path.join(config['episode_dir'], "unlabeled.txt"), 'w') as f:
writer = csv.writer(f)
for image in new_unlabeled:
writer.writerow([image])
return config
| 34.376923 | 136 | 0.639964 | 558 | 4,469 | 4.935484 | 0.234767 | 0.054466 | 0.039942 | 0.034858 | 0.252723 | 0.217865 | 0.217865 | 0.212055 | 0.168482 | 0.111111 | 0 | 0.001464 | 0.235623 | 4,469 | 129 | 137 | 34.643411 | 0.804742 | 0.035802 | 0 | 0.108696 | 0 | 0 | 0.134016 | 0.013495 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065217 | false | 0.01087 | 0.23913 | 0.01087 | 0.369565 | 0.032609 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8988735f8441f763b8761906afae7c54ffd24a3 | 2,144 | py | Python | custom_resources/backup.py | ikben/custom-resources | 6857c4bc86dd118e44221efa43e8298f748bf116 | [
"Apache-2.0"
] | 2 | 2019-05-02T13:06:40.000Z | 2019-12-20T15:48:31.000Z | custom_resources/backup.py | ikben/custom-resources | 6857c4bc86dd118e44221efa43e8298f748bf116 | [
"Apache-2.0"
] | 2 | 2019-09-26T14:18:49.000Z | 2021-09-01T10:52:08.000Z | custom_resources/backup.py | ikben/custom-resources | 6857c4bc86dd118e44221efa43e8298f748bf116 | [
"Apache-2.0"
] | 1 | 2019-08-02T15:26:19.000Z | 2019-08-02T15:26:19.000Z | from six import string_types
from troposphere import Tags
from .LambdaBackedCustomResource import LambdaBackedCustomResource
class BackupVault(LambdaBackedCustomResource):
props = {
'BackupVaultName': (string_types, True),
'BackupVaultTags': (dict, True),
}
@classmethod
def _lambda_policy(cls):
return {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Action": [
"backup:CreateBackupVault",
"backup:DeleteBackupVault",
"backup-storage:MountCapsule",
"backup:DeleteBackupVaultAccessPolicy",
"backup:DeleteBackupVaultNotification",
"kms:CreateGrant",
"kms:GenerateDataKey",
"kms:Decrypt",
"kms:RetireGrant",
"kms:DescribeKey",
],
"Resource": "*",
}],
}
class BackupPlan(LambdaBackedCustomResource):
props = {
'BackupPlan': (dict, True),
'BackupPlanTags ': (Tags, False),
}
@classmethod
def _lambda_policy(cls):
return {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Action": [
"backup:CreateBackupPlan",
"backup:DeleteBackupPlan",
"backup:UpdateBackupPlan",
],
"Resource": "*",
}],
}
class BackupSelection(LambdaBackedCustomResource):
props = {
'BackupPlanId': (string_types, True),
'BackupSelection': (dict, True),
}
@classmethod
def _lambda_policy(cls):
return {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Action": [
"backup:CreateBackupSelection",
"backup:DeleteBackupSelection",
"iam:PassRole",
],
"Resource": "*",
}],
}
| 27.487179 | 66 | 0.467351 | 123 | 2,144 | 8.073171 | 0.447154 | 0.033233 | 0.060423 | 0.07855 | 0.263847 | 0.263847 | 0.263847 | 0.263847 | 0.263847 | 0.263847 | 0 | 0.019185 | 0.416511 | 2,144 | 77 | 67 | 27.844156 | 0.77458 | 0 | 0 | 0.492537 | 0 | 0 | 0.278451 | 0.126866 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044776 | false | 0.014925 | 0.044776 | 0.044776 | 0.223881 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8a17b75720219a9ea11cf2ca647fb362cd471b9 | 3,397 | py | Python | venv/Lib/site-packages/apkutils2/gui/__main__.py | AntoData/appium-framework | 936c264971a0755fc815b273961e3deb0f516f6b | [
"MIT"
] | null | null | null | venv/Lib/site-packages/apkutils2/gui/__main__.py | AntoData/appium-framework | 936c264971a0755fc815b273961e3deb0f516f6b | [
"MIT"
] | null | null | null | venv/Lib/site-packages/apkutils2/gui/__main__.py | AntoData/appium-framework | 936c264971a0755fc815b273961e3deb0f516f6b | [
"MIT"
] | null | null | null | # coding: utf-8
# Author: codeskyblue 2018-06-04
import argparse
import ctypes
import os
import pathlib
import sys
import tkinter as tk
import winreg
import apkutils
def _bind_apk_right_menu():
    """
    Register an "APK Parser" entry in the Windows Explorer context menu
    for all files (HKEY_CLASSES_ROOT\\*\\shell), pointing at this script.

    Re-launches itself elevated via ShellExecuteW when not run as admin,
    since writing HKCR requires administrator rights.
    """
    if not ctypes.windll.shell32.IsUserAnAdmin():
        ctypes.windll.shell32.ShellExecuteW(
            None, "runas", sys.executable, __file__ + " --bind", None, 0)
        return
    with winreg.OpenKey(winreg.HKEY_CLASSES_ROOT, r"*\shell") as key:
        print(key)
        with winreg.CreateKeyEx(key, "APK Parser", 0, winreg.KEY_SET_VALUE) as shell_key:
            # Icon shipped next to this module.
            icon_path = str(pathlib.Path(__file__).joinpath(
                "../android.ico").resolve())
            winreg.SetValueEx(shell_key, "Icon", 0, winreg.REG_SZ, icon_path)
            with winreg.CreateKey(shell_key, "command") as cmd_key:
                # Use pythonw.exe so no console window pops up; 1 == REG_SZ.
                winreg.SetValue(
                    cmd_key, "", 1, " ".join(
                        [sys.executable.replace("python.exe", "pythonw.exe"), os.path.abspath(__file__), "--file", "\"%1\""]))
def _unbind_reg():
    """
    Remove the "APK Parser" Explorer context-menu entry.

    Re-launches itself elevated when not run as admin. Deletes through
    HKLM\\SOFTWARE\\Classes (which HKCR merges); a missing key is ignored.
    """
    if not ctypes.windll.shell32.IsUserAnAdmin():
        ctypes.windll.shell32.ShellExecuteW(
            None, "runas", sys.executable, __file__ + " --unbind", None, 0)
        return
    try:
        # Subkeys must be deleted innermost-first (command before its parent).
        winreg.DeleteKey(winreg.HKEY_LOCAL_MACHINE,
                         r"SOFTWARE\Classes\*\shell\APK Parser\command")
        winreg.DeleteKey(winreg.HKEY_LOCAL_MACHINE,
                         r"SOFTWARE\Classes\*\shell\APK Parser")
    except FileNotFoundError:
        pass
class TKList(object):
    """Helper that lays out successive rows of Tk widgets in a grid."""

    def __init__(self):
        # Next grid row to fill.
        self._row = 0

    def add_row(self, *widgets):
        """
        Append one row to the grid.

        Plain strings are wrapped in a one-line tk.Text whose content is
        auto-selected on focus; any other argument must be a Tk widget
        (anything exposing .grid()).
        """
        for (column, widget) in enumerate(widgets):
            if isinstance(widget, str):
                text = tk.Text(height=1)
                text.insert(tk.INSERT, widget)
                # ref: https://tkdocs.com/tutorial/text.html
                # BUG FIX: bind the current widget as a default argument.
                # The original lambda captured the loop variable `text`
                # late, so every handler selected the *last* created Text.
                text.bind("<FocusIn>", lambda event, t=text: t.tag_add(
                    tk.SEL, "1.0", "1.end"))
                widget = text
            widget.grid(row=self._row, column=column)
        self._row += 1
def main(path):
    """
    Show the GUI: APK details when `path` is given, otherwise the
    registry bind/unbind buttons.
    """
    root = tk.Tk()
    # Title the actual root window instead of poking the private
    # tk._default_root attribute.
    root.title("APK Parser")
    if path:
        # BUG FIX: the module is imported as `apkutils` (top of file);
        # the original referenced the undefined name `apkutils2`.
        apk = apkutils.APK(path)
        mf = apk.manifest
        grid = TKList()
        grid.add_row(tk.Label(text="Filename"), os.path.basename(path))
        grid.add_row(tk.Label(text="Package Name"), mf.package_name)
        grid.add_row(tk.Label(text="Main Activity"), mf.main_activity)
        grid.add_row(tk.Label(text="Version Name"), mf.version_name)
        grid.add_row(tk.Label(text="Version Code"), mf.version_code)
    else:
        tk.Button(root, text="Bind to *.apk Right MENU",
                  command=_bind_apk_right_menu).pack(padx=10, pady=5)
        tk.Button(root, text="Unbind",
                  command=_unbind_reg).pack(padx=10, pady=5, side=tk.LEFT)
    tk.mainloop()
if __name__ == '__main__':
    # CLI: --file parses an APK and shows its details; --bind / --unbind
    # manage the Explorer context-menu registration; no flags opens the
    # bind/unbind GUI (main receives path=None).
    parser = argparse.ArgumentParser()
    parser.add_argument("--file", type=str, help="parsed file")
    parser.add_argument("--bind", action="store_true",
                        help="Bind right-click menu")
    parser.add_argument("--unbind", action="store_true",
                        help="Unbind right-click menu")
    args = parser.parse_args()
    if args.bind:
        _bind_apk_right_menu()
    elif args.unbind:
        _unbind_reg()
    else:
        main(args.file)
| 33.303922 | 127 | 0.59582 | 419 | 3,397 | 4.630072 | 0.348449 | 0.018557 | 0.025773 | 0.030928 | 0.247938 | 0.232474 | 0.210825 | 0.16701 | 0.16701 | 0.16701 | 0 | 0.014534 | 0.270827 | 3,397 | 101 | 128 | 33.633663 | 0.768672 | 0.025611 | 0 | 0.125 | 0 | 0 | 0.118875 | 0.016939 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0.0125 | 0.1 | 0 | 0.2 | 0.0125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8a259ee1cd9f65b011066a03a8a6d62ab0eaeac | 384 | py | Python | services/backend/app/core/models/subjects.py | moxxiq/online-diary | 5949cb5631d49622a31885519a880b17a0816988 | [
"MIT"
] | null | null | null | services/backend/app/core/models/subjects.py | moxxiq/online-diary | 5949cb5631d49622a31885519a880b17a0816988 | [
"MIT"
] | null | null | null | services/backend/app/core/models/subjects.py | moxxiq/online-diary | 5949cb5631d49622a31885519a880b17a0816988 | [
"MIT"
] | null | null | null | from sqlalchemy import (
Column,
DateTime,
Integer,
MetaData,
String,
Date,
Table,
UniqueConstraint,
)
from sqlalchemy.sql import func
# Shared metadata object for the tables defined in this module.
metadata = MetaData()

# "subjects" table: integer primary key plus a required, unique name.
subjects = Table(
    "subjects",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("name", String, nullable=False),
    UniqueConstraint('name', name="uq_subjects_name"),
)
a8a3bd98e6fdcb402c11815ee2b2b0abe1c238c5 | 7,235 | py | Python | corehq/ex-submodules/dimagi/utils/decorators/profile.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2020-05-05T13:10:01.000Z | 2020-05-05T13:10:01.000Z | corehq/ex-submodules/dimagi/utils/decorators/profile.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2019-12-09T14:00:14.000Z | 2019-12-09T14:00:14.000Z | corehq/ex-submodules/dimagi/utils/decorators/profile.py | MaciejChoromanski/commcare-hq | fd7f65362d56d73b75a2c20d2afeabbc70876867 | [
"BSD-3-Clause"
] | 5 | 2015-11-30T13:12:45.000Z | 2019-07-01T19:27:07.000Z | from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import random
from functools import wraps
import cProfile
import resource
import os
import gc
import logging
from datetime import datetime
from django.conf import settings
from corehq.util.decorators import ContextDecorator
from corehq.util.python_compatibility import soft_assert_type_text
from dimagi.utils.modules import to_function
import six
logger = logging.getLogger(__name__)

try:
    PROFILE_LOG_BASE = settings.PROFILE_LOG_BASE
except Exception:
    # Fall back to /tmp when Django settings don't define a base directory
    # for profile dumps (or settings are not configured at all).
    PROFILE_LOG_BASE = "/tmp"
# Source: http://code.djangoproject.com/wiki/ProfilingDjango
def profile(log_file):
    """Profile every call of the wrapped callable (probability 1, no limit)."""
    return profile_prod(log_file, 1, None)
def profile_prod(log_file, probability, limit):
    """Profile some callable.

    This decorator uses cProfile to profile some callable (like
    a view function or method) and dumps the profile data somewhere sensible
    for later processing and examination.

    :param log_file: If it's a relative path, it places it under the PROFILE_LOG_BASE.
        It also inserts a time stamp into the file name, such that
        'my_view.prof' become 'my_view-2018-06-07T12:46:56.367347.prof', where the time stamp is in UTC.
        This makes it easy to run and compare multiple trials.
    :param probability: A number N between 0 and 1 such that P(profile) ~= N
    :param limit: The maximum number of profiles to record.
    """
    assert isinstance(probability, (int, float)), 'probability must be numeric'
    assert 0 <= probability <= 1, 'probability must be in range [0, 1]'
    assert not limit or isinstance(limit, int), 'limit must be an integer'
    if not os.path.isabs(log_file):
        log_file = os.path.join(PROFILE_LOG_BASE, log_file)

    def _outer(f):
        if probability <= 0:
            # Profiling effectively disabled: return the callable untouched.
            return f
        base, ext = os.path.splitext(log_file)
        header = '=' * 100
        logger.warn("""
%(header)s
Profiling enabled for %(module)s.%(name)s with probability %(prob)s and limit %(limit)s.
Output will be written to %(base)s-[datetime]%(ext)s
%(header)s
""", {
            'header': header, 'module': f.__module__, 'name': f.__name__,
            'prob': probability, 'base': base, 'ext': ext, 'limit': limit
        })

        class Scope:
            """Class to keep outer scoped variable"""
            profile_count = 0

        @wraps(f)
        def _inner(*args, **kwargs):
            hit_limit = limit and Scope.profile_count > limit
            if hit_limit or random.random() > probability:
                # Skip profiling for this call.
                return f(*args, **kwargs)
            else:
                Scope.profile_count += 1
                # Add a timestamp to the profile output when the callable
                # is actually called.
                final_log_file = '{}-{}{}'.format(base, datetime.now().isoformat(), ext)
                prof = cProfile.Profile()
                try:
                    ret = prof.runcall(f, *args, **kwargs)
                finally:
                    # Always persist the stats, even when f raises.
                    prof.dump_stats(final_log_file)
                return ret
        return _inner
    return _outer
try:
    from line_profiler import LineProfiler

    def line_profile(follow=[]):
        """
        Perform line profiling of a function.

        Will output the profile stats per line of each function included in the profiler.
        Output will be printed once per function call so take care not to use this on
        functions that get called many times.

        :param follow: list of additional functions that should be profiled

        Example output
        --------------
        File: demo.py
        Function: demo_follow at line 67
        Total time: 1.00391 s

        Line #      Hits         Time  Per Hit   % Time  Line Contents
        ==============================================================
        67                                           def demo_follow():
        68         1           34     34.0      0.0      r = random.randint(5, 10)
        69        11           81      7.4      0.0      for i in xrange(0, r):
        70        10      1003800 100380.0    100.0          time.sleep(0.1)

        File: demo.py
        Function: demo_profiler at line 72
        Total time: 1.80702 s

        Line #      Hits         Time  Per Hit   % Time  Line Contents
        ==============================================================
        72                                           @line_profile(follow=[demo_follow])
        73                                           def demo_profiler():
        74         1           17     17.0      0.0      r = random.randint(5, 10)
        75         9           66      7.3      0.0      for i in xrange(0, r):
        76         8       802921 100365.1     44.4          time.sleep(0.1)
        77
        78         1      1004013 1004013.0     55.6      demo_follow()
        """
        # NOTE(review): the mutable default `follow=[]` is safe here because
        # it is only iterated, never mutated.
        def inner(func):
            @wraps(func)
            def profiled_func(*args, **kwargs):
                try:
                    profiler = LineProfiler()
                    profiler.add_function(func)
                    for f in follow:
                        # Entries may be dotted-path strings; resolve them
                        # to callables before registering.
                        if isinstance(f, six.string_types):
                            soft_assert_type_text(f)
                            f = to_function(f)
                        profiler.add_function(f)
                    profiler.enable_by_count()
                    return func(*args, **kwargs)
                finally:
                    # Print stats even when the wrapped call raises.
                    profiler.print_stats()
            return profiled_func
        return inner
except ImportError:
    # line_profiler is not installed: fall back to a no-op decorator so
    # leftover @line_profile(...) usages don't break production.
    def line_profile(follow=[]):
        "Helpful if you accidentally leave in production!"
        def inner(func):
            def nothing(*args, **kwargs):
                return func(*args, **kwargs)
            return nothing
        return inner
class resident_set_size(ContextDecorator):
    """Shows how much memory was allocated to the python process (the resident set
    size) before and after the function this wraps. Can also be used as a context manager.

    Can be used to debug memory leaks.

    `man getrusage` for more information
    """

    def __init__(self, enter_debugger=False):
        # enter_debugger: when True, drop into ipdb/pdb after printing totals.
        self.initial_size = 0
        self.enter_debugger = enter_debugger

    def __enter__(self):
        # NOTE(review): ru_maxrss is reported in kilobytes on Linux but in
        # *bytes* on macOS — the "kb" label assumes Linux.
        self.initial_size = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
        print('Resident Set Size before: {}kb'.format(self.initial_size))

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Collect garbage first so the final figure excludes dead objects.
        gc.collect()
        final_size = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
        print('Resident Set Size after: {}kb'.format(final_size))
        print('Resident Set Size total: {}kb'.format(final_size - self.initial_size))
        if self.enter_debugger:
            try:
                import ipdb
                # NOTE: hit 'u' when debugger starts to enter the previous frame
                ipdb.set_trace()
            except ImportError:
                import pdb
                # NOTE: hit 'u' when debugger starts to enter the previous frame
                pdb.set_trace()
| 36.913265 | 104 | 0.565031 | 873 | 7,235 | 4.540664 | 0.347079 | 0.017659 | 0.017659 | 0.015136 | 0.105449 | 0.094349 | 0.094349 | 0.094349 | 0.076186 | 0.058527 | 0 | 0.035872 | 0.34112 | 7,235 | 195 | 105 | 37.102564 | 0.795679 | 0.374983 | 0 | 0.175926 | 0 | 0.009259 | 0.110719 | 0.006112 | 0 | 0 | 0 | 0 | 0.046296 | 1 | 0.12037 | false | 0 | 0.194444 | 0.018519 | 0.453704 | 0.046296 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8a82d9664743c7ab6d5b9920c3bc0a2ddca2495 | 5,236 | py | Python | src/cirrus_ngs/awsCluster/ConnectionManager.py | ucsd-ccbb/cirrus-ngs | 8f51450b3d971b03d4fd08a1aab11d5a076aa23e | [
"MIT"
] | 8 | 2017-01-20T00:00:45.000Z | 2022-02-11T00:20:45.000Z | src/cirrus_ngs/cfnCluster/ConnectionManager.py | miko-798/cirrus-ngs | 2c005f0fe29e298652ed2164e08ada75e908229b | [
"MIT"
] | 3 | 2018-03-23T19:09:06.000Z | 2018-03-26T19:49:55.000Z | src/cirrus_ngs/cfnCluster/ConnectionManager.py | miko-798/cirrus-ngs | 2c005f0fe29e298652ed2164e08ada75e908229b | [
"MIT"
] | 2 | 2018-03-29T06:24:31.000Z | 2019-04-01T18:34:53.000Z | #"""
#Copyright 2017 ...
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE."""
#TODO fix licensing stuff
#TODO find person to give the copyright to
__author__ = 'Guorong Xu<g1xu@ucsd.edu>'
import paramiko
from scp import SCPClient
## connecting the master instance of a CFNCluster by the hostname, username and private key file
## return a ssh client
def connect_master(hostname, username, private_key_file):
    """Open an SSH connection to the cluster head node.

    :param hostname: IP address or host name of the target machine
    :param username: account to log in as
    :param private_key_file: path to the RSA private key used for auth
    :return: a connected paramiko.SSHClient instance
        <http://docs.paramiko.org/en/2.4/api/client.html>
    """
    key = paramiko.RSAKey.from_private_key_file(private_key_file)
    client = paramiko.SSHClient()
    # Accept unknown host keys automatically (fresh cluster nodes).
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    print("connecting")
    client.connect(hostname=hostname, username=username, pkey=key)
    print("connected")
    return client
## executing command using the ssh client
def execute_command(ssh_client, command, verbose=False):
    """Run a shell command on the remote host and capture its output.

    Only stdout/stderr are captured; exit statuses are not reported.

    :param ssh_client: connected paramiko SSHClient instance
    :param command: command line to execute remotely
    :param verbose: when True, echo the command locally before running it
    :return: the command's stdout followed by its stderr, UTF-8 decoded
    """
    if verbose:
        print("Executing {}".format(command))
    _, out, err = ssh_client.exec_command(command)
    return out.read().decode("utf-8") + err.read().decode("utf-8")
def list_dir(ssh_client, directory):
    """Return the entry names of a remote directory (parsed `ls` output)."""
    return execute_command(ssh_client, "ls {}".format(directory)).split()
def copy_file(ssh_client, localpath, remotepath):
    """Upload a local file to the remote host over SCP.

    The local copy is left unchanged.

    :param ssh_client: connected paramiko SSHClient instance
    :param localpath: absolute path of the local source file
    :param remotepath: absolute destination path on the remote host
    :return: None
    """
    # SCPClient wraps the client's existing paramiko transport.
    transfer = SCPClient(ssh_client.get_transport())
    print(localpath)
    print(remotepath)
    transfer.put(localpath, remotepath)
def copy_gatk(ssh_client, localpath):
    """Upload a user-supplied GATK jar file to the cluster.

    GATK's license does not allow distributing its jar in the cluster
    snapshot, so users download it themselves and push it up with this
    helper.

    :param ssh_client: connected paramiko SSHClient instance
    :param localpath: absolute path to the local GATK jar file
    :return: None
    """
    gatk_dir = "/shared/workspace/software/gatk/3.8-0/"
    copy_file(ssh_client, localpath, gatk_dir)
## close the ssh connection
def close_connection(ssh_client):
    """Terminate the SSH session.

    After this call the client can no longer run remote commands; call
    connect_master() again if further operations are needed.

    :param ssh_client: connected paramiko SSHClient instance
    :return: None
    """
    ssh_client.close()
| 35.863014 | 96 | 0.719633 | 740 | 5,236 | 5.027027 | 0.331081 | 0.048387 | 0.026344 | 0.034946 | 0.169892 | 0.161828 | 0.136022 | 0.136022 | 0.136022 | 0.136022 | 0 | 0.002934 | 0.218869 | 5,236 | 145 | 97 | 36.110345 | 0.906601 | 0.694805 | 0 | 0 | 0 | 0 | 0.082827 | 0.028875 | 0 | 0 | 0 | 0.006897 | 0 | 1 | 0.2 | false | 0 | 0.066667 | 0.033333 | 0.366667 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8aa59d7445676369a9f134a896c08122a9c475e | 1,076 | py | Python | src/utils/writer/writer.py | wwang107/master-thesis | a386613a3351f1c7aeeb8877c8d8586c5bc3e314 | [
"MIT",
"Unlicense"
] | null | null | null | src/utils/writer/writer.py | wwang107/master-thesis | a386613a3351f1c7aeeb8877c8d8586c5bc3e314 | [
"MIT",
"Unlicense"
] | null | null | null | src/utils/writer/writer.py | wwang107/master-thesis | a386613a3351f1c7aeeb8877c8d8586c5bc3e314 | [
"MIT",
"Unlicense"
] | null | null | null | from torch.utils.tensorboard import SummaryWriter
from os import path
import torch
import numpy as np
import cv2
class TensorBoardWriter:
    """Thin wrapper around TensorBoard ``SummaryWriter`` objects that keeps
    separate writers for training and validation runs.

    Args:
        log_directory: Root directory for the event files.  When ``None``,
            no writers are created and logging calls will fail with
            AttributeError.
    """

    def __init__(self, log_directory=None):
        super().__init__()
        self.log_directory = log_directory
        if log_directory is not None:
            self.trainWriter = SummaryWriter(
                log_dir=path.join(log_directory, 'train'))
            self.validWriter = SummaryWriter(
                log_dir=path.join(log_directory, 'validation'))

    def _writer_for(self, mode):
        """Return the writer for *mode*.

        Raises:
            ValueError: if *mode* is neither 'train' nor 'valid'.
                (The original code left ``writer`` unbound in ``add_images``
                for unknown modes, raising UnboundLocalError instead.)
        """
        if mode == 'train':
            return self.trainWriter
        if mode == 'valid':
            return self.validWriter
        raise ValueError(f"unknown mode {mode!r}, expected 'train' or 'valid'")

    def add_scalar(self, mode, loss, step):
        """Log a scalar loss value under the tag 'loss' at *step*."""
        self._writer_for(mode).add_scalar('loss', loss, step)

    def add_images(self, mode, images, step):
        """Log batches of images.

        Args:
            mode: 'train' or 'valid'.
            images: Mapping from tag to a list of HxWxC arrays; values are
                stacked along a new batch axis and divided by 255
                (assumes uint8-range pixel values — TODO confirm at callers).
            step: Global step value.
        """
        writer = self._writer_for(mode)
        for key in images:
            stack = np.stack(images[key], axis=0) / 255
            writer.add_images(key, stack, step, dataformats='NHWC')
| 30.742857 | 67 | 0.606877 | 125 | 1,076 | 5.056 | 0.384 | 0.113924 | 0.03481 | 0.063291 | 0.189873 | 0.123418 | 0.123418 | 0 | 0 | 0 | 0 | 0.006562 | 0.291822 | 1,076 | 34 | 68 | 31.647059 | 0.822835 | 0 | 0 | 0.148148 | 0 | 0 | 0.04368 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.185185 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8aad5d2db4e223823c0caa3806833c4332d5a57 | 846 | py | Python | Pyhton files/dice.py | PacktPublishing/Python-for-Beginners---Start-to-Code-with-Python | c82d1ccb3db691352966e61ef1b0e2646d4d135e | [
"MIT"
] | 5 | 2021-12-09T16:43:21.000Z | 2022-01-24T10:10:19.000Z | Pyhton files/dice.py | PacktPublishing/Python-for-Beginners---Start-to-Code-with-Python | c82d1ccb3db691352966e61ef1b0e2646d4d135e | [
"MIT"
] | null | null | null | Pyhton files/dice.py | PacktPublishing/Python-for-Beginners---Start-to-Code-with-Python | c82d1ccb3db691352966e61ef1b0e2646d4d135e | [
"MIT"
] | 6 | 2021-12-10T06:26:37.000Z | 2022-03-01T05:51:57.000Z | import random
# Dice duel: player and computer each roll one die per round until the
# player types "exit" (or enters nothing) at the prompt.
# NOTE(review): renamed ``min``/``max`` — the originals shadowed builtins;
# also removed a leftover ``x = 20; while x < 10`` test loop that never ran.
MIN_ROLL = 1
MAX_ROLL = 6
computerScore = 0
playerScore = 0
inPlay = True


def gamePlay():
    """Play rounds of the dice duel, updating the global scores.

    Each round rolls a die for the player and one for the computer,
    prints the outcome and the winner, then asks whether to roll again.
    Entering "exit" (or an empty string) ends the loop.
    """
    global inPlay
    global computerScore
    global playerScore
    while inPlay:
        player = random.randint(MIN_ROLL, MAX_ROLL)
        computer = random.randint(MIN_ROLL, MAX_ROLL)
        print(f"You Got {player} vs {computer}")
        if player == computer:
            print("Tie Game")
        elif player > computer:
            print("Player Wins")
            playerScore += 1
        else:
            print("Computer Wins")
            computerScore += 1
        # input() returns a string; any non-empty answer keeps playing
        inPlay = input("Roll Again ? ")
        if inPlay == "exit":
            break


gamePlay()
print("Game Over")
print(f"Computer Score : {computerScore } vs Player Score : {playerScore }")
a8ad2e0efa2c70aef99f6346cbf12341b6baa35e | 1,692 | py | Python | setup.py | bthate/genocide | 8de7a2cccee7315ae6cf5661738ba1335e30a5ba | [
"DOC"
] | null | null | null | setup.py | bthate/genocide | 8de7a2cccee7315ae6cf5661738ba1335e30a5ba | [
"DOC"
] | null | null | null | setup.py | bthate/genocide | 8de7a2cccee7315ae6cf5661738ba1335e30a5ba | [
"DOC"
] | null | null | null | # This file is placed in the Public Domain.
#
# http://genocide.rtfd.io - EM_T04_OTP-CR-117_19
import os
from setuptools import setup
def read():
    """Return the contents of README.rst for use as the long description.

    Uses a context manager so the file handle is closed (the original
    leaked the handle returned by ``open``).
    """
    with open("README.rst", "r") as readme:
        return readme.read()
def uploadlist(dir):
    """Recursively collect data files under *dir*.

    Hidden entries (leading '.'), compiled bytecode (``*.pyc``) and
    ``__pycache__`` entries are skipped; sub-directories are walked
    recursively.

    Args:
        dir: directory path to scan (kept as ``dir`` for interface
            compatibility even though it shadows the builtin).

    Returns:
        A list of file paths, each prefixed with *dir*.
    """
    collected = []
    for entry in os.listdir(dir):
        if not entry or entry.startswith('.'):
            continue
        full_path = dir + os.sep + entry
        if os.path.isdir(full_path):
            collected.extend(uploadlist(full_path))
        elif not (entry.endswith(".pyc") or entry.startswith("__pycache")):
            collected.append(full_path)
    return collected
# Package metadata for setuptools.
# NOTE(review): ``data_files`` installs whole directory trees collected by
# ``uploadlist``; the paths on the left are relative to the install prefix.
setup(
    name='genocide',
    version='41',
    url='https://github.com/bthate/genocide',
    author='Bart Thate',
    author_email='bthate67@gmail.com',
    description="EM_T04_OTP-CR-117_19 - prosecute king netherlands for genocide - http://genocide.rtfd.io",
    long_description=read(),
    license='Public Domain',
    packages=["gcd", "genocide"],
    zip_safe=True,
    include_package_data=True,
    data_files=[
        ("share/genocide", uploadlist("files")),
        ("man/man1", ["files/genocide.1.gz"]),
        ("share/doc/genocide", uploadlist("docs")),
        ("share/doc/genocide/jpg", uploadlist("docs/jpg")),
        ("share/doc/genocide/pdf", uploadlist("docs/pdf")),
        ("share/doc/genocide/_templates", uploadlist("docs/_templates")),
    ],
    scripts=["bin/genocide"],
    classifiers=['Development Status :: 3 - Alpha',
                 'License :: Public Domain',
                 'Operating System :: Unix',
                 'Programming Language :: Python',
                 'Topic :: Utilities'
                 ]
)
| 31.333333 | 107 | 0.55792 | 186 | 1,692 | 4.989247 | 0.55914 | 0.034483 | 0.068966 | 0.038793 | 0.032328 | 0.032328 | 0 | 0 | 0 | 0 | 0 | 0.017515 | 0.291371 | 1,692 | 54 | 108 | 31.333333 | 0.756464 | 0.052009 | 0 | 0.044444 | 0 | 0 | 0.324797 | 0.045597 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044444 | false | 0 | 0.044444 | 0.022222 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8aeb0a8ffbd2c27f2fa8c0538c0281cc7626d0d | 1,720 | py | Python | maintain_frontend/services/charge_services.py | LandRegistry/maintain-frontend | d92446a9972ebbcd9a43a7a7444a528aa2f30bf7 | [
"MIT"
] | 1 | 2019-10-03T13:58:29.000Z | 2019-10-03T13:58:29.000Z | maintain_frontend/services/charge_services.py | LandRegistry/maintain-frontend | d92446a9972ebbcd9a43a7a7444a528aa2f30bf7 | [
"MIT"
] | null | null | null | maintain_frontend/services/charge_services.py | LandRegistry/maintain-frontend | d92446a9972ebbcd9a43a7a7444a528aa2f30bf7 | [
"MIT"
] | 1 | 2021-04-11T05:24:57.000Z | 2021-04-11T05:24:57.000Z | from flask import current_app
from maintain_frontend.exceptions import ApplicationError
from maintain_frontend.models import LocalLandChargeHistoryItem, LightObstructionNoticeItem
from maintain_frontend.services.charge_id_services import validate_charge_id
def get_history_update_info_by_charge_id(charge_id, local_land_charge_service):
    """Return whether a charge has been updated and, if so, when.

    Args:
        charge_id: The local land charge identifier to look up.
        local_land_charge_service: Service client used to fetch the history.

    Returns:
        Tuple ``(updated, updated_date)``: ``updated`` is True when the
        charge has more than one history entry; ``updated_date`` is the
        newest entry's timestamp formatted like "1 January 2021", or None.

    Raises:
        ApplicationError: with status 404 when the charge is not found.
    """
    # Validate up front, consistent with get_lon_by_charge_id below.
    validate_charge_id(charge_id)

    hist_response = local_land_charge_service.get_history_for_charge(charge_id)
    if hist_response.status_code == 404:
        current_app.logger.info("Search service reports '{}' not found - Returning error".format(charge_id))
        raise ApplicationError(404)
    hist_response.raise_for_status()

    # reversed() puts the most recent history entry first.
    history_items = list(reversed(LocalLandChargeHistoryItem.from_json(hist_response.json())))

    if len(history_items) > 1:
        # More than one entry means the charge was amended at least once.
        updated_date = history_items[0].entry_timestamp.strftime('%-d %B %Y')
        updated = True
    else:
        updated_date = None
        updated = False

    return updated, updated_date
def get_lon_by_charge_id(charge_id, local_land_charge_service):
    """Fetch a Light Obstruction Notice by its charge id.

    Args:
        charge_id: The charge number to retrieve.
        local_land_charge_service: Service client used for the lookup.

    Returns:
        Tuple ``(display_id, charge_item)``.

    Raises:
        ApplicationError: with status 404 when the charge is not found.
    """
    validate_charge_id(charge_id)
    current_app.logger.info("Retrieving charge information from charge_id='{}'".format(charge_id))

    response = local_land_charge_service.get_by_charge_number(charge_id)
    if response.status_code == 404:
        current_app.logger.info("Search service reports '{}' not found - Returning error".format(charge_id))
        raise ApplicationError(404)
    response.raise_for_status()

    # Parse the payload once instead of calling response.json() twice.
    payload = response.json()[0]
    charge_item = LightObstructionNoticeItem.from_json(payload['item'])
    display_id = payload['display_id']

    current_app.logger.info("Retrieved charge for local_land_charge='{}'".format(charge_id))
    return display_id, charge_item
| 38.222222 | 108 | 0.76686 | 222 | 1,720 | 5.59009 | 0.306306 | 0.096696 | 0.060435 | 0.070911 | 0.348106 | 0.312651 | 0.259468 | 0.259468 | 0.259468 | 0.195004 | 0 | 0.010862 | 0.143605 | 1,720 | 44 | 109 | 39.090909 | 0.831636 | 0 | 0 | 0.133333 | 0 | 0 | 0.130814 | 0.012791 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.133333 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8af22e6963cde236cd50136e3cfcbfe79dad636 | 1,097 | py | Python | apigateway/apigateway/api/app.py | Tosca-Projects/parser | 04dabfa15db432b24656d38628a846b73b658339 | [
"Apache-2.0"
] | 1 | 2018-05-14T09:59:51.000Z | 2018-05-14T09:59:51.000Z | apigateway/apigateway/api/app.py | Tosca-Projects/parser | 04dabfa15db432b24656d38628a846b73b658339 | [
"Apache-2.0"
] | null | null | null | apigateway/apigateway/api/app.py | Tosca-Projects/parser | 04dabfa15db432b24656d38628a846b73b658339 | [
"Apache-2.0"
] | 2 | 2019-04-20T15:00:58.000Z | 2020-06-16T14:42:35.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from apigateway.api import config as api_config
from apigateway.api import hooks
def get_pecan_config():
    """Load and return the pecan configuration from the api config module.

    ``__file__`` may point at compiled bytecode, so the ``.pyc`` suffix is
    swapped for ``.py`` before handing the path to pecan.
    """
    config_path = api_config.__file__.replace('.pyc', '.py')
    return pecan.configuration.conf_from_file(config_path)
def setup_app():
    """Create and return the pecan WSGI application with the DB hook attached."""
    config = get_pecan_config()
    app_settings = dict(config.app)
    root_controller = app_settings.pop('root')
    return pecan.make_app(
        root_controller,
        logging=getattr(config, 'logging', {}),
        hooks=[hooks.DBHook()],
        **app_settings
    )
| 28.868421 | 78 | 0.695533 | 154 | 1,097 | 4.831169 | 0.545455 | 0.080645 | 0.034946 | 0.043011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004651 | 0.216044 | 1,097 | 37 | 79 | 29.648649 | 0.860465 | 0.497721 | 0 | 0 | 0 | 0 | 0.033645 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.176471 | 0 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8b4efd58e014ff6aa9c3012dc9f5a09a9910025 | 5,337 | py | Python | rdfalchemy/engine/__init__.py | pdwood/RDFAlchemy3 | c45241d4f74cfac8d816c69e206a5c43ec742b43 | [
"MIT"
] | 28 | 2015-03-20T14:01:07.000Z | 2022-03-22T15:09:27.000Z | rdfalchemy/engine/__init__.py | pdwood/RDFAlchemy3 | c45241d4f74cfac8d816c69e206a5c43ec742b43 | [
"MIT"
] | 5 | 2017-09-27T19:41:30.000Z | 2021-12-09T15:22:42.000Z | rdfalchemy/engine/__init__.py | pdwood/RDFAlchemy3 | c45241d4f74cfac8d816c69e206a5c43ec742b43 | [
"MIT"
] | 11 | 2016-02-07T16:33:24.000Z | 2021-05-08T11:00:42.000Z | """
"""
import os
import re
import cgi
import urllib
def create_engine(url='', identifier="", create=False):
    """
    :returns: returns an opened rdflib ConjunctiveGraph
    :param url: a string of the url
    :param identifier: URIRef of the default context for writing e.g.:

      - create_engine('sleepycat://~/working/rdf_db')
      - create_engine('kyotocabinet://~/working/rdf_db')
      - create_engine('zodb:///var/rdflib/Data.fs')
      - create_engine('zodb://localhost:8672')
      - create_engine(
            'sesame://www.example.com:8080/openrdf-sesame/repositories/Test')
      - create_engine('sparql://www.example.com:2020/sparql')

    for zodb:
    the key in the Zope database is hardcoded as 'rdflib', urls ending in `.fs`
    indicate FileStorage, otherwise ClientStoreage is assumed which requires
    a ZEO Server to be running

    for sqlalchemy, prepend the string "sqlachemy+" to a valid SQLAlchemy dburi
    form:

      - create_engine('sqlalchemy+sqlite://')
      - create_engine('sqlalchemy+sqlite:////absolute/path/to/foo.db')
      - create_engine('sqlalchemy+mysql://myname@localhost/rdflibdb')
      - create_engine('sqlalchemy+postgresql://myname@localhost/rdflibdb')

    etc.
    """
    if url == '' or url.startswith('IOMemory'):
        from rdflib import ConjunctiveGraph
        db = ConjunctiveGraph('IOMemory')
    elif url.lower().startswith('sleepycat://'):
        from rdflib import ConjunctiveGraph
        db = ConjunctiveGraph('Sleepycat', identifier=identifier)
        openstr = os.path.abspath(os.path.expanduser(url[12:]))
        db.open(openstr, create=create)
    elif url.lower().startswith('kyotocabinet://'):
        from rdflib import ConjunctiveGraph
        db = ConjunctiveGraph('Kyotocabinet', identifier=identifier)
        openstr = os.path.abspath(os.path.expanduser(url[15:]))
        db.open(openstr, create=create)
    elif url.lower().startswith('sqlalchemy+'):
        from rdflib import ConjunctiveGraph
        db = ConjunctiveGraph('SQLAlchemy', identifier=identifier)
        db.open(url[11:], create=create)
    elif url.lower().startswith('zodb://'):
        import ZODB
        # import transaction
        from rdflib import ConjunctiveGraph
        db = ConjunctiveGraph('ZODB')
        if url.endswith('.fs'):
            from ZODB.FileStorage import FileStorage
            openstr = os.path.abspath(os.path.expanduser(url[7:]))
            if not os.path.exists(openstr) and not create:
                # BUG FIX: the original raised a plain string, which is a
                # TypeError in Python 3; raise a proper exception instead.
                raise FileNotFoundError("File not found: %s" % openstr)
            fs = FileStorage(openstr)
        else:
            from ZEO.ClientStorage import ClientStorage
            schema, opts = _parse_rfc1738_args(url)
            fs = ClientStorage((opts['host'], int(opts['port'])))
        # get the Zope Database
        zdb = ZODB.DB(fs)
        # open it
        conn = zdb.open()
        # get the root
        root = conn.root()
        # get the Conjunctive Graph
        if 'rdflib' not in root and create:
            root['rdflib'] = ConjunctiveGraph('ZODB')
        db = root['rdflib']
    elif url.lower().startswith('sesame://'):
        from rdfalchemy.sparql.sesame2 import SesameGraph
        db = SesameGraph("http://" + url[9:])
    elif url.lower().startswith('sparql://'):
        from rdfalchemy.sparql import SPARQLGraph
        db = SPARQLGraph("http://" + url[9:])
    else:
        # BUG FIX: ``raise <str>`` is invalid in Python 3; signal an
        # unrecognised scheme with ValueError.
        raise ValueError("Could not parse string '%s'" % url)
    return db
def engine_from_config(configuration, prefix='rdfalchemy.', **kwargs):
    """Create a new Engine instance using a configuration dictionary.

    :param configuration: a dictionary, typically produced from a config file
        where keys are prefixed, such as `rdfalchemy.dburi`, etc.
    :param prefix: indicates the prefix to be searched for.
    """
    options = {
        key[len(prefix):]: configuration[key]
        for key in configuration
        if key.startswith(prefix)
    }
    options.update(kwargs)
    # 'dburi' is consumed here; everything else is forwarded as keyword args.
    dburi = options.pop('dburi')
    return create_engine(dburi, **options)
def _parse_rfc1738_args(name):
""" parse url str into options
code orig from sqlalchemy.engine.url """
pattern = re.compile(r'''
(\w+)://
(?:
([^:/]*)
(?::([^/]*))?
@)?
(?:
([^/:]*)
(?::([^/]*))?
)?
(?:/(.*))?
''', re.X)
m = pattern.match(name)
if m is not None:
(name, username, password, host, port, database) = m.group(
1, 2, 3, 4, 5, 6)
if database is not None:
tokens = database.split(r"?", 2)
database = tokens[0]
query = (
len(tokens) > 1 and dict(
cgi.parse_qsl(tokens[1])) or None)
if query is not None:
query = dict([(k.encode('ascii'), query[k]) for k in query])
else:
query = None
opts = {
'username': username, 'password': password, 'host': host,
'port': port, 'database': database, 'query': query}
if opts['password'] is not None:
opts['password'] = urllib.unquote_plus(opts['password'])
return (name, opts)
else:
raise ValueError("Could not parse rfc1738 URL from string '%s'" % name)
| 33.778481 | 79 | 0.589657 | 594 | 5,337 | 5.257576 | 0.318182 | 0.04611 | 0.023055 | 0.042267 | 0.186679 | 0.17131 | 0.080371 | 0.080371 | 0.067883 | 0.037784 | 0 | 0.011393 | 0.276372 | 5,337 | 157 | 80 | 33.993631 | 0.797255 | 0.267941 | 0 | 0.16129 | 0 | 0 | 0.149801 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0.043011 | 0.150538 | 0 | 0.215054 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8b81f42e7cc837b12fe1f7bb35a147cbba65062 | 13,183 | py | Python | catalyst/metrics/_cmc_score.py | NetherQuartz/catalyst | a4d05c1da0e99b9479b043ae245e0e7b3f7c1506 | [
"Apache-2.0"
] | null | null | null | catalyst/metrics/_cmc_score.py | NetherQuartz/catalyst | a4d05c1da0e99b9479b043ae245e0e7b3f7c1506 | [
"Apache-2.0"
] | null | null | null | catalyst/metrics/_cmc_score.py | NetherQuartz/catalyst | a4d05c1da0e99b9479b043ae245e0e7b3f7c1506 | [
"Apache-2.0"
] | 1 | 2021-06-11T16:33:30.000Z | 2021-06-11T16:33:30.000Z | from typing import Dict, Iterable, List, Optional
import torch
from catalyst.metrics._metric import AccumulationMetric
from catalyst.metrics.functional._cmc_score import cmc_score, masked_cmc_score
from catalyst.utils.distributed import get_rank
class CMCMetric(AccumulationMetric):
    """Cumulative Matching Characteristics

    Args:
        embeddings_key: key of embedding tensor in batch
        labels_key: key of label tensor in batch
        is_query_key: key of query flag tensor in batch
        topk_args: list of k, specifies which cmc@k should be calculated
        compute_on_call: if True, allows compute metric's value on call
        prefix: metric prefix
        suffix: metric suffix

    Examples:

    .. code-block:: python

        import torch
        from catalyst import metrics

        batch = {
            "embeddings": torch.tensor(
                [
                    [1, 1, 0, 0],
                    [1, 0, 1, 1],
                    [0, 1, 1, 1],
                    [0, 0, 1, 1],
                    [1, 1, 1, 0],
                    [1, 1, 1, 1],
                    [0, 1, 1, 0],
                ]
            ).float(),
            "labels": torch.tensor([0, 0, 1, 1, 0, 1, 1]),
            "is_query": torch.tensor([1, 1, 1, 1, 0, 0, 0]).bool(),
        }
        topk = (1, 3)

        metric = metrics.CMCMetric(
            embeddings_key="embeddings",
            labels_key="labels",
            is_query_key="is_query",
            topk_args=topk,
        )
        metric.reset(num_batches=1, num_samples=len(batch["embeddings"]))

        metric.update(**batch)
        metric.compute()
        # [0.75, 1.0]  # CMC@01, CMC@03

        metric.compute_key_value()
        # {'cmc01': 0.75, 'cmc03': 1.0}

    .. code-block:: python

        import os
        from torch.optim import Adam
        from torch.utils.data import DataLoader
        from catalyst import data, dl
        from catalyst.contrib import datasets, models, nn
        from catalyst.data.transforms import Compose, Normalize, ToTensor

        # 1. train and valid loaders
        transforms = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])

        train_dataset = datasets.MnistMLDataset(
            root=os.getcwd(), download=True, transform=transforms
        )
        sampler = data.BalanceBatchSampler(labels=train_dataset.get_labels(), p=5, k=10)
        train_loader = DataLoader(
            dataset=train_dataset, sampler=sampler, batch_size=sampler.batch_size
        )

        valid_dataset = datasets.MnistQGDataset(
            root=os.getcwd(), transform=transforms, gallery_fraq=0.2
        )
        valid_loader = DataLoader(dataset=valid_dataset, batch_size=1024)

        # 2. model and optimizer
        model = models.MnistSimpleNet(out_features=16)
        optimizer = Adam(model.parameters(), lr=0.001)

        # 3. criterion with triplets sampling
        sampler_inbatch = data.HardTripletsSampler(norm_required=False)
        criterion = nn.TripletMarginLossWithSampler(margin=0.5, sampler_inbatch=sampler_inbatch)

        # 4. training with catalyst Runner
        class CustomRunner(dl.SupervisedRunner):
            def handle_batch(self, batch) -> None:
                if self.is_train_loader:
                    images, targets = batch["features"].float(), batch["targets"].long()
                    features = self.model(images)
                    self.batch = {"embeddings": features, "targets": targets,}
                else:
                    images, targets, is_query = (
                        batch["features"].float(),
                        batch["targets"].long(),
                        batch["is_query"].bool()
                    )
                    features = self.model(images)
                    self.batch = {
                        "embeddings": features, "targets": targets, "is_query": is_query
                    }

        callbacks = [
            dl.ControlFlowCallback(
                dl.CriterionCallback(
                    input_key="embeddings", target_key="targets", metric_key="loss"
                ),
                loaders="train",
            ),
            dl.ControlFlowCallback(
                dl.CMCScoreCallback(
                    embeddings_key="embeddings",
                    labels_key="targets",
                    is_query_key="is_query",
                    topk_args=[1],
                ),
                loaders="valid",
            ),
            dl.PeriodicLoaderCallback(
                valid_loader_key="valid", valid_metric_key="cmc01", minimize=False, valid=2
            ),
        ]

        runner = CustomRunner(input_key="features", output_key="embeddings")
        runner.train(
            model=model,
            criterion=criterion,
            optimizer=optimizer,
            callbacks=callbacks,
            loaders={"train": train_loader, "valid": valid_loader},
            verbose=False,
            logdir="./logs",
            valid_loader="valid",
            valid_metric="cmc01",
            minimize_valid_metric=False,
            num_epochs=10,
        )

    .. note::
        Please follow the `minimal examples`_ sections for more use cases.

        .. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples
    """

    def __init__(
        self,
        embeddings_key: str,
        labels_key: str,
        is_query_key: str,
        topk_args: Iterable[int] = None,
        compute_on_call: bool = True,
        prefix: Optional[str] = None,
        suffix: Optional[str] = None,
    ) -> None:
        """Init CMCMetric"""
        super().__init__(
            compute_on_call=compute_on_call,
            prefix=prefix,
            suffix=suffix,
            accumulative_fields=[embeddings_key, labels_key, is_query_key],
        )
        self.embeddings_key = embeddings_key
        self.labels_key = labels_key
        self.is_query_key = is_query_key
        self.topk_args = topk_args or (1,)
        self.metric_name = f"{self.prefix}cmc{self.suffix}"

    def reset(self, num_batches: int, num_samples: int) -> None:
        """
        Reset metrics fields

        Args:
            num_batches: expected number of batches
            num_samples: expected number of samples to accumulate

        Raises:
            NotImplementedError: when running under distributed training
                (DDP), which this metric does not support yet
        """
        super().reset(num_batches, num_samples)
        # An ``assert`` would be stripped under ``python -O``; raise explicitly
        # so the DDP guard always fires.
        if get_rank() >= 0:
            raise NotImplementedError("No DDP support implemented yet")

    def compute(self) -> List[float]:
        """
        Compute cmc@k metrics with all the accumulated data for all k.

        Returns:
            list of metrics values
        """
        query_mask = (self.storage[self.is_query_key] == 1).to(torch.bool)

        embeddings = self.storage[self.embeddings_key].float()
        labels = self.storage[self.labels_key]

        query_embeddings = embeddings[query_mask]
        query_labels = labels[query_mask]

        gallery_embeddings = embeddings[~query_mask]
        gallery_labels = labels[~query_mask]

        # conformity_matrix[i, j] is True when gallery sample j has the same
        # label as query sample i
        conformity_matrix = (gallery_labels == query_labels.reshape(-1, 1)).to(torch.bool)

        metrics = []
        for k in self.topk_args:
            value = cmc_score(
                query_embeddings=query_embeddings,
                gallery_embeddings=gallery_embeddings,
                conformity_matrix=conformity_matrix,
                topk=k,
            )
            metrics.append(value)

        return metrics

    def compute_key_value(self) -> Dict[str, float]:
        """
        Compute cmc@k metrics with all the accumulated data for all k.

        Returns:
            metrics values in key-value format
        """
        values = self.compute()
        kv_metrics = {
            f"{self.metric_name}{k:02d}": value for k, value in zip(self.topk_args, values)
        }
        return kv_metrics
class ReidCMCMetric(AccumulationMetric):
    """Cumulative Matching Characteristics for Reid case

    Args:
        embeddings_key: key of embedding tensor in batch
        pids_key: key of pids tensor in batch
        cids_key: key of cids tensor in batch
        is_query_key: key of query flag tensor in batch
        topk_args: list of k, specifies which cmc@k should be calculated
        compute_on_call: if True, allows compute metric's value on call
        prefix: metric prefix
        suffix: metric suffix

    Examples:

    .. code-block:: python

        import torch
        from catalyst.metrics import ReidCMCMetric

        batch = {
            "embeddings": torch.tensor(
                [
                    [1, 1, 0, 0],
                    [1, 0, 0, 0],
                    [0, 1, 1, 1],
                    [0, 0, 1, 1],
                    [1, 1, 1, 0],
                    [1, 1, 1, 1],
                    [0, 1, 1, 0],
                ]
            ).float(),
            "pids": torch.Tensor([0, 0, 1, 1, 0, 1, 1]).long(),
            "cids": torch.Tensor([0, 1, 1, 2, 0, 1, 3]).long(),
            "is_query": torch.Tensor([1, 1, 1, 1, 0, 0, 0]).bool(),
        }
        topk = (1, 3)

        metric = ReidCMCMetric(
            embeddings_key="embeddings",
            pids_key="pids",
            cids_key="cids",
            is_query_key="is_query",
            topk_args=topk,
        )
        metric.reset(num_batches=1, num_samples=len(batch["embeddings"]))

        metric.update(**batch)
        metric.compute()
        # [0.75, 1.0]  # CMC@01, CMC@03

        metric.compute_key_value()
        # {'cmc01': 0.75, 'cmc03': 1.0}
    """

    def __init__(
        self,
        embeddings_key: str,
        pids_key: str,
        cids_key: str,
        is_query_key: str,
        topk_args: Iterable[int] = None,
        compute_on_call: bool = True,
        prefix: Optional[str] = None,
        suffix: Optional[str] = None,
    ) -> None:
        """Init CMCMetric"""
        super().__init__(
            compute_on_call=compute_on_call,
            prefix=prefix,
            suffix=suffix,
            accumulative_fields=[embeddings_key, pids_key, cids_key, is_query_key],
        )
        self.embeddings_key = embeddings_key
        self.pids_key = pids_key
        self.cids_key = cids_key
        self.is_query_key = is_query_key
        self.topk_args = topk_args or (1,)
        self.metric_name = f"{self.prefix}cmc{self.suffix}"

    def reset(self, num_batches: int, num_samples: int) -> None:
        """
        Reset metrics fields

        Args:
            num_batches: expected number of batches
            num_samples: expected number of samples to accumulate

        Raises:
            NotImplementedError: when running under distributed training
                (DDP), which this metric does not support yet
        """
        super().reset(num_batches, num_samples)
        # An ``assert`` would be stripped under ``python -O``; raise explicitly
        # so the DDP guard always fires.
        if get_rank() >= 0:
            raise NotImplementedError("No DDP support implemented yet")

    def compute(self) -> List[float]:
        """
        Compute cmc@k metrics with all the accumulated data for all k.

        Returns:
            list of metrics values

        Raises:
            ValueError: if there are samples in query that have no relevant samples in gallery
        """
        query_mask = (self.storage[self.is_query_key] == 1).to(torch.bool)

        embeddings = self.storage[self.embeddings_key].float()
        pids = self.storage[self.pids_key]
        cids = self.storage[self.cids_key]

        query_embeddings = embeddings[query_mask]
        query_pids = pids[query_mask]
        query_cids = cids[query_mask]

        gallery_embeddings = embeddings[~query_mask]
        gallery_pids = pids[~query_mask]
        gallery_cids = cids[~query_mask]

        pid_conformity_matrix = (gallery_pids == query_pids.reshape(-1, 1)).bool()
        cid_conformity_matrix = (gallery_cids == query_cids.reshape(-1, 1)).bool()

        # Now we are going to generate a mask that should show if
        # a sample from gallery can be used during model scoring on the query
        # sample.
        # There is only one case when the label shouldn't be used for:
        # if query sample is a photo of the person pid_i taken from camera
        # cam_j and the gallery sample is a photo of the same person pid_i
        # from the same camera cam_j. All other cases are available.
        available_samples = ~(pid_conformity_matrix * cid_conformity_matrix).bool()

        if (available_samples.max(dim=1).values == 0).any():
            raise ValueError("There is a sample in query that has no relevant samples in gallery.")

        metrics = []
        for k in self.topk_args:
            value = masked_cmc_score(
                query_embeddings=query_embeddings,
                gallery_embeddings=gallery_embeddings,
                conformity_matrix=pid_conformity_matrix,
                available_samples=available_samples,
                topk=k,
            )
            metrics.append(value)

        return metrics

    def compute_key_value(self) -> Dict[str, float]:
        """
        Compute cmc@k metrics with all the accumulated data for all k.

        Returns:
            metrics values in key-value format
        """
        values = self.compute()
        kv_metrics = {
            f"{self.metric_name}{k:02d}": value for k, value in zip(self.topk_args, values)
        }
        return kv_metrics
__all__ = ["CMCMetric", "ReidCMCMetric"]
| 33.544529 | 99 | 0.564287 | 1,500 | 13,183 | 4.78 | 0.168667 | 0.010321 | 0.006695 | 0.004463 | 0.557741 | 0.536262 | 0.513668 | 0.498466 | 0.475593 | 0.458298 | 0 | 0.021729 | 0.336722 | 13,183 | 392 | 100 | 33.630102 | 0.798262 | 0.544944 | 0 | 0.637931 | 0 | 0 | 0.050952 | 0.021412 | 0 | 0 | 0 | 0 | 0.017241 | 1 | 0.068966 | false | 0 | 0.043103 | 0 | 0.163793 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8b89a0741f789ddc86ec367bae4aea8446937c8 | 7,358 | py | Python | impfdosen/plot_vaccination_doses_per_state.py | meetunix/covid19-impfmonitoring | 8d30022ca29d92da814f85f3d5ab6113ea617035 | [
"Apache-2.0"
] | 1 | 2021-04-08T10:02:39.000Z | 2021-04-08T10:02:39.000Z | impfdosen/plot_vaccination_doses_per_state.py | meetunix/covid19-plots | 8d30022ca29d92da814f85f3d5ab6113ea617035 | [
"Apache-2.0"
] | null | null | null | impfdosen/plot_vaccination_doses_per_state.py | meetunix/covid19-plots | 8d30022ca29d92da814f85f3d5ab6113ea617035 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""
Creates a graph showing how much vaccines have been delivered and vaccinated
for each german state.
Source: https://impfdashboard.de/daten
Copyright 2021 Martin Steinbach
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import hashlib
import pickle
import sys
from datetime import datetime
from io import BytesIO
from pathlib import Path
import matplotlib.pyplot as plt
import pandas as pd
import requests as rq
BASE_URL = "https://impfdashboard.de/static/data"
TIME_SERIES_DELIVERY = "/germany_deliveries_timeseries_v2.tsv"
VACCINATION_STATE = "/germany_vaccinations_by_state.tsv"
PICKLE_FILE = "sources.pickle"
class Sources:
    """Stores the url, length and etag information for the data sources."""

    def __init__(self, delivery_url, vacc_url):
        self.__delivery_url = delivery_url
        self.__vacc_url = vacc_url
        # per-url caches: last-seen content hash ("etag") and raw payload
        self.__etags = {f"{delivery_url}": None, f"{vacc_url}": None}
        self.__data = {f"{delivery_url}": None, f"{vacc_url}": None}
        # server date of the last successful download (None until then)
        self.__date = None

    def set_etag(self, etag, url):
        """Record *etag* for a known source *url*; exit on unknown urls."""
        # BUG FIX: the original tested membership against the bound method
        # ``self.__etags.keys`` (missing call parentheses), which raised
        # TypeError instead of performing the check.
        if url not in self.__etags:
            print(f"url {url} unknown\n")
            sys.exit(1)
        self.__etags[url] = etag

    def get_urls(self):
        """Return both source urls as a list (delivery first)."""
        return [self.__delivery_url, self.__vacc_url]

    def get_etags(self):
        """Return the url -> etag mapping."""
        return self.__etags

    def get_data(self):
        """Return the url -> raw payload bytes mapping."""
        return self.__data

    def is_etag_new(self, url):
        """Return True if the server etag is other than local."""
        local_etag = self.get_etags()[url]
        remote_etag = self.__get_remote_etag(url)
        return not (local_etag == remote_etag)

    def get_date_string(self):
        """Return the download date formatted as DD.MM.YYYY."""
        return self.__date.strftime("%d.%m.%Y")

    def download_sources(self):
        """Download data source files if necessary."""
        urls = self.get_urls()
        for url in urls:
            if self.is_etag_new(url):
                r = rq.get(url)
                if r.status_code == 200:
                    self.__data[url] = r.content
                    # update etags (reuse the response to avoid a second GET)
                    self.__etags[url] = self.__get_remote_etag(url, r)
                    # remember the server-reported date for plot labelling
                    date_string = r.headers["date"]
                    self.__date = datetime.strptime(date_string, "%a, %d %b %Y %H:%M:%S %Z")
                else:
                    print(f"unable to get source {url} \n{r.status_code}- {r.reason}")
                    sys.exit(1)

    def __get_remote_etag(self, url, request=None):
        # The "etag" is simply a content hash; an existing response may be
        # passed to avoid fetching the url a second time.
        return self.__get_hash(url, request)

    @staticmethod
    def __get_hash(url, r):
        """Calculate sha256 over content."""
        if r is None:
            r = rq.get(url)
        if r.status_code == 200:
            m = hashlib.sha256()
            m.update(r.content)
            return m.hexdigest()
        else:
            print(f"unable to get etag \n{r.status_code} - {r.reason}")
            sys.exit(1)
def load_object(context):
    """Load a stored Sources instance if available, otherwise None is returned."""
    pickle_path = Path(context["cwd"] + f"/{PICKLE_FILE}")
    if not pickle_path.exists():
        return None
    with open(pickle_path, "rb") as handle:
        return pickle.load(handle)
def store_object(context, source):
    """Store a Sources instance to file."""
    pickle_path = Path(context["cwd"] + f"/{PICKLE_FILE}")
    with open(pickle_path, "wb") as handle:
        pickle.dump(source, handle, pickle.DEFAULT_PROTOCOL)
def prepare_data(context, urls, source_data):
    """Prepare data for plotting.

    Args:
        context: dict that receives the results under "states" (list of
            state codes) and "data" (state -> (delivered, vaccinated)).
        urls: dict with "delivery" and "vaccination" source urls.
        source_data: dict mapping each source url to its raw TSV bytes.
    """
    data = {}
    delivery = pd.read_table(BytesIO(source_data[urls["delivery"]]))
    vaccination = pd.read_table(BytesIO(source_data[urls["vaccination"]]))

    states = delivery["region"].unique().tolist()

    # exclude direct deliveries to the federal state and corporations, because of very low quantities
    for s in ["DE-BUND", "DE-Bund", "de-bund", "DE-Betriebe"]:
        try:
            states.remove(s)
        except ValueError:
            pass

    context["states"] = states

    for state in states:
        # Sum all deliveries for the state; select scalar values explicitly
        # instead of the deprecated ``int()`` on a 2-D one-element ndarray.
        delivered = int(delivery.loc[delivery["region"] == state, "dosen"].sum())
        vaccinated = int(
            vaccination.loc[vaccination["code"] == state, "vaccinationsTotal"].iloc[0]
        )
        data[state] = (delivered, vaccinated)

    context["data"] = data
def plot(context, sources):
states = context["states"]
states_short = [n.split("-")[1] for n in states]
data = context["data"]
delivered = []
vaccinated = []
for state in states:
delivered.append(data[state][0])
vaccinated.append(data[state][1])
# percentage of used doses from delivered doses max 100% for clean presentation
used_doses_norm = [100 if (i / j * 100) > 100 else i / j * 100 for i, j in zip(vaccinated, delivered)]
# percentage of used doses from delivered doses
used_doses = [i / j * 100 for i, j in zip(vaccinated, delivered)]
# percentage of unused dosis
rest = [100 - i for i in used_doses_norm]
plt.figure(figsize=(16, 9))
plt.style.use("seaborn")
plt.ylabel("%", fontsize=22, labelpad=30)
names = [f"{s}\n\n({str(round(i, 1))})" for s, i in zip(states_short, used_doses)]
plt.xticks(range(len(states)), names, size=14)
plt.yticks(range(0, 101, 10), size=14)
ds = sources.get_date_string()
plt.title(
f"Anteil verimpfter und gelieferter Impfdosen nach Bundesländern - {ds}\n",
fontsize=20,
fontweight="bold",
pad=20,
)
bar_width = 0.85
plt.bar(
range(len(states)),
used_doses_norm,
color="#9dad86",
edgecolor="white",
width=bar_width,
label="Anteil verimpfter Dosen",
)
plt.bar(
range(len(states)),
rest,
bottom=used_doses_norm,
color="#9686ad",
edgecolor="white",
width=bar_width,
label="Anteil noch nicht verimpfter Dosen",
)
plt.legend(
loc="upper center",
bbox_to_anchor=(0.5, 1.08),
shadow=True,
ncol=3,
fontsize=16,
)
plt.text(-3.9, -5, "Quelle: impfdashboard.de", fontsize=12)
plt.savefig(f"{context['cwd']}/doses_delivered_vaccinated_ratio.png")
def main():
"""Start the whole procedure."""
context = {}
urls = {}
urls["delivery"] = BASE_URL + TIME_SERIES_DELIVERY
urls["vaccination"] = BASE_URL + VACCINATION_STATE
context["cwd"] = str(Path(sys.argv[0]).parent)
# load last state
sources = load_object(context)
if sources is None:
sources = Sources(urls["delivery"], urls["vaccination"])
sources.download_sources()
prepare_data(context, urls, sources.get_data())
plot(context, sources)
# store current state
store_object(context, sources)
main()
| 29.550201 | 106 | 0.622316 | 977 | 7,358 | 4.537359 | 0.323439 | 0.016242 | 0.012633 | 0.007219 | 0.175953 | 0.13219 | 0.122716 | 0.076246 | 0.060005 | 0.034288 | 0 | 0.017534 | 0.255912 | 7,358 | 248 | 107 | 29.669355 | 0.792146 | 0.19489 | 0 | 0.140127 | 0 | 0 | 0.136015 | 0.024715 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095541 | false | 0.006369 | 0.057325 | 0.031847 | 0.216561 | 0.019108 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8b8d73f77120870f06a98b9ae8f9a0f38fd193a | 2,464 | py | Python | createSchemaFile.py | WoodyRini/HockeyDB | 6b877fc5a16684bf8f0dcd9ce3de5327bf80dd45 | [
"MIT"
] | null | null | null | createSchemaFile.py | WoodyRini/HockeyDB | 6b877fc5a16684bf8f0dcd9ce3de5327bf80dd45 | [
"MIT"
] | null | null | null | createSchemaFile.py | WoodyRini/HockeyDB | 6b877fc5a16684bf8f0dcd9ce3de5327bf80dd45 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@Created by Woody Rini 2017.
A Python file to create mySQL Schemas for any customized set of tables from a database.
1) Download and Extract to your directory the desired CSV files from http://www.opensourcesports.com/ (I used hockey)
2) Copy this file to the same directory as those CSV files.
3) Modify the "tableNames" variable below to list the tables you'd like to create the schema file for, and the "schema" variable to your schema name.
4) Run this script
5) Manually modify the data type (script automatically creates varchar(30)) for each column - choose based on your own logic.
6) Add your key contraints and not nulls as you see fit
7) Run the script in mySQL to create your Schema and Tables!
Future functionality: Read the entire table in, profile the data in python, and make type and character length recommendations based on that? This would be critical to resolve bugs for example, sorting by # of wins, because as a character wins will be sorted differently!!
"""
import csv
def main():
## open a new file to store our generated schema file - modify name as desired for a different db.
schema = "hockeydb"
schemaFile = open(schema + "SchemaFile.txt","w")
## MODIFY THE LINE BELOW - this is the list of tables you want to include in the schema.
tableNames = ["Master","AwardsPlayers","Coaches"]
## begin creating a text block that will become our full schema creation SQL statement.
schemaFileText = "Create Schema `" + schema + "` \n"
## iterate through the tables we want to create. For each one, use the column names from the first row and python text formatting to make a "Create Table" file.
for i in range(len(tableNames)):
schemaFileText += "Create Table `" + schema + "`.`" + tableNames[i] + "` ("
with open(tableNames[i] + ".csv", 'rU') as csvFile:
reader = csv.reader(csvFile)
columns = next(reader)
for i in range( len(columns)):
columns[i] += ' varchar(30), '
for column in columns:
schemaFileText+= column
## at the end of each table we have to clip the ", " from the last element, requiring removing 2 characters. We also add proper statement end syntax.
schemaFileText = schemaFileText[:-2] + ");\n"
## out of the loop: we're through each table and want to write back to the file.
schemaFile.write(schemaFileText)
main()
| 53.565217 | 272 | 0.688718 | 367 | 2,464 | 4.623978 | 0.474114 | 0.018857 | 0.014143 | 0.012964 | 0.0165 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009479 | 0.229302 | 2,464 | 45 | 273 | 54.755556 | 0.88415 | 0.671266 | 0 | 0 | 0 | 0 | 0.141952 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.055556 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8baedb648051bdee194528a7d9e2457ce47aff3 | 4,387 | py | Python | perma_web/perma/settings/deployments/settings_heroku.py | peterk/perma | 77ba552daf8eac38f815f884d6f6c37fb227db1a | [
"Unlicense",
"MIT"
] | null | null | null | perma_web/perma/settings/deployments/settings_heroku.py | peterk/perma | 77ba552daf8eac38f815f884d6f6c37fb227db1a | [
"Unlicense",
"MIT"
] | null | null | null | perma_web/perma/settings/deployments/settings_heroku.py | peterk/perma | 77ba552daf8eac38f815f884d6f6c37fb227db1a | [
"Unlicense",
"MIT"
] | null | null | null | # this file will be moved by heroku-buildpack-perma to ../settings.py
from .deployments.settings_prod import *
# ###########
# # ROLLBAR #
# ###########
#
# # Backend
# MIDDLEWARE_CLASSES += ('rollbar.contrib.django.middleware.RollbarNotifierMiddleware',)
# ROLLBAR = {
# 'access_token': os.environ.get('ROLLBAR_ACCESS_TOKEN'),
# 'environment': 'development' if DEBUG else 'production',
# 'branch': os.environ.get('GIT_BRANCH'),
# 'root': '/app',
# }
#
# # Frontend
# ROLLBAR_CLIENT_ACCESS_TOKEN = os.environ.get('ROLLBAR_CLIENT_ACCESS_TOKEN')
# TEMPLATE_VISIBLE_SETTINGS += ('ROLLBAR_CLIENT_ACCESS_TOKEN',)
#
# # Logging - enables celery error reporting
# LOGGING['handlers']['rollbar'] = {
# 'level': 'ERROR',
# 'filters': ['require_debug_false'],
# 'access_token': os.environ.get('ROLLBAR_ACCESS_TOKEN'),
# 'environment': 'development' if DEBUG else 'production',
# 'class': 'rollbar.logger.RollbarHandler'
# }
# LOGGING['loggers']['']['handlers'] += ['rollbar']
# Parse database configuration from env
import dj_database_url
if os.environ.get('DEFAULT_DATABASE_URL', False):
DATABASES['default'] = dj_database_url.config('DEFAULT_DATABASE_URL')
if os.environ.get('CDXLINE_DATABASE_URL', False):
DATABASES['perma-cdxline'] = dj_database_url.config('CDXLINE_DATABASE_URL')
if os.environ.get('USE_RDS_CERT', False):
DATABASES['default']['OPTIONS'] = DATABASES['perma-cdxline']['OPTIONS'] = {'ssl': {'ca': os.path.join(PROJECT_ROOT, 'amazon-rds-combined-ca-bundle.pem')}}
# Allow all host headers
# TODO: this is from Heroku's getting started with Django page -- is there a safer way?
ALLOWED_HOSTS = ['*']
DEFAULT_FILE_STORAGE = 'perma.storage_backends.S3MediaStorage'
# message passing
# settings via https://www.cloudamqp.com/docs/celery.html
BROKER_POOL_LIMIT=1
BROKER_URL = os.environ.get('CLOUDAMQP_URL')
BROKER_CONNECTION_TIMEOUT = 30
BROKER_HEARTBEAT = 30
CELERY_SEND_EVENTS = False # on the free CloudAMQP plan, celery events rapidly eat up our monthly message quota
CELERY_RESULT_BACKEND = os.environ.get('REDISCLOUD_URL')
CELERYD_HIJACK_ROOT_LOGGER = False
# logging
LOGGING['handlers']['default'] = {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'standard',
}
ADMINS = (
(os.environ.get('ADMIN_NAME', 'Your Name'), os.environ.get('ADMIN_EMAIL', 'your_email@example.com')),
)
# these are relative to the S3 bucket
MEDIA_ROOT = '/generated/'
# AWS storage settings
AWS_QUERYSTRING_AUTH = False
# archive creation
PHANTOMJS_LOG = 'phantomjs.log' # this will just get thrown away
# caching
CACHES['default']['LOCATION'] = os.environ.get('REDISCLOUD_URL')
# thumbnail redis server
# override defaults https://sorl-thumbnail.readthedocs.io/en/latest/reference/settings.html#thumbnail-redis-host
from urllib.parse import urlparse
_parsed_redis_url = urlparse(os.environ.get('REDISCLOUD_URL'))
THUMBNAIL_REDIS_PASSWORD = _parsed_redis_url.password
THUMBNAIL_REDIS_HOST = _parsed_redis_url.hostname
THUMBNAIL_REDIS_PORT = _parsed_redis_url.port
### OVERRIDE THESE WITH ENV VARS ###
# The host we want to display (used when DEBUG=False)
HOST = 'perma.cc'
# Amazon storage
AWS_ACCESS_KEY_ID = ''
AWS_SECRET_ACCESS_KEY = ''
AWS_STORAGE_BUCKET_NAME = ''
MEDIA_URL = 'http://BUCKET_NAME.s3.amazonaws.com/media/'
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/1.3/ref/settings/#email-backend
# EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# See: https://docs.djangoproject.com/en/1.3/ref/settings/#email-host
# EMAIL_HOST = environ.get('EMAIL_HOST', 'smtp.gmail.com')
# See: https://docs.djangoproject.com/en/1.3/ref/settings/#email-host-password
# EMAIL_HOST_PASSWORD = environ.get('EMAIL_HOST_PASSWORD', '')
# See: https://docs.djangoproject.com/en/1.3/ref/settings/#email-host-user
# EMAIL_HOST_USER = environ.get('EMAIL_HOST_USER', 'your_email@example.com')
# See: https://docs.djangoproject.com/en/1.3/ref/settings/#email-port
# EMAIL_PORT = environ.get('EMAIL_PORT', 587)
# See: https://docs.djangoproject.com/en/1.3/ref/settings/#email-subject-prefix
# EMAIL_SUBJECT_PREFIX = '[%s] ' % SITE_NAME
# See: https://docs.djangoproject.com/en/1.3/ref/settings/#email-use-tls
# EMAIL_USE_TLS = True
# See: https://docs.djangoproject.com/en/1.3/ref/settings/#server-email
# SERVER_EMAIL = EMAIL_HOST_USER
########## END EMAIL CONFIGURATION
| 35.096 | 158 | 0.733075 | 589 | 4,387 | 5.259762 | 0.353141 | 0.054874 | 0.050355 | 0.064558 | 0.249516 | 0.241446 | 0.207553 | 0.182376 | 0.182376 | 0.182376 | 0 | 0.006907 | 0.108958 | 4,387 | 124 | 159 | 35.379032 | 0.785623 | 0.578527 | 0 | 0 | 0 | 0 | 0.280834 | 0.065431 | 0 | 0 | 0 | 0.008065 | 0 | 1 | 0 | false | 0.025641 | 0.076923 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8bb95a2e914c6cc0eba789eda638852861dff6f | 2,651 | py | Python | vygdb/server.py | NateBu/vygdb | 1caae0fc0fa636bcf4e3c724ba792cb4cb59a093 | [
"MIT"
] | null | null | null | vygdb/server.py | NateBu/vygdb | 1caae0fc0fa636bcf4e3c724ba792cb4cb59a093 | [
"MIT"
] | null | null | null | vygdb/server.py | NateBu/vygdb | 1caae0fc0fa636bcf4e3c724ba792cb4cb59a093 | [
"MIT"
] | null | null | null | from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
import os, logging, threading, subprocess, json
from pathlib import Path
BASEPATH = os.path.dirname(os.path.realpath(__file__))
vygdbpath = os.path.join(BASEPATH,'gdb_client.py')
global THREAD
THREAD = None
def _restart(cmd):
global THREAD
if THREAD is None or not THREAD.is_alive():
THREAD = threading.Thread(target=subprocess.run, daemon=True, args=(cmd,))
THREAD.start()
else:
print('Thread still running', flush=True)
def sendx(self, typ, c):
self.send_response(200)
self.send_header('Content-type',typ)
self.end_headers()
self.wfile.write(c)
def newpath(self,p,k,v):
if p.startswith('/'+k+'/') and '..' not in p:
pp = p.replace('/'+k,v,1)
if os.path.isfile(pp):
if pp.endswith('.html'):
sendx(self, 'text/html', Path(pp).read_text().encode())
elif pp.endswith('.json'):
sendx(self, 'application/json', Path(pp).read_text().encode())
elif pp.endswith('.js'):
sendx(self, 'text/javascript', Path(pp).read_text().encode())
elif pp.endswith('.css'):
sendx(self, 'text/css', Path(pp).read_text().encode())
return True
return False
def server(cmd, port=17173, static=None):
if cmd is None or type(cmd) != list or len(cmd)==0:
return
if static is None: static = {}
static['vygdb'] = os.path.join(BASEPATH, 'main')
class VygdbHttpRequestHandler(SimpleHTTPRequestHandler):
def do_GET(self):
if self.path == '/':
sendx(self, 'text/html', Path(os.path.join(BASEPATH, 'main', 'main.html')).read_text().encode())
elif self.path == '/start_gdb':
_restart(['gdb', '--silent', '-x', vygdbpath, '--args']+cmd)
self.send_response(200)
self.end_headers()
else:
for k,v in static.items():
if newpath(self,self.path,k,v): return
self.send_response(404)
self.end_headers()
def do_POST(self):
if 'top' in static and self.path == '/top/vygdb_actions.json':
try:
x = self.rfile.read(int(self.headers["Content-Length"])).decode('utf-8')
x = json.dumps(json.loads(x), sort_keys=True, indent=2)
Path(os.path.join(static['top'], 'vygdb_actions.json')).write_text(x)
except Exception as exc:
print('fail',exc,flush=True)
self.send_response(200)
self.end_headers()
print('Serving vygdb on http://localhost:{p}'.format(p=port),flush=True)
TCPServer.allow_reuse_address = True
with TCPServer(("", port), VygdbHttpRequestHandler) as httpd:
httpd.serve_forever()
if __name__ == '__main__':
server() | 35.346667 | 104 | 0.64504 | 370 | 2,651 | 4.513514 | 0.356757 | 0.02515 | 0.041916 | 0.033533 | 0.175449 | 0.100599 | 0.100599 | 0.061078 | 0 | 0 | 0 | 0.009772 | 0.189363 | 2,651 | 75 | 105 | 35.346667 | 0.767334 | 0 | 0 | 0.161765 | 0 | 0 | 0.109729 | 0.008673 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088235 | false | 0 | 0.058824 | 0 | 0.205882 | 0.044118 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8be05ad0df28afe2d5d3e2dc19187c68fb22571 | 618 | py | Python | app/server/app/tests/test_size_calculate.py | SweetCase-Cobalto/MicroCloudChip-NATURAL | 74dd2e9d423bd1f43b35f98fe1102364d9c08f46 | [
"MIT"
] | null | null | null | app/server/app/tests/test_size_calculate.py | SweetCase-Cobalto/MicroCloudChip-NATURAL | 74dd2e9d423bd1f43b35f98fe1102364d9c08f46 | [
"MIT"
] | null | null | null | app/server/app/tests/test_size_calculate.py | SweetCase-Cobalto/MicroCloudChip-NATURAL | 74dd2e9d423bd1f43b35f98fe1102364d9c08f46 | [
"MIT"
] | null | null | null | from django.test import TestCase
from module.label.file_type import *
class SizeCalculateUnittest(TestCase):
def test_calculate(self):
n1 = 12300 # 12.3KB
n2 = 56004 # 56.004KB
b1 = FileVolumeType.get_file_volume_type(n1)
b2 = FileVolumeType.get_file_volume_type(n2)
self.assertEqual(b2, (FileVolumeType.KB, 56.004))
self.assertEqual(b1, (FileVolumeType.KB, 12.3))
b = FileVolumeType.add(b1, b2)
s = FileVolumeType.sub(b2, b1)
self.assertEqual(b, (FileVolumeType.KB, 68.304))
self.assertEqual(s, (FileVolumeType.KB, 43.704))
| 28.090909 | 57 | 0.658576 | 78 | 618 | 5.115385 | 0.5 | 0.150376 | 0.105263 | 0.135338 | 0.155388 | 0 | 0 | 0 | 0 | 0 | 0 | 0.100629 | 0.228155 | 618 | 21 | 58 | 29.428571 | 0.735849 | 0.024272 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.285714 | 1 | 0.071429 | false | 0 | 0.142857 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8c33b9d195526d1925f6605c9b18a99bc8d987d | 1,642 | py | Python | Login/app.py | vgeorgo/courses-python-udemy-create-websites-using-flask | 34f0a789402f4dabfdbf87272fc823979b3af313 | [
"MIT"
] | 1 | 2021-01-05T19:26:07.000Z | 2021-01-05T19:26:07.000Z | Login/app.py | vgeorgo/courses-python-udemy-create-websites-using-flask | 34f0a789402f4dabfdbf87272fc823979b3af313 | [
"MIT"
] | null | null | null | Login/app.py | vgeorgo/courses-python-udemy-create-websites-using-flask | 34f0a789402f4dabfdbf87272fc823979b3af313 | [
"MIT"
] | null | null | null | from myproject import app,db
from flask import render_template,redirect,request,url_for,flash,abort
from flask_login import login_user,login_required,logout_user
from myproject.models import User
from myproject.forms import LoginForm,RegistrationForm
@app.route('/')
def index():
return render_template('home.html')
@app.route('/register',methods=['GET','POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data,
username=form.username.data,
password=form.password.data)
db.session.add(user)
db.session.commit()
flash('Thanks for registration!')
return redirect(url_for('login'))
return render_template('register.html',form=form)
@app.route('/login',methods=['GET','POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.check_password(form.password.data):
login_user(user)
flash('Logged in successfully!')
next = request.args.get('next')
if next == None or not next[0]=='/':
next = url_for('welcome_user')
return redirect(next)
return render_template('login.html',form=form)
@app.route('/logout')
@login_required
def logout():
logout_user()
flash('You logged out!')
return redirect(url_for('index'))
@app.route('/welcome')
@login_required
def welcome_user():
return render_template('welcome_user.html')
if __name__ == '__main__':
app.run(debug=True)
| 28.807018 | 72 | 0.658952 | 208 | 1,642 | 5.038462 | 0.336538 | 0.066794 | 0.076336 | 0.032443 | 0.09542 | 0.057252 | 0.057252 | 0 | 0 | 0 | 0 | 0.000767 | 0.205847 | 1,642 | 56 | 73 | 29.321429 | 0.802914 | 0 | 0 | 0.088889 | 0 | 0 | 0.116322 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0.044444 | 0.111111 | 0.044444 | 0.377778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8c46633c54a5afc3ddfbc003f884e8ea7d065f2 | 2,949 | py | Python | compartor/display.py | zechnerlab/Compartor | 93c1b0752b6fdfffddd4f1ac6b9631729eae9a95 | [
"BSD-2-Clause"
] | 1 | 2021-02-10T15:56:02.000Z | 2021-02-10T15:56:02.000Z | compartor/display.py | zechnerlab/Compartor | 93c1b0752b6fdfffddd4f1ac6b9631729eae9a95 | [
"BSD-2-Clause"
] | null | null | null | compartor/display.py | zechnerlab/Compartor | 93c1b0752b6fdfffddd4f1ac6b9631729eae9a95 | [
"BSD-2-Clause"
] | 1 | 2021-12-05T11:24:22.000Z | 2021-12-05T11:24:22.000Z | from sympy import Eq, Symbol, Derivative, Basic
from compartor.compartments import Moment, Expectation, _getNumSpecies, get_missing_moments
from IPython.core.display import display, Markdown
###################################################
#
# Displaying reaction networks and moment equations
# in jupyter notebooks
#
###################################################
# -------------------------------------------------
def display_propensity_details(transition_class, name=None):
k, g, pi = transition_class.k, transition_class.g, transition_class.pi
if name is None:
name = transition_class.name
if name is None:
name = ''
display(Eq(Symbol('k_{' + name + '}'), k, evaluate=False))
display(Eq(Symbol('g_{' + name + '}'), g, evaluate=False))
display(Eq(Symbol('\pi_{' + name + '}'), pi.expr, evaluate=False))
# -------------------------------------------------
def display_transition_classes(transitions):
class Display(Basic):
def __new__(cls, transitions):
obj = Basic.__new__(cls)
obj.transitions = transitions
return obj
def __str__(self):
ll = []
for t in self.transitions:
ll.append(str(t.transition) + ", " + t._propensity_str(name=t.name))
return "\n".join(ll)
def _sympystr(self, printer=None):
return 'Display._sympystr: TODO'
def _latex(self, printer=None):
ll = []
for t in self.transitions:
tl = t.transition._latex(printer, align=True, name=t.name)
pl = t._propensity_latex(printer, name=t.name)
ll.append(r"%s && %s" % (tl, pl))
return r"\begin{align} %s \end{align}" % r"\\".join(ll)
display(Display(transitions))
# -------------------------------------------------
def display_moment_equation(expr_moment, expr_dfMdt):
"""
:param expr_moment: lhs of evolution equation
:param expr_dfMdt: rhs of evolution equation
"""
D = _getNumSpecies(expr_moment)
lhs = Derivative(Expectation(expr_moment), Symbol('t'))
rhs = expr_dfMdt
evolution = Eq(lhs, rhs, evaluate=False)
if not D is None:
evolution = evolution.subs(Moment(*([0]*D)),Symbol('N'))
display(evolution)
# -------------------------------------------------
def display_moment_equations(equations, print_missing=True):
for (fM, dfMdt) in equations:
display_moment_equation(fM, dfMdt)
if print_missing:
missing = get_missing_moments(equations)
if missing:
display(Markdown('**The system is not closed!** Moment equations are missing for:'))
display(missing)
# -------------------------------------------------
def display_closures(closures):
for m, c in closures:
D = _getNumSpecies(m)
display(Eq(Expectation(m), c, evaluate=False).subs(Moment(*([0] * D)), Symbol('N')))
| 35.53012 | 96 | 0.552391 | 318 | 2,949 | 4.949686 | 0.27673 | 0.031766 | 0.02859 | 0.015248 | 0.109276 | 0.053367 | 0 | 0 | 0 | 0 | 0 | 0.000869 | 0.219396 | 2,949 | 82 | 97 | 35.963415 | 0.682884 | 0.139708 | 0 | 0.113208 | 0 | 0 | 0.060656 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.169811 | false | 0 | 0.056604 | 0.018868 | 0.320755 | 0.113208 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8c6e2d1f3c9e7fe0a12d7a96c53d13940785e8e | 2,603 | py | Python | certstream_analytics/storages/elasticsearch_storage.py | huydhn/certstream-analytics | ed4dced6fcc399ef02be2c03754e49018623785b | [
"MIT"
] | 10 | 2019-04-27T17:24:14.000Z | 2021-01-21T01:30:39.000Z | certstream_analytics/storages/elasticsearch_storage.py | huydhn/certstream-analytics | ed4dced6fcc399ef02be2c03754e49018623785b | [
"MIT"
] | null | null | null | certstream_analytics/storages/elasticsearch_storage.py | huydhn/certstream-analytics | ed4dced6fcc399ef02be2c03754e49018623785b | [
"MIT"
] | 1 | 2019-09-16T13:07:12.000Z | 2019-09-16T13:07:12.000Z | """
Save certstream data into Elasticsearch so that it can be queried by Kibana
later on.
"""
from datetime import datetime
from elasticsearch_dsl import connections, analyzer
from elasticsearch_dsl import Document, Date, Text, Keyword
from .base import Storage
ANALYZER = analyzer('standard_analyzer',
tokenizer='standard_tokenizer',
filter=['lowercase'])
# pylint: disable=too-few-public-methods
class ElasticsearchStorage(Storage):
"""
An experiment Elasticsearch storage to keep and index the received records.
"""
class Record(Document):
"""
An Elasticsearch record as it is.
"""
timestamp = Date(default_timezone='UTC')
# As reported by certstream
seen = Date(default_timezone='UTC')
# The domain time to live
not_before = Date(default_timezone='UTC')
not_after = Date(default_timezone='UTC')
# The domain and its alternative names
domain = Text(analyzer=ANALYZER, fields={'raw': Keyword()})
san = Text(analyzer=ANALYZER, fields={'raw': Keyword()})
# The issuer
chain = Text(analyzer=ANALYZER, fields={'raw': Keyword()})
class Index:
"""
Use daily indices.
"""
name = 'certstream-*'
# pylint: disable=arguments-differ
def save(self, **kwargs):
"""
Magically save the record in Elasticsearch.
"""
self.timestamp = datetime.now()
# Override the index to go to the proper timeslot
kwargs['index'] = self.timestamp.strftime('certstream-%Y.%m.%d')
return super().save(**kwargs)
def __init__(self, hosts, timeout=10):
"""
Provide the Elasticsearch hostname (Defaults to localhost).
"""
connections.create_connection(hosts=hosts, timeout=timeout)
def save(self, record):
"""
Save the certstream record in Elasticsearch.
"""
elasticsearch_record = ElasticsearchStorage.Record(meta={'id': record['cert_index']})
# In miliseconds
elasticsearch_record.seen = int(record['seen'] * 1000)
elasticsearch_record.not_before = int(record['not_before'] * 1000)
elasticsearch_record.not_after = int(record['not_after'] * 1000)
# Elasticsearch will parse and index the domain and all its alternative names
elasticsearch_record.domain = record['all_domains'][0]
elasticsearch_record.san = record['all_domains'][1:]
elasticsearch_record.save()
| 32.5375 | 93 | 0.623896 | 277 | 2,603 | 5.758123 | 0.415162 | 0.095298 | 0.047649 | 0.055172 | 0.106583 | 0.106583 | 0 | 0 | 0 | 0 | 0 | 0.008425 | 0.270457 | 2,603 | 79 | 94 | 32.949367 | 0.83149 | 0.258548 | 0 | 0 | 0 | 0 | 0.089367 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.125 | 0 | 0.34375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8ca72169168ed0111c9c4000a331242e58a6dc2 | 5,772 | py | Python | chartLedger.py | JoaoP-Rodrigues/test_deep_esg | 375eef8697f1984bff5187db7e4dc879b029e8a3 | [
"MIT"
] | null | null | null | chartLedger.py | JoaoP-Rodrigues/test_deep_esg | 375eef8697f1984bff5187db7e4dc879b029e8a3 | [
"MIT"
] | null | null | null | chartLedger.py | JoaoP-Rodrigues/test_deep_esg | 375eef8697f1984bff5187db7e4dc879b029e8a3 | [
"MIT"
] | null | null | null | #imports
from re import search
from openpyxl import load_workbook, Workbook
#function fillChart that will fill de chart file with values from ledger
def fillChart(chart, ledger):
"""
fillChart Function
This function needs two parameters, both required and both must be a Excel ou similar file
------------------------------------------
First Parameter
---> chart
It must contain a single column with the numbers of the charts it will search
Second Parameter
---> ledger
It mus contain two columns:
A Column - Number of charts
B Column - Value in the chart
------------------------------------------
Operation
------------------------------------------
---> The Search
The function will take the input files and assign both in a variable for each one.
Next, the function will get a value from "sheetchart" variable and searches this value in the first column from "sheetLedger" variable.
If the values are equals, it get a value from respective row, but from second column and add in the "valueBColumn" variable.
This variable will be assign in the output file.
------------------------------------------
---> The combination of Values
The second part of the code will combine values from the same branch of the tree.
First, the code looks for cells where the values are equal to zero (if different, it skips to the next iteration).
Then store that value and string length in separate variables.
The value will be the search key for sub-values, and the length will be the limiter to not get values from different branches.
"""
#created a .XLSX file to fill with new datas
out_chart_of_accounts = Workbook()
out_plan1 = out_chart_of_accounts.active
out_plan1.title = 'Ex_Chart_Accounts'
#create the header from output file
out_plan1.cell(row=1, column=1, value='account')
out_plan1.cell(row=1, column=2, value='value')
sheetChart = chart.active #activated the sheet from origin file chart_of_accounts and assing to the sheet1 variable.
sheetLedger = ledger.active
maxrowChart = sheetChart.max_row #take the last row from sheet Chart
maxrowLedger = sheetLedger.max_row #take the last row from sheet Ledger
#first loop. Enter in the chart_of_accounts file to get a value to search
for i in range(2, maxrowChart+1):
valueBColumn = float(0.0)
searchValue = sheetChart.cell(row=i, column=1).value #value that will be searched in the "ledger" file
if searchValue == None: #Jump the remaining loop if get a empty cell (generally is in the end of file)
continue
#Second loop. Enter in the general_ledger file to search and sum values from var "searchValue"
for j in range(2, maxrowLedger+1):
valueCh = sheetLedger.cell(row=j, column=1).value #get chart name
valueLe = sheetLedger.cell(row=j, column=2).value #get chart value
try:
valueLeFl = round(float(valueLe), 2) #convert str to float
#if the values are equal, increment in the var valueBColumn
if valueCh == searchValue:
valueBColumn += valueLeFl
except:
#Probable error when converting to float
continue
try: #write values from columns A and B in the output file, with a ERROR test
out_plan1.cell(row=i, column=1, value=searchValue)
out_plan1.cell(row=i, column=2, value=valueBColumn)
except:
print('Error! Impossible save the file!')
#Second part! Combination of values
#-------------------------------------------------------------
max_rowOut = out_plan1.max_row #take the last row from sheet out_plan1
#first loop. It get a first value equal zero, and search subvalues to add.
for i in range(2, max_rowOut+1):
valueOutV = out_plan1.cell(row=i, column=2).value
if valueOutV != 0: #if the value from B column not be zero, it jump the loop
continue
else:
valueOutC = out_plan1.cell(row=i, column=1).value #value that will be used to get subvalues
newSum = 0.0
lenGetValue = len(valueOutC) #get a length from origin value. It will be a paramenter for limit of subvalues
#Second loop. This will search for subvalues
for j in range(2, max_rowOut+1):
tempC = out_plan1.cell(row=j, column=1).value
try:
tempV = round(float(out_plan1.cell(row=j, column=2).value), 2)
#if the subvalue equals search value, this will be add to var 'newSum'
if valueOutC == tempC[:lenGetValue]:
newSum += tempV
except:
#Probable error when converting to float
continue
#write the newSum value in the output file
out_plan1.cell(row=i, column=2, value=newSum)
#save the output file in the "output" diretory and close it
try:
out_chart_of_accounts.save('output/out_chart_of_accounts.xlsx')
out_chart_of_accounts.close()
except:
print('Error! Unable to save file. Check write permission for the folder!')
#RETURN
#None file will be returned. The new file will be saved in the "output" diretory
#load files from input diretory
try:
chart_of_accounts = load_workbook('input/chart_of_accounts.xlsx')
general_ledger = load_workbook('input/general_ledger.xlsx')
fillChart(chart_of_accounts, general_ledger)
except:
print('Error! Unable to load files!')
| 44.061069 | 144 | 0.626473 | 785 | 5,772 | 4.538854 | 0.238217 | 0.018243 | 0.042099 | 0.037889 | 0.188324 | 0.159697 | 0.104687 | 0.104687 | 0.019646 | 0.019646 | 0 | 0.010255 | 0.273562 | 5,772 | 130 | 145 | 44.4 | 0.839494 | 0.51438 | 0 | 0.233333 | 0 | 0 | 0.090738 | 0.03238 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016667 | false | 0 | 0.033333 | 0 | 0.05 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8cb12a36a7eac3b908ea8f664bc63aee976f4c2 | 9,638 | py | Python | kaa/filetype/default/modebase.py | atsuoishimoto/kaaedit | 5233fdb70a04783c6513a5ec339452450e62e995 | [
"Unlicense"
] | 1 | 2015-11-04T13:37:08.000Z | 2015-11-04T13:37:08.000Z | kaa/filetype/default/modebase.py | atsuoishimoto/kaaedit | 5233fdb70a04783c6513a5ec339452450e62e995 | [
"Unlicense"
] | null | null | null | kaa/filetype/default/modebase.py | atsuoishimoto/kaaedit | 5233fdb70a04783c6513a5ec339452450e62e995 | [
"Unlicense"
] | null | null | null | import itertools, unicodedata
import gappedbuf.re
import kaa
from kaa import keyboard, editmode
import kaa.log
from kaa import highlight
from kaa import theme
from kaa import screen
class SearchOption:
    """Options for a text search: the pattern text and its matching flags."""

    def __init__(self):
        self.text = ''            # search string (literal text or regex source)
        self.ignorecase = True    # case-insensitive matching
        self.word = False         # match whole words only
        self.regex = False        # treat self.text as a regular expression

    def get_regex(self):
        """Build and return a compiled gapped-buffer regex for this search.

        When ``regex`` is off, the text is escaped so it matches literally;
        when ``word`` is on, the pattern is wrapped in ``\\b`` word boundaries.
        """
        text = self.text
        if not self.regex:
            text = gappedbuf.re.escape(text)
        if self.word:
            text = r'\b' + text + r'\b'

        # Regex flags are bitmasks: combine them with |= rather than +=
        # (addition would corrupt the value if a bit were already set).
        opt = gappedbuf.re.MULTILINE
        if self.ignorecase:
            opt |= gappedbuf.re.IGNORECASE

        return gappedbuf.re.compile(text, opt)


# Module-wide record of the most recent search options.
SearchOption.LAST_SEARCH = SearchOption()
class ModeBase:
    """Base class for editor modes.

    A mode bundles everything that defines how a document is edited and
    displayed: key bindings, command registry, themes/styles, syntax
    highlighting tokenizers and editing helpers (search, indentation,
    word splitting).  Concrete file-type modes override the ``init_*``
    hooks to customise behaviour.
    """

    # Behaviour flags and defaults; overridable per concrete mode.
    CLOSE_ON_DEL_WINDOW = True
    SCREEN_NOWRAP = False
    SCREEN_BUILD_ENTIRE_ROWS = False
    SHOW_LINENO = False
    USE_UNDO = True

    tab_width = 8
    indent_width = 4
    indent_tab = False   # indent with tabs instead of spaces
    auto_indent = True
    closed = False
    theme = None

    def __init__(self):
        # Command registry: command-id -> callable, plus optional
        # availability predicates.
        self.commands = {}
        self.is_availables = {}
        # Separate key maps for insert mode and the vi-style modes.
        self.keybind = keyboard.KeyBind()
        self.keybind_vi_commandmode = keyboard.KeyBind()
        self.keybind_vi_visualmode = keyboard.KeyBind()
        self.keybind_vi_visuallinewisemode = keyboard.KeyBind()
        self.themes = []
        # Subclass hooks populate keybinds/commands/themes/tokenizers.
        self.init_keybind()
        self.init_commands()
        self.init_themes()
        self._build_theme()
        kaa.app.translate_theme(self.theme)
        self.tokenizers = []
        self.init_tokenizers()
        # tokenid -> style; filled by _build_style_map()/get_styleid().
        self.stylemap = {}
        self.highlight = highlight.Highlighter(self.tokenizers)

    def close(self):
        """Release references so this mode (and its document) can be GC'd."""
        self.document = None
        self.closed = True
        self.keybind.clear()
        self.theme = None
        self.commands.clear()
        self.commands = None
        self.is_availables = None
        self.highlight.close()
        self.highlight = None
        self.tokenizers = None
        self.stylemap = None

    def _build_style_map(self):
        """Map every highlighter token id to a resolved theme style.

        Token id 0 is reserved for the 'default' style; tokens without
        a registered Token object fall back to 'default' as well.
        """
        self.stylemap[0] = self.theme.get_style('default')
        for tokenid in self.highlight.tokenids:
            assert tokenid not in self.stylemap
            token = self.highlight.get_token(tokenid)
            if token:
                stylename = token.get_stylename(tokenid)
                style = self.theme.get_style(stylename)
                self.stylemap[tokenid] = style
            else:
                self.stylemap[tokenid] = self.theme.get_style('default')

    def on_set_document(self, document):
        """Attach *document* to this mode: reset styles, build style map."""
        self.document = document
        # Clear all style ints to 0 ('default') before re-highlighting.
        self.document.styles.setints(0, len(self.document.styles), 0)
        self._build_style_map()
        self.document.use_undo(self.USE_UNDO)

    def on_document_updated(self, pos, inslen, dellen):
        """Notify the highlighter of an edit at *pos* (insert/delete lengths)."""
        if self.highlight:
            self.highlight.updated(self.document, pos, inslen, dellen)

    def on_add_window(self, wnd):
        # New windows start in insert mode.
        self.editmode_insert(wnd)

    def register_keys(self, keybind, keys):
        """Add each keymap dict in *keys* to *keybind*."""
        for d in keys:
            keybind.add_keybind(d)

    def init_keybind(self):
        """Hook: populate self.keybind* maps.  Default: nothing."""
        pass

    def init_commands(self):
        """Collect methods decorated with a COMMAND_ID into self.commands."""
        for name in dir(self):
            attr = getattr(self, name)
            if hasattr(attr, 'COMMAND_ID') and callable(attr):
                self.commands[getattr(attr, 'COMMAND_ID')] = attr

    def init_themes(self):
        """Hook: append theme dicts to self.themes.  Default: nothing."""
        pass

    def init_tokenizers(self):
        """Hook: append tokenizers to self.tokenizers.  Default: nothing."""
        pass

    def register_command(self, cmds):
        """Merge a Commands object's commands and availability checks."""
        self.commands.update(cmds.get_commands())
        self.is_availables.update(cmds.get_commands_is_enable())

    def get_command(self, commandid):
        """Return (is_available_predicate_or_None, command_or_None)."""
        is_available = self.is_availables.get(commandid, None)
        cmd = self.commands.get(commandid, None)
        return (is_available, cmd)

    def editmode_insert(self, wnd):
        wnd.set_editmode(editmode.EditMode())

    def editmode_visual(self, wnd):
        wnd.set_editmode(editmode.VisualMode())

    def editmode_visual_linewise(self, wnd):
        wnd.set_editmode(editmode.VisualLinewiseMode())

    def editmode_command(self, wnd):
        wnd.set_editmode(editmode.CommandMode())

    def get_styleid(self, stylename):
        """Return the style id for *stylename*, registering it if unseen.

        'default' is always id 0.  Unknown names get the next free id
        (max existing id + 1, or 1 for an empty map).
        """
        if stylename == 'default':
            return 0
        for styleid, style in self.stylemap.items():
            if style.name == stylename:
                return styleid
        else:
            # for/else: no existing style matched -> allocate a new id.
            if not self.stylemap:
                ret = 1
            else:
                ret = max(self.stylemap.keys())+1
            self.stylemap[ret] = self.theme.get_style(stylename)
            return ret

    def select_theme(self, theme_name, themes):
        """Pick *theme_name* from *themes*, falling back to the app default."""
        theme = themes.get(theme_name, None)
        if theme is None:
            theme = themes[kaa.app.DEFAULT_THEME]
        return theme

    def _build_theme(self):
        """Merge all registered theme dicts into self.theme for the current app theme."""
        theme_name = kaa.app.get_current_theme()
        self.theme = theme.Theme([])
        for t in self.themes:
            self.theme.update(self.select_theme(theme_name, t))

    def get_style(self, tokenid):
        """Style for *tokenid*; unknown ids fall back to 'default'."""
        ret = self.stylemap.get(tokenid, None)
        if ret is None:
            ret = self.theme.get_style('default')
        return ret

    def get_cursor_visibility(self):
        return 1  # normal

    def on_keypressed(self, wnd, event, s, commands, candidate):
        # Hook: subclasses may rewrite the pending keystroke/commands.
        return s, commands, candidate

    def on_str(self, wnd, s):
        """Insert the typed string *s* and record it if a macro is recording."""
        # NOTE(review): self.edit_commands is not assigned in this class;
        # presumably set by a subclass or register_command() — confirm.
        self.edit_commands.put_string(wnd, s)
        if kaa.app.macro.is_recording():
            kaa.app.macro.record_string(s)
        if self.highlight:
            # run highlighter a bit to display changes immediately.
            self.highlight.update_style(self.document, batch=50)

    def on_commands(self, wnd, commandids):
        """Execute *commandids* (a callable or an iterable of command ids).

        Records executed commands for macros, and remembers re-runnable
        ones in kaa.app.lastcommands.  Always clears the edit mode's
        pending repeat count, even on error.
        """
        try:
            if callable(commandids):
                commandids(wnd)
                return

            lastcommands = []
            for commandid in commandids:
                is_available, command = self.get_command(commandid)
                if not command:
                    msg = 'command {!r} is not registered.'.format(commandid)
                    kaa.app.messagebar.set_message(msg)
                    return
                command(wnd)
                if kaa.app.macro.is_recording():
                    kaa.app.macro.record(command)
                # Commands flagged NORERUN are excluded from "re-run last".
                if not getattr(command, 'NORERUN', False):
                    lastcommands.append(commandid)
            if lastcommands:
                kaa.app.lastcommands = lastcommands
        finally:
            wnd.editmode.clear_repeat()

    def on_esc_pressed(self, wnd, event):
        pass

    def on_cursor_located(self, wnd, pos, y, x):
        pass

    def update_charattr(self, wnd):
        """Drop per-character attribute overrides; True if anything changed."""
        if wnd.charattrs:
            wnd.charattrs.clear()
            wnd.screen.style_updated()
            return True

    def on_idle(self):
        # Run background highlighting while the app is idle.
        if self.closed:
            return
        ret = self.run_highlight()
        return ret

    def run_highlight(self):
        """Hook: perform a slice of background highlighting.  Default: nothing."""
        pass

    def _split_chars(self, begin, end):
        """split characters by character category."""
        s = self.document.gettext(begin, end)
        for key, chars in itertools.groupby(s, unicodedata.category):
            chars = ''.join(chars)
            end = begin + len(chars)
            yield begin, end, chars
            begin = end

    # Word-splitting token classes: identifiers, whitespace, newline,
    # and Japanese hiragana/katakana runs (u30fc is the long-vowel mark).
    RE_WORDCHAR = r"(?P<WORDCHAR>[a-zA-Z0-9_]+)"
    RE_WHITESPACE = r"(?P<WHITESPACE>[\t ]+)"
    RE_LF = r"(?P<LF>\n)"
    RE_HIRAGANA = r"(?P<HIRAGANA>[\u3040-\u309f\u30fc]+)"
    RE_KATAKANA = r"(?P<KATAKANA>[\u30a0-\u30ff\u30fc]+)"
    RE_SPLITWORD = gappedbuf.re.compile('|'.join((
        RE_WORDCHAR, RE_WHITESPACE, RE_LF, RE_HIRAGANA, RE_KATAKANA)))

    def split_word(self, begin):
        """yield word in the document until line ends"""
        for m in self.RE_SPLITWORD.finditer(self.document.buf, pos=begin):
            # split unmatched characters by character category.
            f, t = m.span()
            if f != begin:
                yield from self._split_chars(begin, f)
            begin = t
            yield (f, t, m.group())
            # finish if we reach '\n'
            if m.lastgroup == 'LF':
                return
        yield from self._split_chars(begin, self.document.endpos())

    def search_next(self, wnd, pos, searchinfo):
        """Span of the next match at/after *pos*, or None."""
        regex = searchinfo.get_regex()
        pos = min(max(0, pos), self.document.endpos())
        m = regex.search(self.document.buf, pos)
        if m:
            return m.span()

    def search_prev(self, wnd, pos, searchinfo):
        """Span of the last match ending before *pos*, or None."""
        regex = searchinfo.get_regex()
        last = None
        # Scan forward from the start; remember the last match that
        # ends before pos.
        for m in regex.finditer(self.document.buf, 0):
            span = m.span()
            if span[1] >= pos:
                break
            last = span
        return last

    def get_indent_range(self, pos):
        """(start, end) of the leading whitespace on pos's line."""
        tol = self.document.gettol(pos)
        regex = gappedbuf.re.compile(self.RE_WHITESPACE)
        m = regex.match(self.document.buf, tol)
        if m:
            return m.span()
        else:
            return (tol, tol)

    def build_indent_str(self, col):
        """Whitespace string reaching column *col* (tabs+spaces or spaces)."""
        if self.indent_tab:
            ctab = col // self.tab_width
            cspc = col % self.tab_width
            return '\t' * ctab + ' ' * cspc
        else:
            return ' ' * col

    def get_auto_indent(self, pos):
        """Newline plus the current line's indent up to *pos*."""
        f, t = self.get_indent_range(pos)
        indent = self.document.gettext(f, min(pos, t))
        return '\n'+indent

    def calc_cols(self, f, t):
        """Display width (in columns) of the document text in [f, t)."""
        chars = self.document.gettext(f, t)
        (dispchrs, dispcols, positions,
         intervals) = screen.translate_chars(f, chars, self.tab_width)
        return sum(dispcols)
| 28.684524 | 77 | 0.583212 | 1,145 | 9,638 | 4.766812 | 0.20262 | 0.039575 | 0.010993 | 0.015573 | 0.106632 | 0.062294 | 0.030781 | 0.030781 | 0.015024 | 0.015024 | 0 | 0.004847 | 0.315003 | 9,638 | 335 | 78 | 28.770149 | 0.821872 | 0.022619 | 0 | 0.117647 | 0 | 0 | 0.024455 | 0.010526 | 0 | 0 | 0 | 0 | 0.003922 | 1 | 0.156863 | false | 0.023529 | 0.031373 | 0.007843 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8ccb079c2a58221c9df73dc4b39c148bbcfc575 | 2,576 | py | Python | zubhub_backend/zubhub/creators/tasks.py | NdibeRaymond/zubhub | 23907202af4f4f4f85a108ed15e811abb3d22407 | [
"MIT"
] | 1 | 2022-01-21T14:15:24.000Z | 2022-01-21T14:15:24.000Z | zubhub_backend/zubhub/creators/tasks.py | NdibeRaymond/zubhub | 23907202af4f4f4f85a108ed15e811abb3d22407 | [
"MIT"
] | null | null | null | zubhub_backend/zubhub/creators/tasks.py | NdibeRaymond/zubhub | 23907202af4f4f4f85a108ed15e811abb3d22407 | [
"MIT"
] | null | null | null | from celery import shared_task
from celery.decorators import periodic_task
from celery.task.schedules import crontab
from random import uniform
from celery import shared_task
import requests
from zubhub.utils import upload_file_to_media_server
try:
from allauth.account.adapter import get_adapter
except ImportError:
raise ImportError("allauth needs to be added to INSTALLED_APPS.")
@shared_task(name="creators.tasks.send_text", bind=True, acks_late=True, max_retries=10)
def send_text(self, phone, template_name, ctx):
try:
get_adapter().send_text(template_name, phone, ctx)
except Exception as e:
raise self.retry(exc=e, countdown=int(
uniform(2, 4) ** self.request.retries))
@shared_task(name="creators.tasks.send_mass_email", bind=True, acks_late=True, max_retries=10)
def send_mass_email(self, template_name, ctxs):
try:
get_adapter().send_mass_email(template_name, ctxs)
except Exception as e:
raise self.retry(exc=e, countdown=int(
uniform(2, 4) ** self.request.retries))
@shared_task(name="creators.tasks.send_mass_text", bind=True, acks_late=True, max_retries=10)
def send_mass_text(self, template_name, ctxs):
try:
get_adapter().send_mass_text(template_name, ctxs)
except Exception as e:
raise self.retry(exc=e, countdown=int(
uniform(2, 4) ** self.request.retries))
@shared_task(name="creators.tasks.upload_file_task", bind=True, acks_late=True, max_retries=10)
def upload_file_task(self, user_id, username):
from creators.models import Creator
creator = Creator.objects.filter(id=user_id)
key = 'avatar/{0}'.format(username)
try:
res = requests.get(creator[0].avatar)
res = upload_file_to_media_server(res.content, key)
res = res.json()
res = res["url"]
if isinstance(res, str):
creator.update(avatar=res)
else:
raise Exception()
except Exception as e:
raise self.retry(exc=e, countdown=int(
uniform(2, 4) ** self.request.retries))
@periodic_task(run_every=(crontab(hour=0, minute=0)), name="creators.tasks.activity_notification_task",
bind=True, acks_late=True, max_retries=10, ignore_result=True)
def activity_notification_task(self):
from creators.utils import activity_notification
try:
activity_notification(["new_creators", "new_projects", "new_comments"])
except Exception as e:
raise self.retry(exc=e, countdown=int(
uniform(2, 4) ** self.request.retries))
| 34.346667 | 103 | 0.701475 | 360 | 2,576 | 4.827778 | 0.258333 | 0.034522 | 0.048907 | 0.04603 | 0.53107 | 0.474684 | 0.456847 | 0.456847 | 0.456847 | 0.366513 | 0 | 0.011483 | 0.188665 | 2,576 | 74 | 104 | 34.810811 | 0.820096 | 0 | 0 | 0.396552 | 0 | 0 | 0.096273 | 0.060171 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086207 | false | 0 | 0.206897 | 0 | 0.293103 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8d207cec663ba5ba5150e365e8258291409ccac | 6,147 | py | Python | gym_like/envs/card_game/easy_21.py | WildeLau/gym-like | 4abd9e4e030d386ce2313d0706f1f692210d3387 | [
"MIT"
] | null | null | null | gym_like/envs/card_game/easy_21.py | WildeLau/gym-like | 4abd9e4e030d386ce2313d0706f1f692210d3387 | [
"MIT"
] | null | null | null | gym_like/envs/card_game/easy_21.py | WildeLau/gym-like | 4abd9e4e030d386ce2313d0706f1f692210d3387 | [
"MIT"
] | null | null | null | # Reference:
# https://github.com/openai/gym/blob/master/gym/envs/toy_text/blackjack.py
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import numpy as np
class Easy21(gym.Env):
    """Gym environment for the "Easy21" card game.

    Assignment from David Silver's RL course, designed for model-free RL:
    http://www0.cs.ucl.ac.uk/staff/d.silver/web/Teaching_files/Easy21-Johannes.pdf

    Rules (infinite deck, cards sampled with replacement):

    * Every card has a value in [2, 10]; it is red (value subtracted)
      with probability ~1/3 or black (value added) with probability ~2/3.
    * Player and dealer each start with one black card.
    * Actions: 0 = stick, 1 = hit.  Hitting draws another card; a sum
      outside [1, 21] is a "bust" and loses the game (reward -1).
    * On stick, the dealer hits until its sum reaches 17 (or it busts).
      If the dealer busts the player wins; otherwise the larger sum
      wins: +1 win, -1 lose, 0 draw.

    Observations are ``(player_sum, dealer_sum)`` tuples.
    """

    def __init__(self):
        self.metadata = {'render.modes': ['ansi']}
        self.action_space = spaces.Discrete(2)
        self.observation_space = spaces.MultiDiscrete([22, 11])
        self.reward_range = [-1, 1]
        # seed() must run before reset(): reset() draws cards.
        self.seed()
        self.score = {"player": None, "dealer": None}
        self.reset()

    def step(self, action):
        """Apply *action* (0=stick, 1=hit); return (obs, reward, done, info)."""
        assert self.action_space.contains(action)
        if action:  # hit: the player draws one more card
            self.score["player"] += self._draw_card()
            reward, done = self._get_reward_and_terminator("player")
        else:  # stick: the dealer draws until reaching 17 (or busting)
            while 1 <= self.score["dealer"] < 17:
                self.score["dealer"] += self._draw_card()
            reward, done = self._get_reward_and_terminator("dealer")
        return self._get_observation(), reward, done, {}

    def reset(self):
        """Start a new episode; return the initial observation."""
        return self._draw_hand()

    def render(self, mode='ansi'):
        """Render the scores as an ANSI string; other modes go to gym.Env."""
        if mode != 'ansi':
            # gym.Env raises NotImplementedError for unsupported modes.
            super(Easy21, self).render(mode=mode)
            return
        state = "Easy21: Player {} vs {} Dealer ".format(
            self.score["player"], self.score["dealer"]
        )
        print(state)
        return state

    def seed(self, seed=None):
        """Seed the internal RNG; return the list of seeds used."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def _draw_hand(self):
        """Deal one black starting card each (player first, then dealer)."""
        for who in ("player", "dealer"):
            self.score[who] = self.np_random.randint(2, 11)
        return self._get_observation()

    def _draw_card(self):
        """Sample one card: value in [2, 10], negative (red) w.p. ~1/3."""
        sign = self.np_random.choice([-1, 1], p=[0.3333, 0.6667])
        value = self.np_random.randint(2, 11)
        return sign * value

    def _get_observation(self):
        """Current state as a (player_sum, dealer_sum) tuple."""
        return self.score["player"], self.score["dealer"]

    def _is_bust(self, score):
        """A hand is bust when its sum leaves the [1, 21] range."""
        return not 1 <= score <= 21

    def _get_reward_and_terminator(self, caller):
        """Score the game for *caller* ('player' or 'dealer').

        Returns (reward, done): after a player hit the episode continues
        unless the player busted; after the dealer's turn the episode
        always ends.
        """
        player = self.score["player"]
        dealer = self.score["dealer"]
        if caller == "player":
            if self._is_bust(player):
                return -1, True
            return 0, False
        # Dealer scoring assumes the player is still in the game.
        assert self._is_bust(player) is False, \
            "Player already goes bust."
        if self._is_bust(dealer):
            return 1, True
        return np.sign(player - dealer), True
# -------test----------
# Ad-hoc smoke test: exercises the public gym API and the private
# helpers, checking result types, reward values and the assertion
# guards.  Order matters: each env.step() consumes RNG draws.
if __name__ == "__main__":
    env = Easy21()
    # An out-of-range action (3) must trip the action_space assertion.
    try:
        list(map(env.step, [env.action_space.sample(), 0, 1, 3]))
    except AssertionError as e:
        print("Invalid action error caught!")
    ob, r, d, _ = env.step(1)
    assert type(ob) is tuple
    assert env.reward_range[0] <= r <= env.reward_range[1]
    assert type(d) is bool
    assert type(env.reset()) is tuple
    # render() only implements 'ansi'; 'human' falls through to gym.Env
    # which raises NotImplementedError.
    try:
        env.render()
        env.render(mode="human")
    except NotImplementedError as e:
        print("Not implemented error caught!")
    assert type(env.seed()) is list
    assert type(env._draw_hand()) is tuple
    assert type(env._draw_card()) is np.int64
    for item in env._get_observation():
        assert type(item) is int
    # Bust boundaries: valid sums are 1..21 inclusive.
    assert env._is_bust(1) is False
    assert env._is_bust(21) is False
    assert env._is_bust(0) is True
    assert env._is_bust(22) is True
    # Reward/termination cases for each caller.
    env.score["player"], env.score["dealer"] = 5, 5
    reward, done = env._get_reward_and_terminator("player")
    assert reward == 0 and done is False
    env.score["player"], env.score["dealer"] = 5, 5
    reward, done = env._get_reward_and_terminator("dealer")
    assert reward == 0 and done is True
    env.score["player"], env.score["dealer"] = 22, 5
    reward, done = env._get_reward_and_terminator("player")
    assert reward == -1 and done is True
    # Dealer scoring must refuse an already-bust player.
    env.score["player"], env.score["dealer"] = 22, 5
    try:
        reward, done = env._get_reward_and_terminator("dealer")
    except AssertionError as e:
        print("Player goes bust error caught!")
    env.score["player"], env.score["dealer"] = 14, 5
    reward, done = env._get_reward_and_terminator("dealer")
    assert reward == 1 and done is True
    env.score["player"], env.score["dealer"] = 14, 19
    reward, done = env._get_reward_and_terminator("dealer")
    assert reward == -1 and done is True
    env.score["player"], env.score["dealer"] = 14, 0
    reward, done = env._get_reward_and_terminator("dealer")
    assert reward == 1 and done is True
    print("All test passed!")
| 33.590164 | 82 | 0.618188 | 867 | 6,147 | 4.258362 | 0.253749 | 0.039003 | 0.032503 | 0.059588 | 0.299837 | 0.281419 | 0.216414 | 0.19312 | 0.18039 | 0.18039 | 0 | 0.024498 | 0.262892 | 6,147 | 182 | 83 | 33.774725 | 0.789892 | 0.230031 | 0 | 0.232759 | 0 | 0 | 0.094189 | 0 | 0 | 0 | 0 | 0 | 0.189655 | 1 | 0.086207 | false | 0.008621 | 0.034483 | 0.025862 | 0.206897 | 0.043103 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8d2107c06314ce841262e75b5e74bfb415faa3c | 3,905 | py | Python | webservices/filters.py | 18F/openFEC | ee7b7368e0934f50c391789fb55444f811c1a2f7 | [
"CC0-1.0"
] | 246 | 2015-01-07T16:59:42.000Z | 2020-01-18T20:35:05.000Z | webservices/filters.py | 18F/openFEC | ee7b7368e0934f50c391789fb55444f811c1a2f7 | [
"CC0-1.0"
] | 2,532 | 2015-01-02T16:22:46.000Z | 2018-03-08T17:30:53.000Z | webservices/filters.py | 18F/openFEC | ee7b7368e0934f50c391789fb55444f811c1a2f7 | [
"CC0-1.0"
] | 75 | 2015-02-01T00:46:56.000Z | 2021-02-14T10:51:34.000Z | import sqlalchemy as sa
from webservices import utils
from webservices import exceptions
from webservices.common import models
def is_exclude_arg(arg):
    """Return True if *arg* is an "exclude" filter value.

    Excluded values are marked with a leading "-": negative integers,
    or strings starting with "-".
    """
    return str(arg)[:1] == '-'
def parse_exclude_arg(arg):
    """Strip the "exclude" marker from *arg*.

    Integers come in negative (return the absolute value); strings come
    in with a leading "-" (return everything after the first character).
    """
    return abs(arg) if isinstance(arg, int) else arg[1:]
def filter_match(query, kwargs, fields):
    """Apply equality filters for scalar arguments.

    *fields* is an iterable of (kwarg_key, column) pairs.  A value with
    the "-" exclude marker filters rows NOT matching the value, while
    keeping NULL rows.
    """
    for key, column in fields:
        value = kwargs.get(key)
        if value is None:
            continue
        if is_exclude_arg(value):
            # Exclude: keep rows that differ OR are NULL
            # (`== None` is SQLAlchemy's IS NULL idiom).
            query = query.filter(
                sa.or_(column != parse_exclude_arg(value), column == None)
            )
        else:
            query = query.filter(column == value)
    return query
def filter_multi(query, kwargs, fields):
    """Apply IN / NOT-IN filters for list-valued arguments.

    Each argument list may mix plain (include) values and "-"-prefixed
    (exclude) values; excluded values are filtered out while NULL rows
    are kept.
    """
    for key, column in fields:
        values = kwargs.get(key)
        if not values:
            continue
        excluded = [parse_exclude_arg(v) for v in values if is_exclude_arg(v)]
        included = [v for v in values if not is_exclude_arg(v)]
        if excluded:
            # NOT IN, but keep NULL rows (`== None` is IS NULL).
            query = query.filter(
                sa.or_(column.notin_(excluded), column == None)
            )
        if included:
            query = query.filter(column.in_(included))
    return query
def filter_range(query, kwargs, fields):
    """Apply min/max range filters (inclusive at both ends).

    *fields* pairs ((min_key, max_key), column); either bound may be
    absent from kwargs.
    """
    for (min_key, max_key), column in fields:
        lower = kwargs.get(min_key)
        upper = kwargs.get(max_key)
        if lower is not None:
            query = query.filter(column >= lower)
        if upper is not None:
            query = query.filter(column <= upper)
    return query
def filter_fulltext(query, kwargs, fields):
    """Apply full-text match filters.

    "-"-prefixed terms are negated (AND of NOT-matches); plain terms
    are OR-ed together.
    """
    for key, column in fields:
        values = kwargs.get(key)
        if not values:
            continue
        excluded = [parse_exclude_arg(v) for v in values if is_exclude_arg(v)]
        included = [v for v in values if not is_exclude_arg(v)]
        if excluded:
            query = query.filter(sa.and_(*(
                sa.not_(column.match(utils.parse_fulltext(v)))
                for v in excluded
            )))
        if included:
            query = query.filter(sa.or_(*(
                column.match(utils.parse_fulltext(v))
                for v in included
            )))
    return query
def filter_contributor_type(query, column, kwargs):
    """Filter on contributor type.

    Individuals are rows whose *column* is 'IND'; committees are
    everything else, including NULL.  Only applies when exactly one of
    the two types is requested.
    """
    requested = kwargs.get('contributor_type')
    if requested == ['individual']:
        return query.filter(column == 'IND')
    if requested == ['committee']:
        # Anything that isn't 'IND', NULL included (IS NULL idiom).
        return query.filter(sa.or_(column != 'IND', column == None))  # noqa
    return query
def filter_election(query, kwargs, candidate_column, cycle_column=None, year_column=None):
    """Restrict *query* to candidates in the election described by kwargs.

    No-op unless an "office" argument is present.  Joins CandidateHistory
    on *candidate_column* and filters by two-year cycle, office, and the
    optional "state" / "district" arguments.
    """
    if not kwargs.get('office'):
        return query
    utils.check_election_arguments(kwargs)
    cycle = get_cycle(kwargs)
    query = query.join(
        models.CandidateHistory,
        candidate_column == models.CandidateHistory.candidate_id,
    ).filter(
        models.CandidateHistory.two_year_period == cycle,
        # Offices are stored as single upper-case letters.
        models.CandidateHistory.office == kwargs['office'][0].upper(),
    )
    for arg in ('state', 'district'):
        if kwargs.get(arg):
            query = query.filter(
                getattr(models.CandidateHistory, arg) == kwargs[arg]
            )
    return query
def get_cycle(kwargs):
    """Return the single requested two-year cycle.

    Accepts either a bare value or a one-element list; raises a 422
    ApiError when a list with anything other than exactly one entry is
    supplied.
    """
    cycle = kwargs['cycle']
    if not isinstance(cycle, list):
        return cycle
    if len(cycle) != 1:
        raise exceptions.ApiError(
            'Must include exactly one argument "cycle"',
            status_code=422,
        )
    return cycle[0]
a8d2347de09a548dd37e87c6aad8168cfdff4c85 | 23,233 | py | Python | tests/test_validation/rules/test_values_of_correct_type.py | johnpaulguzman/py-gql | 5a2d180537218e1c30c65b2a933fb4fe197785ae | [
"MIT"
] | 6 | 2019-04-30T10:48:09.000Z | 2021-08-19T15:57:53.000Z | tests/test_validation/rules/test_values_of_correct_type.py | johnpaulguzman/py-gql | 5a2d180537218e1c30c65b2a933fb4fe197785ae | [
"MIT"
] | 6 | 2019-04-08T12:39:08.000Z | 2020-08-10T15:00:18.000Z | tests/test_validation/rules/test_values_of_correct_type.py | johnpaulguzman/py-gql | 5a2d180537218e1c30c65b2a933fb4fe197785ae | [
"MIT"
] | 2 | 2021-04-14T07:06:15.000Z | 2021-08-19T15:58:46.000Z | # -*- coding: utf-8 -*-
# Tests were adapted from the one in the GraphQLJS reference implementation,
# as our version exits early not all of the expected errors are aplicable but
# they conserved as comments for reference.
# Tests related to suggestion list are kept for reference but skipped as this
# feature is not implemented.
import pytest
from py_gql.validation.rules import ValuesOfCorrectTypeChecker
from .._test_utils import assert_checker_validation_result as run_test
# "Good value" cases: each query uses a literal that is valid for the
# argument's type, so the checker must report no validation errors.
def test_good_int_value(schema):
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
          complicatedArgs {
            intArgField(intArg: 2)
          }
        }
        """,
    )


def test_good_negative_int_value(schema):
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
          complicatedArgs {
            intArgField(intArg: -2)
          }
        }
        """,
    )


def test_good_boolean_value(schema):
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
          complicatedArgs {
            booleanArgField(booleanArg: true)
          }
        }
        """,
    )


def test_good_string_value(schema):
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
          complicatedArgs {
            stringArgField(stringArg: "foo")
          }
        }
        """,
    )


def test_good_float_value(schema):
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
          complicatedArgs {
            floatArgField(floatArg: 1.1)
          }
        }
        """,
    )


def test_good_negative_float_value(schema):
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
          complicatedArgs {
            floatArgField(floatArg: -1.1)
          }
        }
        """,
    )


# Implicit scalar coercions that are valid per the GraphQL spec:
# Int literals coerce into Float and ID; String literals into ID.
def test_int_into_float(schema):
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
          complicatedArgs {
            floatArgField(floatArg: 1)
          }
        }
        """,
    )


def test_int_into_id(schema):
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
          complicatedArgs {
            idArgField(idArg: 1)
          }
        }
        """,
    )


def test_string_into_id(schema):
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
          complicatedArgs {
            idArgField(idArg: "someIdString")
          }
        }
        """,
    )


def test_good_enum_value(schema):
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
          dog {
            doesKnowCommand(dogCommand: SIT)
          }
        }
        """,
    )


# This checker only validates literal/type agreement; whether the enum
# value actually exists is a different rule's concern.
def test_enum_with_undefined_value(schema):
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
          complicatedArgs {
            enumArgField(enumArg: UNKNOWN)
          }
        }
        """,
    )


def test_enum_with_null_value(schema):
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
          complicatedArgs {
            enumArgField(enumArg: NO_FUR)
          }
        }
        """,
    )


# null is a valid literal for any nullable type.
def test_null_into_nullable_type_1(schema):
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
          complicatedArgs {
            intArgField(intArg: null)
          }
        }
        """,
    )


def test_null_into_nullable_type_2(schema):
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
          dog(a: null, b: null, c:{ requiredField: true, intField: null }) {
            name
          }
        }
        """,
    )
# Invalid scalar literals: each case supplies a literal of the wrong
# type and expects a single "Expected type X, found Y" error.
# NOTE(review): the `loc` tuples are (start, end) character offsets into
# the query source, so they depend on the exact whitespace inside the
# triple-quoted strings below — confirm the offsets against the original
# file if a query literal is reformatted.
@pytest.mark.parametrize(
    "value,expected_err,loc",
    [
        pytest.param(
            "1", "Expected type String, found 1", (54, 55), id="int -> string"
        ),
        pytest.param(
            "1.0",
            "Expected type String, found 1.0",
            (54, 57),
            id="float -> string",
        ),
        pytest.param(
            "true",
            "Expected type String, found true",
            (54, 58),
            id="bool -> string",
        ),
        pytest.param(
            "BAR",
            "Expected type String, found BAR",
            (54, 57),
            id="enum -> string",
        ),
    ],
)
def test_invalid_string_values(schema, value, expected_err, loc):
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
          complicatedArgs {
            stringArgField(stringArg: %s)
          }
        }
        """
        % value,
        [expected_err],
        [loc],
    )


@pytest.mark.parametrize(
    "value,expected_err,loc",
    [
        pytest.param(
            '"3"', 'Expected type Int, found "3"', (48, 51), id="string -> int"
        ),
        # Exceeds the 32-bit Int range defined by the GraphQL spec.
        pytest.param(
            "829384293849283498239482938",
            "Expected type Int, found 829384293849283498239482938",
            (48, 75),
            id="big int -> int",
        ),
        pytest.param(
            "FOO", "Expected type Int, found FOO", (48, 51), id="enum -> int"
        ),
        pytest.param(
            "3.0", "Expected type Int, found 3.0", (48, 51), id="float -> int"
        ),
        pytest.param(
            "true", "Expected type Int, found true", (48, 52), id="bool -> int"
        ),
        pytest.param(
            "3.333",
            "Expected type Int, found 3.333",
            (48, 53),
            id="float -> int",
        ),
    ],
)
def test_invalid_int_values(schema, value, expected_err, loc):
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
          complicatedArgs {
            intArgField(intArg: %s)
          }
        }
        """
        % value,
        [expected_err],
        [loc],
    )


@pytest.mark.parametrize(
    "value,expected_err,loc",
    [
        pytest.param(
            '"3"',
            'Expected type Float, found "3"',
            (52, 55),
            id="string -> float",
        ),
        pytest.param(
            '"3.333"',
            'Expected type Float, found "3.333"',
            (52, 59),
            id="string -> float",
        ),
        pytest.param(
            "true",
            "Expected type Float, found true",
            (52, 56),
            id="bool -> float",
        ),
        pytest.param(
            "FOO",
            "Expected type Float, found FOO",
            (52, 55),
            id="enum -> float",
        ),
    ],
)
def test_invalid_float_values(schema, value, expected_err, loc):
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
          complicatedArgs {
            floatArgField(floatArg: %s)
          }
        }
        """
        % value,
        [expected_err],
        [loc],
    )


@pytest.mark.parametrize(
    "value,expected_err,loc",
    [
        pytest.param(
            "2", "Expected type Boolean, found 2", (56, 57), id="int -> boolean"
        ),
        pytest.param(
            "1.0",
            "Expected type Boolean, found 1.0",
            (56, 59),
            id="float -> boolean",
        ),
        pytest.param(
            '"true"',
            'Expected type Boolean, found "true"',
            (56, 62),
            id="string -> boolean",
        ),
        pytest.param(
            "TRUE",
            "Expected type Boolean, found TRUE",
            (56, 60),
            id="enum -> boolean",
        ),
    ],
)
def test_invalid_boolean_values(schema, value, expected_err, loc):
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
          complicatedArgs {
            booleanArgField(booleanArg: %s)
          }
        }
        """
        % value,
        [expected_err],
        [loc],
    )


@pytest.mark.parametrize(
    "value,expected_err,loc",
    [
        pytest.param(
            "1.0", "Expected type ID, found 1.0", (46, 49), id="float -> ID"
        ),
        pytest.param(
            "true", "Expected type ID, found true", (46, 50), id="boolean -> ID"
        ),
        pytest.param(
            "SOMETHING",
            "Expected type ID, found SOMETHING",
            (46, 55),
            id="enum -> ID",
        ),
    ],
)
def test_invalid_id_values(schema, value, expected_err, loc):
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
          complicatedArgs {
            idArgField(idArg: %s)
          }
        }
        """
        % value,
        [expected_err],
        [loc],
    )


@pytest.mark.parametrize(
    "value,expected_err,loc",
    [
        pytest.param(
            "1", "Expected type DogCommand, found 1", (48, 49), id="int -> enum"
        ),
        pytest.param(
            "1.0",
            "Expected type DogCommand, found 1.0",
            (48, 51),
            id="float -> enum",
        ),
        # Enum values must be enum literals, never quoted strings.
        pytest.param(
            '"SIT"',
            'Expected type DogCommand, found "SIT"',
            (48, 53),
            id="string -> enum",
        ),
        pytest.param(
            "true",
            "Expected type DogCommand, found true",
            (48, 52),
            id="boolean -> enum",
        ),
        pytest.param(
            "JUGGLE",
            "Expected type DogCommand, found JUGGLE",
            (48, 54),
            id="unknown enum -> enum",
        ),
        # Enum values are case-sensitive.
        pytest.param(
            "sit",
            "Expected type DogCommand, found sit",
            (48, 51),
            id="unknown enum (case) -> enum",
        ),
    ],
)
def test_invalid_enum_values(schema, value, expected_err, loc):
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
          dog {
            doesKnowCommand(dogCommand: %s)
          }
        }
        """
        % value,
        [expected_err],
        [loc],
    )
# List input coercion: null, empty lists, and single values coerced
# into one-element lists are all valid per the GraphQL spec.
@pytest.mark.parametrize(
    "value",
    [
        pytest.param('["one", null, "two"]', id="good"),
        pytest.param("[]", id="empty"),
        pytest.param("null", id="null"),
        pytest.param('["one"]', id="single value into list"),
    ],
)
def test_valid_list_value(schema, value):
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
          complicatedArgs {
            stringListArgField(stringListArg: %s)
          }
        }
        """
        % value,
    )


# NOTE(review): the `loc` tuples are character offsets into the query
# source and depend on its exact whitespace — confirm against the
# original file if a query literal is reformatted.
@pytest.mark.parametrize(
    "value,expected_err,loc",
    [
        # The error points at the offending list item, not the list.
        pytest.param(
            '["one", 2]',
            "Expected type String, found 2",
            (70, 71),
            id="incorrect item type",
        ),
        pytest.param(
            "1",
            "Expected type [String], found 1",
            (62, 63),
            id="single value of incorrect type",
        ),
    ],
)
def test_invalid_list_value(schema, value, expected_err, loc):
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
          complicatedArgs {
            stringListArgField(stringListArg: %s)
          }
        }
        """
        % value,
        [expected_err],
        [loc],
    )
# Non-nullable argument handling: every combination of optional and
# required arguments below is valid, in any order.
@pytest.mark.parametrize(
    "value",
    [
        pytest.param(
            """
            {
              dog {
                isHousetrained(atOtherHomes: true)
              }
            }
            """,
            id="Arg On Optional Arg",
        ),
        pytest.param(
            """
            {
              dog {
                isHousetrained
              }
            }
            """,
            id="No Arg On Optional Arg",
        ),
        pytest.param(
            """
            {
              complicatedArgs {
                multipleReqs(req1: 1, req2: 2)
              }
            }
            """,
            id="Multiple Args",
        ),
        pytest.param(
            """
            {
              complicatedArgs {
                multipleReqs(req2: 2, req1: 1)
              }
            }
            """,
            id="Multiple Args Reverse Order",
        ),
        pytest.param(
            """
            {
              complicatedArgs {
                multipleOpts
              }
            }
            """,
            id="No Args On Multiple Optional",
        ),
        pytest.param(
            """
            {
              complicatedArgs {
                multipleOpts(opt1: 1)
              }
            }
            """,
            id="One Arg On Multiple Optional",
        ),
        pytest.param(
            """
            {
              complicatedArgs {
                multipleOpts(opt2: 1)
              }
            }
            """,
            id="Second Arg On Multiple Optional",
        ),
        pytest.param(
            """
            {
              complicatedArgs {
                multipleOptAndReq(req1: 3, req2: 4)
              }
            }
            """,
            id="Multiple Reqs On Mixed List",
        ),
        pytest.param(
            """
            {
              complicatedArgs {
                multipleOptAndReq(req1: 3, req2: 4, opt1: 5)
              }
            }
            """,
            id="Multiple Reqs And One Opt On Mixed List",
        ),
        pytest.param(
            """
            {
              complicatedArgs {
                multipleOptAndReq(req1: 3, req2: 4, opt1: 5, opt2: 6)
              }
            }
            """,
            id="All Reqs And Opts On Mixed List",
        ),
    ],
)
def test_valid_non_nullable_value(schema, value):
    run_test(ValuesOfCorrectTypeChecker, schema, value)


# NOTE(review): the offsets in `locs` are character positions into each
# query source and depend on its exact whitespace — confirm against the
# original file if a query literal is reformatted.
@pytest.mark.parametrize(
    "value,expected_errors,locs",
    [
        pytest.param(
            """
            {
              complicatedArgs {
                multipleReqs(req2: "two", req1: "one")
              }
            }
            """,
            [
                'Expected type Int!, found "two"',
                'Expected type Int!, found "one"',
            ],
            [[(45, 50)], [(58, 63)]],
            id="Incorrect value type",
        ),
        pytest.param(
            """
            {
              complicatedArgs {
                multipleReqs(req1: "one")
              }
            }
            """,
            ['Expected type Int!, found "one"'],
            [[(45, 50)]],
            id="Incorrect value and missing argument (ProvidedNonNullArguments)",
        ),
        # null is never valid for a non-null type.
        pytest.param(
            """
            {
              complicatedArgs {
                multipleReqs(req1: null)
              }
            }
            """,
            ["Expected type Int!, found null"],
            [[(45, 49)]],
            id="Null value",
        ),
    ],
)
def test_invalid_non_nullable_value(schema, value, expected_errors, locs):
    run_test(ValuesOfCorrectTypeChecker, schema, value, expected_errors, locs)
@pytest.mark.parametrize(
    "value",
    [
        pytest.param(
            """
            {
                complicatedArgs {
                    complexArgField
                }
            }
            """,
            id="Optional arg, despite required field in type",
        ),
        pytest.param(
            """
            {
                complicatedArgs {
                    complexArgField(complexArg: { requiredField: true })
                }
            }
            """,
            id="Partial object, only required",
        ),
        pytest.param(
            """
            {
                complicatedArgs {
                    complexArgField(complexArg: { requiredField: false })
                }
            }
            """,
            id="Partial object, required field can be falsey",
        ),
        pytest.param(
            """
            {
                complicatedArgs {
                    complexArgField(complexArg: { requiredField: true, intField: 4 })
                }
            }
            """,
            id="Partial object, including required",
        ),
        pytest.param(
            """
            {
                complicatedArgs {
                    complexArgField(complexArg: {
                        requiredField: true,
                        intField: 4,
                        stringField: "foo",
                        booleanField: false,
                        stringListField: ["one", "two"]
                    })
                }
            }
            """,
            id="Full object",
        ),
        pytest.param(
            """
            {
                complicatedArgs {
                    complexArgField(complexArg: {
                        stringListField: ["one", "two"],
                        booleanField: false,
                        requiredField: true,
                        stringField: "foo",
                        intField: 4,
                    })
                }
            }
            """,
            id="Full object with fields in different order",
        ),
        pytest.param(
            """
            {
                test1: anyArg(arg: 123)
                test2: anyArg(arg: "abc")
                test3: anyArg(arg: [123, "abc"])
                test4: anyArg(arg: {deep: [123, "abc"]})
            }
            """,
            id="allows custom scalar to accept complex literals",
        ),
    ],
)
def test_valid_input_object_value(schema, value):
    """Valid input-object argument literals should produce no errors."""
    run_test(ValuesOfCorrectTypeChecker, schema, value)
@pytest.mark.parametrize(
    "value, expected_errors, locs",
    [
        pytest.param(
            """
            {
                complicatedArgs {
                    complexArgField(complexArg: { intField: 4 })
                }
            }
            """,
            [
                "Required field ComplexInput.requiredField of type Boolean! was "
                "not provided"
            ],
            [[(52, 67)]],
            id="Partial object, missing required",
        ),
        pytest.param(
            """
            {
                complicatedArgs {
                    complexArgField(complexArg: {
                        requiredField: true,
                        nonNullField: null,
                    })
                }
            }
            """,
            ["Expected type Boolean!, found null"],
            [[(121, 125)]],
            id="Partial object, null to non-null field",
        ),
        pytest.param(
            """
            {
                complicatedArgs {
                    complexArgField(complexArg: {
                        stringListField: ["one", 2],
                        requiredField: true,
                    })
                }
            }
            """,
            ["Expected type String, found 2"],
            [[(83, 84)]],
            id="Partial object, invalid field type",
        ),
        pytest.param(
            """
            {
                complicatedArgs {
                    complexArgField(complexArg: {
                        requiredField: true,
                        unknownField: "value"
                    })
                }
            }
            """,
            [
                "Field unknownField is not defined by type ComplexInput. "
                'Did you mean "nonNullField", "intField" or "booleanField"?'
            ],
            [[(83, 104)]],
            id="Partial object, unknown field arg",
        ),
        pytest.param(
            """
            {
                invalidArg(arg: 123)
            }
            """,
            [
                "Expected type Invalid, found 123 (Invalid scalar is always invalid)"
            ],
            [[(18, 21)]],
            id="reports original error for custom scalar which throws",
        ),
    ],
)
def test_invalid_input_object_value(schema, value, expected_errors, locs):
    """Invalid input-object literals must be reported with their locations."""
    run_test(ValuesOfCorrectTypeChecker, schema, value, expected_errors, locs)
def test_directive_arguments_with_directives_of_valid_types(schema):
    """@include/@skip with correctly-typed boolean literals pass validation."""
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
            dog @include(if: true) {
                name
            }
            human @skip(if: false) {
                name
            }
        }
        """,
    )
def test_directive_arguments_with_directive_with_incorrect_types(schema):
    """@include/@skip with non-boolean argument values must be reported."""
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        {
            dog @include(if: "yes") {
                name @skip(if: ENUM)
            }
        }
        """,
        [
            'Expected type Boolean!, found "yes"',
            "Expected type Boolean!, found ENUM",
        ],
        [[(23, 28)], [(51, 55)]],
    )
def test_variables_with_valid_default_values(schema):
    """Variable default values matching their declared types produce no errors."""
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        query WithDefaultValues(
            $a: Int = 1,
            $b: String = "ok",
            $c: ComplexInput = { requiredField: true, intField: 3 }
            $d: Int! = 123
        ) {
            dog { name }
        }
        """,
    )
def test_variables_with_valid_default_null_values(schema):
    """null defaults on nullable variables and fields are valid."""
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        query WithDefaultValues(
            $a: Int = null,
            $b: String = null,
            $c: ComplexInput = { requiredField: true, intField: null }
        ) {
            dog { name }
        }
        """,
    )
def test_variables_with_invalid_default_null_values(schema):
    """null defaults on non-null variables/fields must be reported."""
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        query WithDefaultValues(
            $a: Int! = null,
            $b: String! = null,
            $c: ComplexInput = { requiredField: null, intField: null }
        ) {
            dog { name }
        }
        """,
        [
            "Expected type Int!, found null",
            "Expected type String!, found null",
            "Expected type Boolean!, found null",
        ],
        [[(40, 44)], [(64, 68)], [(110, 114)]],
    )
def test_variables_with_invalid_default_values(schema):
    """Defaults of the wrong scalar/input type must be reported."""
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        query InvalidDefaultValues(
            $a: Int = "one",
            $b: String = 4,
            $c: ComplexInput = "notverycomplex"
        ) {
            dog { name }
        }
        """,
        [
            'Expected type Int, found "one"',
            "Expected type String, found 4",
            'Expected type ComplexInput, found "notverycomplex"',
        ],
    )
def test_variables_with_complex_invalid_default_values(schema):
    """Wrongly-typed fields inside a complex default must be reported."""
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        query WithDefaultValues(
            $a: ComplexInput = { requiredField: 123, intField: "abc" }
        ) {
            dog { name }
        }
        """,
        ["Expected type Boolean!, found 123", 'Expected type Int, found "abc"'],
    )
def test_complex_variables_missing_required_field(schema):
    """A complex default that omits a required field must be reported."""
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        query MissingRequiredField($a: ComplexInput = {intField: 3}) {
            dog { name }
        }
        """,
        [
            "Required field ComplexInput.requiredField of type Boolean! "
            "was not provided"
        ],
    )
def test_list_variables_with_invalid_item(schema):
    """A wrongly-typed item inside a list default must be reported."""
    run_test(
        ValuesOfCorrectTypeChecker,
        schema,
        """
        query InvalidItem($a: [String] = ["one", 2]) {
            dog { name }
        }
        """,
        ["Expected type String, found 2"],
    )
| 22.666341 | 85 | 0.442948 | 1,764 | 23,233 | 5.713719 | 0.150227 | 0.0633 | 0.114595 | 0.13543 | 0.653438 | 0.568806 | 0.498462 | 0.420081 | 0.3798 | 0.302609 | 0 | 0.028255 | 0.440925 | 23,233 | 1,024 | 86 | 22.688477 | 0.74771 | 0.013687 | 0 | 0.50613 | 0 | 0 | 0.235641 | 0.021157 | 0 | 0 | 0 | 0 | 0.001751 | 1 | 0.061296 | false | 0 | 0.005254 | 0 | 0.06655 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8d23b2b1ff4f21038abe2b0f44cfaf2ddc32a2c | 578 | py | Python | signal_ocean/port.py | lkattis-signal/SignalSDK | f085b9cae0495f4e016b9982df271efc6fd0a8f5 | [
"Apache-2.0"
] | 10 | 2020-09-29T06:36:45.000Z | 2022-03-14T18:15:50.000Z | signal_ocean/port.py | lkattis-signal/SignalSDK | f085b9cae0495f4e016b9982df271efc6fd0a8f5 | [
"Apache-2.0"
] | 53 | 2020-10-08T10:05:00.000Z | 2022-03-29T14:21:18.000Z | signal_ocean/port.py | lkattis-signal/SignalSDK | f085b9cae0495f4e016b9982df271efc6fd0a8f5 | [
"Apache-2.0"
] | 5 | 2020-09-25T07:48:04.000Z | 2021-11-23T07:08:56.000Z | # noqa: D100
from dataclasses import dataclass
import warnings
@dataclass(frozen=True, eq=False)
class Port:
    """A maritime facility where vessels can dock.

    Attributes:
        id: The ID of the port.
        name: The name of the port.
    """

    id: int
    name: str

    def __post_init__(self) -> None:  # noqa: D105
        # Emit the deprecation notice on every construction; stacklevel=3
        # points the warning at the caller that created the Port.
        deprecation_message = (
            "signal_ocean.Port is deprecated and will be removed in a future "
            "version of the SDK. Please use tonnage_list.Port instead."
        )
        warnings.warn(deprecation_message, DeprecationWarning, stacklevel=3)
| 22.230769 | 78 | 0.612457 | 73 | 578 | 4.753425 | 0.739726 | 0.043228 | 0.051873 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017544 | 0.309689 | 578 | 25 | 79 | 23.12 | 0.85213 | 0.240484 | 0 | 0 | 0 | 0 | 0.291566 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8d2faa95dd978572008d8a4d4ff1af75e8ce3f6 | 1,716 | py | Python | bdc_geoserver/coverages/controller.py | fabianazioti/geoserver | dfb3bb072be59ce83d84e913fbb8388b82b78487 | [
"MIT"
] | null | null | null | bdc_geoserver/coverages/controller.py | fabianazioti/geoserver | dfb3bb072be59ce83d84e913fbb8388b82b78487 | [
"MIT"
] | null | null | null | bdc_geoserver/coverages/controller.py | fabianazioti/geoserver | dfb3bb072be59ce83d84e913fbb8388b82b78487 | [
"MIT"
] | null | null | null | import os
import json
from flask import request
from werkzeug.exceptions import InternalServerError, BadRequest
from bdc_core.utils.flask import APIResource
from bdc_geoserver.coverages import ns
from bdc_geoserver.coverages.business import CoverageBusiness
from bdc_geoserver.coverages.parsers import validate
api = ns


@api.route('/<workspace>')
@api.route('/<workspace>/<coveragestore>/<coverage>')
class CoverageController(APIResource):
    """Resource exposing the coverage stores of a geoserver workspace."""

    def get(self, workspace, coveragestore=None, coverage=None):
        """
        List of coverages store for a workspace in geoserver

        Returns a dict with key ``coverageStore`` mapping to the list of
        coverage-store descriptors (empty when geoserver reports none).
        """
        layers = CoverageBusiness.get_coverages(workspace)
        # geoserver returns 'coverageStores' as a plain string when the
        # workspace has no stores; use isinstance instead of comparing
        # type objects (type(x) != str) for the check.
        if isinstance(layers['coverageStores'], str):
            coverages = []
        else:
            coverages = layers['coverageStores']['coverageStore']
        return {
            "coverageStore": coverages
        }

    def delete(self, workspace, coveragestore, coverage):
        """
        Unpublish a layer/coverage in geoserver

        Raises:
            InternalServerError: if geoserver fails to unpublish the mosaic.
        """
        status = CoverageBusiness.unpublish(workspace, coveragestore, coverage)
        if not status:
            raise InternalServerError('Error unpublish mosaic!')
        return {
            "message": "Mosaic unpublish!"
        }
@api.route('/')
class CoveragesController(APIResource):
    """Resource for publishing new coverages (image mosaics) in geoserver."""

    def post(self):
        """
        Publish a layer/image_mosaic in geoserver

        Raises:
            BadRequest: if the request payload fails schema validation.
            InternalServerError: if geoserver fails to publish the mosaic.
        """
        data, status = validate(request.json, 'publish_raster')

        if status is False:
            raise BadRequest(json.dumps(data))

        status = CoverageBusiness.publish(data)
        if not status:
            raise InternalServerError('Error publishing mosaic!')
        return {
            "message": "Mosaic published!"
        }
| 28.131148 | 79 | 0.652681 | 166 | 1,716 | 6.704819 | 0.391566 | 0.025157 | 0.043127 | 0.067385 | 0.071878 | 0.071878 | 0 | 0 | 0 | 0 | 0 | 0 | 0.251748 | 1,716 | 60 | 80 | 28.6 | 0.866822 | 0.078089 | 0 | 0.131579 | 0 | 0 | 0.142196 | 0.025794 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078947 | false | 0 | 0.210526 | 0 | 0.421053 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8d4ca8c575646294cb7b5fef73303f10891255b | 3,941 | py | Python | authors/apps/article_bookmarks/views.py | andela/ah-backend-dojo | f2b14f15c4af906da846cafe722f13868d58371f | [
"BSD-3-Clause"
] | 3 | 2019-05-01T10:41:09.000Z | 2021-04-25T22:17:20.000Z | authors/apps/article_bookmarks/views.py | andela/ah-backend-dojo | f2b14f15c4af906da846cafe722f13868d58371f | [
"BSD-3-Clause"
] | 24 | 2019-04-23T14:56:21.000Z | 2021-12-13T19:58:37.000Z | authors/apps/article_bookmarks/views.py | andela/ah-backend-dojo | f2b14f15c4af906da846cafe722f13868d58371f | [
"BSD-3-Clause"
] | 4 | 2019-06-29T10:40:32.000Z | 2022-01-04T11:44:53.000Z | from os import environ
from django.db.utils import IntegrityError
from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from authors.apps.article_bookmarks.models import Bookmark
from authors.apps.article_bookmarks.serializers import BookmarkSerializer
from authors.apps.articles.models import Article
from authors.apps.articles.serializers import ArticleSerializer
class CreateDestroyBookmarksView(APIView):
    """
    post:
    Bookmark or unbookmark an article
    """

    permission_classes = [IsAuthenticated]

    def post(self, request, *args, **kwargs):
        """Toggle a bookmark on the article identified by the ``slug`` kwarg.

        Creating a duplicate (user, article) bookmark violates the DB unique
        constraint; the resulting IntegrityError is treated as "already
        bookmarked" and the existing bookmark is deleted instead.
        """
        current_user = request.user
        slug = kwargs["slug"]
        try:
            article = get_object_or_404(Article, slug=slug)
            bookmark = Bookmark.objects.create(
                user=current_user, article=article
            )
            bookmark.save()
            serializer = BookmarkSerializer(bookmark)
        except IntegrityError:
            # Bookmark already exists for this user/article -> unbookmark.
            # (IntegrityError can only come from create(), so `article`
            # is always bound here.)
            bookmark = Bookmark.objects.get(user=current_user, article=article)
            bookmark.delete()
            return Response(
                data={"message": "Article has been unBookmarked Successfully"},
                status=status.HTTP_200_OK,
            )
        return Response(
            data={
                "message": "Article Bookmarked Successfully",
                "bookmark": serializer.data,
            },
            status=status.HTTP_201_CREATED,
        )
class RetrieveBookmarkStatusView(APIView):
    """
    get:
    Get the status of a user's article bookmark
    """

    permission_classes = [IsAuthenticated]

    def get(self, request, *args, **kwargs):
        """Return whether the current user has bookmarked the article.

        The article is looked up by the ``slug`` kwarg (404 when missing).
        """
        slug = kwargs["slug"]
        article = get_object_or_404(Article, slug=slug)
        # Use QuerySet.exists() so the DB answers with an EXISTS query
        # instead of fetching every matching row just to measure len().
        bookmarked = Bookmark.objects.filter(
            user=request.user.username,
            article=article.id,
        ).exists()
        return Response(
            data={
                "isBookmarked": bookmarked,
            },
            status=status.HTTP_200_OK,
        )
class ListBookmarksView(APIView):
    """
    get:
    returns a list of bookmarks
    """

    queryset = Bookmark.objects.all()
    serializer_class = BookmarkSerializer
    permission_classes = [IsAuthenticated]

    def get(self, request, *args, **kwargs):
        """Returns a list of bookmarks that belong to the current user"""
        user_bookmarks = self.queryset.filter(user=self.request.user.username)
        serializer = BookmarkSerializer(user_bookmarks, many=True)
        bookmarks = serializer.data
        count = len(bookmarks)
        # Customize the bookmark response to include the url for article detail
        for bookmark in bookmarks:
            article_id = bookmark["article"]
            article = Article.objects.get(id=article_id)
            # change the bookmark data type from int to dictionary
            bookmark["article"] = {}
            article = ArticleSerializer(article).data
            bookmark["article"]["id"] = article_id
            bookmark["article"]["slug"] = article["slug"]
            bookmark["article"]["title"] = article["title"]
            bookmark["article"]["author"] = article["author"]
            bookmark["article"]["description"] = article["description"]
            # NOTE(review): self-assignment below is a no-op — presumably a
            # leftover from reformatting the timestamp; confirm intent.
            bookmark["bookmarked_on"] = bookmark["bookmarked_on"]
            bookmark["article"][
                "url"
            ] = f"{environ.get('WEB_HOST')}/api/articles/{article_id}/"
        return Response(
            data={"bookmarks": bookmarks, "count": count},
            status=status.HTTP_200_OK,
        )
| 31.782258 | 79 | 0.620655 | 390 | 3,941 | 6.164103 | 0.282051 | 0.049917 | 0.028286 | 0.017471 | 0.246256 | 0.148503 | 0.11772 | 0.11772 | 0.08777 | 0.047421 | 0 | 0.007821 | 0.286222 | 3,941 | 123 | 80 | 32.04065 | 0.846783 | 0.087541 | 0 | 0.240964 | 0 | 0 | 0.091246 | 0.014827 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036145 | false | 0 | 0.13253 | 0 | 0.313253 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8d6237c3c99aaccb94832bb39c1060bc587ef62 | 859 | py | Python | tests/test_utils.py | mbannick/CorrelatedCounts | 067486289a20b64e2ba6554be46e5c4b4453b4de | [
"MIT"
] | 3 | 2019-12-10T22:59:46.000Z | 2020-02-03T21:02:01.000Z | tests/test_utils.py | ihmeuw-msca/CorrelatedCounts | 067486289a20b64e2ba6554be46e5c4b4453b4de | [
"MIT"
] | 4 | 2019-11-17T17:57:34.000Z | 2019-12-27T16:45:07.000Z | tests/test_utils.py | mbannick/CorrelatedCounts | 067486289a20b64e2ba6554be46e5c4b4453b4de | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
test_utils
~~~~~~~~~~
Test utils module
"""
import numpy as np
import pytest
import ccount.utils as utils
@pytest.mark.parametrize("vec", [np.arange(10)])
@pytest.mark.parametrize("sizes",
[np.array([5, 5]),
np.array([1, 2, 3, 4])])
def test_split(vec, sizes):
result = utils.split(vec, sizes)
assert len(result) == len(sizes)
s = np.cumsum(sizes)
for i in range(len(result)):
assert len(result[i]) == sizes[i]
assert result[i][-1] == s[i] - 1
@pytest.mark.parametrize("vec", [np.arange(20)])
@pytest.mark.parametrize("d", [np.array([[5, 1, 2, 2], [1, 2, 3, 4]])])
def test_beta_transform(vec, d):
    """vec_to_beta followed by beta_to_vec must round-trip the vector."""
    beta = utils.vec_to_beta(vec, d)
    vec_recover = utils.beta_to_vec(beta)
    assert np.linalg.norm(vec - vec_recover) < 1e-10
| 26.84375 | 71 | 0.576251 | 128 | 859 | 3.789063 | 0.34375 | 0.082474 | 0.173196 | 0.098969 | 0.17732 | 0.17732 | 0 | 0 | 0 | 0 | 0 | 0.036364 | 0.231665 | 859 | 31 | 72 | 27.709677 | 0.698485 | 0.074505 | 0 | 0 | 0 | 0 | 0.015484 | 0 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.1 | false | 0 | 0.15 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8d62b0ed216da4fafb90cc8a2a93e49e69dbe48 | 888 | py | Python | tests/unittest/test_configuration.py | DudeNr33/flask_mock_server | 2a453189240a4d1809e72d5407b9f302621d3c42 | [
"MIT"
] | 1 | 2021-08-15T14:02:49.000Z | 2021-08-15T14:02:49.000Z | tests/unittest/test_configuration.py | DudeNr33/flask_mock_server | 2a453189240a4d1809e72d5407b9f302621d3c42 | [
"MIT"
] | 1 | 2020-10-06T04:36:55.000Z | 2020-10-06T04:36:55.000Z | tests/unittest/test_configuration.py | DudeNr33/flask_mock_server | 2a453189240a4d1809e72d5407b9f302621d3c42 | [
"MIT"
] | null | null | null | """
Unittests checking the initialization of the ``MockServer`` class and evaluation of the config passed to it.
"""
from unittest.mock import patch
import pytest
from server_double.server import Endpoint, MockServer
def test_port():
    """The configured port is exposed on the server instance."""
    server = MockServer(config={"port": 8083})
    assert server.port == 8083
@pytest.mark.parametrize(
    "url,config",
    [
        ("/endpoint", {"status_code": 200}),
        ("/resource", {"status_code": 204}),
        ("/foobar", {"status_code": 303}),
    ],
)
def test_single_endpoint(url, config):
    """Each configured endpoint is mounted on cherrypy at its URL with the
    configured default status."""
    with patch("server_double.server.cherrypy") as cherrypy_mock:
        _ = MockServer(config={"endpoints": {url: config}})
    # Inspect what was handed to cherrypy.tree.mount.
    root, script_path = cherrypy_mock.tree.mount.call_args[0]
    assert script_path == url
    assert isinstance(root, Endpoint)
    assert root.url == url
    assert root.default_status == config["status_code"]
| 27.75 | 108 | 0.675676 | 108 | 888 | 5.407407 | 0.481481 | 0.068493 | 0.061644 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024931 | 0.186937 | 888 | 31 | 109 | 28.645161 | 0.783934 | 0.121622 | 0 | 0 | 0 | 0 | 0.156736 | 0.037565 | 0 | 0 | 0 | 0 | 0.227273 | 1 | 0.090909 | false | 0 | 0.136364 | 0 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8d75941a9025275db193dd6de16e0dccdc9f749 | 6,925 | py | Python | db.py | vblazhnov/stats | dfe103521543af40b6e6c941d28cdd831b765a92 | [
"Apache-2.0"
] | null | null | null | db.py | vblazhnov/stats | dfe103521543af40b6e6c941d28cdd831b765a92 | [
"Apache-2.0"
] | null | null | null | db.py | vblazhnov/stats | dfe103521543af40b6e6c941d28cdd831b765a92 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 vblazhnov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'vblazhnov'
import psycopg2
import hashlib
def db_connect():
    """Connect to the database.

    Creates the ``users`` and ``events`` tables on first run.

    :return: an open psycopg2 connection, or None (implicitly) on failure
    """
    try:
        conn = psycopg2.connect("dbname='test' user='postgres' host='localhost' password='test123'")
        cur = conn.cursor()
        cur.execute("SELECT EXISTS (SELECT * FROM information_schema.tables WHERE table_name=%s)", ('users',))
        if not cur.fetchone()[0]:
            cur.execute("""
                CREATE TABLE users
                (
                    id serial NOT NULL,
                    login text NOT NULL,
                    pwd_hash text NOT NULL,
                    api_key text NOT NULL,
                    CONSTRAINT uid PRIMARY KEY (id)
                );
                """)
            conn.commit()
        # TODO: add a reference table of event names, and store the event's
        # id from that reference table in the events table
        cur.execute("SELECT EXISTS (SELECT * FROM information_schema.tables WHERE table_name=%s)", ('events',))
        if not cur.fetchone()[0]:
            cur.execute("""
                CREATE TABLE events
                (
                    id serial NOT NULL,
                    owner_id integer NOT NULL,
                    name text NOT NULL,
                    date timestamp without time zone NOT NULL,
                    ip text NOT NULL,
                    CONSTRAINT eid PRIMARY KEY (id),
                    CONSTRAINT oid FOREIGN KEY (owner_id)
                        REFERENCES users (id) MATCH SIMPLE
                        ON UPDATE CASCADE ON DELETE CASCADE
                );
                """)
            conn.commit()
    except Exception as E:
        print("I am unable to connect to the database")
        print(E)
    else:
        return conn
class DataBase:
    """Database access layer.

    Contains all DB operations the application needs.
    If the DB lacks the required tables, db_connect() creates them.
    """

    # Connection is created once, at class-definition time.
    __conn = db_connect()
    __cur = __conn.cursor()
    # Static salt mixed into every password/API-key hash.
    __salt = "randomSaLTFromGeneRaT0r".encode('utf-8')

    @staticmethod
    def get_user_info(login):
        """Return information about the given user.

        :param login: the user's login
        :return: user tuple (id, login, pwd_hash, api_key), or None
        """
        # psycopg2 escapes query parameters automatically in execute()
        DataBase.__cur.execute("""SELECT id, login, pwd_hash, api_key
                                  FROM users
                                  WHERE login = %s""", (login,))
        return DataBase.__cur.fetchone()

    @staticmethod
    def get_user_by_api_key(apiKey):
        """Return information about the user with the given apiKey.

        :param apiKey: the user's API key
        :return: user tuple (id, login, pwd_hash, api_key), or None
        """
        DataBase.__cur.execute("""SELECT id, login, pwd_hash, api_key
                                  FROM users
                                  WHERE api_key = %s
                                  LIMIT 1""", (apiKey,))
        return DataBase.__cur.fetchone()

    @staticmethod
    def hash_with_salt(str):
        """Compute a salted md5 hash.

        :param str: the input string (NOTE(review): shadows the builtin
            ``str``; renaming would break keyword callers, so left as is)
        :return: the hex-digest string
        """
        md5 = hashlib.md5
        return md5(md5(str.encode('utf-8')).hexdigest().encode('utf-8') + DataBase.__salt).hexdigest()

    @staticmethod
    def is_valid_pass(login, pwd):
        """Check whether the password is valid.

        :param login: the user's login
        :param pwd: the user's password
        :return: True if valid, otherwise False
        """
        user = DataBase.get_user_info(login)
        if user is None:
            return False
        return DataBase.hash_with_salt(pwd) == user[2]

    @staticmethod
    def add_user(login, pwd):
        """Add a new user to the DB.

        :param login: the user's login
        :param pwd: the user's password
        :return: user tuple (id, login, pwd_hash, api_key), or None if the
            login is already taken
        """
        if DataBase.get_user_info(login) is not None:
            return None
        pwd_hash = DataBase.hash_with_salt(pwd)
        # a long api_key, to make it unique
        api_key = DataBase.hash_with_salt(login) + DataBase.hash_with_salt(login + pwd)
        DataBase.__cur.execute("""INSERT INTO users (login, pwd_hash, api_key)
                                  VALUES (%s, %s, %s)
                                  RETURNING id, login, pwd_hash, api_key""", (login, pwd_hash, api_key))
        DataBase.__conn.commit()
        return DataBase.__cur.fetchone()

    @staticmethod
    def add_event(apiKey, eventName, ip):
        """Add information about a new event to the database.

        :param apiKey: the user's API key
        :param eventName: the event name
        :param ip: the ip the event came from
        :return: event tuple (id, owner_id, name, date, ip), or None when
            the apiKey is unknown
        """
        user = DataBase.get_user_by_api_key(apiKey)
        if user is None:
            return None
        userId = user[0]
        # TODO: ideally keep a separate reference table for event names,
        # look up the needed event's id there, and add it when missing
        # in postgres, "now" is replaced with the transaction timestamp
        DataBase.__cur.execute("""INSERT INTO events(owner_id, name, date, ip)
                                  VALUES (%s, %s, %s, %s)
                                  RETURNING id, owner_id, name, date, ip""", (userId, eventName, "now", ip))
        DataBase.__conn.commit()
        return DataBase.__cur.fetchone()

    @staticmethod
    def get_users_events(userId):
        """Return all event names of the given user and their counts.

        :param userId: the given user's id
        :return: list of (eventName, count) tuples
        """
        DataBase.__cur.execute("""SELECT name, COUNT(name)
                                  FROM events
                                  WHERE owner_id = %s
                                  GROUP BY name
                                  ORDER BY COUNT(name) DESC""", (userId,))
        return DataBase.__cur.fetchall()

    @staticmethod
    def get_users_event(userId, eventName):
        """Return all events with the given name for the given user.

        :param userId: the given user's id
        :param eventName: the given event name
        :return: list of (ip, event time) tuples
        """
        DataBase.__cur.execute("""SELECT ip, date
                                  FROM events
                                  WHERE owner_id = %s AND name = %s
                                  ORDER BY date DESC""", (userId, eventName))
        return DataBase.__cur.fetchall()
| 33.293269 | 111 | 0.612708 | 818 | 6,925 | 5.05379 | 0.328851 | 0.020319 | 0.023222 | 0.029028 | 0.328254 | 0.240929 | 0.192308 | 0.18118 | 0.18118 | 0.110547 | 0 | 0.005768 | 0.299061 | 6,925 | 207 | 112 | 33.454106 | 0.8459 | 0.33935 | 0 | 0.358491 | 0 | 0 | 0.409265 | 0.017523 | 0 | 0 | 0 | 0.009662 | 0 | 1 | 0.084906 | false | 0.018868 | 0.018868 | 0 | 0.254717 | 0.018868 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8d7ca4e6d430310eec168b38b920ccc8c0c8b08 | 5,663 | py | Python | appengine/annotation-parser/main.py | eunchong/infra | ce3728559112bfb3e8b32137eada517aec6d22f9 | [
"BSD-3-Clause"
] | null | null | null | appengine/annotation-parser/main.py | eunchong/infra | ce3728559112bfb3e8b32137eada517aec6d22f9 | [
"BSD-3-Clause"
] | null | null | null | appengine/annotation-parser/main.py | eunchong/infra | ce3728559112bfb3e8b32137eada517aec6d22f9 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import collections
import datetime
import json
import logging
import os
import re
from google.appengine.api import app_identity
from google.appengine.api import urlfetch
import jinja2
import webapp2
JINJA_ENV = jinja2.Environment(
loader=jinja2.FileSystemLoader(
os.path.join(os.path.dirname(__file__), 'templates')),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
REGEX_STEP_CURSOR = re.compile(r'^@@@STEP_CURSOR[@ ](.+)@@@$')
Property = collections.namedtuple('Property', ['name', 'value', 'source'])
Log = collections.namedtuple('Log', ['name', 'link'])
class Step(object):
def __init__(self, task_id, name):
self._task_id = task_id
self.name = name
self.css_class = 'success'
self.link = '#'
self.logs = [
Log(name='stdio', link='/swarming/step/%s/%s' % (
task_id, base64.urlsafe_b64encode(self.name))),
]
def fail(self):
self.css_class = 'failure'
def get(self, _name, default=None):
return default
def fetch_json(url):
    """Fetch *url* with an OAuth bearer token and parse the body as JSON."""
    logging.info('Fetching %s' % url)
    authorization_token, _ = app_identity.get_access_token(
        'https://www.googleapis.com/auth/userinfo.email')
    response = urlfetch.fetch(
        url, follow_redirects=False, validate_certificate=True,
        headers={'Authorization': 'Bearer ' + authorization_token},
        deadline=30)
    logging.debug(response.content)
    # TODO(phajdan.jr): Handle responses other than HTTP 200.
    return json.loads(response.content)
def fetch_swarming_task_metadata(task_id):
    """Fetch the swarming task result (metadata) for *task_id* as a dict."""
    return fetch_json(
        'https://chromium-swarm.appspot.com/_ah/api/swarming/v1/'
        'task/%s/result' % task_id)
def fetch_swarming_task_output(task_id):
    """Fetch the stdout of the swarming task *task_id* as a dict."""
    return fetch_json(
        'https://chromium-swarm.appspot.com/_ah/api/swarming/v1/'
        'task/%s/stdout' % task_id)
def parse_datetime(datetime_string):
    """Parse a swarming ISO-8601-style timestamp (with microseconds)."""
    timestamp_format = '%Y-%m-%dT%H:%M:%S.%f'
    return datetime.datetime.strptime(datetime_string, timestamp_format)
def access_allowed(task_metadata):
    """Return True when this swarming task may be shown to the viewer."""
    # TODO(phajdan.jr): Remove the user-specific logic when no longer needed.
    whitelisted_user = task_metadata.get('user') == 'phajdan@google.com'
    opted_in = 'allow_milo:1' in task_metadata.get('tags', [])
    return whitelisted_user or opted_in
class SwarmingBuildHandler(webapp2.RequestHandler):
    """Renders a buildbot-style build page for a raw swarming task."""

    def get(self, task_id):
        task_metadata = fetch_swarming_task_metadata(task_id)
        if not access_allowed(task_metadata):
            self.abort(403)

        data = fetch_swarming_task_output(task_id)

        # Reconstruct the step list from annotator STEP_CURSOR/STEP_FAILURE
        # markers in the task's stdout; each distinct cursor opens a step.
        steps = []
        seen_steps = set()
        last_step = None
        for line in data['output'].splitlines():
            if line == '@@@STEP_FAILURE@@@' and last_step:
                last_step.fail()
                continue
            match = REGEX_STEP_CURSOR.match(line)
            if not match:
                continue
            step_name = match.group(1)
            if step_name in seen_steps:
                continue
            seen_steps.add(step_name)
            last_step = Step(task_id, step_name)
            steps.append(last_step)

        # Elapsed time is only computable once the task has completed.
        if 'started_ts' in task_metadata and 'completed_ts' in task_metadata:
            started_ts = parse_datetime(task_metadata['started_ts'])
            completed_ts = parse_datetime(task_metadata['completed_ts'])
            elapsed = str(completed_ts - started_ts)
        else:
            elapsed = 'n/a'

        # Surface swarming metadata and bot dimensions as build properties.
        properties = []
        for key in ('task_id', 'user', 'bot_id'):
            properties.append(Property(
                name=key, value=task_metadata[key], source='swarming'))
        for dimension in task_metadata['bot_dimensions']:
            properties.append(Property(
                name=dimension['key'],
                value=json.dumps(dimension['value']),
                source='swarming dimensions'))

        # Map the swarming task state to a buildbot-style result/CSS class.
        if 'completed_ts' not in task_metadata:
            build_result = 'Running'
            result_css = 'running'
        elif ((not task_metadata['failure']) and
              (not task_metadata['internal_failure'])):
            build_result = 'Build successful'
            result_css = 'success'
        else:
            build_result = 'Failed'
            result_css = 'failure'

        template_values = {
            'stylesheet': '/static/default.css',
            'build_id': task_metadata['task_id'],
            'result_css': result_css,
            'build_result': [build_result],
            'slave_url': ('https://chromium-swarm.appspot.com/restricted/bot/%s' %
                          task_metadata['bot_id']),
            'slavename': task_metadata['bot_id'],
            'steps': steps,
            'properties': properties,
            'start': task_metadata.get('started_ts', 'n/a'),
            'end': task_metadata.get('completed_ts', 'n/a'),
            'elapsed': elapsed,
        }
        template = JINJA_ENV.get_template('build.html')
        self.response.write(template.render(template_values))
class SwarmingStepHandler(webapp2.RequestHandler):
    """Serves the plain-text log of one step of a swarming task.

    The step name arrives base64url-encoded in the URL (see Step.logs).
    """

    def get(self, task_id, step_id_base64):
        task_metadata = fetch_swarming_task_metadata(task_id)
        if not access_allowed(task_metadata):
            self.abort(403)

        step_id = base64.urlsafe_b64decode(step_id_base64)
        self.response.headers['Content-Type'] = 'text/plain'
        data = fetch_swarming_task_output(task_id)

        # Emit only the lines between this step's STEP_CURSOR marker and
        # the next STEP_CLOSED marker.
        inside_step = False
        step_lines = []
        for line in data['output'].splitlines():
            if inside_step:
                if line == '@@@STEP_CLOSED@@@':
                    break
                step_lines.append(line)
            elif line in ['@@@STEP_CURSOR@%s@@@' % step_id,
                          '@@@STEP_CURSOR %s@@@' % step_id]:
                inside_step = True
        self.response.write('\n'.join(step_lines))
# URL routing: raw swarming build pages and per-step log pages.
app = webapp2.WSGIApplication([
    (r'/swarming/build/(.+)', SwarmingBuildHandler),
    (r'/swarming/step/(.+)/(.+)', SwarmingStepHandler),
])
| 27.095694 | 76 | 0.667844 | 715 | 5,663 | 5.065734 | 0.299301 | 0.079514 | 0.028161 | 0.019879 | 0.201546 | 0.154059 | 0.140806 | 0.085036 | 0.085036 | 0.085036 | 0 | 0.009035 | 0.198658 | 5,663 | 208 | 77 | 27.225962 | 0.789114 | 0.049974 | 0 | 0.158621 | 0 | 0 | 0.181057 | 0.008374 | 0 | 0 | 0 | 0.004808 | 0 | 1 | 0.068966 | false | 0 | 0.075862 | 0.027586 | 0.22069 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7633da1f4768d784eb185ba4d9ceca72928d90f4 | 2,778 | py | Python | tests/test_inputs.py | johanjeppsson/msteams | 6a539881f5c2b9c7f1d4fefd2543e381f6b7678d | [
"MIT"
] | 2 | 2020-04-16T16:56:21.000Z | 2021-12-25T05:28:31.000Z | tests/test_inputs.py | johanjeppsson/msteams | 6a539881f5c2b9c7f1d4fefd2543e381f6b7678d | [
"MIT"
] | null | null | null | tests/test_inputs.py | johanjeppsson/msteams | 6a539881f5c2b9c7f1d4fefd2543e381f6b7678d | [
"MIT"
] | 1 | 2019-10-23T21:54:22.000Z | 2019-10-23T21:54:22.000Z | import json
from collections import OrderedDict
import pytest
from msteams import DateInput, MultipleChoiceInput, TextInput
# Fields common to every input card payload ('@type' is filled per test).
EXPECTED_INPUT = OrderedDict(
    (
        ("@type", "type"),
        ("id", "comment"),
        ("isRequired", False),
        ("title", "Input's title property"),
        ("value", "Input's value property"),
    )
)

# Type-specific payload fragments merged on top of EXPECTED_INPUT.
EXPECTED_TEXT = OrderedDict((("isMultiline", True), ("maxLength", 80)))
EXPECTED_DATE = {"includeTime": False}
EXPECTED_MULTI = OrderedDict(
    (
        ("choices", [OrderedDict((("display", "Choice 1"), ("value", "1")))]),
        ("isMultiSelect", False),
        ("style", "normal"),
    )
)
def test_text_input():
    """TextInput serializes to the expected card JSON, both when built via
    constructor keywords and via the setter API."""
    e = EXPECTED_INPUT.copy()
    e.update(EXPECTED_TEXT)
    e["@type"] = "TextInput"

    # All properties via constructor keywords.
    ti = TextInput(
        id=e["id"],
        is_multiline=e["isMultiline"],
        title=e["title"],
        is_required=False,
        value=e["value"],
        max_length=e["maxLength"],
    )
    assert ti.json_payload == json.dumps(e)

    # Same payload built through the setter API.
    ti = TextInput()
    ti.set_id(e["id"])
    ti.set_is_required(e["isRequired"])
    ti.set_is_multiline(e["isMultiline"])
    ti.set_title(e["title"])
    ti.set_value(e["value"])
    ti.set_max_length(e["maxLength"])
    assert ti.json_payload == json.dumps(e)
def test_date_input():
    """DateInput serializes to the expected card JSON, both via the
    constructor and via set_include_time."""
    e = EXPECTED_INPUT.copy()
    e.update(EXPECTED_DATE)
    e["@type"] = "DateInput"

    di = DateInput(
        id=e["id"],
        title=e["title"],
        is_required=False,
        value=e["value"],
        include_time=e["includeTime"],
    )
    assert di.json_payload == json.dumps(e)

    di = DateInput(id=e["id"], title=e["title"], is_required=False, value=e["value"])
    di.set_include_time(e["includeTime"])
    assert di.json_payload == json.dumps(e)
def test_multiple_choice_input():
    """MultipleChoiceInput serializes identically via kwargs, setters,
    and add_choices; an invalid style raises ValueError."""
    expected = EXPECTED_INPUT.copy()
    expected.update(EXPECTED_MULTI)
    expected["@type"] = "MultipleChoiceInput"
    first = expected["choices"][0]
    choices = {first["display"]: first["value"]}

    widget = MultipleChoiceInput(
        id=expected["id"],
        title=expected["title"],
        is_required=False,
        value=expected["value"],
        choices=choices,
        is_multi_select=False,
        style="normal",
    )
    assert widget.json_payload == json.dumps(expected)

    widget = MultipleChoiceInput(
        id=expected["id"], title=expected["title"], is_required=False,
        value=expected["value"]
    )
    widget.set_choices(choices)
    widget.set_is_multi_select(False)
    widget.set_style("normal")
    assert widget.json_payload == json.dumps(expected)

    widget = MultipleChoiceInput(
        id=expected["id"], title=expected["title"], is_required=False,
        value=expected["value"]
    )
    widget.add_choices(choices)
    widget.set_is_multi_select(False)
    widget.set_style("normal")
    assert widget.json_payload == json.dumps(expected)

    with pytest.raises(ValueError):
        MultipleChoiceInput(style="invalid")
| 25.027027 | 85 | 0.598992 | 343 | 2,778 | 4.688047 | 0.186589 | 0.01306 | 0.021766 | 0.087065 | 0.532338 | 0.532338 | 0.532338 | 0.523632 | 0.452736 | 0.429726 | 0 | 0.002797 | 0.227862 | 2,778 | 110 | 86 | 25.254545 | 0.746853 | 0 | 0 | 0.348315 | 0 | 0 | 0.152628 | 0 | 0 | 0 | 0 | 0 | 0.078652 | 1 | 0.033708 | false | 0 | 0.044944 | 0 | 0.078652 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7635389fc0335626aa9aec3710d32f29934c0fca | 2,222 | py | Python | data/byol_transform.py | SuhongMoon/BYOL-PyTorch | fa8eea6c4cc65436aa458a1a48c79fd0d9d46d51 | [
"MIT"
] | null | null | null | data/byol_transform.py | SuhongMoon/BYOL-PyTorch | fa8eea6c4cc65436aa458a1a48c79fd0d9d46d51 | [
"MIT"
] | null | null | null | data/byol_transform.py | SuhongMoon/BYOL-PyTorch | fa8eea6c4cc65436aa458a1a48c79fd0d9d46d51 | [
"MIT"
] | null | null | null | #-*- coding:utf-8 -*-
import torch
from torchvision import transforms
import cv2
from PIL import Image, ImageOps
import numpy as np
class MultiViewDataInjector():
    """Apply every transform in a list to one sample and stack the
    resulting views into a single tensor along a new leading dim."""

    def __init__(self, transform_list):
        self.transform_list = transform_list

    def __call__(self, sample):
        views = [t(sample).unsqueeze(0) for t in self.transform_list]
        return torch.cat(views, dim=0)
class GaussianBlur():
    """Blur a PIL image with a fixed-size Gaussian kernel whose sigma is
    drawn uniformly from [sigma_min, sigma_max] on each call."""

    def __init__(self, kernel_size, sigma_min=0.1, sigma_max=2.0):
        self.kernel_size = kernel_size
        self.sigma_min = sigma_min
        self.sigma_max = sigma_max

    def __call__(self, img):
        sigma = np.random.uniform(self.sigma_min, self.sigma_max)
        blurred = cv2.GaussianBlur(
            np.array(img), (self.kernel_size, self.kernel_size), sigma)
        return Image.fromarray(blurred.astype(np.uint8))
class Solarize():
    """Callable transform solarizing a PIL image: ImageOps.solarize
    inverts all pixel values above ``threshold``."""
    def __init__(self, threshold=128):
        # Pixels above this value get inverted by PIL.
        self.threshold = threshold
    def __call__(self, sample):
        return ImageOps.solarize(sample, self.threshold)
def get_transform(stage, gb_prob=1.0, solarize_prob=0.):
    """Build the torchvision pipeline for a BYOL stage.

    stage: 'train'/'val' (full augmentation), 'ft' (crop + flip only) or
    'test' (resize + center crop). gb_prob and solarize_prob are the
    application probabilities of GaussianBlur and Solarize.
    """
    steps = []
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    color_jitter = transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)
    if stage in ('train', 'val'):
        steps = [
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.RandomApply([color_jitter], p=0.8),
            transforms.RandomGrayscale(p=0.2),
            transforms.RandomApply([GaussianBlur(kernel_size=23)], p=gb_prob),
            transforms.RandomApply([Solarize()], p=solarize_prob),
            transforms.ToTensor(),
            normalize,
        ]
    elif stage == 'ft':
        steps = [
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]
    elif stage == 'test':
        steps = [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]
    return transforms.Compose(steps)
| 35.269841 | 90 | 0.629613 | 259 | 2,222 | 5.185328 | 0.328185 | 0.044676 | 0.041698 | 0.025316 | 0.18764 | 0.11169 | 0.11169 | 0.11169 | 0 | 0 | 0 | 0.039879 | 0.255176 | 2,222 | 62 | 91 | 35.83871 | 0.771601 | 0.009001 | 0 | 0.272727 | 0 | 0 | 0.006361 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.127273 | false | 0 | 0.090909 | 0.018182 | 0.345455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76355d84533c29fb76e6db2e485cbc743371545e | 9,999 | py | Python | adjudicator/tests/data.py | johnpooch/diplomacy-adjudicator | 5a65ee45394afba71b0d6a9a5a1d6f0468a75384 | [
"MIT"
] | null | null | null | adjudicator/tests/data.py | johnpooch/diplomacy-adjudicator | 5a65ee45394afba71b0d6a9a5a1d6f0468a75384 | [
"MIT"
] | 7 | 2020-04-05T17:40:53.000Z | 2020-04-07T17:33:10.000Z | adjudicator/tests/data.py | johnpooch/diplomacy-adjudicator | 5a65ee45394afba71b0d6a9a5a1d6f0468a75384 | [
"MIT"
] | null | null | null | import inspect
from adjudicator.territory import CoastalTerritory, InlandTerritory, \
SeaTerritory, Territory
from adjudicator.named_coast import NamedCoast
class Nations:
    """String constants naming the seven Diplomacy powers.

    Most territory fixtures below use numeric power ids; these constants
    appear where a named owner is required (Berlin, Moscow,
    St. Petersburg).
    """
    ENGLAND = 'ENGLAND'
    FRANCE = 'FRANCE'
    GERMANY = 'GERMANY'
    ITALY = 'ITALY'
    AUSTRIA = 'AUSTRIA'
    TURKEY = 'TURKEY'
    RUSSIA = 'RUSSIA'
class Territories:
    """Builds every territory of the Diplomacy map as instance attributes.

    Constructor arguments appear to be ``(id, name, owner, neighbour_ids
    [, shared_coast_ids])`` where owner is a Nations constant, a numeric
    power id, or None for neutral spaces -- TODO confirm against the
    Territory classes in adjudicator.territory.
    """

    def __init__(self):
        # --- sea spaces ---
        self.ADRIATIC_SEA = SeaTerritory(1, 'adriatic sea', [20, 22, 11, 53, 56])
        self.AEGEAN_SEA = SeaTerritory(2, 'aegean sea', [5, 73, 28, 7, 33, 11, 50])
        self.BALTIC_SEA = SeaTerritory(3, 'baltic sea', [24, 6, 29, 35, 48, 51, 37])
        self.BARRENTS_SEA = SeaTerritory(4, 'barrents sea', [15, 42, 75])
        self.BLACK_SEA = SeaTerritory(5, 'black sea', [21, 23, 73, 28, 47, 49])
        # NOTE(review): 75 appears twice below -- probable typo; verify.
        self.GULF_OF_BOTHNIA = SeaTerritory(6, 'gulf of bothnia', [3, 31, 37, 51, 75, 75])
        self.EASTERN_MEDITERRANEAN = SeaTerritory(7, 'eastern mediterranean', [2, 50, 52])
        self.ENGLISH_CHANNEL = SeaTerritory(8, 'english channel', [25, 26, 12, 36, 13, 16, 43, 57])
        self.GULF_OF_LYON = SeaTerritory(9, 'gulf of lyon', [39, 44, 74, 55, 18, 19])
        self.HELGOLAND_BIGHT = SeaTerritory(10, 'helgoland bight', [29, 34, 35, 16])
        self.IONIAN_SEA = SeaTerritory(11, 'ionian sea', [2, 1, 20, 22, 33, 41, 54, 18])
        self.IRISH_SEA = SeaTerritory(12, 'irish sea', [8, 38, 13, 14, 57])
        self.MID_ATLANTIC = SeaTerritory(13, 'mid atlantic', [26, 8, 32, 12, 40, 14, 74, 45, 19])
        self.NORTH_ATLANTIC = SeaTerritory(14, 'north atlantic', [27, 12, 38, 13, 15])
        self.NORWEGIAN_SEA = SeaTerritory(15, 'norwegian sea', [4, 27, 30, 14, 42, 16])
        self.NORTH_SEA = SeaTerritory(16, 'north sea', [25, 29, 30, 8, 10, 34, 36, 15, 42, 17, 58])
        self.SKAGERRAK = SeaTerritory(17, 'skagerrak', [29, 16, 42, 51])
        self.TYRRHENIAN_SEA = SeaTerritory(18, 'tyrrhenian sea', [9, 11, 41, 46, 54, 55, 19])
        self.WESTERN_MEDITERRANEAN = SeaTerritory(19, 'western mediterranean', [9, 13, 40, 74, 54, 18])
        # --- coastal territories ---
        self.ALBANIA = CoastalTerritory(20, 'albania', None, [1, 33, 11, 67, 53], [33, 53])
        self.ANKARA = CoastalTerritory(21, 'ankara', 7, [23, 5, 28, 50], [23, 28])
        self.APULIA = CoastalTerritory(22, 'apulia', 5, [1, 11, 41, 46, 56], [41, 56])
        # NOTE(review): 21 appears twice; the Black Sea (5) may be the
        # intended second neighbour -- verify before changing test data.
        self.ARMENIA = CoastalTerritory(23, 'armenia', 7, [21, 21, 49, 50, 52], [49, 21])
        self.BERLIN = CoastalTerritory(24, 'berlin', Nations.GERMANY, [3, 35, 64, 48, 68], [35, 48], supply_center=True, controlled_by=Nations.GERMANY)
        self.BELGIUM = CoastalTerritory(25, 'belgium', None, [61, 8, 34, 43, 66, 16], [34, 43])
        self.BREST = CoastalTerritory(26, 'brest', 2, [8, 32, 13, 65, 43], [32, 43])
        self.CLYDE = CoastalTerritory(27, 'clyde', 1, [30, 38, 12, 14, 15], [30, 38])
        self.CONSTANTINOPLE = CoastalTerritory(28, 'constantinople', 7, [2, 21, 5, 73, 50], [21, 50])
        self.DENMARK = CoastalTerritory(29, 'denmark', None, [3, 10, 35, 16, 17, 51], [35, 51])
        self.EDINBURGH = CoastalTerritory(30, 'edinburgh', 1, [27, 38, 15, 16, 58], [27, 58])
        self.FINLAND = CoastalTerritory(31, 'finland', None, [6, 42, 75, 51], [51])
        self.GASCONY = CoastalTerritory(32, 'gascony', 2, [26, 61, 39, 13, 65, 74], [26])
        self.GREECE = CoastalTerritory(33, 'greece', None, [2, 20, 73, 11, 67], [20, 73])
        self.HOLLAND = CoastalTerritory(34, 'holland', None, [25, 10, 35, 16, 66], [25, 35])
        self.KIEL = CoastalTerritory(35, 'kiel', 3, [3, 24, 29, 10, 34, 64, 66], [24, 29, 34])
        self.LONDON = CoastalTerritory(36, 'london', 1, [8, 16, 57, 58], [57, 58])
        self.LIVONIA = CoastalTerritory(37, 'livonia', 6, [3, 6, 63, 48, 75, 72], [48])
        self.LIVERPOOL = CoastalTerritory(38, 'liverpool', 1, [27, 30, 12, 14, 57, 58], [27, 57])
        self.MARSEILLES = CoastalTerritory(39, 'marseilles', 2, [61, 32, 9, 44, 74], [44])
        self.NORTH_AFRICA = CoastalTerritory(40, 'north africa', None, [13, 54, 19], [54])
        self.NAPLES = CoastalTerritory(41, 'naples', 5, [22, 11, 46, 18], [22, 46])
        self.NORWAY = CoastalTerritory(42, 'norway', None, [4, 31, 15, 16, 17, 75, 51], [51])
        self.PICARDY = CoastalTerritory(43, 'picardy', 2, [26, 25, 61, 8, 65], [26, 25])
        self.PIEDMONT = CoastalTerritory(44, 'piedmont', 5, [9, 39, 55, 69, 56], [39, 55])
        self.PORTUGAL = CoastalTerritory(45, 'portugal', None, [13, 74], [74])
        self.ROME = CoastalTerritory(46, 'rome', 5, [22, 41, 55, 18, 56], [41, 55])
        self.RUMANIA = CoastalTerritory(47, 'rumania', None, [5, 60, 73, 62, 67, 49, 70], [49])
        self.PRUSSIA = CoastalTerritory(48, 'prussia', 3, [3, 24, 37, 68, 72], [24, 37])
        self.SEVASTAPOL = CoastalTerritory(49, 'sevastapol', 6, [23, 5, 63, 47, 70], [23, 47])
        self.SMYRNA = CoastalTerritory(50, 'smyrna', 7, [2, 23, 21, 28, 7, 52], [28, 52])
        self.SWEDEN = CoastalTerritory(51, 'sweden', None, [3, 6, 29, 31, 42, 17], [29, 31, 42])
        self.SYRIA = CoastalTerritory(52, 'syria', 7, [23, 7, 50], [50])
        self.TRIESTE = CoastalTerritory(53, 'trieste', 4, [1, 20, 60, 52, 69, 56, 71], [20, 56])
        self.TUNIS = CoastalTerritory(54, 'tunis', None, [11, 40, 18, 19], [40])
        self.TUSCANY = CoastalTerritory(55, 'tuscany', 5, [9, 44, 46, 18, 56], [44, 46])
        self.VENICE = CoastalTerritory(56, 'venice', 5, [1, 22, 46, 44, 53, 18, 69], [22, 53])
        self.WALES = CoastalTerritory(57, 'wales', 1, [8, 12, 36, 38, 58], [36, 38])
        self.YORKSHIRE = CoastalTerritory(58, 'yorkshire', 1, [30, 36, 38, 16, 57], [30, 36])
        # --- landlocked territories ---
        self.BOHEMIA = InlandTerritory(59, 'bohemia', 4, [62, 64, 68, 69, 71])
        self.BUDAPEST = InlandTerritory(60, 'budapest', 4, [62, 47, 67, 53, 71])
        self.BURGUNDY = InlandTerritory(61, 'burgundy', 2, [26, 25, 32, 39, 64, 65, 43, 66])
        self.GALICIA = InlandTerritory(62, 'galicia', 4, [59, 60, 47, 68, 70, 71, 72])
        self.MOSCOW = InlandTerritory(63, 'moscow', Nations.RUSSIA, [37, 47, 49, 75, 70, 72], supply_center=True, controlled_by=Nations.RUSSIA)
        self.MUNICH = InlandTerritory(64, 'munich', 3, [24, 59, 61, 35, 68, 66, 69])
        self.PARIS = InlandTerritory(65, 'paris', 2, [26, 61, 32, 43, 68, 66, 69])
        self.RUHR = InlandTerritory(66, 'ruhr', 3, [26, 61, 34, 35, 64])
        self.SERBIA = InlandTerritory(67, 'serbia', None, [20, 60, 73, 33, 47, 53])
        self.SILESIA = InlandTerritory(68, 'silesia', 3, [24, 59, 62, 64, 48, 72])
        self.TYROLIA = InlandTerritory(69, 'tyrolia', 4, [59, 64, 53, 56, 71])
        self.UKRAINE = InlandTerritory(70, 'ukraine', 6, [62, 63, 47, 49, 72])
        self.VIENNA = InlandTerritory(71, 'vienna', 4, [59, 60, 62, 53, 69])
        self.WARSAW = InlandTerritory(72, 'warsaw', 6, [62, 37, 63, 68, 48, 70])
        # --- split-coast territories (their coasts live in NamedCoasts) ---
        self.BULGARIA = CoastalTerritory(73, 'bulgaria', None, [2, 5, 28, 33, 47, 67], [47, 33, 28])
        self.SPAIN = CoastalTerritory(74, 'spain', None, [32, 39, 45, 9, 19, 13], [32, 45, 39])
        self.ST_PETERSBURG = CoastalTerritory(75, 'st. petersburg', Nations.RUSSIA, [31, 37, 63, 42], [37, 31, 42], supply_center=True, controlled_by=Nations.RUSSIA)
class NamedCoasts:
    """Named coasts for the split-coast territories (Spain, Bulgaria,
    St. Petersburg), each listing the spaces reachable from that coast.

    Fix: the south coast of St. Petersburg was mislabelled
    'st petersburg nc' (copy-paste of the north-coast name); it is now
    'st petersburg sc'.
    """

    def __init__(self, territories):
        self.SPAIN_SC = NamedCoast(1, 'spain sc', territories.SPAIN, [
            territories.MARSEILLES, territories.PORTUGAL, territories.MID_ATLANTIC,
            territories.WESTERN_MEDITERRANEAN, territories.GULF_OF_LYON
        ])
        self.SPAIN_NC = NamedCoast(2, 'spain nc', territories.SPAIN, [
            territories.PORTUGAL, territories.MID_ATLANTIC, territories.GASCONY
        ])
        self.BULGARIA_EC = NamedCoast(3, 'bulgaria ec', territories.BULGARIA, [
            territories.BLACK_SEA, territories.RUMANIA, territories.CONSTANTINOPLE,
        ])
        self.BULGARIA_SC = NamedCoast(4, 'bulgaria sc', territories.BULGARIA, [
            territories.CONSTANTINOPLE, territories.AEGEAN_SEA, territories.GREECE
        ])
        self.ST_PETERSBURG_NC = NamedCoast(5, 'st petersburg nc', territories.ST_PETERSBURG, [
            territories.BARRENTS_SEA, territories.NORWAY
        ])
        # Was 'st petersburg nc' -- duplicated from the coast above.
        self.ST_PETERSBURG_SC = NamedCoast(6, 'st petersburg sc', territories.ST_PETERSBURG, [
            territories.FINLAND, territories.LIVONIA, territories.GULF_OF_BOTHNIA
        ])
def register_all(state, territories, named_coasts):
    """Register every Territory attribute of *territories* and every
    NamedCoast attribute of *named_coasts* with *state*; return the state.

    The original contained two byte-identical reflection loops; they are
    collapsed into one data-driven loop (territories are still registered
    before named coasts, preserving the original order).
    """
    for container, wanted_cls in ((territories, Territory),
                                  (named_coasts, NamedCoast)):
        members = inspect.getmembers(
            container, lambda a: not inspect.isroutine(a))
        for value in (member[1] for member in members):
            if isinstance(value, wanted_cls):
                state.register(value)
    return state
| 61.343558 | 151 | 0.538054 | 1,206 | 9,999 | 4.411277 | 0.171642 | 0.028195 | 0.007331 | 0.014662 | 0.085902 | 0.085902 | 0.059774 | 0.026316 | 0.026316 | 0.026316 | 0 | 0.163455 | 0.309831 | 9,999 | 162 | 152 | 61.722222 | 0.607448 | 0 | 0 | 0.065789 | 0 | 0 | 0.074607 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019737 | false | 0 | 0.019737 | 0 | 0.111842 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
763689cceab35317b177881056ba2ba454cd9b3e | 3,874 | py | Python | python/ray/tune/function_runner.py | matthew-z/ray | f37c260bdb3cbaafc783c6274f2c4b929fce0f9a | [
"Apache-2.0"
] | null | null | null | python/ray/tune/function_runner.py | matthew-z/ray | f37c260bdb3cbaafc783c6274f2c4b929fce0f9a | [
"Apache-2.0"
] | null | null | null | python/ray/tune/function_runner.py | matthew-z/ray | f37c260bdb3cbaafc783c6274f2c4b929fce0f9a | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import threading
import traceback
from ray.tune import TuneError
from ray.tune.trainable import Trainable
from ray.tune.result import TIMESTEPS_TOTAL
class StatusReporter(object):
    """Object passed into your main() that you can report status through.

    Example:
        >>> reporter = StatusReporter()
        >>> reporter(timesteps_total=1)
    """

    def __init__(self):
        self._lock = threading.Lock()
        self._latest_result = None
        self._last_result = None
        self._error = None
        self._done = False

    def __call__(self, **kwargs):
        """Record the latest training result (a copy of *kwargs*)."""
        with self._lock:
            snapshot = kwargs.copy()
            self._latest_result = snapshot
            self._last_result = snapshot

    def _get_and_clear_status(self):
        # Surface a runner-thread failure to the caller.
        if self._error:
            raise TuneError("Error running trial: " + str(self._error))
        if self._done and not self._latest_result:
            # Trial finished: re-deliver the final result flagged done.
            if not self._last_result:
                raise TuneError("Trial finished without reporting result!")
            self._last_result.update(done=True)
            return self._last_result
        with self._lock:
            pending, self._latest_result = self._latest_result, None
            return pending

    def _stop(self):
        self._error = "Agent stopped"
# Default config for FunctionRunner: _train() sleeps at least this many
# seconds before polling the reporter for a result.
DEFAULT_CONFIG = {
    # batch results to at least this granularity
    "script_min_iter_time_s": 1,
}
class _RunnerThread(threading.Thread):
"""Supervisor thread that runs your script."""
def __init__(self, entrypoint, config, status_reporter):
self._entrypoint = entrypoint
self._entrypoint_args = [config, status_reporter]
self._status_reporter = status_reporter
threading.Thread.__init__(self)
self.daemon = True
def run(self):
try:
self._entrypoint(*self._entrypoint_args)
except Exception as e:
self._status_reporter._error = e
print("Runner thread raised: {}".format(traceback.format_exc()))
raise e
finally:
self._status_reporter._done = True
class FunctionRunner(Trainable):
    """Trainable that runs a user function returning training results.
    This mode of execution does not support checkpoint/restore."""

    _name = "func"
    _default_config = DEFAULT_CONFIG

    def _setup(self):
        # Start the user function on a background _RunnerThread; results
        # flow back through the shared StatusReporter.
        entrypoint = self._trainable_func()
        self._status_reporter = StatusReporter()
        # Strip runner-internal keys so the user function only sees its
        # own config.
        scrubbed_config = self.config.copy()
        for k in self._default_config:
            if k in scrubbed_config:
                del scrubbed_config[k]
        self._runner = _RunnerThread(entrypoint, scrubbed_config,
                                     self._status_reporter)
        self._start_time = time.time()
        self._last_reported_timestep = 0
        self._runner.start()

    def _trainable_func(self):
        """Subclasses can override this to set the trainable func."""
        raise NotImplementedError

    def _train(self):
        # Wait at least script_min_iter_time_s, then poll the reporter
        # once per second until the user function reports a result.
        time.sleep(
            self.config.get("script_min_iter_time_s",
                            self._default_config["script_min_iter_time_s"]))
        result = self._status_reporter._get_and_clear_status()
        while result is None:
            time.sleep(1)
            result = self._status_reporter._get_and_clear_status()
        # Derive per-iteration timesteps from the cumulative counter.
        curr_ts_total = result.get(TIMESTEPS_TOTAL,
                                   self._last_reported_timestep)
        result.update(
            timesteps_this_iter=(curr_ts_total - self._last_reported_timestep))
        self._last_reported_timestep = curr_ts_total
        return result

    def _stop(self):
        self._status_reporter._stop()
| 30.265625 | 79 | 0.639133 | 437 | 3,874 | 5.295195 | 0.304348 | 0.066551 | 0.06223 | 0.041487 | 0.083838 | 0.035436 | 0.035436 | 0.035436 | 0 | 0 | 0 | 0.001439 | 0.282395 | 3,874 | 127 | 80 | 30.503937 | 0.830935 | 0.128033 | 0 | 0.094118 | 0 | 0 | 0.050817 | 0.019964 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.105882 | 0 | 0.317647 | 0.023529 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
763736d01150c9f6733b323b130ddac41c941677 | 5,501 | py | Python | software_scripts/dnapilib/exhaust.py | 19zhangt/sRNA_analysis_Maize | 8a7688a30f02f0a86b05bd820a8c7d7d110b2767 | [
"MIT"
] | 20 | 2016-10-18T13:33:57.000Z | 2021-12-18T02:04:00.000Z | software_scripts/dnapilib/exhaust.py | 19zhangt/sRNA_analysis_Maize | 8a7688a30f02f0a86b05bd820a8c7d7d110b2767 | [
"MIT"
] | 2 | 2016-10-19T21:06:39.000Z | 2017-03-23T18:04:41.000Z | software_scripts/dnapilib/exhaust.py | 19zhangt/sRNA_analysis_Maize | 8a7688a30f02f0a86b05bd820a8c7d7d110b2767 | [
"MIT"
] | 6 | 2016-11-10T04:23:14.000Z | 2022-03-10T12:20:11.000Z | """Functions for exhaustive adapter search
incorporating with read mapping process.
"""
import fileinput
import os.path
import re
import shutil
import subprocess
from dnapilib.io_utils import get_file_obj
from dnapilib.io_utils import fastq_sequence
from dnapilib.io_utils import fastq_record
def rm_temp_dir(temp_dir):
    """Remove the temporary directory and its contents, if it exists.

    Fix: the previous ``subprocess.call("rm -r {}".format(p).split())``
    broke on paths containing whitespace (the command string was split
    on spaces) and only worked where an ``rm`` binary exists;
    ``shutil.rmtree`` is portable and handles arbitrary paths.
    """
    if temp_dir and os.path.exists(temp_dir):
        shutil.rmtree(temp_dir)
def clip_adapter(fp, aseed, tm5, tm3, min_len, max_len):
    """Yield reads with the 3' adapter seed removed and both ends trimmed
    by tm5/tm3; only inserts within [min_len, max_len] are yielded."""
    pattern = re.compile("(.*)" + aseed, re.IGNORECASE)
    seed_len = len(aseed)
    for seq in fastq_sequence(fp):
        if len(seq) < tm5 or len(seq) < tm3:
            raise Exception("trimming length is too large")
        hit = pattern.search(seq)
        if hit is None:
            continue
        adapter_start = hit.end() - seed_len
        insert = seq[tm5:adapter_start - tm3]
        if min_len <= len(insert) <= max_len:
            yield insert
def to_fasta(fastq, fasta, aseed, tm5, tm3, min_len, max_len):
    """Write unique clean reads to *fasta* as ``>seq_count`` records and
    return the total number of clean reads written.

    If *aseed* is a prefix of "RAW_INPUT", reads are taken as-is instead
    of being adapter-clipped.
    """
    fq_obj = get_file_obj(fastq)
    if "RAW_INPUT".startswith(aseed):
        reads = fastq_sequence(fq_obj)
    else:
        reads = clip_adapter(fq_obj, aseed, tm5, tm3, min_len, max_len)
    counts = {}
    for seq in reads:
        counts[seq] = counts.get(seq, 0) + 1
    total = 0
    with open(fasta, "w") as fa_obj:
        for seq, cnt in counts.items():
            total += cnt
            fa_obj.write(">{0}_{1}\n{0}\n".format(seq, cnt))
    fq_obj.close()
    return total
def fastq_input_prep(fastq, ratio, temp_dir):
    """Subsample *fastq* at *ratio* into ``temp_dir/input.fq``; return
    (output path, sampled read count, population stddev of read lengths)."""
    step = int(1 / ratio)
    fq_out = "{}/input.fq".format(temp_dir)
    length_hist = {}
    n_reads = 0.0
    fq_obj = get_file_obj(fastq)
    fout = open(fq_out, "w")
    for i, rec in enumerate(fastq_record(fq_obj)):
        # Keep every step-th record.
        if i % step:
            continue
        fout.write(rec)
        n_reads += 1
        read_len = len(rec.split("\n")[1])
        length_hist[read_len] = length_hist.get(read_len, 0) + 1
    fout.close()
    fq_obj.close()
    mean = sum(l * c for l, c in length_hist.items()) / n_reads
    variance = sum((l - mean) ** 2 * c for l, c in length_hist.items()) / n_reads
    return fq_out, n_reads, variance ** 0.5
def count_mapped_read_sam(samout):
    """Return the number of reads mapped to the genome in a SAM file.

    Read names encode multiplicity as ``<seq>_<count>``; the counts of
    the uniquely named mapped reads (RNAME column != '*') are summed.

    Fix: ``fileinput.input(samout)`` was never closed, leaking the file
    handle; a ``with open(...)`` block closes it deterministically.
    """
    if not os.path.exists(samout):
        raise Exception("can't open SAM")
    mapped = set()
    with open(samout) as sam:
        for line in sam:
            # Skip empty lines and @-prefixed header lines.
            if not line or line.startswith("@"):
                continue
            fields = line.rstrip().split("\t")
            if fields[2] != '*':
                mapped.add(fields[0])
    return sum(int(name.split('_')[1]) for name in mapped)
def map_clean_reads(fastq, adapter, tm5, tm3,
                    min_len, max_len, map_command, temp_dir):
    """Clip reads with *adapter*, run *map_command* on the resulting
    FASTA, and return (clean read count, mapped read count).

    *map_command* must contain the @in/@out placeholders for the FASTA
    input and SAM output paths.
    """
    fasta = "{0}/insert_{1}.fa".format(temp_dir, adapter)
    samout = "{}/output.sam".format(temp_dir)
    n_clean = to_fasta(fastq, fasta, adapter, tm5, tm3, min_len, max_len)
    cmd = map_command.replace("@in", fasta).replace("@out", samout)
    cmd += " 2> /dev/null"
    # Mapper output is user-supplied; a non-zero exit aborts the search.
    if subprocess.call(cmd, shell=True) != 0:
        raise Exception("mapping failed, check command line")
    n_mapped = count_mapped_read_sam(samout)
    return n_clean, n_mapped
def make_stats_report(table, sampled_read, subsample_rate, prefix_match,
                      sd, fastq, output_dir, temp_dir, no_output_files):
    """Report read statistics with predicted adapters.

    table rows appear to be (adapter, extracted, extracted%, mapped,
    mapped%, params) -- the row with the most mapped reads (index 3) is
    chosen as optimal. Prints the report and, unless no_output_files,
    also writes <output_dir>/<fastq prefix>_report.txt and moves the
    winning clipped FASTA out of temp_dir.
    """
    out = ["# sampled_reads={} (total_reads * {:.2f})".format(
        int(sampled_read), subsample_rate)]
    out.append("\t".join([
        "# 3'adapter",
        "reads_extracted",
        "(reads_extracted/sampled_reads)%",
        "reads_mapped",
        "(reads_mapped/sampled_reads)%",
        "params_k:r"]))
    # Track the candidate with the highest mapped-read count while
    # appending every row to the report.
    max_mapped_read = -1
    max_index = -1
    for i, x in enumerate(table):
        if x[3] > max_mapped_read:
            max_mapped_read = x[3]
            max_index = i
        out.append("{}\t{}\t{:.2f}\t{}\t{:.2f}\t{}".format(*x))
    optimal = [table[max_index][0]]
    fq_prefix = os.path.basename(fastq).split(".")[0]
    # Flag a weak winner (mapped% below 20 -- column 4).
    if table[max_index][4] < 20:
        optimal.append("/POOR_QUALITY")
    if optimal[0] == "RAW_INPUT":
        # Non-zero length stddev suggests reads were already trimmed;
        # zero stddev (uniform lengths) leaves the call uncertain ("?").
        if sd:
            out.append("# input reads look already clean!")
        else:
            optimal.append("?")
    else:
        if no_output_files:
            pass
        else:
            # Move the winning adapter's clipped FASTA out of temp_dir.
            if not os.path.exists(output_dir):
                subprocess.call("mkdir {}".format(output_dir).split())
            aseq = optimal[0][:prefix_match]
            fa_tmp = "{}/insert_{}.fa".format(temp_dir, aseq)
            fa_out = "{}/{}_{}.fa".format(output_dir, fq_prefix, aseq)
            subprocess.call(("mv {} {}".format(fa_tmp,fa_out)).split())
    out.insert(0, "optimal_3'adapter={}\n".format(''.join(optimal)))
    report = "\n".join(out)
    print(report)
    if not no_output_files:
        f = open("{}/{}_report.txt".format(output_dir, fq_prefix), "w")
        f.write(report + "\n")
        f.close()
| 31.079096 | 74 | 0.59262 | 769 | 5,501 | 4.052016 | 0.23407 | 0.026958 | 0.02086 | 0.019255 | 0.141207 | 0.10751 | 0.056162 | 0.022465 | 0.022465 | 0 | 0 | 0.013587 | 0.264134 | 5,501 | 176 | 75 | 31.255682 | 0.756176 | 0.099255 | 0 | 0.078125 | 0 | 0 | 0.104051 | 0.023237 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054688 | false | 0.007813 | 0.054688 | 0 | 0.140625 | 0.007813 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7638daeb235963308792805a277cabb2ff7708fd | 6,539 | py | Python | script.module.exodus/lib/resources/lib/sources/en/to_be_fixed/sitedown/tunemovie.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | 1 | 2019-03-05T09:38:10.000Z | 2019-03-05T09:38:10.000Z | script.module.exodus/lib/resources/lib/sources/en/to_be_fixed/sitedown/tunemovie.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | null | null | null | script.module.exodus/lib/resources/lib/sources/en/to_be_fixed/sitedown/tunemovie.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | 1 | 2021-11-05T20:48:09.000Z | 2021-11-05T20:48:09.000Z | # NEEDS FIXING
# -*- coding: utf-8 -*-
'''
Exodus Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,json,base64
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
class source:
    # Python 2 era Kodi/Exodus scraper (urllib.quote_plus/urlencode) for
    # the tunemovie site. The bare except/return-None pattern throughout
    # is the add-on framework's convention for "no result".
    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['tunemovie.com', 'tunemovie.tv']
        self.base_link = 'https://tunemovie.com'
        self.search_link = '/search/%s.html'

    def movie(self, imdb, title, localtitle, aliases, year):
        # Search the site; return the result URL whose cleaned title and
        # year match, or None on any failure.
        try:
            query = urlparse.urljoin(self.base_link, self.search_link)
            query = query % urllib.quote_plus(title)
            t = cleantitle.get(title)
            r = client.request(query)
            r = client.parseDOM(r, 'div', attrs = {'class': 'thumb'})
            # Build (href, title, year) triples from each result thumb.
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), re.findall('(\d{4})', i)) for i in r]
            r = [(i[0][0], i[1][0], i[2][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
            url = [i[0] for i in r if t in cleantitle.get(i[1]) and year == i[2]][0]
            return url
        except:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        # Defer the lookup: just urlencode the identifiers; episode()
        # does the actual search.
        try:
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        # Find the "Season N" page for the show, then append the episode
        # number as a query parameter for sources() to pick up.
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            query = urlparse.urljoin(self.base_link, self.search_link)
            query = query % urllib.quote_plus(data['tvshowtitle'])
            t = cleantitle.get(data['tvshowtitle'])
            r = client.request(query)
            r = client.parseDOM(r, 'div', attrs = {'class': 'thumb'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), re.findall('(\d{4})', i)) for i in r]
            r = [(i[0][0], i[1][0], i[2][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
            url = [i[0] for i in r if t in cleantitle.get(i[1]) and ('Season %s' % season) in i[1]][0]
            url += '?episode=%01d' % int(episode)
            return url
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        # Resolve playable stream entries for a movie/episode page by
        # posting to the site's ipplugins/ipplayer endpoints.
        try:
            sources = []
            if url == None: return sources
            url = urlparse.urljoin(self.base_link, url)
            try:
                # Split off the ?episode=N marker added by episode().
                url, episode = re.findall('(.+?)\?episode=(\d*)$', url)[0]
            except:
                episode = None
            ref = url
            # Retry the page request up to 3 times.
            for i in range(3):
                result = client.request(url)
                if not result == None: break
            if not episode == None:
                # Drill into the episode list and fetch the episode page.
                result = client.parseDOM(result, 'div', attrs = {'id': 'ip_episode'})[0]
                ep_url = client.parseDOM(result, 'a', attrs = {'data-name': str(episode)}, ret='href')[0]
                for i in range(3):
                    result = client.request(ep_url)
                    if not result == None: break
            r = client.parseDOM(result, 'div', attrs = {'class': '[^"]*server_line[^"]*'})
            for u in r:
                try:
                    # First POST: resolve the server's stream token (s/v).
                    url = urlparse.urljoin(self.base_link, '/ip.file/swf/plugins/ipplugins.php')
                    p1 = client.parseDOM(u, 'a', ret='data-film')[0]
                    p2 = client.parseDOM(u, 'a', ret='data-server')[0]
                    p3 = client.parseDOM(u, 'a', ret='data-name')[0]
                    post = {'ipplugins': 1, 'ip_film': p1, 'ip_server': p2, 'ip_name': p3}
                    post = urllib.urlencode(post)
                    for i in range(3):
                        result = client.request(url, post=post, XHR=True, referer=ref, timeout='10')
                        if not result == None: break
                    result = json.loads(result)
                    u = result['s']
                    s = result['v']
                    # Second POST: ask the player endpoint for stream data,
                    # trying player indices 0-2.
                    url = urlparse.urljoin(self.base_link, '/ip.file/swf/ipplayer/ipplayer.php')
                    for n in range(3):
                        try:
                            post = {'u': u, 'w': '100%', 'h': '420', 's': s, 'n': n}
                            post = urllib.urlencode(post)
                            result = client.request(url, post=post, XHR=True, referer=ref)
                            src = json.loads(result)['data']
                            if type(src) is list:
                                # Google-video style: list of file entries.
                                src = [i['files'] for i in src]
                                for i in src:
                                    try:
                                        sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                                    except:
                                        pass
                            else:
                                # CDN style: fetch the embed and pull the
                                # <source src> URL, tagging a User-agent.
                                src = client.request(src)
                                src = client.parseDOM(src, 'source', ret='src', attrs = {'type': 'video.+?'})[0]
                                src += '|%s' % urllib.urlencode({'User-agent': client.randomagent()})
                                sources.append({'source': 'cdn', 'quality': 'HD', 'language': 'en', 'url': src, 'direct': False, 'debridonly': False})
                        except:
                            pass
                except:
                    pass
            return sources
        except:
            return sources

    def resolve(self, url):
        return directstream.googlepass(url)
| 38.692308 | 193 | 0.495183 | 784 | 6,539 | 4.100765 | 0.267857 | 0.05661 | 0.022395 | 0.013064 | 0.387558 | 0.306376 | 0.24479 | 0.24479 | 0.235148 | 0.196579 | 0 | 0.016671 | 0.367029 | 6,539 | 168 | 194 | 38.922619 | 0.760087 | 0.101239 | 0 | 0.412844 | 0 | 0 | 0.096874 | 0.018894 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055046 | false | 0.036697 | 0.036697 | 0.009174 | 0.183486 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7639a2cb903461ce10fef3cbb9b660fdde7ac909 | 638 | py | Python | game/urls.py | RafayelGardishyan/Memorears | c3de63aca7be487fbe4fd264d0657e290b57fea9 | [
"MIT"
] | 1 | 2018-11-03T12:42:12.000Z | 2018-11-03T12:42:12.000Z | game/urls.py | RafayelGardishyan/Memorears | c3de63aca7be487fbe4fd264d0657e290b57fea9 | [
"MIT"
] | 7 | 2020-02-11T23:22:34.000Z | 2022-03-11T23:34:09.000Z | game/urls.py | RafayelGardishyan/Memorears | c3de63aca7be487fbe4fd264d0657e290b57fea9 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
# URL routes for the memory-game app. Each path maps to a view callable
# in this app's views module; <int:...> segments pass values as view
# arguments. Route order is preserved (Django matches top-down).
urlpatterns = [
    path('getgame', views.get),
    path('getgame/<int:id>', views.getwid),
    path('setonline/<int:id>', views.setonline),
    path('changeturn/<int:do>', views.change_turn),
    path('set/score/<int:player>/<int:plus>', views.setscore),
    path('set/opencard/<int:cardid>/<int:player>', views.setopencard),
    path('set/resetcards', views.reset),
    path('set/resetscore', views.resetscore),
    path('player', views.playerscreen),
    path('create', views.create),
    path('lobby', views.lobby),
    path('online', views.index),
    path('lock', views.lockroom)
]
| 33.578947 | 70 | 0.658307 | 80 | 638 | 5.2375 | 0.4375 | 0.066826 | 0.047733 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142633 | 638 | 18 | 71 | 35.444444 | 0.765996 | 0 | 0 | 0 | 0 | 0 | 0.291536 | 0.111285 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.117647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
763d98e383358902c6fc9ed9f9a4697b765a3fc1 | 4,712 | py | Python | cryptysto/ledger.py | morucci/cryptysto | 29441adb431571a10e213064f696af7939f893d1 | [
"MIT"
] | null | null | null | cryptysto/ledger.py | morucci/cryptysto | 29441adb431571a10e213064f696af7939f893d1 | [
"MIT"
] | null | null | null | cryptysto/ledger.py | morucci/cryptysto | 29441adb431571a10e213064f696af7939f893d1 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from typing import Literal, List, Union, Callable
from datetime import datetime
from pathlib import Path
from cryptysto.types import *
from cryptysto.binance import transform_binance_le_to_generic, load_binance_ledger_file
from cryptysto.bitfinex import (
transform_bifinex_le_to_generic,
load_bitfinex_ledger_file,
)
from cryptysto.kraken import transform_kraken_le_to_generic, load_kraken_ledger_file
from cryptysto.local import transform_local_le_to_generic, load_local_ledger_file
from cryptysto import utils
def load_ledger_file(
    _type: LedgerType, path: Path
) -> Union[BitfinexLedger, KrakenLedger, BinanceLedger, LocalLedger]:
    """Load one exchange ledger file, dispatching on its declared type.

    Raises RuntimeError for an unsupported ledger type.
    """
    loaders = {
        "bitfinex": load_bitfinex_ledger_file,
        "kraken": load_kraken_ledger_file,
        "binance": load_binance_ledger_file,
        "local": load_local_ledger_file,
    }
    if _type not in loaders:
        raise RuntimeError("Ledger type not supported")
    return loaders[_type](path)
def transform_to_generic(ledgers: InputLedgers) -> GenericLedger:
    """Convert raw exchange ledger entries into one de-duplicated GenericLedger."""
    result = GenericLedger(ops=[])

    def extend_dedup(entries: List) -> None:
        # Only append entries not already present; warn about duplicates so
        # overlapping export files are noticed.
        for entry in entries:
            if entry in result.ops:
                print("Dedup Warn: %s" % entry.show())
            else:
                result.ops.append(entry)

    converters = [
        (KrakenLedgerEntry, transform_kraken_le_to_generic),
        (BinanceLedgerEntry, transform_binance_le_to_generic),
        (BitfinexLedgerEntry, transform_bifinex_le_to_generic),
        (LocalLedgerEntry, transform_local_le_to_generic),
    ]
    for ledger in ledgers:
        for entry in ledger:
            for entry_cls, convert in converters:
                if isinstance(entry, entry_cls):
                    extend_dedup(convert(entry))
    return result
def display_ledger(ledger: GenericLedger) -> None:
    """Print every operation of *ledger* in chronological order."""
    ordered = sorted(ledger.ops, key=lambda op: op.date)
    for operation in ordered:
        print(operation.show())
def display_last_op(ledger: GenericLedger) -> None:
    """Print the most recent operation recorded for each exchange."""
    for exchange in set(op.exchange for op in ledger.ops):
        ops_for_exchange = sorted(
            (op for op in ledger.ops if op.exchange == exchange),
            key=lambda op: op.date,
        )
        # Skip exchanges with no operations (cannot happen given how the
        # exchange set is built, but kept for safety, as in the original).
        if ops_for_exchange:
            print(ops_for_exchange[-1].show())
def display_ledger_summary(ledger: GenericLedger) -> None:
    """Print summed amounts per operation type, exchange and asset.

    Trades are reported as separate BUY/SELL totals (positive amounts are
    buys, negative amounts sells); every other type is a single net total.
    Combinations with nothing to report are skipped.
    """
    exchanges = set(op.exchange for op in ledger.ops)
    asset_names = set(op.asset.name for op in ledger.ops)
    op_types = {
        "Deposit": Deposit,
        "Deposit Fee": DepositFee,
        "Withdrawal": Withdrawal,
        "Withdrawal Fee": WithdrawalFee,
        "Trade": Trade,
        "Trade Fee": TradeFee,
    }

    def matching_amounts(op_type, exchange, asset):
        # Raw (signed) amounts of entries of this type/exchange/asset.
        return [
            op.amount
            for op in ledger.ops
            if isinstance(op, op_types[op_type])
            and op.asset.name == asset
            and op.exchange == exchange
        ]

    for op_type in op_types:
        for exchange in exchanges:
            for asset in asset_names:
                amounts = matching_amounts(op_type, exchange, asset)
                if op_type == "Trade":
                    total_buy = sum(abs(a) for a in amounts if a > 0)
                    total_sell = sum(abs(a) for a in amounts if a < 0)
                    if total_buy or total_sell:
                        print(
                            "Total %s on %s of %s: BUY: %s, SELL: %s"
                            % (op_type, exchange, asset, total_buy, total_sell)
                        )
                else:
                    total = sum(amounts)
                    if total != 0:
                        print(
                            "Total %s on %s of %s: %s"
                            % (op_type, exchange, asset, total)
                        )
| 35.164179 | 88 | 0.576613 | 546 | 4,712 | 4.760073 | 0.192308 | 0.030012 | 0.033859 | 0.043863 | 0.435937 | 0.30127 | 0.270489 | 0.207772 | 0.207772 | 0.160062 | 0 | 0.001285 | 0.339346 | 4,712 | 133 | 89 | 35.428571 | 0.833601 | 0 | 0 | 0.192982 | 0 | 0 | 0.04011 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078947 | false | 0 | 0.087719 | 0.026316 | 0.236842 | 0.04386 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7640abef2e902edfa6d479bfcded35e6bfacd3d8 | 1,908 | py | Python | app.py | gventuraagramonte/api_rest_local | 7461060bb646b6d6ec544a93a938298c972e4aad | [
"MIT"
] | null | null | null | app.py | gventuraagramonte/api_rest_local | 7461060bb646b6d6ec544a93a938298c972e4aad | [
"MIT"
] | null | null | null | app.py | gventuraagramonte/api_rest_local | 7461060bb646b6d6ec544a93a938298c972e4aad | [
"MIT"
] | null | null | null | from flask import Flask,jsonify, request
app = Flask(__name__)
from products import products
@app.route('/ping')
def ping():
    """Health-check endpoint: always answers with a fixed pong payload."""
    return jsonify({"message": "pong!"})
@app.route('/products')
def getProducts():
    """Return the full in-memory product list as JSON."""
    return jsonify(products)
@app.route('/products/<string:product_name>')
def getProduct(product_name):
    """Return the first product whose name equals *product_name*.

    Responds with a not-found message when no product matches.
    """
    matches = [item for item in products if item['name'] == product_name]
    if matches:
        return jsonify({"product": matches[0]})
    return jsonify({"message": "Product not found"})
@app.route('/products', methods=['POST'])
def addProduct():
    """Create a product from the JSON request body and append it to the list.

    Expects 'name', 'price' and 'quantity' keys in the body.
    """
    body = request.json
    new_product = {
        "name": body['name'],
        "price": body['price'],
        "quantity": body['quantity'],
    }
    products.append(new_product)
    return jsonify({"message": "Product added successfully!", "products": products})
@app.route('/products/<string:product_name>', methods=['PUT'])
def editProduct(product_name):
    """Overwrite name/price/quantity of an existing product from the JSON body."""
    matches = [item for item in products if item['name'] == product_name]
    if not matches:
        return jsonify({"message": "Product not found"})
    target = matches[0]
    # Same field order as before: a missing key aborts mid-update with
    # KeyError, leaving earlier fields already changed.
    target['name'] = request.json['name']
    target['price'] = request.json['price']
    target['quantity'] = request.json['quantity']
    return jsonify({
        "message": "Product Updated",
        "product": target
    })
@app.route('/products/<string:product_name>', methods=['DELETE'])
def deleteProduct(product_name):
    """Remove the product named *product_name* from the in-memory list."""
    matches = [item for item in products if item['name'] == product_name]
    if not matches:
        return jsonify({"message": "Product not found"})
    products.remove(matches[0])
    # NOTE(review): the "messages" key (plural) is the original response
    # shape and is preserved for API compatibility.
    return jsonify({
        "messages": "Product Deleted",
        "products": products
    })
# Run the Flask development server on port 4000 when executed directly.
if __name__ == '__main__':
    app.run(debug=True, port=4000)
7643bd835a0c8f30d242531ba919a257c7359f58 | 2,276 | py | Python | src/cifar10.py | HiroshiKERA/JKJ1A | a93a351afd015c024b88054f2c15d383516c80cf | [
"MIT"
] | null | null | null | src/cifar10.py | HiroshiKERA/JKJ1A | a93a351afd015c024b88054f2c15d383516c80cf | [
"MIT"
] | null | null | null | src/cifar10.py | HiroshiKERA/JKJ1A | a93a351afd015c024b88054f2c15d383516c80cf | [
"MIT"
] | null | null | null | import torch
import torchvision
import torchvision.transforms as transforms
from torchvision.datasets import CIFAR10
from torch.utils.data import random_split, DataLoader, Subset
import numpy as np
# Define the data loading function.
# The batch_size argument is the size of each mini-batch.
def load_data(batch_size, n_train=15000, n_test=2500, use_all=False):
    """Build CIFAR-10 train/test DataLoaders.

    batch_size: mini-batch size for both loaders.
    n_train / n_test: number of samples kept per split when use_all is False.
    use_all: if True, keep every sample of both splits.
    Returns (trainloader, testloader, classes).
    """
    # Human-readable class labels (order matches the CIFAR-10 class ids).
    classes = ('airplane', 'automobile', 'bird', 'cat',
               'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
    ## Preprocessing pipeline: just convert images to tensors.
    transform = transforms.Compose(
        [transforms.ToTensor(),]
    )
    # Prepare CIFAR-10 (downloaded automatically when not cached locally).
    # Training split.
    trainset = CIFAR10(root='./data', train=True, download=True, transform=transform)
    # Evaluation split.
    testset = CIFAR10(root='./data', train=False, download=True, transform=transform)
    # --- Subsample the data to shorten training time (not essential) --------
    if not use_all:
        trainset.targets = np.asarray(trainset.targets)
        testset.targets = np.asarray(testset.targets)
        classes_id = [trainset.class_to_idx[c] for c in classes]  # class names -> numeric class ids
        indices = np.where(np.isin(trainset.targets, classes_id))  # positions of the selected classes
        trainset.data = trainset.data[indices]  # keep only data of the selected classes
        trainset.targets = trainset.targets[indices]  # keep only labels of the selected classes
        trainset.targets = [classes_id.index(i) for i in trainset.targets]  # renumber class ids from 0
        indices = np.where(np.isin(testset.targets, classes_id))
        testset.data = testset.data[indices]
        testset.targets = testset.targets[indices]
        testset.targets = [classes_id.index(i) for i in testset.targets]
        # Keep only n_train samples of the training set.
        trainset, _ = random_split(trainset, [n_train, len(trainset) - n_train])
        # Keep only n_test samples of the test set.
        testset, _ = random_split(testset, [n_test, len(testset) - n_test])
    # ------------------------------------------------------------------------
    # Split the training data into mini-batches for later use.
    trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)
    # Split the test data into mini-batches for later use.
    testloader = DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)
    return (trainloader, testloader, classes)
76454ba31f2d02c5cc10b9982ca9d6f3b9f9e9b0 | 1,077 | py | Python | apps/users/models/user_functions.py | Sult/daf | a4da9e8c96f70577e2490c05e82bdf7d0de1a563 | [
"MIT"
] | null | null | null | apps/users/models/user_functions.py | Sult/daf | a4da9e8c96f70577e2490c05e82bdf7d0de1a563 | [
"MIT"
] | null | null | null | apps/users/models/user_functions.py | Sult/daf | a4da9e8c96f70577e2490c05e82bdf7d0de1a563 | [
"MIT"
] | null | null | null | from datetime import timedelta, datetime
from collections import OrderedDict
from django.utils import timezone
from django.contrib.auth.models import User
# Build an ordered mapping of inactive usernames to "days since last login".
@staticmethod
def inactive_dict():
    """Map each inactive user's username to a last-login description string."""
    inactive = OrderedDict()
    for user in User.objects.order_by("username"):
        if user.is_inactive():
            inactive[user.username] = user.days_since_last_login()
    return inactive
# A user counts as inactive when they have not logged in for over 30 days.
def is_inactive(self):
    """Return True if the user's last login was more than 30 days ago.

    Bug fix: the original added 30 days to the current time before
    comparing (``now + 30d > last_login``), which is true for every past
    login, so the function always returned False and no user was ever
    reported inactive.  The cutoff must be 30 days in the *past*.
    """
    cutoff = datetime.utcnow().replace(tzinfo=timezone.utc) - timedelta(days=30)
    return self.last_login < cutoff
# Describe how long ago the user last logged in.
def days_since_last_login(self):
    """Return 'Today' when the last login was under a day ago, else '<n> days ago'."""
    elapsed = datetime.utcnow().replace(tzinfo=timezone.utc) - self.last_login
    if elapsed.days == 0:
        return "Today"
    return "%d days ago" % elapsed.days
# Monkey-patch the helpers above onto django.contrib.auth's User model so
# they can be called like regular model methods (e.g. user.is_inactive()).
User.add_to_class("is_inactive", is_inactive)
User.add_to_class("days_since_last_login", days_since_last_login)
User.add_to_class("inactive_dict", inactive_dict)
| 25.642857 | 77 | 0.708449 | 153 | 1,077 | 4.803922 | 0.379085 | 0.073469 | 0.070748 | 0.097959 | 0.122449 | 0.122449 | 0.122449 | 0.122449 | 0 | 0 | 0 | 0.00348 | 0.199629 | 1,077 | 41 | 78 | 26.268293 | 0.849188 | 0.085422 | 0 | 0.071429 | 0 | 0 | 0.070336 | 0.021407 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.142857 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76456bc25c48682c2ac8e2343cf27b334625bc44 | 3,666 | py | Python | Snek/main.py | Noha101/python | 4cafd75f3e588e8dc3cccad786781316dab836f7 | [
"MIT"
] | null | null | null | Snek/main.py | Noha101/python | 4cafd75f3e588e8dc3cccad786781316dab836f7 | [
"MIT"
] | 1 | 2021-09-07T09:59:56.000Z | 2021-09-07T10:00:40.000Z | Snek/main.py | Noha101/python | 4cafd75f3e588e8dc3cccad786781316dab836f7 | [
"MIT"
] | 1 | 2021-09-07T09:42:31.000Z | 2021-09-07T09:42:31.000Z | import pygame
import sys
import time
import random
# Game speed: frames per second used by the main-loop clock.
difficulty = 10
# Window size in pixels; height is 80% of the width.
frame_size_x = 800
frame_size_y = int(frame_size_x * 0.8)
pygame.init()
pygame.display.set_caption('Snake')
screen = pygame.display.set_mode((frame_size_x, frame_size_y))
# Pre-built color objects used for drawing.
black = pygame.Color(0, 0, 0)
white = pygame.Color(255, 255, 255)
red = pygame.Color(255, 0, 0)
green = pygame.Color(0, 255, 0)
blue = pygame.Color(0, 0, 255)
fps_controller = pygame.time.Clock()
# Snake state: head position and body segments on a 10-pixel grid.
snake_pos = [100, 50]
snake_body = [[100, 50], [100-10, 50], [100-(2*10), 50]]
# Food spawns at a random grid-aligned position inside the window.
food_pos = [random.randrange(1, (frame_size_x//10)) * 10, random.randrange(1, (frame_size_y//10)) * 10]
food_spawn = True
# Current movement direction and the direction most recently requested.
direction = 'RIGHT'
change_to = direction
score = 0
def game_over():
    """Show the death screen with the final score for 3 seconds, then quit."""
    banner_font = pygame.font.SysFont('times new roman', 90)
    banner = banner_font.render('YOU DIED', True, red)
    banner_rect = banner.get_rect()
    banner_rect.midtop = (frame_size_x/2, frame_size_y/4)
    screen.fill(black)
    screen.blit(banner, banner_rect)
    show_score(0, red, 'times', 20)
    pygame.display.flip()
    time.sleep(3)
    pygame.quit()
    sys.exit()
def show_score(choice, color, font, size):
    """Render the current score; choice==1 places it top-left, otherwise lower-center."""
    rendered = pygame.font.SysFont(font, size).render('Score : ' + str(score), True, color)
    rect = rendered.get_rect()
    if choice == 1:
        rect.midtop = (frame_size_x/10, 15)
    else:
        rect.midtop = (frame_size_x/2, frame_size_y/1.25)
    screen.blit(rendered, rect)
run = True
while run:
    # --- Input handling: map arrow keys / WASD to a requested direction ---
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_UP or event.key == ord('w'):
                change_to = 'UP'
            if event.key == pygame.K_DOWN or event.key == ord('s'):
                change_to = 'DOWN'
            if event.key == pygame.K_LEFT or event.key == ord('a'):
                change_to = 'LEFT'
            if event.key == pygame.K_RIGHT or event.key == ord('d'):
                change_to = 'RIGHT'
            if event.key == pygame.K_ESCAPE:
                run = False
    # Apply the requested direction unless it would reverse the snake.
    if change_to == 'UP' and direction != 'DOWN':
        direction = 'UP'
    if change_to == 'DOWN' and direction != 'UP':
        direction = 'DOWN'
    if change_to == 'LEFT' and direction != 'RIGHT':
        direction = 'LEFT'
    if change_to == 'RIGHT' and direction != 'LEFT':
        direction = 'RIGHT'
    # Move the head one 10-pixel cell in the current direction.
    if direction == 'UP':
        snake_pos[1] -= 10
    if direction == 'DOWN':
        snake_pos[1] += 10
    if direction == 'LEFT':
        snake_pos[0] -= 10
    if direction == 'RIGHT':
        snake_pos[0] += 10
    # Grow at the head; when no food was eaten, drop the tail to keep length.
    snake_body.insert(0, list(snake_pos))
    if snake_pos[0] == food_pos[0] and snake_pos[1] == food_pos[1]:
        score += 1
        food_spawn = False
    else:
        snake_body.pop()
    # Respawn food at a new random grid cell after it was eaten.
    if not food_spawn:
        food_pos = [random.randrange(1, (frame_size_x//10)) * 10, random.randrange(1, (frame_size_y//10)) * 10]
        food_spawn = True
    # --- Drawing: clear, draw snake segments and food ---
    screen.fill(black)
    for pos in snake_body:
        pygame.draw.rect(screen, green, pygame.Rect(pos[0], pos[1], 10, 10))
    pygame.draw.rect(screen, red, pygame.Rect(food_pos[0], food_pos[1], 10, 10))
    # --- Collision checks: walls and self-collision both end the game ---
    if snake_pos[0] < 0 or snake_pos[0] > frame_size_x-10:
        game_over()
    if snake_pos[1] < 0 or snake_pos[1] > frame_size_y-10:
        game_over()
    for block in snake_body[1:]:
        if snake_pos[0] == block[0] and snake_pos[1] == block[1]:
            game_over()
    show_score(1, white, 'arial', 20)
    pygame.display.update()
    # Cap the loop at `difficulty` frames per second.
    fps_controller.tick(difficulty)
76482a0ec2706c195ead0331b4032fbf263cafe7 | 662 | py | Python | messagevault/migrations/0002_auto_20150627_2013.py | HelloMelanieC/FiveUp | ab97d311f163b09146fe330e4360d8e75d769f95 | [
"MIT"
] | 12 | 2017-09-10T01:43:42.000Z | 2020-09-20T01:17:20.000Z | messagevault/migrations/0002_auto_20150627_2013.py | HelloMelanieC/FiveUp | ab97d311f163b09146fe330e4360d8e75d769f95 | [
"MIT"
] | 22 | 2016-12-26T21:46:10.000Z | 2022-02-10T08:01:52.000Z | messagevault/migrations/0002_auto_20150627_2013.py | HelloMelanieC/FiveUp | ab97d311f163b09146fe330e4360d8e75d769f95 | [
"MIT"
] | 4 | 2017-08-24T16:01:37.000Z | 2019-02-14T23:50:17.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: caps the curated message author
    # first/last name fields at 35 characters.

    dependencies = [
        ('messagevault', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='curatedmessage',
            name='message_author_first',
            field=models.CharField(max_length=35),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='curatedmessage',
            name='message_author_last',
            field=models.CharField(max_length=35),
            preserve_default=True,
        ),
    ]
| 24.518519 | 50 | 0.601208 | 59 | 662 | 6.474576 | 0.59322 | 0.104712 | 0.13089 | 0.151832 | 0.575916 | 0.575916 | 0.575916 | 0.575916 | 0.26178 | 0 | 0 | 0.019272 | 0.294562 | 662 | 26 | 51 | 25.461538 | 0.798715 | 0.031722 | 0 | 0.5 | 0 | 0 | 0.14241 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7648d647cd3c2b91c8610d3f005baa97952a4f3c | 2,219 | py | Python | kervi/kervi/utility/application_helpers.py | wentzlau/kervi | d35a422a6bca6b0ef50a4f9e5c382dece855abdc | [
"MIT"
] | null | null | null | kervi/kervi/utility/application_helpers.py | wentzlau/kervi | d35a422a6bca6b0ef50a4f9e5c382dece855abdc | [
"MIT"
] | null | null | null | kervi/kervi/utility/application_helpers.py | wentzlau/kervi | d35a422a6bca6b0ef50a4f9e5c382dece855abdc | [
"MIT"
] | null | null | null | #MIT License
#Copyright (c) 2017 Tim Wentzlau
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import kervi.core.utility.process as process
class _KerviModuleLoader(process._KerviProcess):
    """ Private class that starts a separate process that loads a module in the Kervi application """

    def init_process(self, **kwargs):
        """Load the target module (self.name) inside this process.

        The hardware abstraction layer is initialised first, then the module
        is imported.  Import problems are logged but do not abort the
        process; the spine is always told to start its threads, and a
        'moduleLoaded' event is triggered afterwards.
        """
        self.spine.log.verbose("load: %s", self.name)
        try:
            import kervi.hal as hal
            hal._load()
            __import__(self.name, fromlist=[''])
        except ImportError:
            self.spine.log.exception("module not found:{0}", self.name)
        except Exception:
            # Was a bare `except:`, which would also swallow SystemExit and
            # KeyboardInterrupt and could block process shutdown during load.
            self.spine.log.exception("error load module:{0}", self.name)
        self.spine.send_command("startThreads", local_only=True)
        self.spine.trigger_event(
            "moduleLoaded",
            self.name
        )

    def terminate_process(self):
        """No per-process cleanup is required for module loading."""
        pass

    def load_spine(self, process_id, spine_port, root_address=None, ip=None):
        """Create this process's message-bus manager and return its bus."""
        from kervi.plugin.message_bus.bus_manager import BusManager
        self._bus_manager = BusManager()
        self._bus_manager.load(process_id, spine_port, root_address, ip)
        return self._bus_manager.bus
| 40.345455 | 101 | 0.71023 | 306 | 2,219 | 5.062092 | 0.5 | 0.056811 | 0.023241 | 0.027114 | 0.037444 | 0.037444 | 0 | 0 | 0 | 0 | 0 | 0.003444 | 0.214962 | 2,219 | 54 | 102 | 41.092593 | 0.885763 | 0.520054 | 0 | 0 | 0 | 0 | 0.070125 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0.041667 | 0.208333 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7649615613ed57077f305f2807eb72d3b1fc3c45 | 1,338 | py | Python | demo/text_detection/text_perceptron_det/config/tp_det_r50_3stages_enlarge_tt.py | icedream2/DAVAR-Lab-OCR | c8b82f45516850eeadcab2739fb2a4292f2fdca1 | [
"Apache-2.0"
] | 387 | 2021-01-02T07:50:15.000Z | 2022-03-31T04:30:03.000Z | demo/text_detection/text_perceptron_det/config/tp_det_r50_3stages_enlarge_tt.py | icedream2/DAVAR-Lab-OCR | c8b82f45516850eeadcab2739fb2a4292f2fdca1 | [
"Apache-2.0"
] | 70 | 2021-05-04T18:28:18.000Z | 2022-03-31T14:14:52.000Z | demo/text_detection/text_perceptron_det/config/tp_det_r50_3stages_enlarge_tt.py | icedream2/DAVAR-Lab-OCR | c8b82f45516850eeadcab2739fb2a4292f2fdca1 | [
"Apache-2.0"
] | 83 | 2021-01-05T08:28:26.000Z | 2022-03-31T07:14:03.000Z | """
#########################################################################
# Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : tp_r50_3stages_enlarge_tt.py
# Abstract : Model settings for total_text
# Current Version: 1.0.0
# Author : Liang Qiao
# Date : 2020-05-31
#########################################################################
"""
# Inherit the baseline Text Perceptron R50 detector config and override below.
_base_= "./tp_det_r50.py"
# Backbone override: use only the first 3 ResNet stages.
model = dict(
    backbone=dict(
        num_stages=3,
        out_indices=(0, 1, 2),
        dilations=(1, 2, 1), # enlarge receptive field in 8x feature map
        strides=(1, 1, 2)
    ),
    neck=dict(
        in_channels=[256, 512, 1024],
        num_outs=3)
)
# Training data: Total-Text datalist; 2 samples per GPU.
data = dict(
    samples_per_gpu=2,
    train=dict(
        ann_file=[
            '/path/to/datalist/total_text_train_datalist.json',
        ],
        img_prefix=[
            '/path/to/Total-Text/',
        ],
    ),
)
# Checkpoint save path (one checkpoint every 5 epochs).
checkpoint_config = dict(interval=5, filename_tmpl='checkpoint/tp_r50_3stages_enlarge_tt_epoch_{}.pth')
# 'Pretrained model on Synthtext'
# you can simply load_from = 'path/to/tp_det_r50_tt_e25-45b1f5cf.pth' to fine-tune current model into a new domain
load_from = "/path/to/workspace/log/checkpoint/tp_det_r50_3stages_enlarge_tt-45b1f5cf.pth"
| 30.409091 | 114 | 0.555306 | 161 | 1,338 | 4.372671 | 0.602484 | 0.034091 | 0.072443 | 0.080966 | 0.059659 | 0 | 0 | 0 | 0 | 0 | 0 | 0.05642 | 0.231689 | 1,338 | 43 | 115 | 31.116279 | 0.628405 | 0.473842 | 0 | 0.16 | 0 | 0 | 0.300144 | 0.249639 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
764aae6977c27bce819db876a9972887767393e1 | 5,243 | py | Python | CelebA/eval_eoo.py | hbendekgey/FairSurrogates | d81747817c866a67a6a324f8aea2f02cc3ece5c9 | [
"CC-BY-3.0"
] | null | null | null | CelebA/eval_eoo.py | hbendekgey/FairSurrogates | d81747817c866a67a6a324f8aea2f02cc3ece5c9 | [
"CC-BY-3.0"
] | null | null | null | CelebA/eval_eoo.py | hbendekgey/FairSurrogates | d81747817c866a67a6a324f8aea2f02cc3ece5c9 | [
"CC-BY-3.0"
] | null | null | null | import torch
import torchvision
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm, trange
from time import sleep
from PIL import Image
from torchvision import transforms
import numpy as np
import pandas as pd
# Standard ImageNet-style preprocessing: resize, center-crop to 224x224,
# convert to tensor and normalize with ImageNet channel statistics.
preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
# CelebA splits (stored under ./data); targets are the 40 binary attributes.
trainset = torchvision.datasets.CelebA(root='./data', split='train', target_type='attr', transform=preprocess)
validset = torchvision.datasets.CelebA(root='./data', split='valid', target_type='attr', transform=preprocess)
testset = torchvision.datasets.CelebA(root='./data', split='test', target_type='attr', transform=preprocess)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=32, shuffle=True, num_workers=2)
validloader = torch.utils.data.DataLoader(validset, batch_size=32, shuffle=True, num_workers=2)
testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False, num_workers=2)
# Names of the 40 CelebA attributes, in dataset column order.
attrs = '5_o_Clock_Shadow Arched_Eyebrows Attractive Bags_Under_Eyes Bald Bangs Big_Lips Big_Nose Black_Hair Blond_Hair Blurry Brown_Hair Bushy_Eyebrows Chubby Double_Chin Eyeglasses Goatee Gray_Hair Heavy_Makeup High_Cheekbones Male Mouth_Slightly_Open Mustache Narrow_Eyes No_Beard Oval_Face Pale_Skin Pointy_Nose Receding_Hairline Rosy_Cheeks Sideburns Smiling Straight_Hair Wavy_Hair Wearing_Earrings Wearing_Hat Wearing_Lipstick Wearing_Necklace Wearing_Necktie Young '.split()
# Wide ResNet-50-2 backbone with a single-logit head for binary prediction.
model = torch.hub.load('pytorch/vision:v0.6.0', 'wide_resnet50_2', pretrained=False)
model.fc = nn.Linear(2048, 1, bias=True)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
# Column indices of the target ("Smiling") and sensitive ("Male") attributes.
ti = attrs.index("Smiling")
si = attrs.index("Male")
# Base rates on the test split: fraction of (male & smiling) and
# (female & smiling) samples, used to normalize the fairness penalties.
(Pmale, Pfem) = ((testset.attr[:,si].bool() & testset.attr[:,ti].bool()).float().mean(),
                 (~testset.attr[:,si].bool() & testset.attr[:,ti].bool()).float().mean())
ploss = nn.BCEWithLogitsLoss()
# Fairness penalty weight; floss reads the *global* lam_fair at call time,
# so the evaluation loop below can sweep it by reassigning the global.
lam_fair = 0
# Which surrogate form of the fairness penalty to define.
form = "logistic"
if form == "logistic":
    def floss(outputs, sens_attr):
        # Logistic surrogate: log-sigmoid penalties on the two groups,
        # each normalized by its base rate (Pmale / Pfem); /32 matches the
        # batch size used by the loaders.
        return -lam_fair/32 * (F.logsigmoid(outputs[sens_attr]).sum()/Pmale + F.logsigmoid(-outputs[~sens_attr]).sum()/Pfem)
elif form == "linear":
    def floss(outputs, sens_attr):
        # Linear surrogate: penalty is linear in the raw logits.
        return lam_fair/32 * (-outputs[sens_attr].sum()/Pmale + outputs[~sens_attr].sum()/Pfem)
elif form == "weighting":
    def floss(outputs, sens_attr):
        # Weighting surrogate: log-sigmoid of the same sign for both
        # groups, differenced across groups.
        return -lam_fair/32 * (F.logsigmoid(outputs[sens_attr]).sum()/Pmale - F.logsigmoid(outputs[~sens_attr]).sum()/Pfem)
def calc_loss(data):
    """Evaluate one batch.

    Returns (accuracy, total loss, unfairness counts, prediction loss,
    reverse-unfairness counts).  Each counts tensor holds
    [positive predictions | group size] for the male group followed by the
    female group, restricted to true-positive labels (unfairness) or
    true-negative labels (runfairness).
    """
    inputs, labels = data
    # labels[:, ti] is the target attribute ("Smiling"); labels[:, si] the
    # sensitive attribute ("Male").
    inputs, labels, sens_attr = inputs.to(device), labels[:,ti].float().to(device), labels[:,si].bool().to(device)
    labels_bool = labels.bool()
    # optimizer.zero_grad()  # training-only step, disabled in this eval script
    outputs = model(inputs).reshape(-1)
    pred_loss = ploss(outputs, labels)
    # Fairness penalty is computed only over samples with a positive label
    # (equal-opportunity-style restriction).
    loss = pred_loss + floss(outputs[labels_bool], sens_attr[labels_bool])
    # loss.backward()  # training-only step, disabled in this eval script
    preds = (outputs >= 0).float()
    unfairness = torch.tensor([preds[ sens_attr & labels_bool].sum(), preds[ sens_attr & labels_bool].shape[0],
                              preds[~sens_attr & labels_bool].sum(), preds[~sens_attr & labels_bool].shape[0]]) # male predicted-smiling, male total, female predicted-smiling, female total
    runfairness = torch.tensor([preds[ sens_attr & ~labels_bool].sum(), preds[ sens_attr & ~labels_bool].shape[0],
                              preds[~sens_attr & ~labels_bool].sum(), preds[~sens_attr & ~labels_bool].shape[0]]) # same counts among non-smiling samples
    return ((labels == preds).float().mean(), loss, unfairness, pred_loss, runfairness)
with torch.cuda.device('cuda:0'):
    # Sweep of fairness-penalty weights; each value has a matching trained
    # checkpoint under eoo_sims/.
    lfs = np.array([0. , 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1 ,
       0.11, 0.12, 0.13, 0.14, 0.15, 0.16, 0.17, 0.18, 0.19, 0.2 ])
    df = pd.DataFrame(columns = ['Lam_fair', 'Accuracy', 'Unfairness', 'Loss',"LL"])
    # Rebinding the module-level lam_fair here also changes the value floss
    # reads, so the reported Loss uses the matching penalty weight.
    for lam_fair in lfs:
        model.load_state_dict(torch.load("eoo_sims/logistic" + str(lam_fair)))
        torch.cuda.empty_cache()
        iterator = testloader.__iter__()
        N = iterator.__len__()
        # Running totals accumulated over all test batches.
        running_loss = 0.0
        running_acc = 0.0
        running_unfair = 0.0
        running_runfair = 0.0
        running_predloss = 0.0
        with torch.no_grad():
            for i in trange(N):
                # get the inputs; data is a list of [inputs, labels]
                (acc, loss, unfair, pred_loss, runfair) = calc_loss(iterator.next())
                # accumulate statistics
                running_loss += loss.item()
                running_acc += acc.item()
                running_unfair += unfair
                running_runfair += runfair
                running_predloss += pred_loss.item()
        # Unfairness = female positive rate minus male positive rate among
        # true positives; RUnfairness is the same among true negatives.
        # NOTE(review): DataFrame.append was removed in pandas 2.x — this
        # script assumes an older pandas; consider pd.concat if upgrading.
        d = {"Lam_fair": lam_fair,
             "Accuracy": (running_acc / N),
             "Unfairness": (running_unfair[2]/running_unfair[3] - running_unfair[0]/running_unfair[1]).item(),
             "RUnfairness": (running_runfair[2]/running_runfair[3] - running_runfair[0]/running_runfair[1]).item(),
             "Loss": (running_loss / N),
             "LL": -(running_predloss / N)}
        df = df.append(d,ignore_index=True)
df.to_csv("logistic_eoo_test.csv") | 49.933333 | 482 | 0.666603 | 726 | 5,243 | 4.636364 | 0.34022 | 0.045157 | 0.040107 | 0.048128 | 0.282828 | 0.246583 | 0.212715 | 0.201426 | 0.181818 | 0.170529 | 0 | 0.033552 | 0.187107 | 5,243 | 105 | 483 | 49.933333 | 0.756218 | 0.029945 | 0 | 0.034483 | 0 | 0.011494 | 0.140662 | 0.008274 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045977 | false | 0 | 0.126437 | 0.034483 | 0.218391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
764d0378b671fa1a637354761e96ab6694bff51e | 10,292 | py | Python | tests/test_services.py | datalogics/circulation | a4767404a63dd034d101e6ee76f824626e5d5269 | [
"Apache-2.0"
] | null | null | null | tests/test_services.py | datalogics/circulation | a4767404a63dd034d101e6ee76f824626e5d5269 | [
"Apache-2.0"
] | 6 | 2016-10-25T14:55:39.000Z | 2017-10-09T15:37:03.000Z | tests/test_services.py | datalogics/circulation | a4767404a63dd034d101e6ee76f824626e5d5269 | [
"Apache-2.0"
] | 2 | 2016-09-29T16:28:14.000Z | 2021-08-04T12:28:21.000Z | import json
from . import (
DatabaseTest,
sample_data
)
from nose.tools import set_trace, eq_
from api.problem_details import EXPIRED_CREDENTIALS
from api.services import ServiceStatus
from api.config import (
Configuration,
temp_config,
)
from api.authenticator import (
LibraryAuthenticator
)
from api.circulation import CirculationAPI
from api.simple_authentication import (
SimpleAuthenticationProvider
)
from core.model import (
ConfigurationSetting,
DataSource,
ExternalIntegration,
Library,
)
class TestServiceStatusMonitor(DatabaseTest):
def test_select_log_level(self):
SUCCESS = "SUCCESS: %fsec"
def level_name(message):
return ServiceStatus.select_log_level(message).__name__
# A request failure results in an error log
status_message = 'FAILURE: It hurts.'
eq_('error', level_name(status_message))
# Request times above 10 secs also results in an error log
status_message = SUCCESS%24.03
eq_('error', level_name(status_message))
# Request times between 3 and 10 secs results in a warn log
status_message = SUCCESS%7.82
eq_('warning', level_name(status_message))
status_message = SUCCESS%3.0001
eq_('warning', level_name(status_message))
# Request times below 3 secs are set as info
status_message = SUCCESS%2.32
eq_('info', level_name(status_message))
def test_init(self):
# Test that ServiceStatus can create an Authenticator.
integration = self._external_integration(
"api.simple_authentication", goal=ExternalIntegration.PATRON_AUTH_GOAL
)
provider = SimpleAuthenticationProvider
integration.setting(provider.TEST_IDENTIFIER).value = "validpatron"
integration.setting(provider.TEST_PASSWORD).value = "password"
self._default_library.integrations.append(integration)
api = CirculationAPI(self._db, self._default_library)
service_status = ServiceStatus(api)
assert service_status.auth != None
assert isinstance(service_status.auth.basic_auth_provider, provider)
eq_(self._default_library, service_status.auth.library)
@property
def mock_auth(self):
library = self._default_library
integration = self._external_integration(self._str)
provider = SimpleAuthenticationProvider
integration.setting(provider.TEST_IDENTIFIER).value = "validpatron"
integration.setting(provider.TEST_PASSWORD).value = "password"
self.authenticator = provider(library, integration)
return LibraryAuthenticator(self._db, library, self.authenticator)
def test_test_patron(self):
"""Verify that test_patron() returns credentials determined
by the basic auth provider.
"""
auth = self.mock_auth
provider = auth.basic_auth_provider
api = CirculationAPI(self._db, self._default_library)
status = ServiceStatus(api, auth=auth)
patron, password = status.test_patron
eq_(provider.test_username, patron.authorization_identifier)
eq_(provider.test_password, password)
def test_loans_status(self):
    """Exercise ServiceStatus.loans_status across three scenarios:
    all collections succeed, one collection's API fails, and the
    patron-authentication step itself fails in various ways.
    """
    auth = self.mock_auth

    # Stand-in circulation APIs: one that reports an empty (but
    # successful) patron activity, one that raises mid-check.
    class MockPatronActivitySuccess(object):
        def __init__(self, *args, **kwargs):
            pass

        def patron_activity(self, patron, pin):
            "Simulate a patron with nothing going on."
            return

    class MockPatronActivityFailure(object):
        def __init__(self, *args, **kwargs):
            pass

        def patron_activity(self, patron, pin):
            "Simulate an integration failure."
            raise ValueError("Doomed to fail!")

    # Create a variety of Collections for this library.
    overdrive_collection = self._collection(
        protocol=ExternalIntegration.OVERDRIVE
    )
    axis_collection = self._collection(
        protocol=ExternalIntegration.AXIS_360
    )
    self._default_library.collections.append(overdrive_collection)
    self._default_library.collections.append(axis_collection)

    # Test a scenario where we get information for every
    # relevant collection in the library.
    everything_succeeds = {
        ExternalIntegration.OVERDRIVE : MockPatronActivitySuccess,
        ExternalIntegration.AXIS_360 : MockPatronActivitySuccess
    }
    api = CirculationAPI(self._db, self._default_library,
                         api_map=everything_succeeds)
    status = ServiceStatus(api, auth=auth)
    response = status.loans_status(response=True)
    # Every per-collection entry should report success.
    for value in response.values():
        assert value.startswith('SUCCESS')

    # Simulate a failure in one of the providers.
    overdrive_fails = {
        ExternalIntegration.OVERDRIVE : MockPatronActivityFailure,
        ExternalIntegration.AXIS_360 : MockPatronActivitySuccess
    }
    api = CirculationAPI(self._db, self._default_library,
                         api_map=overdrive_fails)
    status = ServiceStatus(api, auth=auth)
    response = status.loans_status(response=True)
    # The failing collection's entry carries the exception message.
    key = '%s patron account (Overdrive)' % overdrive_collection.name
    eq_("FAILURE: Doomed to fail!", response[key])

    # Simulate failures on the ILS level.
    def test_with_broken_basic_auth_provider(value):
        # Helper: swap in an auth provider whose testing_patron()
        # returns `value`, then assert loans_status reports the
        # generic patron-creation failure.
        class BrokenBasicAuthProvider(object):
            def testing_patron(self, _db):
                return value

        auth.basic_auth_provider = BrokenBasicAuthProvider()
        response = status.loans_status(response=True)
        eq_({'Patron authentication':
             'Could not create patron with configured credentials.'},
            response)

    # Test patron can't authenticate
    test_with_broken_basic_auth_provider(
        (None, "password that didn't work")
    )

    # Auth provider is just totally broken.
    test_with_broken_basic_auth_provider(None)

    # If the auth process returns a problem detail, the problem
    # detail is used as the basis for the error message.
    class ExpiredPatronProvider(object):
        def testing_patron(self, _db):
            return EXPIRED_CREDENTIALS, None

    auth.basic_auth_provider = ExpiredPatronProvider()
    response = status.loans_status(response=True)
    eq_({'Patron authentication': EXPIRED_CREDENTIALS.response[0]},
        response
    )
def test_checkout_status(self):
    """Exercise ServiceStatus.checkout_status end to end: a fully
    successful borrow/fulfill/revoke cycle, a borrow that creates no
    loan, and a revoke that raises.
    """
    # Create a Collection to test.
    overdrive_collection = self._collection(protocol=ExternalIntegration.OVERDRIVE)
    edition, lp = self._edition(
        with_license_pool=True, collection=overdrive_collection
    )
    library = self._default_library
    library.collections.append(overdrive_collection)

    # Test a scenario where we get information for every
    # relevant collection in the library.
    class CheckoutSuccess(object):
        # Mock circulation API that records which lifecycle methods
        # were invoked.
        def __init__(self, *args, **kwargs):
            self.borrowed = False
            self.fulfilled = False
            self.revoked = False

        def borrow(self, patron, password, license_pool, *args, **kwargs):
            "Simulate a successful borrow."
            self.borrowed = True
            # (loan, hold, is_new) — a truthy loan means checkout worked.
            return object(), None, True

        def fulfill(self, *args, **kwargs):
            "Simulate a successful fulfillment."
            self.fulfilled = True

        def revoke_loan(self, *args, **kwargs):
            "Simulate a successful loan revocation."
            self.revoked = True

    everything_succeeds = {ExternalIntegration.OVERDRIVE : CheckoutSuccess}
    auth = self.mock_auth
    api = CirculationAPI(self._db, library, api_map=everything_succeeds)
    status = ServiceStatus(api, auth=auth)
    # checkout_status requires a notification address to be configured.
    ConfigurationSetting.for_library(
        Configuration.DEFAULT_NOTIFICATION_EMAIL_ADDRESS, library
    ).value = "a@b"
    response = status.checkout_status(lp.identifier)

    # The ServiceStatus object was able to run its test.
    for value in response.values():
        assert value.startswith('SUCCESS')

    # The mock Overdrive API had all its methods called.
    api = status.circulation.api_for_collection[overdrive_collection.id]
    eq_(True, api.borrowed)
    eq_(True, api.fulfilled)
    eq_(True, api.revoked)

    # Now try some failure conditions.
    # First: the 'borrow' operation succeeds on an API level but
    # it doesn't create a loan.
    class NoLoanCreated(CheckoutSuccess):
        def borrow(self, patron, password, license_pool, *args, **kwargs):
            "Oops! We put the book on hold instead of borrowing it."
            return None, object(), True

    no_loan_created = {ExternalIntegration.OVERDRIVE : NoLoanCreated}
    api = CirculationAPI(self._db, library, api_map=no_loan_created)
    status = ServiceStatus(api, auth=auth)
    response = status.checkout_status(lp.identifier)
    assert 'FAILURE: No loan created during checkout' in response.values()

    # Next: The 'revoke' operation fails on an API level.
    class RevokeFail(CheckoutSuccess):
        def revoke_loan(self, *args, **kwargs):
            "Simulate an error during loan revocation."
            raise Exception("Doomed to fail!")

    revoke_fail = {ExternalIntegration.OVERDRIVE : RevokeFail}
    api = CirculationAPI(self._db, library, api_map=revoke_fail)
    status = ServiceStatus(api, auth=auth)
    response = status.checkout_status(lp.identifier)
    assert 'FAILURE: Doomed to fail!' in response.values()

    # But at least we got through the borrow and fulfill steps.
    api = status.circulation.api_for_collection[overdrive_collection.id]
    eq_(True, api.borrowed)
    eq_(True, api.fulfilled)
    eq_(False, api.revoked)
| 39.584615 | 87 | 0.64623 | 1,061 | 10,292 | 6.073516 | 0.225259 | 0.020174 | 0.027933 | 0.024984 | 0.438858 | 0.397579 | 0.36406 | 0.272967 | 0.260242 | 0.210894 | 0 | 0.004183 | 0.279926 | 10,292 | 259 | 88 | 39.737452 | 0.865335 | 0.141275 | 0 | 0.298429 | 0 | 0 | 0.074336 | 0.002757 | 0 | 0 | 0 | 0 | 0.031414 | 1 | 0.104712 | false | 0.04712 | 0.052356 | 0.015707 | 0.235602 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
764f182e1278292a045fb882c8e54696f63009a1 | 29,550 | py | Python | sd2c/libcloud_extended.py | antoinebourayne/sd2c | c76a0c56d5836caba9e6b90cdf7235516e2dd694 | [
"Apache-2.0"
] | null | null | null | sd2c/libcloud_extended.py | antoinebourayne/sd2c | c76a0c56d5836caba9e6b90cdf7235516e2dd694 | [
"Apache-2.0"
] | null | null | null | sd2c/libcloud_extended.py | antoinebourayne/sd2c | c76a0c56d5836caba9e6b90cdf7235516e2dd694 | [
"Apache-2.0"
] | null | null | null | import configparser
import getpass
import logging
import os
import stat
from abc import ABC, abstractmethod
from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider
import sd2c.utils as utils
from libcloud.compute.base import NodeAuthSSHKey
from sd2c.utils import get_public_key
"""
ProviderSpecific Class
This class is an upgrade of libcloud to simplify the use of SSH CROSS CLOUD
"""
#TODO: change default in credentials file by a custom name or just "sd2c"?
class ProviderSpecific(ABC):
    """
    A base ProviderSpecific class to derive from.

    Subclasses implement the abstract provider-specific hooks; the
    concrete ``*_no_arg`` helpers hold behaviour shared by most
    providers and operate through the libcloud ``driver``.
    """
    # Set by subclasses: an sd2c.utils.SSHParams holding all configuration.
    ssh_params = None
    # Set by init_specific(): the libcloud NodeDriver for this provider.
    driver = None

    @abstractmethod
    def init_specific_credentials(self):
        """
        Initialize specific values:
        - credentials_items
        - credentials_file_path
        - credentials_name
        ...
        """
        pass

    @abstractmethod
    def init_specific(self):
        """
        Initialize advanced specific attributes:
        - instance_user
        - region
        - credentials
        - Libcloud driver
        - Existing instances (nodes)
        """
        pass

    @abstractmethod
    def create_instance(self):
        """
        Initialize instance parameters (size, image etc.) and create an instance.
        """
        pass

    @abstractmethod
    def get_node(self):
        """
        If there is an instance with the same name as the user, returns it,
        else, raises an Exception.
        Most providers can just call get_node_any_arg().

        :return: Returns the instance (node) with the username
        :rtype: :class:`.Node`
        """
        pass

    @abstractmethod
    def display_instances(self):
        """
        Displays the instances that are running on the specified provider.
        """
        pass

    @abstractmethod
    def _init_rsa_key_pair(self):
        """
        Manage the RSA key pair creation.
        """
        pass

    @abstractmethod
    def spe_wait_until_running(self, nodes):
        """
        Call wait_until_running with provider-specific parameters.
        """
        pass

    @abstractmethod
    def start_instance(self):
        """
        Restarts the stopped instance.
        """
        pass

    @abstractmethod
    def stop_instance(self):
        """
        Stops the instance.
        """
        pass

    @abstractmethod
    def terminate_instance(self):
        """
        Terminates (destroys) the instance.
        """
        pass

    @abstractmethod
    def get_credentials(self):
        """
        Get credentials from a file.

        :return: Several Strings for each credential
        :rtype: :``str``
        """
        pass

    def create_local_rsa_key_pair(self):
        """
        If both private and public RSA keys are stored does nothing.
        If no key stored, create a public and a private RSA key.
        If only private key, generates a public RSA key from it.

        :return: Return code of the bash command (0 if success)
        :rtype: :``int``
        """
        # Shell commands executed via os.system; key paths come from config.
        generate_key_pair = "ssh-keygen -b 2048 -f " + self.ssh_params.rsa_private_key_file_path
        pub_from_priv = "ssh-keygen -b 2048 -y -f " + self.ssh_params.rsa_private_key_file_path \
                        + " > " + self.ssh_params.rsa_private_key_file_path + ".pub"
        if os.path.isfile(self.ssh_params.rsa_private_key_file_path):
            if os.path.isfile(self.ssh_params.rsa_private_key_file_path + ".pub"):
                # Both halves already exist — nothing to do.
                logging.info("Using key pair : " + self.ssh_params.rsa_private_key_file_path + ".pub")
                return 0
            else:
                # Private key only: derive the matching public key.
                logging.info("Creating public key from private key in " + self.ssh_params.rsa_private_key_file_path)
                return_code = os.system(pub_from_priv)
                return return_code
        else:
            # No key at all: generate a fresh pair and restrict permissions
            # to the owner (0700) as SSH requires.
            return_code = os.system(generate_key_pair)
            logging.info("Key pair created : " + self.ssh_params.rsa_private_key_file_path)
            os.chmod(self.ssh_params.rsa_private_key_file_path, stat.S_IRWXU)
            return return_code

    def display_instances_no_arg(self):
        """
        Display instances known to the driver.
        """
        nodes = self.driver.list_nodes()
        if not nodes:
            print("No instance running")
        for node in nodes:
            print(node)

    def stop_instance_no_arg(self) -> None:
        """
        Stops a running instance.

        :return: None
        """
        nodes = self.driver.list_nodes()
        if not nodes:
            logging.info("No instance running")
        for node in nodes:
            # Only stop the node registered as ours, and never a terminated one.
            if node.id == self.ssh_params.sshcrosscloud_instance_id and node.state != "terminated":
                stop = self.driver.ex_stop_node(node)
                if stop:
                    logging.info("Stopped : " + node.id)
                    return
                else:
                    raise Exception("An error has occurred while stopping instance")
        return

    def start_instance_no_arg(self) -> None:
        """
        Starts a stopped instance.

        :return: None
        """
        nodes = self.driver.list_nodes()
        if not nodes:
            logging.info("No instance running")
        for node in nodes:
            # Only restart the node registered as ours, and only from "stopped".
            if node.id == self.ssh_params.sshcrosscloud_instance_id and node.state == "stopped":
                start = self.driver.ex_start_node(node)
                if start:
                    logging.info("Started : " + node.id)
                    return
                else:
                    raise Exception("An error has occurred while starting instance")
        return

    def terminate_instance_no_arg(self) -> None:
        """
        Terminate the running instance.

        :return: None
        """
        nodes = self.driver.list_nodes()
        for node in nodes:
            if node.id == self.ssh_params.sshcrosscloud_instance_id and node.state != "terminated":
                terminate = self.driver.destroy_node(node)
                if terminate:
                    logging.warning("Terminated : " + node.id)
                    return
                else:
                    raise Exception("An error has occurred while terminating instance")
        return

    def get_node_any_arg(self, *args):
        """
        If there is an instance with the same name as the user, returns it,
        else, raises an Exception.

        :param args: extra positional arguments forwarded to list_nodes()
            (e.g. a resource group for Azure).
        :return: Returns the instance (node) with the username
        :rtype: :class:`.Node`
        """
        nodes = self.driver.list_nodes(*args)
        if not nodes:
            raise Exception("No instance found")
        if self.ssh_params.sshcrosscloud_instance_id:
            for node in nodes:
                if node.id == self.ssh_params.sshcrosscloud_instance_id:
                    return node
            raise Exception("No instance found")
        else:
            raise Exception("No instance ID registered")
class SpecificAWS(ProviderSpecific):
    """Provider-specific logic for Amazon EC2, backed by libcloud's EC2 driver."""

    def __init__(self, ssh_vas: utils.SSHParams):
        self.ssh_params = ssh_vas
        self.driver = None
        # Resolved lazily by create_instance() via the _init_* helpers.
        self.image = None
        self.size = None
        self.security_group = None

    def init_specific_credentials(self):
        """Point the generic credential fields at the AWS-specific values."""
        self.ssh_params.credentials_items = self.ssh_params.aws.credentials_items
        self.ssh_params.credentials_file_path = self.ssh_params.aws.credentials_path
        self.ssh_params.credentials_name = self.ssh_params.aws.credentials_name

    def init_specific(self):
        """
        Resolve the login user from the image name, load region and
        credentials, build the libcloud EC2 driver and return the nodes
        it currently knows about.
        """
        default_user_list = utils.aws_default_user_list
        # The default SSH user depends on the AMI family (e.g. "ubuntu" AMIs
        # log in as "ubuntu"); match the image name against the known list.
        for name_fragment, user in default_user_list.items():
            if name_fragment.lower() in self.ssh_params.aws.image_name.lower():
                self.ssh_params.instance_user = user
        if not self.ssh_params.instance_user:
            raise Exception("Image name does not correspond to AWS User List")
        if not self.ssh_params.aws.region:
            self.ssh_params.aws.region = self.get_region_from_config_file()
        self.ssh_params.aws.access_key_id, self.ssh_params.aws.secret_access_key = self.get_credentials()
        cls = get_driver(Provider.EC2)
        provider_driver = cls(self.ssh_params.aws.access_key_id,
                              self.ssh_params.aws.secret_access_key,
                              region=self.ssh_params.aws.region)
        self.driver = provider_driver
        nodes = self.driver.list_nodes()
        return nodes

    def create_instance(self):
        """
        Resolve key pair, size, image and security group, then create
        the EC2 instance.

        :return: the freshly created libcloud Node
        """
        logging.info("Creating instance")
        self._init_rsa_key_pair()
        self._init_size()
        self._init_image()
        self._init_security_group()
        logging.info("Instance parameters : " + self.ssh_params.instance_name + " - " + self.ssh_params.aws.image_id
                     + " - " + self.ssh_params.aws.size)
        node = self.driver.create_node(name=self.ssh_params.instance_name,
                                       image=self.image,  # Need to use Libcloud object, can't use string
                                       size=self.size,
                                       ex_userdata=self.ssh_params.user_data,
                                       ex_keyname=self.ssh_params.rsa_key_name,
                                       ex_securitygroup=[self.security_group.name])
        return node

    def get_node(self):
        """EC2 needs no extra list_nodes() arguments."""
        return self.get_node_any_arg()

    def get_region_from_config_file(self):
        """Read the default region from the AWS CLI config file.

        :raises Exception: if the config file does not exist.
        """
        if os.path.isfile(self.ssh_params.aws.config_path):
            config = configparser.ConfigParser()
            config.read(self.ssh_params.aws.config_path)
            aws_region = config['default']['region']
            return aws_region
        else:
            raise Exception("No region found in " + self.ssh_params.aws.config_path
                            + ", run sshcrosscloud --config -- provider aws")

    def start_instance(self):
        self.start_instance_no_arg()

    def stop_instance(self):
        self.stop_instance_no_arg()

    def terminate_instance(self):
        self.terminate_instance_no_arg()

    def display_instances(self):
        self.display_instances_no_arg()

    def spe_wait_until_running(self, nodes):
        return self.driver.wait_until_running(nodes=nodes)[0]

    def get_credentials(self):
        """Read the access key pair from the AWS CLI credentials file.

        :return: (aws_access_key_id, aws_secret_access_key)
        :raises Exception: if the credentials file does not exist.
        """
        if os.path.isfile(self.ssh_params.credentials_file_path):
            config = configparser.ConfigParser()
            config.read(self.ssh_params.credentials_file_path)
            aws_access_key_id = config['default']['aws_access_key_id']
            aws_secret_access_key = config['default']['aws_secret_access_key']
            return aws_access_key_id, aws_secret_access_key
        else:
            raise Exception("No credentials found in " + self.ssh_params.credentials_file_path +
                            ", run sshcrosscloud --config -- provider aws")

    def _init_rsa_key_pair(self):
        """Register the local private key with EC2, or have EC2 generate one.

        EC2 can generate the key pair server-side, so unlike the other
        providers this does not use create_local_rsa_key_pair().
        """
        if os.path.isfile(self.ssh_params.rsa_private_key_file_path):
            # A local key exists: make sure EC2 knows it.
            for key in self.driver.ex_list_keypairs():
                if self.ssh_params.rsa_key_name == key['keyName']:
                    logging.info("Key pair already stored (" + self.ssh_params.rsa_private_key_file_path + ")")
                    return
            logging.info("Creating key pair from existing key in " + self.ssh_params.rsa_private_key_file_path)
            self.driver.import_key_pair_from_file(name=self.ssh_params.rsa_key_name,
                                                  key_file_path=self.ssh_params.rsa_private_key_file_path)
            return
        else:
            # No local key: let EC2 create one and persist the private half
            # locally with owner-only permissions.
            keypair = self.driver.create_key_pair(name=self.ssh_params.rsa_key_name)
            rsa_key = keypair.private_key
            with open(self.ssh_params.rsa_private_key_file_path, 'w') as file:
                file.write(rsa_key)
            os.chmod(self.ssh_params.rsa_private_key_file_path, stat.S_IRWXU)
            logging.info("Key pair created : " + self.ssh_params.rsa_private_key_file_path)
            logging.warning("Keypair is not secured by a password!")

    def _init_size(self):
        """Resolve the configured size id to a libcloud NodeSize."""
        sizes = self.driver.list_sizes()
        selected_sizes = [s for s in sizes if s.id == self.ssh_params.aws.size]
        if not selected_sizes:
            raise Exception(self.ssh_params.aws.size + " is not available in AWS")
        else:
            self.size = selected_sizes[0]

    def _init_image(self):
        """Resolve the configured image id to a libcloud NodeImage."""
        images = self.driver.list_images()
        selected_images = [i for i in images if i.id == self.ssh_params.aws.image_id]
        if not selected_images:
            raise Exception(self.ssh_params.aws.image_id + " is not available in AWS")
        else:
            self.image = selected_images[0]

    def _init_security_group(self):
        """Look up the configured security group, offering to create it.

        :raises Exception: if no group exists and the user declines to
            create one (previously this fell through to
            ``security_groups[0]`` on an empty list and crashed with a
            bare IndexError).
        """
        group_names = [self.ssh_params.aws.security_group]
        security_groups = self.driver.ex_get_security_groups(group_names=group_names)
        if not security_groups:
            answer = utils.get_ui_confirmation("No security group found, would you like to create one?")
            if answer:
                security_group_lst = self.driver.ex_create_security_group(
                    self.ssh_params.aws.security_group,
                    self.ssh_params.aws.security_group + " security group")
                security_groups = self.driver.ex_get_security_groups(
                    group_ids=security_group_lst.get('group_id'))
            else:
                logging.info("No security group created")
                raise Exception("A security group is required to create an instance")
        self.security_group = security_groups[0]
class SpecificAzure(ProviderSpecific):
    """Provider-specific logic for Azure ARM, backed by libcloud's AZURE_ARM driver."""

    def __init__(self, ssh_vas: utils.SSHParams):
        self.ssh_params = ssh_vas
        self.driver = None
        # Resolved lazily by create_instance() via the _init_* helpers.
        self.image = None
        self.size = None
        self.location = None
        self.resource_group = None
        self.virtual_network = None
        self.public_ip = None

    def init_specific_credentials(self):
        """Point the generic credential fields at the Azure-specific values."""
        self.ssh_params.credentials_items = self.ssh_params.azure.credentials_items
        self.ssh_params.credentials_file_path = self.ssh_params.azure.credentials_path
        self.ssh_params.credentials_name = self.ssh_params.azure.credentials_name

    def init_specific(self):
        """
        Load service-principal credentials, default the per-user network
        resource names, build the libcloud AZURE_ARM driver and return
        the nodes in the configured resource group.
        """
        self.ssh_params.instance_user = "azure"
        if not self.ssh_params.azure.region:
            raise Exception("No region found, you must specify a region in .env file")
        # NOTE: 'tenat_id' (sic) is the attribute name declared on
        # utils.SSHParams — kept as-is for compatibility.
        self.ssh_params.azure.tenat_id, self.ssh_params.azure.subscription_id, \
            self.ssh_params.azure.application_id, self.ssh_params.azure.secret = self.get_credentials()
        # Default the network resource names to per-user values.
        if not self.ssh_params.azure.public_ip_name:
            self.ssh_params.azure.public_ip_name = "sshcrosscloud-ip-" + self.ssh_params.username
        if not self.ssh_params.azure.virtual_network:
            self.ssh_params.azure.virtual_network = "sshcrosscloud-vn-" + self.ssh_params.username
        if not self.ssh_params.azure.subnet:
            self.ssh_params.azure.subnet = "sshcrosscloud-sn-" + self.ssh_params.username
        cls = get_driver(Provider.AZURE_ARM)
        provider_driver = cls(tenant_id=self.ssh_params.azure.tenat_id,
                              subscription_id=self.ssh_params.azure.subscription_id,
                              key=self.ssh_params.azure.application_id,
                              secret=self.ssh_params.azure.secret)
        self.driver = provider_driver
        nodes = self.driver.list_nodes(self.ssh_params.azure.resource_group)
        return nodes

    def create_instance(self):
        """
        Resolve every Azure resource the VM needs (key, location, size,
        image, auth, resource group, network, NIC), then create it.

        :return: the freshly created libcloud Node
        """
        logging.info("Creating instance")
        self._init_rsa_key_pair()
        self._init_location()
        self._init_size()
        self._init_image()
        self._init_auth()
        self._init_resource_group()
        self._init_virtual_network()
        self._init_security_group()
        self._init_public_ip()
        self._init_network_interface()
        # Node Creation
        logging.info("Instance parameters : " + self.ssh_params.instance_name + " - "
                     + self.image.name + " - " + self.size.name)
        node = self.driver.create_node(name=self.ssh_params.instance_name,
                                       image=self.image,
                                       size=self.size,
                                       ex_user_name=self.ssh_params.instance_user,
                                       auth=self.auth,
                                       ex_resource_group=self.resource_group.name,
                                       ex_network=self.virtual_network.name,
                                       ex_use_managed_disks=True,
                                       ex_nic=self.network_interface,
                                       location=self.location,
                                       # this argument is useless, but libcloud requires it
                                       ex_storage_account="useless")
        return node

    def get_node(self):
        """Azure's list_nodes() needs the resource group."""
        return self.get_node_any_arg(self.ssh_params.azure.resource_group)

    def get_credentials(self):
        """Read the service-principal credentials from the Azure credentials file.

        :return: (tenant_id, subscription_id, client_id, secret)
        :raises Exception: if the credentials file does not exist.
        """
        if os.path.isfile(self.ssh_params.credentials_file_path):
            config = configparser.ConfigParser()
            config.read(self.ssh_params.credentials_file_path)
            tenant_id = config['default']['tenant']
            subscription_id = config['default']['subscription_id']
            client_id = config['default']['client_id']
            secret = config['default']['secret']
            return tenant_id, subscription_id, client_id, secret
        else:
            raise Exception(
                "No credentials found in " + self.ssh_params.credentials_file_path +
                ", run sshcrosscloud --config -- provider azure")

    def start_instance(self) -> None:
        """Start our stopped instance in the configured resource group."""
        nodes = self.driver.list_nodes(self.ssh_params.azure.resource_group)
        if not nodes:
            logging.info("No instance running")
        for node in nodes:
            if node.id == self.ssh_params.sshcrosscloud_instance_id and node.state == "stopped":
                start = self.driver.ex_start_node(node)
                if start:
                    logging.info("Started : " + node.id)
                    return
                else:
                    raise Exception("An error has occurred while starting instance")
        return

    def stop_instance(self) -> None:
        """Stop our running instance in the configured resource group."""
        nodes = self.driver.list_nodes(self.ssh_params.azure.resource_group)
        if not nodes:
            logging.info("No instance running")
        for node in nodes:
            if node.id == self.ssh_params.sshcrosscloud_instance_id and node.state != "terminated":
                stopped = self.driver.ex_stop_node(node)
                if stopped:
                    logging.warning("Stopped : " + node.id)
                    return
                else:
                    raise Exception("An error has occurred while stopping instance")
        return

    def terminate_instance(self) -> None:
        """
        Destroys instance and volume associated with it.

        :return: None
        """
        nodes = self.driver.list_nodes(self.ssh_params.azure.resource_group)
        for node in nodes:
            if node.id == self.ssh_params.sshcrosscloud_instance_id and node.state != "terminated":
                stop = self.driver.destroy_node(node=node, ex_destroy_vhd=True, ex_destroy_nic=False)
                # Destroy the associated managed disk as well, when we can
                # find it. (Previously a missing volume raised IndexError
                # after the node had already been destroyed.)
                volumes = self.driver.list_volumes(ex_resource_group=self.ssh_params.azure.resource_group)
                matching_volumes = [v for v in volumes if self.ssh_params.general_name in v.name]
                if matching_volumes:
                    self.driver.destroy_volume(matching_volumes[0])
                else:
                    logging.warning("No volume matching '%s' found to destroy", self.ssh_params.general_name)
                if stop:
                    logging.warning("Terminated : " + node.id)
                    return
                else:
                    raise Exception("An error has occurred while terminating instance")
        return

    def display_instances(self):
        """Print the nodes in the configured resource group."""
        nodes = self.driver.list_nodes(self.ssh_params.azure.resource_group)
        if not nodes:
            print("No instance running")
        for node in nodes:
            print(node)

    def spe_wait_until_running(self, nodes):
        # Azure's wait_until_running re-lists nodes; it needs a resource group.
        list_node_args = {'ex_resource_group': 'NetworkWatcherRG'}
        return self.driver.wait_until_running(nodes=nodes, ex_list_nodes_kwargs=list_node_args)[0]

    def _init_rsa_key_pair(self):
        """Azure has no server-side key store here; create the pair locally."""
        return_code = self.create_local_rsa_key_pair()
        if return_code != 0:
            raise Exception("Error while creating key pair : " + str(return_code))

    def _init_size(self):
        """Resolve the configured size id to a libcloud NodeSize."""
        sizes = self.driver.list_sizes(self.location)
        selected_sizes = [s for s in sizes if s.id == self.ssh_params.azure.size]
        if not selected_sizes:
            raise Exception(self.ssh_params.azure.size + " is not available in Azure")
        else:
            self.size = selected_sizes[0]

    def _init_image(self):
        """Resolve the configured image id (substring match) to a NodeImage."""
        images = self.driver.list_images(location=self.location, ex_publisher=self.ssh_params.azure.publisher)
        selected_images = [i for i in images if self.ssh_params.azure.image_id in i.id]
        if not selected_images:
            raise Exception(self.ssh_params.azure.image_id + " is not available in AWS")
        else:
            self.image = selected_images[0]

    def _init_location(self):
        """Resolve the configured region to a libcloud NodeLocation."""
        locations = self.driver.list_locations()
        selected_locations = [loc for loc in locations if loc.id == self.ssh_params.azure.region]
        if not selected_locations:
            raise Exception(self.ssh_params.azure.region + " is not available in AWS")
        else:
            self.location = selected_locations[0]

    def _init_resource_group(self):
        """Resolve the configured resource group; it must already exist."""
        rgs = self.driver.ex_list_resource_groups()
        selected_rg = [rg for rg in rgs if rg.name == self.ssh_params.azure.resource_group]
        if not selected_rg:
            raise Exception(self.ssh_params.azure.resource_group + " does not exist")
        else:
            self.resource_group = selected_rg[0]

    def _init_auth(self):
        # Libcloud does not allow key vault for Azure, therefore need to store public key locally
        self.auth = NodeAuthSSHKey(get_public_key(self.ssh_params.rsa_private_key_file_path))

    def _init_virtual_network(self):
        """Use the first existing virtual network; it must already exist."""
        if not self.driver.ex_list_networks():
            raise Exception("You must create a Virtual Network in Resource Group : " + self.resource_group.name)
        else:
            self.virtual_network = self.driver.ex_list_networks()[0]

    def _init_security_group(self):
        """Use the first network security group of the resource group, if any."""
        if not self.driver.ex_list_network_security_groups(resource_group=self.resource_group.name):
            logging.warning("No Security Group found, it is advised to create one for increased security.")
        else:
            self.security_group = \
                self.driver.ex_list_network_security_groups(resource_group=self.resource_group.name)[0]

    def _init_public_ip(self):
        """Find or create the public IP named in the configuration.

        Bug fix: the creation branch previously discarded the freshly
        created IP (``self.public_ip`` was never assigned), so the NIC
        was later created with ``public_ip=None``.
        """
        pips = self.driver.ex_list_public_ips(resource_group=self.resource_group.name)
        selected_pips = [ip for ip in pips if ip.name == self.ssh_params.azure.public_ip_name]
        if not selected_pips:
            answer = utils.get_ui_confirmation(
                self.ssh_params.azure.public_ip_name + " ip does not exist, create one ?")
            if answer:
                public_ip = self.driver.ex_create_public_ip(self.ssh_params.azure.public_ip_name,
                                                            resource_group=self.resource_group.name,
                                                            location=self.location,
                                                            public_ip_allocation_method="Dynamic")
                if not public_ip:
                    raise Exception("Error while creating ip")
                self.public_ip = public_ip
            else:
                raise Exception("You need to create an IP")
        else:
            self.public_ip = selected_pips[0]

    def _init_network_interface(self):
        """Find the NIC named after the public IP, creating it on the
        configured subnet when it does not exist yet."""
        nics = self.driver.ex_list_nics(resource_group=self.resource_group.name)
        selected_nics = [ni for ni in nics if ni.name == self.ssh_params.azure.public_ip_name]
        if not selected_nics:
            subnets = self.driver.ex_list_subnets(self.virtual_network)
            selected_subnets = [sn for sn in subnets if sn.name == self.ssh_params.azure.subnet]
            if not selected_subnets:
                raise Exception("You must create a Subnet '" + self.ssh_params.azure.subnet + "' in Virtual Network : "
                                + self.virtual_network.name)
            else:
                sn = selected_subnets[0]
                self.network_interface = self.driver.ex_create_network_interface(
                    name=self.ssh_params.azure.network_interface,
                    resource_group=self.resource_group.name,
                    location=self.location,
                    public_ip=self.public_ip,
                    subnet=sn)
        else:
            self.network_interface = selected_nics[0]
class SpecificGPC(ProviderSpecific):
    """Provider-specific logic for Google Compute Engine, backed by libcloud's GCE driver."""

    def __init__(self, ssh_vas: utils.SSHParams):
        self.ssh_params = ssh_vas
        self.driver = None

    def init_specific_credentials(self):
        """Point the generic credential fields at the GCP-specific values."""
        self.ssh_params.credentials_items = self.ssh_params.gcp.credentials_items
        self.ssh_params.credentials_file_path = self.ssh_params.gcp.credentials_path
        self.ssh_params.credentials_name = self.ssh_params.gcp.credentials_name

    def init_specific(self):
        """
        Use the local username as the instance login, load the service
        account credentials, build the libcloud GCE driver and return
        the currently known nodes.
        """
        # GCE provisions the SSH key for whatever login name we put in the
        # instance metadata; we use the local user (see _init_metadata).
        self.ssh_params.instance_user = getpass.getuser()
        if not self.ssh_params.gcp.region:
            raise Exception("No region found, you must specify a region in .env file")
        self.ssh_params.gcp.user_id, self.ssh_params.gcp.key_path, self.ssh_params.gcp.project,\
            self.ssh_params.gcp.data_center = self.get_credentials()
        cls = get_driver(Provider.GCE)
        provider_driver = cls(user_id=self.ssh_params.gcp.user_id,
                              key=self.ssh_params.gcp.key_path,
                              project=self.ssh_params.gcp.project,
                              datacenter=self.ssh_params.gcp.data_center)
        self.driver = provider_driver
        nodes = self.driver.list_nodes()
        return nodes

    def create_instance(self):
        """
        Resolve key pair, image, size and SSH-key metadata, then create
        the GCE instance.

        :return: the freshly created libcloud Node
        """
        logging.info("Creating instance")
        self._init_rsa_key_pair()
        self._init_image()
        self._init_size()
        self._init_metadata()
        logging.info("Instance parameters : " + self.ssh_params.instance_name + " - " + self.image.name
                     + " - " + self.size.name)
        node = self.driver.create_node(name=self.ssh_params.instance_name,
                                       image=self.image,
                                       size=self.size,
                                       ex_metadata=self.metadata)
        return node

    def get_node(self):
        """GCE needs no extra list_nodes() arguments."""
        return self.get_node_any_arg()

    def get_credentials(self):
        """Read the GCE service-account settings from the credentials file.

        :return: (user_id, key_path, project, datacenter)
        :raises Exception: if the credentials file does not exist.
        """
        if os.path.isfile(self.ssh_params.credentials_file_path):
            config = configparser.ConfigParser()
            config.read(self.ssh_params.credentials_file_path)
            user_id = config['default']['user_id']
            key = config['default']['key']
            project = config['default']['project']
            datacenter = config['default']['datacenter']
            return user_id, key, project, datacenter
        else:
            raise Exception(
                "No credentials found in " + self.ssh_params.credentials_file_path
                + ", run sshcrosscloud --config -- provider gcp")

    def start_instance(self):
        self.start_instance_no_arg()

    def stop_instance(self):
        self.stop_instance_no_arg()

    def terminate_instance(self):
        self.terminate_instance_no_arg()

    def display_instances(self):
        self.display_instances_no_arg()

    def _init_rsa_key_pair(self):
        """GCE takes the key via metadata; create the pair locally."""
        return_code = self.create_local_rsa_key_pair()
        if return_code != 0:
            raise Exception("Error while creating key pair : " + str(return_code))

    def spe_wait_until_running(self, nodes):
        return self.driver.wait_until_running(nodes=nodes)[0]

    def _init_size(self):
        """Resolve the configured size (substring match) to a NodeSize."""
        sizes = self.driver.list_sizes()
        selected_sizes = [s for s in sizes if self.ssh_params.gcp.size in s.name]
        if not selected_sizes:
            raise Exception(self.ssh_params.gcp.size + " is not available in GCP")
        else:
            self.size = selected_sizes[0]

    def _init_image(self):
        """Resolve the configured image name (substring match) to a NodeImage."""
        images = self.driver.list_images()
        selected_images = [i for i in images if self.ssh_params.gcp.image_name in i.name]
        if not selected_images:
            raise Exception(self.ssh_params.gcp.image_name + " is not available in GCP")
        else:
            self.image = selected_images[0]

    def _init_metadata(self):
        # Libcloud does not allow key vault here, so the public key is
        # injected through GCE instance metadata. The "ssh-keys" value
        # format is "<login-user>:<public-key>".
        # Bug fix: the author's personal username was previously
        # hard-coded ("antoinebourayne:"), which broke SSH access for
        # any other user; use the resolved instance user instead.
        self.metadata = {
            "items": [
                {
                    "key": "ssh-keys",
                    "value": self.ssh_params.instance_user + ":"
                             + get_public_key(self.ssh_params.rsa_private_key_file_path)
                }
            ]
        }
def get_provider_specific_driver(ssh_vars: utils.SSHParams):
    """Return the ProviderSpecific implementation matching ``ssh_vars.provider``.

    :param ssh_vars: configuration whose ``provider`` field selects the cloud.
    :raises Exception: if the provider is missing or unknown.
    """
    specific_classes = {
        'aws': SpecificAWS,
        'azure': SpecificAzure,
        'gcp': SpecificGPC,
    }
    specific_class = specific_classes.get(ssh_vars.provider)
    if specific_class is None:
        raise Exception('No provider specified')
    return specific_class(ssh_vars)
| 38.67801 | 119 | 0.611404 | 3,556 | 29,550 | 4.827053 | 0.085489 | 0.061171 | 0.111331 | 0.042994 | 0.689543 | 0.637169 | 0.568075 | 0.523099 | 0.494145 | 0.467172 | 0 | 0.001655 | 0.304907 | 29,550 | 763 | 120 | 38.728702 | 0.834031 | 0.065482 | 0 | 0.532468 | 0 | 0 | 0.087934 | 0.000782 | 0 | 0 | 0 | 0.001311 | 0 | 1 | 0.12987 | false | 0.025974 | 0.022263 | 0.009276 | 0.231911 | 0.007421 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7652f94e10bf5e2d4d583a87a38765c532fbdd9a | 4,157 | py | Python | src/GeckoMotion.py | GavinStrunk/gecko-motion | 3930e0feffaf213b866dc097bfb82afaad86defd | [
"MIT"
] | null | null | null | src/GeckoMotion.py | GavinStrunk/gecko-motion | 3930e0feffaf213b866dc097bfb82afaad86defd | [
"MIT"
] | 1 | 2019-10-19T05:48:37.000Z | 2019-10-19T05:48:37.000Z | src/GeckoMotion.py | GavinStrunk/gecko-motion | 3930e0feffaf213b866dc097bfb82afaad86defd | [
"MIT"
] | null | null | null |
#This is for compatibility with python 3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import serial
import struct
import sys
# Opcodes for "edit mode" control frames, as hex strings. 'run' (0x04)
# prefixes every run-mode frame built by _build_run_command.
EditCommands = {
    'estop': '0x00',
    'stop': '0x01',
    'pause': '0x02',
    'resume': '0x03',
    'run': '0x04',
    'short_status': '0x07',
    'long_status': '0x08',
    'version': '0x0E',
}

# Opcodes for run-mode motion commands, as hex strings.
RunCommands = {
    'move': '0x01',
    'home': '0x02',
    'velocity': '0x07',
    'vector': '0x0B',
    'acceleration': '0x0C',
    'configure': '0x0E',
    'limit_cw': '0x0F',
    'zero_offset': '0x13',
    'reset_pos': '0x15',
}

# Motor selector ids packed into the top bits of the run-command word.
Motor = {
    'MOTOR_X': 0,
    'MOTOR_Y': 1,
    'MOTOR_Z': 2,
    'MOTOR_W': 3,
    'MOTOR_ALL': 4
}
class GeckoMotion:
def __init__(self, portName=None):
self.portname = portName
if self.portname == None:
self.serialPort = serial.serial_for_url('loop://', timeout = 1)
else:
try:
self.serialPort = serial.Serial(self.portname, 115200, timeout = 1)
if self.serialPort.is_open:
self.serialPort.close()
self.serialPort.open()
except:
print("Err 0: Failed to open serial port")
sys.exit()
def __del__(self):
#FIXME: throws an error when you can't access the serial port
if not self.portname == None:
if self.serialPort:
self.serialPort.close()
def estop(self):
#FIXME: make it work for all motors
self._write(self._build_edit_command('estop'))
def get_long_status(self):
self._write(self._build_edit_command('long_status'))
def get_short_status(self):
self._write(self._build_edit_command('short_status'))
def home(self, motor):
#ENHANCE: make home work for MOTOR_ALL
if motor == 'MOTOR_ALL':
cmd = self._build_run_command('MOTOR_X', 'home', 0)
self._test_print(cmd)
else:
self._write(self._build_run_command(motor, 'home', 0))
def move(self, motor, distance):
self._write(self._build_run_command(motor, 'move', distance))
#ENHANCE: consider changing this to a Vector3 input
#FIXME: put in proper defaults for velocity and acceleration
def move_to(self, x, y, z, velocity=100, acceleration=50):
pass
def set_acceleration(self, motor, acceleration):
self._write(self._build_run_command(motor, 'acceleration', acceleration))
def set_velocity(self, motor, velocity):
self._write(self._build_run_command(motor, 'velocity', velocity))
def stop(self):
self._write(self._build_edit_command('stop'))
'''
Private Methods
'''
def _build_edit_command(self, command):
offset = 8
cmd = int(EditCommands[command], 16) << offset
cmd = struct.pack('>H', cmd)
return cmd
def _build_run_command(self, motor, command, data):
offset = 8
run = struct.pack('<H', int(EditCommands['run'],16))
cmd = struct.pack('<H', (int(Motor[motor]) << (offset + 6)) + (int(RunCommands[command], 16) << offset) + ((data >> 16) & 0xFF))
d = data & 0xFFFF
if data < 0:
d = struct.pack('<H', abs(data))
else:
d = struct.pack('<H', data)
cmdp = run + cmd + d
return cmdp
def _read(self):
msg = self.serialPort.read(self.serialPort.in_waiting)
#print(':'.join('{:02x}'.format(ord(x)) for x in msg))
return msg
def _write(self, message):
self.serialPort.write(message)
'''
Unit Test Helper Methods
'''
def _test_read(self):
ret = self.serialPort.readline()
value = ' '.join('{:02x}'.format(ord(x)) for x in ret)
return value
def _test_print(self, hexData):
    """Print *hexData* as colon-separated hex byte values (debug helper).

    Fix: accepts ``bytes`` (Python 3, items are ints) as well as ``str``
    (Python 2, items are chars); the original ``ord(x)`` raised TypeError
    on bytes input.
    """
    print(':'.join('{:02x}'.format(b if isinstance(b, int) else ord(b)) for b in hexData))
| 29.06993 | 136 | 0.554968 | 482 | 4,157 | 4.585062 | 0.319502 | 0.069683 | 0.047059 | 0.065158 | 0.169683 | 0.158824 | 0.145701 | 0.071041 | 0.025339 | 0 | 0 | 0.031228 | 0.314409 | 4,157 | 143 | 137 | 29.06993 | 0.744211 | 0.079865 | 0 | 0.069307 | 0 | 0 | 0.100374 | 0 | 0 | 0 | 0.020822 | 0.006993 | 0 | 1 | 0.168317 | false | 0.009901 | 0.059406 | 0 | 0.277228 | 0.049505 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7653c6b08924256fb7cac002136f2b86181b503f | 486 | py | Python | no6/app.py | yujikawa/remosta | 5e1debbd80839f3afdba14ddf6cddae0de211a2a | [
"Apache-2.0"
] | 1 | 2021-05-21T12:46:11.000Z | 2021-05-21T12:46:11.000Z | no6/app.py | yujikawa/remosta | 5e1debbd80839f3afdba14ddf6cddae0de211a2a | [
"Apache-2.0"
] | null | null | null | no6/app.py | yujikawa/remosta | 5e1debbd80839f3afdba14ddf6cddae0de211a2a | [
"Apache-2.0"
] | null | null | null | import streamlit as st
from PIL import Image
from gen_checker import predict
st.title('星野源チェッカー')
# Accept a single image upload (jpg/png/jpeg).
file = st.file_uploader(label='ファイルアップロード', type=['jpg', 'png', 'jpeg'])
if file is not None:
    img = Image.open(file)
    # img = Image.open(file)
    st.image(img, width=200, caption='診断した画像')
    # predict() presumably returns a probability in [0, 1]; scaled to a
    # percentage here — TODO confirm against gen_checker.predict.
    result = predict(img) * 100
    if result <= 50:
        st.warning(f'残念ながら、あなたがガッキーと結婚できる確率は{result:.2f}%、恋ダンスでも踊ってろ!')
    elif result > 50:
st.success(f'あなたがガッキーと結婚できる確率は{result:.2f}%、ワンチャンあるかも!') | 30.375 | 72 | 0.701646 | 71 | 486 | 4.774648 | 0.591549 | 0.035398 | 0.070796 | 0.094395 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028708 | 0.139918 | 486 | 16 | 73 | 30.375 | 0.782297 | 0.045267 | 0 | 0 | 0 | 0 | 0.265659 | 0.192225 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.230769 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
765592ae0e7e93a6e71d5c3fdde137eaa815efa0 | 735 | py | Python | netpen/subnet.py | defcronyke/netpen | 66bf5c4401752a6ad9f411f04d88c0189281f8fb | [
"MIT"
] | null | null | null | netpen/subnet.py | defcronyke/netpen | 66bf5c4401752a6ad9f411f04d88c0189281f8fb | [
"MIT"
] | null | null | null | netpen/subnet.py | defcronyke/netpen | 66bf5c4401752a6ad9f411f04d88c0189281f8fb | [
"MIT"
] | null | null | null | import ipaddress
from .topology import TopologyMember
class Subnet(TopologyMember):
    """An IPv4 subnet that registers itself with its topology's IPAM."""

    REF = 'subnet'
    DESC = {'title': 'IPv4 Subnet'}
    SCHEMA = {
        'type': 'object',
        'additionalProperties': False,
        'required': ['name'],
        'properties': {
            'name': {'type': 'string'},
            'cidr': {'type': 'string'},
        },
    }

    def __init__(self, topology, name, cidr):
        super().__init__(topology, name)
        # Parse (and implicitly validate) the CIDR once, up front.
        self.net = ipaddress.ip_network(cidr)
        self.topology.ipam.add_subnet(self)

    @property
    def cidr(self):
        """The subnet in CIDR notation (e.g. '10.0.0.0/24')."""
        return str(self.net)

    @classmethod
    def from_params(cls, topology, params):
        """Alternate constructor from a params mapping with 'name' and 'cidr' keys."""
        name = params['name']
        cidr = params['cidr']
        return cls(topology, name, cidr)
| 24.5 | 60 | 0.572789 | 73 | 735 | 5.616438 | 0.493151 | 0.04878 | 0.082927 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001887 | 0.278912 | 735 | 29 | 61 | 25.344828 | 0.771698 | 0 | 0 | 0 | 0 | 0 | 0.14966 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.083333 | 0.083333 | 0.458333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
765885ca61621f735ee35e2f6813f997bf906f13 | 1,298 | py | Python | BOJ/graph_boj/tomato.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | BOJ/graph_boj/tomato.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | BOJ/graph_boj/tomato.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | # BOJ 7576
import sys
from collections import deque
def bfs(que):
    """Multi-source BFS over the global tomato grid.

    *que* holds the (row, col) positions of every ripe tomato. Unripe cells
    reachable from them are labelled with ``day + 1`` in the global ``arr``;
    the largest label reached is returned (0 if nothing spreads).
    Relies on the module-level globals ``arr``, ``visited``, ``dx``, ``dy``,
    ``n`` (rows) and ``m`` (cols).
    """
    longest = 0
    while que:
        x, y = que.popleft()
        visited[x][y] = True
        for ddx, ddy in zip(dx, dy):
            nx, ny = x + ddx, y + ddy
            # skip neighbours outside the grid
            if not (0 <= nx < n and 0 <= ny < m):
                continue
            # -1 marks a wall / empty box cell
            if arr[nx][ny] == -1:
                continue
            if arr[nx][ny] == 0 and not visited[nx][ny]:
                arr[nx][ny] = arr[x][y] + 1
                longest = max(longest, arr[nx][ny])
                visited[nx][ny] = True
                que.append((nx, ny))
    return longest
si = sys.stdin.readline
# Input: m = number of columns, n = number of rows.
m, n = map(int, si().split())
arr = []
que = deque()
zero = False  # does any unripe (0) tomato exist?
for i in range(n):
    temp = list(map(int, si().split()))
    arr.append(temp)
    for j in range(m):
        if temp[j] == 1:
            # ripe tomato: a BFS start point
            que.append((i, j))
        elif temp[j] == 0:
            zero = True
if not zero:
    # every tomato is already ripe (or blocked): zero days needed
    print(0)
    sys.exit(0)
visited = [[False for _ in range(m)] for _ in range(n)]
# 4-neighbour offsets (up, down, left, right)
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
# ripe cells start labelled 1, so days elapsed = max label - 1
val = bfs(que) - 1
done = True
for i in range(n):
    for j in range(m):
        if arr[i][j] == 0:
            # an unreachable tomato remains
            done = False
            break
if done:
    print(val)
else:
    print(-1)
| 19.969231 | 56 | 0.446071 | 203 | 1,298 | 2.842365 | 0.29064 | 0.055459 | 0.060659 | 0.057192 | 0.204506 | 0.048527 | 0 | 0 | 0 | 0 | 0 | 0.033419 | 0.400616 | 1,298 | 64 | 57 | 20.28125 | 0.708226 | 0.022342 | 0 | 0.117647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019608 | false | 0 | 0.039216 | 0 | 0.078431 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7659e3404db12c8d69f104d4898093255068b2aa | 10,328 | py | Python | src/snooker_ball_tracker/cli.py | dcrblack/snooker-ball-tracker | 292b307e48914ebc42227e371ca0114ea944c8cd | [
"MIT"
] | 6 | 2020-08-10T14:00:52.000Z | 2022-02-03T10:23:20.000Z | src/snooker_ball_tracker/cli.py | dcrblack/snooker-ball-tracker | 292b307e48914ebc42227e371ca0114ea944c8cd | [
"MIT"
] | 3 | 2021-04-30T14:11:06.000Z | 2021-05-21T21:05:11.000Z | src/snooker_ball_tracker/cli.py | dcrblack/snooker-ball-tracker | 292b307e48914ebc42227e371ca0114ea944c8cd | [
"MIT"
] | 1 | 2020-10-14T06:07:13.000Z | 2020-10-14T06:07:13.000Z | import argparse
import os
from copy import deepcopy
import cv2
import numpy as np
from snooker_ball_tracker.ball_tracker import BallTracker
from snooker_ball_tracker.ball_tracker.util import Image, transform_frame
from snooker_ball_tracker.enums import SnookerColour
from snooker_ball_tracker.settings import settings as s
class CLI():
    """Image-only ball tracker CLI: loads an image, runs detection, shows a window."""

    # NOTE(review): these are class attributes shared by all instances until
    # reassigned in run().
    image: Image = None
    ball_tracker: BallTracker = None
    window_title = "Snooker Ball Tracker Image CLI"
    # Working HSV bounds, updated by __pick_color on mouse clicks.
    colour = {
        "LOWER": np.array([0, 0, 0]),
        "UPPER": np.array([0, 0, 0])
    }

    def create_parser(self) -> argparse.ArgumentParser:
        """Create CLI argument parser

        :return: CLI argument parser
        :rtype: argparse.ArgumentParser
        """
        parser = argparse.ArgumentParser(
            description="Ball Tracker Image CLI (Only works with images)")
        parser.add_argument(
            "image", help="Image file to detect and track balls from")
        # Default settings file lives three directories above this module.
        parser.add_argument("-s", "--settings", dest="settings", default=os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(
            os.path.abspath(__file__)))), "resources", "config", "default_settings.json"),
            help="Settings file to use, defaults to \"%(default)s\"")
        parser.add_argument("-w", "--width", dest="width", default=800, type=int,
            help="Set width of image for processing, defaults to \"%(default)s\" pixels")
        parser.add_argument("-d", "--detect-colour", dest="detect_colour", default=None,
            type=str.upper, choices=[
                colour.value for colour in SnookerColour],
            help="Detect contours matching provided colour")
        parser.add_argument("--mask-colour", dest="mask_colour", action="store_true", default=False,
            help="Mask contours of provided colour")
        parser.add_argument("--show-threshold", dest="show_threshold", action="store_true", default=False,
            help="Show thresholded frames")
        parser.add_argument("--morph", dest="morph", action="store_true", default=False,
            help="Perform morph closing morphology on processed frames")
        return parser

    def __pick_color(self, event: int, x_pos: int, y_pos: int, *ignore):
        """Listens to a left click event on the processed frame to
        extract colour values from pixel located using `x_pos` and `y_pos`

        :param event: event type
        :type event: int
        :param x_pos: x position in frame
        :type x_pos: int
        :param y_pos: y position in frame
        :type y_pos: int
        """
        if event == cv2.EVENT_LBUTTONDOWN:
            hsv = self.image.hsv_frame
            pixel = hsv[y_pos, x_pos]
            # Band of +/-10 hue/saturation and +/-40 value around the pixel.
            upper = np.array([pixel[0] + 10, pixel[1] + 10, pixel[2] + 40])
            lower = np.array([pixel[0] - 10, pixel[1] - 10, pixel[2] - 40])
            self.colour["LOWER"] = lower
            self.colour["UPPER"] = upper
            self.ball_tracker.colour_settings.colour_model.update(self.colour)
            # Reflect the newly picked bounds in the window trackbars.
            cv2.setTrackbarPos("H (lower)", self.window_title, self.colour["LOWER"][0])
            cv2.setTrackbarPos("H (upper)", self.window_title, self.colour["UPPER"][0])
            cv2.setTrackbarPos("S (lower)", self.window_title, self.colour["LOWER"][1])
            cv2.setTrackbarPos("S (upper)", self.window_title, self.colour["UPPER"][1])
            cv2.setTrackbarPos("V (lower)", self.window_title, self.colour["LOWER"][2])
            cv2.setTrackbarPos("V (upper)", self.window_title, self.colour["UPPER"][2])

    def run(self, args: argparse.Namespace):
        """Run the CLI app

        :param args: args parsed from CLI parser
        :type args: argparse.Namespace
        :raises OSError: if `settings` arg failed to load
        :raises OSError: if `image` arg failed to load
        """
        # load settings from json file
        success, _ = s.load(args.settings)
        if not success:
            raise OSError(f"Failed to load settings file: {args.settings}")
        if os.path.exists(args.image):
            print("=================================")
            print("CLI Arguments:")
            print("=================================")
            for key, value in vars(args).items():
                print(f"{key}: {value}")
            print("=================================")
            print("USER CONTROLS:")
            print("=================================")
            print("s -> save processed frame to a jpg file")
            print("q -> exit the program")
            print("=================================")
            if args.detect_colour is not None:
                print("Click on image to obtain HSV values from selected pixel")
                print(f"This will be used to update colour values for: {args.detect_colour}")
                print("=================================")
            print("waiting for user input...\n")
            # create ball tracker with loaded settings
            self.ball_tracker = BallTracker()
            # create main ball tracker window
            cv2.namedWindow(self.window_title)
            # setup trackbars if --detect-colours in set
            if args.detect_colour is not None:
                self.ball_tracker.colour_settings.selected_colour = args.detect_colour
                colour = {
                    "LOWER": s.COLOUR_DETECTION_SETTINGS["COLOURS"][args.detect_colour]["LOWER"],
                    "UPPER": s.COLOUR_DETECTION_SETTINGS["COLOURS"][args.detect_colour]["UPPER"]
                }
                # create trackbars for lower and upper HSV values
                cv2.createTrackbar("H (lower)", self.window_title, 0, 180, lambda x: x)
                cv2.createTrackbar("H (upper)", self.window_title, 0, 180, lambda x: x)
                cv2.createTrackbar("S (lower)", self.window_title, 0, 255, lambda x: x)
                cv2.createTrackbar("S (upper)", self.window_title, 0, 255, lambda x: x)
                cv2.createTrackbar("V (lower)", self.window_title, 0, 255, lambda x: x)
                cv2.createTrackbar("V (upper)", self.window_title, 0, 255, lambda x: x)
                cv2.setTrackbarPos("H (lower)", self.window_title, colour["LOWER"][0])
                cv2.setTrackbarPos("H (upper)", self.window_title, colour["UPPER"][0])
                cv2.setTrackbarPos("S (lower)", self.window_title, colour["LOWER"][1])
                cv2.setTrackbarPos("S (upper)", self.window_title, colour["UPPER"][1])
                cv2.setTrackbarPos("V (lower)", self.window_title, colour["LOWER"][2])
                cv2.setTrackbarPos("V (upper)", self.window_title, colour["UPPER"][2])
                cv2.setMouseCallback(self.window_title, self.__pick_color)
                # NOTE(review): this block repeats the setTrackbarPos calls
                # above verbatim — it looks redundant; confirm before removing.
                cv2.setTrackbarPos("H (lower)", self.window_title, colour["LOWER"][0])
                cv2.setTrackbarPos("H (upper)", self.window_title, colour["UPPER"][0])
                cv2.setTrackbarPos("S (lower)", self.window_title, colour["LOWER"][1])
                cv2.setTrackbarPos("S (upper)", self.window_title, colour["UPPER"][1])
                cv2.setTrackbarPos("V (lower)", self.window_title, colour["LOWER"][2])
                cv2.setTrackbarPos("V (upper)", self.window_title, colour["UPPER"][2])
            # read in image provided
            in_frame = cv2.imread(args.image)
            in_frame = transform_frame(in_frame, width=args.width)
            # frame display loop
            while True:
                self.image, _, _ = self.ball_tracker.process_frame(
                    deepcopy(in_frame), show_threshold=args.show_threshold, detect_colour=args.detect_colour,
                    mask_colour=args.mask_colour, perform_morph=args.morph)
                cv2.imshow(self.window_title, self.image.frame)
                # obtain key value if a key was pressed
                key = cv2.waitKey(1) & 0xFF
                # if window is closed, exit program
                if cv2.getWindowProperty(self.window_title, cv2.WND_PROP_VISIBLE) == 0:
                    break
                # if the "q" key is pressed, exit program
                if key == ord("q"):
                    break
                # if the "s" key is pressed, save processed frame to file
                if key == ord("s"):
                    counter = 1
                    file_name = os.path.join(os.path.dirname(args.image),
                        os.path.splitext(os.path.basename(args.image))[0])
                    # find the first unused "<name>-frame-N.jpg" filename
                    while True:
                        frame_name = file_name + \
                            "-frame-" + str(counter) + ".jpg"
                        if not os.path.exists(frame_name):
                            print("saving frame to " + frame_name)
                            cv2.imwrite(frame_name, self.image.frame)
                            break
                        counter += 1
                # obtain trackbar values and use them for --detect-colour
                if args.detect_colour is not None:
                    h_lower = cv2.getTrackbarPos("H (lower)", self.window_title)
                    h_upper = cv2.getTrackbarPos("H (upper)", self.window_title)
                    s_lower = cv2.getTrackbarPos("S (lower)", self.window_title)
                    s_upper = cv2.getTrackbarPos("S (upper)", self.window_title)
                    v_lower = cv2.getTrackbarPos("V (lower)", self.window_title)
                    v_upper = cv2.getTrackbarPos("V (upper)", self.window_title)
                    colour["LOWER"] = np.array([h_lower, s_lower, v_lower])
                    colour["UPPER"] = np.array([h_upper, s_upper, v_upper])
                    self.ball_tracker.colour_settings.colour_model.update(colour)
        else:
            raise OSError(f"Failed to load image file: {args.image}")
def main() -> None:
    """Parse CLI arguments, normalise the path arguments and run the app."""
    cli = CLI()
    parser = cli.create_parser()
    args = parser.parse_args()
    args.image = os.path.abspath(args.image)
    args.settings = os.path.abspath(args.settings)
    try:
        cli.run(args)
    except OSError as ex:
        # Fix: argparse's exit() writes `message` to stderr and requires a
        # str; passing the exception object itself raised a TypeError.
        parser.exit(1, message=str(ex) + "\n")
    finally:
        cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
| 47.376147 | 134 | 0.562161 | 1,199 | 10,328 | 4.715596 | 0.176814 | 0.068093 | 0.090202 | 0.05306 | 0.367881 | 0.326671 | 0.278741 | 0.251503 | 0.226919 | 0.208879 | 0 | 0.016205 | 0.30093 | 10,328 | 217 | 135 | 47.59447 | 0.766898 | 0.097018 | 0 | 0.18543 | 0 | 0 | 0.169066 | 0.023887 | 0 | 0 | 0.000436 | 0 | 0 | 1 | 0.02649 | false | 0 | 0.059603 | 0 | 0.125828 | 0.099338 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
765bca7fc82af83cbab183cc426b30d21243a91e | 609 | py | Python | Tkinter/message.py | zuqingxie/ROS_learning | c6fcfa76eb3bc91a52850d0f28785f123ca82409 | [
"MIT"
] | null | null | null | Tkinter/message.py | zuqingxie/ROS_learning | c6fcfa76eb3bc91a52850d0f28785f123ca82409 | [
"MIT"
] | null | null | null | Tkinter/message.py | zuqingxie/ROS_learning | c6fcfa76eb3bc91a52850d0f28785f123ca82409 | [
"MIT"
] | null | null | null |
from tkinter import *
from tkinter import messagebox
from PIL import ImageTk, Image
root = Tk()
root.title("frame learning")
# NOTE(review): hard-coded absolute icon path; breaks on other machines.
root.iconbitmap('@/home/zuqing/Downloads/apple_icon.xbm')
# There are many kinds of message-box dialogs:
# showinfo, showwarning, showerror, askquestion, askokcancel, askyesno
def popup():
    """Ask a yes/no question and report the answer as a label on the root window."""
    answer = messagebox.askyesno("This is my Popup!", "Hello World")
    # askyesno returns a truthy value for "yes"
    text = "you clicked yes" if answer == 1 else "you clicked no"
    Label(root, text=text).pack()
# Button (label: "出现一个窗口" = "open a window") that triggers the popup dialog.
Button(root, text="出现一个窗口",command=popup).pack(padx=100,pady=100)
root.mainloop() | 29 | 71 | 0.706076 | 80 | 609 | 5.3625 | 0.6375 | 0.074592 | 0.090909 | 0.074592 | 0.107226 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013487 | 0.147783 | 609 | 21 | 72 | 29 | 0.813102 | 0.190476 | 0 | 0 | 0 | 0 | 0.235174 | 0.07771 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.214286 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
765f139f8ed9df4406459ece713a38217f294fe9 | 5,268 | py | Python | scrips/search_old/run_search_all_structs_3vdm.py | lonelu/Metalprot | e51bee472c975aa171bdb6ee426a07ca69f110ee | [
"MIT"
] | null | null | null | scrips/search_old/run_search_all_structs_3vdm.py | lonelu/Metalprot | e51bee472c975aa171bdb6ee426a07ca69f110ee | [
"MIT"
] | null | null | null | scrips/search_old/run_search_all_structs_3vdm.py | lonelu/Metalprot | e51bee472c975aa171bdb6ee426a07ca69f110ee | [
"MIT"
] | null | null | null | import os
import sys
import prody as pr
import numpy as np
#You can either add the python package path.
#sys.path.append(r'/mnt/e/GitHub_Design/Metalprot')
from metalprot import search_struct, extract_vdm, ligand_database
import multiprocessing as mp
'''
python /mnt/e/GitHub_Design/Metalprot/metalprot/scrips/run_search_all_structs_3vdm.py
'''
# Generate queryss: three tiers of centroid vdM query sets consumed by the
# iterative search (the same M1_AAMetal extraction is reused for each tier).
queryss = []
query_dir = '/mnt/e/DesignData/ligands/ZN_rcsb_datesplit/20210624/'
print(query_dir)
#Get query pdbs
querys = extract_vdm.extract_all_centroid(query_dir, summary_name = '_summary.txt', file_name_includes = ['M1_AAMetal'], score_cut = 0, clu_num_cut = 50)
print(len(querys))
queryss.append(querys)
#Get query_2nd pdbs
query_2nds = extract_vdm.extract_all_centroid(query_dir, summary_name = '_summary.txt', file_name_includes = ['M1_AAMetal'], score_cut = 0, clu_num_cut = 50)
queryss.append(query_2nds)
#Get query_3rd pdbs
query_3rds = extract_vdm.extract_all_centroid(query_dir, summary_name = '_summary.txt', file_name_includes = ['M1_AAMetal'], score_cut = 0, clu_num_cut = 50)
queryss.append(query_3rds)
# Contact and second-shell query sets are disabled for this run.
contact_querys = None
#contact_querys = extract_vdm.extract_all_centroid(query_dir, summary_name = '_summary.txt', file_name_includes = ['M8_AtomContact4_clusters'], score_cut = 0, clu_num_cut = 2)
_2nd_querys = None
#_2nd_querys = extract_vdm.extract_all_centroid(query_dir + '20210608/', summary_name = '_summary.txt', file_name_includes = ['M7_AA2sMetal-HIS_clusters'], score_cut = 0, clu_num_cut = 0)
#print(len(_2nd_querys))
# Bivalent queries pre-filter candidate residue windows on each target.
query_bivalences = extract_vdm.extract_all_centroid('/mnt/e/DesignData/ligands/ZN_rcsb/20210527', summary_name = '_summary.txt', file_name_includes = ['M5_2aa_sep_cores_bb_clusters'], score_cut = 1, clu_num_cut = 10)
print(len(query_bivalences))
# run Search_struct
def run_search(workdir, target_file, queryss, contact_querys, _2nd_querys):
    """Run the iterative vdM metal-binding search on one target pdb file.

    Returns an 8-tuple:
    (target_file, ran_ok, win_filter, win_extract, win_search,
     found, all_extracted_found, no_extra_found)
    where win_extract are the native ZN-binding CA residue indices and
    win_search are the residue windows used by the surviving combinations.

    Fixes: the bare ``except:`` clauses (which also swallowed
    KeyboardInterrupt/SystemExit) now catch ``Exception``; membership tests
    against the search results use sets instead of O(n) list scans; a dead
    ``win_filters = None`` assignment was removed.
    """
    rmsd_cuts = [0.5, 0.5, 0.5]
    dist_cuts = [1.5, 1.5, 1.5]
    num_iter = 3
    clash_query_query = 2.3
    clash_query_target = 2.3
    use_sep_aas = [False, False, False]
    tolerance = 0.5
    fine_dist_cut = 0.25
    win_filter = None
    validateOriginStruct = True
    print('Working on ' + target_file)
    outdir = workdir + 'output_' + target_file.split('.')[0] + '/'
    target_path = workdir + target_file
    target = pr.parsePDB(target_path)
    # Ground truth: CA residue indices of the native ZN-binding residues.
    metal_cores = ligand_database.get_metal_core_seq(target, metal_sel = 'name ZN', extend = 0)
    win_extract = []
    for c in metal_cores:
        win_extract.extend(c[1].select('name CA').getResindices())
    win_extract.sort()
    try:
        win_filters = search_struct.extract_all_win_filter_by_bivalence(query_bivalences, target, tolerance=0.75, rmsd_cut=0.75)
        win_filter = [w for w in win_filters]
    except Exception:
        # Bivalence pre-filtering is best-effort; fall back to no filter.
        win_filter = None
    ss = search_struct.Search_struct(target_path, outdir, queryss, rmsd_cuts, dist_cuts, num_iter, clash_query_query, clash_query_target, use_sep_aas,
        tolerance, fine_dist_cut = fine_dist_cut, win_filter = win_filter, contact_querys = contact_querys, secondshell_querys=_2nd_querys, validateOriginStruct = validateOriginStruct)
    try:
        ss.run_iter_search_structure()
    except Exception:
        # The search failed for this target; report it as not run.
        return (target_file, False, win_filter, win_extract, None, False, False, False)
    # Residue windows actually used by the surviving combinations.
    found = set()
    for comb in ss.combs:
        for query in comb.querys:
            found.update(query.win)
    win_search = sorted(found)
    extract_set = set(win_extract)
    extract_in_search = [e in found for e in win_extract]
    search_in_extract = [w in extract_set for w in win_search]
    return (target_file, True, win_filter, win_extract, win_search, True, all(extract_in_search), all(search_in_extract))
# Fan the per-target searches out across all CPU cores.
num_cores = int(mp.cpu_count())
pool = mp.Pool(num_cores)
workdir = '/mnt/e/DesignData/ligands/ZN_rcsb_datesplit/20210624/_test_full_pdbs_sub2/'
target_files = []
for target_file in os.listdir(workdir):
    if target_file.endswith('.pdb'):
        target_files.append(target_file)
results = [pool.apply_async(run_search, args=(workdir, target_file, queryss, None, None)) for target_file in target_files]
results = [p.get() for p in results]
# Write one tab-separated summary row per target.
with open(workdir + '_summary.txt', 'w') as f:
    f.write('target_file\tRun\twin_filter\twin_extract\twin_search\tFind\tFind_all_extract\tNo_extra_Find\n')
    for r in results:
        try:
            f.write(r[0] + '\t')
            f.write(str(r[1]) + '\t')
            f.write(('' if not r[2] else ';'.join([str(x) for x in r[2]])) + '\t')
            f.write(';'.join([str(x) for x in r[3]]) + '\t')
            f.write(';'.join([str(x) for x in r[4]]) + '\t')
            f.write(str(r[5]) + '\t')
            f.write(str(r[6]) + '\t')
            f.write(str(r[7]) + '\t\n')
        except:
            # Failed runs carry None fields; emit just the filename.
            f.write(r[0] + '\t\n')
| 34.207792 | 216 | 0.69609 | 803 | 5,268 | 4.255293 | 0.237858 | 0.035119 | 0.01434 | 0.035119 | 0.309043 | 0.255487 | 0.23705 | 0.195786 | 0.170618 | 0.170618 | 0 | 0.029875 | 0.180334 | 5,268 | 153 | 217 | 34.431373 | 0.761464 | 0.166667 | 0 | 0.071429 | 0 | 0 | 0.104094 | 0.06807 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011905 | false | 0 | 0.071429 | 0 | 0.107143 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76617e4c670f33b54a78a3e7bb934cf43cda06af | 2,103 | py | Python | changed_files/sedov_output/make.py | yingtchen/pyro2 | 6c078a92ba026547307a74a3f2e74aff721a75e0 | [
"BSD-3-Clause"
] | null | null | null | changed_files/sedov_output/make.py | yingtchen/pyro2 | 6c078a92ba026547307a74a3f2e74aff721a75e0 | [
"BSD-3-Clause"
] | null | null | null | changed_files/sedov_output/make.py | yingtchen/pyro2 | 6c078a92ba026547307a74a3f2e74aff721a75e0 | [
"BSD-3-Clause"
] | null | null | null | # Description: shows all compressible plots at different bulk velocities
from read_files import read
import matplotlib.pyplot as plt
import matplotlib.image as img
import matplotlib.colors as colors
import matplotlib.colorbar as cb
from mpl_toolkits.axes_grid1 import make_axes_locatable
images = read()
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.tick_params(top='off',bottom='off',left='off',right='off',labeltop='off',labelbottom='off',labelleft='off',labelright='off')
ax.set_xlabel('x')
ax.set_ylabel('y')
plt.suptitle('128x128', fontsize=16, y=0.86)
plt.subplots_adjust(left=0.2,bottom=0.2,right=0.8,top=0.8,wspace=0,hspace=0)
loc = plt.MaxNLocator(4)
xticks = ['', '', '0.25', '', '0.75']
yticks = ['', '', '0.75', '', '0.25', '']
empticks = ['', '', '', '', '']
sub_titles = ['Standard', 'RK', '4th Order']
y_labels = ['t = 0.1', 't = 0.2']
for n, lst in enumerate(images):
for i in range(len(lst)):
a = fig.add_subplot(2,3,3*i+(n+1))
im = a.imshow(lst[i], aspect='auto')
a.xaxis.set_major_locator(loc)
a.yaxis.set_major_locator(loc)
if i==0:
a.set_title(sub_titles[n])
if n==0:
a.set_ylabel(y_labels[i], rotation=0, ha='right')
if n==0 and i==1:
a.set_xticklabels(xticks)
a.set_yticklabels(yticks)
elif n!=0 and i==1:
a.set_xticklabels(xticks)
a.set_yticklabels(empticks)
elif n==0 and i!=1:
a.set_xticklabels(empticks)
a.set_yticklabels(yticks)
else:
a.set_xticklabels(empticks)
a.set_yticklabels(empticks)
#divider = make_axes_locatable(ax)
#cax = divider.append_axes('right', size='7%', pad=0.05)
cax = fig.add_axes([0.85, 0.2, 0.05, 0.6])
norm = colors.Normalize(vmin=0.0, vmax=5.0)
color = cb.ColorbarBase(cax, norm=norm)
fig.set_size_inches(9, 6, forward=True)
fig.savefig('plot.png', bbox_inches='tight')
plt.show()
| 31.38806 | 127 | 0.639563 | 335 | 2,103 | 3.895522 | 0.402985 | 0.030651 | 0.045977 | 0.052107 | 0.188506 | 0.135632 | 0.135632 | 0.088889 | 0.088889 | 0.0659 | 0 | 0.044186 | 0.182121 | 2,103 | 66 | 128 | 31.863636 | 0.714535 | 0.075131 | 0 | 0.153846 | 0 | 0 | 0.062854 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.115385 | 0 | 0.115385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7661b4baad33986329078b22713e5299ee0d32d1 | 1,381 | py | Python | openapi_to_fastapi/tests/snapshots/snap_test_router.py | strlt/openapi-to-fastapi | c7339d8975d1710929e5dec0808fe0d57a4b73ad | [
"BSD-3-Clause"
] | 24 | 2020-09-14T06:18:29.000Z | 2022-03-29T10:20:56.000Z | openapi_to_fastapi/tests/snapshots/snap_test_router.py | strlt/openapi-to-fastapi | c7339d8975d1710929e5dec0808fe0d57a4b73ad | [
"BSD-3-Clause"
] | 3 | 2020-09-04T06:57:00.000Z | 2021-05-11T16:11:38.000Z | openapi_to_fastapi/tests/snapshots/snap_test_router.py | strlt/openapi-to-fastapi | c7339d8975d1710929e5dec0808fe0d57a4b73ad | [
"BSD-3-Clause"
] | 9 | 2020-09-04T06:52:49.000Z | 2022-03-15T00:15:58.000Z | # -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots["test_weather_route_payload_errors Missing payload"] = {
"detail": [
{
"loc": ["body", "lat"],
"msg": "field required",
"type": "value_error.missing",
},
{
"loc": ["body", "lon"],
"msg": "field required",
"type": "value_error.missing",
},
]
}
snapshots["test_weather_route_payload_errors Incorrect payload type"] = {
"detail": [
{
"loc": ["body", "lat"],
"msg": "value is not a valid float",
"type": "type_error.float",
},
{
"ctx": {"limit_value": 180.0},
"loc": ["body", "lon"],
"msg": "ensure this value is less than or equal to 180.0",
"type": "value_error.number.not_le",
},
]
}
snapshots["test_custom_route_definitions Custom route definition"] = {
"detail": [
{
"loc": ["query", "vendor"],
"msg": "field required",
"type": "value_error.missing",
},
{
"loc": ["header", "auth-header"],
"msg": "field required",
"type": "value_error.missing",
},
]
}
| 25.574074 | 73 | 0.486604 | 129 | 1,381 | 5.023256 | 0.457364 | 0.069444 | 0.108025 | 0.123457 | 0.408951 | 0.354938 | 0.237654 | 0.123457 | 0 | 0 | 0 | 0.012182 | 0.346126 | 1,381 | 53 | 74 | 26.056604 | 0.705426 | 0.044895 | 0 | 0.326087 | 0 | 0 | 0.420213 | 0.091185 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.043478 | 0 | 0.043478 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7662e35849c21073deff54de452e69354b24cf89 | 1,398 | py | Python | 815. Bus Routes/solution1.py | sunshot/LeetCode | 8f6503201831055f1d49ed3abb25be44a13ec317 | [
"MIT"
] | null | null | null | 815. Bus Routes/solution1.py | sunshot/LeetCode | 8f6503201831055f1d49ed3abb25be44a13ec317 | [
"MIT"
] | null | null | null | 815. Bus Routes/solution1.py | sunshot/LeetCode | 8f6503201831055f1d49ed3abb25be44a13ec317 | [
"MIT"
] | null | null | null | from typing import List
import collections
class Solution:
def numBusesToDestination(self, routes: List[List[int]], source: int, target: int) -> int:
if not routes:
return -1
if source == target:
return 0
routes = list(map(set, routes))
graph = collections.defaultdict(set)
for i, r1 in enumerate(routes):
for j in range(i+1, len(routes)):
r2 = routes[j]
shared = r1.intersection(r2)
if shared and len(shared) > 0:
graph[i].add(j)
graph[j].add(i)
visited, targets = set(), set()
for i, route in enumerate(routes):
if source in route:
visited.add(i)
if target in route:
targets.add(i)
queue = collections.deque([(node, 1) for node in visited])
while queue:
node, depth = queue.popleft()
if node in targets:
return depth
for neigh in graph[node]:
if neigh not in visited:
visited.add(neigh)
queue.append((neigh, depth+1))
return -1
if __name__== '__main__':
solution = Solution()
routes = [[1,2,7],[3,6,7]]
source = 1
target = 6
ans = solution.numBusesToDestination(routes, source, target)
print(ans) | 32.511628 | 94 | 0.515737 | 163 | 1,398 | 4.374233 | 0.337423 | 0.01683 | 0.025245 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021991 | 0.381974 | 1,398 | 43 | 95 | 32.511628 | 0.803241 | 0 | 0 | 0.05 | 0 | 0 | 0.005718 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.05 | 0 | 0.2 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76639ae205398d8d61e6cd4d9d24fba233e72235 | 1,965 | py | Python | MovieSerieTorrent/formatting.py | JonathanPetit/Parser-Renamer-torrentfile | 3193b0544348e17c527f921e34283eb7ea66b01b | [
"MIT"
] | 7 | 2016-04-21T21:16:29.000Z | 2021-11-20T13:16:21.000Z | MovieSerieTorrent/formatting.py | JonathanPetit/Parser-Renamer-torrentfile | 3193b0544348e17c527f921e34283eb7ea66b01b | [
"MIT"
] | 5 | 2016-03-03T23:41:08.000Z | 2016-03-05T13:11:34.000Z | MovieSerieTorrent/formatting.py | JonathanPetit/Parser-Renamer-torrentfile | 3193b0544348e17c527f921e34283eb7ea66b01b | [
"MIT"
] | 3 | 2019-04-03T19:00:42.000Z | 2021-06-08T17:03:06.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
try:
from parser import Parser
except:
from .parser import Parser
from colorama import Fore, Style, init
from tabulate import tabulate
import os
class Formatting:
def __init__(self):
self.headers_movie =self.headers = ['N°', 'Title', 'Part', 'Year', 'Language', 'Quality']
self.headers_serie = ['N°', 'Title', 'Season', 'Episode', 'Language', 'Quality']
self.table = None
self.infos = None
init(autoreset=True)
def _list_for_formatting(self, files):
parse_file = Parser().parse(files)
self.infos = parse_file[0]
if self.infos['type'] == 'serie':
return ['{title}', '{season}', '{episode}', '{languages}', '{quality}']
else:
return ['{title}', '{Part}', '{year}', '{languages}', '{quality}']
def formatting(self, path):
list_movies = []
list_serie = []
self.path = path
j = 1
for files in os.listdir(self.path):
i = 0
self.files = self._list_for_formatting(files)
if files.endswith('.DS_Store') == False:
for elements in self.files:
try:
self.files[i] = self.files[i].format(**self.infos)
except KeyError:
self.files[i] = ''
i += 1
if self.infos['type'] == 'serie':
list_serie.append(self.files)
else:
list_movies.append(self.files)
self.files.insert(0, j)
j += 1
for files in list_serie:
files.insert(0, j)
j += 1
print(Fore.RED + 'MOVIE:')
print(tabulate(list_movies, headers=self.headers_movie))
print('\n')
print(Fore.RED + 'SERIE:')
print(tabulate(list_serie, headers=self.headers_serie))
print('\n')
| 33.305085 | 97 | 0.513486 | 218 | 1,965 | 4.527523 | 0.311927 | 0.082067 | 0.030395 | 0.04458 | 0.093212 | 0.030395 | 0 | 0 | 0 | 0 | 0 | 0.007746 | 0.343003 | 1,965 | 58 | 98 | 33.87931 | 0.755229 | 0.021883 | 0 | 0.196078 | 0 | 0 | 0.099479 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.098039 | 0 | 0.215686 | 0.117647 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
766491d3189d2ce4581c010a835c2c7cde8bdabf | 12,034 | py | Python | frappe-bench/apps/erpnext/erpnext/healthcare/doctype/lab_test/lab_test.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | [
"MIT"
] | null | null | null | frappe-bench/apps/erpnext/erpnext/healthcare/doctype/lab_test/lab_test.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | [
"MIT"
] | null | null | null | frappe-bench/apps/erpnext/erpnext/healthcare/doctype/lab_test/lab_test.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2015, ESS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
import json
from frappe.utils import getdate
from erpnext.healthcare.doctype.healthcare_settings.healthcare_settings import get_receivable_account
from frappe import _
class LabTest(Document):
    """Controller for the Lab Test doctype (frappe Document lifecycle hooks)."""

    def on_submit(self):
        # Stamp the submission date, mirror this test into the patient's
        # medical record, then mark the Lab Test itself as completed.
        # Direct db.set_value writes bypass validation on purpose.
        frappe.db.set_value(self.doctype,self.name,"submitted_date", getdate())
        insert_lab_test_to_medical_record(self)
        frappe.db.set_value("Lab Test", self.name, "status", "Completed")

    def on_cancel(self):
        # Remove the linked medical-record entry, flag the test cancelled,
        # and reload so the in-memory doc reflects the direct DB write.
        delete_lab_test_from_medical_record(self)
        frappe.db.set_value("Lab Test", self.name, "status", "Cancelled")
        self.reload()

    def on_update(self):
        # Keep antibiotic-sensitivity child rows sorted alphabetically and
        # renumber idx so the table displays in that order.
        if(self.sensitivity_test_items):
            sensitivity = sorted(self.sensitivity_test_items, key=lambda x: x.antibiotic_sensitivity)
            for i, item in enumerate(sensitivity):
                item.idx = i+1
            self.sensitivity_test_items = sensitivity

    def after_insert(self):
        # Mark the originating Lab Prescription as fulfilled, and lazily
        # populate this test from its template when created empty.
        if(self.prescription):
            frappe.db.set_value("Lab Prescription", self.prescription, "test_created", 1)
        if not self.test_name and self.template:
            self.load_test_from_template()
            self.reload()

    def load_test_from_template(self):
        # Copy template fields (plus sample collection / result rows) into
        # this document, then reload to pick up the saved state.
        lab_test = self
        create_test_from_template(lab_test)
        self.reload()
def create_test_from_template(lab_test):
    """Populate *lab_test* in place from its Lab Test Template.

    Copies the descriptive template fields, stamps today's result date,
    then delegates sample-collection and result-row creation to the
    shared helpers (which also save the document).
    """
    template = frappe.get_doc("Lab Test Template", lab_test.template)
    patient = frappe.get_doc("Patient", lab_test.patient)
    lab_test.result_date = getdate()
    for field_name in ("test_name", "department", "test_group"):
        setattr(lab_test, field_name, getattr(template, field_name))
    lab_test = create_sample_collection(lab_test, template, patient, None)
    lab_test = load_result_format(lab_test, template, None, None)
@frappe.whitelist()
def update_status(status, name):
    """Set *status* and stamp approved_date on the named Lab Test.

    Uses a direct SQL update (parameterized) to bypass document validation.
    """
    query = """update `tabLab Test` set status=%s, approved_date=%s where name = %s"""
    values = (status, getdate(), name)
    frappe.db.sql(query, values)
@frappe.whitelist()
def update_lab_test_print_sms_email_status(print_sms_email, name):
    """Flag a delivery-status field on a Lab Test.

    *print_sms_email* is the name of the checkbox field to set to 1
    (e.g. printed / sms / email sent).
    """
    frappe.db.set_value("Lab Test", name, print_sms_email, 1)
def create_lab_test_doc(invoice, consultation, patient, template):
    """Build (in memory, unsaved) a new Lab Test document.

    Copies patient demographics, template descriptors and optional
    invoice/consultation links; the result date is set to today.
    """
    lab_test = frappe.new_doc("Lab Test")
    if invoice:
        lab_test.invoice = invoice
    if consultation:
        lab_test.physician = consultation.physician
    # patient demographics / contact details
    lab_test.patient = patient.name
    lab_test.patient_age = patient.get_age()
    lab_test.patient_sex = patient.sex
    lab_test.email = patient.email
    lab_test.mobile = patient.mobile
    lab_test.report_preference = patient.report_preference
    # template descriptors
    for field_name in ("department", "test_name", "test_group"):
        setattr(lab_test, field_name, getattr(template, field_name))
    lab_test.template = template.name
    lab_test.result_date = getdate()
    return lab_test
def create_normals(template, lab_test):
    """Append one 'normal' result row from a Single-type template."""
    lab_test.normal_toggle = "1"
    row = lab_test.append("normal_test_items")
    row.template = template.name
    row.test_name = template.test_name
    row.test_uom = template.test_uom
    row.normal_range = template.test_normal_range
    row.require_result_value = 1
def create_compounds(template, lab_test, is_group):
    """Append one normal-result row per sub-test of a Compound template.

    When *is_group* is true the sub-test label goes into ``test_event``
    (indented display under a group heading); otherwise it becomes the
    row's ``test_name``.
    """
    lab_test.normal_toggle = "1"
    label_field = "test_event" if is_group else "test_name"
    for sub_template in template.normal_test_templates:
        row = lab_test.append("normal_test_items")
        setattr(row, label_field, sub_template.test_event)
        row.test_uom = sub_template.test_uom
        row.normal_range = sub_template.normal_range
        row.require_result_value = 1
        row.template = template.name
def create_specials(template, lab_test):
    """Append descriptive ('special') result rows from the template.

    Also switches on the sensitivity section when the template marks it.
    """
    lab_test.special_toggle = "1"
    if template.sensitivity:
        lab_test.sensitivity_toggle = "1"
    for particulars_row in template.special_test_template:
        row = lab_test.append("special_test_items")
        row.test_particulars = particulars_row.particulars
        row.require_result_value = 1
        row.template = template.name
def create_sample_doc(template, patient, invoice):
    """Create or reuse a Sample Collection for *template* and *patient*.

    An open (docstatus 0) collection for the same patient and sample type
    is updated (quantity summed, notes appended) instead of duplicated;
    otherwise a fresh Sample Collection is created and saved.  Returns the
    Sample Collection document; falls through (returning None) when the
    template needs no sample.

    NOTE(review): indentation reconstructed from the flattened source —
    the placement of the quantity update and of ``save`` (in the create
    branch only) should be confirmed against version control.
    """
    if(template.sample):
        sample_exist = frappe.db.exists({
            "doctype": "Sample Collection",
            "patient": patient.name,
            "docstatus": 0,
            "sample": template.sample})
        if sample_exist :
            #Update Sample Collection by adding quantity
            sample_collection = frappe.get_doc("Sample Collection",sample_exist[0][0])
            quantity = int(sample_collection.sample_quantity)+int(template.sample_quantity)
            if(template.sample_collection_details):
                # Append this test's collection notes after a divider.
                # ("Detials" typo is part of the stored text; left as-is.)
                sample_collection_details = sample_collection.sample_collection_details+"\n==============\n"+"Test :"+template.test_name+"\n"+"Collection Detials:\n\t"+template.sample_collection_details
                frappe.db.set_value("Sample Collection", sample_collection.name, "sample_collection_details",sample_collection_details)
            frappe.db.set_value("Sample Collection", sample_collection.name, "sample_quantity",quantity)
        else:
            #create Sample Collection for template, copy vals from Invoice
            sample_collection = frappe.new_doc("Sample Collection")
            if(invoice):
                sample_collection.invoice = invoice
            sample_collection.patient = patient.name
            sample_collection.patient_age = patient.get_age()
            sample_collection.patient_sex = patient.sex
            sample_collection.sample = template.sample
            sample_collection.sample_uom = template.sample_uom
            sample_collection.sample_quantity = template.sample_quantity
            if(template.sample_collection_details):
                sample_collection.sample_collection_details = "Test :"+template.test_name+"\n"+"Collection Detials:\n\t"+template.sample_collection_details
            sample_collection.save(ignore_permissions=True)
        return sample_collection
@frappe.whitelist()
def create_lab_test_from_desk(patient, template, prescription, invoice=None):
    """Create a Lab Test for a Lab Prescription from the desk UI.

    Returns the new Lab Test's name, or None when a test already exists
    for the prescription (or no template document could be loaded).
    """
    lab_test_exist = frappe.db.exists({
        "doctype": "Lab Test",
        "prescription": prescription
    })
    if lab_test_exist:
        return  # already created for this prescription
    template = frappe.get_doc("Lab Test Template", template)
    # no template document -> nothing to create
    if not (template):
        return
    patient = frappe.get_doc("Patient", patient)
    # the prescription's parent is the Consultation it was ordered in
    consultation_id = frappe.get_value("Lab Prescription", prescription, "parent")
    consultation = frappe.get_doc("Consultation", consultation_id)
    lab_test = create_lab_test(patient, template, prescription, consultation, invoice)
    return lab_test.name
def create_sample_collection(lab_test, template, patient, invoice):
    """Link a Sample Collection to *lab_test* when the setting requires it.

    Returns the (possibly updated) lab_test document.
    """
    required = frappe.db.get_value("Healthcare Settings", None, "require_sample_collection")
    if required == "1":
        sample = create_sample_doc(template, patient, invoice)
        if sample:
            lab_test.sample = sample.name
    return lab_test
def load_result_format(lab_test, template, prescription, invoice):
    """Create the result rows on *lab_test* according to the template type.

    Single/Compound/Descriptive templates map directly to normal or special
    rows; Grouped templates iterate their members and add a non-editable
    heading row before each member's rows.  Unless the type is 'No Result',
    the prescription/invoice links are set and the document is saved.

    NOTE(review): indentation reconstructed from the flattened source;
    the grouped-branch nesting should be confirmed against version control.
    """
    if(template.test_template_type == 'Single'):
        create_normals(template, lab_test)
    elif(template.test_template_type == 'Compound'):
        create_compounds(template, lab_test, False)
    elif(template.test_template_type == 'Descriptive'):
        create_specials(template, lab_test)
    elif(template.test_template_type == 'Grouped'):
        #iterate for each template in the group and create one result for all.
        for test_group in template.test_groups:
            if(test_group.test_template):
                template_in_group = frappe.get_doc("Lab Test Template",
                    test_group.test_template)
                if(template_in_group):
                    if(template_in_group.test_template_type == 'Single'):
                        create_normals(template_in_group, lab_test)
                    elif(template_in_group.test_template_type == 'Compound'):
                        # heading row (require_result_value = 0) then rows
                        normal_heading = lab_test.append("normal_test_items")
                        normal_heading.test_name = template_in_group.test_name
                        normal_heading.require_result_value = 0
                        normal_heading.template = template_in_group.name
                        create_compounds(template_in_group, lab_test, True)
                    elif(template_in_group.test_template_type == 'Descriptive'):
                        special_heading = lab_test.append("special_test_items")
                        special_heading.test_name = template_in_group.test_name
                        special_heading.require_result_value = 0
                        special_heading.template = template_in_group.name
                        create_specials(template_in_group, lab_test)
            else:
                # group row without its own template: use the group's fields
                normal = lab_test.append("normal_test_items")
                normal.test_name = test_group.group_event
                normal.test_uom = test_group.group_test_uom
                normal.normal_range = test_group.group_test_normal_range
                normal.require_result_value = 1
                normal.template = template.name
    if(template.test_template_type != 'No Result'):
        if(prescription):
            lab_test.prescription = prescription
            if(invoice):
                frappe.db.set_value("Lab Prescription", prescription, "invoice", invoice)
        lab_test.save(ignore_permissions=True) # insert the result
    return lab_test
def create_lab_test(patient, template, prescription, consultation, invoice):
    """Build, sample-link, format and save a Lab Test; return the document."""
    doc = create_lab_test_doc(invoice, consultation, patient, template)
    doc = create_sample_collection(doc, template, patient, invoice)
    return load_result_format(doc, template, prescription, invoice)
@frappe.whitelist()
def get_employee_by_user_id(user_id):
    """Return the Employee document linked to the given system user."""
    employee_name = frappe.db.get_value("Employee", {"user_id": user_id})
    return frappe.get_doc("Employee", employee_name)
def insert_lab_test_to_medical_record(doc):
    """Record a submitted Lab Test in the Patient Medical Record.

    The subject is the test name, plus the test comment when present.
    """
    subject_parts = [str(doc.test_name)]
    if doc.test_comment:
        subject_parts.append(str(doc.test_comment))
    record = frappe.new_doc("Patient Medical Record")
    record.patient = doc.patient
    record.subject = ", ".join(subject_parts)
    record.status = "Open"
    record.communication_date = doc.result_date
    record.reference_doctype = "Lab Test"
    record.reference_name = doc.name
    record.reference_owner = doc.owner
    record.save(ignore_permissions=True)
def delete_lab_test_from_medical_record(self):
    """Delete the Patient Medical Record entry referencing this Lab Test.

    Fix: the original indexed ``medical_record_id[0][0]`` unconditionally,
    which raised IndexError when no medical record existed (e.g. the test
    was cancelled before on_submit created one).  The lookup result is now
    guarded, and the SQL parameter is passed as a proper one-tuple.
    """
    medical_record_id = frappe.db.sql(
        "select name from `tabPatient Medical Record` where reference_name=%s",
        (self.name,))
    if medical_record_id and medical_record_id[0][0]:
        frappe.delete_doc("Patient Medical Record", medical_record_id[0][0])
def create_item_line(test_code, sales_invoice):
    """Append a qty-1 invoice line for *test_code* to *sales_invoice*.

    Silently does nothing when the code is empty, the Item is missing,
    or the Item is disabled.
    """
    if not test_code:
        return
    item = frappe.get_doc("Item", test_code)
    if not item or item.disabled:
        return
    line = sales_invoice.append("items")
    line.item_code = item.item_code
    line.item_name = item.item_name
    line.qty = 1.0
    line.description = item.description
@frappe.whitelist()
def create_invoice(company, patient, lab_tests, prescriptions):
    """Create a draft Sales Invoice covering lab tests and prescriptions.

    Args:
        company: company used to resolve the receivable account.
        patient: Patient name; its linked customer is billed.
        lab_tests: JSON-encoded list of Lab Test names.
        prescriptions: JSON-encoded list of Lab Prescription names.

    Returns the new Sales Invoice name, or None when both lists are empty.
    """
    test_ids = json.loads(lab_tests)
    line_ids = json.loads(prescriptions)
    if not test_ids and not line_ids:
        return
    sales_invoice = frappe.new_doc("Sales Invoice")
    sales_invoice.customer = frappe.get_value("Patient", patient, "customer")
    sales_invoice.due_date = getdate()
    sales_invoice.is_pos = '0'
    sales_invoice.debit_to = get_receivable_account(company)
    # one invoice line per prescription ...
    for line in line_ids:
        test_code = frappe.get_value("Lab Prescription", line, "test_code")
        create_item_line(test_code, sales_invoice)
    # ... and one per lab test (item resolved through its template)
    for test in test_ids:
        template = frappe.get_value("Lab Test", test, "template")
        test_code = frappe.get_value("Lab Test Template", template, "item")
        create_item_line(test_code, sales_invoice)
    sales_invoice.set_missing_values()
    sales_invoice.save()
    #set invoice in lab test
    for test in test_ids:
        frappe.db.set_value("Lab Test", test, "invoice", sales_invoice.name)
        prescription = frappe.db.get_value("Lab Test", test, "prescription")
        if prescription:
            frappe.db.set_value("Lab Prescription", prescription, "invoice", sales_invoice.name)
    #set invoice in prescription
    for line in line_ids:
        frappe.db.set_value("Lab Prescription", line, "invoice", sales_invoice.name)
    return sales_invoice.name
@frappe.whitelist()
def get_lab_test_prescribed(patient):
    """Return the patient's prescribed-but-uncreated lab tests.

    Each row: (prescription name, test_code, parent consultation, invoice,
    physician, consultation_date); filtered to test_created=0.
    """
    return frappe.db.sql("""select cp.name, cp.test_code, cp.parent, cp.invoice, ct.physician, ct.consultation_date from tabConsultation ct,
        `tabLab Prescription` cp where ct.patient=%s and cp.parent=ct.name and cp.test_created=0""", (patient))
| 40.655405 | 190 | 0.781951 | 1,687 | 12,034 | 5.271488 | 0.114997 | 0.070055 | 0.021927 | 0.019791 | 0.480715 | 0.35781 | 0.301023 | 0.215001 | 0.122568 | 0.094344 | 0 | 0.002821 | 0.116254 | 12,034 | 295 | 191 | 40.79322 | 0.83338 | 0.03997 | 0 | 0.192913 | 0 | 0.007874 | 0.113172 | 0.004333 | 0.003937 | 0 | 0 | 0 | 0 | 1 | 0.090551 | false | 0 | 0.027559 | 0.003937 | 0.169291 | 0.007874 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
766a10448b1dd08b321b02b6a6e8c9be8af8c618 | 3,786 | py | Python | server/baseApp/socketConnection.py | totologic/NaoRemoteCsharp | 4a7c815c3a2e7bdd446609fa3507fd49ef89824c | [
"MIT"
] | null | null | null | server/baseApp/socketConnection.py | totologic/NaoRemoteCsharp | 4a7c815c3a2e7bdd446609fa3507fd49ef89824c | [
"MIT"
] | null | null | null | server/baseApp/socketConnection.py | totologic/NaoRemoteCsharp | 4a7c815c3a2e7bdd446609fa3507fd49ef89824c | [
"MIT"
] | null | null | null | '''
@author: Cedric Jules
'''
from debug import Debug
from pydispatch import dispatcher
import socket
import threading
class SocketConnection():
    """Single-client TCP server socket run on a background thread.

    Listens on the given port, accepts one client, and dispatches
    pydispatch signals for each ';;;'-delimited message received and for
    client disconnects.  Uses 1-second socket timeouts so the loops can
    observe the __active flag and shut down promptly.

    NOTE(review): indentation reconstructed from the flattened source —
    confirm the __mainThread nesting against version control.
    """
    SIGNAL_MSG_RECEIVED = "signalMessageReceived"
    SIGNAL_CLIENT_DISCONNECT = "signalClientDisconnect"

    def __init__(self, port):
        """Prepare state; the socket is only opened by start()."""
        Debug.log("SocketConnection __init__")
        self.__port = port
        self.__active = False       # loop/run flag checked by all threads
        self.__thread = None
        self.__socket = None        # listening socket
        self.__socketChannel = None  # accepted client connection
        self.__msgStr = None        # last raw / current message
        self.__msgArr = None        # messages split on ';;;'

    def __del__(self):
        Debug.log("SocketConnection __del__")

    def start(self):
        """Activate the connection and launch the worker thread."""
        Debug.log("SocketConnection start")
        self.__active = True
        self.__start()

    def __start(self):
        # Spawn the listener/receiver loop on its own thread.
        Debug.log("SocketConnection __start")
        self.__thread = threading.Thread(None, self.__mainThread, "SocketConnectionThread", (), {})
        self.__thread.start()

    def stop(self):
        """Deactivate and tear down the client channel and listener socket."""
        Debug.log("SocketConnection stop")
        self.__active = False
        if self.__socketChannel != None:
            self.__socketChannel.shutdown(socket.SHUT_RDWR)
            self.__socketChannel = None
        self.__socket.close()
        Debug.log("SocketConnection stop is complete")

    def send(self, msgStr):
        """Send *msgStr* to the connected client (no-op when inactive)."""
        if self.__active :
            Debug.log("SocketConnection sent: '" + msgStr + "'")
            self.__socketChannel.send(msgStr)

    def getCurrMsg(self):
        """Return the message currently being dispatched."""
        return self.__msgStr

    def __mainThread(self):
        # Worker loop: open the listener, accept one client, then receive
        # and dispatch messages until stopped or the client disconnects.
        if self.__active:
            Debug.log("SocketConnection __mainThread")
            self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.__socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.__socket.bind(('', self.__port))
            self.__socket.setblocking(1)
            self.__socket.settimeout(1.0)  # timeout lets us poll __active
            self.__socket.listen(5)
            Debug.log("SocketConection socket inited")
        else:
            return
        # Accept loop: retry (timeouts raise) until a client connects
        # or stop() clears __active.
        while self.__active:
            try:
                self.__socketChannel = self.__socket.accept()[0]
                Debug.log("SocketConnection socket connection accepted")
                self.__socketChannel.settimeout(1.0)
                break
            except Exception:
                pass
        if self.__socketChannel == None:
            Debug.log("SocketConnection socket connection aborted")
            return
        # Receive loop.
        while self.__active:
            Debug.log("SocketConnection wait for socket message")
            # Inner retry loop: recv times out every second to poll __active.
            while self.__active:
                try:
                    self.__msgStr = self.__socketChannel.recv(256)
                    Debug.log("SocketConnection received: '" + self.__msgStr + "'")
                    break
                except Exception:
                    pass
            if not self.__active:
                break
            if not self.__msgStr :
                # Empty recv => client closed the connection: tear down,
                # signal the disconnect, and schedule a fresh listen cycle.
                Debug.log("SocketConnection socket connection interrupted")
                self.stop()
                self.__active = True
                dispatcher.send(SocketConnection.SIGNAL_CLIENT_DISCONNECT, self)
                threading.Timer(1, self.__start).start()
                break
                return  # NOTE(review): unreachable — the break above exits first
            # Dispatch each ';;;'-delimited message (empty fragments skipped).
            self.__msgArr = self.__msgStr.split(";;;")
            while len(self.__msgArr) > 0 :
                self.__msgStr = self.__msgArr.pop(0)
                if self.__msgStr == "" :
                    continue
                else :
                    dispatcher.send(SocketConnection.SIGNAL_MSG_RECEIVED, self)
| 33.803571 | 100 | 0.543846 | 329 | 3,786 | 5.854103 | 0.267477 | 0.058152 | 0.161994 | 0.058152 | 0.213915 | 0.084112 | 0.046729 | 0.046729 | 0 | 0 | 0 | 0.00588 | 0.371104 | 3,786 | 112 | 101 | 33.803571 | 0.803024 | 0.005547 | 0 | 0.292135 | 0 | 0 | 0.137099 | 0.017823 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089888 | false | 0.022472 | 0.044944 | 0.011236 | 0.213483 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
766bc98f3eebd4f36c2b596bbfd2e881f0c17d35 | 3,270 | py | Python | launch_scripts/start_calibration.py | abdul-gendy/Intellux | dab49ee872de2038a2afea9677f5d41dffb2a240 | [
"MIT"
] | null | null | null | launch_scripts/start_calibration.py | abdul-gendy/Intellux | dab49ee872de2038a2afea9677f5d41dffb2a240 | [
"MIT"
] | null | null | null | launch_scripts/start_calibration.py | abdul-gendy/Intellux | dab49ee872de2038a2afea9677f5d41dffb2a240 | [
"MIT"
] | null | null | null | import sys
import os
sys.path.append("..")
import argparse
import numpy as np
from pathlib import Path
from Intellux import calibrate_intellux
def start_calibration(turning_direction):
    """Prepare the on-disk state files and kick off Intellux calibration.

    Resets the persisted stepper counters to zero, ensures the calibration
    info files exist, flips the calibration status to 'in progress', stores
    the requested turning direction, then runs the calibration routine.

    Args:
        turning_direction: 0 (CW) or 1 (CCW), per the CLI help text.
    """
    # repo root: two levels above this script's directory
    root = Path(os.path.abspath(__file__)).parents[1]
    stepper_files_directory = os.path.join(root, 'Intellux', 'mechanical_module', 'stepper_position')
    if os.path.isdir(stepper_files_directory) == False:
        os.mkdir(stepper_files_directory)
    motor_step_sequence_counter_file = os.path.join(stepper_files_directory, 'motor_step_sequence_counter.npy')
    motor_global_step_counter_file = os.path.join(stepper_files_directory, 'motor_global_step_counter.npy')
    reset_stepper_position_files(motor_step_sequence_counter_file, motor_global_step_counter_file)
    calibration_files_directory = os.path.join(root, 'Intellux', 'mechanical_module', 'calibration_info')
    if os.path.isdir(calibration_files_directory) == False:
        os.mkdir(calibration_files_directory)
    calibration_status_file = os.path.join(calibration_files_directory, 'calibration_status.npy')
    full_range_steps_file = os.path.join(calibration_files_directory, 'full_range_steps.npy')
    turning_direction_file = os.path.join(calibration_files_directory, 'turning_direction.npy')
    # raises ValueError if a calibration is already marked in progress
    new_calibration_status = update_calibration_status(calibration_status_file)
    if not os.path.isfile(full_range_steps_file):  # create placeholder when missing
        with open(full_range_steps_file, 'wb') as f:
            np.save(f, 0)
    # NOTE(review): reconstructed nesting assumes the direction is
    # (re)written on every run, not only on first run — confirm.
    with open(turning_direction_file, 'wb') as f:
        np.save(f, turning_direction)
    if new_calibration_status == 1:
        calibration = calibrate_intellux(turning_direction)
        calibration.start_calibration()
def update_calibration_status(calibration_status_file):
    """Flip the persisted calibration status to 'in progress' (1).

    A missing status file is treated as a first-time calibration.  A stored
    0 (idle) becomes 1; a stored 1 means a calibration is already running
    and raises ValueError, as does any unrecognized stored value.

    Returns:
        The new status value (always 1 on success).
    """
    if not os.path.isfile(calibration_status_file):
        new_status = 1  # first-time calibration: no status file yet
    else:
        previous_status = np.load(calibration_status_file)
        if previous_status == 1:
            raise ValueError("System already in process of calibration")
        if previous_status != 0:
            raise ValueError("Unknown calibration status value loaded")
        new_status = 1
    with open(calibration_status_file, 'wb') as f:
        np.save(f, new_status)
    return new_status
def reset_stepper_position_files(motor_step_sequence_counter_file, motor_global_step_counter_file):
    """Zero out both persisted stepper-position counter files."""
    for counter_file in (motor_step_sequence_counter_file, motor_global_step_counter_file):
        with open(counter_file, 'wb') as f:
            np.save(f, 0)  # reset persisted position to origin
if __name__=='__main__':
    # CLI entry point: -d/--direction selects the calibration turning
    # direction (0 = CW, 1 = CCW per the help text).
    parser = argparse.ArgumentParser(description='please input the turning direction to start calibrating as an int (0-CW or 1-CCW)')
    parser.add_argument('-d', '--direction', required=True, type=int, choices={0,1}, help='The required turning direction')
    args = parser.parse_args()
    turning_direction = int(args.direction)
    start_calibration(turning_direction)
766f5c48e04d908a5c98d66ea97e575e7f45af94 | 14,367 | py | Python | tests/blackbox_functions/test_synthetic.py | dwoiwode/py-pdp-partitioner | 3029e7aaf1d8adeec114d5acd7ca9bd45d577e8e | [
"MIT"
] | 1 | 2022-03-21T07:14:16.000Z | 2022-03-21T07:14:16.000Z | tests/blackbox_functions/test_synthetic.py | dwoiwode/py-pdp-partitioner | 3029e7aaf1d8adeec114d5acd7ca9bd45d577e8e | [
"MIT"
] | null | null | null | tests/blackbox_functions/test_synthetic.py | dwoiwode/py-pdp-partitioner | 3029e7aaf1d8adeec114d5acd7ca9bd45d577e8e | [
"MIT"
] | null | null | null | from unittest import TestCase
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import numpy as np
import pytest
from matplotlib import pyplot as plt
from pyPDP.blackbox_functions import BlackboxFunction, config_space_nd
from pyPDP.blackbox_functions.synthetic_functions import Levy, Ackley, CrossInTray, Square, NegativeSquare, \
StyblinskiTang
from pyPDP.utils.plotting import plot_function
from tests import PlottableTest
class TestConfigspaceND(TestCase):
    """Checks for the config_space_nd() factory (bounds, names, constants)."""

    def test_same_bounds(self):
        # scalar lower/upper apply to every hyperparameter
        cs = config_space_nd(4, lower=-4, upper=5, log=False)
        for hp in cs.get_hyperparameters():
            self.assertIsInstance(hp, CSH.NumericalHyperparameter)
            self.assertEqual(-4, hp.lower)
            self.assertEqual(5, hp.upper)
            self.assertFalse(hp.log)

    def test_prefix(self):
        # default prefix yields x1..x4
        default_names = {hp.name for hp in config_space_nd(4).get_hyperparameters()}
        self.assertSetEqual({"x1", "x2", "x3", "x4"}, default_names)
        # custom prefix is honored
        custom_cs = config_space_nd(4, variable_prefix="other_prefix_")
        custom_names = {hp.name for hp in custom_cs.get_hyperparameters()}
        expected = {"other_prefix_1", "other_prefix_2", "other_prefix_3", "other_prefix_4"}
        self.assertSetEqual(expected, custom_names)

    def test_different_bounds(self):
        # per-dimension tuples give each hyperparameter its own bounds
        cs = config_space_nd(3, lower=(0, -1.5, -2), upper=(5, 20, 32.3))
        expected_bounds = [(0, 5), (-1.5, 20), (-2, 32.3)]
        for hp, (lo, hi) in zip(cs.get_hyperparameters(), expected_bounds):
            self.assertIsInstance(hp, CSH.NumericalHyperparameter)
            self.assertEqual(lo, hp.lower)
            self.assertEqual(hi, hp.upper)

    def test_constants(self):
        # lower == upper collapses the dimension to a Constant
        cs = config_space_nd(3, lower=(0, 5, -2.32), upper=(0, 5, -2.32))
        for hp, expected_value in zip(cs.get_hyperparameters(), (0, 5, -2.32)):
            self.assertIsInstance(hp, CSH.Constant)
            self.assertEqual(expected_value, hp.value)
class TestLevy(TestCase):
    """Checks for the Levy synthetic function (global minimum 0 at x = 1)."""

    def test_config_space(self):
        hp = Levy().config_space.get_hyperparameter("x1")
        self.assertIsInstance(hp, CSH.NumericalHyperparameter)
        self.assertEqual(-10, hp.lower)
        self.assertEqual(10, hp.upper)
        self.assertFalse(hp.log)

    def test_levy1D(self):
        f = Levy.for_n_dimensions(1)
        self.assertAlmostEqual(f(x1=1), 0)  # global minimum
        # function is strictly positive elsewhere
        for _ in range(10000):
            sample = (np.random.random(1)[0] - 0.5) * 10 * 2
            self.assertGreater(f(x1=sample), 0)

    def test_levy2D(self):
        f = Levy.for_n_dimensions(2, lower=-10, upper=10)
        self.assertAlmostEqual(f(x1=1, x2=1), 0)  # global minimum
        for _ in range(10000):
            u, v = (np.random.random(2) - 0.5) * 10 * 2
            self.assertGreater(f(x1=u, x2=v), 0)
class TestAckley(TestCase):
    """Checks for the Ackley synthetic function (global minimum 0 at origin)."""

    def test_config_space(self):
        hp = Ackley().config_space.get_hyperparameter("x1")
        self.assertIsInstance(hp, CSH.NumericalHyperparameter)
        self.assertEqual(-32.768, hp.lower)
        self.assertEqual(32.768, hp.upper)
        self.assertFalse(hp.log)

    def test_ackley1D(self):
        f = Ackley.for_n_dimensions(1)
        self.assertAlmostEqual(f(x1=0), 0)  # global minimum
        # strictly positive away from the origin
        for _ in range(10000):
            sample = (np.random.random(1)[0] - 0.5) * 32.768 * 2
            self.assertGreater(f(x1=sample), 0)

    def test_ackley2D(self):
        f = Ackley.for_n_dimensions(2)
        self.assertAlmostEqual(f(x1=0, x2=0), 0)  # global minimum
        # sampled over (-10, 10), a subset of the full domain
        for _ in range(10000):
            u, v = (np.random.random(2) - 0.5) * 10 * 2
            self.assertGreater(f(x1=u, x2=v), 0)
class TestCrossInTray(TestCase):
    """Checks for the Cross-in-Tray function (four global minima ~ -2.06261).

    Fix: the minima test was named 'tets_cross_in_tray', so unittest never
    discovered or ran it.  Renamed to 'test_cross_in_tray'; the revived
    assertions compare against the published optimum with places=4, since
    -2.06261 is itself a rounded literature value.
    """

    def test_config_space(self):
        f = CrossInTray()
        default_cs = f.config_space
        hp = default_cs.get_hyperparameter("x1")
        self.assertIsInstance(hp, CSH.NumericalHyperparameter)
        self.assertEqual(-10, hp.lower)
        self.assertEqual(10, hp.upper)
        self.assertFalse(hp.log)

    def test_cross_in_tray(self):
        f = CrossInTray()
        # four symmetric global minima at (+-1.3491, +-1.3491)
        for sign_x in (1, -1):
            for sign_y in (1, -1):
                self.assertAlmostEqual(
                    f(x1=sign_x * 1.3491, x2=sign_y * 1.3491), -2.06261, places=4)
        # NOTE: bound uses the rounded optimum; a sample landing essentially
        # on a minimum could dip marginally below — probability negligible.
        for _ in range(10000):
            x1, x2 = (np.random.random(2) - 0.5) * 10 * 2
            y = f(x1=x1, x2=x2)
            self.assertGreater(y, -2.06262)
class TestStyblinskiTang(TestCase):
    """Checks for Styblinski-Tang: minima, and partial-dependence integrals
    validated against hand-derived closed-form expressions."""

    # per-dimension optimum value and its location
    minimum = -39.16616570377142
    minimum_at = -2.90353401818596

    def test_config_space(self):
        f = StyblinskiTang()
        default_cs = f.config_space
        hp = default_cs.get_hyperparameter("x1")
        self.assertIsInstance(hp, CSH.NumericalHyperparameter)
        self.assertEqual(-5, hp.lower)
        self.assertEqual(5, hp.upper)
        self.assertFalse(hp.log)

    def test_styblinski_tang_1D(self):
        f = StyblinskiTang.for_n_dimensions(1)
        self.assertAlmostEqual(f(x1=self.minimum_at), self.minimum)

    def test_minima(self):
        # in d dimensions the optimum value scales linearly: d * minimum
        for d in range(1, 11):  # Test multiple dimensions
            f = StyblinskiTang.for_n_dimensions(d)
            x = {f"x{i + 1}": self.minimum_at for i in range(d)}
            print(f"Dimensions: {d:2d}, Input: {x}")
            self.assertEqual(len(x), d)
            self.assertAlmostEqual(f(**x), d * self.minimum)

    def test_simple_integral_numerical(self):
        # pd_integral over the only variable should equal both the analytic
        # antiderivative difference and a midpoint-rule numerical integral.
        f = StyblinskiTang()
        cs = f.config_space
        hp = cs["x1"]
        f_int = f.pd_integral(hp)
        integral_formula_1_value = f_int()
        integral_formula_2_value = StyblinskiTang._styblinski_tang_integral(
            hp.upper) - StyblinskiTang._styblinski_tang_integral(hp.lower)
        print(integral_formula_1_value)
        # Calculate ground truth by approximating it with sum of small rectangles
        n_steps = 5000
        integral_numeric = 0
        step_size = (hp.upper - hp.lower) / n_steps
        for i in range(n_steps):
            integral_numeric += f(x1=hp.lower + (i + 0.5) * step_size)
        integral_numeric *= step_size
        print("Numeric:", integral_numeric)
        print("Partial Dependence Function:", integral_formula_1_value)
        print("1D-Integral Function:", integral_formula_2_value)
        # pd_integral returns a mean, so rescale by the interval width
        self.assertAlmostEqual(integral_numeric, integral_formula_1_value * (hp.upper - hp.lower), places=3)
        self.assertAlmostEqual(integral_numeric, integral_formula_2_value, places=3)

    def test_integral_1d(self):
        """Compare pd_integral('x2', 'x3') of the 3D function against a
        hand-derived closed form F(x1) = mean over x2, x3 of f(x1, x2, x3)."""

        def styblinski_tang_3D_int_1D(x1: float, lower_x2: float = -5, upper_x2: float = 5, lower_x3: float = -5,
                                      upper_x3: float = 5) -> float:
            # closed-form double integral, evaluated at the 4 corner terms
            # per variable, then normalized to a mean over the x2/x3 box
            styblinski_tang = StyblinskiTang.for_n_dimensions(1)
            term_x1_lower_lower = styblinski_tang(x1=x1) * lower_x2 * lower_x3
            term_x1_lower_upper = styblinski_tang(x1=x1) * lower_x2 * upper_x3
            term_x1_upper_lower = styblinski_tang(x1=x1) * upper_x2 * lower_x3
            term_x1_upper_upper = styblinski_tang(x1=x1) * upper_x2 * upper_x3
            term_x1 = term_x1_upper_upper - term_x1_upper_lower - term_x1_lower_upper + term_x1_lower_lower

            styblinski_tang_integral = StyblinskiTang._styblinski_tang_integral
            term_x2_lower_lower = styblinski_tang_integral(lower_x2) * lower_x3
            term_x2_lower_upper = styblinski_tang_integral(lower_x2) * upper_x3
            term_x2_upper_lower = styblinski_tang_integral(upper_x2) * lower_x3
            term_x2_upper_upper = styblinski_tang_integral(upper_x2) * upper_x3
            term_x2 = term_x2_upper_upper - term_x2_upper_lower - term_x2_lower_upper + term_x2_lower_lower

            term_x3_lower_lower = styblinski_tang_integral(lower_x3) * lower_x2
            term_x3_lower_upper = styblinski_tang_integral(lower_x3) * upper_x2
            term_x3_upper_lower = styblinski_tang_integral(upper_x3) * lower_x2
            term_x3_upper_upper = styblinski_tang_integral(upper_x3) * upper_x2
            term_x3 = term_x3_upper_upper - term_x3_upper_lower - term_x3_lower_upper + term_x3_lower_lower

            return (term_x1 + term_x2 + term_x3) / ((upper_x2 - lower_x2) * (upper_x3 - lower_x3))

        f = StyblinskiTang.for_n_dimensions(3)
        f_int_specific = styblinski_tang_3D_int_1D
        f_int_general = f.pd_integral('x2', 'x3')
        x = np.linspace(-5, 5, num=100)
        for x1 in x:
            f_int_specific_x = f_int_specific(x1=x1)
            f_int_general_x = f_int_general(x1=x1)
            self.assertAlmostEqual(f_int_specific_x, f_int_general_x, places=5)

    def test_integral_2d_x3(self):
        """Compare pd_integral('x3') of the 3D function against the
        closed form F(x1, x2) = mean over x3 of f(x1, x2, x3)."""
        f = StyblinskiTang.for_n_dimensions(3)

        # Shortcuts
        def styblinski_tang_3D_int_2D(x1: float, x2: float, lower: float = -5, upper: float = 5) -> float:
            styblinski_tang_2D = StyblinskiTang.for_n_dimensions(2)
            lower_term = styblinski_tang_2D(x1=x1, x2=x2) * lower + f._styblinski_tang_integral(lower)
            upper_term = styblinski_tang_2D(x1=x1, x2=x2) * upper + f._styblinski_tang_integral(upper)
            return (upper_term - lower_term) / (upper - lower)  # normalization

        f_int_specific = styblinski_tang_3D_int_2D
        f_int_general = f.pd_integral(f.config_space.get_hyperparameter('x3'))
        for x1 in np.linspace(-5, 5, num=100):
            for x2 in np.linspace(-5, 5, num=100):
                f_int_specific_x = f_int_specific(x1=x1, x2=x2)
                f_int_general_x = f_int_general(x1=x1, x2=x2)
                self.assertAlmostEqual(f_int_specific_x, f_int_general_x, places=5)

    def test_integral_2d_x2(self):
        """Compare pd_integral('x2') of the 3D function against the
        closed form F(x1, x3) = mean over x2 of f(x1, x2, x3).
        (By symmetry the same helper works with x3 in the x2 slot.)"""
        f = StyblinskiTang.for_n_dimensions(3)

        # Shortcuts
        def styblinski_tang_3D_int_2D(x1: float, x3: float, lower: float = -5, upper: float = 5) -> float:
            styblinski_tang_2D = StyblinskiTang.for_n_dimensions(2)
            lower_term = styblinski_tang_2D(x1=x1, x2=x3) * lower + f._styblinski_tang_integral(lower)
            upper_term = styblinski_tang_2D(x1=x1, x2=x3) * upper + f._styblinski_tang_integral(upper)
            return (upper_term - lower_term) / (upper - lower)  # normalization

        f_int_specific = styblinski_tang_3D_int_2D
        f_int_general = f.pd_integral(f.config_space.get_hyperparameter('x2'))
        for x1 in np.linspace(-5, 5, num=100):
            for x3 in np.linspace(-5, 5, num=100):
                f_int_specific_x = f_int_specific(x1=x1, x3=x3)
                f_int_general_x = f_int_general(x1=x1, x3=x3)
                self.assertAlmostEqual(f_int_specific_x, f_int_general_x, places=5)
class TestPlotBlackboxFunctions(PlottableTest):
    """Smoke-tests that render the synthetic functions and their
    partial-dependence integrals (pass/fail = no exception while plotting)."""

    def _apply_blackbox_plot(self, f: callable, cs: CS.ConfigurationSpace, name: str, **kwargs):
        # Shared helper: lazily create the figure, plot f over cs, title it.
        if self.fig is None:
            self.initialize_figure()
        plot_function(f, cs, **kwargs)
        plt.title(name)
        plt.tight_layout()

    def test_plot_ackley_1D_zoomed(self):
        # Ackley restricted to (-10, 10) instead of the full domain
        f = Ackley.for_n_dimensions(1, lower=-10, upper=10)
        cs = f.config_space
        self._apply_blackbox_plot(f, cs, "Ackley 1D")

    def test_plot_ackley_2D_zoomed(self):
        f = Ackley.for_n_dimensions(2, lower=-10, upper=10)
        cs = f.config_space
        self._apply_blackbox_plot(f, cs, "Ackley 1D")

    def test_plot_styblinski_tang_3D_int_1D(self):
        # 3D function integrated over x2, x3 -> 1D curve
        f = StyblinskiTang.for_n_dimensions(3)
        f_int = f.pd_integral('x2', 'x3')
        self._apply_blackbox_plot(f_int, f_int.config_space, "Styblinski Tang Integral 1D")

    def test_plot_styblinski_tang_3D_int_2D(self):
        # 3D function integrated over x3 -> 2D surface
        f = StyblinskiTang.for_n_dimensions(3)
        f_int = f.pd_integral('x3')
        self._apply_blackbox_plot(f_int, f_int.config_space, "Styblinski Tang Integral 2D")

    def test_plot_styblinski_tang_integral(self):
        f = StyblinskiTang.for_n_dimensions(2)
        f_int = f.pd_integral('x2')
        self._apply_blackbox_plot(f_int, f_int.config_space, "Styblinski Tang Integral 1D")

    def test_integral_function(self):
        # Plot the raw antiderivative helper directly.
        cs = CS.ConfigurationSpace()  # Cannot use config_space_nd, because function takes "x" instead of "x1" as input
        cs.add_hyperparameter(CSH.UniformFloatHyperparameter("x", -5, 5))
        self._apply_blackbox_plot(StyblinskiTang._styblinski_tang_integral, cs, "Styblinski Tang Integral Function 1D")
@pytest.mark.parametrize("f", [
    Square.for_n_dimensions(1), Square.for_n_dimensions(2),
    NegativeSquare.for_n_dimensions(1), NegativeSquare.for_n_dimensions(2),
    Ackley.for_n_dimensions(1), Ackley.for_n_dimensions(2),
    CrossInTray(),
    Levy.for_n_dimensions(1), Levy.for_n_dimensions(2),
    StyblinskiTang.for_n_dimensions(1), StyblinskiTang.for_n_dimensions(2)
])
def test_plot_all(f: BlackboxFunction):
    """Parameterized smoke-test: render each synthetic function over its
    config space, save the figure under the shared SAVE_FOLDER, and show it."""
    plt.figure(figsize=(16, 9))
    cs = f.config_space
    plot_function(f, cs)
    plt.title(str(f))
    plt.tight_layout()
    plt.savefig(TestPlotBlackboxFunctions.SAVE_FOLDER / TestPlotBlackboxFunctions.__name__ / f"{str(f)}.png")
    plt.show()
| 39.578512 | 119 | 0.649057 | 1,965 | 14,367 | 4.473282 | 0.101781 | 0.066894 | 0.043003 | 0.041411 | 0.648692 | 0.56405 | 0.472355 | 0.430603 | 0.36678 | 0.354266 | 0 | 0.055709 | 0.242848 | 14,367 | 362 | 120 | 39.687845 | 0.752344 | 0.050532 | 0 | 0.315385 | 0 | 0 | 0.02566 | 0 | 0 | 0 | 0 | 0 | 0.223077 | 1 | 0.115385 | false | 0 | 0.038462 | 0 | 0.196154 | 0.019231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
766f8a12f0c46fcb1494cc2e4a28cf14067fdd35 | 17,437 | py | Python | finds/pyR.py | terence-lim/investment-data-science | 22c1cf7b1d9c012f0e2c586a0c5bda64e6f59471 | [
"MIT"
] | 2 | 2020-04-18T00:00:15.000Z | 2020-08-17T14:24:49.000Z | finds/pyR.py | terence-lim/investment-data-science | 22c1cf7b1d9c012f0e2c586a0c5bda64e6f59471 | [
"MIT"
] | null | null | null | finds/pyR.py | terence-lim/investment-data-science | 22c1cf7b1d9c012f0e2c586a0c5bda64e6f59471 | [
"MIT"
] | 2 | 2021-06-18T23:26:44.000Z | 2021-10-04T08:03:01.000Z | """Convenience class methods to use rpy2 package and R environment
- rpy2
Author: Terence Lim
License: MIT
"""
import numpy as np
from pandas import DataFrame, Series
from pandas.api import types
from copy import deepcopy
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import rpy2.robjects as ro
from rpy2.robjects import FloatVector, ListVector, IntVector, StrVector, NULL
def StrListVector(strList):
    """Convert input to a StrVector, or a ListVector recursively."""
    # Anything empty or without a length (NULL, None, '', non-str scalars, ...)
    # maps to R NULL.
    try:
        assert(len(strList) > 0)
    except:
        return NULL
    if isinstance(strList, ListVector):  # already a ListVector
        return ListVector(strList)
    if isinstance(strList, StrVector):  # already a StrVector
        return StrVector(strList)
    if isinstance(strList, str):  # str scalar -> one-element StrVector
        return StrVector([strList])
    if any(types.is_list_like(item) for item in strList):
        # Not the deepest level yet: recurse into each nested element.
        return ListVector([(None, StrListVector(item)) for item in strList])
    # Deepest level: a flat list(-like) of str types.
    return StrVector(list(strList))
def flatten(s):
    """Generator yielding each terminal item (depth-first) from a nested list.

    A non-iterable input is yielded as-is.
    """
    # BUG FIX: this previously called the bare name ``is_list_like``, which is
    # undefined in this module (only ``types`` is imported).  The resulting
    # NameError was swallowed by a bare ``except`` around the loop, so nested
    # inputs were yielded whole instead of flattened.
    try:
        items = iter(s)
    except TypeError:
        # Scalar (non-iterable) input: yield it unchanged.
        yield s
        return
    for t in items:
        if types.is_list_like(t):
            yield from flatten(t)
        else:
            yield t
def combine(*args):
    """Flatten each arg, and concat all into a single flat list."""
    out = []
    for arg in args:
        out.extend(flatten(arg))
    return out
class PyR:
    """Populate a data object, to be exposed to both R (using rpy2) or Python

    Parameters
    ----------
    item : (rpy2) R Object, or Python numpy.array-compatible
        input item can be of either Python or R object type
    names : StrVector, ListVector, str or list of str, default None
        explicitly provide labels in each dimension. Error checking is minimal:
        should have same dimensions as given by shape of object (or self.dim)

    Attributes
    ----------
    iloc : dict or numpy array
        internally, objects are stored as either numpy array, or dict of objects
        (when input was R ListVector or DataFrame, or Python dict).
        Should use other safer property getters to view object in target types:
        e.g. .frame (pandas), .ro (RObject), or .values (python dict or ndarray)
    dim : tuple of int
        dimensions of data objects
    names, rownames, colnames : StrVector

    Notes
    -----
    In R, matrices are column-major ordered (aka Fortran-like index order,
    with the first index changing fastest) although the R constructor
    matrix() accepts a boolean argument byrow that, when true, will build
    the matrix as if row-major ordered (aka C-like, which is also Python numpy
    default order, where the last axis index changes fastest)

    A suggested convention in python applications is to append '_' to R function
    names and '_r' to R objects, and capitalize initial letter of PyR instances.
    r['plot'] may need to explicitly set xlab='', ylab=''

    TODO: if hasattr('slots'), particularly 'ts' class, e.g. nile.slots.items()

    Examples
    --------
    from rpy2.robjects import r
    from rpy2.robjects.packages import importr
    amen_r = importr('amen')            # use R library
    c_ = r['c']                         # link R routines
    Nodevars = PyR(r['IR90s'].rx2['nodevars'])   # retrieve R data
    Gdp = Nodevars[:, 'gdp']                     # getitem subset with slice
    topgdp = Gdp.values > sorted(Gdp.py, reverse=True)[30]  # python calculations
    Dyadvars = PyR(r['IR90s'].rx2['dyadvars'])
    Y = Dyadvars[topgdp, topgdp, 'exports']      # getitem with (bool) indices
    Y.iloc = np.log(Y.iloc + 1)                  # update with python calcs
    """

    def __init__(self, item, names=None, verbose=False):
        """Make data instance from a python or R (rpy2) object input item"""
        # self._r = item   # archive the original data object (but not used)
        self.verbose = verbose
        self.dim = ()
        if hasattr(item, 'names'):        # store names, colnames, rownames
            self.names = item.names       # as StrVectors
        if names is not None:
            # Explicit labels override any names carried by the item itself.
            self.names = StrListVector(names)
        if hasattr(item, 'colnames'):
            self.colnames = item.colnames
        if hasattr(item, 'rownames'):
            self.rownames = item.rownames
        if isinstance(item, (Series, DataFrame)):
            self.rownames = StrVector(item.index)
        if isinstance(item, DataFrame):
            self.colnames = StrVector(item.columns)
        if isinstance(item, Series) and item.name is not None:
            self.colnames = StrVector([item.name])
        if isinstance(item, (ListVector, ro.vectors.DataFrame, dict)):
            try:  # convert to dict if dict-like (i.e. ListVector, R DataFrame)
                names = [self.names[i] if isinstance(self.names, StrVector)
                         else self.names[0][i] for i in range(len(item))]
            except:
                names = [k for k, v in item.items()]
            self.iloc = {n: PyR(v) for n, (k, v) in zip(names, item.items())}
            if verbose:
                print(f"PyR: dict (len={len(self.iloc)}){type(item)}")
        else:  # not dict-like, so convert to numpy array and apply shape dims
            self.iloc = np.array(item)
            if hasattr(item, 'dim'):
                self.dim = tuple(item.dim)
                if len(self.dim) > 1:
                    # R stores matrices column-major, hence order='F'.
                    self.iloc = self.iloc.reshape(tuple(item.dim), order='F')
            self.dim = self.iloc.shape
            # NOTE(review): the two inferences below read ``self.names``
            # unguarded -- an unnamed R object with >1 dims would raise
            # AttributeError here; confirm upstream always provides names.
            if (not hasattr(self, 'rownames') and len(self.iloc.shape) > 1
                    and self.names and isinstance(self.names, ListVector)):
                self.rownames = self.names[0]  # try to infer rownames
            if (not hasattr(self, 'colnames') and len(self.iloc.shape) > 1
                    and self.names and isinstance(self.names, ListVector)):
                self.colnames = self.names[1]  # try to infer colnames
            if verbose:
                print(f"PyR: ndarray {self.iloc.shape} {type(item)}")
        # TODO: WARNING self.names.shape (if hasattr and not null) via try
        # len(names)==len(dict) else len(names)=len(self.dim) or sum(self.dim)

    def __repr__(self):
        """str representation, preferably pretty print as pandas DataFrame"""
        if not isinstance(self.iloc, dict):
            if len(self.iloc.shape) <= 2:
                return str(self.frame)
        return str(self.iloc)

    @staticmethod
    def savefig(filename, display=True, ax=None, figsize=(12, 12)):
        """Save R graphics to file, or return R command. Optionally imshow"""
        s = "dev.copy(png, '{}'); dev.off()".format(filename)
        if display is not None:     # display=None just returns the command
            ro.r(s)
            if display:
                if not ax:
                    fig, ax = plt.subplots(clear=True, figsize=figsize)
                img = mpimg.imread(filename)
                ax.imshow(img, interpolation='nearest')
        return s

    def assign(self, obj):
        """Directly update internal data object (must be same numpy shape)"""
        if isinstance(obj, dict):
            assert(isinstance(self.iloc, dict) and len(self.iloc) == len(obj))
            self.iloc = obj
        else:
            obj = np.array(obj)
            assert(obj.shape == self.iloc.shape)
            self.iloc = obj

    @property
    def ro(self):
        """Expose a view as RObject, to be manipulated in R environment"""
        # Convert to R vector of correct data type
        if isinstance(self.iloc, dict):
            # BUG FIX: this branch previously iterated the dict (yielding its
            # keys), wrapped each KEY in PyR, and then fell through into the
            # dtype checks below, which crash on a dict.  The stored values
            # are already PyR instances, so convert those (keeping the names)
            # and skip the ndarray conversion with ``elif``.
            out = ListVector([(k, v.ro) for k, v in self.iloc.items()])
        elif types.is_float_dtype(self.iloc):
            out = FloatVector(self.iloc.reshape(-1, order='F'))
        elif types.is_integer_dtype(self.iloc):
            out = IntVector(self.iloc.reshape(-1, order='F'))
        else:
            out = StrVector(self.iloc.reshape(-1, order='F'))
        if len(self.dim) > 1:  # reshape to R Array if has non-trivial dim
            out = ro.r.array(out, dim=IntVector(self.dim))
        # Collect R object name attributes
        if hasattr(self, 'rownames'):
            out.rownames = StrVector(self.rownames)
        if hasattr(self, 'colnames'):
            out.colnames = StrVector(self.colnames)
        if hasattr(self, 'names'):
            out.names = ListVector(self.names) if isinstance(
                self.names, ListVector) else StrVector(self.names)
        return out

    @property
    def frame(self):
        """Try to expose a view as pandas DataFrame"""
        out = DataFrame(self.values)
        # Prefer explicit rownames/colnames over the generic names attribute.
        if hasattr(self, 'names') and isinstance(self.names, StrVector):
            if len(self.names) == len(out.columns):
                out.columns = list(self.names)
            if len(self.names) == len(out.index):
                out.index = list(self.names)
        if hasattr(self, 'rownames') and isinstance(self.rownames, StrVector):
            out.index = list(self.rownames)
        if hasattr(self, 'colnames') and isinstance(self.colnames, StrVector):
            out.columns = list(self.colnames)
        return out

    @property
    def values(self):
        """Expose view as python dict (when ListVector) or ndarray (when not)"""
        return ({k: v.values for k, v in self.iloc.items()}
                if isinstance(self.iloc, dict) else self.iloc)

    def __getitem__(self, args):
        """Returns copy of subset of data object from slice or index args"""
        try:
            if isinstance(self.iloc, dict):  # return item of dict
                # NOTE(review): this int branch looks inverted (it searches for
                # an int among string names) -- in practice callers index dict-
                # backed instances by name (e.g. Fit_SRM['BETA']); confirm.
                if isinstance(args, int):
                    try:
                        args = list(self.names).index(args)
                    except:
                        args = list(self.iloc.keys()).index(args)
                return self.iloc[args]
            # replace any str labels in args with its index in self.names
            if isinstance(args, tuple) and self.names is not None:
                args = tuple(self.index(a, i) for i, a in enumerate(args))
            # extract corresponding subset of names
            if self.names:
                names_ = deepcopy(self.names)
                names = ListVector(names_)
                for i in range(len(self.names)):
                    if isinstance(names_[i], StrVector):
                        s = np.array(names_[i])[args[i]]
                        names[i] = StrVector([s] if isinstance(s, str) else s)
            else:
                names = NULL
            # finally extract by looping over each dim; enables R-like indexing
            out = deepcopy(self.iloc)
            for i, arg in enumerate(args):
                a = [slice(None)] * len(args)
                a[i] = arg
                dims = len(out.shape)
                out = out[tuple(a)]
                if self.verbose:
                    print(i, out.shape, dims, tuple(a))
                if len(out.shape) < dims:  # if this dimension is flattened out
                    names = names[:i] + names[(i + 1):]
            return PyR(out, names=names)
        except Exception as err:
            # Chain the original error so the real cause is not lost.
            raise Exception(f"getitem: {args}") from err

    @property
    def nrow(self):
        """Length of first dimension, as R IntVector"""
        return IntVector([self.iloc.shape[0]])

    @property
    def ncol(self):
        """Length of second dimension, as R IntVector"""
        return IntVector([self.iloc.shape[1]])

    def index(self, s, axis=-1):
        """Helper method to lookup index/es of (list of) str label in names"""
        if isinstance(s, str):
            return list(self.names[axis]).index(s)
        elif types.is_list_like(s):
            return [self.index(t, axis=axis) for t in s]
        else:
            return s
#if __name__ == "__main__":
# Dead demo code below: both blocks are guarded by ``if False`` and never run.
if False:  # replicate Ch 1 Gaussian AME of Hoff (2018) "Amen" tutorial
    import numpy as np
    import numpy.ma as ma
    from numpy.ma import masked_invalid as valid
    import rpy2.robjects as ro
    from rpy2.robjects.packages import importr
    from finds.utils import combine
    from finds.pyR import PyR
    # Bind the R libraries and routines used below.
    stats_ro = importr('stats')
    base_ro = importr('base')
    amen_ro = importr('amen')
    utils_ro = importr('utils')
    matrix_ro = ro.r['matrix']
    t_ro = ro.r['t']
    anova_ro = ro.r['anova']
    lm_ro = ro.r['lm']
    ame_ro = ro.r['ame']  # default nscan=10000, odens=25 => 400 samples
    summary_ro = ro.r['summary']
    plot_ro = ro.r['plot']
    circplot_ro = ro.r['circplot']
    IR90s_ro = ro.r['IR90s']
    # Load GDP and exports data
    Nodevars = PyR(IR90s_ro.rx2['nodevars'])
    Gdp = Nodevars[:, 'gdp']
    Dyadvars = PyR(IR90s_ro.rx2['dyadvars'])
    topgdp = Gdp.values > sorted(Gdp.values, reverse=True)[30]
    Y = Dyadvars[topgdp, topgdp, 'exports']
    Y.assign(np.log(Y.values + 1))
    Y[:5,:4]
    # Simple ANOVA to show random effects
    rowcountry_ro = matrix_ro(Y.rownames, Y.nrow, Y.ncol)
    colcountry_ro = t_ro(rowcountry_ro)
    formula_ro = ro.Formula("c(Y) ~ c(Rowcountry) + c(Colcountry)")
    formula_ro.environment['Rowcountry'] = rowcountry_ro
    formula_ro.environment['Colcountry'] = colcountry_ro
    formula_ro.environment['Y'] = Y.ro
    fit_anova_ro = anova_ro(lm_ro(formula_ro))
    print(fit_anova_ro)
    # display exporter and imported effects
    muhat = np.nanmean(Y.ro)
    Ahat = PyR(Y.frame.mean(axis=1) - muhat, names=['Ahat'])
    print(Ahat.frame['Ahat'].sort_values(ascending=False)[:6])
    Bhat = PyR(Y.frame.mean(axis=0) - muhat, names=['Bhat'])
    print(Bhat.frame['Bhat'].sort_values(ascending=False)[:6])
    # But ignores corr of random effects, fundamental characteristic of dyads
    print(np.cov(Ahat.values, Bhat.values))
    print(np.corrcoef(Ahat.values, Bhat.values)[0,1])
    outer = Y.values - (muhat + np.add.outer(Ahat.values, Bhat.values))
    print(ma.cov(valid(combine(outer)), valid(combine(outer.T))).data)
    print(ma.corrcoef(valid(combine(outer)), valid(combine(outer.T)))[0,1])
    # Social Relations Model
    fit_SRM_ro = ame_ro(Y.ro, plot=False, print=False)
    Fit_SRM = PyR(fit_SRM_ro)
    _ = summary_ro(fit_SRM_ro)
    plot_ro(fit_SRM_ro)
    # Compare empirical and model estimates
    print(muhat, np.nanmean(Fit_SRM['BETA'].values))  # overall mean
    print(combine(np.cov(Ahat.values, Bhat.values))[:3])  # mean covariances
    vcmean = Fit_SRM['VC'][:, :4].frame.mean()  # posterior variance parms
    print(vcmean[:3])
    # Residual Dyadic Correlation
    print(vcmean['cab'] / (np.sqrt(vcmean['va']) * np.sqrt(vcmean['vb'])))
    print(ma.corrcoef(valid(combine(outer)), valid(combine(outer.T)))[0,1])
    print(np.mean(Fit_SRM['VC'][:, 3].values))
    # SRRM
    Xn = PyR(IR90s_ro.rx2('nodevars'))[topgdp, :]
    Xn.iloc[:, :2] = np.log(Xn.values[:, :2])
    Xd = PyR(IR90s_ro.rx2('dyadvars'))[topgdp, topgdp, np.array([0,2,3,4])]
    Xd.iloc[:, :, 2] = np.log(Xd.values[:, :, 2])
    fit_srrm_ro = ame_ro(Y.ro, Xd=Xd.ro, Xr=Xn.ro, Xc=Xn.ro,
                         plot=False, print=False)
    Fit_srrm = PyR(fit_srrm_ro)
    _ = summary_ro(fit_srrm_ro)
    plot_ro(fit_srrm_ro)
    gof = Fit_srrm['GOF'].frame.iloc[:1,:]  # actual in first row of gof
    gof.loc['mean', :] = np.nanmean(Fit_srrm['GOF'].values[1:,:], axis=0)
    gof.loc['std', :] = np.nanstd(Fit_srrm['GOF'].values[1:,:], axis=0)
    print(gof)
    # OLS
    fit_rm_ro = ame_ro(Y.ro, Xd=Xd.ro, Xr=Xn.ro, Xc=Xn.ro, print=False,
                       plot=False, rvar=False, cvar=False, dcor=False)
    _ = summary_ro(fit_rm_ro)
    plot_ro(fit_rm_ro)
    # SRRM with latent factor multiplicative effects
    fit_ame2_ro = ame_ro(Y.ro, Xd=Xd.ro, Xr=Xn.ro, Xc=Xn.ro, R=2,
                         plot=False, print=False)
    Fit_ame2 = PyR(fit_ame2_ro)
    _ = summary_ro(fit_ame2_ro)
    plot_ro(fit_ame2_ro)
    # plots
    circplot_ro(Y.ro, U=fit_ame2_ro.rx2['U'], V=fit_ame2_ro.rx2['V'],
                row_names=Y.rownames, col_names=Y.colnames,
                plotnames=True, pscale=FloatVector([1.5]))

# Scratchpad examples adapted from the rpy2 documentation (never executed).
if False:
    from rpy2.robjects.packages import importr
    from rpy2.robjects import Formula, Environment
    import rpy2.robjects as ro
    from rpy2.robjects import FloatVector, ListVector, IntVector, StrVector, NULL
    stats = importr('stats')
    base = importr('base')
    # Create matrix in R
    v = ro.FloatVector([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])
    m = ro.r.matrix(v, nrow = 2)
    m = ro.r['matrix'](v, nrow = 2)
    ctl = FloatVector([4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14])
    trt = FloatVector([4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69])
    group = base.gl(2, 10, 20, labels = ["Ctl","Trt"])
    weight = ctl + trt
    ro.globalenv["weight"] = weight
    ro.globalenv["group"] = group
    lm_D9 = stats.lm("weight ~ group")
    print(stats.anova(lm_D9))
    lm_D90 = stats.lm("weight ~ group - 1")
    print(base.summary(lm_D90))
    res = ro.StrVector(['abc', 'def'])
    v = ro.FloatVector([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])
    m = ro.r['matrix'](v, nrow = 2)
    letters = ro.r['letters']
    rcode = 'paste(%s, collapse="-")' % (letters.r_repr())
    res = ro.r(rcode)
| 41.319905 | 81 | 0.592418 | 2,394 | 17,437 | 4.254804 | 0.197995 | 0.025918 | 0.004418 | 0.010799 | 0.181033 | 0.124583 | 0.086295 | 0.070489 | 0.066758 | 0.057137 | 0 | 0.017397 | 0.281356 | 17,437 | 421 | 82 | 41.418052 | 0.795467 | 0.258932 | 0 | 0.189003 | 0 | 0 | 0.042604 | 0.002692 | 0 | 0 | 0 | 0.004751 | 0.010309 | 1 | 0.04811 | false | 0 | 0.085911 | 0 | 0.206186 | 0.079038 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7671429d3e62d964d15b30a186818bb87a16fae4 | 17,533 | py | Python | macrokit/expression.py | hanjinliu/macro-kit | 61ebc38ea1086337d5a7477c6e896af0220f8a71 | [
"BSD-3-Clause"
] | 2 | 2021-11-02T09:53:49.000Z | 2021-11-10T10:33:05.000Z | macrokit/expression.py | hanjinliu/macro-kit | 61ebc38ea1086337d5a7477c6e896af0220f8a71 | [
"BSD-3-Clause"
] | null | null | null | macrokit/expression.py | hanjinliu/macro-kit | 61ebc38ea1086337d5a7477c6e896af0220f8a71 | [
"BSD-3-Clause"
] | null | null | null | from copy import deepcopy
from numbers import Number
from types import ModuleType
from typing import Any, Callable, Iterable, Iterator, overload, Union, List, Tuple, Dict
from ._validator import validator
from .head import EXEC, Head
from ._symbol import Symbol
def str_(expr: Any, indent: int = 0):
    """Render ``expr`` as source text, indented by ``indent`` spaces."""
    if not isinstance(expr, Expr):
        return " " * indent + str(expr)
    renderer = _STR_MAP[expr.head]
    return renderer(expr, indent)
def str_lmd(expr: Any, indent: int = 0):
    """Convert expr into a proper lambda function definition.

    ``expr`` is expected to render as ``"<lambda>(arg1, ...)"``; the result is
    ``"lambda arg1, ..."`` prefixed with ``indent`` spaces.
    """
    s = str(expr)
    # BUG FIX: ``str.lstrip("<lambda>(")`` strips a *character set*, not a
    # prefix, so argument names beginning with any of "<lambda>(" (e.g. "a",
    # "b", "d") were eaten as well.  Slice off the literal prefix/suffix.
    prefix, suffix = "<lambda>(", ")"
    if s.startswith(prefix) and s.endswith(suffix):
        call = s[len(prefix):-len(suffix)]
    else:
        # Fall back to the historical behavior for unexpected renderings.
        call = s.lstrip("<lambda>(").rstrip(")")
    return " " * indent + f"lambda {call}"
def rm_par(s: str):
    """Strip one pair of surrounding parentheses, if present."""
    return s[1:-1] if s[0] == "(" and s[-1] == ")" else s
def sjoin(sep: str, iterable: Iterable[Any], indent: int = 0):
    """Join expressions into a single string."""
    rendered = (str_(item, indent) for item in iterable)
    return sep.join(rendered)
def _s_(n: int) -> str:
"""Return spaces."""
return " " * n
def _comma(a, b):
return f"{a}, {b}".rstrip(", ")
# Rendering dispatch table: one formatter per Head.  Each lambda receives the
# Expr ``e`` and the current indent level ``i`` and returns source text; block
# heads indent their bodies by a further 4 spaces.
_STR_MAP: Dict[Head, Callable[["Expr", int], str]] = {
    Head.empty: lambda e, i: "",
    Head.getattr: lambda e, i: f"{str_(e.args[0], i)}.{str_(e.args[1])}",
    Head.getitem: lambda e, i: f"{str_(e.args[0], i)}[{str_(e.args[1])}]",
    Head.del_: lambda e, i: f"{_s_(i)}del {str_(e.args[0])}",
    Head.call: lambda e, i: f"{str_(e.args[0], i)}({sjoin(', ', e.args[1:])})",
    Head.assign: lambda e, i: f"{str_(e.args[0], i)} = {e.args[1]}",
    Head.kw: lambda e, i: f"{str_(e.args[0])}={str_(e.args[1])}",
    Head.assert_: lambda e, i: f"{_s_(i)}assert {_comma(str_(e.args[0]), str_(e.args[1]))}",  # noqa
    Head.comment: lambda e, i: f"{_s_(i)}# {e.args[0]}",
    Head.unop: lambda e, i: f"{_s_(i)}({str_(e.args[0])}{str_(e.args[1])})",
    Head.binop: lambda e, i: f"{_s_(i)}({str_(e.args[1])} {str_(e.args[0])} {str_(e.args[2])})",  # noqa
    Head.aug: lambda e, i: f"{_s_(i)}{str_(e.args[1])} {str_(e.args[0])}= {str_(e.args[2])}",  # noqa
    Head.block: lambda e, i: sjoin("\n", e.args, i),
    Head.function: lambda e, i: f"{_s_(i)}def {str_(e.args[0])}:\n{str_(e.args[1], i+4)}",  # noqa
    Head.lambda_: lambda e, i: f"{str_lmd(e.args[0], i)}: {str_(e.args[1])}",  # noqa
    Head.return_: lambda e, i: f"{_s_(i)}return {sjoin(', ', e.args)}",
    Head.raise_: lambda e, i: f"{_s_(i)}raise {str_(e.args[0])}",
    Head.if_: lambda e, i: f"{_s_(i)}if {rm_par(str_(e.args[0]))}:\n{str_(e.args[1], i+4)}\n{_s_(i)}else:\n{str_(e.args[2], i+4)}",  # noqa
    Head.elif_: lambda e, i: f"{_s_(i)}if {rm_par(str_(e.args[0]))}:\n{str_(e.args[1], i+4)}\n{_s_(i)}else:\n{str_(e.args[2], i+4)}",  # noqa
    Head.for_: lambda e, i: f"{_s_(i)}for {rm_par(str_(e.args[0]))}:\n{str_(e.args[1], i+4)}",  # noqa
    Head.while_: lambda e, i: f"{_s_(i)}while {rm_par(str_(e.args[0]))}:\n{str_(e.args[1], i+4)}",  # noqa
    Head.annotate: lambda e, i: f"{str_(e.args[0], i)}: {str_(e.args[1])}",
}
class Expr:
    """An expression object for metaprogramming."""

    # Class-level counter: total number of Expr instances ever created.
    n: int = 0

    def __init__(self, head: Head, args: Iterable[Any]):
        self._head = Head(head)
        self._args = list(map(self.__class__.parse_object, args))
        validator(self._head, self._args)
        self.number = self.__class__.n
        self.__class__.n += 1

    @property
    def head(self) -> Head:
        """Return the head of Expr."""
        return self._head

    @property
    def args(self) -> List[Union["Expr", Symbol]]:
        """Return args of Expr."""
        return self._args

    def __repr__(self) -> str:
        """Return Julia-like repr."""
        s = str(self)
        if len(s) > 1:
            s = rm_par(s)
        s = s.replace("\n", "\n ")
        return f":({s})"

    def __str__(self) -> str:
        """Return a string style of the expression."""
        return str_(self)

    def __eq__(self, expr) -> bool:
        """Equals only if identical Expr is given."""
        return (isinstance(expr, Expr)
                and self.head == expr.head
                and self.args == expr.args)

    def _dump(self, ind: int = 0) -> str:
        """Recursively expand expressions until it reaches value/kw expression."""
        out = [f"head: {self.head.name}\n{' '*ind}args:\n"]
        for i, arg in enumerate(self.args):
            if isinstance(arg, Symbol):
                value = arg.name
            else:
                value = arg._dump(ind + 4)
            out.append(f"{i:>{ind+2}}: {value}\n")
        return "".join(out)

    def dump(self) -> str:
        """Dump expression into a tree."""
        s = self._dump()
        return s.rstrip("\n") + "\n"

    def copy(self) -> "Expr":
        """Copy Expr object."""
        # Always copy object deeply.
        return deepcopy(self)

    def at(self, *indices: int) -> Union[Symbol, "Expr"]:
        """
        Easier way of tandem get-item.

        Helper function to avoid ``expr.args[0].args[0] ...``. Also, exception
        descriptions during this function call. ``expr.at(i, j, k)`` is equivalent
        to ``expr.args[i].args[j].args[k]``.
        """
        now: Union[Symbol, Expr] = self
        for i in indices:
            if isinstance(now, Symbol):
                # Fixed typo in the error message ("encounted").
                raise TypeError(f"Indexing encountered Symbol at position {i}.")
            try:
                now = now._args[i]
            except IndexError as e:
                # Chain the cause so the original traceback is preserved.
                raise type(e)(f"list index out of range at position {i}.") from e
        return now

    def eval(self, _globals: dict = None, _locals: dict = None):
        """
        Evaluate or execute macro as an Python script.

        Either ``eval`` or ``exec`` will get called, which determined by its header.
        Calling this function is much safer than those not-recommended usage of
        ``eval`` or ``exec``.

        Parameters
        ----------
        _globals : dict[Symbol, Any], optional
            Mapping from global variable symbols to their values.
        _locals : dict, optional
            Updated variable namespace. Will be a mapping from symbols to values.
        """
        # BUG FIX: the defaults used to be mutable ``{}`` literals; ``exec``
        # writes assigned variables into ``_locals``, so the shared default
        # dict leaked state between unrelated ``eval`` calls.
        if _globals is None:
            _globals = {}
        if _locals is None:
            _locals = {}
        _glb: Dict[str, Any] = {
            (sym.name if isinstance(sym, Symbol) else sym): v
            for sym, v in _globals.items()
        }
        # use registered modules
        if Symbol._module_symbols:
            format_dict: Dict[Symbol, Union[Symbol, Expr]] = {}
            for id_, sym in Symbol._module_symbols.items():
                mod = Symbol._module_map[sym.name]
                vstr = f"var{hex(id_)}"
                format_dict[sym] = Symbol(vstr)
                _glb[vstr] = mod
            # Modules will not be registered as alias ("np" will be "numpy" in macro).
            # To avoid name collision, it is safer to rename them to "var0x...".
            # NOTE: rebinds the local ``self`` to a formatted copy; the original
            # expression object is untouched.
            self = self.format(format_dict)
        if self.head in EXEC:
            return exec(str(self), _glb, _locals)
        else:
            return eval(str(self), _glb, _locals)

    @classmethod
    def parse_method(
        cls,
        obj: Any,
        func: Callable,
        args: Tuple[Any, ...] = None,
        kwargs: dict = None,
    ) -> "Expr":
        """Parse ``obj.func(*args, **kwargs)``."""
        method = cls(head=Head.getattr, args=[symbol(obj), func])
        return cls.parse_call(method, args, kwargs)

    @classmethod
    def parse_init(
        cls,
        obj: Any,
        init_cls: type = None,
        args: Tuple[Any, ...] = None,
        kwargs: dict = None,
    ) -> "Expr":
        """Parse ``obj = init_cls(*args, **kwargs)``."""
        if init_cls is None:
            init_cls = type(obj)
        sym = symbol(obj)
        return cls(Head.assign, [sym, cls.parse_call(init_cls, args, kwargs)])

    @classmethod
    def parse_call(
        cls,
        func: Union[Callable, Symbol, "Expr"],
        args: Tuple[Any, ...] = None,
        kwargs: dict = None,
    ) -> "Expr":
        """Parse ``func(*args, **kwargs)``."""
        if args is None:
            args = ()
        elif not isinstance(args, tuple):
            raise TypeError("args must be a tuple")
        if kwargs is None:
            kwargs = {}
        elif not isinstance(kwargs, dict):
            raise TypeError("kwargs must be a dict")
        inputs = [func] + cls._convert_args(args, kwargs)
        return cls(head=Head.call, args=inputs)

    @classmethod
    def parse_setitem(cls, obj: Any, key: Any, value: Any) -> "Expr":
        """Parse ``obj[key] = value``."""
        target = cls(Head.getitem, [symbol(obj), symbol(key)])
        return cls(Head.assign, [target, symbol(value)])

    @classmethod
    def parse_setattr(cls, obj: Any, key: str, value: Any) -> "Expr":
        """Parse ``obj.key = value``."""
        target = cls(Head.getattr, [symbol(obj), Symbol(key)])
        return cls(Head.assign, [target, symbol(value)])

    @classmethod
    def _convert_args(cls, args: Tuple[Any, ...], kwargs: dict) -> list:
        """Merge positional args and keyword args into a flat argument list."""
        inputs = []
        for a in args:
            inputs.append(a)
        for k, v in kwargs.items():
            inputs.append(cls(Head.kw, [Symbol(k), symbol(v)]))
        return inputs

    @classmethod
    def parse_object(cls, a: Any) -> Union[Symbol, "Expr"]:
        """Convert an object into a macro-type."""
        return a if isinstance(a, cls) else symbol(a)

    def issetattr(self) -> bool:
        """Determine if an expression is in the form of ``setattr(obj, key value)``."""
        if self.head == Head.assign:
            target = self.args[0]
            if isinstance(target, Expr) and target.head == Head.getattr:
                return True
        return False

    def issetitem(self) -> bool:
        """Determine if an expression is in the form of ``setitem(obj, key value)``."""
        if self.head == Head.assign:
            target = self.args[0]
            if isinstance(target, Expr) and target.head == Head.getitem:
                return True
        return False

    def iter_args(self) -> Iterator[Symbol]:
        """Recursively iterate along all the arguments."""
        for arg in self.args:
            if isinstance(arg, Expr):
                yield from arg.iter_args()
            elif isinstance(arg, Symbol):
                yield arg
            else:
                # args are normalized in __init__, so anything else is a bug.
                raise RuntimeError(f"{arg} (type {type(arg)})")

    def iter_expr(self) -> Iterator["Expr"]:
        """
        Recursively iterate over all the nested Expr, until reaching to non-nested Expr.

        This method is useful in macro generation.
        """
        yielded = False
        for arg in self.args:
            if isinstance(arg, self.__class__):
                yield from arg.iter_expr()
                yielded = True
        if not yielded:
            yield self

    @overload
    def format(self, mapping: dict, inplace: bool = False) -> "Expr":
        ...

    @overload
    def format(
        self,
        mapping: Iterable[Tuple[Any, Union[Symbol, "Expr"]]],
        inplace: bool = False,
    ) -> "Expr":
        ...

    def format(self, mapping, inplace=False) -> "Expr":
        """
        Format expressions in the macro.

        Just like formatting method of string, this function can replace certain symbols
        to others.

        Parameters
        ----------
        mapping : dict or iterable of tuples
            Mapping from objects to symbols or expressions. Keys will be converted to
            symbol. For instance, if you used ``arr``, a numpy.ndarray as an input of an
            macro-recordable function, that input will appear like 'var0x1...'. By
            calling ``format([(arr, "X")])`` then 'var0x1...' will be substituted to
            'X'.
        inplace : bool, default is False
            Expression will be overwritten if true.

        Returns
        -------
        Expression
            Formatted expression.
        """
        if isinstance(mapping, dict):
            mapping = mapping.items()
        mapping = _check_format_mapping(mapping)
        if not inplace:
            self = self.copy()
        return self._unsafe_format(mapping)

    def _unsafe_format(self, mapping: dict) -> "Expr":
        """In-place recursive substitution; assumes ``mapping`` is validated."""
        for i, arg in enumerate(self.args):
            if isinstance(arg, Expr):
                arg._unsafe_format(mapping)
            else:
                try:
                    new = mapping[arg]
                except KeyError:
                    pass
                else:
                    self.args[i] = new
        return self
def _check_format_mapping(mapping_list: Iterable) -> Dict[Symbol, Union[Symbol, Expr]]:
    """Validate a (key, value) mapping list and normalize it into a dict."""
    out: Dict[Symbol, Union[Symbol, Expr]] = {}
    for pair in mapping_list:
        if len(pair) != 2:
            raise ValueError("Wrong style of mapping list.")
        k, v = pair
        if isinstance(v, Expr) and v.head in EXEC:
            raise ValueError("Cannot replace a symbol to a non-evaluable expression.")
        key = symbol(k)
        if isinstance(key, Expr):
            raise TypeError(
                f"Object of type {type(k).__name__} returns Expr type, thus cannot"
                "be used as a format template."
            )
        # A plain-string replacement for a non-Symbol key names a variable;
        # everything else goes through the normal symbol() conversion.
        treat_as_name = isinstance(v, str) and not isinstance(k, Symbol)
        out[key] = Symbol(v) if treat_as_name else symbol(v)
    return out
def make_symbol_str(obj: Any):
    """Build an id-based variable name for ``obj``.

    Hexadecimal ids are easier to distinguish by eye.
    """
    obj_id = id(obj)
    if obj is not None:
        # Remember that this object is represented by an id-based variable.
        Symbol._variables.add(obj_id)
    return f"var{hex(obj_id)}"
def symbol(obj: Any, constant: bool = True) -> Union[Symbol, Expr]:
    """
    Make a proper Symbol or Expr instance from any objects.

    Unlike Symbol(...) constructor, which directly make a Symbol from a string, this
    function checks input type and determine the optimal string to represent the
    object. Especially, Symbol("xyz") will return ``:xyz`` while symbol("xyz") will
    return ``:'xyz'``.

    Parameters
    ----------
    obj : Any
        Any object from which a Symbol will be created.
    constant : bool, default is True
        If true, object is interpreted as a constant like 1 or "a". Otherwise object is
        converted to a variable that named with its ID.

    Returns
    -------
    Symbol or Expr
    """
    # Already a macro object: pass through unchanged.
    if isinstance(obj, (Symbol, Expr)):
        return obj
    obj_type = type(obj)
    obj_id = id(obj)
    # 1) id-based variable (requested, or previously registered as one).
    if not constant or obj_id in Symbol._variables:
        seq = make_symbol_str(obj)
        constant = False
    # 2) user-registered type converters (exact type, then known subclass).
    elif obj_type in Symbol._type_map:
        seq = Symbol._type_map[obj_type](obj)
    elif obj_type in Symbol._subclass_map:
        parent_type = Symbol._subclass_map[obj_type]
        seq = Symbol._type_map[parent_type](obj)
    # 3) built-in containers: render elements recursively.
    elif isinstance(obj, tuple):
        if len(obj) == 1:
            # length 1 tuple have to be written as (a,) instead of (a).
            seq = f"({symbol(obj[0])},)"
        else:
            seq = "(" + ", ".join(str(symbol(a)) for a in obj) + ")"
        if obj_type is not tuple:
            seq = obj_type.__name__ + seq
    elif isinstance(obj, list):
        seq = "[" + ", ".join(str(symbol(a)) for a in obj) + "]"
        if obj_type is not list:
            seq = f"{obj_type.__name__}({seq})"
    elif isinstance(obj, dict):
        seq = "{" + ", ".join(f"{symbol(k)}: {symbol(v)}" for k, v in obj.items()) + "}"
        if obj_type is not dict:
            seq = f"{obj_type.__name__}({seq})"
    elif isinstance(obj, set):
        if len(obj) == 0:
            # "{}" would be a dict literal, so spell the empty set explicitly.
            if obj_type is set:
                seq = "set()"
            else:
                seq = f"{obj_type.__name__}()"
        else:
            seq = "{" + ", ".join(str(symbol(a)) for a in obj) + "}"
            if obj_type is not set:
                seq = f"{obj_type.__name__}({seq})"
    elif isinstance(obj, frozenset):
        seq = ", ".join(str(symbol(a)) for a in obj)
        if obj_type is frozenset:
            seq = f"frozenset({{{seq}}})"
        else:
            seq = f"{obj_type.__name__}({{{seq}}})"
    elif isinstance(obj, Number):  # int, float, bool, ...
        seq = obj
    elif isinstance(obj, ModuleType):
        # Register module to the default namespace of Symbol class. This function is
        # called every time a module type object is converted to a Symbol because users
        # always have to pass the module object to the global variables when calling
        # eval function.
        if obj_id in Symbol._module_symbols.keys():
            sym = Symbol._module_symbols[obj_id]
        else:
            *main, seq = obj.__name__.split(".")
            sym = Symbol(seq, obj_id)
            sym.constant = True
            if len(main) == 0:
                # submodules should not be registered
                Symbol._module_symbols[obj_id] = sym
                Symbol._module_map[seq] = obj
        return sym
    elif hasattr(obj, "__name__"):
        seq = obj.__name__
    else:
        # Last resort: scan registered converters for any matching base class,
        # caching the hit; otherwise fall back to an id-based variable name.
        for k, func in Symbol._type_map.items():
            if isinstance(obj, k):
                seq = func(obj)
                Symbol._subclass_map[obj_type] = k
                break
        else:
            seq = make_symbol_str(obj)
            constant = False
    if isinstance(seq, (Symbol, Expr)):
        # The output of register_type can be a Symbol or Expr
        return seq
    else:
        sym = Symbol(seq, obj_id)
        sym.constant = constant
        return sym
| 34.996008 | 141 | 0.55022 | 2,327 | 17,533 | 4.01676 | 0.141384 | 0.021932 | 0.029956 | 0.019258 | 0.26233 | 0.213437 | 0.193859 | 0.170857 | 0.159623 | 0.130523 | 0 | 0.006158 | 0.305367 | 17,533 | 500 | 142 | 35.066 | 0.76131 | 0.205156 | 0 | 0.233533 | 0 | 0.026946 | 0.131064 | 0.045828 | 0.011976 | 0 | 0 | 0 | 0.002994 | 1 | 0.10479 | false | 0.002994 | 0.020958 | 0.002994 | 0.248503 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
767327bdf4abcd18ebae3661fcfae40a42034896 | 1,984 | py | Python | tests/test_helpers.py | MoralCode/hyp | 9dcfe999f7d78dc37b787523a8137d0fb008e586 | [
"MIT"
] | null | null | null | tests/test_helpers.py | MoralCode/hyp | 9dcfe999f7d78dc37b787523a8137d0fb008e586 | [
"MIT"
] | null | null | null | tests/test_helpers.py | MoralCode/hyp | 9dcfe999f7d78dc37b787523a8137d0fb008e586 | [
"MIT"
] | null | null | null | import pytest
from hyp.helpers import *
from hyp.base import BaseResponder, BaseAdapter
from fixtures import CommentSerializer
class TestBuildResourceIdentifier(object):
    def test_resource_identifier_creation(self):
        """The identifier is a plain dict carrying only type and id."""
        expected = {'id': 1, 'type': 'item'}
        assert build_resource_identifier("item", 1) == expected
class TestBuildMeta(object):
    """Tests for build_meta."""

    def test_meta(self):
        # build_meta should pass the mapping through unchanged.
        payload = {"hello": "world"}
        assert build_meta(payload) == payload
class TestBuildLinksObject(object):
    """Tests for build_link and build_links_object."""

    class ResponderClass(BaseResponder):
        TYPE = 'comments'
        SERIALIZER = CommentSerializer
        ADAPTER = BaseAdapter

    def test_build_string_link(self):
        # A bare URL comes back as-is.
        url = "https://www.example.com/url/for/self/"
        assert build_link(url) == url

    def test_build_object_link(self):
        # With meta supplied, the link becomes a JSON:API link object.
        url = "https://www.example.com/url/for/self/"
        meta = {"hello": "world"}
        assert build_link(url, meta=meta) == {"href": url, "meta": meta}

    def test_creating_string_links(self):
        spec = {
            'comments': {
                'responder': self.ResponderClass,
                'href': 'http://example.com/comments/'
            }
        }
        assert build_links_object(spec) == {"comments": "http://example.com/comments/"}

    def test_creating_object_links(self):
        meta = { "whatever": "data", "here": True }
        spec = {
            'comments': {
                'responder': self.ResponderClass,
                'href': 'http://example.com/comments/',
                'meta': meta
            }
        }
        expected = {
            "comments": {
                "href": "http://example.com/comments/",
                "meta": meta
            }
        }
        assert build_links_object(spec) == expected
7674eec2eb1ee16bc3d53e5062a165bf1ba196f4 | 1,761 | py | Python | djangoapp/eye/tasks.py | jaysridhar/ca_eye | 75ef2a1bb29f01a3d1ff262e6c6d876a2bc06f52 | [
"MIT"
] | null | null | null | djangoapp/eye/tasks.py | jaysridhar/ca_eye | 75ef2a1bb29f01a3d1ff262e6c6d876a2bc06f52 | [
"MIT"
] | null | null | null | djangoapp/eye/tasks.py | jaysridhar/ca_eye | 75ef2a1bb29f01a3d1ff262e6c6d876a2bc06f52 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import json, datetime
from django.forms.models import model_to_dict
from celery import shared_task
from celery.utils.log import get_task_logger
from eye.models import Event
# Celery task logger for this module, with short aliases used below.
logger = get_task_logger(__name__)
debug = logger.debug
warning = logger.warning
# A validator receives the event_data for the event, and should return
# None if the event is valid, an error message otherwise.
def page_validator(event_data):
    """Validate a 'page interaction' event; currently every payload is valid."""
    return None
def form_validator(event_data):
    """Validate a 'form interaction' event; currently every payload is valid."""
    return None
def unknown_validator(event_data):
    """Fallback validator: unknown categories always fail validation."""
    return 'event not validated'
# Dispatch table: event category -> validator function. save_event() falls
# back to the 'unknown' entry for categories not listed here.
validators = {
    'page interaction': page_validator,
    'form interaction': form_validator,
    'unknown': unknown_validator,
}
@shared_task
def save_event(event_data):
    """Validate and persist a single tracked event.

    Args:
        event_data: dict with keys 'timestamp' ('%Y-%m-%d %H:%M:%S.%f'),
            'category', 'name', 'session_id' and 'data' (JSON-serializable).

    Returns:
        {'success': <saved event as dict>} on success,
        {'error': <message>} if anything went wrong.
    """
    try:
        timestamp = datetime.datetime.strptime(event_data['timestamp'], '%Y-%m-%d %H:%M:%S.%f')
        # A future timestamp is recorded as a validation error, not a failure.
        errors = ['future timestamp'] if timestamp > datetime.datetime.now() else []
        # Unknown categories fall back to the 'unknown' validator (always fails).
        validator = validators.get(event_data['category'], validators['unknown'])
        res = validator(event_data)
        if res:
            errors.append(res)
        # Validation errors are stored on the event rather than rejecting it.
        error = ','.join(errors) if errors else None
        event = Event(session_id=event_data['session_id'],
                      category=event_data['category'],
                      name=event_data['name'],
                      data=json.dumps(event_data['data']),
                      error_mesg=error,
                      timestamp=timestamp)
        event.save()
        return {'success': model_to_dict(event)}
    except Exception as ex:
        # Log event_data, not `event`: `event` is unbound when the failure
        # happens before Event(...) is constructed (e.g. a bad timestamp),
        # and the original code then raised NameError inside this handler,
        # masking the real exception.
        warning(f'save_event({event_data}) error: {str(ex)}', exc_info=ex)
        return {'error': str(ex)}
| 33.226415 | 94 | 0.645088 | 221 | 1,761 | 4.959276 | 0.366516 | 0.114964 | 0.04927 | 0.065693 | 0.109489 | 0.109489 | 0.05292 | 0 | 0 | 0 | 0 | 0 | 0.245315 | 1,761 | 52 | 95 | 33.865385 | 0.82468 | 0.080068 | 0 | 0.095238 | 0 | 0 | 0.124304 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.119048 | 0.071429 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76778d40724af377f54ecf35f06d64c1d270b79d | 6,250 | py | Python | accelerator/test_methods/a_test_hashlabel.py | drougge/accelerator | f99b2550a84c79cadb032acf0d2d60bccf75bf0d | [
"Apache-2.0"
] | null | null | null | accelerator/test_methods/a_test_hashlabel.py | drougge/accelerator | f99b2550a84c79cadb032acf0d2d60bccf75bf0d | [
"Apache-2.0"
] | null | null | null | accelerator/test_methods/a_test_hashlabel.py | drougge/accelerator | f99b2550a84c79cadb032acf0d2d60bccf75bf0d | [
"Apache-2.0"
] | null | null | null | ############################################################################
# #
# Copyright (c) 2019-2020 Carl Drougge #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
############################################################################
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
description = r'''
Test that hashlabel does what it says in both split_write and hashcheck.
Then test that rehashing gives the expected result, and that using the
wrong hashlabel without rehashing is not allowed.
'''
from accelerator.dataset import DatasetWriter, Dataset
from accelerator.extras import DotDict
from accelerator.gzwrite import typed_writer
from accelerator.error import DatasetUsageError
all_data = list(zip(range(10000), reversed(range(10000))))
def prepare(params):
    """Create one DatasetWriter per test variant and return them in a DotDict."""
    assert params.slices >= 2, "Hashing won't do anything with just one slice"
    writers = DotDict()
    cases = (
        ("unhashed_manual", None),        # manually interleaved
        ("unhashed_split", None),         # split_write interleaved
        ("up_checked", "up"),             # hashed on up using dw.hashcheck
        ("up_split", "up"),               # hashed on up using split_write
        ("down_checked", "down"),         # hashed on down using dw.hashcheck
        ("down_discarded", "down"),       # hashed on down using discarding writes
        ("down_discarded_list", "down"),  # hashed on down using discarding list writes
        ("down_discarded_dict", "down"),  # hashed on down using discarding dict writes
    )
    for name, hashlabel in cases:
        writer = DatasetWriter(name=name, hashlabel=hashlabel)
        writer.add("up", "int32")
        writer.add("down", "int32")
        writers[name] = writer
    return writers
def analysis(sliceno, prepare_res, params):
    """Feed all_data to every writer, each via its own slice-routing scheme."""
    writers = prepare_res
    # These three writers silently drop rows whose hash targets another slice.
    writers.down_discarded.enable_hash_discard()
    writers.down_discarded_list.enable_hash_discard()
    writers.down_discarded_dict.enable_hash_discard()
    for ix, (up, down) in enumerate(all_data):
        # hashcheck-based writers: only write rows belonging to this slice.
        if writers.up_checked.hashcheck(up):
            writers.up_checked.write(up, down)
        if writers.down_checked.hashcheck(down):
            writers.down_checked.write(up, down)
        # Manual interleaving: round-robin rows over slices.
        if ix % params.slices == sliceno:
            writers.unhashed_manual.write(up, down)
        # Discarding writers accept every row; wrong-slice rows are dropped.
        writers.down_discarded.write(up, down)
        writers.down_discarded_list.write_list([up, down])
        writers.down_discarded_dict.write_dict(dict(up=up, down=down))
    # Verify that we are not allowed to write in the wrong slice without
    # enable_hash_discard (slice 0's hash value fails hashcheck here).
    if not writers.up_checked.hashcheck(0):
        good = True
        for fn, a in (
            ("write", (0, 0,)),
            ("write_list", ([0, 0],)),
            ("write_dict", (dict(up=0, down=0),)),
        ):
            try:
                getattr(writers.up_checked, fn)(*a)
                good = False
            except Exception:
                pass
            assert good, "%s allowed writing in wrong slice" % (fn,)
def synthesis(prepare_res, params, job, slices):
    """Finish all writers, then verify the hashing/interleaving invariants."""
    dws = prepare_res
    # The split writers route rows to slices themselves (unlike analysis,
    # which wrote per-slice).
    for dw in (dws.unhashed_split, dws.up_split,):
        w = dw.get_split_write_list()
        for row in all_data:
            w(row)
    for dw in dws.values():
        dw.finish()
    # Verify that the different ways of writing gave the same result
    for names in (
        ("unhashed_split", "unhashed_manual"),
        ("up_checked", "up_split"),
        ("down_checked", "down_discarded", "down_discarded_list", "down_discarded_dict"),
    ):
        dws = {name: job.dataset(name) for name in names}
        assert dws == {name: Dataset((params.jobid, name)) for name in names}, "Old style Dataset((params.jobid, name)) broken"
        for sliceno in range(slices):
            data = {name: list(dws[name].iterate(sliceno)) for name in names}
            # All datasets in the group must match the first one, slice by slice.
            good = data[names[0]]
            for name in names[1:]:
                assert data[name] == good, "%s doesn't match %s in slice %d" % (names[0], name, sliceno,)
    # Verify that both up and down hashed on the expected column
    hash = typed_writer("int32").hash
    for colname in ("up", "down"):
        ds = job.dataset(colname + "_checked")
        for sliceno in range(slices):
            for value in ds.iterate(sliceno, colname):
                assert hash(value) % slices == sliceno, "Bad hashing on %s in slice %d" % (colname, sliceno,)
    # Verify that up and down are not the same, to catch hashing
    # not actually hashing.
    up = list(job.dataset("up_checked").iterate(None))
    down = list(job.dataset("down_checked").iterate(None))
    assert up != down, "Hashlabel did not change slice distribution"
    # And check that the data is still the same.
    assert sorted(up) == sorted(down) == all_data, "Hashed datasets have wrong data"
    # Verify that rehashing works.
    # (Can't use sliceno None, because that won't rehash, and even if it did
    # the order wouldn't match. Order doesn't even match in the rehashed
    # individual slices.)
    up = job.dataset("up_checked")
    down = job.dataset("down_checked")
    unhashed = job.dataset("unhashed_manual")
    for sliceno in range(slices):
        a = list(up.iterate(sliceno))
        b = list(down.iterate(sliceno, hashlabel="up", rehash=True))
        c = list(unhashed.iterate(sliceno, hashlabel="up", rehash=True))
        assert sorted(a) == sorted(b) == sorted(c), "Rehashing is broken (slice %d)" % (sliceno,)
    # And finally verify that we are not allowed to specify the wrong hashlabel
    good = True
    try:
        up.iterate(None, hashlabel="down")
        good = False
    except DatasetUsageError:
        pass
    try:
        unhashed.iterate(None, hashlabel="down")
        good = False
    except DatasetUsageError:
        pass
    assert good, "Iteration allowed on the wrong hashlabel"
| 41.666667 | 121 | 0.63776 | 829 | 6,250 | 4.703257 | 0.256936 | 0.04001 | 0.024622 | 0.016414 | 0.174147 | 0.11721 | 0.044627 | 0.030777 | 0.030777 | 0 | 0 | 0.008115 | 0.23104 | 6,250 | 149 | 122 | 41.946309 | 0.803163 | 0.30336 | 0 | 0.192661 | 0 | 0 | 0.219716 | 0.005395 | 0 | 0 | 0 | 0 | 0.082569 | 1 | 0.027523 | false | 0.027523 | 0.06422 | 0 | 0.100917 | 0.009174 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
767984c17c7e1ae7fc09cec9023e8d9ed59983d1 | 11,322 | py | Python | {{cookiecutter.project_slug}}/{{cookiecutter.app_name}}/utils/augmented.py | mhavel/cookiecutter-python3-quickstart | c1d29b4b8dbdc2a53d67699c3e8c0bde786697cf | [
"MIT"
] | null | null | null | {{cookiecutter.project_slug}}/{{cookiecutter.app_name}}/utils/augmented.py | mhavel/cookiecutter-python3-quickstart | c1d29b4b8dbdc2a53d67699c3e8c0bde786697cf | [
"MIT"
] | null | null | null | {{cookiecutter.project_slug}}/{{cookiecutter.app_name}}/utils/augmented.py | mhavel/cookiecutter-python3-quickstart | c1d29b4b8dbdc2a53d67699c3e8c0bde786697cf | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
"""
Augmented Python base-class, and utils
"""
from typing import Union
from pathlib import Path
from copy import deepcopy
from .mapping import dict_deep_update, dict_default, dict_get_first_of, KeyNotFound
from ..io import json
# ============
# Dictionary
# ============
# Method names resolved via dict.__getattribute__ (bypassing key lookup).
_AUGMENTED_DICT_SUPER_METHODS = {
    '__call__', '__dir__', '__repr__', 'get_from_path', 'set_from_path', 'get_first_of',
    'get_first_of_path', 'deep_update', 'pop_from_path', '__setstate__', '__getstate__',
    'from_object', 'from_mapping', 'from_sequence', 'deep_get', 'deepcopy'
}

# Names forced through object.__getattribute__ (currently none).
_AUGMENTED_DICT_OBJECT_METHODS = {
}


class AugmentedDict(dict):
    """
    An augmented Dict class that allows:
        - easy access to nested dicts using a string path (and a separator, '.' by default)
        - access (set/get) to values as attributes
        - deep update
        - pickling
        - copy constructors from objects, mappings, sequences

    Inspired partly from: http://code.activestate.com/recipes/577887-a-simple-namespace-class/

    Examples:
        >>> d = AugmentedDict({'a': {'b': 2}, 3: {4: 'here'}})
        >>> d('a', 'b')
        2
        >>> d['a.b']
        2
        >>> d['a'].b
        2
        >>> d.a.b
        2
        >>> d[('a', 'b')]
        2
        >>> d(3, 4)
        'here'
        >>> d[(3, 4)]
        'here'
        >>> d('a', 'c')
        None
        >>> d['a.c']
        Traceback (most recent call last):
        ...
        KeyError: 'key path `a -> c`'
        >>> d('a', 'c', raise_error=True)
        Traceback (most recent call last):
        ...
        KeyError: 'key path `a -> c`'
        >>> d('c', 'a')
        None
    """
    def __call__(self, *args, **kwargs):
        """
        Resolve a possibly deep key.

        Args:
            *args: keys path leading to the value to be retrieved

        Keyword Args:
            default: default returned value if the key does not exist (default: None)
            raise_error: if True, raise an error if a key does not exist (default: False)
            pop: if True, remove the key if it does exist (default: False)

        Returns:
            retrieved value
        """
        if not args:
            return self
        _v = self
        default = kwargs.pop('default', None)
        raise_err = kwargs.pop('raise_error', False)
        pop = kwargs.pop('pop', False)
        _p = _v  # parent of _v, needed for pop
        k = args[0]
        path = []
        for k in args:
            path.append(k)
            if k in _v:
                _p = _v
                _v = _v[k]
            elif not raise_err:
                return dict_default(default, key=f'key path `{" -> ".join(path)}`')
            else:
                raise KeyError(f'key path `{" -> ".join(path)}`')
        if pop:
            _p.pop(k)
        return _v

    deep_get = __call__

    def __getitem__(self, key):
        """Access dict values by key, key path string, or key tuple/list."""
        try:
            value = super().__getitem__(key)
        except KeyError:
            if isinstance(key, str):
                value = super().__getattribute__('get_from_path')(key, raise_error=True)
            elif isinstance(key, (tuple, list)):
                value = self(*key, raise_error=True)
            else:
                raise
        if isinstance(value, dict):
            # For mixed recursive assignment (e.g. `a["b"].c = value` to work
            # as expected, all dict-like values must themselves be _AttrDicts.
            # The "right way" to do this would be to convert to an _AttrDict on
            # assignment, but that requires overriding both __setitem__
            # (straightforward) and __init__ (good luck). An explicit type
            # check is used here instead of EAFP because exceptions would be
            # frequent for hierarchical data with lots of nested dicts.
            self[key] = value = self.__class__(value)
        return value

    def __dir__(self):
        return list(self)

    def __repr__(self):
        return '%s(%s)' % (type(self).__name__, super().__repr__())

    def __getattribute__(self, name):
        if name in _AUGMENTED_DICT_SUPER_METHODS:
            return super().__getattribute__(name)
        elif name in _AUGMENTED_DICT_OBJECT_METHODS:
            return object.__getattribute__(self, name)
        try:
            # priority given to keys in the augmented dict
            return self[name]
        except KeyError:
            # otherwise try accessing the attribute using regular object method
            try:
                return super().__getattribute__(name)
            except AttributeError:
                raise AttributeError(f'"{name}" is not a valid key path / attribute of the {type(self)} instance')

    def __setattr__(self, key, value):
        """Set dict values as attributes; a dotted name is treated as a key path."""
        if '.' in key:
            super().__getattribute__('set_from_path')(key, value, sep='.')
        else:
            self[key] = value
        return

    def __contains__(self, key):
        # Only strings can encode a key path. Non-string keys (ints, tuples
        # used as plain keys, ...) must go straight to dict.__contains__;
        # the original code did `'.' in key` unconditionally and raised
        # TypeError for any non-string key.
        if isinstance(key, str) and '.' in key:
            try:
                self.get_from_path(key, raise_error=True)
                return True
            except KeyError:
                return False
        else:
            return super().__contains__(key)

    def deepcopy(self):
        """Return a deep copy of this AugmentedDict."""
        return self.__class__(deepcopy(self))

    copy_ = deepcopy

    def get_from_path(self, e, default=None, sep='.', raise_error=False):
        """
        Same as __call__, but one string argument is used to infer the keys
        path, separated by `sep` (default: .).

        Args:
            e (str): key path string
            default (=None): default value if not found
            sep (str='.'): separator for keys in path
            raise_error (bool=False): if True, raise an error if no value found under given key path

        Returns:
            retrieved value
        """
        assert isinstance(e, str), 'first argument must be a string representing the keys path'
        return self.__call__(*e.split(sep), default=default, raise_error=raise_error)

    get_ = get_from_path

    def set_from_path(self, e, value, sep='.'):
        """
        Set a value under the keys path, creating intermediate dicts as needed.

        Args:
            e: string keys path
            value: value
            sep: separator for keys (default: .)

        Raises:
            ValueError: if an intermediate key exists but is not a dict.
        """
        assert isinstance(e, str), 'first argument must be a string representing the keys path'
        _v = self
        keys = e.split(sep)
        p = []
        for k in keys[:-1]:
            p.append(k)
            if k not in _v:
                _v[k] = {}
                _v = _v[k]
            elif not isinstance(_v[k], dict):
                raise ValueError('keys path [%s] exists but is not a dictionary, as required' % sep.join(p))
            else:
                _v = _v[k]
        k = keys[-1]
        _v[k] = value

    set_ = set_from_path

    def get_first_of(self, *args, return_key=False, **kwargs):
        """
        Return the first value for which the key is in the dict.

        Args:
            *args: list of possible keys (cannot be key paths)
            return_key (bool=False): if True, also return the matching key
        """
        return dict_get_first_of(self, *args, return_key=return_key, **kwargs)

    def get_first_of_path(self, *paths, return_key=False, **kwargs):
        """Same as `get_first_of` method, but you can provide key paths too."""
        knf = KeyNotFound()  # sentinel: distinguishes "missing" from stored None

        def _get(_d, _k):
            try:
                return _d[_k]
            except KeyError:
                return knf

        v = knf
        n = len(paths)
        i = 0
        while isinstance(v, KeyNotFound) and i < n:
            k = paths[i]
            v = _get(self, k)
            i += 1
        if isinstance(v, KeyNotFound):
            if 'default' in kwargs:
                _def = dict_default(kwargs['default'])
                if return_key:
                    return None, _def
                else:
                    return _def
            else:
                raise KeyError('none of the provided keys found in the dict')
        if return_key:
            return k, v
        else:
            return v

    def deep_update(self, other=None, handlers=None, **kwargs):
        """
        Do a deep update of the dictionary.

        Args:
            other (dict): the dict with updated values
            handlers (dict): a dict of functions (values) to handle other type (keys) of values (eg. list, ...)
            **kwargs: alternative dict with updated values (processed after `other`)
        """
        d = self
        if other is not None:
            if hasattr(other, 'items'):
                d = dict_deep_update(d, other, handlers)
            else:
                d = dict_deep_update(d, dict(other), handlers)
        if kwargs:
            d = dict_deep_update(d, kwargs, handlers)
        self.update(d)

    update_ = deep_update

    def pop_from_path(self, e, sep='.', **kwargs):
        """
        Same as `get_from_path`, but remove the key after retrieving its value.

        Args:
            e: string keys path
            sep: separator for keys (default: .)

        Keyword Args:
            default: default value if not found. If not provided, will raise an error (as with the .pop method of dict)

        Returns:
            retrieved value
        """
        assert isinstance(e, str), 'first argument must be a string representing the keys path'
        o = dict(pop=True)
        if 'default' in kwargs:
            o['default'] = kwargs.pop('default')
        else:
            o['raise_error'] = True
        return self.__call__(*e.split(sep), **o)

    pop_ = pop_from_path

    # ------------------
    # pickling

    def __setstate__(self, state):
        # `iteritems()` was Python 2 only and raised AttributeError here;
        # this module targets Python 3 (see shebang), so use items().
        for k, v in state.items():
            self[k] = v

    def __getstate__(self):
        # dict(self), not dict(**self): keyword expansion requires string
        # keys and broke state extraction for dicts with e.g. int keys.
        return dict(self)

    def __reduce__(self):
        # Same non-string-key fix as __getstate__.
        return self.__class__, (dict(self),)

    # ------------------------
    # "copy constructors"

    @classmethod
    def from_object(cls, obj, names=None):
        """Build from an object's attributes (all of dir(obj) unless `names` given)."""
        if names is None:
            names = dir(obj)
        ns = {name: getattr(obj, name) for name in names}
        return cls(ns)

    @classmethod
    def from_mapping(cls, ns, names=None):
        """Build from a mapping, optionally restricted to `names` (missing names raise KeyError)."""
        if names:
            ns = {name: ns[name] for name in names}
        return cls(ns)

    @classmethod
    def from_sequence(cls, seq, names=None):
        """Build from a (name, value) sequence, optionally filtered by `names`."""
        if names:
            seq = {name: val for name, val in seq if name in names}
        return cls(seq)

    @classmethod
    def from_json(cls, path: Union[str, Path], names=None):
        """Build from a JSON file, optionally keeping only the keys in `names`."""
        x = json.read(path)
        if names:
            x = {name: val for name, val in x.items() if name in names}
        return cls(x)
| 30.6 | 119 | 0.52791 | 1,355 | 11,322 | 4.211808 | 0.191144 | 0.018223 | 0.012266 | 0.003504 | 0.186087 | 0.133695 | 0.099176 | 0.079902 | 0.079902 | 0.079902 | 0 | 0.003471 | 0.363805 | 11,322 | 369 | 120 | 30.682927 | 0.788838 | 0.32927 | 0 | 0.252747 | 0 | 0 | 0.10087 | 0 | 0 | 0 | 0 | 0 | 0.016484 | 1 | 0.120879 | false | 0 | 0.027473 | 0.027473 | 0.351648 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
767aaa8954e182f98f632decf5c0e09c5fc43060 | 16,358 | py | Python | Emulators/vmEmulator.py | JetStarBlues/Nand-2-Tetris | c27b5c2ac659f1edb63d36d89bf87e226bc5672c | [
"MIT"
] | null | null | null | Emulators/vmEmulator.py | JetStarBlues/Nand-2-Tetris | c27b5c2ac659f1edb63d36d89bf87e226bc5672c | [
"MIT"
] | null | null | null | Emulators/vmEmulator.py | JetStarBlues/Nand-2-Tetris | c27b5c2ac659f1edb63d36d89bf87e226bc5672c | [
"MIT"
] | null | null | null | # ========================================================================================
#
# Description:
#
# Emulates execution of VM code.
#
# Attribution:
#
# Code by www.jk-quantized.com
#
# Redistribution and use of this code in source and binary forms must retain
# the above attribution notice and this condition.
#
# ========================================================================================
'''
Purpose:
Emulation at a usable execution speed.
Description:
Faster than emulating binary code.
Infact, does not emulate nor use the Hack Computer's architecture.
Instead it executes the VM code using your machine's processor's architecture.
While running binary code is cycle accurate,
it is too slow in emulation (see cpuEmulator.py).
I'm sure there are optimizations to be made that can improve the performance
of the binary/CPU emulation. If you have any ideas, be sure to share
them because the best case scenario is for the binary emulator to execute at a
usable speed. Till then, this exists as an inbetween.
'''
# TODO - add stepping, GUI debugger (registers etc)
# Imports --------------------------
# Built ins
import re
import time
import yappi
# Hack computer
import Components
from commonHelpers import *
from .pythonNBitArithmetic import *
# Configure computer ---------------

# VMX file containing all necessary program code
programPath = ''

debugPath = 'Debug/VMEmulator/'  # Folder where logs go
debugMode = False
runYappiProfile = False

# Setup computer -------------------

nBits = Components.N_BITS          # word size of the emulated machine
ALU = NBitArithmetic( nBits )      # software ALU used by operation()

PC = 0           # program counter (index into ROM)
PC_prev = 0      # PC of the previously executed instruction (for debug dumps)
PC_jump = False  # set by goto/ifgoto/call/ret so the run loop skips PC += 1

RAM = [ 0 ] * ( 2 ** 16 )
ROM = []  # Psuedo ROM, loaded with VM code

clock = None
io = None
startTime = None
sysHalt = None  # NOTE(review): compared against PC in breakpoint(); presumably set elsewhere to Sys.halt's address — confirm

yieldToExternal = False  # Suspend tick

# Memory map boundaries, taken from the Components module.
static_segment_start = Components.STATIC_START
static_segment_end = Components.STATIC_END
stack_segment_start = Components.STACK_END  # NOTE(review): start bound sourced from STACK_END — verify against Components
heap_segment_start = Components.HEAP_START
heap_segment_end = Components.HEAP_END

# Setup pointers -------------------
# RAM addresses of the VM's standard registers.
SP = 0
LCL = 1
ARG = 2
THIS = 3
THAT = 4
TEMP = 5
# GP = 13
STATIC = 16
# IO Helpers ------------------------
class RAMWrapper():
    """Adapter giving a plain list the clocked read/write interface of a RAM chip."""

    def __init__(self, ram):
        self.ram = ram

    def read(self, address):
        """Return the word stored at address (combinational, no clock gating)."""
        return self.ram[address]

    def write(self, clk, x, write, address):
        """Store x at address, but only on a high clock with write-enable set."""
        if clk != 1 or write != 1:
            return  # gated off: no-op
        self.ram[address] = x
# VM Helpers ------------------------
# unaryOps = [ 'not', 'neg' ]
# binaryOps = [ 'and', 'or', 'add', 'sub', 'xor', 'lsl', 'lsr' ]
# comparisonOps = [ 'eq', 'gt', 'lt', 'gte', 'lte', 'ne' ]
# operations = [ unaryOps + binaryOps + comparisonOps ]
# Instruction-name sets; membership tests run once per executed instruction,
# and set lookup is O(1).
unaryOps = set( [ 'not', 'neg' ] )
binaryOps = set( [ 'and', 'or', 'add', 'sub', 'xor', 'lsl', 'lsr', 'mul', 'div' ] )
comparisonOps = set( [ 'eq', 'gt', 'lt', 'gte', 'lte', 'ne' ] )
operations = unaryOps | binaryOps | comparisonOps # Set marginally faster to lookup than list

addressLookup = {}  # function/label name -> ROM address (filled by extractProgram)
staticLookup = {}   # globally-unique static reference -> RAM address (filled by extractProgram)
# VM instructions -------------------
def executeInstruction(cmd):
    """Dispatch one parsed VM command (a list of tokens) to its handler."""
    op = cmd[0]
    if op in operations:
        operation(op)
    elif op == 'push':
        push(cmd[1], cmd[2], cmd)
    elif op == 'pop':
        pop(cmd[1], cmd[2], cmd)
    elif op == 'goto':
        goto(cmd[1])
    elif op == 'if-goto':
        ifgoto(cmd[1])
    elif op == 'label':
        label(cmd[1])
    elif op == 'call':
        call(cmd[1], cmd[2])
    elif op == 'function':
        function(cmd[1], cmd[2])
    elif op == 'return':
        ret()
    else:
        raise Exception("Don't know how to execute the command - {}".format(cmd))
def push(seg, index, cmd):
    """Push a value from the given memory segment onto the stack.

    cmd[3] carries the globally-unique static reference for 'static' pushes.
    """
    top = RAM[SP]
    if seg == 'constant':
        value = index
    elif seg == 'pointer':
        value = RAM[THIS] if index == 0 else RAM[THAT]
    elif seg == 'static':
        value = RAM[staticLookup[cmd[3]]]
    elif seg == 'temp':
        value = RAM[TEMP + index]
    elif seg == 'argument':
        value = RAM[RAM[ARG] + index]
    elif seg == 'local':
        value = RAM[RAM[LCL] + index]
    elif seg == 'this':
        value = RAM[RAM[THIS] + index]
    elif seg == 'that':
        value = RAM[RAM[THAT] + index]
    else:
        raise Exception('Unknown segment - {}'.format(seg))
    RAM[top] = value
    # Update SP
    RAM[SP] += 1
    # if RAM[ SP ] >= heap_segment_start:
    # 	raiseException( 'Stack overflow' )
def pop(seg, index, cmd):
    """Pop the top of the stack into the given memory segment.

    cmd[3] carries the globally-unique static reference for 'static' pops.
    """
    top = RAM[SP] - 1
    value = RAM[top]
    if seg == 'pointer':
        RAM[THIS if index == 0 else THAT] = value
    elif seg == 'static':
        RAM[staticLookup[cmd[3]]] = value
    elif seg == 'temp':
        RAM[TEMP + index] = value
    elif seg == 'argument':
        RAM[RAM[ARG] + index] = value
    elif seg == 'local':
        RAM[RAM[LCL] + index] = value
    elif seg == 'this':
        RAM[RAM[THIS] + index] = value
    elif seg == 'that':
        RAM[RAM[THAT] + index] = value
    else:
        raise Exception('Unknown segment - {}'.format(seg))
    # Update SP
    RAM[SP] -= 1
def operation(op):
    """Apply an ALU operation to the top one or two stack values.

    Every name in unaryOps/binaryOps/comparisonOps maps to an NBitArithmetic
    method called '_' + name (e.g. 'add' -> ALU._add), so dispatch is done
    via getattr instead of an if/elif chain per opcode.
    """
    if op in unaryOps:
        # Operate on the top value in place; SP is unchanged.
        top = RAM[SP] - 1
        RAM[top] = getattr(ALU, '_' + op)(RAM[top])
    elif op in binaryOps:
        ia = RAM[SP] - 2
        ib = RAM[SP] - 1
        RAM[ia] = getattr(ALU, '_' + op)(RAM[ia], RAM[ib])
        # Update SP - two operands consumed, one result pushed.
        RAM[SP] -= 1
    elif op in comparisonOps:
        ia = RAM[SP] - 2
        ib = RAM[SP] - 1
        result = getattr(ALU, '_' + op)(RAM[ia], RAM[ib])
        # True is all ones (negativeOne) so that bitwise `not` of a
        # comparison result yields all zeros (false).
        RAM[ia] = negativeOne if result else 0
        # Update SP
        RAM[SP] -= 1
def goto(loc):
    """Unconditional jump: point PC at the ROM address registered for loc."""
    global PC, PC_jump
    PC = addressLookup[loc]
    PC_jump = True  # tell the run loop not to auto-increment PC this tick
def ifgoto(loc):
    """Conditional jump: pop the stack; jump to loc if the value is non-zero."""
    global PC, PC_jump
    top = RAM[SP] - 1
    if RAM[top] != 0:
        PC = addressLookup[loc]
        PC_jump = True
    # Update SP - the condition value is consumed whether or not we jump.
    RAM[SP] -= 1
def call(fxName, nArgs):
    """Implement the VM 'call' command: save the caller's frame, set up the
    callee's ARG/LCL/SP pointers, and jump to the function."""
    base = RAM[SP]
    # Saved frame: return address followed by the caller's segment pointers.
    frame = (PC + 1, RAM[LCL], RAM[ARG], RAM[THIS], RAM[THAT])
    for offset, word in enumerate(frame):
        RAM[base + offset] = word
    # Set ARG pointer - arg0 sits nArgs below the old stack top.
    RAM[ARG] = base - nArgs
    # Locals (and the callee's stack) start just above the saved frame.
    RAM[LCL] = base + len(frame)
    RAM[SP] = base + len(frame)
    # Goto function
    goto(fxName)
def ret():
    """Implement the VM 'return' command: copy the return value into arg0,
    restore the caller's segment pointers from the saved frame (laid out by
    call()), and jump back to the saved return address."""
    global PC
    global PC_jump
    global yieldToExternal

    # Save current LCL pointer
    curLCL = RAM[ LCL ]

    # Save return address (stored 5 words below LCL by call())
    retAddr = RAM[ curLCL - 5 ]

    # Copy return value into arg0
    addr_a = RAM[ ARG ]
    addr_r = RAM[ SP ] - 1
    RAM[ addr_a ] = RAM[ addr_r ]

    # Reposition SP for caller (to just after return value)
    RAM[ SP ] = addr_a + 1

    # Restore segment pointers of caller (reverse order of call()'s saves)
    curLCL -= 1
    RAM[ THAT ] = RAM[ curLCL ]
    curLCL -= 1
    RAM[ THIS ] = RAM[ curLCL ]
    curLCL -= 1
    RAM[ ARG ] = RAM[ curLCL ]
    curLCL -= 1
    RAM[ LCL ] = RAM[ curLCL ]

    # Jump to return position
    PC = retAddr
    PC_jump = True

    # Returning from a native OS wrapper resumes the normal tick loop.
    yieldToExternal = False  # temp...
def label( loc ): pass
def function(fxName, nLocals):
    """Implement the VM 'function' command: zero the callee's locals, bump SP
    past them, and hand off to a native wrapper if one is registered."""
    global yieldToExternal
    # print( 'curFx - ', fxName )
    # Init locals to zeros
    base = RAM[LCL]
    for offset in range(nLocals):
        RAM[base + offset] = 0
    RAM[SP] += nLocals
    # If exists, execute python equivalent
    wrapper = OSWrappers.get(fxName)
    if wrapper is not None:
        yieldToExternal = True  # suspend the VM tick while native code runs
        wrapper()
# OS Wrappers -----------------------
# Sys ---
def Sys_wait():
    """Native replacement for the OS routine Sys.wait: sleep for the number
    of milliseconds passed as arg0, then push a 0 return value and return."""
    # Retrieve args ---
    argBase = RAM[ ARG ]
    duration = RAM[ argBase ]

    # Subroutine body ---
    # (The string below is the Jack-level equivalent of the check that follows.)
    '''
    if ( duration <= 0 ) {
        Sys.error( 1 );
        // Sys.raiseException( 'Sys.wait duration must be greater than zero' );
    }
    '''
    if duration <= 0:
        print( 'ERROR: Sys.wait duration must be greater than zero' )
        # Halt program
        # NOTE(review): haltOnError is not defined in this file - presumably
        # comes from `from commonHelpers import *`; confirm.
        haltOnError()
        return  # no return value is pushed on the error path

    # print( 'About to sleep for {} ms'.format( duration ) )
    time.sleep( duration / 1000 ) # convert msec to sec

    # Return ---
    push( 'constant', 0, None )
    ret()
# ---
# VM function names whose bodies are replaced by native Python
# implementations (checked by function() on every 'function' command).
OSWrappers = {
    'Sys.wait' : Sys_wait
}
# Load program ----------------------
# Matches everything on a line up to a '//' comment or the line terminator.
# Note: a final line with no newline and no comment yields no match at all.
cmdPattern = '''
^ # from beginning of string
.*? # select all characters until
(?=\/\/|[\r\n]) # reach start of a comment or the string's end
'''
cmdPattern = re.compile(cmdPattern, re.X)


def extractCmd(line):
    """Strip comments and whitespace from one line of VM source.

    Returns:
        The command tokens split on single spaces, or None when the line
        holds no command (blank line, comment-only line, or no match).

    Bug fixed: an empty match (blank/comment-only line) used to fall through
    to ''.split(' ') and return [''], which extractProgram treated as a real
    command - appending a bogus [''] entry to ROM and shifting every
    subsequent instruction address.
    """
    found = re.search(cmdPattern, line)  # select everything that is not a comment
    if not found:
        return None
    cmd = found.group(0).strip()  # remove leading and trailing whitespace
    if not cmd:
        # Blank or comment-only line: nothing to execute here.
        return None
    return cmd.split(' ')  # split on spaces
def extractProgram(inputFilePath):
    """Load a .vmx file into the pseudo-ROM, filling addressLookup with
    function/label addresses and staticLookup with static-variable slots."""
    addr = 0          # ROM address of the next appended command
    curFx = ''        # fully-qualified name of the function being parsed
    curClass = ''     # class part of curFx, used to scope static refs
    freeAddress = static_segment_start  # next unassigned static RAM slot
    with open(inputFilePath, 'r') as file:
        for line in file:
            cmd = extractCmd(line)
            if cmd:
                cmdType = cmd[0]
                if cmdType == 'function':
                    curFx = cmd[1]
                    curClass = curFx.split('.')[0]
                    addressLookup[cmd[1]] = addr
                    cmd[2] = int(cmd[2])  # cast nLocals to int
                    ROM.append(cmd)
                elif cmdType == 'label' or cmdType == 'goto' or cmdType == 'if-goto':
                    # Make labels globally unique by prefixing the function name
                    newLabel = '{}_{}'.format(curFx, cmd[1])
                    if cmdType == 'label':
                        addressLookup[newLabel] = addr
                    ROM.append([cmdType, newLabel])
                elif cmdType == 'push' or cmdType == 'pop':
                    cmd[2] = int(cmd[2])  # cast index to int
                    if cmd[1] == 'static':
                        # Make static references globally unique
                        if len(cmd) == 4:  # 'push/pop static index className' vs 'push/pop static index'
                            className = cmd[3]
                        else:
                            className = curClass
                        refName = '{}_{}'.format(className, cmd[2])
                        if refName not in staticLookup:
                            # First sighting: allocate the next static RAM slot
                            if freeAddress <= static_segment_end:
                                staticLookup[refName] = freeAddress
                                freeAddress += 1
                            else:
                                raise Exception('Ran out of static space')
                        if len(cmd) == 4:  # 'push/pop static index className' vs 'push/pop static index'
                            cmd[3] = refName
                        else:
                            cmd += [refName]
                    ROM.append(cmd)
                elif cmdType == 'call':
                    cmd[2] = int(cmd[2])  # cast nArgs to int
                    ROM.append(cmd)
                else:
                    ROM.append(cmd)
                addr += 1
# Debug -----------------------------
def updateWithDebug():
    """Run one update step, then stop the clock and dump state when the
    breakpoint condition is hit."""
    # NOTE(review): update() is not defined in this part of the module -
    # presumably defined later in the file; confirm.
    update()

    if breakpoint():
        clock.stop()
        # print( 'Breakpoint reached' )
        print( 'Breakpoint reached after {} clock cycles'.format( clock.currentCycle ) )
        print( 'Took {} seconds to reach breakpoint'.format( time.time() - startTime ) )
        debug2File()
def breakpoint():
    """Return True when execution should halt for inspection (currently:
    when PC reaches the Sys.halt address).

    NOTE(review): this shadows the builtin breakpoint(); renaming would
    require touching the callers.
    """
    # pass
    return PC == sysHalt
    # return PC == addressLookup[ 'GFX.fillRect' ]
    # return clock.currentCycle == 384381
def debug2File():

	'''Dump the machine state (last instruction, pointers, static segment,
	   stack, and heap) to a file named after the current clock cycle.
	'''

	filePath = debugPath + str( clock.currentCycle )

	with open( filePath, 'w' ) as file:

		# Last executed instruction
		file.write( '{} ------------'.format( PC_prev ) + '\n' )
		file.write( ' '.join( map( str, ROM[ PC_prev ] ) ) + '\n' )
		file.write( '' + '\n' )

		# Pointer / register region (RAM[0..15])
		file.write( 'SP   {}'.format( RAM[ 0 ] ) + '\n' )
		file.write( 'LCL  {}'.format( RAM[ 1 ] ) + '\n' )
		file.write( 'ARG  {}'.format( RAM[ 2 ] ) + '\n' )
		file.write( 'THIS {}'.format( RAM[ 3 ] ) + '\n' )
		file.write( 'THAT {}'.format( RAM[ 4 ] ) + '\n' )
		file.write( 'TMP0 {}'.format( RAM[ 5 ] ) + '\n' )
		file.write( 'TMP1 {}'.format( RAM[ 6 ] ) + '\n' )
		file.write( 'TMP2 {}'.format( RAM[ 7 ] ) + '\n' )
		file.write( 'TMP3 {}'.format( RAM[ 8 ] ) + '\n' )
		file.write( 'TMP4 {}'.format( RAM[ 9 ] ) + '\n' )
		file.write( 'TMP5 {}'.format( RAM[ 10 ] ) + '\n' )
		file.write( 'TMP6 {}'.format( RAM[ 11 ] ) + '\n' )
		file.write( 'TMP7 {}'.format( RAM[ 12 ] ) + '\n' )
		file.write( 'GP0  {}'.format( RAM[ 13 ] ) + '\n' )
		file.write( 'GP1  {}'.format( RAM[ 14 ] ) + '\n' )
		file.write( 'GP2  {}'.format( RAM[ 15 ] ) + '\n' )
		file.write( '' + '\n' )

		# Static segment
		file.write( 'Static' + '\n' )
		for i in range( static_segment_start, stack_segment_start ):
			file.write( '\t{:<3} {}'.format( i, RAM[ i ] ) + '\n' )
		file.write( '' + '\n' )

		# Stack (up to, and marking, the current stack pointer)
		sp = RAM[ 0 ]
		file.write( 'Stack' + '\n' )
		for i in range( stack_segment_start, sp ):
			file.write( '\t{:<4} {}'.format( i, RAM[ i ] ) + '\n' )
		file.write( '\t{:<4} .. ({})'.format( sp, RAM[ sp ] ) + '\n' )
		file.write( '' + '\n' )

		# Heap
		file.write( 'Heap' + '\n' )
		for i in range( heap_segment_start, heap_segment_end + 1 ):
			file.write( '\t{:<5} {}'.format( i, RAM[ i ] ) + '\n' )
		file.write( '' + '\n' )
def dumpROMnAddresses():

	'''Write the flashed ROM and the generated label/static address maps
	   to files under debugPath for offline inspection.
	'''

	# Dump ROM
	with open( debugPath + 'romDump', 'w' ) as file:

		for e in ROM:

			file.write( ' '.join( map( str, e ) ) + '\n' )

	# Dump addresses
	with open( debugPath + 'addressDump', 'w' ) as file:

		# Dump generated label addresses, ordered by address
		for kv in sorted( addressLookup.items(), key = lambda x : x[ 1 ] ):

			file.write( '{:<5} - {}\n'.format( kv[ 1 ], kv[ 0 ] ) )

		file.write( '\n\n' )

		# Dump generated static addresses, ordered by address
		for kv in sorted( staticLookup.items(), key = lambda x : x[ 1 ] ):

			file.write( '{:<3} - {}\n'.format( kv[ 1 ], kv[ 0 ] ) )
# Computer --------------------------
def haltOnError():

	'''Force the machine to Sys.halt and yield control, dumping debug
	   state first when debugMode is on. Used as an error handler.
	'''

	global PC
	global yieldToExternal

	PC = sysHalt            # end program
	yieldToExternal = True  # prevent tick

	if debugMode:

		debug2File()

	update()
def setup():

	'''Initialize the emulator: seed RAM pointers, flash the program into
	   ROM, create the clock (with the appropriate callback), and start IO.
	'''

	global clock
	global io
	global startTime
	global sysHalt

	if not Components.PERFORMANCE_MODE:

		raise Exception( 'The VM Emulator only works when GC.PERFORMANCE_MODE is True' )

	# Setup RAM (initial pointer values)
	RAM[ SP ] = 256
	RAM[ LCL ] = 256
	RAM[ ARG ] = 256
	RAM[ THIS ] = 9999
	RAM[ THAT ] = 9999

	# Setup ROM
	startTime = time.time()

	extractProgram( programPath )

	print( 'Completed ROM flash. Took {0:.2f} seconds.'.format( time.time() - startTime ) )

	if debugMode:

		# Dump ROM and addresses
		dumpROMnAddresses()

	# Retrieve location of Sys.halt so update() can stop the clock there
	sysHalt = addressLookup[ 'Sys.halt' ]

	# Initialize clock
	clock = Components.Clock()

	# Setup callbacks
	if debugMode:

		clock.callbackRising = updateWithDebug

	else:

		clock.callbackRising = update

	# Initialize IO
	io = Components.IO( nBits, RAMWrapper( RAM ) )
def tick():

	'''Execute one fetch/execute cycle and advance the program counter
	   (unless the executed instruction requested a jump).
	'''

	global PC
	global PC_prev
	global PC_jump

	PC_prev = PC  # helps with debugging

	# Fetch instruction
	instruction = ROM[ PC ]

	# Execute instruction
	executeInstruction( instruction )

	# Increment PC (a jump sets PC itself and raises PC_jump)
	if PC_jump == False:

		PC += 1

	else:

		PC_jump = False

	''' Kinda hacky, workaround for different clocks.
	    Make IO screen updates run on CPU clock.
	'''
	io.updateScreen()
def update():

	'''Clock rising-edge callback: tick the CPU (unless yielded), handle
	   IO-driven exit, and stop the clock when Sys.halt is reached.
	'''

	if not yieldToExternal:

		tick()

	# Handle exit via IO
	if io.hasExited:

		if debugMode:

			debug2File()

		clock.stop()

		print( 'See you later!' )

		# Profile... temp
		if runYappiProfile:

			yappi.get_func_stats().print_all()

	# Stop running when reach Sys.halt
	if PC == sysHalt:

		# Stop clock
		clock.stop()

		# Stop (lower) screen update
		io.maxFps = 1  # lowest can go is 1 FPS

		print( 'Sys.halt reached. Took {0:.2f} seconds.'.format( time.time() - startTime ) )

		# Profile... temp
		if runYappiProfile:

			yappi.get_func_stats().print_all()
# Run -------------------------------
def run( programPath_ ):

	'''Entry point: select the program, set everything up, and start
	   the IO thread and the clock.
	'''

	global programPath

	# Specify program
	if programPath_:

		programPath = programPath_

	# Setup
	setup()

	# Profile... temp
	if runYappiProfile:

		yappi.start()

	# Start IO
	io.runAsThread()

	# Start clock
	clock.run()

	print( 'Program has started' )

	# NOTE(review): this assigns a *local* startTime (no `global` here), so it
	# does not affect the module-level startTime set in setup() — confirm intent.
	startTime = time.time()
| 17.495187 | 94 | 0.572503 | 2,124 | 16,358 | 4.362524 | 0.208098 | 0.033024 | 0.024822 | 0.011224 | 0.181956 | 0.138139 | 0.101122 | 0.089791 | 0.058493 | 0.058493 | 0 | 0.015098 | 0.254982 | 16,358 | 934 | 95 | 17.513919 | 0.74522 | 0.224294 | 0 | 0.273782 | 0 | 0 | 0.096787 | 0 | 0 | 0 | 0 | 0.001071 | 0 | 1 | 0.058005 | false | 0.00232 | 0.013921 | 0.00464 | 0.085847 | 0.020882 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
767b8fac1b2b6c67e1630c194ab938fc526af488 | 4,351 | py | Python | perception/training/models.py | jostl/masters-thesis | 211e1f12a07428d37507e2bddc808f6da1149efb | [
"MIT"
] | 3 | 2021-06-19T10:49:26.000Z | 2022-03-26T11:31:28.000Z | perception/training/models.py | jostl/masters-thesis | 211e1f12a07428d37507e2bddc808f6da1149efb | [
"MIT"
] | 1 | 2021-10-12T15:40:55.000Z | 2021-10-12T15:40:55.000Z | perception/training/models.py | jostl/masters-thesis | 211e1f12a07428d37507e2bddc808f6da1149efb | [
"MIT"
] | null | null | null | import torch
from torchvision.models.segmentation.deeplabv3 import DeepLabHead
from torchvision.models.segmentation.fcn import FCNHead
from torchvision import models
def createDeepLabv3(outputchannels=1, backbone="resnet50", pretrained=True):
    """DeepLabv3 segmentation model with a custom classification head.

    Args:
        outputchannels (int, optional): Number of output channels in the
            dataset masks. Defaults to 1.
        backbone (str, optional): "resnet50", "mobilenet", or any other
            value, which falls through to resnet101.
        pretrained (bool, optional): Load pretrained weights.

    Returns:
        model: torchvision DeepLabv3 model with its classifier replaced
            and the auxiliary classifier removed.
    """
    if backbone == "resnet50":
        print("DeepLabv3: Using resnet50 as backbone")
        model = models.segmentation.deeplabv3_resnet50(pretrained=pretrained,
                                                       progress=True)
        # resnet50 backbone emits 2048 feature channels
        model.classifier = DeepLabHead(2048, outputchannels)
    elif backbone == "mobilenet":
        print("DeepLabv3: Using mobilenet as backbone")
        model = models.segmentation.deeplabv3_mobilenet_v3_large(pretrained=pretrained, progress=True)
        # mobilenet_v3_large backbone emits 960 feature channels
        model.classifier = DeepLabHead(960, outputchannels)
    else:
        print("DeepLabv3: Using resnet101 as backbone")
        model = models.segmentation.deeplabv3_resnet101(pretrained=pretrained,
                                                        progress=True)
        model.classifier = DeepLabHead(2048, outputchannels)
    model.aux_classifier = None
    return model
def createFCN(outputchannels=1, backbone="resnet50", pretrained=True):
    """FCN segmentation model with a custom classification head.

    Args:
        outputchannels (int, optional): Number of output channels. Defaults to 1.
        backbone (str, optional): "resnet50"; any other value selects resnet101.
        pretrained (bool, optional): Load pretrained weights.

    Returns:
        model: torchvision FCN model with its classifier replaced and the
            auxiliary classifier removed.
    """
    if backbone == "resnet50":
        print("FCN: Using resnet50 as backbone")
        model = models.segmentation.fcn_resnet50(pretrained=pretrained, progress=True,
                                                 num_classes=21, aux_loss=False)
    else:
        print("FCN: Using resnet101 as backbone")
        model = models.segmentation.fcn_resnet101(pretrained=pretrained, progress=True,
                                                  num_classes=21, aux_loss=False)
    model.aux_classifier = None
    # Both resnet backbones emit 2048 feature channels
    model.classifier = FCNHead(2048, outputchannels)
    return model
def createUNet():
    """Return a bilinear UNet (3 input channels, 1 class) from the local package."""
    from perception.unet.unet_model import UNet
    model = UNet(n_channels=3, n_classes=1, bilinear=True)
    return model
def createUNetResNet():
    """Return a UNet with a resnet34 encoder (imagenet weights, sigmoid head)."""
    import segmentation_models_pytorch as smp
    model = smp.Unet(encoder_name="resnet34", encoder_weights="imagenet", in_channels=3, classes=1,
                     activation="sigmoid")
    return model
def createUNetResNetSemSeg(n_classes):
    """Return a semantic-segmentation UNet with a resnet50 encoder.

    Args:
        n_classes (int): Number of output classes (softmax2d activation).
    """
    import segmentation_models_pytorch as smp
    model = smp.Unet(encoder_name="resnet50", encoder_weights="imagenet", in_channels=3, classes=n_classes,
                     activation="softmax2d")
    return model
def createMidas(use_large_model=True):
    """Load a MiDaS depth-estimation model from torch.hub.

    Args:
        use_large_model (bool, optional): If True load "MiDaS", otherwise
            "MiDaS_small". Defaults to True.

    Returns:
        The MiDaS model (downloads via torch.hub on first use).
    """
    if use_large_model:
        midas = torch.hub.load("intel-isl/MiDaS", "MiDaS")
    else:
        midas = torch.hub.load("intel-isl/MiDaS", "MiDaS_small")
    return midas
"""
midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms")
if use_large_model:
transform = midas_transforms.default_transform
else:
transform = midas_transforms.small_transform
import cv2
import urllib.request
import numpy as np
import matplotlib.pyplot as plt
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
urllib.request.urlretrieve(url, filename)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
midas.to(device)
midas.eval()
img = cv2.imread("data/perception/test1/rgb/clear_noon_1823_463.png")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
input_batch = transform(img).to(device)
with torch.no_grad():
prediction = midas(input_batch)
prediction = torch.nn.functional.interpolate(
prediction.unsqueeze(1),
size=img.shape[:2],
mode="bicubic",
align_corners=False,
).squeeze()
output = prediction.cpu().numpy()
plt.imshow(output)
plt.show()
"""
if __name__ == "__main__":
    # Ad-hoc manual test: build one of the models (others left for quick swapping)
    #createDeepLabv3(outputchannels=9, backbone="resnet50", pretrained=True)
    createMidas()
    #createFCN(outputchannels=9, backbone="resnet101", pretrained=True)
| 33.992188 | 107 | 0.662836 | 476 | 4,351 | 5.934874 | 0.321429 | 0.044602 | 0.026549 | 0.037168 | 0.399646 | 0.387611 | 0.32354 | 0.20885 | 0.184071 | 0.130265 | 0 | 0.029296 | 0.239026 | 4,351 | 127 | 108 | 34.259843 | 0.82392 | 0.111928 | 0 | 0.363636 | 0 | 0 | 0.117279 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109091 | false | 0 | 0.127273 | 0 | 0.345455 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
767d3eda72c1d25b0f0a77d17424d12c2274b465 | 1,527 | py | Python | docs/conf.py | thecode/aioswitcher | 5ac068766bb44b5cc6008425021c91c6ffb858f9 | [
"Apache-2.0"
] | 11 | 2020-04-11T22:25:15.000Z | 2022-02-20T12:55:49.000Z | docs/conf.py | thecode/aioswitcher | 5ac068766bb44b5cc6008425021c91c6ffb858f9 | [
"Apache-2.0"
] | 440 | 2019-04-26T15:41:59.000Z | 2022-03-31T05:07:04.000Z | docs/conf.py | thecode/aioswitcher | 5ac068766bb44b5cc6008425021c91c6ffb858f9 | [
"Apache-2.0"
] | 5 | 2019-05-24T11:45:22.000Z | 2021-05-30T08:25:23.000Z | # Copyright Tomer Figenblat.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration file for Sphinx Documentation Generator."""
from os import path as os_path
from sys import path as sys_path
from toml import load as toml_load
sys_path.insert(0, os_path.abspath("../src"))
sys_path.insert(1, os_path.abspath("../scripts"))
toml_path = "{}/pyproject.toml".format(os_path.abspath(".."))
parsed_toml = toml_load(toml_path)
project = parsed_toml["tool"]["poetry"]["name"]
author = copyright = "Tomer Figenblat"
release = version = parsed_toml["tool"]["poetry"]["version"]
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"sphinxcontrib.autoprogram",
"sphinxcontrib.spelling",
]
exclude_patterns = ["docsbuild"]
language = "en"
html_theme = "insegel"
html_baseurl = parsed_toml["tool"]["poetry"]["documentation"].replace("https://", "")
autodoc_default_options = {"members": True}
autodoc_typehints = "description"
| 32.489362 | 86 | 0.711853 | 202 | 1,527 | 5.277228 | 0.549505 | 0.056285 | 0.036585 | 0.056285 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004699 | 0.16372 | 1,527 | 46 | 87 | 33.195652 | 0.83007 | 0.395547 | 0 | 0 | 0 | 0 | 0.2922 | 0.054715 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.130435 | 0 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
767de2fd840e084d3a8339fa0ea47561cfe8b57e | 7,289 | py | Python | ENV/lib/python3.6/site-packages/pyramid_debugtoolbar/toolbar_app.py | captain-c00keys/pyramid-stocks | 0acf3363a6a7ee61cd41b855f43c9d6f9582ae6a | [
"MIT"
] | null | null | null | ENV/lib/python3.6/site-packages/pyramid_debugtoolbar/toolbar_app.py | captain-c00keys/pyramid-stocks | 0acf3363a6a7ee61cd41b855f43c9d6f9582ae6a | [
"MIT"
] | null | null | null | ENV/lib/python3.6/site-packages/pyramid_debugtoolbar/toolbar_app.py | captain-c00keys/pyramid-stocks | 0acf3363a6a7ee61cd41b855f43c9d6f9582ae6a | [
"MIT"
] | null | null | null | from collections import OrderedDict
from pyramid.config import Configurator
from pyramid.interfaces import Interface
from pyramid.view import view_config
from pyramid_debugtoolbar.compat import json
from pyramid_debugtoolbar.compat import text_
from pyramid_debugtoolbar.toolbar import IPanelMap
from pyramid_debugtoolbar.utils import (
get_setting,
ROOT_ROUTE_NAME,
SETTINGS_PREFIX,
STATIC_PATH,
)
bundled_includes = (
'pyramid_debugtoolbar.panels.headers',
'pyramid_debugtoolbar.panels.introspection',
'pyramid_debugtoolbar.panels.logger',
'pyramid_debugtoolbar.panels.performance',
'pyramid_debugtoolbar.panels.renderings',
'pyramid_debugtoolbar.panels.request_vars',
'pyramid_debugtoolbar.panels.routes',
'pyramid_debugtoolbar.panels.settings',
'pyramid_debugtoolbar.panels.sqla',
'pyramid_debugtoolbar.panels.traceback',
'pyramid_debugtoolbar.panels.tweens',
'pyramid_debugtoolbar.panels.versions',
)
class IParentActions(Interface):
    """ Marker interface for registered parent actions in the toolbar app."""
def make_toolbar_app(settings, parent_registry):
    """ WSGI application for rendering the debug toolbar.

    Configures the toolbar's own Pyramid app (routes, renderers, request
    methods, bundled panels) and returns the WSGI callable.
    """
    config = Configurator(settings=settings)
    config.registry.parent_registry = parent_registry
    # Ordered so panels render in registration order
    config.registry.registerUtility(OrderedDict(), IPanelMap)
    config.add_directive('add_debugtoolbar_panel', add_debugtoolbar_panel)
    config.add_directive('inject_parent_action', inject_parent_action)

    config.include('pyramid_mako')
    config.add_mako_renderer('.dbtmako', settings_prefix='dbtmako.')

    config.add_request_method(
        lambda r: r.matchdict.get('request_id'),
        'pdtb_id',
        reify=True,
    )
    config.add_request_method(
        lambda r: r.registry.parent_registry.pdtb_history,
        'pdtb_history',
        reify=True,
    )

    config.add_static_view('static', STATIC_PATH)
    config.add_route(ROOT_ROUTE_NAME, '/', static=True)
    config.add_route('debugtoolbar.sse', '/sse')
    config.add_route('debugtoolbar.redirect', '/redirect')
    config.add_route('debugtoolbar.request', '/{request_id}')
    config.add_route('debugtoolbar.main', '/')
    config.scan(__name__)

    for include in bundled_includes:
        config.include(include)

    # commit the toolbar config and include any user-defined includes
    config.commit()

    includes = settings.get(SETTINGS_PREFIX + 'includes', ())
    for include in includes:
        config.include(include)

    return config.make_wsgi_app()
def add_debugtoolbar_panel(config, panel_class, is_global=False):
    """
    Register a new panel into the debugtoolbar.

    This is a Pyramid config directive that is accessible as
    ``config.add_debugtoolbar_panel`` within the debugtoolbar application.
    It should be used from ``includeme`` functions via the
    ``debugtoolbar.includes`` setting.

    The ``panel_class`` should be a subclass of
    :class:`pyramid_debugtoolbar.panels.DebugPanel`.

    If ``is_global`` is ``True`` then the panel will be added to the global
    panel list which includes application-wide panels that do not depend
    on per-request data to operate.
    """
    panel_class = config.maybe_dotted(panel_class)
    name = panel_class.name
    panel_map = config.registry.getUtility(IPanelMap)
    # Keyed by (name, is_global) so the same panel may register both ways
    panel_map[(name, is_global)] = panel_class
def inject_parent_action(config, action):
    """
    Inject an action into the parent application.

    This is a Pyramid config directive that is accessible as
    ``config.inject_parent_action`` within the debugtoolbar application.
    It should be used from ``includeme`` functions via the
    ``debugtoolbar.includes`` setting.

    The ``action`` should be a callable that accepts the parent app's
    ``config`` object. It will be executed after the parent app is created
    to ensure that configuration is set prior to the actions being executed.
    """
    actions = config.registry.queryUtility(IParentActions)
    if actions is None:
        # Lazily create and register the action list on first use
        actions = []
        config.registry.registerUtility(actions, IParentActions)
    actions.append(action)
@view_config(
    route_name='debugtoolbar.redirect',
    renderer='pyramid_debugtoolbar:templates/redirect.dbtmako',
)
def redirect_view(request):
    """Render the interstitial page shown when the toolbar intercepts a redirect."""
    return {
        'redirect_to': request.params.get('redirect_to'),
        'redirect_code': request.params.get('redirect_code'),
    }
@view_config(
    route_name='debugtoolbar.main',
    renderer='pyramid_debugtoolbar:templates/toolbar.dbtmako'
)
@view_config(
    route_name='debugtoolbar.request',
    renderer='pyramid_debugtoolbar:templates/toolbar.dbtmako'
)
def request_view(request):
    """Render the toolbar page for a given request id (or the most recent one)."""
    history = request.pdtb_history
    try:
        last_request_pair = history.last(1)[0]
    except IndexError:
        # No requests recorded yet
        last_request_pair = None
        last_request_id = None
    else:
        last_request_id = last_request_pair[0]

    request_id = request.matchdict.get('request_id', last_request_id)
    toolbar = history.get(request_id, None)

    # set a dictionary of panels that can be accessed inside
    # DebugPanel.render_content()
    if toolbar:
        request.toolbar_panels = {
            panel.name: panel
            for panel in toolbar.panels
        }

    static_path = request.static_url(STATIC_PATH)
    root_path = request.route_url(ROOT_ROUTE_NAME)
    button_style = get_setting(request.registry.settings, 'button_style')
    max_visible_requests = get_setting(request.registry.settings,
                                       'max_visible_requests')
    hist_toolbars = history.last(max_visible_requests)

    return {'panels': toolbar.panels if toolbar else [],
            'static_path': static_path,
            'root_path': root_path,
            'button_style': button_style,
            'history': hist_toolbars,
            'default_active_panels': (
                toolbar.default_active_panels if toolbar else []),
            'global_panels': toolbar.global_panels if toolbar else [],
            'request_id': request_id
            }
U_BLANK = text_("")
U_SSE_PAYLOAD = text_("id:{0}\nevent: new_request\ndata:{1}\n\n")
@view_config(route_name='debugtoolbar.sse')
def sse(request):
    """Server-sent-events endpoint pushing new-request notifications to the toolbar UI."""
    response = request.response
    response.content_type = 'text/event-stream'
    history = request.pdtb_history
    response.text = U_BLANK

    active_request_id = text_(request.GET.get('request_id'))
    client_last_request_id = text_(request.headers.get('Last-Event-Id', 0))
    max_visible_requests = get_setting(request.registry.settings,
                                       'max_visible_requests')

    if history:
        last_request_pair = history.last(1)[0]
        last_request_id = last_request_pair[0]
        # Only emit a payload when the client is behind the newest request
        if not last_request_id == client_last_request_id:
            data = [
                [
                    _id,
                    toolbar.json,
                    'active' if active_request_id == _id else ''
                ]
                for _id, toolbar in history.last(max_visible_requests)
                if toolbar.visible
            ]
            if data:
                response.text = U_SSE_PAYLOAD.format(last_request_id,
                                                     json.dumps(data))
    return response
767e4611345dce6f22595d3e963f543350edb277 | 1,008 | py | Python | setup.py | sdg32/flask-celery-boilerplate | 6c03f3bc287e9ea1a8467d4865c99cc94319f016 | [
"MIT"
] | 1 | 2019-01-28T02:10:38.000Z | 2019-01-28T02:10:38.000Z | setup.py | sdg32/flask-celery-boilerplate | 6c03f3bc287e9ea1a8467d4865c99cc94319f016 | [
"MIT"
] | null | null | null | setup.py | sdg32/flask-celery-boilerplate | 6c03f3bc287e9ea1a8467d4865c99cc94319f016 | [
"MIT"
] | null | null | null | import re
from setuptools import find_packages
from setuptools import setup
with open('fcb/app.py', 'rt', encoding='utf-8') as f:
version = re.search(r"__version__ = '(.*?)'", f.read()).group(1)
setup(
name='fcb',
version=version,
description='Flask celery boilerplate project.',
url='https://github.com/sdg32/flask-celery-boilerplate',
author='sdg32',
packages=find_packages(exclude=['*.tests', '*.tests.*',
'tests.*', 'tests']),
include_package_data=True,
zip_safe=False,
python_requires='~=3.10.0',
install_requires=[
'celery~=5.2.6',
'click~=8.1.2',
'flask~=2.1.1',
'flask-sqlalchemy~=2.5.1',
'kombu~=5.2.4',
'pytz~=2022.1',
'redis~=4.2.2',
'sqlalchemy~=1.4.35',
'werkzeug~=2.1.1',
],
tests_require=[],
extras_require={
'dev': [
'mypy',
],
'dotenv': [
'python-dotenv',
],
},
)
| 24.585366 | 68 | 0.518849 | 118 | 1,008 | 4.322034 | 0.576271 | 0.058824 | 0.078431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.055556 | 0.285714 | 1,008 | 40 | 69 | 25.2 | 0.652778 | 0 | 0 | 0.081081 | 0 | 0 | 0.316468 | 0.022817 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.081081 | 0 | 0.081081 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
767e9be7f95d8403466504835ce7661edbb5732a | 1,628 | py | Python | RemoveAdjacentRepeatingChars/main.py | rishidevc/stkovrflw | c33dffbce887f32f609a10dd717d594390ceac8b | [
"MIT"
] | null | null | null | RemoveAdjacentRepeatingChars/main.py | rishidevc/stkovrflw | c33dffbce887f32f609a10dd717d594390ceac8b | [
"MIT"
] | 5 | 2020-05-04T03:11:14.000Z | 2021-06-10T20:20:38.000Z | RemoveAdjacentRepeatingChars/main.py | rishidevc/stkovrflw | c33dffbce887f32f609a10dd717d594390ceac8b | [
"MIT"
] | 1 | 2019-07-31T18:28:34.000Z | 2019-07-31T18:28:34.000Z | # https://stackoverflow.com/questions/51164448/remove-all-adjacent-duplicates-using-a-loop/51165745#51165745
def remove_adjacent_duplicates(answer):
    """Remove every run of two-or-more identical adjacent characters entirely.

    A character survives only if it is not repeated next to itself, e.g.
    'abbabd' -> 'aabd' and 'aa**mmmxxnnnnRaaI++SH((IKES))H' -> 'RISHIKESH'.
    """
    new_answer = ""  # output string
    ch_last = ""     # character seen in the previous iteration
    start, end = 0, 0  # start and end indices of the current run of equal chars
    for index, ch in enumerate(answer):
        if index:  # for index 1 and onwards
            if ch == ch_last:
                end = index  # extend the current run
            else:  # a new character ends the previous run
                if start == end:  # run length was 1, so the char is kept
                    new_answer = new_answer + ch_last
                start, end = index, index
        else:  # index == 0 (only 1st time)
            start, end = index, index
        ch_last = ch  # remember the current character for the next iteration
    if start == end:  # flush the final character if it was not repeated
        new_answer = new_answer + ch_last
    return new_answer
# START
if __name__ == "__main__":
# INPUT 1
answer = input("Enter a string: ") # abbabd
print(remove_adjacent_duplicates(answer)) # aabd
# INPUT 2
answer = input("Enter a string: ") # abcddddeefffghii
print(remove_adjacent_duplicates(answer)) # abcgh
# INPUT 3
answer = input("Enter a string: ") # abcddddeefffghi
print(remove_adjacent_duplicates(answer)) # abcghi
# INPUT 4
answer = input("Enter a string: ") # aa**mmmxxnnnnRaaI++SH((IKES))H
print(remove_adjacent_duplicates(answer)) # RISHIKESH | 38.761905 | 108 | 0.628993 | 199 | 1,628 | 5 | 0.396985 | 0.108543 | 0.120603 | 0.150754 | 0.281407 | 0.048241 | 0 | 0 | 0 | 0 | 0 | 0.028473 | 0.288084 | 1,628 | 42 | 109 | 38.761905 | 0.830026 | 0.378378 | 0 | 0.592593 | 0 | 0 | 0.072874 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0 | 0 | 0.074074 | 0.148148 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7680f899ce715f4effda6343efff51fc53dd2350 | 737 | py | Python | LakeStates/constants_locate_nearest.py | ritviksahajpal/EPIC | 47bfd9ecbe130667bd5e22efded95a612ea3fbd2 | [
"MIT"
] | 3 | 2015-11-08T10:34:31.000Z | 2020-07-03T09:48:20.000Z | LakeStates/constants_locate_nearest.py | ritviksahajpal/EPIC | 47bfd9ecbe130667bd5e22efded95a612ea3fbd2 | [
"MIT"
] | null | null | null | LakeStates/constants_locate_nearest.py | ritviksahajpal/EPIC | 47bfd9ecbe130667bd5e22efded95a612ea3fbd2 | [
"MIT"
] | 4 | 2015-04-24T23:40:05.000Z | 2018-08-16T14:39:10.000Z | import os, sys, logging, errno, multiprocessing, ast
from ConfigParser import SafeConfigParser
# Parse config file
parser = SafeConfigParser()
parser.read('../config_EPIC.txt')
MAX = 100000.0 # Maximum distance between any two points
TAG = parser.get('PROJECT','SEMF_TAG') # Tag of SEIMF folder
EPIC_DLY = parser.get('PARAMETERS','EPIC_DLY')
base_dir = parser.get('PATHS','base_dir')+os.sep
epic_dir = base_dir + os.sep + 'EPIC' + os.sep + parser.get('PROJECT', 'project_name') + os.sep + \
parser.get('PROJECT', 'EPIC_dat') + os.sep + parser.get('PROJECT', 'OUT_TAG') + os.sep
site_lat_lon = ast.literal_eval(parser.get('PARAMETERS','site_loc')) | 52.642857 | 104 | 0.639077 | 97 | 737 | 4.701031 | 0.494845 | 0.138158 | 0.140351 | 0.092105 | 0.208333 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012153 | 0.218453 | 737 | 14 | 105 | 52.642857 | 0.779514 | 0.104478 | 0 | 0 | 0 | 0 | 0.203957 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7682dca8b9c8667e1dc09faa488d9c17999af88f | 4,146 | py | Python | Bulls & Cows.py | Skylar-Kerzner/Bulls-and-Cows | a0fa3fc09c388f1b677bbc9a1232788a368fd506 | [
"MIT"
] | null | null | null | Bulls & Cows.py | Skylar-Kerzner/Bulls-and-Cows | a0fa3fc09c388f1b677bbc9a1232788a368fd506 | [
"MIT"
] | null | null | null | Bulls & Cows.py | Skylar-Kerzner/Bulls-and-Cows | a0fa3fc09c388f1b677bbc9a1232788a368fd506 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
"""Number Guessing Game a.k.a. Bulls and Cows
Players will receive a Welcome Message, and then
can input a number of digits for the game.
Players will guess numbers of the given length,
to guess the computer's randomly generated number.
With each guess made by the player,
the computer will return a message
of how many digit are correct and in the right place.
as well as how many additional instances of
the right digit but in the wrong place.
Players keep guessing until they get it right.
Player guesses muste be in number form, or they
will be asked to retry their guess."""
import random

play_a_round = True

# At game end, always start a new round.
while play_a_round == True:

    # Welcome the Player
    print(
        "Welcome to the number guessing game (also known as Bulls & Cows).\n I hope you have fun!! =))\n \n")

    # If the number of digits is not a number, tell the player to try again.
    try:
        game_num_digits = int(input("Please input a number of digits for your game:"))
    except ValueError:
        print("\n \n Whoops! Please put in an integer number of digits\n \n")
        continue

    # This will be the list of digits to be guessed. Ensure that the first digit is nonzero to avoid confusion.
    answer_list = [random.randint(1, 9)] + [random.randint(0, 9) for i in range(game_num_digits-1)]

    # Enter the same loop for each player guess, until they win and the loop breaks.
    while True:

        # Take Player guess.
        new_guess = input("please input your guess:")

        # If it's not an integer, give player instruction to change guess, and retry.
        try:
            new_guess_int = int(new_guess)
        except ValueError:
            print("\n \n Oh noo! Please put your guess digits together into a single number.\n \n")
            continue

        # If it's the wrong number of digits, inform the player and have them try again.
        if len(new_guess) != game_num_digits:
            print("\n \n Uh oh! That's not the right number of digits! \n \n")
            continue

        # The guesses are converted into a list
        new_guess_list = [int(x) for x in new_guess]

        # Get list of True and False for whether each digit is an exact match,
        # then add up the Trues to get total number of exact matches.
        exact_matches_boolean = [new_guess_list[i] == answer_list[i] for i in range(game_num_digits)]
        num_exact_match = sum(exact_matches_boolean)

        # If the guess is correct, end game and state You Win!!
        if num_exact_match == game_num_digits:
            print("You win!!\n \n")
            break

        # Guess list with the exact nums removed
        guess_list_removed_exact = [x for i,x in enumerate(new_guess_list) if not exact_matches_boolean[i]]

        # Answer list with the exact nums removed
        answer_list_removed_exact = [x for i,x in enumerate(answer_list) if not exact_matches_boolean[i]]

        # Count out-of-place matches: for each remaining guess digit present in
        # the remaining answer, record it and consume one instance of that digit
        # so duplicates are not double-counted.
        matches_out_of_place = []
        for digit in guess_list_removed_exact:
            if digit in answer_list_removed_exact:
                matches_out_of_place.append(digit)
                answer_list_removed_exact.remove(digit)

        num_matches_out_of_place = len(matches_out_of_place)

        # Tell the player how many guesses are exact and out of place.
        print("Great! You have {} exact matches and {} other digits that are correct but out of place!\n \n".format(num_exact_match,num_matches_out_of_place))

    # TODO: Ask the player whether they want to play again.
    # If Yes, then go back to the beginning.
    # If No, then say goodbye!
| 39.865385 | 158 | 0.648577 | 644 | 4,146 | 4.059006 | 0.285714 | 0.027544 | 0.03443 | 0.032517 | 0.179036 | 0.146136 | 0.089518 | 0.048967 | 0.025249 | 0 | 0 | 0.002041 | 0.290883 | 4,146 | 103 | 159 | 40.252427 | 0.887075 | 0.440666 | 0 | 0.194444 | 0 | 0.111111 | 0.210503 | 0 | 0 | 0 | 0 | 0.009709 | 0 | 1 | 0 | false | 0 | 0.027778 | 0 | 0.027778 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7688beb24a6f5b989c3afcfe875e38b7fdeeeee2 | 4,117 | py | Python | granthawork/convert_text.py | funderburkjim/sanskrit-transcoding | ae82e95f382d400cef814188d859c3dee92b7e04 | [
"MIT"
] | 3 | 2015-07-23T08:41:56.000Z | 2019-02-11T00:52:39.000Z | granthawork/convert_text.py | funderburkjim/sanskrit-transcoding | ae82e95f382d400cef814188d859c3dee92b7e04 | [
"MIT"
] | 3 | 2015-08-29T16:40:54.000Z | 2019-10-06T18:30:03.000Z | granthawork/convert_text.py | funderburkjim/sanskrit-transcoding | ae82e95f382d400cef814188d859c3dee92b7e04 | [
"MIT"
] | 4 | 2016-02-25T01:33:25.000Z | 2020-09-22T21:15:38.000Z | #-*- coding:utf-8 -*-
"""convert_grantha.py
"""
from __future__ import print_function
import sys,re,codecs
import transcoder
transcoder.transcoder_set_dir('transcoder')
transcode = transcoder.transcoder_processString # convenience
def parse_filename(filein):
 """Split 'prefix_tran.txt' into (prefix, tran).

 The transliteration suffix must be one of 'slp1', 'roman', or 'hk';
 otherwise an error is printed and the program exits.
 """
 tranin_known = ['slp1','roman','hk']
 m = re.search(r'^(.*)_(.*)[.]txt$',filein)
 if m:
  pfx,tranin = (m.group(1),m.group(2))
 if (not m) or (tranin not in tranin_known):
  endings = ['_%s.txt' %t for t in tranin_known]
  print("filein error: require filename ending in one of %s" %endings)
  exit(1)
 return pfx,tranin
def generate_text(lines,tranout,classname,filein_pfx,title):
 """Transcode slp1-coded lines to `tranout` and write them, preceded by
 `title`, to '<filein_pfx>_<classname>.txt' (UTF-8)."""
 fileout = "%s_%s.txt" %(filein_pfx,classname)
 with codecs.open(fileout,"w","utf-8") as f:
  f.write(title+'\n')
  for x in lines:
   y = transcode(x,'slp1',tranout)
   f.write(y+'\n')
 print('%s lines written to %s' %(len(lines),fileout))
# CSS snippets keyed by output-class name.  Each entry declares a
# @font-face for the font it needs plus a matching display class used by
# generate_html.  The URLs in the comments record where each font file
# was obtained.
styles={
'grantha':
"""
@font-face {
src: url('../granthafonts/e-Grantamil.ttf');
font-family: grantamil;
/*https://web.archive.org/web/20070506074247/http://www.uni-hamburg.de/Wiss/FB/10/IndienS/Kniprath/INDOLIPI/Indolipi.htm
*/
}
.grantha {
color:teal;
font-size:20px;
font-weight: normal;
font-family: grantamil;
}
""",
# variant of the e-Grantamil font hosted by sanskritdocuments.org
'grantha-sd':
"""
@font-face {
src: url('../granthafonts/e-Grantamil-sd.ttf');
font-family: grantamil-sd;
/*$ wget --no-check-certificate https://sanskritdocuments.org/css/fonts/e-Grantamil.ttf*/
}
.grantha-sd {
color:teal;
font-size:20px;
font-weight: normal;
font-family: grantamil-sd;
}
""",
# 'e-Grantamil 7' variant from the Hamburg Indology materials page
'grantha7':
"""
@font-face {
src: url('../granthafonts/e-Grantamil 7.ttf');
font-family: grantamil7;
/*https://www.aai.uni-hamburg.de/indtib/studium/materialien.html
*/
}
.grantha7 {
color:teal;
font-size:20px;
font-weight: normal;
font-family: grantamil7;
}
""",
# devanagari output rendered with the Siddhanta font
'deva':
"""
@font-face {
src: url('../granthafonts/siddhanta.ttf');
font-family: devanagari;
}
.deva {
color:teal;
font-size:20px;
font-weight: normal;
font-family: devanagari;
}
"""
}
def generate_html_head(tranout, classname, filein_pfx):
    """Return the list of HTML header lines for the given style class.

    The <style> section is pulled from the module-level `styles` table;
    only the last path component of filein_pfx is shown in the title.
    """
    fileshow = re.split(r'/', filein_pfx)[-1]
    template = """<html>
<head>
<title>%s:%s</title>
<meta charset="UTF-8">
<style>
%s
</style>
</head>
""" % (fileshow, classname, styles[classname])
    return template.splitlines()
def generate_html(lines, tranout, classname, filein_pfx, title):
    """Render slp1-coded lines as a styled HTML page on disk.

    Each line is transcoded to `tranout` and wrapped in a <span>
    carrying the CSS class for the requested font.  The output file is
    named '<filein_pfx>_<classname>.html'.
    """
    fileout = "%s_%s.html" % (filein_pfx, classname)
    rendered = [
        "<span class='%s'>%s</span><br/>" % (classname,
                                             transcode(x, 'slp1', tranout))
        for x in lines
    ]
    outlines = (
        generate_html_head(tranout, classname, filein_pfx)
        + ['<body>', '<H2>%s</H2>' % title]
        + rendered
        + ['</body>', '</html>']
    )
    with codecs.open(fileout, "w", "utf-8") as f:
        f.write('\n'.join(outlines) + '\n')
    print('%s lines written to %s' % (len(outlines), fileout))
if __name__ == "__main__":
    # Usage: convert_text.py <prefix>_<tranin>.txt
    filein = sys.argv[1]
    filein_pfx,tranin = parse_filename(filein)
    # First line of the file is the title; the rest is the text body.
    with codecs.open(filein,'r','utf-8') as f:
        linesin0 = [x.rstrip('\r\n').rstrip() for x in f]
    title = linesin0[0]
    linesin = linesin0[1:]
    # get lines_slp1 from tranin and linesin
    if tranin == 'slp1':
        lines_slp1 = linesin
    else:
        # normalize the input transliteration to slp1 first
        lines_slp1 = []
        for x in linesin:
            line = transcode(x,tranin,'slp1')
            lines_slp1.append(line)
        # also, write lines_slp1
        fileout = '%s_slp1.txt' % filein_pfx
        with codecs.open(fileout,"w","utf-8") as f:
            f.write(title+'\n')
            for x in lines_slp1:
                f.write(x + '\n')
        print('%s lines written to %s' %(len(lines_slp1),fileout))
    # generate text files:
    parmout = [
        # (tranout, classname = fileoutsfx)
        ('grantha2','grantha'),
        ('grantha2','grantha-sd'),
        ('grantha3','grantha7'),
        ('deva','deva')
    ]
    # get different versions of text files
    for tranout, classname in parmout:
        generate_text(lines_slp1,tranout,classname,filein_pfx,title)
    # different versions of html files
    for tranout, classname in parmout:
        generate_html(lines_slp1,tranout,classname,filein_pfx,title)
| 24.652695 | 120 | 0.673063 | 584 | 4,117 | 4.650685 | 0.282534 | 0.036451 | 0.048601 | 0.055228 | 0.341679 | 0.315906 | 0.290133 | 0.161267 | 0.161267 | 0.137703 | 0 | 0.018986 | 0.142822 | 4,117 | 166 | 121 | 24.801205 | 0.750638 | 0.06461 | 0 | 0.11828 | 0 | 0 | 0.175287 | 0.00898 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053763 | false | 0 | 0.032258 | 0 | 0.11828 | 0.053763 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
768b87cc744c060bc232e9fddf283689bed48e0f | 2,043 | py | Python | examples/instance_is_composed_from_cherrypicked_method_in_class.py | Hc10b/py3traits | 713eec35a5a2f4eba6801c1c7660b863eb692bcc | [
"Apache-2.0"
] | 25 | 2015-05-25T20:27:43.000Z | 2020-08-30T16:37:59.000Z | examples/instance_is_composed_from_cherrypicked_method_in_class.py | Hc10b/py3traits | 713eec35a5a2f4eba6801c1c7660b863eb692bcc | [
"Apache-2.0"
] | 2 | 2017-11-02T09:02:37.000Z | 2020-06-16T20:39:56.000Z | examples/instance_is_composed_from_cherrypicked_method_in_class.py | Hc10b/py3traits | 713eec35a5a2f4eba6801c1c7660b863eb692bcc | [
"Apache-2.0"
] | 3 | 2015-05-25T20:34:13.000Z | 2020-06-16T13:51:37.000Z | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
from pytraits import extendable
# Let's start by creating a simple class with some values. It contains
# class variables and instance variables. Composed methods will have
# access to all these variables.
@extendable
class ExampleClass:
    """Composition target exposing attributes at all three Python
    visibility conventions, at both class and instance level."""
    PUBLIC = 24
    _HIDDEN = 25
    __PRIVATE = 26  # name-mangled to _ExampleClass__PRIVATE
    def __init__(self):
        self.public = 42
        self._hidden = 43
        self.__private = 44  # name-mangled to _ExampleClass__private
# Then we create a class which contains different types of methods that will be
# transferred as a part of the class above. Note that ExampleTrait requires target
# object to contain class variables and instance variables, thus it won't work as a
# stand-alone object.
class ExampleTrait:
    """Donor class whose methods are cherry-picked onto ExampleClass.

    Not usable stand-alone: class_method and instance_method read
    attributes that only exist on the composition target.
    """
    @staticmethod
    def static_method():
        return 1, 2, 3
    @classmethod
    def class_method(cls):
        # NOTE(review): cls.__PRIVATE is name-mangled at compile time to
        # _ExampleTrait__PRIVATE; pytraits is presumably expected to
        # remap this on transfer -- confirm against the library docs.
        return cls.PUBLIC, cls._HIDDEN, cls.__PRIVATE
    def instance_method(self):
        return self.public, self._hidden, self.__private
# Create instance of target class and cherry-pick methods from ExampleTrait class.
example_instance = ExampleClass()
example_instance.add_traits(ExampleTrait.instance_method,
                            ExampleTrait.class_method,
                            ExampleTrait.static_method)
# Here are the proofs that composed methods work as part of new class.
assert example_instance.instance_method() == (42, 43, 44),\
    "Instance composition fails with instance method!"
assert example_instance.class_method() == (24, 25, 26),\
    "Instance composition fails with class method in instance!"
# BUGFIX: this message previously said "class method" (copy-paste), but
# the assertion guards static_method.
assert example_instance.static_method() == (1, 2, 3),\
    "Instance composition fails with static method in instance!"
# Composing onto the instance must not leak anything onto the class itself.
assert not hasattr(ExampleClass, "new_static_function"),\
    "Instance composition fails due to class has changed!"
assert not hasattr(ExampleClass, "new_class_function"),\
    "Instance composition fails due to class has changed!"
assert not hasattr(ExampleClass, "new_method"),\
    "Instance composition fails due to class has changed!"
| 35.224138 | 83 | 0.72002 | 268 | 2,043 | 5.354478 | 0.380597 | 0.079443 | 0.100348 | 0.058537 | 0.287805 | 0.222997 | 0.222997 | 0.222997 | 0.192334 | 0.115679 | 0 | 0.019042 | 0.203133 | 2,043 | 57 | 84 | 35.842105 | 0.862408 | 0.303475 | 0 | 0.142857 | 0 | 0 | 0.258499 | 0 | 0 | 0 | 0 | 0 | 0.171429 | 1 | 0.114286 | false | 0 | 0.028571 | 0.085714 | 0.371429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
768b93ced85713f401f181af9bdc07a304099ed6 | 4,092 | py | Python | DL_inference/utils/lct.py | princeton-computational-imaging/NLOSFeatureEmbeddings | f882ca5684e9b6ffb16052a714714f570e606295 | [
"Unlicense"
] | 12 | 2020-12-12T22:12:22.000Z | 2022-02-28T12:11:59.000Z | DL_inference/utils/lct.py | princeton-computational-imaging/NLOSFeatureEmbeddings | f882ca5684e9b6ffb16052a714714f570e606295 | [
"Unlicense"
] | 4 | 2021-01-06T09:14:45.000Z | 2021-02-04T07:17:05.000Z | DL_inference/utils/lct.py | princeton-computational-imaging/NLOSFeatureEmbeddings | f882ca5684e9b6ffb16052a714714f570e606295 | [
"Unlicense"
] | 9 | 2020-12-29T07:10:39.000Z | 2022-03-13T02:29:03.000Z |
import numpy as np
import cv2
from helper import definePsf, resamplingOperator
##########################################################
def lct(meas_hxwxt, wall_size, crop, bin_len):
    """Reconstruct and display a hidden volume from a transient
    measurement using the Light-Cone Transform (LCT) deconvolution.

    Parameters:
        meas_hxwxt: measurement volume indexed Height x Width x Time.
        wall_size:  physical extent of the scanned wall (meters);
                    half of it is used as the lateral half-width.
        crop:       number of time bins kept; must be a power of two.
        bin_len:    temporal bin length expressed as a distance (meters).

    Displays intermediate results via cv2.imshow; returns nothing.
    """
    c = 3e8  # speed of light, m/s
    width = wall_size / 2.0;
    bin_resolution = bin_len / c  # bin length converted to seconds
    assert 2 ** int(np.log2(crop)) == crop
    snr = 1e-1  # regularization weight for the Wiener-style inverse filter
    ###########################################
    meas_hxwxt = meas_hxwxt[:, :, :crop]  # HxWxT
    sptial_grid = meas_hxwxt.shape[0]  # H, N
    temprol_grid = meas_hxwxt.shape[2]  # T, M
    trange = temprol_grid * c * bin_resolution  # total depth range (meters)
    #########################################################
    # normalized depth coordinate, 0-1, broadcast over the spatial grid
    gridz_M = np.arange(temprol_grid, dtype=np.float32)
    gridz_M = gridz_M / (temprol_grid - 1)
    gridz_MxNxN = np.tile(gridz_M.reshape(-1, 1, 1), [1, sptial_grid, sptial_grid])
    ###################################################
    # build the light-cone PSF and its regularized inverse in frequency space
    slope = width / trange
    psf = definePsf(sptial_grid, temprol_grid, slope)
    fpsf = np.fft.fftn(psf)
    invpsf = np.conjugate(fpsf) / (1 / snr + np.real(fpsf) ** 2 + np.imag(fpsf) ** 2)
    # invpsf = np.conjugate(fpsf)
    mtx_MxM, mtxi_MxM = resamplingOperator(temprol_grid)
    #############################################################
    # diffuse falloff compensation, then resample and zero-pad before the FFT
    data_TxHxW = np.transpose(meas_hxwxt, [2, 0, 1])
    data_TxHxW = data_TxHxW * (gridz_MxNxN ** 4)
    datapad_2Tx2Hx2W = np.zeros(shape=(2 * temprol_grid, 2 * sptial_grid, 2 * sptial_grid), dtype=np.float32)
    left = mtx_MxM
    right = data_TxHxW.reshape(temprol_grid, -1)
    tmp = np.matmul(left, right).reshape(temprol_grid, sptial_grid, sptial_grid)
    datapad_2Tx2Hx2W[:temprol_grid, :sptial_grid, :sptial_grid] = tmp
    # frequency-domain deconvolution, then crop back to the original grid
    datafre = np.fft.fftn(datapad_2Tx2Hx2W)
    volumn_2Mx2Nx2N = np.fft.ifftn(datafre * invpsf)
    volumn_2Mx2Nx2N = np.real(volumn_2Mx2Nx2N)
    volumn_ZxYxX = volumn_2Mx2Nx2N[:temprol_grid, :sptial_grid, :sptial_grid]
    # apply the inverse resampling operator along the depth axis
    left = mtxi_MxM
    right = volumn_ZxYxX.reshape(temprol_grid, -1)
    tmp = np.matmul(left, right).reshape(temprol_grid, sptial_grid, sptial_grid)
    volumn_ZxYxX = tmp
    ################################
    # clamp negatives, keep the front 100/128 of the depth slices, normalize
    volumn_ZxYxX[volumn_ZxYxX < 0] = 0
    dim = volumn_ZxYxX.shape[0] * 100 // 128
    volumn_ZxYxX = volumn_ZxYxX[:dim]
    volumn_ZxYxX = volumn_ZxYxX / np.max(volumn_ZxYxX)
    # maximum-intensity projection along depth gives the front view
    front_view_HxW = np.max(volumn_ZxYxX, axis=0)
    cv2.imshow("re3", front_view_HxW / np.max(front_view_HxW))
    # cv2.imshow('gt', imgt)
    cv2.waitKey()
    # step through the depth slices interactively (any key advances)
    for frame in volumn_ZxYxX:
        cv2.imshow("re1", frame)
        cv2.imshow("re2", frame / np.max(frame))
        cv2.waitKey(0)
########################################################
if __name__ == '__main__':
    import os
    # Dead code kept as a string literal: previous loader that assembled
    # the measurement from a directory of PNG frames.
    '''
    fd = '/u6/a/wenzheng/remote2/code-nlos-git/OccludedSceneRep-2/code/pytorch-wz/dataloader_lct';
    ims = []
    tbe = -1
    for i in range(512):
        name = '%s/1-%d.png' % (fd, i)
        if not os.path.isfile(name):
            ims.append(np.zeros((256, 256), dtype=np.uint8))
            continue
        if tbe < 0:
            tbe = i
        im = cv2.imread(name)
        imgt = im[:, :256, :]
        im = im[:, -256:, :]
        imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        ims.append(imgray)
    rect_data_txhxw = np.array(ims, dtype=np.float32) / 255.0
    rect_data_hxwxt = np.transpose(rect_data_txhxw, [1, 2, 0])
    '''
    from scipy.io import loadmat
    # Load a real capture (hard-coded local path) from the phasor-field
    # dataset; 'measlr' holds the HxWxT measurement.
    data = loadmat('/home/wenzheng/largestore/nlos-phasor/realdata/bike0.mat')
    rect_data_hxwxt = data['measlr']
    crop = 512
    bin_len = 32e-12 * 3e8  # 0.01
    # Optional 2x downsampling, applied K times along all three axes.
    K = 0
    for k in range(K):
        rect_data_hxwxt = rect_data_hxwxt[::2, :, :] + rect_data_hxwxt[1::2, :, :]
        rect_data_hxwxt = rect_data_hxwxt[:, ::2, :] + rect_data_hxwxt[:, 1::2, :]
        rect_data_hxwxt = rect_data_hxwxt[:, :, ::2] + rect_data_hxwxt[:, :, 1::2]
        crop = crop // 2
        bin_len = bin_len * 2
    lct(rect_data_hxwxt, wall_size=2.0, crop=crop, bin_len=bin_len)
| 32.47619 | 109 | 0.552053 | 525 | 4,092 | 4.068571 | 0.28 | 0.065543 | 0.073034 | 0.046816 | 0.17603 | 0.160112 | 0.131086 | 0.131086 | 0.131086 | 0.131086 | 0 | 0.044023 | 0.239492 | 4,092 | 125 | 110 | 32.736 | 0.642352 | 0.020283 | 0 | 0.032258 | 0 | 0 | 0.026671 | 0.018906 | 0 | 0 | 0 | 0 | 0.016129 | 1 | 0.016129 | false | 0 | 0.080645 | 0 | 0.096774 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
768ba607897d7b0882acfd3743a710657081a012 | 6,754 | py | Python | Gathered CTF writeups/ptr-yudai-writeups/2019/CTFZone_2019_Quals/tic_tac_toe/server.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:00:41.000Z | 2022-03-27T06:00:41.000Z | Gathered CTF writeups/ptr-yudai-writeups/2019/CTFZone_2019_Quals/tic_tac_toe/server.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | null | null | null | Gathered CTF writeups/ptr-yudai-writeups/2019/CTFZone_2019_Quals/tic_tac_toe/server.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:01:42.000Z | 2022-03-27T06:01:42.000Z | import os
import time
import random
import string
import struct
import binascii
import socketserver
FLAG = 'ctfzone{place_for_flag}\n'  # placeholder; overwritten from env in __main__
FLAG_COUNT = 100  # wins required before the flag is released
HOST = '0.0.0.0'
PORT = 9998
SESSION_LIFETIME = 300  # seconds of inactivity before a session expires
# One-byte request opcodes sent by the client.
REG_USER = '1'
SEND_STATE = '2'
GET_FLAG = '3'
# Response status codes.
ERROR_NO = 1
ERROR_LOSE = 2
ERROR_WIN = 3
ERROR_FLAG = 4
ERROR_MOVE = 5
ERROR_SESS = 6
# Board cell markers: the computer plays X, the human plays O.
Xs = 1
Os = 2
class TicTacToeServerHandler(socketserver.BaseRequestHandler):
    """Per-connection handler implementing the tic-tac-toe wire protocol.

    Each request starts with a one-byte opcode (REG_USER / SEND_STATE /
    GET_FLAG); responses are little-endian struct-packed payloads.
    """
    # Class attribute shared by all handler instances (one per
    # connection): maps a 32-char session id to a session dict with
    # keys 'sessid', 'level', 'field', 'time'.
    sessions = {}
    def handle(self):
        """Read the opcode byte and dispatch to the matching handler."""
        try:
            data = str(self.request.recv(1), 'ascii')
            if data[0] == REG_USER:
                self.process_reg_user()
            elif data[0] == SEND_STATE:
                self.process_state()
            elif data[0] == GET_FLAG:
                self.process_flag()
        except Exception as e:
            print('[-] Error handling messages: {}'.format(e))
    def process_reg_user(self):
        """Create a new session and reply with (status, 32-byte id)."""
        user = self.generate_session()
        values = (ERROR_NO, user['sessid'])
        packer = struct.Struct('<i 32s')
        packed_data = packer.pack(*values)
        try:
            self.request.sendall(packed_data)
            print('[+] Sending session info: {} {}'.format(binascii.hexlify(packed_data), values))
        except Exception as e:
            print('[-] Error sending registration response: {}'.format(e))
    def process_flag(self):
        """Send the flag if the session reached FLAG_COUNT wins."""
        unpacker = struct.Struct('<32s')
        input_bytes = self.request.recv(32)
        session = unpacker.unpack(input_bytes)
        session = str(session[0], 'ascii')
        if session not in self.sessions:
            err = ERROR_SESS
            msg = "You trying to cheat on me!\n"
        elif self.sessions[session]['level'] < FLAG_COUNT:
            err = ERROR_SESS
            msg = "You trying to cheat on me!\n"
        else:
            err = ERROR_NO
            msg = FLAG
        try:
            packer = struct.Struct('<i {}s'.format(len(msg)))
            values = (err, bytes(msg, 'ascii'))
            packed_data = packer.pack(*values)
            self.request.sendall(packed_data)
            print('[+] Sending flag info: {} {}'.format(binascii.hexlify(packed_data), values))
        except Exception as e:
            print('[-] Error sending flag response: {}'.format(e))
    def process_state(self):
        """Parse (session, computer move, human move) and reply with the
        resulting game status code."""
        try:
            unpacker = struct.Struct('<32s i i')
            input_bytes = self.request.recv(40)
            session, cmove, hmove = unpacker.unpack(input_bytes)
            session = str(session, 'ascii')
            values = self.get_state_response(session, cmove, hmove)
        except Exception as e:
            print('[-] Error parsing process_state request: {}'.format(e))
        # NOTE(review): if parsing above failed, `values` is unbound here;
        # the resulting NameError is swallowed by the except below and no
        # response is sent.
        try:
            packer = struct.Struct('<i')
            packed_data = packer.pack(*values)
            self.request.sendall(packed_data)
            print('[+] Sending state info: {} {}'.format(binascii.hexlify(packed_data), values))
        except Exception as e:
            print('[-] Error sending state response: {}'.format(e))
    def get_state_response(self, session, cmove, hmove):
        """Apply the moves to the session's board and return a 1-tuple
        status code; the board is reset on game over or illegal input.

        cmove is the computer's (X) cell index, hmove the human's (O);
        hmove == -1 means the human does not move this turn.
        """
        print(cmove, hmove)
        if session not in self.sessions:
            return (ERROR_SESS,)
        if (self.sessions[session]['field'][cmove] != 0) or (hmove != -1 and self.sessions[session]['field'][hmove] != 0):
            self.sessions[session]['field'] = [0, 0, 0, 0, 0, 0, 0, 0, 0]
            return (ERROR_MOVE,)
        self.sessions[session]['field'][cmove] = Xs
        if hmove != -1:
            self.sessions[session]['field'][hmove] = Os
        win = self.check_win(self.sessions[session]['field'])
        if win == Xs:
            # Computer (X) won: the player loses and progress resets.
            self.sessions[session]['level'] = 1
            self.sessions[session]['field'] = [0, 0, 0, 0, 0, 0, 0, 0, 0]
            return (ERROR_LOSE,)
        if win == Os:
            print("Congrats")
            self.sessions[session]['level'] += 1
            self.sessions[session]['field'] = [0, 0, 0, 0, 0, 0, 0, 0, 0]
            if self.sessions[session]['level'] > FLAG_COUNT:
                return (ERROR_FLAG,)
            else:
                return (ERROR_WIN,)
        has_empty = False
        for x in self.sessions[session]['field']:
            if x == 0:
                has_empty = True
                break
        if not has_empty:
            # Draw: a full board with no winner counts as a loss.
            self.sessions[session]['level'] = 1
            self.sessions[session]['field'] = [0, 0, 0, 0, 0, 0, 0, 0, 0]
            return (ERROR_LOSE,)
        return (ERROR_NO,)
    def check_win(self, field):
        """Return Xs or Os if that side has three in a row, else 0."""
        # rows
        if field[0] == field[1] == field[2] != 0:
            return field[0]
        if field[3] == field[4] == field[5] != 0:
            return field[3]
        if field[6] == field[7] == field[8] != 0:
            return field[6]
        # columns
        if field[0] == field[3] == field[6] != 0:
            return field[0]
        if field[1] == field[4] == field[7] != 0:
            return field[1]
        if field[2] == field[5] == field[8] != 0:
            return field[2]
        # diagonals
        if field[0] == field[4] == field[8] != 0:
            return field[0]
        if field[2] == field[4] == field[6] != 0:
            return field[2]
        return 0
    def generate_session(self):
        """Expire stale sessions, then register and return a fresh one."""
        try:
            ct = time.time()
            keys = []
            for key, val in self.sessions.items():
                if ct - val['time'] > SESSION_LIFETIME:
                    keys.append(key)
            for key in keys:
                del self.sessions[key]
        except Exception as e:
            print('[-] Error while cleaning up sessions: {}'.format(e))
        # Draw ids until one is unused (collisions are extremely unlikely).
        sessid = random_string()
        while sessid in self.sessions:
            sessid = random_string()
        user = {'sessid': bytes(sessid, 'ascii'),
                'level': 1,
                'field': [0, 0, 0, 0, 0, 0, 0, 0, 0],
                'time': time.time()}
        self.sessions[sessid] = user
        return user
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """TCP server that handles each connection in its own thread."""
    pass
def random_string(string_length=32):
    """Return a random alphanumeric string of the given length."""
    alphabet = string.ascii_letters + string.digits
    chars = [random.choice(alphabet) for _ in range(string_length)]
    return ''.join(chars)
def start_server(lhost, lport):
    """Run the threaded tic-tac-toe server on (lhost, lport).

    Blocks in serve_forever(); exits the process on Ctrl-C and logs any
    other error from the serving loop.
    """
    server = ThreadedTCPServer((lhost, lport), TicTacToeServerHandler)
    try:
        print('[+] Server started at {}:{}'.format(lhost, lport))
        server.serve_forever()
    except KeyboardInterrupt:
        print('[+] KeyboardInterrupt received, exit\n')
        exit(0)
    except Exception as e:
        print('[-] General pooling error: {}'.format(e))
if __name__ == '__main__':
    try:
        # The real flag must be supplied via the FLAG environment variable.
        FLAG = os.environ['FLAG'] + '\n'
    except Exception as e:
        print('[-] Can\'t get flag: {}'.format(e))
        exit(0)
    start_server(HOST, PORT)
| 33.60199 | 122 | 0.542789 | 814 | 6,754 | 4.404177 | 0.187961 | 0.023989 | 0.030962 | 0.034589 | 0.426499 | 0.300697 | 0.227615 | 0.193584 | 0.193584 | 0.193584 | 0 | 0.030204 | 0.318626 | 6,754 | 200 | 123 | 33.77 | 0.748805 | 0 | 0 | 0.265537 | 0 | 0 | 0.101718 | 0.003702 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050847 | false | 0.00565 | 0.039548 | 0 | 0.20904 | 0.084746 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
768ddbc3a3343dbcd81a6aa591e0bb8eb93dd757 | 3,270 | py | Python | Airplane/chap10/mavsim_chap10.py | eyler94/ee674AirplaneSim | 3ba2c6e685c2688a7f372475a7cd1f55f583d10e | [
"MIT"
] | 1 | 2020-06-07T00:14:42.000Z | 2020-06-07T00:14:42.000Z | Submarine/chap10/mavsim_chap10.py | eyler94/ee674AirplaneSim | 3ba2c6e685c2688a7f372475a7cd1f55f583d10e | [
"MIT"
] | null | null | null | Submarine/chap10/mavsim_chap10.py | eyler94/ee674AirplaneSim | 3ba2c6e685c2688a7f372475a7cd1f55f583d10e | [
"MIT"
] | 1 | 2019-06-24T22:10:48.000Z | 2019-06-24T22:10:48.000Z | """
mavsim_python
- Chapter 10 assignment for Beard & McLain, PUP, 2012
- Last Update:
3/11/2019 - RWB
"""
import sys
sys.path.append('..')
import numpy as np
import parameters.simulation_parameters as SIM
from chap3.data_viewer import data_viewer
from chap4.wind_simulation import wind_simulation
from chap6.autopilot import autopilot
from chap7.mav_dynamics import mav_dynamics
from chap8.observer import observer
from chap10.path_follower import path_follower
from chap10.path_viewer import path_viewer
# initialize the visualization
path_view = path_viewer()  # initialize the viewer
data_view = data_viewer()  # initialize view of data plots
VIDEO = False  # True==write video, False==don't write video
if VIDEO == True:
    from chap2.video_writer import video_writer
    video = video_writer(video_name="chap10_video.avi",
                         bounding_box=(0, 0, 1000, 1000),
                         output_rate=SIM.ts_video)
# initialize elements of the architecture
wind = wind_simulation(SIM.ts_simulation)
mav = mav_dynamics(SIM.ts_simulation)
ctrl = autopilot(SIM.ts_simulation)
obsv = observer(SIM.ts_simulation)
path_follow = path_follower()
# path definition: either a straight line or an orbit, selected by flag
from message_types.msg_path import msg_path
path = msg_path()
path.flag = 'line'
# path.flag = 'orbit'
if path.flag == 'line':
    path.line_origin = np.array([[0.0, 0.0, -100.0]]).T
    path.line_direction = np.array([[0.5, 1.0, 0.0]]).T
    # direction must be a unit vector for the path follower
    path.line_direction = path.line_direction / np.linalg.norm(path.line_direction)
else:  # path.flag == 'orbit'
    path.orbit_center = np.array([[0.0, 0.0, -150.0]]).T  # center of the orbit
    path.orbit_radius = 300.0  # radius of the orbit
    path.orbit_direction = -1  # orbit direction: 1==clockwise, -1==counter clockwise
# initialize the simulation time
sim_time = SIM.start_time
# main simulation loop: observer -> path follower -> autopilot -> dynamics
print("Press Command-Q to exit...")
while sim_time < SIM.end_time:
    #-------observer-------------
    measurements = mav.sensors  # get sensor measurements
    estimated_state = obsv.update(measurements)  # estimate states from measurements
    #-------path follower-------------
    # autopilot_commands = path_follow.update(path, mav.msg_true_state)  # for debugging
    autopilot_commands = path_follow.update(path, estimated_state)
    #-------controller-------------
    # delta, commanded_state = ctrl.update(autopilot_commands, mav.msg_true_state)  # for debugging
    delta, commanded_state = ctrl.update(autopilot_commands, estimated_state)
    #-------physical system-------------
    current_wind = wind.update()  # get the new wind vector
    # mav.update_state(delta, np.array([[0., 0., 0., 0., 0., 0.]]).T)  # for debugging
    mav.update_state(delta, current_wind)  # propagate the MAV dynamics
    mav.update_sensors()
    #-------update viewer-------------
    path_view.update(path, mav.msg_true_state)  # plot path and MAV
    data_view.update(mav.msg_true_state,  # true states
                     estimated_state,  # estimated states
                     commanded_state,  # commanded states
                     SIM.ts_simulation)
    if VIDEO == True: video.update(sim_time)
    #-------increment time-------------
    sim_time += SIM.ts_simulation
if VIDEO == True: video.close()
| 35.543478 | 98 | 0.685015 | 441 | 3,270 | 4.897959 | 0.287982 | 0.012963 | 0.0125 | 0.009259 | 0.193519 | 0.157407 | 0.071296 | 0 | 0 | 0 | 0 | 0.026168 | 0.181957 | 3,270 | 91 | 99 | 35.934066 | 0.781308 | 0.336697 | 0 | 0 | 0 | 0 | 0.024471 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.226415 | 0 | 0.226415 | 0.018868 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
768f2a2ea17408963742c0a0927aa2e04eff551a | 3,525 | py | Python | seeqler/ui/connection_list.py | ceilors/seeqler | 461811c16a3ba097cc81aab54e90ba6b3a1dedd2 | [
"MIT"
] | null | null | null | seeqler/ui/connection_list.py | ceilors/seeqler | 461811c16a3ba097cc81aab54e90ba6b3a1dedd2 | [
"MIT"
] | 2 | 2022-01-29T10:59:06.000Z | 2022-01-29T15:59:36.000Z | seeqler/ui/connection_list.py | ceilors/seeqler | 461811c16a3ba097cc81aab54e90ba6b3a1dedd2 | [
"MIT"
] | null | null | null | import dearpygui.dearpygui as dpg
from .window import Window
from ..connection_manager import ConnectionManager, Connection
class ConnectionListWindow(Window):
    """Main window listing saved connections.

    Lets the user connect to a saved entry or open the creation dialog.
    """
    def __init__(self, app, **kwargs):
        # fmt: off
        super().__init__(
            app, 'Список подключений', 'connection_list', (400, 500), window_resizable=False,
            tag_listbox='connection list', tag_new_connection='create connection', tag_connect_to='connect to',
            **kwargs
        )
        # fmt: on
    def construct(self) -> None:
        """Build the listbox / buttons layout with dearpygui."""
        # Upper pane: saved-connections listbox plus the connect button.
        with dpg.child_window(border=False, autosize_x=True, height=0.8 * self.height):
            with dpg.group(horizontal=False):
                with dpg.group(width=self.width):
                    dpg.add_text('Список сохраненных подключений')
                    dpg.add_listbox((), num_items=10, tag=self.tag_listbox)
                    dpg.add_button(label='Подключиться', tag=self.tag_connect_to, callback=self.ui_connect)
        # Lower pane: the "create new connection" button.
        with dpg.child_window(border=False, autosize_x=True, autosize_y=True):
            with dpg.group(horizontal=True):
                dpg.add_spacer(width=0.25 * self.relative_width)
                dpg.add_button(
                    label='Создать новое подключение…',
                    tag=self.tag_new_connection,
                    width=0.75 * self.relative_width,
                    callback=self.ui_show_create_connection,
                )
    def show(self) -> None:
        """Refresh the listbox from the saved connections and display."""
        super().show()
        connections = [x.label for x in ConnectionManager().list()]
        if not connections:
            # Nothing to connect to -- disable the connect button.
            dpg.configure_item(self.tag_connect_to, enabled=False)
        dpg.configure_item(self.tag_listbox, items=connections)
    def ui_connect(self) -> None:
        """Open the schema window for the selected saved connection."""
        from .schema import SchemaWindow
        try:
            connection = ConnectionManager().get(label=dpg.get_value(self.tag_listbox))
            self.app.init(connection.connection_string)
            SchemaWindow(self.app).show()
        except ValueError as e:
            print(f"Can't connect to selected connection! Reason:\n{e}")
    def ui_show_create_connection(self) -> None:
        """Open the 'create connection' dialog."""
        ConnectionCreateWindow(self.app).show()
class ConnectionCreateWindow(Window):
    """Dialog for entering and saving a new connection."""
    def __init__(self, app, **kwargs):
        # fmt: off
        super().__init__(
            app, 'Новое подключение', 'connection_create', (600, 500), window_resizable=False,
            tag_input_label='tag label input', tag_connection_string='tag connection string',
            **kwargs
        )
        # fmt: on
    def create_connection(self):
        """Persist the connection described by the form fields and close."""
        label = self.shift_value_to_unicode(dpg.get_value(self.tag_input_label))
        connection = self.shift_value_to_unicode(dpg.get_value(self.tag_connection_string))
        ConnectionManager().add(Connection(label, connection))
        self.close()
    def construct(self) -> None:
        """Lay out the label / connection-string inputs and the buttons."""
        dpg.add_text('Наименование подключения')
        dpg.add_input_text(tag=self.tag_input_label, width=self.relative_width)
        dpg.add_spacer(height=15)
        dpg.add_text('Строка подключения')
        dpg.add_input_text(tag=self.tag_connection_string, width=self.relative_width)
        dpg.add_spacer(height=15)
        with dpg.group(horizontal=True):
            dpg.add_button(label='Сохранить', callback=self.create_connection)
            dpg.add_button(label='Закрыть', callback=self.close)
    def close(self) -> None:
        """Return to the connection list and destroy this window."""
        # BUGFIX: ConnectionListWindow.__init__ requires the app instance;
        # calling it with no arguments raised TypeError.
        ConnectionListWindow(self.app).show()
        dpg.delete_item(self.window_id)
        self.initiated = False
76903769036f1dd11c2f6b95330c08695ac5186c | 477 | py | Python | Sumof3n5.py | Lioncat2002/computerProjPy | accb584443796967a4c3f0e8afce3d39e44cb113 | [
"MIT"
] | null | null | null | Sumof3n5.py | Lioncat2002/computerProjPy | accb584443796967a4c3f0e8afce3d39e44cb113 | [
"MIT"
] | null | null | null | Sumof3n5.py | Lioncat2002/computerProjPy | accb584443796967a4c3f0e8afce3d39e44cb113 | [
"MIT"
] | null | null | null | '''4.WAP to write a function Dir3and5() which takes 10 numbers in a tuple and returns the sum of the elements which ae divisible by 3 and 5'''
def Dir3and5(tup):
    """Return the sum of the elements of *tup* divisible by both 3 and 5.

    BUGFIX: the original accumulated into a module-level ``sum`` variable
    that was never reset, so a second call kept adding onto the previous
    result (and the name shadowed the ``sum`` builtin).  A local
    accumulator makes the function pure and repeatable.
    """
    total = 0
    for value in tup:
        if value % 3 == 0 and value % 5 == 0:
            total += value
    return total
if __name__=='__main__':
    # SECURITY NOTE(review): eval() executes arbitrary user input;
    # ast.literal_eval would be the safe replacement -- flagged, not changed.
    n=eval(input("Enter a tuple with 10 digits: "))
    print(f"The sum of the tuple is {Dir3and5(n)}")
| 31.8 | 142 | 0.647799 | 87 | 477 | 3.45977 | 0.54023 | 0.039867 | 0.053156 | 0.07309 | 0.126246 | 0.126246 | 0 | 0 | 0 | 0 | 0 | 0.05571 | 0.247379 | 477 | 14 | 143 | 34.071429 | 0.78273 | 0.42348 | 0 | 0 | 0 | 0 | 0.280899 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0 | 0 | 0.181818 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
769040b8c248fd4d51d2dd4bdb5dcf1cf1242410 | 15,483 | py | Python | sdb/commands/stacks.py | sravyamks/sdb | c51baa5d76144c29f7ee954167e982f03e53a972 | [
"Apache-2.0"
] | null | null | null | sdb/commands/stacks.py | sravyamks/sdb | c51baa5d76144c29f7ee954167e982f03e53a972 | [
"Apache-2.0"
] | null | null | null | sdb/commands/stacks.py | sravyamks/sdb | c51baa5d76144c29f7ee954167e982f03e53a972 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2019 Delphix
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=missing-docstring
import argparse
from typing import Dict, Iterable, List, Optional, Tuple
from collections import defaultdict
import drgn
from drgn.helpers.linux.list import list_for_each_entry
from drgn.helpers.linux.pid import for_each_task
import sdb
class Stacks(sdb.Locator, sdb.PrettyPrinter):
"""
Print the stack traces for active threads (task_struct)
DESCRIPTION
By default, the command will aggregate similar call stacks
printing them in descending order of frequency. The output
includes the `struct task_struct` address, thread state, and
aggregation count.
Optionally, the command can filter stacks, displaying only
those that match a given thread state, containing a given
function, or belonging to a given kernel module.
The command returns all task_stuct structs that matched the
filter.
EXAMPLES
Print the call stacks for all tasks
sdb> stacks
TASK_STRUCT STATE COUNT
==========================================
0xffff9521bb3c3b80 IDLE 394
__schedule+0x24e
schedule+0x2c
worker_thread+0xba
kthread+0x121
ret_from_fork+0x35
0xffff9521bb3cbb80 INTERRUPTIBLE 384
__schedule+0x24e
schedule+0x2c
smpboot_thread_fn+0x166
kthread+0x121
ret_from_fork+0x35
...
Print stacks containing functions from the zfs module
sdb> stacks -m zfs
TASK_STRUCT STATE COUNT
==========================================
0xffff952130515940 INTERRUPTIBLE 1
__schedule+0x24e
schedule+0x2c
cv_wait_common+0x11f
__cv_wait_sig+0x15
zthr_procedure+0x51
thread_generic_wrapper+0x74
kthread+0x121
ret_from_fork+0x35
...
Print stacks containing the l2arc_feed_thread function
sdb> stacks -c l2arc_feed_thread
TASK_STRUCT STATE COUNT
==========================================
0xffff9521b3f43b80 INTERRUPTIBLE 1
__schedule+0x24e
schedule+0x2c
schedule_timeout+0x15d
__cv_timedwait_common+0xdf
__cv_timedwait_sig+0x16
l2arc_feed_thread+0x66
thread_generic_wrapper+0x74
kthread+0x121
ret_from_fork+0x35
Print stacks of threads in the RUNNING state
sdb> stacks -t RUNNING
TASK_STRUCT STATE COUNT
==========================================
0xffff95214ff31dc0 RUNNING 1
Count the number of stacks in the zfs module
sdb> stacks -m zfs | count
(unsigned long long)12
Print stacks of the threads started by the zthr command
sdb> threads | filter obj.comm == "zthr_procedure" | stack
TASK_STRUCT STATE COUNT
==========================================
0xffff9c7e6c268000 INTERRUPTIBLE 5
__schedule+0x24e
schedule+0x2c
cv_wait_common+0x118
__cv_wait_sig+0x15
zthr_procedure+0x45
thread_generic_wrapper+0x74
kthread+0x121
ret_from_fork+0x1f
0xffff9c7e6c1f8000 INTERRUPTIBLE 1
__schedule+0x24e
schedule+0x2c
schedule_hrtimeout_range_clock+0xb9
schedule_hrtimeout_range+0x13
__cv_timedwait_hires+0x117
cv_timedwait_hires_common+0x4b
cv_timedwait_sig_hires+0x14
zthr_procedure+0x96
thread_generic_wrapper+0x74
kthread+0x121
ret_from_fork+0x1f
"""
names = ["stacks", "stack"]
input_type = "struct task_struct *"
output_type = "struct task_struct *"
def __init__(self,
args: Optional[List[str]] = None,
name: str = "_") -> None:
super().__init__(args, name)
self.mod_start, self.mod_end = 0, 0
self.func_start, self.func_end = 0, 0
self.match_state = ""
@classmethod
def _init_parser(cls, name: str) -> argparse.ArgumentParser:
parser = super()._init_parser(name)
parser.add_argument(
"-a",
"--all",
action="store_true",
help="list all threads for each unique stack trace" +
" instead of printing a single representative thread")
parser.add_argument(
"-c",
"--function",
help="only print threads whose stacks contains FUNCTION")
parser.add_argument(
"-m",
"--module",
help="only print threads whose stacks contain functions from MODULE"
)
parser.add_argument(
"-t",
"--tstate",
help="only print threads which are in TSTATE thread state")
parser.epilog = "TSTATE := [{:s}]".format(", ".join(
Stacks.TASK_STATES.values()))
return parser
#
# See include/linux/sched.h
#
TASK_STATES = {
0x00: "RUNNING",
0x01: "INTERRUPTIBLE",
0x02: "UNINTERRUPTIBLE",
0x04: "STOPPED",
0x08: "TRACED",
0x10: "DEAD",
0x20: "ZOMBIE",
0x40: "PARKED",
0x402: "IDLE",
}
#
# See man page of ps(1)
#
TASK_STATE_SHORTCUTS = {
"R": 0x00,
"S": 0x01,
"D": 0x02,
"T": 0x04,
"t": 0x08,
"X": 0x10,
"Z": 0x20,
}
@staticmethod
def task_struct_get_state(task: drgn.Object) -> str:
state = task.state.value_()
if state == 0x402:
return "IDLE"
exit_state = task.exit_state.value_()
return Stacks.TASK_STATES[(state | exit_state) & 0x7f]
@staticmethod
def resolve_state(tstate: str) -> str:
    """
    Resolve a user-supplied thread state string to a TASK_STATES
    name.  Accepts either a full state name (any case) or a
    single-letter ps(1) shortcut; unknown strings are returned
    upper-cased so the caller can report them as invalid.
    """
    # Check the exact spelling first: the ps(1) shortcuts are
    # case-sensitive ("T" is STOPPED while "t" is TRACED), so
    # upper-casing before this lookup would make the lowercase
    # entries unreachable and silently map "t" to STOPPED.
    if tstate in Stacks.TASK_STATE_SHORTCUTS:
        return Stacks.TASK_STATES[Stacks.TASK_STATE_SHORTCUTS[tstate]]
    tstate = tstate.upper()
    if tstate in Stacks.TASK_STATE_SHORTCUTS:
        return Stacks.TASK_STATES[Stacks.TASK_STATE_SHORTCUTS[tstate]]
    return tstate
@staticmethod
def get_frame_pcs(task: drgn.Object) -> List[int]:
    """Return the program counter of every stack frame of `task`."""
    pcs: List[int] = []
    try:
        for frame in sdb.get_prog().stack_trace(task):
            pcs.append(frame.pc)
    except ValueError:
        #
        # Unwinding the stack of a running/runnable task raises an
        # exception, and since some tasks are expected to be running
        # we silently ignore it and move on.
        #
        # Unfortunately the exception raised is a generic
        # "ValueError", so we may end up masking other "ValueError"
        # exceptions that have nothing to do with unwinding a
        # running task's stack.
        #
        # We can't double-check the task state here either: the task
        # could have concurrently transitioned from running to some
        # other state between the point where the "ValueError" was
        # raised and the point where we'd verify the state.
        #
        pass
    return pcs
#
# Unfortunately the drgn Symbol API does not specify the namelist
# that a symbol came from. As a result, we created the following
# function to implement the `-m` functionality. Whenever we filter
# by module name, we find the segment in memory where this module
# resides and do the matching based on the address of the function
# of the current frame.
#
@staticmethod
def find_module_memory_segment(mod_name: str) -> Tuple[int, int]:
    """
    Look for the segment in memory where `mod_name` is
    loaded.
    Returns:
        (<base_offset>, <size>) if `mod_name` is found.
        (-1, 0) otherwise.
    """
    modules_head = sdb.get_object('modules').address_of_()
    for module in list_for_each_entry('struct module', modules_head, 'list'):
        if module.name.string_().decode("utf-8") == mod_name:
            layout = module.core_layout
            return (layout.base.value_(), layout.size.value_())
    # Module not found in the kernel's module list.
    return (-1, 0)
def validate_context(self) -> None:
    """Reject unsupported targets, then validate the command arguments."""
    #
    # Only Linux kernel targets (crash dumps or live systems) are
    # supported.  Once userland support is added, the kernel-specific
    # code can move to its own helper and we can dispatch on the
    # target type here.
    #
    if not sdb.get_target_flags() & drgn.ProgramFlags.IS_LINUX_KERNEL:
        raise sdb.CommandError(self.name,
                               "userland targets are not supported yet")
    self.validate_args()
def validate_args(self) -> None:
    """
    Validate -c/-t/-m and resolve them into the concrete filters used
    by match_stack(): function/module address ranges and a state name.

    Raises:
        sdb.CommandError: if the function symbol, task state, or
            module named by the arguments does not exist.
    """
    if self.args.function:
        try:
            #
            # It would be simpler to resolve the symbol from the function
            # name directly but we use the address due to osandov/drgn#47.
            #
            func = sdb.get_object(self.args.function)
            sym = sdb.get_symbol(func.address_of_())
        except KeyError as err:
            # Chain the original lookup failure for easier debugging.
            raise sdb.CommandError(
                self.name,
                f"symbol '{self.args.function}' does not exist") from err
        if func.type_.kind != drgn.TypeKind.FUNCTION:
            raise sdb.CommandError(
                self.name, f"'{self.args.function}' is not a function")
        self.func_start = sym.address
        self.func_end = self.func_start + sym.size

    if self.args.tstate:
        self.match_state = Stacks.resolve_state(self.args.tstate)
        task_states = Stacks.TASK_STATES.values()
        if self.match_state not in task_states:
            valid_states = ", ".join(task_states)
            raise sdb.CommandError(
                self.name, f"'{self.args.tstate}' is not a valid task state"
                f" (acceptable states: {valid_states})")

    if self.args.module:
        # Look the segment up exactly once: each lookup walks the
        # kernel's entire module list, and the original code called
        # it twice (once to check existence, once for the range).
        self.mod_start, mod_size = Stacks.find_module_memory_segment(
            self.args.module)
        if self.mod_start == -1:
            raise sdb.CommandError(
                self.name,
                f"module '{self.args.module}' doesn't exist or isn't currently loaded"
            )
        self.mod_end = self.mod_start + mod_size
def match_stack(self, task: drgn.Object) -> bool:
    """Return True when `task` satisfies the -t, -m, and -c filters."""
    # State filter first: it is the cheapest check.
    if self.args.tstate and self.match_state != Stacks.task_struct_get_state(
            task):
        return False

    need_mod = bool(self.args.module)
    need_func = bool(self.args.function)
    if not (need_mod or need_func):
        return True

    # Walk the stack until both outstanding requirements are met.
    for pc in Stacks.get_frame_pcs(task):
        if need_mod and self.mod_start <= pc < self.mod_end:
            need_mod = False
        if need_func and self.func_start <= pc < self.func_end:
            need_func = False
        if not need_mod and not need_func:
            return True
    return False
def print_header(self) -> None:
    """Print the output column header; COUNT is omitted with -a."""
    columns = "{:<18} {:<16s}".format("TASK_STRUCT", "STATE")
    if not self.args.all:
        columns += " {:>6s}".format("COUNT")
    print(columns)
    print("=" * 42)
#
# De-duplicate the objs (task_structs) using a dictionary indexed by
# task state and program counters. Return a collection sorted by number
# of tasks per stack.
#
# Note: we disabled pylint C0330 due to https://github.com/PyCQA/pylint/issues/289
@staticmethod
def aggregate_stacks(
        objs: Iterable[drgn.Object]  # pylint: disable=C0330
) -> List[Tuple[Tuple[str, Tuple[int, ...]], List[drgn.Object]]]:
    """
    Group task_structs by (task state, stack program counters) and
    return the groups sorted by descending number of tasks per group.
    """
    grouped: Dict[Tuple[str, Tuple[int, ...]],
                  List[drgn.Object]] = defaultdict(list)
    for task in objs:
        key = (Stacks.task_struct_get_state(task),
               tuple(Stacks.get_frame_pcs(task)))
        grouped[key].append(task)
    return sorted(grouped.items(), key=lambda entry: len(entry[1]),
                  reverse=True)
def print_stacks(self, objs: Iterable[drgn.Object]) -> None:
    """Pretty-print the de-duplicated stacks of the given task_structs."""
    self.print_header()
    for stack_key, tasks in Stacks.aggregate_stacks(objs):
        stacktrace_info = ""
        task_state = stack_key[0]
        if self.args.all:
            # -a: list every task that shares this unique stack.
            for task in tasks:
                stacktrace_info += "{:<18s} {:<16s}\n".format(
                    hex(task.value_()), task_state)
        else:
            # Default: a single representative task plus the count.
            stacktrace_info += "{:<18s} {:<16s} {:6d}\n".format(
                hex(tasks[0].value_()), task_state, len(tasks))
        frame_pcs: Tuple[int, ...] = stack_key[1]
        for frame_pc in frame_pcs:
            try:
                sym = sdb.get_symbol(frame_pc)
                func = sym.name
                offset = frame_pc - sym.address
            except LookupError:
                # No symbol covers this PC; print the raw address.
                func = hex(frame_pc)
                offset = 0x0
            stacktrace_info += "{:18s}{}+{}\n".format("", func, hex(offset))
        print(stacktrace_info)
def pretty_print(self, objs: Iterable[drgn.Object]) -> None:
    """Entry point for piped input: filter the tasks, then print them."""
    self.validate_context()
    matching = filter(self.match_stack, objs)
    self.print_stacks(matching)
def no_input(self) -> Iterable[drgn.Object]:
    """Entry point with no piped input: walk every task on the system."""
    self.validate_context()
    all_tasks = for_each_task(sdb.get_prog())
    yield from filter(self.match_stack, all_tasks)
| 38.22963 | 90 | 0.53407 | 1,677 | 15,483 | 4.769231 | 0.273107 | 0.019005 | 0.011253 | 0.018755 | 0.165416 | 0.13916 | 0.098775 | 0.055514 | 0.035509 | 0.028257 | 0 | 0.0342 | 0.3768 | 15,483 | 404 | 91 | 38.324257 | 0.794694 | 0.422786 | 0 | 0.115183 | 0 | 0 | 0.100888 | 0.00521 | 0 | 0 | 0.009118 | 0 | 0.005236 | 1 | 0.073298 | false | 0.005236 | 0.036649 | 0 | 0.209424 | 0.062827 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
769226177be95a7ba002992f842ea76e36ae377a | 15,794 | py | Python | torch/nn/quantizable/modules/rnn.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 24 | 2020-11-02T21:25:12.000Z | 2022-03-17T07:20:33.000Z | torch/nn/quantizable/modules/rnn.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 1 | 2021-04-22T18:37:42.000Z | 2021-04-28T00:53:25.000Z | torch/nn/quantizable/modules/rnn.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 12 | 2020-11-06T05:00:37.000Z | 2022-01-30T19:17:36.000Z | import numbers
from typing import Optional, Tuple
import warnings
import torch
from torch import Tensor
"""
We will recreate all the RNN modules as we require the modules to be decomposed
into its building blocks to be able to observe.
"""
class LSTMCell(torch.nn.Module):
    r"""A quantizable long short-term memory (LSTM) cell.

    For the description and the argument types, please, refer to :class:`~torch.nn.LSTMCell`

    Examples::

        >>> import torch.nn.quantizable as nnqa
        >>> rnn = nnqa.LSTMCell(10, 20)
        >>> input = torch.randn(3, 10)
        >>> hx = torch.randn(3, 20)
        >>> cx = torch.randn(3, 20)
        >>> output = []
        >>> for i in range(6):
                hx, cx = rnn(input[i], (hx, cx))
                output.append(hx)
    """
    _FLOAT_MODULE = torch.nn.LSTMCell

    def __init__(self, input_dim: int, hidden_dim: int, bias: bool = True,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.input_size = input_dim
        self.hidden_size = hidden_dim
        self.bias = bias

        # Gate pre-activations are two separate Linear modules
        # (input->gates, hidden->gates) so each can be observed and
        # quantized independently.
        self.igates = torch.nn.Linear(input_dim, 4 * hidden_dim, bias=bias, **factory_kwargs)
        self.hgates = torch.nn.Linear(hidden_dim, 4 * hidden_dim, bias=bias, **factory_kwargs)

        # Each arithmetic op goes through its own FloatFunctional so an
        # observer can be attached to it during the `prepare` step.
        self.gates = torch.nn.quantized.FloatFunctional()
        self.fgate_cx = torch.nn.quantized.FloatFunctional()
        self.igate_cgate = torch.nn.quantized.FloatFunctional()
        self.fgate_cx_igate_cgate = torch.nn.quantized.FloatFunctional()
        self.ogate_cy = torch.nn.quantized.FloatFunctional()

    def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tensor]:
        if hidden is None or hidden[0] is None or hidden[1] is None:
            hidden = self.initialize_hidden(x.shape[0], x.is_quantized)
        hx, cx = hidden

        # Compute all four gate pre-activations at once, then split.
        preact = self.gates.add(self.igates(x), self.hgates(hx))
        in_gate, forget_gate, cand_gate, out_gate = preact.chunk(4, 1)

        in_gate = torch.sigmoid(in_gate)
        forget_gate = torch.sigmoid(forget_gate)
        cand_gate = torch.tanh(cand_gate)
        out_gate = torch.sigmoid(out_gate)

        # c' = f * c + i * g
        fgate_cx = self.fgate_cx.mul(forget_gate, cx)
        igate_cgate = self.igate_cgate.mul(in_gate, cand_gate)
        cy = self.fgate_cx_igate_cgate.add(fgate_cx, igate_cgate)
        # h' = o * tanh(c')
        hy = self.ogate_cy.mul(out_gate, torch.tanh(cy))
        return hy, cy

    def initialize_hidden(self, batch_size: int, is_quantized: bool = False) -> Tuple[Tensor, Tensor]:
        """Create zeroed (h, c) states, quantized when the input is quantized."""
        shape = (batch_size, self.hidden_size)
        h, c = torch.zeros(shape), torch.zeros(shape)
        if is_quantized:
            h = torch.quantize_per_tensor(h, scale=1.0, zero_point=0, dtype=torch.quint8)
            c = torch.quantize_per_tensor(c, scale=1.0, zero_point=0, dtype=torch.quint8)
        return h, c

    def _get_name(self):
        return 'QuantizableLSTMCell'

    @classmethod
    def from_params(cls, wi, wh, bi=None, bh=None):
        """Uses the weights and biases to create a new LSTM cell.

        Args:
            wi, wh: Weights for the input and hidden layers
            bi, bh: Biases for the input and hidden layers
        """
        assert (bi is None) == (bh is None)  # Either both None or both have values
        # Weight shape is (4 * hidden, features), so dim 1 carries the size.
        cell = cls(input_dim=wi.shape[1], hidden_dim=wh.shape[1],
                   bias=(bi is not None))
        cell.igates.weight = torch.nn.Parameter(wi)
        if bi is not None:
            cell.igates.bias = torch.nn.Parameter(bi)
        cell.hgates.weight = torch.nn.Parameter(wh)
        if bh is not None:
            cell.hgates.bias = torch.nn.Parameter(bh)
        return cell

    @classmethod
    def from_float(cls, other):
        assert type(other) == cls._FLOAT_MODULE
        assert hasattr(other, 'qconfig'), "The float module must have 'qconfig'"
        observed = cls.from_params(other.weight_ih, other.weight_hh,
                                   other.bias_ih, other.bias_hh)
        # Propagate the qconfig down so the gate Linears get observed too.
        observed.qconfig = other.qconfig
        observed.igates.qconfig = other.qconfig
        observed.hgates.qconfig = other.qconfig
        return observed
class _LSTMSingleLayer(torch.nn.Module):
    r"""A single one-directional LSTM layer.

    The difference between a layer and a cell is that the layer can process a
    sequence, while the cell only expects an instantaneous value.
    """
    def __init__(self, input_dim: int, hidden_dim: int, bias: bool = True,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.cell = LSTMCell(input_dim, hidden_dim, bias=bias, **factory_kwargs)

    def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None):
        # Unroll the cell over the leading (time) dimension, collecting
        # the hidden output emitted at every step.
        outputs = []
        for step_input in x:
            hidden = self.cell(step_input, hidden)
            outputs.append(hidden[0])  # type: ignore[index]
        stacked = torch.stack(outputs, 0)
        return stacked, hidden

    @classmethod
    def from_params(cls, *args, **kwargs):
        """Build a layer around a cell created from raw weights/biases."""
        cell = LSTMCell.from_params(*args, **kwargs)
        layer = cls(cell.input_size, cell.hidden_size, cell.bias)
        layer.cell = cell
        return layer
class _LSTMLayer(torch.nn.Module):
    r"""A single bi-directional LSTM layer."""
    def __init__(self, input_dim: int, hidden_dim: int, bias: bool = True,
                 batch_first: bool = False, bidirectional: bool = False,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.batch_first = batch_first
        self.bidirectional = bidirectional
        # The forward direction always exists; `layer_bw` is only created
        # when bidirectional=True.
        self.layer_fw = _LSTMSingleLayer(input_dim, hidden_dim, bias=bias, **factory_kwargs)
        if self.bidirectional:
            self.layer_bw = _LSTMSingleLayer(input_dim, hidden_dim, bias=bias, **factory_kwargs)

    def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None):
        # Work internally in (seq, batch, feature) order.
        if self.batch_first:
            x = x.transpose(0, 1)
        if hidden is None:
            hx_fw, cx_fw = (None, None)
        else:
            hx_fw, cx_fw = hidden
        # In the bidirectional case, the incoming hidden state stacks both
        # directions along dim 0: index 0 is forward, index 1 is backward.
        if self.bidirectional:
            if hx_fw is None:
                hx_bw = None
            else:
                hx_bw = hx_fw[1]
                hx_fw = hx_fw[0]
            if cx_fw is None:
                cx_bw = None
            else:
                cx_bw = cx_fw[1]
                cx_fw = cx_fw[0]
            hidden_bw = hx_bw, cx_bw
        if hx_fw is None and cx_fw is None:
            hidden_fw = None
        else:
            # _unwrap_optional keeps this code TorchScript-compatible.
            hidden_fw = torch.jit._unwrap_optional(hx_fw), torch.jit._unwrap_optional(cx_fw)
        result_fw, hidden_fw = self.layer_fw(x, hidden_fw)

        if hasattr(self, 'layer_bw') and self.bidirectional:
            # Run the backward direction over the time-reversed input and
            # flip its output back so both directions align per time step.
            x_reversed = x.flip(0)
            result_bw, hidden_bw = self.layer_bw(x_reversed, hidden_bw)
            result_bw = result_bw.flip(0)

            result = torch.cat([result_fw, result_bw], result_fw.dim() - 1)
            if hidden_fw is None and hidden_bw is None:
                h = None
                c = None
            elif hidden_fw is None:
                h = hidden_bw[0]
                c = hidden_bw[1]
            elif hidden_bw is None:
                h = hidden_fw[0]
                c = hidden_fw[1]
            else:
                # Re-stack both directions along a new leading dimension.
                h = torch.stack([hidden_fw[0], hidden_bw[0]], 0)  # type: ignore[list-item]
                c = torch.stack([hidden_fw[1], hidden_bw[1]], 0)  # type: ignore[list-item]
        else:
            result = result_fw
            h, c = torch.jit._unwrap_optional(hidden_fw)  # type: ignore[assignment]

        if self.batch_first:
            result.transpose_(0, 1)
        return result, (h, c)

    @classmethod
    def from_float(cls, other, layer_idx=0, qconfig=None, **kwargs):
        r"""
        There is no FP equivalent of this class. This function is here just to
        mimic the behavior of the `prepare` within the `torch.quantization`
        flow.
        """
        assert hasattr(other, 'qconfig') or (qconfig is not None)

        input_size = kwargs.get('input_size', other.input_size)
        hidden_size = kwargs.get('hidden_size', other.hidden_size)
        bias = kwargs.get('bias', other.bias)
        batch_first = kwargs.get('batch_first', other.batch_first)
        bidirectional = kwargs.get('bidirectional', other.bidirectional)

        layer = cls(input_size, hidden_size, bias, batch_first, bidirectional)
        layer.qconfig = getattr(other, 'qconfig', qconfig)
        # Pull this layer's flat weights out of the float LSTM by name.
        wi = getattr(other, f'weight_ih_l{layer_idx}')
        wh = getattr(other, f'weight_hh_l{layer_idx}')
        bi = getattr(other, f'bias_ih_l{layer_idx}', None)
        bh = getattr(other, f'bias_hh_l{layer_idx}', None)
        layer.layer_fw = _LSTMSingleLayer.from_params(wi, wh, bi, bh)
        if other.bidirectional:
            wi = getattr(other, f'weight_ih_l{layer_idx}_reverse')
            wh = getattr(other, f'weight_hh_l{layer_idx}_reverse')
            bi = getattr(other, f'bias_ih_l{layer_idx}_reverse', None)
            bh = getattr(other, f'bias_hh_l{layer_idx}_reverse', None)
            layer.layer_bw = _LSTMSingleLayer.from_params(wi, wh, bi, bh)
        return layer
class LSTM(torch.nn.Module):
    r"""A quantizable long short-term memory (LSTM).

    For the description and the argument types, please, refer to :class:`~torch.nn.LSTM`

    Attributes:
        layers : instances of the `_LSTMLayer`

    .. note::
        To access the weights and biases, you need to access them per layer.
        See examples below.

    Examples::
        >>> import torch.nn.quantizable as nnqa
        >>> rnn = nnqa.LSTM(10, 20, 2)
        >>> input = torch.randn(5, 3, 10)
        >>> h0 = torch.randn(2, 3, 20)
        >>> c0 = torch.randn(2, 3, 20)
        >>> output, (hn, cn) = rnn(input, (h0, c0))
        >>> # To get the weights:
        >>> print(rnn.layers[0].weight_ih)
        tensor([[...]])
        >>> print(rnn.layers[0].weight_hh)
        AssertionError: There is no reverse path in the non-bidirectional layer
    """
    _FLOAT_MODULE = torch.nn.LSTM

    def __init__(self, input_size: int, hidden_size: int,
                 num_layers: int = 1, bias: bool = True,
                 batch_first: bool = False, dropout: float = 0.,
                 bidirectional: bool = False,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bias = bias
        self.batch_first = batch_first
        self.dropout = float(dropout)
        self.bidirectional = bidirectional
        self.training = False  # We don't want to train using this module

        # Validate the dropout argument the same way nn.LSTM does; note
        # dropout itself is ignored here (see the warnings below).
        # NB: the previously computed `num_directions` local was unused
        # and has been removed.
        if not isinstance(dropout, numbers.Number) or not 0 <= dropout <= 1 or \
                isinstance(dropout, bool):
            raise ValueError("dropout should be a number in range [0, 1] "
                             "representing the probability of an element being "
                             "zeroed")
        if dropout > 0:
            warnings.warn("dropout option for quantizable LSTM is ignored. "
                          "If you are training, please, use nn.LSTM version "
                          "followed by `prepare` step.")
            if num_layers == 1:
                warnings.warn("dropout option adds dropout after all but last "
                              "recurrent layer, so non-zero dropout expects "
                              "num_layers greater than 1, but got dropout={} "
                              "and num_layers={}".format(dropout, num_layers))

        # First layer maps input_size -> hidden_size; subsequent layers are
        # hidden_size -> hidden_size.
        layers = [_LSTMLayer(self.input_size, self.hidden_size,
                             self.bias, batch_first=False,
                             bidirectional=self.bidirectional, **factory_kwargs)]
        for layer in range(1, num_layers):
            layers.append(_LSTMLayer(self.hidden_size, self.hidden_size,
                                     self.bias, batch_first=False,
                                     bidirectional=self.bidirectional,
                                     **factory_kwargs))
        self.layers = torch.nn.ModuleList(layers)

    def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None):
        # Work internally in (seq, batch, feature) order.
        if self.batch_first:
            x = x.transpose(0, 1)

        max_batch_size = x.size(1)
        num_directions = 2 if self.bidirectional else 1
        if hidden is None:
            # No initial state supplied: start every layer from zeros,
            # quantized to match the input when necessary.
            zeros = torch.zeros(num_directions, max_batch_size,
                                self.hidden_size, dtype=torch.float,
                                device=x.device)
            zeros.squeeze_(0)
            if x.is_quantized:
                zeros = torch.quantize_per_tensor(zeros, scale=1.0,
                                                  zero_point=0, dtype=x.dtype)
            hxcx = [(zeros, zeros) for _ in range(self.num_layers)]
        else:
            hidden_non_opt = torch.jit._unwrap_optional(hidden)
            if isinstance(hidden_non_opt[0], Tensor):
                # Split the stacked (num_layers * num_directions, ...) state
                # into one (h, c) pair per layer.
                hx = hidden_non_opt[0].reshape(self.num_layers, num_directions,
                                               max_batch_size,
                                               self.hidden_size).unbind(0)
                cx = hidden_non_opt[1].reshape(self.num_layers, num_directions,
                                               max_batch_size,
                                               self.hidden_size).unbind(0)
                hxcx = [(hx[idx].squeeze_(0), cx[idx].squeeze_(0)) for idx in range(self.num_layers)]
            else:
                hxcx = hidden_non_opt

        for idx, layer in enumerate(self.layers):
            x, hxcx[idx] = layer(x, hxcx[idx])

        hx_list = []
        cx_list = []
        for idx in range(self.num_layers):
            hx_list.append(hxcx[idx][0])
            cx_list.append(hxcx[idx][1])
        hx_tensor = torch.stack(hx_list)
        cx_tensor = torch.stack(cx_list)

        # We are creating another dimension for bidirectional case
        # need to collapse it
        hx_tensor = hx_tensor.reshape(-1, *hx_tensor.shape[-2:])
        cx_tensor = cx_tensor.reshape(-1, *cx_tensor.shape[-2:])

        if self.batch_first:
            x = x.transpose(0, 1)

        return x, (hx_tensor, cx_tensor)

    def _get_name(self):
        return 'QuantizableLSTM'

    @classmethod
    def from_float(cls, other, qconfig=None):
        """Create a quantizable (observed) LSTM from a float nn.LSTM."""
        assert isinstance(other, cls._FLOAT_MODULE)
        assert (hasattr(other, 'qconfig') or qconfig)
        observed = cls(other.input_size, other.hidden_size, other.num_layers,
                       other.bias, other.batch_first, other.dropout,
                       other.bidirectional)
        observed.qconfig = getattr(other, 'qconfig', qconfig)
        for idx in range(other.num_layers):
            observed.layers[idx] = _LSTMLayer.from_float(other, idx, qconfig,
                                                         batch_first=False)
        observed.eval()
        observed = torch.quantization.prepare(observed, inplace=True)
        return observed

    @classmethod
    def from_observed(cls, other):
        """Convert an observed LSTM into its quantized counterpart."""
        return torch.quantization.convert(other, inplace=False,
                                          remove_qconfig=True)
| 41.023377 | 106 | 0.584779 | 2,000 | 15,794 | 4.4335 | 0.144 | 0.017368 | 0.015789 | 0.01624 | 0.383106 | 0.319274 | 0.279576 | 0.235029 | 0.220706 | 0.1624 | 0 | 0.010492 | 0.312081 | 15,794 | 384 | 107 | 41.130208 | 0.805614 | 0.124668 | 0 | 0.220217 | 0 | 0 | 0.058233 | 0.011899 | 0 | 0 | 0 | 0 | 0.021661 | 1 | 0.061372 | false | 0 | 0.018051 | 0.01083 | 0.148014 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |