hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1d8319cb25c16d92efd3edb9186ca3c5ae307547 | 2,763 | py | Python | DeepFilterNet/df/logger.py | cookcodes/DeepFilterNet | d36e70c8f09b0707c6718b74c25c3edaf4dce0e2 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 8 | 2021-12-23T09:57:29.000Z | 2022-01-17T07:01:53.000Z | DeepFilterNet/df/logger.py | Andong-Li-speech/DeepFilterNet | e651b79e48d5d25fd22a55514534c6c1e65f72fa | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | DeepFilterNet/df/logger.py | Andong-Li-speech/DeepFilterNet | e651b79e48d5d25fd22a55514534c6c1e65f72fa | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | import os
import sys
from typing import Dict, Optional
import torch
from loguru import logger
from torch.types import Number
from df.utils import get_branch_name, get_commit_hash, get_device, get_host
_logger_initialized = False
def init_logger(file: Optional[str] = None, level: str = "INFO"):
    """Set up the global loguru logger (stdout plus an optional log file).

    Safe to call more than once: only the first call installs handlers.

    Args:
        file: optional path of a log file that receives the same records.
        level: loguru level name (case-insensitive); "NONE" disables logging.
    """
    global _logger_initialized
    if _logger_initialized:
        logger.debug("Logger already initialized.")
        return
    # Drop loguru's default stderr handler before installing our own.
    logger.remove()
    level = level.upper()
    if level != "NONE":
        log_format = get_log_format(debug=level == "DEBUG")
        logger.add(sys.stdout, level=level, format=log_format)
        if file is not None:
            logger.add(file, level=level, format=log_format)
        # Record the runtime environment once at startup.
        logger.info(f"Running on torch {torch.__version__}")
        logger.info(f"Running on host {get_host()}")
        commit = get_commit_hash()
        if commit is not None:  # presumably None when not run from a git checkout -- TODO confirm
            logger.info(f"Git commit: {commit}, branch: {get_branch_name()}")
        if (jobid := os.getenv("SLURM_JOB_ID")) is not None:
            logger.info(f"Slurm jobid: {jobid}")
    _logger_initialized = True
def get_log_format(debug=False):
    """Return the loguru format string.

    In debug mode the origin column shows the emitting module, function and
    line; otherwise it is the fixed tag "DF".
    """
    origin = (
        "<cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan>"
        if debug
        else "<cyan>DF</cyan>"
    )
    return (
        "<green>{time:YYYY-MM-DD HH:mm:ss}</green>"
        " | <level>{level: <8}</level>"
        " | " + origin + " | <level>{message}</level>"
    )
def log_metrics(prefix: str, metrics: Dict[str, Number]):
    """Log all metrics on a single line.

    The line is `prefix` followed by one " | name: value" segment per metric,
    with names in sorted order and values formatted to 5 significant digits.
    """
    segments = (f" | {name}: {value:.5g}" for name, value in sorted(metrics.items()))
    logger.info(prefix + "".join(segments))
def log_model_summary(model: torch.nn.Module, verbose=False):
    """Log the model's parameter count and MACs, estimated via ptflops.

    Synthesizes one second of random input from the ModelParams configuration
    and feeds it through the model once.

    Args:
        model: the model to profile.
        verbose: also print per-layer statistics via ptflops.
    """
    import ptflops

    from df.model import ModelParams

    # Generate input of 1 second audio
    # Necessary inputs are:
    #   spec: [B, 1, T, F, 2], F: freq bin
    #   feat_erb: [B, 1, T, E], E: ERB bands
    #   feat_spec: [B, 2, T, C*2], C: Complex features
    # NOTE(review): feat_spec below is built as [B, 1, T, C, 2], which does not
    # match the [B, 2, T, C*2] layout in the comment above -- confirm which
    # layout the model actually expects.
    p = ModelParams()
    b = 1
    t = p.sr // p.hop_size  # number of frames in one second of audio
    device = get_device()
    spec = torch.randn([b, 1, t, p.fft_size // 2 + 1, 2]).to(device)
    feat_erb = torch.randn([b, 1, t, p.nb_erb]).to(device)
    feat_spec = torch.randn([b, 1, t, p.nb_df, 2]).to(device)
    # input_constructor ignores ptflops' default shape tuple and supplies the
    # pre-built tensors as keyword arguments instead.
    macs, params = ptflops.get_model_complexity_info(
        model,
        (t,),
        input_constructor=lambda _: {"spec": spec, "feat_erb": feat_erb, "feat_spec": feat_spec},
        as_strings=False,
        print_per_layer_stat=verbose,
        verbose=verbose,
    )
    logger.info(f"Model complexity: {params/1e6:.3f}M #Params, {macs/1e6:.1f}M MACS")
| 31.397727 | 97 | 0.596453 | 385 | 2,763 | 4.12987 | 0.335065 | 0.037736 | 0.011321 | 0.010063 | 0.183648 | 0.127044 | 0.101887 | 0.067925 | 0.067925 | 0.067925 | 0 | 0.01117 | 0.254796 | 2,763 | 87 | 98 | 31.758621 | 0.761049 | 0.064785 | 0 | 0.119403 | 0 | 0.014925 | 0.218774 | 0.0609 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059701 | false | 0 | 0.134328 | 0 | 0.238806 | 0.014925 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d8530c0d1846c03825ab65f90bae48db4ebfd24 | 8,232 | py | Python | src/svtk/svtk/baf/BAFpysam.py | talkowski-lab/gnomad-sv-v3-qc | db23760af7bc21a776e14f6ca1fbc213ff0ff9a1 | [
"BSD-3-Clause"
] | null | null | null | src/svtk/svtk/baf/BAFpysam.py | talkowski-lab/gnomad-sv-v3-qc | db23760af7bc21a776e14f6ca1fbc213ff0ff9a1 | [
"BSD-3-Clause"
] | null | null | null | src/svtk/svtk/baf/BAFpysam.py | talkowski-lab/gnomad-sv-v3-qc | db23760af7bc21a776e14f6ca1fbc213ff0ff9a1 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from scipy import stats
import numpy as np
import os
import os.path
from sklearn import mixture
def Deltest(F, M, E, length, crit=0.01, thres1=0.0005):
    """Compute the deletion statistic for one sample over an SV region.

    F, M and E are SNP counts before, inside and after a region of the given
    length. Returns the literal string "ROH" when the middle density and at
    least one flank density fall below the per-base threshold; otherwise the
    log10 ratio of the (smoothed) middle count to the smaller flank count.
    `crit` is unused; it is kept for interface compatibility.
    """
    # Cap the per-base threshold so long regions use at most 50 expected SNPs.
    threshold = min(50 / length, thres1)
    middle_low = M / length < threshold
    flank_low = F / length < threshold or E / length < threshold
    if middle_low and flank_low:
        return "ROH"
    flank = min(F, E)
    # threshold*length acts as a pseudo-count keeping the ratio finite at zero.
    return np.log10((M + threshold * length) / (flank + threshold * length))
def ROH(F, M, E, length, thres=0.0001):
    """Return True when the smallest of the three SNP counts falls below
    length * thres, i.e. the region looks like a run of homozygosity."""
    return min(F, M, E) < length * thres
class DeletionTest:
    """Tests proband samples for a deletion signal in an SV region.

    Builds a background of Deltest statistics from non-ROH control samples,
    fits a Bayesian Gaussian mixture over them when enough controls exist,
    and scores proband statistics against that background.
    """

    def __init__(self,obj,probands,length):
        """Collect per-sample F/M/E counts and the control background.

        Args:
            obj: het table (pandas DataFrame) with columns 'sample',
                'before', 'inside', 'after' -- TODO confirm schema against caller.
            probands: python list of proband IDs.
            length: length of the SV region.
        """
        self.length=length  # length of SV
        self.obj=obj  # het file
        self.probands=probands  # python list of proband IDs
        self.count={}  # record of FME count and Deltest statistic for everyone
        self.nullratio=[]  # list of del statistic for non-ROH controls
        if self.obj.shape[0]==0:
            # Empty table: no background can be built.
            self.nullavg='nan'  # NOTE(review): string sentinel, not float('nan')
            self.ns=0
        else:
            nsROH=0  # total number of SNP in nonROH controls in SV region
            ns=0  # total number of SNPs in SV region
            for index, row in self.obj.iterrows():
                F=row['before']
                M=row['inside']
                E=row['after']
                self.count[row['sample']]={'F':F,'M':M,'E':E,'Ratio':Deltest(F,M,E,self.length)}
                if row['sample'] not in self.probands and Deltest(F,M,E,self.length)!='ROH':
                    self.nullratio.append(Deltest(F,M,E,self.length))
                    nsROH+=M
                ns+=M
            # +1 guards against division by zero when no control passes the filter.
            self.nullavg=nsROH/(len(self.nullratio)+1)
            self.ns=ns
            self.nullratio=np.array(self.nullratio).reshape(-1,1)
            if len(self.nullratio)>10:
                # Background model; only fitted with enough control statistics.
                # Ttest() guards on the same >10 condition before using it.
                self.gmm = mixture.BayesianGaussianMixture(n_components=3, covariance_type='spherical').fit(self.nullratio)

    def Ttest(self,sample):
        """Score the given samples' Deltest statistics against the background.

        Returns a (mean, score) pair, where mean is the average of 10**-stat
        over the samples and score is the GMM log-likelihood, or a
        ('nan', reason) pair when the test cannot be run.
        """
        testlist=[self.count[x]['Ratio'] for x in sample if self.count[x]['Ratio']!='ROH']
        if len(self.nullratio)<=10 or max(self.nullratio)-min(self.nullratio)<0.0001:
            # Too few or too uniform controls: no usable background.
            return 'nan',"ROHregion"
        elif len(testlist)==0:
            return 'nan',"ROH"
        elif len(testlist)>len(self.nullratio) or self.ns<10:
            return 'nan',"Potential ROHregion or reference error"
        elif len(testlist)==1:
            stat=testlist[0]
            if stat=="ROH":
                # Defensive: testlist already excludes 'ROH' entries.
                return 'nan',"ROH"
            else:
                ans=self.gmm.score(np.array([stat]).reshape(-1,1))
                return 10**-stat,ans
        else:
            ans=self.gmm.score(np.array(testlist).reshape(-1,1))
            mean=np.mean([10**-x for x in testlist])
            return mean,ans

    def stats(self,sample):
        """Return a tab/comma-delimited summary string for the given samples:
        SNP counts, non-ROH sample counts, and background sizes."""
        nsnp=0
        for x in sample:
            nsnp+=self.count[x]['M']
        testlist=[self.count[x]['Ratio'] for x in sample if self.count[x]['Ratio']!='ROH']
        nsamplenullratio=len(self.nullratio)
        nonrohsample=len(testlist)
        nsample=len(sample)
        nnorm=len(self.count.keys())-nsample
        return str(nsnp)+','+str(self.ns)+'\t'+str(nonrohsample)+','+str(nsample)+'\t'+str(self.nullavg)+','+str(nsamplenullratio)+','+str(nnorm)
class KS2sample:
    """Two-sample Kolmogorov-Smirnov test of proband BAF values against
    the pooled BAF values of all control samples."""

    def __init__(self, obj, probands):
        """Split the BAF table into control values and per-proband values.

        Args:
            obj: pandas DataFrame with columns 'sample' and 'baf'.
            probands: iterable of proband sample IDs.
        """
        self.obj = obj
        self.probands = probands
        self.controlst = []  # BAF values pooled over all control samples
        self.dct = {}        # proband sample ID -> list of its BAF values
        if obj.shape[0] == 0:
            # Preserved quirk of the original: mean only exists for empty input.
            self.mean = ''
        for _, row in self.obj.iterrows():
            name = row['sample']
            baf = row['baf']
            if name in probands:
                self.dct.setdefault(name, []).append(baf)
            else:
                self.controlst.append(baf)

    def test(self, samples):
        """KS-test the pooled BAFs of `samples` against the control pool.

        Returns the scipy ks_2samp result, or ('nan', reason) when either
        side has no data.
        """
        testset = []
        for name in samples:
            testset.extend(self.dct.get(name, []))
        if not testset:
            return 'nan', "lowSNPs"
        if not self.controlst:
            return 'nan', "noBG"
        return stats.ks_2samp(testset, self.controlst)
#############
# import sys
# [_,txt,het,chr,start,end,cnvid,sample,type]=sys.argv
# samplelst=sample.split(",")
# Del=DeletionTest(het,samplelst,int(end)-int(start))
# delp=Del.Ttest(samplelst)
# KS=KS2sample(txt,samplelst)
# ksp=KS.test(samplelst)
# stats=Del.stats(samplelst)
# print(chr+'\t'+start+'\t'+end+'\t'+cnvid+'\t'+sample+'\t'+type+'\t'+str(delp)+"\t"+str(ksp)+'\t'+stats)
| 39.014218 | 145 | 0.539237 | 1,040 | 8,232 | 4.246154 | 0.177885 | 0.079484 | 0.02038 | 0.027174 | 0.507246 | 0.463089 | 0.417799 | 0.405118 | 0.405118 | 0.399004 | 0 | 0.019329 | 0.308673 | 8,232 | 210 | 146 | 39.2 | 0.756633 | 0.398081 | 0 | 0.174757 | 0 | 0 | 0.042949 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067961 | false | 0 | 0.048544 | 0 | 0.271845 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d87394aa03f1901f2b10c5dd1230fa13966fdb3 | 602 | py | Python | collapsible_garden/water_level_sensor.py | katienaha/collapsible_garden | b3f52e9083d1d9584e5da7289c1ce560f793ef18 | [
"MIT"
] | null | null | null | collapsible_garden/water_level_sensor.py | katienaha/collapsible_garden | b3f52e9083d1d9584e5da7289c1ce560f793ef18 | [
"MIT"
] | null | null | null | collapsible_garden/water_level_sensor.py | katienaha/collapsible_garden | b3f52e9083d1d9584e5da7289c1ce560f793ef18 | [
"MIT"
] | null | null | null |
# External module imports
import RPi.GPIO as GPIO
# Sensor that checks whether water levels have gone too low
class WaterLevelSensor:
    """Wraps one GPIO input pin that reports whether the water level is too low."""

    def __init__(self, pin):
        # GPIO pin number wired to the water level sensor.
        self.pin = pin
        # Last observed reading; True when the level was too low.
        self.is_too_low = False

    def check_water_level(self):
        """Sample the sensor pin, cache the result on the instance, and return it."""
        print('pin {} is {}'.format(self.pin, GPIO.input(self.pin)))
        self.is_too_low = GPIO.input(self.pin) == GPIO.HIGH
        return self.is_too_low
| 25.083333 | 68 | 0.642857 | 90 | 602 | 4.144444 | 0.477778 | 0.096515 | 0.107239 | 0.128686 | 0.091153 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.277409 | 602 | 23 | 69 | 26.173913 | 0.857471 | 0.302326 | 0 | 0.166667 | 0 | 0 | 0.029197 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.083333 | 0 | 0.416667 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d882e613aec8852e2007e1e358a346706de1051 | 782 | py | Python | swap/__init__.py | afourmy/SWAPON | 9dfc980ff4b5096f42e9fe5891873b465a98e88d | [
"MIT"
] | 7 | 2018-03-28T10:21:22.000Z | 2018-07-03T18:37:30.000Z | swap/__init__.py | afourmy/SWAPON | 9dfc980ff4b5096f42e9fe5891873b465a98e88d | [
"MIT"
] | 1 | 2018-03-28T13:32:45.000Z | 2018-03-28T13:32:45.000Z | swap/__init__.py | afourmy/SWAPON | 9dfc980ff4b5096f42e9fe5891873b465a98e88d | [
"MIT"
] | 1 | 2018-03-28T13:30:23.000Z | 2018-03-28T13:30:23.000Z | """Application and database initialization."""
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
from swap.routes import swap
def configure_database(app):
    """Handle database initialization and shutdown."""

    def initialize_database():
        # Create all tables on the first request to the application.
        db.create_all()

    def shutdown_session(exception=None):
        # Drop the scoped session when each request ends.
        db.session.remove()

    app.before_first_request(initialize_database)
    app.teardown_request(shutdown_session)
def create_app():
    """Flask app creation and configuration."""
    app = Flask(__name__)
    # Apply all settings in one shot; keys and values are unchanged.
    app.config.update({
        'SECRET_KEY': 'key',
        'SQLALCHEMY_TRACK_MODIFICATIONS': False,
        'SQLALCHEMY_DATABASE_URI': 'sqlite:///database.db',
    })
    app.register_blueprint(swap)
    db.init_app(app)
    configure_database(app)
    return app
| 24.4375 | 67 | 0.712276 | 93 | 782 | 5.763441 | 0.451613 | 0.050373 | 0.074627 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.176471 | 782 | 31 | 68 | 25.225806 | 0.832298 | 0.157289 | 0 | 0 | 0 | 0 | 0.135303 | 0.115086 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.15 | 0 | 0.4 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d8877106d1619c09d6a3b5167c9ba5af60e927b | 1,917 | py | Python | engines/vecm.py | BBVA/timecop | 0ff5c679ecf62c943e0bb31f561d4f601822a781 | [
"Apache-2.0"
] | 79 | 2018-08-13T08:36:33.000Z | 2022-03-27T05:20:07.000Z | engines/vecm.py | BBVA/timecop | 0ff5c679ecf62c943e0bb31f561d4f601822a781 | [
"Apache-2.0"
] | 14 | 2018-10-11T10:06:55.000Z | 2020-09-02T23:50:49.000Z | engines/vecm.py | BBVA/timecop | 0ff5c679ecf62c943e0bb31f561d4f601822a781 | [
"Apache-2.0"
] | 22 | 2018-08-08T08:17:47.000Z | 2021-08-03T11:47:03.000Z | import numpy as np
from matplotlib import pyplot as plt
import statsmodels.tsa.vector_ar.vecm as vecm
import pandas as pd
from . engine_output_creation import engine_output_creation
def anomaly_vecm(list_var, num_fut=5, desv_mse=2, train=True, name='model-name'):
    """Detect anomalies and forecast the first series with a VECM.

    Fits a VECM on the first 70% of the joined series, scores the remaining
    30% for anomalies via the project's engine-output helper, then refits on
    the full data to forecast `num_fut` steps ahead.

    Args:
        list_var: list of equally-long 1-D series; the first one is the
            monitored variable, the rest are cointegrated covariates.
        num_fut: number of future steps to forecast.
        desv_mse, train, name: unused; kept for interface compatibility.

    Returns:
        The engine output dict produced by engine_output_creation.

    Fixes over the previous version: the loop variable no longer shadows the
    builtin `list`, and the test slice is copied so column assignments do not
    write into a view of the original frame.
    """
    df_var = pd.DataFrame()
    for i in range(len(list_var)):
        df_var['var_{}'.format(i)] = list_var[i]

    # Train/test split: first 70% trains the model, the rest is scored.
    tam_train = int(len(df_var) * 0.7)
    df_train = df_var[:tam_train]
    print('Tamanio train: {}'.format(df_train.shape))
    # .copy() so the 'puntos'/'valores' assignments below do not hit a view.
    df_test = df_var[tam_train:].copy()

    lag_order = vecm.select_order(data=df_train, maxlags=10, deterministic="ci", seasons=0)
    rank_test = vecm.select_coint_rank(df_train, 0, 3, method="trace", signif=0.01)
    print("pasa")
    model = vecm.VECM(df_train, deterministic="ci", seasons=4, coint_rank=rank_test.rank)
    print("define")
    vecm_res = model.fit()
    futures = vecm_res.predict(steps=len(df_test))
    # Keep only the first component: the monitored variable.
    result = [step[0] for step in futures]

    engine = engine_output_creation('vecm')
    print("empieza")
    df_test['puntos'] = df_test.index
    df_test['valores'] = df_test[df_var.columns[0]]
    engine.alerts_creation(result, df_test)
    engine.metrics_generation(df_test[df_test.columns[0]].values, result)
    engine.debug_creation(result, df_test)

    # Refit on the full series to produce the forward forecast.
    lag_order = vecm.select_order(data=df_var, maxlags=10, deterministic="ci", seasons=4)
    rank_test = vecm.select_coint_rank(df_var, 0, 3, method="trace", signif=0.01)
    print("pasa")
    model = vecm.VECM(df_var, deterministic="ci", seasons=4, coint_rank=rank_test.rank)
    print("define")
    vecm_res = model.fit()
    futures = vecm_res.predict(steps=num_fut)
    result = [step[0] for step in futures]
    engine.forecast_creation(result, df_var.shape[0], num_fut)
    return engine.engine_output
| 30.919355 | 93 | 0.715702 | 302 | 1,917 | 4.311258 | 0.294702 | 0.038402 | 0.067588 | 0.052995 | 0.448541 | 0.400922 | 0.400922 | 0.311828 | 0.311828 | 0.311828 | 0 | 0.017597 | 0.140323 | 1,917 | 61 | 94 | 31.42623 | 0.772451 | 0.053208 | 0 | 0.285714 | 0 | 0 | 0.052632 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0 | 0.119048 | 0 | 0.142857 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d88ff2f19c547b52af367b081354284bd65b5d4 | 1,104 | py | Python | tests/playlist/test_playlist_init.py | fdenivac/python-qobuz | 7a70d12b80d022541af5e210f75d8943c8bd54a1 | [
"MIT"
] | 11 | 2020-04-19T00:47:47.000Z | 2022-02-04T15:39:08.000Z | tests/playlist/test_playlist_init.py | fdenivac/python-qobuz | 7a70d12b80d022541af5e210f75d8943c8bd54a1 | [
"MIT"
] | 1 | 2020-05-02T17:11:32.000Z | 2020-05-02T17:11:32.000Z | tests/playlist/test_playlist_init.py | netsuso/python-qobuz | 13a9bfeca2a23b5819f6bdaaad9edf7309ab9443 | [
"MIT"
] | 4 | 2020-04-20T16:36:21.000Z | 2021-03-20T01:56:48.000Z | import pytest
import qobuz
import responses
from tests.resources.responses import playlist_create_json
from tests.resources.fixtures import playlist
@pytest.fixture
def app():
qobuz.api.register_app(app_id="request_from_api@qobuz.com")
def get_url(playlist_id):
return (
qobuz.api.API_URL
+ "playlist/get"
+ "?playlist_id={}".format(playlist_id)
+ "&app_id={}".format(qobuz.api.APP_ID)
)
def test_playlist_init(app):
playlist = qobuz.Playlist(playlist_create_json)
assert playlist.id == playlist_create_json["id"]
assert playlist.name == playlist_create_json["name"]
assert playlist.description == playlist_create_json["description"]
def test_playlist_from_id(app, playlist):
with responses.RequestsMock() as response_mock:
response_mock.add(
responses.GET,
url=get_url(playlist.id),
json=playlist_create_json,
status=200,
match_querystring=True,
)
playlist_from_id = qobuz.Playlist.from_id(playlist.id)
assert playlist_from_id == playlist
| 25.090909 | 70 | 0.689312 | 137 | 1,104 | 5.284672 | 0.284672 | 0.116022 | 0.149171 | 0.044199 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00344 | 0.210145 | 1,104 | 43 | 71 | 25.674419 | 0.826835 | 0 | 0 | 0 | 0 | 0 | 0.072464 | 0.023551 | 0 | 0 | 0 | 0 | 0.129032 | 1 | 0.129032 | false | 0 | 0.16129 | 0.032258 | 0.322581 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d8ed691bc1e2cfc66de444bb1d05af62280c187 | 1,787 | py | Python | src/models/operations/densenet.py | takedarts/skipresnet | d6f1e16042f8433a287355009e17e4e5768ad319 | [
"MIT"
] | 3 | 2022-02-03T13:25:12.000Z | 2022-02-04T16:12:23.000Z | src/models/operations/densenet.py | takedarts/skipresnet | d6f1e16042f8433a287355009e17e4e5768ad319 | [
"MIT"
] | null | null | null | src/models/operations/densenet.py | takedarts/skipresnet | d6f1e16042f8433a287355009e17e4e5768ad319 | [
"MIT"
] | 1 | 2022-02-04T12:28:02.000Z | 2022-02-04T12:28:02.000Z | import collections
from typing import Callable
import torch.nn as nn
from ..modules import DropBlock
class DenseNetOperation(nn.Sequential):
'''
Operation class for DenseNets.
'''
def __init__(
self,
in_channels: int,
out_channels: int,
stride: int,
growth: int,
expansion: int,
normalization: Callable[..., nn.Module],
activation: Callable[..., nn.Module],
dropblock: bool,
**kwargs,
) -> None:
if stride != 1:
super().__init__(collections.OrderedDict((n, m) for n, m in [
('norm1', normalization(in_channels)),
('act1', activation(inplace=True)),
('conv1', nn.Conv2d(
in_channels, out_channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('pool1', nn.AvgPool2d(kernel_size=2, stride=stride)),
] if m is not None))
else:
channels = growth * expansion
super().__init__(collections.OrderedDict((n, m) for n, m in [
('norm1', normalization(in_channels)),
('drop1', None if not dropblock else DropBlock()),
('act1', activation(inplace=True)),
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm2', normalization(channels)),
('drop2', None if not dropblock else DropBlock()),
('act2', activation(inplace=True)),
('conv2', nn.Conv2d(
channels, growth, kernel_size=3, padding=1,
stride=1, bias=False)),
] if m is not None))
| 35.039216 | 73 | 0.521544 | 181 | 1,787 | 5.022099 | 0.348066 | 0.055006 | 0.069307 | 0.068207 | 0.459846 | 0.433443 | 0.365237 | 0.365237 | 0.365237 | 0.259626 | 0 | 0.025087 | 0.353106 | 1,787 | 50 | 74 | 35.74 | 0.761246 | 0.016788 | 0 | 0.285714 | 0 | 0 | 0.03274 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0 | 0.095238 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d92af4b43cb3e937840bf4564f0bebb96606407 | 1,664 | py | Python | src/de_direct/ipfs_utils.py | tubleronchik/drone_passport_agent | e85bbc8b4777b2c93c3564c22336cb13bb93bf5f | [
"BSD-3-Clause"
] | null | null | null | src/de_direct/ipfs_utils.py | tubleronchik/drone_passport_agent | e85bbc8b4777b2c93c3564c22336cb13bb93bf5f | [
"BSD-3-Clause"
] | null | null | null | src/de_direct/ipfs_utils.py | tubleronchik/drone_passport_agent | e85bbc8b4777b2c93c3564c22336cb13bb93bf5f | [
"BSD-3-Clause"
] | null | null | null | import os
import rospy
from shutil import move
from tempfile import gettempdir, NamedTemporaryFile
from ipfshttpclient import connect
from rosbag import Bag
def ipfs_download_txt_file(ipfs_hash: str) -> str:
temp_log = NamedTemporaryFile(delete=False)
ipfs_download_file(connect(), ipfs_hash, temp_log.name)
with open(temp_log.name) as f:
return f.read()
def ipfs_download_file(ipfs_client, multihash, filepath):
file_dst = filepath
dst_dir, dst_file = os.path.split(file_dst)
if not os.path.isdir(dst_dir):
try:
os.mkdir(dst_dir)
except Exception as e:
rospy.logerr("Directory %s does not exists and cannot be created: %s", e)
return False
if os.path.isdir(file_dst):
rospy.logwarn(
"Collision between existed directory and IPFS downloading file destination \"%s\". Please fix it manually.",
file_dst)
return False
try:
tempdir = gettempdir()
os.chdir(tempdir)
ipfs_client.get(multihash)
move(tempdir + os.path.sep + multihash, file_dst)
except Exception as e:
rospy.logerr("Failed to download %s to %s with exception: %s", multihash, file_dst, e)
return True
def ipfs_download(multihash):
tempdir = gettempdir()
os.chdir(tempdir)
temp_obj = NamedTemporaryFile(delete=False)
res = ipfs_download_file(connect(), multihash.multihash, temp_obj.name)
if not res:
raise Exception("Can't download objective")
messages = {}
for topic, msg, timestamp in Bag(temp_obj.name, 'r').read_messages():
messages[topic] = msg
return messages
| 30.254545 | 120 | 0.672476 | 221 | 1,664 | 4.923077 | 0.384615 | 0.038603 | 0.04136 | 0.042279 | 0.110294 | 0.053309 | 0 | 0 | 0 | 0 | 0 | 0 | 0.23738 | 1,664 | 54 | 121 | 30.814815 | 0.857368 | 0 | 0 | 0.227273 | 0 | 0 | 0.135298 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068182 | false | 0 | 0.136364 | 0 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d94415ab82731c0dda5e02646e86a001b75c57f | 5,200 | py | Python | AutoBookTKB/AutoBookTKB.py | heyfey/AutoBookseatTKB | 9d0b27a3227e7ca5975a6b1fd3749f5b2ed6aa75 | [
"MIT"
] | 2 | 2019-07-09T08:32:35.000Z | 2019-09-30T19:02:35.000Z | AutoBookTKB/AutoBookTKB.py | heyfey/AutoBookTKB | 9d0b27a3227e7ca5975a6b1fd3749f5b2ed6aa75 | [
"MIT"
] | 1 | 2018-08-22T03:52:13.000Z | 2018-08-22T03:52:13.000Z | AutoBookTKB/AutoBookTKB.py | heyfey/AutoBookTKB | 9d0b27a3227e7ca5975a6b1fd3749f5b2ed6aa75 | [
"MIT"
] | null | null | null | # !/usr/bin/python
# -*-coding:utf-8 -*-
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class AutoBookTKB:
def __init__(self, settings):
import json
with open(settings, 'r', encoding="utf-8") as fp:
self.settings = json.load(fp)
with open('locationList.json', 'r', encoding="utf-8") as fp:
self.location_list = json.load(fp)
fp.close()
self.driver = webdriver.Chrome()
self.driver.get("http://bookseat.tkblearning.com.tw/book-seat/student/bookSeat/index")
self.wait = WebDriverWait(self.driver, 60)
def login(self):
element = self.driver.find_element_by_id("id")
element.clear()
element.send_keys(self.settings['id'])
element = self.driver.find_element_by_id("pwd")
element.clear()
element.send_keys(self.settings['password'])
element = self.driver.find_element_by_id("logininputcode")
element.click()
element.clear()
code = self.driver.execute_script("return LonginSecurityCode;")
element.send_keys(code)
self.click_send()
def click_send(self):
element = self.driver.find_element_by_link_text(u"送出")
element.click()
def wait_until_noon_or_midnight(self):
import datetime, time
midnight = datetime.datetime.replace(
datetime.datetime.now() + datetime.timedelta(days=1),
hour=0, minute=0, second=0)
noon = datetime.datetime.now().replace(hour=12, minute=0, second=0)
now = datetime.datetime.now()
delta = noon - now
if delta.days < 0: # It's afternoon now, wait until midnight.
delta = midnight - now
print("Current time : " + time.strftime("%Y-%m-%d %H:%M:%S"))
print("Sleep for " + str(delta.seconds) + " seconds..."
"do not close this window and the web driver.")
time.sleep(delta.seconds)
def refresh(self):
"""Refresh current page."""
self.driver.refresh()
def select_class(self):
element = self.driver.find_element_by_id("class_selector")
element.click()
Select(element).select_by_index(self.settings['classIndex'])
element.click()
def send_securitycode(self):
element = self.driver.find_element_by_id("userinputcode")
element.click()
element.clear()
code = self.driver.execute_script("return SecurityCode;")
element.send_keys(code)
def select_location(self):
location_value = self.location_list[self.settings['location']]
element = self.wait.until(
EC.presence_of_element_located((
By.CSS_SELECTOR,
"option[value=%s]" % location_value
))
)
element = self.driver.find_element_by_id("branch_selector")
element.click()
Select(element).select_by_value(location_value)
element.click()
def select_date(self):
    """Select the newest date.

    Targets today + 6 days (apparently the furthest bookable date — TODO
    confirm) and waits for its option, formatted YYYY-MM-DD, to render
    before selecting it.
    """
    import datetime
    date = datetime.date.today() + datetime.timedelta(days=6)
    element = self.wait.until(
        EC.presence_of_element_located((
            By.CSS_SELECTOR,
            "option[value='%d-%02d-%02d']" % (date.year, date.month,
                                              date.day)
        ))
    )
    element = self.driver.find_element_by_id("date_selector")
    element.click()
    # str(date) yields the same YYYY-MM-DD string used in the CSS selector.
    Select(element).select_by_value(str(date))
    element.click()
def select_sessions(self):
    """Tick the checkbox for each session time listed in the settings.

    Waits for the session list to render, then clicks every configured
    session that actually exists on the page; absent sessions are
    skipped silently.
    """
    self.wait.until(
        EC.presence_of_element_located((By.ID, "session_time_div"))
    )
    for session_value in self.settings['sessions']:
        xpath = '//input[@value="%d"]' % session_value
        # find_elements returns [] instead of raising when nothing
        # matches, so one query both tests existence and yields the
        # element (the original looked the element up twice).
        matches = self.driver.find_elements_by_xpath(xpath)
        if matches:
            matches[0].click()
def accept_alerts(self):
    """Keep accepting alerts until there's a result."""
    # The site reports progress via a chain of JS alerts; keep waiting
    # for and dismissing them until one carries a terminal message.
    while self.wait.until(EC.alert_is_present()):
        if self.accept_one_alert():
            break
def accept_one_alert(self):
    """Handle a single alert; return True when it ends the booking run.

    Returns:
        True if the alert text contains a terminal phrase (full, success,
        nothing selected, error, ...); otherwise accepts the alert and
        returns None so the caller keeps polling.
    """
    # switch_to.alert replaces switch_to_alert(), which was deprecated
    # and removed in Selenium 4.
    alert = self.driver.switch_to.alert
    # Read the text once instead of re-fetching it per comparison.
    text = alert.text
    print('**' + text + '**')
    terminal_phrases = [u'已滿', u'請勾選場次時間', u'預約成功', u'請選擇', u'異常']
    if any(phrase in text for phrase in terminal_phrases):
        return True
    alert.accept()
def main(self):
    """Run the whole booking workflow from login to confirmation.

    Steps: log in, sleep until the booking window opens, refresh, fill
    every selector in page order, submit, then drain the result alerts.
    """
    print("Mission started...")
    self.login()
    self.wait_until_noon_or_midnight()
    self.refresh()
    self.select_class()
    self.send_securitycode()
    self.select_location()
    self.select_date()
    self.select_sessions()
    self.click_send()
    self.accept_alerts()
    # Fixed typo in the user-facing message ("Plese" -> "Please").
    print("Task completed. Please check your booking:)")
if __name__ == '__main__':
    # Entry point: settings (credentials, class index, location,
    # sessions, ...) are read from the JSON file next to the script.
    atb = AutoBookTKB('AutoBookTKB-settings.json')
    atb.main()
| 32.704403 | 94 | 0.600962 | 614 | 5,200 | 4.907166 | 0.28013 | 0.059741 | 0.051112 | 0.069698 | 0.345171 | 0.338201 | 0.273813 | 0.166611 | 0.100232 | 0.100232 | 0 | 0.005029 | 0.273462 | 5,200 | 158 | 95 | 32.911392 | 0.792483 | 0.032692 | 0 | 0.214876 | 0 | 0 | 0.115323 | 0.010575 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107438 | false | 0.008264 | 0.066116 | 0 | 0.190083 | 0.041322 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d957ac7f202a3c53e2756d95e0827c954759d24 | 1,245 | py | Python | scale/storage/migrations/0016_populate_data_type_tags.py | kaydoh/scale | 1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee | [
"Apache-2.0"
] | 121 | 2015-11-18T18:15:33.000Z | 2022-03-10T01:55:00.000Z | scale/storage/migrations/0016_populate_data_type_tags.py | kaydoh/scale | 1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee | [
"Apache-2.0"
] | 1,415 | 2015-12-23T23:36:04.000Z | 2022-01-07T14:10:09.000Z | scale/storage/migrations/0016_populate_data_type_tags.py | kaydoh/scale | 1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee | [
"Apache-2.0"
] | 66 | 2015-12-03T20:38:56.000Z | 2020-07-27T15:28:11.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import connection, migrations
def populate_data_type_tags(apps, schema_editor):
    """Backfill scale_file.data_type_tags from the legacy data_type column.

    Issues one raw UPDATE that splits the comma-separated data_type
    string into an array; string_to_array is PostgreSQL-specific.
    """
    # Go through all of the ScaleFile models and convert the data_type string into an array of tags
    update = 'UPDATE scale_file SET data_type_tags = string_to_array(data_type,\',\') WHERE data_type <> \'\''
    with connection.cursor() as cursor:
        cursor.execute(update)
        # rowcount is how many rows the UPDATE touched.
        count = cursor.rowcount
        if count:
            print('%d entries updated with data type tags' % count)
    print ('Migration finished.')
def non_null_metadata(apps, schema_editor):
    """Normalize ScaleFile.meta_data so no row holds a null value.

    Both the literal string 'null' and SQL NULL are rewritten to an
    empty dict, using the historical model as required inside migrations.
    """
    ScaleFile = apps.get_model('storage', 'ScaleFile')
    # Capture Null values for the meta_data field
    print('Fixing null metadata...')
    ScaleFile.objects.filter(meta_data='null').update(meta_data={})
    ScaleFile.objects.filter(meta_data__isnull=True).update(meta_data={})
    print('Fixed null metadata')
class Migration(migrations.Migration):
    """Data migration: clean null metadata, then derive data_type tags.

    NOTE: neither RunPython step supplies reverse_code, so this
    migration cannot be reversed.
    """

    dependencies = [
        ('storage', '0015_scalefile_data_type_tags'),
    ]

    operations = [
        migrations.RunPython(non_null_metadata),
        migrations.RunPython(populate_data_type_tags),
    ]
| 32.763158 | 110 | 0.683534 | 152 | 1,245 | 5.355263 | 0.486842 | 0.078624 | 0.07371 | 0.04914 | 0.07371 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005097 | 0.212048 | 1,245 | 37 | 111 | 33.648649 | 0.824669 | 0.127711 | 0 | 0 | 0 | 0 | 0.224584 | 0.051756 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.291667 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d9b1fa7fc8c23b3d41158678831aba2fa1fcc52 | 1,436 | py | Python | cybereason_consts.py | splunk-soar-connectors/cybereason | f53892478b9bd75415d0b3eb984d5818bce9185c | [
"Apache-2.0"
] | null | null | null | cybereason_consts.py | splunk-soar-connectors/cybereason | f53892478b9bd75415d0b3eb984d5818bce9185c | [
"Apache-2.0"
] | 2 | 2021-11-09T20:46:34.000Z | 2021-11-25T01:20:52.000Z | cybereason_consts.py | splunk-soar-connectors/cybereason | f53892478b9bd75415d0b3eb984d5818bce9185c | [
"Apache-2.0"
] | 1 | 2021-11-12T09:55:02.000Z | 2021-11-12T09:55:02.000Z | # File: cybereason_consts.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
# Maps the status labels used on the Phantom side to the status codes
# the Cybereason API expects.
PHANTOM_TO_CYBEREASON_STATUS = {
    'Unread': "UNREAD",
    'To Review': "TODO",
    'Not Relevant': "FP",
    'Remediated': "CLOSE",
    # NOTE(review): key is misspelled ("Reopend"); callers may depend on
    # the exact string, so it is left unchanged here.
    'Reopend': "REOPEN",
    'Under Investigation': "OPEN"
}

# Accepted values for the custom-reputation action parameter.
CUSTOM_REPUTATION_LIST = ["whitelist", "blacklist", "remove"]

# Constants relating to '_get_error_message_from_exception'
ERR_CODE_MSG = "Error code unavailable"
ERR_MSG_UNAVAILABLE = "Error message unavailable. Please check the asset configuration and|or action parameters"

# Constants relating to '_validate_integer'; the '{}' placeholder is
# filled with one of the *_KEY strings below.
INVALID_INTEGER_ERR_MSG = "Please provide a valid integer value in the {}"
INVALID_NON_NEGATIVE_INTEGER_ERR_MSG = "Please provide a valid non-negative integer value in the {}"

MALOP_HISTORICAL_DAYS_KEY = "malop_historical_days asset configuration parameter"
MALWARE_HISTORICAL_DAYS_KEY = "malware_historical_days asset configuration parameter"
| 42.235294 | 112 | 0.770891 | 197 | 1,436 | 5.441624 | 0.568528 | 0.05597 | 0.024254 | 0.029851 | 0.136194 | 0.059701 | 0.059701 | 0 | 0 | 0 | 0 | 0.003265 | 0.146936 | 1,436 | 33 | 113 | 43.515152 | 0.871837 | 0.451253 | 0 | 0 | 0 | 0 | 0.560155 | 0.056921 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d9c32db10f13bc5f70c8f9104956589288f639d | 4,242 | py | Python | creme/compose/union.py | IFV/creme | a7393b534489422ba156f2d2e83fb777afbd2efb | [
"BSD-3-Clause"
] | null | null | null | creme/compose/union.py | IFV/creme | a7393b534489422ba156f2d2e83fb777afbd2efb | [
"BSD-3-Clause"
] | 1 | 2022-02-10T06:24:42.000Z | 2022-02-10T06:24:42.000Z | creme/compose/union.py | igorol/creme | 60977c4accfdca08cfd76a162095ff738ef87281 | [
"BSD-3-Clause"
] | 1 | 2021-04-16T08:27:14.000Z | 2021-04-16T08:27:14.000Z | import collections
import types
try:
import graphviz
GRAPHVIZ_INSTALLED = True
except ImportError:
GRAPHVIZ_INSTALLED = False
from .. import base
from . import func
__all__ = ['TransformerUnion']
class TransformerUnion(collections.UserDict, base.Transformer):
    """Packs multiple transformers into a single one.

    Calling ``transform_one`` will concatenate each transformer's output using a
    `collections.ChainMap`.

    Parameters:
        transformers (list): transformers to pack together.

    Example:

        ::

            >>> from pprint import pprint
            >>> import creme.compose
            >>> import creme.feature_extraction
            >>> import creme.stats

            >>> X = [
            ...     {'place': 'Taco Bell', 'revenue': 42},
            ...     {'place': 'Burger King', 'revenue': 16},
            ...     {'place': 'Burger King', 'revenue': 24},
            ...     {'place': 'Taco Bell', 'revenue': 58},
            ...     {'place': 'Burger King', 'revenue': 20},
            ...     {'place': 'Taco Bell', 'revenue': 50}
            ... ]

            >>> mean = creme.feature_extraction.Agg(
            ...     on='revenue',
            ...     by='place',
            ...     how=creme.stats.Mean()
            ... )
            >>> count = creme.feature_extraction.Agg(
            ...     on='revenue',
            ...     by='place',
            ...     how=creme.stats.Count()
            ... )
            >>> agg = creme.compose.TransformerUnion([mean])
            >>> agg += count

            >>> for x in X:
            ...     pprint(agg.fit_one(x).transform_one(x))
            {'revenue_count_by_place': 1, 'revenue_mean_by_place': 42.0}
            {'revenue_count_by_place': 1, 'revenue_mean_by_place': 16.0}
            {'revenue_count_by_place': 2, 'revenue_mean_by_place': 20.0}
            {'revenue_count_by_place': 2, 'revenue_mean_by_place': 50.0}
            {'revenue_count_by_place': 3, 'revenue_mean_by_place': 20.0}
            {'revenue_count_by_place': 3, 'revenue_mean_by_place': 50.0}

            >>> pprint(agg.transform_one({'place': 'Taco Bell'}))
            {'revenue_count_by_place': 3, 'revenue_mean_by_place': 50.0}

    """

    def __init__(self, transformers=None):
        super().__init__()
        if transformers is not None:
            for transformer in transformers:
                self += transformer

    @property
    def is_supervised(self):
        """The union is supervised as soon as any member is."""
        return any(transformer.is_supervised for transformer in self.values())

    def __str__(self):
        """Returns a human friendly representation of the pipeline."""
        # Was f' + ': an f-string with no placeholders (a no-op prefix);
        # a plain literal is equivalent.
        return ' + '.join(map(str, self.keys()))

    def __repr__(self):
        return str(self)

    def add_step(self, other):
        """Adds a transformer while taking care of the input type.

        Accepts a (name, transformer) pair, a bare transformer (named by
        its str()), or a plain function (wrapped in a FuncTransformer).
        """
        # Infer a name if none is given
        if not isinstance(other, (list, tuple)):
            other = (str(other), other)
        name, transformer = other

        # If a function is given then wrap it in a FuncTransformer
        if isinstance(transformer, types.FunctionType):
            name = transformer.__name__
            transformer = func.FuncTransformer(transformer)

        # Prefer clarity to magic
        if name in self:
            raise KeyError(f'{name} already exists')

        # Store the transformer
        self[name] = transformer

        return self

    def __add__(self, other):
        return self.add_step(other)

    def fit_one(self, x, y=None):
        """Fits every member on ``x`` (and ``y`` if supervised)."""
        for transformer in self.values():
            transformer.fit_one(x, y)
        return self

    def transform_one(self, x):
        """Passes the data through each transformer and packs the results together."""
        return dict(collections.ChainMap(*(
            transformer.transform_one(x)
            for transformer in self.values()
        )))

    def draw(self):
        """Renders the union as a graphviz digraph (requires graphviz)."""
        if not GRAPHVIZ_INSTALLED:
            raise ImportError('graphviz is not installed')

        g = graphviz.Digraph(engine='fdp')

        for part in self.values():
            if hasattr(part, 'draw'):
                g.subgraph(part.draw())
            else:
                g.node(str(part))

        return g
| 30.517986 | 86 | 0.558934 | 463 | 4,242 | 4.930886 | 0.308855 | 0.049058 | 0.042926 | 0.058257 | 0.20806 | 0.196233 | 0.169952 | 0.169952 | 0.169952 | 0.136662 | 0 | 0.013831 | 0.318246 | 4,242 | 138 | 87 | 30.73913 | 0.775588 | 0.495521 | 0 | 0.036364 | 0 | 0 | 0.036923 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.163636 | false | 0 | 0.127273 | 0.054545 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d9d5c2fd8fb2fa4769b60940901fb851de328f1 | 30,740 | py | Python | app.py | mattmallencode/republic | ce7c634fab048e2734f999d56edcf444b71d28ff | [
"CC-BY-4.0"
] | null | null | null | app.py | mattmallencode/republic | ce7c634fab048e2734f999d56edcf444b71d28ff | [
"CC-BY-4.0"
] | null | null | null | app.py | mattmallencode/republic | ce7c634fab048e2734f999d56edcf444b71d28ff | [
"CC-BY-4.0"
] | null | null | null | """
Attribution:
1. Fire SVG made by made by Deepak K Vijayan (2xsamurai). Available from: https://codepen.io/2xsamurai/pen/EKpYM". Logo animation and form animation were made by me.
2. "round_up" function written by Priyankur Sarkar. Available from: https://www.knowledgehut.com/blog/programming/python-rounding-numbers
3. Icons used in navbar are free even without attribution. Available from: https://uxwing.com/
4. Favicon is from the open source project Twemoji.
Licensed under CC-BY 4.0.
Twemoji: https://twemoji.twitter.com/
CC-BY 4.0 License: https://creativecommons.org/licenses/by/4.0/
5. Borrowed some CSS from Stack Overflow to center placeholder text in form fields. Available from: https://stackoverflow.com/questions/7381446/center-html-input-text-field-placeholder
6. Borrowed some CSS from Stack Overflow to brighten anchor tags on hover. Available from: https://stackoverflow.com/questions/16178382/css-lighten-an-element-on-hover
7. Borrowed some CSS to make form labels accessible to screen readers. Available from: https://webaim.org/techniques/css/invisiblecontent/
8. Borrowed some CSS to fix issues with safari mobile. Available from: https://stackoverflow.com/questions/50475114/when-rotating-an-iphone-x-to-landscape-white-space-appears-to-the-left-and-belox
9. Borrowed some CSS to fix scrolling issues on mobile. Available from: https://css-tricks.com/css-fix-for-100vh-in-mobile-webkit/
10. Borrowed some JavaScript from Stack Overflow to fix HTML validation issues due to blank action attribute. Available from: https://stackoverflow.com/questions/32491347/bad-value-for-attribute-action-on-element-form-must-be-non-empty/32491636
11. Borrowed some JavaScript from Stack Overflow to keep scroll at the buttom on the forum. Available from: https://stackoverflow.com/questions/3842614/how-do-i-call-a-javascript-function-on-page-load
12. Borrowed some Javascript from Stack Overflow to force refresh on the chat page. Available from: https://stackoverflow.com/questions/32913226/auto-refresh-page-every-30-seconds
13. All page transition animations were made using the swup page transition library. Available from: https://swup.js.org/
14. Font used is Roboto Mono. Available from: https://fonts.google.com/specimen/Roboto+Mono?preview.text_type=custom
Admin access:
1. Admin user_id is "admin".
2. Admin password is "keen/nimble_SALSA".
3. The admin portal can be accessed at the route "/admin".
Test accounts (feel free to make your own):
1. user_id: "cartwheelkitten", password: "supercsecret" (User is banned).
2. user_id: "floralpelicanfly", password: "superfsecret".
3. user_id: "unforgivenbeans", password: "superusecret".
4. user_id: "spinachstandby", password: "superssecret".
5. user_id: "fitnessjuice", password: "superfsecret".
6. user_id: "departed", password: "superdsecret".
7. user_id: "notorious", password: "supernsecret".
8. user_id: "doughnutwalrus", password: "superdsecret".
9. user_id: "snake", password: "superssecret".
10. user_id: "birthdaycake", password: "superbsecret".
"""
from flask import Flask, render_template, session, g, redirect, url_for, request
from database import get_db, close_db
from forms import SignInForm, RegistrationForm, ChatForm, SellForm, ColorForm, TaxForm, LimitForm, AdminForm
from werkzeug.security import generate_password_hash, check_password_hash
from functools import wraps
import math
app = Flask(__name__)
# Secret key signs the session cookie; hard-coded here for this demo app.
app.config["SECRET_KEY"] = "demistifyeasypetenimblesauce"
@app.teardown_appcontext
def close_db_at_end_of_request(e=None):
    """
    Closes the connection to the database at the end of a user request.

    e: the exception (if any) that ended the request; forwarded to close_db.
    """
    close_db(e)
@app.before_request
def load_logged_in_user():
    """
    Runs before every request: stores the session's user id on flask.g
    and loads the current colour palette from the active 'color' policy.
    """
    g.user = session.get("user_id", None)
    db = get_db()
    # Creates a global colour variables so they can be inserted into CSS variables in the Jinja2 templates.
    colors = db.execute("SELECT proposal_value FROM policies WHERE proposal_type = 'color'").fetchone()[
        "proposal_value"]
    # Since a color code in hex is 7 characters (including the #), index slicing is used to parse the color data.
    g.maincolor = colors[0:7]
    g.secondcolor = colors[7:14]
    g.textcolor = colors[14:21]
def login_required(view):
    """
    Decorator that redirects anonymous users to the login page, preserving
    the originally requested URL in the 'next' query argument so they can
    be sent back after authenticating. Banned users are bounced back to
    the login page even if they hold a valid session.
    """
    @ wraps(view)
    def wrapped_view(**kwargs):
        db = get_db()
        if g.user is None:
            return redirect(url_for("login", next=request.url))
        # The ban flag is checked on every request, so a ban takes effect
        # immediately even for users with an existing session cookie.
        if db.execute("""SELECT isBanned FROM users WHERE user_id = ? """, (g.user,)).fetchone()["isBanned"] == 1:
            return redirect(url_for("login"))
        return view(**kwargs)
    return wrapped_view
@ app.route("/login", methods=["GET", "POST"])
def login():
    """
    Authentication route.

    Compares the hash of the submitted password against the stored hash,
    blocks banned users (showing the ban reason on the page), and on
    success stores the user id in a freshly cleared session before
    honouring any 'next' redirect target.
    """
    form = SignInForm()
    banned = None
    reason = None
    if form.validate_on_submit():
        user_id = form.user_id.data
        password = form.password.data
        db = get_db()
        user = db.execute("""SELECT * FROM users
        where user_id = ?;""", (user_id,)).fetchone()
        if user is None:
            # Fixed typo in the user-facing message ("Unkown" -> "Unknown").
            form.user_id.errors.append("Unknown user id")
        elif not check_password_hash(user["password"], password):
            form.password.errors.append("Incorrect password!")
        elif user["isBanned"] == 1:
            banned = "You have been banned"
            reason = user["bannedReason"]
        else:
            # Clearing the session first prevents session fixation.
            session.clear()
            session["user_id"] = user_id
            next_page = request.args.get("next")
            if not next_page:
                next_page = url_for("chat")
            return redirect(next_page)
    return render_template("login.html", form=form, maincolor=g.maincolor, secondcolor=g.secondcolor, textcolor=g.textcolor, banned=banned, reason=reason)
@ app.route("/", methods=["GET", "POST"])
@ login_required
def chat():
    """
    Route for the chat room / public forum.

    Not a live websocket chat: client-side JavaScript refreshes the page
    every 30 seconds. Jinja2 logic styles the current user's messages
    differently from everyone else's. The number of stored messages is
    capped by the democratically-set 'limit' policy.
    """
    db = get_db()
    form = ChatForm()
    # Fetches what the chat limit currently is from the database.
    chat_limit = int(db.execute(
        """SELECT proposal_value FROM policies WHERE proposal_type = 'limit'""").fetchone()["proposal_value"])
    messages = db.execute(""" SELECT * FROM chats """).fetchall()
    if form.validate_on_submit():
        message = form.message.data
        db.execute(
            """INSERT INTO chats(user_id, message) VALUES(?, ?)""", (g.user, message))
        db.commit()
        messages = db.execute("""SELECT * FROM chats """).fetchall()
        # Checks to see if the number of chats in the database has exceeded the given limit once the user submits their message.
        # The oldest message is culled by ordering the messages by descending order of ID and limiting the query to the chat's limit.
        if len(messages) >= chat_limit:
            db.execute(
                """DELETE from chats WHERE message_id NOT IN (SELECT message_id FROM chats ORDER BY message_id DESC LIMIT ?)""", (chat_limit,))
            db.commit()
        # Redirect-after-POST so a browser refresh doesn't repost the message.
        return redirect(url_for("chat"))
    return render_template("index.html", form=form, messages=messages, user_id=g.user, maincolor=g.maincolor, secondcolor=g.secondcolor, textcolor=g.textcolor, chat_limit=chat_limit)
@app.route("/shop", methods=["GET", "POST"])
@login_required
def shop():
    """
    Route for the shop area, where users can buy hyperlinks listed by
    other users. The hyperlink itself is kept hidden from buyers simply
    by omitting it from the Jinja2 template.
    """
    db = get_db()
    # IDs of listings this user already owns; the template shows "bought"
    # instead of "buy" for these, and "your listing" where the seller is
    # the current user.
    owned_rows = db.execute(
        """SELECT listing_id FROM boughtlinks WHERE user_id= ?""", (g.user,)).fetchall()
    boughtlinksList = [row["listing_id"] for row in owned_rows]
    listings = db.execute(
        """SELECT * FROM listings ORDER BY listing_id DESC; """).fetchall()
    balance = db.execute(
        """SELECT tulips FROM users WHERE user_id= ?""", (g.user,)).fetchone()["tulips"]
    return render_template("shop.html", listings=listings, balance=balance, boughtlinksList=boughtlinksList, user_id=g.user, maincolor=g.maincolor, secondcolor=g.secondcolor, textcolor=g.textcolor)
@app.route("/buy/<int:listing_id>", methods=["GET", "POST"])
@login_required
def buy(listing_id):
    """
    Route used to buy a hyperlink listing.

    Validates the purchase FIRST (not the buyer's own listing, affordable,
    not already bought) and only then moves any money. The original code
    credited the sales tax to the treasury — and committed it — before
    validating, so even rejected purchase attempts paid tax into the
    treasury; all checks now happen up front and the whole purchase is
    committed once, atomically.
    """
    db = get_db()
    # Listings this user already owns.
    owned_rows = db.execute(
        """SELECT listing_id FROM boughtlinks WHERE user_id= ?""", (g.user,)).fetchall()
    boughtlinksList = [row["listing_id"] for row in owned_rows]
    price = db.execute(
        """SELECT price FROM listings WHERE listing_id= ?""", (listing_id,)).fetchone()["price"]
    seller_id = db.execute(
        """SELECT seller_id FROM listings WHERE listing_id= ?""", (listing_id,)).fetchone()["seller_id"]
    balance = db.execute(
        """SELECT tulips FROM users WHERE user_id= ?""", (g.user,)).fetchone()["tulips"]
    # Reject self-purchases, unaffordable listings and duplicates before
    # touching any balances.
    if seller_id == g.user or price > balance or listing_id in boughtlinksList:
        return redirect(url_for("shop"))
    # Sales tax (a percentage of the price, set by the 'tax' policy) goes
    # to the treasury; the seller receives the price less tax.
    tax_rate = int(db.execute(
        """SELECT proposal_value FROM policies WHERE proposal_type = 'tax'""").fetchone()["proposal_value"])
    tax = price * (tax_rate / 100)
    seller_balance = db.execute(
        """SELECT tulips FROM users WHERE user_id= ?""", (seller_id,)).fetchone()["tulips"]
    seller_balance += (price - tax)
    balance -= price
    db.execute("""UPDATE treasury SET tulips=tulips + ?""", (tax,))
    db.execute(
        """INSERT INTO boughtlinks(user_id, listing_id) VALUES(?, ?)""", (g.user, listing_id))
    db.execute("""UPDATE users SET tulips= ? WHERE user_id= ?""",
               (seller_balance, seller_id))
    db.execute("""UPDATE users SET tulips= ? WHERE user_id= ?""",
               (balance, g.user))
    # Single commit keeps the whole purchase atomic.
    db.commit()
    return redirect(url_for("shop"))
@ app.route("/boughtlinks", methods=["GET", "POST"])
@ login_required
def boughtlinks():
    """
    Route displaying the user's cart (the links they have bought).

    Unlike the shop route, the template here does render the actual
    hyperlink of each purchased listing so the buyer can visit it.
    """
    db = get_db()
    balance = db.execute(
        """SELECT tulips FROM users WHERE user_id= ?""", (g.user,)).fetchone()["tulips"]
    boughtlinks = db.execute(
        """SELECT * FROM listings WHERE listing_id IN(SELECT listing_id FROM boughtlinks WHERE user_id= ?)""", (g.user,)).fetchall()
    return render_template("boughtlinks.html", boughtlinks=boughtlinks, balance=balance, maincolor=g.maincolor, secondcolor=g.secondcolor, textcolor=g.textcolor)
@ app.route("/sell", methods=["GET", "POST"])
@ login_required
def sell():
    """
    Route a user can use to post a listing on the market.

    The current tax rate and the seller's balance are passed to the
    template for display; the tax itself is charged at purchase time.
    """
    form = SellForm()
    db = get_db()
    tax = db.execute(
        """SELECT proposal_value FROM policies WHERE proposal_type = 'tax'""").fetchone()["proposal_value"]
    balance = db.execute(
        """SELECT tulips FROM users WHERE user_id= ?""", (g.user,)).fetchone()["tulips"]
    if form.validate_on_submit():
        title = form.title.data
        description = form.description.data
        # Prices are rounded to two decimal places before storage.
        price = float(round(form.price.data, 2))
        link = form.link.data
        db.execute(
            """INSERT INTO listings(title, description, price, link, seller_id) VALUES(?, ?, ?, ?, ?); """, (title, description, price, link, g.user))
        db.commit()
        return redirect(url_for("shop"))
    return render_template("postlink.html", form=form, balance=balance, maincolor=g.maincolor, secondcolor=g.secondcolor, textcolor=g.textcolor, tax=tax)
@ app.route("/register", methods=["GET", "POST"])
def register():
    """
    Registration route.

    New users start with 1000 tulips, an empty voting record, and are
    neither admins nor banned. Password confirmation is enforced by the
    RegistrationForm's validators, so only the primary password is read
    here (the original bound form.password2.data to a local and then
    self-assigned it — dead code, now removed).
    """
    form = RegistrationForm()
    if form.validate_on_submit():
        user_id = form.user_id.data
        password = form.password.data
        db = get_db()
        # Reject duplicate user ids with a form error instead of a DB error.
        if db.execute("""SELECT * FROM users WHERE user_id= ?""", (user_id,)).fetchone() is not None:
            form.user_id.errors.append("User id already exists!")
        else:
            # Only the salted hash of the password is stored.
            db.execute(
                """INSERT INTO users(user_id, password, tulips, isAdmin, isBanned, bannedReason) VALUES(?, ?, ?, ?, ?, ?); """, (
                    user_id, generate_password_hash(password), 1000.0, 0, 0, ""))
            db.execute(
                """INSERT INTO votes(user_id, votes) VALUES(?, ?); """, (user_id, ""))
            db.commit()
            return redirect(url_for("login"))
    return render_template("register.html", form=form, maincolor=g.maincolor, secondcolor=g.secondcolor, textcolor=g.textcolor)
@ app.route("/about", methods=["GET", "POST"])
def about():
    """
    Renders the website's "about" page. No login required.
    """
    return render_template("about.html", maincolor=g.maincolor, secondcolor=g.secondcolor, textcolor=g.textcolor)
# Start of function written by Priyankur Sarkar
def round_up(n, decimals=0):
    """
    Round *n* upward (toward +infinity) at the given number of decimals.

    Used instead of the built-in round() so that x.5 voting thresholds
    always round up rather than falling prey to half-to-even rounding.
    """
    scale = 10 ** decimals
    return math.ceil(n * scale) / scale
# End of function written by Priyankur Sarkar
@ app.route("/voting", methods=["GET", "POST"])
@ login_required
def voting():
    """
    Displays the policies currently in force (tax, chat limit, treasury)
    plus every open proposal ranked by vote count, and lets users upvote
    or downvote proposals.
    """
    db = get_db()
    treasury = db.execute(
        """SELECT tulips FROM treasury""").fetchone()["tulips"]
    chat_limit = int(db.execute(
        """SELECT proposal_value FROM policies WHERE proposal_type = 'limit'""").fetchone()["proposal_value"])
    tax = db.execute(
        """SELECT proposal_value FROM policies WHERE proposal_type = 'tax'""").fetchone()["proposal_value"]
    # Banned users are excluded from the user count.
    user_count = db.execute(
        """SELECT COUNT(user_id) FROM users WHERE isBanned = 0""").fetchone()["COUNT(user_id)"]
    threshold = user_count / 2
    # If there's an even number of users then a majority is half the users + 1, else it's half the users rounded up to the nearest number.
    if user_count % 2 == 0:
        threshold += 1
    else:
        # The round_up function is used rather than round() to avoid Python rounding down. The threshold is the number of users divided by 2 rounding to the next largest number if the number is a decimal.
        threshold = int(round_up(threshold))
    # Parsing the user's votes using the split method (comma-separated
    # "<proposal_id><y/n>" tokens). Voting is explained in the "vote" route.
    user_votes = db.execute(
        """SELECT votes FROM votes WHERE user_id= ?""", (g.user,)).fetchone()["votes"].split(",")
    proposals = db.execute(
        """SELECT * FROM proposals ORDER BY votes DESC""").fetchall()
    return render_template("voting.html", proposals=proposals, maincolor=g.maincolor, secondcolor=g.secondcolor, textcolor=g.textcolor, user_votes=user_votes, threshold=threshold, treasury=treasury, tax=tax, chat_limit=chat_limit)
@ app.route("/propose", methods=["GET", "POST"])
@ login_required
def propose():
    """
    Route where a user proposes a policy change (site colours, sales tax,
    or chat limit).

    All three forms are rendered at once and toggled client-side, so each
    branch guards against the "wrong" form having been submitted. The
    three hex colours are concatenated into one 21-character string
    (main, second, text; each 7 characters including '#') and parsed
    later by index slicing.

    The original caught *everything* with bare ``except: pass``, hiding
    real database errors; only KeyError (which werkzeug's missing-form-
    field error subclasses) is swallowed now.
    """
    color_form = ColorForm()
    tax_form = TaxForm()
    limit_form = LimitForm()
    if color_form.validate_on_submit():
        try:
            # request.form[...] raises a KeyError subclass when the colour
            # fields are absent (i.e. a different form was submitted).
            proposal_value = (request.form["maincolor"]
                              + request.form["secondcolor"]
                              + request.form["textcolor"])
            return _record_proposal("color", proposal_value)
        except KeyError:
            pass
    if tax_form.validate_on_submit():
        try:
            return _record_proposal("tax", str(tax_form.salestax.data))
        except KeyError:
            pass
    if limit_form.validate_on_submit():
        try:
            return _record_proposal("limit", str(limit_form.limit.data))
        except KeyError:
            pass
    return render_template("makeproposal.html", color_form=color_form, tax_form=tax_form, limit_form=limit_form, maincolor=g.maincolor, secondcolor=g.secondcolor, textcolor=g.textcolor)


def _record_proposal(proposal_type, proposal_value):
    """Insert a new proposal with zero votes and redirect to the voting page."""
    db = get_db()
    db.execute("""INSERT INTO proposals(proposal_type, proposal_value, votes) VALUES(?, ?, ?)""",
               (proposal_type, proposal_value, 0))
    db.commit()
    return redirect(url_for("voting"))
def _remove_vote_token(db, proposal_id_sql, choice):
    """Delete one "<proposal_id><y/n>," token from the current user's vote string."""
    db.execute(
        """UPDATE votes SET votes=REPLACE(votes, ? || ?, '') WHERE user_id= ?""",
        (proposal_id_sql, choice + ",", g.user))


def _adjust_tally(db, proposal_id_sql, delta):
    """Add *delta* (+1 or -1) to a proposal's running vote tally."""
    db.execute(
        """UPDATE proposals SET votes=votes + ? WHERE proposal_id= ?""",
        (delta, proposal_id_sql))


@app.route("/vote/<string:proposal_id>", methods=["GET", "POST"])
@login_required
def vote(proposal_id):
    """
    This is the route that handles user votes. Votes work on an upvote and downvote system.
    Users can also withdraw any input on a given proposal by clicking the given vote button again.
    Voting works as follows:
    Each proposal has a given id; when a user votes, their vote is recorded with their id in the
    following format [proposal_id] + [y/n], y = an upvote, n = a downvote.
    Each user therefore has a string consisting of all of the votes they have issued on the
    platform which we can then iterate over to parse.
    Each vote in the string is separated by a "," which then makes it easy to parse all of the
    user's voting data using a for loop.
    Users can change the colour scheme of the website, how much sales tax is charged in the market
    and what the chat limit should be on the forum.
    Any proposal that gets a majority vote is implemented automatically without need for admin
    interference as these variables are fetched from the database.
    The voting system is a sort of self-executing smart contract. The only pitfall in this regard
    is that since the website is centralised, admins can make executive decisions and roll back
    democratic choices.
    """
    db = get_db()
    # Banned users are excluded from the user count.
    user_count = db.execute(
        """SELECT COUNT(user_id) FROM users WHERE isBanned = 0""").fetchone()["COUNT(user_id)"]
    # If there's an even number of users then a majority is half the users + 1,
    # else it's half the users rounded up to the nearest whole number.
    threshold = user_count / 2
    if user_count % 2 == 0:
        threshold += 1
    else:
        # round_up is used rather than round() to avoid Python rounding down.
        threshold = round_up(threshold)
    # The user's vote history as a list of "<proposal_id><y/n>" tokens.
    # (The original fetched this twice with identical queries; once is enough.)
    user_votes = db.execute(
        """SELECT votes FROM votes WHERE user_id= ?""",
        (g.user,)).fetchone()["votes"].split(",")
    # proposal_id arrives as "<db id><y/n>"; split it into its two parts.
    proposal_id_sql = proposal_id[0:-1]
    choice = proposal_id[-1]
    # Tally adjustment for this choice, and the token letter for the opposite choice.
    delta = 1 if choice == "y" else -1
    opposite = "n" if choice == "y" else "y"
    if proposal_id not in user_votes:
        # The user has not voted this way on this proposal before.
        if proposal_id_sql + opposite in user_votes:
            # They previously voted the other way: remove that vote from their
            # history and cancel its effect on the tally.
            _remove_vote_token(db, proposal_id_sql, opposite)
            _adjust_tally(db, proposal_id_sql, delta)
            db.commit()
        # Record the new vote and apply it to the tally, irrespective of
        # whether an opposite vote had to be cancelled first.
        db.execute("""UPDATE votes SET votes=votes || ? WHERE user_id= ?""",
                   (proposal_id + ",", g.user))
        _adjust_tally(db, proposal_id_sql, delta)
        db.commit()
    else:
        # The user already voted this way: withdraw the vote and undo its
        # effect on the tally.
        _remove_vote_token(db, proposal_id_sql, choice)
        _adjust_tally(db, proposal_id_sql, -delta)
        db.commit()
    proposal = db.execute(
        """SELECT * FROM proposals WHERE proposal_id= ?""",
        (proposal_id_sql,)).fetchone()
    # If the proposal has passed the majority threshold then implement it as a policy.
    if threshold <= proposal["votes"]:
        db.execute("""UPDATE policies SET proposal_value= ? WHERE proposal_type= ?""",
                   (proposal["proposal_value"], proposal["proposal_type"]))
        db.execute("""DELETE FROM proposals WHERE proposal_id= ?""",
                   (proposal["proposal_id"],))
        db.commit()
    # If the majority of users have voted no on a proposal then remove it from the database.
    if proposal["votes"] <= threshold * -1:
        db.execute("""DELETE FROM proposals WHERE proposal_id= ?""",
                   (proposal["proposal_id"],))
        db.commit()
    return redirect(url_for('voting'))
@app.route("/admin", methods=["GET", "POST"])
@login_required
def admin():
    """
    This is the route for the admin portal.
    Admin status is stored in session but the user must also enter the admin password to submit
    a command.
    The admin can ban any user but must give a reason for doing so.
    The ban/unban commands work as follows: "[ban/unban] [user_id] [reason]".
    If the admin fails to start the command with ban or unban, fails to provide a valid user_id
    or a reason, then the command is not submitted.
    Users who have already been banned/unbanned will not be banned/unbanned again.
    If the user has been successfully banned then their chats and market listings are also purged.
    Other data isn't deleted because:
    A) Users, even if they've violated rules, should be able to retrieve the links they bought
       if they request it.
    B) Unlike other data such as policy votes, chats and listings can be malicious in nature,
       e.g. scam listings and/or abusive messages.
    """
    db = get_db()
    outcome = None
    # Non-admins are bounced straight back to the chat page.
    if db.execute("""SELECT * FROM users WHERE user_id = ? AND isAdmin = 1""",
                  (g.user,)).fetchone() is None:
        return redirect(url_for("chat"))
    form = AdminForm()
    if form.validate_on_submit():
        password = form.password.data
        # Local renamed from `admin` so it no longer shadows this function's name.
        admin_hash = db.execute(
            """SELECT * FROM users WHERE user_id = 'admin';""").fetchone()["password"]
        if not check_password_hash(admin_hash, password):
            outcome = "That's not the admin password!"
            # Pass the theme colors here too, consistent with the final render below.
            return render_template("portal.html", form=form, outcome=outcome,
                                   maincolor=g.maincolor, secondcolor=g.secondcolor,
                                   textcolor=g.textcolor)
        command = form.command.data.split(" ")
        try:
            # BUG FIX: the original tested `!= "ban" or != "unban"`, which is
            # always true, so every command was first flagged as malformed.
            if command[0] != "ban" and command[0] != "unban":
                outcome = "Command must begin with 'ban' or 'unban'!"
            elif db.execute("""SELECT user_id FROM users WHERE user_id = ?""",
                            (command[1],)).fetchone() is None:
                outcome = "User does not exist!"
            else:
                if command[0] == "ban":
                    try:
                        if db.execute("""SELECT isBanned FROM users where user_id = ?""",
                                      (command[1],)).fetchone()["isBanned"] == 1:
                            outcome = "User is already banned!"
                        elif command[2] == "":
                            outcome = "Can't leave ban reason blank!"
                        else:
                            # Join the remaining words; unlike the original loop this
                            # does not store a stray leading space in the reason.
                            reason = " ".join(command[2:])
                            db.execute(
                                """UPDATE users SET isBanned = 1, bannedReason = ? WHERE user_id = ?""",
                                (reason.lower(), command[1]))
                            db.commit()
                            # Purge the banned user's chats and market listings.
                            db.execute(
                                """DELETE FROM chats WHERE user_id = ?""", (command[1],))
                            db.execute(
                                """DELETE FROM listings WHERE seller_id = ?""", (command[1],))
                            db.commit()
                            outcome = "User has been banned!"
                    except Exception:
                        # Missing user_id/reason raises IndexError; report it generically.
                        outcome = "Invalid command!"
                if command[0] == "unban":
                    try:
                        if db.execute("""SELECT isBanned FROM users where user_id = ?""",
                                      (command[1],)).fetchone()["isBanned"] == 0:
                            outcome = "User already isn't banned!"
                        else:
                            db.execute(
                                """UPDATE users SET isBanned = 0, bannedReason = "" WHERE user_id = ?""",
                                (command[1],))
                            db.commit()
                            outcome = "User has been unbanned!"
                    except Exception:
                        outcome = "Invalid command!"
        except Exception:
            outcome = "Invalid command!"
    return render_template("portal.html", form=form, outcome=outcome, maincolor=g.maincolor,
                           secondcolor=g.secondcolor, textcolor=g.textcolor)
| 54.310954 | 252 | 0.640468 | 4,109 | 30,740 | 4.710392 | 0.157703 | 0.02201 | 0.027125 | 0.01209 | 0.422371 | 0.388478 | 0.329889 | 0.306691 | 0.288091 | 0.241901 | 0 | 0.00857 | 0.252212 | 30,740 | 565 | 253 | 54.40708 | 0.833428 | 0.378855 | 0 | 0.523035 | 0 | 0 | 0.139028 | 0.009336 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04607 | false | 0.04065 | 0.01626 | 0 | 0.135501 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d9d86c211fc1d58055ad9b788bab3c0e20dbabd | 1,398 | py | Python | test/test_earthdata.py | matthewhanson/modis-ingestor | c8b903b8ce671a93a40f563103a9ca5264658815 | [
"MIT"
] | 13 | 2017-01-31T16:37:56.000Z | 2020-06-23T19:55:55.000Z | test/test_earthdata.py | matthewhanson/modis-ingestor | c8b903b8ce671a93a40f563103a9ca5264658815 | [
"MIT"
] | 22 | 2017-01-12T19:42:32.000Z | 2021-05-20T16:03:08.000Z | test/test_earthdata.py | matthewhanson/modis-ingestor | c8b903b8ce671a93a40f563103a9ca5264658815 | [
"MIT"
] | 2 | 2018-03-29T23:41:59.000Z | 2019-11-09T00:33:38.000Z | import os
from dateutil.parser import parse
import unittest
from modispds.earthdata import query, download_granule
class TestCMR(unittest.TestCase):
    """Test querying and downloading from CMR."""

    # Fixed dates shared by the query tests.
    date1 = parse('2016-01-01').date()
    date2 = parse('2016-01-02').date()
    date3 = parse('2016-01-30').date()
    # Expected download URL of the first granule returned for date1.
    url = 'http://e4ftl01.cr.usgs.gov//MODV6_Cmp_B/MOTA/MCD43A4.006/2016.01.01/MCD43A4.A2016001.h11v12.006.2016174075640.hdf'

    @classmethod
    def setUpClass(cls):
        """Setup class once by issuing a query."""
        # Idiom fix: this is a classmethod, so the first parameter is `cls`,
        # not `self`; the shared query result is stored as a class attribute.
        cls.q = query(cls.date1, cls.date1)

    def test_query(self):
        """Query CMR for a single day."""
        self.assertEqual(len(self.q), 299)
        keys = self.q[0].keys()
        self.assertTrue('links' in keys)

    def test_query_2days(self):
        """Query CMR for two days."""
        q = query(self.date1, self.date2)
        self.assertEqual(len(q), 598)

    def _test_query_30days(self):
        """Query CMR for 30 days (deliberately disabled: rename to test_* to run)."""
        q = query(self.date1, self.date3)
        self.assertEqual(len(q), 9272)

    def test_download(self):
        """Download a granule from CMR and verify the files land on disk."""
        q = self.q[0]
        url = q['links'][0]['href']
        self.assertEqual(url, self.url)
        fnames = download_granule(q, outdir=os.path.dirname(__file__))
        for f in fnames:
            self.assertTrue(os.path.exists(f))
            os.remove(f)
| 31.066667 | 125 | 0.610873 | 192 | 1,398 | 4.369792 | 0.401042 | 0.053635 | 0.039333 | 0.053635 | 0.077473 | 0.054827 | 0 | 0 | 0 | 0 | 0 | 0.09375 | 0.244635 | 1,398 | 44 | 126 | 31.772727 | 0.700758 | 0.111588 | 0 | 0 | 0 | 0.033333 | 0.130399 | 0 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.166667 | false | 0 | 0.133333 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1da378ba7833b27f03503c506a22364e42c3cfd3 | 4,195 | py | Python | Interface.py | Angel2298/PyCakeSwapBot | e92ed52006a7aa6f94a3c88a5a70e7f4098096c9 | [
"BSD-3-Clause"
] | null | null | null | Interface.py | Angel2298/PyCakeSwapBot | e92ed52006a7aa6f94a3c88a5a70e7f4098096c9 | [
"BSD-3-Clause"
] | null | null | null | Interface.py | Angel2298/PyCakeSwapBot | e92ed52006a7aa6f94a3c88a5a70e7f4098096c9 | [
"BSD-3-Clause"
] | null | null | null | ####################################################### CREATE INTERFACE GUI ###########################################
import os
from tkinter import *
import time
window = Tk()
window.title("DEX PancakeBot")
window.minsize(width=500, height= 600)
window.config(padx=20, pady=20)
# Title
Title = Label(text="Welcome to the Limit PancakeSwap Bot", font=("Century", 16))
Title.grid(column=1, row=0)
################################################ Define all function ##################################################
def run_bot():
    """Read the three form entries and show a run summary in label_resume."""
    amount = buy_amount_entry.get()
    token_contract = contract_token_entry.get()
    target_price = target_price_entry.get()
    summary = (
        f"You are running the bot\n"
        f"Buy: {amount}\n"
        f"Contract: {token_contract}\n"
        f"target: {target_price}"
    )
    label_resume.config(text=summary)
def actual_time():
    """Display the current clock time on label_time, refreshing once per second."""
    # Bug fix: the original called time.strftime() three separate times for
    # hour, minute and second, so the parts could straddle a second/minute
    # boundary and briefly show an inconsistent time. One call reads a single
    # instant and formats it identically.
    label_time.config(text=time.strftime("%H:%M:%S"))
    # Re-schedule so the clock keeps ticking while the main loop runs.
    label_time.after(1000, actual_time)
def close_window():
    """Copy the buy-amount entry's current text into label1.

    NOTE(review): despite its name this does not close the window — it only
    updates label1. It is wired to the commented-out Buy/Sell buttons.
    """
    label1.config(text=buy_amount_entry.get())
def action_buy_Sell():
    """Print which trade direction (Buy/Sell) radio button is selected."""
    messages = {1: "Buy token", 2: "Sell Token"}
    selected = action.get()
    # Any other value (nothing selected yet) prints nothing, as before.
    if selected in messages:
        print(messages[selected])
# Radiobutton
def radio_used():
    """Print the currently selected Trade/Notify radio button value."""
    selection = radio_state.get()
    print(selection)
#################################################### Define all Labels #################################################
# Status label updated by close_window().
label1 = Label(text="This the first GUI of the bot", font=("Century", 16))
label1.grid(column=1, row=1)
# Clock label updated every second by actual_time().
label_time = Label(text="", font=("Century", 16))
label_time.grid(column=1, row=7)
# Prompt for the WBNB trade amount.
label_amount = Label(text="How much amount of WBNB would you like to trade ", font=("Century", 16))
label_amount.grid(column=0, row=3)
# Prompt for the token contract address.
label_contract = Label(text="What is the contract to trade (Omit if in config file) ", font=("Century", 16))
label_contract.grid(column=0, row=5)
# Prompt for the limit/target price.
label_target_price_entry = Label(text="What is the target price?", font=("Century", 16))
label_target_price_entry.grid(column=0, row=7)
# Run summary filled in by run_bot().
label_resume = Label(text=" ", font=("Century", 16))
label_resume.grid(column=0, row=15)
############################################## Define all entries ######################################################
# Entry for the buy amount
buy_amount_entry = Entry(width=15)
buy_amount_entry.grid(column=0, row=4)
# Entry for the contract address
contract_token_entry = Entry(width=25)
contract_token_entry.grid(column=0, row=6)
# Entry for the target price
target_price_entry = Entry(width=15)
target_price_entry.grid(column=0, row=8)
######################################### Define the RadioButtons ######################################################
# Variable holding which Trade/Notify radio button is checked.
radio_state = IntVar()
tradeButton = Radiobutton(text="Trade", value=1, variable=radio_state, command=radio_used)
notifyButton = Radiobutton(text="Notify", value=2, variable=radio_state, command=radio_used)
tradeButton.grid(column=0, row=10)
notifyButton.grid(column=0, row=11)
# Variable holding which Buy/Sell radio button is checked.
action = IntVar()
BuyButton = Radiobutton(text="Buy", value=1, variable=action, command=action_buy_Sell)
SellButton = Radiobutton(text="Sell", value=2, variable=action, command=action_buy_Sell)
BuyButton.grid(column=1, row=10)
SellButton.grid(column=1, row=11)
################################################## Define Buttons ######################################################
# Button to start the bot (runs run_bot)
run = Button(text="Run", command=run_bot)
run.grid(column=0, row=12)
# Button to close the program
close = Button(text="Close", command=window.destroy)
close.grid(column=1, row=12)
# Commented-out alternate Buy/Sell buttons, kept for reference:
# # Button
# button = Button(text="Sell", command=close_window)
# button.grid(column=0, row=5)
#
# button = Button(text="Buy", command=close_window)
# button.grid(column=3, row=5)
# Start the clock, then hand control to the Tk event loop.
actual_time()
window.mainloop()
| 32.269231 | 121 | 0.591657 | 536 | 4,195 | 4.511194 | 0.244403 | 0.074442 | 0.050041 | 0.063689 | 0.245658 | 0.171216 | 0.064516 | 0.039702 | 0.039702 | 0.039702 | 0 | 0.024191 | 0.152563 | 4,195 | 129 | 122 | 32.51938 | 0.655978 | 0.15876 | 0 | 0 | 0 | 0 | 0.139878 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073529 | false | 0 | 0.044118 | 0 | 0.117647 | 0.044118 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1da7670cb3265bca373b14be3b2d7200397286df | 1,664 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/dark_lang/models.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/dark_lang/models.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/dark_lang/models.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z | """
Models for the dark-launching languages
"""
from config_models.models import ConfigurationModel
from django.db import models
class DarkLangConfig(ConfigurationModel):
    """
    Configuration for the dark_lang django app.

    .. no_pii:
    """
    released_languages = models.TextField(
        blank=True,
        help_text="A comma-separated list of language codes to release to the public."
    )

    enable_beta_languages = models.BooleanField(
        default=False,
        help_text="Enable partially supported languages to display in language drop down."
    )

    beta_languages = models.TextField(
        blank=True,
        help_text="A comma-separated list of language codes to release to the public as beta languages."
    )

    def __str__(self):
        return "DarkLangConfig()"

    @staticmethod
    def _as_language_list(codes):
        """Parse a comma-separated string of language codes into a sorted list.

        Codes are lower-cased and stripped of surrounding whitespace; an
        empty/blank string yields an empty list. (Factored out of the two
        properties below, which previously duplicated this logic.)
        """
        if not codes.strip():
            return []
        languages = [lang.lower().strip() for lang in codes.split(',')]
        # Put in alphabetical order
        languages.sort()
        return languages

    @property
    def released_languages_list(self):
        """
        ``released_languages`` as a list of language codes.

        Example: ['it', 'de-at', 'es', 'pt-br']
        """
        return self._as_language_list(self.released_languages)

    @property
    def beta_languages_list(self):
        """
        ``beta_languages`` as a list of language codes.

        (The original docstring mistakenly referred to ``released_languages``.)

        Example: ['it', 'de-at', 'es', 'pt-br']
        """
        return self._as_language_list(self.beta_languages)
| 27.278689 | 104 | 0.622596 | 190 | 1,664 | 5.326316 | 0.357895 | 0.100791 | 0.055336 | 0.075099 | 0.590909 | 0.590909 | 0.590909 | 0.590909 | 0.590909 | 0.590909 | 0 | 0 | 0.271034 | 1,664 | 60 | 105 | 27.733333 | 0.834295 | 0.200721 | 0 | 0.322581 | 0 | 0 | 0.191935 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.064516 | 0.032258 | 0.451613 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1da8215721de36398c957e99a911b088c52928ea | 3,210 | py | Python | refnx/reflect/_jax_reflect.py | dcortie/refnx | 037434fa0a64755f72c540d75063986bd517ab10 | [
"BSD-3-Clause"
] | 32 | 2016-04-18T15:29:59.000Z | 2022-03-27T08:35:29.000Z | refnx/reflect/_jax_reflect.py | dcortie/refnx | 037434fa0a64755f72c540d75063986bd517ab10 | [
"BSD-3-Clause"
] | 116 | 2015-10-27T04:33:09.000Z | 2022-02-22T02:02:47.000Z | refnx/reflect/_jax_reflect.py | dcortie/refnx | 037434fa0a64755f72c540d75063986bd517ab10 | [
"BSD-3-Clause"
] | 22 | 2015-09-29T23:21:15.000Z | 2022-02-27T18:12:18.000Z | """
*Calculates the specular (Neutron or X-ray) reflectivity from a stratified
series of layers.
The refnx code is distributed under the following license:
Copyright (c) 2015 A. R. J. Nelson, ANSTO
Permission to use and redistribute the source code or binary forms of this
software and its documentation, with or without modification is hereby
granted provided that the above notice of copyright, these terms of use,
and the disclaimer of warranty below appear in the source code and
documentation, and that none of the names of above institutions or
authors appear in advertising or endorsement of works derived from this
software without specific prior written permission from all parties.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THIS SOFTWARE.
"""
from functools import reduce
import jax.numpy as jnp
from jax import jit
from jax.ops import index, index_add, index_update
# Small offset added to the imaginary SLD so the complex sqrt in jabeles
# selects the physically correct branch cut.
TINY = 1e-30
def jabeles(q, layers, scale=1.0, bkg=0, threads=0):
    """Abeles matrix calculation of specular reflectivity (jax implementation).

    Parameters
    ----------
    q : array-like
        Momentum transfer (Q) values at which reflectivity is evaluated.
    layers : array, shape (nlayers + 2, 4)
        Slab representation of the interface. From how the columns are used
        below: column 0 is layer thickness, column 1 the real SLD, column 2
        the imaginary SLD and column 3 the interfacial roughness — consistent
        with the usual refnx layer layout, but confirm against callers.
    scale : float
        Multiplicative scale factor applied to the reflectivity.
    bkg : float
        Constant background added to the scaled reflectivity.
    threads : int
        Unused here; presumably accepted for signature compatibility with the
        other abeles kernels.

    Returns
    -------
    Real-valued reflectivity array with the same shape as `q`.
    """
    qvals = q.astype(jnp.float64)
    flatq = qvals.ravel()
    nlayers = layers.shape[0] - 2
    npnts = flatq.size
    mi00 = jnp.ones((npnts, nlayers + 1), jnp.complex128)
    sld = jnp.zeros(nlayers + 2, jnp.complex128)
    # SLD contrast of each layer relative to the fronting medium (layer 0),
    # converted from 1e-6 Å**-2 units.
    # addition of TINY is to ensure the correct branch cut
    # in the complex sqrt calculation of kn.
    sld = index_add(
        sld,
        index[1:],
        ((layers[1:, 1] - layers[0, 1]) + 1j * (jnp.abs(layers[1:, 2]) + TINY))
        * 1.0e-6,
    )
    # wavevector within each layer, kn.shape = (npnts, nlayers + 2)
    kn = jnp.sqrt(flatq[:, jnp.newaxis] ** 2.0 / 4.0 - 4.0 * jnp.pi * sld)
    # reflectances for each layer
    # rj.shape = (npnts, nlayers + 1)
    damping = jnp.exp(-2.0 * kn[:, :-1] * kn[:, 1:] * layers[1:, 3] ** 2)
    rj = (kn[:, :-1] - kn[:, 1:]) / (kn[:, :-1] + kn[:, 1:]) * damping
    # characteristic matrices for each layer
    # miNN.shape = (npnts, nlayers + 1)
    if nlayers:
        # phase factor accumulated across each internal layer's thickness
        mi00 = index_update(
            mi00,
            index[:, 1:],
            jnp.exp(kn[:, 1:-1] * 1j * jnp.fabs(layers[1:-1, 0])),
        )
    mi11 = 1.0 / mi00
    mi10 = rj * mi11
    mi01 = rj * mi00
    # assemble the 2x2 characteristic matrix for every point/layer
    mi = jnp.zeros((npnts, nlayers + 1, 2, 2), jnp.complex128)
    mi = index_update(
        mi,
        index[:, :, 0, 0],
        mi00,
    )
    mi = index_update(
        mi,
        index[:, :, 0, 1],
        mi01,
    )
    mi = index_update(
        mi,
        index[:, :, 1, 1],
        mi11,
    )
    mi = index_update(
        mi,
        index[:, :, 1, 0],
        mi10,
    )
    # multiply the per-layer matrices together to get the total transfer matrix
    sub = [jnp.squeeze(v) for v in jnp.hsplit(mi, nlayers + 1)]
    mrtot = reduce(jnp.matmul, sub[1:], sub[0])
    # reflectivity = |r|**2 with r = M10 / M00, then scale and background
    r = mrtot[:, 1, 0] / mrtot[:, 0, 0]
    reflectivity = r * jnp.conj(r) * scale
    reflectivity = index_add(reflectivity, ..., bkg)
    return jnp.real(jnp.reshape(reflectivity, qvals.shape))
# abeles_jax = jabeles  # un-jitted variant, kept for debugging reference
abeles_jax = jit(jabeles)  # JIT-compiled entry point used by callers
| 30 | 79 | 0.62648 | 473 | 3,210 | 4.22833 | 0.391121 | 0.028 | 0.026 | 0.012 | 0.048 | 0.048 | 0 | 0 | 0 | 0 | 0 | 0.045588 | 0.25514 | 3,210 | 106 | 80 | 30.283019 | 0.790882 | 0.436449 | 0 | 0.172414 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017241 | false | 0 | 0.068966 | 0 | 0.103448 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1da9f37084416d4890228bbc62b4a8fea137f0a1 | 7,015 | py | Python | ModelFinder/model_finder.py | Sparab16/CreditCardPrediction | 70c26664d747e0cfcae5256609097ab2e9434b67 | [
"MIT"
] | null | null | null | ModelFinder/model_finder.py | Sparab16/CreditCardPrediction | 70c26664d747e0cfcae5256609097ab2e9434b67 | [
"MIT"
] | null | null | null | ModelFinder/model_finder.py | Sparab16/CreditCardPrediction | 70c26664d747e0cfcae5256609097ab2e9434b67 | [
"MIT"
] | null | null | null | from sklearn.model_selection import GridSearchCV
from sklearn.metrics import roc_auc_score, accuracy_score
from sklearn.naive_bayes import GaussianNB
from xgboost import XGBClassifier
import os
from Logger import AppLogger
class ModelFinder:
    '''
    This class shall be used to find the model with best accuracy and AUC Score.

    It tunes an XGBoost classifier and a Gaussian Naive Bayes classifier with a
    grid search, then picks whichever scores higher on ROC AUC.
    '''

    # All model-finder activity is appended to this log file.
    LOG_FILE = 'Training_Logs/ModelFinder.txt'

    def __init__(self):
        self.current_directory = os.getcwd()
        self.logger = AppLogger()
        # Baseline estimators; replaced by tuned models after a grid search.
        self.gnb = GaussianNB()
        self.xgb = XGBClassifier(objective='binary:logistic', n_jobs=-1)

    def _log(self, message):
        """Append *message* to the model-finder log, closing the handle.

        Bug fix: the original reopened the log file before every log call
        (including inside except blocks) without closing the previous handle,
        leaking file descriptors; __init__ also opened a handle that was never
        closed. A context manager guarantees each handle is released.
        """
        with open(self.LOG_FILE, 'a+') as file_object:
            self.logger.log(file_object, message)

    def get_best_params_xgboost(self, train_x, train_y):
        '''
        Description: get the parameters for XGBoost Algorithm which give the best accuracy.
                     Use Hyper Parameter Tuning.
        :param train_x: Feature Dataset
        :param train_y: Label Dataset
        :return: The model with best parameters
        :failure: Raise Exception
        '''
        try:
            self._log('Entered the get_best_params_for_xgboost method of the Model_Finder class')
            # Candidate hyper-parameter combinations to search over.
            param_grid = {
                "n_estimators": [50, 100, 130],
                "max_depth": range(3, 11, 1),
                "random_state": [0, 50, 100]
            }
            grid = GridSearchCV(XGBClassifier(objective='binary:logistic'),
                                param_grid, verbose=3, cv=2, n_jobs=-1)
            grid.fit(train_x, train_y)
            # Retrain a fresh model using the best parameters found.
            self.xgb = XGBClassifier(random_state=grid.best_params_['random_state'],
                                     max_depth=grid.best_params_['max_depth'],
                                     n_estimators=grid.best_params_['n_estimators'],
                                     n_jobs=-1)
            self.xgb.fit(train_x, train_y)
            self._log('XGBoost best params: ' + str(grid.best_params_) +
                      '. Exited the get_best_params_for_xgboost method of the Model_Finder class')
            return self.xgb
        except Exception as e:
            self._log('Error Occurred {}'.format(str(e)))
            raise e

    def get_best_params_naive_bayes(self, train_x, train_y):
        '''
        Description: get the parameters for the Naive Bayes's Algorithm which give the best accuracy.
                     Use Hyper Parameter Tuning.
        :param train_x: Feature Dataset
        :param train_y: Label Dataset
        :return: The model with best parameters
        :failure: Raise Exception
        '''
        try:
            self._log('Entered the get_best_params_for_naive_bayes method of the Model_Finder class')
            # Candidate smoothing values to search over.
            param_grid = {"var_smoothing": [1e-9, 0.1, 0.001, 0.5, 0.05, 0.01,
                                            1e-8, 1e-7, 1e-6, 1e-10, 1e-11]}
            grid = GridSearchCV(estimator=self.gnb, param_grid=param_grid, cv=3, verbose=3)
            grid.fit(train_x, train_y)
            # Retrain a fresh model using the best parameters found.
            self.gnb = GaussianNB(var_smoothing=grid.best_params_['var_smoothing'])
            self.gnb.fit(train_x, train_y)
            self._log('Naive Bayes best params: ' + str(grid.best_params_) +
                      '. Exited the get_best_params_for_naive_bayes method of the Model_Finder class')
            return self.gnb
        except Exception as e:
            self._log('Error Occurred {}'.format(str(e)))
            raise e

    def get_best_model(self, train_x, train_y, test_x, test_y):
        '''
        Description: Finds out the model which has the best AUC score.
        :param train_x: Feature Training Dataset
        :param train_y: Label Training Dataset
        :param test_x: Feature Testing Dataset
        :param test_y: Label Testing Dataset
        :return: The best model name and the object of it
        :failure: Raise Exception
        '''
        try:
            self._log('Entered the get_best_model method of the Model_Finder class')
            # Tune and evaluate XGBoost.
            xgboost = self.get_best_params_xgboost(train_x, train_y)
            prediction_xgboost = xgboost.predict(test_x)
            xgboost_score = roc_auc_score(test_y, prediction_xgboost)
            self._log('AUC for XGBoost: ' + str(xgboost_score))
            # Tune and evaluate Naive Bayes.
            naive_bayes = self.get_best_params_naive_bayes(train_x, train_y)
            prediction_naive_bayes = naive_bayes.predict(test_x)
            naive_bayes_score = roc_auc_score(test_y, prediction_naive_bayes)
            # Bug fix: the original logged this as 'AUC for RF' (random forest)
            # even though the model being scored is Naive Bayes.
            self._log('AUC for Naive Bayes: ' + str(naive_bayes_score))
            # Ties go to Naive Bayes, matching the original strict comparison.
            if naive_bayes_score < xgboost_score:
                return 'XGBoost', xgboost
            return 'NaiveBayes', naive_bayes
        except Exception as e:
            self._log('Error Occurred {}'.format(str(e)))
            raise e
d517c4f09ba04347c87fe273db685ea053682476 | 7,248 | py | Python | barbican-8.0.0/barbican/plugin/util/translations.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | null | null | null | barbican-8.0.0/barbican/plugin/util/translations.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | barbican-8.0.0/barbican/plugin/util/translations.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from OpenSSL import crypto
from oslo_serialization import base64
import six
from barbican import i18n as u # noqa
from barbican.plugin.interface import secret_store as s
from barbican.plugin.util import mime_types
def normalize_before_encryption(unencrypted, content_type, content_encoding,
                                secret_type, enforce_text_only=False):
    """Normalize a secret payload before handing it to the SecretStore.

    Every payload is converted to base64: plain text is utf-8 encoded and
    then base64 encoded, while binary data is base64 encoded directly (or
    passed through when the caller already supplied base64).

    :param str unencrypted: Raw payload
    :param str content_type: The media type for the payload
    :param str content_encoding: Transfer encoding
    :param str secret_type: The type of secret
    :param bool enforce_text_only: Require text content_type or base64
        content_encoding
    :returns: Tuple of the base64-encoded payload and the normalized
        media type.
    """
    if not unencrypted:
        raise s.SecretNoPayloadProvidedException()

    # Validate and normalize the content-type up front.
    media_type = normalize_content_type(content_type)

    if media_type in mime_types.PLAIN_TEXT:
        # Plain text: ensure bytes, then base64 encode.
        if six.PY3:
            payload = base64.encode_as_bytes(unencrypted)
        else:
            payload = base64.encode_as_bytes(unencrypted.encode('utf-8'))
    elif not content_encoding:
        # Binary with no declared transfer encoding: encode it ourselves.
        payload = base64.encode_as_bytes(unencrypted)
    elif content_encoding.lower() == 'base64':
        # Already base64; just make sure we hand back bytes.
        if isinstance(unencrypted, six.binary_type):
            payload = unencrypted
        else:
            payload = unencrypted.encode('utf-8')
    elif enforce_text_only:
        # For text-based protocols (such as the one-step secret POST),
        # only 'base64' encoding is possible/supported.
        raise s.SecretContentEncodingMustBeBase64()
    else:
        # Unsupported content-encoding request.
        raise s.SecretContentEncodingNotSupportedException(
            content_encoding
        )

    return payload, media_type
def normalize_content_type(content_type):
    """Normalize the content type and validate that it is supported."""
    normalized = mime_types.normalize_content_type(content_type)
    if mime_types.is_supported(normalized):
        return normalized
    # Reject media types the store cannot handle, reporting the raw input.
    raise s.SecretContentTypeNotSupportedException(content_type)
def analyze_before_decryption(content_type):
    """Determine support for desired content type."""
    supported = mime_types.is_supported(content_type)
    if not supported:
        raise s.SecretAcceptNotSupportedException(content_type)
def denormalize_after_decryption(unencrypted, content_type):
    """Translate the decrypted data into the desired content type.

    Called when the raw keys are requested by the user.  The secret returned
    from the SecretStore is the ``unencrypted`` parameter; this 'denormalizes'
    the data back to its binary format.
    """
    if content_type in mime_types.PLAIN_TEXT:
        # Plain-text consumers get decoded text back.
        try:
            return base64.decode_as_text(unencrypted)
        except UnicodeDecodeError:
            # Stored bytes are not valid text for this content type.
            raise s.SecretAcceptNotSupportedException(content_type)
    if content_type in mime_types.BINARY:
        return base64.decode_as_bytes(unencrypted)
    raise s.SecretContentTypeNotSupportedException(content_type)
def convert_pem_to_der(pem, secret_type):
    """Convert a PEM-encoded secret to DER according to its secret type."""
    # Dispatch on secret type; only asymmetric key material and
    # certificates have a DER representation.
    converters = {
        s.SecretType.PRIVATE: _convert_private_pem_to_der,
        s.SecretType.PUBLIC: _convert_public_pem_to_der,
        s.SecretType.CERTIFICATE: _convert_certificate_pem_to_der,
    }
    try:
        converter = converters[secret_type]
    except KeyError:
        reason = u._("Secret type can not be converted to DER")
        raise s.SecretGeneralException(reason=reason)
    return converter(pem)
def convert_der_to_pem(der, secret_type):
    """Convert a DER-encoded secret to PEM according to its secret type."""
    # Mirror of convert_pem_to_der: dispatch on secret type.
    converters = {
        s.SecretType.PRIVATE: _convert_private_der_to_pem,
        s.SecretType.PUBLIC: _convert_public_der_to_pem,
        s.SecretType.CERTIFICATE: _convert_certificate_der_to_pem,
    }
    try:
        converter = converters[secret_type]
    except KeyError:
        reason = u._("Secret type can not be converted to PEM")
        raise s.SecretGeneralException(reason=reason)
    return converter(der)
def _convert_private_pem_to_der(pem):
    """Re-serialize a PEM private key as unencrypted PKCS#8 DER."""
    loaded_key = serialization.load_pem_private_key(
        pem, password=None, backend=default_backend())
    return loaded_key.private_bytes(
        encoding=serialization.Encoding.DER,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption())
def _convert_private_der_to_pem(der):
    """Re-serialize a DER private key as unencrypted PKCS#8 PEM."""
    loaded_key = serialization.load_der_private_key(
        der, password=None, backend=default_backend())
    return loaded_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption())
def _convert_public_pem_to_der(pem):
    """Re-serialize a PEM public key as SubjectPublicKeyInfo DER."""
    loaded_key = serialization.load_pem_public_key(
        pem, backend=default_backend())
    return loaded_key.public_bytes(
        encoding=serialization.Encoding.DER,
        format=serialization.PublicFormat.SubjectPublicKeyInfo)
def _convert_public_der_to_pem(der):
    """Re-serialize a DER public key as SubjectPublicKeyInfo PEM."""
    loaded_key = serialization.load_der_public_key(
        der, backend=default_backend())
    return loaded_key.public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo)
def _convert_certificate_pem_to_der(pem):
    """Convert a PEM certificate to DER (ASN.1) via pyOpenSSL."""
    certificate = crypto.load_certificate(crypto.FILETYPE_PEM, pem)
    return crypto.dump_certificate(crypto.FILETYPE_ASN1, certificate)
def _convert_certificate_der_to_pem(der):
    """Convert a DER (ASN.1) certificate to PEM via pyOpenSSL."""
    certificate = crypto.load_certificate(crypto.FILETYPE_ASN1, der)
    return crypto.dump_certificate(crypto.FILETYPE_PEM, certificate)
| 35.356098 | 77 | 0.717991 | 880 | 7,248 | 5.696591 | 0.240909 | 0.048274 | 0.011171 | 0.01536 | 0.420507 | 0.327349 | 0.229404 | 0.159585 | 0.145222 | 0.05785 | 0 | 0.009901 | 0.219647 | 7,248 | 204 | 78 | 35.529412 | 0.876414 | 0.266004 | 0 | 0.376 | 0 | 0 | 0.018053 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096 | false | 0.016 | 0.064 | 0 | 0.28 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5188b939478073039307a3d9b9b3d9126626f3a | 35,539 | py | Python | mars/worker/execution.py | immortalFrogJiang/mars | 93c786e38bdc0fbb483282d7792379db0345a3b6 | [
"Apache-2.0"
] | 1 | 2019-02-01T07:41:48.000Z | 2019-02-01T07:41:48.000Z | mars/worker/execution.py | immortalFrogJiang/mars | 93c786e38bdc0fbb483282d7792379db0345a3b6 | [
"Apache-2.0"
] | null | null | null | mars/worker/execution.py | immortalFrogJiang/mars | 93c786e38bdc0fbb483282d7792379db0345a3b6 | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import random
import sys
import time
from functools import partial
from collections import defaultdict
from .. import promise
from ..compat import Enum
from ..config import options
from ..errors import PinChunkFailed, WorkerProcessStopped, ExecutionInterrupted, DependencyMissing
from ..tensor.expressions.datasource import TensorFetch
from ..utils import deserialize_graph, log_unhandled
from .chunkholder import ensure_chunk
from .spill import spill_exists
from .utils import WorkerActor, ExpiringCache, concat_operand_keys
logger = logging.getLogger(__name__)
class ExecutionState(Enum):
    # Lifecycle states of a graph executed on this worker, roughly in the
    # order a graph moves through them.
    PRE_PUSHED = 'pre_pushed'            # submitted, waiting on predecessors
    ALLOCATING = 'allocating'            # queued / acquiring memory quota
    PREPARING_INPUTS = 'preparing_inputs'  # unspilling or fetching input chunks
    CALCULATING = 'calculating'          # running in a calc actor
    STORING = 'storing'                  # dumping results into shared cache
class GraphExecutionRecord(object):
    """
    Execution records of the graph
    """
    __slots__ = ('graph', 'graph_serialized', '_state', 'op_string', 'targets', 'io_meta',
                 'priority_data', 'data_sizes', 'chunks_use_once', 'state_time',
                 'mem_request', 'pin_request', 'est_finish_time', 'calc_actor_uid',
                 'send_addresses', 'retry_delay', 'enqueue_callback', 'finish_callbacks',
                 'stop_requested', 'succ_keys', 'undone_pred_keys')

    def __init__(self, graph_serialized, state, targets=None, io_meta=None, priority_data=None,
                 data_sizes=None, chunks_use_once=None, mem_request=None, pin_request=None,
                 est_finish_time=None, calc_actor_uid=None, send_addresses=None,
                 retry_delay=None, enqueue_callback=None, finish_callbacks=None,
                 stop_requested=False, undone_pred_keys=None, succ_keys=None):
        # Keep both serialized and deserialized forms: the serialized form is
        # re-submitted on retry, the deserialized graph is inspected directly.
        self.graph_serialized = graph_serialized
        self.graph = deserialize_graph(graph_serialized)

        self._state = state
        self.state_time = time.time()

        # Fall back to fresh empty containers for every optional argument.
        self.targets = targets if targets else []
        self.io_meta = io_meta if io_meta else dict()
        self.data_sizes = data_sizes if data_sizes else dict()
        self.priority_data = priority_data if priority_data else dict()
        self.chunks_use_once = chunks_use_once if chunks_use_once else set()
        self.mem_request = mem_request if mem_request else dict()
        self.pin_request = pin_request if pin_request else set()
        self.est_finish_time = est_finish_time if est_finish_time else time.time()
        self.calc_actor_uid = calc_actor_uid
        self.send_addresses = send_addresses
        self.retry_delay = retry_delay if retry_delay else 0
        self.enqueue_callback = enqueue_callback
        self.finish_callbacks = finish_callbacks if finish_callbacks else []
        self.stop_requested = stop_requested if stop_requested else False
        self.succ_keys = set(succ_keys) if succ_keys else set()
        self.undone_pred_keys = set(undone_pred_keys) if undone_pred_keys else set()

        # Human-readable operand description used in log messages.
        self.op_string = concat_operand_keys(self.graph)[1]

    @property
    def state(self):
        """Current ExecutionState of the graph."""
        return self._state

    @state.setter
    def state(self, value):
        # Every transition refreshes the timestamp for state-age reporting.
        self._state = value
        self.state_time = time.time()
class GraphResultRecord(object):
    """
    Execution result of a graph

    On success holds the sizes of produced data; on failure holds the
    exception info tuple to propagate to waiting promises.
    """
    __slots__ = 'data_sizes', 'exc', 'succeeded'

    def __init__(self, *args, **kwargs):
        self.succeeded = kwargs.pop('succeeded', True)
        if not self.succeeded:
            # Failure: positional args carry the exc_info tuple.
            self.exc = args
        else:
            # Success: the single positional arg is the size dict.
            self.data_sizes = args[0]

    def build_args(self):
        """Build (args, kwargs) suitable for resolving or rejecting a promise."""
        if not self.succeeded:
            return self.exc, dict(_accept=False)
        return (self.data_sizes,), {}
class ExecutionActor(WorkerActor):
"""
Actor for execution control
"""
_last_dump_time = time.time()
def __init__(self):
super(ExecutionActor, self).__init__()
self._chunk_holder_ref = None
self._dispatch_ref = None
self._task_queue_ref = None
self._mem_quota_ref = None
self._status_ref = None
self._daemon_ref = None
self._graph_records = dict() # type: dict[tuple, GraphExecutionRecord]
self._result_cache = ExpiringCache() # type: dict[tuple, GraphResultRecord]
def post_create(self):
from .chunkholder import ChunkHolderActor
from .daemon import WorkerDaemonActor
from .dispatcher import DispatchActor
from .quota import MemQuotaActor
from .status import StatusActor
from .taskqueue import TaskQueueActor
super(ExecutionActor, self).post_create()
self._chunk_holder_ref = self.promise_ref(ChunkHolderActor.default_name())
self._dispatch_ref = self.promise_ref(DispatchActor.default_name())
self._task_queue_ref = self.promise_ref(TaskQueueActor.default_name())
self._mem_quota_ref = self.promise_ref(MemQuotaActor.default_name())
self._daemon_ref = self.ctx.actor_ref(WorkerDaemonActor.default_name())
if self.ctx.has_actor(self._daemon_ref):
self._daemon_ref.register_callback(self.ref(), self.handle_process_down.__name__, _tell=True)
else:
self._daemon_ref = None
self._status_ref = self.ctx.actor_ref(StatusActor.default_name())
if not self.ctx.has_actor(self._status_ref):
self._status_ref = None
self.periodical_dump()
def periodical_dump(self):
"""
Periodically dump debug information
"""
if logger.getEffectiveLevel() > logging.DEBUG:
return
cls = type(self)
if cls._last_dump_time < time.time() - 10:
cls._last_dump_time = time.time()
if self._graph_records:
self._dump_execution_states()
self.ref().periodical_dump(_tell=True, _delay=10)
@promise.reject_on_exception
@log_unhandled
def enqueue_graph(self, session_id, graph_key, graph_ser, io_meta, data_sizes,
priority_data=None, send_addresses=None, succ_keys=None,
pred_keys=None, callback=None):
"""
Submit graph to the worker and control the execution
:param session_id: session id
:param graph_key: graph key
:param graph_ser: serialized executable graph
:param io_meta: io meta of the chunk
:param data_sizes: data size of each input chunk, as a dict
:param priority_data: data priority
:param send_addresses: targets to send results after execution
:param pred_keys: predecessor operand keys, available when the submitted graph require predecessors to finish
:param succ_keys: successor operand keys
:param callback: promise callback
"""
priority_data = priority_data or dict()
graph_record = self._graph_records[(session_id, graph_key)] = GraphExecutionRecord(
graph_ser, ExecutionState.ALLOCATING,
io_meta=io_meta,
data_sizes=data_sizes,
enqueue_callback=callback,
priority_data=priority_data,
targets=io_meta['chunks'],
succ_keys=succ_keys,
chunks_use_once=set(io_meta.get('input_chunks', [])) - set(io_meta.get('shared_input_chunks', [])),
send_addresses=send_addresses,
)
for k in pred_keys or ():
try:
pred_result = self._result_cache[(session_id, k)]
if pred_result.succeeded:
graph_record.data_sizes.update(pred_result.data_sizes)
else:
graph_record.undone_pred_keys.add(k)
except KeyError:
graph_record.undone_pred_keys.add(k)
if not graph_record.undone_pred_keys:
logger.debug('Worker graph %s(%s) targeting at %r accepted.', graph_key,
graph_record.op_string, graph_record.targets)
self._update_state(session_id, graph_key, ExecutionState.ALLOCATING)
self._task_queue_ref.enqueue_task(session_id, graph_key, priority_data, _promise=True) \
.then(lambda *_: self.tell_promise(callback) if callback else None)
else:
logger.debug('Worker graph %s(%s) targeting at %r pre-pushed.', graph_key,
graph_record.op_string, graph_record.targets)
self._update_state(session_id, graph_key, ExecutionState.PRE_PUSHED)
logger.debug('Worker graph %s(%s) now has unfinished predecessors %r.',
graph_key, graph_record.op_string, graph_record.undone_pred_keys)
def _notify_successors(self, session_id, graph_key):
query_key = (session_id, graph_key)
graph_rec = self._graph_records[query_key]
result_rec = self._result_cache[query_key]
for succ_key in graph_rec.succ_keys:
try:
succ_rec = self._graph_records[(session_id, succ_key)]
except KeyError:
continue
try:
succ_rec.data_sizes.update(result_rec.data_sizes)
except (KeyError, AttributeError):
pass
succ_rec.undone_pred_keys.difference_update((graph_key,))
if succ_rec.undone_pred_keys:
logger.debug('Worker graph %s(%s) now has unfinished predecessors %r.',
succ_key, succ_rec.op_string, succ_rec.undone_pred_keys)
continue
missing_keys = [c.key for c in succ_rec.graph if c.key not in succ_rec.data_sizes
and isinstance(c.op, TensorFetch)]
if missing_keys:
sizes = self.get_meta_ref(session_id, graph_key, local=False) \
.batch_get_chunk_size(session_id, missing_keys)
succ_rec.data_sizes.update(zip(missing_keys, sizes))
logger.debug('Worker graph %s(%s) targeting at %r from PRE_PUSHED into ALLOCATING.',
succ_key, succ_rec.op_string, succ_rec.targets)
self._update_state(session_id, succ_key, ExecutionState.ALLOCATING)
enqueue_callback = succ_rec.enqueue_callback
p = self._task_queue_ref.enqueue_task(
session_id, succ_key, succ_rec.priority_data, _promise=True)
if enqueue_callback:
p.then(partial(self.tell_promise, enqueue_callback))
@log_unhandled
def prepare_quota_request(self, session_id, graph_key):
"""
Calculate quota request for an execution graph
:param session_id: session id
:param graph_key: key of the execution graph
:return: allocation dict
"""
try:
graph_record = self._graph_records[(session_id, graph_key)]
except KeyError:
return None
graph = graph_record.graph
alloc_mem_batch = dict()
alloc_cache_batch = dict()
input_chunk_keys = dict()
if self._status_ref:
self.estimate_graph_finish_time(session_id, graph_key)
# collect potential allocation sizes
for chunk in graph:
if not isinstance(chunk.op, TensorFetch) and chunk.key in graph_record.targets:
# use estimated size as potential allocation size
alloc_mem_batch[chunk.key] = chunk.rough_nbytes * 2
alloc_cache_batch[chunk.key] = chunk.rough_nbytes
else:
# use actual size as potential allocation size
input_chunk_keys[chunk.key] = graph_record.data_sizes.get(chunk.key, chunk.nbytes)
keys_to_pin = list(input_chunk_keys.keys())
try:
graph_record.pin_request = set(self._chunk_holder_ref.pin_chunks(graph_key, keys_to_pin))
except PinChunkFailed:
# cannot pin input chunks: retry later
self.dequeue_graph(session_id, graph_key)
retry_delay = graph_record.retry_delay + 0.5 + random.random()
graph_record.retry_delay = min(1 + graph_record.retry_delay, 30)
self.ref().enqueue_graph(
session_id, graph_key, graph_record.graph_serialized, graph_record.io_meta,
graph_record.data_sizes, priority_data=graph_record.priority_data,
send_addresses=graph_record.send_addresses, succ_keys=graph_record.succ_keys,
callback=graph_record.enqueue_callback, _tell=True, _delay=retry_delay)
return None
load_chunk_sizes = dict((k, v) for k, v in input_chunk_keys.items()
if k not in graph_record.pin_request)
alloc_mem_batch.update((self._build_load_key(graph_key, k), v)
for k, v in load_chunk_sizes.items() if k in graph_record.chunks_use_once)
self._chunk_holder_ref.spill_size(sum(alloc_cache_batch.values()), _tell=True)
if alloc_mem_batch:
graph_record.mem_request = alloc_mem_batch
return alloc_mem_batch
@log_unhandled
def dequeue_graph(self, session_id, graph_key):
"""
Remove execution graph task from queue
:param session_id: session id
:param graph_key: key of the execution graph
"""
self._cleanup_graph(session_id, graph_key)
@log_unhandled
def update_priority(self, session_id, graph_key, priority_data):
"""
Update priority data for given execution graph
:param session_id: session id
:param graph_key: key of the execution graph
:param priority_data: priority data
"""
query_key = (session_id, graph_key)
if query_key not in self._graph_records:
return
self._graph_records[query_key].priority_data = priority_data
self._task_queue_ref.update_priority(session_id, graph_key, priority_data)
@staticmethod
def _build_load_key(graph_key, chunk_key):
return '%s_load_memory_%s' % (graph_key, chunk_key)
@log_unhandled
def _fetch_remote_data(self, session_id, graph_key, chunk_key, remote_addr, *_, **kwargs):
"""
Asynchronously send data receiving command to a remote address
:param session_id: session id
:param graph_key: graph key
:param chunk_key: chunk key
:param remote_addr: remote server containing provided chunk key
:return: promise object
"""
from .dispatcher import DispatchActor
remote_disp_ref = self.promise_ref(uid=DispatchActor.default_name(),
address=remote_addr)
ensure_cached = kwargs.pop('ensure_cached', True)
@log_unhandled
def _finish_fetch(*_):
self._chunk_holder_ref.pin_chunks(graph_key, chunk_key)
if self._chunk_holder_ref.is_stored(chunk_key):
self._mem_quota_ref.release_quota(self._build_load_key(graph_key, chunk_key))
@log_unhandled
def _fetch_step(sender_uid):
if self._graph_records[(session_id, graph_key)].stop_requested:
self._dispatch_ref.register_free_slot(sender_uid, 'sender')
raise ExecutionInterrupted
sender_ref = self.promise_ref(sender_uid, address=remote_addr)
logger.debug('Request for chunk %s transferring from %s', chunk_key, remote_addr)
return sender_ref.send_data(
session_id, chunk_key, self.address, ensure_cached=ensure_cached,
timeout=options.worker.prepare_data_timeout, _promise=True
).then(_finish_fetch)
return promise.Promise(done=True) \
.then(lambda *_: remote_disp_ref.get_free_slot('sender', _promise=True)) \
.then(_fetch_step)
def estimate_graph_finish_time(self, session_id, graph_key, calc_fetch=True, base_time=None):
"""
Calc predictions for given chunk graph
"""
session_graph_key = (session_id, graph_key)
if session_graph_key not in self._graph_records:
return
graph_record = self._graph_records[session_graph_key]
graph = graph_record.graph
ops = set(type(c.op).__name__ for c in graph if not isinstance(c.op, TensorFetch))
op_calc_key = ('calc_speed.' + list(ops)[0]) if len(ops) == 1 else None
stats = defaultdict(lambda: dict(count=0))
if self._status_ref:
stats.update(self._status_ref.get_stats(['disk_read_speed', 'disk_write_speed',
'net_transfer_speed', op_calc_key]))
if op_calc_key not in stats:
return None
if stats[op_calc_key]['count'] < options.optimize.min_stats_count:
return None
if abs(stats[op_calc_key]['count']) < 1e-6:
return None
input_size = 0
net_size = 0
disk_size = 0
base_time = base_time or time.time()
if calc_fetch:
for c in graph:
if not isinstance(c.op, TensorFetch):
break
input_size += c.nbytes
if self._chunk_holder_ref.is_stored(c.key):
continue
if spill_exists(c.key):
disk_size += c.nbytes
else:
net_size += c.nbytes
if stats['net_transfer_speed']['count'] >= options.optimize.min_stats_count:
base_time += net_size * 1.0 / stats['net_transfer_speed']['mean']
if stats['disk_read_speed']['count'] >= options.optimize.min_stats_count:
base_time += disk_size * 1.0 / stats['disk_read_speed']['mean']
else:
base_time += disk_size * 1.0 / options.optimize.default_disk_io_speed
est_finish_time = base_time + input_size * 1.0 / stats[op_calc_key]['mean']
graph_record.est_finish_time = est_finish_time
self._status_ref.update_stats(dict(
min_est_finish_time=min(rec.est_finish_time for rec in self._graph_records.values()),
max_est_finish_time=max(rec.est_finish_time for rec in self._graph_records.values()),
), _tell=True, _wait=False)
self.ref().estimate_graph_finish_time(session_id, graph_key, _tell=True, _delay=1)
def _update_state(self, session_id, key, state):
logger.debug('Operand %s switched to %s', key, getattr(state, 'name'))
record = self._graph_records[(session_id, key)]
record.state = state
if self._status_ref:
self._status_ref.update_progress(session_id, key, record.op_string, state.name,
_tell=True, _wait=False)
@promise.reject_on_exception
@log_unhandled
def start_execution(self, session_id, graph_key, send_addresses=None, callback=None):
"""
Submit graph to the worker and control the execution
:param session_id: session id
:param graph_key: key of the execution graph
:param send_addresses: targets to send results after execution
:param callback: promise callback
"""
graph_record = self._graph_records[(session_id, graph_key)]
if send_addresses:
graph_record.send_addresses = send_addresses
# add callbacks to callback store
if callback is None:
callback = []
elif not isinstance(callback, list):
callback = [callback]
graph_record.finish_callbacks.extend(callback)
try:
del self._result_cache[(session_id, graph_key)]
except KeyError:
pass
@log_unhandled
def _wait_free_slot(*_):
return self._dispatch_ref.get_free_slot('cpu', _promise=True)
@log_unhandled
def _handle_success(*_):
self._notify_successors(session_id, graph_key)
self._invoke_finish_callbacks(session_id, graph_key)
@log_unhandled
def _handle_rejection(*exc):
# some error occurred...
logger.debug('Entering _handle_rejection() for graph %s', graph_key)
self._dump_execution_states()
if graph_record.stop_requested:
graph_record.stop_requested = False
if not isinstance(exc[1], ExecutionInterrupted):
try:
raise ExecutionInterrupted
except ExecutionInterrupted:
exc = sys.exc_info()
if isinstance(exc[1], ExecutionInterrupted):
logger.warning('Execution of graph %s interrupted.', graph_key)
else:
logger.exception('Unexpected error occurred in executing %s', graph_key, exc_info=exc)
self._result_cache[(session_id, graph_key)] = GraphResultRecord(*exc, **dict(succeeded=False))
self._invoke_finish_callbacks(session_id, graph_key)
self._prepare_graph_inputs(session_id, graph_key) \
.then(_wait_free_slot) \
.then(lambda uid: self._send_calc_request(session_id, graph_key, uid)) \
.then(lambda uid, sizes: self._dump_cache(session_id, graph_key, uid, sizes)) \
.then(_handle_success, _handle_rejection)
@log_unhandled
def _prepare_graph_inputs(self, session_id, graph_key):
"""
Load input data from spilled storage and other workers
:param session_id: session id
:param graph_key: key of the execution graph
"""
graph_record = self._graph_records[(session_id, graph_key)]
if graph_record.stop_requested:
raise ExecutionInterrupted
unspill_keys = []
transfer_keys = []
logger.debug('Start preparing input data for graph %s', graph_key)
self._update_state(session_id, graph_key, ExecutionState.PREPARING_INPUTS)
prepare_promises = []
chunks_use_once = graph_record.chunks_use_once
handled_keys = set()
for chunk in graph_record.graph:
if not isinstance(chunk.op, TensorFetch):
continue
if chunk.key in handled_keys:
continue
handled_keys.add(chunk.key)
if self._chunk_holder_ref.is_stored(chunk.key):
# data already in plasma: we just pin it
pinned_keys = self._chunk_holder_ref.pin_chunks(graph_key, chunk.key)
if chunk.key in pinned_keys:
self._mem_quota_ref.release_quota(self._build_load_key(graph_key, chunk.key))
continue
if spill_exists(chunk.key):
if chunk.key in chunks_use_once:
# input only use in current operand, we only need to load it into process memory
continue
self._mem_quota_ref.release_quota(self._build_load_key(graph_key, chunk.key))
load_fun = partial(lambda gk, ck, *_: self._chunk_holder_ref.pin_chunks(gk, ck),
graph_key, chunk.key)
unspill_keys.append(chunk.key)
prepare_promises.append(ensure_chunk(self, session_id, chunk.key, move_to_end=True) \
.then(load_fun))
continue
# load data from another worker
chunk_meta = self.get_meta_ref(session_id, chunk.key) \
.get_chunk_meta(session_id, chunk.key)
if chunk_meta is None:
raise DependencyMissing('Dependency %s not met on sending.' % chunk.key)
worker_results = chunk_meta.workers
worker_priorities = []
for worker_ip in worker_results:
# todo sort workers by speed of network and other possible factors
worker_priorities.append((worker_ip, (0, )))
transfer_keys.append(chunk.key)
# fetch data from other workers, if one fails, try another
sorted_workers = sorted(worker_priorities, key=lambda pr: pr[1])
p = self._fetch_remote_data(session_id, graph_key, chunk.key, sorted_workers[0][0],
ensure_cached=chunk.key not in chunks_use_once)
for wp in sorted_workers[1:]:
p = p.catch(functools.partial(self._fetch_remote_data, session_id, graph_key, chunk.key, wp[0],
ensure_cached=chunk.key not in chunks_use_once))
prepare_promises.append(p)
logger.debug('Graph key %s: Targets %r, unspill keys %r, transfer keys %r',
graph_key, graph_record.targets, unspill_keys, transfer_keys)
return promise.all_(prepare_promises)
@log_unhandled
def _send_calc_request(self, session_id, graph_key, calc_uid):
"""
Start actual calculation in CpuCalcActor
:param session_id: session id
:param graph_key: key of the execution graph
:param calc_uid: uid of the allocated CpuCalcActor
"""
graph_record = self._graph_records[(session_id, graph_key)]
try:
if graph_record.stop_requested:
raise ExecutionInterrupted
graph_record.calc_actor_uid = calc_uid
# get allocation for calc, in case that memory exhausts
target_allocs = dict()
for chunk in graph_record.graph:
if isinstance(chunk.op, TensorFetch):
if not self._chunk_holder_ref.is_stored(chunk.key):
alloc_key = self._build_load_key(graph_key, chunk.key)
if alloc_key in graph_record.mem_request:
target_allocs[alloc_key] = graph_record.mem_request[alloc_key]
elif chunk.key in graph_record.targets:
target_allocs[chunk.key] = graph_record.mem_request[chunk.key]
logger.debug('Start calculation for graph %s in actor %s', graph_key, calc_uid)
self._update_state(session_id, graph_key, ExecutionState.CALCULATING)
raw_calc_ref = self.ctx.actor_ref(calc_uid)
calc_ref = self.promise_ref(raw_calc_ref)
def _start_calc(*_):
if self._daemon_ref is None or self._daemon_ref.is_actor_process_alive(raw_calc_ref):
return calc_ref.calc(session_id, graph_record.graph_serialized,
graph_record.targets, _promise=True)
else:
raise WorkerProcessStopped
self.estimate_graph_finish_time(session_id, graph_key, calc_fetch=False)
except:
self._dispatch_ref.register_free_slot(calc_uid, 'cpu')
raise
# make sure that memory suffices before actually run execution
return self._mem_quota_ref.request_batch_quota(target_allocs, _promise=True) \
.then(_start_calc)
@log_unhandled
def _dump_cache(self, session_id, graph_key, inproc_uid, save_sizes):
"""
Dump calc results into shared cache or spill
:param session_id: session id
:param graph_key: key of the execution graph
:param inproc_uid: uid of the InProcessCacheActor
:param save_sizes: sizes of data
"""
graph_record = self._graph_records[session_id, graph_key]
calc_keys = graph_record.targets
send_addresses = graph_record.send_addresses
@log_unhandled
def _do_active_transfer(*_):
# transfer the result chunk to expected endpoints
@log_unhandled
def _send_chunk(sender_uid, chunk_key, target_addrs):
if graph_record.stop_requested:
self._dispatch_ref.register_free_slot(sender_uid, 'sender')
raise ExecutionInterrupted
sender_ref = self.promise_ref(sender_uid)
logger.debug('Request for chunk %s sent to %s', chunk_key, target_addrs)
return sender_ref.send_data(session_id, chunk_key, target_addrs, ensure_cached=False,
timeout=options.worker.prepare_data_timeout, _promise=True)
if graph_record.mem_request:
self._mem_quota_ref.release_quotas(tuple(graph_record.mem_request.keys()), _tell=True)
graph_record.mem_request = dict()
promises = []
for key, targets in send_addresses.items():
promises.append(self._dispatch_ref.get_free_slot('sender', _promise=True)
.then(partial(_send_chunk, chunk_key=key, target_addrs=targets))
.catch(lambda *_: None))
return promise.all_(promises)
logger.debug('Graph %s: Start putting %r into shared cache. Target actor uid %s.',
graph_key, calc_keys, inproc_uid)
self._update_state(session_id, graph_key, ExecutionState.STORING)
raw_inproc_ref = self.ctx.actor_ref(inproc_uid)
inproc_ref = self.promise_ref(raw_inproc_ref)
if graph_record.stop_requested:
logger.debug('Graph %s already marked for stop, quit.', graph_key)
if (self._daemon_ref is None or self._daemon_ref.is_actor_process_alive(raw_inproc_ref)) \
and self.ctx.has_actor(raw_inproc_ref):
logger.debug('Try remove keys for graph %s.', graph_key)
raw_inproc_ref.remove_cache(list(calc_keys), _tell=True)
logger.debug('Graph %s already marked for stop, quit.', graph_key)
raise ExecutionInterrupted
self._chunk_holder_ref.unpin_chunks(
graph_key, list(set(c.key for c in graph_record.graph)), _tell=True)
self._dump_execution_states()
if self._daemon_ref is not None and not self._daemon_ref.is_actor_process_alive(raw_inproc_ref):
raise WorkerProcessStopped
def _cache_result(*_):
self._result_cache[(session_id, graph_key)] = GraphResultRecord(save_sizes)
if not send_addresses:
# no endpoints to send, dump keys into shared memory and return
logger.debug('Worker graph %s(%s) finished execution. Dumping %r into plasma...',
graph_key, graph_record.op_string, calc_keys)
return inproc_ref.dump_cache(calc_keys, _promise=True) \
.then(_cache_result)
else:
# dump keys into shared memory and send
logger.debug('Worker graph %s(%s) finished execution. Dumping %r into plasma '
'while actively transferring %r...',
graph_key, graph_record.op_string, calc_keys, send_addresses)
return inproc_ref.dump_cache(calc_keys, _promise=True) \
.then(_do_active_transfer) \
.then(_cache_result)
def _cleanup_graph(self, session_id, graph_key):
"""
Do clean up after graph is executed
:param session_id: session id
:param graph_key: graph key
"""
logger.debug('Cleaning callbacks for graph %s', graph_key)
self._task_queue_ref.release_task(session_id, graph_key, _tell=True)
try:
graph_record = self._graph_records[(session_id, graph_key)]
except KeyError:
return
self._mem_quota_ref.cancel_requests(tuple(graph_record.mem_request.keys()), _tell=True)
if graph_record.mem_request:
self._mem_quota_ref.release_quotas(tuple(graph_record.mem_request.keys()), _tell=True)
if graph_record.pin_request:
self._chunk_holder_ref.unpin_chunks(graph_key, graph_record.pin_request, _tell=True)
if self._status_ref:
self._status_ref.remove_progress(session_id, graph_key, _tell=True, _wait=False)
del self._graph_records[(session_id, graph_key)]
@promise.reject_on_exception
@log_unhandled
def add_finish_callback(self, session_id, graph_key, callback):
"""
Register a callback to callback store
:param session_id: session id
:param graph_key: graph key
:param callback: promise call
"""
logger.debug('Adding callback %r for graph %s', callback, graph_key)
try:
args, kwargs = self._result_cache[(session_id, graph_key)].build_args()
self.tell_promise(callback, *args, **kwargs)
except KeyError:
self._graph_records[(session_id, graph_key)].finish_callbacks.append(callback)
@log_unhandled
def stop_execution(self, session_id, graph_key):
"""
Mark graph for stopping
:param graph_key: graph key
"""
logger.debug('Receive stop for graph %s', graph_key)
try:
graph_record = self._graph_records[(session_id, graph_key)]
except KeyError:
return
graph_record.stop_requested = True
if graph_record.state == ExecutionState.ALLOCATING:
try:
raise ExecutionInterrupted
except:
exc_info = sys.exc_info()
if graph_record.mem_request:
self._mem_quota_ref.cancel_requests(tuple(graph_record.mem_request.keys()), exc_info, _tell=True)
elif graph_record.state == ExecutionState.CALCULATING:
if self._daemon_ref is not None and graph_record.calc_actor_uid is not None:
self._daemon_ref.kill_actor_process(self.ctx.actor_ref(graph_record.calc_actor_uid), _tell=True)
@log_unhandled
def _invoke_finish_callbacks(self, session_id, graph_key):
"""
Call finish callback when execution is done
:param session_id: session id
:param graph_key: graph key
"""
query_key = (session_id, graph_key)
callbacks = self._graph_records[query_key].finish_callbacks
args, kwargs = self._result_cache[query_key].build_args()
logger.debug('Send finish callback for graph %s into %d targets', graph_key, len(callbacks))
for cb in callbacks:
self.tell_promise(cb, *args, **kwargs)
self._cleanup_graph(session_id, graph_key)
def _dump_execution_states(self, show_unrun=False):
if logger.getEffectiveLevel() <= logging.DEBUG:
cur_time = time.time()
states = dict((k[1], (cur_time - v.state_time, v.state.name))
for k, v in self._graph_records.items()
if show_unrun or v.state not in (ExecutionState.PRE_PUSHED, ExecutionState.ALLOCATING))
logger.debug('Executing states: %r', states)
def handle_process_down(self, halt_refs):
    """
    Reject every promise held by actors in worker processes that went down.
    :param halt_refs: actor refs in halt processes
    """
    logger.debug('Process halt detected. Trying to reject affected promises %r.',
                 [ref.uid for ref in halt_refs])
    # raise-and-catch to build a full exc_info tuple for rejection
    try:
        raise WorkerProcessStopped
    except WorkerProcessStopped:
        exc_info = sys.exc_info()
    for halt_ref in halt_refs:
        self.reject_promise_ref(halt_ref, *exc_info)
| 43.552696 | 117 | 0.63533 | 4,352 | 35,539 | 4.859605 | 0.106618 | 0.043501 | 0.039718 | 0.047425 | 0.415953 | 0.343704 | 0.271928 | 0.230555 | 0.182656 | 0.151969 | 0 | 0.001965 | 0.284026 | 35,539 | 815 | 118 | 43.606135 | 0.8292 | 0.110526 | 0 | 0.254833 | 0 | 0 | 0.058298 | 0 | 0 | 0 | 0 | 0.001227 | 0 | 1 | 0.063269 | false | 0.003515 | 0.040422 | 0.005272 | 0.168717 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d51a3e8b7e251050ecd8235bb18dc9a527dfd0c7 | 5,574 | py | Python | models/text2text/encoder.py | aasseman/mi-prometheus | c655c88cc6aec4d0724c19ea95209f1c2dd6770d | [
"Apache-2.0"
] | null | null | null | models/text2text/encoder.py | aasseman/mi-prometheus | c655c88cc6aec4d0724c19ea95209f1c2dd6770d | [
"Apache-2.0"
] | null | null | null | models/text2text/encoder.py | aasseman/mi-prometheus | c655c88cc6aec4d0724c19ea95209f1c2dd6770d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Sean Robertson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# --------------------------------------------------------------------------------
#
# Copyright (C) IBM Corporation 2018
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""encoder.py: Implementation of a GRU based encoder for text2text problems (e.g. translation)
Inspiration taken from the corresponding Pytorch tutorial.
See https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html """
__author__ = "Vincent Marois "
import torch
from torch import nn
from utils.app_state import AppState
class EncoderRNN(nn.Module):
    """GRU-based encoder half of an encoder-decoder (seq2seq) model.

    Word indices are embedded into dense vectors of length ``hidden_size``
    and then passed through a (possibly multi-layer, possibly bidirectional)
    Gated Recurrent Unit.
    """

    def __init__(self, input_voc_size, hidden_size, bidirectional, n_layers):
        """Build the embedding table and the GRU stack.

        :param input_voc_size: size of the vocabulary set to be embedded by the Embedding layer.
        :param hidden_size: length of embedding vectors (also the GRU feature width).
        :param bidirectional: indicates whether the encoder model is bidirectional or not.
        :param n_layers: number of layers for the Gated Recurrent Unit.
        """
        super(EncoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.bidirectional = bidirectional
        self.n_layers = n_layers

        # Look-up table from word index to a hidden_size-dim vector.
        # WARNING: inputs must be LongTensors.
        self.embedding = nn.Embedding(num_embeddings=input_voc_size,
                                      embedding_dim=hidden_size)

        # GRU input width equals hidden_size because it consumes embeddings.
        # batch_first=True: input/output tensors are (batch, seq, feature);
        # this does not change the layout of the hidden state.
        self.gru = nn.GRU(input_size=hidden_size,
                          hidden_size=hidden_size,
                          num_layers=self.n_layers,
                          batch_first=True,
                          bidirectional=self.bidirectional)

    def forward(self, input, hidden):
        """Run the encoder over one (batch of) input word(s).

        :param input: tensor of word indices, size [batch_size x 1] when
            looping word by word.
        :param hidden: initial hidden state,
            size [(n_layers * n_directions) x batch_size x hidden_size].
        :return: (output, hidden); output has size
            [batch_size x seq_len x (hidden_size * n_directions)], hidden has
            size [(n_layers * n_directions) x batch_size x hidden_size].
        """
        # embedding output: [batch_size x seq_len x hidden_size]
        return self.gru(self.embedding(input), hidden)

    def init_hidden(self, batch_size):
        """Return a zeroed initial hidden state for a batch of *batch_size*."""
        num_directions = 2 if self.bidirectional else 1
        return torch.zeros(self.n_layers * num_directions, batch_size,
                           self.hidden_size).type(AppState().dtype)
| 41.909774 | 182 | 0.685863 | 767 | 5,574 | 4.887875 | 0.353325 | 0.045345 | 0.022406 | 0.016004 | 0.100293 | 0.082689 | 0.049613 | 0.049613 | 0.028274 | 0.028274 | 0 | 0.005385 | 0.233764 | 5,574 | 132 | 183 | 42.227273 | 0.872395 | 0.700036 | 0 | 0.066667 | 0 | 0 | 0.010601 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d51cb89eb6deb0955d26dd78296fd7fa6bebba2b | 3,855 | py | Python | gatlin/infra/excel.py | kokomal/GATLIN | 20102e6d926a3a805d1cb30c8d6ec45b492ac507 | [
"BSD-3-Clause"
] | 1 | 2019-08-05T13:01:04.000Z | 2019-08-05T13:01:04.000Z | gatlin/infra/excel.py | kokomal/GATLIN | 20102e6d926a3a805d1cb30c8d6ec45b492ac507 | [
"BSD-3-Clause"
] | null | null | null | gatlin/infra/excel.py | kokomal/GATLIN | 20102e6d926a3a805d1cb30c8d6ec45b492ac507 | [
"BSD-3-Clause"
] | null | null | null | # coding = utf-8
# -*- coding: utf-8 -*-
import json
from openpyxl import load_workbook
# Read the two columns directly below a named region and assemble them into a map
def read_region_below_map(fn, sheet_name, region_name):
    """Read key/value pairs from the two columns below a named region.

    Starting one row below *region_name*, each row contributes the cell in
    the region's column as key and the cell to its right as value, until an
    empty cell in the key column ends the scan.
    """
    sheet = load_workbook(fn)[sheet_name]
    anchor = sheet[region_name]
    row, col = anchor.row + 1, anchor.column
    result = {}
    while True:
        key_cell = sheet.cell(row, col)
        if key_cell.value is None:
            break
        result[key_cell.value] = sheet.cell(row, col + 1).value
        row += 1
    return result
def read_sheet_and_get_json(fn, sheet_name, region):
    """Concatenate the cell texts at and below *region* and parse them as JSON.

    Scanning stops at the first empty cell in the column.
    """
    wb = load_workbook(fn)
    wb.guess_types = True  # let openpyxl guess cell formats
    sheet = wb[sheet_name]
    anchor = sheet[region]
    row, col = anchor.row, anchor.column
    pieces = []
    while True:
        cell_text = sheet.cell(row, col).value
        if cell_text is None:
            break
        # Excel leaves a literal "_x000D_" (CR escape) in multi-line cells;
        # strip it out before joining — ugly but necessary.
        pieces.append(cell_text.replace("_x000D_", "").strip())
        row += 1
    return json.loads("".join(pieces))
# Find the row number where the keyword appears in the given column
def find_row_num(fn, sheet_name, keyword, start_region_name):
    """Return the 1-based row where *keyword* appears in the start region's column.

    The search starts one row below *start_region_name* and runs to the
    sheet's last row; returns -1 when the keyword is not found.
    """
    sheet = load_workbook(fn)[sheet_name]
    start = sheet[start_region_name]
    for row in range(start.row + 1, sheet.max_row + 1):
        if sheet.cell(row, start.column).value == keyword:
            return row
    return -1
# Find the cell coordinate (e.g. "B7") where the keyword appears in the given column
def find_row_region(fn, sheet_name, keyword, start_region_name):
    """Return the coordinate string of the cell holding *keyword* below the start region."""
    sheet = load_workbook(fn)[sheet_name]
    keyword_row = find_row_num(fn, sheet_name, keyword, start_region_name)
    return sheet.cell(keyword_row, sheet[start_region_name].column).coordinate
def find_row_and_pack_map(fn, sheet_name, keyword, start_region_name):
    """Locate *keyword* below the start region, then read the two-column
    key/value table directly beneath it into a dict.

    Scanning stops at the first empty cell in the key column.
    """
    sheet = load_workbook(fn)[sheet_name]
    col = sheet[start_region_name].column
    row = find_row_num(fn, sheet_name, keyword, start_region_name) + 1
    mapping = {}
    while sheet.cell(row, col).value is not None:
        mapping[sheet.cell(row, col).value] = sheet.cell(row, col + 1).value
        row += 1
    return mapping
class XlsmWrapper:
    """Convenience wrapper binding a workbook path to the lookup helpers.

    NOTE(review): ``self.wb`` caches an opened workbook, but the lookup
    methods below still re-open the file on every call and never use the
    cache — confirm whether that is intentional before changing it.
    """

    def __init__(self, fn):
        self.fn = fn
        self.wb = load_workbook(fn)

    def find_row_and_pack_map(self, sheet_name, keyword, start_region_name):
        """Locate *keyword* below the start region and read the two-column
        key/value table beneath it into a dict; stop at the first empty key cell."""
        sheet = load_workbook(self.fn)[sheet_name]
        col = sheet[start_region_name].column
        row = find_row_num(self.fn, sheet_name, keyword, start_region_name) + 1
        mapping = {}
        while sheet.cell(row, col).value is not None:
            mapping[sheet.cell(row, col).value] = sheet.cell(row, col + 1).value
            row += 1
        return mapping

    def find_row_and_pack_map_with_switch(self, sheet_name, keyword, start_region_name):
        """Same as :meth:`find_row_and_pack_map`, but only include rows whose
        third column (two right of the key) holds the literal switch 'ON'."""
        sheet = load_workbook(self.fn)[sheet_name]
        col = sheet[start_region_name].column
        row = find_row_num(self.fn, sheet_name, keyword, start_region_name) + 1
        mapping = {}
        while sheet.cell(row, col).value is not None:
            if sheet.cell(row, col + 2).value == 'ON':
                mapping[sheet.cell(row, col).value] = sheet.cell(row, col + 1).value
            row += 1
        return mapping
| 32.669492 | 89 | 0.629572 | 567 | 3,855 | 4.015873 | 0.141093 | 0.076856 | 0.098814 | 0.07422 | 0.667106 | 0.657005 | 0.622749 | 0.589372 | 0.5639 | 0.5639 | 0 | 0.00951 | 0.263554 | 3,855 | 117 | 90 | 32.948718 | 0.792533 | 0.06537 | 0 | 0.62766 | 0 | 0 | 0.002506 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085106 | false | 0 | 0.021277 | 0 | 0.202128 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d51db870e2b3ba60a86f83ec6d2144b9f9d6cde6 | 42,562 | py | Python | Assets/Python/Screens/CvForeignAdvisor.py | Imperator-Knoedel/Sunset | 19c95f4844586b96341f3474b58e0dacaae485b9 | [
"MIT"
] | 1 | 2019-08-05T18:36:14.000Z | 2019-08-05T18:36:14.000Z | Assets/Python/Screens/CvForeignAdvisor.py | Imperator-Knoedel/Sunset | 19c95f4844586b96341f3474b58e0dacaae485b9 | [
"MIT"
] | null | null | null | Assets/Python/Screens/CvForeignAdvisor.py | Imperator-Knoedel/Sunset | 19c95f4844586b96341f3474b58e0dacaae485b9 | [
"MIT"
] | null | null | null | ## Sid Meier's Civilization 4
## Copyright Firaxis Games 2005
from CvPythonExtensions import *
import CvUtil
import ScreenInput
import CvScreenEnums
import math
# globals
# Engine accessor singletons shared by the whole module.
gc = CyGlobalContext()
ArtFileMgr = CyArtFileMgr()
localText = CyTranslator()
# this class is shared by both the resource and technology foreign advisors
# Indices of the advisor's sub-screens (used as iScreen / widget data).
FOREIGN_BONUS_SCREEN = 0
FOREIGN_TECH_SCREEN = 1
FOREIGN_RELATIONS_SCREEN = 2
FOREIGN_ACTIVE_TRADE_SCREEN = 3
NUM_FOREIGN_SCREENS = 4
class CvForeignAdvisor:
"Foreign Advisor Screen"
def __init__(self):
    """Initialize widget bookkeeping, layout constants and selection state."""
    # Sub-screen state: -1 means no screen is open yet.
    self.iScreen = -1
    self.iDefaultScreen = FOREIGN_RELATIONS_SCREEN

    # Widget bookkeeping and well-known widget names.
    self.nWidgetCount = 0
    self.nLineCount = 0
    self.WIDGET_ID = "ForeignAdvisorWidget"
    self.LINE_ID = "ForeignAdvisorLine"
    self.SCREEN_NAME = "ForeignAdvisor"
    self.DEBUG_DROPDOWN_ID = "ForeignAdvisorDropdownWidget"
    self.EXIT_ID = "ForeignAdvisorExitWidget"
    self.BACKGROUND_ID = "ForeignAdvisorBackground"

    # Overall screen geometry.
    self.X_SCREEN = 500
    self.Y_SCREEN = 396
    self.W_SCREEN = 1024
    self.H_SCREEN = 768
    self.Y_TITLE = 8
    self.X_EXIT = 994
    self.Y_EXIT = 726

    # Leaderhead portrait placement and size.
    self.X_LEADER = 80
    self.Y_LEADER = 115
    self.H_LEADER = 64
    self.W_LEADER = 64

    # Bottom row of sub-screen navigation links.
    self.X_LINK = 50
    self.DX_LINK = 220
    self.Y_LINK = 726

    # Legend box on the relations sub-screen.
    self.X_LEGEND = 20
    self.Y_LEGEND = 530
    self.H_LEGEND = 180
    self.W_LEGEND = 160
    self.MARGIN_LEGEND = 10

    # Circle-of-leaders layout (X depends on X_SCREEN set above).
    self.X_LEADER_CIRCLE_TOP = self.X_SCREEN + 10
    self.Y_LEADER_CIRCLE_TOP = 87
    self.RADIUS_LEADER_ARC = 480
    self.LINE_WIDTH = 6
    self.BUTTON_SIZE = 64

    # Current selection state.
    self.iSelectedLeader = -1
    self.iActiveLeader = -1
    self.listSelectedLeaders = []
    self.iShiftKeyDown = 0
def killScreen(self):
    """Hide the advisor screen (if one is open) and mark it as closed."""
    if self.iScreen < 0:
        return
    self.getScreen().hideScreen()
    self.iScreen = -1
def getScreen(self):
    """Return the engine screen object for the currently selected sub-screen."""
    szScreenName = self.SCREEN_NAME + str(self.iScreen)
    return CyGInterfaceScreen(szScreenName, CvScreenEnums.FOREIGN_ADVISOR)
def interfaceScreen (self, iScreen):
    """Open (or switch to) the requested foreign advisor sub-screen.

    :param iScreen: one of the FOREIGN_*_SCREEN constants; a negative value
        means "reuse the current sub-screen, or the default if none is open".
    """
    if (iScreen < 0):
        if (self.iScreen < 0):
            iScreen = self.iDefaultScreen
        else:
            iScreen = self.iScreen

    self.EXIT_TEXT = u"<font=4>" + localText.getText("TXT_KEY_PEDIA_SCREEN_EXIT", ()).upper() + u"</font>"
    self.SCREEN_TITLE = u"<font=4b>" + localText.getText("TXT_KEY_FOREIGN_ADVISOR_TITLE", ()).upper() + u"</font>"

    # Switching sub-screens: tear the previous one down first.
    if (self.iScreen != iScreen):
        self.killScreen()
        self.iScreen = iScreen

    screen = self.getScreen()
    if screen.isActive():
        return
    screen.setRenderInterfaceOnly(True);
    screen.showScreen( PopupStates.POPUPSTATE_IMMEDIATE, False)

    # Reset the selection to the human player whenever the screen opens.
    self.iActiveLeader = CyGame().getActivePlayer()
    self.iSelectedLeader = self.iActiveLeader
    self.listSelectedLeaders = []
    #self.listSelectedLeaders.append(self.iSelectedLeader)

    # Set the background and exit button, and show the screen
    screen.setDimensions(screen.centerX(0), screen.centerY(0), self.W_SCREEN, self.H_SCREEN)
    screen.addDrawControl(self.BACKGROUND_ID, ArtFileMgr.getInterfaceArtInfo("SCREEN_BG_OPAQUE").getPath(), 0, 0, self.W_SCREEN, self.H_SCREEN, WidgetTypes.WIDGET_GENERAL, -1, -1 )
    screen.addPanel( "TopPanel", u"", u"", True, False, 0, 0, self.W_SCREEN, 55, PanelStyles.PANEL_STYLE_TOPBAR )
    screen.addPanel( "BottomPanel", u"", u"", True, False, 0, 713, self.W_SCREEN, 55, PanelStyles.PANEL_STYLE_BOTTOMBAR )
    screen.showWindowBackground(False)
    screen.setText(self.EXIT_ID, "", self.EXIT_TEXT, CvUtil.FONT_RIGHT_JUSTIFY, self.X_EXIT, self.Y_EXIT, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_CLOSE_SCREEN, -1, -1 )

    # Fresh widget numbering for this draw pass.
    self.nWidgetCount = 0
    self.nLineCount = 0

    # Debug mode: dropdown to view the advisor as any living player.
    if (CyGame().isDebugMode()):
        self.szDropdownName = self.getWidgetName(self.DEBUG_DROPDOWN_ID)
        screen.addDropDownBoxGFC(self.szDropdownName, 22, 12, 300, WidgetTypes.WIDGET_GENERAL, -1, -1, FontTypes.GAME_FONT)
        for j in range(gc.getMAX_PLAYERS()):
            if (gc.getPlayer(j).isAlive()):
                #screen.addPullDownString(self.szDropdownName, gc.getPlayer(j).getName(), j, j, False ) #Rhye
                screen.addPullDownString(self.szDropdownName, gc.getPlayer(j).getCivilizationShortDescription(0), j, j, False ) #Rhye

    CyInterface().setDirty(InterfaceDirtyBits.Foreign_Screen_DIRTY_BIT, False)

    # Draw leader heads
    self.drawContents(True)
# Drawing Leaderheads
def drawContents(self, bInitial):
    """Draw the title, dispatch to the active sub-screen, then draw the
    bottom row of navigation links.

    :param bInitial: passed through to drawRelations; True on the first
        draw after the screen is opened.
    """
    if (self.iScreen < 0):
        return

    self.deleteAllWidgets()
    screen = self.getScreen()

    # Header...
    screen.setLabel(self.getNextWidgetName(), "", self.SCREEN_TITLE, CvUtil.FONT_CENTER_JUSTIFY, self.X_SCREEN, self.Y_TITLE, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 )

    if (self.iScreen == FOREIGN_RELATIONS_SCREEN):
        self.drawRelations(bInitial)
    elif (self.iScreen == FOREIGN_ACTIVE_TRADE_SCREEN):
        self.drawActive()
    else:
        self.drawPossibleDeals()

    # Link to other Foreign advisor screens.  The current sub-screen's link
    # is highlighted yellow and carries no widget data (not clickable);
    # the others jump to their sub-screen.  (Deduplicated from four
    # copy-pasted blocks; draw order and widget ids are unchanged.)
    linkSpecs = [
        ("TXT_KEY_FOREIGN_ADVISOR_RELATIONS", FOREIGN_RELATIONS_SCREEN),
        ("TXT_KEY_FOREIGN_ADVISOR_RESOURCES", FOREIGN_BONUS_SCREEN),
        ("TXT_KEY_FOREIGN_ADVISOR_TECHS", FOREIGN_TECH_SCREEN),
        ("TXT_KEY_FOREIGN_ADVISOR_ACTIVE", FOREIGN_ACTIVE_TRADE_SCREEN),
    ]
    xLink = self.X_LINK
    for szTextKey, iLinkScreen in linkSpecs:
        szWidgetId = self.getNextWidgetName()
        if (self.iScreen != iLinkScreen):
            screen.setText(szWidgetId, "", u"<font=4>" + localText.getText(szTextKey, ()).upper() + u"</font>", CvUtil.FONT_LEFT_JUSTIFY, xLink, self.Y_LINK, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_FOREIGN_ADVISOR, iLinkScreen, -1)
        else:
            screen.setText(szWidgetId, "", u"<font=4>" + localText.getColorText(szTextKey, (), gc.getInfoTypeForString("COLOR_YELLOW")).upper() + u"</font>", CvUtil.FONT_LEFT_JUSTIFY, xLink, self.Y_LINK, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_FOREIGN_ADVISOR, -1, -1)
        xLink += self.DX_LINK
def drawActive(self):
    """Draw the 'active deals' sub-screen: one panel per met rival,
    sorted by number of current deals with the active player, each
    listing those deals (rows use WIDGET_DEAL_KILL so a deal can be
    cancelled by clicking it)."""
    screen = self.getScreen()

    # Get the Players
    playerActive = gc.getPlayer(self.iActiveLeader)

    # Put everything inside a main panel, so we get vertical scrolling
    mainPanelName = self.getNextWidgetName()
    screen.addPanel(mainPanelName, "", "", True, True, 50, 100, self.W_SCREEN - 100, self.H_SCREEN - 200, PanelStyles.PANEL_STYLE_EMPTY)

    # loop through all players and sort them by number of active deals
    # NOTE: the list is pre-sized with (0, 0) padding tuples; only the
    # first nNumPLayers entries are real and only those are drawn below.
    listPlayers = [(0,0)] * gc.getMAX_PLAYERS()
    nNumPLayers = 0
    for iLoopPlayer in range(gc.getMAX_PLAYERS()):
        if (gc.getPlayer(iLoopPlayer).isAlive() and iLoopPlayer != self.iActiveLeader and not gc.getPlayer(iLoopPlayer).isBarbarian() and not gc.getPlayer(iLoopPlayer).isMinorCiv()):
            if (gc.getTeam(gc.getPlayer(iLoopPlayer).getTeam()).isHasMet(gc.getPlayer(self.iActiveLeader).getTeam()) or gc.getGame().isDebugMode()):
                # count deals in which this player and the active player are the two parties
                nDeals = 0
                for i in range(gc.getGame().getIndexAfterLastDeal()):
                    deal = gc.getGame().getDeal(i)
                    if ((deal.getFirstPlayer() == iLoopPlayer and deal.getSecondPlayer() == self.iActiveLeader) or (deal.getSecondPlayer() == iLoopPlayer and deal.getFirstPlayer() == self.iActiveLeader)):
                        nDeals += 1
                listPlayers[nNumPLayers] = (nDeals, iLoopPlayer)
                nNumPLayers += 1
    listPlayers.sort()
    listPlayers.reverse()  # most deals first

    # loop through all players and display leaderheads
    for j in range (nNumPLayers):
        iLoopPlayer = listPlayers[j][1]

        # Player panel
        playerPanelName = self.getNextWidgetName()
        screen.attachPanel(mainPanelName, playerPanelName, gc.getPlayer(iLoopPlayer).getCivilizationShortDescription(0), "", False, True, PanelStyles.PANEL_STYLE_MAIN)
        screen.attachLabel(playerPanelName, "", " ")
        screen.attachImageButton(playerPanelName, "", gc.getLeaderHeadInfo(gc.getPlayer(iLoopPlayer).getLeaderType()).getButton(), GenericButtonSizes.BUTTON_SIZE_CUSTOM, WidgetTypes.WIDGET_LEADERHEAD, iLoopPlayer, -1, False)

        innerPanelName = self.getNextWidgetName()
        screen.attachPanel(playerPanelName, innerPanelName, "", "", False, False, PanelStyles.PANEL_STYLE_EMPTY)

        # One list row per deal with this player.
        dealPanelName = self.getNextWidgetName()
        screen.attachListBoxGFC(innerPanelName, dealPanelName, "", TableStyles.TABLE_STYLE_EMPTY)
        screen.enableSelect(dealPanelName, False)

        iRow = 0
        for i in range(gc.getGame().getIndexAfterLastDeal()):
            deal = gc.getGame().getDeal(i)
            if (deal.getFirstPlayer() == iLoopPlayer and deal.getSecondPlayer() == self.iActiveLeader and not deal.isNone()) or (deal.getSecondPlayer() == iLoopPlayer and deal.getFirstPlayer() == self.iActiveLeader):
                screen.appendListBoxString(dealPanelName, CyGameTextMgr().getDealString(deal, iLoopPlayer), WidgetTypes.WIDGET_DEAL_KILL, deal.getID(), -1, CvUtil.FONT_LEFT_JUSTIFY)
                iRow += 1
def drawPossibleDeals(self):
    """Draw the resource (FOREIGN_BONUS_SCREEN) or technology
    (FOREIGN_TECH_SCREEN) trading sub-screen: the active player's
    tradeable items on top, then one panel per met rival showing what
    they will / won't trade to the active player."""
    screen = self.getScreen()

    # Get the Players
    playerActive = gc.getPlayer(self.iActiveLeader)
    playerSelected = gc.getPlayer(self.iSelectedLeader)

    # Put everything inside a main panel, so we get vertical scrolling
    mainPanelName = self.getNextWidgetName()
    screen.addPanel( mainPanelName, "", "", True, True, 50, 100, self.W_SCREEN - 100, self.H_SCREEN - 200, PanelStyles.PANEL_STYLE_MAIN )

    # Active player panel (title shows gold/GPT when gold trading is possible)
    activePlayerPanelName = self.getNextWidgetName()
    szPlayerName = playerActive.getCivilizationShortDescription(0)
    if (gc.getTeam(playerActive.getTeam()).isGoldTrading() or gc.getTeam(playerSelected.getTeam()).isGoldTrading()):
        if (self.iScreen == FOREIGN_BONUS_SCREEN):
            szPlayerName += u" : " + localText.getText("TXT_KEY_MISC_GOLD_PER_TURN", (playerActive.calculateGoldRate(), ))
        elif (self.iScreen == FOREIGN_TECH_SCREEN):
            szPlayerName += u" : " + localText.getText("TXT_KEY_MISC_GOLD", (playerActive.getGold(), ))
    screen.attachPanel(mainPanelName, activePlayerPanelName, szPlayerName, "", False, True, PanelStyles.PANEL_STYLE_EMPTY )
    screen.attachLabel(activePlayerPanelName, "", " ")
    screen.attachMultiListControlGFC(activePlayerPanelName, "Child" + activePlayerPanelName, "", 1, self.BUTTON_SIZE, self.BUTTON_SIZE, TableStyles.TABLE_STYLE_STANDARD)

    if (self.iScreen == FOREIGN_BONUS_SCREEN):
        # Active player's tradeable resources (one button per surplus copy).
        tradeData = TradeData()
        tradeData.ItemType = TradeableItems.TRADE_RESOURCES
        for iLoopBonus in range(gc.getNumBonusInfos()):
            tradeData.iData = iLoopBonus
            bTradeable = False
            if (self.iSelectedLeader == self.iActiveLeader):
                # loop through all players and display resources that are available to trade to at least one leader
                for iLoopPlayer in range(gc.getMAX_PLAYERS()):
                    if (gc.getPlayer(iLoopPlayer).isAlive() and not gc.getPlayer(iLoopPlayer).isBarbarian() and not gc.getPlayer(iLoopPlayer).isMinorCiv() and gc.getTeam(gc.getPlayer(iLoopPlayer).getTeam()).isHasMet(gc.getPlayer(self.iActiveLeader).getTeam())):
                        if (iLoopPlayer != self.iActiveLeader and gc.getPlayer(self.iActiveLeader).canTradeItem(iLoopPlayer, tradeData, False)):
                            bTradeable = True
                            iLoopPlayer = gc.getMAX_PLAYERS() # exit for loop
            else:
                # display resources that you can trade to the selected leader
                bTradeable = gc.getPlayer(self.iActiveLeader).canTradeItem(self.iSelectedLeader, tradeData, False)
            if bTradeable:
                for i in range(playerActive.getNumTradeableBonuses(iLoopBonus)):
                    screen.appendMultiListButton("Child" + activePlayerPanelName, gc.getBonusInfo(iLoopBonus).getButton(), 0, WidgetTypes.WIDGET_PEDIA_JUMP_TO_BONUS, iLoopBonus, -1, False)
    elif (self.iScreen == FOREIGN_TECH_SCREEN):
        # Active player's tradeable technologies.
        tradeData = TradeData()
        tradeData.ItemType = TradeableItems.TRADE_TECHNOLOGIES
        for iLoopTech in range(gc.getNumTechInfos()):
            bTradeable = False
            tradeData.iData = iLoopTech
            if (self.iSelectedLeader == self.iActiveLeader):
                # loop through all players and display techs that are available to trade to at least one leader
                for iLoopPlayer in range(gc.getMAX_PLAYERS()):
                    if (gc.getPlayer(iLoopPlayer).isAlive() and not gc.getPlayer(iLoopPlayer).isBarbarian() and not gc.getPlayer(iLoopPlayer).isMinorCiv() and gc.getTeam(gc.getPlayer(iLoopPlayer).getTeam()).isHasMet(gc.getPlayer(self.iActiveLeader).getTeam())):
                        if (iLoopPlayer != self.iActiveLeader and gc.getPlayer(self.iActiveLeader).canTradeItem(iLoopPlayer, tradeData, False)):
                            bTradeable = True
                            iLoopPlayer = gc.getMAX_PLAYERS() # exit for loop
            else:
                # display techs that you can trade to the selected leader
                bTradeable = gc.getPlayer(self.iActiveLeader).canTradeItem(self.iSelectedLeader, tradeData, False)
            if bTradeable:
                screen.appendMultiListButton("Child" + activePlayerPanelName, gc.getTechInfo(iLoopTech).getButton(), 0, WidgetTypes.WIDGET_PEDIA_JUMP_TO_TECH, iLoopTech, -1, False)

    # Add active player leaderhead (checkbox state mirrors the selection)
    screen.attachLabel(activePlayerPanelName, "", " ")
    szName = self.getNextWidgetName()
    screen.addCheckBoxGFCAt(activePlayerPanelName, szName, gc.getLeaderHeadInfo(gc.getPlayer(self.iActiveLeader).getLeaderType()).getButton(), ArtFileMgr.getInterfaceArtInfo("BUTTON_HILITE_SQUARE").getPath(), 10, 0, self.W_LEADER, self.H_LEADER, WidgetTypes.WIDGET_LEADERHEAD, self.iActiveLeader, -1, ButtonStyles.BUTTON_STYLE_LABEL, False)
    if (self.iSelectedLeader == self.iActiveLeader):
        screen.setState(szName, True)
    else:
        screen.setState(szName, False)

    # Their leaderheads
    for iLoopPlayer in range(gc.getMAX_PLAYERS()):
        if (gc.getPlayer(iLoopPlayer).isAlive() and iLoopPlayer != self.iActiveLeader and (gc.getTeam(gc.getPlayer(iLoopPlayer).getTeam()).isHasMet(gc.getPlayer(self.iActiveLeader).getTeam()) or gc.getGame().isDebugMode()) and not gc.getPlayer(iLoopPlayer).isBarbarian() and not gc.getPlayer(iLoopPlayer).isMinorCiv()):
            currentPlayerPanelName = self.getNextWidgetName()
            # Panel title: civ name plus gold/connection/trading caveats.
            szPlayerName = gc.getPlayer(iLoopPlayer).getCivilizationShortDescription(0)
            if (gc.getTeam(playerActive.getTeam()).isGoldTrading() or gc.getTeam(gc.getPlayer(iLoopPlayer).getTeam()).isGoldTrading()):
                if (self.iScreen == FOREIGN_BONUS_SCREEN):
                    szPlayerName += u" : " + localText.getText("TXT_KEY_FOREIGN_ADVISOR_GOLD_PER_TURN_FOR_TRADE", (gc.getPlayer(iLoopPlayer).AI_maxGoldPerTurnTrade(self.iActiveLeader), ))
                elif (self.iScreen == FOREIGN_TECH_SCREEN):
                    szPlayerName += u" : " + localText.getText("TXT_KEY_FOREIGN_ADVISOR_GOLD_FOR_TRADE", (gc.getPlayer(iLoopPlayer).AI_maxGoldTrade(self.iActiveLeader), ))
            if (not playerActive.canTradeNetworkWith(iLoopPlayer) and self.iScreen == FOREIGN_BONUS_SCREEN):
                szPlayerName += u" : " + localText.getText("TXT_KEY_FOREIGN_ADVISOR_NOT_CONNECTED", ())
            elif (not gc.getTeam(playerActive.getTeam()).isTechTrading() and not gc.getTeam(gc.getPlayer(iLoopPlayer).getTeam()).isTechTrading()):
                szPlayerName += u" : " + localText.getText("TXT_KEY_FOREIGN_ADVISOR_NO_TECH_TRADING", ())
            screen.attachPanel(mainPanelName, currentPlayerPanelName, szPlayerName, "", False, True, PanelStyles.PANEL_STYLE_EMPTY )
            screen.attachLabel(currentPlayerPanelName, "", " ")

            if (self.iScreen == FOREIGN_BONUS_SCREEN):
                if (not playerActive.canTradeNetworkWith(iLoopPlayer) and not gc.getGame().isDebugMode()):
                    # no trade route: show only a cancel icon
                    screen.attachMultiListControlGFC(currentPlayerPanelName, "ChildTrade" + currentPlayerPanelName, "", 1, self.BUTTON_SIZE, self.BUTTON_SIZE, TableStyles.TABLE_STYLE_STANDARD)
                    screen.appendMultiListButton("ChildTrade" + currentPlayerPanelName, ArtFileMgr.getInterfaceArtInfo("INTERFACE_BUTTONS_CANCEL").getPath(), 0, WidgetTypes.WIDGET_GENERAL, -1, -1, False)
                else:
                    # split their resources into willing / unwilling to trade
                    listTradeable = []
                    listUntradeable = []
                    tradeData = TradeData()
                    tradeData.ItemType = TradeableItems.TRADE_RESOURCES
                    for iLoopBonus in range(gc.getNumBonusInfos()):
                        tradeData.iData = iLoopBonus
                        if (gc.getPlayer(iLoopPlayer).canTradeItem(self.iActiveLeader, tradeData, False)):
                            if (gc.getPlayer(iLoopPlayer).getTradeDenial(self.iActiveLeader, tradeData) == DenialTypes.NO_DENIAL):
                                listTradeable.append(iLoopBonus)
                            else:
                                listUntradeable.append(iLoopBonus)
                    if len(listTradeable) > 0:
                        screen.attachLabel(currentPlayerPanelName, "", u"<font=4>" + localText.getText("TXT_KEY_FOREIGN_ADVISOR_FOR_TRADE", ()) + u"</font>")
                        screen.attachMultiListControlGFC(currentPlayerPanelName, "ChildTrade" + currentPlayerPanelName, "", 1, self.BUTTON_SIZE, self.BUTTON_SIZE, TableStyles.TABLE_STYLE_STANDARD)
                        for iLoopBonus in listTradeable:
                            screen.appendMultiListButton("ChildTrade" + currentPlayerPanelName, gc.getBonusInfo(iLoopBonus).getButton(), 0, WidgetTypes.WIDGET_PEDIA_JUMP_TO_BONUS, iLoopBonus, -1, False)
                    if len(listUntradeable) > 0:
                        screen.attachLabel(currentPlayerPanelName, "", u"<font=4>" + localText.getText("TXT_KEY_FOREIGN_ADVISOR_NOT_FOR_TRADE", ()) + u"</font>")
                        screen.attachMultiListControlGFC(currentPlayerPanelName, "ChildNoTrade" + currentPlayerPanelName, "", 1, self.BUTTON_SIZE, self.BUTTON_SIZE, TableStyles.TABLE_STYLE_STANDARD)
                        for iLoopBonus in listUntradeable:
                            screen.appendMultiListButton("ChildNoTrade" + currentPlayerPanelName, gc.getBonusInfo(iLoopBonus).getButton(), 0, WidgetTypes.WIDGET_PEDIA_JUMP_TO_BONUS, iLoopBonus, -1, False)
            elif (self.iScreen == FOREIGN_TECH_SCREEN):
                if (not gc.getTeam(playerActive.getTeam()).isTechTrading() and not gc.getTeam(gc.getPlayer(iLoopPlayer).getTeam()).isTechTrading() and not gc.getGame().isDebugMode()):
                    # neither side can trade techs: show only a cancel icon
                    screen.attachMultiListControlGFC(currentPlayerPanelName, "ChildTrade" + currentPlayerPanelName, "", 1, self.BUTTON_SIZE, self.BUTTON_SIZE, TableStyles.TABLE_STYLE_STANDARD)
                    screen.appendMultiListButton("ChildTrade" + currentPlayerPanelName, ArtFileMgr.getInterfaceArtInfo("INTERFACE_BUTTONS_CANCEL").getPath(), 0, WidgetTypes.WIDGET_GENERAL, -1, -1, False)
                else:
                    # split their techs into willing / unwilling / not-allowed
                    listTradeable = []
                    listUntradeable = []
                    listTradeNotAllowed = []
                    tradeData = TradeData()
                    tradeData.ItemType = TradeableItems.TRADE_TECHNOLOGIES
                    for iLoopTech in range(gc.getNumTechInfos()):
                        tradeData.iData = iLoopTech
                        if (gc.getPlayer(iLoopPlayer).canTradeItem(self.iActiveLeader, tradeData, False)):
                            if (gc.getPlayer(iLoopPlayer).getTradeDenial(self.iActiveLeader, tradeData) == DenialTypes.NO_DENIAL):
                                listTradeable.append(iLoopTech)
                            else:
                                listUntradeable.append(iLoopTech)
                        elif (gc.getTeam(gc.getPlayer(iLoopPlayer).getTeam()).isHasTech(iLoopTech) and playerActive.canResearch(iLoopTech, False)):
                            listTradeNotAllowed.append(iLoopTech)
                    if len(listTradeable) > 0:
                        screen.attachLabel(currentPlayerPanelName, "", u"<font=4>" + localText.getText("TXT_KEY_FOREIGN_ADVISOR_FOR_TRADE", ()) + u"</font>")
                        screen.attachMultiListControlGFC(currentPlayerPanelName, "ChildTrade" + currentPlayerPanelName, "", 1, self.BUTTON_SIZE, self.BUTTON_SIZE, TableStyles.TABLE_STYLE_STANDARD)
                        for iLoopTech in listTradeable:
                            screen.appendMultiListButton("ChildTrade" + currentPlayerPanelName, gc.getTechInfo(iLoopTech).getButton(), 0, WidgetTypes.WIDGET_PEDIA_JUMP_TO_TECH, iLoopTech, -1, False)
                    if len(listUntradeable) > 0:
                        screen.attachLabel(currentPlayerPanelName, "", u"<font=4>" + localText.getText("TXT_KEY_FOREIGN_ADVISOR_NOT_FOR_TRADE", ()) + u"</font>")
                        screen.attachMultiListControlGFC(currentPlayerPanelName, "ChildNoTrade" + currentPlayerPanelName, "", 1, self.BUTTON_SIZE, self.BUTTON_SIZE, TableStyles.TABLE_STYLE_STANDARD)
                        for iLoopTech in listUntradeable:
                            screen.appendMultiListButton("ChildNoTrade" + currentPlayerPanelName, gc.getTechInfo(iLoopTech).getButton(), 0, WidgetTypes.WIDGET_PEDIA_JUMP_TO_TECH, iLoopTech, -1, False)
                    if len(listTradeNotAllowed) > 0:
                        screen.attachLabel(currentPlayerPanelName, "", u"<font=4>" + localText.getText("TXT_KEY_FOREIGN_ADVISOR_NOT_ALLOWED_TRADE", ()) + u"</font>")
                        screen.attachMultiListControlGFC(currentPlayerPanelName, "ChildCantTrade" + currentPlayerPanelName, "", 1, self.BUTTON_SIZE, self.BUTTON_SIZE, TableStyles.TABLE_STYLE_STANDARD)
                        for iLoopTech in listTradeNotAllowed:
                            screen.appendMultiListButton("ChildCantTrade" + currentPlayerPanelName, gc.getTechInfo(iLoopTech).getButton(), 0, WidgetTypes.WIDGET_PEDIA_JUMP_TO_TECH, iLoopTech, -1, False)

            # Leaderhead checkbox; checked when this player is selected.
            screen.attachLabel(currentPlayerPanelName, "", " ")
            szName = self.getNextWidgetName()
            screen.addCheckBoxGFCAt(currentPlayerPanelName, szName, gc.getLeaderHeadInfo(gc.getPlayer(iLoopPlayer).getLeaderType()).getButton(), ArtFileMgr.getInterfaceArtInfo("BUTTON_HILITE_SQUARE").getPath(), 10, 0, self.W_LEADER, self.H_LEADER, WidgetTypes.WIDGET_LEADERHEAD, iLoopPlayer, -1, ButtonStyles.BUTTON_STYLE_LABEL, False)
            if (self.iSelectedLeader == iLoopPlayer):
                screen.setState(szName, True)
            else:
                screen.setState(szName, False)
def drawRelations(self, bInitial):
if self.iShiftKeyDown == 1:
if (self.iSelectedLeader in self.listSelectedLeaders):
self.listSelectedLeaders.remove(self.iSelectedLeader)
else:
self.listSelectedLeaders.append(self.iSelectedLeader)
else:
self.listSelectedLeaders = []
if (not bInitial):
self.listSelectedLeaders.append(self.iSelectedLeader)
bNoLeadersSelected = (len(self.listSelectedLeaders) == 0)
bSingleLeaderSelected = (len(self.listSelectedLeaders) == 1)
if bSingleLeaderSelected:
self.iSelectedLeader = self.listSelectedLeaders[0]
# Get the Players
playerActive = gc.getPlayer(self.iActiveLeader)
# count the leaders
iCount = 0
leaderMap = { }
# Count all other leaders
for iPlayer in range(gc.getMAX_PLAYERS()):
player = gc.getPlayer(iPlayer)
if (player.isAlive() and iPlayer != self.iActiveLeader and (gc.getTeam(player.getTeam()).isHasMet(gc.getPlayer(self.iActiveLeader).getTeam()) or gc.getGame().isDebugMode()) and not player.isBarbarian() and not player.isMinorCiv()):
leaderMap[iPlayer] = iCount
iCount = iCount + 1
fLeaderTop = self.Y_LEADER_CIRCLE_TOP
fRadius = self.RADIUS_LEADER_ARC - self.H_LEADER
fLeaderArcTop = fLeaderTop + self.H_LEADER + 10
if iCount < 8:
iLeaderHeight = int((3 * self.H_LEADER) / 2)
iLeaderWidth = int((3 * self.W_LEADER) / 2)
else:
iLeaderHeight = self.H_LEADER
iLeaderWidth = self.W_LEADER
screen = self.getScreen()
#screen.addPanel(self.getNextWidgetName(), "", "", False, False, 0, 50, self.W_SCREEN, 667, PanelStyles.PANEL_STYLE_MAIN_WHITE)
#screen.addPanel(self.getNextWidgetName(), "", "", False, False, 0, 50, self.W_SCREEN, 667, PanelStyles.PANEL_STYLE_MAIN_WHITE)
#screen.addPanel(self.getNextWidgetName(), "", "", False, False, 0, 50, self.W_SCREEN, 667, PanelStyles.PANEL_STYLE_MAIN_WHITE)
# legend
screen.addPanel(self.getNextWidgetName(), u"", u"", True, False, self.X_LEGEND, self.Y_LEGEND, self.W_LEGEND, self.H_LEGEND, PanelStyles.PANEL_STYLE_IN)
x = self.X_LEGEND + self.MARGIN_LEGEND
y = self.Y_LEGEND + self.MARGIN_LEGEND
screen.setLabel(self.getNextWidgetName(), "", u"<font=2>" + localText.getText("TXT_KEY_FOREIGN_ADVISOR_CONTACT", ()) + u"</font>", CvUtil.FONT_LEFT_JUSTIFY, x, y-10, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 )
y += self.MARGIN_LEGEND
screen.addLineGFC(self.BACKGROUND_ID, self.getNextLineName(), x, y, x + self.W_LEGEND - 2*self.MARGIN_LEGEND, y, gc.getInfoTypeForString("COLOR_WHITE"))
y += 2 * self.MARGIN_LEGEND
screen.setLabel(self.getNextWidgetName(), "", u"<font=2>" + localText.getText("TXT_KEY_CONCEPT_WAR", ()) + u"</font>", CvUtil.FONT_LEFT_JUSTIFY, x, y-10, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 )
y += self.MARGIN_LEGEND
screen.addLineGFC(self.BACKGROUND_ID, self.getNextLineName(), x, y, x + self.W_LEGEND - 2*self.MARGIN_LEGEND, y, gc.getInfoTypeForString("COLOR_RED"))
y += 2 * self.MARGIN_LEGEND
screen.setLabel(self.getNextWidgetName(), "", u"<font=2>" + localText.getText("TXT_KEY_TRADE_DEFENSIVE_PACT_STRING", ()) + u"</font>", CvUtil.FONT_LEFT_JUSTIFY, x, y-10, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 )
y += self.MARGIN_LEGEND
screen.addLineGFC(self.BACKGROUND_ID, self.getNextLineName(), x, y, x + self.W_LEGEND - 2*self.MARGIN_LEGEND, y, gc.getInfoTypeForString("COLOR_BLUE"))
y += 2 * self.MARGIN_LEGEND
screen.setLabel(self.getNextWidgetName(), "", u"<font=2>" + localText.getText("TXT_KEY_TRADE_OPEN_BORDERS_STRING", ()) + u"</font>", CvUtil.FONT_LEFT_JUSTIFY, x, y-10, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 )
y += self.MARGIN_LEGEND
screen.addLineGFC(self.BACKGROUND_ID, self.getNextLineName(), x, y, x + self.W_LEGEND - 2*self.MARGIN_LEGEND, y, gc.getInfoTypeForString("COLOR_CITY_GREEN"))
y += 2 * self.MARGIN_LEGEND
screen.setLabel(self.getNextWidgetName(), "", u"<font=2>" + localText.getText("TXT_KEY_PITBOSS_TEAM", ()) + u"</font>", CvUtil.FONT_LEFT_JUSTIFY, x, y-10, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 )
y += self.MARGIN_LEGEND
screen.addLineGFC(self.BACKGROUND_ID, self.getNextLineName(), x, y, x + self.W_LEGEND - 2*self.MARGIN_LEGEND, y, gc.getInfoTypeForString("COLOR_YELLOW"))
y += 2 * self.MARGIN_LEGEND
screen.setLabel(self.getNextWidgetName(), "", u"<font=2>" + localText.getText("TXT_KEY_MISC_VASSAL_SHORT", ()) + u"</font>", CvUtil.FONT_LEFT_JUSTIFY, x, y-10, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 )
y += self.MARGIN_LEGEND
screen.addLineGFC(self.BACKGROUND_ID, self.getNextLineName(), x, y, x + self.W_LEGEND - 2*self.MARGIN_LEGEND, y, gc.getInfoTypeForString("COLOR_CYAN"))
# Our leader head
szLeaderHead = self.getNextWidgetName()
#screen.addCheckBoxGFC(szLeaderHead, gc.getLeaderHeadInfo(gc.getPlayer(self.iActiveLeader).getLeaderType()).getButton(), ArtFileMgr.getInterfaceArtInfo("BUTTON_HILITE_SQUARE").getPath(), self.X_LEADER_CIRCLE_TOP - iLeaderWidth/2, int(fLeaderTop), iLeaderWidth, iLeaderHeight, WidgetTypes.WIDGET_LEADERHEAD, self.iActiveLeader, -1, ButtonStyles.BUTTON_STYLE_LABEL) #Rhye
screen.addCheckBoxGFC(szLeaderHead, gc.getCivilizationInfo(gc.getPlayer(self.iActiveLeader).getCivilizationType()).getButton(), ArtFileMgr.getInterfaceArtInfo("BUTTON_HILITE_SQUARE").getPath(), self.X_LEADER_CIRCLE_TOP - iLeaderWidth/2, int(fLeaderTop), iLeaderWidth, iLeaderHeight, WidgetTypes.WIDGET_LEADERHEAD, self.iActiveLeader, -1, ButtonStyles.BUTTON_STYLE_LABEL) #Rhye
if (self.iActiveLeader in self.listSelectedLeaders):
screen.setState(szLeaderHead, True)
else:
screen.setState(szLeaderHead, False)
szName = self.getNextWidgetName()
#Rhye - start
#szLeaderName = u"<font=3>" + playerActive.getName() + u"</font>"
#szLeaderName = u"<font=3>" + playerActive.getCivilizationShortDescription(0) + u"</font>"
if (len(leaderMap.keys()) >= 16):
szLeaderName = u"<font=1>" + playerActive.getCivilizationDescription(0) + u"</font>"
iDist = -4
elif (len(leaderMap.keys()) >= 12):
szLeaderName = u"<font=2>" + playerActive.getCivilizationDescription(0) + u"</font>"
iDist = 1
else:
szLeaderName = u"<font=3>" + playerActive.getCivilizationDescription(0) + u"</font>"
iDist = 5
#screen.setLabel(szName, "", szLeaderName, CvUtil.FONT_CENTER_JUSTIFY, self.X_LEADER_CIRCLE_TOP, fLeaderTop + iLeaderHeight + 5, 0, FontTypes.GAME_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
screen.setLabel(szName, "", szLeaderName, CvUtil.FONT_CENTER_JUSTIFY, self.X_LEADER_CIRCLE_TOP, fLeaderTop + iLeaderHeight + iDist, 0, FontTypes.GAME_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
#Rhye - end
# angle increment in radians (180 degree range)
if (iCount < 2):
deltaTheta = 0
else:
deltaTheta = 3.1415927 / (iCount - 1)
iTot = 0 #Rhye
# draw other leaderheads
for iPlayer in leaderMap.keys():
player = gc.getPlayer(iPlayer)
iTot += 1 #Rhye
if bSingleLeaderSelected:
# attitudes shown are towards single selected leader
iBaseLeader = self.iSelectedLeader
else:
# attitudes shown are towards active leader
iBaseLeader = self.iActiveLeader
playerBase = gc.getPlayer(iBaseLeader)
fX = int(self.X_LEADER_CIRCLE_TOP - fRadius * math.cos(deltaTheta * leaderMap[iPlayer]) - iLeaderWidth/2)
fY = int(fLeaderArcTop + fRadius * math.sin(deltaTheta * leaderMap[iPlayer]) - iLeaderHeight/2)
szLeaderHead = self.getNextWidgetName()
#screen.addCheckBoxGFC(szLeaderHead, gc.getLeaderHeadInfo(player.getLeaderType()).getButton(), ArtFileMgr.getInterfaceArtInfo("BUTTON_HILITE_SQUARE").getPath(), int(fX), int(fY), iLeaderWidth, iLeaderHeight, WidgetTypes.WIDGET_LEADERHEAD, iPlayer, iBaseLeader, ButtonStyles.BUTTON_STYLE_LABEL) #Rhye
screen.addCheckBoxGFC(szLeaderHead, gc.getCivilizationInfo(player.getCivilizationType()).getButton(), ArtFileMgr.getInterfaceArtInfo("BUTTON_HILITE_SQUARE").getPath(), int(fX), int(fY), iLeaderWidth, iLeaderHeight, WidgetTypes.WIDGET_LEADERHEAD, iPlayer, iBaseLeader, ButtonStyles.BUTTON_STYLE_LABEL) #Rhye
if (iPlayer in self.listSelectedLeaders):
screen.setState(szLeaderHead, True)
else:
screen.setState(szLeaderHead, False)
szName = self.getNextWidgetName()
#Rhye - start
iOffsetX = 0
#szText = u"<font=3>" + player.getName() + u"</font>"
#szText = u"<font=3>" + player.getCivilizationShortDescription(0) + u"</font>"
if (len(leaderMap.keys()) >= 16):
szText = u"<font=1>" + player.getCivilizationDescription(0) + u"</font>"
iDist = -4
iOffsetX = (min(5, max(-5, iTot - len(leaderMap.keys())/2)))*6
elif (len(leaderMap.keys()) >= 12):
szText = u"<font=2>" + player.getCivilizationDescription(0) + u"</font>"
iDist = 1
iOffsetX = (min(5, max(-5, iTot - len(leaderMap.keys())/2)))*3
else:
szText = u"<font=3>" + player.getCivilizationDescription(0) + u"</font>"
iDist = 5
#screen.setLabel(szName, "", szText, CvUtil.FONT_CENTER_JUSTIFY, fX + iLeaderWidth/2, fY + iLeaderHeight + 5, 0, FontTypes.GAME_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 )
screen.setLabel(szName, "", szText, CvUtil.FONT_CENTER_JUSTIFY, fX + iLeaderWidth/2 + iOffsetX, fY + iLeaderHeight + iDist, 0, FontTypes.GAME_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 )
#Rhye - end
# Leader attitude towards active player
szName = self.getNextWidgetName()
if (gc.getTeam(player.getTeam()).isHasMet(playerBase.getTeam()) and iBaseLeader != iPlayer):
#Rhye - start
#szText = " (" + gc.getAttitudeInfo(gc.getPlayer(iPlayer).AI_getAttitude(iBaseLeader)).getDescription()
#if (iBaseLeader != iPlayer):
# if (gc.getTeam(player.getTeam()).isVassal(playerBase.getTeam())):
# szText += ", " + localText.getText("TXT_KEY_MISC_VASSAL_SHORT", ())
# elif (gc.getTeam(playerBase.getTeam()).isVassal(player.getTeam())):
# szText += ", " + localText.getText("TXT_KEY_MISC_MASTER", ())
#szText += ")"
if (len(leaderMap.keys()) >= 16):
szText = u"<font=1>" + " (" + gc.getAttitudeInfo(gc.getPlayer(iPlayer).AI_getAttitude(iBaseLeader)).getDescription() + u"</font>"
if (iBaseLeader != iPlayer):
if (gc.getTeam(player.getTeam()).isVassal(playerBase.getTeam())):
szText += (u"<font=1>" + ", " + localText.getText("TXT_KEY_MISC_VASSAL_SHORT", ()) + u"</font>")
elif (gc.getTeam(playerBase.getTeam()).isVassal(player.getTeam())):
szText += (u"<font=1>" + ", " + localText.getText("TXT_KEY_MISC_MASTER", ()) + u"</font>")
szText += (u"<font=1>" + ")" + u"</font>")
else:
szText = " (" + gc.getAttitudeInfo(gc.getPlayer(iPlayer).AI_getAttitude(iBaseLeader)).getDescription()
if (iBaseLeader != iPlayer):
if (gc.getTeam(player.getTeam()).isVassal(playerBase.getTeam())):
szText += ", " + localText.getText("TXT_KEY_MISC_VASSAL_SHORT", ())
elif (gc.getTeam(playerBase.getTeam()).isVassal(player.getTeam())):
szText += ", " + localText.getText("TXT_KEY_MISC_MASTER", ())
szText += ")"
#Rhye - end
else:
szText = u""
#Rhye - start
iOffsetY = 0
if (len(leaderMap.keys()) >= 16):
iOffsetY = -16
elif (len(leaderMap.keys()) >= 12):
iOffsetY = -8
#screen.setLabel(szName, "", szText, CvUtil.FONT_CENTER_JUSTIFY, fX + iLeaderWidth/2, fY + iLeaderHeight + 25, 0, FontTypes.GAME_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 )
screen.setLabel(szName, "", szText, CvUtil.FONT_CENTER_JUSTIFY, fX + iLeaderWidth/2 + iOffsetX, fY + iLeaderHeight + 25 + iOffsetY, 0, FontTypes.GAME_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 )
#Rhye - end
# draw lines
for iSelectedLeader in range(gc.getMAX_PLAYERS()):
bDisplayed = (not gc.getPlayer(iSelectedLeader).isBarbarian() and not gc.getPlayer(iSelectedLeader).isMinorCiv() and gc.getPlayer(iSelectedLeader).isAlive() and (gc.getGame().isDebugMode() or gc.getTeam(playerActive.getTeam()).isHasMet(gc.getPlayer(iSelectedLeader).getTeam())))
if iSelectedLeader in self.listSelectedLeaders or (bNoLeadersSelected and bDisplayed):
# get selected player and location
if (iSelectedLeader in leaderMap):
thetaSelected = deltaTheta * leaderMap[iSelectedLeader]
fXSelected = self.X_LEADER_CIRCLE_TOP - fRadius * math.cos(thetaSelected)
fYSelected = fLeaderArcTop + fRadius * math.sin(thetaSelected)
else:
fXSelected = self.X_LEADER_CIRCLE_TOP
fYSelected = fLeaderTop + iLeaderHeight/2
for iPlayer in leaderMap.keys():
player = gc.getPlayer(iPlayer)
fX = self.X_LEADER_CIRCLE_TOP - fRadius * math.cos(deltaTheta * leaderMap[iPlayer])
fY = fLeaderArcTop + fRadius * math.sin(deltaTheta * leaderMap[iPlayer])
# draw lines
if (iSelectedLeader != iPlayer):
if (player.getTeam() == gc.getPlayer(iSelectedLeader).getTeam()):
szName = self.getNextLineName()
screen.addLineGFC(self.BACKGROUND_ID, szName, int(fXSelected), int(fYSelected), int(fX), int(fY), gc.getInfoTypeForString("COLOR_YELLOW") )
elif (gc.getTeam(player.getTeam()).isVassal(gc.getPlayer(iSelectedLeader).getTeam()) or gc.getTeam(gc.getPlayer(iSelectedLeader).getTeam()).isVassal(player.getTeam())):
szName = self.getNextLineName()
screen.addLineGFC(self.BACKGROUND_ID, szName, int(fXSelected), int(fYSelected), int(fX), int(fY), gc.getInfoTypeForString("COLOR_CYAN") )
elif (gc.getTeam(player.getTeam()).isHasMet(gc.getPlayer(iSelectedLeader).getTeam())):
if (gc.getTeam(player.getTeam()).isAtWar(gc.getPlayer(iSelectedLeader).getTeam())):
szName = self.getNextLineName()
screen.addLineGFC(self.BACKGROUND_ID, szName, int(fXSelected), int(fYSelected), int(fX), int(fY), gc.getInfoTypeForString("COLOR_RED") )
else:
bJustPeace = True
if (gc.getTeam(player.getTeam()).isOpenBorders(gc.getPlayer(iSelectedLeader).getTeam())):
fDy = fYSelected - fY
fDx = fXSelected - fX
fTheta = math.atan2(fDy, fDx)
if (fTheta > 0.5 * math.pi):
fTheta -= math.pi
elif (fTheta < -0.5 * math.pi):
fTheta += math.pi
fSecondLineOffsetY = self.LINE_WIDTH * math.cos(fTheta)
fSecondLineOffsetX = -self.LINE_WIDTH * math.sin(fTheta)
szName = self.getNextLineName()
screen.addLineGFC(self.BACKGROUND_ID, szName, int(fXSelected + fSecondLineOffsetX), int(fYSelected + fSecondLineOffsetY), int(fX + fSecondLineOffsetX), int(fY + fSecondLineOffsetY), gc.getInfoTypeForString("COLOR_CITY_GREEN") )
bJustPeace = False
if (gc.getTeam(player.getTeam()).isDefensivePact(gc.getPlayer(iSelectedLeader).getTeam())):
szName = self.getNextLineName()
screen.addLineGFC(self.BACKGROUND_ID, szName, int(fXSelected), int(fYSelected), int(fX), int(fY), gc.getInfoTypeForString("COLOR_BLUE") )
bJustPeace = False
if (bJustPeace):
szName = self.getNextLineName()
screen.addLineGFC(self.BACKGROUND_ID, szName, int(fXSelected), int(fYSelected), int(fX), int(fY), gc.getInfoTypeForString("COLOR_WHITE") )
player = gc.getPlayer(self.iActiveLeader)
if (player.getTeam() == gc.getPlayer(iSelectedLeader).getTeam()):
szName = self.getNextLineName()
screen.addLineGFC(self.BACKGROUND_ID, szName, int(fXSelected), int(fYSelected), self.X_LEADER_CIRCLE_TOP, fLeaderTop + iLeaderHeight/2, gc.getInfoTypeForString("COLOR_YELLOW") )
elif (gc.getTeam(player.getTeam()).isVassal(gc.getPlayer(iSelectedLeader).getTeam()) or gc.getTeam(gc.getPlayer(iSelectedLeader).getTeam()).isVassal(player.getTeam())):
szName = self.getNextLineName()
screen.addLineGFC(self.BACKGROUND_ID, szName, int(fXSelected), int(fYSelected), self.X_LEADER_CIRCLE_TOP, fLeaderTop + iLeaderHeight/2, gc.getInfoTypeForString("COLOR_CYAN") )
elif (gc.getTeam(player.getTeam()).isHasMet(gc.getPlayer(iSelectedLeader).getTeam())):
if (gc.getTeam(player.getTeam()).isAtWar(gc.getPlayer(iSelectedLeader).getTeam())):
szName = self.getNextLineName()
screen.addLineGFC(self.BACKGROUND_ID, szName, int(fXSelected), int(fYSelected), self.X_LEADER_CIRCLE_TOP, fLeaderTop + iLeaderHeight/2, gc.getInfoTypeForString("COLOR_RED") )
else:
bJustPeace = True
if (gc.getTeam(player.getTeam()).isOpenBorders(gc.getPlayer(iSelectedLeader).getTeam())):
fDy = fLeaderTop + iLeaderHeight/2 - fYSelected
fDx = self.X_LEADER_CIRCLE_TOP - fXSelected
fTheta = math.atan2(fDy, fDx)
if (fTheta > 0.5 * math.pi):
fTheta -= math.pi
elif (fTheta < -0.5 * math.pi):
fTheta += math.pi
fSecondLineOffsetY = self.LINE_WIDTH * math.cos(fTheta)
fSecondLineOffsetX = -self.LINE_WIDTH * math.sin(fTheta)
szName = self.getNextLineName()
screen.addLineGFC(self.BACKGROUND_ID, szName, int(fXSelected + fSecondLineOffsetX), int(fYSelected + fSecondLineOffsetY), int(self.X_LEADER_CIRCLE_TOP + fSecondLineOffsetX), int(fLeaderTop + iLeaderHeight/2 + fSecondLineOffsetY), gc.getInfoTypeForString("COLOR_CITY_GREEN") )
bJustPeace = False
if (gc.getTeam(player.getTeam()).isDefensivePact(gc.getPlayer(iSelectedLeader).getTeam())):
szName = self.getNextLineName()
screen.addLineGFC(self.BACKGROUND_ID, szName, int(fXSelected), int(fYSelected), int(self.X_LEADER_CIRCLE_TOP), int(fLeaderTop + iLeaderHeight/2), gc.getInfoTypeForString("COLOR_BLUE") )
bJustPeace = False
if (bJustPeace):
szName = self.getNextLineName()
screen.addLineGFC(self.BACKGROUND_ID, szName, int(fXSelected), int(fYSelected), int(self.X_LEADER_CIRCLE_TOP), int(fLeaderTop + iLeaderHeight/2), gc.getInfoTypeForString("COLOR_WHITE") )
# returns a unique ID for a widget in this screen
def getNextWidgetName(self):
szName = self.WIDGET_ID + str(self.nWidgetCount * NUM_FOREIGN_SCREENS + self.iScreen)
self.nWidgetCount += 1
return szName
def getNextLineName(self):
szName = self.LINE_ID + str(self.nLineCount * NUM_FOREIGN_SCREENS + self.iScreen)
self.nLineCount += 1
return szName
def getWidgetName(self, szBaseName):
szName = szBaseName + str(self.iScreen)
return szName
def clearAllLines(self):
screen = self.getScreen()
nLines = self.nLineCount
self.nLineCount = 0
for i in range(nLines):
screen.removeLineGFC(self.BACKGROUND_ID, self.getNextLineName())
self.nLineCount = 0
def deleteAllWidgets(self):
screen = self.getScreen()
i = self.nWidgetCount - 1
while (i >= 0):
self.nWidgetCount = i
screen.deleteWidget(self.getNextWidgetName())
i -= 1
self.nWidgetCount = 0
self.clearAllLines()
# Handles the input for this screen...
def handleInput (self, inputClass):
if (inputClass.getNotifyCode() == NotifyCode.NOTIFY_CLICKED):
if (inputClass.getButtonType() == WidgetTypes.WIDGET_LEADERHEAD):
if (inputClass.getFlags() & MouseFlags.MOUSE_LBUTTONUP):
self.iSelectedLeader = inputClass.getData1()
self.drawContents(False)
elif (inputClass.getFlags() & MouseFlags.MOUSE_RBUTTONUP):
if (self.iActiveLeader != inputClass.getData1()):
self.getScreen().hideScreen()
elif (inputClass.getNotifyCode() == NotifyCode.NOTIFY_LISTBOX_ITEM_SELECTED):
if (inputClass.getFunctionName() + str(inputClass.getID()) == self.getWidgetName(self.DEBUG_DROPDOWN_ID)):
szName = self.getWidgetName(self.DEBUG_DROPDOWN_ID)
iIndex = self.getScreen().getSelectedPullDownID(szName)
self.iActiveLeader = self.getScreen().getPullDownData(szName, iIndex)
self.drawContents(False)
elif (inputClass.getNotifyCode() == NotifyCode.NOTIFY_CHARACTER):
if (inputClass.getData() == int(InputTypes.KB_LSHIFT) or inputClass.getData() == int(InputTypes.KB_RSHIFT)):
self.iShiftKeyDown = inputClass.getID()
return 0
def update(self, fDelta):
if (CyInterface().isDirty(InterfaceDirtyBits.Foreign_Screen_DIRTY_BIT) == True):
CyInterface().setDirty(InterfaceDirtyBits.Foreign_Screen_DIRTY_BIT, False)
self.drawContents(False)
return
| 56.298942 | 378 | 0.728725 | 4,839 | 42,562 | 6.263898 | 0.101881 | 0.027218 | 0.021774 | 0.021048 | 0.72957 | 0.679621 | 0.637821 | 0.60516 | 0.588763 | 0.544159 | 0 | 0.012003 | 0.140665 | 42,562 | 755 | 379 | 56.37351 | 0.816733 | 0.092312 | 0 | 0.431304 | 0 | 0 | 0.057627 | 0.025145 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026087 | false | 0 | 0.008696 | 0.001739 | 0.052174 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d51e26d1b13d097b4892231545aad4dd6145adfc | 6,300 | py | Python | Examples/ApiExamples/ex_hyphenation.py | alex-dudin/Aspose.Words-for-Python-via-.NET | 02b257df8da9892fcce671c473c2ef27b68b5087 | [
"MIT"
] | 3 | 2021-12-04T22:17:28.000Z | 2022-02-22T03:30:01.000Z | Examples/ApiExamples/ex_hyphenation.py | alex-dudin/Aspose.Words-for-Python-via-.NET | 02b257df8da9892fcce671c473c2ef27b68b5087 | [
"MIT"
] | 4 | 2021-11-26T10:01:06.000Z | 2021-12-14T15:01:11.000Z | Examples/ApiExamples/ex_hyphenation.py | alex-dudin/Aspose.Words-for-Python-via-.NET | 02b257df8da9892fcce671c473c2ef27b68b5087 | [
"MIT"
] | 2 | 2021-10-20T18:06:22.000Z | 2021-10-29T20:59:18.000Z | # Copyright (c) 2001-2022 Aspose Pty Ltd. All Rights Reserved.
#
# This file is part of Aspose.Words. The source code in this file
# is only intended as a supplement to the documentation, and is provided
# "as is", without warranty of any kind, either expressed or implied.
import aspose.words as aw
from api_example_base import ApiExampleBase, MY_DIR, ARTIFACTS_DIR
class ExHyphenation(ApiExampleBase):
def test_dictionary(self):
#ExStart
#ExFor:Hyphenation.is_dictionary_registered(str)
#ExFor:Hyphenation.register_dictionary(str,str)
#ExFor:Hyphenation.unregister_dictionary(str)
#ExSummary:Shows how to register a hyphenation dictionary.
# A hyphenation dictionary contains a list of strings that define hyphenation rules for the dictionary's language.
# When a document contains lines of text in which a word could be split up and continued on the next line,
# hyphenation will look through the dictionary's list of strings for that word's substrings.
# If the dictionary contains a substring, then hyphenation will split the word across two lines
# by the substring and add a hyphen to the first half.
# Register a dictionary file from the local file system to the "de-CH" locale.
aw.Hyphenation.register_dictionary("de-CH", MY_DIR + "hyph_de_CH.dic")
self.assertTrue(aw.Hyphenation.is_dictionary_registered("de-CH"))
# Open a document containing text with a locale matching that of our dictionary,
# and save it to a fixed-page save format. The text in that document will be hyphenated.
doc = aw.Document(MY_DIR + "German text.docx")
self.assertTrue(all(node for node in doc.first_section.body.first_paragraph.runs
if node.as_run().font.locale_id == 2055))
doc.save(ARTIFACTS_DIR + "Hyphenation.dictionary.registered.pdf")
# Re-load the document after un-registering the dictionary,
# and save it to another PDF, which will not have hyphenated text.
aw.Hyphenation.unregister_dictionary("de-CH")
self.assertFalse(aw.Hyphenation.is_dictionary_registered("de-CH"))
doc = aw.Document(MY_DIR + "German text.docx")
doc.save(ARTIFACTS_DIR + "Hyphenation.dictionary.unregistered.pdf")
#ExEnd
#pdf_doc = aspose.pdf.Document(ARTIFACTS_DIR + "Hyphenation.dictionary.registered.pdf")
#text_absorber = aspose.pdf.text.TextAbsorber()
#text_absorber.visit(pdf_doc)
#self.assertIn(
# "La ob storen an deinen am sachen. Dop-\r\n" +
# "pelte um da am spateren verlogen ge-\r\n" +
# "kommen achtzehn blaulich.",
# text_absorber.text)
#pdf_doc = aspose.pdf.Document(ARTIFACTS_DIR + "Hyphenation.dictionary.unregistered.pdf")
#text_absorber = aspose.pdf.text.TextAbsorber()
#text_absorber.visit(pdf_doc)
#self.assertIn(
# "La ob storen an deinen am sachen. \r\n" +
# "Doppelte um da am spateren verlogen \r\n" +
# "gekommen achtzehn blaulich.",
# text_absorber.text)
##ExStart
##ExFor:Hyphenation
##ExFor:Hyphenation.callback
##ExFor:Hyphenation.register_dictionary(str,BytesIO)
##ExFor:Hyphenation.register_dictionary(str,str)
##ExFor:Hyphenation.warning_callback
##ExFor:IHyphenationCallback
##ExFor:IHyphenationCallback.request_dictionary(str)
##ExSummary:Shows how to open and register a dictionary from a file.
#def test_register_dictionary(self):
# # Set up a callback that tracks warnings that occur during hyphenation dictionary registration.
# warning_info_collection = aw.WarningInfoCollection()
# aw.Hyphenation.warning_callback = warning_info_collection
# # Register an English (US) hyphenation dictionary by stream.
# dictionary_stream = open(MY_DIR + "hyph_en_US.dic", "rb")
# aw.Hyphenation.register_dictionary("en-US", dictionary_stream)
# self.assertEqual(0, warning_info_collection.count)
# # Open a document with a locale that Microsoft Word may not hyphenate on an English machine, such as German.
# doc = aw.Document(MY_DIR + "German text.docx")
# # To hyphenate that document upon saving, we need a hyphenation dictionary for the "de-CH" language code.
# # This callback will handle the automatic request for that dictionary.
# aw.Hyphenation.callback = ExHyphenation.CustomHyphenationDictionaryRegister()
# # When we save the document, German hyphenation will take effect.
# doc.save(ARTIFACTS_DIR + "Hyphenation.register_dictionary.pdf")
# # This dictionary contains two identical patterns, which will trigger a warning.
# self.assertEqual(1, warning_info_collection.count)
# self.assertEqual(aw.WarningType.MINOR_FORMATTING_LOSS, warning_info_collection[0].warning_type)
# self.assertEqual(aw.WarningSource.LAYOUT, warning_info_collection[0].source)
# self.assertEqual("Hyphenation dictionary contains duplicate patterns. The only first found pattern will be used. " +
# "Content can be wrapped differently.", warning_info_collection[0].description)
#class CustomHyphenationDictionaryRegister(aw.IHyphenationCallback):
# """Associates ISO language codes with local system filenames for hyphenation dictionary files."""
# def __init__(self):
# self.hyphenation_dictionary_files = {
# "en-US": MY_DIR + "hyph_en_US.dic",
# "de-CH": MY_DIR + "hyph_de_CH.dic",
# }
# def request_dictionary(self, language: str):
# print("Hyphenation dictionary requested: " + language, end="")
# if aw.Hyphenation.is_dictionary_registered(language):
# print(", is already registered.")
# return
# if self.hyphenation_dictionary_files.contains_key(language):
# aw.Hyphenation.register_dictionary(language, self.hyphenation_dictionary_files[language])
# print(", successfully registered.")
# return
# print(", no respective dictionary file known by this Callback.")
##ExEnd
| 47.727273 | 125 | 0.685714 | 767 | 6,300 | 5.512386 | 0.312907 | 0.074503 | 0.048013 | 0.03122 | 0.264428 | 0.204825 | 0.151372 | 0.132923 | 0.074267 | 0.047777 | 0 | 0.003493 | 0.22746 | 6,300 | 131 | 126 | 48.091603 | 0.865215 | 0.757143 | 0 | 0.142857 | 0 | 0 | 0.099509 | 0.053259 | 0 | 0 | 0 | 0 | 0.214286 | 1 | 0.071429 | false | 0 | 0.142857 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d52046c063e104480fca192e02774204fb6af2fe | 43,682 | py | Python | sim/python/theory_control.py | wpisailbot/boat | 7c053d67422d21af95e350c4c9d31425e5760df8 | [
"Apache-2.0"
] | 4 | 2017-04-12T19:33:17.000Z | 2019-01-29T07:44:52.000Z | sim/python/theory_control.py | wpisailbot/boat | 7c053d67422d21af95e350c4c9d31425e5760df8 | [
"Apache-2.0"
] | 17 | 2017-12-05T01:43:14.000Z | 2019-02-01T00:48:11.000Z | sim/python/theory_control.py | wpisailbot/boat | 7c053d67422d21af95e350c4c9d31425e5760df8 | [
"Apache-2.0"
] | 2 | 2017-02-19T22:40:12.000Z | 2018-09-07T11:14:24.000Z | #!/usr/bin/python3
import numpy as np
from numpy import matlib
from numpy import random
import sys
import copy
import scipy.signal
import scipy.stats.stats
from matplotlib import pyplot as plt
import unittest
def Norm(t):
while t > np.pi:
t -= 2 * np.pi
while t < -np.pi:
t += 2 * np.pi
return t
def Sign(n):
return 1.0 if n >= 0.0 else -1.0
class Airfoil(object):
def __init__(self, A, rho, lifting=5.0, cmax=1.2):
self.A = A # Cross-sectional area, m^2
self.rho = rho # Density of medium, kg / m^2
self.lifting = lifting
self.Cmax = cmax
def ClipAlpha(self, alpha):
return np.clip(Norm(alpha), -np.pi / 2, np.pi / 2)
def atanClCd(self, alpha):
"""
Based on playing around with some common profiles,
assuming a linear relationship to calculate
atan2(Cl(alpha), Cd(alpha)) w.r.t. alpha
seems reasonable.
"""
clipalpha = self.ClipAlpha(alpha)
deltaatan = -Sign(alpha) if abs(alpha) < np.pi / 2.0 else 0.0
return (np.pi / 2.0 - abs(clipalpha)) * np.sign(clipalpha), deltaatan
def normClCd(self, alpha):
"""
Calculates sqrt(Cl^2 + Cd^2). This
doesn't seem to capture typical profiles
at particularly high angles of attack, but
it seems a fair approximation. This may
cause us to be more incliuned to sail
straight downwind than we really should be.
True profiles have a dip ~70-80 deg angle of attack.
Returns norm, deltanorm/deltaalpha
"""
alpha = self.ClipAlpha(alpha)
exp = np.exp(-self.lifting * abs(alpha))
norm = self.Cmax * (1.0 - exp)
deltanorm = self.Cmax * self.lifting * exp * Sign(alpha)
return norm, deltanorm
def F(self, alpha, v):
"""
Arguments:
alpha: Airfoil angle of attack
v: Relative speed in medium
Returns:
F, deltaF/deltaalpha: Note: deltaF does not account for heel
"""
clipalpha = self.ClipAlpha(alpha)
S = 0.5 * self.rho * self.A * v ** 2
norm, deltanorm = self.normClCd(clipalpha)
F = S * norm
deltaF = S * deltanorm
# Account for stupid angles of attack
deltaF *= -1.0 if abs(alpha) > np.pi / 2.0 else 1.0
return F, deltaF
class DebugForces(object):
def __init__(self):
self.taunet = []
self.Flon = []
self.Flat = []
self.Fs = []
self.Fk = []
self.Fr = []
self.gammas = []
self.gammak = []
self.gammar = []
self.FBlon = []
self.FBlat = []
self.taus = []
self.tauk = []
self.taur = []
self.tauB = []
def UpdateZero(self):
self.Update(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0)
def Update(self, taunet, Flon, Flat, Fs, Fk, Fr, gammas,
gammak, gammar, FBlon, FBlat, taus, tauk, taur, tauB):
self.taunet.append(taunet)
self.Flon.append(Flon)
self.Flat.append(Flat)
self.Fs.append(Fs)
self.Fk.append(Fk)
self.Fr.append(Fr)
self.gammas.append(gammas)
self.gammak.append(gammak)
self.gammar.append(gammar)
self.FBlon.append(FBlon)
self.FBlat.append(FBlat)
self.taus.append(taus)
self.tauk.append(tauk)
self.taur.append(taur)
self.tauB.append(tauB)
def Flonlat(self, F, gamma):
lon = [f * np.cos(g) for f, g in zip(F, gamma)]
lat = [f * np.sin(g) for f, g in zip(F, gamma)]
return lon, lat
def Fslonlat(self):
return self.Flonlat(self.Fs, self.gammas)
def Fklonlat(self):
return self.Flonlat(self.Fk, self.gammak)
def Frlonlat(self):
return self.Flonlat(self.Fr, self.gammar)
class Physics(object):
def __init__(self):
self.hs = 1.5 # Height of sail CoE above origin, m
self.hk = -0.7 # Height of keel CoE above origin, m
self.hr = 0.0 # Height of rudder CoE above origin, m
# Positions longitudinally on the boat relative
# to the origin, in m:
self.rs = 0.1
self.rk = 0.0
self.rr = -0.9
# Distance of the CoE from the rotational point (i.e.,
# 0 would be a rudder that required no force to turn)
self.ls = 0.25
self.lr = 0.0
rhowater = 1000.0 # Density of water, kg / m^3
rhoair = 1.225 # Density of air, kg / m^3
As = 2.0 # Sail Area, m^2
Ak = .3 # Keel Area, m^2
Ar = .04 # Rudder Area, m^2
self.sail = Airfoil(As, rhoair, 5.0, 1.4)
self.keel = Airfoil(Ak, rhowater, 8.0, 1.4)
self.rudder = Airfoil(Ar, rhowater, 4.0, 1.7)
self.Blon = 15.0 # Damping term, N / (m / s)
self.Blat = 25.0 # Lateral damping term, bigger b/c hull long/thin)
# self.Bv = 10.0
self.Bomega = 500 # Damping term, N * m / (rad / sec)
self.hb = -1.0 # Height of CoM of boat ballast, m
self.wb = 14.0 * 9.8 # Weight of boat ballast, N
self.J = 10.0 # Boat Moment of Inertia about yaw, kg * m^2
self.m = 25.0 # Boat mass, kg
def SailForces(self, thetaw, vw, deltas):
"""
Calculates and returns forces from the sail.
Arguments:
thetaw: Wind, 0 = running downwind, +pi / 2 = wind from port
vw: Wind speed, m / s
deltas: Sail angle, 0 = all in, +pi / 2 = sail on starboard
heel: Boat heel, 0 = upright
Returns:
Fs: Magnitude of force from sail (N)
gammas: Angle of force from sail (rad, 0 = forwards, +pi / 2 = pushing to port)
deltaFs: Derivative of Fs w.r.t. deltas
deltagamma: Derivative of gamma w.r.t. deltas
"""
alphas = -Norm(thetaw + deltas + np.pi)
atanC, deltaatan = self.sail.atanClCd(alphas)
Fs, deltaFs = self.sail.F(alphas, vw)
#Fs = Fs if abs(alphas) > 0.08 else 0.0
gammas = Norm(atanC - thetaw)
deltaFs = deltaFs * -1.0 # -1 = dalpha / ddeltas
deltagamma = deltaatan * -1.0 # -1 = dalpha / ddeltas
return Fs, gammas, deltaFs, deltagamma
def KeelForces(self, thetac, vc):
"""
Calculates and returns forces from the sail.
Arguments:
thetac: Current, 0 = Boat going straight, +pi / 2 = Boat drifting to starboard
vc: Speed in water, m / s
heel: Boat heel, 0 = upright
Returns:
Fk: Magnitude of force from keel (N)
gammak: Angle of force from keel (rad, 0 = forwards, +pi / 2 = pushing to port)
"""
alphak = -Norm(thetac)
atanC, _ = self.keel.atanClCd(alphak)
atanC = (np.pi / 2.0 - 0.05) * np.sign(alphak)
Fk, deltaFk = self.keel.F(alphak, vc)
gammak = Norm(atanC - thetac + np.pi)
return Fk, gammak
def RudderForces(self, thetac, vc, deltar):
    """
    Compute the hydrodynamic force produced by the rudder.

    Arguments:
      thetac: Leeway, 0 = boat going straight, +pi / 2 = boat drifting to starboard
      vc: Speed in water, m / s
      deltar: Rudder angle, 0 = straight, +pi / 2 = rudder on starboard
    Returns:
      Fr: Magnitude of force from rudder (N)
      gammar: Angle of force from rudder (rad, 0 = forwards, +pi / 2 = pushing to port)
      deltaFr: dFr / ddeltar
      deltagamma: dgammar / ddeltar (modelled as zero)
    """
    # Effective angle of attack, saturated to the linear-lift region.
    alpha = np.clip(-Norm(thetac + deltar), -.25, .25)
    lift_dir = (np.pi / 2.0 - 0.05) * Sign(alpha)
    # 0.5 * A * rho * v^2 * CL with linearized CL = 5.0 * alpha.
    q = 0.5 * self.rudder.A * self.rudder.rho * vc ** 2 * 5.0
    force = q * abs(alpha)
    dF_ddeltar = q * -Sign(alpha)  # d|alpha|/d(deltar) = -sign(alpha)
    return force, Norm(lift_dir - thetac + np.pi), dF_ddeltar, 0.0
def SailTorque(self, Fs, gammas, deltas, heel, deltaFs,
               deltagammas, deltaheel):
    """
    Calculate yaw torque from the sail, using output from SailForces.

    Arguments:
      Fs, gammas: Sail force magnitude (N) and direction (rad) from SailForces
      deltas: Sail angle (rad)
      heel: Boat heel (rad, 0 = upright)
      deltaFs, deltagammas, deltaheel: Derivatives of Fs, gammas, and heel
        w.r.t. deltas (currently unused; kept for interface compatibility)
    Returns:
      (tau, dtau/ddeltas); the derivative is currently reported as 0.0.
    """
    # Moment arm (rs - ls*cos(deltas)) carries the lateral force component,
    # projected through cos(heel); hk carries the longitudinal component
    # once the boat heels over.
    # NOTE(review): a longer derivation that also produced d(tau)/d(deltas)
    # used to follow this return as unreachable dead code; it has been
    # removed, and the derivative remains 0.0 as before.
    cdeltas = np.cos(deltas)
    return Fs * ((self.rs - self.ls * cdeltas) * np.sin(gammas) * np.cos(heel)
                 + self.hk * np.cos(gammas) * np.sin(heel)), 0.0
def KeelTorque(self, Fk, gammak, heel):
    """
    Calculate yaw torque from the keel, using output from KeelForces.

    Arguments:
      Fk, gammak: Keel force magnitude (N) and direction (rad) from KeelForces
      heel: Boat heel (rad, 0 = upright)
    Returns the keel yaw torque (N*m).
    """
    # rk carries the lateral component (through cos(heel)); hk carries the
    # longitudinal component once heeled.
    # NOTE(review): an alternative arm/angle formulation followed this
    # return as unreachable dead code and has been removed.
    return Fk * (self.rk * np.sin(gammak) * np.cos(heel)
                 + self.hk * np.cos(gammak) * np.sin(heel))
def RudderTorque(self, Fr, gammar, heel, deltaFr, deltaheel):
    """
    Calculate yaw torque from the rudder, using output from RudderForces.
    Assumes self.hr is negligible.

    Returns:
      tau: rudder yaw torque (N*m)
      dtaudr: d(tau)/d(deltar)
      dtauds: d(tau)/d(deltas); pinned to 0.0 — the original author
        disabled the heel-coupled estimate ("not sure if still good").
    """
    cheel = np.cos(heel)
    sgamma = np.sin(gammar)
    tau = self.rr * Fr * sgamma * cheel
    dtaudr = self.rr * cheel * deltaFr * sgamma
    return tau, dtaudr, 0.0
def ApproxHeel(self, Fs, gammas, Fk, gammak, deltaFs, deltagammas):
    """
    Equilibrium heel angle for the given sail and keel forces, plus the
    derivative of that heel with respect to the sail angle deltas.

    Balances the heeling moments of the sail (arm hs) and keel (arm hk)
    against the righting moment of the ballast (hb * wb).
    """
    righting = self.hb * self.wb
    tanheel = (Fs * self.hs * np.sin(gammas)
               + Fk * self.hk * np.sin(gammak)) / righting
    # d(arctan(u))/ds = u' / (1 + u^2); only the sail term varies with deltas.
    dnum = self.hs * (deltaFs * np.sin(gammas) + Fs * np.cos(gammas) * deltagammas)
    dheel = dnum / ((1.0 + tanheel ** 2) * self.hb * self.wb)
    return np.arctan(tanheel), dheel
def NetForce(self, thetaw, vw, thetac, vc, deltas, deltar, heel, omega, debugf=None):
    """
    Sum up all the forces and return net longitudinal and lateral forces, and net torque
    Arguments:
      thetaw: Wind dir
      vw: wind speed
      thetac: Water dir
      vc: Water speed
      deltas: sail angle
      deltar: rudder angle
      heel: Duh.  (Note: overwritten below with the equilibrium estimate.)
      omega: boat rotational velocity, rad / s
      debugf: DebugForces instance for... debugging
    Returns: Flon, Flat, taunet, newheel
    """
    # Sail force plus its derivatives w.r.t. deltas (feeds the heel estimate).
    Fs, gammas, dFsds, dgsds = self.SailForces(thetaw, vw, deltas)
    Fk, gammak = self.KeelForces(thetac, vc)
    # The passed-in heel is replaced by the equilibrium heel for the torque terms.
    heel, dheelds = self.ApproxHeel(Fs, gammas, Fk, gammak, dFsds, dgsds)
    Fr, gammar, dFrdr, dgrdr = self.RudderForces(thetac, vc, deltar)
    taus, dtausds = self.SailTorque(Fs, gammas, deltas, heel, dFsds, dgsds, dheelds)
    tauk = self.KeelTorque(Fk, gammak, heel)
    taur, dtaurdr, dtaurds = self.RudderTorque(Fr, gammar, heel, dFrdr, dheelds)
    # Quadratic rotational damping.
    tauB = -self.Bomega * omega * abs(omega)
    # Hull drag: quadratic longitudinal, linear lateral.
    FBlon = -self.Blon * vc * abs(vc) * np.cos(thetac)
    FBlat = self.Blat * vc * np.sin(thetac)
    Flon = Fs * np.cos(gammas) + Fk * np.cos(gammak) + Fr * np.cos(gammar) + FBlon
    # Lateral force components act through cos(heel) once heeled over.
    Flat = (Fs * np.sin(gammas) + Fk * np.sin(gammak) + Fr * np.sin(gammar)) * np.cos(heel) + FBlat
    taunet = taus + tauk + taur + tauB
    # Equilibrium heel (zero derivative inputs) for the caller to relax towards.
    newheel, _ = self.ApproxHeel(Fs, gammas, Fk, gammak, 0, 0)
    #print("Flon: ", Flon, " Flat: ", Flat, " Blon: ", -self.Blon * vc * np.cos(thetac),
    #      " Fs ", Fs, " gammas ", gammas, " Fk ", Fk, " gammak ", gammak, " Fr ", Fr,
    #      " gammar ", gammar)
    #print("taunet ", taunet, " taus ", taus, " tauk ", tauk, " taur ", taur, " Btau",
    #      -self.Bomega * omega)
    if debugf != None:
        debugf.Update(taunet, Flon, Flat, Fs, Fk, Fr, gammas,
                      gammak, gammar, FBlon, FBlat, taus, tauk, taur, tauB)
    return Flon, Flat, taunet, newheel
def Yadaptive(self, thetaw, vw, thetac, vc, yaw, omega, deltas, deltar):
    """
    Build the 2x6 regressor matrix Y for the adaptive controller, so that
    the modelled control effect factors as u = Y * beta.
    Using: u = {F_lon, tau_net}
    beta = {Blon, Bomega, Ar, rs, taubias, 1}
    """
    # d(F_lon)/d(Blon): quadratic longitudinal damping regressor.
    YFlonBlon = -vc * abs(vc) * np.cos(thetac)
    Fr, gammar, _, _ = self.RudderForces(thetac, vc, deltar)
    # Rudder force is proportional to its area, so divide the area back out.
    YFlonAr = Fr * np.cos(gammar) / self.rudder.A
    Fs, gammas, _, _ = self.SailForces(thetaw, vw, deltas)
    Fk, gammak = self.KeelForces(thetac, vc)
    # Sail and keel contributions do not depend on the estimated parameters.
    YFlonconst = Fs * np.cos(gammas) + Fk * np.cos(gammak)
    YFlon = np.matrix([[YFlonBlon, 0.0, YFlonAr, 0.0, 0.0, YFlonconst]])
    heel, _ = self.ApproxHeel(Fs, gammas, Fk, gammak, 0.0, 0.0)
    taur, _, _ = self.RudderTorque(Fr, gammar, heel, 0.0, 0.0)
    tauk = self.KeelTorque(Fk, gammak, heel)
    taus, _ = self.SailTorque(Fs, gammas, deltas, heel, 0.0, 0.0, 0.0)
    YtauBomega = -omega * abs(omega)
    YtauAr = taur / self.rudder.A
    # d(taus)/d(rs): the sail's lateral force times cos(heel).
    Ytaurs = Fs * np.sin(gammas) * np.cos(heel)
    # Remaining torque with the rs-proportional part of taus removed.
    Ytauconst = tauk + (taus - Ytaurs * self.rs)
    Ytau = np.matrix([[0.0, YtauBomega, YtauAr, Ytaurs, 1.0, Ytauconst]])
    #print("Ytau: ", Ytau)
    #print("YFlon: ", YFlon)
    return np.concatenate((YFlon, Ytau), axis=0)
def Update(self, truewx, truewy, x, y, vx, vy, yaw, omega, deltas, deltar,
           heel, dt, flopsail=False, debugf=None):
    """
    Advance the boat state by one timestep of dt seconds.

    truewx/truewy: true wind vector (world frame); the rest is the boat
    state.  Returns the new
    (x, y, vx, vy, yaw, omega, heel, thetac, vc, thetaw, vw).
    """
    # Leeway angle and speed through the water (boat frame).
    thetac = -Norm(np.arctan2(vy, vx) - yaw)
    vc = np.sqrt(vx ** 2 + vy ** 2)
    # Apparent wind = true wind minus boat velocity.
    appwx = truewx - vx
    appwy = truewy - vy
    thetaw = Norm(-np.arctan2(appwy, appwx) + yaw)
    vw = np.sqrt(appwx ** 2 + appwy ** 2) * 1.6  # For wind gradient
    if flopsail:
        # Keep the sail on the correct side of the apparent wind.
        deltas = abs(deltas) if thetaw > 0 else -abs(deltas)
    #print("thetac ", thetac, " vc ", vc, " thetaw ", thetaw, " vw ", vw)
    Flon, Flat, tau, newheel = self.NetForce(
        thetaw, vw, thetac, vc, deltas, deltar, heel, omega, debugf)
    if False:
        # Disabled alternative: exact discretization of a damped double
        # integrator.  NOTE(review): references self.Bv, which is commented
        # out in __init__, so this branch would fail if re-enabled — verify
        # before turning it back on.
        Ac = np.matrix([[0.0, 1.0],
                        [0.0, -self.Bv / self.m]])
        Bc = np.matrix([[0.0], [1.0 / self.m]])
        (Ad, Bd, _, _, _) = scipy.signal.cont2discrete((Ac, Bc, Ac, Bc), dt)
        statex = np.matrix([[x], [vx]])
        forcex = Flon * np.cos(yaw) - Flat * np.sin(yaw)
        statex = Ad * statex + Bd * forcex
        statey = np.matrix([[y], [vy]])
        forcey = Flon * np.sin(yaw) + Flat * np.cos(yaw)
        statey = Ad * statey + Bd * forcey
        x = statex[0, 0]
        y = statey[0, 0]
        vx = statex[1, 0]
        vy = statey[1, 0]
    else:
        # Rotate body-frame forces into world frame and integrate.
        ax = (Flon * np.cos(yaw) - Flat * np.sin(yaw)) / self.m
        ay = (Flon * np.sin(yaw) + Flat * np.cos(yaw)) / self.m
        x += vx * dt + 0.5 * ax * dt ** 2
        y += vy * dt + 0.5 * ay * dt ** 2
        vx += ax * dt
        vy += ay * dt
    alpha = tau / self.J
    yaw += omega * dt + 0.5 * alpha * dt ** 2
    yaw = Norm(yaw)
    omega += alpha * dt
    # First-order relaxation of heel towards its equilibrium value.
    kHeel = 0.3
    heel = heel + (1.0 - np.exp(-kHeel * dt)) * (newheel - heel)
    # heel = newheel
    thetac = -Norm(np.arctan2(vy, vx) - yaw)
    vc = np.sqrt(vx ** 2 + vy ** 2)
    return x, y, vx, vy, yaw, omega, heel, thetac, vc, thetaw, vw
def RunBase(self, ts, winds, x0, v0, yaw0, omega0, heel0, control,
            flopsail=False, debugf=None):
    """
    ts: Times to simulate over, e.g. [0, .1, .2, .3, .4]
      to simulate 4 steps of 0.1sec each
    winds: list of two lists, where each sublist
      is of length ts and contains the true wind
      at that time
    x0: list of length 2 = (x, y) initial position
    v0: list of length 2 = (x, y) initial velocity
    yaw0: float, initial yaw
    omega0: float, initial time derivative of yaw
    heel0: float, initial heel
    control: Function, of the form:
      Params:
        i: current index from ts/winds that we are at
        t: ts[i]
        thetaw: Apparent wind dir
        vw: Apparent wind vel
        thetac: Apparent current
        vc: Apparent water speed
      Returns: deltas, deltar
    Returns parallel history lists (one entry per element of ts).
    """
    xs = [x0[0]]
    ys = [x0[1]]
    vxs = [v0[0]]
    vys = [v0[1]]
    yaws = [yaw0]
    omegas = [omega0]
    heels = [heel0]
    vcs = [np.hypot(v0[0], v0[1])]
    thetacs = [Norm(np.arctan2(v0[1], v0[0]) + yaws[0])]
    # Apparent-wind history starts at zero; real values begin at step 1.
    vws = [0.0]
    thetaws = [0.0]
    deltass = []
    deltars = []
    for i in range(1, len(ts)):
        # Guard against non-positive or overly large integration steps.
        dt = np.clip(ts[i] - ts[i - 1], 0.001, 0.2)
        wx = winds[0][i]
        wy = winds[1][i]
        deltas, deltar = control(
            i, ts[i], thetaws[-1], vws[-1], thetacs[-1], vcs[-1], yaws[-1], omegas[-1])
        deltass.append(deltas)
        deltars.append(deltar)
        x, y, vx, vy, yaw, omega, heel, thetac, vc, thetaw, vw = self.Update(
            wx, wy, xs[-1], ys[-1], vxs[-1], vys[-1], yaws[-1], omegas[-1],
            deltas, deltar, heels[-1], dt, flopsail, debugf)
        # Crude divergence guard: zero the velocities if the sim blows up.
        if abs(vx) > 100:
            vx = 0
            vy = 0
            omega = 0
            heel = 0
        xs.append(x)
        ys.append(y)
        vxs.append(vx)
        vys.append(vy)
        yaws.append(yaw)
        omegas.append(omega)
        heels.append(heel)
        thetacs.append(thetac)
        vcs.append(vc)
        thetaws.append(thetaw)
        vws.append(vw)
    # Pad the control histories so all returned lists share the same length.
    deltass.append(0.0)
    deltars.append(0.0)
    return xs, ys, vxs, vys, yaws, omegas, heels, thetacs, vcs,\
        thetaws, vws, deltass, deltars
def Run(self, wind, v0, omega0, heel0, control, dt=0.01, niter=200, flopsail=True, debugf=None):
    """
    Convenience wrapper around RunBase: accepts either a constant wind
    [wx, wy] or per-step wind lists, starts the boat at the origin with
    zero initial yaw, and simulates niter steps of dt seconds.
    """
    winds = wind
    if not isinstance(wind[0], list):
        # Constant wind: replicate it for every timestep.
        wx = [wind[0]] * niter
        wy = [wind[1]] * niter
        winds = [wx, wy]
    ts = [i * dt for i in range(niter)]
    return self.RunBase(ts, winds, [0.0, 0.0], v0, 0.0, omega0, heel0,
                        control, flopsail=flopsail, debugf=debugf)
    # NOTE(review): everything below is unreachable dead code from before
    # RunBase existed; it also references names (wx, deltas, deltar) that
    # are not always defined here and an outdated Update() return signature.
    xs = [0]
    ys = [0]
    vxs = [v0[0]]
    vys = [v0[1]]
    yaws = [0]
    omegas = [omega0]
    heels = [heel0]
    vcs = [np.hypot(v0[0], v0[1])]
    thetacs = [Norm(np.arctan2(v0[1], v0[0]) + yaws[0])]
    for i in range(niter):
        #print(i * dt)
        x, y, vx, vy, yaw, omega, heel, thetac, vc = self.Update(
            wx, wy, xs[-1], ys[-1], vxs[-1], vys[-1], yaws[-1], omegas[-1],
            deltas, deltar, heels[-1], dt)
        if abs(vx) > 100:
            vx = 0
            vy = 0
            omega = 0
            heel = 0
        xs.append(x)
        ys.append(y)
        vxs.append(vx)
        vys.append(vy)
        yaws.append(yaw)
        omegas.append(omega)
        heels.append(heel)
        thetacs.append(thetac)
        vcs.append(vc)
    return xs, ys, vxs, vys, yaws, omegas, heels, thetacs, vcs
class Controller(object):
    """
    Adaptive sail/rudder controller.

    Chooses sail and rudder angles that maximize forward force subject to
    a yaw-torque objective, while adapting estimates of uncertain physical
    parameters (beta = [Blon, Bomega, Ar, rs, taubias, 1]) online.
    """
    def __init__(self, physics):
        self.physics = physics
        self.maxrud = 0.25  # Rudder angle-of-attack saturation, rad.
        # Cost weights: torque error vs. forward force.
        self.Qtau = 0.01
        self.Qf = 1.0
        self.goalyaw = -np.pi / 2.0
        # Reference-trajectory shaping limits (negative disables that ramp).
        self.maxyawrefacc = 0.2
        self.maxyawrefvel = 0.2
        self.yawref = 0.0
        self.omegaref = 0.0
        # Adaptation gains for beta; zero entries freeze that parameter.
        self.Kbeta = np.diag([0.0, 0.0, 0.01, 0.05, 0.01, 0.0])
        self.beta = np.matrix([[physics.Blon],
                               [physics.Bomega],
                               [physics.rudder.A],
                               [physics.rs],
                               [0.0],
                               [1.0]])
        self.betamin = np.matrix([[0.0],
                                  [0.0],
                                  [0.01],
                                  [-1.0],
                                  [-10.0],
                                  [1.0]])
        self.betamax = np.matrix([[1000.0],
                                  [10000.0],
                                  [1.0],
                                  [1.0],
                                  [10.0],
                                  [1.0]])
        self.Lambda = np.diag([1.0, 1.0])
        self.lastt = float("nan")
        # Leak gain blending the reference towards the measured state.
        self.Kref = 0.95
        # Logged histories for plotting.
        self.betas = []
        self.torques = []
        self.yawrefs = []
    def Clear(self):
        """Reset the logged histories (they otherwise persist across runs)."""
        self.betas = []
        self.torques = []
        self.yawrefs = []
    def ClipSail(self, deltas, thetaw):
        """Clip the sail command onto the reachable side/range for thetaw."""
        maxsail = abs(Norm(np.pi - thetaw))
        return np.clip(deltas, 0.0 if thetaw > 0.0 else -maxsail,
                       maxsail if thetaw > 0.0 else 0.0)
    def ClipRudder(self, deltar, thetac):
        """Clip the rudder so its angle of attack stays within +/-maxrud."""
        return np.clip(deltar, -self.maxrud - thetac, self.maxrud - thetac)
    def Adapt(self, thetaw, vw, thetac, vc, yaw, omega, deltas, deltar,
              goalyaw, goalomega):
        """
        Compute the adaptation-law derivative betadot for the parameter
        estimate, driven by the tracking error against the reference.
        """
        # u = Y beta
        # u = u_r + diff = Y beta
        Y = self.physics.Yadaptive(
            thetaw, vw, thetac, vc, yaw, omega, deltas, deltar)
        yawdiff = Norm(goalyaw - yaw)
        omegadiff = goalomega - omega
        vcgoal = vc
        diff = np.matrix([[0.0], [omegadiff]]) +\
            self.Lambda * np.matrix([[vcgoal - vc], [yawdiff]])
        #print("diff: ", diff)
        #print("dot: ", (Y.T * diff).T)
        betadot = -self.Kbeta * Y.T * diff
        return betadot
    def ControlMaxForce(self, i, t, thetaw, vw, thetac, vc, yaw, omega):
        """
        Main control callback (matches the `control` signature of RunBase).
        Picks (deltas, deltar), updates the parameter estimate, and advances
        the yaw reference ramp.  Returns (deltas, deltar).
        """
        dt = t - self.lastt
        if np.isnan(self.lastt):
            dt = 0.0
        self.lastt = t
        # self.Qtau = 1.0
        goalomega = 0.0
        # PD feedback on the yaw error, corrected by the adapted torque bias.
        taue = 20.0 * Norm(self.goalyaw - yaw) + (goalomega - omega) * 15.0\
            - self.beta[4, 0]
        #taue = 0.0
        constraint = 0.0
        # BUG FIX(review): this previously called the module-level global
        # `control` (control.GlobalMaxForceTorque); use self so the
        # controller does not depend on a global happening to alias it.
        _, _, _, taues, mini, deltas, deltar = self.GlobalMaxForceTorque(
            thetaw, vw, thetac, vc, taue, constraint, 20)
        if mini >= 0:
            self.torques.append(taues[mini])
        else:
            self.torques.append(float("nan"))
        self.yawrefs.append(self.yawref)
        if np.isnan(deltas) and constraint == 0.0:
            # No feasible candidate; hold everything centered this step.
            self.betas.append(self.beta)
            return 0.0, 0.0
        betadot = self.Adapt(
            thetaw, vw, thetac, vc, yaw, omega, deltas, deltar,
            self.yawref, self.omegaref)
        if vc < 0.5:
            # Too slow for meaningful regression data; freeze adaptation.
            betadot *= 0
        self.beta += betadot * dt
        self.beta = np.clip(self.beta, self.betamin, self.betamax)
        self.betas.append(self.beta)
        #print(self.beta.T)
        # Push the adapted parameters back into the physics model.
        self.physics.rudder.A = self.beta[2, 0]
        self.physics.rs = self.beta[3, 0]
        cur_yaw = self.yawref
        cur_omega = self.omegaref
        if i % 1 == 0:
            # Leak the reference towards the measured state.
            K = self.Kref
            cur_yaw = K * self.yawref + (1 - K) * yaw
            cur_omega = K * self.omegaref + (1 - K) * omega
        max_acc = self.maxyawrefacc
        max_vel = self.maxyawrefvel
        exp_vel = np.clip(Norm(self.goalyaw - cur_yaw), -max_vel, max_vel)
        exp_acc = np.clip(exp_vel - cur_omega, -max_acc, max_acc)
        if self.maxyawrefvel < 0.0:
            # Ramp disabled: jump straight to the goal.
            self.yawref = self.goalyaw
        elif self.maxyawrefacc < 0.0:
            # Velocity-limited ramp only.
            self.yawref = cur_yaw + exp_vel * dt
        else:
            # Acceleration- and velocity-limited ramp.
            self.omegaref = cur_omega + exp_acc * dt
            self.yawref = cur_yaw + cur_omega * dt + exp_acc * 0.5 * dt * dt
        self.yawref = Norm(self.yawref)
        if np.isnan(deltas) and constraint != 0:
            # Constrained search failed; retry with an unbounded torque goal.
            taue = -np.sign(constraint) * float("inf")
            _, _, _, _, _, deltas, deltar = self.GlobalMaxForceTorque(
                thetaw, vw, thetac, vc, taue, constraint, 20)
        return deltas, deltar
    def ControlGradDescent(self, i, t, thetaw, vw, thetac, vc, omega):
        """Older controller: gradient refinement from a heuristic start."""
        ds = np.clip(Norm(np.pi - thetaw), -np.pi / 2.0, np.pi / 2.0)
        #print("thetaw ", thetaw, " ds ", ds)
        deltari = 0.25
        return self.MaxForceForTorque(thetaw, vw, thetac, vc, ds, deltari)
    def TorqueConstrainedRudder(
            self, taus, tauk, taug, heel, thetac, vc):
        """
        Solve (with a linearized rudder model) for the rudder angle that
        makes the total yaw torque equal taug given the sail/keel torques.
        """
        CR = 5.0  # TODO: parameterize properly.
        denom = 0.5 * self.physics.rudder.rho * self.physics.rudder.A\
            * vc ** 2 * CR * np.cos(thetac) * np.cos(heel) * self.physics.rr
        return -(taus + tauk - taug) / denom - thetac
    def GlobalMaxForceTorque(
            self, thetaw, vw, thetac, vc, taug, constraint, nsteps):
        """
        Parameters:
          thetaw, vw, thetac, vc: Wind and current directions and speeds
          taug: Nominal torque to work to
          constraint: Whether we should attempt equality (0.0),
            maximize (1.0), or minimize (-1.0) torque, using taug
            as a constraint either for the equality or as a lower/upper
            bound on the torque.
          nsteps: Number of sail positions to consider.
        Returns:
          (deltass, deltars, Flons, taues, mini, deltasmax, deltarmax),
          where mini indexes the minimum-cost candidate (-1 if none was
          valid) and deltasmax/deltarmax are that candidate's angles
          (NaN if none).
        """
        # Reachable sail range for this apparent wind direction.
        maxsail = abs(Norm(np.pi - thetaw))
        minsail = maxsail - np.pi / 2.0
        minds = max(0.0, minsail) if thetaw > 0.0 else -maxsail
        maxds = maxsail if thetaw > 0.0 else min(-minsail, 0.0)
        mindr = -self.maxrud - thetac
        maxdr = self.maxrud - thetac
        deltass = []
        deltars = []
        Flons = []
        taues = []
        costs = []
        mincost = float("inf")
        mini = -1
        deltasmax = float("nan")
        deltarmax = float("nan")
        hardtorque = constraint == 0.0
        maximizetau = constraint > 0.0  # Worry about Q_F?
        for deltas in np.linspace(minds, maxds, num=nsteps):
            Fs, gammas, dFsds, dgsds = self.physics.SailForces(
                thetaw, vw, deltas)
            Fk, gammak = self.physics.KeelForces(thetac, vc)
            heel, dheelds = self.physics.ApproxHeel(
                Fs, gammas, Fk, gammak, dFsds, dgsds)
            taus, dtausds = self.physics.SailTorque(
                Fs, gammas, deltas, heel, dFsds, dgsds, dheelds)
            tauk = self.physics.KeelTorque(Fk, gammak, heel)
            deltarconstrained = self.TorqueConstrainedRudder(
                taus, tauk, taug, heel, thetac, vc)
            deltar = deltarconstrained
            if hardtorque:
                deltar = self.ClipRudder(deltar, thetac)
            else:
                # Increasing deltar decreases torque (normally)
                if maximizetau:
                    deltar = min(deltarconstrained, mindr)
                else:
                    deltar = max(deltarconstrained, maxdr)
            if self.ClipRudder(deltar, thetac) != deltar:
                # Invalid result for rudder.
                continue
            Fr, gammar, dFrdr, dgrdr = self.physics.RudderForces(
                thetac, vc, deltar)
            taur, dtaurdr, dtaurds = self.physics.RudderTorque(
                Fr, gammar, heel, dFrdr, dheelds)
            Flon = Fs * np.cos(gammas) + Fk * np.cos(gammak) \
                + Fr * np.cos(gammar)
            taue = taus + tauk + taur
            # Could be signtau = -np.sign(constraint)
            signtau = 0.0 if hardtorque else (-1.0 if maximizetau else 1.0)
            cost = signtau * self.Qtau * taue - self.Qf * Flon
            if hardtorque:
                # Quadratic penalty on missing the torque target.
                cost += self.Qtau * (taue - taug) * (taue - taug)
            deltass.append(deltas)
            deltars.append(deltar)
            Flons.append(Flon)
            taues.append(taue)
            costs.append(cost)
            if cost < mincost:
                mini = len(deltass) - 1
                mincost = cost
                deltasmax = deltas
                deltarmax = deltar
        return deltass, deltars, Flons, taues, mini, deltasmax, deltarmax
    def MaxForceForTorque(self, thetaw, vw, thetac, vc, deltasi, deltari):
        """
        Given a particular set of conditions, adjusts deltar
        and deltas to optimize for net forwards force while
        maintaining the torque generated by the original
        conditions.
        """
        laststep = 0.0
        deltasstep = 0.0
        taunom = float('nan')
        clipr = deltari
        clips = deltasi
        deltar = deltari
        deltas = deltasi
        # Ascend the forward-force gradient until the step direction flips
        # (local maximum) or a clip limit is hit.
        while deltasstep * laststep >= 0.0:  # or np.isnan(taunom):
            Fs, gammas, dFsds, dgsds = self.physics.SailForces(thetaw, vw, deltas)
            Fk, gammak = self.physics.KeelForces(thetac, vc)
            heel, dheelds = self.physics.ApproxHeel(Fs, gammas, Fk, gammak, dFsds, dgsds)
            Fr, gammar, dFrdr, dgrdr = self.physics.RudderForces(thetac, vc, deltar)
            taus, dtausds = self.physics.SailTorque(Fs, gammas, deltas, heel, dFsds, dgsds, dheelds)
            # Ignore the keel...
            taur, dtaurdr, dtaurds = self.physics.RudderTorque(Fr, gammar, heel, dFrdr, dheelds)
            taunet = taus + taur
            if np.isnan(taunom):
                # First iteration: latch the torque we must maintain.
                taunom = taunet
            tauerr = taunet - taunom
            #print("tauerr: ", tauerr)
            dFlonds = dFsds * np.cos(gammas) - Fs * np.sin(gammas) * dgsds
            laststep = deltasstep
            deltasstep = 0.01 * Sign(dFlonds)
            deltas += deltasstep
            # Compensate with the rudder to hold the nominal torque.
            dtau = dtausds * deltasstep + dtaurds * deltasstep
            deltarstep = -(dtau + tauerr) / dtaurdr
            deltar += deltarstep
            clips = self.ClipSail(deltas, thetaw)
            clipr = self.ClipRudder(deltar, thetac)
            if clips != deltas or clipr != deltar:
                # Hit an actuation limit; return the clipped values.
                break
        return clips, clipr
# TODO:
# Only reduce longitudinal sail/rudder authority when heeled.
# For control strategy:
# Maximize forwards force while providing at least X turning torque,
# provide range of turning torques to planner, generate approximation
# of max forwards force as function of turning torque, supply leeways.
# Maximum allowable torque is the maximum generatable from the rudder
# with current heel and sail. From there, we then begin to try
# to improve forwards force by following the gradient (we adjust the
# sail and then adjust the rudder, iteratively).
def SimpleControl(i, t, tw, vw, tc, vc, yaw, omega, goalyaw):
    """
    Baseline heuristic controller: sheet the sail to half of the
    (normalized) angle off dead-downwind, and steer the rudder
    proportionally to the yaw error, saturated at +/-0.3 rad.
    Returns (deltas, deltar).
    """
    sail_cmd = 0.5 * Norm(np.pi - Norm(tw))
    yaw_error = Norm(goalyaw - yaw)
    rudder_cmd = np.clip(-yaw_error, -0.3, 0.3)
    return sail_cmd, rudder_cmd
def SailForcesAndTorque(physics, thetaw, vw, thetac, vc, deltas):
    """
    Plotting helper: evaluate the physics model at a single sail angle and
    return (forward sail force, sail yaw torque, equilibrium heel).
    The model's derivative outputs are ignored.
    """
    Fs, gammas, _dF, _dg = physics.SailForces(thetaw, vw, deltas)
    Fk, gammak = physics.KeelForces(thetac, vc)
    heel, _ = physics.ApproxHeel(Fs, gammas, Fk, gammak, 0.0, 0.0)
    taus, _ = physics.SailTorque(Fs, gammas, deltas, heel, 0, 0, 0)
    return Fs * np.cos(gammas), taus, heel
def PlotSail(physics, thetaw, vw, thetac, vc, fname=None):
    """
    Sweep the sail through its reachable range at fixed wind/current and
    plot forward force, yaw torque, and heel vs. sail angle.
    Saves the figure to fname if given.
    """
    # Reachable sail range for this apparent wind direction.
    maxsail = abs(Norm(np.pi - thetaw))
    minsail = maxsail - np.pi / 2.0
    minds = max(0.0, minsail) if thetaw > 0.0 else -maxsail
    maxds = maxsail if thetaw > 0.0 else min(-minsail, 0.0)
    Fss = []
    tauss = []
    heels = []
    deltass = np.arange(minds, maxds, 0.01)
    for deltas in deltass:
        Fs, taus, heel = SailForcesAndTorque(
            physics, thetaw, vw, thetac, vc, deltas)
        Fss.append(Fs)
        tauss.append(taus)
        heels.append(heel)
    # Human-readable point of sail for the plot title.
    tack = "running" if abs(thetaw) < 0.5 else \
        "broad reach" if abs(thetaw) < 1.4 else \
        "beam reach" if abs(thetaw) < 1.8 else \
        "close reach" if abs(thetaw) < 2.5 else \
        "close hauled" if abs(thetaw) < 2.8 else \
        "in irons"
    plt.figure()
    plt.title("Sail Forces for Various $\delta_s$, thetaw=%f (%s)" % (thetaw, tack))
    plt.plot(deltass, Fss, label="Sail forward force ($F_{s,lon}$)")
    plt.plot(deltass, tauss, label="Sail yaw torque ($\\tau_s$)")
    plt.xlabel("Sail angle, $\delta_s$ (radians), from %s (left) to %s (right)"
               % ("fully stalled" if thetaw > 0 else "luffing",
                  "fully stalled" if thetaw <= 0 else "luffing"))
    plt.ylabel("Force (N), Torque (N-m)")
    plt.legend(loc='upper left')
    # Heel on a secondary y-axis.
    ax = plt.twinx()
    ax.plot(deltass, heels, 'r', label="Heel angle ($\psi$)")
    ax.set_ylabel("Heel Angle (radians)")
    ax.legend(loc='upper right')
    plt.xlim((minds, maxds))
    if fname != None:
        plt.savefig(fname)
def PlotMaxForceForTorque(control, thetaw, vw, thetac, vc, taue, nsteps):
    """
    Plot the candidates explored by GlobalMaxForceTorque (equality
    constraint, constraint=0.0): forward force and achieved torque on the
    left axis, the matching rudder angle on the right.
    """
    deltass, deltars, Flons, taues, mini, deltasmax, deltarmax = \
        control.GlobalMaxForceTorque(thetaw, vw, thetac, vc, taue, 0.0, nsteps)
    plt.figure()
    plt.plot(deltass, Flons, label="$F_{lon}$")
    plt.plot(deltass, taues, label="$\\tau_e$")
    plt.legend(loc='upper left')
    ax = plt.twinx()
    ax.plot(deltass, deltars, 'r', label="$\delta_r$")
    ax.legend(loc='upper right')
def PlotTrajectory(
        sim, fcontrol, goalyaw, wind, title=None, fname=None, control=None):
    """
    Run a 30 s (3000 x 0.01 s) simulation with the given control callback
    and plot yaw/speed; if a Controller is supplied, also plot its adapted
    parameters and commanded torques.
    """
    # NOTE(review): this unconditionally discards the caller's `control`
    # argument, disabling all the controller subplots below — it looks like
    # a leftover debugging override; confirm before removing.
    control = None
    if title:
        print("Starting ", title)
    if control:
        control.Clear()
    v0 = [0.0, 0.0]
    omega0 = 0.0
    heel0 = 0.0
    dt = 0.01
    niter = 3000
    t = [dt * n for n in range(niter)]
    xs, ys, vxs, vys, yaws, omegas, heels, thetacs, vcs, thetaws, vws,\
        deltasopt, deltaropt = sim.Run(
            wind, v0, omega0, heel0, fcontrol, dt, niter)
    plt.figure()
    if control:
        plt.subplot(211)
    plt.plot(t, yaws, 'b', label='yaw')
    plt.plot(t, [goalyaw] * len(t), 'b--', label='goal yaw')
    if control:
        plt.plot(t[0:-1], control.yawrefs, 'b*', label='yawref')
    plt.ylabel("Yaw (radians)")
    l = plt.legend(loc='upper left')
    l.set_zorder(0)
    # Speed and omega share a secondary axis.
    twin = plt.twinx()
    twin.plot(t, vcs, 'g', label='speed')
    twin.plot(t, omegas, 'r', label='omega')
    twin.set_ylabel("Speed (m/s)")
    l = twin.legend(loc='upper right')
    l.set_zorder(0)
    plt.xlabel("Time (sec)")
    if title != None:
        plt.title(title)
    if control:
        # Adapted parameters and logged torques (histories are one shorter
        # than t, hence the [:-1] slices).
        plt.subplot(212, sharex=twin)
        plt.plot(t[:-1], [b[2, 0] for b in control.betas], 'b', label='Ar')
        plt.plot(t[:-1], [b[3, 0] for b in control.betas], 'g', label='rs')
        plt.plot(t[:-1], [b[4, 0] for b in control.betas], 'r', label='taubias')
        plt.legend(loc='upper left')
        plt.twinx()
        plt.plot(t[0:-1], [t / 10. for t in control.torques], 'y', label='torques')
        plt.legend(loc='upper right')
    if fname != None:
        plt.savefig(fname)
def MakeWind(speedmean, speedstd, dirmean, dirstd, n):
    """
    Uses auto-regressive process to compute a set
    of wind x/y velocities. Returns a 2-item list where each
    item is a list of all the x/y velocities respectively.
    """
    # AR(N) filter: each new sample is the mean of the last N deviations
    # (scaled by 0.99 for stability) plus Gaussian noise.
    N = 10
    phispeed = matlib.ones((1, N)) / N * 0.99
    phidir = matlib.ones((1, N)) / N * 0.99
    s0 = speedmean
    d0 = dirmean
    speeds = [s0]
    dirs = [d0]
    xs = []
    ys = []
    # Noise draws; presumably `random` is numpy.random here — TODO confirm
    # against the file's imports.
    espeed = lambda: random.normal(speedmean, speedstd)
    edir = lambda: random.normal(dirmean, dirstd)
    for ii in range(1, n+1):
        Xspeed = matlib.zeros(phispeed.shape).T
        Xdir = matlib.zeros(phidir.shape).T
        for jj in range(N):
            # Clamp history lookups at index 0 until N samples exist.
            idx = max(ii + jj - N, 0)
            Xspeed[jj, 0] = speeds[idx] - speedmean
            Xdir[jj, 0] = dirs[idx] - dirmean
        speeds.append(float(phispeed * Xspeed + espeed()))
        dirs.append(float(phidir * Xdir + edir()))
        # Convert polar (speed, direction) to x/y components.
        xs.append(speeds[-1] * np.cos(dirs[-1]))
        ys.append(speeds[-1] * np.sin(dirs[-1]))
    return [xs, ys]
if __name__ == "__main__":
    # --- Open-loop sanity run: fixed sail/rudder, constant wind. ---
    sim = Physics()
    wind = [0.0, -3.0]
    v0 = [0.0, 0.0]
    omega0 = 0.0
    heel0 = 0.0
    deltas = 0.0
    deltar = 0.25
    dt = 0.01
    niter = 5000
    t = [dt * n for n in range(niter)]
    forces = DebugForces()
    control = lambda i, t, tw, vw, tc, vc, yaw, om: (deltas, deltar)
    xs, ys, vxs, vys, yaws, omegas, heels, thetacs, vcs, thetaws, vws, _, _ =\
        sim.Run(
            wind, v0, omega0, heel0, control, dt=dt, niter=niter)
    if 0:
        # Disabled: sail characterization sweeps at various wind angles.
        PlotSail(sim, 0.001, 3.0, 0.0, 1.0)
        PlotSail(sim, np.pi / 4.0, 3.0, 0.0, 1.0)
        PlotSail(sim, np.pi / 2.0, 3.0, 0.0, 1.0, 'sail_forces_beam.eps')
        PlotSail(sim, 3 * np.pi / 4.0, 3.0, 0.0, 1.0)
        PlotSail(sim, 7 * np.pi / 8.0, 3.0, 0.0, 1.0)
        PlotSail(sim, 3.0, 3.0, 0.0, 1.0)
    # The controller adapts (mutates) its physics model, so it gets its own
    # Physics instance, separate from the simulation's.
    controlsim = Physics()
    control = Controller(controlsim)
    if 0:
        PlotMaxForceForTorque(control, np.pi / 2.0, 3.0, 0.05, 0.4, -2.0, 50)
    # control.MaxForceForTorque(-1.51716946346, 4.56503205727,
    # -0.0564452767422, 0.648521086573, -1.57079632679, 0.25)
    # control.MaxForceForTorque(-1.51638429183, 4.56599217829,
    # -0.0695781219434, 0.640581832306, -1.57079632679, 0.25)
    # control.MaxForceForTorque(-1.51716946346, 4.56503205727,
    # -0.0564452767422, 0.648521086573, -1.57079632679, 0.25)
    # control.MaxForceForTorque(-1.51638429183, 4.56599217829,
    # -0.0695781219434, 0.640581832306, -1.57079632679, 0.25)
    # sys.exit()
    #deltasopt = []
    #deltaropt = []
    #for i in range(len(thetaws)):
    #    print(i)
    #    ds = deltasopt[-1] if len(deltasopt) > 0 else deltas
    #    ds = np.clip(Norm(np.pi - thetaws[i]), -np.pi / 2.0, np.pi / 2.0)
    #    ds = abs(ds) if thetaws[i] > 0 else -abs(ds)
    #    dsopt, dropt = control.MaxForceForTorque(
    #        thetaws[i], vws[i], thetacs[i], vcs[i], ds, deltar)
    #    print("ds ", dsopt, " dr ", dropt)
    #    deltasopt.append(dsopt)
    #    deltaropt.append(dropt)
    # --- Closed-loop comparisons at a near-beam goal heading. ---
    gyaw = 0.1
    control.goalyaw = gyaw
    simple_ctrl = lambda i, t, tw, vw, tc, vc, yaw, om: \
        SimpleControl(i, t, tw, vw, tc, vc, yaw, om, control.goalyaw)
    PlotTrajectory(sim, simple_ctrl, control.goalyaw, wind,
                   title="Old Controller", fname="old_beam.eps")
    PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
                   wind, title="Nominal Conditions",
                   fname="full_nominal_beam.eps", control=control)
    # Same controller driven by an auto-regressive noisy wind.
    old_wind = wind
    wind = MakeWind(3.0, 0.1, -np.pi / 2.0, 0.05, 3000)
    controlsim = Physics()
    control = Controller(controlsim)
    control.goalyaw = gyaw
    PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
                   wind, title="Nominal Conditions, Noisy Wind",
                   fname="full_nominal_beam_noisy_wind.eps", control=control)
    wind = old_wind
    # Baseline with parameter adaptation disabled (Kbeta = 0).
    controlsim = Physics()
    control = Controller(controlsim)
    control.goalyaw = gyaw
    control.Kbeta *= 0.0
    PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
                   wind, title="Nominal Conditions, $K_\\beta = 0$",
                   fname="kb0_nominal_beam.eps", control=control)
    # --- Deliberately skew the controller's model away from the boat. ---
    controlsim.rs += 0.5
    controlsim.hs *= 0.7
    controlsim.Blon -= 10
    controlsim.keel.A *= 0.8
    # controlsim.sail.A *= 0.8
    controlsim.rr *= 1.2
    controlsim.Blat *= 0.9
    controlsim.Bomega *= 5.0
    control = Controller(copy.deepcopy(controlsim))
    control.goalyaw = gyaw
    PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
                   wind, title="Skewed Simulation",
                   fname="full_skewed_beam.eps", control=control)
    # Sweep the reference-leak gain Kref under the skewed model.
    control = Controller(copy.deepcopy(controlsim))
    control.Kref = 0.99
    control.goalyaw = gyaw
    PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
                   wind, title="Skewed, Kref=0.99",
                   fname="kref99_skew_beam.eps", control=control)
    control = Controller(copy.deepcopy(controlsim))
    control.Kref = 0.9
    control.goalyaw = gyaw
    PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
                   wind, title="Skewed, Kref=0.9",
                   fname="kref9_skew_beam.eps", control=control)
    control = Controller(copy.deepcopy(controlsim))
    control.Kref = 1.0
    control.goalyaw = gyaw
    PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
                   wind, title="Skewed, Kref=1.0",
                   fname="kref1_skew_beam.eps", control=control)
    control = Controller(copy.deepcopy(controlsim))
    control.Kref = 0.0
    control.goalyaw = gyaw
    PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
                   wind, title="Skewed, Kref=0",
                   fname="kref0_skew_beam.eps", control=control)
    control = Controller(copy.deepcopy(controlsim))
    control.goalyaw = gyaw
    control.Kbeta *= 0.0
    PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
                   wind, title="Skewed Simulation, no correction",
                   fname="kb0_skewed_beam.eps", control=control)
    # --- Upwind goal heading. ---
    gyaw = np.pi / 4.0
    controlsim = Physics()
    control = Controller(copy.deepcopy(controlsim))
    control.goalyaw = gyaw
    simple_ctrl = lambda i, t, tw, vw, tc, vc, yaw, om: \
        SimpleControl(i, t, tw, vw, tc, vc, yaw, om, control.goalyaw)
    PlotTrajectory(sim, simple_ctrl, control.goalyaw, wind,
                   title="Old Controller, upwind",
                   fname="old_upwind.eps")
    PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
                   wind, title="Nominal Conditions, upwind",
                   fname="full_nominal_upwind.eps")
    control = Controller(copy.deepcopy(controlsim))
    control.goalyaw = gyaw
    control.maxyawrefvel = -1.0
    PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
                   wind, title="No Ramp, upwind",
                   fname="full_nominal_upwind_noramp.eps")
    control = Controller(copy.deepcopy(controlsim))
    control.goalyaw = gyaw
    control.maxyawrefvel = 0.2
    control.maxyawrefacc = -1.0
    PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
                   wind, title="Inf accel ramp, upwind",
                   fname="inf_acc_ramp_upwind.eps")
    control = Controller(copy.deepcopy(controlsim))
    control.goalyaw = gyaw
    control.maxyawrefacc = 0.2
    control.Kbeta *= 0.0
    PlotTrajectory(sim, control.ControlMaxForce, control.goalyaw,
                   wind, title="Nominal Conditions, upwind, $K_\\beta = 0$",
                   fname="kb0_nominal_upwind.eps")
    plt.show()
    sys.exit()
    # NOTE(review): everything below sys.exit() is unreachable — older
    # debugging plots for the open-loop run at the top of this block.
    plt.figure()
    axxy = plt.subplot(111)
    axxy.plot(t, xs, 'b', label="x")
    axxy.plot(t, ys, 'g', label="y")
    axxy.plot(t, vxs, 'b*', label="vx")
    axxy.plot(t, vys, 'g*', label="vy")
    axang = axxy.twinx()
    axang.plot(t, yaws, 'c', label="yaw")
    axang.plot(t, omegas, 'y', label="omega")
    axang.plot(t, heels, 'r', label="heel")
    axang.plot(t, thetacs, 'm', label="Leeway")
    axang.plot(t, thetaws, 'k', label="Apparent Wind")
    axxy.legend(loc='upper left')
    axang.legend(loc='upper right')
    axxy.grid()
    axang.grid()
    xs, ys, vxs, vys, yaws, omegas, heels, thetacs, vcs, thetaws, vws,\
        deltasopt, deltaropt = sim.Run(
            wind, v0, omega0, heel0, control.ControlMaxForce, dt, niter, debugf=forces)
    forces.UpdateZero()
    plt.figure()
    plt.plot(xs, ys, 'o')
    plt.title("Overall X/Y")
    plt.savefig('circles_sim_starboard_turn.eps')
    plt.figure()
    axxy = plt.subplot(111, sharex=axxy, sharey=axxy)
    axxy.plot(t, xs, 'b', label="x")
    axxy.plot(t, ys, 'g', label="y")
    axxy.plot(t, vxs, 'b*', label="vx")
    axxy.plot(t, vys, 'g*', label="vy")
    axxy.plot(t, vcs, 'm--', label="vc")
    axxy.plot(t, vws, 'k--', label="vw")
    axxy.set_ylim([-2, 2])
    axang2 = axxy.twinx()
    axang2.get_shared_y_axes().join(axang, axang2)
    axang2.plot(t, yaws, 'c', label="yaw")
    axang2.plot(t, omegas, 'y', label="omega")
    axang2.plot(t, heels, 'r', label="heel")
    axang2.plot(t, thetacs, 'm', label="Leeway")
    axang2.plot(t, thetaws, 'k', label="Apparent Wind")
    axxy.legend(loc='upper left')
    axang2.legend(loc='upper right')
    axang2.set_ylim([-np.pi, np.pi])
    axxy.grid()
    axang2.grid()
    plt.figure()
    axopts = plt.subplot(111, sharex=axxy)
    plt.title("Controller values for deltas, deltar")
    axopts.plot(t, deltasopt, 'b', label="Sail Opt")
    axopts.plot(t,
                [-Norm(ds + w + np.pi) for ds, w in zip(deltasopt, thetaws)],
                'r', label="Sail Angle of Attack")
    axoptr = axopts.twinx()
    axoptr.plot(t, deltaropt, 'g', label="Rudder Opt")
    axoptr.legend(loc='upper right')
    axopts.legend(loc='upper left')
    plt.grid()
    plt.figure()
    axtau = plt.subplot(111, sharex=axxy)
    axtau.plot(t, forces.taunet, label="Net Torque")
    axtau.plot(t, forces.taus, label="Sail Torque")
    axtau.plot(t, forces.tauk, label="Keel Torque")
    axtau.plot(t, forces.taur, label="Rudder Torque")
    axtau.plot(t, forces.tauB, label="Damping Torque")
    axtau.set_ylim([-20, 20])
    axtau.legend()
    Fslon, Fslat = forces.Fslonlat()
    Fklon, Fklat = forces.Fklonlat()
    Frlon, Frlat = forces.Frlonlat()
    plt.figure()
    axflon = plt.subplot(211, sharex=axxy)
    plt.title('Longitudinal Forces')
    axflat = plt.subplot(212, sharex=axxy)
    plt.title('Lateral Forces')
    axflon.plot(t, forces.Flon, label="Net Longitudinal")
    axflon.plot(t, Fslon, label="Sail")
    axflon.plot(t, Fklon, label="Keel")
    axflon.plot(t, Frlon, label="Rudder")
    axflon.plot(t, forces.FBlon, label="Damping")
    axflon.set_ylim([-20, 20])
    axflon.legend()
    axflat.plot(t, forces.Flat, label="Net Lateral")
    axflat.plot(t, Fslat, label="Sail")
    axflat.plot(t, Fklat, label="Keel")
    axflat.plot(t, Frlat, label="Rudder")
    axflat.plot(t, forces.FBlat, label="Damping")
    axflat.set_ylim([-20, 20])
    axflat.legend()
    plt.show()
| 34.395276 | 117 | 0.596836 | 6,159 | 43,682 | 4.210099 | 0.121773 | 0.01157 | 0.007173 | 0.006479 | 0.379483 | 0.332433 | 0.300617 | 0.268261 | 0.236251 | 0.222021 | 0 | 0.036999 | 0.256284 | 43,682 | 1,269 | 118 | 34.42238 | 0.761166 | 0.179525 | 0 | 0.229834 | 0 | 0.001105 | 0.048413 | 0.005179 | 0 | 0 | 0 | 0.001576 | 0 | 1 | 0.047514 | false | 0 | 0.009945 | 0.00663 | 0.102762 | 0.001105 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d521ab84f8b14d9b5f58fe4cba2c88a22ff8f98f | 1,105 | py | Python | evechem_api/security/definitions.py | mylesgallagher/evechemapi | d096a2d13b84c3ac15fedf9795177c619f96a36d | [
"MIT"
] | null | null | null | evechem_api/security/definitions.py | mylesgallagher/evechemapi | d096a2d13b84c3ac15fedf9795177c619f96a36d | [
"MIT"
] | null | null | null | evechem_api/security/definitions.py | mylesgallagher/evechemapi | d096a2d13b84c3ac15fedf9795177c619f96a36d | [
"MIT"
] | null | null | null | from .base import BaseKey, BaseKeyControl
from .exceptions import KeyNotFound
from evechem_api.maps import application_map
from evechem_api.models import Error
class APIKey(BaseKey):
    """An API key tied to an operation, carrying a permission set and a name."""

    valid_permissions = [
        'master',
        'director',
        'manager',
        'auditor',
        'customer']

    def __init__(self, value, operation_id, permissions, name):
        super(APIKey, self).__init__(value, permissions)
        self.operation_id = operation_id
        self.name = name

    @classmethod
    def lookup(cls, key_value):
        """Return the APIKey whose value matches *key_value*.

        Raises KeyNotFound when no row in the key table matches.
        """
        key_model = application_map.Key
        session = application_map.Session()
        row = session.query(key_model).filter(
            key_model.value == key_value).one_or_none()
        if row is None:
            raise KeyNotFound("Key {} was not found.".format(key_value))
        return cls(
            value=row.value,
            operation_id=row.operation_id,
            permissions=[row.permission],
            name=row.name
        )
class APIKeyControl(BaseKeyControl):
    """Maps key-authentication failures onto (Error, HTTP status) pairs."""

    def auth_required(self):
        # 401: no credentials were supplied at all
        return Error('Authentication Required'), 401

    def unauthorized(self):
        # 403: credentials were supplied but rejected or insufficient
        return Error('Key was invalid or insufficient'), 403
| 24.555556 | 63 | 0.730317 | 148 | 1,105 | 5.243243 | 0.418919 | 0.030928 | 0.036082 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006494 | 0.163801 | 1,105 | 44 | 64 | 25.113636 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0.100452 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.105263 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d526df5a2127ccb12862e8d089efd56dff94d881 | 3,947 | py | Python | backup.py | alidevjimmy/backup.py | 863b9b06118891f880617f3f27c9ee8ece5e41b4 | [
"MIT"
] | null | null | null | backup.py | alidevjimmy/backup.py | 863b9b06118891f880617f3f27c9ee8ece5e41b4 | [
"MIT"
] | null | null | null | backup.py | alidevjimmy/backup.py | 863b9b06118891f880617f3f27c9ee8ece5e41b4 | [
"MIT"
] | null | null | null | import sys
import tkinter as tk
from tkinter import messagebox as tkMessageBox
import re
import subprocess
import datetime
import getpass
import tkinter.simpledialog
import os
# Location of the configuration file.
# The file defines TAG="value" lines; this version needs:
#   1- COMMAND     -> the command run to make the backup (rsync recommended)
#   2- PY3_COMMAND -> the command used to run Python 3 on this machine
CONF_DIR_PATH = "/etc/backup.py.conf"
# Location of the log file, for easier debugging.
LOG_DIR_PATH = "/var/log/backup.py.log"
# backup.py.db records whether a backup already ran today.
# It contains two kinds of lines:
#   1- Started DATE
#   2- Completed DATE
DB_DIR_PATH = "/var/log/backup.py.db"
# Date format used consistently throughout this script.
DATE_FORMAT = "%Y-%m-%d"
RUNNER_PATH = "/usr/local/bin/backup.py.run.sh"
BACKUP_PY_PATH = "/usr/local/bin/backup.py"
# check whether the current user can write under /etc, i.e. runs via sudo/root
def sudoOnly():
    """Probe for root privileges by opening a file under /etc for append."""
    try:
        # BUG FIX: the old code leaked the file handle; the context manager
        # closes the probe file deterministically
        with open("/etc/foo", 'a'):
            pass
    except IOError as _:
        # BUG FIX: the old log text was copy-pasted from the db-file handler
        # and had nothing to do with a failed privilege check
        pushLogs("\nNOTE: root privileges required, run this script with sudo")
# show a GUI alert dialog to the user
def alert(title, message):
    """Display a blocking info dialog without showing a main window."""
    tk.Tk().withdraw()
    tkMessageBox.showinfo(title, message)
# ask the user a yes/no question via a GUI dialog
def askQuestion(title, message):
    """Return True when the user clicks Yes, False otherwise."""
    return tkMessageBox.askyesno(title, message)
# Read a tag's value from the configuration file.
# Lines look like: COMMAND="rsync -axv SOURCE DEST"  (the tag here is COMMAND)
def readTagFromConf(tag):
    """Return the value of TAG="value" from CONF_DIR_PATH, or "" when absent.

    Logs a note via pushLogs when the tag is not found.
    """
    # BUG FIX: escape the tag so metacharacters in a tag name match literally
    regex = re.compile(r'^{}=\"(.*)\"'.format(re.escape(tag)))
    cmd = ""
    with open(CONF_DIR_PATH, 'r') as content:
        for line in content:
            found = regex.match(line)
            if found is not None:
                cmd = found.group(1)
                break
    if cmd == "":
        pushLogs("\ntag {} not found in {}" .format(tag, CONF_DIR_PATH))
    return cmd
# append a log entry to the log file at LOG_DIR_PATH
def pushLogs(log):
    """Append *log* verbatim to the log file."""
    with open(LOG_DIR_PATH, "a") as sink:
        sink.write(log)
# append a record to the state (db) file at DB_DIR_PATH
def pushToDB(message):
    """Append *message* verbatim to the db file."""
    with open(DB_DIR_PATH, "a") as sink:
        sink.write(message)
# checkTodayHaveBackup scans DB_DIR_PATH for a "Completed: <date>" entry dated today
def checkTodayHaveBackup():
    """Return True when the db file records a backup completed today."""
    have = False
    with open(DB_DIR_PATH, "r") as db:
        for entry in db:
            try:
                if entry.split(": ")[0] == "Completed":
                    date = entry.split(": ")[1]
                    y, m, d = date.split('-')
                    # round-trip through datetime normalizes zero-padding
                    date = datetime.datetime(int(y), int(
                        m), int(d)).strftime(DATE_FORMAT)
                    if date == datetime.datetime.today().strftime(DATE_FORMAT):
                        have = True
                        break
            # BUG FIX: malformed lines raise ValueError (bad int/unpack) or
            # IndexError (missing ": "), never EOFError — the old handler
            # could not fire and a corrupt db file crashed the run
            except (ValueError, IndexError):
                pushLogs(
                    "\nNOTE: delete all your {} file content".format(DB_DIR_PATH))
    return have
def getpwd():
    """Prompt for a password in a masked GUI dialog; returns str or None."""
    root = tk.Tk()
    root.withdraw()
    return tkinter.simpledialog.askstring("Password", "Enter password:", show='*')
# format a log entry with a separator line and a timestamp
def createLog(log):
    """Prefix *log* with a dashed separator and the current date/time."""
    separator = "-" * 20
    stamp = str(datetime.datetime.now())
    return "\n" + separator + "\nDate Time: " + stamp + "\n" + log
if __name__ == "__main__":
    # run at most one backup per calendar day
    if checkTodayHaveBackup() is False:
        if getpass.getuser() == "root":
            if askQuestion("Backup", "Do you want to get backup now?"):
                pushToDB("Started: {}\n".format(
                    datetime.datetime.today().strftime(DATE_FORMAT)))
                # run the configured backup command and capture its output
                log = subprocess.getoutput(readTagFromConf("COMMAND"))
                log = createLog(log)
                pushLogs(log)
                pushToDB("Completed: {}\n".format(
                    datetime.datetime.today().strftime(DATE_FORMAT)))
                print("Backup Completed!")
        else:
            # not root: ask for the sudo password and re-run ourselves elevated
            pwd = getpwd()
            # guard against the user cancelling the password dialog (None)
            if pwd is not None:
                # BUG FIX: sudo only reads a password from stdin with -S; the
                # old 'echo {pwd} | sudo ...' shell string never worked and
                # exposed the password to shell injection / the process list
                subprocess.run(["sudo", "-S", "python3", "backup.py"],
                               input=pwd + "\n", universal_newlines=True)
| 29.901515 | 91 | 0.613884 | 510 | 3,947 | 4.662745 | 0.352941 | 0.038267 | 0.026493 | 0.036585 | 0.151808 | 0.137511 | 0.084104 | 0.084104 | 0.045416 | 0.045416 | 0 | 0.003821 | 0.270585 | 3,947 | 131 | 92 | 30.129771 | 0.82216 | 0.221687 | 0 | 0.073171 | 0 | 0 | 0.144215 | 0.032121 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109756 | false | 0.036585 | 0.109756 | 0.012195 | 0.280488 | 0.012195 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5285bf4cb0c251133aab700e3e6910bf4b734e8 | 3,061 | py | Python | mlctl/plugins/sagemaker/SagemakerHosting.py | LaudateCorpus1/mlctl | 0a42035ce9c4999e1b3565ba41fe69d6d3552273 | [
"Apache-2.0"
] | 24 | 2021-06-25T04:31:10.000Z | 2022-01-12T12:53:42.000Z | mlctl/plugins/sagemaker/SagemakerHosting.py | srivathsanvc/mlctl | 0a42035ce9c4999e1b3565ba41fe69d6d3552273 | [
"Apache-2.0"
] | 14 | 2021-06-25T04:46:43.000Z | 2021-08-19T00:01:42.000Z | mlctl/plugins/sagemaker/SagemakerHosting.py | LaudateCorpus1/mlctl | 0a42035ce9c4999e1b3565ba41fe69d6d3552273 | [
"Apache-2.0"
] | 8 | 2021-07-16T18:14:17.000Z | 2022-02-24T08:05:39.000Z | from mlctl.interfaces.Hosting import Hosting
from mlctl.plugins.utils import parse_config
import boto3
class SagemakerHosting(Hosting):
    """SageMaker-backed implementation of the Hosting interface.

    Every public method returns the boto3 response on success and the
    stringified exception on failure; callers inspect the result.
    """

    def __init__(self, profile=None):
        # honour an explicit AWS profile before creating the client
        if profile:
            boto3.setup_default_session(profile_name=profile)
        self._client = boto3.client("sagemaker")

    @staticmethod
    def _drop_none(mapping):
        """Return a copy of *mapping* without None-valued entries."""
        return {key: value for key, value in mapping.items() if value is not None}

    def create(self, model_config):
        """Create a SageMaker model from *model_config*."""
        try:
            kwargs = parse_config(model_config, ["ModelName",
                                                 "PrimaryContainer",
                                                 "Container",
                                                 "InferenceExecutionConfig",
                                                 "ExecutionRoleArn",
                                                 "Tags",
                                                 "VpcConfig",
                                                 "EnableNetworkIsolation"])
            return self._client.create_model(**self._drop_none(kwargs))
        except Exception as e:
            return str(e)

    def deploy(self, endpoint_name, endpoint_config_name=None, endpoint_config=None, tags=None):
        """Create an endpoint, optionally creating its endpoint config first."""
        config_name = endpoint_config_name
        try:
            if endpoint_config:
                kwargs = parse_config(endpoint_config, ["EndpointConfigName",
                                                        "ProductionVariants",
                                                        "DataCaptureConfig",
                                                        "Tags",
                                                        "KmsKeyId"])
                response = self._client.create_endpoint_config(
                    **self._drop_none(kwargs))
                print(response)
                # the freshly created config supersedes any passed-in name
                config_name = kwargs.get("EndpointConfigName")
            endpoint_kwargs = {"EndpointName": endpoint_name,
                               "EndpointConfigName": config_name}
            if tags:
                endpoint_kwargs["Tags"] = tags
            return self._client.create_endpoint(**endpoint_kwargs)
        except Exception as e:
            return str(e)

    def undeploy(self, endpoint_name, endpoint_config_name=None):
        """Delete an endpoint and, when a name is given, its endpoint config."""
        message = "Successfully undeployed endpoint: " + endpoint_name
        try:
            self._client.delete_endpoint(EndpointName=endpoint_name)
            if endpoint_config_name:
                self._client.delete_endpoint_config(
                    EndpointConfigName=endpoint_config_name
                )
                message += "\nSuccessfully deleted endpoint config: " + endpoint_config_name
            return message
        except Exception as e:
            return str(e)

    def get_endpoint_info(self, endpoint_name):
        """Describe the endpoint named *endpoint_name*."""
        try:
            return self._client.describe_endpoint(EndpointName=endpoint_name)
        except Exception as e:
            return str(e)
| 43.112676 | 98 | 0.499837 | 250 | 3,061 | 5.888 | 0.276 | 0.11413 | 0.07337 | 0.048913 | 0.29144 | 0.29144 | 0.29144 | 0.23981 | 0.154891 | 0.154891 | 0 | 0.001738 | 0.436132 | 3,061 | 70 | 99 | 43.728571 | 0.851101 | 0 | 0 | 0.28125 | 0 | 0 | 0.08984 | 0.015028 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078125 | false | 0 | 0.046875 | 0 | 0.28125 | 0.015625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5285c58efb554dcd2bf8ff1ee7abffb6aec165d | 1,836 | py | Python | tests/test_helpers.py | pablocael/handwritten-number-generator | 931e61469d610c48c7a36d316845026926d72eb2 | [
"MIT"
] | null | null | null | tests/test_helpers.py | pablocael/handwritten-number-generator | 931e61469d610c48c7a36d316845026926d72eb2 | [
"MIT"
] | null | null | null | tests/test_helpers.py | pablocael/handwritten-number-generator | 931e61469d610c48c7a36d316845026926d72eb2 | [
"MIT"
] | null | null | null | import numpy as np
from number_generator import helpers
def test_calculate_binary_image_contents_bbox():
    # an all-background (empty) image yields a degenerate all-zero bbox
    img = np.zeros((28, 28), dtype=np.uint8)
    assert helpers.calculate_binary_image_contents_bbox(img) == (0, 0, 0, 0)
    # two lit pixels define the opposite corners of the interest region
    img = np.zeros((50, 50), dtype=np.uint8)
    img[10, 12] = 50
    img[45, 42] = 200
    assert helpers.calculate_binary_image_contents_bbox(img) == (12, 10, 42, 45)
    # two lit pixels in opposite corners give the full-image bbox
    img = np.zeros((100, 100), dtype=np.uint8)
    img[0, 0] = 50
    img[99, 99] = 200
    assert helpers.calculate_binary_image_contents_bbox(img) == (0, 0, 99, 99)
def test_zero_pad_centered_axis():
    # pad along axis 1 to a target width not divisible by two
    target_width = 111
    src_width = 50
    src_height = 28
    src = np.ones((src_height, src_width))
    padded = helpers.zero_pad_centered_axis(src, 1, target_width)
    assert padded.shape[1] == target_width
    # the source is all ones, so the contents bbox exposes the zero padding;
    # the relevant data must keep its original width after padding
    x0, _, x1, _ = helpers.calculate_binary_image_contents_bbox(padded)
    assert (x1 - x0) + 1 == src_width
    # the untouched axis must keep its original size
    assert padded.shape[0] == src_height
d5296fe11fece1d8f3fbfcbc77a7811a8729397b | 1,538 | py | Python | uva/452.py | btjanaka/competitive-programming-solutions | e3df47c18451802b8521ebe61ca71ee348e5ced7 | [
"MIT"
] | 3 | 2020-06-25T21:04:02.000Z | 2021-05-12T03:33:19.000Z | uva/452.py | btjanaka/competitive-programming-solutions | e3df47c18451802b8521ebe61ca71ee348e5ced7 | [
"MIT"
] | null | null | null | uva/452.py | btjanaka/competitive-programming-solutions | e3df47c18451802b8521ebe61ca71ee348e5ced7 | [
"MIT"
] | 1 | 2020-06-25T21:04:06.000Z | 2020-06-25T21:04:06.000Z | # Author: btjanaka (Bryon Tjanaka)
# Problem: (UVa) 452
# Title: Project Scheduling
# Link: https://uva.onlinejudge.org/index.php?option=com_onlinejudge&Itemid=8&page=show_problem&category=0&problem=393
# Idea: Shortest path algorithm in a DAG - find topological ordering then go
# through and relax all edges - O(E) time.
# Difficulty: easy
# Tags: DAG, topological-sort, shortest-path
import sys
from collections import defaultdict
from collections import deque
# Number of test cases; the following blank line is read and discarded.
ca = int(input())
input()
for caa in range(ca):
    # g: task -> set of successor tasks.
    # cost[v]: NEGATED duration, so the shortest path over negated costs
    #          corresponds to the longest (critical) path.
    # indeg[v]: number of prerequisites of v.
    g = defaultdict(set)
    cost = defaultdict(int)
    indeg = defaultdict(int)
    while True:
        try:
            line = input().strip()
        except EOFError:
            break
        # a blank line terminates this test case
        if line == "": break
        tokens = line.split()
        if len(tokens) == 2:
            # no prerequisites listed for this task
            v, c = tokens
            incoming = ""
        else:
            v, c, incoming = tokens
        cost[v] = -int(c)
        indeg[v] = len(incoming)
        # each character of `incoming` names one prerequisite task
        for u in incoming:
            g[u].add(v)
        g[v]  # touch the defaultdict so isolated tasks appear as graph nodes
    # topo sort (Kahn's algorithm): seed the queue with prerequisite-free tasks
    topo = []
    q = deque()
    dist = {u: 1 << 31 for u in g}
    for u in indeg:
        if indeg[u] == 0:
            q.append(u)
            dist[u] = cost[u]
    while len(q) > 0:
        u = q.popleft()
        topo.append(u)
        for v in g[u]:
            indeg[v] -= 1
            if indeg[v] == 0: q.append(v)
    # find min: relax every edge once in topological order (DAG shortest path);
    # since costs are negated, -min(dist) is the total project duration
    for u in topo:
        for v in g[u]:
            dist[v] = min(dist[v], dist[u] + cost[v])
    print(-min(dist.values()))
    # blank line between outputs, but not after the last test case
    if caa != ca - 1: print()
| 24.806452 | 118 | 0.535111 | 213 | 1,538 | 3.85446 | 0.441315 | 0.019488 | 0.029233 | 0.017052 | 0.019488 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016618 | 0.33485 | 1,538 | 61 | 119 | 25.213115 | 0.785924 | 0.252926 | 0 | 0.045455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.068182 | 0 | 0.068182 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d52e8f311e133d5943a854b889a728f1890959c9 | 6,563 | py | Python | model.py | yucicheung/AdaptiveReconNet | 953ad374150cbd488c468cc7c7d35a7409f8e92a | [
"MIT"
] | 6 | 2018-10-08T00:31:47.000Z | 2020-10-21T11:30:52.000Z | model.py | yucicheung/AdaptiveReconNet | 953ad374150cbd488c468cc7c7d35a7409f8e92a | [
"MIT"
] | 1 | 2019-01-18T10:32:41.000Z | 2019-02-18T07:28:47.000Z | model.py | yucicheung/AdaptiveReconNet | 953ad374150cbd488c468cc7c7d35a7409f8e92a | [
"MIT"
] | 4 | 2018-10-08T00:31:48.000Z | 2021-03-24T00:54:06.000Z | from utils import (
read_data,
input_setup,
imsave,
merge
)
import time
import os
import numpy as np
import tensorflow as tf
from math import ceil
class RECONNET(object):
    """ReconNet-style compressed-sensing reconstruction network (TF1 graph mode).

    Each 33x33 patch (1089 pixels) is flattened, compressed by a fully
    connected measurement layer (fc1), expanded back to 1089 values (fc2),
    then refined by two stacks of three convolutions.
    """

    def __init__(self,
                 sess,
                 image_size=33,
                 label_size=33,
                 batch_size=128,
                 c_dim=1,
                 measurement_rate=1e-1,
                 checkpoint_dir=None,
                 sample_dir=None):
        """Store hyper-parameters and build the graph.

        sess: an open tf.Session.
        measurement_rate: fraction of the 1089 pixels kept as measurements.
        """
        self.sess = sess
        self.is_grayscale = (c_dim == 1)
        self.image_size = image_size
        self.label_size = label_size
        self.batch_size = batch_size
        self.measurement_rate = measurement_rate
        self.c_dim = c_dim
        self.checkpoint_dir = checkpoint_dir
        self.sample_dir = sample_dir
        self.build_model()

    def build_model(self):
        """Create placeholders, variables, the prediction op and the MSE loss."""
        # 33 * 33 = 1089 pixels per patch; fc_size measurements are kept
        self.fc_size = int(ceil(self.measurement_rate * 1089))
        self.images = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, self.c_dim], name='images')
        self.labels = tf.placeholder(tf.float32, [None, self.label_size, self.label_size, self.c_dim], name='labels')
        self.weights = {
            'fc1w': tf.Variable(tf.random_normal([1089, self.fc_size], stddev=1e-2), name='fc1w'),
            'fc2w': tf.Variable(tf.random_normal([self.fc_size, 1089], stddev=1e-2), name='fc2w'),
            'w1': tf.Variable(tf.random_normal([11, 11, 1, 64], stddev=1e-1), name='w1'),
            'w2': tf.Variable(tf.random_normal([1, 1, 64, 32], stddev=1e-1), name='w2'),
            'w3': tf.Variable(tf.random_normal([7, 7, 32, 1], stddev=1e-1), name='w3'),
            'w4': tf.Variable(tf.random_normal([11, 11, 1, 64], stddev=1e-1), name='w4'),
            'w5': tf.Variable(tf.random_normal([1, 1, 64, 32], stddev=1e-1), name='w5'),
            'w6': tf.Variable(tf.random_normal([7, 7, 32, 1], stddev=1e-1), name='w6'),
        }
        self.biases = {
            # BUG FIX: fc1 outputs fc_size units, but the bias was hard-coded
            # to 109 (= ceil(0.1 * 1089)), which only matched
            # measurement_rate=0.1 and broke the add for every other rate.
            'fc1b': tf.Variable(tf.zeros([self.fc_size]), name='fc1b'),
            'fc2b': tf.Variable(tf.zeros([1089]), name='fc2b'),
            'b1': tf.Variable(tf.zeros([64]), name='b1'),
            'b2': tf.Variable(tf.zeros([32]), name='b2'),
            'b3': tf.Variable(tf.zeros([1]), name='b3'),
            'b4': tf.Variable(tf.zeros([64]), name='b4'),
            'b5': tf.Variable(tf.zeros([32]), name='b5'),
            'b6': tf.Variable(tf.zeros([1]), name='b6')
        }
        self.pred = self.model()
        # Loss function (MSE)
        self.loss = tf.reduce_mean(tf.square(self.labels - self.pred))
        self.saver = tf.train.Saver()

    def train(self, config):
        """Train on train.h5 patches, or reconstruct test.h5 when not training."""
        if config.is_train:
            input_setup(self.sess, config)
        else:
            nx, ny, pad_h, pad_w = input_setup(self.sess, config)
        if config.is_train:
            data_dir = os.path.join('./{}'.format(config.checkpoint_dir), "train.h5")
        else:
            data_dir = os.path.join('./{}'.format(config.checkpoint_dir), "test.h5")
        train_data, train_label = read_data(data_dir)
        # Stochastic gradient descent with momentum
        self.train_op = tf.train.MomentumOptimizer(config.learning_rate, 0.9).minimize(self.loss)
        tf.global_variables_initializer().run()
        counter = 0
        start_time = time.time()
        if self.load(self.checkpoint_dir):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")
        if config.is_train:
            print("Training...")
            # BUG FIX: xrange does not exist on Python 3; range works on both
            for ep in range(config.epoch):
                # Run by batch images
                batch_idxs = len(train_data) // config.batch_size
                for idx in range(0, batch_idxs):
                    batch_images = train_data[idx*config.batch_size : (idx+1)*config.batch_size]
                    batch_labels = train_label[idx*config.batch_size : (idx+1)*config.batch_size]
                    counter += 1
                    _, err = self.sess.run([self.train_op, self.loss], feed_dict={self.images: batch_images, self.labels: batch_labels})
                    if counter % 10 == 0:
                        print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]" \
                            % ((ep+1), counter, time.time()-start_time, err))
                    if counter % 500 == 0:
                        self.save(config.checkpoint_dir, counter)
        else:
            print("Testing...")
            result = self.pred.eval({self.images: train_data, self.labels: train_label})
            result = merge(result, [nx, ny])
            result = result.squeeze()
            # change back to original size (strip the padding added by input_setup)
            h, w = np.shape(result)
            result = result[0:(h-pad_h), 0:(w-pad_w)]
            image_path = os.path.join(os.getcwd(), config.sample_dir)
            image_path = os.path.join(image_path, "test.png")
            imsave(result, image_path)

    def model(self):
        """Forward pass: compress (fc1), recover (fc2), refine with two conv stacks."""
        flattenimg = tf.reshape(self.images, [-1, self.image_size * self.image_size * self.c_dim])
        fc1 = tf.matmul(flattenimg, self.weights['fc1w']) + self.biases['fc1b']
        fc2 = tf.matmul(fc1, self.weights['fc2w']) + self.biases['fc2b']
        fc2_reshape = tf.reshape(fc2, [-1, self.image_size, self.image_size, self.c_dim])
        conv1 = tf.nn.relu(tf.nn.conv2d(fc2_reshape, self.weights['w1'], strides=[1,1,1,1], padding='SAME') + self.biases['b1'])
        conv2 = tf.nn.relu(tf.nn.conv2d(conv1, self.weights['w2'], strides=[1,1,1,1], padding='SAME') + self.biases['b2'])
        conv3 = tf.nn.relu(tf.nn.conv2d(conv2, self.weights['w3'], strides=[1,1,1,1], padding='SAME') + self.biases['b3'])
        conv4 = tf.nn.relu(tf.nn.conv2d(conv3, self.weights['w4'], strides=[1, 1, 1, 1], padding='SAME') + self.biases['b4'])
        conv5 = tf.nn.relu(tf.nn.conv2d(conv4, self.weights['w5'], strides=[1, 1, 1, 1], padding='SAME') + self.biases['b5'])
        # final layer is linear (no ReLU): it emits the reconstructed patch
        conv6 = tf.nn.conv2d(conv5, self.weights['w6'], strides=[1, 1, 1, 1], padding='SAME') + self.biases['b6']
        return conv6

    def save(self, checkpoint_dir, step):
        """Save a checkpoint under <checkpoint_dir>/reconnet_<label_size>/."""
        model_name = "Reconnet.model"
        model_dir = "%s_%s" % ("reconnet", self.label_size)
        checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        self.saver.save(self.sess,
                        os.path.join(checkpoint_dir, model_name),
                        global_step=step)

    def load(self, checkpoint_dir):
        """Restore the latest checkpoint; return True on success."""
        print(" [*] Reading checkpoints...")
        model_dir = "%s_%s" % ("reconnet", self.label_size)
        checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
            return True
        else:
            return False
| 37.936416 | 127 | 0.607039 | 936 | 6,563 | 4.110043 | 0.199786 | 0.010398 | 0.049909 | 0.037432 | 0.377697 | 0.314791 | 0.240187 | 0.224071 | 0.224071 | 0.124253 | 0 | 0.042922 | 0.226116 | 6,563 | 172 | 128 | 38.156977 | 0.714511 | 0.014627 | 0 | 0.090909 | 0 | 0.007576 | 0.058029 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.045455 | 0 | 0.121212 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d534c630f8ec7158aa367faf059fd230fe16ae61 | 1,035 | py | Python | CamJam EduKit 2 - GPIO Zero/Code/5-PIR.py | CamJam-EduKit/EduKit2 | e7920420fa6b46233304ae57d8eb39255ade3e1e | [
"MIT"
] | 45 | 2016-02-03T21:59:28.000Z | 2021-11-14T02:24:02.000Z | CamJam EduKit 2 - GPIO Zero/Code/5-PIR.py | CamJam-EduKit/EduKit2 | e7920420fa6b46233304ae57d8eb39255ade3e1e | [
"MIT"
] | 7 | 2017-05-24T11:44:31.000Z | 2022-03-13T11:56:08.000Z | CamJam EduKit 2 - GPIO Zero/Code/5-PIR.py | CamJam-EduKit/EduKit2 | e7920420fa6b46233304ae57d8eb39255ade3e1e | [
"MIT"
] | 29 | 2016-02-13T13:37:54.000Z | 2021-04-28T16:43:49.000Z | # CamJam EduKit 2 - Sensors (GPIO Zero)
# Worksheet 5 - Movement
# Import Python header files
from gpiozero import MotionSensor
import time
# Set a variable to hold the GPIO Pin identity
pir = MotionSensor(17)
print("Waiting for PIR to settle")
pir.wait_for_no_motion()
print("PIR Module Test (CTRL-C to exit)")
# Variables to hold the current and last states
currentstate = False
previousstate = False
try:
# Loop until users quits with CTRL-C
while True:
# Read PIR state
currentstate = pir.motion_detected
# If the PIR is triggered
if currentstate == True and previousstate == False:
print(" Motion detected!")
# Record previous state
previousstate = True
# If the PIR has returned to ready state
elif currentstate == False and previousstate == True:
print(" No Motion")
previousstate = False
# Wait for 10 milliseconds
time.sleep(0.01)
except KeyboardInterrupt:
print(" Quit")
| 25.243902 | 61 | 0.651208 | 129 | 1,035 | 5.193798 | 0.581395 | 0.080597 | 0.026866 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012113 | 0.282126 | 1,035 | 40 | 62 | 25.875 | 0.889637 | 0.32657 | 0 | 0.1 | 0 | 0 | 0.143066 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d534d4c11072c8911ea392648daa8cbe0a59680b | 5,014 | py | Python | match.py | skysea04/Scard | 11c30084398860bc31326c56424d6794806245eb | [
"Apache-2.0"
] | 5 | 2021-08-05T16:06:39.000Z | 2021-11-20T04:52:02.000Z | match.py | skysea04/Scard | 11c30084398860bc31326c56424d6794806245eb | [
"Apache-2.0"
] | null | null | null | match.py | skysea04/Scard | 11c30084398860bc31326c56424d6794806245eb | [
"Apache-2.0"
] | 5 | 2021-07-29T03:05:26.000Z | 2022-03-08T14:02:34.000Z | import sys, random, math, time, json
from datetime import date, timedelta
import threading
from models.model import db, User, Scard, cache
import mysql.connector
from app import app, mysql_host, mysql_user, mysql_password, mysql_database
# bind the Flask app to the SQLAlchemy instance
db.__init__(app)
# NOTE: the bare string literals below are no-op markers ("測試區" = "test area")
'''
測試區
'''
start_time = time.time()
today = date.today()
yesterday = today - timedelta(days=1)
# dby = day before yesterday; used to purge stale, unfriended matches
dby = yesterday - timedelta(days=1)
'''
測試區
'''
# create test accounts in bulk
def create_user():
    """Insert batches of 1000 dummy users using worker threads."""
    def add_user(f_id, l_id):
        # each worker opens its own connection (mysql connections are not thread-safe)
        user_db = mysql.connector.connect(
            host = mysql_host,
            user = mysql_user,
            password = mysql_password,
            database = mysql_database
        )
        user_cursor = user_db.cursor()
        for i in range(f_id, l_id):
            # BUG FIX: the VALUES clause had 10 %s placeholders for 9 columns
            # and 9 values, so mysql.connector raised on every execute()
            sql = 'INSERT INTO user (email, password, name, collage, department, gender, birthday, verify_status, days_no_open_scard) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)'
            val = (f'test{i}@test.com', '123', f'測試人員{i}', 'test collage', 'test department', 'male', date(1996,5,23), 'scard', 0)
            user_cursor.execute(sql, val)
            print(i)
        user_db.commit()
    # starting offset (in blocks of 1000 ids)
    n = 0
    thread_num = 1
    threads = []
    for i in range(thread_num):
        threads.append(threading.Thread(target=add_user, args= ((i+n)*1000+1, (i+1+n) * 1000+1)))
        threads[i].start()
    for i in range(thread_num):
        threads[i].join()
# bump the "days without opening a scard" counter for eligible users
def update_no_scard_days():
    """Increment days_no_open_scard for users with id > 1000 and counter <= 3."""
    eligible = User.query.filter(User.days_no_open_scard <= 3, User.id > 1000)
    eligible.update({User.days_no_open_scard: User.days_no_open_scard + 1})
    db.session.commit()
# build today's matches (multi-threaded)
def match_user_method():
    """Pair each eligible user with a random, not-previously-matched partner.

    A slot in user_list is set to 0 once its user has been paired this round.
    """
    new_db = mysql.connector.connect(
        host = mysql_host,
        user = mysql_user,
        password = mysql_password,
        database = mysql_database
    )
    new_cursor = new_db.cursor()
    # drop yesterday's pairs that never became friends
    new_cursor.execute('DELETE FROM scard WHERE is_friend IS False AND create_date=%s', (dby,))
    # build the list of users drawing cards this round; test account id=1 always participates
    new_cursor.execute('UPDATE user SET days_no_open_scard=0 WHERE id=1')
    new_db.commit()
    user_list, matches_list = [], []
    new_cursor.execute('SELECT id, match_list FROM user WHERE verify_status="scard" AND days_no_open_scard <= 3')
    all_users = new_cursor.fetchall()
    for user in all_users:
        user_list.append(user[0])
        matches_list.append(json.loads(user[1]))
    new_db.close()
    # if the participant count is odd, drop the first (test) account to make it even
    user_count = len(user_list)
    if user_count % 2 != 0:
        del user_list[0]
        del matches_list[0]
        user_count -= 1
    # print(user_count)
    # pairing worker: handles slots [first_index, end_index) of user_list
    def matching(first_index, end_index):
        # print(first_index, end_index)
        scard_db = mysql.connector.connect(
            host = mysql_host,
            user = mysql_user,
            password = mysql_password,
            database = mysql_database
        )
        cursor = scard_db.cursor()
        for user_index in range(first_index, end_index):
            user_id = user_list[user_index]
            match_list = matches_list[user_index]
            # user_id == 0 means this slot was already paired this round; skip it
            if user_id == 0: continue
            # draw a random partner; the partner's index is always greater (>) than the user's
            match_index = random.randrange(user_index + 1, end_index)
            match_id = user_list[match_index]
            # redraw while the candidate is already paired (0) or appears in this
            # user's previous-match history
            # NOTE(review): if no valid candidate remains in this slice, the
            # while loop never terminates — confirm an exit strategy exists
            while (match_id == 0) or (match_id in match_list):
                match_index = random.randrange(user_index + 1, end_index)
                match_id = user_list[match_index]
            print('user_id: ', user_id, ', match_id: ', match_id)
            cursor.execute('UPDATE user SET match_list=JSON_ARRAY_APPEND(match_list, "$" , %s) WHERE id=%s'%(match_id, user_id))
            cursor.execute('INSERT INTO scard (user_1, user_2) VALUES (%s, %s)'%(user_id, match_id))
            # mark both slots as paired
            user_list[user_index] = 0
            user_list[match_index] = 0
        scard_db.commit()
        scard_db.close()
        return 'ok'
    # with few participants, run the pairing inline
    if user_count <= 10000:
        matching(0, user_count)
    # above 10000 participants, split the work across 10 threads
    else:
        group_user_count = math.ceil(user_count / 10)
        # print(group_user_count)
        # make sure each group's size is also even
        if (group_user_count % 2) != 0:
            group_user_count += 1
        threads = []
        # always run exactly 10 threads
        for i in range(10):
            if i == 9:
                threads.append(threading.Thread(target=matching, args= (i*group_user_count, user_count)))
            else:
                threads.append(threading.Thread(target=matching, args= (i*group_user_count, (i+1)*group_user_count)))
            threads[i].start()
        for i in range(10):
            threads[i].join()
# create test accounts (disabled)
# create_user()
# bump days-without-opening-a-scard counters (disabled)
# update_no_scard_days()
# build today's matches (multi-threaded)
match_user_method()
end_time = time.time()
print(f'共花{end_time-start_time}秒') | 30.573171 | 182 | 0.6071 | 663 | 5,014 | 4.339367 | 0.239819 | 0.046924 | 0.008342 | 0.009732 | 0.278067 | 0.219673 | 0.219673 | 0.188043 | 0.188043 | 0.184567 | 0 | 0.021535 | 0.277623 | 5,014 | 164 | 183 | 30.573171 | 0.772501 | 0.086957 | 0 | 0.271845 | 0 | 0.009709 | 0.131068 | 0.018756 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048544 | false | 0.048544 | 0.058252 | 0 | 0.116505 | 0.029126 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5372720a1be5cab4ade8dc5c07291e11eb0a610 | 825 | py | Python | github_releaser/cmd/asset.py | dfurtado/github-releaser | 6b4fc35d1abc6ce3d2e5ab1441d3df0ad482bf65 | [
"MIT"
] | 2 | 2020-07-03T09:44:08.000Z | 2020-07-03T13:10:48.000Z | github_releaser/cmd/asset.py | dfurtado/github-releaser | 6b4fc35d1abc6ce3d2e5ab1441d3df0ad482bf65 | [
"MIT"
] | 2 | 2020-07-21T10:50:17.000Z | 2020-08-03T11:07:22.000Z | github_releaser/cmd/asset.py | dfurtado/github-releaser | 6b4fc35d1abc6ce3d2e5ab1441d3df0ad482bf65 | [
"MIT"
] | null | null | null | import click
import os
from github_releaser import GithubReleaser
@click.command(name="upload-assets", help="Upload assets to a existent release")
@click.option("--account", "--a", required=True, help="Account")
@click.option("--repository", "--r", required=True, help="Repository")
@click.option("--token", help="GitHub's API token")
@click.option("--tag-name", "--t", required=True, help="The release tag")
@click.argument("assets", nargs=-1, type=str)
def upload_assets(account, repository, token, tag_name, assets):
access_token = token or os.getenv("GITHUB_TOKEN", None)
if not access_token:
print(
"access token is required. Use --token or set GITHUB_TOKEN in our environment"
)
gh = GithubReleaser(account, repository, access_token)
gh.upload_assets(tag_name, assets)
| 34.375 | 90 | 0.700606 | 111 | 825 | 5.117117 | 0.432432 | 0.084507 | 0.084507 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001422 | 0.147879 | 825 | 23 | 91 | 35.869565 | 0.806543 | 0 | 0 | 0 | 0 | 0 | 0.289697 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.176471 | 0 | 0.235294 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d53971f07f775a3aa711042218ce3a65fcaf552c | 2,355 | py | Python | decisionTrees/decisionTree/partii/Gain.py | evamy/mlBucket | 15fb9687a4c853edeaace23e752de069214f3cf3 | [
"MIT"
] | null | null | null | decisionTrees/decisionTree/partii/Gain.py | evamy/mlBucket | 15fb9687a4c853edeaace23e752de069214f3cf3 | [
"MIT"
] | null | null | null | decisionTrees/decisionTree/partii/Gain.py | evamy/mlBucket | 15fb9687a4c853edeaace23e752de069214f3cf3 | [
"MIT"
] | null | null | null | ##
# Gain Calculation
# for implementing decision trees
#
# @author antriksh
# Version 1: 09/16/2017
from Dataset import Dataset
import pandas as pd
import numpy as np
import random
class Gain():
    """Entropy / information-gain helpers for building decision trees."""

    def __init__(self):
        pass

    def entropy(self, dataset):
        """Return the binary entropy H(S) of the dataset, rounded to 8 decimals.

        Empty datasets and pure (single-class) datasets have entropy 0.
        """
        if dataset.isEmpty():
            return 0.0
        posCount, negCount, totalCount = dataset.getCount()
        # Guard the p*log2(p) terms: lim p->0 of p*log2(p) is 0
        if posCount == 0.0:
            probPos = 0.0
        else:
            probPos = posCount / totalCount
        if negCount == 0.0:
            probNeg = 0.0
        else:
            probNeg = negCount / totalCount
        if probPos == 0.0:
            posTerm = 0.0
        else:
            posTerm = (probPos * np.log2(probPos))
        if probNeg == 0.0:
            negTerm = 0.0
        else:
            negTerm = (probNeg * np.log2(probNeg))
        H = - (posTerm + negTerm)
        return float("{0:.8f}".format(H))

    def informationGain(self, dataset, attribute):
        """ID3 information gain of *attribute*: H(S) - sum_v (|S_v|/|S|) H(S_v)."""
        HS = self.entropy(dataset)
        _, _, parentCount = dataset.getCount()
        infoGain = HS
        for value in [0, 1]:
            subset = Dataset(data=dataset.select(
                attribute, value).data, attribute=attribute)
            if not subset.isEmpty():
                _, _, subsetCount = subset.getCount()
                # BUG FIX: the old code subtracted (pos/total)*H + (neg/total)*H
                # == 1.0 * H(S_v), i.e. the child entropy UNWEIGHTED, which is
                # not the ID3 formula and could produce negative "gains".
                # Weight each child's entropy by its share of the parent set.
                infoGain -= (subsetCount / parentCount) * self.entropy(subset)
        return infoGain

    def bestInfoGain(self, dataset):
        """Return the attribute with the highest information gain (ties: last wins)."""
        attributes = list(dataset.x.columns)
        maxGain = float("-inf")
        maxGainAttr = None
        for attribute in attributes:
            gain = self.informationGain(dataset, attribute)
            if gain >= maxGain:
                maxGain = gain
                maxGainAttr = attribute
        return maxGainAttr

    def randomSelect(self, dataset):
        """Return a uniformly random attribute of the remaining attributes."""
        attributes = list(dataset.x.columns)
        return random.choice(attributes)
| 23.787879 | 77 | 0.539278 | 249 | 2,355 | 5.084337 | 0.365462 | 0.014218 | 0.018957 | 0.028436 | 0.135861 | 0.048973 | 0.048973 | 0.048973 | 0 | 0 | 0 | 0.027609 | 0.369427 | 2,355 | 98 | 78 | 24.030612 | 0.824916 | 0.16518 | 0 | 0.113208 | 0 | 0 | 0.003786 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09434 | false | 0.018868 | 0.075472 | 0 | 0.283019 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d53e4eb300c1cc0390929d5bc39197d9c1362656 | 667 | py | Python | tests/getnet/services/token/test_card_number.py | rafagonc/getnet-py | d2a5278b497408b5245d5d0fecd2e424f4ddb0d5 | [
"MIT"
] | null | null | null | tests/getnet/services/token/test_card_number.py | rafagonc/getnet-py | d2a5278b497408b5245d5d0fecd2e424f4ddb0d5 | [
"MIT"
] | null | null | null | tests/getnet/services/token/test_card_number.py | rafagonc/getnet-py | d2a5278b497408b5245d5d0fecd2e424f4ddb0d5 | [
"MIT"
] | null | null | null | import unittest
from getnet.services.token import CardNumber
class CardNumberTest(unittest.TestCase):
    """Unit tests for the ``CardNumber`` value object."""

    def testInvalidCardNumber(self):
        """A card number that fails validation must raise AttributeError."""
        with self.assertRaises(AttributeError):
            CardNumber("123", "123")

    def testInvalidCustomerId(self):
        """A customer id longer than 100 characters must raise AttributeError."""
        with self.assertRaises(AttributeError):
            CardNumber("5155901222280001", "a" * 101)

    def testAsDict(self):
        """``as_dict`` must expose the card_number and customer_id keys."""
        # Renamed from ``object`` to stop shadowing the builtin of that name.
        card_number = CardNumber("5155901222280001", "customer_21081826")
        self.assertDictEqual(
            {"card_number": "5155901222280001", "customer_id": "customer_21081826"},
            card_number.as_dict(),
        )
if __name__ == "__main__":
    # Allow running this test module directly with `python test_card_number.py`.
    unittest.main()
| 26.68 | 84 | 0.661169 | 58 | 667 | 7.37931 | 0.586207 | 0.037383 | 0.056075 | 0.11215 | 0.224299 | 0.224299 | 0 | 0 | 0 | 0 | 0 | 0.141473 | 0.226387 | 667 | 24 | 85 | 27.791667 | 0.687985 | 0 | 0 | 0.117647 | 0 | 0 | 0.178411 | 0 | 0 | 0 | 0 | 0 | 0.176471 | 1 | 0.176471 | false | 0 | 0.117647 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d53f47f532735f17bfb2f4eb7d8d52537f38dd98 | 935 | py | Python | inference_converter/utils/argparser.py | mzeynali/dl-model-converter | 3adff16661254f29a4e9b2d76402ba9b064d3d97 | [
"Apache-2.0"
] | null | null | null | inference_converter/utils/argparser.py | mzeynali/dl-model-converter | 3adff16661254f29a4e9b2d76402ba9b064d3d97 | [
"Apache-2.0"
] | null | null | null | inference_converter/utils/argparser.py | mzeynali/dl-model-converter | 3adff16661254f29a4e9b2d76402ba9b064d3d97 | [
"Apache-2.0"
] | null | null | null | import sys
import json
import argparse
import sys
def _initialize():
parser = argparse.ArgumentParser(
description="General config loader."
)
parser.add_argument(
"-c",
"--config_file",
help="Address of the config file."
)
return parser
def _get_options(parser, args=sys.argv[1:]):
options = parser.parse_args(args)
return options
def parse_options(config_path=None):
    """Load and return the JSON configuration as a dict.

    If *config_path* is None the path is taken from the ``-c/--config_file``
    command-line option; when no path is available at all, the parser help is
    printed and the process exits with status 1.
    """
    # NOTE(review): import-path side effect kept for compatibility — confirm
    # callers still rely on it.
    sys.path.append('../')
    if config_path is None:
        parser = _initialize()
        options = _get_options(parser)
        config_file = options.config_file
        if config_file is None:
            print('Please provide the config file.')
            parser.print_help()
            # sys.exit is explicit and works even where site's exit() helper
            # is unavailable (e.g. under `python -S` or in frozen apps).
            sys.exit(1)
    else:
        config_file = config_path
    with open(config_file) as json_file:
        parameters = json.load(json_file)
    return parameters
| 20.777778 | 52 | 0.627807 | 110 | 935 | 5.136364 | 0.381818 | 0.159292 | 0.046018 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00149 | 0.282353 | 935 | 44 | 53 | 21.25 | 0.840537 | 0 | 0 | 0.058824 | 0 | 0 | 0.104925 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088235 | false | 0 | 0.117647 | 0 | 0.294118 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5434871e6b9c2f656f5e120340151b549c7af58 | 12,703 | py | Python | three_wolves/envs/contact_cube_env.py | 42jaylonw/rrc_2021_three_wolves | f5b8c1589f14c6b8455f438cbb62ed74e9ad8551 | [
"BSD-3-Clause"
] | null | null | null | three_wolves/envs/contact_cube_env.py | 42jaylonw/rrc_2021_three_wolves | f5b8c1589f14c6b8455f438cbb62ed74e9ad8551 | [
"BSD-3-Clause"
] | null | null | null | three_wolves/envs/contact_cube_env.py | 42jaylonw/rrc_2021_three_wolves | f5b8c1589f14c6b8455f438cbb62ed74e9ad8551 | [
"BSD-3-Clause"
] | 1 | 2022-01-05T11:40:32.000Z | 2022-01-05T11:40:32.000Z | import time
import gym
import numpy as np
import pybullet
from trifinger_simulation import TriFingerPlatform, visual_objects
from trifinger_simulation.tasks import move_cube_on_trajectory as task
from three_wolves.envs.base_cube_env import ActionType, BaseCubeTrajectoryEnv
from three_wolves.envs.utilities.env_utils import HistoryWrapper, resetCamera
from three_wolves.deep_whole_body_controller import position_controller, contact_planner
from three_wolves.deep_whole_body_controller.utility import pinocchio_utils, reward_utils, trajectory
class ContactControlEnv(BaseCubeTrajectoryEnv):
    """Move-cube-on-trajectory environment whose RL action chooses contact
    points; a position controller then executes the grasp and tracking."""

    def render(self, mode='human'):
        # Rendering is handled by the pybullet GUI when ``visualization`` is
        # enabled; nothing extra to do here.
        pass

    def __init__(self, goal_trajectory, visualization, randomization, evaluation=False, history_num=1, robot_type='sim'):
        """Build the simulated environment.

        Args:
            goal_trajectory: fixed goal trajectory, or None to sample one.
            visualization: open the pybullet GUI and draw the goal marker.
            randomization: randomize cube mass/friction and the step size.
            evaluation: accumulate the evaluation score into ``info``.
            history_num: number of past observations kept by the observer.
            robot_type: 'sim' or 'real', forwarded to the kinematics model.
        """
        super(ContactControlEnv, self).__init__(
            goal_trajectory=goal_trajectory,
            action_type=ActionType.POSITION,
            step_size=3)
        self.visualization = visualization
        self.randomization = randomization
        self.evaluation = evaluation
        self.observer = HistoryWrapper(history_num)
        self.kinematics = pinocchio_utils.Kinematics(robot_type)
        self.contact_planner = contact_planner.ContactPlanner()
        self.position_controller = position_controller.PositionController(self.kinematics,
                                                                          self.observer, self.step_size)
        self.max_episode = task.EPISODE_LENGTH
        self.tip_force_offset = []
        # create observation space
        spaces = TriFingerPlatform.spaces
        self.observation_space = gym.spaces.Box(
            low=np.hstack([
                spaces.object_position.gym.low,  # cube position
                [-2 * np.pi] * 3,  # cube rpy
                spaces.object_position.gym.low,  # goal position
                [-0.3] * 3,  # goal-cube difference
                [0]  # goal-cube distance
            ]),
            high=np.hstack([
                spaces.object_position.gym.high,  # cube position
                [2 * np.pi] * 3,  # cube rpy
                spaces.object_position.gym.high,  # goal position
                [0.3] * 3,  # goal-cube difference
                [1]  # goal-cube distance
            ])
        )
        # The policy action space is defined by the contact planner.
        self.action_space = self.contact_planner.action_space

    def reset(self):
        """Reset the environment.

        Rebuilds the platform with a randomized cube pose, optionally
        randomizes dynamics, and returns the first observation.
        """
        # hard-reset simulation
        self.goal_marker = None
        del self.platform
        # initialize simulation
        initial_robot_position = (
            TriFingerPlatform.spaces.robot_position.default
        )
        # initialize cube at the centre (small random xy offset and yaw)
        _random_obj_xy_pos = np.random.uniform(
            low=[-0.04] * 2,
            high=[0.04] * 2,
        )
        _random_obj_yaw_ori = np.random.uniform(-2 * np.pi, 2 * np.pi)
        _random_obj_yaw_ori = pybullet.getQuaternionFromEuler([0, 0, _random_obj_yaw_ori])
        random_object_pose = task.move_cube.Pose(
            position=[_random_obj_xy_pos[0],
                      _random_obj_xy_pos[1],
                      task.INITIAL_CUBE_POSITION[2]],
            orientation=_random_obj_yaw_ori
        )
        self.platform = TriFingerPlatform(
            visualization=self.visualization,
            initial_robot_position=initial_robot_position,
            initial_object_pose=random_object_pose,
        )
        if self.randomization:
            # Domain randomization: perturb cube mass/friction and the
            # control step size.
            cube_id = self.platform.cube._object_id
            random_mass = 0.094*np.random.uniform(0.9, 1.1)
            random_lateral_friction = 1*np.random.uniform(0.9, 1)
            random_step_size = np.random.randint(1, 6)
            pybullet.changeDynamics(cube_id, -1, mass=random_mass, lateralFriction=random_lateral_friction)
            self.step_size = random_step_size
        # get goal trajectory
        # NOTE(review): the local name 'trajectory' shadows the imported
        # ``trajectory`` module inside this method.
        if self.goal is None:
            trajectory = task.sample_goal()
        else:
            trajectory = self.goal
        # visualize the goal
        if self.visualization:
            self.goal_marker = visual_objects.CubeMarker(
                width=task.move_cube._CUBE_WIDTH,
                position=trajectory[0][1],
                orientation=(0, 0, 0, 1),
                pybullet_client_id=self.platform.simfinger._pybullet_client_id,
            )
            resetCamera()
        self.info = {"time_index": -1, "trajectory": trajectory, "eval_score": 0}
        self.step_count = 0
        self.drop_times = 0
        self.tip_force_offset = []
        # initial step so that a first observation exists
        robot_action = self._gym_action_to_robot_action(self._initial_action)
        t = self.platform.append_desired_action(robot_action)
        self.info["time_index"] = t
        self.step_count += 1
        obs, _ = self._create_observation(self.info["time_index"])
        return obs

    def _create_observation(self, t):
        """Build the flat RL observation vector and the named observation
        dict for time index *t*; the dict is also pushed to the observer."""
        robot_observation = self.platform.get_robot_observation(t)
        camera_observation = self.platform.get_camera_observation(t)
        object_observation = camera_observation.filtered_object_pose
        active_goal = np.asarray(
            task.get_active_goal(self.info["trajectory"], t)
        )
        cube_pos = object_observation.position
        cube_orn = pybullet.getEulerFromQuaternion(object_observation.orientation)
        finger_pos = self.kinematics.forward_kinematics(robot_observation.position)
        obs_dict = {
            "joint_position": robot_observation.position,  # joint position
            "joint_velocity": robot_observation.velocity,  # joint velocity
            "joint_torque": robot_observation.torque,  # joint torque
            "tip_force": robot_observation.tip_force,  # tip force
            "object_position": cube_pos,  # cube position
            "object_rpy": cube_orn,  # cube orientation
            "goal_position": active_goal,  # goal position
            "object_goal_distance": active_goal - cube_pos,  # cube to goal distance
            "tip_0_position": finger_pos[0],  # tri-finger position 0
            "tip_1_position": finger_pos[1],  # tri-finger position 1
            "tip_2_position": finger_pos[2],  # tri-finger position 2
        }
        self.observer.update(obs_dict)
        rl_obs = np.hstack([
            cube_pos,  # cube position
            cube_orn,  # cube rpy
            active_goal,  # goal position
            active_goal - cube_pos,  # goal-cube difference
            np.linalg.norm(active_goal - cube_pos)  # goal-cube distance
        ])
        return rl_obs, obs_dict

    def _internal_step(self, action):
        """Advance the platform by one robot step; returns the time index."""
        self.step_count += 1
        # send action to robot
        robot_action = self._gym_action_to_robot_action(action)
        t = self.platform.append_desired_action(robot_action)
        # update goal visualization
        if self.visualization:
            goal_position = task.get_active_goal(self.info["trajectory"], t)
            self.goal_marker.set_state(goal_position, (0, 0, 0, 1))
            time.sleep(0.001)
        return t

    def apply_action(self, action):
        """Interpolate from the current joint position to *action* over
        ``step_size`` robot steps, applying each intermediate waypoint."""
        tg = trajectory.get_interpolation_planner(init_pos=self.observer.dt['joint_position'],
                                                  tar_pos=action,
                                                  start_time=0,
                                                  reach_time=self.step_size)
        for i in range(self.step_size):
            if self.step_count >= self.max_episode:
                break
            _action = tg(i + 1)
            t = self._internal_step(_action)
            self.info["time_index"] = t
            _, obs_dict = self._create_observation(self.info["time_index"])
            if self.evaluation:
                # Accumulate the official per-step evaluation score.
                eval_score = self.compute_reward(
                    obs_dict["object_position"],
                    obs_dict["goal_position"],
                    self.info,
                )
                self.info['eval_score'] += eval_score
        # return score

    def update(self, policy_action):
        """Recompute contact points for *policy_action*, push them to the
        position controller, and remember the goal active at that moment."""
        self._last_goal = self.observer.dt['goal_position']
        contact_face_ids, contact_points = self.contact_planner.compute_contact_points(policy_action)
        self.position_controller.update(contact_points, contact_face_ids)

    def step(self, policy_action):
        """Run one policy step: plan contacts, reach the cube, then track it
        until the cube is dropped or the episode length is exhausted."""
        self.update(policy_action)
        self.position_controller.tips_reach(self.apply_action, self.tip_force_offset)
        reward = 0
        while not self.Dropped() and not self.step_count >= self.max_episode:
            # Re-plan when the active goal changed mid-episode.
            if (self._last_goal != self.observer.dt['goal_position']).all():
                self.update(policy_action)
            cur_phase_action = self.position_controller.get_action()
            self.apply_action(cur_phase_action)
            reward += self.position_controller.get_reward() * 0.001 * self.step_size
        self.drop_times += 1
        # Training: episode ends after 3 drops; evaluation: run full length.
        done = self.drop_times >= 3 or self.step_count >= self.max_episode
        if self.evaluation:
            done = self.step_count >= self.max_episode
        return self._create_observation(self.info["time_index"])[0], reward, done, self.info

    def Dropped(self):
        """Heuristic drop detection: fewer than two tips register force above
        their calibrated offset, or any tip is > 8 cm from the cube centre."""
        tip_touch = np.subtract(self.observer.dt['tip_force'], self.tip_force_offset[0]) > 0
        cube_pos = np.array(self.observer.dt['object_position'])
        tri_distance = [reward_utils.ComputeDist(self.observer.dt['tip_0_position'], cube_pos),
                        reward_utils.ComputeDist(self.observer.dt['tip_1_position'], cube_pos),
                        reward_utils.ComputeDist(self.observer.dt['tip_2_position'], cube_pos)]
        is_dropped = np.sum(tip_touch) < 2 or any(np.array(tri_distance) > 0.08)
        return is_dropped
class RealContactControlEnv(ContactControlEnv):
    """Variant of ContactControlEnv that drives the real TriFinger platform
    frontend instead of the pybullet simulation."""

    def __init__(self,
                 goal_trajectory):
        super().__init__(goal_trajectory=goal_trajectory,
                         visualization=False,
                         evaluation=False,
                         randomization=False,
                         robot_type='real')
        self.max_episode = task.EPISODE_LENGTH

    def _internal_step(self, action):
        """Advance the real platform by one robot step (no visualization)."""
        self.step_count += 1
        # send action to robot
        robot_action = self._gym_action_to_robot_action(action)
        t = self.platform.append_desired_action(robot_action)
        return t

    def step(self, policy_action):
        """As ``ContactControlEnv.step``, but without the simulated reward
        shaping; the returned reward is always 0 on hardware."""
        if self.platform is None:
            raise RuntimeError("Call `reset()` before starting to step.")
        self.update(policy_action)
        self.position_controller.tips_reach(self.apply_action, self.tip_force_offset)
        reward = 0
        while not self.Dropped() and not self.step_count >= self.max_episode:
            if list(self._last_goal) != list(self.observer.dt['goal_position']):
                self.update(policy_action)
            cur_phase_action = self.position_controller.get_action()
            self.apply_action(cur_phase_action)
            # reward += self.position_controller.get_reward() * 0.001 * self.step_size
        # self.drop_times += 1
        done = self.step_count >= self.max_episode
        return self._create_observation(self.info["time_index"])[0], reward, done, self.info

    def reset(self):
        """Connect to the robot frontend (allowed only once) and run roughly
        one second of initial actions to populate the observer."""
        # Hardware-only dependency, imported lazily so simulation runs do not
        # require it.
        import robot_fingers
        # cannot reset multiple times
        if self.platform is not None:
            raise RuntimeError(
                "Once started, this environment cannot be reset."
            )
        self.platform = robot_fingers.TriFingerPlatformWithObjectFrontend()
        # get goal trajectory
        if self.goal is None:
            trajectory = task.sample_goal()
        else:
            trajectory = self.goal
        self.info = {"time_index": -1, "trajectory": trajectory}
        self.step_count = 0
        # initial step: ~1 s of actions given the 1 ms robot step
        for i in range(int(1./(0.001*self.step_size))):
            robot_action = self._gym_action_to_robot_action(self._initial_action)
            t = self.platform.append_desired_action(robot_action)
            self.info["time_index"] = t
            self.step_count += 1
        obs, _ = self._create_observation(self.info["time_index"])
        return obs
if __name__ == '__main__':
    # Smoke-test: run the simulated environment with a fixed mid-range
    # contact action until the episode budget is exhausted.
    env = ContactControlEnv(goal_trajectory=None,
                            visualization=True,
                            randomization=False)
    observation = env.reset()
    is_done = False
    t = 0
    while t < env.max_episode:
        observation, score, is_done, info = env.step([0.5 + 0.25 / 2, 0.25 / 2, 0.75 + 0.2 / 2,
                                                      0.5, 0.5, 0.5])
        print("eval_score:", score)
        t += 0.001 * env.step_size
        if is_done:
            env.reset()
| 41.64918 | 121 | 0.615288 | 1,462 | 12,703 | 5.055404 | 0.156635 | 0.031119 | 0.021107 | 0.023001 | 0.380598 | 0.346773 | 0.31146 | 0.280476 | 0.247057 | 0.247057 | 0 | 0.015077 | 0.295127 | 12,703 | 304 | 122 | 41.786184 | 0.810364 | 0.065103 | 0 | 0.280488 | 0 | 0 | 0.04844 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052846 | false | 0.004065 | 0.044715 | 0 | 0.138211 | 0.004065 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d54c508466e035352ddb860546e3c55067122dc2 | 599 | py | Python | setup.py | K0lb3/texgenpack_py | 28951dd2eb18c1d84483910eaf29846e7ecdfc33 | [
"Zlib"
] | null | null | null | setup.py | K0lb3/texgenpack_py | 28951dd2eb18c1d84483910eaf29846e7ecdfc33 | [
"Zlib"
] | null | null | null | setup.py | K0lb3/texgenpack_py | 28951dd2eb18c1d84483910eaf29846e7ecdfc33 | [
"Zlib"
] | null | null | null | import os
from setuptools import Extension, setup
# Cython is optional: when it is not installed, fall back to building from
# pre-generated C sources (cythonize stays None).
try:
    from Cython.Build import cythonize
except ImportError:
    cythonize = None
def ALL_C(folder, exclude=()):
    """Return ``folder``-relative paths of every ``.c`` file in *folder*.

    Parameters
    ----------
    folder : str
        Directory to scan (non-recursive).
    exclude : iterable of str
        File names to skip.  The default is now an immutable tuple — the
        previous mutable ``[]`` default is a classic Python pitfall.
    """
    return [
        '/'.join([folder, f])
        for f in os.listdir(folder)
        if f.endswith('.c') and f not in exclude
    ]
# Extension definition: the texgen Cython wrapper plus every C source from
# the bundled texgenpack library.
extensions = [
    Extension(
        name="texgenpy",
        sources=[
            "texgen.pyx",
            *ALL_C('texgenpack'),
        ],
        include_dirs=[
            "texgenpack",
        ],
    )
]

# Translate the .pyx source to C only when Cython is available.
if cythonize:
    extensions = cythonize(extensions)

setup(ext_modules=extensions)
| 17.617647 | 46 | 0.559265 | 63 | 599 | 5.253968 | 0.619048 | 0.024169 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002457 | 0.320534 | 599 | 33 | 47 | 18.151515 | 0.810811 | 0 | 0 | 0.074074 | 0 | 0 | 0.068447 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.148148 | 0.037037 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d54cca8c5b64ee4952f55cc7414cf0f7c89b54c0 | 4,976 | py | Python | app/main/views.py | Amin1014/blogs-1 | 7bb51e270385877edb2e0819b503dde512c27b6d | [
"MIT"
] | null | null | null | app/main/views.py | Amin1014/blogs-1 | 7bb51e270385877edb2e0819b503dde512c27b6d | [
"MIT"
] | null | null | null | app/main/views.py | Amin1014/blogs-1 | 7bb51e270385877edb2e0819b503dde512c27b6d | [
"MIT"
] | null | null | null | from flask import render_template,redirect,url_for,abort,request,flash
from app.main import main
from .forms import UpdateProfile,CreateBlog
from flask_login import login_required,current_user
from ..email import mail_message
from app.models import User,Blog,Comment,Follower
from ..import db
from app.requests import get_quotes
@main.route('/')
@login_required
def index():
    """Render the landing page with all blogs and the fetched quotes.

    The former ``request.method == "POST"`` branch was dead code: this route
    only accepts GET, and the ``Follower`` it constructed was never saved.
    Subscriptions are handled by the dedicated ``subscribe`` view.
    """
    quotes = get_quotes()
    blogs = Blog.query.all()
    return render_template("index.html",
                           blogs=blogs,
                           quotes=quotes)
def save_picture(form_picture):
    """Return the directory where profile pictures are stored.

    NOTE(review): *form_picture* is unused and nothing is written to disk —
    this looks like a stub; confirm whether the uploaded file should actually
    be persisted here.
    """
    picture_path =('app/static/photos')
    return picture_path
@main.route('/new_post', methods=['POST', 'GET'])
@login_required
def new_blog():
    """Create a new blog post and notify every subscriber by e-mail.

    GET renders the empty form; a valid POST saves the post, mails all
    followers and redirects to the index.  The success flash used to fire on
    every GET/invalid request because it sat *after* the ``if`` block — it
    now fires only when a post was actually created.
    """
    followers = Follower.query.all()
    form = CreateBlog()
    if form.validate_on_submit():
        title = form.title.data
        content = form.content.data
        user_id = current_user._get_current_object().id
        blog = Blog(title=title, content=content, user_id=user_id)
        blog.save()
        for follower in followers:
            mail_message("New Blog Post", "email/new_blog", follower.email, blog=blog)
        flash('You Posted a new Blog')
        return redirect(url_for('main.index'))
    return render_template('newblogs.html', form=form)
@main.route('/blog/<id>')
@login_required
def blog(id):
    """Display a single blog post together with all of its comments."""
    post = Blog.query.get(id)
    post_comments = Comment.query.filter_by(blog_id=id).all()
    return render_template('blog.html', blog=post, comments=post_comments)
@main.route('/blog/<blog_id>/update',methods = ['GET','POST'])
@login_required
def updatedblog(blog_id):
    """Let the author edit an existing post; anyone else gets HTTP 403."""
    blog = Blog.query.get(blog_id)
    if blog.user != current_user:
        abort(403)
    form = CreateBlog()
    if form.validate_on_submit():
        # Valid POST: persist the edit and return to the post page.
        blog.title = form.title.data
        blog.content = form.content.data
        db.session.commit()
        flash("You have updated your Blog!")
        return redirect(url_for('main.blog',id = blog.id))
    if request.method == 'GET':
        # Pre-fill the form with the post's current contents.
        form.title.data = blog.title
        form.content.data = blog.content
    return render_template('newblogs.html', form = form)
@main.route('/blog/<blog_id>/delete', methods=["DELETE"])
@login_required
def delete_post(blog_id):
    """Delete a post; only its author may do so.

    Fixes over the previous version: ``filter_by`` was called with a
    positional argument (a TypeError at runtime — it takes keyword criteria
    only), the row was deleted and committed *before* the ownership check,
    and it was then deleted a second time via ``blog.delete()``.
    """
    blog = Blog.query.get(blog_id)
    if blog is None:
        abort(404)
    if blog.user != current_user:
        # Authorization must come before any destructive operation.
        abort(403)
    db.session.delete(blog)
    db.session.commit()
    flash("blog deleted")
    return redirect(url_for('main.index'))
@main.route('/user/<string:username>')
@login_required
def user_posts(username):
    """List every blog post written by *username*."""
    author = User.query.filter_by(username=username).first()
    authored_blogs = Blog.query.filter_by(user=author)
    return render_template('post.html', blogs=authored_blogs, user=author)
@main.route('/subscribe', methods=['POST', 'GET'])
@login_required
def subscribe():
    """Register the submitted e-mail as a follower and send a welcome mail.

    Also fixes the user-facing flash typo ("Sucessfuly" -> "Successfully").
    """
    email = request.form.get('follower')
    new_follower = Follower(email=email)
    new_follower.save_follower()
    # Confirmation e-mail to the new subscriber.
    mail_message("Subscribed to Blog-1", "email/follower", new_follower.email, new_follower=new_follower)
    flash('Successfully subscribed!')
    return redirect(url_for('main.index'))
@main.route('/profile',methods = ['POST','GET'])
@login_required
def profile():
    """Show and update the logged-in user's own profile."""
    form = UpdateProfile()
    if form.validate_on_submit():
        if form.profile_pic.data:
            # NOTE(review): save_picture currently returns a constant
            # directory and does not store the upload — verify.
            picture_file = save_picture(form.profile_pic.data)
            current_user.profile_pic_path = picture_file
        current_user.username = form.username.data
        current_user.email = form.email.data
        current_user.bio = form.bio.data
        db.session.commit()
        flash('Succesfully updated your profile')
        return redirect(url_for('main.profile'))
    elif request.method == 'GET':
        # Pre-fill the form with the current profile values.
        form.username.data = current_user.username
        form.email.data = current_user.email
        form.bio.data = current_user.bio
    profile_pic_path = url_for('static',filename = 'photos/'+ current_user.profile_pic_path)
    return render_template('profile/profile.html', profile_pic_path=profile_pic_path, form = form)
@main.route('/user/<name>/updateprofile', methods=['POST', 'GET'])
@login_required
def updateprofile(name):
    """Update the bio of the user called *name*; 404 when unknown.

    Uses ``is None`` instead of the previous ``== None`` identity-comparison
    anti-pattern.
    """
    form = UpdateProfile()
    user = User.query.filter_by(username=name).first()
    if user is None:
        abort(404)
    if form.validate_on_submit():
        user.bio = form.bio.data
        user.save()
        return redirect(url_for('.profile', name=name))
    return render_template('profile/updateprofile.html', form=form)
@main.route('/comment/<blog_id>', methods=['POST', 'GET'])
@login_required
def comment(blog_id):
    """Attach a new comment from the current user to the given post.

    Normalizes the HTTP method spelling ('Post' -> 'POST'; Flask uppercases
    methods internally, but the mixed case was misleading) and renames the
    local variable that shadowed this view function's own name.
    """
    blog = Blog.query.get(blog_id)
    comment_text = request.form.get('newcomment')
    new_comment = Comment(comment=comment_text, user_id=current_user._get_current_object().id, blog_id=blog_id)
    new_comment.save()
    return redirect(url_for('main.blog', id=blog.id))
d54e7671cd752c4f6814416f2822aa1db9d9be69 | 380 | py | Python | debufftracker/__main__.py | nstatz/PoEDebuffTracker | 5129c57e2fa9e43d820dec3eb0f44dddbd49c860 | [
"MIT"
] | null | null | null | debufftracker/__main__.py | nstatz/PoEDebuffTracker | 5129c57e2fa9e43d820dec3eb0f44dddbd49c860 | [
"MIT"
] | null | null | null | debufftracker/__main__.py | nstatz/PoEDebuffTracker | 5129c57e2fa9e43d820dec3eb0f44dddbd49c860 | [
"MIT"
] | null | null | null | import os
from debufftracker import screen_tools
# Resolve the project root (the parent of this package) so that relative
# resource paths used by the tracker resolve correctly.
current_dir = os.path.dirname( os.path.abspath(__file__))
project_dir = os.path.join(current_dir, os.path.pardir)
# set project source folder as working directory
os.chdir(project_dir)

if __name__ == "__main__":
    # Start the screen tracker: build status instances, then run its loop.
    screentracker = screen_tools.ScreenTracker()
    screentracker.create_status_instances()
    screentracker.run()
d55090df04a7b080af6bf93b66774f6acfb91c99 | 1,231 | py | Python | bonus_top_interview_questions/315. Count of Smaller Numbers After Self.py | JacopoPan/leetcode-top100-liked-questions | 03dc05f087d05805d54b7585ce740338f3128833 | [
"MIT"
] | null | null | null | bonus_top_interview_questions/315. Count of Smaller Numbers After Self.py | JacopoPan/leetcode-top100-liked-questions | 03dc05f087d05805d54b7585ce740338f3128833 | [
"MIT"
] | null | null | null | bonus_top_interview_questions/315. Count of Smaller Numbers After Self.py | JacopoPan/leetcode-top100-liked-questions | 03dc05f087d05805d54b7585ce740338f3128833 | [
"MIT"
] | null | null | null | """
Runtime: 3284 ms, faster than 74.29% of Python3 online submissions for Count of Smaller Numbers After Self.
Memory Usage: 33.2 MB, less than 76.39% of Python3 online submissions for Count of Smaller Numbers After Self.
"""
from typing import List
from typing import Optional
class Solution:
    """Merge-sort based counter: for each element, count the strictly
    smaller elements appearing to its right (LeetCode 315)."""

    def countSmaller(self, nums: List[int]) -> List[int]:
        """Return counts where counts[i] = |{j > i : nums[j] < nums[i]}|."""
        self.nums = nums
        self.ans = [0] * len(nums)
        self.sortIndices(list(range(len(nums))))
        return self.ans

    def sortIndices(self, indexes):
        """Merge-sort *indexes* by their value in ``self.nums`` (ascending),
        accumulating right-side smaller counts into ``self.ans``."""
        mid = len(indexes) // 2
        if mid > 0:
            left = self.sortIndices(indexes[:mid])
            right = self.sortIndices(indexes[mid:])
            # Merge from the back, placing the largest remaining value last;
            # ties go to the right half so equal values are never counted.
            for pos in range(len(indexes) - 1, -1, -1):
                take_left = not right or (
                    left and self.nums[left[-1]] > self.nums[right[-1]]
                )
                if take_left:
                    # Everything still in `right` is strictly smaller and sits
                    # to the right of this element in the original array.
                    self.ans[left[-1]] += len(right)
                    indexes[pos] = left.pop()
                else:
                    indexes[pos] = right.pop()
        return indexes
def main():
    """Quick manual check against the LeetCode example input."""
    sol = Solution()
    print('Output:', sol.countSmaller([5,2,6,1]))
    print('Expected:', [2,1,1,0])


if __name__ == "__main__":
    main()
| 33.27027 | 110 | 0.561332 | 161 | 1,231 | 4.236025 | 0.385093 | 0.046921 | 0.096774 | 0.076246 | 0.173021 | 0.173021 | 0.173021 | 0.173021 | 0.173021 | 0.173021 | 0 | 0.041812 | 0.300569 | 1,231 | 36 | 111 | 34.194444 | 0.75029 | 0.177092 | 0 | 0 | 0 | 0 | 0.023881 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.071429 | 0 | 0.285714 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5511838b4b4d80eeeb07f7837865dc3b34ed429 | 1,130 | py | Python | util/Tool.py | EmeryWan/GradeEntry | 3c4e27588e714df8fe26b29758a961830f9e770d | [
"MIT"
] | 3 | 2019-07-31T13:09:52.000Z | 2019-09-30T10:26:03.000Z | util/Tool.py | EmeryWan/GradeEntry | 3c4e27588e714df8fe26b29758a961830f9e770d | [
"MIT"
] | null | null | null | util/Tool.py | EmeryWan/GradeEntry | 3c4e27588e714df8fe26b29758a961830f9e770d | [
"MIT"
] | null | null | null | import inspect
from singleton.AboutViewSingleton import AboutViewSingle
def is_num(num):
try:
float(num)
return True
except BaseException:
return False
def colname_to_colnum(colname):
if type(colname) is not str:
return colname
col = 0
power = 1
for i in range(len(colname) - 1, -1, -1):
ch = colname[i]
col += (ord(ch) - ord('A') + 1) * power
power *= 26
return col
def colnum_to_colname(colnum):
    """Convert a 1-based column number to its spreadsheet name ('A', 'AA', ...).

    Anything that is not a non-negative integer (per ``str(x).isdigit()``)
    is returned unchanged.
    """
    if not str(colnum).isdigit():
        return colnum
    remaining = int(colnum)
    letters = []
    # Bijective base-26: shift by one before each divmod so that a remainder
    # of 0 maps to 'A' and 25 maps to 'Z'.
    while remaining > 0:
        remaining, offset = divmod(remaining - 1, 26)
        letters.append(chr(ord('A') + offset))
    return ''.join(reversed(letters))
def get_current_fun_name():
    """Return the name of the function that called this helper."""
    # Equivalent to inspect.stack()[1][3] but without building the full
    # stack summary.
    caller = inspect.currentframe().f_back
    return caller.f_code.co_name
def show_error_page():
    """Display the About view singleton and switch it into its error state."""
    AboutViewSingle.instance().show()
    AboutViewSingle.instance().show_error()
| 21.320755 | 56 | 0.546903 | 140 | 1,130 | 4.335714 | 0.4 | 0.092257 | 0.059308 | 0.036244 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043593 | 0.330089 | 1,130 | 52 | 57 | 21.730769 | 0.758256 | 0.00885 | 0 | 0.15 | 0 | 0 | 0.002683 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.05 | 0.025 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d553ade15a4eefdbc36f33e88c8c183a77abed27 | 696 | py | Python | Tkinter_Scripts/tkinter-dropdown01.py | anupam-sy/Python-GUI | 5017b5deccd5f7a592922907aba1ff8b062b9b0a | [
"MIT"
] | 1 | 2020-04-25T01:20:01.000Z | 2020-04-25T01:20:01.000Z | Tkinter_Scripts/tkinter-dropdown01.py | anupam-sy/Python-GUI | 5017b5deccd5f7a592922907aba1ff8b062b9b0a | [
"MIT"
] | null | null | null | Tkinter_Scripts/tkinter-dropdown01.py | anupam-sy/Python-GUI | 5017b5deccd5f7a592922907aba1ff8b062b9b0a | [
"MIT"
] | null | null | null | # Implementation of Dropdown in tkinter
from tkinter import *
# Main application window.
root = Tk()
root.title("Dropdown Implementation")
root.geometry("200x50")


def response():
    """Callback invoked when the "Hello!" menu entry is selected."""
    print("Button clicked.")


# Create a menubar and attach it to the root window so Tk displays it.
menubar = Menu(root)
root.config(menu=menubar)
# Top-level entries added with add_command act like buttons rather than
# opening submenus.
menubar.add_command(label="Hello!", command=response)
menubar.add_command(label="Quit!", command=root.quit)

# Enter the Tk event loop (blocks until the window closes).
root.mainloop()

"""
Note: Pulldown menus (and other submenus) are created in a similar fashion.
The main difference is that they are attached to a parent menu
(using add_cascade), instead of a toplevel window.
See the example: tkinter-dropdown02.py
"""
| 22.451613 | 75 | 0.752874 | 101 | 696 | 5.158416 | 0.60396 | 0.057582 | 0.065259 | 0.084453 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011725 | 0.142241 | 696 | 30 | 76 | 23.2 | 0.860972 | 0.183908 | 0 | 0 | 0 | 0 | 0.168712 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.181818 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d55987fd12bdc575cb7211586969ce4f4d999611 | 11,031 | py | Python | chaos_genius/core/rca/rca_utils/api_utils.py | chaos-genius/chaos_genius | b5eadb6c38b5d449e54889a167c9034f6cec6009 | [
"MIT"
] | 320 | 2022-02-18T18:18:42.000Z | 2022-03-31T16:42:38.000Z | chaos_genius/core/rca/rca_utils/api_utils.py | chaos-genius/chaos_genius | b5eadb6c38b5d449e54889a167c9034f6cec6009 | [
"MIT"
] | 115 | 2022-02-18T16:39:01.000Z | 2022-03-31T15:23:52.000Z | chaos_genius/core/rca/rca_utils/api_utils.py | chaos-genius/chaos_genius | b5eadb6c38b5d449e54889a167c9034f6cec6009 | [
"MIT"
] | 18 | 2022-02-18T18:44:01.000Z | 2022-03-10T08:33:34.000Z | """Utility functions for RCA API endpoints."""
import logging
from datetime import date, datetime, timedelta
from typing import List
from chaos_genius.databases.models.anomaly_data_model import AnomalyDataOutput
from chaos_genius.extensions import db
from chaos_genius.controllers.kpi_controller import get_kpi_data_from_id
from chaos_genius.core.rca.constants import TIME_RANGES_BY_KEY
from chaos_genius.databases.models.rca_data_model import RcaData
from chaos_genius.utils.datetime_helper import (
convert_datetime_to_timestamp,
get_datetime_string_with_tz,
get_lastscan_string_with_tz,
get_rca_date_from_string,
)
from sqlalchemy import func, and_
logger = logging.getLogger(__name__)
def kpi_aggregation(kpi_id, timeline="last_30_days"):
    """Get KPI aggregation data.

    Returns a ``(status, message, final_data)`` tuple: the latest stored
    aggregation values for the KPI/timeline, the recent anomalous-point
    count, and related analysis dates.  On any failure ``status`` is
    "error" and ``final_data`` carries placeholder '-' values.
    """
    final_data = {}
    status = "success"
    message = ""
    try:
        kpi_info = get_kpi_data_from_id(kpi_id)
        end_date = get_rca_output_end_date(kpi_info)
        # Latest stored aggregation row for this KPI/timeline.
        data_point = (
            RcaData.query.filter(
                (RcaData.kpi_id == kpi_id)
                & (RcaData.data_type == "agg")
                & (RcaData.timeline == timeline)
                & (RcaData.end_date <= end_date)
            )
            .order_by(RcaData.created_at.desc())
            .first()
        )
        # NOTE(review): this attribute access runs before the `if data_point`
        # check below, so a missing row raises AttributeError here and the
        # explicit "No data found" ValueError is unreachable.
        rca_end_date = data_point.end_date
        # Count anomalies in the week leading up to (and one day past) the
        # RCA end date.
        anomaly_data_point = AnomalyDataOutput.query.filter(
            (AnomalyDataOutput.kpi_id == kpi_id)
            & (AnomalyDataOutput.anomaly_type == "overall")
            & (AnomalyDataOutput.is_anomaly != 0)
            & (AnomalyDataOutput.data_datetime <= rca_end_date + timedelta(days=1))
            & (AnomalyDataOutput.data_datetime >= rca_end_date - timedelta(days=7))
        ).count()
        if data_point:
            analysis_date = get_analysis_date(kpi_id, end_date)
            final_data = {
                "aggregation": [
                    {
                        "label": "group1_value",
                        "value": data_point.data["group1_value"],
                    },
                    {
                        "label": "group2_value",
                        "value": data_point.data["group2_value"],
                    },
                    {
                        "label": "difference",
                        "value": data_point.data["difference"],
                    },
                    {
                        "label": "perc_change",
                        "value": data_point.data["perc_change"],
                    },
                    {
                        "label": "anomalous_points",
                        "value": anomaly_data_point,
                    },
                ],
                "analysis_date": get_datetime_string_with_tz(analysis_date),
                "timecuts_date": get_timecuts_dates(analysis_date, timeline),
                "last_run_time_rca": get_lastscan_string_with_tz(
                    kpi_info["scheduler_params"]["last_scheduled_time_rca"]
                ),
                "anomalous_points_str": "Last 7 Days",
            }
        else:
            raise ValueError("No data found")
    except Exception as err:  # noqa: B902
        logger.error(f"Error in KPI aggregation retrieval: {err}", exc_info=1)
        status = "error"
        message = str(err)
        # Placeholder payload so the frontend can still render the cards.
        final_data = {
            "aggregation": [
                {
                    "label": "group1_value",
                    "value": "-",
                },
                {
                    "label": "group2_value",
                    "value": "-",
                },
                {
                    "label": "difference",
                    "value": "-",
                },
                {
                    "label": "perc_change",
                    "value": "-",
                },
            ],
            "analysis_date": "",
        }
    return status, message, final_data
def kpi_line_data(kpi_id, download=False):
    """Get KPI line data.

    Returns ``(status, message, final_data)`` where ``final_data`` is the
    latest stored line series.  Dates are converted to timestamps for the
    API response, or kept as date objects when *download* is True.
    """
    final_data = []
    status = "success"
    message = ""
    try:
        kpi_info = get_kpi_data_from_id(kpi_id)
        end_date = get_rca_output_end_date(kpi_info)
        # Latest stored line-chart row for this KPI.
        data_point = (
            RcaData.query.filter(
                (RcaData.kpi_id == kpi_id)
                & (RcaData.data_type == "line")
                & (RcaData.end_date <= end_date)
            )
            .order_by(RcaData.created_at.desc())
            .first()
        )
        if not data_point:
            raise ValueError("No data found.")
        final_data = data_point.data
        if not download:
            # API response: epoch timestamps for the charting frontend.
            for row in final_data:
                row["date"] = convert_datetime_to_timestamp(
                    get_rca_date_from_string(row["date"])
                )
        else:
            # CSV download: keep parsed date objects.
            for row in final_data:
                row["date"] = get_rca_date_from_string(row["date"])
    except Exception as err:  # noqa: B902
        logger.error(f"Error in KPI Line data retrieval: {err}", exc_info=1)
        status = "error"
        message = str(err)
    return status, message, final_data
def rca_analysis(kpi_id, timeline="last_30_days", dimension=None):
    """Get RCA analysis data.

    Returns ``(status, message, final_data)`` with the latest stored RCA
    chart/table for the given KPI, timeline and dimension (None means the
    overall analysis).  On failure an empty chart/table skeleton is
    returned so the frontend still renders.
    """
    final_data = {}
    status = "success"
    message = ""
    try:
        kpi_info = get_kpi_data_from_id(kpi_id)
        end_date = get_rca_output_end_date(kpi_info)
        data_point = (
            RcaData.query.filter(
                (RcaData.kpi_id == kpi_id)
                & (RcaData.data_type == "rca")
                & (RcaData.timeline == timeline)
                & (RcaData.end_date <= end_date)
                & (RcaData.dimension == dimension)
            )
            .order_by(RcaData.created_at.desc())
            .first()
        )
        if data_point:
            final_data = data_point.data
            final_data["analysis_date"] = get_datetime_string_with_tz(
                get_analysis_date(kpi_id, end_date)
            )
        else:
            raise ValueError("No data found.")
    except Exception as err:  # noqa: B902
        logger.error(f"Error in RCA Analysis retrieval: {err}", exc_info=1)
        status = "error"
        message = str(err)
        # Empty skeleton mirroring the success payload shape.
        final_data = {
            "chart": {
                "chart_data": [],
                "y_axis_lim": [],
                "chart_table": [],
            },
            "data_table": [],
            "analysis_date": "",
        }
    return status, message, final_data
def rca_hierarchical_data(kpi_id, timeline="last_30_days", dimension=None):
    """Return the most recent RCA hierarchical table for a KPI.

    :param kpi_id: ID of the KPI.
    :param timeline: timeline key the table was computed for.
    :param dimension: optional dimension the table was computed for.
    :return: tuple of ``(status, message, payload)``.
    """
    status, message = "success", ""
    payload = {}
    try:
        kpi_info = get_kpi_data_from_id(kpi_id)
        end_date = get_rca_output_end_date(kpi_info)
        latest = (
            RcaData.query.filter(
                (RcaData.kpi_id == kpi_id)
                & (RcaData.data_type == "htable")
                & (RcaData.timeline == timeline)
                & (RcaData.end_date <= end_date)
                & (RcaData.dimension == dimension)
            )
            .order_by(RcaData.created_at.desc())
            .first()
        )
        if latest is None:
            raise ValueError("No data found.")
        payload = latest.data
        payload["analysis_date"] = get_datetime_string_with_tz(
            get_analysis_date(kpi_id, end_date)
        )
    except Exception as err:  # noqa: B902
        logger.error(
            f"Error in RCA hierarchical table retrieval: {err}", exc_info=1
        )
        status = "error"
        message = str(err)
        payload = {"data_table": [], "analysis_date": ""}
    return status, message, payload
def rca_hierarchical_data_all_dims(kpi_id, timeline="last_30_days"):
    """Get RCA hierarchical data for all dimensions.

    Returns a tuple of (status, message, list of per-dimension payloads),
    where each payload is the latest "htable" data for one dimension,
    annotated with its ``analysis_date`` and ``dimension``.
    """
    final_data_list = {}
    status = "success"
    message = ""
    try:
        kpi_info = get_kpi_data_from_id(kpi_id)
        end_date = get_rca_output_end_date(kpi_info)
        # Per-dimension latest creation time, used below to pick exactly one
        # (the newest) row per dimension.
        # NOTE(review): this subquery is only filtered by kpi_id, not by
        # data_type/timeline/end_date like the outer query — if a newer row
        # of a different data_type or timeline exists for a dimension, the
        # join may drop that dimension entirely. Confirm intended behavior.
        subq = (
            db.session.query(
                RcaData.dimension,
                func.max(RcaData.created_at).label("latest_created_at"),
            )
            .filter(RcaData.kpi_id == kpi_id)
            .group_by(RcaData.dimension)
            .subquery()
        )
        data_points = (
            db.session.query(RcaData)
            .filter(
                (RcaData.kpi_id == kpi_id)
                & (RcaData.data_type == "htable")
                & (RcaData.timeline == timeline)
                & (RcaData.end_date <= end_date)
            )
            # Keep only rows matching their dimension's latest created_at.
            .join(
                subq,
                and_(
                    RcaData.dimension == subq.c.dimension,
                    RcaData.created_at == subq.c.latest_created_at,
                ),
            )
            .all()
        )
        final_data_list = []
        if data_points:
            for data_point in data_points:
                final_data = data_point.data
                final_data["analysis_date"] = get_datetime_string_with_tz(
                    get_analysis_date(kpi_id, end_date)
                )
                final_data["dimension"] = data_point.dimension
                final_data_list.append(final_data)
        else:
            raise ValueError("No data found.")
    except Exception as err:  # noqa: B902
        logger.error(f"Error in RCA hierarchical table retrieval: {err}", exc_info=1)
        status = "error"
        message = str(err)
        final_data_list = []
    return status, message, final_data_list
def get_rca_output_end_date(kpi_info: dict) -> date:
    """Return the end date up to which RCA output should be considered.

    Static KPIs may pin an explicit ``end_date`` ("YYYY-MM-DD" string) in
    their ``static_params``; in every other case today's date is used.
    """
    configured = (
        kpi_info["static_params"].get("end_date")
        if kpi_info["is_static"]
        else None
    )
    if configured is None:
        return datetime.today().date()
    return datetime.strptime(configured, "%Y-%m-%d").date()
def get_analysis_date(kpi_id: int, end_date: date) -> date:
    """Return the date of the most recent RCA "line" data point for a KPI.

    :param kpi_id: ID of the KPI.
    :param end_date: upper bound for the RCA output end date.
    :raises ValueError: if no line data exists (or the latest data point is
        empty). Previously this surfaced as an opaque ``IndexError`` from
        ``final_data[-1]``; the explicit ValueError matches the "No data
        found." message used by the sibling retrieval helpers.
    """
    data_point = (
        RcaData.query.filter(
            (RcaData.kpi_id == kpi_id)
            & (RcaData.data_type == "line")
            & (RcaData.end_date <= end_date)
        )
        .order_by(RcaData.created_at.desc())
        .first()
    )
    final_data = data_point.data if data_point else []
    if not final_data:
        raise ValueError("No data found.")
    # The line data is ordered, so the last row carries the analysis date.
    analysis_date = final_data[-1]["date"]
    return get_rca_date_from_string(analysis_date)
def get_timecuts_dates(analysis_date: date, timeline: str) -> List:
    """Return the comparison-group date ranges for an RCA timeline.

    Each entry describes one comparison group with its label and date
    range. For the "previous_day" timeline the start dates are dropped,
    leaving only the end dates.
    """
    group1, group2 = TIME_RANGES_BY_KEY[timeline]["function"](analysis_date)
    output = []
    for label, (start, end) in zip(("group1_value", "group2_value"), (group1, group2)):
        cut = {
            "label": label,
            "start_date": start,
            "end_date": end,
        }
        if timeline == "previous_day":
            del cut["start_date"]
        output.append(cut)
    return output
| 32.06686 | 85 | 0.528329 | 1,164 | 11,031 | 4.67354 | 0.135739 | 0.051471 | 0.016728 | 0.019853 | 0.641176 | 0.559559 | 0.541728 | 0.484191 | 0.452574 | 0.444301 | 0 | 0.007275 | 0.364518 | 11,031 | 343 | 86 | 32.16035 | 0.768759 | 0.028103 | 0 | 0.476351 | 0 | 0 | 0.106082 | 0.002155 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027027 | false | 0 | 0.033784 | 0 | 0.091216 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d55c7d8f96a78059d6e8c8a3ed3364b75170413a | 410 | py | Python | tests/Clients/test_serializers.py | Sheshtawy/featurette | d3d75bcf11b7db6f46e35615656e694e13463d1d | [
"MIT"
] | null | null | null | tests/Clients/test_serializers.py | Sheshtawy/featurette | d3d75bcf11b7db6f46e35615656e694e13463d1d | [
"MIT"
] | null | null | null | tests/Clients/test_serializers.py | Sheshtawy/featurette | d3d75bcf11b7db6f46e35615656e694e13463d1d | [
"MIT"
] | null | null | null | from app.Clients.serializers import ClientSchema
from app.Clients.models import Client
class TestClientSchema(object):
    """Tests for the ``ClientSchema`` serializer."""

    def test_init(self, app, db, session):
        """Dumping a created Client round-trips its id and name."""
        client = Client.create(name='john the client')
        dumped = ClientSchema().dump(client).data
        assert dumped['id'] == client.id
        assert dumped['name'] == client.name
d55dda708b14770945e0676691a296378b570ba7 | 4,157 | py | Python | autofields/tests/autofield.py | lygaret/django-autofields | 842a4624b727fabae77a6ad43faf198acf42ecb7 | [
"BSD-3-Clause"
] | 1 | 2015-05-01T23:53:01.000Z | 2015-05-01T23:53:01.000Z | autofields/tests/autofield.py | lygaret/django-autofields | 842a4624b727fabae77a6ad43faf198acf42ecb7 | [
"BSD-3-Clause"
] | null | null | null | autofields/tests/autofield.py | lygaret/django-autofields | 842a4624b727fabae77a6ad43faf198acf42ecb7 | [
"BSD-3-Clause"
] | null | null | null | from django.test import TestCase
from django.conf import settings
from .. import fields
from models import *
from datetime import date
incoming_markdown = "**bold**, *italic*"
gen_html = "<p><strong>bold</strong>, <em>italic</em></p>"
class AutoMarkdownTests(TestCase):
    """Markdown source fields should be rendered to HTML on save.

    Uses ``assertEqual`` instead of the deprecated ``assertEquals`` alias.
    """

    def setUp(self):
        self.m = TestAutoDescriptionModel()
        self.m.text = incoming_markdown
        self.m.save()

    def tearDown(self):
        self.m.delete()

    def test_nonpop_markdown(self):
        # Fields not configured to auto-populate stay empty.
        self.assertEqual(self.m.nonpop, "")

    def test_auto_markdown(self):
        self.assertEqual(self.m.html, gen_html)

    def test_only_create_markdown(self):
        # ``nonupdate`` keeps its create-time value even after the markdown
        # source is cleared and re-saved; ``html`` tracks the source.
        self.m.text = ""
        self.m.save()
        self.assertEqual(self.m.html, "")
        self.assertEqual(self.m.nonupdate, gen_html)
class AutoSlugTests(TestCase):
    """Slug fields should be generated from ``name`` on save.

    Uses ``assertEqual`` instead of the deprecated ``assertEquals`` alias.
    """

    def setUp(self):
        self.m1 = TestAutoSlugModel(name="Some String")
        self.m1.save()
        self.m2 = TestAutoSlugModel(name="Some String")
        self.m2.save()
        super(AutoSlugTests, self).setUp()

    def tearDown(self):
        self.m1.delete()
        self.m2.delete()

    def test_nongen_slug(self):
        # Explicitly assigned slugs must not be overwritten on save.
        m = TestAutoSlugModel(name="Some String")
        m.slug = "a-slug"
        m.uniq = "a-slug"
        m.save()
        self.assertEqual(m.name, "Some String")
        self.assertEqual(m.slug, "a-slug")
        self.assertEqual(m.uniq, "a-slug")

    def test_nonpop_slug(self):
        self.assertEqual(self.m1.nonpop, "")

    def test_nonuniq_slug(self):
        # Non-unique slug fields may collide between instances.
        self.assertEqual(self.m1.slug, "some-string")
        self.assertEqual(self.m2.slug, "some-string")

    def test_uniq_slug(self):
        # Unique slug fields get a numeric suffix on collision.
        self.assertEqual(self.m1.uniq, "some-string")
        self.assertEqual(self.m2.uniq, "some-string-1")
class AutoSlugFieldUniqueTests(TestCase):
    """Slug uniqueness may be scoped to another field (here: ``date``).

    Uses ``assertEqual`` instead of the deprecated ``assertEquals`` alias.
    """

    def setUp(self):
        self.m1 = TestFieldUniqueSlugModel()
        self.m1.name = "Jon Raphaelson"
        self.m1.date = date(2009, 8, 1)
        self.m1.save()

        self.m2 = TestFieldUniqueSlugModel()
        self.m2.name = "Jon Raphaelson"
        self.m2.date = date(2009, 8, 2)
        self.m2.save()

        self.m3 = TestFieldUniqueSlugModel()
        self.m3.name = "Jon Raphaelson"
        self.m3.date = date(2009, 8, 2)
        self.m3.save()

    def test_unique(self):
        # ``slug`` is globally unique (m2 and m3 both get suffixes), while
        # ``uniq`` is unique per date (m2, on a new date, reuses the bare
        # slug; m3, sharing m2's date, gets the first suffix).
        self.assertEqual(self.m1.slug, "jon-raphaelson")
        self.assertEqual(self.m1.uniq, "jon-raphaelson")
        self.assertEqual(self.m2.slug, "jon-raphaelson-1")
        self.assertEqual(self.m2.uniq, "jon-raphaelson")
        self.assertEqual(self.m3.slug, "jon-raphaelson-2")
        self.assertEqual(self.m3.uniq, "jon-raphaelson-1")
class SlugFieldFormatTests(TestCase):
    """AUTOSLUG_FORMAT should control how collision suffixes are rendered.

    Uses ``assertEqual`` instead of the deprecated ``assertEquals`` alias.
    """

    def test(self):
        # NOTE(review): this mutates global settings without restoring them,
        # which can leak into later test cases — consider
        # django.test.override_settings here.
        settings.AUTOSLUG_FORMAT = "%s.%s"
        m1 = TestFieldUniqueSlugModel()
        m1.name = "Jon Raphaelson"
        m1.date = date(2009, 8, 1)
        m1.save()

        m2 = TestFieldUniqueSlugModel()
        m2.name = "Jon Raphaelson"
        m2.date = date(2009, 8, 1)
        m2.save()

        self.assertEqual(m2.slug, "jon-raphaelson.1")
class SerializedDataTests(TestCase):
    """Serialized fields should round-trip python values through the DB.

    Uses ``assertEqual`` instead of the deprecated ``assertEquals`` alias.
    """

    def setUp(self):
        self.list = TestSerializedDataModel()
        self.list.data = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        self.list.save()

        self.tuples = TestSerializedDataModel()
        self.tuples.data = (1, 2, 3)
        self.tuples.save()

        self.null = TestSerializedDataModel()
        self.null.data = None
        self.null.save()

        self.default = TestSerializedDataModel()
        self.default.save()

    def test_serialized(self):
        # Both the value and its exact python type must survive the
        # round-trip (list stays list, tuple stays tuple).
        l = TestSerializedDataModel.objects.get(pk=1)
        self.assertEqual(type(l.data), type([1]))
        self.assertEqual(l.data, [1, 2, 3, 4, 5, 6, 7, 8, 9])

        t = TestSerializedDataModel.objects.get(pk=2)
        self.assertEqual(type(t.data), type((1,)))
        self.assertEqual(t.data, (1, 2, 3))

    def test_null(self):
        n = TestSerializedDataModel.objects.get(pk=3)
        self.assertEqual(n.data, None)

    def test_default(self):
        # A model saved without assigning ``data`` defaults to None.
        d = TestSerializedDataModel.objects.get(pk=4)
        self.assertEqual(d.data, None)
| 30.343066 | 59 | 0.618956 | 514 | 4,157 | 4.957198 | 0.169261 | 0.156986 | 0.117739 | 0.056515 | 0.324176 | 0.186028 | 0.010204 | 0.010204 | 0.010204 | 0.010204 | 0 | 0.032678 | 0.241761 | 4,157 | 136 | 60 | 30.566176 | 0.775698 | 0 | 0 | 0.111111 | 0 | 0 | 0.08612 | 0.006014 | 0 | 0 | 0 | 0 | 0.231481 | 1 | 0.166667 | false | 0 | 0.046296 | 0 | 0.259259 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d56065015acdcfc5b50c685207e82f83e0d13834 | 7,884 | py | Python | cymlda.py | LaoWang-Lab/multi-dimensional-topic-model | bbebc15e595ac793a790646a2ff5d0677ec6b2de | [
"MIT"
] | 1 | 2015-11-09T14:17:30.000Z | 2015-11-09T14:17:30.000Z | cymlda.py | LaoWang-Lab/multi-dimensional-topic-model | bbebc15e595ac793a790646a2ff5d0677ec6b2de | [
"MIT"
] | null | null | null | cymlda.py | LaoWang-Lab/multi-dimensional-topic-model | bbebc15e595ac793a790646a2ff5d0677ec6b2de | [
"MIT"
] | null | null | null | __author__ = 'Linwei'
import numpy as np
import os, json, sys, re
import _cymlda
from settings import H, E, alpha, beta, gamma, docDir, outputDir, iter_max, run_num, dictionary, docset
# Python 2 compatibility: alias ``range`` to the lazy ``xrange`` so the
# large sampling loops below do not materialise full lists under Py2.
PY2 = sys.version_info[0] == 2
if PY2:
    range = xrange
def n2s(counts):
    """Expand a counts vector into a tuple of samples.

    Each index of ``counts`` is repeated as many times as its count, e.g.
    ``n2s([2, 0, 3]) == (0, 0, 2, 2, 2)``.

    The previous implementation concatenated tuples in a loop, which is
    quadratic in the total number of samples; building a list first keeps
    it linear.
    """
    samples = []
    for value, count in enumerate(counts):
        samples.extend([value] * count)
    return tuple(samples)
class mylda:
    """Multi-dimensional LDA topic model trained by collapsed Gibbs sampling.

    ``H`` is the number of dimensions, ``E`` the number of topics per
    dimension and ``T`` the vocabulary size.  Count matrices:

    - ``_n_mh``:  per-document word counts per dimension (M x H)
    - ``_n_het``: word-type counts per (dimension, topic) pair (H x E x T)
    - ``_n_he``:  document counts per (dimension, topic) pair (H x E)
    - ``_z_mh``:  topic assignment of document m in dimension h
    - ``_s_mi``/``_w_mi``: per-word dimension assignments and word ids
    """

    def __init__(self, H=H, E=E, dictionary=dictionary, docDir=docDir, docset=docset):
        # Map each dictionary line (sans trailing newline) to a word id.
        self._dictionary = {x[:-1]:i for (i, x) in enumerate(open(dictionary))}
        self.T = len(self._dictionary)
        self.H = H
        self.E = E
        if docset == 'bagOfWords':
            self._docset = docset
            self._docDir = docDir # when docset is 'bagOfWords', corpus is cotanined in a single file, hence docDir is a filename(not a directory)
            with open(docDir) as f:
                self.M = int(f.readline()) # M value of this corpus is recorded in the first line of corpus file
        else:
            # NOTE(review): self._docset is not assigned on this branch, so
            # readCorpus() would raise AttributeError for directory corpora
            # — confirm whether the directory code path is still in use.
            self._docDir = docDir
            self.M = len(os.listdir(docDir))
        self._n_mh = np.zeros((self.M, H), dtype=np.int32) # counts for words in document m which were labeled as in dimension h
        self._n_het = np.zeros((H, E, self.T), dtype=np.int32) # counts for word type t in topic h,e
        self._n_he = np.zeros((H, E), dtype=np.int32) # counts for documents which were labeled as in topic e for dimension h
        self._z_mh = np.random.randint(E, size=(self.M, H)) # value of zi for document m and dimension h
        self._z_mh = np.asarray(self._z_mh, dtype=np.int32)
        self._w_mi = [[],]* self.M # value of wi for document m
        self._s_mi = [[],] * self.M # value of si for document m
        self.n_loaded = 0 # counts for documents loaded

    def readDoc(self, fname):
        """Read one plain-text document, tokenise it and randomly
        initialise its dimension assignments and count matrices."""
        m = self.n_loaded
        # self._w_mi[m] = np.genfromtxt(fname, delimiter=',')
        # Normalise the raw text to lowercase a-z words only.
        doc = open(fname).read().lower()
        doc = re.sub(r'\n', ' ', doc)
        doc = re.sub(r'-', ' ', doc)
        doc = re.sub(r'[^a-z ]', '', doc)
        doc = re.sub(r' +', ' ', doc)
        words = doc.split()
        # Keep only in-vocabulary tokens.
        words_in_use = filter(lambda x:x in self._dictionary.keys(), words)
        self._w_mi[m] = np.array([self._dictionary[x] for x in words_in_use])
        n = len(self._w_mi[m])
        # Distribute the document's n words uniformly over the H dimensions,
        # then shuffle the resulting per-word dimension labels.
        self._n_mh[m] = np.random.multinomial(n, (1/H,)*H)
        self._s_mi[m] = np.random.permutation(n2s(self._n_mh[m]))
        for (s, z) in enumerate(self._z_mh[m]):
            self._n_he[s][z] = self._n_he[s][z] + 1
        for (i, s) in enumerate(self._s_mi[m]):
            self._n_het[s, self._z_mh[m,s], self._w_mi[m][i]] = self._n_het[s, self._z_mh[m,s], self._w_mi[m][i]] + 1
        self.n_loaded += 1

    def list2np(self):
        """Flatten the per-document python lists into the contiguous int32
        arrays expected by the Cython sampler."""
        self._n_mh = self._n_mh.astype(dtype=np.int32)
        self._n_he = self._n_he.astype(dtype=np.int32)
        self._n_het = self._n_het.astype(dtype=np.int32)
        self._z_mh = self._z_mh.astype(dtype=np.int32)
        # nmw[0][m]: word count of doc m; nmw[1][m]: offset of doc m in the
        # flattened word array (running sum of the previous lengths).
        nmw = [None, None]
        nmw[0] = [len(doc) for doc in self._w_mi]
        nmw[1] = [0] + nmw[0][:-1]
        cum = 0
        for i in range(len(nmw[1])):
            cum += nmw[1][i]
            nmw[1][i] = cum
        self._n_mw = np.asarray(nmw, dtype=np.int32)
        self._w_mi_ = []
        for i in self._w_mi:
            self._w_mi_.extend(i)
        self._w_mi_ = np.asarray(self._w_mi_, dtype=np.int32)
        self._s_mi_ = []
        for i in self._s_mi:
            self._s_mi_.extend(i)
        self._s_mi_ = np.asarray(self._s_mi_, dtype=np.int32)

    def readCorpus(self):
        """Load the whole corpus (bag-of-words file or a directory of
        documents) and initialise all sampler state."""
        if self._docset == 'bagOfWords':
            printFlag = True
            corpusFile = open(self._docDir).readlines()
            # Line 3 of the file holds the total number of (doc, word) records.
            self.N_total_words = int(corpusFile[2])
            # Over-allocate; trimmed to the real length (self.cum) below.
            self._w_mi_ = np.zeros(int(self.N_total_words*5), dtype=np.int32)
            self._n_mw = np.zeros((2, self.M), dtype=np.int32)
            self.cum = 0
            for record in open(self._docDir).readlines()[3:]:
                m, w, counts = [int(x) for x in record.split()]
                m -= 1 # 1 in corpus file is corresponding to 0 in our model
                w -= 1
                self._w_mi_[self.cum:self.cum+counts] = w
                self._n_mw[0,m] += counts
                self.cum += counts
                if m < self.M - 1:
                    self._n_mw[1,m+1] = self.cum
                # printFlag throttles progress output to once per doc.
                if m%20 == 0 and printFlag:
                    print('read doc %d' % m)
                    printFlag = False
                elif m%20 != 0:
                    printFlag = True
                # self._w_mi[m].extend([w] * counts)
                # self._w_mi_tmp[m] += [w] * counts
            print('cum is %d' % self.cum)
            self._w_mi_ = self._w_mi_[:self.cum]
            # Random initial per-word dimension assignments.
            self._s_mi_ = np.random.randint(self.H, size=self.cum)
            self._s_mi_ = np.asarray(self._s_mi_, dtype=np.int32)
            self.n_loaded = m
            # Rebuild the count matrices from the random assignments.
            for m in range(self.M):
                if m%20 == 0:
                    print('initialize doc %d' % m)
                s_counts = np.bincount(self._s_mi_[self._n_mw[1,m]:self._n_mw[1,m]+self._n_mw[0,m]])
                self._n_mh[m,0:len(s_counts)] = s_counts
                for (s, z) in enumerate(self._z_mh[m]):
                    self._n_he[s][z] += 1
                for i,s in enumerate(self._s_mi_[self._n_mw[1,m]:self._n_mw[1,m]+self._n_mw[0,m]]):
                    self._n_het[s, self._z_mh[m,s], self._w_mi_[self._n_mw[1,m]+i]] += 1
        else:
            for i, docName in enumerate(os.listdir(self._docDir)):
                if i % 20 == 0:
                    print('read docs: %d' % i)
                self.readDoc(os.path.join(self._docDir, docName))
            self.list2np()

    def test_n(self):
        """Consistency check: recompute all count matrices from the raw
        assignments and assert they match the incremental counts."""
        for m in range(self.M):
            for h in range(H):
                assert self._n_mh[m,h] == len(np.where(self._s_mi[m]==h)[0])
        for h in range(H):
            for e in range(E):
                assert self._n_he[h,e] == len(np.where(self._z_mh[:,h]==e)[0])
        test_n_het = np.zeros((H,E,self.T))
        for m in range(self.M):
            for (i,t) in enumerate(self._w_mi[m]):
                h = self._s_mi[m][i]
                test_n_het[h, self._z_mh[m,h], t] += 1
        for h in range(H):
            for e in range(E):
                for t in range(self.T):
                    assert test_n_het[h,e,t] == self._n_het[h,e,t]

    def train_corpus(self, n_iter):
        """Run ``n_iter`` Gibbs-sampling sweeps via the Cython extension."""
        for i in range(n_iter):
            _cymlda._train_corpus(self._n_het, self._n_he, self._n_mh,
                                  self._w_mi_, self._s_mi_, self._n_mw, self._z_mh,
                                  alpha, beta, gamma)
            # self.test_n()
            # print("sample is OK!")

    def output_topic(self, run_id, iteration):
        """Dump the current topic-word counts (plus the convergence delta)
        to ``outputDir/H?E?_M?/run?/iter?.json``."""
        _dir = outputDir + os.path.sep + "H%dE%d_M%d" % (H, E, self.M) + os.path.sep + "run%d" % run_id
        if not os.path.exists(_dir):
            os.makedirs(_dir)
        result = {'H':H,'E':E,'M':self.M,'iter':iteration,'T':self.T,'topic':[[[],] * E,]*H,'delta_n_het':self._delta_n_het}
        result['topic'] = self._n_het.tolist()
        with open(os.path.join(_dir, "iter%d.json" % iteration),'w') as f:
            json.dump(result, f)
def run_once(run_id=1):
    """Run one full Gibbs-sampling session, dumping topics every sweep.

    :param run_id: identifier used to separate the output directories of
        independent runs.
    """
    print("run_id: %d" % run_id)
    model = mylda()
    model.readCorpus()
    model._n_het_previous = model._n_het.copy()
    model._n_word = model._n_het.sum()
    for iteration in range(iter_max):
        model.train_corpus(1)
        # Change in topic-word counts relative to the corpus size — a
        # simple convergence monitor.
        delta = (np.abs(model._n_het - model._n_het_previous).sum()/model._n_word)
        model._delta_n_het = delta
        print("iter %d\t%.10f" % (iteration, delta))
        model._n_het_previous = model._n_het.copy()
        model.output_topic(run_id, iteration)
def main():
    """Script entry point: perform a single training run."""
    run_once(1)

if __name__ == "__main__":
    main()
| 39.818182 | 146 | 0.539954 | 1,258 | 7,884 | 3.141494 | 0.148649 | 0.055668 | 0.037196 | 0.036437 | 0.293775 | 0.201164 | 0.129808 | 0.120192 | 0.111083 | 0.097925 | 0 | 0.017305 | 0.31101 | 7,884 | 197 | 147 | 40.020305 | 0.710236 | 0.09348 | 0 | 0.124224 | 0 | 0 | 0.025116 | 0 | 0 | 0 | 0 | 0 | 0.018634 | 1 | 0.062112 | false | 0 | 0.024845 | 0 | 0.099379 | 0.062112 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5617408fe61ad79898cc6e28c5136f3b8cef58c | 3,176 | py | Python | atom/factory.py | yoshiask/PyZuneCatalogServer | b176155f89e3a990456248175b458ba309b808e0 | [
"MIT"
] | 7 | 2021-02-21T10:45:25.000Z | 2021-03-28T10:29:36.000Z | atom/factory.py | yoshiask/PyZuneCatalogServer | b176155f89e3a990456248175b458ba309b808e0 | [
"MIT"
] | 1 | 2022-02-16T08:18:36.000Z | 2022-02-20T04:17:02.000Z | atom/factory.py | yoshiask/PyZuneCatalogServer | b176155f89e3a990456248175b458ba309b808e0 | [
"MIT"
] | null | null | null | from typing import Dict, Any
from xml.dom import minidom
from xml.dom.minidom import Element, Document, Text
from datetime import datetime
# MIME type constants used for Atom feed link/content type attributes.
MIME_XML = "text/xml"
MIME_ATOM_XML = "application/atom+xml"
MIME_UIX = "application/uix"
MIME_JPG = "image/jpeg"
def set_element_value(element: Element, value: str):
    """Append *value* to *element* as a DOM text node."""
    text_node = Text()
    text_node.data = value
    element.appendChild(text_node)
def set_value_as_element(doc: Document, element: Element, name: str, value: Any):
    """Append a child element *name* under *element* holding *value*.

    Dict values become nested elements (one per key); anything else is
    written as a text node.
    """
    child: Element = doc.createElement(name)
    if type(value) is dict:
        # Nested mapping: recurse, creating one sub-element per key.
        set_values_as_elements(doc, child, value)
    else:
        set_element_value(child, value)
    element.appendChild(child)
def set_values_as_elements(doc: Document, element: Element, props: Dict[str, Any]):
    """Append one child element under *element* for every key in *props*."""
    for key, value in props.items():
        set_value_as_element(doc, element, key, value)
def create_feed(doc: Document, title: str, id: str, href: str, date_updated: datetime = None) -> Element:
    """Create the Atom feed root with link, updated, title and id children.

    :param date_updated: timestamp for the ``<a:updated>`` child; defaults
        to "now". (The previous ``datetime.today()`` default was evaluated
        once at import time, so long-running processes emitted a stale
        date.)
    """
    if date_updated is None:
        date_updated = datetime.today()
    feed = create_empty_feed(doc)
    feed.appendChild(create_link(doc, href))
    feed.appendChild(create_updated(doc, date_updated))
    feed.appendChild(create_title(doc, title))
    feed.appendChild(create_id(doc, id))
    return feed
def create_empty_feed(doc: Document) -> Element:
    """Create the root ``<a:feed>`` element, attach it to *doc*, return it."""
    feed: Element = doc.createElement("a:feed")
    # Declare the Atom, OpenSearch and Zune catalog namespaces.
    namespaces = (
        ("xmlns:a", "http://www.w3.org/2005/Atom"),
        ("xmlns:os", "http://a9.com/-/spec/opensearch/1.1/"),
        ("xmlns", "http://schemas.zune.net/catalog/music/2007/10"),
    )
    for attr, uri in namespaces:
        feed.setAttribute(attr, uri)
    doc.appendChild(feed)
    return feed
def create_link(doc: Document, href: str, rel: str = "self", type: str = MIME_ATOM_XML) -> Element:
    """Create an ``<a:link>`` element pointing at *href*.

    ``type`` shadows the builtin, but renaming it would break keyword
    callers, so the name is kept.
    """
    link: Element = doc.createElement("a:link")
    for attr, val in (("rel", rel), ("type", type), ("href", href)):
        link.setAttribute(attr, val)
    return link
def create_updated(doc: Document, date_updated: datetime = None) -> Element:
    """Create an ``<a:updated>`` element holding an ISO-8601 timestamp.

    The default is computed per call; the previous ``datetime.today()``
    default was frozen at import time, yielding a stale date in
    long-running processes.
    """
    if date_updated is None:
        date_updated = datetime.today()
    updated: Element = doc.createElement("a:updated")
    set_element_value(updated, date_updated.isoformat())
    return updated
def create_title(doc: Document, title: str, type: str = "text") -> Element:
    """Create an ``<a:title>`` element with the given text content and type."""
    element: Element = doc.createElement("a:title")
    element.setAttribute("type", type)
    set_element_value(element, title)
    return element
def create_id(doc: Document, id: str) -> Element:
    """Create an ``<a:id>`` element whose text content is *id*."""
    element: Element = doc.createElement("a:id")
    set_element_value(element, id)
    return element
def create_entry(doc: Document, title: str, id: str, href: str, date_updated: datetime = None) -> Element:
    """Create an ``<a:entry>`` element with link, updated, title and id.

    :param date_updated: timestamp for the ``<a:updated>`` child; defaults
        to "now". (The previous ``datetime.today()`` default was evaluated
        once at import time, so long-running processes emitted a stale
        date.)
    """
    if date_updated is None:
        date_updated = datetime.today()
    entry: Element = doc.createElement("a:entry")
    entry.appendChild(create_link(doc, href))
    entry.appendChild(create_updated(doc, date_updated))
    entry.appendChild(create_title(doc, title))
    entry.appendChild(create_id(doc, id))
    return entry
def create_author(doc: Document, name: str) -> Element:
    """Create an ``<a:author>`` element containing an ``<a:name>`` child."""
    author: Element = doc.createElement("a:author")
    name_element: Element = doc.createElement("a:name")
    set_element_value(name_element, name)
    author.appendChild(name_element)
    return author
| 32.408163 | 118 | 0.71694 | 433 | 3,176 | 5.083141 | 0.184758 | 0.049977 | 0.094048 | 0.087233 | 0.284871 | 0.143117 | 0.063607 | 0.063607 | 0.063607 | 0.063607 | 0 | 0.005234 | 0.157746 | 3,176 | 97 | 119 | 32.742268 | 0.81757 | 0.004408 | 0 | 0.029412 | 0 | 0 | 0.081329 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.161765 | false | 0 | 0.058824 | 0 | 0.338235 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d564c433a41a9d425ea19b0dd2e0474e38611f74 | 3,296 | py | Python | cyder/base/eav/models.py | drkitty/cyder | 1babc443cc03aa51fa3c1015bcd22f0ea2e5f0f8 | [
"BSD-3-Clause"
] | 6 | 2015-04-16T23:18:22.000Z | 2020-08-25T22:50:13.000Z | cyder/base/eav/models.py | drkitty/cyder | 1babc443cc03aa51fa3c1015bcd22f0ea2e5f0f8 | [
"BSD-3-Clause"
] | 267 | 2015-01-01T00:18:57.000Z | 2015-10-14T00:01:13.000Z | cyder/base/eav/models.py | drkitty/cyder | 1babc443cc03aa51fa3c1015bcd22f0ea2e5f0f8 | [
"BSD-3-Clause"
] | 5 | 2015-03-23T00:57:09.000Z | 2019-09-09T22:42:37.000Z | from django.db import models
from cyder.base.eav.constants import (ATTRIBUTE_TYPES, ATTRIBUTE_INVENTORY,
ATTRIBUTE_OPTION, ATTRIBUTE_STATEMENT)
from cyder.base.eav.fields import AttributeValueTypeField, EAVValueField
from cyder.base.eav.utils import is_hex_byte_sequence
from cyder.base.eav.validators import VALUE_TYPES
from cyder.base.mixins import ObjectUrlMixin
from cyder.base.models import BaseModel
from cyder.base.utils import classproperty, transaction_atomic
class Attribute(models.Model):
    """An EAV attribute definition: a named, typed key.

    ``attribute_type`` (inventory/option/statement) controls how values are
    rendered, and ``value_type`` constrains how values are validated by
    :class:`AttributeValueTypeField`.
    """

    search_fields = ('name',)

    class Meta:
        app_label = 'cyder'
        db_table = 'attribute'
        ordering = ('name',)

    name = models.CharField(max_length=255)
    attribute_type = models.CharField(max_length=1, choices=ATTRIBUTE_TYPES)
    # value_type is validated against attribute_type via the custom field.
    value_type = AttributeValueTypeField(max_length=20, choices=VALUE_TYPES,
                                         attribute_type_field='attribute_type')

    def __unicode__(self):
        return self.name
class EAVBase(BaseModel, ObjectUrlMixin):
    """The entity-attribute-value base model

    When you inherit from this model, you must define the following fields::

        entity = ForeignKey(ENTITY)
        attribute = EAVAttributeField(Attribute)

    where ENTITY is the entity model.

    If you define a custom Meta class on your model, ensure it inherits from
    :code:`EAVBase.Meta`.

    To restrict the attribute field by attribute type, pass EAVAttributeField
    the `type_choices` keyword argument with an iterable specifying the
    attribute types to allow.
    """

    class Meta:
        abstract = True
        ordering = ('attribute__name',)
        unique_together = ('entity', 'attribute')

    def check_in_ctnr(self, ctnr):
        # Delegate container membership to the concrete entity object.
        return ctnr.check_contains_obj(self.entity)

    @property
    def pretty_name(self):
        # Human-readable name: the attribute this row assigns a value to.
        return self.attribute.name

    @classproperty
    @classmethod
    def pretty_type(cls):
        # Derived from the entity FK's target model, e.g. "<X> attribute".
        return cls._meta.get_field('entity').rel.to.pretty_type + ' attribute'

    # The value is validated against the attribute's value_type by the field.
    value = EAVValueField(max_length=255, attribute_field='attribute')

    def __unicode__(self):
        # Render as "key value" text; the exact shape depends on the
        # attribute type.
        kv_formats = {
            ATTRIBUTE_INVENTORY: u'{0} = {1}',
            ATTRIBUTE_OPTION: u'option {0} {1}',
            ATTRIBUTE_STATEMENT: u'{0} {1}',
        }

        if self.attribute.value_type == 'string':
            # Hex byte sequences must stay unquoted; other strings are quoted.
            add_quotes = not is_hex_byte_sequence(self.value)
        elif self.attribute.value_type == 'text':
            add_quotes = True
        else:
            add_quotes = False

        value = (u'"{0}"' if add_quotes else u'{0}').format(self.value)

        return (kv_formats[self.attribute.attribute_type]
                .format(self.attribute.name, value))

    def details(self):
        """For tables."""
        data = super(EAVBase, self).details()
        data['data'] = [
            ('Attribute', 'attribute__name', self.attribute),
            ('Value', 'value', self.value),
        ]
        return data

    @classmethod
    def filter_by_ctnr(cls, ctnr, objects=None):
        # EAV rows are not filtered by container themselves; return the
        # given queryset (or everything) unchanged.
        if objects is None:
            return cls.objects.all()
        else:
            return objects

    @transaction_atomic
    def save(self, *args, **kwargs):
        # Run full model validation before persisting, inside a transaction.
        self.full_clean()
        super(EAVBase, self).save(*args, **kwargs)
| 31.390476 | 79 | 0.646238 | 383 | 3,296 | 5.389034 | 0.331593 | 0.030523 | 0.044089 | 0.031008 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00693 | 0.255765 | 3,296 | 104 | 80 | 31.692308 | 0.834488 | 0.151396 | 0 | 0.117647 | 0 | 0 | 0.064481 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.117647 | 0.058824 | 0.485294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5672c64e05c057c731faac1ae947bf84604cb2e | 12,071 | py | Python | cldfbench_pulotu.py | D-PLACE/dplace-dataset-pulotu | a26070a803d75dc9cc67e8233d48eee0f9fa0fcd | [
"CC-BY-4.0"
] | null | null | null | cldfbench_pulotu.py | D-PLACE/dplace-dataset-pulotu | a26070a803d75dc9cc67e8233d48eee0f9fa0fcd | [
"CC-BY-4.0"
] | null | null | null | cldfbench_pulotu.py | D-PLACE/dplace-dataset-pulotu | a26070a803d75dc9cc67e8233d48eee0f9fa0fcd | [
"CC-BY-4.0"
] | null | null | null | import re
import pathlib
import subprocess
import collections
from clldutils.text import split_text
from clldutils.misc import slug
from cldfbench import Dataset as BaseDataset, CLDFSpec
import errata
# The following variables go into LanguageTable, we want to be able to identify these by ID:
MD = {
    'Latitude': '5',
    'Longitude': '6',
}
# Reverse lookup: raw question ID -> LanguageTable column name.
QID2MD = {v: k for k, v in MD.items()}

# Boilerplate fragments stripped from answer-option descriptions.
STRIP_FROM_CODES = [
    ' (SKIP REMAINDER OF SECTION)',
    'NA (do not select)',
]

# We want to uniformly add units to relevant questions:
QUESTIONS = {
    'Distance to nearest continent': 'Distance to nearest continent (km)',
    'Longitude of culture’s location': 'Longitude of culture’s location (°)',
    'Latitude of culture’s location': 'Latitude of culture’s location (°)',
}

# Spelling fixes applied to culture names.
CNAMES = {
    'Maori': 'Māori',
}

# Curated display order for categories/sections/subsections, consumed by
# parameter_sort() below; labels not listed sort first.
CATEGORIES = [
    'Traditional Culture',
    'Post Contact History',
    'Current Culture',
]
SECTIONS = [
    'Belief (Current)',
    'Religious History',
    'Secular History',
    'Belief (Indigenous)',
    'Isolation',
    'Physical Environment',
    'Practice (Indigenous)',
    'Social Environment',
    'Subsistence and Economy',
]
SUBSECTIONS = [
    'Supernatural Beings',
    'Supernatural Punishment',
    'Afterlife and Creation',
    'General Features (Indigenous Belief)',
    'Classes of Tapu',
    'Mana',
    'General Supernatural Practices (Indigenous)',
    'Rites',
    'Conflict',
    'Land-based means of subsistence',
    'Water-based means of subsistence',
    'Commercial Activity',
    'Geographical Range of Culture',
    'Features of Island with Largest Culture Population',
    'Conversion',
    'Syncretic Movements',
    'Demographic and Social Changes',
    'Economic Changes',
    'Modern Infrastructure',
    'Loss of Autonomy',
    'Religious Demographics',
]
def parameter_sort(parameter):
    """Sort key ordering parameters by category, section and subsection.

    Positions come from the curated CATEGORIES/SECTIONS/SUBSECTIONS lists;
    labels not present in a list sort first (index -1).
    """
    def rank(label, ordering):
        # Position within the curated ordering; unknown labels come first.
        return ordering.index(label) if label in ordering else -1

    return (
        rank(parameter['Category'], CATEGORIES),
        rank(parameter['Section'], SECTIONS),
        rank(parameter['Subsection'], SUBSECTIONS),
    )
class Dataset(BaseDataset):
    """cldfbench dataset turning the raw Pulotu database dump into a CLDF
    StructureDataset: cultures become languages, questions become
    parameters (with answer options as codes) and responses become values.
    """

    dir = pathlib.Path(__file__).parent
    id = "pulotu"

    def cldf_specs(self):  # A dataset must declare all CLDF sets it creates.
        return CLDFSpec(
            dir=self.cldf_dir,
            data_fnames={
                'LanguageTable': 'cultures.csv',
                'ParameterTable': 'questions.csv',
                'ValueTable': 'responses.csv',
            },
            module="StructureDataset")

    def cmd_download(self, args):
        """
        Collect the data from the dev branches of the UD repository forks
        """
        subprocess.check_call(
            'git -C {} submodule update --remote'.format(self.dir.resolve()), shell=True)

    def read(self, name, d=None):
        # Yield rows of a raw CSV (default location: the pulotu-internal
        # submodule) with surrounding whitespace stripped from every cell.
        for row in (d or self.raw_dir.joinpath('pulotu-internal')).read_csv(name, dicts=True):
            yield collections.OrderedDict((k, v.strip()) for k, v in row.items())

    def _make_param(self, r, sections, codes, codetable):
        """Turn a raw question row into a ParameterTable dict, appending
        any answer options for the question to ``codetable``."""
        name = r['question'].strip()
        p = dict(
            ID=r['id'],
            Name=QUESTIONS.get(name, name),
            Simplified_Name=r['simplified_question'],
            Description=r['information'].replace('(VARIABLE LABEL REVERSED)', '').strip(),
            Section_Notes=sections[r['section_id']]['notes'] or sections[r['subsection_id']]['notes'],
            # Question 10 is numeric regardless of its raw response type.
            Datatype=r['response_type'] if r['id'] != '10' else 'Int',
            Category=sections[r['subsection_id']]['category'] or sections[r['section_id']]['category'],
            # NOTE(review): Section reads from subsection_id and Subsection
            # from section_id — possibly intentional given how the raw
            # sections table nests, but worth confirming.
            Section=sections[r['subsection_id']]['section'],
            Subsection=sections[r['section_id']]['section'],
        )
        if r['id'] in codes:
            for k, v in codes[r['id']].items():
                # Remove boilerplate fragments from the option description.
                for s in STRIP_FROM_CODES:
                    v = v.replace(s, '').strip()
                codetable.append(dict(
                    # '?' is not a valid ID character; map it to 'NA'.
                    ID='{}-{}'.format(r['id'], k.replace('?', 'NA')),
                    Parameter_ID=r['id'],
                    Name=k,
                    Description=v,
                ))
        return p

    def cmd_makecldf(self, args):
        """Assemble the CLDF dataset: schema, glossary, cultures, questions
        (with codes) and responses."""
        # --- schema: extra columns and tables ----------------------------
        args.writer.cldf.add_columns(
            'LanguageTable',
            'Comment',
            {'name': 'Ethonyms', 'separator': '; '})
        args.writer.cldf.add_columns(
            'ParameterTable',
            'Simplified_Name', 'Datatype', 'Section_Notes',
            'Category', 'Section', 'Subsection')
        args.writer.cldf.add_component('CodeTable')
        args.writer.cldf.add_table(
            'glossary.csv',
            {
                'name': 'ID',
                'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#id',
            },
            {
                'name': 'Term',
                'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#name',
            },
            {
                'name': 'Definition',
                'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#description',
            },
            {
                'name': 'Source',
                'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#source',
                'separator': ';'
            },
        )
        args.writer.cldf.sources.read(self.etc_dir / 'sources.bib')

        # --- glossary, with corrections from the errata module -----------
        for r in self.read('core_glossary.csv'):
            d = dict(
                ID=slug(r['term']), Term=r['term'], Definition=r['definition'])
            dd = errata.GLOSSARY.get(d['Term'])
            if dd:
                # Errata entries are (term, definition[, source]); a falsy
                # term keeps the original, a callable definition rewrites it.
                if len(dd) == 2:
                    term, definition = dd
                    source = None
                else:
                    term, definition, source = dd
                    d['Source'] = [source]
                if term:
                    d['Term'] = term
                    d['ID'] = slug(term)
                if isinstance(definition, str):
                    d['Definition'] = definition
                else:
                    d['Definition'] = definition(d['Definition'])
            args.writer.objects['glossary.csv'].append(d)

        # --- section metadata, keyed by raw section id -------------------
        cats = {r['id']: r['category'] for r in self.read('categories.csv')}
        sections = {}
        for r in self.read('sections.csv'):
            r['category'] = cats.get(r['category_id'])
            sections[r['id']] = r

        # --- cultures (LanguageTable), linked to Glottocodes via ABVD ----
        abvd2gc = {r['ID']: r['Glottocode'] for r in self.read('languages.csv', d=self.etc_dir)}
        l2abvd = {r['id']: r['abvdcode'] for r in self.read('languages.csv')}
        c2abvd = {r['culture_id']: l2abvd[r['language_id']] for r in self.read('cultures_languages.csv')}
        c2id = {}
        cultures = collections.OrderedDict()
        for r in self.read('cultures.csv'):
            c2id[r['id']] = r['slug']
            cultures[r['id']] = dict(
                ID=r['slug'],
                Name=CNAMES.get(r['culture'], r['culture']),
                Comment=r['notes'].replace('Maori', 'Māori'),
                Glottocode=abvd2gc.get(c2abvd[r['id']]),
                Ethonyms=split_text(r['ethonyms'], separators=';', strip=True),
                # FIXME: Add Glottolog classification for navigation/searching?
            )

        # --- answer options: "(1) label ..." strings split into codes ----
        codes = collections.defaultdict(collections.OrderedDict)
        for r in self.read('questions_option.csv'):
            opts = re.split('(\([0-9?]\))', r['options'])
            assert not opts[0].strip()
            # Alternate (number) / description pairs after the split.
            for k, v in zip(opts[1::2], opts[::2][1:]):
                codes[r['question_ptr_id']][k[1:-1]] = v.strip()

        # --- questions (ParameterTable), renumbered in curated order -----
        # NOTE(review): the != 't' test means rows flagged displayPublic='t'
        # are excluded here — confirm the flag's polarity in the raw dump.
        public_questions, with_codersnotes = {}, set()
        parameters = []
        for r in self.read('questions.csv'):
            if r['displayPublic'] != 't':
                public_questions[r['id']] = r['response_type']
                parameters.append(
                    self._make_param(r, sections, codes, args.writer.objects['CodeTable']))
        # Reassign sequential IDs following the curated sort order;
        # ``shuffled`` maps raw question id -> new parameter ID.
        shuffled = collections.OrderedDict()
        for i, p in enumerate(
                sorted(parameters, key=parameter_sort), start=1):
            shuffled[p['ID']] = {k: str(i) if k == 'ID' else v for k, v in p.items()}
        args.writer.objects['ParameterTable'] = list(shuffled.values())
        shuffled = {k: v['ID'] for k, v in shuffled.items()}
        # Questions 251-253 are appended at the end with coder's notes.
        max_pid = len(parameters)
        for r in self.read('questions.csv'):
            if r['number'] in ['251', '252', '253']:
                assert r['id'] not in public_questions
                with_codersnotes.add(r['id'])
                max_pid += 1
                shuffled[r['id']] = str(max_pid)
                public_questions[r['id']] = r['response_type']
                p = self._make_param(r, sections, codes, args.writer.objects['CodeTable'])
                p['ID'] = str(max_pid)
                args.writer.objects['ParameterTable'].append(p)

        # --- responses (ValueTable), typed per response kind -------------
        responses = collections.defaultdict(dict)
        for label, t in [('options', 'Option'), ('floats', 'Float'), ('integers', 'Int'), ('texts', 'Text')]:
            for r in self.read('responses_{}.csv'.format(label)):
                responses[t][r['response_ptr_id']] = r['response']

        srcmap = {r['id']: r['slug'] for r in self.read('sources.csv')}
        for r in self.read('responses.csv'):
            if r['question_id'] in public_questions:
                # Up to five sources per response, formatted "slug[pages]".
                sources = []
                for i in range(1, 6):
                    sid, page = r['source{}_id'.format(i)], r['page{}'.format(i)]
                    if sid:
                        sid = srcmap[sid]
                        if sid not in ['source-not-applicable2014']:
                            sources.append('{}[{}]'.format(sid, page.replace(';', ',')) if page else sid)
                res = responses[public_questions[r['question_id']]][r['id']]
                if not res:
                    continue
                if r['question_id'] == '10':
                    # Population: strip thousands separators, keep as int.
                    res = int(res.replace(',', ''))
                # Lat/lon responses also populate the culture's coordinates.
                mdkey = QID2MD.get(r['question_id'])
                if mdkey in ['Latitude', 'Longitude']:
                    cultures[r['culture_id']][mdkey] = float(res)
                cid = None
                if r['question_id'] in codes:
                    cid = '{}-{}'.format(r['question_id'], res.replace('?', 'NA'))
                args.writer.objects['ValueTable'].append(dict(
                    ID=r['id'],
                    Language_ID=c2id[r['culture_id']],
                    Parameter_ID=r['question_id'],
                    Value=res,
                    Code_ID=cid,
                    Source=sources,
                    # Uncertainty is not really informative or useful.
                    #Uncertain=r['uncertainty'] == 't',
                    Comment=r['codersnotes'] if r['question_id'] in with_codersnotes else None,
                ))
        args.writer.objects['LanguageTable'] = list(cultures.values())
        # Remap raw question ids to the renumbered parameter IDs.
        for t in ['CodeTable', 'ValueTable']:
            for o in args.writer.objects[t]:
                o['Parameter_ID'] = shuffled[o['Parameter_ID']]
VPK_2015 = {
"1": "v1",
"2": "v2",
"3": "v3",
"4": "v4",
"5": "v5",
"6": "v6",
"7": "v7",
"8": "v8",
"9": "v9",
"10": "v10",
"11": "v11",
"14": "v14",
"15": "v15",
"16": "v16",
"17": "v17",
"19": "v19",
"20": "v20",
"21": "v21",
"94": "v22",
"23": "v24",
"24": "v25",
"25": "v26",
"26": "v27",
"27": "v28",
"28": "v29",
"140": "v30",
"30": "v31",
"31": "v32",
"34": "v35",
"36": "v37",
"95": "v38",
"37": "v39",
"38": "v40",
"39": "v41",
"40": "v42",
"42": "v44",
"44": "v46",
"45": "v47",
"46": "v48",
"47": "v49",
"49": "v51",
"50": "v52",
"51": "v53",
"54": "v56",
"55": "v57",
"56": "v58",
"57": "v59",
"58": "v60",
"59": "v61",
"61": "v63",
"62": "v64",
"63": "v65",
"64": "v66",
"65": "v67",
"66": "v68",
"67": "v69",
"68": "v70",
"69": "v71",
"70": "v72",
"71": "v73",
"72": "v74",
"73": "v75",
"74": "v76",
"75": "v77",
"77": "v79",
"78": "v80",
"79": "v81",
"80": "v82",
"81": "v83",
"82": "v84",
"83": "v85",
"84": "v86",
"87": "v89",
"88": "v90",
"90": "v92",
"91": "v93",
"92": "v94",
"105": "v105",
"106": "v106",
} | 32.712737 | 109 | 0.515947 | 1,372 | 12,071 | 4.476676 | 0.338192 | 0.010257 | 0.012699 | 0.021166 | 0.144904 | 0.099153 | 0.092804 | 0.054054 | 0.054054 | 0.029958 | 0 | 0.043234 | 0.306354 | 12,071 | 369 | 110 | 32.712737 | 0.690075 | 0.033551 | 0 | 0.036036 | 0 | 0 | 0.258378 | 0.004038 | 0 | 0 | 0 | 0.00271 | 0.006006 | 1 | 0.018018 | false | 0 | 0.024024 | 0.003003 | 0.06006 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d56a97f82f78b9061fb5f7e8a3676e542e4c4969 | 1,908 | py | Python | selenium/test_navbar.py | mattruston/idb | 5c041f0a844fa025b920471bfe826fed0ce23c61 | [
"MIT"
] | 1 | 2017-10-19T21:46:35.000Z | 2017-10-19T21:46:35.000Z | selenium/test_navbar.py | mattruston/idb | 5c041f0a844fa025b920471bfe826fed0ce23c61 | [
"MIT"
] | 1 | 2017-09-22T15:24:27.000Z | 2017-09-22T15:24:27.000Z | selenium/test_navbar.py | mattruston/idb | 5c041f0a844fa025b920471bfe826fed0ce23c61 | [
"MIT"
] | null | null | null | import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class TestNavBar(unittest.TestCase):
    """Browser smoke tests for the gamingdb.info navigation bar.

    Each nav test clicks a navbar link, checks the resulting URL,
    navigates back, and re-verifies the landing page.
    """

    def setUp(self):
        # A fresh browser per test keeps the tests independent.
        self.driver = webdriver.Firefox()
        self.driver.get("http://gamingdb.info")

    def test_hit_site(self):
        self.assertIn("gamingdb", self.driver.title)
        self.assertIn("gamingdb.info", self.driver.current_url)

    def _check_nav(self, link_text, expected_url):
        """Click *link_text* in the navbar, verify the URL, and go back.

        Replaces five copy-pasted test bodies that differed only in the
        link text and expected URL fragment.
        """
        self.test_hit_site()
        self.driver.find_element_by_link_text(link_text).click()
        self.assertIn(expected_url, self.driver.current_url)
        self.driver.back()
        self.test_hit_site()

    def test_game_nav(self):
        self._check_nav("Games", "gamingdb.info/games")

    def test_dev_nav(self):
        self._check_nav("Developers", "gamingdb.info/developers")

    def test_plat_nav(self):
        self._check_nav("Platforms", "gamingdb.info/platforms")

    def test_char_nav(self):
        self._check_nav("Characters", "gamingdb.info/characters")

    def test_about_nav(self):
        self._check_nav("About", "gamingdb.info/about")

    def tearDown(self):
        self.driver.close()
if __name__ == "__main__":
unittest.main() | 31.278689 | 70 | 0.630503 | 230 | 1,908 | 4.943478 | 0.208696 | 0.067722 | 0.10642 | 0.131926 | 0.620932 | 0.493404 | 0.493404 | 0.493404 | 0.493404 | 0.493404 | 0 | 0 | 0.256813 | 1,908 | 61 | 71 | 31.278689 | 0.801834 | 0 | 0 | 0.408163 | 0 | 0 | 0.106544 | 0.038399 | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0.163265 | false | 0 | 0.061224 | 0 | 0.244898 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d56cf7a16d71912684f9735b1322042ef090fe2a | 12,365 | py | Python | mushr_rhc/cost/block_push.py | Rockett8855/mushr_rhc | 79ea69020ce208c1000ab8a33e5774abf52d882d | [
"BSD-3-Clause"
] | null | null | null | mushr_rhc/cost/block_push.py | Rockett8855/mushr_rhc | 79ea69020ce208c1000ab8a33e5774abf52d882d | [
"BSD-3-Clause"
] | null | null | null | mushr_rhc/cost/block_push.py | Rockett8855/mushr_rhc | 79ea69020ce208c1000ab8a33e5774abf52d882d | [
"BSD-3-Clause"
] | null | null | null | import torch
import math
import threading
import Tkinter as tk
import mushr_rhc.utils as utils
class BlockPush:
def __init__(
self, params, logger, dtype, map, world_rep, value_fn, viz_rollouts_fn
):
self.params = params
self.logger = logger
self.dtype = dtype
self.map = map
self.world_rep = world_rep
self.value_fn = value_fn
self.viz_rollouts_fn = viz_rollouts_fn
self.reset()
    def reset(self):
        """(Re)load all tuning parameters and reset derived state.

        Called from __init__ and whenever the parameter set changes.
        """
        self.T = self.params.get_int("T", default=15)      # rollout horizon (steps)
        self.K = self.params.get_int("K", default=62)      # number of rollouts
        self.NPOS = self.params.get_int("npos", default=3)
        time_horizon = utils.get_time_horizon(self.params)
        self.dt = time_horizon / self.T                    # seconds per rollout step
        # RLock so at_goal() may call dist_to_goal() while already holding it.
        self.goal_lock = threading.RLock()
        with self.goal_lock:
            self.goal = None
        self.goal_threshold = self.params.get_float("xy_threshold", default=0.2)
        self.dist_horizon = utils.get_distance_horizon(self.params)
        self.dist_w = self.params.get_float("cost_fn/dist_w", default=1.0)
        self.car_obs_dist_w = self.params.get_float(
            "cost_fn/car_obs_dist_w", default=5.0
        )
        # NOTE(review): reads the *car* obstacle key, not a block-specific
        # one -- looks like a copy-paste; confirm "cost_fn/block_obs_dist_w"
        # was intended.
        self.block_obs_dist_w = self.params.get_float(
            "cost_fn/car_obs_dist_w", default=5.0
        )
        self.cost2go_w = self.params.get_float("cost_fn/cost2go_w", default=1.0)
        self.bounds_cost = self.params.get_float("cost_fn/bounds_cost", default=100.0)
        # NOTE(review): overwrites the dist_w assigned a few lines above.
        self.dist_w = self.params.get_float("cost_fn/block_push/dist_w", default=.75)
        self.manip_w = self.params.get_float("cost_fn/block_push/manip_w", default=4.5)
        # NOTE(review): reads the manip_w key again;
        # "cost_fn/block_push/contact_w" was probably intended.
        self.contact_w = self.params.get_float("cost_fn/block_push/manip_w", default=100.0)
        # self.decay = torch.exp(-torch.arange(0, self.T).type(self.dtype))
        self.decay = torch.ones(self.T,)
        self.world_rep.reset()
        # Defaults for the interactively tunable weights (see display_window).
        self.a_diff_w = 1.0
        self.block_car_dist_w = 1.0
        self.block_car_dist_shift = 2.5
        self.debug_vis = False
        self.debug_with_sliders = False
        if self.debug_vis:
            if self.debug_with_sliders:
                # Tk must own a dedicated thread since mainloop() blocks.
                threading.Thread(target=self.display_window).start()
def display_window(self):
master = tk.Tk()
t = tk.Text(master, height=1)
t.pack()
t.insert(tk.END, "a_diff_w")
t.config(state="disabled")
self.a_diff_scale = tk.Scale(
master, from_=0.0, to=10.0, length=300, orient=tk.HORIZONTAL, resolution=0.1
)
self.a_diff_scale.set(self.a_diff_w)
self.a_diff_scale.pack()
t = tk.Text(master, height=1)
t.pack()
t.insert(tk.END, "block_car_dist_w")
t.config(state="disabled")
self.block_car_dist_scale = tk.Scale(
master, from_=0.0, to=10.0, length=300, orient=tk.HORIZONTAL, resolution=0.1
)
self.block_car_dist_scale.set(self.block_car_dist_w)
self.block_car_dist_scale.pack()
t = tk.Text(master, height=1)
t.pack()
t.insert(tk.END, "block_car_dist_shift")
t.config(state="disabled")
self.block_car_dist_shift_scale = tk.Scale(
master, from_=0.0, to=10.0, length=300, orient=tk.HORIZONTAL, resolution=0.1
)
self.block_car_dist_shift_scale.set(self.block_car_dist_shift)
self.block_car_dist_shift_scale.pack()
t = tk.Text(master, height=1)
t.pack()
t.insert(tk.END, "cost2go_w")
t.config(state="disabled")
self.cost2go_w_scale = tk.Scale(
master, from_=0.0, to=10.0, length=300, orient=tk.HORIZONTAL, resolution=0.1
)
self.cost2go_w_scale.set(self.cost2go_w)
self.cost2go_w_scale.pack()
self.weights_text = tk.Text(master, height=self.K + 2, width=150)
self.weights_text.pack()
tk.mainloop()
    def get_weights(self):
        """Pull the current debug-slider values back into the cost weights.

        No-op unless the slider window is enabled (debug_with_sliders);
        apply() calls this once per evaluation when debug_vis is set.
        """
        if self.debug_with_sliders:
            self.a_diff_w = self.a_diff_scale.get()
            self.block_car_dist_w = self.block_car_dist_scale.get()
            self.block_car_dist_shift = self.block_car_dist_shift_scale.get()
            self.cost2go_w = self.cost2go_w_scale.get()
    def apply(self, poses, *args, **kwargs):
        """Score K rollouts of T poses for the block-pushing task.

        Args:
            poses: (K, T, NPOS) tensor; [:, :, :2] is the car xy,
                [:, :, 2] its heading, and [:, :, 3:5] the block xy
                (per the indexing below -- confirm against the rollout
                generator).

        Returns:
            (costs, False): costs is a (K,) tensor of rollout costs; the
            False flags that these are not backward trajectories.
        """
        assert poses.size() == (self.K, self.T, self.NPOS)
        # Currently the goal is just a place for the block
        # assert goal.size() == (self.NPOS,)
        with self.goal_lock:
            goal = self.goal
        assert goal.size() == (3,)

        if self.debug_vis:
            self.get_weights()  # refresh weights from the Tk sliders

        # "s_" prefix: quantities at the start of the rollout (t == 0).
        s_block_goal_vec = goal[:2] - poses[0, 0, 3:5]  # (2, )
        s_block_goal_dist = s_block_goal_vec.pow(2).sum(dim=0).pow_(0.5)
        s_block_car_vec = poses[0, 0, :2] - poses[0, 0, 3:5]  # (2,)
        s_block_car_dist = s_block_car_vec.pow(2).sum(dim=0).pow_(0.5)
        # Index used for terminal-cost terms. NOTE(review): this divides a
        # distance by a time step to get an index -- confirm units.
        final_idx = min(self.T - 1, int(s_block_goal_dist / self.dt))

        # Angle at the block between the goal direction and the car direction.
        car_goal_angle = s_block_goal_vec.dot(s_block_car_vec)
        car_goal_angle = car_goal_angle.div_(
            torch.norm(s_block_goal_vec) * torch.norm(s_block_car_vec)
        )
        car_goal_angle = car_goal_angle.acos_()
        if car_goal_angle < 0:
            car_goal_angle += 2 * math.pi

        # step 1 if the car is in between block and goal, get to not there.
        # step 2 one past there, turn into the block such that it can be moved straight to the goal
        # step 3, once close enough to the block, use block distance to goal as cost

        # "f_" prefix: per-rollout quantities at final_idx.
        # vector from goal to goal and car respectively (from the final point of the rollout)
        f_block_goal_vec = goal[:2] - poses[:, final_idx, 3:5]  # (K, 2)
        f_block_car_vec = poses[:, final_idx, :2] - poses[:, final_idx, 3:5]  # (K, 2)
        f_block_car_dist = f_block_car_vec.pow(2).sum(dim=1).pow_(0.5)  # (K,)
        f_block_goal_dist = f_block_goal_vec.pow(2).sum(dim=1).pow_(0.5)  # (K,)
        angles = f_block_goal_vec.mul(f_block_car_vec).sum(dim=1)
        angles.div_(f_block_goal_dist).div_(f_block_car_dist).acos_()
        angles[angles < 0] += 2 * math.pi

        if not (
            3.0 / 4.0 * math.pi <= car_goal_angle
            and car_goal_angle <= 5.0 / 4.0 * math.pi
        ):
            # Phase 1 (navigate behind the block): car is not yet on the
            # far side of the block relative to the goal.
            a_diff = (angles - math.pi).abs_()
            # want trajectory that points opposite to block -> goal
            # AND at least 2m (or something like this) away from block
            all_poses = poses.view(self.K * self.T, self.NPOS)
            dist_cost = (
                (all_poses[:, :2] - all_poses[:, 3:5]).pow(2).sum(dim=1).pow_(0.5)
            )
            # Quadratic penalty around the preferred standoff distance.
            dist_cost.sub_(self.block_car_dist_shift).pow_(2)
            # dist_cost = dist_cost.view(self.K, self.T).sum(dim=1)
            dist_cost = dist_cost.view(self.K, self.T)[:, self.T - 1]
            a_diff.mul_(self.a_diff_w)
            dist_cost.mul_(self.block_car_dist_w)
            result = a_diff.add(dist_cost)
            if self.viz_rollouts_fn:
                self.viz_rollouts_fn(
                    result,
                    poses,
                    angles=angles,
                    car_block_angle_diff=a_diff,
                    block_car_dist_cost=dist_cost,
                    block_car_dist=f_block_car_dist,
                )
            if self.debug_vis:
                if self.debug_with_sliders:
                    text = "NAV2BLOCK PHASE\n"
                    text += "result\t\ta_diff\t\tdist_cost\n"
                    for v in zip(result, a_diff, dist_cost):
                        text += "%f\t\t%f\t\t%f\n" % (v[0], v[1], v[2])
                    self.weights_text.insert("1.0", text)
        elif not (
            s_block_car_dist < 0.45
            # and (
            #     car_goal_angle <= 1.0 / 7.0 * math.pi
            #     or car_goal_angle >= 13.0 / 7.0 * math.pi
            # )
        ):
            # Phase 2 (approach): behind the block but not yet in contact range.
            # want trajectory that points in the same direction as the block to the goal
            # AND as close to the block as possible (requires first objective tho)
            block_car_dist = torch.norm(poses[:, 0, :2] - poses[:, 0, 3:5], dim=1)
            # NOTE(review): 0.42 appears to be the car/block contact offset;
            # the computed index could go negative -- confirm.
            block_car_idx = min(final_idx, int((torch.min(block_car_dist) - 0.42) / self.dt))
            dist_cost = (
                (poses[:, block_car_idx, :2] - poses[:, block_car_idx, 3:5]).pow(2).sum(dim=1)
            )  # .pow_(0.5)
            # cost2go = self.value_fn.get_value(poses[:, final_idx, 3:]).mul(
            #     self.cost2go_w
            # )
            # result = cost2go.add(dist_cost)
            result = dist_cost.clone()
            if self.viz_rollouts_fn:
                self.viz_rollouts_fn(
                    result,
                    poses,
                    angles=angles,
                    block_car_dist_cost=dist_cost,
                    block_car_dist=f_block_car_dist,
                )
            if self.debug_vis:
                if self.debug_with_sliders:
                    text = "COST2GO PHASE\n"
                    text += "result\t\tdist_cost\t\tblock_car_idx\t\tmin_block_car_dist\n"
                    for v in zip(result, dist_cost, block_car_dist):
                        text += "%f\t\t%f\t\t%d\t\t%f\n" % (v[0], v[1], block_car_idx, v[2])
                    self.weights_text.insert("1.0", text)
        else:
            # Phase 3 (push): within contact range; drive the block to the goal.
            # cost2go = self.value_fn.get_value(poses[:, final_idx, 3:]).mul(
            #     self.cost2go_w
            # )
            # result = cost2go.add(f_block_goal_dist)
            hidx = int(self.T * min(1.0, self.dist_to_goal(poses[0, 0])))
            # print waypoint, self.old_ref_idx
            # dist = torch.norm(poses[:, self.T - 1, 3:5] - waypoint[:2], dim=1).mul_(self.dist_w)
            # Mean block-to-goal distance over the first hidx steps.
            all_dist = torch.norm(poses[:, :hidx, 3:5] - goal[:2], dim=2)
            traj_dists = torch.sum(all_dist, dim=1).div(hidx).mul_(self.dist_w)
            # # TRIED TO USE THIS FOR KEEPING THE BLOCK CENTERED
            # Lateral offset of the block in the car frame (rotation by -heading).
            car_block_1 = poses[:, :hidx, 3:5] - poses[:, :hidx, :2]
            s = torch.sin(-poses[:, :hidx, 2])
            c = torch.cos(-poses[:, :hidx, 2])
            block_car_y = car_block_1[:, :, 0] * s + car_block_1[:, :, 1] * c
            block_car_y.abs_()
            manipulability = torch.matmul(block_car_y, self.decay[:hidx]).div_(hidx).mul_(self.manip_w)
            ############################
            # CALCULATE MANIPULABILITY #
            ############################
            # summanip = torch.sum(manip[:, 1:hidx], dim=1).div_(-(hidx - 1)).mul_(self.manip_w)
            # contact is 1.0 for rollouts where the block has *separated*
            # from the previous car pose near hidx.
            contact = torch.isclose(poses[:, hidx - 1, 3:5], poses[:, hidx - 2, :2])
            contact = (~(contact[:, 0] & contact[:, 1])).type(self.dtype)
            result = traj_dists.clone()
            result.add_(manipulability)
            # NOTE(review): (1 - contact) is 1 when contact is *kept*, so this
            # adds contact_w to in-contact rollouts -- confirm the sign.
            result.add_(self.contact_w * (1 - contact))
            if self.viz_rollouts_fn:
                self.viz_rollouts_fn(
                    result,
                    poses,
                )
            if self.debug_vis:
                if self.debug_with_sliders:
                    text = "GET TO THE GOAL\n"
                    self.weights_text.insert("1.0", text)
        # raw_input("Press enter:")
        return result, False  # the false is backward trajectories
    def set_goal(self, goal):
        """Install a new goal and forward it to the value function.

        Args:
            goal [(3,) tensor] -- Goal in "world" coordinates

        Returns:
            whatever self.value_fn.set_goal(goal) returns.
        """
        assert goal.size() == (3,)
        with self.goal_lock:
            self.goal = goal
        return self.value_fn.set_goal(goal)
    # Proxy for setting the goal with the complex cost function
    def set_trajectory(self, traj):
        """Adopt the final pose of *traj* as the goal.

        Args:
            traj: sequence of (3,) tensors -- only the last element is used.
        """
        return self.set_goal(traj[-1])
def dist_to_goal(self, state):
# use block, not car as dist to goal
with self.goal_lock:
if self.goal is None:
return False
return self.goal[:2].dist(state[3:5])
def at_goal(self, state):
"""
Args:
state [(3,) tensor] -- Current position in "world" coordinates
"""
with self.goal_lock:
if self.goal is None:
return False
return self.dist_to_goal(state) < self.goal_threshold
    def get_desired_speed(self, desired_speed, state):
        """Return the commanded speed unchanged; this cost does not rescale speed."""
        return desired_speed
| 37.929448 | 103 | 0.559968 | 1,773 | 12,365 | 3.654822 | 0.1348 | 0.05679 | 0.059259 | 0.039506 | 0.45463 | 0.414198 | 0.37284 | 0.341975 | 0.31034 | 0.263889 | 0 | 0.029575 | 0.313627 | 12,365 | 325 | 104 | 38.046154 | 0.733946 | 0.143146 | 0 | 0.294118 | 0 | 0.004525 | 0.04431 | 0.022491 | 0 | 0 | 0 | 0 | 0.013575 | 1 | 0.045249 | false | 0 | 0.022624 | 0.004525 | 0.108597 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d57009adc53facb72827aa979d3b948b07800f00 | 9,629 | py | Python | theano/groundhog/models/LM_model.py | haxzie/deepAPI | 12c11a40074a0a2893102f42859c9e01dc28df37 | [
"MIT"
] | 46 | 2018-09-14T10:33:28.000Z | 2022-03-31T14:06:56.000Z | theano/groundhog/models/LM_model.py | haxzie/deepAPI | 12c11a40074a0a2893102f42859c9e01dc28df37 | [
"MIT"
] | 8 | 2018-10-29T20:10:18.000Z | 2022-03-29T06:04:08.000Z | theano/groundhog/models/LM_model.py | haxzie/deepAPI | 12c11a40074a0a2893102f42859c9e01dc28df37 | [
"MIT"
] | 16 | 2018-11-07T08:20:35.000Z | 2022-02-06T18:19:03.000Z | """
Implementation of a language model class.
TODO: write more documentation
"""
__docformat__ = 'restructedtext en'
__authors__ = ("Razvan Pascanu "
"KyungHyun Cho "
"Caglar Gulcehre ")
__contact__ = "Razvan Pascanu <r.pascanu@gmail>"
import numpy
import itertools
import logging
import json
import theano
import theano.tensor as TT
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from groundhog.utils import id_generator
from groundhog.layers.basic import Model
logger = logging.getLogger(__name__)
class LM_Model(Model):
    def __init__(self, cost_layer=None,
                 sample_fn=None,
                 valid_fn=None,
                 noise_fn=None,
                 clean_before_noise_fn=False,
                 clean_noise_validation=True,
                 weight_noise_amount=0,
                 word_dict=None,
                 need_inputs_for_generating_noise=False,
                 word_dict_src=None,
                 character_level=False,
                 exclude_params_for_norm=None,
                 rng=None):
        """
        Constructs a model, that respects the interface required by the
        trainer class.

        :type cost_layer: groundhog layer
        :param cost_layer: the cost (last) layer of the model

        :type sample_fn: function or None
        :param sample_fn: function used to sample from the model

        :type valid_fn: function or None
        :param valid_fn: function used to compute the validation error on a
            minibatch of examples

        :type noise_fn: function or None
        :param noise_fn: function called to corrupt an input (that
            potentially will be denoised by the model)

        :type clean_before_noise_fn: bool
        :param clean_before_noise_fn: If the weight noise should be removed
            before calling the `noise_fn` to corrupt some input

        :type clean_noise_validation: bool
        :param clean_noise_validation: If the weight noise should be removed
            before calling the validation function

        :type weight_noise_amount: float or theano scalar
        :param weight_noise_amount: weight noise scale (standard deviation
            of the Gaussian from which it is sampled)

        :type word_dict: string or None
        :param word_dict: path to the file describing how to match words (or
            characters) to indices

        :type need_inputs_for_generating_noise: bool
        :param need_inputs_for_generating_noise: flag saying if the shape of
            the inputs affect the shape of the weight noise that is generated
            at each step

        :type word_dict_src: string or None
        :param word_dict_src: similar to indx_word (but for the source
            language)

        :type character_level: bool
        :param character_level: flag used when sampling, saying if we are
            running the model on characters or words

        :type excluding_params_for_norm: None or list of theano variables
        :param excluding_params_for_norm: list of parameters that should not
            be included when we compute the norm of the gradient (for norm
            clipping). Usually the output weights if the output layer is large

        :type rng: numpy random generator
        :param rng: numpy random generator
        """
        super(LM_Model, self).__init__(output_layer=cost_layer,
                                       sample_fn=sample_fn,
                                       word_dict=word_dict,
                                       word_dict_src=word_dict_src,
                                       rng=rng)
        if exclude_params_for_norm is None:
            self.exclude_params_for_norm = []
        else:
            self.exclude_params_for_norm = exclude_params_for_norm
        self.need_inputs_for_generating_noise = need_inputs_for_generating_noise
        self.cost_layer = cost_layer
        self.validate_step = valid_fn
        self.clean_noise_validation = clean_noise_validation
        self.noise_fn = noise_fn
        self.clean_before = clean_before_noise_fn
        self.weight_noise_amount = weight_noise_amount
        self.character_level = character_level
        self.valid_costs = ['cost', 'ppl']
        # Assume a single cost
        # We need to merge these lists
        state_below = self.cost_layer.state_below
        # Number of target words in the batch (mask-aware when available).
        if hasattr(self.cost_layer, 'mask') and self.cost_layer.mask:
            num_words = TT.sum(self.cost_layer.mask)
        else:
            num_words = TT.cast(state_below.shape[0], 'float32')
        scale = getattr(self.cost_layer, 'cost_scale', numpy.float32(1))
        if not scale:
            scale = numpy.float32(1)
        # Fold in log(2) so the reported properties are base-2 (bits).
        scale *= numpy.float32(numpy.log(2))
        # Gradient norm over all params except the excluded (e.g. large
        # output) ones -- used for norm clipping diagnostics.
        grad_norm = TT.sqrt(sum(TT.sum(x ** 2)
                                for x, p in zip(self.param_grads, self.params)
                                if p not in self.exclude_params_for_norm))
        new_properties = [('grad_norm', grad_norm),
                          ('log2_p_word', self.train_cost / num_words / scale),
                          ('log2_p_expl', self.cost_layer.cost_per_sample.mean() / scale)]
        self.properties += new_properties
        # Compile functions that add/remove Gaussian weight noise in place.
        if len(self.noise_params) > 0 and weight_noise_amount:
            if self.need_inputs_for_generating_noise:
                # Noise shapes depend on the inputs, so the compiled
                # functions take the model inputs as arguments.
                inps = self.inputs
            else:
                inps = []
            self.add_noise = theano.function(inps, [], name='add_noise',
                updates=[(p,
                          self.trng.normal(shp_fn(self.inputs),
                                           avg=0,
                                           std=weight_noise_amount,
                                           dtype=p.dtype))
                         for p, shp_fn in
                         zip(self.noise_params,
                             self.noise_params_shape_fn)],
                on_unused_input='ignore')
            self.del_noise = theano.function(inps, [],
                name='del_noise',
                updates=[(p,
                          TT.zeros(shp_fn(self.inputs),
                                   p.dtype))
                         for p, shp_fn in
                         zip(self.noise_params,
                             self.noise_params_shape_fn)],
                on_unused_input='ignore')
        else:
            self.add_noise = None
            self.del_noise = None
def validate(self, data_iterator, train=False):
cost = 0
n_batches = 0
n_steps = 0
if self.del_noise and self.clean_noise_validation:
if self.need_inputs_for_generating_noise:
self.del_noise(**vals)
else: self.del_noise()
for vals in data_iterator:
n_batches += 1
if isinstance(vals, dict):
val = vals.values()[0]
if val.ndim ==3: n_steps += val.shape[0]*val.shape[1]
else: n_steps += val.shape[0]
_rvals = self.validate_step( **vals)
cost += _rvals
else:
# not dict
if vals[0].ndim ==3:
n_steps += vals[0].shape[0]*vals[1].shape[1]
else: n_steps += vals[0].shape[0]
if self.del_noise and self.clean_noise_validation:
if self.need_inputs_for_generating_noise:
self.del_noise(*vals)
else: self.del_noise()
inps = list(vals)
_rvals = self.validate_step(*inps)
_cost += _rvals
n_steps = numpy.log(2.)*n_steps
cost = cost / n_steps
entropy = cost# (numpy.log(2.))
ppl = 10**(numpy.log(2)*cost/numpy.log(10))
return [('cost',entropy), ('ppl',ppl)]
    def load_dict(self, opts):
        """
        Loading the dictionary that goes from indices to actual words

        Each dict file holds one JSON line mapping word -> index; it is
        inverted here so sampled indices can be mapped back to words.
        `opts` supplies the special-symbol indices and the OOV string.
        """
        data_dict = json.loads(open(self.word_dict, "r").readline())
        self.word_indxs = {v: k for k, v in data_dict.items()}
        self.word_indxs[opts['null_sym_target']] = '</s>'
        self.word_indxs[opts['unk_sym_target']] = opts['oov']
        # Same inversion for the source-language dictionary.
        data_dict = json.loads(open(self.word_dict_src, "r").readline())
        self.word_indxs_src = {v: k for k, v in data_dict.items()}
        self.word_indxs_src[opts['null_sym_source']] = '</s>'
        self.word_indxs_src[opts['unk_sym_source']] = opts['oov']
    def get_samples(self, length=30, temp=1, *inps):
        """Sample from the model, lazily loading the word dictionaries.

        NOTE(review): self.load_dict() is called without the required
        `opts` argument and would raise TypeError on the first call --
        confirm the intended usage.
        """
        if not hasattr(self, 'word_indxs'):
            self.load_dict()
        self._get_samples(self, length, temp, *inps)
def perturb(self, *args, **kwargs):
if args:
inps = args
assert not kwargs
if kwargs:
inps = kwargs
assert not args
if self.noise_fn:
if self.clean_before and self.del_noise:
if self.need_inputs_for_generating_noise:
self.del_noise(*args, **kwargs)
else: self.del_noise()
inps = self.noise_fn(*args, **kwargs)
if self.add_noise:
if self.need_inputs_for_generating_noise:
self.add_noise(*args, **kwargs)
else: self.add_noise()
return inps
| 40.120833 | 99 | 0.555094 | 1,145 | 9,629 | 4.4131 | 0.221834 | 0.021373 | 0.026123 | 0.045518 | 0.273105 | 0.172769 | 0.143677 | 0.136948 | 0.123887 | 0.114387 | 0 | 0.007403 | 0.368678 | 9,629 | 239 | 100 | 40.288703 | 0.82382 | 0.220895 | 0 | 0.148649 | 0 | 0 | 0.0383 | 0 | 0 | 0 | 0 | 0.004184 | 0.013514 | 1 | 0.033784 | false | 0 | 0.060811 | 0 | 0.114865 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5749c06941decb603c446b4a6e5e526c6140f9d | 2,099 | py | Python | sqlib.py | OwnerHunter/round2bot | 634bf2592216903c12636becdd91141d4b0588f7 | [
"MIT"
] | null | null | null | sqlib.py | OwnerHunter/round2bot | 634bf2592216903c12636becdd91141d4b0588f7 | [
"MIT"
] | null | null | null | sqlib.py | OwnerHunter/round2bot | 634bf2592216903c12636becdd91141d4b0588f7 | [
"MIT"
] | null | null | null | import sqlite3
class Table:
    """Thin wrapper over one SQLite table in ``data.db``.

    Rows are addressed by a text ``id`` column; every other column
    defaults to 0 when not supplied. Table and column names come from
    trusted code, not user input, so they are formatted directly into
    the SQL; values always go through named parameters.
    """

    def __init__(self, table, columns: tuple):
        self.conn = sqlite3.connect('data.db')
        self.c = self.conn.cursor()
        self.table = table
        self.columns = columns

    def get(self, id_str, columns: str = '*'):
        """Return the row with the given id as a tuple, or None."""
        self.c.execute("SELECT {0} FROM {1} WHERE id=:id".format(columns, self.table),
                       {'id': id_str})
        return self.c.fetchone()

    def get_all(self, columns: str = '*'):
        """Return every row of the table as a list of tuples."""
        self.c.execute("SELECT {0} FROM {1}".format(columns, self.table))
        return self.c.fetchall()

    def add_element(self, id_str, values: dict = None):
        """Insert a new row; missing columns default to 0. Returns the row dict."""
        if values is None:
            values = {}
        values['id'] = id_str
        for column in self.columns:
            values.setdefault(column, 0)  # sets default value 0
        # Build named placeholders with join rather than str(tuple) plus
        # quote-stripping, which broke for single-column tables and for
        # identifiers containing quotes or parentheses.
        placeholders = ', '.join(':' + col for col in self.columns)
        with self.conn:
            self.c.execute(
                "INSERT INTO {0} VALUES ({1})".format(self.table, placeholders),
                values,
            )
        return values

    def update(self, id_str, values: dict):
        """Update the given columns of row *id_str*. Returns the values dict."""
        values['id'] = id_str
        # Same join-based construction as add_element; the previous
        # str(tuple) hack emitted a trailing comma for a single column.
        assignments = ', '.join(col + ' = :' + col for col in values)
        with self.conn:
            self.c.execute(
                "UPDATE {0} SET {1} WHERE id=:id".format(self.table, assignments),
                values,
            )
        return values

    def add_to_value(self, id_str, column: str, val_to_add):
        """Add *val_to_add* to one numeric column and return the new value."""
        current = self.get(id_str, column)[0]
        new = current + val_to_add
        with self.conn:
            self.update(id_str, {column: new})
        return new

    def sort(self, column: str):
        """Return [(id, value)] pairs sorted descending by *column*."""
        data_list = self.get_all('id, ' + column)
        data_list.sort(key=lambda element: element[1], reverse=True)
        return data_list
# Concrete tables used by the bot; column order must match the schema in data.db.
tickets = Table('tickets', ('id', 'author', 'server', 'info', 'added', 'closed'))
servers = Table('servers', ('id', 'prefix', 'channel', 'role'))
| 30.42029 | 102 | 0.519771 | 252 | 2,099 | 4.230159 | 0.281746 | 0.042214 | 0.033771 | 0.045028 | 0.286116 | 0.174484 | 0.129456 | 0.129456 | 0.06379 | 0 | 0 | 0.009979 | 0.331586 | 2,099 | 68 | 103 | 30.867647 | 0.749822 | 0.009528 | 0 | 0.25 | 0 | 0 | 0.09533 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.134615 | false | 0 | 0.019231 | 0 | 0.288462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d579c5b3c6c69430ad1dba477769dc68edc6b543 | 3,114 | py | Python | plagiarismdetector/tokenizer.py | singhay/plagiarism-detector | fca69af56974bb1ff6e62cfa81eea521df659b61 | [
"MIT"
] | null | null | null | plagiarismdetector/tokenizer.py | singhay/plagiarism-detector | fca69af56974bb1ff6e62cfa81eea521df659b61 | [
"MIT"
] | 1 | 2018-02-27T02:34:26.000Z | 2018-02-27T02:34:26.000Z | plagiarismdetector/tokenizer.py | singhay/plagiarism-detector | fca69af56974bb1ff6e62cfa81eea521df659b61 | [
"MIT"
] | null | null | null | import re
class TreebankWordTokenizer:
    """Regex-based word tokenizer in the style of the Penn Treebank.

    It splits standard contractions (``they'll`` -> ``they 'll``), treats
    most punctuation characters as separate tokens, splits off commas and
    single quotes that precede whitespace, and separates sentence-final
    periods.

        >>> from plagiarismdetector.tokenizer import TreebankWordTokenizer
        >>> s = "They'll save and invest more."
        >>> TreebankWordTokenizer().tokenize(s)
        ['They', "'ll", 'save', 'and', 'invest', 'more', '.']
        >>> s = "hi, my name can't hello,"
        >>> TreebankWordTokenizer().tokenize(s)
        ['hi', ',', 'my', 'name', 'ca', "n't", 'hello', ',']
    """

    STARTING_QUOTES = [
        (re.compile(r'^\"'), r'``'),
        (re.compile(r'(``)'), r' \1 '),
        (re.compile(r'([ (\[{<])"'), r'\1 `` '),
    ]

    PUNCTUATION = [
        (re.compile(r'([:,])([^\d])'), r' \1 \2'),
        (re.compile(r'([:,])$'), r' \1 '),
        (re.compile(r'\.\.\.'), r' ... '),
        (re.compile(r'[;@#$%&]'), r' \g<0> '),
        (re.compile(r'([^\.])(\.)([\]\)}>"\']*)\s*$'), r'\1 \2\3 '),  # Handles the final period.
        (re.compile(r'[?!]'), r' \g<0> '),
        (re.compile(r"([^'])' "), r"\1 ' "),
    ]

    PARENS_BRACKETS = (re.compile(r'[\]\[\(\)\{\}\<\>]'), r' \g<0> ')

    DOUBLE_DASHES = (re.compile(r'--'), r' -- ')

    ENDING_QUOTES = [
        (re.compile(r'"'), " '' "),
        (re.compile(r'(\S)(\'\')'), r'\1 \2 '),
        (re.compile(r"([^' ])('[sS]|'[mM]|'[dD]|') "), r"\1 \2 "),
        (re.compile(r"([^' ])('ll|'LL|'re|'RE|'ve|'VE|n't|N'T) "), r"\1 \2 "),
    ]

    # Adapted from Robert MacIntyre's tokenizer. Compiled here but not
    # applied by tokenize(); kept for API compatibility.
    _contractions = [r"(?i)\b(can)(?#X)(not)\b",
                     r"(?i)\b(d)(?#X)('ye)\b",
                     r"(?i)\b(gim)(?#X)(me)\b",
                     r"(?i)\b(gon)(?#X)(na)\b",
                     r"(?i)\b(got)(?#X)(ta)\b",
                     r"(?i)\b(lem)(?#X)(me)\b",
                     r"(?i)\b(mor)(?#X)('n)\b",
                     r"(?i)\b(wan)(?#X)(na)\s",
                     r"(?i) ('t)(?#X)(is)\b",
                     r"(?i) ('t)(?#X)(was)\b"]
    CONTRACTIONS = list(map(re.compile, _contractions))

    def tokenize(self, text):
        """Apply every substitution group in order, then split on whitespace."""
        rule_groups = (
            self.STARTING_QUOTES,
            self.PUNCTUATION,
            [self.PARENS_BRACKETS, self.DOUBLE_DASHES],
            self.ENDING_QUOTES,
        )
        for group in rule_groups:
            for pattern, replacement in group:
                text = pattern.sub(replacement, text)
        return text.split()
| 35.793103 | 97 | 0.477521 | 376 | 3,114 | 3.917553 | 0.316489 | 0.10387 | 0.108622 | 0.074678 | 0.326544 | 0.324508 | 0.190767 | 0.145961 | 0.138493 | 0.076035 | 0 | 0.008043 | 0.28131 | 3,114 | 86 | 98 | 36.209302 | 0.650134 | 0.295119 | 0 | 0.102041 | 0 | 0 | 0.227401 | 0.120057 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040816 | false | 0.020408 | 0.020408 | 0 | 0.244898 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d57c7e00fb1f3b54590c0aed7560a46aa3cf7ab3 | 628 | py | Python | test_project/rename_forward/urls.py | epoiate/django-autocomplete-light | 6cefd5ea73d1ef2c1c800cd1fdcf6cc6fbe27886 | [
"MIT"
] | 1,368 | 2015-01-03T09:52:33.000Z | 2022-03-27T09:06:00.000Z | test_project/rename_forward/urls.py | epoiate/django-autocomplete-light | 6cefd5ea73d1ef2c1c800cd1fdcf6cc6fbe27886 | [
"MIT"
] | 919 | 2015-01-01T05:17:48.000Z | 2022-03-25T22:41:14.000Z | test_project/rename_forward/urls.py | epoiate/django-autocomplete-light | 6cefd5ea73d1ef2c1c800cd1fdcf6cc6fbe27886 | [
"MIT"
] | 469 | 2015-01-19T21:40:30.000Z | 2022-03-26T17:27:40.000Z | from dal import autocomplete
from django.conf.urls import url
from .models import TModel
class LinkedDataView(autocomplete.Select2QuerySetView):
    """Autocomplete view gated by two forwarded form fields.

    The queryset is empty unless the forwarded 'secret' equals 42, and is
    narrowed to the forwarded 'possessor' owner when one is given.
    """

    def get_queryset(self):
        qs = super(LinkedDataView, self).get_queryset()
        if self.forwarded.get('secret', None) != 42:
            return qs.none()
        possessor = self.forwarded.get('possessor', None)
        return qs.filter(owner_id=possessor) if possessor else qs
# Single route for the linked-data autocomplete endpoint; the URL name is
# what widgets reference when wiring up this view.
urlpatterns = [
    url(
        '^linked_data/$',
        LinkedDataView.as_view(model=TModel),
        name='linked_data_rf'
    ),
]
| 20.258065 | 57 | 0.632166 | 69 | 628 | 5.652174 | 0.536232 | 0.061538 | 0.082051 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006508 | 0.265924 | 628 | 30 | 58 | 20.933333 | 0.839479 | 0 | 0 | 0 | 0 | 0 | 0.068471 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.15 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d57d8c0b27441b3e6a130ef67c3109a4e8ff4d0b | 1,319 | py | Python | script/data_annotation/annotate_cluster.py | carushi/Catactor | 27d35261249daf695659f2f329aa470922f60922 | [
"MIT"
] | null | null | null | script/data_annotation/annotate_cluster.py | carushi/Catactor | 27d35261249daf695659f2f329aa470922f60922 | [
"MIT"
] | null | null | null | script/data_annotation/annotate_cluster.py | carushi/Catactor | 27d35261249daf695659f2f329aa470922f60922 | [
"MIT"
] | null | null | null | import os
import pandas as pd
import sys
# Aggregate per-cell metadata CSVs for one GEO series (argv[1]) and write a
# cluster -> celltype annotation table.
# NOTE(review): the nesting below is reconstructed from variable usage in a
# whitespace-mangled source -- confirm against the original script.
gse_number = sys.argv[1]
for dirpath, dnames, fnames in os.walk("./" + gse_number + '/'):
    all = None  # concatenated (cluster, celltype) rows across metadata files
    for fname in fnames:
        tail = 'celltype_annotation'
        if tail in fname:
            continue  # skip previously generated annotation output
        if 'cell_ng' in fname and 'meta' in fname:
            df = pd.read_csv(os.path.join(gse_number, fname)).loc[:, ['cluster', 'celltype']]
            print(df.head())
            print(set(df.loc[~pd.isnull(df.loc[:, 'cluster']), 'cluster'].values))
            if all is not None:
                print(all.head())
                all = pd.concat([all.reset_index(drop=True), df.reset_index(drop=True)], axis=0, ignore_index=True)
            else:
                all = df
            print(df.head())
    # Group by (cluster, celltype) counts, then take .max() per cluster --
    # NOTE(review): this appears intended to pick a representative celltype
    # per cluster; verify the .max() semantics are what was wanted.
    pdf = pd.DataFrame(all.loc[:, ['cluster', 'celltype']].groupby(['cluster', 'celltype']).size().reset_index().groupby(['cluster']).max())
    # Recompute with missing celltypes mapped to the literal string 'NA'.
    all.loc[pd.isnull(all.loc[:, 'celltype']), 'celltype'] = 'NA'
    adf = pd.DataFrame(all.loc[:, ['cluster', 'celltype']].groupby(['cluster', 'celltype']).size().reset_index().groupby(['cluster']).max())
    print(pdf)
    print(adf)
    # Fill in clusters that only appear once NA celltypes are included.
    for index, row in adf.iterrows():
        if index not in pdf.index:
            pdf.loc[index] = adf.loc[index, :]
    pdf = pdf.sort_index()
# Writes the table from the last directory visited by os.walk.
pdf.to_csv(gse_number + '_cluster_celltype_annotation.csv')
d581a27c402119743d0fd709f001a408b09eda63 | 1,092 | py | Python | blink_dl.py | CodeYouMust/blink_cam_vision | de5838e35d15a4ba3d4cd38b633e3f35decbafc5 | [
"BSD-3-Clause"
] | 1 | 2019-09-07T21:31:03.000Z | 2019-09-07T21:31:03.000Z | blink_dl.py | CodeYouMust/blink_cam_vision | de5838e35d15a4ba3d4cd38b633e3f35decbafc5 | [
"BSD-3-Clause"
] | null | null | null | blink_dl.py | CodeYouMust/blink_cam_vision | de5838e35d15a4ba3d4cd38b633e3f35decbafc5 | [
"BSD-3-Clause"
] | null | null | null | from fire import Fire
from cameras.blink_api import BlinkApi
import time
from util.polite_access import PoliteAccess
# Minimum interval between Blink API accesses, in seconds (90 s).
POLITE_DL_WAIT_SECONDS = 60 * 1.5
# Rate-limit tracker shared by the functions below; presumably persists the
# last access time under the 'blink-api-access' key — confirm in PoliteAccess.
polite = PoliteAccess('blink-api-access')
def dl_blink_videos(user, pwd, fldr):
    """Log in to the Blink API and download every video into *fldr*."""
    api = BlinkApi()
    api.login(user, pwd)
    api.dl_all_videos(fldr)
    # Record the access time so polite_dl_blink_videos can rate-limit later.
    polite.set_access_time()
def polite_dl_blink_videos(user, pwd, fldr, skip_if_frequent=True):
    '''
    Prevent frequent access to blink. Helpful in CRON
    '''
    # Only download when the rate limiter says enough time has passed
    # (or after sleeping, when skip_if_frequent is False).
    if can_dl_polite_wait(skip_if_frequent):
        dl_blink_videos(user, pwd, fldr)
def can_dl_polite_wait(skip_if_frequent):
    """Return True when a download should proceed.

    When the last access is too recent: skip (return False) if
    *skip_if_frequent*, otherwise sleep out the remaining time and
    return True.
    """
    sleep_seconds = polite.calc_sleep_seconds(POLITE_DL_WAIT_SECONDS)
    if not sleep_seconds:
        return True
    if skip_if_frequent:
        print('Too soon to DL by {} seconds. Skip.'.format(sleep_seconds))
        return False
    print('Too soon to DL. Sleeping: {}'.format(sleep_seconds))
    time.sleep(sleep_seconds)
    return True
if __name__ == '__main__':
    # CLI entry point: python-fire maps dl_blink_videos's parameters to flags.
    Fire(dl_blink_videos)
| 25.395349 | 78 | 0.690476 | 160 | 1,092 | 4.3625 | 0.31875 | 0.120344 | 0.074499 | 0.073066 | 0.232092 | 0.186246 | 0.083095 | 0 | 0 | 0 | 0 | 0.004706 | 0.221612 | 1,092 | 42 | 79 | 26 | 0.816471 | 0.044872 | 0 | 0.066667 | 0 | 0 | 0.084713 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.133333 | 0 | 0.333333 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d583ddd9da1e8a66b1c4175158f5b3a31784216e | 622 | py | Python | component/logging_.py | laashub-sua/businesser | 5c50daa561fe35c74d1ce1a2ba0e645f769a165c | [
"Apache-2.0"
] | 1 | 2020-12-17T10:56:44.000Z | 2020-12-17T10:56:44.000Z | component/logging_.py | laashub-sua/businesser | 5c50daa561fe35c74d1ce1a2ba0e645f769a165c | [
"Apache-2.0"
] | 1 | 2020-12-30T05:53:34.000Z | 2020-12-30T05:53:34.000Z | component/logging_.py | laashub-sua/businesser | 5c50daa561fe35c74d1ce1a2ba0e645f769a165c | [
"Apache-2.0"
] | null | null | null | import logging
import os
from logging.handlers import TimedRotatingFileHandler
# Configure the root logger once at import time; INFO and above are emitted.
logging.basicConfig(
    level=logging.INFO
)
def do_init():
    """Attach a daily-rotating file handler to the Flask app logger.

    Creates a local ``logs`` directory when missing and rotates
    ``logs/flask.log`` once per day, keeping 15 backups.
    """
    from __init__ import app  # deferred import to avoid a circular dependency

    if not os.path.exists("logs"):
        os.mkdir("logs")
    log_format = logging.Formatter(
        "[%(asctime)s][%(filename)s:%(lineno)d][%(levelname)s][%(thread)d] - %(message)s")
    file_handler = TimedRotatingFileHandler(
        "logs/flask.log", when="D", interval=1, backupCount=15,
        encoding="UTF-8", delay=False, utc=True)
    app.logger.addHandler(file_handler)
    file_handler.setFormatter(log_format)
def info(msg):
    """Module-level convenience wrapper around logging.info()."""
    return logging.info(msg)
| 23.923077 | 90 | 0.672026 | 76 | 622 | 5.434211 | 0.618421 | 0.053269 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007843 | 0.180064 | 622 | 25 | 91 | 24.88 | 0.801961 | 0 | 0 | 0 | 0 | 0.052632 | 0.172026 | 0.104502 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.210526 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d58403c1fa4c0290dcdfef391ce8c75ae48bc48f | 944 | py | Python | backend/user/filters.py | sonpham08/dj_angular | 17881978726969c31c61febc51f4b8a552323873 | [
"MIT"
] | null | null | null | backend/user/filters.py | sonpham08/dj_angular | 17881978726969c31c61febc51f4b8a552323873 | [
"MIT"
] | 9 | 2020-06-05T21:28:57.000Z | 2022-02-12T12:30:39.000Z | backend/user/filters.py | sonpham08/dj_angular | 17881978726969c31c61febc51f4b8a552323873 | [
"MIT"
] | null | null | null | import django_filters
from .models import User
import coreapi
import coreschema
from django_filters.rest_framework import DjangoFilterBackend
class UserFilter(DjangoFilterBackend):
    """
    Overrides get_schema_fields() to show filter_fields in Swagger.
    """

    def get_schema_fields(self, view):
        assert coreapi is not None, "coreapi must be installed to use `get_schema_fields()`"
        assert coreschema is not None, "coreschema must be installed to use `get_schema_fields()`"
        # Start from the backend's own schema fields, then append any
        # filter_fields the view declares.
        schema_fields = list(super().get_schema_fields(view))
        if hasattr(view, "filter_fields"):
            schema_fields += view.filter_fields
        return [
            coreapi.Field(
                name=field_name,
                location='query',
                required=False,
                type='string',
            )
            for field_name in schema_fields
        ]
| 28.606061 | 70 | 0.608051 | 101 | 944 | 5.524752 | 0.475248 | 0.080645 | 0.134409 | 0.060932 | 0.125448 | 0.125448 | 0.125448 | 0.125448 | 0 | 0 | 0 | 0 | 0.318856 | 944 | 32 | 71 | 29.5 | 0.867807 | 0.110169 | 0 | 0.083333 | 0 | 0 | 0.163835 | 0.050971 | 0 | 0 | 0 | 0 | 0.083333 | 1 | 0.041667 | false | 0 | 0.208333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5845720b89f922c6865488be9b0fe6954b474be | 1,089 | py | Python | ejemplos/7 hola mundo con imagenes/hola.py | gentooza/taller-licenciado-software | 421ab6d23a945ef71b5a31c9d0a1c47b75a3ebac | [
"CC-BY-3.0"
] | null | null | null | ejemplos/7 hola mundo con imagenes/hola.py | gentooza/taller-licenciado-software | 421ab6d23a945ef71b5a31c9d0a1c47b75a3ebac | [
"CC-BY-3.0"
] | null | null | null | ejemplos/7 hola mundo con imagenes/hola.py | gentooza/taller-licenciado-software | 421ab6d23a945ef71b5a31c9d0a1c47b75a3ebac | [
"CC-BY-3.0"
] | null | null | null | #!/bin/python3
'''
Copyright 2022 Joaquín Cuéllar.
This file is part of Hola Mundo Especial.
Hola Mundo Especial is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free Software Foundation,
either version 3 of the License, or (at your option) any later version.
Hola Mundo Especial is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with Hola Mundo Especial.
If not, see <https://www.gnu.org/licenses/>.
'''
import tkinter as tk
from PIL import ImageTk, Image
import os
# Build a minimal Tk window: a greeting label on top and an image below.
root = tk.Tk()
message = tk.Label(root, text="Hola mundo!")
message.pack()
# PIL loads the PNG and ImageTk wraps it for Tk display.
# NOTE: `img` must stay referenced at module level, otherwise Python would
# garbage-collect the image and Tk would show a blank label.
img = ImageTk.PhotoImage(Image.open("images/hola.png"))
panel = tk.Label(root, image = img)
panel.pack(side = "bottom", fill = "both", expand = "yes")
# Blocks until the window is closed.
root.mainloop()
| 33 | 102 | 0.724518 | 168 | 1,089 | 4.696429 | 0.60119 | 0.057034 | 0.086185 | 0.072243 | 0.103929 | 0.070976 | 0 | 0 | 0 | 0 | 0 | 0.006857 | 0.196511 | 1,089 | 32 | 103 | 34.03125 | 0.894857 | 0.666667 | 0 | 0 | 0 | 0 | 0.125402 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.3 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5846b72013188d68b2e5949bd8de4747ea8754d | 1,824 | py | Python | plugins/holland.lib.lvm/tests/xfs/test_snapshot.py | a5a351e7/holland | 58a12a5ce10206eed9434ab42b02217de29784bb | [
"BSD-3-Clause"
] | 1 | 2019-06-06T01:07:34.000Z | 2019-06-06T01:07:34.000Z | plugins/holland.lib.lvm/tests/xfs/test_snapshot.py | a5a351e7/holland | 58a12a5ce10206eed9434ab42b02217de29784bb | [
"BSD-3-Clause"
] | null | null | null | plugins/holland.lib.lvm/tests/xfs/test_snapshot.py | a5a351e7/holland | 58a12a5ce10206eed9434ab42b02217de29784bb | [
"BSD-3-Clause"
] | 2 | 2015-12-04T12:17:59.000Z | 2022-03-23T07:22:02.000Z | import shutil
from nose.tools import *
from holland.lib.lvm import LogicalVolume
from holland.lib.lvm.snapshot import *
from tests.constants import *
class TestSnapshot(object):
    """Integration tests for the Snapshot state machine on an LVM volume.

    Requires the TEST_VG/TEST_LV logical volume (from tests.constants) to
    exist on the host; snapshots are mounted under a per-test temp dir.
    """

    def setup(self):
        # nose-style per-test fixture: scratch mount point for the snapshot
        self.tmpdir = tempfile.mkdtemp()

    def teardown(self):
        shutil.rmtree(self.tmpdir)

    def _make_snapshot(self):
        """Return (lv, snapshot): the test LV plus a 1-extent Snapshot of it.

        Extracted because all three tests shared this setup verbatim.
        """
        lv = LogicalVolume.lookup('%s/%s' % (TEST_VG, TEST_LV))
        name = lv.lv_name + '_snapshot'
        size = 1  # extent
        return lv, Snapshot(name, size, self.tmpdir)

    def test_snapshot_fsm(self):
        lv, snapshot = self._make_snapshot()
        snapshot.start(lv)

    def test_snapshot_fsm_with_callbacks(self):
        lv, snapshot = self._make_snapshot()

        def handle_event(event, *args, **kwargs):
            pass

        snapshot.register('pre-mount', handle_event)
        snapshot.register('post-mount', handle_event)
        snapshot.start(lv)

    def test_snapshot_fsm_with_failures(self):
        lv, snapshot = self._make_snapshot()

        def bad_callback(event, *args, **kwargs):
            raise Exception("Oooh nooo!")

        # Every event must propagate a callback failure, and the handler
        # must be fully unregistered afterwards.
        for evt in ('initialize', 'pre-snapshot', 'post-snapshot',
                    'pre-mount', 'post-mount', 'pre-unmount', 'post-unmount',
                    'pre-remove', 'post-remove', 'finish'):
            snapshot.register(evt, bad_callback)
            assert_raises(CallbackFailuresError, snapshot.start, lv)
            snapshot.unregister(evt, bad_callback)
            if snapshot.sigmgr._handlers:
                # BUG FIX: the format argument was passed to Exception()
                # instead of being applied with the % operator.
                raise Exception("WTF. sigmgr handlers still exist when checking event => %r" % evt)
| 34.415094 | 98 | 0.616776 | 215 | 1,824 | 5.093023 | 0.334884 | 0.032877 | 0.035616 | 0.049315 | 0.357991 | 0.357991 | 0.357991 | 0.357991 | 0.290411 | 0.290411 | 0 | 0.002229 | 0.262061 | 1,824 | 52 | 99 | 35.076923 | 0.811293 | 0.010965 | 0 | 0.341463 | 0 | 0 | 0.129444 | 0 | 0 | 0 | 0 | 0 | 0.02439 | 1 | 0.170732 | false | 0.02439 | 0.121951 | 0 | 0.317073 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d585e6b9ba2ab30b151e86f51487d51f7096feb6 | 4,094 | py | Python | src/lecture10/output.py | wakky927/Computational-Engineering-B | 3720d96668a32dc73f38ed0bc8afe4705452de9e | [
"MIT"
] | 1 | 2021-05-03T09:11:35.000Z | 2021-05-03T09:11:35.000Z | src/lecture10/output.py | wakky927/Computational-Engineering-B | 3720d96668a32dc73f38ed0bc8afe4705452de9e | [
"MIT"
] | null | null | null | src/lecture10/output.py | wakky927/Computational-Engineering-B | 3720d96668a32dc73f38ed0bc8afe4705452de9e | [
"MIT"
] | null | null | null | import os
import numpy as np
def grid(xp, yp, m, n, dt):
    """Save grid coordinates (xp stacked over yp) as a CSV file.

    Output goes to ../../data/lecture10/dt_<dt>/<m>_<n>/grid.csv;
    directories are created as needed.
    """
    out_dir = f'../../data/lecture10/dt_{round(dt, 3)}/{m}_{n}'
    os.makedirs(out_dir, exist_ok=True)
    coords = np.stack([xp, yp])
    np.savetxt(out_dir + '/grid.csv', coords, delimiter=',', fmt='%.10f')
def solution(p, u, v):
    """Print the velocity components and the pressure field for inspection."""
    for label, field in (("velocity u", u), ("velocity v", v), ("pressure", p)):
        print(f"\n{label}")
        print(field)
def divergent(p, u, v, dx, dy, m, n, dt):
    """Compute div(u, v) with second-order central differences and save it.

    The divergence is evaluated on interior nodes (1..m, 1..n) of a fixed
    202x202 array; boundary entries stay zero. Result is written to
    ../../data/lecture10/dt_<dt>/<m>_<n>/divergent.csv. `p` is unused.
    """
    md = 202
    nd = 202
    div = np.zeros((md, nd))
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            du_dx = (u[i + 1][j] - u[i - 1][j]) / dx / 2
            dv_dy = (v[i][j + 1] - v[i][j - 1]) / dy / 2
            div[i][j] = du_dx + dv_dy

    out_dir = f'../../data/lecture10/dt_{round(dt, 3)}/{m}_{n}'
    os.makedirs(out_dir, exist_ok=True)
    np.savetxt(out_dir + '/divergent.csv', div, delimiter=',', fmt='%.10f')
def solution_post(p, u, v, m, n, dt):
    """Write pressure and cell-centered velocity fields to CSV files.

    The "interpolation at the p-center grid" is a plain copy here
    (collocated grid); boundary rows/columns are extrapolated from the
    nearest interior node, corners included. Files go to
    ../../data/lecture10/dt_<dt>/<m>_<n>/.
    """
    md = 202
    nd = 202
    u_cnt = np.zeros((md, nd))
    v_cnt = np.zeros((md, nd))

    # Copy interior values onto the cell-centered arrays.
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            u_cnt[i][j] = u[i][j]
            v_cnt[i][j] = v[i][j]

    # Rows 0 and m+1: mirror the adjacent interior row.
    for j in range(1, n + 1):
        u_cnt[0][j] = u_cnt[1][j]
        v_cnt[0][j] = v_cnt[1][j]
        u_cnt[m + 1][j] = u_cnt[m][j]
        v_cnt[m + 1][j] = v_cnt[m][j]

    # Columns 0 and n+1 (all rows, so the corners get filled too).
    for i in range(m + 2):
        u_cnt[i][0] = u_cnt[i][1]
        v_cnt[i][0] = v_cnt[i][1]
        u_cnt[i][n + 1] = u_cnt[i][n]
        v_cnt[i][n + 1] = v_cnt[i][n]

    out_dir = f'../../data/lecture10/dt_{round(dt, 3)}/{m}_{n}'
    os.makedirs(out_dir, exist_ok=True)
    for fname, field in (('velocity_u.csv', u_cnt),
                         ('velocity_v.csv', v_cnt),
                         ('pressure.csv', p)):
        np.savetxt(out_dir + '/' + fname, field, delimiter=',', fmt='%.10f')
def paraview(p, xp, yp, m, n, u, v, dt):
    """Export grid points, velocity vectors and pressure as a legacy VTK file.

    Produces an ASCII STRUCTURED_GRID dataset (z fixed at 0 for the 2D
    flow) at ../../data/lecture10/dt_<dt>/<m>_<n>/output_paraview.vtk.
    """
    out_dir = f'../../data/lecture10/dt_{round(dt, 3)}/{m}_{n}'
    os.makedirs(out_dir, exist_ok=True)

    lines = [
        "# vtk DataFile Version 3.0\n",
        "2D flow\n",
        "ASCII\n",
        "DATASET STRUCTURED_GRID\n",
        f"DIMENSIONS {m} {n} 1\n",
        f"POINTS {m * n} float\n",
    ]
    # Grid point coordinates, row-major over j then i.
    for j in range(1, n + 1):
        for i in range(1, m + 1):
            lines.append(f"{round(xp[i], 4)} {round(yp[j], 4)} 0.0000\n")
    lines.append(f"POINT_DATA {m * n}\n")
    # Velocity vector field.
    lines.append("VECTORS velocity float\n")
    for j in range(1, n + 1):
        for i in range(1, m + 1):
            lines.append(f"{round(u[i][j], 4)} {round(v[i][j], 4)} 0.0000\n")
    # Pressure scalar field.
    lines.append("SCALARS pressure float\n")
    lines.append("LOOKUP_TABLE default\n")
    for j in range(1, n + 1):
        for i in range(1, m + 1):
            lines.append(f"{round(p[i][j], 4)}\n")

    with open(out_dir + '/output_paraview.vtk', mode='w') as f:
        f.writelines(lines)
| 28.430556 | 79 | 0.445774 | 737 | 4,094 | 2.385346 | 0.118046 | 0.021615 | 0.063709 | 0.091013 | 0.627986 | 0.512514 | 0.449374 | 0.40785 | 0.40785 | 0.39306 | 0 | 0.057541 | 0.316561 | 4,094 | 143 | 80 | 28.629371 | 0.570765 | 0.172692 | 0 | 0.375 | 0 | 0.090909 | 0.275483 | 0.140267 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056818 | false | 0 | 0.022727 | 0 | 0.136364 | 0.068182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d586e0b9a83c43e39d9998f70d6cc8f9eb25632d | 1,655 | py | Python | mmaction/core/utils/dist_utils.py | sovrasov/mmaction2 | 055625bf6d6e06e9f811cc4f8b0332c18cebc98c | [
"Apache-2.0"
] | null | null | null | mmaction/core/utils/dist_utils.py | sovrasov/mmaction2 | 055625bf6d6e06e9f811cc4f8b0332c18cebc98c | [
"Apache-2.0"
] | null | null | null | mmaction/core/utils/dist_utils.py | sovrasov/mmaction2 | 055625bf6d6e06e9f811cc4f8b0332c18cebc98c | [
"Apache-2.0"
] | null | null | null | from collections import OrderedDict
import torch.distributed as dist
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors, _take_tensors
from mmcv.runner import OptimizerHook
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
    """All-reduce *tensors* in flattened buckets, averaging by *world_size*.

    With bucket_size_mb > 0 the tensors are chunked into byte-limited
    buckets; otherwise they are grouped by tensor type. Each bucket is
    flattened into one contiguous tensor for a single all_reduce call.
    """
    if bucket_size_mb > 0:
        buckets = _take_tensors(tensors, bucket_size_mb * 1024 * 1024)
    else:
        by_type = OrderedDict()
        for tensor in tensors:
            by_type.setdefault(tensor.type(), []).append(tensor)
        buckets = by_type.values()

    for bucket in buckets:
        flat = _flatten_dense_tensors(bucket)
        dist.all_reduce(flat)
        flat.div_(world_size)
        # Copy the reduced values back into the original tensors in place.
        for tensor, synced in zip(bucket, _unflatten_dense_tensors(flat, bucket)):
            tensor.copy_(synced)
def allreduce_tensors(tensors, coalesce=True, bucket_size_mb=-1):
    """All-reduce and average *tensors* across all distributed workers."""
    world_size = dist.get_world_size()
    if not coalesce:
        # One all_reduce per tensor; div_ pre-divides so the sum is a mean.
        for tensor in tensors:
            dist.all_reduce(tensor.div_(world_size))
        return
    _allreduce_coalesced(tensors, world_size, bucket_size_mb)
class DistOptimizerHook(OptimizerHook):
    """OptimizerHook that also synchronizes model buffers after each epoch."""

    def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1):
        super().__init__(grad_clip)
        self.coalesce = coalesce
        self.bucket_size_mb = bucket_size_mb

    def after_epoch(self, runner):
        # Average non-parameter buffers (e.g. BN running stats) across
        # workers; the integral 'num_batches_tracked' counters stay local.
        buffers = [
            buf for name, buf in runner.model.named_buffers()
            if 'num_batches_tracked' not in name
        ]
        allreduce_tensors(buffers, self.coalesce, self.bucket_size_mb)
| 34.479167 | 96 | 0.693656 | 213 | 1,655 | 5.037559 | 0.314554 | 0.102516 | 0.100652 | 0.036347 | 0.177074 | 0.132339 | 0.085741 | 0.085741 | 0 | 0 | 0 | 0.009368 | 0.225982 | 1,655 | 47 | 97 | 35.212766 | 0.828259 | 0 | 0 | 0.108108 | 0 | 0 | 0.01148 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108108 | false | 0 | 0.108108 | 0 | 0.243243 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d586f302acc1562c0c4ae689fc7c91008984ede8 | 469 | py | Python | app/assets.py | tobymccann/flask-base | 3a93a9171b07d97036d144df7c45397735789431 | [
"MIT"
] | null | null | null | app/assets.py | tobymccann/flask-base | 3a93a9171b07d97036d144df7c45397735789431 | [
"MIT"
] | null | null | null | app/assets.py | tobymccann/flask-base | 3a93a9171b07d97036d144df7c45397735789431 | [
"MIT"
] | null | null | null | from flask_assets import Bundle
# Application-owned assets: SCSS compiled to CSS, JS minified.
app_css = Bundle('app.scss', filters='scss', output='css/app.css')
app_js = Bundle('app.js', filters='jsmin', output='js/app.js')
# Third-party assets: Semantic UI CSS plus its component files, and the
# vendor scripts (jQuery, Semantic, tablesort, zxcvbn, plus any other
# vendor/*.js) minified into a single bundle.
vendor_css = Bundle('vendor/semantic.css', 'vendor/components/*.css', output='css/vendor.css')
vendor_js = Bundle('vendor/jquery-3.1.1.min.js', 'vendor/semantic.min.js', 'vendor/jquery.tablesort.min.js',
                   'vendor/zxcvbn.js', 'vendor/*.js', filters='jsmin', output='scripts/vendor.js')
| 42.636364 | 108 | 0.684435 | 70 | 469 | 4.514286 | 0.314286 | 0.126582 | 0.10443 | 0.126582 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007177 | 0.108742 | 469 | 10 | 109 | 46.9 | 0.748804 | 0 | 0 | 0 | 0 | 0 | 0.481876 | 0.215352 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d589965f796bd5e6baad650eddf2a9493846b256 | 3,438 | py | Python | scripts/getPrototypeInfo.py | voxie-viewer/voxie | d2b5e6760519782e9ef2e51f5322a3baa0cb1198 | [
"MIT"
] | 4 | 2016-06-03T18:41:43.000Z | 2020-04-17T20:28:58.000Z | scripts/getPrototypeInfo.py | voxie-viewer/voxie | d2b5e6760519782e9ef2e51f5322a3baa0cb1198 | [
"MIT"
] | null | null | null | scripts/getPrototypeInfo.py | voxie-viewer/voxie | d2b5e6760519782e9ef2e51f5322a3baa0cb1198 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
#
# Copyright (c) 2014-2022 The Voxie Authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# scripts/setEnv scripts/getPrototypeInfo.py
import sys
import os
import dbus
import io
import voxie
# Connect to (or start) a Voxie instance over DBus using the CLI arguments.
args = voxie.parser.parse_args()
context = voxie.VoxieContext(args, enableService=True)
instance = context.createInstance()

# Collects every distinct property type seen while listing prototypes below.
allPropertyTypes = set()
def showComponentInfo(component):
    """Print container (plugin/extension), name and type of a Voxie component."""
    component = voxie.castImplicit(
        component, 'de.uni_stuttgart.Voxie.Component')
    print(' Component:')
    container = component.ComponentContainer
    found = False
    for interface in container.SupportedInterfaces:
        if interface == 'de.uni_stuttgart.Voxie.Plugin':
            plugin = container.CastTo('de.uni_stuttgart.Voxie.Plugin')
            suffix = ' (core plugin)' if plugin.IsCorePlugin else ''
            print(' Plugin: %s%s' % (repr(plugin.Name), suffix))
            found = True
            break
        if interface == 'de.uni_stuttgart.Voxie.Extension':
            extension = container.CastTo('de.uni_stuttgart.Voxie.Extension')
            print(' Extension: %s' % (repr(extension.ExecutableFilename),))
            found = True
            break
    if not found:
        print(' Unknown container')
    print(' Name: %s' % (repr(component.Name),))
    print(' Type: %s' % (repr(component.ComponentType),))
    print()
# List every object prototype with its properties and owning component.
prototypes = instance.ListPrototypes()
# print (prototypes)
for prototype in prototypes:
    print(prototype.Name)
    print(' DisplayName: %s' % (repr(prototype.DisplayName),))
    print(' Description: %s' % (repr(prototype.Description),))
    # print (' Allowed Input Types: %s' % ([ t.Name for t in prototype.ListAllowedInputTypes() ],))
    print(' Properties:')
    for prop in prototype.ListObjectProperties():
        print(' %s' % (repr(prop.DisplayName),))
        print(' Type: %s' % (prop.Type.Name,))
        allPropertyTypes.add(prop.Type)
    showComponentInfo(prototype)

# NOTE(review): deliberately disabled dump of the distinct property types
# collected above; flip to True to include it in the output.
if False:
    allPropertyTypes = list(allPropertyTypes)
    allPropertyTypes.sort(key=lambda p: p.Name)
    propertyTypesPrinted = set()
    for ptype in allPropertyTypes:
        # De-duplicate by DBus object path; several wrappers may refer to
        # the same remote object.
        if ptype._objectPath in propertyTypesPrinted:
            continue
        propertyTypesPrinted.add(ptype._objectPath)
        print(ptype.Name)
        showComponentInfo(ptype)

context.client.destroy()
| 36.967742 | 100 | 0.689645 | 401 | 3,438 | 5.892768 | 0.428928 | 0.037241 | 0.029623 | 0.040203 | 0.066864 | 0.054168 | 0 | 0 | 0 | 0 | 0 | 0.003325 | 0.212624 | 3,438 | 92 | 101 | 37.369565 | 0.869597 | 0.370855 | 0 | 0.074074 | 0 | 0 | 0.152082 | 0.072064 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018519 | false | 0 | 0.092593 | 0 | 0.111111 | 0.259259 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d58d09288e8a693fa23a5cc7bf11d95086946be2 | 11,038 | py | Python | src/sos/substep_executor.py | pgcudahy/sos | ee902841003c7630db501101038f370650955ef9 | [
"BSD-3-Clause"
] | null | null | null | src/sos/substep_executor.py | pgcudahy/sos | ee902841003c7630db501101038f370650955ef9 | [
"BSD-3-Clause"
] | null | null | null | src/sos/substep_executor.py | pgcudahy/sos | ee902841003c7630db501101038f370650955ef9 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (c) Bo Peng and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
import contextlib
import subprocess
import sys
import os
from io import StringIO
import zmq
from .controller import close_socket, create_socket, send_message_to_controller
from .messages import encode_msg
from .eval import SoS_exec
from .executor_utils import (
clear_output,
create_task,
get_traceback_msg,
kill_all_subprocesses,
prepare_env,
reevaluate_output,
statementMD5,
validate_step_sig,
verify_input,
)
from .targets import RemovedTarget, RuntimeInfo, UnavailableLock, sos_targets
from .utils import ArgumentError, StopInputGroup, TerminateExecution, ProcessKilled, env
@contextlib.contextmanager
def stdoutIO():
    """Temporarily redirect sys.stdout/sys.stderr into StringIO buffers.

    Yields (stdout_buffer, stderr_buffer).

    BUG FIX: the previous implementation had no try/finally around the
    yield, so an exception raised in the managed block left the process
    with its stdout/stderr still redirected.
    """
    oldout = sys.stdout
    olderr = sys.stderr
    stdout = StringIO()
    stderr = StringIO()
    sys.stdout = stdout
    sys.stderr = stderr
    try:
        yield stdout, stderr
    finally:
        sys.stdout = oldout
        sys.stderr = olderr
def execute_substep(
    stmt,
    global_def,
    global_vars,
    task="",
    task_params="",
    proc_vars=None,
    shared_vars=None,
    config=None,
    cwd=None,
):
    """Execute a substep with specific input etc

    Substep executed by this function should be self-contained. It can contain
    tasks (which will be sent to the master process) but not nested workflows.
    The executor checks step signatures and might skip the substep if it has
    been executed and the signature matches.

    The executor accepts connections to the controller, and a socket using
    which the results will be returned. However, the calling process should
    take care of the connection and disconnection of controller sockets and
    this function only takes care of the connection and disconnection of
    result socket.

    stmt:
        Main statement of the substep
    global_def:
        Global definitions, might define functions useful to the substep
    task:
        External task
    proc_vars:
        Environmental variables, signature variables etc
    shared_vars:
        Variables that should be returned after the execution
    config:
        Runmode, signature mode, verbosity, etc.

    The return value should be a dictionary with the following keys:
    index: index of the substep within the step
    ret_code: (all) return code, 0 for successful
    sig_skipped: (optional) return if the step is skipped due to signature
    shared: (optional) shared variable as specified by 'shared_vars'
    stdout: (optional) if in interactive mode
    stderr: (optional) if in interactive mode
    exception: (optional) if an exception occures
    """
    # BUG FIX: the previous defaults (proc_vars={}, shared_vars=[], config={})
    # were mutable default arguments shared across calls; config in particular
    # is mutated downstream (config.pop in _execute_substep). Use None
    # sentinels instead — behavior for all existing callers is unchanged.
    proc_vars = {} if proc_vars is None else proc_vars
    shared_vars = [] if shared_vars is None else shared_vars
    config = {} if config is None else config

    assert not env.zmq_context.closed
    assert "workflow_id" in proc_vars
    assert "step_id" in proc_vars
    assert "_input" in proc_vars
    assert "_output" in proc_vars
    assert "_depends" in proc_vars
    assert "step_output" in proc_vars
    assert "_index" in proc_vars
    assert "result_push_socket" in config["sockets"]

    # this should not happen but check nevertheless: if a different worker
    # requested this substep, drop the stale PUSH socket before reconnecting.
    if (
        env.result_socket_port is not None
        and env.result_socket_port != config["sockets"]["result_push_socket"]
    ):
        close_socket(env.result_socket)
        env.result_socket = None

    if env.result_socket is None:
        env.result_socket = create_socket(env.zmq_context, zmq.PUSH)
        env.result_socket_port = config["sockets"]["result_push_socket"]
        # the result_socket_port contains IP of the worker that request the substep
        env.result_socket.connect(env.result_socket_port)

    res = _execute_substep(
        stmt=stmt,
        global_def=global_def,
        global_vars=global_vars,
        task=task,
        task_params=task_params,
        proc_vars=proc_vars,
        shared_vars=shared_vars,
        config=config,
        cwd=cwd,
    )
    env.result_socket.send(encode_msg(res))
def _execute_substep(
    stmt, global_def, global_vars, task, task_params, proc_vars, shared_vars, config, cwd
):
    """Run one substep in the current process and return its result dict.

    Called by execute_substep(), which handles result-socket delivery.
    Builds the execution environment, optionally skips via signature
    matching, executes `stmt`, creates an external task if requested, and
    maps every failure mode onto a result dictionary (see execute_substep's
    docstring for the result keys).
    """
    # vatlab/sos-notebook#272
    # if config contains exec_mode, we remove it to avoid it manifest the worker exec_mode
    config.pop("exec_mode", None)
    # passing configuration and port numbers to the subprocess
    env.config.update(config)
    # prepare a working environment with sos symbols and functions
    prepare_env(global_def, global_vars)
    # update it with variables passed from master process
    env.sos_dict.quick_update(proc_vars)

    # A signature is only tracked when signatures are enabled and the
    # substep's output is fully specified.
    if env.config["sig_mode"] == "ignore" or env.sos_dict["_output"].unspecified():
        sig = None
    else:
        sig = RuntimeInfo(
            statementMD5([stmt, task]),
            env.sos_dict["_input"],
            env.sos_dict["_output"],
            env.sos_dict["_depends"],
            env.sos_dict["__signature_vars__"],
            shared_vars=shared_vars,
        )

    outmsg = ""
    errmsg = ""
    # In interactive (notebook) mode, stdout/stderr are captured and
    # returned with the result.
    capture_output = env.config["run_mode"] == "interactive"
    idx = env.sos_dict["_index"]
    original_cwd = None
    try:
        if cwd:
            # run the substep from `cwd`; restored in the finally clause
            original_cwd = os.getcwd()
            if original_cwd != cwd:
                os.chdir(cwd)
        if sig:
            # if not in distributed mode, the signature must have been checked at
            # the step level
            if env.config["sig_mode"] in ("distributed", "build"):
                matched = validate_step_sig(sig)
                if matched:
                    # avoid sig being released in the final statement
                    sig = None
                    # complete case: concurrent ignore without task
                    send_message_to_controller(
                        ["progress", "substep_ignored", env.sos_dict["step_id"]]
                    )
                    res = {
                        "index": idx,
                        "ret_code": 0,
                        "sig_skipped": 1,
                        "output": matched["output"],
                        "shared": matched["vars"],
                    }
                    if task:
                        # if there is task, let the master know that the task is
                        # skipped
                        res["task_id"] = None
                    return res
            sig.lock()
        # check if input and depends targets actually exist
        #
        # if depends on a sos_variable but the variable is not actually used in
        # the substep, it is ok to ignore it. If the variable is used in the substep
        # it should have been included as part of the signature variables.
        verify_input(ignore_internal_targets=True)
        if stmt:
            # statement can be empty for task only substep
            if capture_output:
                with stdoutIO() as (out, err):
                    SoS_exec(stmt, return_result=False)
                    outmsg = out.getvalue()
                    errmsg = err.getvalue()
            else:
                SoS_exec(stmt, return_result=False)
            if not task and env.config["run_mode"] != "interactive":
                env.logger.info(
                    f'``{env.sos_dict["step_name"]}`` (index={idx}) is ``completed``.'
                )
        if task:
            # hand the external task back to the master process for scheduling
            task_id, taskdef, task_vars = create_task(
                global_def, global_vars, task, task_params
            )
            res = {
                "index": idx,
                "task_id": task_id,
                "task_def": taskdef,
                "task_vars": task_vars,
            }
        else:
            # no task: resolve undetermined output and persist the signature
            if env.sos_dict["step_output"].undetermined():
                env.sos_dict.set("_output", reevaluate_output())
            res = {"index": idx, "ret_code": 0}
            if sig:
                sig.set_output(env.sos_dict["_output"])
                # sig.write will use env.master_push_socket
                if sig.write():
                    res["shared"] = sig.content["end_context"]
                    if "output_obj" in sig.content:
                        res["output"] = sig.content["output_obj"]
                    else:
                        res["output"] = env.sos_dict["_output"]
        if capture_output:
            res.update({"stdout": outmsg, "stderr": errmsg})
        # complete case: concurrent execution without task
        send_message_to_controller(
            ["progress", "substep_completed", env.sos_dict["step_id"]]
        )
        return res
    except (StopInputGroup, TerminateExecution, RemovedTarget, UnavailableLock) as e:
        # stop_if is not considered as an error
        if isinstance(e, StopInputGroup):
            if e.message:
                env.logger.info(e.message)
            # we do not really treat this as an exception
            if env.sos_dict["step_output"].undetermined():
                env.sos_dict.set("_output", reevaluate_output())
            res = {"index": idx, "ret_code": 0}
            if task:
                res["task_id"] = None
            if not e.keep_output:
                # treat as an error
                clear_output()
                res["output"] = sos_targets([])
            elif sig:
                sig.set_output(env.sos_dict["_output"])
                # sig.write will use env.master_push_socket
                if sig.write():
                    res["shared"] = sig.content["end_context"]
                    if "output_obj" in sig.content:
                        res["output"] = sig.content["output_obj"]
                    else:
                        res["output"] = env.sos_dict["_output"]
        else:
            clear_output()
            res = {"index": idx, "ret_code": 1, "exception": e}
        if capture_output:
            res.update({"stdout": outmsg, "stderr": errmsg})
        return res
    except (KeyboardInterrupt, SystemExit) as e:
        clear_output()
        kill_all_subprocesses()
        raise e
    except subprocess.CalledProcessError as e:
        clear_output()
        # cannot pass CalledProcessError back because it is not pickleable
        res = {
            "index": idx,
            "ret_code": e.returncode,
            "exception": RuntimeError(e.stderr),
        }
        if capture_output:
            res.update({"stdout": outmsg, "stderr": errmsg})
        return res
    except ArgumentError as e:
        clear_output()
        return {"index": idx, "ret_code": 1, "exception": e}
    except ProcessKilled as e:
        clear_output()
        res = {"index": idx, "ret_code": 1, "exception": e}
        return res
    except Exception as e:
        clear_output()
        # wrap with a formatted traceback so the master can report it
        res = {
            "index": idx,
            "ret_code": 1,
            "exception": RuntimeError(get_traceback_msg(e)),
        }
        if capture_output:
            res.update({"stdout": outmsg, "stderr": errmsg})
        return res
    finally:
        if original_cwd:
            os.chdir(original_cwd)
        # release the lock even if the process becomes zombie? #871
        if sig:
            sig.release(quiet=True)
| 35.152866 | 90 | 0.590959 | 1,294 | 11,038 | 4.867852 | 0.23493 | 0.017146 | 0.028576 | 0.019051 | 0.293856 | 0.233212 | 0.221305 | 0.200191 | 0.184632 | 0.161772 | 0 | 0.002549 | 0.324606 | 11,038 | 313 | 91 | 35.265176 | 0.842388 | 0.255662 | 0 | 0.333333 | 0 | 0 | 0.103512 | 0.003848 | 0 | 0 | 0 | 0 | 0.04 | 1 | 0.013333 | false | 0 | 0.053333 | 0 | 0.097778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d58d12ad60defa24864c4dcc70d63506876a50bc | 830 | py | Python | pytorch-frontend/caffe2/python/test/fakefp16_transform_test.py | AndreasKaratzas/stonne | 2915fcc46cc94196303d81abbd1d79a56d6dd4a9 | [
"MIT"
] | 40 | 2021-06-01T07:37:59.000Z | 2022-03-25T01:42:09.000Z | pytorch-frontend/caffe2/python/test/fakefp16_transform_test.py | AndreasKaratzas/stonne | 2915fcc46cc94196303d81abbd1d79a56d6dd4a9 | [
"MIT"
] | 14 | 2021-06-01T11:52:46.000Z | 2022-03-25T02:13:08.000Z | pytorch-frontend/caffe2/python/test/fakefp16_transform_test.py | AndreasKaratzas/stonne | 2915fcc46cc94196303d81abbd1d79a56d6dd4a9 | [
"MIT"
] | 7 | 2021-07-20T19:34:26.000Z | 2022-03-13T21:07:36.000Z | from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from caffe2.python.fakefp16_transform_lib import fakeFp16FuseOps
from caffe2.python import core
class Transformer(unittest.TestCase):
    """Tests for the fake-fp16 op-fusion pass."""

    def test_fuse(self):
        """Dequantize -> Swish -> Quantize should fuse into one NNPI op."""
        net_swish = core.Net("test_swish")
        # NOTE(review): unused locally; kept because core.Net() registers
        # the net name globally — confirm before removing.
        net_swish_init = core.Net("test_swish_init")

        deq = core.CreateOperator("Int8DequantizeNNPI", ["Xq"], ["X"])
        swish = core.CreateOperator("SwishFakeFp16NNPI", ["X"], ["Y"])
        quant = core.CreateOperator("Int8QuantizeNNPI", ["Y"], ["Y_q"])
        net_swish.Proto().op.extend(
            [
                deq, swish, quant
            ]
        )
        print(net_swish.Proto())
        out_net = fakeFp16FuseOps(net_swish.Proto())
        # Use unittest's assertion: unlike a bare `assert`, it survives
        # python -O and gives a useful failure message.
        self.assertEqual(len(out_net.op), 1)
| 33.2 | 71 | 0.654217 | 94 | 830 | 5.468085 | 0.457447 | 0.077821 | 0.093385 | 0.062257 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020249 | 0.226506 | 830 | 24 | 72 | 34.583333 | 0.780374 | 0 | 0 | 0 | 0 | 0 | 0.10241 | 0 | 0 | 0 | 0 | 0 | 0.047619 | 1 | 0.047619 | false | 0 | 0.285714 | 0 | 0.380952 | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d58e3dacf06c22cbf3e53b3c6fce879593ab1ce8 | 12,546 | py | Python | bert_ptrnet_coqa.py | yumere/for-QuAC | af1594a0856e20a526e3c3f383b1f8fbfdf7ddd3 | [
"MIT"
] | 2 | 2019-07-30T15:38:24.000Z | 2019-08-08T15:49:13.000Z | bert_ptrnet_coqa.py | yumere/for-QuAC | af1594a0856e20a526e3c3f383b1f8fbfdf7ddd3 | [
"MIT"
] | null | null | null | bert_ptrnet_coqa.py | yumere/for-QuAC | af1594a0856e20a526e3c3f383b1f8fbfdf7ddd3 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import math
import numpy as np
import torch
from pytorch_transformers import AdamW, WarmupLinearSchedule
from pytorch_transformers import BertTokenizer
from pytorch_transformers.modeling_bert import BertPreTrainedModel, BertModel
from tensorboardX import SummaryWriter
from torch import nn
from torch.nn.utils import clip_grad_norm_
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader
from tqdm import tqdm
from bert_ptrnet_coqa_util import CoQAOrderDataset
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
def evaluate(outputs, targets):
    """Score predicted orderings against references.

    A target value of -1 marks padding; only positions before the first -1
    (or the whole prediction when no -1 is present) are compared.

    Returns a (exact_match_accuracy, elementwise_accuracy) pair.
    """
    exact_matches = []
    correct_elems = 0
    total_elems = 0
    for predicted, expected in zip(outputs, targets):
        try:
            cutoff = expected.index(-1)
        except ValueError:
            # no padding marker: compare the full prediction
            cutoff = len(predicted)
        exact_matches.append(1 if predicted[:cutoff] == expected[:cutoff] else 0)
        correct_elems += (np.array(predicted[:cutoff]) == np.array(expected[:cutoff])).sum()
        total_elems += len(predicted[:cutoff])
    return sum(exact_matches) / len(exact_matches), correct_elems / total_elems
class GeLU(nn.Module):
    """GELU activation using the tanh approximation."""

    def __init__(self):
        super(GeLU, self).__init__()

    def forward(self, x):
        # tanh approximation: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 x^3)))
        inner = math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))
        return 0.5 * x * (1 + torch.tanh(inner))
class OrderNet(BertPreTrainedModel):
    """Pointer-network-style question ordering model.

    Each question is encoded with BERT; a "process" LSTM + attention step
    summarizes the (order-invariant) set of question encodings, and a
    decoder LSTM emits, at every step, attention weights over the questions
    that serve as a pointer distribution.
    """

    def __init__(self, config):
        super(OrderNet, self).__init__(config)
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        mlp_hidden_size = 2048
        self.mlp_hidden_size = mlp_hidden_size
        # "read" MLP: projects BERT pooled outputs into the pointer space
        self.read = nn.Sequential(nn.BatchNorm1d(config.hidden_size),
                                  nn.Linear(config.hidden_size, mlp_hidden_size), GeLU(), nn.BatchNorm1d(mlp_hidden_size),
                                  nn.Linear(mlp_hidden_size, mlp_hidden_size), GeLU(), nn.BatchNorm1d(mlp_hidden_size),
                                  nn.Linear(mlp_hidden_size, mlp_hidden_size), GeLU(), nn.BatchNorm1d(mlp_hidden_size))
        rnn_hidden_size = mlp_hidden_size
        # number of "process" iterations over the question memory
        self.proc_step = 5
        self.encoder = nn.LSTMCell(mlp_hidden_size, rnn_hidden_size)
        self.encoder_attn = nn.MultiheadAttention(embed_dim=rnn_hidden_size, num_heads=1, dropout=config.attention_probs_dropout_prob)
        self.proj = nn.Linear(mlp_hidden_size + rnn_hidden_size, rnn_hidden_size, bias=False)
        self.decoder = nn.LSTMCell(mlp_hidden_size, rnn_hidden_size)
        self.decoder_attn = nn.MultiheadAttention(embed_dim=rnn_hidden_size, num_heads=1, dropout=config.attention_probs_dropout_prob)
        # NOTE(review): self.dropout is assigned twice (also above); the
        # second assignment wins — presumably intentional, confirm.
        self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
        self.apply(self.init_weights)

    def forward(self, input_ids: torch.Tensor, input_mask: torch.Tensor,
                segment_ids: torch.Tensor, question_mask: torch.Tensor):
        """Return pointer probabilities of shape
        (batch_size, max_q_len, max_q_len): for every decode step, the
        attention weights over the question memory.
        """
        device = input_ids.device
        batch_size, max_q_len, seq_len = input_ids.shape
        q_len = question_mask.sum(dim=1)  # batch_size
        mask = question_mask.unsqueeze(-1).expand(-1, -1, seq_len)  # batch_size x max_q_len x seq_len
        # drop padded questions and flatten to (total_real_questions, seq_len)
        input_ids = input_ids.masked_select(mask == 1).reshape(-1, seq_len)
        input_mask = input_mask.masked_select(mask == 1).reshape(-1, seq_len)
        segment_ids = segment_ids.masked_select(mask == 1).reshape(-1, seq_len)
        sequence_outputs, pooled_outputs = self.bert(input_ids, attention_mask=input_mask, token_type_ids=segment_ids)
        memory = self.read(self.dropout(pooled_outputs))
        # re-group the flat encodings per batch item and pad
        memory = pad_sequence(memory.split(q_len.tolist()))  # max_q_len, batch_size, read_hidden_size
        _, _, input_size = memory.shape
        init_x = torch.zeros(batch_size, input_size).to(device)
        h_t, c_t = [torch.zeros(batch_size, self.encoder.hidden_size).to(device) for i in range(2)]
        # "process" phase: repeatedly attend over the question memory
        # (padded questions are masked out via question_mask == 0)
        for i in range(self.proc_step):
            h_t, c_t = self.encoder(init_x, (h_t, c_t))
            attn_output, attn_output_weights = self.encoder_attn(h_t.unsqueeze(0), memory, memory, question_mask == 0)
            attn_output = attn_output.squeeze(0)
            h_t = self.proj(torch.cat([h_t, attn_output], dim=1))
        outputs = []
        # decode phase: the attention weights themselves are the pointer
        # distribution over questions at each output position
        for i in range(max_q_len):
            h_t, c_t = self.decoder(init_x, (h_t, c_t))
            attn_output, attn_output_weights = self.decoder_attn(h_t.unsqueeze(0), memory, memory, question_mask == 0)
            outputs.append(attn_output_weights.squeeze(1))
        probs = torch.stack(outputs, dim=1)
        return probs
if __name__ == '__main__':
    # Train and/or evaluate the OrderNet question-ordering model on CoQA.
    parser = argparse.ArgumentParser()
    parser.add_argument("--train_file", default=None, type=str, required=True)
    parser.add_argument("--dev_file", default=None, type=str, required=True)
    parser.add_argument("--do_train", action="store_true", default=False)
    parser.add_argument("--do_eval", action="store_true", default=False)
    parser.add_argument("--model_name_or_path", default="bert-base-uncased", type=str)
    parser.add_argument("--output_dir", default=None, type=str, required=True)
    parser.add_argument("--num_train_epochs", default=3, type=int)
    parser.add_argument("--max_steps", default=-1, type=int)
    parser.add_argument("--warmup_steps", default=0, type=int)
    # TODO: Need to apply gradient accumulation
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--train_batch_size", default=8, type=int)
    parser.add_argument("--dev_batch_size", default=8, type=int)
    # dataset configuration
    parser.add_argument("--max_question_len", type=int, default=15, metavar="15")
    parser.add_argument("--max_sequence_len", type=int, default=24, metavar="24")
    parser.add_argument("--samples_no", type=int, default=5, metavar="5")
    parser.add_argument("--do_lower_case", action="store_true", default=False)
    parser.add_argument("--learning_rate", default=3e-5, type=float)
    parser.add_argument("--weight_decay", default=0.0, type=float)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float)
    parser.add_argument("--max_grad_norm", default=1.0, type=float)
    parser.add_argument("--logging_steps", default=10, type=int)
    parser.add_argument("--saving_steps", default=100, type=int)
    parser.add_argument("--no_cuda", default=False, action="store_true")
    parser.add_argument('--in_answer', default=False, action='store_true')
    args = parser.parse_args()

    assert args.do_train or args.do_eval, "You must do train or eval by using --do_train/do_eval"
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    args.n_gpu = torch.cuda.device_count()
    logger.warning("Device: {}, n_gpu: {}".format(device, args.n_gpu))

    # scale the effective batch sizes with the GPU count (DataParallel splits them)
    args.train_batch_size = args.train_batch_size * max(1, args.n_gpu)
    args.dev_batch_size = args.dev_batch_size * max(1, args.n_gpu)

    if args.do_train:
        model = OrderNet.from_pretrained(args.model_name_or_path)
        model.zero_grad()
        model.to(device)
        if args.n_gpu > 1:
            model = torch.nn.DataParallel(model)
        tokenizer = BertTokenizer.from_pretrained(args.model_name_or_path)
        dataset = CoQAOrderDataset(args.train_file, "coqa-train.pkl", args.do_lower_case,
                                   max_question_len=args.max_question_len, max_sequence_len=args.max_sequence_len,
                                   samples_no=args.samples_no, in_answer=args.in_answer)
        loader = DataLoader(dataset, batch_size=args.train_batch_size, shuffle=True, drop_last=True, num_workers=1, collate_fn=CoQAOrderDataset.collate_fn)
        args.t_total = len(loader) * args.num_train_epochs
        logger.info("Total step: {:,}".format(args.t_total))

        # per-element loss so padded positions (target == -1) are ignored
        criterion = nn.CrossEntropyLoss(ignore_index=-1, reduction='none')
        no_decay = ["bias", "LayerNorm.weight"]
        # TODO: Check whether named_parameters return my mlp and lstm cell parameter
        optimizer_grouped_parameters = [
            {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
             "weight_decay": args.weight_decay},
            {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=args.t_total)
        tb_writer = SummaryWriter(logdir=args.output_dir)

        global_step = 0
        for e in tqdm(list(range(args.num_train_epochs)), desc="Epoch", ncols=75):
            for i, batch in enumerate(tqdm(loader, desc="Step", ncols=75)):
                model.train()
                model.zero_grad()
                global_step += 1
                batch_size, max_q_len, max_seq_len = batch[0].shape
                inputs = {
                    "input_ids": batch[0].to(device),
                    "input_mask": batch[1].to(device),
                    "segment_ids": batch[2].to(device),
                    "question_mask": batch[4].to(device)
                }
                targets = batch[3].to(device)
                outputs = model(**inputs)
                loss = criterion(outputs.reshape(-1, max_q_len), targets.reshape(-1))
                loss = loss.sum() / batch_size
                loss.backward()
                clip_grad_norm_(model.parameters(), args.max_grad_norm)
                scheduler.step()
                optimizer.step()
                if args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    tqdm.write("Step: {:,} Loss: {}".format(global_step, loss.item()))
                    tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar("loss", loss.item(), global_step)
                if args.saving_steps > 0 and global_step % args.saving_steps == 0:
                    model_to_save = model.module if hasattr(model, "module") else model
                    model_to_save.save_pretrained(args.output_dir)
                    # BUG FIX: the original saved an empty string; persist the
                    # actual run configuration alongside the checkpoint.
                    torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", args.output_dir)

    if args.do_eval:
        model = OrderNet.from_pretrained(args.output_dir)
        model.to(device)
        tokenizer = BertTokenizer.from_pretrained(args.model_name_or_path)
        # BUG FIX: evaluation must read --dev_file; the original passed
        # args.train_file, leaving the required --dev_file argument unused.
        dataset = CoQAOrderDataset(args.dev_file, "coqa-dev.pkl", args.do_lower_case,
                                   max_question_len=args.max_question_len, max_sequence_len=args.max_sequence_len,
                                   samples_no=1)
        loader = DataLoader(dataset, batch_size=args.dev_batch_size, shuffle=False, drop_last=True, num_workers=1,
                            collate_fn=CoQAOrderDataset.collate_fn)
        targets_eval = []
        outputs_eval = []
        for i, batch in enumerate(tqdm(loader)):
            model.eval()
            with torch.no_grad():
                batch_size, max_q_len, max_seq_len = batch[0].shape
                inputs = {
                    "input_ids": batch[0].to(device),
                    "input_mask": batch[1].to(device),
                    "segment_ids": batch[2].to(device),
                    "question_mask": batch[4].to(device)
                }
                targets = batch[3].to(device)
                outputs = model(**inputs)
                outputs = outputs.argmax(dim=2)
                for j, (target, output) in enumerate(zip(targets.tolist(), outputs.tolist())):
                    if args.dev_batch_size * i + j < 10:
                        # print the first few predictions as a sanity check
                        print(output)
                        print(target)
                        print("=" * 20)
                    outputs_eval.append(output)
                    targets_eval.append(target)
        entire_acc, acc = evaluate(outputs_eval, targets_eval)
        print("{:.4f} {:.4f}".format(entire_acc, acc))
| 45.129496 | 155 | 0.640682 | 1,658 | 12,546 | 4.583233 | 0.181544 | 0.034215 | 0.053691 | 0.012502 | 0.376629 | 0.335044 | 0.296355 | 0.278589 | 0.257139 | 0.231478 | 0 | 0.012513 | 0.241989 | 12,546 | 277 | 156 | 45.292419 | 0.78654 | 0.026303 | 0 | 0.113744 | 0 | 0 | 0.071194 | 0.002376 | 0 | 0 | 0 | 0.00361 | 0.004739 | 1 | 0.023697 | false | 0 | 0.080569 | 0.004739 | 0.127962 | 0.023697 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d58e44cff21b274169bf4e894702c779f915b4ea | 2,192 | py | Python | sdk/containerregistry/azure-containerregistry/samples/sample_repository_actions.py | romahamu/azure-sdk-for-python | a57c9f73b9121f79d317e1679b81fd460d6a25b8 | [
"MIT"
] | 1 | 2021-04-05T17:38:42.000Z | 2021-04-05T17:38:42.000Z | sdk/containerregistry/azure-containerregistry/samples/sample_repository_actions.py | romahamu/azure-sdk-for-python | a57c9f73b9121f79d317e1679b81fd460d6a25b8 | [
"MIT"
] | null | null | null | sdk/containerregistry/azure-containerregistry/samples/sample_repository_actions.py | romahamu/azure-sdk-for-python | a57c9f73b9121f79d317e1679b81fd460d6a25b8 | [
"MIT"
] | 1 | 2021-12-18T20:01:22.000Z | 2021-12-18T20:01:22.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import os
class SampleRepositoryActions(object):
    """Samples for common Azure Container Registry repository operations.

    NOTE(review): this sample references ContainerRegistryClient,
    DefaultAzureCredential and ContentPermissions without importing them;
    presumably the azure-containerregistry / azure-identity imports are
    expected to be present — confirm before running.
    """

    # Registry endpoint, e.g. "https://myregistry.azurecr.io".
    base_url = os.environ.get("ACR_BASE_URL")

    def view_repositories(self):
        """List every repository in the registry and print it."""
        client = ContainerRegistryClient(self.base_url, DefaultAzureCredential())
        repositories = client.list_repositories()
        for idx, repo in enumerate(repositories):
            print("Repository #{}: {}".format(idx, repo))

    def get_repository_metadata(self):
        """Print the stored attributes of the 'hello-world' repository."""
        client = ContainerRegistryClient(self.base_url, DefaultAzureCredential())
        repo_client = client.get_repository_client("hello-world")
        attributes = repo_client.get_attributes()
        print(attributes.name)
        print(attributes.registry)
        print(attributes.created_time)
        print(attributes.last_updated_time)
        print(attributes.manifest_count)
        print(attributes.tag_count)
        print(attributes.permission.can_list)
        print(attributes.permission.can_read)
        print(attributes.permission.can_write)
        print(attributes.permission.can_delete)

    def set_repository_permissions(self):
        """Grant list/read/write (but not delete) on 'hello-world'."""
        client = ContainerRegistryClient(self.base_url, DefaultAzureCredential())
        repo_client = client.get_repository_client("hello-world")
        # BUG FIX: the original used the undefined names `true`/`false`;
        # Python booleans are capitalized.
        permissions = ContentPermissions(list=True, read=True, write=True, delete=False)
        repo_client.set_permissions(permissions)

    def delete_repository(self):
        """Delete the 'hello-world' repository and report what was removed."""
        client = ContainerRegistryClient(self.base_url, DefaultAzureCredential())
        result = client.delete_repository("hello-world")
        for manifest in result.manifests_deleted:
            print("Deleted {}".format(manifest))
        for tag in result.tags_deleted:
            print("Deleted tags {}".format(tag))
| 35.934426 | 94 | 0.662865 | 223 | 2,192 | 6.35426 | 0.408072 | 0.105857 | 0.093155 | 0.104446 | 0.249824 | 0.249824 | 0.249824 | 0.156669 | 0.156669 | 0.156669 | 0 | 0.00056 | 0.184763 | 2,192 | 60 | 95 | 36.533333 | 0.792389 | 0.206204 | 0 | 0.176471 | 0 | 0 | 0.050808 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.029412 | 0 | 0.205882 | 0.382353 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d58f37dc64174329c72118114782bf793424350d | 15,588 | py | Python | python/ray/rllib/ddpg/models.py | songqing/ray | 166000b089ee15d44635ebca00f12320f51ce587 | [
"Apache-2.0"
] | 1 | 2018-06-25T08:00:51.000Z | 2018-06-25T08:00:51.000Z | python/ray/rllib/ddpg/models.py | songqing/ray | 166000b089ee15d44635ebca00f12320f51ce587 | [
"Apache-2.0"
] | 1 | 2018-01-26T05:11:04.000Z | 2018-01-26T05:11:04.000Z | python/ray/rllib/ddpg/models.py | songqing/ray | 166000b089ee15d44635ebca00f12320f51ce587 | [
"Apache-2.0"
] | 1 | 2020-10-16T08:42:32.000Z | 2020-10-16T08:42:32.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as layers
from ray.rllib.models import ModelCatalog
def _build_p_network(registry, inputs, dim_actions, config):
    """Actor network: map an observation (i.e., state) to an action where
    each entry takes value from (0, 1) due to the final sigmoid layer.
    """
    model = ModelCatalog.get_model(registry, inputs, 1, config["model"])
    net = model.last_layer
    for width in config["actor_hiddens"]:
        net = layers.fully_connected(
            net, num_outputs=width, activation_fn=tf.nn.relu)
    # sigmoid bounds each score to (0, 1); output shape [batch_size, dim_actions]
    return layers.fully_connected(
        net, num_outputs=dim_actions, activation_fn=tf.nn.sigmoid)
# As a stochastic policy for inference, but a deterministic policy for training
# thus ignore batch_size issue when constructing a stochastic action
def _build_action_network(p_values, low_action, high_action, stochastic, eps,
                          theta, sigma):
    """Turn (0, 1) policy scores into env-scale actions, optionally adding
    Ornstein-Uhlenbeck exploration noise when `stochastic` is true.
    """
    # rescale scores into [low_action, high_action]; shape [None, dim_action]
    greedy_actions = (high_action - low_action) * p_values + low_action
    # persistent OU noise state, one entry per action dimension
    ou_state = tf.get_variable(
        name="ornstein_uhlenbeck",
        dtype=tf.float32,
        initializer=low_action.size * [.0],
        trainable=False)
    gaussian = tf.random_normal(
        shape=[low_action.size], mean=0.0, stddev=1.0)
    # OU update: drift back toward zero plus Gaussian diffusion
    ou_value = tf.assign_add(
        ou_state, theta * (.0 - ou_state) + sigma * gaussian)
    noisy_actions = greedy_actions + eps * (
        high_action - low_action) * ou_value
    return tf.cond(stochastic, lambda: noisy_actions,
                   lambda: greedy_actions)
def _build_q_network(registry, inputs, action_inputs, config):
    """Critic network: score an (observation, action) pair with a scalar Q value."""
    model = ModelCatalog.get_model(registry, inputs, 1, config["model"])
    # concatenate the state embedding with the action before the MLP
    net = tf.concat([model.last_layer, action_inputs], axis=1)
    for width in config["critic_hiddens"]:
        net = layers.fully_connected(
            net, num_outputs=width, activation_fn=tf.nn.relu)
    return layers.fully_connected(net, num_outputs=1, activation_fn=None)
def _huber_loss(x, delta=1.0):
    """Reference: https://en.wikipedia.org/wiki/Huber_loss"""
    abs_x = tf.abs(x)
    quadratic = 0.5 * tf.square(x)
    linear = delta * (abs_x - 0.5 * delta)
    # quadratic near zero, linear beyond |x| >= delta
    return tf.where(abs_x < delta, quadratic, linear)
def _minimize_and_clip(optimizer, objective, var_list, clip_val=10):
    """Compute gradients of `objective` w.r.t. the variables in `var_list`,
    clipping each gradient's norm to `clip_val`.

    Returns the list of (gradient, variable) pairs.
    """
    grads_and_vars = optimizer.compute_gradients(objective, var_list=var_list)
    for idx, (grad, var) in enumerate(grads_and_vars):
        if grad is None:
            # variable does not influence the objective; leave untouched
            continue
        grads_and_vars[idx] = (tf.clip_by_norm(grad, clip_val), var)
    return grads_and_vars
def _scope_vars(scope, trainable_only=False):
    """Collect the variables that live inside a scope.

    Parameters
    ----------
    scope: str or VariableScope
        scope in which the variables reside.
    trainable_only: bool
        whether or not to return only the variables that were marked as
        trainable.

    Returns
    -------
    vars: [tf.Variable]
        list of variables in `scope`.
    """
    collection = (tf.GraphKeys.TRAINABLE_VARIABLES if trainable_only
                  else tf.GraphKeys.VARIABLES)
    scope_name = scope if isinstance(scope, str) else scope.name
    return tf.get_collection(collection, scope=scope_name)
class ModelAndLoss(object):
    """Holds the model and loss function.

    Both graphs are necessary in order for the multi-gpu SGD implementation
    to create towers on each device.
    """

    def __init__(self, registry, dim_actions, low_action, high_action, config,
                 obs_t, act_t, rew_t, obs_tp1, done_mask, importance_weights):
        # p network evaluation (reuses the actor weights built by DDPGGraph)
        with tf.variable_scope("p_func", reuse=True) as scope:
            self.p_t = _build_p_network(registry, obs_t, dim_actions, config)
        # target p network evaluation (separate, slowly-updated weights)
        with tf.variable_scope("target_p_func") as scope:
            self.p_tp1 = _build_p_network(registry, obs_tp1, dim_actions,
                                          config)
            self.target_p_func_vars = _scope_vars(scope.name)
        # Action outputs — both use deterministic actions (no exploration
        # noise: flag is False and eps is 0) since this is the training path
        with tf.variable_scope("a_func", reuse=True):
            deterministic_flag = tf.constant(value=False, dtype=tf.bool)
            zero_eps = tf.constant(value=.0, dtype=tf.float32)
            output_actions = _build_action_network(
                self.p_t, low_action, high_action, deterministic_flag,
                zero_eps, config["exploration_theta"],
                config["exploration_sigma"])
            output_actions_estimated = _build_action_network(
                self.p_tp1, low_action, high_action, deterministic_flag,
                zero_eps, config["exploration_theta"],
                config["exploration_sigma"])
        # q network evaluation on the replayed (s, a) pair
        with tf.variable_scope("q_func") as scope:
            self.q_t = _build_q_network(registry, obs_t, act_t, config)
            self.q_func_vars = _scope_vars(scope.name)
        # same critic weights, but evaluated on the actor's current action
        # (used below for the policy-gradient actor loss)
        with tf.variable_scope("q_func", reuse=True):
            self.q_tp0 = _build_q_network(registry, obs_t, output_actions,
                                          config)
        # target q network evalution
        with tf.variable_scope("target_q_func") as scope:
            self.q_tp1 = _build_q_network(registry, obs_tp1,
                                          output_actions_estimated, config)
            self.target_q_func_vars = _scope_vars(scope.name)
        # drop the trailing singleton dimension of the critic outputs
        q_t_selected = tf.squeeze(self.q_t, axis=len(self.q_t.shape) - 1)
        q_tp1_best = tf.squeeze(
            input=self.q_tp1, axis=len(self.q_tp1.shape) - 1)
        # terminal transitions (done_mask == 1) contribute no bootstrap value
        q_tp1_best_masked = (1.0 - done_mask) * q_tp1_best
        # compute RHS of bellman equation
        q_t_selected_target = (
            rew_t + config["gamma"]**config["n_step"] * q_tp1_best_masked)
        # compute the error (potentially clipped); stop_gradient keeps the
        # target fixed w.r.t. the critic update
        self.td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
        if config.get("use_huber"):
            errors = _huber_loss(self.td_error, config.get("huber_threshold"))
        else:
            errors = 0.5 * tf.square(self.td_error)
        # critic loss: importance-weighted mean TD error
        weighted_error = tf.reduce_mean(importance_weights * errors)
        self.loss = weighted_error
        # for policy gradient: maximize Q(s, pi(s)) by minimizing its negative
        self.actor_loss = -1.0 * tf.reduce_mean(self.q_tp0)
class DDPGGraph(object):
    """Builds the full DDPG TF graph: actor/critic networks, exploration
    noise, loss, per-network Adam optimizers, and soft target-network sync.
    """

    def __init__(self, registry, env, config, logdir):
        self.env = env
        dim_actions = env.action_space.shape[0]
        low_action = env.action_space.low
        high_action = env.action_space.high
        # separate optimizers (and learning rates) for actor and critic
        actor_optimizer = tf.train.AdamOptimizer(
            learning_rate=config["actor_lr"])
        critic_optimizer = tf.train.AdamOptimizer(
            learning_rate=config["critic_lr"])

        # Action inputs
        self.stochastic = tf.placeholder(tf.bool, (), name="stochastic")
        self.eps = tf.placeholder(tf.float32, (), name="eps")
        self.cur_observations = tf.placeholder(
            tf.float32, shape=(None, ) + env.observation_space.shape)

        # Actor: P (policy) network
        p_scope_name = "p_func"
        with tf.variable_scope(p_scope_name) as scope:
            p_values = _build_p_network(registry, self.cur_observations,
                                        dim_actions, config)
            p_func_vars = _scope_vars(scope.name)

        # Action outputs
        a_scope_name = "a_func"
        with tf.variable_scope(a_scope_name):
            self.output_actions = _build_action_network(
                p_values, low_action, high_action, self.stochastic, self.eps,
                config["exploration_theta"], config["exploration_sigma"])
        # reuse the scope to grab the OU-noise variable created above so the
        # exploration state can be reset between episodes
        with tf.variable_scope(a_scope_name, reuse=True):
            exploration_sample = tf.get_variable(name="ornstein_uhlenbeck")
            self.reset_noise_op = tf.assign(exploration_sample,
                                            dim_actions * [.0])

        # Replay inputs
        self.obs_t = tf.placeholder(
            tf.float32,
            shape=(None, ) + env.observation_space.shape,
            name="observation")
        self.act_t = tf.placeholder(
            tf.float32, shape=(None, ) + env.action_space.shape, name="action")
        self.rew_t = tf.placeholder(tf.float32, [None], name="reward")
        self.obs_tp1 = tf.placeholder(
            tf.float32, shape=(None, ) + env.observation_space.shape)
        self.done_mask = tf.placeholder(tf.float32, [None], name="done")
        self.importance_weights = tf.placeholder(
            tf.float32, [None], name="weight")

        # closure kept on self so the multi-gpu optimizer can rebuild the
        # loss graph per device tower
        def build_loss(obs_t, act_t, rew_t, obs_tp1, done_mask,
                       importance_weights):
            return ModelAndLoss(registry, dim_actions, low_action, high_action,
                                config, obs_t, act_t, rew_t, obs_tp1,
                                done_mask, importance_weights)

        self.loss_inputs = [
            ("obs", self.obs_t),
            ("actions", self.act_t),
            ("rewards", self.rew_t),
            ("new_obs", self.obs_tp1),
            ("dones", self.done_mask),
            ("weights", self.importance_weights),
        ]
        loss_obj = build_loss(self.obs_t, self.act_t, self.rew_t, self.obs_tp1,
                              self.done_mask, self.importance_weights)
        self.build_loss = build_loss

        actor_loss = loss_obj.actor_loss
        weighted_error = loss_obj.loss
        q_func_vars = loss_obj.q_func_vars
        target_p_func_vars = loss_obj.target_p_func_vars
        target_q_func_vars = loss_obj.target_q_func_vars
        self.p_t = loss_obj.p_t
        self.q_t = loss_obj.q_t
        self.q_tp0 = loss_obj.q_tp0
        self.q_tp1 = loss_obj.q_tp1
        self.td_error = loss_obj.td_error

        # optional L2 weight regularization (biases excluded)
        if config["l2_reg"] is not None:
            for var in p_func_vars:
                if "bias" not in var.name:
                    actor_loss += config["l2_reg"] * 0.5 * tf.nn.l2_loss(var)
            for var in q_func_vars:
                if "bias" not in var.name:
                    weighted_error += config["l2_reg"] * 0.5 * tf.nn.l2_loss(
                        var)

        # compute optimization op (potentially with gradient clipping)
        if config["grad_norm_clipping"] is not None:
            self.actor_grads_and_vars = _minimize_and_clip(
                actor_optimizer,
                actor_loss,
                var_list=p_func_vars,
                clip_val=config["grad_norm_clipping"])
            self.critic_grads_and_vars = _minimize_and_clip(
                critic_optimizer,
                weighted_error,
                var_list=q_func_vars,
                clip_val=config["grad_norm_clipping"])
        else:
            self.actor_grads_and_vars = actor_optimizer.compute_gradients(
                actor_loss, var_list=p_func_vars)
            self.critic_grads_and_vars = critic_optimizer.compute_gradients(
                weighted_error, var_list=q_func_vars)
        # drop variables with no gradient so apply_gradients gets clean lists
        self.actor_grads_and_vars = [(g, v)
                                     for (g, v) in self.actor_grads_and_vars
                                     if g is not None]
        self.critic_grads_and_vars = [(g, v)
                                      for (g, v) in self.critic_grads_and_vars
                                      if g is not None]
        self.grads_and_vars = (
            self.actor_grads_and_vars + self.critic_grads_and_vars)
        self.grads = [g for (g, v) in self.grads_and_vars]
        self.actor_train_expr = actor_optimizer.apply_gradients(
            self.actor_grads_and_vars)
        self.critic_train_expr = critic_optimizer.apply_gradients(
            self.critic_grads_and_vars)

        # update_target_fn will be called periodically to copy Q network to
        # target Q network; sorting by name pairs each live variable with its
        # counterpart in the target scope
        self.tau_value = config.get("tau")
        self.tau = tf.placeholder(tf.float32, (), name="tau")
        update_target_expr = []
        for var, var_target in zip(
                sorted(q_func_vars, key=lambda v: v.name),
                sorted(target_q_func_vars, key=lambda v: v.name)):
            update_target_expr.append(
                var_target.assign(self.tau * var +
                                  (1.0 - self.tau) * var_target))
        for var, var_target in zip(
                sorted(p_func_vars, key=lambda v: v.name),
                sorted(target_p_func_vars, key=lambda v: v.name)):
            update_target_expr.append(
                var_target.assign(self.tau * var +
                                  (1.0 - self.tau) * var_target))
        self.update_target_expr = tf.group(*update_target_expr)

    # support both hard and soft sync
    def update_target(self, sess, tau=None):
        """Blend target nets toward the live nets (tau=1.0 is a hard copy)."""
        return sess.run(
            self.update_target_expr,
            feed_dict={self.tau: tau or self.tau_value})

    def act(self, sess, obs, eps, stochastic=True):
        """Compute actions for a batch of observations (noisy if stochastic)."""
        return sess.run(
            self.output_actions,
            feed_dict={
                self.cur_observations: obs,
                self.stochastic: stochastic,
                self.eps: eps
            })

    def compute_gradients(self, sess, obs_t, act_t, rew_t, obs_tp1, done_mask,
                          importance_weights):
        """Evaluate TD errors and gradients for a replay batch (no update)."""
        td_err, grads = sess.run(
            [self.td_error, self.grads],
            feed_dict={
                self.obs_t: obs_t,
                self.act_t: act_t,
                self.rew_t: rew_t,
                self.obs_tp1: obs_tp1,
                self.done_mask: done_mask,
                self.importance_weights: importance_weights
            })
        return td_err, grads

    def compute_td_error(self, sess, obs_t, act_t, rew_t, obs_tp1, done_mask,
                         importance_weights):
        """Evaluate TD errors only (e.g. to refresh replay priorities)."""
        td_err = sess.run(
            self.td_error,
            feed_dict={
                self.obs_t: [np.array(ob) for ob in obs_t],
                self.act_t: act_t,
                self.rew_t: rew_t,
                self.obs_tp1: [np.array(ob) for ob in obs_tp1],
                self.done_mask: done_mask,
                self.importance_weights: importance_weights
            })
        return td_err

    def apply_gradients(self, sess, grads):
        """Apply externally computed gradients (fed via the grad tensors)."""
        assert len(grads) == len(self.grads_and_vars)
        feed_dict = {ph: g for (g, ph) in zip(grads, self.grads)}
        sess.run(
            [self.critic_train_expr, self.actor_train_expr],
            feed_dict=feed_dict)

    def compute_apply(self, sess, obs_t, act_t, rew_t, obs_tp1, done_mask,
                      importance_weights):
        """One combined step: compute TD errors and run both train ops."""
        td_err, _, _ = sess.run(
            [self.td_error, self.critic_train_expr, self.actor_train_expr],
            feed_dict={
                self.obs_t: obs_t,
                self.act_t: act_t,
                self.rew_t: rew_t,
                self.obs_tp1: obs_tp1,
                self.done_mask: done_mask,
                self.importance_weights: importance_weights
            })
        return td_err

    def reset_noise(self, sess):
        """Zero the Ornstein-Uhlenbeck exploration state."""
        sess.run(self.reset_noise_op)
| 39.765306 | 79 | 0.608417 | 2,006 | 15,588 | 4.422233 | 0.147557 | 0.018036 | 0.020291 | 0.019276 | 0.436591 | 0.376959 | 0.332995 | 0.255777 | 0.225566 | 0.183632 | 0 | 0.009169 | 0.300359 | 15,588 | 391 | 80 | 39.867008 | 0.804236 | 0.096869 | 0 | 0.214286 | 0 | 0 | 0.032797 | 0 | 0 | 0 | 0 | 0 | 0.003571 | 1 | 0.057143 | false | 0 | 0.071429 | 0.010714 | 0.178571 | 0.003571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d590209692c1d7210deccd9547439bea4bd197cf | 51,866 | py | Python | MindSPONGE/mindsponge/md/simulation.py | mindspore-ai/mindscience | b5269245915695de2d99fb290fef662c241db189 | [
"Apache-2.0"
] | 3 | 2021-11-10T06:17:50.000Z | 2022-03-21T14:25:30.000Z | MindSPONGE/mindsponge/md/simulation.py | mindspore-ai/mindscience | b5269245915695de2d99fb290fef662c241db189 | [
"Apache-2.0"
] | null | null | null | MindSPONGE/mindsponge/md/simulation.py | mindspore-ai/mindscience | b5269245915695de2d99fb290fef662c241db189 | [
"Apache-2.0"
] | 1 | 2021-12-05T11:41:29.000Z | 2021-12-05T11:41:29.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Simulation"""
import numpy as np
import mindspore.common.dtype as mstype
from mindspore import Tensor
from mindspore import nn
from mindspore.common.parameter import Parameter
from mindspore.ops import functional as F
from mindspore.ops import operations as P
from mindsponge import Angle
from mindsponge import Bond
from mindsponge import Dihedral
from mindsponge import LangevinLiujian
from mindsponge import LennardJonesInformation
from mindsponge import MdInformation
from mindsponge import NonBond14
from mindsponge import NeighborList
from mindsponge import ParticleMeshEwald
from mindsponge import RestrainInformation
from mindsponge import SimpleConstarin
from mindsponge import VirtualInformation
from mindsponge import CoordinateMolecularMap
from mindsponge import MCBARO
class Controller:
    """Holds command-line file paths and the parsed MD control file.

    Attributes:
        md_task (str): first line of the input file, naming the MD task.
        command_set (dict): ``flag -> value`` pairs parsed from the
            ``flag = value`` lines of the input file.
    """

    def __init__(self, args_opt):
        self.input_file = args_opt.i
        self.initial_coordinates_file = args_opt.c
        self.amber_parm = args_opt.amber_parm
        self.restrt = args_opt.r
        self.mdcrd = args_opt.x
        self.mdout = args_opt.o
        self.mdbox = args_opt.box
        self.command_set = {}
        self.md_task = None
        self.commands_from_in_file()
        self.punctuation = ","

    def commands_from_in_file(self):
        """Parse the control file.

        Line 1 is the task name. Every later non-comment line of the form
        ``flag = value`` (text after the first comma is ignored, lines
        starting with '#' are skipped) is stored in ``self.command_set``.
        """
        # context manager ensures the handle is closed even if parsing fails
        # (the original leaked the handle on an exception between open/close)
        with open(self.input_file, 'r') as file:
            context = file.readlines()
        self.md_task = context[0].strip()
        for val in context:
            val = val.strip()
            if val and val[0] != '#' and ("=" in val):
                # anything after the first comma is an inline comment
                val = val[:val.index(",")] if ',' in val else val
                assert len(val.strip().split("=")) == 2
                flag, value = val.strip().split("=")
                value = value.replace(" ", "")
                flag = flag.replace(" ", "")
                if flag not in self.command_set:
                    self.command_set[flag] = value
                else:
                    print("ERROR COMMAND FILE")
class Simulation(nn.Cell):
'''simulation'''
def __init__(self, args_opt):
    """Assemble the whole MD system from the parsed command-line options.

    Builds the Controller/MdInformation pair, then every force-field and
    integrator sub-module, copies their state into tensors and operators,
    and -- when mode == 2 with a Monte-Carlo barostat requested -- creates
    the MC-barostat state as well.

    Args:
        args_opt: parsed command-line namespace (fields i, c, amber_parm,
            r, x, o, box -- see Controller).
    """
    super(Simulation, self).__init__()
    self.control = Controller(args_opt)
    self.md_info = MdInformation(self.control)
    self.mode = self.md_info.mode
    # Bonded force-field terms; each records whether its section was found.
    self.bond = Bond(self.control)
    self.bond_is_initialized = self.bond.is_initialized
    self.angle = Angle(self.control)
    self.angle_is_initialized = self.angle.is_initialized
    self.dihedral = Dihedral(self.control)
    self.dihedral_is_initialized = self.dihedral.is_initialized
    self.nb14 = NonBond14(self.control, self.dihedral, self.md_info.atom_numbers)
    self.nb14_is_initialized = self.nb14.is_initialized
    # Non-bonded machinery: neighbor list, Lennard-Jones, PME electrostatics.
    self.nb_info = NeighborList(self.control, self.md_info.atom_numbers, self.md_info.box_length)
    self.lj_info = LennardJonesInformation(self.control, self.md_info.nb.cutoff, self.md_info.sys.box_length)
    self.lj_info_is_initialized = self.lj_info.is_initialized
    # Langevin-Liujian thermostat parameters.
    self.liujian_info = LangevinLiujian(self.control, self.md_info.atom_numbers)
    self.liujian_info_is_initialized = self.liujian_info.is_initialized
    self.pme_method = ParticleMeshEwald(self.control, self.md_info)
    self.pme_is_initialized = self.pme_method.is_initialized
    # Positional restraints and the simple (SHAKE-like) pair constraints.
    self.restrain = RestrainInformation(self.control, self.md_info.atom_numbers, self.md_info.crd)
    self.restrain_is_initialized = self.restrain.is_initialized
    self.simple_constrain_is_initialized = 0
    self.simple_constrain = SimpleConstarin(self.control, self.md_info, self.bond, self.angle, self.liujian_info)
    self.simple_constrain_is_initialized = self.simple_constrain.is_initialized
    print("self.simple_constrain_is_initialized", self.simple_constrain_is_initialized)
    self.freedom = self.simple_constrain.system_freedom
    self.vatom = VirtualInformation(self.control, self.md_info, self.md_info.sys.freedom)
    self.vatom_is_initialized = 1
    self.random = P.UniformReal(seed=1)
    self.pow = P.Pow()
    # Molecule map used for periodic wrapping of whole molecules.
    self.mol_map = CoordinateMolecularMap(self.md_info.atom_numbers, self.md_info.sys.box_length, self.md_info.crd,
                                          self.md_info.nb.excluded_atom_numbers, self.md_info.nb.h_excluded_numbers,
                                          self.md_info.nb.h_excluded_list_start, self.md_info.nb.h_excluded_list)
    self.mol_map_is_initialized = 1
    # Order matters: init_params caches scalars used by init_tensor/op_define.
    self.init_params()
    self.init_tensor()
    self.op_define()
    self.depend = P.Depend()
    self.total_count = Parameter(Tensor(0, mstype.int32), requires_grad=False)
    self.accept_count = Parameter(Tensor(0, mstype.int32), requires_grad=False)
    self.is_molecule_map_output = self.md_info.output.is_molecule_map_output
    self.target_pressure = self.md_info.sys.target_pressure
    self.nx = self.nb_info.nx
    self.ny = self.nb_info.ny
    self.nz = self.nb_info.nz
    self.pme_inverse_box_vector = Parameter(Tensor(self.pme_method.pme_inverse_box_vector, mstype.float32),
                                            requires_grad=False)
    self.pme_inverse_box_vector_init = Parameter(Tensor(self.pme_method.pme_inverse_box_vector, mstype.float32),
                                                 requires_grad=False)
    self.mc_baro_is_initialized = 0
    self.bd_baro_is_initialized = 0
    self.constant_unit_max_float = 4294967296.0  # 2**32, span of the uint32 coordinate space
    self.volume = Parameter(Tensor(0, mstype.float32), requires_grad=False)
    # NPT with a Monte-Carlo barostat: extra state for volume-move trials.
    if self.mode == 2 and self.control.command_set["barostat"] == "monte_carlo":
        self.mc_baro = MCBARO(self.control, self.md_info.atom_numbers, self.md_info.sys.target_pressure,
                              self.md_info.sys.box_length, self.md_info.res.is_initialized, self.md_info.mode)
        self.mc_baro_is_initialized = self.mc_baro.is_initialized
        self.update_interval = self.mc_baro.update_interval
        self.mc_baro_energy_old = Parameter(Tensor(0, mstype.float32), requires_grad=False)
        self.potential = Parameter(Tensor(0, mstype.float32), requires_grad=False)
        # Backups used to roll back a rejected Monte-Carlo volume move.
        self.frc_backup = Parameter(Tensor(np.zeros([self.atom_numbers, 3]), mstype.float32), requires_grad=False)
        self.crd_backup = Parameter(Tensor(np.zeros([self.atom_numbers, 3]), mstype.float32), requires_grad=False)
        self.crd_scale_factor = Parameter(Tensor(0.0, mstype.float32), requires_grad=False)
        self.system_reinitializing_count = Parameter(Tensor(0, mstype.int32), requires_grad=False)
        self.mc_baro_energy_new = Parameter(Tensor(0.0, mstype.float32), requires_grad=False)
        self.scale_coordinate_by_residue = self.md_info.res.is_initialized
        self.extra_term = Parameter(Tensor(0, mstype.float32), requires_grad=False)
        self.delta_v = Parameter(Tensor(0.0, mstype.float32), requires_grad=False)
        self.target_temperature = self.md_info.sys.target_temperature
        self.vdevided = Parameter(Tensor(0.0, mstype.float32), requires_grad=False)
        self.log = P.Log()
        self.mc_baro_accept_possibility = Parameter(Tensor(0, mstype.float32), requires_grad=False)
        self.exp = P.Exp()
        self.mc_baro_new_v = self.mc_baro.newv
        self.mc_baro_v0 = Parameter(Tensor(self.mc_baro.V0, mstype.float32), requires_grad=False)
        self.mc_baro_new_v = self.mc_baro.newv  # NOTE(review): duplicate of the assignment two lines up
        self.check_interval = self.mc_baro.check_interval
        self.mc_baro_deltav_max = self.mc_baro.deltav_max
def init_params(self):
    """Flatten frequently used scalars from the sub-modules into attributes."""
    # Placeholder energy accumulators (overwritten during the run).
    self.bond_energy_sum = Tensor(0, mstype.int32)
    self.angle_energy_sum = Tensor(0, mstype.int32)
    self.dihedral_energy_sum = Tensor(0, mstype.int32)
    self.nb14_lj_energy_sum = Tensor(0, mstype.int32)
    self.nb14_cf_energy_sum = Tensor(0, mstype.int32)
    self.lj_energy_sum = Tensor(0, mstype.int32)
    self.ee_ene = Tensor(0, mstype.int32)
    self.total_energy = Tensor(0, mstype.int32)
    # Local aliases keep the attribute copies below readable.
    md_info = self.md_info
    nb_info = self.nb_info
    pme = self.pme_method
    liujian = self.liujian_info
    # Topology counters.
    self.ntwx = md_info.ntwx
    self.atom_numbers = md_info.atom_numbers
    self.residue_numbers = md_info.residue_numbers
    self.bond_numbers = self.bond.bond_numbers
    self.angle_numbers = self.angle.angle_numbers
    self.dihedral_numbers = self.dihedral.dihedral_numbers
    self.nb14_numbers = self.nb14.nb14_numbers
    # Neighbor-list geometry and cutoff configuration.
    self.nxy = nb_info.nxy
    self.grid_numbers = nb_info.grid_numbers
    self.max_atom_in_grid_numbers = nb_info.max_atom_in_grid_numbers
    self.max_neighbor_numbers = nb_info.max_neighbor_numbers
    self.excluded_atom_numbers = md_info.nb.excluded_atom_numbers
    self.refresh_count = Parameter(Tensor(nb_info.refresh_count, mstype.int32), requires_grad=False)
    self.refresh_interval = nb_info.refresh_interval
    self.skin = nb_info.skin
    self.cutoff = nb_info.cutoff
    self.cutoff_square = nb_info.cutoff_square
    self.cutoff_with_skin = nb_info.cutoff_with_skin
    self.half_cutoff_with_skin = nb_info.half_cutoff_with_skin
    self.cutoff_with_skin_square = nb_info.cutoff_with_skin_square
    self.half_skin_square = nb_info.half_skin_square
    # PME parameters; beta is also kept as non-trainable Parameter copies.
    self.beta = pme.beta
    self.d_beta = Parameter(Tensor(pme.beta, mstype.float32), requires_grad=False)
    self.beta_init = Parameter(Tensor(pme.beta, mstype.float32), requires_grad=False)
    self.fftx = pme.fftx
    self.ffty = pme.ffty
    self.fftz = pme.fftz
    # Langevin-Liujian thermostat settings.
    self.random_seed = liujian.random_seed
    self.dt = liujian.dt
    self.half_dt = liujian.half_dt
    self.exp_gamma = liujian.exp_gamma
    self.update = False
    self.file = None
    self.datfile = None
    self.max_velocity = liujian.max_velocity
    # Boltzmann constant -- presumably kcal/(mol*K); TODO confirm units.
    self.constant_kb = 0.00198716
def init_tensor(self):
    '''Copy host-side arrays from the sub-modules into Tensors/Parameters.

    Parameters hold mutable run-state (coordinates, velocities, neighbor-list
    buffers); plain Tensors hold constant topology data.
    '''
    # MD_Reset_Atom_Energy_And_Virial
    self.uint_crd = Parameter(Tensor(np.zeros([self.atom_numbers, 3], dtype=np.uint32), mstype.uint32),
                              requires_grad=False)
    self.need_potential = Tensor(0, mstype.int32)
    self.need_pressure = Tensor(0, mstype.int32)
    # Per-atom energy / virial accumulators.
    self.atom_energy = Parameter(Tensor([0] * self.atom_numbers, mstype.float32), requires_grad=False)
    self.atom_virial = Parameter(Tensor([0] * self.atom_numbers, mstype.float32), requires_grad=False)
    self.frc = Parameter(Tensor(np.zeros([self.atom_numbers, 3]), mstype.float32), requires_grad=False)
    self.crd = Parameter(
        Tensor(np.array(self.md_info.coordinate).reshape([self.atom_numbers, 3]), mstype.float32),
        requires_grad=False)
    # Conversion factors between float and unsigned-int periodic coordinates.
    self.crd_to_uint_crd_cof = Parameter(Tensor(self.md_info.pbc.crd_to_uint_crd_cof, mstype.float32),
                                         requires_grad=False)
    self.quarter_crd_to_uint_crd_cof = Parameter(
        Tensor(self.md_info.pbc.quarter_crd_to_uint_crd_cof, mstype.float32), requires_grad=False)
    self.uint_dr_to_dr_cof = Parameter(Tensor(self.md_info.pbc.uint_dr_to_dr_cof, mstype.float32),
                                       requires_grad=False)
    self.box_length = Parameter(Tensor(self.md_info.box_length, mstype.float32), requires_grad=False)
    self.charge = Parameter(Tensor(np.asarray(self.md_info.h_charge, dtype=np.float32), mstype.float32),
                            requires_grad=False)
    # Coordinate history buffers (previous step / last neighbor-list refresh).
    self.old_crd = Parameter(Tensor(np.zeros([self.atom_numbers, 3], dtype=np.float32), mstype.float32),
                             requires_grad=False)
    self.last_crd = Parameter(Tensor(np.zeros([self.atom_numbers, 3], dtype=np.float32), mstype.float32),
                              requires_grad=False)
    # Masses (per atom and per residue) and residue index ranges.
    self.mass = Tensor(self.md_info.h_mass, mstype.float32)
    self.mass_inverse = Tensor(self.md_info.h_mass_inverse, mstype.float32)
    self.res_mass = Tensor(self.md_info.res.h_mass, mstype.float32)
    self.res_mass_inverse = Tensor(self.md_info.res.h_mass_inverse, mstype.float32)
    self.res_start = Tensor(self.md_info.h_res_start, mstype.int32)
    self.res_end = Tensor(self.md_info.h_res_end, mstype.int32)
    self.velocity = Parameter(Tensor(self.md_info.velocity, mstype.float32), requires_grad=False)
    self.acc = Parameter(Tensor(np.zeros([self.atom_numbers, 3], np.float32), mstype.float32), requires_grad=False)
    # Bond topology: endpoints, force constants, equilibrium lengths.
    self.bond_atom_a = Tensor(np.asarray(self.bond.h_atom_a, np.int32), mstype.int32)
    self.bond_atom_b = Tensor(np.asarray(self.bond.h_atom_b, np.int32), mstype.int32)
    self.bond_k = Tensor(np.asarray(self.bond.h_k, np.float32), mstype.float32)
    self.bond_r0 = Tensor(np.asarray(self.bond.h_r0, np.float32), mstype.float32)
    # Angle topology.
    self.angle_atom_a = Tensor(np.asarray(self.angle.h_atom_a, np.int32), mstype.int32)
    self.angle_atom_b = Tensor(np.asarray(self.angle.h_atom_b, np.int32), mstype.int32)
    self.angle_atom_c = Tensor(np.asarray(self.angle.h_atom_c, np.int32), mstype.int32)
    self.angle_k = Tensor(np.asarray(self.angle.h_angle_k, np.float32), mstype.float32)
    self.angle_theta0 = Tensor(np.asarray(self.angle.h_angle_theta0, np.float32), mstype.float32)
    # Dihedral topology and Fourier-series coefficients.
    self.dihedral_atom_a = Tensor(np.asarray(self.dihedral.h_atom_a, np.int32), mstype.int32)
    self.dihedral_atom_b = Tensor(np.asarray(self.dihedral.h_atom_b, np.int32), mstype.int32)
    self.dihedral_atom_c = Tensor(np.asarray(self.dihedral.h_atom_c, np.int32), mstype.int32)
    self.dihedral_atom_d = Tensor(np.asarray(self.dihedral.h_atom_d, np.int32), mstype.int32)
    self.pk = Tensor(np.asarray(self.dihedral.h_pk, np.float32), mstype.float32)
    self.gamc = Tensor(np.asarray(self.dihedral.h_gamc, np.float32), mstype.float32)
    self.gams = Tensor(np.asarray(self.dihedral.h_gams, np.float32), mstype.float32)
    self.pn = Tensor(np.asarray(self.dihedral.h_pn, np.float32), mstype.float32)
    self.ipn = Tensor(np.asarray(self.dihedral.h_ipn, np.int32), mstype.int32)
    # 1-4 non-bonded pairs and their LJ/Coulomb scale factors.
    self.nb14_atom_a = Tensor(np.asarray(self.nb14.h_atom_a, np.int32), mstype.int32)
    self.nb14_atom_b = Tensor(np.asarray(self.nb14.h_atom_b, np.int32), mstype.int32)
    self.lj_scale_factor = Tensor(np.asarray(self.nb14.h_lj_scale_factor, np.float32), mstype.float32)
    self.cf_scale_factor = Tensor(np.asarray(self.nb14.h_cf_scale_factor, np.float32), mstype.float32)
    # Neighbor-list grid buffers; *_init copies keep the pristine state.
    self.grid_n = Tensor(self.nb_info.grid_n, mstype.int32)
    self.grid_length = Parameter(Tensor(self.nb_info.grid_length, mstype.float32), requires_grad=False)
    self.grid_length_inverse = Parameter(Tensor(self.nb_info.grid_length_inverse, mstype.float32),
                                         requires_grad=False)
    self.bucket = Parameter(Tensor(
        np.asarray(self.nb_info.bucket, np.int32).reshape([self.grid_numbers, self.max_atom_in_grid_numbers]),
        mstype.int32), requires_grad=False)  # Tobe updated
    self.bucket_init = Parameter(Tensor(
        np.asarray(self.nb_info.bucket, np.int32).reshape([self.grid_numbers, self.max_atom_in_grid_numbers]),
        mstype.int32), requires_grad=False)  # Tobe updated
    self.atom_numbers_in_grid_bucket = Parameter(Tensor(self.nb_info.atom_numbers_in_grid_bucket, mstype.int32),
                                                 requires_grad=False)  # to be updated
    self.atom_numbers_in_grid_bucket_init = Parameter(
        Tensor(self.nb_info.atom_numbers_in_grid_bucket, mstype.int32),
        requires_grad=False)  # to be updated
    self.atom_in_grid_serial = Parameter(Tensor(np.zeros([self.nb_info.atom_numbers,], np.int32), mstype.int32),
                                         requires_grad=False)  # to be updated
    self.atom_in_grid_serial_init = Parameter(
        Tensor(np.zeros([self.nb_info.atom_numbers,], np.int32), mstype.int32),
        requires_grad=False)  # to be updated
    # 125 = 5^3 neighboring-grid pointer table per grid cell.
    self.pointer = Parameter(
        Tensor(np.asarray(self.nb_info.pointer, np.int32).reshape([self.grid_numbers, 125]), mstype.int32),
        requires_grad=False)
    self.pointer_init = Parameter(
        Tensor(np.asarray(self.nb_info.pointer, np.int32).reshape([self.grid_numbers, 125]), mstype.int32),
        requires_grad=False)
    # Per-atom neighbor counts and serial lists, filled by the refresh op.
    self.nl_atom_numbers = Parameter(Tensor(np.zeros([self.atom_numbers,], np.int32), mstype.int32),
                                     requires_grad=False)
    self.nl_atom_serial = Parameter(
        Tensor(np.zeros([self.atom_numbers, self.max_neighbor_numbers], np.int32), mstype.int32),
        requires_grad=False)
    # Exclusion lists (atoms excluded from non-bonded interactions).
    self.excluded_list_start = Tensor(np.asarray(self.md_info.nb.h_excluded_list_start, np.int32), mstype.int32)
    self.excluded_list = Tensor(np.asarray(self.md_info.nb.h_excluded_list, np.int32), mstype.int32)
    self.excluded_numbers = Tensor(np.asarray(self.md_info.nb.h_excluded_numbers, np.int32), mstype.int32)
    self.need_refresh_flag = Tensor(np.asarray([0], np.int32), mstype.int32)
    # Lennard-Jones type table and coefficients.
    self.atom_lj_type = Tensor(self.lj_info.atom_lj_type, mstype.int32)
    self.lj_a = Tensor(self.lj_info.h_lj_a, mstype.float32)
    self.lj_b = Tensor(self.lj_info.h_lj_b, mstype.float32)
    # Thermostat state.
    self.sqrt_mass = Tensor(self.liujian_info.h_sqrt_mass, mstype.float32)
    self.rand_state = Parameter(Tensor(self.liujian_info.rand_state, mstype.float32))
    self.zero_fp_tensor = Tensor(np.asarray([0,], np.float32))
    self.set_zero = Tensor(np.asarray(0, np.int32))
    # All-zero force buffer reused wherever a "no force" tensor is needed.
    self.zero_frc = Parameter(Tensor(np.zeros([self.atom_numbers, 3], dtype=np.float32), mstype.float32),
                              requires_grad=False)
def op_define(self):
    '''Instantiate the MindSpore operators used during the run.'''
    # Coordinate conversion, temperature and RNG-state ops.
    self.crd_to_uint_crd = P.CrdToUintCrd(self.atom_numbers)
    self.crd_to_uint_crd_quarter = P.CrdToUintCrdQuarter(self.atom_numbers)
    self.mdtemp = P.MDTemperature(self.residue_numbers, self.atom_numbers)
    self.setup_random_state = P.MDIterationSetupRandState(self.atom_numbers, self.random_seed)
    # Force operators (each also produces per-atom energy and/or virial).
    self.bond_force_with_atom_energy_virial = P.BondForceWithAtomEnergyAndVirial(bond_numbers=self.bond_numbers,
                                                                                 atom_numbers=self.atom_numbers)
    self.angle_force_with_atom_energy = P.AngleForceWithAtomEnergy(angle_numbers=self.angle_numbers)
    self.dihedral_force_with_atom_energy = P.DihedralForceWithAtomEnergy(dihedral_numbers=self.dihedral_numbers)
    self.nb14_force_with_atom_energy = P.Dihedral14LJCFForceWithAtomEnergy(nb14_numbers=self.nb14_numbers,
                                                                           atom_numbers=self.atom_numbers)
    self.lj_force_pme_direct_force = P.LJForceWithPMEDirectForce(self.atom_numbers, self.cutoff, self.beta)
    self.pme_excluded_force = P.PMEExcludedForce(atom_numbers=self.atom_numbers,
                                                 excluded_numbers=self.excluded_atom_numbers, beta=self.beta)
    self.pme_reciprocal_force = P.PMEReciprocalForce(self.atom_numbers, self.beta, self.fftx, self.ffty, self.fftz,
                                                     self.md_info.box_length[0], self.md_info.box_length[1],
                                                     self.md_info.box_length[2])
    # Energy-only operators.
    self.bond_energy = P.BondEnergy(self.bond_numbers, self.atom_numbers)
    self.angle_energy = P.AngleEnergy(self.angle_numbers)
    self.dihedral_energy = P.DihedralEnergy(self.dihedral_numbers)
    self.nb14_lj_energy = P.Dihedral14LJEnergy(self.nb14_numbers, self.atom_numbers)
    self.nb14_cf_energy = P.Dihedral14CFEnergy(self.nb14_numbers, self.atom_numbers)
    self.lj_energy = P.LJEnergy(self.atom_numbers, self.cutoff_square)
    self.pme_energy = P.PMEEnergy(self.atom_numbers, self.excluded_atom_numbers, self.beta, self.fftx, self.ffty,
                                  self.fftz, self.md_info.box_length[0], self.md_info.box_length[1],
                                  self.md_info.box_length[2])
    # Langevin-Liujian leap-frog integrators (plain and velocity-clamped).
    self.md_iteration_leap_frog_liujian = P.MDIterationLeapFrogLiujian(self.atom_numbers, self.half_dt, self.dt,
                                                                       self.exp_gamma)
    self.md_iteration_leap_frog_liujian_with_max_vel = P.MDIterationLeapFrogLiujianWithMaxVel(self.atom_numbers,
                                                                                              self.half_dt, self.dt,
                                                                                              self.exp_gamma,
                                                                                              self.max_velocity)
    self.neighbor_list_update_all()
    self.random_force = Tensor(np.zeros([self.atom_numbers, 3], np.float32), mstype.float32)
    # simple_constrain
    self.constrain_pair_numbers = self.simple_constrain.constrain_pair_numbers
    self.last_pair_dr = Parameter(Tensor(np.zeros([self.constrain_pair_numbers, 3], np.float32), mstype.float32),
                                  requires_grad=False)
    if self.simple_constrain_is_initialized:
        # NOTE(review): duplicates the assignment just above the if-block.
        self.constrain_pair_numbers = self.simple_constrain.constrain_pair_numbers
        self.last_crd_to_dr = P.LastCrdToDr(self.atom_numbers, self.constrain_pair_numbers)
        self.constrain_pair = np.array(self.simple_constrain.h_constrain_pair)
        # Constraint-pair table columns: atom_i, atom_j, target distance, k.
        self.atom_i_serials = Tensor(self.constrain_pair[:, 0], mstype.int32)
        self.atom_j_serials = Tensor(self.constrain_pair[:, 1], mstype.int32)
        self.constant_rs = Tensor(self.constrain_pair[:, 2], mstype.float32)
        self.constrain_ks = Tensor(self.constrain_pair[:, 3], mstype.float32)
        self.last_pair_dr = Parameter(
            Tensor(np.zeros([self.constrain_pair_numbers, 3], np.float32), mstype.float32), requires_grad=False)
        self.constrain_frc = Parameter(Tensor(np.zeros([self.atom_numbers, 3], np.float32), mstype.float32),
                                       requires_grad=False)
        self.iteration_numbers = self.simple_constrain.info.iteration_numbers
        self.half_exp_gamma_plus_half = self.simple_constrain.half_exp_gamma_plus_half
        self.refresh_uint_crd = P.RefreshUintCrd(self.atom_numbers, self.half_exp_gamma_plus_half)
        self.constrain_force_cycle_with_virial = P.ConstrainForceCycleWithVirial(self.atom_numbers,
                                                                                 self.constrain_pair_numbers)
        self.constrain_force_cycle = P.ConstrainForceCycle(self.atom_numbers, self.constrain_pair_numbers)
        self.dt_inverse = self.simple_constrain.dt_inverse
        self.refresh_crd_vel = P.RefreshCrdVel(self.atom_numbers, self.dt_inverse, self.dt, self.exp_gamma,
                                               self.half_exp_gamma_plus_half)
    print("self.mol_map_is_initialized", self.mol_map_is_initialized)
    if self.mol_map_is_initialized:
        self.refresh_boxmaptimes = P.RefreshBoxmapTimes(self.atom_numbers)
        # Accumulated periodic-wrap counts per atom.
        self.box_map_times = Parameter(Tensor(self.mol_map.h_box_map_times, mstype.int32), requires_grad=False)
    self.residue_numbers = self.md_info.residue_numbers
    self.getcenterofmass = P.GetCenterOfMass(self.residue_numbers)
    self.mapcenterofmass = P.MapCenterOfMass(self.residue_numbers)
    # Plain leap-frog integrators and the gradient-descent minimizer.
    self.md_iteration_leap_frog = P.MDIterationLeapFrog(self.atom_numbers, self.dt)
    self.md_iteration_leap_frog_with_max_vel = P.MDIterationLeapFrogWithMaxVel(self.atom_numbers, self.dt,
                                                                               self.max_velocity)
    self.md_information_gradient_descent = P.MDIterationGradientDescent(self.atom_numbers, self.dt * self.dt)
def neighbor_list_update_all(self):
    """Build the four neighbor-list refresh operators.

    All four share the same geometry/cutoff configuration and differ only
    in their forced_update / forced_check flags:
      - neighbor_list_update:               periodic refresh (no forcing)
      - neighbor_list_update_forced_update: always refresh
      - neighbor_list_update_nb:            always refresh, always check
      - neighbor_list_update_mc:            check only, no forced refresh
    """
    # Configuration shared by every NeighborListRefresh instance; factoring
    # it out keeps the four constructions in sync.
    common = dict(grid_numbers=self.grid_numbers,
                  atom_numbers=self.atom_numbers,
                  not_first_time=1,
                  nxy=self.nxy,
                  excluded_atom_numbers=self.excluded_atom_numbers,
                  cutoff_square=self.cutoff_square,
                  half_skin_square=self.half_skin_square,
                  cutoff_with_skin=self.cutoff_with_skin,
                  half_cutoff_with_skin=self.half_cutoff_with_skin,
                  cutoff_with_skin_square=self.cutoff_with_skin_square,
                  refresh_interval=self.refresh_interval,
                  cutoff=self.cutoff,
                  skin=self.skin,
                  max_atom_in_grid_numbers=self.max_atom_in_grid_numbers,
                  max_neighbor_numbers=self.max_neighbor_numbers)
    self.neighbor_list_update = P.NeighborListRefresh(**common)
    self.neighbor_list_update_forced_update = P.NeighborListRefresh(forced_update=1, **common)
    self.neighbor_list_update_nb = P.NeighborListRefresh(forced_update=1, forced_check=1, **common)
    self.neighbor_list_update_mc = P.NeighborListRefresh(forced_update=0, forced_check=1, **common)
def simulation_beforce_caculate_force(self):
'''simulation before calculate force'''
self.uint_crd = self.crd_to_uint_crd_quarter(self.quarter_crd_to_uint_crd_cof, self.crd)
return self.uint_crd
def simulation_caculate_force(self, uint_crd, scaler, nl_atom_numbers, nl_atom_serial):
'''simulation calculate force'''
uint_crd = self.simulation_beforce_caculate_force()
force = self.zero_frc
if self.lj_info_is_initialized:
lj_force = self.lj_force_pme_direct_force(uint_crd, self.atom_lj_type, self.charge, scaler, nl_atom_numbers,
nl_atom_serial, self.lj_a, self.lj_b)
force = force + lj_force
if self.pme_is_initialized:
pme_excluded_force = self.pme_excluded_force(uint_crd, scaler, self.charge, self.excluded_list_start,
self.excluded_list, self.excluded_numbers)
pme_reciprocal_force = self.pme_reciprocal_force(uint_crd, self.charge)
force = force + pme_excluded_force
force = force + pme_reciprocal_force
if self.nb14_is_initialized:
nb14_force, _ = self.nb14_force_with_atom_energy(uint_crd, self.atom_lj_type, self.charge,
scaler, self.nb14_atom_a, self.nb14_atom_b,
self.lj_scale_factor, self.cf_scale_factor,
self.lj_a, self.lj_b)
force = force + nb14_force
if self.bond_is_initialized:
bond_force, _, _ = self.bond_force_with_atom_energy_virial(uint_crd, scaler, self.bond_atom_a,
self.bond_atom_b, self.bond_k, self.bond_r0)
force = force + bond_force
if self.angle_is_initialized:
angle_force, _ = self.angle_force_with_atom_energy(uint_crd, scaler, self.angle_atom_a,
self.angle_atom_b, self.angle_atom_c,
self.angle_k, self.angle_theta0)
force = force + angle_force
if self.dihedral_is_initialized:
dihedral_force, _ = self.dihedral_force_with_atom_energy(uint_crd, scaler,
self.dihedral_atom_a,
self.dihedral_atom_b,
self.dihedral_atom_c,
self.dihedral_atom_d, self.ipn,
self.pk, self.gamc, self.gams,
self.pn)
force = force + dihedral_force
return force
def simulation_caculate_energy(self, uint_crd, uint_dr_to_dr_cof):
    '''Evaluate all potential-energy terms and their total.

    Args:
        uint_crd: unsigned-int coordinates for the current step.
        uint_dr_to_dr_cof: conversion factor from uint displacement to float.

    Returns:
        (bond, angle, dihedral, nb14_lj, nb14_cf, lj, ee, total) energy sums.
    '''
    # Lennard-Jones energy over the neighbor list.
    lj_energy = self.lj_energy(uint_crd, self.atom_lj_type, self.charge, uint_dr_to_dr_cof, self.nl_atom_numbers,
                               self.nl_atom_serial, self.lj_a, self.lj_b)
    lj_energy_sum = P.ReduceSum(True)(lj_energy)
    # PME electrostatics: the four components are summed into one term.
    reciprocal_energy, self_energy, direct_energy, correction_energy = self.pme_energy(uint_crd, self.charge,
                                                                                       self.nl_atom_numbers,
                                                                                       self.nl_atom_serial,
                                                                                       uint_dr_to_dr_cof,
                                                                                       self.excluded_list_start,
                                                                                       self.excluded_list,
                                                                                       self.excluded_numbers)
    ee_ene = reciprocal_energy + self_energy + direct_energy + correction_energy
    # Scaled 1-4 Lennard-Jones and Coulomb terms.
    nb14_lj_energy = self.nb14_lj_energy(uint_crd, self.atom_lj_type, self.charge, uint_dr_to_dr_cof,
                                         self.nb14_atom_a, self.nb14_atom_b, self.lj_scale_factor, self.lj_a,
                                         self.lj_b)
    nb14_cf_energy = self.nb14_cf_energy(uint_crd, self.atom_lj_type, self.charge, uint_dr_to_dr_cof,
                                         self.nb14_atom_a, self.nb14_atom_b, self.cf_scale_factor)
    nb14_lj_energy_sum = P.ReduceSum(True)(nb14_lj_energy)
    nb14_cf_energy_sum = P.ReduceSum(True)(nb14_cf_energy)
    # Bonded terms.
    bond_energy = self.bond_energy(uint_crd, uint_dr_to_dr_cof, self.bond_atom_a, self.bond_atom_b, self.bond_k,
                                   self.bond_r0)
    bond_energy_sum = P.ReduceSum(True)(bond_energy)
    angle_energy = self.angle_energy(uint_crd, uint_dr_to_dr_cof, self.angle_atom_a, self.angle_atom_b,
                                     self.angle_atom_c, self.angle_k, self.angle_theta0)
    angle_energy_sum = P.ReduceSum(True)(angle_energy)
    dihedral_energy = self.dihedral_energy(uint_crd, uint_dr_to_dr_cof, self.dihedral_atom_a, self.dihedral_atom_b,
                                           self.dihedral_atom_c, self.dihedral_atom_d, self.ipn, self.pk, self.gamc,
                                           self.gams, self.pn)
    dihedral_energy_sum = P.ReduceSum(True)(dihedral_energy)
    total_energy = P.AddN()(
        [bond_energy_sum, angle_energy_sum, dihedral_energy_sum, nb14_lj_energy_sum, nb14_cf_energy_sum,
         lj_energy_sum, ee_ene])
    return bond_energy_sum, angle_energy_sum, dihedral_energy_sum, nb14_lj_energy_sum, nb14_cf_energy_sum, \
        lj_energy_sum, ee_ene, total_energy
def simulation_temperature(self):
    '''Sum the per-residue kinetic-energy temperatures into one scalar.'''
    per_residue = self.mdtemp(self.res_start, self.res_end, self.velocity, self.mass)
    temperature = P.ReduceSum()(per_residue)
    return temperature
def simulation_mditeration_leapfrog_liujian(self, inverse_mass, sqrt_mass_inverse, crd, frc, rand_state,
                                            random_frc):
    '''One Langevin-Liujian leap-frog step.

    Returns:
        (vel, crd, acc) with vel/acc read-after the coordinate update via
        F.depend to enforce execution order.
    '''
    # A positive max_velocity selects the velocity-clamped kernel.
    if self.max_velocity > 0:
        crd = self.md_iteration_leap_frog_liujian_with_max_vel(inverse_mass, sqrt_mass_inverse, self.velocity,
                                                               crd, frc, self.acc,
                                                               rand_state, random_frc)
    else:
        crd = self.md_iteration_leap_frog_liujian(inverse_mass, sqrt_mass_inverse, self.velocity, crd, frc,
                                                  self.acc,
                                                  rand_state, random_frc)
    vel = F.depend(self.velocity, crd)
    acc = F.depend(self.acc, crd)
    return vel, crd, acc
def simulation_mditeration_leapfrog(self, force):
    '''One plain leap-frog step (optionally velocity-clamped).'''
    clamp_velocity = self.max_velocity > 0
    if clamp_velocity:
        res = self.md_iteration_leap_frog_with_max_vel(self.velocity, self.crd, force, self.acc, self.mass_inverse)
    else:
        res = self.md_iteration_leap_frog(self.velocity, self.crd, force, self.acc, self.mass_inverse)
    # Tie the reads of velocity/crd to the integrator op's completion.
    vel = F.depend(self.velocity, res)
    crd = F.depend(self.crd, res)
    return vel, crd, res
def simulation_mdinformation_gradient_descent(self, force):
    '''One gradient-descent minimization step; velocity is reset to zero.'''
    step = self.md_information_gradient_descent(self.crd, force)
    # Minimization carries no kinetic energy: reuse the zero force buffer.
    self.velocity = self.zero_frc
    vel = F.depend(self.velocity, step)
    crd = F.depend(self.crd, step)
    return vel, crd, step
def main_print(self, *args):
    """Print one row of the energy table to stdout and mirror it to files.

    Args (positional, in order): steps, temperature, total potential
    energy, bond/angle/dihedral sums, nb14 LJ/CF sums, LJ sum, PME
    electrostatic energy -- all but steps come in as tensors.
    """
    steps, temperature, total_potential_energy, sigma_of_bond_ene, sigma_of_angle_ene, sigma_of_dihedral_ene, \
        nb14_lj_energy_sum, nb14_cf_energy_sum, lj_energy_sum, ee_ene = list(args)
    if steps == 0:
        print("_steps_ _TEMP_ _TOT_POT_ENE_ _BOND_ENE_ "
              "_ANGLE_ENE_ _DIHEDRAL_ENE_ _14LJ_ENE_ _14CF_ENE_ _LJ_ENE_ _CF_PME_ENE_")
    temperature = temperature.asnumpy()
    total_potential_energy = total_potential_energy.asnumpy()
    # Console shows steps + 1; the .out file below records the raw step value.
    print("{:>7.0f} {:>7.3f} {:>11.3f}".format(steps + 1, float(temperature), float(total_potential_energy)),
          end=" ")
    # Terms whose sub-module has no entries are skipped on the console
    # (their column simply does not appear for this run).
    if self.bond.bond_numbers > 0:
        sigma_of_bond_ene = sigma_of_bond_ene.asnumpy()
        print("{:>10.3f}".format(float(sigma_of_bond_ene)), end=" ")
    if self.angle.angle_numbers > 0:
        sigma_of_angle_ene = sigma_of_angle_ene.asnumpy()
        print("{:>11.3f}".format(float(sigma_of_angle_ene)), end=" ")
    if self.dihedral.dihedral_numbers > 0:
        sigma_of_dihedral_ene = sigma_of_dihedral_ene.asnumpy()
        print("{:>14.3f}".format(float(sigma_of_dihedral_ene)), end=" ")
    if self.nb14.nb14_numbers > 0:
        nb14_lj_energy_sum = nb14_lj_energy_sum.asnumpy()
        nb14_cf_energy_sum = nb14_cf_energy_sum.asnumpy()
        print("{:>10.3f} {:>10.3f}".format(float(nb14_lj_energy_sum), float(nb14_cf_energy_sum)), end=" ")
    lj_energy_sum = lj_energy_sum.asnumpy()
    ee_ene = ee_ene.asnumpy()
    print("{:>7.3f}".format(float(lj_energy_sum)), end=" ")
    print("{:>12.3f}".format(float(ee_ene)))
    # Mirror the full row into the .out file when it is open.
    if self.file is not None:
        self.file.write("{:>7.0f} {:>7.3f} {:>11.3f} {:>10.3f} {:>11.3f} {:>14.3f} {:>10.3f} {:>10.3f} {:>7.3f}"
                        " {:>12.3f}\n".format(steps, float(temperature), float(total_potential_energy),
                                              float(sigma_of_bond_ene), float(sigma_of_angle_ene),
                                              float(sigma_of_dihedral_ene), float(nb14_lj_energy_sum),
                                              float(nb14_cf_energy_sum), float(lj_energy_sum), float(ee_ene)))
    # Append raw coordinates to the binary trajectory file when open.
    if self.datfile is not None:
        self.datfile.write(self.crd.asnumpy())
def export_restart_file(self):
"""export restart file"""
#self.atom_numbers, self.crd, self.vel, self.box_length
filename = self.control.restrt
file = open(filename, "w")
file.write("mask\n")
file.write(str(self.atom_numbers) + " " + "20210805 \n")
vel = self.velocity.asnumpy()
crd = self.crd.asnumpy()
box_length = self.box_length.asnumpy()
if self.atom_numbers % 2 == 0:
for i in range(0, self.atom_numbers, 2):
file.write("{:12.7f}{:12.7f}{:12.7f}{:12.7f}{:12.7f}{:12.7f}\n"
"".format(float(crd[i][0]), float(crd[i][1]), float(crd[i][2]),
float(crd[i + 1][0]), float(crd[i + 1][1]), float(crd[i + 1][2])))
for i in range(0, self.atom_numbers, 2):
file.write("{:12.7f}{:12.7f}{:12.7f}{:12.7f}{:12.7f}{:12.7f}\n"
"".format(float(vel[i][0]), float(vel[i][1]), float(vel[i][2]),
float(vel[i + 1][0]), float(vel[i + 1][1]), float(vel[i + 1][2])))
else:
for i in range(0, self.atom_numbers - 1, 2):
file.write("{:12.7f}{:12.7f}{:12.7f}{:12.7f}{:12.7f}{:12.7f}\n"
"".format(float(crd[i][0]), float(crd[i][1]), float(crd[i][2]),
float(crd[i + 1][0]), float(crd[i + 1][1]), float(crd[i + 1][2])))
file.write("{:12.7f}{:12.7f}{:12.7f}\n".format(float(crd[-1][0]), float(crd[-1][1]), float(crd[-1][2])))
for i in range(0, self.atom_numbers - 1, 2):
file.write("{:12.7f}{:12.7f}{:12.7f}{:12.7f}{:12.7f}{:12.7f}\n"
"".format(float(vel[i][0]), float(vel[i][1]), float(vel[i][2]),
float(vel[i + 1][0]), float(vel[i + 1][1]), float(vel[i + 1][2])))
file.write("{:12.7f}{:12.7f}{:12.7f}\n".format(float(vel[-1][0]), float(vel[-1][1]), float(vel[-1][2])))
file.write("{:12.7f} {:12.7f} {:12.7f} {:12.7f} {:12.7f} {:12.7f}\n"
"".format(float(box_length[0]), float(box_length[1]), float(box_length[2]), 90.0, 90.0, 90.0))
file.close()
def main_initial(self):
"""main initial"""
if self.control.mdout:
self.file = open(self.control.mdout, 'w')
self.file.write("_steps_ _TEMP_ _TOT_POT_ENE_ _BOND_ENE_ "
"_ANGLE_ENE_ _DIHEDRAL_ENE_ _14LJ_ENE_ _14CF_ENE_ _LJ_ENE_ _CF_PME_ENE_\n")
if self.control.mdcrd:
self.datfile = open(self.control.mdcrd, 'wb')
def main_destroy(self):
"""main destroy"""
if self.file is not None:
self.file.close()
print("Save .out file successfully!")
if self.datfile is not None:
self.datfile.close()
print("Save .dat file successfully!")
def Constrain(self):
"SIMPLE_CONSTARIN Constrain"
constrain_frc = self.zero_frc
# if self.need_pressure: #TODO
for _ in range(self.iteration_numbers):
test_uint_crd = self.refresh_uint_crd(self.crd, self.quarter_crd_to_uint_crd_cof, constrain_frc,
self.mass_inverse)
if self.need_pressure:
force, _ = self.constrain_force_cycle_with_virial(test_uint_crd, self.uint_dr_to_dr_cof,
self.last_pair_dr, self.atom_i_serials,
self.atom_j_serials, self.constant_rs,
self.constrain_ks)
else:
force = self.constrain_force_cycle(test_uint_crd, self.uint_dr_to_dr_cof, self.last_pair_dr,
self.atom_i_serials,
self.atom_j_serials, self.constant_rs, self.constrain_ks)
constrain_frc = constrain_frc + force
# if self.need_pressure: #TODO
res = self.refresh_crd_vel(self.crd, self.velocity, constrain_frc, self.mass_inverse)
crd = self.depend(self.crd, res)
vel = self.depend(self.velocity, res)
return crd, vel, res
    def main_iteration(self, force):
        """Advance the simulation one MD step with the given force.

        Integrates velocities/coordinates according to ``self.mode``
        (0 = NVE leapfrog, -1 = gradient-descent minimization, otherwise
        the Liu-Jian thermostat when initialized), applies simple
        constraints if configured, and refreshes the neighbor list and
        box-map times.

        Returns ``(velocity, crd, res1, res2, res3)`` where the ``res``
        values are op results used for dependency ordering.
        """
        # Remember_Last_Coordinates: snapshot pair displacements before
        # integration so the constraint solver can restore bond lengths.
        if self.simple_constrain_is_initialized:
            self.last_pair_dr = self.last_crd_to_dr(self.crd, self.quarter_crd_to_uint_crd_cof, self.uint_dr_to_dr_cof,
                                                    self.atom_i_serials,
                                                    self.atom_j_serials, self.constant_rs, self.constrain_ks)
        if self.mode == 0: # NVE
            self.velocity, self.crd, _ = self.simulation_mditeration_leapfrog(force)
        elif self.mode == -1: # Minimization
            self.velocity, self.crd, _ = self.simulation_mdinformation_gradient_descent(force)
        else:
            # NOTE(review): if the Liu-Jian info is NOT initialized in
            # this branch, velocity/crd are left unchanged for the step —
            # confirm this is intentional.
            if self.liujian_info_is_initialized:
                self.velocity, self.crd, _ = self.simulation_mditeration_leapfrog_liujian(self.mass_inverse,
                                                                                          self.sqrt_mass, self.crd,
                                                                                          force, self.rand_state,
                                                                                          self.random_force)
        if self.simple_constrain_is_initialized:
            self.crd, self.velocity, res1 = self.Constrain()
        else:
            res1 = self.zero_fp_tensor
        # Re-quantize coordinates for the neighbor-list machinery.
        self.uint_crd = self.crd_to_uint_crd_quarter(self.quarter_crd_to_uint_crd_cof, self.crd)
        res2 = self.neighbor_list_update(self.atom_numbers_in_grid_bucket,
                                         self.bucket,
                                         self.crd,
                                         self.box_length,
                                         self.grid_n,
                                         self.grid_length_inverse,
                                         self.atom_in_grid_serial,
                                         self.old_crd,
                                         self.crd_to_uint_crd_cof,
                                         self.uint_crd,
                                         self.pointer,
                                         self.nl_atom_numbers,
                                         self.nl_atom_serial,
                                         self.uint_dr_to_dr_cof,
                                         self.excluded_list_start,
                                         self.excluded_list,
                                         self.excluded_numbers,
                                         self.need_refresh_flag,
                                         self.refresh_count)
        # Track how many times atoms were wrapped across the periodic box.
        res3 = self.refresh_boxmaptimes(self.crd, self.old_crd, 1.0 / self.box_length, self.box_map_times)
        return self.velocity, self.crd, res1, res2, res3
    def get_pressure(self, vel, mass, virial, volume, is_download):
        """Compute an instantaneous pressure estimate.

        Evaluates ``(sum(Ek) + sum(virial)) / (3 * volume)`` where the
        per-atom kinetic energy is ``0.5 * m * v^2``. Returns the value
        only when ``is_download`` is truthy, otherwise 0 (the reduction
        ops still execute either way).
        """
        # calculate MD_Atom_Ek
        ek = 0.5 * mass * P.ReduceSum(True)(vel * vel, 0) # could be optimized
        sum_of_atom_ek = P.ReduceSum(True)(ek)
        atom_virial = P.ReduceSum(True)(virial)
        v_inverse = 1 / volume
        pressure = (sum_of_atom_ek + atom_virial) / 3 * v_inverse
        if is_download:
            return pressure
        return 0
def get_potential(self, atom_energy, is_download):
potential = P.ReduceSum(True)(atom_energy)
if is_download:
return potential
return 0
    def construct(self, step, print_step):
        '''Run one simulation step (MindSpore graph entry point).

        On step 0 the neighbor list is force-built and the random state
        is initialized; otherwise the neighbor list is refreshed inside
        ``main_iteration``. Energies are only evaluated on steps where
        ``print_step == 0``; otherwise zero placeholders are returned so
        the graph output signature stays fixed.
        '''
        if step == 0:
            # Initial step: build the neighbor list unconditionally.
            res = self.neighbor_list_update_forced_update(self.atom_numbers_in_grid_bucket,
                                                          self.bucket,
                                                          self.crd,
                                                          self.box_length,
                                                          self.grid_n,
                                                          self.grid_length_inverse,
                                                          self.atom_in_grid_serial,
                                                          self.old_crd,
                                                          self.crd_to_uint_crd_cof,
                                                          self.uint_crd,
                                                          self.pointer,
                                                          self.nl_atom_numbers,
                                                          self.nl_atom_serial,
                                                          self.uint_dr_to_dr_cof,
                                                          self.excluded_list_start,
                                                          self.excluded_list,
                                                          self.excluded_numbers,
                                                          self.need_refresh_flag,
                                                          self.refresh_count)
        else:
            res = self.zero_fp_tensor
        force = self.simulation_caculate_force(self.uint_crd, self.uint_dr_to_dr_cof, self.nl_atom_numbers,
                                               self.nl_atom_serial)
        if step == 0:
            # Seed the thermostat's random number state once.
            self.rand_state = self.setup_random_state()
        self.velocity, self.crd, res1, res2, res3 = self.main_iteration(force)
        # self.velocity, self.crd, res1, res2, res3 = self.Main_Iteration(step, force)
        temperature = self.simulation_temperature()
        if print_step == 0:
            # Only evaluate the (expensive) energy terms on print steps.
            bond_energy_sum, angle_energy_sum, dihedral_energy_sum, nb14_lj_energy_sum, nb14_cf_energy_sum, \
            lj_energy_sum, ee_ene, total_energy = self.simulation_caculate_energy(self.uint_crd, self.uint_dr_to_dr_cof)
        else:
            bond_energy_sum = self.zero_fp_tensor
            angle_energy_sum = self.zero_fp_tensor
            dihedral_energy_sum = self.zero_fp_tensor
            nb14_lj_energy_sum = self.zero_fp_tensor
            nb14_cf_energy_sum = self.zero_fp_tensor
            lj_energy_sum = self.zero_fp_tensor
            ee_ene = self.zero_fp_tensor
            total_energy = self.zero_fp_tensor
        return temperature, total_energy, bond_energy_sum, angle_energy_sum, dihedral_energy_sum, nb14_lj_energy_sum, \
               nb14_cf_energy_sum, lj_energy_sum, ee_ene, res, res1, res2, res3
| 62.71584 | 120 | 0.578433 | 6,119 | 51,866 | 4.58065 | 0.069946 | 0.044739 | 0.020336 | 0.031467 | 0.66499 | 0.571016 | 0.486782 | 0.41953 | 0.368297 | 0.330622 | 0 | 0.024143 | 0.331566 | 51,866 | 826 | 121 | 62.791768 | 0.784332 | 0.028072 | 0 | 0.264498 | 0 | 0.008487 | 0.018627 | 0.006262 | 0 | 0 | 0 | 0.001211 | 0.001414 | 1 | 0.032532 | false | 0 | 0.029703 | 0 | 0.084866 | 0.022631 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d593ab3607b07be083ccac7468de89c6a5f553aa | 346 | py | Python | download_wmt16_en_de_data.py | kgarg8/NMT-RNN | 3c97f94dc244e7fbe188204793651ade36f4dced | [
"MIT"
] | 1 | 2021-10-01T15:03:35.000Z | 2021-10-01T15:03:35.000Z | download_wmt16_en_de_data.py | kgarg8/NMT-RNN | 3c97f94dc244e7fbe188204793651ade36f4dced | [
"MIT"
] | null | null | null | download_wmt16_en_de_data.py | kgarg8/NMT-RNN | 3c97f94dc244e7fbe188204793651ade36f4dced | [
"MIT"
] | null | null | null | from datasets import load_dataset
# Download the WMT16 German-English corpus and dump the test split to
# plain-text files (one sentence per line).
dataset = load_dataset('wmt16', 'de-en')
# NOTE(review): assumes this directory already exists — open() will fail
# otherwise; confirm the caller creates it beforehand.
base = 'data/wmt16_en_de/'
# English side of the test set.
with open(base + 'test.en','w') as en:
    for data in dataset['test']['translation']:
        en.write(data['en']+'\n')
with open(base + 'test.de','w') as de:
for data in dataset['test']['translation']:
de.write(data['de']+'\n') | 28.833333 | 47 | 0.615607 | 54 | 346 | 3.87037 | 0.37037 | 0.105263 | 0.114833 | 0.15311 | 0.296651 | 0.296651 | 0 | 0 | 0 | 0 | 0 | 0.013937 | 0.17052 | 346 | 12 | 48 | 28.833333 | 0.714286 | 0 | 0 | 0.222222 | 0 | 0 | 0.233429 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d594e3e8fe7c45b6ff37f0de5953a37cae49e1bc | 1,771 | py | Python | WordVectorFetcher.py | whosxavierwu/Chinese-Word-Vectors | 7841eb6a4ec235662828b3cb13f45956c28f2a3c | [
"Apache-2.0"
] | null | null | null | WordVectorFetcher.py | whosxavierwu/Chinese-Word-Vectors | 7841eb6a4ec235662828b3cb13f45956c28f2a3c | [
"Apache-2.0"
] | null | null | null | WordVectorFetcher.py | whosxavierwu/Chinese-Word-Vectors | 7841eb6a4ec235662828b3cb13f45956c28f2a3c | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
# Created by: wuzewei
# Created on: 2019/3/25 0025
from gensim.models import KeyedVectors
import numpy as np
from pyhanlp import HanLP
class WordVectorFetcher:
    """Look up word and sentence vectors from a pre-trained word2vec file.

    Sentence vectors are the average of the vectors of in-vocabulary
    tokens (segmented with HanLP); out-of-vocabulary words map to zeros.
    """

    def __init__(self, filename):
        self.wv_filename = filename
        self.wv = None

    def init(self):
        """Load the word vectors from disk; call once before any lookup."""
        self.wv = KeyedVectors.load_word2vec_format(self.wv_filename)

    def get_word_vector(self, word):
        """Return the vector for ``word``, or a zero vector if it is OOV."""
        if word in self.wv:
            return self.wv[word]
        return np.zeros(self.wv.vector_size)

    def get_sentence_vector(self, sentence):
        """Average the vectors of the in-vocabulary tokens of ``sentence``."""
        tokens = [term.word for term in HanLP.segment(sentence)]
        total = np.zeros(self.wv.vector_size)
        known = 0
        for token in tokens:
            if token in self.wv:
                total += self.get_word_vector(token)
                known += 1
        # An all-OOV sentence yields the zero vector unchanged.
        return total / known if known > 0 else total

    def get_sentence_similarity(self, s1, s2):
        """Cosine similarity between the averaged sentence vectors."""
        first = self.get_sentence_vector(s1)
        second = self.get_sentence_vector(s2)
        return self.wv.cosine_similarities(first, [second])
if __name__ == '__main__':
    # Demo: load the SGNS vectors and score a few sentence pairs.
    model_path = 'SGNS/sgns.target.word-word.dynwin5.thr10.neg5.dim300.iter5/sgns.target.word-word.dynwin5.thr10.neg5.dim300.iter5'
    demo = WordVectorFetcher(model_path)
    demo.init()
    pairs = [
        (u'今天天气算不错的了', u'今天在北京没下雨'),
        (u'车头大面积进气格栅用镀铬材质进行装饰后年轻化效果显著', u'同时,在车头两侧,还有LED光源的头灯进行加持,夜间点亮后辨识度也很高'),
        (u'方向盘低速灵活高速平稳,就算18寸的大脚跑高速120都稳稳得一点都不飘', u'在路上不放音乐听发动机声音很平顺,高速过弯车身倾斜也很小,高速120会有风噪声'),
    ]
    for left, right in pairs:
        print(demo.get_sentence_similarity(left, right))
| 33.415094 | 126 | 0.667984 | 235 | 1,771 | 4.838298 | 0.378723 | 0.058047 | 0.074758 | 0.060686 | 0.253298 | 0.209323 | 0.079156 | 0.079156 | 0.079156 | 0 | 0 | 0.037901 | 0.225296 | 1,771 | 52 | 127 | 34.057692 | 0.790816 | 0.108978 | 0 | 0 | 0 | 0.027778 | 0.173248 | 0.157325 | 0 | 0 | 0 | 0 | 0 | 1 | 0.138889 | false | 0 | 0.083333 | 0 | 0.361111 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5984597e026da67fe45ea35e3d5d4f7f24dc819 | 1,083 | py | Python | common_functions.py | KirillYabl/Comics_publisher | 85df6020f24fc5984f28ce0f875485e4f421f3a2 | [
"MIT"
] | null | null | null | common_functions.py | KirillYabl/Comics_publisher | 85df6020f24fc5984f28ce0f875485e4f421f3a2 | [
"MIT"
] | null | null | null | common_functions.py | KirillYabl/Comics_publisher | 85df6020f24fc5984f28ce0f875485e4f421f3a2 | [
"MIT"
] | null | null | null | import requests
def load_image(url, path):
    """Download an image from ``url`` and write it to ``path``.

    :param url: str, URL of the image to fetch.
    :param path: str, destination file path (written in binary mode).
    """
    response = requests.get(url)
    raise_response_errors(response)
    with open(path, 'wb') as image_file:
        image_file.write(response.content)
def raise_response_errors(response):
    """Check a requests response for errors.

    Raises an error if the response reports one.

    :param response: requests response object
    """
    # check HTTPError
    response.raise_for_status()
    # some sites can return 200 and write error in the JSON body; binary
    # responses (e.g. downloaded images) are not JSON at all, so parse
    # the body once and skip the check when it does not decode.
    try:
        payload = response.json()
    except ValueError:
        # Not a JSON body (requests' JSONDecodeError subclasses ValueError).
        return
    if 'error' in payload:
        raise requests.exceptions.HTTPError(payload['error'])
def get_last_xkcd_num():
    """Fetch the number of the most recent xkcd comic.

    :return: the ``num`` field of xkcd's current-comic JSON API response
    """
    api_url = 'https://xkcd.com/info.0.json'
    response = requests.get(api_url)
    raise_response_errors(response)
    return response.json()['num']
| 25.186047 | 69 | 0.597415 | 135 | 1,083 | 4.703704 | 0.414815 | 0.075591 | 0.089764 | 0.127559 | 0.211024 | 0.154331 | 0.154331 | 0.154331 | 0 | 0 | 0 | 0.004768 | 0.2253 | 1,083 | 42 | 70 | 25.785714 | 0.752086 | 0.422899 | 0 | 0.266667 | 0 | 0 | 0.077758 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.066667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d599ecfbc81eccc7acb2cf1afb0c4d9b6fde3f7a | 736 | py | Python | input_output/gpio_input.py | rakibulmdalam/IOT-Hardware-Abstraction-Layer | 4d344a82aa94ae561a7c2889f942c6a892e6810e | [
"MIT"
] | 1 | 2018-06-12T15:40:45.000Z | 2018-06-12T15:40:45.000Z | input_output/gpio_input.py | UyumazHakan/Hardware-Abstraction-Platform | 52f13df333351516a88497e4a655ee1333abe7eb | [
"MIT"
] | null | null | null | input_output/gpio_input.py | UyumazHakan/Hardware-Abstraction-Platform | 52f13df333351516a88497e4a655ee1333abe7eb | [
"MIT"
] | null | null | null | from .gpio_input_output import *
from time import sleep
from threading import Timer, Thread
class GPIOInput(GPIOInputOutput):
    """Input-mode GPIO pin with optional pull resistor and edge callbacks."""

    def __init__(self, config):
        super(GPIOInput, self).__init__(config)
        pull = self.config["gpiopullupdown"]
        if pull == "none":
            GPIO.setup(self.pin, GPIO.IN)
        else:
            # "up" selects the internal pull-up, anything else pull-down.
            resistor = GPIO.PUD_UP if pull == "up" else GPIO.PUD_DOWN
            GPIO.setup(self.pin, GPIO.IN, pull_up_down=resistor)
        self.state = GPIO.input(self.pin)

    def on_change(self, callback, bouncetime=100):
        """Invoke ``callback`` on both rising and falling edges."""
        GPIO.add_event_detect(self.pin, GPIO.BOTH, callback=callback, bouncetime=bouncetime)

    def stop_on_change(self):
        """Stop delivering edge events for this pin."""
        GPIO.remove_event_detect(self.pin)

    def get_state(self):
        """Read the pin level, cache it on ``self.state`` and return it."""
        self.state = GPIO.input(self.pin)
        return self.state
| 27.259259 | 86 | 0.736413 | 108 | 736 | 4.814815 | 0.398148 | 0.080769 | 0.063462 | 0.1 | 0.180769 | 0.180769 | 0 | 0 | 0 | 0 | 0 | 0.004724 | 0.137228 | 736 | 26 | 87 | 28.307692 | 0.814173 | 0 | 0 | 0.1 | 0 | 0 | 0.046322 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.15 | 0 | 0.45 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d59beefcb5d771dbc0b133551011f217f29ab97f | 964 | py | Python | os_scrapy_linkextractor/lx_extensions/regex.py | Ghamster0/scrapy-linkextractor | 11cd09c30e1a0d7f18474f240dc974488fb17c63 | [
"MIT"
] | 2 | 2020-07-20T03:54:01.000Z | 2020-07-29T12:00:59.000Z | os_scrapy_linkextractor/lx_extensions/regex.py | Ghamster0/os-scrapy-linkextractor | 11cd09c30e1a0d7f18474f240dc974488fb17c63 | [
"MIT"
] | 1 | 2020-07-21T03:14:02.000Z | 2020-07-21T03:14:02.000Z | os_scrapy_linkextractor/lx_extensions/regex.py | Ghamster0/os-scrapy-linkextractor | 11cd09c30e1a0d7f18474f240dc974488fb17c63 | [
"MIT"
] | 1 | 2020-07-21T01:17:56.000Z | 2020-07-21T01:17:56.000Z | from os_scrapy_linkextractor.linkextractors.regex import RegexLinkExtractor
from os_scrapy_linkextractor.lx_extensions import LinkExtractorExtension
class ReLinkExtractorExtension(LinkExtractorExtension):
    """Extension that wires regex-based link extraction into the rule system."""

    def __init__(self):
        self.name = "re"
        super(ReLinkExtractorExtension, self).__init__(RegexLinkExtractor)

    def _match_rule(self, rule):
        # This extension handles only rules explicitly typed "re".
        return rule.get("type", None) == "re"

    def _new_linkextractor(self, rule):
        kwargs = {}
        # Only forward domain filters that are a string, or a list/tuple
        # whose elements are all strings.
        for option in ("allow_domains", "deny_domains"):
            value = rule.get(option, None)
            if value is None:
                continue
            if isinstance(value, str):
                kwargs[option] = value
            elif isinstance(value, (list, tuple)) and all(
                isinstance(element, str) for element in value
            ):
                kwargs[option] = value
        kwargs["same_domain_only"] = rule.get("same_domain_only", None)
        return self.lx_cls(**kwargs)

    @classmethod
    def from_crawler(cls, crawler):
        return cls()
| 34.428571 | 88 | 0.640041 | 112 | 964 | 5.25 | 0.455357 | 0.054422 | 0.040816 | 0.085034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.258299 | 964 | 27 | 89 | 35.703704 | 0.822378 | 0 | 0 | 0 | 0 | 0 | 0.067427 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0.090909 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5a0090aa43ff53c9b28a557356175161cf42d2f | 736 | py | Python | falseSubmission.py | suhailnajeeb/pneumothorax-lungnet | eb3cafcef74e3840a5b013727d6cb39d0062f7ee | [
"MIT"
] | null | null | null | falseSubmission.py | suhailnajeeb/pneumothorax-lungnet | eb3cafcef74e3840a5b013727d6cb39d0062f7ee | [
"MIT"
] | null | null | null | falseSubmission.py | suhailnajeeb/pneumothorax-lungnet | eb3cafcef74e3840a5b013727d6cb39d0062f7ee | [
"MIT"
] | null | null | null | import pandas as pd
import glob2
import os
# Build a baseline submission: every test DICOM gets EncodedPixels '-1'
# (no pneumothorax) when a reference CSV says so, otherwise '0'.
PATH_VAL = '..\\Data\\dicom-images-test\\'
CSVFILE = '..\\Data\\kanvari.csv'
SUBCSV = '..\\out\\submission.csv'
# All test-set DICOM files, recursively.
val = glob2.glob(os.path.join(PATH_VAL,'**/*.dcm'))
# NOTE(review): CSVFILE above points at the same path — use it here?
df = pd.read_csv('..\\Data\\kanvari.csv')
ids = []
rles = []
# Issue: f = '1.2.276.0.7230010.3.1.4.8323329.7020.1517875202.386064'
for f in val:
    # Image id = filename without the '.dcm' extension (Windows paths).
    id = f.split('\\')[-1][:-4]
    x = df.loc[df.ImageId == id]
    try:
        x = x.iloc[0]['EncodedPixels']
        # NOTE(review): if EncodedPixels is read as a string column,
        # `x == -1` is never true and rle is always '0' — verify dtype.
        if (x == -1):
            rle = '-1'
        else:
            rle = '0'
    # NOTE(review): bare except also hides unrelated errors; it is meant
    # to catch missing ids (empty .iloc[0] lookup).
    except:
        rle = '0'
    ids.append(id)
    rles.append(rle)
sub_df = pd.DataFrame({'ImageId': ids, 'EncodedPixels': rles})
sub_df.head()
sub_df.to_csv(SUBCSV, index=False) | 20.444444 | 69 | 0.558424 | 110 | 736 | 3.672727 | 0.536364 | 0.037129 | 0.069307 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 0.222826 | 736 | 36 | 70 | 20.444444 | 0.615385 | 0.091033 | 0 | 0.076923 | 0 | 0 | 0.211078 | 0.140719 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.115385 | 0 | 0.115385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5a07c9caf586abadb82cefa679b347214d86c90 | 12,967 | py | Python | aizynthfinder/analysis/routes.py | ShantamShorewala/aizynthfinder | 6b15d5846558b14c4ce3c353727d9d676af7f6fb | [
"MIT"
] | 219 | 2020-06-15T08:04:53.000Z | 2022-03-31T09:02:47.000Z | aizynthfinder/analysis/routes.py | ShantamShorewala/aizynthfinder | 6b15d5846558b14c4ce3c353727d9d676af7f6fb | [
"MIT"
] | 56 | 2020-08-14T14:50:42.000Z | 2022-03-22T12:49:06.000Z | aizynthfinder/analysis/routes.py | ShantamShorewala/aizynthfinder | 6b15d5846558b14c4ce3c353727d9d676af7f6fb | [
"MIT"
] | 58 | 2020-06-15T13:36:42.000Z | 2022-03-21T06:18:02.000Z | """ Module containing classes to store and manipulate collections of synthetic routes.
"""
from __future__ import annotations
from typing import TYPE_CHECKING
import numpy as np
from route_distances.clustering import ClusteringHelper
from route_distances.route_distances import route_distances_calculator
from aizynthfinder.analysis.utils import (
CombinedReactionTrees,
RouteSelectionArguments,
)
from aizynthfinder.reactiontree import ReactionTree
from aizynthfinder.search.mcts import MctsSearchTree, MctsNode
from aizynthfinder.analysis import TreeAnalysis
if TYPE_CHECKING:
from aizynthfinder.utils.type_utils import (
StrDict,
PilImage,
Optional,
Any,
Dict,
Sequence,
)
from aizynthfinder.context.scoring import Scorer
class RouteCollection:
    """
    Holds a collection of reaction routes.

    It can be the top scored nodes, their scores and
    the reaction trees created from them.
    It can also be a cluster of such routes.

    The class has the functionality to compute collective results
    for the different routes such as images.

    Properties of individual route can be obtained with simple indexing.

    .. code-block::

        route0 = collection[0]

    :ivar all_scores: all the computed scores for the routes
    :ivar nodes: the top-ranked MCTS-like nodes
    :ivar scores: initial scores of top-ranked nodes or routes
    :ivar reaction_trees: the reaction trees created from the top-ranked nodes
    :ivar clusters: the created clusters from the collection

    :param reaction_trees: the trees to base the collection on
    """

    def __init__(self, reaction_trees: Sequence[ReactionTree], **kwargs) -> None:
        self._routes: Sequence[StrDict] = [{} for _ in range(len(reaction_trees))]
        self.reaction_trees = reaction_trees
        self._update_route_dict(reaction_trees, "reaction_tree")

        self.nodes = self._unpack_kwarg_with_default("nodes", None, **kwargs)
        self.scores = self._unpack_kwarg_with_default("scores", np.nan, **kwargs)
        self.all_scores = self._unpack_kwarg_with_default("all_scores", dict, **kwargs)

        self._dicts: Optional[Sequence[StrDict]] = self._unpack_kwarg("dicts", **kwargs)
        self._images: Optional[Sequence[PilImage]] = self._unpack_kwarg(
            "images", **kwargs
        )
        self._jsons: Optional[Sequence[str]] = self._unpack_kwarg("jsons", **kwargs)
        self.clusters: Optional[Sequence[RouteCollection]] = self._unpack_kwarg(
            "clusters", **kwargs
        )
        self._distance_matrix: Dict[str, np.ndarray] = {}
        self._combined_reaction_trees: Optional[CombinedReactionTrees] = None

    @classmethod
    def from_analysis(
        cls, analysis: TreeAnalysis, selection: RouteSelectionArguments = None
    ) -> "RouteCollection":
        """
        Create a collection from a tree analysis.

        :param analysis: the tree analysis to use
        :param selection: selection criteria for the routes
        :return: the created collection
        """
        items, scores = analysis.sort(selection)
        all_scores = [{repr(analysis.scorer): score} for score in scores]
        kwargs = {"scores": scores, "all_scores": all_scores}
        if isinstance(analysis.search_tree, MctsSearchTree):
            kwargs["nodes"] = items
            reaction_trees = [
                from_node.to_reaction_tree()
                for from_node in items
                if isinstance(from_node, MctsNode)
            ]
        else:
            reaction_trees = items  # type: ignore

        return cls(reaction_trees, **kwargs)

    def __getitem__(self, index: int) -> StrDict:
        if index < 0 or index >= len(self):
            raise IndexError("Index out of range")
        return self._routes[index]

    def __len__(self) -> int:
        return len(self.reaction_trees)

    @property
    def dicts(self) -> Sequence[StrDict]:
        """Returns a list of dictionary representation of the routes"""
        if self._dicts is None:
            self._dicts = self.make_dicts()
        return self._dicts

    @property
    def images(self) -> Sequence[PilImage]:
        """Returns a list of pictoral representation of the routes"""
        if self._images is None:
            self._images = self.make_images()
        return self._images

    @property
    def jsons(self) -> Sequence[str]:
        """Returns a list of JSON string representation of the routes"""
        if self._jsons is None:
            self._jsons = self.make_jsons()
        return self._jsons

    def cluster(
        self,
        n_clusters: int,
        max_clusters: int = 5,
        distances_model: str = "ted",
        **kwargs: Any
    ) -> np.ndarray:
        """
        Cluster the route collection into a number of clusters.

        Additional arguments to the distance or clustering algorithm
        can be passed in as key-word arguments.

        When `distances_model` is "lstm", a key-word argument `model_path` needs to be given
        when `distances_model` is "ted", two optional key-word arguments `timeout` and `content`
        can be given.

        If the number of reaction trees are less than 3, no clustering will be performed

        :param n_clusters: the desired number of clusters, if less than 2 triggers optimization
        :param max_clusters: the maximum number of clusters to consider
        :param distances_model: can be ted or lstm and determines how the route distances are computed
        :return: the cluster labels
        """
        if len(self.reaction_trees) < 3:
            return np.asarray([])
        dist_kwargs = {
            "content": kwargs.pop("content", "both"),
            "timeout": kwargs.pop("timeout", None),
            "model_path": kwargs.pop("model_path", None),
        }
        try:
            distances = self.distance_matrix(model=distances_model, **dist_kwargs)
        except ValueError:
            return np.asarray([])

        labels = ClusteringHelper.cluster(
            distances,
            n_clusters,
            max_clusters=max_clusters,
            **kwargs,
        )
        self._make_clusters(labels)
        return labels

    def combined_reaction_trees(self, recreate: bool = False) -> CombinedReactionTrees:
        """
        Return an object that combines all the reaction tree into a single reaction tree graph

        :param recreate: if False will return a cached object if available, defaults to False
        :return: the combined trees
        """
        if not self._combined_reaction_trees or recreate:
            self._combined_reaction_trees = CombinedReactionTrees(self.reaction_trees)
        return self._combined_reaction_trees

    def compute_scores(self, *scorers: Scorer) -> None:
        """
        Compute new scores for all routes in this collection.
        They can then be accessed with the ``all_scores`` attribute.
        """
        # Guard against an empty collection, where self.nodes is [] and
        # self.nodes[0] would raise an IndexError.
        if self.nodes and self.nodes[0]:
            list_ = self.nodes
        else:
            list_ = self.reaction_trees
        for scorer in scorers:
            for idx, score in enumerate(scorer(list_)):  # type: ignore
                self.all_scores[idx][repr(scorer)] = score
        self._update_route_dict(self.all_scores, "all_score")

    def dict_with_scores(self) -> Sequence[StrDict]:
        """
        Return the routes as dictionaries with all scores added
        to the root (target) node.

        :return: the routes as dictionaries
        """
        dicts = []
        for dict_, scores in zip(self.dicts, self.all_scores):
            dicts.append(dict(dict_))
            dicts[-1]["scores"] = dict(scores)
        return dicts

    def distance_matrix(
        self, recreate: bool = False, model: str = "ted", **kwargs: Any
    ) -> np.ndarray:
        """
        Compute the distance matrix between each pair of reaction trees

        All key-word arguments are passed along to the `route_distance_calculator`
        function from the `route_distances` package.

        When `model` is "lstm", a key-word argument `model_path` needs to be given
        when `model` is "ted", two optional key-word arguments `timeout` and `content`
        can be given.

        :param recreate: if False, use a cached one if available
        :param model: the type of model to use "ted" or "lstm"
        :return: the square distance matrix
        """
        if model == "lstm" and not kwargs.get("model_path"):
            raise KeyError(
                "Need to provide 'model_path' argument when using LSTM model for computing distances"
            )
        content = kwargs.get("content", "both")
        cache_key = kwargs.get("model_path", "") if model == "lstm" else content
        if self._distance_matrix.get(cache_key) is not None and not recreate:
            return self._distance_matrix[cache_key]
        calculator = route_distances_calculator(model, **kwargs)
        distances = calculator(self.dicts)
        self._distance_matrix[cache_key] = distances
        return distances

    def make_dicts(self) -> Sequence[StrDict]:
        """Convert all reaction trees to dictionaries"""
        self._dicts = [tree.to_dict() for tree in self.reaction_trees]
        self._update_route_dict(self._dicts, "dict")
        return self._dicts

    def make_images(self) -> Sequence[Optional[PilImage]]:
        """Convert all reaction trees to images"""

        self._images = []
        for tree in self.reaction_trees:
            try:
                img = tree.to_image()
            except ValueError:
                self._images.append(None)
            else:
                self._images.append(img)
        self._update_route_dict(self._images, "image")
        return self._images

    def make_jsons(self) -> Sequence[str]:
        """Convert all reaction trees to JSON strings"""
        self._jsons = [tree.to_json() for tree in self.reaction_trees]
        self._update_route_dict(self._jsons, "json")
        return self._jsons

    def rescore(self, scorer: Scorer) -> None:
        """
        Rescore the routes in the collection, and thereby re-order them.

        This will replace the ``scores`` attribute, and update the ``all_scores``
        attribute with another entry.

        :param scorer: the scorer to use
        """
        # Same empty-collection guard as in compute_scores.
        if self.nodes and self.nodes[0]:
            self.nodes, self.scores, sortidx = scorer.sort(self.nodes)
            self.reaction_trees = [self.reaction_trees[idx] for idx in sortidx]
        else:
            self.reaction_trees, self.scores, sortidx = scorer.sort(self.reaction_trees)
        self._routes = [self._routes[idx] for idx in sortidx]
        self.all_scores = [self.all_scores[idx] for idx in sortidx]
        if self._dicts:
            self._dicts = [self._dicts[idx] for idx in sortidx]
        if self._images:
            self._images = [self._images[idx] for idx in sortidx]
        if self._jsons:
            self._jsons = [self._jsons[idx] for idx in sortidx]

        for idx, score in enumerate(self.scores):
            self.all_scores[idx][repr(scorer)] = score
        self._update_route_dict(self.all_scores, "all_score")

    def _make_clusters(self, clusters: np.ndarray) -> None:
        # Split the collection into one RouteCollection per cluster label.
        n_clusters = max(clusters) + 1
        self.clusters = []
        for cluster in range(n_clusters):
            selection = clusters == cluster
            kwargs = {
                "reaction_trees": self._select_subset(self.reaction_trees, selection),
                "nodes": self._select_subset(self.nodes, selection),
                "scores": self._select_subset(self.scores, selection),
            }
            if self._images:
                kwargs["images"] = self._select_subset(self.images, selection)
            if self._dicts:
                kwargs["dicts"] = self._select_subset(self.dicts, selection)
            if self._jsons:
                kwargs["jsons"] = self._select_subset(self.jsons, selection)

            self.clusters.append(RouteCollection(**kwargs))

    def _unpack_kwarg(self, key: str, **kwargs: Any) -> Optional[Sequence[Any]]:
        # Return the per-route values for `key` (registering them in the
        # route dicts under the singular name), or None if not given.
        if key not in kwargs:
            return None
        arr = kwargs[key]
        self._update_route_dict(arr, key[:-1])
        return arr

    def _unpack_kwarg_with_default(
        self, key: str, default: Any, **kwargs: Any
    ) -> Sequence[Any]:
        # As _unpack_kwarg, but fall back to one `default` per route.
        arr = self._unpack_kwarg(key, **kwargs)
        if arr is not None:
            return arr
        return [
            default() if callable(default) else default
            for _ in range(len(self.reaction_trees))
        ]

    def _update_route_dict(self, arr: Sequence[Any], key: str) -> None:
        # Mirror per-route values into the indexable route dictionaries.
        for i, value in enumerate(arr):
            self._routes[i][key] = value

    @staticmethod
    def _select_subset(arr: Sequence[Any], selection: Sequence[bool]) -> Sequence[Any]:
        # Boolean-mask selection that works on plain Python sequences.
        return [item for sel, item in zip(selection, arr) if sel]
| 37.915205 | 102 | 0.632529 | 1,551 | 12,967 | 5.121857 | 0.166989 | 0.058912 | 0.0321 | 0.016742 | 0.171198 | 0.111908 | 0.077039 | 0.060675 | 0.060675 | 0.060675 | 0 | 0.001279 | 0.276625 | 12,967 | 341 | 103 | 38.026393 | 0.845629 | 0.244004 | 0 | 0.164319 | 0 | 0 | 0.039785 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098592 | false | 0 | 0.051643 | 0.00939 | 0.253521 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5a1131395736b2e09683a257a59f8aeb1dd31b9 | 15,570 | py | Python | specutils/manipulation/resample.py | parejkoj/specutils | 53a00def68882d9e91044d00a043f1bff1a046fd | [
"BSD-3-Clause"
] | null | null | null | specutils/manipulation/resample.py | parejkoj/specutils | 53a00def68882d9e91044d00a043f1bff1a046fd | [
"BSD-3-Clause"
] | null | null | null | specutils/manipulation/resample.py | parejkoj/specutils | 53a00def68882d9e91044d00a043f1bff1a046fd | [
"BSD-3-Clause"
] | null | null | null | from abc import ABC, abstractmethod
import numpy as np
from scipy.interpolate import CubicSpline
from astropy.units import Quantity
from astropy.nddata import StdDevUncertainty, VarianceUncertainty, InverseVariance
from ..spectra import Spectrum1D
__all__ = ['ResamplerBase', 'FluxConservingResampler',
'LinearInterpolatedResampler', 'SplineInterpolatedResampler']
class ResamplerBase(ABC):
    """
    Base class for resample classes.

    The algorithms and needs of the different resamplers vary quite a
    bit, so this class is intentionally sparse.  The ``bin_edges``
    parameter is not yet hooked up to the rest of the code; it is here
    to show how it will be used in the future.
    """
    def __init__(self, bin_edges='nan_fill'):
        self.bin_edges = bin_edges

    @abstractmethod
    def __call__(self, orig_spectrum, fin_lamb):
        """
        Return the resulting `~specutils.Spectrum1D` of the resampling.
        """
        return NotImplemented

    @abstractmethod
    def resample1d(self, orig_spectrum, fin_lamb):
        """
        Workhorse method that will return the resampled Spectrum1D
        object.
        """
        return NotImplemented

    @staticmethod
    def _calc_bin_edges(x):
        """
        Calculate bin edges from an input dispersion axis whose values
        are taken to be the centers of the bins.

        todo: this should live in the main spectrum object, but we're
        still figuring out the details of that implementation, so
        leaving here for now.

        Parameters
        ----------
        x : ndarray
            The input dispersion axis values.

        Returns
        -------
        edges : ndarray
            The calculated bin edges, including the left- and right-most
            bin edges.
        """
        # Interior edges are the midpoints between adjacent centers; the
        # outer edges mirror the first/last midpoint about the end centers.
        midpoints = 0.5 * (x[:-1] + x[1:])
        first_edge = 2 * x[0] - midpoints[0]
        last_edge = 2 * x[-1] - midpoints[-1]
        return np.concatenate(([first_edge], midpoints, [last_edge]))
class FluxConservingResampler(ResamplerBase):
"""
This resampling algorithm conserves overall integrated flux (as opposed to
flux density).
Algorithm based on the equations documented in the following paper:
https://ui.adsabs.harvard.edu/abs/2017arXiv170505165C/abstract
Examples
--------
To resample an input spectrum to a user specified dispersion grid using
a flux conserving algorithm:
>>> import numpy as np
>>> import astropy.units as u
>>> from specutils import Spectrum1D
>>> from specutils.manipulation import FluxConservingResampler
>>> input_spectra = Spectrum1D(
... flux=np.array([1, 3, 7, 6, 20]) * u.mJy,
... spectral_axis=np.array([2, 4, 12, 16, 20]) * u.nm)
>>> resample_grid = np.array([1, 5, 9, 13, 14, 17, 21, 22, 23])
>>> fluxc_resample = FluxConservingResampler()
>>> output_spectrum1D = fluxc_resample(input_spectra, resample_grid) # doctest: +IGNORE_OUTPUT
"""
def __call__(self, orig_spectrum, fin_lamb):
"""
Return the resulting `~specutils.Spectrum1D` of the resampling.
"""
return self.resample1d(orig_spectrum, fin_lamb)
    def _resample_matrix(self, orig_lamb, fin_lamb):
        """
        Create a re-sampling matrix to be used in re-sampling spectra in a way
        that conserves flux. This code was heavily influenced by Nick Earl's
        resample rough draft: nmearl@0ff6ef1.

        Each matrix entry is the (bin-width weighted) overlap length of an
        original bin with an output bin; output bins not fully covered by
        the original grid are zeroed out.

        Parameters
        ----------
        orig_lamb : ndarray
            The original dispersion array.
        fin_lamb : ndarray
            The desired dispersion array.

        Returns
        -------
        resample_mat : ndarray
            An [[N_{fin_lamb}, M_{orig_lamb}]] matrix.
        """
        # Lower bin and upper bin edges
        orig_edges = self._calc_bin_edges(orig_lamb)
        fin_edges = self._calc_bin_edges(fin_lamb)

        # I could get rid of these alias variables,
        # but it does add readability
        orig_low = orig_edges[:-1]
        fin_low = fin_edges[:-1]
        orig_upp = orig_edges[1:]
        fin_upp = fin_edges[1:]

        # Here's the real work in figuring out the bin overlaps
        # i.e., contribution of each original bin to the resampled bin
        # (broadcasting gives an [N_fin, M_orig] overlap-length matrix,
        # clipped at 0 for non-overlapping pairs)
        l_inf = np.where(orig_low > fin_low[:, np.newaxis],
                         orig_low, fin_low[:, np.newaxis])
        l_sup = np.where(orig_upp < fin_upp[:, np.newaxis],
                         orig_upp, fin_upp[:, np.newaxis])

        resamp_mat = (l_sup - l_inf).clip(0)
        # Weight each overlap by the original bin's width.
        resamp_mat *= (orig_upp - orig_low)

        # set bins that don't overlap 100% with original bins
        # to zero by checking edges, and applying generated mask
        left_clip = np.where(fin_edges[:-1] - orig_edges[0] < 0, 0, 1)
        right_clip = np.where(orig_edges[-1] - fin_edges[1:] < 0, 0, 1)
        keep_overlapping_matrix = left_clip * right_clip

        resamp_mat *= keep_overlapping_matrix[:, np.newaxis]

        return resamp_mat
def resample1d(self, orig_spectrum, fin_lamb):
"""
Create a re-sampling matrix to be used in re-sampling spectra in a way
that conserves flux. If an uncertainty is present in the input spectra
it will be propagated through to the final resampled output spectra
as an InverseVariance uncertainty.
Parameters
----------
orig_spectrum : `~specutils.Spectrum1D`
The original 1D spectrum.
fin_lamb : ndarray
The desired dispersion array.
Returns
-------
resample_spectrum : `~specutils.Spectrum1D`
An output spectrum containing the resampled `~specutils.Spectrum1D`
"""
# Check if units on original spectrum and new wavelength (if defined)
# match
if isinstance(fin_lamb, Quantity):
if orig_spectrum.spectral_axis_unit != fin_lamb.unit:
return ValueError("Original spectrum dispersion grid and new"
"dispersion grid must have the same units.")
# todo: Would be good to return uncertainty in type it was provided?
# todo: add in weighting options
# Get provided uncertainty into variance
if orig_spectrum.uncertainty is not None:
if isinstance(orig_spectrum.uncertainty, StdDevUncertainty):
pixel_uncer = np.square(orig_spectrum.uncertainty.array)
elif isinstance(orig_spectrum.uncertainty, VarianceUncertainty):
pixel_uncer = orig_spectrum.uncertainty.array
elif isinstance(orig_spectrum.uncertainty, InverseVariance):
pixel_uncer = np.reciprocal(orig_spectrum.uncertainty.array)
else:
pixel_uncer = None
# todo: Current code doesn't like the inputs being quantity objects, may
# want to look into this more in the future
resample_grid = self._resample_matrix(np.array(orig_spectrum.spectral_axis),
np.array(fin_lamb))
# Now for some broadcasting magic to handle multi dimensional flux inputs
# Essentially this part is inserting length one dimensions as fillers
# For example, if we have a (5,6,10) input flux, and an output grid
# of 3, flux will be broadcast to (5,6,1,10) and resample_grid will
# Be broadcast to (1,1,3,10). The sum then reduces down the 10, the
# original dispersion grid, leaving 3, the new dispersion grid, as
# the last index.
new_flux_shape = list(orig_spectrum.flux.shape)
new_flux_shape.insert(-1, 1)
in_flux = orig_spectrum.flux.reshape(new_flux_shape)
ones = [1] * len(orig_spectrum.flux.shape[:-1])
new_shape_resample_grid = ones + list(resample_grid.shape)
resample_grid = resample_grid.reshape(new_shape_resample_grid)
# Calculate final flux
out_flux = np.sum(in_flux * resample_grid, axis=-1) / np.sum(
resample_grid, axis=-1)
# Calculate output uncertainty
if pixel_uncer is not None:
pixel_uncer = pixel_uncer.reshape(new_flux_shape)
out_variance = np.sum(pixel_uncer * resample_grid**2, axis=-1) / np.sum(
resample_grid**2, axis=-1)
out_uncertainty = InverseVariance(np.reciprocal(out_variance))
else:
out_uncertainty = None
# todo: for now, use the units from the pre-resampled
# spectra, although if a unit is defined for fin_lamb and it doesn't
# match the input spectrum it won't work right, will have to think
# more about how to handle that... could convert before and after
# calculation, which is probably easiest. Matrix math algorithm is
# geometry based, so won't work to just let quantity math handle it.
resampled_spectrum = Spectrum1D(flux=out_flux,
spectral_axis=np.array(fin_lamb) * orig_spectrum.spectral_axis_unit,
uncertainty=out_uncertainty)
return resampled_spectrum
class LinearInterpolatedResampler(ResamplerBase):
"""
Resample a spectrum onto a new ``spectral_axis`` using linear interpolation.
Examples
--------
To resample an input spectrum to a user specified dispersion grid using
linear interpolation:
>>> import numpy as np
>>> import astropy.units as u
>>> from specutils import Spectrum1D
>>> from specutils.manipulation import LinearInterpolatedResampler
>>> input_spectra = Spectrum1D(
... flux=np.array([1, 3, 7, 6, 20]) * u.mJy,
... spectral_axis=np.array([2, 4, 12, 16, 20]) * u.nm)
>>> resample_grid = np.array([1, 5, 9, 13, 14, 17, 21, 22, 23])
>>> fluxc_resample = LinearInterpolatedResampler()
>>> output_spectrum1D = fluxc_resample(input_spectra, resample_grid) # doctest: +IGNORE_OUTPUT
"""
def __init__(self, bin_edges='nan_fill'):
super().__init__(bin_edges)
def __call__(self, orig_spectrum, fin_lamb):
"""
Return the resulting `~specutils.Spectrum1D` of the resampling.
"""
return self.resample1d(orig_spectrum, fin_lamb)
def _interpolation(self, orig_dispersion, flux, fin_lamb):
"""
Use specified interpolation to calculated resampled
flux.
Parameters
----------
orig_dispersion : ndarray
The original dispersion array.
flux: ndarray
The flux array from the input Spectrum1D
fin_lamb : ndarray
The desired dispersion array.
Returns
-------
resample_flux : ndarray
The resampled flux array generated from the interpolation.
"""
return np.interp(fin_lamb, orig_dispersion, flux, left=np.nan, right=np.nan)
def resample1d(self, orig_spectrum, fin_lamb):
"""
Call interpolation, repackage new spectra
Parameters
----------
orig_spectrum : `~specutils.Spectrum1D`
The original 1D spectrum.
fin_lamb : ndarray
The desired dispersion array.
Returns
-------
resample_spectrum : `~specutils.Spectrum1D`
An output spectrum containing the resampled `~specutils.Spectrum1D`
"""
if orig_spectrum.uncertainty is not None:
warn("Linear interpolation currently does not propogate uncertainties")
out_flux = self._interpolation(orig_spectrum.spectral_axis, orig_spectrum.flux,
fin_lamb)
# todo: for now, use the units from the pre-resampled
# spectra, although if a unit is defined for fin_lamb and it doesn't
# match the input spectrum it won't work right, will have to think
# more about how to handle that... could convert before and after
# calculation, which is probably easiest. Matrix math algorithm is
# geometry based, so won't work to just let quantity math handle it.
# todo: handle uncertainties for interpolated cases.
resampled_spectrum = Spectrum1D(flux=out_flux * orig_spectrum.flux.unit,
spectral_axis=np.array(fin_lamb) * orig_spectrum.spectral_axis_unit)
return resampled_spectrum
class SplineInterpolatedResampler(ResamplerBase):
"""
This resample algorithim uses a cubic spline interpolator. In the future
this can be expanded to use splines of different degrees.
Examples
--------
To resample an input spectrum to a user specified dispersion grid using
a cubic spline interpolator:
>>> import numpy as np
>>> import astropy.units as u
>>> from specutils import Spectrum1D
>>> from specutils.manipulation import SplineInterpolatedResampler
>>> input_spectra = Spectrum1D(
... flux=np.array([1, 3, 7, 6, 20]) * u.mJy,
... spectral_axis=np.array([2, 4, 12, 16, 20]) * u.nm)
>>> resample_grid = np.array([1, 5, 9, 13, 14, 17, 21, 22, 23])
>>> fluxc_resample = SplineInterpolatedResampler()
>>> output_spectrum1D = fluxc_resample(input_spectra, resample_grid) # doctest: +IGNORE_OUTPUT
"""
def __init__(self, bin_edges='nan_fill'):
super().__init__(bin_edges)
def __call__(self, orig_spectrum, fin_lamb):
"""
Return the resulting `~specutils.Spectrum1D` of the resampling.
"""
return self.resample1d(orig_spectrum, fin_lamb)
def _interpolation(self, orig_dispersion, flux, fin_lamb):
"""
Use specified interpolation to calculated resampled
flux.
Parameters
----------
orig_dispersion : ndarray
The original dispersion array.
flux: ndarray
The flux array from the input Spectrum1D
fin_lamb : ndarray
The desired dispersion array.
Returns
-------
resample_flux : ndarray
The resampled flux array generated from the interpolation.
"""
cubic_spline = CubicSpline(orig_dispersion, flux, extrapolate=False)
return cubic_spline(fin_lamb)
def resample1d(self, orig_spectrum, fin_lamb):
"""
Call interpolation, repackage new spectra
Parameters
----------
orig_spectrum : `~specutils.Spectrum1D`
The original 1D spectrum.
fin_lamb : ndarray
The desired dispersion array.
Returns
-------
resample_spectrum : `~specutils.Spectrum1D`
An output spectrum containing the resampled `~specutils.Spectrum1D`
"""
out_flux = self._interpolation(orig_spectrum.spectral_axis, orig_spectrum.flux,
fin_lamb)
# todo: for now, use the units from the pre-resampled
# spectra, although if a unit is defined for fin_lamb and it doesn't
# match the input spectrum it won't work right, will have to think
# more about how to handle that... could convert before and after
# calculation, which is probably easiest. Matrix math algorithm is
# geometry based, so won't work to just let quantity math handle it.
# todo: handle uncertainties for interpolated cases.
resampled_spectrum = Spectrum1D(flux=out_flux * orig_spectrum.flux.unit,
spectral_axis=np.array(fin_lamb) * orig_spectrum.spectral_axis_unit)
return resampled_spectrum
| 38.06846 | 108 | 0.634297 | 1,874 | 15,570 | 5.113127 | 0.187834 | 0.045085 | 0.021916 | 0.021812 | 0.547694 | 0.534961 | 0.514506 | 0.497913 | 0.497913 | 0.47871 | 0 | 0.017728 | 0.282659 | 15,570 | 408 | 109 | 38.161765 | 0.840183 | 0.511111 | 0 | 0.342593 | 0 | 0 | 0.040717 | 0.012105 | 0 | 0 | 0 | 0.009804 | 0 | 1 | 0.138889 | false | 0 | 0.055556 | 0 | 0.351852 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5a265575d027993c7c43d372f8dbcbce4bf1072 | 3,283 | py | Python | src/utils/utilities.py | ShivangMathur1/Face-Recognition-System | 3a7eb1af8830d6c36218652ed30edd8a49b7bb4d | [
"MIT"
] | null | null | null | src/utils/utilities.py | ShivangMathur1/Face-Recognition-System | 3a7eb1af8830d6c36218652ed30edd8a49b7bb4d | [
"MIT"
] | 3 | 2022-01-15T06:46:26.000Z | 2022-02-23T11:14:03.000Z | src/utils/utilities.py | ShivangMathur1/Face-Recognition-System | 3a7eb1af8830d6c36218652ed30edd8a49b7bb4d | [
"MIT"
] | 3 | 2022-01-11T08:33:15.000Z | 2022-02-21T09:26:26.000Z | from json import load
import os
from datetime import datetime
import cv2
import requests
def get_user_id(name, creds):
student_url = 'http://127.0.0.1:8080/api/students'
student_details = requests.get(student_url, data={'first_name': name}, auth=(creds[0], creds[1]), params={'first_name': name}).json()
if student_details:
return student_details[0]['id']
return None
def add_attendance(name, status, creds):
student_id = get_user_id(name, creds)
if student_id is not None:
attendance_url = 'http://127.0.0.1:8080/api/attendances'
requests.post(attendance_url, data={'student_id': student_id, 'status': status}, auth=(creds[0], creds[1]), headers={"Authorization": "Token " + creds[2]})
def login(creds):
res = requests.post("http://localhost:8080/auth/token/login", data={'password': creds[1], 'email': creds[0]})
creds.append(res.json()['auth_token'])
return creds
def facial_extraction(image, bbox, padding, size=(256, 256)):
x, y, _, _ = bbox
w = bbox[2] - bbox[0]
h = bbox[3] - bbox[1]
x, y, w, h = round(x), round(y), round(w), round(h)
start_y, end_y = y - padding, y + h + padding
start_x, end_x = x - padding, x + w + padding
if start_y < 0:
start_y = 0
if end_y > image.shape[0]:
end_y = image.shape[0]
if start_x < 0:
start_x = 0
if end_x > image.shape[1]:
end_x = image.shape[1]
ratio = image.shape[1] // image.shape[0]
try:
face = cv2.resize(
image[start_y:end_y, start_x:end_x], (size[0], ratio * size[0])
)
except:
face = cv2.resize(image, (size[0], ratio * size[0]))
return face, (size[0], ratio * size[0])
def record(name, creds, status):
filepath = 'data/records/' + str(datetime.now().strftime('%d-%B-%Y'))
try:
f = open(filepath + '/records.csv', 'x')
f.close()
except:
pass
os.makedirs(filepath, exist_ok=True)
with open(filepath + '/records.csv', 'r+') as f:
lines = f.readlines()
records = [line.strip().split(',') for line in lines]
index = None
for i in range(len(records)):
if records[i][0] == name:
index = i
if index is None or records[index][3] != status:
now = datetime.now()
time = now.strftime('%I:%M:%S:%p')
date = now.strftime('%d-%B-%Y')
f.writelines(f'{name},{time},{date},{status}\n')
add_attendance(name, status, creds)
# records = [line.split(',')[0] + line.split(',')[3].strip() for line in lines]
# if name + status not in records:
# now = datetime.now()
# time = now.strftime('%I:%M:%S:%p')
# date = now.strftime('%d-%B-%Y')
# print(name+status)
# f.writelines(f'{name},{time},{date},{status}\n')
# add_attendance(name, creds)
# def load_database(path):
# students = requests.post
# database = []
# for name in names:
# img = cv2.imread(f'{path}/{name}')
# database.append((name.split('_')[0], img))
# return database
# if __name__ == '__main__':
# print(load_database("D:\Python\Projects\Face-Recognition-System\data\database")) | 33.845361 | 163 | 0.570819 | 460 | 3,283 | 3.96087 | 0.258696 | 0.032931 | 0.018112 | 0.023052 | 0.254665 | 0.130626 | 0.130626 | 0.130626 | 0.108672 | 0.108672 | 0 | 0.027425 | 0.255864 | 3,283 | 97 | 164 | 33.845361 | 0.718379 | 0.194639 | 0 | 0.0625 | 0 | 0 | 0.109631 | 0.011801 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078125 | false | 0.03125 | 0.078125 | 0 | 0.21875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5a43ee3c91d95efecf4342234f30ff3f2618e78 | 3,584 | py | Python | tests/test_models/test_stylegan1.py | shinya7y/mmgeneration | c3b9e0af29d0117e27b18b712b0ddbf343275859 | [
"Apache-2.0"
] | 1 | 2021-05-27T13:04:41.000Z | 2021-05-27T13:04:41.000Z | tests/test_models/test_stylegan1.py | shinya7y/mmgeneration | c3b9e0af29d0117e27b18b712b0ddbf343275859 | [
"Apache-2.0"
] | null | null | null | tests/test_models/test_stylegan1.py | shinya7y/mmgeneration | c3b9e0af29d0117e27b18b712b0ddbf343275859 | [
"Apache-2.0"
] | null | null | null | import math
import pytest
import torch
from mmgen.models import build_model
# from mmgen.models.gans import StyleGANV1
class TestStyleGANV1:
@classmethod
def setup_class(cls):
cls.generator_cfg = dict(
type='StyleGANv1Generator', out_size=32, style_channels=512)
cls.discriminator_cfg = dict(type='StyleGAN1Discriminator', in_size=32)
cls.gan_loss = dict(type='GANLoss', gan_type='wgan')
cls.disc_auxiliary_loss = [
dict(
type='R1GradientPenalty',
loss_weight=10,
norm_mode='HWC',
data_info=dict(
discriminator='disc_partial', real_data='real_imgs'))
]
cls.train_cfg = dict(
use_ema=True,
nkimgs_per_scale={
'8': 0.006,
'16': 0.006,
'32': 0.012
},
optimizer_cfg=dict(
generator=dict(type='Adam', lr=0.003, betas=(0.0, 0.99)),
discriminator=dict(type='Adam', lr=0.003, betas=(0.0, 0.99))),
g_lr_base=0.003,
d_lr_base=0.003)
cls.stylegan_cfg = dict(
type='ProgressiveGrowingGAN',
generator=cls.generator_cfg,
discriminator=cls.discriminator_cfg,
gan_loss=cls.gan_loss,
disc_auxiliary_loss=cls.disc_auxiliary_loss,
train_cfg=cls.train_cfg)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_stylegan1_cuda(self):
# test default config
stylegan = build_model(self.stylegan_cfg).cuda()
data_batch = dict(real_img=torch.randn(3, 3, 32, 32).cuda())
for iter_num in range(5):
outputs = stylegan.train_step(
data_batch,
None,
running_status=dict(iteration=iter_num, batch_size=3))
results = outputs['results']
if iter_num == 1:
assert results['fake_imgs'].shape == (3, 3, 8, 8)
elif iter_num == 2:
assert results['fake_imgs'].shape == (3, 3, 16, 16)
assert math.isclose(
stylegan._actual_nkimgs[0], 0.006, abs_tol=1e-8)
elif iter_num == 3:
assert results['fake_imgs'].shape == (3, 3, 16, 16)
elif iter_num == 4:
assert results['fake_imgs'].shape == (3, 3, 32, 32)
assert math.isclose(
stylegan._actual_nkimgs[1], 0.012, abs_tol=1e-8)
def test_stylegan1_cpu(self):
# test default config
stylegan = build_model(self.stylegan_cfg)
data_batch = dict(real_img=torch.randn(3, 3, 32, 32))
for iter_num in range(5):
outputs = stylegan.train_step(
data_batch,
None,
running_status=dict(iteration=iter_num, batch_size=3))
results = outputs['results']
if iter_num == 1:
assert results['fake_imgs'].shape == (3, 3, 8, 8)
elif iter_num == 2:
assert results['fake_imgs'].shape == (3, 3, 16, 16)
assert math.isclose(
stylegan._actual_nkimgs[0], 0.006, abs_tol=1e-8)
elif iter_num == 3:
assert results['fake_imgs'].shape == (3, 3, 16, 16)
elif iter_num == 4:
assert results['fake_imgs'].shape == (3, 3, 32, 32)
assert math.isclose(
stylegan._actual_nkimgs[1], 0.012, abs_tol=1e-8)
| 37.333333 | 79 | 0.538225 | 434 | 3,584 | 4.237327 | 0.262673 | 0.045677 | 0.073953 | 0.091354 | 0.555737 | 0.555737 | 0.555737 | 0.555737 | 0.555737 | 0.555737 | 0 | 0.066183 | 0.34654 | 3,584 | 95 | 80 | 37.726316 | 0.719044 | 0.022321 | 0 | 0.444444 | 0 | 0 | 0.064571 | 0.012286 | 0 | 0 | 0 | 0 | 0.148148 | 1 | 0.037037 | false | 0 | 0.049383 | 0 | 0.098765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5a5d9f285d99e4d51d173ac72c21fd4c540fc07 | 7,891 | py | Python | script/src/page_categories/page_categories.py | ncatlab/nlab | 8e35d93f6e34dd3d21c59bbb76b40b334365deda | [
"Ruby"
] | 78 | 2015-05-14T06:27:13.000Z | 2022-03-27T16:35:09.000Z | script/src/page_categories/page_categories.py | ncatlab/nlab | 8e35d93f6e34dd3d21c59bbb76b40b334365deda | [
"Ruby"
] | 10 | 2019-03-17T14:48:41.000Z | 2021-12-02T16:30:36.000Z | script/src/page_categories/page_categories.py | ncatlab/nlab | 8e35d93f6e34dd3d21c59bbb76b40b334365deda | [
"Ruby"
] | 11 | 2018-01-17T19:36:06.000Z | 2022-03-22T17:32:37.000Z | #!/usr/bin/python3.7
"""
API for listing all page categories for a given web, and for checking whether
a given string defines a page category in this web
---
To use, set up (if it is not already in place) a virtual environment as follows.
python3 -m venv venv
source venv/bin/activate
pip3 install MySQLdb
deactivate
Once the virtual environment has been set up, to use the API, launch the
virtual environment by running:
source venv/bin/activate
Then run the script as follows (it will not work if using the ./ syntax).
python page_categories.py --help
This will describe the available options. As will be seen, there are three
subcommands, 'is_category', 'all_categories', and 'has_categories', whose
descriptions can be obtained by running
python page_categories.py is_category --help
or
python page_categories.py all_categories --help
or
python page_categories.py has_categories --help
When finished, shut down the virtual environment by running:
deactivate
"""
import argparse
import json
import logging
import MySQLdb
import os
import sys
import time
"""
Initialises logging. Logs to
page_categories.log
"""
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logging_stream_handler = logging.StreamHandler()
logging_stream_handler.setLevel(logging.INFO)
logging.Formatter.converter = time.gmtime
logging_formatter = logging.Formatter(
"%(asctime)s %(levelname)s %(name)s %(message)s")
logging_stream_handler.setFormatter(logging_formatter)
logger.addHandler(logging_stream_handler)
log_directory = os.environ["NLAB_LOG_DIRECTORY"]
logging_file_handler = logging.FileHandler(
os.path.join(log_directory, "page_categories.log"))
logging_file_handler.setFormatter(logging_formatter)
logger.addHandler(logging_file_handler)
class FailedToCarryOutQueryException(Exception):
pass
"""
For a single database query
"""
def execute_single_with_parameters(query, parameters):
database_user = os.environ["NLAB_DATABASE_USER"]
database_password = os.environ["NLAB_DATABASE_PASSWORD"]
database_name = os.environ["NLAB_DATABASE_NAME"]
database_connection = MySQLdb.connect(
user = database_user,
password= database_password,
db = database_name,
charset = "utf8",
use_unicode = True)
cursor = database_connection.cursor()
try:
cursor.execute(query, parameters)
results = cursor.fetchall()
database_connection.commit()
except MySQLdb.Error as e:
logger.warning(
"Failed to carry out the query " +
query +
" with parameters: " +
str(parameters) +
". Error: " +
str(e))
database_connection.rollback()
raise FailedToCarryOutQueryException()
finally:
cursor.close()
database_connection.close()
return results
"""
Determines whether the string category_name is the name of a page category
for the given web
"""
def is_category(web_id, category_name):
query_results = execute_single_with_parameters(
"SELECT wiki_references.id FROM wiki_references " +
"LEFT JOIN pages ON pages.id = wiki_references.page_id " +
"WHERE web_id = %s AND referenced_name = %s AND link_type = %s " +
"ORDER BY wiki_references.id LIMIT 1",
[web_id, category_name, "C"])
try:
query_results[0]
return True
except IndexError:
return False
"""
Lists all names of page categories for a given web
"""
def all_categories(web_id):
query_results = execute_single_with_parameters(
"SELECT DISTINCT(referenced_name) FROM wiki_references " +
"LEFT JOIN pages ON pages.id = wiki_references.page_id " +
"WHERE web_id = %s AND link_type = %s",
[web_id, "C"])
categories = [ query_result[0] for query_result in query_results ]
sorted_categories = sorted(categories, key=str.lower)
return sorted_categories
def has_categories(web_id):
query_results = execute_single_with_parameters(
"SELECT wiki_references.id FROM wiki_references " +
"LEFT JOIN pages ON pages.id = wiki_references.page_id " +
"WHERE web_id = %s AND link_type = %s "
"ORDER BY wiki_references.id LIMIT 1",
[web_id, "C"])
try:
query_results[0]
return True
except IndexError:
return False
"""
Sets up the command line argument parsing
"""
def argument_parser():
parser = argparse.ArgumentParser(
description = (
"Lists the names of all categories in a given web, or checks " +
"whether a given string defines a category"))
subparsers = parser.add_subparsers(dest="subcommand")
parser_is_category = subparsers.add_parser(
"is_category",
help = "Checks whether a given string defines a category in a given " +
"web, returning 'True' or 'False'")
parser_all_categories = subparsers.add_parser(
"all_categories",
help = "Returns the list of all categories in a given web")
parser_has_categories = subparsers.add_parser(
"has_categories",
help = "Checks whether a given web has any page categories")
parser_is_category.add_argument(
"web_id",
type=int,
help = "Id of a web")
parser_is_category.add_argument(
"category",
help = "Name of possible category")
parser_all_categories.add_argument(
"web_id",
type=int,
help = "Id of a web")
parser_has_categories.add_argument(
"web_id",
type=int,
help = "Id of a web")
return parser
def main():
parser = argument_parser()
arguments = parser.parse_args()
web_id = arguments.web_id
if arguments.subcommand == "all_categories":
try:
categories = all_categories(web_id)
logger.info(
"Successfully found all categories for web with id " +
str(web_id))
print(json.dumps(categories))
return
except Exception as e:
logger.warning(
"Due to an unforeseen error, could not obtain the list of " +
"all categories for the web with id: " +
str(web_id))
sys.exit(1)
if arguments.subcommand == "has_categories":
try:
has_category = has_categories(web_id)
if has_category:
message = " has at least one page category"
else:
message = " does not have any page categories"
logger.info(
"Successfully found that the web with id " +
str(web_id) +
message)
print(has_category)
return
except Exception as e:
logger.warning(
"Due to an unforeseen error, could not determine whether " +
"the web with id: " +
str(web_id) +
"has any page categories. Error: " +
str(e))
sys.exit(1)
category_name = arguments.category
try:
found_category = is_category(web_id, category_name)
if found_category:
message = "defines"
else:
message = "does not define"
logger.info(
"Successfully found that " +
category_name +
" " +
message +
" a category for web with id " +
str(web_id))
print(found_category)
return
except Exception as e:
logger.warning(
"Due to an unforeseen error, could not determine whether " +
category_name +
" defines a category for the web with id: " +
str(web_id) +
". Error: " +
str(e))
sys.exit(1)
if __name__ == "__main__":
main()
| 30.70428 | 80 | 0.637435 | 957 | 7,891 | 5.073145 | 0.229885 | 0.023687 | 0.011123 | 0.01483 | 0.378167 | 0.333883 | 0.287951 | 0.245108 | 0.208445 | 0.208445 | 0 | 0.002293 | 0.28146 | 7,891 | 256 | 81 | 30.824219 | 0.853968 | 0.128121 | 0 | 0.382514 | 0 | 0 | 0.260916 | 0.017648 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032787 | false | 0.016393 | 0.038251 | 0 | 0.131148 | 0.016393 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5a7137f1791e922a3801b5fb27951f1e74fe687 | 4,329 | py | Python | evaluation/check_grammaticality_preservation.py | demelin/wsd_biases_for_nmt | cd0bccfd653d5f5ee9286a98791944f876b97f1b | [
"MIT"
] | 2 | 2020-12-02T09:59:09.000Z | 2022-02-18T19:58:48.000Z | evaluation/check_grammaticality_preservation.py | demelin/wsd_biases_for_nmt | cd0bccfd653d5f5ee9286a98791944f876b97f1b | [
"MIT"
] | null | null | null | evaluation/check_grammaticality_preservation.py | demelin/wsd_biases_for_nmt | cd0bccfd653d5f5ee9286a98791944f876b97f1b | [
"MIT"
] | 1 | 2021-03-26T08:06:01.000Z | 2021-03-26T08:06:01.000Z | import sys
import json
import argparse
import language_tool_python
import numpy as np
def check_grammar(adversarial_samples_path):
""" Checks grammar preservation between natural seed sentences and adversarial samples that have been derived
from them. """
# Read in attractor phrase table
print('Reading-in adversarial samples table ...')
with open(adversarial_samples_path, 'r', encoding='utf8') as asp:
adversarial_samples_table = json.load(asp)
# Initialize trackers
error_counts = list()
seed_error_types = dict()
adv_error_types = dict()
# Initialize language tool
tool = language_tool_python.LanguageTool('en-US')
print('Evaluating samples ...')
# Obtain scores based on seed sentence properties
for term_id, term in enumerate(adversarial_samples_table.keys()):
for seed_cluster in adversarial_samples_table[term].keys():
for adv_cluster in adversarial_samples_table[term][seed_cluster].keys():
for sample in adversarial_samples_table[term][seed_cluster][adv_cluster]:
seed_sentence = sample[1].strip()
adv_sample = sample[0].strip()
seed_matches = tool.check(seed_sentence)
adv_matches = tool.check(adv_sample)
num_seed_matches = 0
num_adv_matches = 0
for sm in seed_matches:
if sm.ruleId not in \
['UPPERCASE_SENTENCE_START', 'PROFANITY', 'COMMA_PARENTHESIS_WHITESPACE']:
num_seed_matches += 1
for am in adv_matches:
if am.ruleId not in \
['UPPERCASE_SENTENCE_START', 'PROFANITY', 'COMMA_PARENTHESIS_WHITESPACE']:
num_adv_matches += 1
error_counts.append((num_seed_matches, num_adv_matches))
for sm in seed_matches:
sm_match_type = sm.ruleId
if seed_error_types.get(sm_match_type, None) is None:
seed_error_types[sm_match_type] = 1
else:
seed_error_types[sm_match_type] += 1
for am in adv_matches:
am_match_type = am.ruleId
if adv_error_types.get(am_match_type, None) is None:
adv_error_types[am_match_type] = 1
else:
adv_error_types[am_match_type] += 1
if len(error_counts) % 1000 == 0 and len(error_counts) > 0:
print('Seen {:d} samples'.format(len(error_counts)))
print('Seen {:d} samples'.format(len(error_counts)))
# Report
equal_errors = 0
more_seed = 0
more_adv = 0
for ec in error_counts:
if ec[0] == ec[1]:
equal_errors += 1
elif ec[0] > ec[1]:
more_seed += 1
else:
more_adv += 1
seed_mean_errors = np.mean([ec[0] for ec in error_counts])
adv_mean_errors = np.mean([ec[1] for ec in error_counts])
print('Number of samples with equal number of errors in seed and adv: {:d}'.format(equal_errors))
print('Number of samples with more errors in the seed sentence: {:d}'.format(more_seed))
print('Number of samples with more errors in the adversarial sample: {:d}'.format(more_adv))
print('Mean number of errors in seed sentences: {:.4f}'.format(seed_mean_errors))
print('Mean number of errors in adversarial samples: {:.4f}'.format(adv_mean_errors))
print('=' * 20)
print('ERROR TYPE COUNTS (seed | adv)')
all_error_types = list(set(list(seed_error_types.keys()) + list(adv_error_types.keys())))
for et in all_error_types:
stc = seed_error_types.get(et, 0)
atc = adv_error_types.get(et, 0)
print('{:s} : {:d} | {:d}'.format(et, stc, atc))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--adversarial_samples_path', type=str, help='path to the file containing the generated adversarial samples',
required=True)
args = parser.parse_args()
check_grammar(args.adversarial_samples_path)
| 41.228571 | 133 | 0.594133 | 536 | 4,329 | 4.537313 | 0.242537 | 0.057566 | 0.056743 | 0.041118 | 0.349507 | 0.26398 | 0.195724 | 0.120888 | 0.090461 | 0.058388 | 0 | 0.011773 | 0.313236 | 4,329 | 104 | 134 | 41.625 | 0.806256 | 0.057519 | 0 | 0.141026 | 0 | 0 | 0.163632 | 0.031988 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012821 | false | 0 | 0.064103 | 0 | 0.076923 | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5a9730e5e604235c70d82217d7abcd958783532 | 2,967 | py | Python | scripts/mosh.py | wmodes/blackrockstation | 8134322517803d225b936958a2d4ad16e286397a | [
"MIT"
] | 1 | 2021-04-18T06:46:07.000Z | 2021-04-18T06:46:07.000Z | scripts/mosh.py | wmodes/blackrockstation | 8134322517803d225b936958a2d4ad16e286397a | [
"MIT"
] | null | null | null | scripts/mosh.py | wmodes/blackrockstation | 8134322517803d225b936958a2d4ad16e286397a | [
"MIT"
] | null | null | null | import os
import sys
import getopt
#Load file and create temp files
inputfile = 'blank'
outputfile = 'blank'
filters = 'blank'
def printhelp():
print('py -3 mosh.py -i input.bmp -f [ffmpeg filters] -o output.bmp\npy -3 mosh.py --input=input.bmp --filter=[ffmpeg filters] --output=output.bmp\n[ffmpeg filters]:\n For example:\n volume=volume=3,bass=g=3:f=110:w=0.6')
def loadfi(argv):
try:
opts, args = getopt.getopt(argv,"i:o:f:h:",["input=","output=","filter=","help"])
except getopt.GetoptError:
printhelp()
sys.exit(2)
for opt, arg in opts:
global inputfile
global outputfile
global filters
try:
if opt in ('-h', '--help'):
printhelp()
sys.exit()
elif opt in ("-i", "--input"):
inputfile = arg
elif opt in ("-o", "--output"):
outputfile = arg
elif opt in ("-f", "--filter"):
filters = arg
except NameError:
print('py -3 mosh.py -i input.bmp -o output.bmp')
sys.exit(2)
loadfi(sys.argv[1:])
try:
a = str(inputfile).rsplit('.',1)[0]+'-a.tmp'
b = str(outputfile).rsplit('.',1)[0]+'-b.tmp'
if filters == 'blank':
print("Filters are required to avoid errors.")
sys.exit()
if inputfile.endswith(".bmp") == False:
if inputfile.endswith('.bmp"') == False:
print("Input needs to be .bmp")
sys.exit()
else:
pass
if outputfile.endswith(".bmp") == False:
if outfile.endswith('.bmp"') == False:
print("Output needs to be .bmp")
sys.exit()
else:
pass
if filters.endswith('"') == True:
if filters.endswith('"') == True:
filters = filters.strip()
sys.exit()
else:
pass
with open(inputfile, 'rb') as in_file:
with open(a, 'wb') as out_file:
out_file.write(in_file.read()[36:])
except FileNotFoundError:
loadfi(sys.argv[1:])
except NameError:
print(inputfile)
print(outputfile)
print('py -3 mosh.py -i input.bmp -o output.bmp')
sys.exit(2)
print("Moshing")
#os.system('ffmpeg -f alaw -i "%s" -y -af "volume=volume=3" -ac 1 -f alaw "%s"'%(a,b)) <- Basic
#os.system('ffmpeg -f alaw -i "%s" -y -af "volume=volume=3,bass=g=3:f=110:w=0.6,aformat=channel_layouts=mono,chorus=0.5:0.5:1:0.1:1:2" -ac 1 -f alaw "%s"'%(a,b)) <- My Personal Favorite
os.system('ffmpeg -f alaw -i "%s" -y -af "%s" -ac 1 -f alaw "%s"'%(a,filters,b))
#def mosh(argv):
if os.path.exists(b):
print("Adding Header")
with open(outputfile, 'wb') as o:
with open(inputfile,'rb') as hx:
with open(b,'rb') as i:
o.write(hx.read()[:36]+i.read())
print("Done")
else:
print("Error")
try:
os.remove(a)
os.remove(b)
except FileNotFoundError:
print("Temp files not found, may be result of non-existant input or output.")
sys.exit()
| 31.56383 | 237 | 0.559488 | 431 | 2,967 | 3.839907 | 0.278422 | 0.038066 | 0.016918 | 0.021752 | 0.25861 | 0.200604 | 0.194562 | 0.181269 | 0.167372 | 0.117825 | 0 | 0.020119 | 0.262892 | 2,967 | 93 | 238 | 31.903226 | 0.736626 | 0.109201 | 0 | 0.385542 | 0 | 0.024096 | 0.251232 | 0.024252 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024096 | false | 0.036145 | 0.036145 | 0 | 0.060241 | 0.192771 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5aa8ee377904ab5a674241f789900ee6b5dc751 | 6,432 | py | Python | src/train.py | shabnam-b/crosslingual-nlp | ccd91baaea23004eab9c4d871910945ca3e61ab7 | [
"MIT"
] | 64 | 2019-11-25T09:33:29.000Z | 2022-03-31T22:38:21.000Z | src/train.py | shabnam-b/crosslingual-nlp | ccd91baaea23004eab9c4d871910945ca3e61ab7 | [
"MIT"
] | 2 | 2022-02-14T05:46:10.000Z | 2022-02-18T20:40:02.000Z | src/train.py | shabnam-b/crosslingual-nlp | ccd91baaea23004eab9c4d871910945ca3e61ab7 | [
"MIT"
] | 7 | 2021-04-02T07:00:36.000Z | 2022-03-28T15:08:50.000Z | import os
from argparse import ArgumentParser
import pytorch_lightning as pl
import util
from enumeration import Task
from model import Aligner, Classifier, DependencyParser, Model, Tagger
def main(hparams):
    """Build the task-appropriate model and run train and/or test.

    Selects the model class from ``hparams.task``, wires up TensorBoard
    logging, early stopping and checkpointing, builds a
    ``pytorch_lightning.Trainer`` from the CLI options, and runs
    ``fit``/``test`` depending on ``do_train``/``do_test``.

    :param hparams: parsed argparse namespace (see the ``__main__`` block).
    """
    # Ensure the dataset cache directory exists (default: ~/.cache/clnlp).
    if hparams.cache_dataset:
        if not hparams.cache_path:
            hparams.cache_path = os.path.join(os.path.expanduser("~"), ".cache/clnlp")
        os.makedirs(hparams.cache_path, exist_ok=True)
    # Map each supported task to its model class; raises KeyError for an
    # unknown task.
    ModelClass = {
        Task.conllner: Tagger,
        Task.wikiner: Tagger,
        Task.udpos: Tagger,
        Task.xnli: Classifier,
        Task.pawsx: Classifier,
        Task.mldoc: Classifier,
        Task.langid: Classifier,
        Task.parsing: DependencyParser,
        Task.alignment: Aligner,
    }[hparams.task]
    if hparams.do_train:
        model = ModelClass(hparams)
    else:
        # Test-only runs must start from an existing checkpoint file.
        assert os.path.isfile(hparams.checkpoint)
        model = ModelClass.load_from_checkpoint(hparams.checkpoint)
    os.makedirs(
        os.path.join(hparams.default_save_path, hparams.exp_name), exist_ok=True
    )
    logger = pl.loggers.TensorBoardLogger(
        hparams.default_save_path, name=hparams.exp_name, version=None
    )
    # Early stopping and checkpointing both monitor the model's own
    # selection criterion/direction.
    # NOTE(review): 'comparsion' (sic) is the attribute name used
    # consistently here; it is presumably defined on Model — confirm there.
    early_stopping = pl.callbacks.EarlyStopping(
        monitor=model.selection_criterion,
        min_delta=hparams.min_delta,
        patience=hparams.patience,
        verbose=True,
        mode=model.comparsion,
        strict=True,
    )
    # Mirror the TensorBoardLogger's versioned directory layout.
    base_dir = os.path.join(
        hparams.default_save_path,
        hparams.exp_name,
        f"version_{logger.version}" if logger.version is not None else "",
    )
    model.base_dir = base_dir
    checkpoint_callback = pl.callbacks.ModelCheckpoint(
        dirpath=os.path.join(base_dir, "ckpts"),
        # e.g. "ckpts_epoch=3-<criterion>=0.812"
        filename="ckpts_{epoch}-{%s:.3f}" % model.selection_criterion,
        monitor=model.selection_criterion,
        verbose=True,
        save_last=hparams.save_last,
        save_top_k=hparams.save_top_k,
        mode=model.comparsion,
    )
    logging_callback = util.Logging(base_dir)
    lr_logger = pl.callbacks.LearningRateMonitor()
    callbacks = [early_stopping, checkpoint_callback, logging_callback, lr_logger]
    # Linear aligners additionally checkpoint their learned mapping.
    if isinstance(model, Aligner) and hparams.aligner_sim == "linear":
        callbacks.append(util.MappingCheckpoint(base_dir))
    trainer = pl.Trainer(
        logger=logger,
        callbacks=callbacks,
        default_root_dir=hparams.default_save_path,
        gradient_clip_val=hparams.gradient_clip_val,
        num_nodes=hparams.num_nodes,
        gpus=hparams.gpus,
        auto_select_gpus=True,
        overfit_batches=hparams.overfit_batches,
        track_grad_norm=hparams.track_grad_norm,
        check_val_every_n_epoch=hparams.check_val_every_n_epoch,
        fast_dev_run=hparams.fast_dev_run,
        accumulate_grad_batches=hparams.accumulate_grad_batches,
        max_epochs=hparams.max_epochs,
        min_epochs=hparams.min_epochs,
        max_steps=hparams.max_steps,
        min_steps=hparams.min_steps,
        # Lightning treats values > 1 as a step count (int) and values in
        # (0, 1] as a fraction of an epoch (float).
        val_check_interval=int(hparams.val_check_interval)
        if hparams.val_check_interval > 1
        else hparams.val_check_interval,
        log_every_n_steps=hparams.log_every_n_steps,
        accelerator=hparams.accelerator,
        precision=hparams.precision,
        resume_from_checkpoint=hparams.resume_from_checkpoint,
        replace_sampler_ddp=True,
        terminate_on_nan=True,
        amp_backend=hparams.amp_backend,
        amp_level=hparams.amp_level,
    )
    if hparams.do_train:
        trainer.fit(model)
    if hparams.do_test and hparams.tst_langs:
        if hparams.do_train:
            # Record the best validation score under "select" so the test
            # logs carry the model-selection value alongside test metrics.
            assert "select" not in trainer.callback_metrics
            trainer.callback_metrics["select"] = checkpoint_callback.best_model_score
            trainer.test(ckpt_path="best")
        else:
            trainer.test(model=model)
if __name__ == "__main__":
    parser = ArgumentParser()
    # Experiment bookkeeping, early stopping and checkpointing options.
    parser.add_argument("--exp_name", default="default", type=str)
    parser.add_argument("--min_delta", default=1e-3, type=float)
    parser.add_argument("--patience", default=10, type=int)
    parser.add_argument("--save_last", default=False, type=util.str2bool)
    parser.add_argument("--save_top_k", default=1, type=int)
    parser.add_argument("--do_train", default=True, type=util.str2bool)
    parser.add_argument("--do_test", default=True, type=util.str2bool)
    parser.add_argument("--checkpoint", default="", type=str)
    parser.add_argument("--cache_dataset", default=False, type=util.str2bool)
    parser.add_argument("--cache_path", default="", type=str)
    ############################################################################
    # Options below are forwarded more or less directly to
    # pytorch_lightning.Trainer inside main().
    parser.add_argument("--default_save_path", default="./", type=str)
    parser.add_argument("--gradient_clip_val", default=0, type=float)
    parser.add_argument("--num_nodes", default=1, type=int)
    parser.add_argument("--gpus", default=None, type=int)
    parser.add_argument("--overfit_batches", default=0.0, type=float)
    parser.add_argument("--track_grad_norm", default=-1, type=int)
    parser.add_argument("--check_val_every_n_epoch", default=1, type=int)
    parser.add_argument("--fast_dev_run", default=False, type=util.str2bool)
    parser.add_argument("--accumulate_grad_batches", default=1, type=int)
    parser.add_argument("--max_epochs", default=1000, type=int)
    parser.add_argument("--min_epochs", default=1, type=int)
    parser.add_argument("--max_steps", default=None, type=int)
    parser.add_argument("--min_steps", default=None, type=int)
    parser.add_argument("--val_check_interval", default=1.0, type=float)
    parser.add_argument("--log_every_n_steps", default=10, type=int)
    parser.add_argument("--accelerator", default=None, type=str)
    parser.add_argument("--precision", default=32, type=int)
    parser.add_argument("--resume_from_checkpoint", default=None, type=str)
    parser.add_argument("--amp_backend", default="native", type=str)
    # only used for non-native amp
    # BUGFIX: apex AMP optimization levels are spelled "O0".."O3" (capital
    # letter O); the previous default "01" (zero-one) is rejected by apex.
    parser.add_argument("--amp_level", default="O1", type=str)
    ############################################################################
    # Each model class contributes its own CLI options.
    parser = Model.add_model_specific_args(parser)
    parser = Tagger.add_model_specific_args(parser)
    parser = Classifier.add_model_specific_args(parser)
    parser = DependencyParser.add_model_specific_args(parser)
    parser = Aligner.add_model_specific_args(parser)
    hparams = parser.parse_args()
    main(hparams)
| 41.766234 | 86 | 0.684546 | 793 | 6,432 | 5.278689 | 0.213115 | 0.064501 | 0.121835 | 0.049689 | 0.302914 | 0.271142 | 0.199236 | 0.111085 | 0.021978 | 0.021978 | 0 | 0.006068 | 0.180037 | 6,432 | 153 | 87 | 42.039216 | 0.787637 | 0.004353 | 0 | 0.07971 | 0 | 0 | 0.08528 | 0.0192 | 0 | 0 | 0 | 0 | 0.014493 | 1 | 0.007246 | false | 0 | 0.043478 | 0 | 0.050725 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5ae335355bc99ca0c56f7196c057bea2de78e07 | 1,599 | py | Python | tests/test_util_math.py | mirochaj/ares | b3335ad30435ee0d7f17d0110aa164a35f252d78 | [
"MIT"
] | 10 | 2020-03-26T01:08:10.000Z | 2021-12-04T13:02:10.000Z | tests/test_util_math.py | mirochaj/ares | b3335ad30435ee0d7f17d0110aa164a35f252d78 | [
"MIT"
] | 25 | 2020-06-08T14:52:28.000Z | 2022-03-08T02:30:54.000Z | tests/test_util_math.py | mirochaj/ares | b3335ad30435ee0d7f17d0110aa164a35f252d78 | [
"MIT"
] | 8 | 2020-03-24T14:11:25.000Z | 2021-11-06T06:32:59.000Z | """
test_util_stats.py
Author: Jordan Mirocha
Affiliation: McGill
Created on: Tue 24 Mar 2020 22:11:31 EDT
Description:
"""
import numpy as np
from scipy.interpolate import interp1d
from ares.util.Math import interp1d_wrapper, forward_difference, \
central_difference, five_pt_stencil, LinearNDInterpolator, smooth
def test():
    """Smoke-test the helpers in ares.util.Math.

    Covers the interp1d wrapper, the finite-difference derivative helpers,
    the boxcar/gaussian smoothers, and LinearNDInterpolator in 1, 2 and 3
    dimensions.
    """
    # The wrapper must reproduce scipy's interp1d exactly on a cubic fit.
    x = np.linspace(0, 4 * np.pi, 100)
    y = np.sin(x)
    func1 = interp1d(x, y, kind='cubic')
    func2 = interp1d_wrapper(x, y, kind='cubic')
    func3 = LinearNDInterpolator(x, y)
    x2 = np.linspace(0, 4 * np.pi, 50)
    f1 = func1(x2)
    f2 = func2(x2)
    f3 = func3(x2)
    assert np.array_equal(f1, f2)
    # Derivative routines: exercised for crashes only (no value checks).
    x1, dydx1 = forward_difference(x, y)
    x2, dydx2 = central_difference(x, y)
    x3, dydx3 = five_pt_stencil(x, y)
    # Smoothing a noisy sine must reduce the residual noise level.
    d = y + np.random.normal(scale=0.5, size=y.size)
    std = np.std(d - y)
    ds_b = smooth(d, 5, kernel='boxcar')
    ds_g = smooth(d, 5, kernel='gaussian')
    assert np.std(ds_b - y) < std
    assert np.std(ds_g - y) < std
    # LinearNDInterpolator on a 2-D grid...
    _x = _y = np.linspace(0, 5, 100)
    xx, yy = np.meshgrid(_x, _y)
    f = np.sin(xx) + np.cos(yy)
    func2d = LinearNDInterpolator([_x, _y], f)
    f0 = func2d(np.array([0.5, 1.3]))
    # ...and on a 3-D grid.
    _x = _y = _z = np.linspace(0, 5, 100)
    xx, yy, zz = np.meshgrid(_x, _y, _z)
    # BUGFIX: removed a stray unary '+' ("+ +" typo); values are unchanged
    # since unary plus on an ndarray is the identity.
    g = np.sin(xx) + np.cos(yy) + np.tan(zz)
    func3d = LinearNDInterpolator([_x, _y, _z], g)
    g0 = func3d(np.array([0.5, 1.3, 1.5]))
# Allow running this test module directly, without pytest.
if __name__ == '__main__':
    test()
| 23.865672 | 69 | 0.619762 | 255 | 1,599 | 3.729412 | 0.407843 | 0.025237 | 0.046267 | 0.025237 | 0.126183 | 0.126183 | 0.039958 | 0 | 0 | 0 | 0 | 0.062704 | 0.23202 | 1,599 | 66 | 70 | 24.227273 | 0.711726 | 0.143215 | 0 | 0 | 0 | 0 | 0.023581 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 1 | 0.027778 | false | 0 | 0.083333 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5af38e5eeb161d0d352eaa24c10832386dba330 | 6,398 | py | Python | Scripts/convertor2.py | hyller/GladiatorFirmware | 535de3f74a773614648279ceb7cab06714c2cec2 | [
"Unlicense"
] | 1 | 2015-09-17T07:00:59.000Z | 2015-09-17T07:00:59.000Z | Scripts/convertor2.py | hyller/GladiatorFirmware | 535de3f74a773614648279ceb7cab06714c2cec2 | [
"Unlicense"
] | null | null | null | Scripts/convertor2.py | hyller/GladiatorFirmware | 535de3f74a773614648279ceb7cab06714c2cec2 | [
"Unlicense"
] | null | null | null | # 1. Extract crash address form reset information in internal log
# 2. According the crash address to find the crash function by search IAR map file
# 3. Output the crash address and function name to file
# NOTE: map file should in UTF-8 format
import csv
import os
import sys
import os.path
code_list = []
code_list_index = 0
def read_output_log(inputfile, outputfile):
    """Translate a raw reset/crash CSV log into a human-readable text report.

    Each CSV row carries an 'Event ID' plus two 16-bit halves in
    'Parameter 2' (low) and 'Parameter 3' (high).  Event 0x70 is a reset
    record (reason code + reset count); 0x71 is the crash PC and 0x72..0x77
    are the six captured return addresses, each resolved to a function name
    via find_function().

    :param inputfile: path to the CSV export of the internal log.
    :param outputfile: path of the text report to (over)write.
    """
    # Reset-reason codes and their firmware names (codes are disjoint, so
    # the original chain of independent ifs matched at most one entry).
    reset_reasons = {
        0x501: 'WATCHDOG_EXPIRED',
        0x502: 'WATCHDOG_CAUGHT',
        0x701: 'CRASH_ASSERT',
        0xA01: 'HARD_FAULT',
        0xA02: 'MEM_FAULT',
        0xA03: 'BUS_FAULT',
        0xA04: 'USAGE_FAULT',
        0xA05: 'DBGMON_FAULT',
        0x401: 'POWER_RESTART',
    }
    # Events 0x72..0x77 map to RETURN[0]..RETURN[5]; this replaces six
    # previously duplicated elif branches that differed only in the label.
    return_slots = {'0x7%d' % (2 + i): i for i in range(6)}
    # NOTE: the original timestamp decoding (hours/min/sec/ms) was dead
    # code — its results were never used — and has been removed.
    with open(outputfile, 'w+') as wf:
        with open(inputfile, 'r') as rf:
            for row in csv.DictReader(rf):
                event = row['Event ID']
                if event == '0x70':
                    reason = int(row['Parameter 2'], 16)
                    wf.write('\n\n')
                    wf.write('PWR:'+'resetReason-'+row['Parameter 2'])
                    if reason in reset_reasons:
                        wf.write('-' + reset_reasons[reason])
                    wf.write('|' + 'resetCnt-' + row['Parameter 3'] + '\n')
                elif event == '0x71' or event in return_slots:
                    # Reassemble the 32-bit address from its 16-bit halves.
                    addr = int(row['Parameter 3'], 16)*65536 + int(row['Parameter 2'], 16)
                    funct_name = find_function(addr)
                    label = ('CRASH__PC' if event == '0x71'
                             else 'RETURN[%d]' % return_slots[event])
                    wf.write('PWR:' + label + '-' + row['Parameter 3'] + "'"
                             + row['Parameter 2'] + "--->" + funct_name + '\n')
def extract_code_from_map_file(inputfile, dest=None):
    """Collect 'Code' entries from an IAR map file's ENTRY LIST section.

    Scans between the '*** ENTRY LIST' marker and the '[1] = ' footer.  A
    line containing 'Code ' is one entry; when the line starts with a space
    the symbol name was wrapped onto the previous line and is rejoined.
    Each entry is appended as a single whitespace-normalized string, e.g.
    "zdoSimpleCommand  0x3'0a77  0x1a  Code  Gb  zdo-cli.o [66]".

    :param inputfile: path to the map file (must be UTF-8 encoded).
    :param dest: list to append entries to; defaults to the module-level
        ``code_list`` (the original behavior).
    """
    if dest is None:
        dest = code_list
    # BUGFIX: initialized so a continuation line as the very first entry
    # can no longer raise UnboundLocalError.
    pre_line = ""
    with open(inputfile, 'r', encoding='utf-8') as rf:
        in_entry_list = False
        for line in rf:
            if "*** ENTRY LIST" in line:  # Section starts after this marker.
                in_entry_list = True
                continue
            if "[1] = " in line and in_entry_list:  # Section footer: done.
                break
            if in_entry_list and "Code " in line:
                if line[0] == " ":
                    # Name wrapped onto the previous line; rejoin the two.
                    entry = pre_line.strip() + " " + line.strip()
                else:
                    entry = line.strip()
                dest.append(entry)
            pre_line = line
def find_function(addr, entries=None):
    """Return the map-file entry whose code range contains *addr*.

    Each entry looks like
    "zdoSimpleCommand  0x3'0a77  0x1a  Code  Gb  zdo-cli.o [66]":
    token 1 is the start address (IAR uses ' as a digit separator) and
    token 2 the size in bytes.

    :param addr: absolute address to resolve.
    :param entries: list of entry strings; defaults to the module-level
        ``code_list`` (the original behavior).
    :return: the matching entry string, or "" when no range contains addr.
    """
    if entries is None:
        entries = code_list
    for line in entries:
        parts = line.split()
        funct_addr = int(parts[1].replace("'", ""), 16)
        try:
            funct_size = int(parts[2], 16)
        # BUGFIX: narrowed from a bare 'except:' — only a missing or
        # non-hex size token should fall back to size 0.
        except (IndexError, ValueError):
            funct_size = 0
        if funct_addr <= addr < funct_addr + funct_size:
            return line
    return ""
if __name__ == '__main__':
    # Usage: convertor2.py <csv input log> <IAR map file> <text output file>
    if len(sys.argv) == 4:
        # Build the code entry table first; read_output_log resolves
        # crash/return addresses against it.
        extract_code_from_map_file(sys.argv[2])
        read_output_log(sys.argv[1], sys.argv[3])
    else:
        print('please give the target csv input file, map file, and output file name')
        exit()
| 46.362319 | 125 | 0.479056 | 712 | 6,398 | 4.164326 | 0.23736 | 0.125464 | 0.070152 | 0.04317 | 0.35312 | 0.314334 | 0.314334 | 0.314334 | 0.314334 | 0.302192 | 0 | 0.05599 | 0.380275 | 6,398 | 137 | 126 | 46.70073 | 0.691803 | 0.080025 | 0 | 0.208696 | 0 | 0 | 0.14966 | 0 | 0 | 0 | 0.013431 | 0 | 0.008696 | 1 | 0.026087 | false | 0 | 0.034783 | 0 | 0.078261 | 0.008696 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
633433b979d95716a0827e164f37cd9e29c19d6a | 1,782 | py | Python | simulator.py | baumrasen/JSN-SR04T-Serial-Simulator | 883ffac31dce1d9a7456af8282fc1bc24a09d0a9 | [
"MIT"
] | null | null | null | simulator.py | baumrasen/JSN-SR04T-Serial-Simulator | 883ffac31dce1d9a7456af8282fc1bc24a09d0a9 | [
"MIT"
] | null | null | null | simulator.py | baumrasen/JSN-SR04T-Serial-Simulator | 883ffac31dce1d9a7456af8282fc1bc24a09d0a9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import time
import serial
# create serial
ser = serial.Serial(
port='/dev/ttyAMA1', #Replace ttyAMA1 for your needs
baudrate = 9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=0.05
)
# edit here for your needs
value = 200
value_max = 8000
value_reset = 200
stepsize = 10
sleep_seconds = 1
# for ever do this
while 1:
# look for a character from serial port - will wait for up to 50ms (specified above in timeout)
data = ser.read(size=1)
# check for the right trigger --> 0x55
if (data == b'\x55'):
# small waiting time
time.sleep(0.05)
### comment out to set a value
# value = 1953
# print current value to console
print('current value: ' + str(value))
# startbit
b0 = 0xFF
# the upper 8 bits of the value
b1 = (value >> 8) & 0xff
# the lower 8 bits of the value
b2 = value & 0xff
# checksum (only low 8 bit)
b3 = (b0 + b1 + b2) & 0xFF
# arr = bytearray([0xFF, 0x07, 0xA1, 0xA7]) # should return 1953
arr = bytearray(4)
# set the right values to the byte array
arr[0] = b0
arr[1] = b1
arr[2] = b2
arr[3] = b3
print('bytearray to send: ' + str(b0) + ' ' + str(b1) + ' ' + str(b2) + ' ' + str(b3))
# send the array to the serial port
ser.write(arr)
# empty line
print()
value += stepsize
if (value > value_max):
# restart from reset value
value = value_reset
# wait a bit
time.sleep(sleep_seconds)
| 23.76 | 102 | 0.530864 | 229 | 1,782 | 4.09607 | 0.475983 | 0.042644 | 0.025586 | 0.021322 | 0.031983 | 0 | 0 | 0 | 0 | 0 | 0 | 0.070789 | 0.373737 | 1,782 | 74 | 103 | 24.081081 | 0.769713 | 0.354097 | 0 | 0 | 0 | 0 | 0.047028 | 0 | 0 | 0 | 0.014197 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.055556 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
633477a8a78066ac7ae776d824603b118e7b3ca3 | 2,688 | py | Python | binary_image_classifier/simple_keras.py | bsaikiran535/catsdogs | 07a0b15bc5e65cd68d355feb64a622b279b78fd4 | [
"MIT"
] | null | null | null | binary_image_classifier/simple_keras.py | bsaikiran535/catsdogs | 07a0b15bc5e65cd68d355feb64a622b279b78fd4 | [
"MIT"
] | null | null | null | binary_image_classifier/simple_keras.py | bsaikiran535/catsdogs | 07a0b15bc5e65cd68d355feb64a622b279b78fd4 | [
"MIT"
] | null | null | null | from IPython import embed
from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
TRAIN_PATH = '/Users/sai/dev/datasets/catsdogs-kaggle/data2/train/'
# Constants
NUM_CHANNELS = 3
IMG_X = 150
IMG_Y = 150
BATCH_SIZE = 16
TOTAL_NUM_IMAGES = 25000
def get_train_data_augmenter():
    """Build the ImageDataGenerator used for real-time training augmentation.

    Applies shear, zoom and horizontal flips, and rescales pixel values
    from [0, 255] into [0, 1].
    """
    return ImageDataGenerator(
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest',
        rescale=1./255,
    )
def run_sample_image_augmentation(augmenter):
    """Save 21 augmented variants of one sample image for visual inspection.

    Reads cat.0.jpg, runs it through *augmenter* and writes the results as
    JPEGs into the preview_augmentation/ directory.
    """
    sample = img_to_array(load_img(TRAIN_PATH + 'cat/cat.0.jpg'))
    batch = sample.reshape((1,) + sample.shape)  # generator wants a batch axis
    flow = augmenter.flow(batch, batch_size=1, save_to_dir='preview_augmentation',
                          save_prefix='cat', save_format='jpeg')
    # The flow is infinite, so stop after 21 augmented samples.
    for count, _ in enumerate(flow):
        if count >= 20:
            break
def get_train_data_generator(augmenter):
    """Return a generator of (image batch, binary label) pairs.

    Images are read from TRAIN_PATH (one subdirectory per class) and
    resized to (IMG_X, IMG_Y).
    """
    return augmenter.flow_from_directory(
        TRAIN_PATH,
        target_size=(IMG_X, IMG_Y),
        batch_size=BATCH_SIZE,
        class_mode='binary',
    )
def get_model():
    """Build and compile the binary CNN classifier.

    Three Conv/ReLU/MaxPool stages (32, 32, 64 filters) followed by a
    64-unit dense layer with dropout and a single sigmoid output, compiled
    with rmsprop and binary cross-entropy.
    """
    model = Sequential()
    # (filters, input_shape) for each conv stage; only the first layer
    # declares the input shape.
    conv_stages = [
        (32, (IMG_X, IMG_Y, NUM_CHANNELS)),
        (32, None),
        (64, None),
    ]
    for n_filters, input_shape in conv_stages:
        if input_shape is not None:
            model.add(Conv2D(filters=n_filters, kernel_size=(3, 3),
                             input_shape=input_shape))
        else:
            model.add(Conv2D(filters=n_filters, kernel_size=(3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
    # Classifier head.
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
    return model
def train_model(model, data_gen):
    """Train *model* on batches from *data_gen* for 50 epochs."""
    steps = TOTAL_NUM_IMAGES // BATCH_SIZE  # one pass over the dataset per epoch
    model.fit_generator(data_gen, steps_per_epoch=steps, epochs=50)
if __name__ == "__main__":
    # Build the augmenter, the model and the training data stream, then train.
    augmenter = get_train_data_augmenter()
    # Uncomment to dump sample augmented images for visual inspection.
    # run_sample_image_augmentation(augmenter)
    model = get_model()
    train_data_gen = get_train_data_generator(augmenter)
    train_model(model, train_data_gen)
| 25.358491 | 88 | 0.649926 | 354 | 2,688 | 4.669492 | 0.355932 | 0.072595 | 0.054446 | 0.053237 | 0.251664 | 0.209316 | 0.14882 | 0.14882 | 0.14882 | 0.113733 | 0 | 0.038312 | 0.232887 | 2,688 | 105 | 89 | 25.6 | 0.763337 | 0.075893 | 0 | 0.136986 | 0 | 0 | 0.06877 | 0.021036 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068493 | false | 0 | 0.054795 | 0 | 0.164384 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63348b523a1e8dad3500b346984e69a09705dc2d | 1,326 | py | Python | NicBot/cogs/db/utils.py | nicdgonzalez/NicBot-discord.py | 3a21510c1e4e2c933f48708478ae792159324a7c | [
"MIT"
] | null | null | null | NicBot/cogs/db/utils.py | nicdgonzalez/NicBot-discord.py | 3a21510c1e4e2c933f48708478ae792159324a7c | [
"MIT"
] | null | null | null | NicBot/cogs/db/utils.py | nicdgonzalez/NicBot-discord.py | 3a21510c1e4e2c933f48708478ae792159324a7c | [
"MIT"
] | null | null | null | from json import dump, load
from os import getcwd, mkdir
from os.path import exists
from ...errors import UpdateNewFile
def mkconfig(file: str):
cwd = getcwd().replace('\\', '/')
dirs = (
file
.replace('\\', '/')
.replace(cwd, '')
.strip('./')
.split('/')
)
_file = dirs.pop() # The file without the path.
if (len(dirs) > 0):
dir_to_make = '.'
for folder in dirs:
dir_to_make += '/' + folder
if not exists(dir_to_make):
mkdir(dir_to_make)
try:
open(file, 'r')
except FileNotFoundError as error:
template = {
'INFO': 'This is the database configuration file.',
'Name': {
'type': '',
'database': '',
'username': '',
'password': '',
'host': '',
'port': '',
'extras': {
'auto_commit': False,
'debug': False
}
}
}
with open(file, 'x') as f:
dump(template, f, indent=4)
e = 'New configuration file created at: `%s`' % (file)
raise UpdateNewFile(e) from error
else:
with open(file, 'r') as f:
return load(f)
| 22.862069 | 63 | 0.436652 | 129 | 1,326 | 4.410853 | 0.550388 | 0.035149 | 0.063269 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002611 | 0.422323 | 1,326 | 57 | 64 | 23.263158 | 0.740209 | 0.019608 | 0 | 0 | 0 | 0 | 0.122496 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022222 | false | 0.022222 | 0.088889 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
633516036af3a4c3e8ae99b1bb4a34061f655d7d | 959 | py | Python | Reacher-PyBullet/00_Random_Gym.py | hyunjun529/Learn-OpenAI-GYM | 51e1f3dc4cdfa7582690fc8338918aeb9671f4e3 | [
"MIT"
] | null | null | null | Reacher-PyBullet/00_Random_Gym.py | hyunjun529/Learn-OpenAI-GYM | 51e1f3dc4cdfa7582690fc8338918aeb9671f4e3 | [
"MIT"
] | null | null | null | Reacher-PyBullet/00_Random_Gym.py | hyunjun529/Learn-OpenAI-GYM | 51e1f3dc4cdfa7582690fc8338918aeb9671f4e3 | [
"MIT"
] | null | null | null | import gym
from gym import wrappers
env = gym.make('Reacher-v1')
env.reset()
env.render()
outdir = './log/'
f_act = open(outdir + 'log_act.txt', 'w')
f_obs = open(outdir + 'log_obs.txt', 'w')
f_rwd = open(outdir + 'log_rwd.txt', 'w')
f_info = open(outdir + 'log_info.txt', 'w')
env = wrappers.Monitor(env, directory=outdir, force=True)
for i_episode in range(101):
observation = env.reset()
for t in range(100):
env.render()
# action selection
action = env.action_space.sample()
# take the action and observe the reward and next state
observation, reward, done, info = env.step(action)
# print observation
f_act.write(str(action) + "\n")
f_obs.write(str(observation) + "\n")
f_rwd.write(str(reward) + "\n")
f_info.write(str(info) + "\n")
if done:
print("Episode finished after {} timesteps".format(t+1))
break
env.monitor.close()
| 25.236842 | 68 | 0.606882 | 137 | 959 | 4.145985 | 0.423358 | 0.079225 | 0.091549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010989 | 0.240876 | 959 | 37 | 69 | 25.918919 | 0.769231 | 0.091762 | 0 | 0.08 | 0 | 0 | 0.124567 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.08 | 0 | 0.08 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63359a3288cdf817808c76a0f398a98386d7b22c | 1,815 | py | Python | run_pipeline_beaco2n.py | openghg/gather | 0096cfe66b0093cdd294fa2a67c060d7fc28d2fa | [
"Apache-2.0"
] | null | null | null | run_pipeline_beaco2n.py | openghg/gather | 0096cfe66b0093cdd294fa2a67c060d7fc28d2fa | [
"Apache-2.0"
] | null | null | null | run_pipeline_beaco2n.py | openghg/gather | 0096cfe66b0093cdd294fa2a67c060d7fc28d2fa | [
"Apache-2.0"
] | null | null | null | """
This can be used to run the whole data scrape, process and export pipeline.
If you just want to run a single state see the scripts in the beaco2n/ directory.
"""
import argparse
from gather.pipeline import run_beaco2n
if __name__ == "__main__":
    # Shown verbatim under --help as the epilog: documents typical invocations.
    example_text = """Usage:
    $ python run_pipeline_beaco2n.py --vars co2 --export glasgow_co2_data.json --dir beaco2n/
    Downloads, processes and exports the data to a glasgow_co2_data.json file. Retrieved raw files are downloaded to the
    beaco2n/ directory.
    Similary running
    $ python run_pipeline_beaco2n.py --vars co2 --export glasgow_co2_data.json
    would do the same thing but would store the downloaded raw files in a temporary directory which is cleaned up
    after run.
    $ python run_pipeline_beaco2n.py --vars <species to extract> --export <processed data out JSON> --dir <download directory>
    """
    parser = argparse.ArgumentParser(
        prog="BEACO2N scraping pipeline",
        description="Script to allow easy scraping and processing of BEACO2N data.",
        epilog=example_text,
        # Keep the epilog's line breaks intact in --help output.
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    # parser.add_argument("--meta", help="path to JSON metadata file", type=str)
    # Species to extract from the scraped data, e.g. "co2 ch4".
    parser.add_argument(
        "--vars", help="variables to extract from data such e.g. ch4 co2", nargs="*", type=str
    )
    parser.add_argument("--export", help="filepath for dashboard data export")
    # When --dir is omitted, run_beaco2n downloads into a temporary directory.
    parser.add_argument("--dir", help="directory for data download", type=str)

    args = parser.parse_args()

    # metadata_path = args.meta
    download_path = args.dir
    selected_vars = args.vars
    export_filepath = args.export

    run_beaco2n(
        download_path=download_path,
        selected_vars=selected_vars,
        export_filepath=export_filepath,
    )
| 34.245283 | 126 | 0.710744 | 243 | 1,815 | 5.144033 | 0.427984 | 0.0288 | 0.0544 | 0.0576 | 0.1536 | 0.1152 | 0.0912 | 0.0912 | 0.0912 | 0.0912 | 0 | 0.01174 | 0.202204 | 1,815 | 52 | 127 | 34.903846 | 0.851519 | 0.1427 | 0 | 0 | 0 | 0.060606 | 0.54075 | 0.085382 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.060606 | 0 | 0.060606 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63375c19200ea5c4ad796897e81df65ed7bb6676 | 3,176 | py | Python | phosphodisco/tests/test_classes.py | ruggleslab/phosphodisco-1 | 4663e2c97b96304483234a80bc9b35befbf92795 | [
"MIT"
] | 1 | 2020-10-30T18:08:58.000Z | 2020-10-30T18:08:58.000Z | phosphodisco/tests/test_classes.py | ruggleslab/phosphodisco-1 | 4663e2c97b96304483234a80bc9b35befbf92795 | [
"MIT"
] | 5 | 2020-09-09T21:53:44.000Z | 2021-11-09T00:43:06.000Z | phosphodisco/tests/test_classes.py | ruggleslab/phosphodisco-1 | 4663e2c97b96304483234a80bc9b35befbf92795 | [
"MIT"
] | 3 | 2020-05-11T14:46:31.000Z | 2021-08-20T19:22:34.000Z | import phosphodisco as phdc
import numpy as np
import pandas as pd
# Fixed seed so the synthetic test data below is reproducible across runs.
seed = 5
np.random.seed(seed)
# Small random DataFrames standing in for protein and phosphosite abundance.
prot = pd.util.testing.makeDataFrame()
phospho = pd.util.testing.makeDataFrame()
# Give phospho a two-level (protein, site) index: each site is attached to
# one of the first 15 proteins at random.
phospho.index = pd.MultiIndex.from_tuples(
    [(prot.index[np.random.randint(0, 15)], ind) for ind in phospho.index]
)
def test_classes_regulators():
    """Exercise the normalize → impute → modules → regulators pipeline."""
    data = phdc.ProteomicsData(
        phospho, prot, min_common_values=2
    ).normalize_phospho_by_protein()
    data.impute_missing_values()
    # Assign each phosphosite a random module label (0-3).
    module_labels = pd.DataFrame(
        {'test;param-1': [np.random.randint(0, 4) for _ in range(30)]},
        index=data.normed_phospho.index,
    )
    data.assign_modules(module_labels)
    data.calculate_module_scores()
    # Pick a few proteins at random as candidate regulators.
    candidate_regulators = list(set(phospho.sample(3).index.get_level_values(0)))
    data.collect_possible_regulators(candidate_regulators, corr_threshold=0.98)
    data.calculate_regulator_association(model='linear', cv_fold=2)
    return data
def test_classes_annotations():
    """Exercise the normalize → modules → annotation-association pipeline."""
    data = phdc.ProteomicsData(
        phospho, prot, min_common_values=2
    ).normalize_phospho_by_protein()
    # Assign each phosphosite a random module label (0-3).
    module_labels = pd.DataFrame(
        {'test;param-1': [np.random.randint(0, 4) for _ in range(30)]},
        index=data.normed_phospho.index,
    )
    data.assign_modules(module_labels)
    # Two categorical and two continuous sample annotations (one with a NaN).
    sample_annotations = pd.DataFrame(
        {
            'cat1': ['A', 'B', 'A', 'B'],
            'cat2': ['A', 'B', 'B', 'C'],
            'cont1': [0.115, 0.01, 0.3, 0.9],
            'cont2': [-1, -2.5, np.nan, 1],
        },
        index=data.protein.columns,
    )
    data.calculate_module_scores()
    data.add_annotations(sample_annotations, pd.Series(['categorical', 0, 'continuous', 1]))
    data.calculate_annotation_association()
    return data
# phospho = phdc.read_phospho('/Users/lili/dropbox_lili/phosphodisco/results/brca-combined-v4.0-phosphoproteome'
# '-dedup-filtered.csv')
# protein = phdc.read_protein(
# '/Users/lili/dropbox_lili/phosphodisco/results/brca-combined-v4.0-proteome-dedup-filtered.csv')
# normed = phdc.read_phospho(
# '/Users/lili/dropbox_lili/phosphodisco/results/brca.normed_phospho.csv')
# clusters = phdc.read_phospho('~/dropbox_lili/phosphodisco/results/brca_labels.csv')
# regs = phdc.parsers.read_list(
# '/Users/lili/dropbox_lili/phosphodisco/phosphodisco/data/kinases_and_phosphatases.txt')
#
# data = phdc.ProteomicsData(
# phospho=phospho,
# protein=protein,
# normed_phospho=normed,
# modules=clusters,
# possible_regulator_list=regs
# )
# data.add_annotations(
# phdc.parsers.read_annotation(
# '/Users/lili/dropbox_lili/phosphodisco/results/brca-combined-v4.0-sample-annotation.filtered.csv'),
# pd.Series(phdc.parsers.read_list('/Users/lili/dropbox_lili/phosphodisco/results/brca.annotation_cols.txt'
# )),
# )
#
# # data.collect_possible_regulators(corr_threshold=0.9)
# data.calculate_module_scores()
# # data.calculate_regulator_coefficients()
#
# data.calculate_annotation_association(cat_method='RRA')
# data.annotation_association
| 34.901099 | 112 | 0.68199 | 378 | 3,176 | 5.542328 | 0.309524 | 0.036754 | 0.07685 | 0.057279 | 0.397136 | 0.349403 | 0.349403 | 0.344153 | 0.344153 | 0.295465 | 0 | 0.019668 | 0.183564 | 3,176 | 90 | 113 | 35.288889 | 0.788276 | 0.406801 | 0 | 0.367347 | 0 | 0 | 0.041622 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040816 | false | 0 | 0.061224 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6337fe4a780a9017d4452a00d6c6cd11afc30a4d | 6,500 | py | Python | parsl/tests/configs/user_opts.py | vkhodygo/parsl | ce2552caed9d223c3d8a84c16f830abc5f926331 | [
"Apache-2.0"
] | 1 | 2020-11-21T17:32:09.000Z | 2020-11-21T17:32:09.000Z | parsl/tests/configs/user_opts.py | vkhodygo/parsl | ce2552caed9d223c3d8a84c16f830abc5f926331 | [
"Apache-2.0"
] | null | null | null | parsl/tests/configs/user_opts.py | vkhodygo/parsl | ce2552caed9d223c3d8a84c16f830abc5f926331 | [
"Apache-2.0"
] | 1 | 2022-03-09T10:51:12.000Z | 2022-03-09T10:51:12.000Z | """
Specification of user-specific configuration options.
The fields must be configured separately for each user. To disable any associated configurations, comment
out the entry.
User specific overrides that should not go in version control can be set by creating a
file called local_user_opts.py, which declares a dictionary local_user_opts. Top level
keys in that dictionary will replace entries in the below user opts file, so it should
be safe to cut-and-paste entries from this file into that file.
"""
from typing import Any, Dict
# PUBLIC_IP = "52.86.208.63" # "128.135.250.229"
# MIDWAY_USERNAME = "yadunand"
# OSG_USERNAME = "yadunand"
# SWAN_USERNAME = "p02509"
# CORI_USERNAME = "yadunand"
# ALCF_USERNAME = "yadunand"
# ALCF_ALLOCATION = "CSC249ADCD01"
# COMET_USERNAME = "yadunand"
# Per-site test options. Uncommented entries are active; commented entries are
# kept as templates showing the fields each site historically required.
user_opts: Dict[str, Any] = {
    'frontera': {
        'worker_init': 'source ~/setup_parsl_test_env.sh;',
    },
    'theta': {
        'worker_init': 'source ~/setup_parsl_test_env.sh;',
    },
    'cori': {
        'worker_init': 'source ~/setup_parsl_test_env.sh;',
    },
    'summit': {
        'worker_init': 'source ~/setup_parsl_test_env.sh;',
    },
    'bluewaters': {
        'worker_init': 'source ~/setup_parsl_test_env.sh;',
    },
    'midway': {
        'worker_init': 'source ~/setup_parsl_test_env.sh;',
    },
    'petrelkube': {
        'worker_init': '~/setup_parsl_test_env.sh',
    },
    # 'comet': {
    #     'username': COMET_USERNAME,
    #     'script_dir': '/home/{}/parsl_scripts'.format(COMET_USERNAME),
    #     'scheduler_options': "",
    #     'worker_init': 'export PATH:/home/{}/anaconda3/bin/:$PATH; source activate parsl_0.5.0_py3.6;'.format(COMET_USERNAME),
    # },
    # 'midway': {
    #     'username': MIDWAY_USERNAME,
    #     'script_dir': '/scratch/midway2/{}/parsl_scripts'.format(MIDWAY_USERNAME),
    #     'scheduler_options': "",
    #     'worker_init': 'cd /scratch/midway2/{}/parsl_scripts; module load Anaconda3/5.1.0; source activate parsl_testing;'.format(MIDWAY_USERNAME),
    # },
    # 'osg': {
    #     'username': OSG_USERNAME,
    #     'script_dir': '/home/{}/parsl_scripts'.format(OSG_USERNAME),
    #     'scheduler_options': "",
    #     'worker_init' : 'module load python/3.5.2; python3 -m venv parsl_env; source parsl_env/bin/activate; python3 -m pip install parsl==0.5.2'
    # },
    # 'cori': {
    #     'username': CORI_USERNAME,
    #     'script_dir': "/global/homes/y/{}/parsl_scripts".format(CORI_USERNAME),
    #     'scheduler_options': "#SBATCH --constraint=haswell",
    #     "worker_init": """module load python/3.6-anaconda-4.4 ;
    # source activate parsl_env_3.6"""
    # },
    # 'swan': {
    #     'username': SWAN_USERNAME,
    #     'script_dir' : "/home/users/{}/parsl_scripts".format(SWAN_USERNAME),
    #     'scheduler_options': "",
    #     'worker_init': "module load cray-python/3.6.1.1; source parsl_env/bin/activate"
    # },
    # 'cooley': {
    #     'username': ALCF_USERNAME,
    #     "account": ALCF_ALLOCATION,
    #     'scheduler_options': "",
    #     "worker_init": "source /home/{}/setup_cooley_env.sh".format(ALCF_USERNAME),
    #     # Once you log onto Cooley, get the ip address of the login machine
    #     # by running >> ip addr show | grep -o 10.236.1.[0-9]*
    #     'public_ip': '10.236.1.193'
    # },
    # },
    # 'ec2': {
    #     "region": "us-east-2",
    #     "image_id": 'ami-82f4dae7',
    #     "key_name": "parsl.test",
    #     # Name of the profile used to identify credentials stored in ~/.aws/config
    #     "profile_name": "parsl",
    # },
    #
    # 'azure': {
    #
    #     # Specifies a username/password which can be used to log into Azure VMs
    #     # These must be specified but are not used by parsl to access the VMs.
    #     'admin_username': 'anyuser',
    #     'password': 'mypassword1234567!',
    #
    #     # Characteristics of the VMs to be started:
    #     'vm_size': 'Standard_D1',
    #     'disk_size_gb': '10',
    #
    #     # Details of the image to be started on each VM.
    #     # Values can be found using, for example, the `az` command line tool:
    #     #   az vm image list --publisher Debian
    #     'publisher': 'Debian',
    #     'offer': 'debian-10',
    #     'sku': '10',
    #     'version': 'latest'
    # },
    # 'theta': {
    #     'username': ALCF_USERNAME,
    #     "account": ALCF_ALLOCATION,
    #     'scheduler_options': "",
    #     "worker_init": "source /home/{}/setup_theta_env.sh".format(ALCF_USERNAME),
    #     # Once you log onto theta, get the ip address of the login machine
    #     # by running >> ip addr show | grep -o 10.236.1.[0-9]*
    #     'public_ip': '10.236.1.193'
    # },
    # 'beagle': {
    #     'username': 'fixme',
    #     "script_dir": "fixme",
    #     "scheduler_options": "#SBATCH --constraint=haswell",
    #     "worker_init": """module load python/3.5-anaconda ; source activate parsl_env_3.5"""
    # },
    # 'cc_in2p3': {
    #     'script_dir': "~/parsl_scripts",
    #     'scheduler_options': "",
    #     "worker_init": """export PATH=/pbs/throng/lsst/software/anaconda/anaconda3-5.0.1/bin:$PATH; source activate parsl_env_3.5"""
    # },
    # 'globus': {
    #     'endpoint': 'fixme',
    #     'path': 'fixme',
    #
    #     # remote_writeable should specify a directory on a globus endpoint somewhere else,
    #     # where files can be staged out to via globus during globus staging tests.
    #     # For example:
    #     'remote_writeable': 'globus://af7bda53-6d04-11e5-ba46-22000b92c6ec/home/bzc/'
    # },
    # 'adhoc': {
    #     # This specifies configuration parameters when testing an ad-hoc SSH based cluster
    #     'username': 'fixme',  # username on remote systems
    #     'remote_hostnames': ['hostname1', 'hostname2'],  # addresses of remote systems
    #     'worker_init': 'init commands',  # worker_init for remote systems
    #     'script_dir': "/path"  # script directory on remote systems
    # }
    #
}
# Optionally overlay per-user settings: a local_user_opts.py module (absent
# from a pristine parsl source tree, and deliberately kept out of version
# control so secrets and per-user config are never committed) may declare a
# local_user_opts dict whose top-level keys override entries in user_opts.
try:
    from .local_user_opts import local_user_opts
except ImportError:
    pass
else:
    user_opts.update(local_user_opts)
| 39.156627 | 149 | 0.616769 | 795 | 6,500 | 4.869182 | 0.34717 | 0.0465 | 0.026866 | 0.030741 | 0.326531 | 0.267889 | 0.23689 | 0.191682 | 0.137432 | 0.118316 | 0 | 0.029845 | 0.237077 | 6,500 | 165 | 150 | 39.393939 | 0.750756 | 0.777692 | 0 | 0.206897 | 0 | 0 | 0.269706 | 0.139876 | 0 | 0 | 0 | 0.006061 | 0 | 1 | 0 | false | 0.034483 | 0.103448 | 0 | 0.103448 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6338d410ae60bec391fb5bbc9bdfc0b712ca6028 | 1,771 | py | Python | controller/notebook_menu.py | tuannguyendang/montypython | c0b8ff7a8130e811ba16bfab8d5e013eac37f432 | [
"Apache-2.0"
] | null | null | null | controller/notebook_menu.py | tuannguyendang/montypython | c0b8ff7a8130e811ba16bfab8d5e013eac37f432 | [
"Apache-2.0"
] | null | null | null | controller/notebook_menu.py | tuannguyendang/montypython | c0b8ff7a8130e811ba16bfab8d5e013eac37f432 | [
"Apache-2.0"
] | null | null | null | import sys
from model import NoteBook
class NoteBookMenu:
    """Interactive command-line menu for managing a NoteBook's notes."""

    def __init__(self):
        self.notebook = NoteBook()
        # Dispatch table mapping the user's menu input to handler methods.
        self.choices = {
            "1": self.show_notes,
            "2": self.search_note,
            "3": self.add_note,
            "4": self.modify_note,
            "5": self.quit,
        }

    def run(self):
        """Display the menu and dispatch the user's choices until quit."""
        while True:
            self.display_menu()
            choice = input('Enter an option: ')
            action = self.choices.get(choice)
            if action:
                action()
            else:
                print('{0} is not a valid option'.format(choice))

    def display_menu(self):
        """Print the numbered list of available actions."""
        print("""
        Notebook Menu
        1. Show all notes
        2. Search note
        3. Add new note
        4. Modify note
        5. Quit
        """)

    def show_notes(self, notes=None):
        """Print *notes*, or every note in the notebook when *notes* is None.

        Bug fix: the previous `if not notes:` treated an empty result list the
        same as "no argument" and printed ALL notes, so a search with no
        matches displayed everything. Use an explicit None sentinel check.
        """
        if notes is None:
            notes = self.notebook.notes
        for note in notes:
            print('{0}: {1}\n{2}'.format(note.get_id(), note.memo, note.tags))

    def search_note(self):
        """Prompt for a query and show the matching notes."""
        query = input('Search note :')  # renamed from `filter` (shadowed builtin)
        if not query:
            print('Input invalid!')
            return  # bug fix: previously fell through and searched anyway
        notes = self.notebook.search(query)
        self.show_notes(notes)

    def add_note(self):
        """Prompt for a memo and store it as a new note."""
        memo = input('Input memo:')
        self.notebook.new_note(memo)
        print('New note added!')  # fixed typo: "node" -> "note"

    def modify_note(self):
        """Prompt for a note id plus new memo/tags and apply the changes."""
        try:
            # Renamed from `id` (shadowed builtin); reject non-numeric input
            # instead of crashing with an uncaught ValueError.
            note_id = int(input("Input note id:"))
        except ValueError:
            print('Input invalid!')
            return
        memo = input("Input memo:")
        tags = input("Input tags:")
        if memo:
            self.notebook.modify_memo(note_id, memo)
        if tags:
            self.notebook.modify_tags(note_id, tags)

    def quit(self):
        """Print a goodbye message and terminate the process."""
        print('Thank you for using montypython')
        sys.exit(0)
if __name__ == '__main__':
    # Launch the interactive menu when run as a script.
    menu = NoteBookMenu()
    menu.run()
| 24.260274 | 78 | 0.52061 | 209 | 1,771 | 4.282297 | 0.301435 | 0.080447 | 0.02905 | 0.040223 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013228 | 0.359684 | 1,771 | 72 | 79 | 24.597222 | 0.776014 | 0 | 0 | 0 | 0 | 0 | 0.188029 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137931 | false | 0 | 0.034483 | 0 | 0.189655 | 0.103448 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6339d0b5926989fda79429ab96aca5d2ce51621f | 5,500 | py | Python | prey_and_pred_size.py | gimon0330/Natural-Selection-Simulator | 171a34a901e6d863a8fb179a862e7dc4bc84e495 | [
"Apache-2.0"
] | 1 | 2021-11-12T12:33:36.000Z | 2021-11-12T12:33:36.000Z | prey_and_pred_size.py | gimon0330/Natural-Selection-Simulator | 171a34a901e6d863a8fb179a862e7dc4bc84e495 | [
"Apache-2.0"
] | null | null | null | prey_and_pred_size.py | gimon0330/Natural-Selection-Simulator | 171a34a901e6d863a8fb179a862e7dc4bc84e495 | [
"Apache-2.0"
] | null | null | null | import pygame, random, time, sys, math
pygame.init()
SCREEN_WIDTH = 1080
SCREEN_HEIGHT = 720
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
pygame.display.set_caption("이미지 불러오기")
clock = pygame.time.Clock()
screen.fill((255,255,255))
class Predetor(pygame.sprite.Sprite):
    """Red 30x30 predator sprite that random-walks and counts prey eaten."""

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface((30, 30))
        self.image.fill((255, 0, 0))
        self.rect = self.image.get_rect()
        self.pos = pygame.Vector2(800, 600)
        self.speed = pygame.Vector2(7.5, 7.5)
        self.eaten = 0  # prey eaten so far

    def update(self):
        """Advance one random-walk step, reflecting off the window edges."""
        # Jitter the heading by a Gaussian angle, then move.
        self.speed.rotate_ip(random.gauss(0, 1) * 10)
        self.pos += self.speed
        self.rect.center = self.pos
        # Bounce horizontally when the sprite leaves the window.
        if self.rect.left < 0:
            self.rect.left = 0
            self.speed.x = -self.speed.x
        elif self.rect.right > SCREEN_WIDTH:
            self.rect.right = SCREEN_WIDTH
            self.speed.x = -self.speed.x
        # Bounce vertically when the sprite leaves the window.
        if self.rect.top < 0:
            self.rect.top = 0
            self.speed.y = -self.speed.y
        elif self.rect.bottom > SCREEN_HEIGHT:
            self.rect.bottom = SCREEN_HEIGHT
            self.speed.y = -self.speed.y

    def eat(self):
        """Record one prey eaten."""
        self.eaten += 1
class Prey(pygame.sprite.Sprite):
    """Black 20x20 prey sprite with a heritable escape-turn angle `theta`."""

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface((20, 20))
        self.image.fill((0, 0, 0))
        # Spawn near (100, 100) with +/-50 px of jitter per axis.
        # NOTE: the random.uniform/gauss call order below must stay as-is so
        # the global RNG stream (and hence the simulation) is unchanged.
        self.posx = 100 + random.uniform(50.0, -50.0)
        self.posy = 100 + random.uniform(50.0, -50.0)
        self.pos = pygame.Vector2((self.posx, self.posy))
        self.rect = self.image.get_rect()
        self.speedsc = random.uniform(0.2, 6.0)  # speed scale; not referenced in the visible code
        self.speed = pygame.Vector2(10, 10)
        self.escape = 0
        self.theta = random.uniform(-180.0, 180.0)  # inherited escape-turn angle
        self.usetime = time.time()  # timestamp of the last escape attempt

    def update(self):
        """Advance one random-walk step, reflecting off the window edges."""
        self.speed.rotate_ip(random.gauss(0, 1) * 5)
        self.pos += self.speed
        self.rect.center = self.pos
        # Bounce horizontally at the window borders.
        if self.rect.left < 0:
            self.rect.left = 0
            self.speed.x = -self.speed.x
        elif self.rect.right > SCREEN_WIDTH:
            self.rect.right = SCREEN_WIDTH
            self.speed.x = -self.speed.x
        # Bounce vertically at the window borders.
        if self.rect.top < 0:
            self.rect.top = 0
            self.speed.y = -self.speed.y
        elif self.rect.bottom > SCREEN_HEIGHT:
            self.rect.bottom = SCREEN_HEIGHT
            self.speed.y = -self.speed.y
# --- sprite setup: 4 predators, 1350 prey ---
all_sprites = pygame.sprite.Group()
predetor_sprites = pygame.sprite.Group()
prey_sprites = pygame.sprite.Group()
for i in range(4):
    predetor = Predetor()
    predetor_sprites.add(predetor)
    all_sprites.add(predetor)
for i in range(1350):
    prey = Prey()
    prey_sprites.add(prey)
    all_sprites.add(prey)
day = 1
day_speed = 3  # real-time seconds per simulated day
while True:
    print(f"day {day} ======")
    # ---- daytime: run the chase for day_speed seconds ----
    count = time.time() + day_speed
    while time.time() < count:
        all_sprites.update()
        # Predators eat prey they touch, but never below a floor of 15 prey.
        crash = pygame.sprite.groupcollide(prey_sprites, predetor_sprites, False, False)
        for prey, pred in crash.items():
            if len(prey_sprites) > 15:
                pred[0].eat()
                prey.kill()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()  # bug fix: exit here; drawing after pygame.quit() crashes
        screen.fill((255, 255, 255))
        all_sprites.draw(screen)
        clock.tick(60)
        pygame.display.update()
        # Prey within 50 px of a predator attempt an escape turn: their
        # velocity is set to the predator's velocity rotated by theta.
        for preys in prey_sprites:
            for predetors in predetor_sprites:
                dis = preys.pos.distance_to(predetors.pos)
                if dis < 50:
                    cooltime = 0.5
                    # NOTE(review): the turn fires only while still inside the
                    # cooldown window; confirm whether the condition was meant
                    # to be inverted. Also, theta is in degrees but math.cos/
                    # sin take radians — confirm intent before changing.
                    if time.time() < preys.usetime + cooltime:
                        # Bug fix: use the nearby predator `predetors`, not the
                        # stale spawn-loop variable `predetor`.
                        preys.speed.x = predetors.speed.x * math.cos(preys.theta) - predetors.speed.y * math.sin(preys.theta)
                        preys.speed.y = predetors.speed.x * math.sin(preys.theta) + predetors.speed.y * math.cos(preys.theta)
                    preys.usetime = time.time()
    # ---- nighttime: statistics and reproduction ----
    day += 1
    average_theta = 0
    for preys in prey_sprites:
        average_theta += preys.theta
    print(f"Amount : {len(prey_sprites)}, Average : {average_theta/len(prey_sprites)}")
    # Each survivor spawns one offspring inheriting theta with small noise;
    # ~1% of offspring get a large mutation instead.
    for preys in prey_sprites:
        new_prey = Prey()
        # Bug fix: randint(1, 100) is inclusive, so `< 1` was never true and
        # the mutation branch was dead; `<= 1` gives the intended 1% chance.
        if random.randint(1, 100) <= 1:
            new_prey.theta = preys.theta + random.uniform(-90.0, 90.0)
        else:
            new_prey.theta = preys.theta + random.uniform(2.0, -2.0)
        new_prey.pos = preys.rect.center
        preys.escape = 0
        prey_sprites.add(new_prey)
        all_sprites.add(new_prey)
    """for predetors in predetor_sprites:
        if predetors.eaten < 5 and len(predetor_sprites) > 1:
            predetors.kill()
        if predetors.eaten > 12 and len(predetor_sprites) < 12:
            new_predetor = Predetor()
            new_predetor.pos = predetors.rect.center
            predetor_sprites.add(new_predetor)
            all_sprites.add(new_predetor)
        predetors.eaten = 0"""
    # End the run when either population dies out.
    if not predetor_sprites:
        print("All predetors dead")
        pygame.quit()
        sys.exit()
    if not prey_sprites:
        print("All preys dead")
        pygame.quit()
        sys.exit()
6339f1ab281fecc21a9c0911b00b070cc3487a59 | 1,300 | py | Python | Homework3.py | msalum/Phyton-TkInter | b4ee45f6703d0a584970e059438e92ac8dcb0f9f | [
"MIT"
] | null | null | null | Homework3.py | msalum/Phyton-TkInter | b4ee45f6703d0a584970e059438e92ac8dcb0f9f | [
"MIT"
] | null | null | null | Homework3.py | msalum/Phyton-TkInter | b4ee45f6703d0a584970e059438e92ac8dcb0f9f | [
"MIT"
] | null | null | null | from tkinter import *
import tkinter.scrolledtext as scrl
from tkinter import messagebox
import tkinter.filedialog as tkfd
def showAbout():
helloText = "Hello World"
messagebox.showinfo("", helloText)
# SAVE
def saveAs():
fileContent = content.get(1.0, END)
fileName = tkfd.asksaveasfile(mode='w', defaultextension=".txt", filetypes = (("Text file", "*.txt"), ("All files", "*.*")))
if fileName:
fileName
fileName.write(fileContent)
fileName.close()
# OPEN
def openFile():
try:
file = tkfd.askopenfile(mode='r')
fileContent = file.read()
except:
messagebox.warning("Unavailable")
# CLOSE
def exit():
if messagebox.askyesno("Quit", "Are you sure you want to quit?"):
root.destroy()
#GUI
root = Tk()
root.title("Python Notepad")
root.geometry("640x480")
frame1 = Frame(
master = root
)
frame1.pack(fill='both', expand='yes')
# MENU
menuBar = Menu(root)
root.config(menu = menuBar)
fileMenu = Menu(menuBar, tearoff = 0)
fileMenu.add_command(label = 'Open a file...', command = openFile)
fileMenu.add_command(label = 'Save to a file...', command = saveAs)
menuBar.add_cascade(label = 'File', menu = fileMenu)
fileMenu.add_separator()
fileMenu.add_command(label = 'Close', command = exit)
# RUN
root.mainloop() | 20 | 128 | 0.672308 | 159 | 1,300 | 5.465409 | 0.528302 | 0.050633 | 0.06214 | 0.079402 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010309 | 0.179231 | 1,300 | 65 | 129 | 20 | 0.804124 | 0.020769 | 0 | 0 | 0 | 0 | 0.123125 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102564 | false | 0 | 0.102564 | 0 | 0.205128 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
633abef67a261f6f9857eba9e728f553af7a9bfd | 4,403 | py | Python | moto/mediapackage/responses.py | orenmazor/moto | 4778377e8ecaf729d26602a2c5202b72c1438503 | [
"Apache-2.0"
] | 1 | 2021-12-12T04:23:06.000Z | 2021-12-12T04:23:06.000Z | moto/mediapackage/responses.py | orenmazor/moto | 4778377e8ecaf729d26602a2c5202b72c1438503 | [
"Apache-2.0"
] | 4 | 2017-09-30T07:52:52.000Z | 2021-12-13T06:56:55.000Z | moto/mediapackage/responses.py | orenmazor/moto | 4778377e8ecaf729d26602a2c5202b72c1438503 | [
"Apache-2.0"
] | 2 | 2021-11-24T08:05:43.000Z | 2021-11-25T16:18:48.000Z | from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from .models import mediapackage_backends
import json
class MediaPackageResponse(BaseResponse):
SERVICE_NAME = "mediapackage"
@property
def mediapackage_backend(self):
return mediapackage_backends[self.region]
def create_channel(self):
description = self._get_param("description")
id = self._get_param("id")
tags = self._get_param("tags")
channel = self.mediapackage_backend.create_channel(
description=description, id=id, tags=tags,
)
return json.dumps(channel.to_dict())
def list_channels(self):
channels = self.mediapackage_backend.list_channels()
return json.dumps(dict(channels=channels))
def describe_channel(self):
id = self._get_param("id")
return json.dumps(self.mediapackage_backend.describe_channel(id=id))
def delete_channel(self):
channel_id = self._get_param("id")
return json.dumps(self.mediapackage_backend.delete_channel(id=channel_id))
def create_origin_endpoint(self):
authorization = self._get_param("authorization")
channel_id = self._get_param("channelId")
cmaf_package = self._get_param("cmafPackage")
dash_package = self._get_param("dashPackage")
description = self._get_param("description")
hls_package = self._get_param("hlsPackage")
id = self._get_param("id")
manifest_name = self._get_param("manifestName")
mss_package = self._get_param("mssPackage")
origination = self._get_param("origination")
startover_window_seconds = self._get_int_param("startoverWindowSeconds")
tags = self._get_param("tags")
time_delay_seconds = self._get_int_param("timeDelaySeconds.member")
whitelist = self._get_list_prefix("whitelist.member")
origin_endpoint = self.mediapackage_backend.create_origin_endpoint(
authorization=authorization,
channel_id=channel_id,
cmaf_package=cmaf_package,
dash_package=dash_package,
description=description,
hls_package=hls_package,
id=id,
manifest_name=manifest_name,
mss_package=mss_package,
origination=origination,
startover_window_seconds=startover_window_seconds,
tags=tags,
time_delay_seconds=time_delay_seconds,
whitelist=whitelist,
)
return json.dumps(origin_endpoint.to_dict())
def list_origin_endpoints(self):
origin_endpoints = self.mediapackage_backend.list_origin_endpoints()
return json.dumps(dict(originEndpoints=origin_endpoints))
def describe_origin_endpoint(self):
id = self._get_param("id")
return json.dumps(self.mediapackage_backend.describe_origin_endpoint(id=id))
def delete_origin_endpoint(self):
id = self._get_param("id")
return json.dumps(self.mediapackage_backend.delete_origin_endpoint(id=id))
def update_origin_endpoint(self):
authorization = self._get_param("authorization")
cmaf_package = self._get_param("cmafPackage")
dash_package = self._get_param("dashPackage")
description = self._get_param("description")
hls_package = self._get_param("hlsPackage")
id = self._get_param("id")
manifest_name = self._get_param("manifestName")
mss_package = self._get_param("mssPackage")
origination = self._get_param("origination")
startover_window_seconds = self._get_int_param("startoverWindowSeconds")
time_delay_seconds = self._get_int_param("timeDelaySeconds")
whitelist = self._get_list_prefix("whitelist.member")
origin_endpoint = self.mediapackage_backend.update_origin_endpoint(
authorization=authorization,
cmaf_package=cmaf_package,
dash_package=dash_package,
description=description,
hls_package=hls_package,
id=id,
manifest_name=manifest_name,
mss_package=mss_package,
origination=origination,
startover_window_seconds=startover_window_seconds,
time_delay_seconds=time_delay_seconds,
whitelist=whitelist,
)
return json.dumps(origin_endpoint.to_dict())
| 40.768519 | 84 | 0.684988 | 477 | 4,403 | 5.932914 | 0.140461 | 0.081625 | 0.114488 | 0.039576 | 0.706007 | 0.64947 | 0.64947 | 0.64947 | 0.576678 | 0.576678 | 0 | 0 | 0.226664 | 4,403 | 107 | 85 | 41.149533 | 0.831131 | 0 | 0 | 0.578947 | 0 | 0 | 0.07881 | 0.015217 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.042105 | 0.010526 | 0.273684 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |