max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
main.py | Prateek-Bansal-pb/Dice-Simulator | 0 | 6615551 | <gh_stars>0
import random
def roll():
rollResult = random.randint(1, 6)
if rollResult == 1:
print('''
░░███╗░░
░████║░░
██╔██║░░
╚═╝██║░░
███████╗
╚══════╝''')
elif rollResult == 2:
print('''
██████╗░
╚════██╗
░░███╔═╝
██╔══╝░░
███████╗
╚══════╝''')
elif rollResult == 3:
print('''
██████╗░
╚════██╗
░█████╔╝
░╚═══██╗
██████╔╝
╚═════╝░''')
elif rollResult == 4:
print('''
░░██╗██╗
░██╔╝██║
██╔╝░██║
███████║
╚════██║
░░░░░╚═╝''')
elif rollResult == 5:
print('''
███████╗
██╔════╝
██████╗░
╚════██╗
██████╔╝
╚═════╝░''')
else:
print('''
░█████╗░
██╔═══╝░
██████╗░
██╔══██╗
╚█████╔╝
░╚════╝░''')
while True:
print("Hello Do you want to roll a dice")
choice = input("Y/N: ")
if choice.lower() == 'y':
roll()
elif choice.lower() == 'n':
break
else:
print("Error") | import random
def roll():
rollResult = random.randint(1, 6)
if rollResult == 1:
print('''
░░███╗░░
░████║░░
██╔██║░░
╚═╝██║░░
███████╗
╚══════╝''')
elif rollResult == 2:
print('''
██████╗░
╚════██╗
░░███╔═╝
██╔══╝░░
███████╗
╚══════╝''')
elif rollResult == 3:
print('''
██████╗░
╚════██╗
░█████╔╝
░╚═══██╗
██████╔╝
╚═════╝░''')
elif rollResult == 4:
print('''
░░██╗██╗
░██╔╝██║
██╔╝░██║
███████║
╚════██║
░░░░░╚═╝''')
elif rollResult == 5:
print('''
███████╗
██╔════╝
██████╗░
╚════██╗
██████╔╝
╚═════╝░''')
else:
print('''
░█████╗░
██╔═══╝░
██████╗░
██╔══██╗
╚█████╔╝
░╚════╝░''')
while True:
print("Hello Do you want to roll a dice")
choice = input("Y/N: ")
if choice.lower() == 'y':
roll()
elif choice.lower() == 'n':
break
else:
print("Error") | ru | 0.214975 | ░░███╗░░ ░████║░░ ██╔██║░░ ╚═╝██║░░ ███████╗ ╚══════╝ ██████╗░ ╚════██╗ ░░███╔═╝ ██╔══╝░░ ███████╗ ╚══════╝ ██████╗░ ╚════██╗ ░█████╔╝ ░╚═══██╗ ██████╔╝ ╚═════╝░ ░░██╗██╗ ░██╔╝██║ ██╔╝░██║ ███████║ ╚════██║ ░░░░░╚═╝ ███████╗ ██╔════╝ ██████╗░ ╚════██╗ ██████╔╝ ╚═════╝░ ░█████╗░ ██╔═══╝░ ██████╗░ ██╔══██╗ ╚█████╔╝ ░╚════╝░ | 3.93235 | 4 |
aicv/altairviz.py | deeyaviradia/aicv | 0 | 6615552 | import altair as alt
class AltairViz(object):
#data_frame should be an instance of aicv.core.DataFrame
def __init__(self, data_frame):
self.data_frame = data_frame
self.config = None
def set_config(self, config):
self.config = config
def render(self):
if (self.config is None):
raise RuntimeError("You need to call set_config"
+" to set the conifg!")
#TODO
def get_interactive_histogram(colname):
yaxis = alt.Y('count():Q', title="Count")
xaxis = alt.X(colname+':Q', bin=alt.Bin(maxbins=100))
#apparently height/width doesn't include the space for the
# axes labels, so these need to be adjusted a bit.
bg_histogram = alt.Chart(DF_TO_USE).mark_bar().encode(
y=yaxis,
x=xaxis,
color=alt.value('lightgrey')).properties(
width=TOTAL_WIDTH*(1-TSNE_WIDTHFRAC)/4
- (FONTSIZE+PADDING_GUESS),
height=TOTAL_HEIGHT*TSNE_HEIGHTFRAC/3
- (FONTSIZE+PADDING_GUESS),
selection=INTERVAL_SELECTION)
fg_histogram = alt.Chart(DF_TO_USE).mark_bar().encode(
y=yaxis,
color=alt.value('steelblue'),
x=xaxis).transform_filter(COMPOSED_SELECTION)
return (bg_histogram+fg_histogram)
| import altair as alt
class AltairViz(object):
#data_frame should be an instance of aicv.core.DataFrame
def __init__(self, data_frame):
self.data_frame = data_frame
self.config = None
def set_config(self, config):
self.config = config
def render(self):
if (self.config is None):
raise RuntimeError("You need to call set_config"
+" to set the conifg!")
#TODO
def get_interactive_histogram(colname):
yaxis = alt.Y('count():Q', title="Count")
xaxis = alt.X(colname+':Q', bin=alt.Bin(maxbins=100))
#apparently height/width doesn't include the space for the
# axes labels, so these need to be adjusted a bit.
bg_histogram = alt.Chart(DF_TO_USE).mark_bar().encode(
y=yaxis,
x=xaxis,
color=alt.value('lightgrey')).properties(
width=TOTAL_WIDTH*(1-TSNE_WIDTHFRAC)/4
- (FONTSIZE+PADDING_GUESS),
height=TOTAL_HEIGHT*TSNE_HEIGHTFRAC/3
- (FONTSIZE+PADDING_GUESS),
selection=INTERVAL_SELECTION)
fg_histogram = alt.Chart(DF_TO_USE).mark_bar().encode(
y=yaxis,
color=alt.value('steelblue'),
x=xaxis).transform_filter(COMPOSED_SELECTION)
return (bg_histogram+fg_histogram)
| en | 0.829124 | #data_frame should be an instance of aicv.core.DataFrame #TODO #apparently height/width doesn't include the space for the # axes labels, so these need to be adjusted a bit. | 2.779645 | 3 |
Code/Examples/Example_28.py | R6500/SLab | 2 | 6615553 | '''
SLab
Example_28.py
Check a voltage divider
Uses the EZ module
'''
# Locate slab in the parent folder
import sys
sys.path.append('..')
sys.path.append('.')
import slab
# Set prefix to locate calibrations
slab.setFilePrefix("../")
# Import and connect to the board
from slab_ez import *
# Get ADC 1 and ADC 2 data
print "V1 " + str(readVoltage(1))
print "V2 " + str(readVoltage(2))
print "V1-2 " + str(readVoltage(1,2))
| '''
SLab
Example_28.py
Check a voltage divider
Uses the EZ module
'''
# Locate slab in the parent folder
import sys
sys.path.append('..')
sys.path.append('.')
import slab
# Set prefix to locate calibrations
slab.setFilePrefix("../")
# Import and connect to the board
from slab_ez import *
# Get ADC 1 and ADC 2 data
print "V1 " + str(readVoltage(1))
print "V2 " + str(readVoltage(2))
print "V1-2 " + str(readVoltage(1,2))
| en | 0.632004 | SLab
Example_28.py
Check a voltage divider
Uses the EZ module # Locate slab in the parent folder # Set prefix to locate calibrations # Import and connect to the board # Get ADC 1 and ADC 2 data | 2.637767 | 3 |
pretools/utils.py | Y-oHr-N/pretools | 1 | 6615554 | <gh_stars>1-10
"""Utilities."""
import logging
from typing import Any
import numpy as np
import pandas as pd
from sklearn.utils import check_array
def check_X(X: pd.DataFrame, **kwargs: Any) -> pd.DataFrame:
"""Check `X`.
Parameters
----------
X
Data.
estimator
Object to use to fit the data.
**kwargs
Other keywords passed to `sklearn.utils.check_array`.
Returns
-------
X
Converted and validated data.
"""
if not isinstance(X, pd.DataFrame):
X = check_array(X, **kwargs)
X = pd.DataFrame(X)
return X
def get_categorical_cols(X: pd.DataFrame, labels: bool = False) -> pd.Series:
"""Get categorical columns."""
X = pd.DataFrame(X)
logger = logging.getLogger(__name__)
is_categorical = X.dtypes == "category"
n_features = np.sum(is_categorical)
logger.info("The number of categorical features is {}.".format(n_features))
if labels:
return X.columns[is_categorical]
return is_categorical
def get_numerical_cols(X: pd.DataFrame, labels: bool = False) -> pd.Series:
"""Get numerical columns."""
X = pd.DataFrame(X)
logger = logging.getLogger(__name__)
is_numerical = X.dtypes.apply(lambda x: issubclass(x.type, np.number))
n_features = np.sum(is_numerical)
logger.info("The number of numerical features is {}.".format(n_features))
if labels:
return X.columns[is_numerical]
return is_numerical
def get_time_cols(X: pd.DataFrame, labels: bool = False) -> pd.Series:
"""Get time columns."""
X = pd.DataFrame(X)
logger = logging.getLogger(__name__)
is_time = X.dtypes.apply(lambda x: issubclass(x.type, np.datetime64))
n_features = np.sum(is_time)
logger.info("The number of time features is {}.".format(n_features))
if labels:
return X.columns[is_time]
return is_time
def get_unknown_cols(X: pd.DataFrame, labels: bool = False) -> pd.Series:
"""Get unknown columns."""
X = pd.DataFrame(X)
logger = logging.getLogger(__name__)
is_unknown = X.dtypes == object
n_features = np.sum(is_unknown)
logger.info("The number of unknown features is {}.".format(n_features))
if labels:
return X.columns[is_unknown]
return is_unknown
def sigmoid(x: float, a: float = 1.0) -> float:
"""Sigmoid function."""
return 1.0 / (1.0 + np.exp(-a * x))
| """Utilities."""
import logging
from typing import Any
import numpy as np
import pandas as pd
from sklearn.utils import check_array
def check_X(X: pd.DataFrame, **kwargs: Any) -> pd.DataFrame:
"""Check `X`.
Parameters
----------
X
Data.
estimator
Object to use to fit the data.
**kwargs
Other keywords passed to `sklearn.utils.check_array`.
Returns
-------
X
Converted and validated data.
"""
if not isinstance(X, pd.DataFrame):
X = check_array(X, **kwargs)
X = pd.DataFrame(X)
return X
def get_categorical_cols(X: pd.DataFrame, labels: bool = False) -> pd.Series:
"""Get categorical columns."""
X = pd.DataFrame(X)
logger = logging.getLogger(__name__)
is_categorical = X.dtypes == "category"
n_features = np.sum(is_categorical)
logger.info("The number of categorical features is {}.".format(n_features))
if labels:
return X.columns[is_categorical]
return is_categorical
def get_numerical_cols(X: pd.DataFrame, labels: bool = False) -> pd.Series:
"""Get numerical columns."""
X = pd.DataFrame(X)
logger = logging.getLogger(__name__)
is_numerical = X.dtypes.apply(lambda x: issubclass(x.type, np.number))
n_features = np.sum(is_numerical)
logger.info("The number of numerical features is {}.".format(n_features))
if labels:
return X.columns[is_numerical]
return is_numerical
def get_time_cols(X: pd.DataFrame, labels: bool = False) -> pd.Series:
"""Get time columns."""
X = pd.DataFrame(X)
logger = logging.getLogger(__name__)
is_time = X.dtypes.apply(lambda x: issubclass(x.type, np.datetime64))
n_features = np.sum(is_time)
logger.info("The number of time features is {}.".format(n_features))
if labels:
return X.columns[is_time]
return is_time
def get_unknown_cols(X: pd.DataFrame, labels: bool = False) -> pd.Series:
"""Get unknown columns."""
X = pd.DataFrame(X)
logger = logging.getLogger(__name__)
is_unknown = X.dtypes == object
n_features = np.sum(is_unknown)
logger.info("The number of unknown features is {}.".format(n_features))
if labels:
return X.columns[is_unknown]
return is_unknown
def sigmoid(x: float, a: float = 1.0) -> float:
"""Sigmoid function."""
return 1.0 / (1.0 + np.exp(-a * x)) | en | 0.313323 | Utilities. Check `X`. Parameters ---------- X Data. estimator Object to use to fit the data. **kwargs Other keywords passed to `sklearn.utils.check_array`. Returns ------- X Converted and validated data. Get categorical columns. Get numerical columns. Get time columns. Get unknown columns. Sigmoid function. | 3.007067 | 3 |
mummi_ras/online/aa/aa_simulation.py | mummi-framework/mummi-ras | 4 | 6615555 | <filename>mummi_ras/online/aa/aa_simulation.py
from subprocess import PIPE, Popen
import os
from pprint import pprint
from time import sleep
import glob
import shutil
from mummi_ras.online.aa.aa_siminputs import AAinput
from mummi_core.utils.utilities import sys_call
import MDAnalysis as mda
from logging import getLogger
LOGGER = getLogger(__name__)
class AAsimulation():
def __init__(self, outpath, locpath):
# Logistical variables
self.popenenv = {
"shell": True,
"universal_newlines": True,
"cwd": '.',
#"env": os.environ,
}
self.is_local = True
if outpath == locpath:
self.is_local = False
self._equi_process = None # Initialize the process to None
self._md_process = None # Initialize the process to None
self.trajname=""
self.toponame="gro2amber.gro"
self.trajtype="TRJ"
self.outpath=outpath
self.curIdx = "0"
self.prev_filedate=0
self.backup_count=0
self.backup_rate=5
# nstlim (number of step limit) of each simulation run
self.nvt_nstlim=125000
self.npt_nstlim=250000 #npt is separated into two runs
# Set the max simtime of each sub-md sim very large - each sim will be stopped by the workflow
self.md_nstlim=12500000 # 50 ns, 1500000 6 ns
# max simulation time 12500000 * 4 fs = 50 ns
self.max_simtime=12500000
def setup(self):
# TODO copy the required files to current working place
LOGGER.info("Starting AMBER setup ....")
#utils.sys_call("cp " + self.outpath + "/amber.* .")
shutil.copyfile(self.outpath + "/amber.prmtop", "amber.prmtop")
shutil.copyfile(self.outpath + "/amber.inpcrd", "amber.inpcrd")
shutil.copyfile(self.outpath + "/gro2amber.gro", "gro2amber.gro")
try:
sys_call("cp " + self.outpath + "/md.*.rst .")
sys_call("cp " + self.outpath + "/md.*.out .")
except:
LOGGER.info('No restart files are there. This is a new simulation')
## NVT
def nvt(self):
#pprint(os.environ)
LOGGER.info("Starting NVT equilibrium ....")
kwargs = self.popenenv
amber_exe=shutil.which("pmemd.cuda")
LOGGER.info("AMBER executable " + amber_exe )
AAinput.nvt_input(self.nvt_nstlim)
# pmemd.cuda -O -i nvt.in -p amber.prmtop -c amber.inpcrd -ref amber.inpcrd \
# -r md.NVT.rst -x md.NVT.mdcrd -inf md.NVT.info -l md.NVT.log -o md.NVT.out
cmd = [#"source /usr/gapps/mummi/lassen/amber18/amber.sh;",
#"/usr/gapps/mummi/lassen/amber18/bin/pmemd.cuda -O",
"pmemd.cuda -O", "-i nvt.in", "-p amber.prmtop", "-c amber.inpcrd",
"-ref amber.inpcrd", "-r md.NVT.rst", "-x md.NVT.mdcrd", "-inf md.NVT.info",
"-l md.NVT.log", "-o md.NVT.out"
]
LOGGER.info("cmd = %s", " ".join(cmd))
#self._equi_process = Popen(cmd, **kwargs)
stdout = os.path.join(self.outpath, "nvt.stderr")
stderr = os.path.join(self.outpath, "nvt.stdout")
with open(stdout, "wb") as out, open(stderr, "wb") as err:
self._equi_process = \
Popen(' '.join(cmd), stdout=out, stderr=err, **kwargs)
sleep(20)
LOGGER.info("Process Running? %s", self._equi_process.poll())
cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", None)
LOGGER.info("CUDA_VISIBLE_DEVICES=%s", cuda_visible_devices)
#wait until process complete
self._equi_process.wait()
if not os.path.isfile('md.NVT.rst'):
raise Exception("md.NVT.rst is not there")
# backup the calcualtion
if self.is_local:
try:
sys_call("cp md.NVT.* aa_analysis_logger.log " + self.outpath)
except:
LOGGER.info('Backup files fail')
## NPT
def npt(self, num):
LOGGER.info("Starting NPT "+str(num)+" equilibrium ....")
kwargs = self.popenenv
amber_exe=shutil.which("pmemd.cuda")
LOGGER.info("AMBER executable " + amber_exe )
AAinput.npt_input(self.npt_nstlim)
# pmemd.cuda -O -i npt.in -p amber.prmtop -c md.NVT.rst -ref md.NVT.rst \
# -r md.NPT.rst -x md.NPT.mdcrd -inf md.NPT.info -l md.NPT.log -o md.NPT.out
cmd =[]
fname="md.NPT."+str(num)
if num==1:
cmd = [#"source /usr/gapps/mummi/lassen/amber18/amber.sh;",
"pmemd.cuda -O", "-i npt.in", "-p amber.prmtop", "-c md.NVT.rst",
"-ref md.NVT.rst", "-r "+fname+".rst", "-x "+fname+".mdcrd", "-inf "+fname+".info",
"-l "+fname+".log", "-o "+fname+".out"
]
elif num==2:
cmd = [#"source /usr/gapps/mummi/lassen/amber18/amber.sh;",
"pmemd.cuda -O", "-i npt.in", "-p amber.prmtop", "-c md.NPT.1.rst",
"-ref md.NPT.1.rst", "-r md.0.rst", "-x "+fname+".mdcrd", "-inf "+fname+".info",
"-l "+fname+".log", "-o "+fname+".out"
]
LOGGER.info("cmd = %s", " ".join(cmd))
#os.system(' '.join(cmd))
stdout = os.path.join(self.outpath, "npt.stdout")
stderr = os.path.join(self.outpath, "npt.stderr")
with open(stdout, "wb") as out, open(stderr, "wb") as err:
self._equi_process = \
Popen(' '.join(cmd), stdout=out, stderr=err, **kwargs)
sleep(5)
LOGGER.info("Process Running? %s", self._equi_process.poll())
cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", None)
LOGGER.info("CUDA_VISIBLE_DEVICES=%s", cuda_visible_devices)
#wait until process complete
self._equi_process.wait()
if num==1:
if not os.path.isfile('md.NPT.1.rst'):
raise Exception("md.NPT.1.rst is not there")
if num==2:
if not os.path.isfile('md.0.rst'):
raise Exception("md.0.rst is not there")
#utils.sys_call("cp md.NPT.rst md.0.rst")
#shutil.copy("md.NPT.rst", "md.0.rst")
# TODO backup
if self.is_local:
try:
sys_call("cp md.NPT.* md.0.rst aa_analysis_logger.log " + self.outpath)
except:
LOGGER.info('Backup files fail')
def _getRstFileIdx(self, rstfile):
filenamelist=rstfile.split(".")
if len(filenamelist) !=3 :
raise Exception("Restart file "+rstfile+ " is not named correctly")
return int(filenamelist[1])
def _needEquilibrum(self, rstFile, outFile):
if not os.path.isfile(rstFile):
return True
if not os.path.isfile(outFile):
return True
with open(outFile, 'r') as f:
for line in f:
if line[3:25]=='Final Performance Info':
return False
return True
def mdrun(self):
# copy the require files to setup the calculations
LOGGER.info("Start AMBER ....")
if self.is_local:
self.setup()
# run the equilibrum - NVT, NPT1, and NPT2 if it is not run yet
#if not os.path.isfile('md.NVT.rst'):
if self._needEquilibrum('md.NVT.rst', 'md.NVT.out'):
self.nvt()
#if not os.path.isfile('md.NPT.1.rst'):
if self._needEquilibrum('md.NPT.1.rst', 'md.NPT.1.out'):
self.npt(1)
#if not os.path.isfile('md.0.rst'):
if self._needEquilibrum('md.0.rst', 'md.NPT.2.out'):
self.npt(2)
# restart file are named md.0.rst md.1.rst md.2.rst ...
rstlist=glob.glob("md.[0-9]*.rst")
LOGGER.info("MD restart file list {}".format(rstlist))
idxList=[self._getRstFileIdx(item) for item in rstlist]
idxList.sort()
LOGGER.info("MD file index list {}".format(idxList))
#find out the good rst file to restart MD
while len(idxList)>0:
rstfileidx = idxList.pop()
rstfile="md."+str(rstfileidx)+".rst"
if not os.path.isfile(rstfile):
LOGGER.info("Restart file md."+str(rstfileidx)+".rst is not exsited")
else:
LOGGER.info("Use restart file md."+str(rstfileidx)+".rst for simulation")
break
else:
raise Exception("idxList is empty after check rstart files")
preIdx_int = rstfileidx
preIdx=str(preIdx_int)
curIdx=str(int(preIdx)+1)
# these variables needed to be set for aa_analysis.
# before the check of MD has reach the max time step in the previous run
self.trajname="md."+curIdx+".mdcrd"
self.trajnochamber="md."+curIdx+".nochamber.mdcrd"
self.toponame="gro2amber.gro"
self.trajtype="TRJ"
self.curIdx=curIdx
#check if MD has reach the max time step in the previous run
if preIdx != "0":
prv_simtime = self.get_simtime(preIdx)
LOGGER.info("Prevous simulation time step - {}".format(prv_simtime))
if prv_simtime > self.get_maxsimtime():
LOGGER.info("Previous total time step - {} exceeds max value - {}, simulation STOP".format(prv_simtime,self.get_maxsimtime()))
# if the current simulation is skip due to reaching max value then self.curIdx should be set to preDix
self.curIdx = preIdx
return
# check the local trajectories
if preIdx_int > 0:
for i in range(preIdx_int):
traj_local = "md."+str(i+1)+".mdcrd"
traj_name=self.outpath+"/"+traj_local
if os.path.isfile(traj_name):
if not os.path.isfile(traj_local): # skip if local already has traj
sys_call("ln -s "+traj_name+" .")
else:
LOGGER.info("Previous trajectory file "+traj_name+" is not exsited")
raise Exception("Previous trajectory file "+traj_name+" is not exsited")
#pprint(os.environ)
LOGGER.info("mdrun Starting AMBER MD Production ....")
## MD production
kwargs = self.popenenv
AAinput.md_input(self.md_nstlim)
amber_exe=shutil.which("pmemd.cuda")
LOGGER.info("AMBER executable " + amber_exe )
LOGGER.info("AMBER max simulation time steps " + str(self.get_maxsimtime()))
# pmemd.cuda -O -i md.in -p amber.prmtop-c md.${PREV}.rst -r md.${THIS}.rst -x md.${THIS}.mdcrd -inf md.${THIS}.info -l md.${THIS}.log -o md.${THIS}.out
cmd = [#"source /usr/gapps/mummi/lassen/amber18/amber.sh;",
"pmemd.cuda -O", "-i md.in", "-p amber.prmtop", "-c md."+preIdx+".rst",
"-ref md."+preIdx+".rst", "-r md."+curIdx+".rst", "-x md."+curIdx+".mdcrd", "-inf md."+curIdx+".info",
"-l md."+curIdx+".log", "-o md."+curIdx+".out"
]
LOGGER.info("cmd = %s", " ".join(cmd))
#os.system(' '.join(cmd))
self._md_process = Popen(' '.join(cmd), **kwargs)
sleep(5)
LOGGER.info("Process Running? %s", self._md_process.poll())
cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", None)
LOGGER.info("CUDA_VISIBLE_DEVICES=%s", cuda_visible_devices)
if self._md_process.poll():
out, err = self._md_process.communicate()
LOGGER.info("---------------- AMBER MD stdout --------------\n%s", out)
LOGGER.error("---------------- AMBER MD stderr --------------\n%s", err)
def backup(self):
LOGGER.info("Backup the MD simulation data ....")
'''
#LOGGER.debug('checking for md_process: {}'.format(self._md_process.poll()))
LOGGER.debug("AAsimulation.backup Process Running? %s", self._md_process.poll())
while self._md_process.poll() is None:
# sleep for 5 minutes
sleep(300)
'''
try:
sys_call("cp md."+self.curIdx+".* aa_analysis_logger.log " + self.outpath)
#utils.sys_call("cp md."+self.curIdx+".??? aa_analysis_logger.log " + self.outpath)
#utils.sys_call("rsync --append md." + self.curIdx + ".mdcrd " + self.outpath)
except:
LOGGER.info('Backup md.' + self.curIdx +'.* files fail')
def mdcrd_checkstate(self):
LOGGER.info("Check if " + self.trajname + " is ready for analysis")
time_limit=0
# check if trajectory is written
while not os.path.isfile(self.trajname):
if time_limit>20:
LOGGER.error("Trajectory {} is missing".format(self.trajname))
raise Exception("Trajectory {} is missing".format(self.trajname))
sleep(60)
time_limit=time_limit+1
if os.path.isfile(self.trajname):
"""
# check if trajectory has frame by file size assume size of frame > 1 MB
file_size = os.path.getsize(self.trajname)/1024.0/1024.0 # in MB
time_limit=0
while file_size < 1.0:
if time_limit>20:
break
sleep(60)
file_size = os.path.getsize(self.trajname) / 1024.0 / 1024.0
time_limit=time_limit+1
"""
# check if trajectory is corrupted
time_limit = 0
while True:
try:
aa = mda.Universe(self.toponame, self.trajname, format='NCDF', dt=100)
break
except:
time_limit = time_limit + 1
if time_limit > 20:
LOGGER.error("Trajectory {} is corrupted".format(self.trajname))
raise Exception("Trajectory {} is corrupted".format(self.trajname))
sleep(60)
# check if the trajectory has been updated
curr_filedate = os.stat(self.trajname)[8]
time_limit = 0
while curr_filedate == self.prev_filedate:
if time_limit > 20:
break
sleep(60)
time_limit = time_limit + 1
self.prev_filedate = curr_filedate
# backup file at backup_rate
if self.is_local:
if self.backup_count % self.backup_rate == 0:
self.backup()
self.backup_count = self.backup_count + 1
# end of backup
# If reach the max sim time step, kill the simulation process
cur_simtime=self.get_current_simtime()
LOGGER.info("Current simulation time step - "+str(cur_simtime))
if cur_simtime > self.get_maxsimtime():
LOGGER.info("Current time step - "+str(cur_simtime)+" exceed max value - "+ str(self.get_maxsimtime()))
self._md_process.kill()
# Don't need nochamber
'''
inputs = """# cpptraj script to write as NoChamber
parm amber.prmtop
reference amber.inpcrd
trajin {}
trajout {}
""".format(self.trajname, self.trajnochamber)
with open('mdcrd_checkstate.in', 'w') as f:
f.write(inputs)
while time_limit < 20:
try:
time_limit = time_limit + 1
utils.sys_call("cpptraj -p amber.prmtop -i mdcrd_checkstate.in > mdcrd_checkstate.log")
# backup file at backup_rate
if self.is_local:
if self.backup_count % self.backup_rate == 0:
self.backup()
self.backup_count = self.backup_count+1
# end of backup
break
# make sure
if self.get_simtime() > self.get_maxsimtime():
self._md_process.kill()
except:
sleep(60)
'''
'''
if not os.path.isfile(self.trajnochamber):
raise Exception("AAsimulation.mdcrd_checkstate {} is not there".format(self.trajnochamber))
'''
def running(self):
running = bool(
self._md_process is not None
and self._md_process.poll() is not None
and self._md_process.poll() != 0
)
return running
def stop(self):
if self.running():
self._md_process.kill()
def get_trajname(self):
return self.trajname
def get_trajnochamber(self):
return self.trajnochamber
def get_toponame(self):
return self.toponame
def get_trajtype(self):
return self.trajtype
def get_process(self):
return self._md_process
def get_islocal(self):
return self.is_local
def get_current_simtime(self):
cur_simtime=self.get_simtime(self.curIdx)
return cur_simtime
def get_simtime(self, idxStr):
simtime = []
with open("md."+idxStr+".out", 'r') as f:
for line in f:
if line[1:8] == "NSTEP =":
#print(line[30:44])
simtime.append(int(float(line[30:44]) * 250)-self.nvt_nstlim-self.npt_nstlim*2)
if len(simtime) > 0:
return simtime[-1]
return 0
def set_backuprate(self, rate):
self.backup_rate=rate
def set_maxsimtime(self, max_simtime):
self.max_simtime=max_simtime
def get_maxsimtime(self):
return self.max_simtime
| <filename>mummi_ras/online/aa/aa_simulation.py
from subprocess import PIPE, Popen
import os
from pprint import pprint
from time import sleep
import glob
import shutil
from mummi_ras.online.aa.aa_siminputs import AAinput
from mummi_core.utils.utilities import sys_call
import MDAnalysis as mda
from logging import getLogger
LOGGER = getLogger(__name__)
class AAsimulation():
def __init__(self, outpath, locpath):
# Logistical variables
self.popenenv = {
"shell": True,
"universal_newlines": True,
"cwd": '.',
#"env": os.environ,
}
self.is_local = True
if outpath == locpath:
self.is_local = False
self._equi_process = None # Initialize the process to None
self._md_process = None # Initialize the process to None
self.trajname=""
self.toponame="gro2amber.gro"
self.trajtype="TRJ"
self.outpath=outpath
self.curIdx = "0"
self.prev_filedate=0
self.backup_count=0
self.backup_rate=5
# nstlim (number of step limit) of each simulation run
self.nvt_nstlim=125000
self.npt_nstlim=250000 #npt is separated into two runs
# Set the max simtime of each sub-md sim very large - each sim will be stopped by the workflow
self.md_nstlim=12500000 # 50 ns, 1500000 6 ns
# max simulation time 12500000 * 4 fs = 50 ns
self.max_simtime=12500000
def setup(self):
# TODO copy the required files to current working place
LOGGER.info("Starting AMBER setup ....")
#utils.sys_call("cp " + self.outpath + "/amber.* .")
shutil.copyfile(self.outpath + "/amber.prmtop", "amber.prmtop")
shutil.copyfile(self.outpath + "/amber.inpcrd", "amber.inpcrd")
shutil.copyfile(self.outpath + "/gro2amber.gro", "gro2amber.gro")
try:
sys_call("cp " + self.outpath + "/md.*.rst .")
sys_call("cp " + self.outpath + "/md.*.out .")
except:
LOGGER.info('No restart files are there. This is a new simulation')
## NVT
def nvt(self):
#pprint(os.environ)
LOGGER.info("Starting NVT equilibrium ....")
kwargs = self.popenenv
amber_exe=shutil.which("pmemd.cuda")
LOGGER.info("AMBER executable " + amber_exe )
AAinput.nvt_input(self.nvt_nstlim)
# pmemd.cuda -O -i nvt.in -p amber.prmtop -c amber.inpcrd -ref amber.inpcrd \
# -r md.NVT.rst -x md.NVT.mdcrd -inf md.NVT.info -l md.NVT.log -o md.NVT.out
cmd = [#"source /usr/gapps/mummi/lassen/amber18/amber.sh;",
#"/usr/gapps/mummi/lassen/amber18/bin/pmemd.cuda -O",
"pmemd.cuda -O", "-i nvt.in", "-p amber.prmtop", "-c amber.inpcrd",
"-ref amber.inpcrd", "-r md.NVT.rst", "-x md.NVT.mdcrd", "-inf md.NVT.info",
"-l md.NVT.log", "-o md.NVT.out"
]
LOGGER.info("cmd = %s", " ".join(cmd))
#self._equi_process = Popen(cmd, **kwargs)
stdout = os.path.join(self.outpath, "nvt.stderr")
stderr = os.path.join(self.outpath, "nvt.stdout")
with open(stdout, "wb") as out, open(stderr, "wb") as err:
self._equi_process = \
Popen(' '.join(cmd), stdout=out, stderr=err, **kwargs)
sleep(20)
LOGGER.info("Process Running? %s", self._equi_process.poll())
cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", None)
LOGGER.info("CUDA_VISIBLE_DEVICES=%s", cuda_visible_devices)
#wait until process complete
self._equi_process.wait()
if not os.path.isfile('md.NVT.rst'):
raise Exception("md.NVT.rst is not there")
# backup the calcualtion
if self.is_local:
try:
sys_call("cp md.NVT.* aa_analysis_logger.log " + self.outpath)
except:
LOGGER.info('Backup files fail')
## NPT
def npt(self, num):
LOGGER.info("Starting NPT "+str(num)+" equilibrium ....")
kwargs = self.popenenv
amber_exe=shutil.which("pmemd.cuda")
LOGGER.info("AMBER executable " + amber_exe )
AAinput.npt_input(self.npt_nstlim)
# pmemd.cuda -O -i npt.in -p amber.prmtop -c md.NVT.rst -ref md.NVT.rst \
# -r md.NPT.rst -x md.NPT.mdcrd -inf md.NPT.info -l md.NPT.log -o md.NPT.out
cmd =[]
fname="md.NPT."+str(num)
if num==1:
cmd = [#"source /usr/gapps/mummi/lassen/amber18/amber.sh;",
"pmemd.cuda -O", "-i npt.in", "-p amber.prmtop", "-c md.NVT.rst",
"-ref md.NVT.rst", "-r "+fname+".rst", "-x "+fname+".mdcrd", "-inf "+fname+".info",
"-l "+fname+".log", "-o "+fname+".out"
]
elif num==2:
cmd = [#"source /usr/gapps/mummi/lassen/amber18/amber.sh;",
"pmemd.cuda -O", "-i npt.in", "-p amber.prmtop", "-c md.NPT.1.rst",
"-ref md.NPT.1.rst", "-r md.0.rst", "-x "+fname+".mdcrd", "-inf "+fname+".info",
"-l "+fname+".log", "-o "+fname+".out"
]
LOGGER.info("cmd = %s", " ".join(cmd))
#os.system(' '.join(cmd))
stdout = os.path.join(self.outpath, "npt.stdout")
stderr = os.path.join(self.outpath, "npt.stderr")
with open(stdout, "wb") as out, open(stderr, "wb") as err:
self._equi_process = \
Popen(' '.join(cmd), stdout=out, stderr=err, **kwargs)
sleep(5)
LOGGER.info("Process Running? %s", self._equi_process.poll())
cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", None)
LOGGER.info("CUDA_VISIBLE_DEVICES=%s", cuda_visible_devices)
#wait until process complete
self._equi_process.wait()
if num==1:
if not os.path.isfile('md.NPT.1.rst'):
raise Exception("md.NPT.1.rst is not there")
if num==2:
if not os.path.isfile('md.0.rst'):
raise Exception("md.0.rst is not there")
#utils.sys_call("cp md.NPT.rst md.0.rst")
#shutil.copy("md.NPT.rst", "md.0.rst")
# TODO backup
if self.is_local:
try:
sys_call("cp md.NPT.* md.0.rst aa_analysis_logger.log " + self.outpath)
except:
LOGGER.info('Backup files fail')
def _getRstFileIdx(self, rstfile):
filenamelist=rstfile.split(".")
if len(filenamelist) !=3 :
raise Exception("Restart file "+rstfile+ " is not named correctly")
return int(filenamelist[1])
def _needEquilibrum(self, rstFile, outFile):
if not os.path.isfile(rstFile):
return True
if not os.path.isfile(outFile):
return True
with open(outFile, 'r') as f:
for line in f:
if line[3:25]=='Final Performance Info':
return False
return True
def mdrun(self):
# copy the require files to setup the calculations
LOGGER.info("Start AMBER ....")
if self.is_local:
self.setup()
# run the equilibrum - NVT, NPT1, and NPT2 if it is not run yet
#if not os.path.isfile('md.NVT.rst'):
if self._needEquilibrum('md.NVT.rst', 'md.NVT.out'):
self.nvt()
#if not os.path.isfile('md.NPT.1.rst'):
if self._needEquilibrum('md.NPT.1.rst', 'md.NPT.1.out'):
self.npt(1)
#if not os.path.isfile('md.0.rst'):
if self._needEquilibrum('md.0.rst', 'md.NPT.2.out'):
self.npt(2)
# restart file are named md.0.rst md.1.rst md.2.rst ...
rstlist=glob.glob("md.[0-9]*.rst")
LOGGER.info("MD restart file list {}".format(rstlist))
idxList=[self._getRstFileIdx(item) for item in rstlist]
idxList.sort()
LOGGER.info("MD file index list {}".format(idxList))
#find out the good rst file to restart MD
while len(idxList)>0:
rstfileidx = idxList.pop()
rstfile="md."+str(rstfileidx)+".rst"
if not os.path.isfile(rstfile):
LOGGER.info("Restart file md."+str(rstfileidx)+".rst is not exsited")
else:
LOGGER.info("Use restart file md."+str(rstfileidx)+".rst for simulation")
break
else:
raise Exception("idxList is empty after check rstart files")
preIdx_int = rstfileidx
preIdx=str(preIdx_int)
curIdx=str(int(preIdx)+1)
# these variables needed to be set for aa_analysis.
# before the check of MD has reach the max time step in the previous run
self.trajname="md."+curIdx+".mdcrd"
self.trajnochamber="md."+curIdx+".nochamber.mdcrd"
self.toponame="gro2amber.gro"
self.trajtype="TRJ"
self.curIdx=curIdx
#check if MD has reach the max time step in the previous run
if preIdx != "0":
prv_simtime = self.get_simtime(preIdx)
LOGGER.info("Prevous simulation time step - {}".format(prv_simtime))
if prv_simtime > self.get_maxsimtime():
LOGGER.info("Previous total time step - {} exceeds max value - {}, simulation STOP".format(prv_simtime,self.get_maxsimtime()))
# if the current simulation is skip due to reaching max value then self.curIdx should be set to preDix
self.curIdx = preIdx
return
# check the local trajectories
if preIdx_int > 0:
for i in range(preIdx_int):
traj_local = "md."+str(i+1)+".mdcrd"
traj_name=self.outpath+"/"+traj_local
if os.path.isfile(traj_name):
if not os.path.isfile(traj_local): # skip if local already has traj
sys_call("ln -s "+traj_name+" .")
else:
LOGGER.info("Previous trajectory file "+traj_name+" is not exsited")
raise Exception("Previous trajectory file "+traj_name+" is not exsited")
#pprint(os.environ)
LOGGER.info("mdrun Starting AMBER MD Production ....")
## MD production
kwargs = self.popenenv
AAinput.md_input(self.md_nstlim)
amber_exe=shutil.which("pmemd.cuda")
LOGGER.info("AMBER executable " + amber_exe )
LOGGER.info("AMBER max simulation time steps " + str(self.get_maxsimtime()))
# pmemd.cuda -O -i md.in -p amber.prmtop-c md.${PREV}.rst -r md.${THIS}.rst -x md.${THIS}.mdcrd -inf md.${THIS}.info -l md.${THIS}.log -o md.${THIS}.out
cmd = [#"source /usr/gapps/mummi/lassen/amber18/amber.sh;",
"pmemd.cuda -O", "-i md.in", "-p amber.prmtop", "-c md."+preIdx+".rst",
"-ref md."+preIdx+".rst", "-r md."+curIdx+".rst", "-x md."+curIdx+".mdcrd", "-inf md."+curIdx+".info",
"-l md."+curIdx+".log", "-o md."+curIdx+".out"
]
LOGGER.info("cmd = %s", " ".join(cmd))
#os.system(' '.join(cmd))
self._md_process = Popen(' '.join(cmd), **kwargs)
sleep(5)
LOGGER.info("Process Running? %s", self._md_process.poll())
cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", None)
LOGGER.info("CUDA_VISIBLE_DEVICES=%s", cuda_visible_devices)
if self._md_process.poll():
out, err = self._md_process.communicate()
LOGGER.info("---------------- AMBER MD stdout --------------\n%s", out)
LOGGER.error("---------------- AMBER MD stderr --------------\n%s", err)
def backup(self):
    """Copy the current run's md.<curIdx>.* files and the analysis log to
    the backup directory (``self.outpath``).

    Best effort: a failed copy is logged and swallowed so a backup problem
    never interrupts the running simulation.
    """
    LOGGER.info("Backup the MD simulation data ....")
    try:
        sys_call("cp md." + self.curIdx + ".* aa_analysis_logger.log " + self.outpath)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; the copy itself stays best-effort.
        LOGGER.info('Backup md.' + self.curIdx + '.* files fail')
def mdcrd_checkstate(self):
    """Block until the current trajectory file exists, parses cleanly and
    has changed since the last poll; back it up periodically and kill the
    MD process once the configured maximum time step is exceeded.

    Each phase polls at one-minute intervals with a ~20-iteration budget.
    Raises Exception when the trajectory never appears or stays unreadable.
    """
    LOGGER.info("Check if " + self.trajname + " is ready for analysis")
    time_limit = 0
    # check if trajectory is written
    while not os.path.isfile(self.trajname):
        if time_limit > 20:
            LOGGER.error("Trajectory {} is missing".format(self.trajname))
            raise Exception("Trajectory {} is missing".format(self.trajname))
        sleep(60)
        time_limit = time_limit + 1
    if os.path.isfile(self.trajname):
        """
        # check if trajectory has frame by file size assume size of frame > 1 MB
        file_size = os.path.getsize(self.trajname)/1024.0/1024.0 # in MB
        time_limit=0
        while file_size < 1.0:
        if time_limit>20:
        break
        sleep(60)
        file_size = os.path.getsize(self.trajname) / 1024.0 / 1024.0
        time_limit=time_limit+1
        """
        # check if trajectory is corrupted
        time_limit = 0
        while True:
            try:
                # A successful Universe load means the NetCDF trajectory is
                # complete enough to parse; the object itself is discarded.
                aa = mda.Universe(self.toponame, self.trajname, format='NCDF', dt=100)
                break
            except:
                time_limit = time_limit + 1
                if time_limit > 20:
                    LOGGER.error("Trajectory {} is corrupted".format(self.trajname))
                    raise Exception("Trajectory {} is corrupted".format(self.trajname))
                sleep(60)
        # check if the trajectory has been updated
        # (os.stat(...)[8] is st_mtime, the file's modification time)
        curr_filedate = os.stat(self.trajname)[8]
        time_limit = 0
        # NOTE(review): curr_filedate is not re-read inside this loop, so it
        # waits at most ~20 minutes and then proceeds even if the file never
        # changed — confirm this is intentional.
        while curr_filedate == self.prev_filedate:
            if time_limit > 20:
                break
            sleep(60)
            time_limit = time_limit + 1
        self.prev_filedate = curr_filedate
        # backup file at backup_rate
        if self.is_local:
            if self.backup_count % self.backup_rate == 0:
                self.backup()
            self.backup_count = self.backup_count + 1
        # end of backup
        # If reach the max sim time step, kill the simulation process
        cur_simtime = self.get_current_simtime()
        LOGGER.info("Current simulation time step - " + str(cur_simtime))
        if cur_simtime > self.get_maxsimtime():
            LOGGER.info("Current time step - " + str(cur_simtime) + " exceed max value - " + str(self.get_maxsimtime()))
            self._md_process.kill()
    # Don't need nochamber
    '''
    inputs = """# cpptraj script to write as NoChamber
    parm amber.prmtop
    reference amber.inpcrd
    trajin {}
    trajout {}
    """.format(self.trajname, self.trajnochamber)
    with open('mdcrd_checkstate.in', 'w') as f:
    f.write(inputs)
    while time_limit < 20:
    try:
    time_limit = time_limit + 1
    utils.sys_call("cpptraj -p amber.prmtop -i mdcrd_checkstate.in > mdcrd_checkstate.log")
    # backup file at backup_rate
    if self.is_local:
    if self.backup_count % self.backup_rate == 0:
    self.backup()
    self.backup_count =
    self.backup_count+1
    # end of backup
    break
    # make sure
    if self.get_simtime() > self.get_maxsimtime():
    self._md_process.kill()
    except:
    sleep(60)
    '''
    '''
    if not os.path.isfile(self.trajnochamber):
    raise Exception("AAsimulation.mdcrd_checkstate {} is not there".format(self.trajnochamber))
    '''
def running(self):
    """Return True while the MD production subprocess is still alive.

    BUG FIX: Popen.poll() returns None while the process is running and the
    exit code once it has terminated. The previous test
    (``poll() is not None and poll() != 0``) was therefore "exited with a
    non-zero status", so ``stop()`` could never kill a live process.
    """
    return (
        self._md_process is not None
        and self._md_process.poll() is None
    )
def stop(self):
    """Kill the MD production subprocess if it is still active."""
    if not self.running():
        return
    self._md_process.kill()
def get_trajname(self):
    """Return the trajectory file name of the current run (md.<idx>.mdcrd)."""
    return self.trajname

def get_trajnochamber(self):
    """Return the 'nochamber' trajectory file name (md.<idx>.nochamber.mdcrd)."""
    return self.trajnochamber

def get_toponame(self):
    """Return the topology file name used for analysis."""
    return self.toponame

def get_trajtype(self):
    """Return the trajectory format label (set to "TRJ" by mdrun)."""
    return self.trajtype

def get_process(self):
    """Return the Popen handle of the MD production process."""
    return self._md_process

def get_islocal(self):
    """Return whether the run uses local storage (enables periodic backups)."""
    return self.is_local

def get_current_simtime(self):
    """Return the latest time step parsed from the current run's md.<idx>.out."""
    cur_simtime = self.get_simtime(self.curIdx)
    return cur_simtime
def get_simtime(self, idxStr):
    """Return the last MD time step recorded in md.<idxStr>.out.

    Parses the fixed-column "NSTEP =" lines of the AMBER output, converts
    the reported time (columns 30:44, times 250) to steps, subtracts the
    equilibration steps (NVT + 2x NPT) and returns the final value, or 0
    when no NSTEP line is present.
    """
    offset = self.nvt_nstlim + self.npt_nstlim * 2
    last_step = 0
    with open("md." + idxStr + ".out", 'r') as outfile:
        for raw in outfile:
            if raw[1:8] == "NSTEP =":
                last_step = int(float(raw[30:44]) * 250) - offset
    return last_step
def set_backuprate(self, rate):
    """Set how often (every N checkstate polls) the outputs are backed up."""
    self.backup_rate = rate

def set_maxsimtime(self, max_simtime):
    """Set the maximum number of MD time steps before the run is stopped."""
    self.max_simtime = max_simtime

def get_maxsimtime(self):
    """Return the configured maximum simulation time step."""
    return self.max_simtime
| en | 0.420006 | # Logistical variables #"env": os.environ, # Initialize the process to None # Initialize the process to None # nstlim (number of step limit) of each simulation run #npt is separated into two runs # Set the max simtime of each sub-md sim very large - each sim will be stopped by the workflow # 50 ns, 1500000 6 ns # max simulation time 12500000 * 4 fs = 50 ns # TODO copy the required files to current working place #utils.sys_call("cp " + self.outpath + "/amber.* .") ## NVT #pprint(os.environ) # pmemd.cuda -O -i nvt.in -p amber.prmtop -c amber.inpcrd -ref amber.inpcrd \ # -r md.NVT.rst -x md.NVT.mdcrd -inf md.NVT.info -l md.NVT.log -o md.NVT.out #"source /usr/gapps/mummi/lassen/amber18/amber.sh;", #"/usr/gapps/mummi/lassen/amber18/bin/pmemd.cuda -O", #self._equi_process = Popen(cmd, **kwargs) #wait until process complete # backup the calcualtion ## NPT # pmemd.cuda -O -i npt.in -p amber.prmtop -c md.NVT.rst -ref md.NVT.rst \ # -r md.NPT.rst -x md.NPT.mdcrd -inf md.NPT.info -l md.NPT.log -o md.NPT.out #"source /usr/gapps/mummi/lassen/amber18/amber.sh;", #"source /usr/gapps/mummi/lassen/amber18/amber.sh;", #os.system(' '.join(cmd)) #wait until process complete #utils.sys_call("cp md.NPT.rst md.0.rst") #shutil.copy("md.NPT.rst", "md.0.rst") # TODO backup # copy the require files to setup the calculations # run the equilibrum - NVT, NPT1, and NPT2 if it is not run yet #if not os.path.isfile('md.NVT.rst'): #if not os.path.isfile('md.NPT.1.rst'): #if not os.path.isfile('md.0.rst'): # restart file are named md.0.rst md.1.rst md.2.rst ... #find out the good rst file to restart MD # these variables needed to be set for aa_analysis. 
# before the check of MD has reach the max time step in the previous run #check if MD has reach the max time step in the previous run # if the current simulation is skip due to reaching max value then self.curIdx should be set to preDix # check the local trajectories # skip if local already has traj #pprint(os.environ) ## MD production # pmemd.cuda -O -i md.in -p amber.prmtop-c md.${PREV}.rst -r md.${THIS}.rst -x md.${THIS}.mdcrd -inf md.${THIS}.info -l md.${THIS}.log -o md.${THIS}.out #"source /usr/gapps/mummi/lassen/amber18/amber.sh;", #os.system(' '.join(cmd)) #LOGGER.debug('checking for md_process: {}'.format(self._md_process.poll())) LOGGER.debug("AAsimulation.backup Process Running? %s", self._md_process.poll()) while self._md_process.poll() is None: # sleep for 5 minutes sleep(300) #utils.sys_call("cp md."+self.curIdx+".??? aa_analysis_logger.log " + self.outpath) #utils.sys_call("rsync --append md." + self.curIdx + ".mdcrd " + self.outpath) # check if trajectory is written # check if trajectory has frame by file size assume size of frame > 1 MB file_size = os.path.getsize(self.trajname)/1024.0/1024.0 # in MB time_limit=0 while file_size < 1.0: if time_limit>20: break sleep(60) file_size = os.path.getsize(self.trajname) / 1024.0 / 1024.0 time_limit=time_limit+1 # check if trajectory is corrupted # check if the trajectory has been updated # backup file at backup_rate # end of backup # If reach the max sim time step, kill the simulation process # Don't need nochamber inputs = """# cpptraj script to write as NoChamber parm amber.prmtop reference amber.inpcrd trajin {} trajout {} """.format(self.trajname, self.trajnochamber) with open('mdcrd_checkstate.in', 'w') as f: f.write(inputs) while time_limit < 20: try: time_limit = time_limit + 1 utils.sys_call("cpptraj -p amber.prmtop -i mdcrd_checkstate.in > mdcrd_checkstate.log") # backup file at backup_rate if self.is_local: if self.backup_count % self.backup_rate == 0: self.backup() self.backup_count = 
self.backup_count+1 # end of backup break # make sure if self.get_simtime() > self.get_maxsimtime(): self._md_process.kill() except: sleep(60) if not os.path.isfile(self.trajnochamber): raise Exception("AAsimulation.mdcrd_checkstate {} is not there".format(self.trajnochamber)) #print(line[30:44]) | 2.050936 | 2 |
app/main.py | suryatejreddy/SPARQL-Endpoint | 0 | 6615556 | from flask import Flask, request, Response
from rdflib import Graph, plugin
import sys
from config import GRAPH_FILE, GRAPH_FORMAT
app = Flask(__name__)
def resultformat_to_mime(format):
    """Map an output-format keyword to its SPARQL response MIME type.

    Unknown keywords fall back to plain text.
    """
    mime_by_format = {
        "xml": "application/sparql-results+xml",
        "json": "application/sparql-results+json",
        "html": "text/html",
    }
    return mime_by_format.get(format, "text/plain")
def get_format_and_mimetype(accept_headers, output_format):
    """Resolve the response serialization format and MIME type.

    When no explicit ``output`` format was requested, content-negotiate it
    from the Accept header (default "xml"). The ``force-accept`` request
    value may override the final MIME type.

    Returns a (output_format, mimetype) tuple.
    """
    if not output_format:
        output_format = "xml"
        # BUG FIX: these checks referenced an undefined name `a`, raising
        # NameError whenever no output format was given; they must inspect
        # the Accept header passed in.
        if "text/html" in accept_headers:
            output_format = "html"
        if "application/sparql-results+json" in accept_headers:
            output_format = "json"
    mimetype = resultformat_to_mime(output_format)
    mimetype = request.values.get("force-accept", mimetype)
    return output_format, mimetype
@app.route('/sparql')
def sparql_endpoint():
    """Serve SPARQL queries against the module-level RDF graph.

    Reads the query from the ``query`` GET parameter, negotiates the
    response format from the Accept header / ``output`` parameter, and
    returns the serialized result set with the matching Content-Type.

    NOTE(review): ``graph`` is only created inside the ``__main__`` guard
    below, so this view fails with a NameError when the app is served by a
    WSGI runner instead of ``python main.py`` — confirm intended deployment.
    """
    query = request.args.get('query')
    accept_headers = request.headers["Accept"]
    output_format = request.values.get("output", None)
    output_format, mimetype = get_format_and_mimetype(accept_headers, output_format)
    # Execute the query and serialize it in the negotiated format.
    res = graph.query(query).serialize(format = output_format)
    response = Response(res)
    response.headers["Content-Type"] = mimetype
    return response
if __name__ == "__main__":
    # Load the RDF graph once at startup, then serve on all interfaces.
    graph = Graph()
    graph.parse(GRAPH_FILE, format=GRAPH_FORMAT)
    app.run(host='0.0.0.0',port=5001)
| from flask import Flask, request, Response
from rdflib import Graph, plugin
import sys
from config import GRAPH_FILE, GRAPH_FORMAT
app = Flask(__name__)
def resultformat_to_mime(format):
if format=='xml': return "application/sparql-results+xml"
if format=='json': return "application/sparql-results+json"
if format=='html': return "text/html"
return "text/plain"
def get_format_and_mimetype(accept_headers, output_format):
    """Resolve the response serialization format and MIME type.

    When no explicit ``output`` format was requested, content-negotiate it
    from the Accept header (default "xml"). The ``force-accept`` request
    value may override the final MIME type.

    Returns a (output_format, mimetype) tuple.
    """
    if not output_format:
        output_format = "xml"
        # BUG FIX: these checks referenced an undefined name `a`, raising
        # NameError whenever no output format was given; they must inspect
        # the Accept header passed in.
        if "text/html" in accept_headers:
            output_format = "html"
        if "application/sparql-results+json" in accept_headers:
            output_format = "json"
    mimetype = resultformat_to_mime(output_format)
    mimetype = request.values.get("force-accept", mimetype)
    return output_format, mimetype
@app.route('/sparql')
def sparql_endpoint():
query = request.args.get('query')
accept_headers = request.headers["Accept"]
output_format = request.values.get("output", None)
output_format, mimetype = get_format_and_mimetype(accept_headers, output_format)
res = graph.query(query).serialize(format = output_format)
response = Response(res)
response.headers["Content-Type"] = mimetype
return response
if __name__ == "__main__":
graph = Graph()
graph.parse(GRAPH_FILE, format=GRAPH_FORMAT)
app.run(host='0.0.0.0',port=5001)
| none | 1 | 2.6912 | 3 | |
backend/berkeleytime/management/commands/course.py | Boomaa23/berkeleytime | 21 | 6615557 | <reponame>Boomaa23/berkeleytime
"""Update canonical course data, run nightly.
Pull data from SIS Course API to update our database.
Information pertains to a canonical course across semesters.
"""
from django.core.management.base import BaseCommand
from catalog.service import course_service
class Command(BaseCommand):
    """python manage.py course."""

    def add_arguments(self, parser):
        """Register the paging options for the course update."""
        for flag, default, help_text in (
            ("--page-number", 0, "Page number for paged course update"),
            ("--page-size", 100, "Number of courses per page"),
        ):
            parser.add_argument(
                flag,
                action='store',
                default=default,
                help=help_text,
            )

    def handle(self, *args, **options):
        """Run the nightly canonical-course update with the given paging."""
        print('Running python3 manage.py course')
        course_service.update(
            page_number=options['page_number'],
            page_size=options['page_size']
        )
| """Update canonical course data, run nightly.
Pull data from SIS Course API to update our database.
Information pertains to a canonical course across semesters.
"""
from django.core.management.base import BaseCommand
from catalog.service import course_service
class Command(BaseCommand):
"""python manage.py course."""
def add_arguments(self, parser):
parser.add_argument(
'--page-number',
action='store',
default=0,
help="Page number for paged course update",
)
parser.add_argument(
'--page-size',
action='store',
default=100,
help="Number of courses per page",
)
def handle(self, *args, **options):
print('Running python3 manage.py course')
course_service.update(
page_number=options['page_number'],
page_size=options['page_size']
) | en | 0.783714 | Update canonical course data, run nightly. Pull data from SIS Course API to update our database. Information pertains to a canonical course across semesters. python manage.py course. | 2.410908 | 2 |
sql_to_django/lib.py | rimi-dev/sql_to_orm | 1 | 6615558 | import re
from common.lib import list_whitespace_remove, list_to_dict
class Table:
    """
    Table class
    Author : rimi
    Date : 2020. 05. 27
    Description : get/set main table, get/set joined tables

    Shared class-level registry: the main (FROM) table, its optional alias,
    and the list of joined tables accumulate on the class itself.
    """
    main_named_table = ''
    main_table = ''
    joined_table = []

    @classmethod
    def update_main_table(cls, table):
        """Record the FROM table; ``table`` is [name] or [name, alias]."""
        if len(table) > 1:
            cls.main_named_table = table[1]
        cls.main_table = table[0]
        print(table[0])

    @classmethod
    def update_joined_table(cls, **kwargs):
        """Append one joined-table record (arbitrary keyword fields)."""
        print(kwargs)
        cls.joined_table.append(kwargs)

    @classmethod
    def get_main_table(cls):
        """Return the registered main table name."""
        return cls.main_table

    @classmethod
    def get_main_named_table(cls):
        """Return the registered main-table alias ('' when never set)."""
        return cls.main_named_table

    @classmethod
    def get_joined_table(cls):
        """Return the accumulated list of joined-table records."""
        return cls.joined_table
class Select:
    """Translate the SELECT ... FROM head of a SQL string into a Django
    ``<Model>.objects.values(...)`` expression and register the main table.

    NOTE(review): ``re.split(r'from', query)`` splits on the substring
    'from' anywhere (lowercase only), so identifiers containing 'from'
    would break the parse — confirm inputs are pre-normalized.
    """

    def __init__(self, query):
        # Split "select <cols> from <table> [alias]" into column/table parts.
        query = re.split(r'from', query)
        target = list_whitespace_remove(query)
        table_name = target[1].split()
        columns = target[0].split(', ')
        value_columns = ''
        Table.update_main_table(table_name)
        main_table_named = Table.get_main_named_table()
        for item in columns:
            # Only columns mentioning the main table's alias are kept;
            # a '*' select maps to values() with no arguments.
            if main_table_named in item:
                if '*' in target[0]:
                    value_columns += ''
                else:
                    value_columns += f'"{item}", '
        self._orm = f'{Table.get_main_table()}.objects.values({value_columns})'

    def get_orm(self):
        """Return the generated ORM expression string."""
        return self._orm
class Join:
    """Handlers for SQL JOIN clauses.

    NOTE(review): ``get_orm`` returns ``self._orm`` but nothing in this
    class ever assigns it, so calling it raises AttributeError; also the
    dispatch table maps "left outer join" to the class itself rather than
    to ``left_outer_join`` — confirm intent.
    """

    @classmethod
    def inner_join(cls, **kwargs):
        # kwargs['query'] starts with "<table> <alias> ..."; register the
        # joined table under its alias.
        query = kwargs['query'].split()
        Table.update_joined_table(table=query[0], named=query[1])
        print(Table.get_joined_table())

    @classmethod
    def left_outer_join(cls, **kwargs):
        # Placeholder implementation: only echoes its arguments.
        print(kwargs)

    def get_orm(self):
        return self._orm
class OrderBy:
    """Translate a SQL ORDER BY clause into a Django ``.order_by()`` call."""

    def __init__(self, query):
        tokens = query.split()
        column = tokens[0]
        alias = Table.get_main_named_table()
        # When the main table has an alias, strip the "alias." prefix.
        if alias and alias in column:
            column = column.split('.')[1]
        # A trailing DESC keyword maps to Django's "-column" syntax.
        prefix = '-' if len(tokens) > 1 and tokens[1].lower() == 'desc' else ''
        self._orm = f'.order_by("{prefix}{column}")'

    def get_orm(self):
        """Return the generated ORM fragment."""
        return self._orm
class Where:
    """Translate a SQL WHERE clause into a Django ``.filter()`` call.

    The AND/OR decomposition below is still a stub; the whole clause is
    currently embedded verbatim in the filter expression.
    """

    def __init__(self, query):
        # BUG FIX: re.split's third positional argument is maxsplit, not
        # flags — passing re.I there silently meant maxsplit=2 with a
        # case-sensitive pattern. Use the flags keyword for the intended
        # case-insensitive split.
        and_re = re.split(r'and', query, flags=re.I)
        print(and_re)
        if 'and' in query:
            pass
        if 'or' in query:
            pass
        self._orm = f'.filter({query})'

    def get_orm(self):
        """Return the generated ORM fragment."""
        return self._orm
class QueryFuncManager:
    """Dispatch SQL clause types to their translator callables."""

    _queryMappingTable = {
        "select": Select,
        "where": Where,
        "inner join": Join.inner_join,
        "left outer join": Join,
        "order by": OrderBy,
    }

    @classmethod
    def get_query(cls, contentType, *args, **kwargs):
        """Instantiate/dispatch the handler registered for *contentType*.

        Raises Exception when *contentType* is not a known clause type.
        """
        try:
            # BUG FIX: ``dict.get`` returns None for unknown keys, so the
            # KeyError branch was unreachable and callers got a confusing
            # "'NoneType' object is not callable". Indexing raises KeyError
            # as intended; the handler call is kept outside the try so its
            # own KeyErrors are not misreported as invalid content types.
            query_func = cls._queryMappingTable[contentType]
        except KeyError:
            raise Exception(f"{contentType} is invalid content type")
        return query_func(*args, **kwargs)
| import re
from common.lib import list_whitespace_remove, list_to_dict
class Table:
"""
Table class
Author : rimi
Date : 2020. 05. 27
Description : get/set main table, get/set joined tables
"""
main_named_table = ''
main_table = ''
joined_table = []
@classmethod
def update_main_table(cls, table):
if len(table) > 1:
cls.main_named_table = table[1]
cls.main_table = table[0]
print(table[0])
@classmethod
def update_joined_table(cls, **kwargs):
print(kwargs)
cls.joined_table.append(kwargs)
@classmethod
def get_main_table(cls):
return cls.main_table
@classmethod
def get_main_named_table(cls):
return cls.main_named_table
@classmethod
def get_joined_table(cls):
return cls.joined_table
class Select:
def __init__(self, query):
query = re.split(r'from', query)
target = list_whitespace_remove(query)
table_name = target[1].split()
columns = target[0].split(', ')
value_columns = ''
Table.update_main_table(table_name)
main_table_named = Table.get_main_named_table()
for item in columns:
if main_table_named in item:
if '*' in target[0]:
value_columns += ''
else:
value_columns += f'"{item}", '
self._orm = f'{Table.get_main_table()}.objects.values({value_columns})'
def get_orm(self):
return self._orm
class Join:
@classmethod
def inner_join(cls, **kwargs):
query = kwargs['query'].split()
Table.update_joined_table(table=query[0], named=query[1])
print(Table.get_joined_table())
@classmethod
def left_outer_join(cls, **kwargs):
print(kwargs)
def get_orm(self):
return self._orm
class OrderBy:
def __init__(self, query):
order_by_query = query.split()
len_order_by_query = len(order_by_query)
column = order_by_query[0]
main_table_named = Table.get_main_named_table()
if main_table_named:
# table name 이 있을경우
if main_table_named in column:
column = column.split('.')[1]
# To sort the records in descending order
sort = ''
if len_order_by_query > 1:
if order_by_query[1].lower() == 'desc':
sort = '-'
self._orm = f'.order_by("{sort}{column}")'
def get_orm(self):
return self._orm
class Where:
    """Translate a SQL WHERE clause into a Django ``.filter()`` call.

    The AND/OR decomposition below is still a stub; the whole clause is
    currently embedded verbatim in the filter expression.
    """

    def __init__(self, query):
        # BUG FIX: re.split's third positional argument is maxsplit, not
        # flags — passing re.I there silently meant maxsplit=2 with a
        # case-sensitive pattern. Use the flags keyword for the intended
        # case-insensitive split.
        and_re = re.split(r'and', query, flags=re.I)
        print(and_re)
        if 'and' in query:
            pass
        if 'or' in query:
            pass
        self._orm = f'.filter({query})'

    def get_orm(self):
        """Return the generated ORM fragment."""
        return self._orm
return self._orm
class QueryFuncManager:
    """Dispatch SQL clause types to their translator callables."""

    _queryMappingTable = {
        "select": Select,
        "where": Where,
        "inner join": Join.inner_join,
        "left outer join": Join,
        "order by": OrderBy,
    }

    @classmethod
    def get_query(cls, contentType, *args, **kwargs):
        """Instantiate/dispatch the handler registered for *contentType*.

        Raises Exception when *contentType* is not a known clause type.
        """
        try:
            # BUG FIX: ``dict.get`` returns None for unknown keys, so the
            # KeyError branch was unreachable and callers got a confusing
            # "'NoneType' object is not callable". Indexing raises KeyError
            # as intended; the handler call is kept outside the try so its
            # own KeyErrors are not misreported as invalid content types.
            query_func = cls._queryMappingTable[contentType]
        except KeyError:
            raise Exception(f"{contentType} is invalid content type")
        return query_func(*args, **kwargs)
| en | 0.500223 | Table class Author : rimi Date : 2020. 05. 27 Description : get/set main table, get/set joined tables # table name 이 있을경우 # To sort the records in descending order | 2.974167 | 3 |
Ene-Jun-2021/monjaras-granados-alicia-montserrat/Primer Parcial/ejercicio 4/chain_of_responsability_test.py | bryanbalderas/DAS_Sistemas | 41 | 6615559 | <reponame>bryanbalderas/DAS_Sistemas
import pytest
from abc import ABCMeta, abstractclassmethod
from chain_of_responsability import CajeroATMChain
# Each test feeds a fixed amount into the ATM chain-of-responsibility and
# compares the banknote breakdown printed by the handlers (captured with
# pytest's capfd fixture) against the expected stdout verbatim.

@pytest.mark.parametrize("expected", [
    (
        "Tendras: 5 billetes de 20\n"
    )
])
def test_cambio_1(capfd, expected):
    """100 entering at the second link (20-note handler) -> five 20s."""
    x = CajeroATMChain()
    billete_a_cambiar = int(100)
    x.chain2.handle(billete_a_cambiar)
    out, _ = capfd.readouterr()
    assert out == expected


@pytest.mark.parametrize("expected", [
    (
        "Tendras: 315 billetes de 50\nTendras: 1 billetes de 20\n"
    )
])
def test_cambio_2(capfd, expected):
    """15770 from the head of the chain -> 315x50 + 1x20."""
    x = CajeroATMChain()
    billete_a_cambiar = int(15770)
    x.chain1.handle(billete_a_cambiar)
    out, _ = capfd.readouterr()
    assert out == expected


@pytest.mark.parametrize("expected", [
    (
        "Tendras: 1 billetes de 50\nTendras: 1 billetes de 10\n"
    )
])
def test_cambio_3(capfd, expected):
    """60 -> one 50 plus one 10 (skips the 20-note handler)."""
    x = CajeroATMChain()
    billete_a_cambiar = int(60)
    x.chain1.handle(billete_a_cambiar)
    out, _ = capfd.readouterr()
    assert out == expected


@pytest.mark.parametrize("expected", [
    (
        "Tendras: 2 billetes de 50\nTendras: 1 billetes de 20\nTendras: 1 billetes de 10\n"
    )
])
def test_cambio_4(capfd, expected):
    """130 -> 2x50 + 1x20 + 1x10, exercising all three handlers."""
    x = CajeroATMChain()
    billete_a_cambiar = int(130)
    x.chain1.handle(billete_a_cambiar)
    out, _ = capfd.readouterr()
    assert out == expected


@pytest.mark.parametrize("expected", [
    (
        "No es posible hacer el cambio de billetes\n"
    )
])
def test_cambio_5(capfd, expected):
    """5 is below the smallest note, so the chain reports it cannot change."""
    x = CajeroATMChain()
    billete_a_cambiar = int(5)
    x.chain1.handle(billete_a_cambiar)
    out, _ = capfd.readouterr()
    assert out == expected
| import pytest
from abc import ABCMeta, abstractclassmethod
from chain_of_responsability import CajeroATMChain
@pytest.mark.parametrize("expected", [
(
"Tendras: 5 billetes de 20\n"
)
])
def test_cambio_1(capfd, expected):
x = CajeroATMChain()
billete_a_cambiar = int(100)
x.chain2.handle(billete_a_cambiar)
out, _ = capfd.readouterr()
assert out == expected
@pytest.mark.parametrize("expected", [
(
"Tendras: 315 billetes de 50\nTendras: 1 billetes de 20\n"
)
])
def test_cambio_2(capfd, expected):
x = CajeroATMChain()
billete_a_cambiar = int(15770)
x.chain1.handle(billete_a_cambiar)
out, _ = capfd.readouterr()
assert out == expected
@pytest.mark.parametrize("expected", [
(
"Tendras: 1 billetes de 50\nTendras: 1 billetes de 10\n"
)
])
def test_cambio_3(capfd, expected):
x = CajeroATMChain()
billete_a_cambiar = int(60)
x.chain1.handle(billete_a_cambiar)
out, _ = capfd.readouterr()
assert out == expected
@pytest.mark.parametrize("expected", [
(
"Tendras: 2 billetes de 50\nTendras: 1 billetes de 20\nTendras: 1 billetes de 10\n"
)
])
def test_cambio_4(capfd, expected):
x = CajeroATMChain()
billete_a_cambiar = int(130)
x.chain1.handle(billete_a_cambiar)
out, _ = capfd.readouterr()
assert out == expected
@pytest.mark.parametrize("expected", [
(
"No es posible hacer el cambio de billetes\n"
)
])
def test_cambio_5(capfd, expected):
x = CajeroATMChain()
billete_a_cambiar = int(5)
x.chain1.handle(billete_a_cambiar)
out, _ = capfd.readouterr()
assert out == expected | none | 1 | 2.536155 | 3 | |
Simulation/calculation_status.py | SEPHIRONOVA/TradingDataAnalyzer | 0 | 6615560 | <reponame>SEPHIRONOVA/TradingDataAnalyzer
from enum import Enum
__author__ = 'raymond'
class CalculationStatus(Enum):
    """Validity flag for a calculation result."""
    Invalid = -1
    Valid = 1
__author__ = 'raymond'
class CalculationStatus(Enum):
Invalid = -1
Valid = 1 | none | 1 | 2.539458 | 3 | |
labnotebook/main.py | AekYutt/labnotebook | 562 | 6615561 | <reponame>AekYutt/labnotebook
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import relationship, sessionmaker
import datetime as dt
import labnotebook
def initialize(db_string):
    """
    initializes the engine given the database, creates session,
    and declares the Experiment and TrainingStep objects.

    db_string: a SQLAlchemy database URL.

    Side effects: binds labnotebook.Base and labnotebook.session, publishes
    Experiment/TrainingStep/ModelParams as module globals, wires the
    Experiment<->TrainingStep relationship and creates any missing tables.

    Returns the (Experiment, TrainingStep, ModelParams) mapped classes.
    """
    engine = create_engine(db_string)
    labnotebook.Base = declarative_base(engine)
    Session = sessionmaker(bind = engine)
    labnotebook.session = Session()
    # Experiment and TrainingStep are objects that
    # we need to access everywhere.
    # we cannot import before knowing db_string
    global Experiment
    global TrainingStep
    global ModelParams
    from labnotebook.model import Experiment, TrainingStep, ModelParams
    # we link the Experiment and TrainingStep objects
    Experiment.steps = relationship("TrainingStep", order_by=TrainingStep.timestep,
                                    back_populates="experiment")
    # and we update the metadata (creates missing tables in the database)
    labnotebook.Base.metadata.create_all(engine)
    return Experiment, TrainingStep, ModelParams
def start_experiment(dt=None,
                     gpu_id=0,
                     model_desc=None):
    """
    initializes the experiment run
    ___________
    PARAMETERS:
    ___________
    dt: the run's start time; defaults to the current time at call time
    gpu_id: which gpu the experiment is run on
    model_desc: a dict containing the model's params, or anything else, really
    ________
    RETURNS:
    ________
    the Experiment object
    """
    # BUG FIX: the previous default `dt=dt.datetime.now()` was evaluated
    # once at import time, so every run created without an explicit dt
    # shared the module-load timestamp (it also shadowed the `datetime as
    # dt` module alias). A None sentinel defers the clock read to call time.
    if dt is None:
        import datetime as _datetime
        dt = _datetime.datetime.now()
    xp = Experiment(dt=dt,
                    gpu = gpu_id,
                    model_desc = model_desc)
    labnotebook.session.add(xp)
    labnotebook.session.commit()
    return xp
def end_experiment(xp, final_trainloss=None, final_trainacc=None,
                   final_valacc=None):
    """
    closes the experiment run
    ___________
    PARAMETERS:
    ___________
    xp: the Experiment object
    final_trainloss, final_trainacc, final_valacc: optional scalars with the
    final state of the experiment
    ________
    RETURNS:
    ________
    the same Experiment object, marked completed and committed
    """
    xp.completed = True
    xp.final_trainloss = final_trainloss
    xp.final_trainacc = final_trainacc
    xp.final_valacc = final_valacc
    labnotebook.session.commit()
    return xp
def step_experiment(xp, timestep,
                    trainloss=None, trainacc=None, valacc=None,
                    epoch=None, custom_fields=None, model_params=None):
    """
    add a step to the experiment
    ___________
    PARAMETERS:
    ___________
    xp: the Experiment object
    trainloss, trainacc, valacc: optional scalars showing the state of the xp after the step
    timestep: step label
    epoch: optional epoch label
    custom_fields: whatever extra data you want to save
    model_params: weights of the model after the step
    ________
    RETURNS:
    ________
    a tuple (TrainingStep, ModelParams); ModelParams is None when no
    model_params were supplied
    """
    modelparams = None
    step = TrainingStep(run_id=xp.run_id, timestep=timestep,
                        trainloss=trainloss, trainacc=trainacc, valacc=valacc,
                        epoch=epoch, custom_fields=custom_fields)
    labnotebook.session.add(step)
    if model_params:
        # NOTE(review): step.step_id is a generated key; unless the session
        # autoflushes on add, it may still be None at this point — confirm
        # ModelParams rows receive a real step_id.
        modelparams = ModelParams(step_id=step.step_id, model_params=model_params)
        labnotebook.session.add(modelparams)
    labnotebook.session.commit()
    return step, modelparams
| from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import relationship, sessionmaker
import datetime as dt
import labnotebook
def initialize(db_string):
"""
initializes the engine given the database, creates session,
and declares the Experiment and TrainingStep objects.
"""
engine = create_engine(db_string)
labnotebook.Base = declarative_base(engine)
Session = sessionmaker(bind = engine)
labnotebook.session = Session()
# Experiment and TrainingStep are objects that
# we need to access everywhere.
# we cannot import before knowing db_string
global Experiment
global TrainingStep
global ModelParams
from labnotebook.model import Experiment, TrainingStep, ModelParams
# we link the Experiment and TrainingStep objects
Experiment.steps = relationship("TrainingStep", order_by=TrainingStep.timestep,
back_populates="experiment")
# and we update the metadata
labnotebook.Base.metadata.create_all(engine)
return Experiment, TrainingStep, ModelParams
def start_experiment(dt=None,
                     gpu_id=0,
                     model_desc=None):
    """
    initializes the experiment run
    ___________
    PARAMETERS:
    ___________
    dt: the run's start time; defaults to the current time at call time
    gpu_id: which gpu the experiment is run on
    model_desc: a dict containing the model's params, or anything else, really
    ________
    RETURNS:
    ________
    the Experiment object
    """
    # BUG FIX: the previous default `dt=dt.datetime.now()` was evaluated
    # once at import time, so every run created without an explicit dt
    # shared the module-load timestamp (it also shadowed the `datetime as
    # dt` module alias). A None sentinel defers the clock read to call time.
    if dt is None:
        import datetime as _datetime
        dt = _datetime.datetime.now()
    xp = Experiment(dt=dt,
                    gpu = gpu_id,
                    model_desc = model_desc)
    labnotebook.session.add(xp)
    labnotebook.session.commit()
    return xp
def end_experiment(xp, final_trainloss=None, final_trainacc=None,
final_valacc=None):
"""
closes the experiment run
___________
PARAMETERS:
___________
xp: the Experiment object
final_trainloss, final_trainacc, final_valacc: optional scalars showing the stage
of the experiment after it ended
________
RETURNS:
________
an Experiment object
"""
xp.final_trainloss = final_trainloss
xp.final_trainacc = final_trainacc
xp.final_valacc = final_valacc
xp.completed = True
labnotebook.session.commit()
return xp
def step_experiment(xp, timestep,
trainloss=None, trainacc=None, valacc=None,
epoch=None, custom_fields=None, model_params=None):
"""
add a step to the experiment
___________
PARAMETERS:
___________
xp: the Experiment object
trainloss, trainacc, valacc: optional scalars showing the state of the xp after the step
timestep: step label
epoch: optional epoch label
custom_fields: whatever extra data you want to save
model_params: weights of the model after the step
________
RETURNS:
________
a tuple (TrainingStep, ModelParams)
"""
modelparams = None
step = TrainingStep(run_id=xp.run_id, timestep=timestep,
trainloss=trainloss, trainacc=trainacc, valacc=valacc,
epoch=epoch, custom_fields=custom_fields)
labnotebook.session.add(step)
if model_params:
modelparams = ModelParams(step_id=step.step_id, model_params=model_params)
labnotebook.session.add(modelparams)
labnotebook.session.commit()
return step, modelparams | en | 0.673499 | initializes the engine given the database, creates session, and declares the Experiment and TrainingStep objects. # Experiment and TrainingStep are objects that # we need to access everywhere. # we cannot import before knowing db_string # we link the Experiment and TrainingStep objects # and we update the metadata initializes the experiment run ___________ PARAMETERS: ___________ dt: the run's start time gpu_id: which gpu the experiment is run on model_desc: a dict containing the model's params, or anything else, really ________ RETURNS: ________ the Experiment object closes the experiment run ___________ PARAMETERS: ___________ xp: the Experiment object final_trainloss, final_trainacc, final_valacc: optional scalars showing the stage of the experiment after it ended ________ RETURNS: ________ an Experiment object add a step to the experiment ___________ PARAMETERS: ___________ xp: the Experiment object trainloss, trainacc, valacc: optional scalars showing the state of the xp after the step timestep: step label epoch: optional epoch label custom_fields: whatever extra data you want to save model_params: weights of the model after the step ________ RETURNS: ________ a tuple (TrainingStep, ModelParams) | 2.792666 | 3 |
shub_image/build.py | scrapinghub/shub-image | 4 | 6615562 | import os
import re
import click
import warnings
from shub import exceptions as shub_exceptions
from shub.deploy import list_targets
from shub.deploy import _create_default_setup_py
from shub.utils import closest_file, get_config
from shub_image import utils
from shub_image import test as test_mod
SHORT_HELP = 'Build release image.'
HELP = """
Build command uses your Dockerfile to build an image and tag it properly.
Internally, this command is a simple wrapper to `docker build` and uses
docker daemon on your system to build an image. Also it can generate
project version for you, and locate root project directory by itself.
Image should be set via scrapinghub.yml, section "images". If version is not
provided, the tool uses VCS-based stamp over project directory (the same as
shub utils itself).
"""
@click.command(help=HELP, short_help=SHORT_HELP)
@click.argument("target", required=False, default="default")
@click.option("-l", "--list-targets", help="list available targets",
              is_flag=True, is_eager=True, expose_value=False,
              callback=list_targets)
@click.option("-d", "--debug", help="debug mode", is_flag=True)
@click.option("--version", help="release version")
@click.option("-S", "--skip-tests", help="skip testing image", is_flag=True)
def cli(target, debug, version, skip_tests):
    """CLI entry point: build the release image for *target*.

    NOTE(review): the --debug flag is accepted but never forwarded to
    build_cmd — confirm whether it should enable debug output.
    """
    build_cmd(target, version, skip_tests)
def build_cmd(target, version, skip_tests):
    """Build and tag the release Docker image for *target*.

    Streams the docker build output, raises BadParameterException when no
    Dockerfile exists, RemoteErrorException when the build never reports
    success, and (unless skip_tests) runs the image test suite afterwards.
    """
    client = utils.get_docker_client()
    project_dir = utils.get_project_dir()
    config = utils.load_release_config()
    image = config.get_image(target)
    _create_setup_py_if_not_exists()
    image_name = utils.format_image_name(image, version)
    if not os.path.exists(os.path.join(project_dir, 'Dockerfile')):
        raise shub_exceptions.BadParameterException(
            'Dockerfile is not found, please use shub-image init cmd')
    is_built = False
    for data in client.build(path=project_dir, tag=image_name, decode=True):
        if 'stream' in data:
            utils.debug_log("{}".format(data['stream'][:-1]))
            # BUG FIX: previously every stream line overwrote is_built, so
            # any message emitted after "Successfully built" reset it to
            # None and a successful build was reported as failed. Latch the
            # flag once the success line is seen.
            if not is_built:
                is_built = bool(re.search(
                    r'Successfully built ([0-9a-f]+)', data['stream']))
        elif 'error' in data:
            click.echo("Error {}:\n{}".format(
                data['error'], data['errorDetail']))
    if not is_built:
        raise shub_exceptions.RemoteErrorException(
            "Build image operation failed")
    click.echo("The image {} build is completed.".format(image_name))
    # Test the image content after building it
    if not skip_tests:
        test_mod.test_cmd(target, version)
def _create_setup_py_if_not_exists():
    """Generate a default setup.py next to scrapy.cfg when none exists.

    Only applies to Scrapy projects: if no scrapy.cfg can be located, the
    project is treated as non-scrapy/non-python and nothing is created.
    """
    closest = closest_file('scrapy.cfg')
    # create default setup.py only if scrapy.cfg is found, otherwise
    # consider it as a non-scrapy/non-python project
    if not closest:
        warnings.warn("scrapy.cfg is not found")
        return
    with utils.remember_cwd():
        os.chdir(os.path.dirname(closest))
        if not os.path.exists('setup.py'):
            # default settings module taken from the project config
            settings = get_config().get('settings', 'default')
            _create_default_setup_py(settings=settings)
| import os
import re
import click
import warnings
from shub import exceptions as shub_exceptions
from shub.deploy import list_targets
from shub.deploy import _create_default_setup_py
from shub.utils import closest_file, get_config
from shub_image import utils
from shub_image import test as test_mod
SHORT_HELP = 'Build release image.'
HELP = """
Build command uses your Dockerfile to build an image and tag it properly.
Internally, this command is a simple wrapper to `docker build` and uses
docker daemon on your system to build an image. Also it can generate
project version for you, and locate root project directory by itself.
Image should be set via scrapinghub.yml, section "images". If version is not
provided, the tool uses VCS-based stamp over project directory (the same as
shub utils itself).
"""
@click.command(help=HELP, short_help=SHORT_HELP)
@click.argument("target", required=False, default="default")
@click.option("-l", "--list-targets", help="list available targets",
              is_flag=True, is_eager=True, expose_value=False,
              callback=list_targets)
@click.option("-d", "--debug", help="debug mode", is_flag=True)
@click.option("--version", help="release version")
@click.option("-S", "--skip-tests", help="skip testing image", is_flag=True)
def cli(target, debug, version, skip_tests):
    """Click entry point: build the Docker image for the given release target.

    NOTE(review): the ``debug`` flag is accepted but not forwarded to
    ``build_cmd`` — confirm whether it is consumed elsewhere (e.g. by a
    group-level callback) or is simply unused.
    """
    build_cmd(target, version, skip_tests)
def build_cmd(target, version, skip_tests):
    """Build a Docker image for *target* from the project's Dockerfile.

    Looks up the image name for *target* in the release config, streams
    the ``docker build`` output, and raises if no "Successfully built"
    marker was seen.  Unless *skip_tests* is set, the freshly built image
    is then checked with the test command.

    :param target: name of the target section in scrapinghub.yml
    :param version: image version tag; ``None`` falls back to a VCS stamp
    :param skip_tests: when True, skip testing the built image
    :raises shub_exceptions.BadParameterException: if no Dockerfile exists
    :raises shub_exceptions.RemoteErrorException: if the docker build failed
    """
    client = utils.get_docker_client()
    project_dir = utils.get_project_dir()
    config = utils.load_release_config()
    image = config.get_image(target)
    _create_setup_py_if_not_exists()
    image_name = utils.format_image_name(image, version)
    if not os.path.exists(os.path.join(project_dir, 'Dockerfile')):
        raise shub_exceptions.BadParameterException(
            'Dockerfile is not found, please use shub-image init cmd')
    is_built = False
    for data in client.build(path=project_dir, tag=image_name, decode=True):
        if 'stream' in data:
            utils.debug_log("{}".format(data['stream'][:-1]))
            # Keep a previously seen success marker: later stream lines
            # (e.g. "Successfully tagged ...") must not reset it to None.
            is_built = is_built or re.search(
                r'Successfully built ([0-9a-f]+)', data['stream'])
        elif 'error' in data:
            click.echo("Error {}:\n{}".format(
                data['error'], data['errorDetail']))
    if not is_built:
        raise shub_exceptions.RemoteErrorException(
            "Build image operation failed")
    click.echo("The image {} build is completed.".format(image_name))
    # Test the image content after building it
    if not skip_tests:
        test_mod.test_cmd(target, version)
def _create_setup_py_if_not_exists():
    """Generate a default setup.py next to scrapy.cfg when none exists.

    Only applies to Scrapy projects: if no scrapy.cfg can be located, the
    project is treated as non-scrapy/non-python and nothing is created.
    """
    closest = closest_file('scrapy.cfg')
    # create default setup.py only if scrapy.cfg is found, otherwise
    # consider it as a non-scrapy/non-python project
    if not closest:
        warnings.warn("scrapy.cfg is not found")
        return
    with utils.remember_cwd():
        os.chdir(os.path.dirname(closest))
        if not os.path.exists('setup.py'):
            # default settings module taken from the project config
            settings = get_config().get('settings', 'default')
            _create_default_setup_py(settings=settings)
| en | 0.862902 | Build command uses your Dockerfile to build an image and tag it properly. Internally, this command is a simple wrapper to `docker build` and uses docker daemon on your system to build an image. Also it can generate project version for you, and locate root project directory by itself. Image should be set via scrapinghub.yml, section "images". If version is not provided, the tool uses VCS-based stamp over project directory (the same as shub utils itself). # Test the image content after building it # create default setup.py only if scrapy.cfg is found, otherwise # consider it as a non-scrapy/non-python project | 2.299963 | 2 |
zigbear/custom_protocol/stack.py | philippnormann/zigbear | 14 | 6615563 | from zigbear.custom_protocol.ApplicationLayer import ApplicationLayer
from zigbear.custom_protocol.MACLayer import MACLayer
from zigbear.custom_protocol.NetworkLayer import NetworkLayer
from zigbear.custom_protocol.SecurityLayer import SecurityLayer
class ProtocolStack:
    """Facade wiring the custom protocol layers (MAC -> network ->
    security -> application) on top of a radio connector, with
    convenience accessors for the common configuration values.
    """

    def __init__(self, connector, network_key=None):
        self.connector = connector
        self.maclayer = MACLayer(self.connector, 0, 0)
        self.networklayer = NetworkLayer(self.maclayer)
        self.securitylayer = SecurityLayer(self.networklayer, network_key)
        self.application = ApplicationLayer(self.securitylayer)

    def set_panid(self, pan):
        """Set the PAN id on the MAC layer."""
        self.maclayer.network = pan

    def get_panid(self):
        """Return the PAN id configured on the MAC layer."""
        return self.maclayer.network

    def set_address(self, address):
        """Set the short address on the MAC layer."""
        self.maclayer.address = address

    def get_address(self):
        """Return the short address of the MAC layer."""
        return self.maclayer.address

    def get_networkkey(self):
        """Return the network key held by the security layer."""
        return self.securitylayer.network_key

    def set_network_key(self, networkkey):
        """Install a new network key on the security layer."""
        self.securitylayer.network_key = networkkey

    def get_session_count(self):
        """Return the total number of sessions across all session groups."""
        return sum(len(group) for group in self.application.sessions.values())

    def get_listeners_count(self):
        """Return how many listeners are registered on the application layer."""
        return len(self.application.listeners)

    def connect(self, destination, port):
        """Open an application-layer session to *destination* on *port*."""
        return self.application.connect(destination, port)

    def listen(self, port, handler):
        """Register *handler* for incoming sessions on *port*."""
        return self.application.listen(port, handler)

    def get_init_devices(self):
        """Return devices currently attempting to join the network."""
        return {
            "init_devices": self.securitylayer.get_connection_attempts()
        }

    def status(self):
        """Return a snapshot of the stack's configuration and activity."""
        return {
            "panid": self.get_panid(),
            "address": self.get_address(),
            "networkkey": self.get_networkkey(),
            "session_count": self.get_session_count(),
            "listeners_count": self.get_listeners_count()
        }
| from zigbear.custom_protocol.ApplicationLayer import ApplicationLayer
from zigbear.custom_protocol.MACLayer import MACLayer
from zigbear.custom_protocol.NetworkLayer import NetworkLayer
from zigbear.custom_protocol.SecurityLayer import SecurityLayer
class ProtocolStack:
    """Facade wiring the custom protocol layers (MAC -> network ->
    security -> application) on top of a radio connector, with
    convenience accessors for the common configuration values.
    """

    def __init__(self, connector, network_key=None):
        self.connector = connector
        self.maclayer = MACLayer(self.connector, 0, 0)
        self.networklayer = NetworkLayer(self.maclayer)
        self.securitylayer = SecurityLayer(self.networklayer, network_key)
        self.application = ApplicationLayer(self.securitylayer)

    def set_panid(self, pan):
        """Set the PAN id on the MAC layer."""
        self.maclayer.network = pan

    def get_panid(self):
        """Return the PAN id configured on the MAC layer."""
        return self.maclayer.network

    def set_address(self, address):
        """Set the short address on the MAC layer."""
        self.maclayer.address = address

    def get_address(self):
        """Return the short address of the MAC layer."""
        return self.maclayer.address

    def get_networkkey(self):
        """Return the network key held by the security layer."""
        return self.securitylayer.network_key

    def set_network_key(self, networkkey):
        """Install a new network key on the security layer."""
        self.securitylayer.network_key = networkkey

    def get_session_count(self):
        """Return the total number of sessions across all session groups."""
        return sum(len(group) for group in self.application.sessions.values())

    def get_listeners_count(self):
        """Return how many listeners are registered on the application layer."""
        return len(self.application.listeners)

    def connect(self, destination, port):
        """Open an application-layer session to *destination* on *port*."""
        return self.application.connect(destination, port)

    def listen(self, port, handler):
        """Register *handler* for incoming sessions on *port*."""
        return self.application.listen(port, handler)

    def get_init_devices(self):
        """Return devices currently attempting to join the network."""
        return {
            "init_devices": self.securitylayer.get_connection_attempts()
        }

    def status(self):
        """Return a snapshot of the stack's configuration and activity."""
        return {
            "panid": self.get_panid(),
            "address": self.get_address(),
            "networkkey": self.get_networkkey(),
            "session_count": self.get_session_count(),
            "listeners_count": self.get_listeners_count()
        }
| none | 1 | 2.268577 | 2 | |
server/spells/shuffle.py | zorlu/cards2-server | 0 | 6615564 | <reponame>zorlu/cards2-server<gh_stars>0
from server.spell import Spell
from server.player import Player
from app.settings import logger
class ShuffleSpell(Spell):
    """Spell that shuffles copies of a card into the caster's deck.

    ``amount`` copies are created per resolved target.  The card to copy
    comes either from ``summon_dbid`` (the spell target then points at a
    Player) or from each targeted Card itself.
    """

    # default number of copies shuffled in per target
    amount = 1

    def __init__(self, **kwargs):
        self.type = "Shuffled"
        self.amount = kwargs.get('amount', 1)
        super(ShuffleSpell, self).__init__(**kwargs)

    def cast(self, data=None):
        """Resolve the spell and return a list of per-copy result dicts.

        Each dict describes the shuffled copy (owner uid, card uuid/key,
        caster's new deck size) and, when the target was a Card rather
        than a Player, the original card as well.
        """
        from server.models import Card
        tailored = []
        targets = self.find_target(data)
        for card in targets:
            if self.summon_dbid:  # in this case, spell target must be pointing to the Player
                card_dbid = self.summon_dbid
            else:  # if shuffling card not specified in the summon field, target must be pointing to a Card
                card_dbid = card.dbid
            for i in range(self.amount):
                # materialise a fresh virtual copy owned by the caster
                newdbcard = Card.objects.get(pk=card_dbid)
                vcard = newdbcard.to_virtual(self.card.player)
                self.card.player.deck.shuffle(vcard)  # TODO check this, looks not shuffling array
                shuffle_data = {
                    'shuffled': {
                        'deck_count': len(vcard.player.deck.cards),
                        'copy': {
                            'uid': vcard.player.uid,
                            'uuid': vcard.uuid,
                            'key': vcard.key
                        }
                    }
                }
                # if target is a Card(x:ground:x or summon_db_id), not a Player(spell_target=cardowner)
                if not isinstance(card, Player):
                    shuffle_data['original'] = {
                        'uid': card.player.uid,
                        'uuid': card.uuid,
                        'key': card.key
                    }
                tailored.append(shuffle_data)
        logger.info("spell> ShuffleSpell cast result:{0}".format(tailored))
        return tailored
| from server.spell import Spell
from server.player import Player
from app.settings import logger
class ShuffleSpell(Spell):
    """Spell that shuffles copies of a card into the caster's deck.

    ``amount`` copies are created per resolved target.  The card to copy
    comes either from ``summon_dbid`` (the spell target then points at a
    Player) or from each targeted Card itself.
    """

    # default number of copies shuffled in per target
    amount = 1

    def __init__(self, **kwargs):
        self.type = "Shuffled"
        self.amount = kwargs.get('amount', 1)
        super(ShuffleSpell, self).__init__(**kwargs)

    def cast(self, data=None):
        """Resolve the spell and return a list of per-copy result dicts.

        Each dict describes the shuffled copy (owner uid, card uuid/key,
        caster's new deck size) and, when the target was a Card rather
        than a Player, the original card as well.
        """
        from server.models import Card
        tailored = []
        targets = self.find_target(data)
        for card in targets:
            if self.summon_dbid:  # in this case, spell target must be pointing to the Player
                card_dbid = self.summon_dbid
            else:  # if shuffling card not specified in the summon field, target must be pointing to a Card
                card_dbid = card.dbid
            for i in range(self.amount):
                # materialise a fresh virtual copy owned by the caster
                newdbcard = Card.objects.get(pk=card_dbid)
                vcard = newdbcard.to_virtual(self.card.player)
                self.card.player.deck.shuffle(vcard)  # TODO check this, looks not shuffling array
                shuffle_data = {
                    'shuffled': {
                        'deck_count': len(vcard.player.deck.cards),
                        'copy': {
                            'uid': vcard.player.uid,
                            'uuid': vcard.uuid,
                            'key': vcard.key
                        }
                    }
                }
                # if target is a Card(x:ground:x or summon_db_id), not a Player(spell_target=cardowner)
                if not isinstance(card, Player):
                    shuffle_data['original'] = {
                        'uid': card.player.uid,
                        'uuid': card.uuid,
                        'key': card.key
                    }
                tailored.append(shuffle_data)
        logger.info("spell> ShuffleSpell cast result:{0}".format(tailored))
        return tailored
corkus/objects/player_gamemodes.py | MrBartusek/corkus.py | 5 | 6615565 | from __future__ import annotations
from typing import TYPE_CHECKING
from enum import Enum
from .base import CorkusBase
if TYPE_CHECKING:
from corkus import Corkus
class HardcoreType(Enum):
    """Describes current status of player's hardcore challenge."""
    ENABLED = "ENABLED"    # hardcore is on and the class has no deaths
    DISABLED = "DISABLED"  # hardcore was not enabled for this class
    FAILED = "FAILED"      # hardcore is on but the class has died
class PlayerGamemodes(CorkusBase):
    """Challenge gamemodes that are enabled on a :py:class:`PlayerClass`."""

    def __init__(self, corkus: Corkus, attributes: dict, deaths: int):
        super().__init__(corkus, attributes)
        # death count of the owning class; decides hardcore pass/fail
        self._deaths = deaths

    @property
    def craftsman(self) -> str:
        """Does this class have the craftsman challenge enabled."""
        return self._attributes.get("craftsman", False)

    @property
    def hardcore(self) -> HardcoreType:
        """Does this class have the hardcore challenge enabled and what is the status of it."""
        if not self._attributes.get("hardcore", False):
            return HardcoreType.DISABLED
        # enabled: the challenge is failed as soon as the class has died
        return HardcoreType.ENABLED if self._deaths <= 0 else HardcoreType.FAILED

    @property
    def ironman(self) -> bool:
        """Does this class have the ironman challenge enabled."""
        return self._attributes.get("ironman", False)

    @property
    def hunted(self) -> bool:
        """Does this class have the haunted mode enabled."""
        return self._attributes.get("hunted", False)

    def __repr__(self) -> str:
        return f"<PlayerGamemodes craftsman={self.craftsman!r} hardcore={self.hardcore!r} ironman={self.ironman!r} hunted={self.hunted!r}>"
| from __future__ import annotations
from typing import TYPE_CHECKING
from enum import Enum
from .base import CorkusBase
if TYPE_CHECKING:
from corkus import Corkus
class HardcoreType(Enum):
    """Describes current status of player's hardcore challenge."""
    ENABLED = "ENABLED"    # hardcore is on and the class has no deaths
    DISABLED = "DISABLED"  # hardcore was not enabled for this class
    FAILED = "FAILED"      # hardcore is on but the class has died
class PlayerGamemodes(CorkusBase):
    """Challenge gamemodes that are enabled on a :py:class:`PlayerClass`."""

    def __init__(self, corkus: Corkus, attributes: dict, deaths: int):
        super().__init__(corkus, attributes)
        # death count of the owning class; decides hardcore pass/fail
        self._deaths = deaths

    @property
    def craftsman(self) -> str:
        """Does this class have the craftsman challenge enabled."""
        return self._attributes.get("craftsman", False)

    @property
    def hardcore(self) -> HardcoreType:
        """Does this class have the hardcore challenge enabled and what is the status of it."""
        if not self._attributes.get("hardcore", False):
            return HardcoreType.DISABLED
        # enabled: the challenge is failed as soon as the class has died
        return HardcoreType.ENABLED if self._deaths <= 0 else HardcoreType.FAILED

    @property
    def ironman(self) -> bool:
        """Does this class have the ironman challenge enabled."""
        return self._attributes.get("ironman", False)

    @property
    def hunted(self) -> bool:
        """Does this class have the haunted mode enabled."""
        return self._attributes.get("hunted", False)

    def __repr__(self) -> str:
        return f"<PlayerGamemodes craftsman={self.craftsman!r} hardcore={self.hardcore!r} ironman={self.ironman!r} hunted={self.hunted!r}>"
| en | 0.850436 | Describes current status of player's hardcore challenge. Challenge gamemodes that are enabled on a :py:class:`PlayerClass`. Does this class have the craftsman challenge enabled. Does this class have the hardcore challenge enabled and what is the status of it. Does this class have the ironman challenge enabled. Does this class have the haunted mode enabled. | 2.904679 | 3 |
tests/predictors/cvt_tagger_predictor_test.py | apmoore1/nlp-uncertainty-ssl | 0 | 6615566 | <reponame>apmoore1/nlp-uncertainty-ssl
from pathlib import Path
from typing import Dict, List, Any
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
import nlp_uncertainty_ssl
class TestCvtTaggerPredictor():
    """Smoke tests for the ``cvt-tagger`` predictor loaded from a saved archive."""

    def test_standard_use(self):
        """Sentence-only, tokens-only and combined inputs all work, and
        pre-tokenised input takes precedence over the raw sentence string
        (the combined case keeps "laptop's" as one token)."""
        def keys_in_output(keys: List[str], output: Dict[str, Any]) -> None:
            # every expected output field must be present
            for key in keys:
                assert key in output
        example_input = {'sentence': "The laptop's"}
        example_sentence_token_input = {'tokens': ["The", "laptop's", "case",
                                                   "was", "great", "and",
                                                   "cover", "was", "rubbish"],
                                        'sentence': "The laptop's case was great"\
                                                    " and cover was rubbish"}
        example_token_input = {'tokens': ["The", "laptop's", "case", "was",
                                          "great", "and", "cover", "was",
                                          "rubbish"]}
        # load the archived model stored alongside the tests
        archive_dir = Path(__file__, '..', 'saved_models').resolve()
        archive_model = load_archive(str(Path(archive_dir, 'cvt_tagger', 'model.tar.gz')))
        predictor = Predictor.from_archive(archive_model, 'cvt-tagger')
        output_keys = ['logits', 'mask', 'tags', 'class_probabilities']
        # sentence only: the predictor tokenises itself ("laptop's" splits)
        result = predictor.predict_json(example_input)
        keys_in_output(output_keys, result)
        assert result['words'] == ["The", "laptop", "'s"]
        # tokens + sentence: the provided tokens win over re-tokenisation
        result = predictor.predict_json(example_sentence_token_input)
        keys_in_output(output_keys, result)
        assert result['words'] == ["The", "laptop's", "case", "was", "great",
                                   "and", "cover", "was", "rubbish"]
        # tokens only: used verbatim
        result = predictor.predict_json(example_token_input)
        keys_in_output(output_keys, result)
        assert result['words'] == ["The", "laptop's", "case", "was", "great",
                                   "and", "cover", "was", "rubbish"]
from typing import Dict, List, Any
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
import nlp_uncertainty_ssl
class TestCvtTaggerPredictor():
    """Smoke tests for the ``cvt-tagger`` predictor loaded from a saved archive."""

    def test_standard_use(self):
        """Sentence-only, tokens-only and combined inputs all work, and
        pre-tokenised input takes precedence over the raw sentence string
        (the combined case keeps "laptop's" as one token)."""
        def keys_in_output(keys: List[str], output: Dict[str, Any]) -> None:
            # every expected output field must be present
            for key in keys:
                assert key in output
        example_input = {'sentence': "The laptop's"}
        example_sentence_token_input = {'tokens': ["The", "laptop's", "case",
                                                   "was", "great", "and",
                                                   "cover", "was", "rubbish"],
                                        'sentence': "The laptop's case was great"\
                                                    " and cover was rubbish"}
        example_token_input = {'tokens': ["The", "laptop's", "case", "was",
                                          "great", "and", "cover", "was",
                                          "rubbish"]}
        # load the archived model stored alongside the tests
        archive_dir = Path(__file__, '..', 'saved_models').resolve()
        archive_model = load_archive(str(Path(archive_dir, 'cvt_tagger', 'model.tar.gz')))
        predictor = Predictor.from_archive(archive_model, 'cvt-tagger')
        output_keys = ['logits', 'mask', 'tags', 'class_probabilities']
        # sentence only: the predictor tokenises itself ("laptop's" splits)
        result = predictor.predict_json(example_input)
        keys_in_output(output_keys, result)
        assert result['words'] == ["The", "laptop", "'s"]
        # tokens + sentence: the provided tokens win over re-tokenisation
        result = predictor.predict_json(example_sentence_token_input)
        keys_in_output(output_keys, result)
        assert result['words'] == ["The", "laptop's", "case", "was", "great",
                                   "and", "cover", "was", "rubbish"]
        # tokens only: used verbatim
        result = predictor.predict_json(example_token_input)
        keys_in_output(output_keys, result)
        assert result['words'] == ["The", "laptop's", "case", "was", "great",
                                   "and", "cover", "was", "rubbish"]
built-in/TensorFlow/Research/reinforcement-learning/ModelZoo_QMIX_TensorFlow/xt/framework/comm/share_by_plasma.py | Huawei-Ascend/modelzoo | 0 | 6615567 | #!/usr/bin/env python
import os
import time
from multiprocessing import Queue
from subprocess import PIPE, Popen
import lz4.frame
# from pyarrow import deserialize, plasma, serialize
from xt.framework.register import Registers
import pickle
@Registers.comm.register
class ShareByPlasma(object):
    """Comm backend sharing data between processes via an Apache Arrow
    Plasma object store: payloads are lz4-compressed and a multiprocessing
    Queue carries object ids / buffers between endpoints.

    NOTE(review): the pyarrow import at the top of the file is commented
    out, so every path that touches ``plasma``/``deserialize`` (recv,
    start, connect, the *_bytes/*_multipart helpers) raises NameError at
    runtime — confirm whether the pickle-based path in ``send`` is the
    intended replacement.
    """

    def __init__(self, comm_info):
        """ init plasma component """
        super(ShareByPlasma, self).__init__()
        self.size_shared_mem = comm_info.get("size", 1000000000)  # bytes for plasma_store
        self.path = comm_info.get("path", "/tmp/plasma" + str(os.getpid()))
        self.compress = comm_info.get("compress", True)  # NOTE(review): never read below
        self.control_q = Queue()
        self.client = {}  # per-pid plasma client cache, see connect()
        self.start()

    def send(self, data, name=None, block=True):
        """ send data to plasma server """
        # client = self.connect()
        # data_buffer = lz4.frame.compress(serialize(data).to_buffer())
        # object_id = client.put_raw_buffer(data_buffer)
        pickled_data = pickle.dumps(data)
        data_buffer = lz4.frame.compress(pickled_data)
        # NOTE(review): the compressed bytes are queued directly, while
        # recv() expects a plasma object id — the two paths are out of sync.
        self.control_q.put(data_buffer)
        # del data
        # Presumably frees the large payload on the sender side after it
        # has been queued — confirm callers do not reuse data["data"].
        if data["ctr_info"].get("cmd") == "train":
            keys = []
            for key in data["data"].keys():
                keys.append(key)
            for key in keys:
                del data["data"][key]
        elif data["ctr_info"].get("cmd") == "predict":
            del data["data"]

    def recv(self, name=None, block=True):
        """ receive data from plasma server """
        object_id = self.control_q.get()
        client = self.connect()
        # NOTE(review): ``deserialize`` comes from the commented-out pyarrow
        # import, and get_buffers() returns a *list* — this line cannot work
        # as written; it should mirror send() with
        # pickle.loads(lz4.frame.decompress(...)) on the queued buffer.
        data = deserialize(lz4.frame.decompress(client.get_buffers([object_id])))
        client.delete([object_id])
        return data

    def send_bytes(self, data_buffer):
        """ send data to plasma server without serialize """
        client = self.connect()
        object_id = client.put_raw_buffer(data_buffer)
        self.control_q.put(object_id)

    def recv_bytes(self):
        """ receive data from plasma server without deserialize """
        object_id = self.control_q.get()
        # client = self.connect()
        # data_buffer = client.get_buffers([object_id])
        # client.delete([object_id])
        # Returns the raw object id plus a dummy size of 0; the caller is
        # expected to fetch/delete the buffer itself (see delete()).
        return object_id, 0

    def delete(self, object_id):
        # No-op while the plasma client path is commented out.
        # client = self.connect()
        # client.delete([object_id])
        pass

    def send_multipart(self, data_buffer):
        """ send multi-data to plasma server without serialize """
        client = self.connect()
        # queue the part count first so the receiver knows how many ids follow
        self.control_q.put(len(data_buffer))
        for _buffer in data_buffer:
            objec_id = client.put_raw_buffer(_buffer)  # NOTE(review): typo, should be object_id
            self.control_q.put(objec_id)

    def recv_multipart(self):
        """ recieve multi-data from plasma server without deserialize """
        len_data = self.control_q.get()
        object_id = []
        client = self.connect()
        for _ in range(len_data):
            _object_id = self.control_q.get()
            object_id.append(_object_id)
        data_buffer = client.get_buffers(object_id)
        client.delete(object_id)
        return data_buffer

    def start(self):
        """ start plasma server """
        try:
            # probe for an already-running store at self.path
            client = plasma.connect(self.path, int_num_retries=2)
        except:  # NOTE(review): bare except also hides genuine errors
            Popen(
                "plasma_store -m {} -s {}".format(self.size_shared_mem, self.path),
                shell=True,
                stderr=PIPE,
            )
            print(
                "plasma_store -m {} -s {} is acitvated!".format(
                    self.size_shared_mem, self.path
                )
            )
            time.sleep(0.1)  # give plasma_store a moment to come up

    def connect(self):
        """ connect to plasma server """
        # one cached plasma client per process, keyed by pid, so forked
        # workers never share a connection
        pid = os.getpid()
        if pid in self.client:
            return self.client[pid]
        else:
            self.client[pid] = plasma.connect(self.path)
            return self.client[pid]

    def close(self):
        """ close plasma server """
        os.system("pkill -9 plasma")  # NOTE(review): kills every plasma process on the host
| #!/usr/bin/env python
import os
import time
from multiprocessing import Queue
from subprocess import PIPE, Popen
import lz4.frame
# from pyarrow import deserialize, plasma, serialize
from xt.framework.register import Registers
import pickle
@Registers.comm.register
class ShareByPlasma(object):
    """Comm backend sharing data between processes via an Apache Arrow
    Plasma object store: payloads are lz4-compressed and a multiprocessing
    Queue carries object ids / buffers between endpoints.

    NOTE(review): the pyarrow import at the top of the file is commented
    out, so every path that touches ``plasma``/``deserialize`` (recv,
    start, connect, the *_bytes/*_multipart helpers) raises NameError at
    runtime — confirm whether the pickle-based path in ``send`` is the
    intended replacement.
    """

    def __init__(self, comm_info):
        """ init plasma component """
        super(ShareByPlasma, self).__init__()
        self.size_shared_mem = comm_info.get("size", 1000000000)  # bytes for plasma_store
        self.path = comm_info.get("path", "/tmp/plasma" + str(os.getpid()))
        self.compress = comm_info.get("compress", True)  # NOTE(review): never read below
        self.control_q = Queue()
        self.client = {}  # per-pid plasma client cache, see connect()
        self.start()

    def send(self, data, name=None, block=True):
        """ send data to plasma server """
        # client = self.connect()
        # data_buffer = lz4.frame.compress(serialize(data).to_buffer())
        # object_id = client.put_raw_buffer(data_buffer)
        pickled_data = pickle.dumps(data)
        data_buffer = lz4.frame.compress(pickled_data)
        # NOTE(review): the compressed bytes are queued directly, while
        # recv() expects a plasma object id — the two paths are out of sync.
        self.control_q.put(data_buffer)
        # del data
        # Presumably frees the large payload on the sender side after it
        # has been queued — confirm callers do not reuse data["data"].
        if data["ctr_info"].get("cmd") == "train":
            keys = []
            for key in data["data"].keys():
                keys.append(key)
            for key in keys:
                del data["data"][key]
        elif data["ctr_info"].get("cmd") == "predict":
            del data["data"]

    def recv(self, name=None, block=True):
        """ receive data from plasma server """
        object_id = self.control_q.get()
        client = self.connect()
        # NOTE(review): ``deserialize`` comes from the commented-out pyarrow
        # import, and get_buffers() returns a *list* — this line cannot work
        # as written; it should mirror send() with
        # pickle.loads(lz4.frame.decompress(...)) on the queued buffer.
        data = deserialize(lz4.frame.decompress(client.get_buffers([object_id])))
        client.delete([object_id])
        return data

    def send_bytes(self, data_buffer):
        """ send data to plasma server without serialize """
        client = self.connect()
        object_id = client.put_raw_buffer(data_buffer)
        self.control_q.put(object_id)

    def recv_bytes(self):
        """ receive data from plasma server without deserialize """
        object_id = self.control_q.get()
        # client = self.connect()
        # data_buffer = client.get_buffers([object_id])
        # client.delete([object_id])
        # Returns the raw object id plus a dummy size of 0; the caller is
        # expected to fetch/delete the buffer itself (see delete()).
        return object_id, 0

    def delete(self, object_id):
        # No-op while the plasma client path is commented out.
        # client = self.connect()
        # client.delete([object_id])
        pass

    def send_multipart(self, data_buffer):
        """ send multi-data to plasma server without serialize """
        client = self.connect()
        # queue the part count first so the receiver knows how many ids follow
        self.control_q.put(len(data_buffer))
        for _buffer in data_buffer:
            objec_id = client.put_raw_buffer(_buffer)  # NOTE(review): typo, should be object_id
            self.control_q.put(objec_id)

    def recv_multipart(self):
        """ recieve multi-data from plasma server without deserialize """
        len_data = self.control_q.get()
        object_id = []
        client = self.connect()
        for _ in range(len_data):
            _object_id = self.control_q.get()
            object_id.append(_object_id)
        data_buffer = client.get_buffers(object_id)
        client.delete(object_id)
        return data_buffer

    def start(self):
        """ start plasma server """
        try:
            # probe for an already-running store at self.path
            client = plasma.connect(self.path, int_num_retries=2)
        except:  # NOTE(review): bare except also hides genuine errors
            Popen(
                "plasma_store -m {} -s {}".format(self.size_shared_mem, self.path),
                shell=True,
                stderr=PIPE,
            )
            print(
                "plasma_store -m {} -s {} is acitvated!".format(
                    self.size_shared_mem, self.path
                )
            )
            time.sleep(0.1)  # give plasma_store a moment to come up

    def connect(self):
        """ connect to plasma server """
        # one cached plasma client per process, keyed by pid, so forked
        # workers never share a connection
        pid = os.getpid()
        if pid in self.client:
            return self.client[pid]
        else:
            self.client[pid] = plasma.connect(self.path)
            return self.client[pid]

    def close(self):
        """ close plasma server """
        os.system("pkill -9 plasma")  # NOTE(review): kills every plasma process on the host
| en | 0.647591 | #!/usr/bin/env python # from pyarrow import deserialize, plasma, serialize init plasma component send data to plasma server # client = self.connect() # data_buffer = lz4.frame.compress(serialize(data).to_buffer()) # object_id = client.put_raw_buffer(data_buffer) # del data receive data from plasma server send data to plasma server without serialize receive data from plasma server without deserialize # client = self.connect() # data_buffer = client.get_buffers([object_id]) # client.delete([object_id]) # client = self.connect() # client.delete([object_id]) send multi-data to plasma server without serialize recieve multi-data from plasma server without deserialize start plasma server connect to plasma server close plasma server | 2.20487 | 2 |
dagather/result.py | bentheiii/dagather | 2 | 6615568 | <filename>dagather/result.py
from asyncio import Task
from typing import Mapping, Any, Container
from dagather.exceptions import DiscardedTask
from dagather.tasktemplate import TaskTemplate, ContinueResult
class DagatherResult(Mapping[TaskTemplate, Any]):
    """Read-only mapping from task templates to their finished results."""

    def __init__(self, tasks: Mapping[TaskTemplate, Task], discarded: Container[TaskTemplate]):
        # templates that actually ran, and templates dropped before running
        self.tasks = tasks
        self.discarded = discarded

    def __getitem__(self, item):
        try:
            outcome = self.tasks[item].result()
        except KeyError:
            # distinguish "deliberately discarded" from an unknown template
            if item in self.discarded:
                raise DiscardedTask(item) from None
            raise
        # unwrap the sentinel wrapper used for early-continued tasks
        return outcome.return_value if isinstance(outcome, ContinueResult) else outcome

    def __iter__(self):
        yield from self.tasks

    def keys(self):
        return self.tasks.keys()

    def __len__(self):
        return len(self.tasks)

    def kwargs(self):
        """
        :return: A dict of the results as names rather than templates
        """
        return {template.name: result for template, result in self.items()}
| <filename>dagather/result.py
from asyncio import Task
from typing import Mapping, Any, Container
from dagather.exceptions import DiscardedTask
from dagather.tasktemplate import TaskTemplate, ContinueResult
class DagatherResult(Mapping[TaskTemplate, Any]):
    """Read-only mapping from task templates to their finished results."""

    def __init__(self, tasks: Mapping[TaskTemplate, Task], discarded: Container[TaskTemplate]):
        # templates that actually ran, and templates dropped before running
        self.tasks = tasks
        self.discarded = discarded

    def __getitem__(self, item):
        try:
            outcome = self.tasks[item].result()
        except KeyError:
            # distinguish "deliberately discarded" from an unknown template
            if item in self.discarded:
                raise DiscardedTask(item) from None
            raise
        # unwrap the sentinel wrapper used for early-continued tasks
        return outcome.return_value if isinstance(outcome, ContinueResult) else outcome

    def __iter__(self):
        yield from self.tasks

    def keys(self):
        return self.tasks.keys()

    def __len__(self):
        return len(self.tasks)

    def kwargs(self):
        """
        :return: A dict of the results as names rather than templates
        """
        return {template.name: result for template, result in self.items()}
| en | 0.925064 | :return: A dict of the results as names rather than templates | 2.349141 | 2 |
Day3/Grid_unique_paths.py | DsaWithPython/Striver_SDE_SHEET_python | 1 | 6615569 | class Solution:
def uniquePaths(self, m: int, n: int) -> int:
N = n + m - 2 # n-1 + m-1
r = m - 1
res = 1
for i in range(1,r+1):
res = res * (N - r + i)//i
return res
| class Solution:
    def uniquePaths(self, m: int, n: int) -> int:
        """Return the number of unique monotone paths in an m x n grid.

        Computes the binomial coefficient C(m + n - 2, m - 1)
        incrementally; multiplying before the floor division keeps every
        intermediate value an exact integer.
        """
        N = n + m - 2 # n-1 + m-1
        r = m - 1
        res = 1
        for i in range(1,r+1):
            res = res * (N - r + i)//i
        return res
| ru | 0.15091 | # n-1 + m-1 | 3.241569 | 3 |
alembic/versions/8f022f603b93_add_address_table.py | bvbgrad/betterJob | 0 | 6615570 | """Add Address table
Revision ID: 8f022f603b93
Revises: <PASSWORD>
Create Date: 2021-02-27 05:58:26.790759
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8f022f603b93'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``address`` table (street/city/state/zip + company FK column)."""
    op.create_table(
        'address',
        sa.Column('address_Id', sa.INTEGER(), nullable=False),
        sa.Column('street', sa.VARCHAR(length=50), nullable=True),
        sa.Column('city', sa.VARCHAR(length=30), nullable=True),
        sa.Column('state', sa.VARCHAR(length=2), nullable=True),
        sa.Column('zip_code', sa.VARCHAR(length=5), nullable=True),
        # NOTE(review): declared as a plain Integer with no ForeignKey
        # constraint to the company table — confirm this is intentional.
        sa.Column('company_IdFK', sa.Integer, nullable=True),
        sa.PrimaryKeyConstraint('address_Id')
    )
def downgrade():
    """Drop the ``address`` table created by ``upgrade``."""
    op.drop_table('address')
| """Add Address table
Revision ID: 8f022f603b93
Revises: <PASSWORD>
Create Date: 2021-02-27 05:58:26.790759
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8f022f603b93'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``address`` table (street/city/state/zip + company FK column)."""
    op.create_table(
        'address',
        sa.Column('address_Id', sa.INTEGER(), nullable=False),
        sa.Column('street', sa.VARCHAR(length=50), nullable=True),
        sa.Column('city', sa.VARCHAR(length=30), nullable=True),
        sa.Column('state', sa.VARCHAR(length=2), nullable=True),
        sa.Column('zip_code', sa.VARCHAR(length=5), nullable=True),
        # NOTE(review): declared as a plain Integer with no ForeignKey
        # constraint to the company table — confirm this is intentional.
        sa.Column('company_IdFK', sa.Integer, nullable=True),
        sa.PrimaryKeyConstraint('address_Id')
    )
def downgrade():
    """Drop the ``address`` table created by ``upgrade``."""
    op.drop_table('address')
| en | 0.483213 | Add Address table Revision ID: 8f022f603b93 Revises: <PASSWORD> Create Date: 2021-02-27 05:58:26.790759 # revision identifiers, used by Alembic. | 1.283951 | 1 |
python/testData/inspections/PyUnresolvedReferencesInspection/percentStringDictLiteralArgumentWithReferenceExprKeys.py | teddywest32/intellij-community | 0 | 6615571 | <gh_stars>0
f = "fst"
s = "snd"
print ("first is %(fst)s, second is %(snd)s" % {s: "3", f: "1"}) | f = "fst"
s = "snd"
print ("first is %(fst)s, second is %(snd)s" % {s: "3", f: "1"}) | none | 1 | 3.316812 | 3 | |
src/TeXTableUtil.py | Mathew-Westfields/H8-Detect | 0 | 6615572 | import pandas as pd
import numpy as np
def report_to_df(report: str):
    """Parse an sklearn ``classification_report`` string into DataFrames.

    Returns a ``(cls_df, avg_df)`` pair: per-class metrics indexed by a
    human-readable class name, and the accuracy/macro/weighted summary
    rows indexed by metric name.  Hard-wired to the three hate-speech
    classes 0/1/2.
    """
    tokens = report.split()
    class2name = {0: "hateful", 1: "offensive", 2: "neither"}
    num_of_classes = 3
    num_of_col = 5  # class id + precision/recall/f1-score/support
    header = tokens[0:4]
    cls_cols = ["class"] + header
    avg_cols = ["metric"] + header
    cls_tokens = tokens[4:4 + num_of_classes * num_of_col]
    avg_tokens = tokens[4 + num_of_classes * num_of_col:]

    def parse_class_rows(raw):
        # One row of num_of_col floats per class; swap the numeric class
        # id for its readable name afterwards.
        numbers = [float(tok) for tok in raw]
        rows = [numbers[num_of_col * i:num_of_col * (i + 1)]
                for i in range(num_of_classes)]
        for row in rows:
            row[0] = class2name[row[0]]
        return rows

    def parse_average_rows(raw):
        # Fixed layout: "accuracy" (name + 2 numbers, padded with NaN for
        # precision/recall), then "macro avg" and "weighted avg"
        # (two-word names followed by 4 numbers each).
        acc = [raw[0], np.nan, np.nan] + [float(tok) for tok in raw[1:3]]
        macro = [raw[3]] + [float(tok) for tok in raw[5:9]]
        weighted = [raw[9]] + [float(tok) for tok in raw[11:]]
        return [acc, macro, weighted]

    cls_df = pd.DataFrame(data=parse_class_rows(cls_tokens),
                          columns=cls_cols).set_index("class")
    avg_df = pd.DataFrame(data=parse_average_rows(avg_tokens),
                          columns=avg_cols).set_index("metric")
    return cls_df, avg_df
def report_to_LaTeX(report, table_name: str, mode=None):
    """Render an sklearn classification report as LaTeX ``tabular`` code.

    With ``mode="save"`` the two tables are written to
    ``../report/tables-figures/<table_name>.txt``; otherwise the
    ``(cls_TeX, avg_TeX)`` strings are returned.
    """
    cls_df, avg_df = report_to_df(report)
    # needed to add evs and resize according to linewidth in LaTeX
    begin_str = "\\resizebox{\\linewidth}{!}{\n\\begin{tabular}{c| c c c c}\n"
    end_str = "\\end{tabular}}\n"

    def build_TeX(df, index_str):
        # Header: index column name then the metric columns, joined with
        # " & " as LaTeX expects, terminated by \\ and an \hline.
        header = index_str
        for col in list(df.columns):
            header = header + " & " + str(col)
        body = header + " \\\\" + "\n" + "\\hline" + "\n"
        # One table row per DataFrame row: index value then the cells.
        for idx, series in df.iterrows():
            line = idx
            for cell in list(series):
                line = line + " & " + str(cell)
            body = body + line + " \\\\" + "\n"
        return begin_str + body + end_str

    cls_TeX = build_TeX(cls_df, "class")
    avg_TeX = build_TeX(avg_df, "metric")
    if mode == "save":  # persist next to the report sources
        with open("../report/tables-figures/" + table_name + ".txt", "w") as output_file:
            output_file.write(cls_TeX + "\n" + avg_TeX)
    else:
        return cls_TeX, avg_TeX
if __name__ == "__main__":
print("in main:")
| import pandas as pd
import numpy as np
def report_to_df(report:str):
    """Parse an sklearn ``classification_report`` string into two DataFrames.

    :param report: output of ``sklearn.metrics.classification_report`` for the
        three hate-speech classes (labels 0/1/2)
    :returns: ``(cls_df, avg_df)`` — per-class metrics indexed by class name,
        and the accuracy/macro/weighted rows indexed by metric name
    """
    tokens = report.split()
    # class ids as printed by sklearn -> human-readable names
    label_names = {0: "hateful", 1: "offensive", 2: "neither"}
    n_classes = 3
    row_width = 5  # label + precision + recall + f1-score + support
    header = tokens[0:4]
    cls_cols = ["class"] + header
    avg_cols = ["metric"] + header
    body = tokens[4:4 + n_classes * row_width]
    tail = tokens[4 + n_classes * row_width:]
    # Per-class rows: convert every token to float, then swap the numeric
    # label for its class name.
    cls_data = []
    for i in range(n_classes):
        chunk = [float(tok) for tok in body[row_width * i: row_width * (i + 1)]]
        cls_data.append([label_names[chunk[0]]] + chunk[1:])
    # Average rows are fixed-width regardless of class count.
    # "accuracy" prints only two numbers, so pad precision/recall with NaN.
    acc_row = [tail[0], np.nan, np.nan] + [float(tok) for tok in tail[1:3]]
    # "macro avg" / "weighted avg" split into two tokens; keep the first word.
    macro_row = [tail[3]] + [float(tok) for tok in tail[5:9]]
    weighted_row = [tail[9]] + [float(tok) for tok in tail[11:]]
    avg_data = [acc_row, macro_row, weighted_row]
    cls_df = pd.DataFrame(data=cls_data, columns=cls_cols).set_index("class")
    avg_df = pd.DataFrame(data=avg_data, columns=avg_cols).set_index("metric")
    return cls_df, avg_df
def report_to_LaTeX(report,table_name:str,mode=None):
    """Render an sklearn classification report as two LaTeX ``tabular`` strings.

    :param report: output of ``sklearn.metrics.classification_report``
    :param table_name: base file name used when ``mode == "save"``
    :param mode: ``"save"`` writes the tables to disk, otherwise they are returned
    :returns: ``(cls_TeX, avg_TeX)`` when ``mode != "save"``, otherwise ``None``
    """
    cls_df, avg_df = report_to_df(report)
    # resizebox keeps the table within \linewidth in the report
    prefix = "\\resizebox{\\linewidth}{!}{\n\\begin{tabular}{c| c c c c}\n"
    suffix = "\\end{tabular}}\n"
    def tabular_body(df, index_label):
        # header row: the index label followed by the column names,
        # then \hline to separate headers from values
        pieces = [" & ".join([index_label] + [str(col) for col in df.columns])]
        pieces.append(" \\\\" + "\n" + "\\hline" + "\n")
        for row_name, values in df.iterrows():
            pieces.append(row_name)
            pieces.append("".join(" & " + str(v) for v in values))
            pieces.append(" \\\\" + "\n")
        return prefix + "".join(pieces) + suffix
    cls_TeX = tabular_body(cls_df, "class")
    avg_TeX = tabular_body(avg_df, "metric")
    if mode == "save":
        # persist both tables into the report's figures directory
        with open("../report/tables-figures/"+table_name+".txt","w") as output_file:
            output_file.write(cls_TeX + "\n" + avg_TeX)
    else:
        return cls_TeX,avg_TeX
if __name__ == "__main__":
print("in main:")
| en | 0.882784 | # Takes Classification report from sklearn and parses it into a dataframe # split of column names and add column name class #this takes the precision, recall, f1 part of the table and makes it into pd.DataFrame readable data # this changes with the number of classes #basicly index stuff # change class_id to to class_name via class2name for every class # this does the same thing but with the average rows # this can be completely hardcoded since this never changes with size! # this row is different than the other two since it contains more whitespace # fixing acc_row so it looks like every other row # whitespace in "macro avg" creates two elements #put everything together so numbers are floats and name is at index 0 #this takes the report from sklearn and formats it as a LaTeX table # needed to add evs and resize according to linewidth in LaTeX #builds the inner of the tabluar env by concatenating smart and saves as txt in predetermined file # a & b & c is a row in a table with 3 columns in LaTeX # add the hline so it looks nice between column names and values #save it as a txt | 3.281704 | 3 |
1-bit_and_2-bit_characters_717.py | cthi/LeetCode | 0 | 6615573 | <filename>1-bit_and_2-bit_characters_717.py
class Solution:
    def isOneBitCharacter(self, bits):
        """
        :type bits: List[int]
        :rtype: bool

        Walk the bit stream left to right: a 0 is a one-bit character,
        a 1 starts a two-bit character (10 or 11).  Return True iff the
        final character consumed is the one-bit character.
        """
        n = len(bits)
        pos = 0
        ended_on_single = False
        while pos < n:
            if bits[pos] == 0:
                # one-bit character
                pos += 1
                ended_on_single = True
            elif bits[pos] == 1 and pos != n - 1:
                # two-bit character
                pos += 2
                ended_on_single = False
            else:
                # a lone trailing 1 (or an unexpected value) cannot be decoded
                return False
        return ended_on_single
| <filename>1-bit_and_2-bit_characters_717.py
class Solution:
    def isOneBitCharacter(self, bits):
        """
        :type bits: List[int]
        :rtype: bool
        """
        # Recursively decode the stream; 'good' records whether the character
        # consumed immediately before 'pos' was the one-bit character (a 0).
        def isOneBitCharacter_h(bits, pos, good):
            if pos == len(bits):
                # consumed everything: answer is whether the last char was one-bit
                return good
            res = False
            if bits[pos] == 0:
                # a 0 decodes as the single one-bit character
                res = res or isOneBitCharacter_h(bits, pos + 1, True)
            if bits[pos] == 1 and pos != len(bits) - 1:
                # a leading 1 starts a two-bit character (10 or 11)
                res = res or isOneBitCharacter_h(bits, pos + 2, False)
            return res
        return isOneBitCharacter_h(bits, 0, False)
| en | 0.348912 | :type bits: List[int] :rtype: bool | 3.440653 | 3 |
Applications/3D rendering/3D_render.py | MRSAIL-Mini-Robotics-Software-AI-Lab/Corona-PU | 1 | 6615574 | import sys
'''
The 3D renderer takes 2 arguments when you run the script
The first is the .obj file
The second is the name of the file to save the output in
'''
code = open(sys.argv[1], mode='r') # the .obj file to render
code_lines = []
verts = []   # vertex coordinates: [x, y, z] per 'v' record
lines = []   # wireframe edges as sorted pairs of 1-based vertex indices
for line in code:
    line = line.split()
    if not line:
        # blank lines are legal in .obj files; line[0] below would raise IndexError
        continue
    if line[0] == 'v':
        verts.append([float(line[1]),float(line[2]),float(line[3])])
    elif line[0] == 'f':
        # connect consecutive vertices of the face ('v/vt/vn' -> keep vertex index)
        for l in range(1,len(line)-1):
            to_add = [int(line[l].split('/')[0]),int(line[l+1].split('/')[0])]
            to_add.sort()
            lines.append(to_add)
        # close the polygon: last vertex back to the first
        to_add = [int(line[1].split('/')[0]),int(line[len(line)-1].split('/')[0])]
        to_add.sort()
        lines.append(to_add)
# drop duplicate edges shared by adjacent faces, preserving first-seen order
unique_lines = []
for l in lines:
    if l not in unique_lines:
        unique_lines.append(l)
lines = unique_lines
def point_create(val, x, y, z):
    """Emit the generated-language declarations for vertex *val*.

    Declares the vertex's 3D coordinates plus its two (initially zero)
    projected screen coordinates.
    """
    def literal(value):
        # the target language has no unary minus, so negatives become "0.0 - v"
        return '0.0 - ' + str(abs(value)) if value < 0 else value
    coords = {'x': literal(x), 'y': literal(y), 'z': literal(z)}
    decls = [f'fvar points_{val}_{axis} = {text} ;' for axis, text in coords.items()]
    decls.append(f'fvar projected_{val}_x = 0.0 ;')
    decls.append(f'fvar projected_{val}_y = 0.0 ;')
    return '\n'.join(decls) + '\n\n'
final_code = ''  # the generated program text, assembled incrementally below
template_1 = '@ 3D rotating cube\n\nivar background_color = 0 ;\nivar line_color = 17792 ;\n\n@ Add all points here\n\n'
final_code += template_1
# emit one declaration block per parsed vertex
for idx,v in enumerate(verts):
    to_add = point_create(idx,v[0],v[1],v[2])
    final_code += to_add
template_2 = '\n\nfvar width = 400.0 ;\nfvar height = 300.0 ;\nfvar half_screen_width = 200.0 ;\nfvar half_screen_height = 150.0 ;\nfvar temp1 = 0.0 ;\nfvar temp2 = 0.0 ;\nfvar temp3 = 0.0 ;\nfvar rotated_0 = 0.0 ;\nfvar rotated_1 = 0.0 ;\nfvar x1 = 0.0 ;\nfvar x2 = 0.0 ;\nfvar y1 = 0.0 ;\nfvar y2 = 0.0 ;\nfvar m1 = 0.0 ;\nfvar m2 = 0.0 ;\nfvar denom = 0.0 ;\nfvar b1 = 0.0 ;\nfvar b2 = 0.0 ;\nfvar s = 0.0 ;\nfvar c = 0.0 ;\nfvar g = 0.0 ;\nbvar background_y = 1 ;\nbvar background_x = 1 ;\nfvar counter_x = 0.0 ;\nfvar counter_y = 0.0 ;\nfvar counter_l = 0.0 ;\nfvar new_x = 0.0 ;\nfvar new_y = 0.0 ;\nfvar new_z = 0.0 ;\nbvar t = 0 ;\nfvar x_point = 0.0 ;\nfvar y_point = 0.0 ;\nbvar line_l = 1 ;\nfvar theta = 0.03 ;\nbvar draw_loop = 1 ;\nfvar focal_length = 75.0 ;\ns = sin theta ;\nc = cos theta ;\ns = 0.15643 ;\nc = 0.9877 ;\n\ng = 0.0 - s ;\nwhile draw_loop {\n\n @ Rotating and projecting to camera view\n\n\n'
final_code += template_2
for i in range(len(verts)):
val = i
to_add = f'@ points {val}\n temp1 = points_{val}_x * c ;\n temp3 = points_{val}_z * s ;\n rotated_0 = temp1 + temp3 ;\n new_x = rotated_0 ;\n rotated_0 = rotated_0 * focal_length ;\n projected_{val}_x = rotated_0 + half_screen_width ;\n\n rotated_1 = points_{val}_y ;\n new_y = rotated_1 ;\n rotated_1 = rotated_1 * focal_length ;\n projected_{val}_y = rotated_1 + half_screen_height ;\n\n temp1 = points_{val}_x * g ;\n temp3 = points_{val}_z * c ;\n new_z = temp1 + temp3 ;\n points_{val}_x = new_x ;\n points_{val}_y = new_y ;\n points_{val}_z = new_z ;\n\n'
final_code += to_add
template_4 = '\n @ Draw Background\n background_y = 1 ;\n background_x = 1 ;\n counter_x = 0.0 ;\n counter_y = 0.0 ;\n while background_y {\n background_x = 1 ;\n counter_x = 0.0 ;\n\n while background_x {\n @ Draw background pixel\n intf counter_y ;\n intf counter_x ;\n VRAM_Save counter_y counter_x background_color ;\n fint counter_y ;\n fint counter_x ;\n counter_x = counter_x + 1.0 ;\n background_x = counter_x < width ;\n }\n\n counter_y = counter_y + 1.0 ;\n background_y = counter_y < height ;\n }\n\n'
final_code += template_4
for l in lines:
to_add = f'@ line {l[0]-1}, {l[1]-1}\n x1 = projected_{l[0]-1}_x ;\n x2 = projected_{l[1]-1}_x ;\n y1 = projected_{l[0]-1}_y ;\n y2 = projected_{l[1]-1}_y ;\n m1 = x2 - x1 ;\n m2 = y2 - y1 ;\n m1 = m1 '
to_add_2 = f' 100.0 ;\n m2 = m2 '
to_add_3 = ' 100.0 ;\n t = m1 == 0.0 ;\n if t {\n m1 = 0.001 ;\n } else{\n\n }\n\n t = m2 == 0.0 ;\n if t {\n m2 = 0.001 ;\n } else{\n\n }\n b1 = x1 ;\n b2 = y1 ;\n counter_l = 0.0 ;\n y_point = 0.0 ;\n x_point = 0.0 ;\n line_l = 1 ;\n while line_l {\n\n y_point = m2 * counter_l ;\n y_point = y_point + b2 ;\n\n x_point = m1 * counter_l ;\n x_point = x_point + b1 ;\n @ Draw point\n\n intf y_point ;\n intf x_point ;\n VRAM_Save y_point x_point line_color ;\n fint y_point ;\n fint x_point ;\n counter_l = counter_l + 0.1 ;\n line_l = counter_l < 100.0 ;\n }\n\n'
final_code += to_add+'/'+to_add_2+'/'+to_add_3
template_3 = 'Idle ;\n}\n'
final_code += template_3
output_file = sys.argv[2]  # argv[2]: path of the generated-code output file
# use a context manager so the buffer is flushed and the handle closed;
# the original left the file open after writing
with open(output_file, mode='w+') as new_bemo_code:
    new_bemo_code.write(final_code)
| import sys
'''
The 3D renderer takes 2 arguments when you run the script
The first is the .obj file
The second is the name of the file to save the output in
'''
code = open(sys.argv[1], mode='r') # the file which contains the code
code_lines = []
verts = []
lines = []
for line in code:
line = line.split()
if line[0] == 'v':
verts.append([float(line[1]),float(line[2]),float(line[3])])
elif line[0] == 'f':
for l in range(1,len(line)-1):
to_add = [int(line[l].split('/')[0]),int(line[l+1].split('/')[0])]
to_add.sort()
lines.append(to_add)
to_add = [int(line[1].split('/')[0]),int(line[len(line)-1].split('/')[0])]
to_add.sort()
lines.append(to_add)
unique_lines = []
for l in lines:
if l not in unique_lines:
unique_lines.append(l)
lines = unique_lines
def point_create(val,x,y,z):
x_str = x
y_str = y
z_str = z
if x < 0:
x_str = '0.0 - '+str(abs(x))
if y < 0:
y_str = '0.0 - '+str(abs(y))
if z < 0:
z_str = '0.0 - '+str(abs(z))
return f'fvar points_{val}_x = {x_str} ;\nfvar points_{val}_y = {y_str} ;\nfvar points_{val}_z = {z_str} ;\nfvar projected_{val}_x = 0.0 ;\nfvar projected_{val}_y = 0.0 ;\n\n'
final_code = ''
template_1 = '@ 3D rotating cube\n\nivar background_color = 0 ;\nivar line_color = 17792 ;\n\n@ Add all points here\n\n'
final_code += template_1
for idx,v in enumerate(verts):
to_add = point_create(idx,v[0],v[1],v[2])
final_code += to_add
template_2 = '\n\nfvar width = 400.0 ;\nfvar height = 300.0 ;\nfvar half_screen_width = 200.0 ;\nfvar half_screen_height = 150.0 ;\nfvar temp1 = 0.0 ;\nfvar temp2 = 0.0 ;\nfvar temp3 = 0.0 ;\nfvar rotated_0 = 0.0 ;\nfvar rotated_1 = 0.0 ;\nfvar x1 = 0.0 ;\nfvar x2 = 0.0 ;\nfvar y1 = 0.0 ;\nfvar y2 = 0.0 ;\nfvar m1 = 0.0 ;\nfvar m2 = 0.0 ;\nfvar denom = 0.0 ;\nfvar b1 = 0.0 ;\nfvar b2 = 0.0 ;\nfvar s = 0.0 ;\nfvar c = 0.0 ;\nfvar g = 0.0 ;\nbvar background_y = 1 ;\nbvar background_x = 1 ;\nfvar counter_x = 0.0 ;\nfvar counter_y = 0.0 ;\nfvar counter_l = 0.0 ;\nfvar new_x = 0.0 ;\nfvar new_y = 0.0 ;\nfvar new_z = 0.0 ;\nbvar t = 0 ;\nfvar x_point = 0.0 ;\nfvar y_point = 0.0 ;\nbvar line_l = 1 ;\nfvar theta = 0.03 ;\nbvar draw_loop = 1 ;\nfvar focal_length = 75.0 ;\ns = sin theta ;\nc = cos theta ;\ns = 0.15643 ;\nc = 0.9877 ;\n\ng = 0.0 - s ;\nwhile draw_loop {\n\n @ Rotating and projecting to camera view\n\n\n'
final_code += template_2
for i in range(len(verts)):
val = i
to_add = f'@ points {val}\n temp1 = points_{val}_x * c ;\n temp3 = points_{val}_z * s ;\n rotated_0 = temp1 + temp3 ;\n new_x = rotated_0 ;\n rotated_0 = rotated_0 * focal_length ;\n projected_{val}_x = rotated_0 + half_screen_width ;\n\n rotated_1 = points_{val}_y ;\n new_y = rotated_1 ;\n rotated_1 = rotated_1 * focal_length ;\n projected_{val}_y = rotated_1 + half_screen_height ;\n\n temp1 = points_{val}_x * g ;\n temp3 = points_{val}_z * c ;\n new_z = temp1 + temp3 ;\n points_{val}_x = new_x ;\n points_{val}_y = new_y ;\n points_{val}_z = new_z ;\n\n'
final_code += to_add
template_4 = '\n @ Draw Background\n background_y = 1 ;\n background_x = 1 ;\n counter_x = 0.0 ;\n counter_y = 0.0 ;\n while background_y {\n background_x = 1 ;\n counter_x = 0.0 ;\n\n while background_x {\n @ Draw background pixel\n intf counter_y ;\n intf counter_x ;\n VRAM_Save counter_y counter_x background_color ;\n fint counter_y ;\n fint counter_x ;\n counter_x = counter_x + 1.0 ;\n background_x = counter_x < width ;\n }\n\n counter_y = counter_y + 1.0 ;\n background_y = counter_y < height ;\n }\n\n'
final_code += template_4
for l in lines:
to_add = f'@ line {l[0]-1}, {l[1]-1}\n x1 = projected_{l[0]-1}_x ;\n x2 = projected_{l[1]-1}_x ;\n y1 = projected_{l[0]-1}_y ;\n y2 = projected_{l[1]-1}_y ;\n m1 = x2 - x1 ;\n m2 = y2 - y1 ;\n m1 = m1 '
to_add_2 = f' 100.0 ;\n m2 = m2 '
to_add_3 = ' 100.0 ;\n t = m1 == 0.0 ;\n if t {\n m1 = 0.001 ;\n } else{\n\n }\n\n t = m2 == 0.0 ;\n if t {\n m2 = 0.001 ;\n } else{\n\n }\n b1 = x1 ;\n b2 = y1 ;\n counter_l = 0.0 ;\n y_point = 0.0 ;\n x_point = 0.0 ;\n line_l = 1 ;\n while line_l {\n\n y_point = m2 * counter_l ;\n y_point = y_point + b2 ;\n\n x_point = m1 * counter_l ;\n x_point = x_point + b1 ;\n @ Draw point\n\n intf y_point ;\n intf x_point ;\n VRAM_Save y_point x_point line_color ;\n fint y_point ;\n fint x_point ;\n counter_l = counter_l + 0.1 ;\n line_l = counter_l < 100.0 ;\n }\n\n'
final_code += to_add+'/'+to_add_2+'/'+to_add_3
template_3 = 'Idle ;\n}\n'
final_code += template_3
output_file = sys.argv[2]
new_bemo_code = open(f"{output_file}", mode='w+')
new_bemo_code.write(final_code)
| en | 0.878472 | The 3D renderer takes 2 arguments when you run the script The first is the .obj file The second is the name of the file to save the output in # the file which contains the code | 3.485282 | 3 |
extract/tests/test_extract_licenses.py | SbastianGarzon/o2r-meta | 2 | 6615575 | # pylint: skip-file
import os
import json
def test_rmd_header(script_runner, tmpdir):
    """All four license slots are extracted from a complete R Markdown header."""
    ret = script_runner.run('python3', 'o2rmeta.py', '-debug', 'extract',
                            '-i', 'extract/tests/licenses/rmd',
                            '-o', str(tmpdir),
                            '-xo', '-m')
    print(ret.stdout)
    print(ret.stderr)
    assert ret.success, "process should return success"
    assert ret.stderr == '', "stderr should be empty"
    # close the metadata file deterministically instead of leaking the handle
    with open(os.path.join(str(tmpdir), 'metadata_raw.json')) as metadata_file:
        metadata = json.load(metadata_file)
    assert "license" in metadata, "should have license entry"
    assert len(metadata['license']) == 4, "should have 4 licenses"
    assert metadata['license']['code'] == "Apache-2.0"
    assert metadata['license']['data'] == "CC0-1.0"
    assert metadata['license']['text'] == "ODbL-1.0"
    assert metadata['license']['metadata'] == "license-md.txt"
def test_rmd_header_incomplete(script_runner, tmpdir):
    """Missing data/text licenses are omitted; present ones are still extracted."""
    ret = script_runner.run('python3', 'o2rmeta.py', '-debug', 'extract',
                            '-i', 'extract/tests/licenses/rmd_incomplete',
                            '-o', str(tmpdir),
                            '-xo', '-m')
    print(ret.stdout)
    print(ret.stderr)
    assert ret.success, "process should return success"
    assert ret.stderr == '', "stderr should be empty"
    # close the metadata file deterministically instead of leaking the handle
    with open(os.path.join(str(tmpdir), 'metadata_raw.json')) as metadata_file:
        metadata = json.load(metadata_file)
    assert "license" in metadata, "should have license entry"
    assert len(metadata['license']) == 2, "should have only 2 license"
    assert "data" not in metadata['license'], "should not have license entry for data"
    assert "text" not in metadata['license'], "should not have license entry for text"
    assert metadata['license']['code'] == "Apache-2.0"
    assert metadata['license']['metadata'] == "CC0-1.0"
def test_erc_yml(script_runner, tmpdir):
    """All four license slots are extracted from an erc.yml configuration."""
    ret = script_runner.run('python3', 'o2rmeta.py', '-debug', 'extract',
                            '-i', 'extract/tests/licenses/erc_yml',
                            '-o', str(tmpdir),
                            '-xo', '-m')
    print(ret.stdout)
    print(ret.stderr)
    assert ret.success, "process should return success"
    assert ret.stderr == '', "stderr should be empty"
    # close the metadata file deterministically instead of leaking the handle
    with open(os.path.join(str(tmpdir), 'metadata_raw.json')) as metadata_file:
        metadata = json.load(metadata_file)
    assert "license" in metadata, "should have license entry"
    assert len(metadata['license']) == 4, "should have 4 licenses"
    assert metadata['license']['code'] == "Apache-2.0"
    assert metadata['license']['data'] == "ODbL-1.0"
    assert metadata['license']['text'] == "CC0-1.0"
    assert metadata['license']['metadata'] == "license-md.txt"
def test_rmd_header_default_license(script_runner, tmpdir):
    """A missing metadata license falls back to the built-in default."""
    ret = script_runner.run('python3', 'o2rmeta.py', '-debug', 'extract',
                            '-i', 'extract/tests/licenses/rmd-default',
                            '-o', str(tmpdir),
                            '-xo', '-m')
    print(ret.stdout)
    print(ret.stderr)
    assert ret.success, "process should return success"
    assert ret.stderr == '', "stderr should be empty"
    # close the metadata file deterministically instead of leaking the handle
    with open(os.path.join(str(tmpdir), 'metadata_raw.json')) as metadata_file:
        metadata = json.load(metadata_file)
    assert "license" in metadata, "should have license entry"
    assert len(metadata['license']) == 4, "should have 4 licenses"
    assert metadata['license']['metadata'] == "CC-BY-4.0"
def test_cli_define_default_license(script_runner, tmpdir):
    """The '-lic' CLI flag overrides the hard-coded default metadata license."""
    ret = script_runner.run('python3', 'o2rmeta.py', '-debug', 'extract',
                            '-i', 'extract/tests/licenses/rmd-default',
                            '-o', str(tmpdir),
                            '-lic', 'my own license',
                            '-xo', '-m')
    print(ret.stdout)
    print(ret.stderr)
    assert ret.success, "process should return success"
    assert ret.stderr == '', "stderr should be empty"
    # close the metadata file deterministically instead of leaking the handle
    with open(os.path.join(str(tmpdir), 'metadata_raw.json')) as metadata_file:
        metadata = json.load(metadata_file)
    assert "license" in metadata, "should have license entry"
    assert len(metadata['license']) == 4, "should have 4 licenses"
    assert metadata['license']['metadata'] == "my own license", "should override the hard-coded default"
import os
import json
def test_rmd_header(script_runner, tmpdir):
ret = script_runner.run('python3', 'o2rmeta.py', '-debug', 'extract',
'-i', 'extract/tests/licenses/rmd',
'-o', str(tmpdir),
'-xo', '-m')
print(ret.stdout)
print(ret.stderr)
assert ret.success, "process should return success"
assert ret.stderr == '', "stderr should be empty"
metadata = json.load(open(os.path.join(str(tmpdir), 'metadata_raw.json')))
assert "license" in metadata, "should have license entry"
assert len(metadata['license']) == 4, "should have 4 licenses"
assert metadata['license']['code'] == "Apache-2.0"
assert metadata['license']['data'] == "CC0-1.0"
assert metadata['license']['text'] == "ODbL-1.0"
assert metadata['license']['metadata'] == "license-md.txt"
def test_rmd_header_incomplete(script_runner, tmpdir):
ret = script_runner.run('python3', 'o2rmeta.py', '-debug', 'extract',
'-i', 'extract/tests/licenses/rmd_incomplete',
'-o', str(tmpdir),
'-xo', '-m')
print(ret.stdout)
print(ret.stderr)
assert ret.success, "process should return success"
assert ret.stderr == '', "stderr should be empty"
metadata = json.load(open(os.path.join(str(tmpdir), 'metadata_raw.json')))
assert "license" in metadata, "should have license entry"
assert len(metadata['license']) == 2, "should have only 2 license"
assert "data" not in metadata['license'], "should not have license entry for data"
assert "text" not in metadata['license'], "should not have license entry for text"
assert metadata['license']['code'] == "Apache-2.0"
assert metadata['license']['metadata'] == "CC0-1.0"
def test_erc_yml(script_runner, tmpdir):
ret = script_runner.run('python3', 'o2rmeta.py', '-debug', 'extract',
'-i', 'extract/tests/licenses/erc_yml',
'-o', str(tmpdir),
'-xo', '-m')
print(ret.stdout)
print(ret.stderr)
assert ret.success, "process should return success"
assert ret.stderr == '', "stderr should be empty"
metadata = json.load(open(os.path.join(str(tmpdir), 'metadata_raw.json')))
assert "license" in metadata, "should have license entry"
assert len(metadata['license']) == 4, "should have 4 licenses"
assert metadata['license']['code'] == "Apache-2.0"
assert metadata['license']['data'] == "ODbL-1.0"
assert metadata['license']['text'] == "CC0-1.0"
assert metadata['license']['metadata'] == "license-md.txt"
def test_rmd_header_default_license(script_runner, tmpdir):
ret = script_runner.run('python3', 'o2rmeta.py', '-debug', 'extract',
'-i', 'extract/tests/licenses/rmd-default',
'-o', str(tmpdir),
'-xo', '-m')
print(ret.stdout)
print(ret.stderr)
assert ret.success, "process should return success"
assert ret.stderr == '', "stderr should be empty"
metadata = json.load(open(os.path.join(str(tmpdir), 'metadata_raw.json')))
assert "license" in metadata, "should have license entry"
assert len(metadata['license']) == 4, "should have 4 licenses"
assert metadata['license']['metadata'] == "CC-BY-4.0"
def test_cli_define_default_license(script_runner, tmpdir):
ret = script_runner.run('python3', 'o2rmeta.py', '-debug', 'extract',
'-i', 'extract/tests/licenses/rmd-default',
'-o', str(tmpdir),
'-lic', 'my own license',
'-xo', '-m')
print(ret.stdout)
print(ret.stderr)
assert ret.success, "process should return success"
assert ret.stderr == '', "stderr should be empty"
metadata = json.load(open(os.path.join(str(tmpdir), 'metadata_raw.json')))
assert "license" in metadata, "should have license entry"
assert len(metadata['license']) == 4, "should have 4 licenses"
assert metadata['license']['metadata'] == "my own license", "should override the hard-coded default" | en | 0.409619 | # pylint: skip-file | 2.255009 | 2 |
plot/show-group.py | AndreasMadsen/bachelor-code | 1 | 6615576 | <filename>plot/show-group.py
import plot
import model
import dataset
import sys
import numpy as np
import os.path as path
group_id = int(sys.argv[1])  # argv[1]: index of the cluster/group to display
cluster = model.load(path.realpath(sys.argv[2]))  # argv[2]: path to a saved clustering result
# NOTE(review): assumes the clustering was built over the same first 100k articles — confirm
articles = dataset.news.fetch(100000)
# article ids belonging to this group (row group_id, first group_size entries)
nodes = cluster['group'][group_id, 0:cluster['group_size'][group_id]]
print('nodes: ', nodes)
for node_id in nodes:
    print("%6d | %s" % (node_id, articles[node_id]['title']))
| <filename>plot/show-group.py
import plot
import model
import dataset
import sys
import numpy as np
import os.path as path
group_id = int(sys.argv[1])
cluster = model.load(path.realpath(sys.argv[2]))
articles = dataset.news.fetch(100000)
nodes = cluster['group'][group_id, 0:cluster['group_size'][group_id]]
print('nodes: ', nodes)
for node_id in nodes:
print("%6d | %s" % (node_id, articles[node_id]['title']))
| none | 1 | 2.54013 | 3 | |
ibsng/handler/invoice/get_invoice_with_rule_by_invoice_i_d.py | ParspooyeshFanavar/pyibsng | 6 | 6615577 | <gh_stars>1-10
"""Get invoice with rule by invoice id API method."""
from ibsng.handler.handler import Handler
class getInvoiceWithRuleByInvoiceID(Handler):
    """Get invoice with rule by invoice id method class."""
    def control(self):
        """Validate inputs after setup method.

        Checks via the base :class:`Handler` helper ``is_valid`` that
        ``invoice_id`` is an ``int``.

        :return: None
        :rtype: None
        """
        self.is_valid(self.invoice_id, int)
    def setup(self, invoice_id):
        """Setup required parameters.

        :param int invoice_id: invoice id
        :return: None
        :rtype: None
        """
        self.invoice_id = invoice_id
| """Get invoice with rule by invoice id API method."""
from ibsng.handler.handler import Handler
class getInvoiceWithRuleByInvoiceID(Handler):
"""Get invoice with rule by invoice id method class."""
def control(self):
"""Validate inputs after setup method.
:return: None
:rtype: None
"""
self.is_valid(self.invoice_id, int)
def setup(self, invoice_id):
"""Setup required parameters.
:param int invoice_id: invoice id
:return: None
:rtype: None
"""
self.invoice_id = invoice_id | en | 0.889118 | Get invoice with rule by invoice id API method. Get invoice with rule by invoice id method class. Validate inputs after setup method. :return: None :rtype: None Setup required parameters. :param int invoice_id: invoice id :return: None :rtype: None | 2.589714 | 3 |
HRTransNet_test.py | caigentan/HRTransNet | 0 | 6615578 | <filename>HRTransNet_test.py
import torch
import torch.nn.functional as F
import sys
sys.path.append('./models')
import numpy as np
import os, argparse
import cv2
from models.model_v5 import HRTransNet
from data import test_dataset
# Command-line options for evaluation
parser = argparse.ArgumentParser()
parser.add_argument('--testsize', type=int, default=352, help='testing size')
parser.add_argument('--gpu_id', type=str, default='0', help='select gpu id')
parser.add_argument('--test_path',type=str,default='../BBS_dataset/RGBD_for_test/',help='test dataset path')
opt = parser.parse_args()
dataset_path = opt.test_path
#set device for test
# NOTE(review): CUDA_VISIBLE_DEVICES is only effective if set before CUDA is
# initialized — confirm torch has not touched the GPU before this point
if opt.gpu_id=='0':
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    print('USE GPU 0')
elif opt.gpu_id=='1':
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    print('USE GPU 1')
#load the model
model = HRTransNet()
#Large epoch size may not generalize well. You can choose a good model to load according to the log file and pth files saved in ('./BBSNet_cpts/') when training.
model.load_state_dict(torch.load('./model_pths/BBSNet.pth'))
model.cuda()
model.eval()  # inference mode: disables dropout/batch-norm updates
#test
# Run inference over each test dataset and write the predicted saliency maps.
test_datasets = ['CSSD']
for dataset in test_datasets:
    save_path = './test_maps/HRTransNet/' + dataset + '/'
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    image_root = dataset_path + dataset + '/RGB/'
    gt_root = dataset_path + dataset + '/GT/'
    test_loader = test_dataset(image_root, gt_root, opt.testsize)
    for i in range(test_loader.size):
        image, gt, depth, name, image_for_post = test_loader.load_data()
        # normalize the ground truth to [0, 1] (only its shape is used below)
        gt = np.asarray(gt, np.float32)
        gt /= (gt.max() + 1e-8)
        image = image.cuda()
        depth = depth.cuda()
        _, res = model(image, depth)
        # F.upsample is deprecated; F.interpolate is the identical replacement
        res = F.interpolate(res, size=gt.shape, mode='bilinear', align_corners=False)
        # sigmoid -> numpy, then min-max normalize before saving as an 8-bit image
        res = res.sigmoid().data.cpu().numpy().squeeze()
        res = (res - res.min()) / (res.max() - res.min() + 1e-8)
        print('save img to: ', save_path + name)
        cv2.imwrite(save_path + name, res * 255)
    print('Test Done!')
| <filename>HRTransNet_test.py
import torch
import torch.nn.functional as F
import sys
sys.path.append('./models')
import numpy as np
import os, argparse
import cv2
from models.model_v5 import HRTransNet
from data import test_dataset
parser = argparse.ArgumentParser()
parser.add_argument('--testsize', type=int, default=352, help='testing size')
parser.add_argument('--gpu_id', type=str, default='0', help='select gpu id')
parser.add_argument('--test_path',type=str,default='../BBS_dataset/RGBD_for_test/',help='test dataset path')
opt = parser.parse_args()
dataset_path = opt.test_path
#set device for test
if opt.gpu_id=='0':
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
print('USE GPU 0')
elif opt.gpu_id=='1':
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
print('USE GPU 1')
#load the model
model = HRTransNet()
#Large epoch size may not generalize well. You can choose a good model to load according to the log file and pth files saved in ('./BBSNet_cpts/') when training.
model.load_state_dict(torch.load('./model_pths/BBSNet.pth'))
model.cuda()
model.eval()
#test
test_datasets = ['CSSD']
for dataset in test_datasets:
save_path = './test_maps/HRTransNet/' + dataset + '/'
if not os.path.exists(save_path):
os.makedirs(save_path)
image_root = dataset_path + dataset + '/RGB/'
gt_root = dataset_path + dataset + '/GT/'
test_loader = test_dataset(image_root, gt_root, opt.testsize)
for i in range(test_loader.size):
image, gt,depth, name, image_for_post = test_loader.load_data()
gt = np.asarray(gt, np.float32)
gt /= (gt.max() + 1e-8)
image = image.cuda()
depth = depth.cuda()
_,res = model(image,depth)
res = F.upsample(res, size=gt.shape, mode='bilinear', align_corners=False)
res = res.sigmoid().data.cpu().numpy().squeeze()
res = (res - res.min()) / (res.max() - res.min() + 1e-8)
print('save img to: ',save_path+name)
cv2.imwrite(save_path+name,res*255)
print('Test Done!')
| en | 0.886484 | #set device for test #load the model #Large epoch size may not generalize well. You can choose a good model to load according to the log file and pth files saved in ('./BBSNet_cpts/') when training. #test | 2.455412 | 2 |
databuilder/databuilder/extractor/csv_extractor.py | wey-gu/amundsen | 0 | 6615579 | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import csv
import importlib
from collections import defaultdict
from typing import Any, List
from pyhocon import ConfigTree
from databuilder.extractor.base_extractor import Extractor
from databuilder.models.badge import Badge, BadgeMetadata
from databuilder.models.table_lineage import ColumnLineage, TableLineage
from databuilder.models.table_metadata import ColumnMetadata, TableMetadata
from databuilder.models.query.query import QueryMetadata
from databuilder.models.query.query_join import QueryJoinMetadata
from databuilder.models.query.query_execution import QueryExecutionsMetadata
from databuilder.models.query.query_where import QueryWhereMetadata
from databuilder.models.user import User as UserMetadata
def split_badge_list(badges: str, separator: str) -> List[str]:
    """Split a separator-delimited badge string, dropping empty entries.

    Returns an empty list when *badges* is ``None``.
    """
    if badges is None:
        return []
    # filter(None, ...) removes the empty strings produced by
    # leading/trailing/doubled separators
    return list(filter(None, badges.split(separator)))
def split_table_list(tables: str, separator: str) -> List[str]:
    """Split a separator-delimited table string, dropping empty entries.

    Returns an empty list when *tables* is ``None``.
    """
    parts = [] if tables is None else tables.split(separator)
    return [entry for entry in parts if entry]
class CsvExtractor(Extractor):
    """
    An Extractor that extracts records via CSV.
    """
    # Config keys
    FILE_LOCATION = 'file_location'  # pyhocon key: path of the CSV file to read
    def init(self, conf: ConfigTree) -> None:
        """Read the configuration and eagerly load the CSV rows.

        :param conf: must provide ``file_location``; may provide
            ``model_class``, a dotted path to a class each row dict is
            unpacked into (``cls(**row)``)
        """
        self.conf = conf
        self.file_location = conf.get_string(CsvExtractor.FILE_LOCATION)
        model_class = conf.get('model_class', None)
        if model_class:
            # dynamically resolve "package.module.ClassName"
            module_name, class_name = model_class.rsplit(".", 1)
            mod = importlib.import_module(module_name)
            self.model_class = getattr(mod, class_name)
        self._load_csv()
    def _load_csv(self) -> None:
        """
        Read the CSV file (once) and build the record iterator,
        wrapping each row into ``model_class`` when one was configured.
        """
        # hasattr guard keeps the raw rows cached across repeated calls
        if not hasattr(self, 'results'):
            with open(self.file_location, 'r') as fin:
                self.results = [dict(i) for i in csv.DictReader(fin)]
        if hasattr(self, 'model_class'):
            results = [self.model_class(**result)
                       for result in self.results]
        else:
            results = self.results
        self.iter = iter(results)
    def extract(self) -> Any:
        """
        Yield the csv result one at a time.
        convert the result to model if a model_class is provided
        """
        try:
            return next(self.iter)
        except StopIteration:
            # end of input: the databuilder loader treats None as exhaustion
            return None
        except Exception as e:
            raise e
    def get_scope(self) -> str:
        # namespace used to scope this extractor's configuration
        return 'extractor.csv'
class CsvTableBadgeExtractor(Extractor):
    """
    An Extractor that combines Table and Badge CSVs.
    """
    # Config keys
    TABLE_FILE_LOCATION = 'table_file_location'  # CSV of tables (database/cluster/schema/name)
    BADGE_FILE_LOCATION = 'badge_file_location'  # CSV of badges keyed by the same table coordinates
    BADGE_SEPARATOR = 'badge_separator'          # delimiter inside a badge 'name' cell
    def init(self, conf: ConfigTree) -> None:
        # Read configuration and eagerly join badges onto tables.
        self.conf = conf
        self.table_file_location = conf.get_string(CsvTableBadgeExtractor.TABLE_FILE_LOCATION)
        self.badge_file_location = conf.get_string(CsvTableBadgeExtractor.BADGE_FILE_LOCATION)
        self.badge_separator = conf.get_string(CsvTableBadgeExtractor.BADGE_SEPARATOR, default=',')
        self._load_csv()
    def _get_key(self,
                 db: str,
                 cluster: str,
                 schema: str,
                 tbl: str
                 ) -> str:
        # Canonical table key used to match badge rows to table rows.
        return TableMetadata.TABLE_KEY_FORMAT.format(db=db,
                                                     cluster=cluster,
                                                     schema=schema,
                                                     tbl=tbl)
    def _load_csv(self) -> None:
        # Pass 1: group parsed badges by their table key.
        with open(self.badge_file_location, 'r') as fin:
            self.badges = [dict(i) for i in csv.DictReader(fin)]
        parsed_badges = defaultdict(list)
        for badge_dict in self.badges:
            db = badge_dict['database']
            cluster = badge_dict['cluster']
            schema = badge_dict['schema']
            table_name = badge_dict['table_name']
            # NOTE(review): 'id' shadows the builtin of the same name
            id = self._get_key(db, cluster, schema, table_name)
            split_badges = split_badge_list(badges=badge_dict['name'],
                                            separator=self.badge_separator)
            for badge_name in split_badges:
                badge = Badge(name=badge_name, category=badge_dict['category'])
                parsed_badges[id].append(badge)
        # Pass 2: emit one BadgeMetadata per table, attaching its badges.
        with open(self.table_file_location, 'r') as fin:
            tables = [dict(i) for i in csv.DictReader(fin)]
        results = []
        for table_dict in tables:
            db = table_dict['database']
            cluster = table_dict['cluster']
            schema = table_dict['schema']
            table_name = table_dict['name']
            id = self._get_key(db, cluster, schema, table_name)
            badges = parsed_badges[id]
            # defensive guard; a defaultdict lookup never returns None in practice
            if badges is None:
                badges = []
            badge_metadata = BadgeMetadata(start_label=TableMetadata.TABLE_NODE_LABEL,
                                           start_key=id,
                                           badges=badges)
            results.append(badge_metadata)
        self._iter = iter(results)
    def extract(self) -> Any:
        """
        Yield one BadgeMetadata record at a time; None once exhausted.
        """
        try:
            return next(self._iter)
        except StopIteration:
            return None
        except Exception as e:
            raise e
    def get_scope(self) -> str:
        # namespace used to scope this extractor's configuration
        return 'extractor.csvtablebadge'
class CsvTableColumnExtractor(Extractor):
    """
    An Extractor that combines Table and Column CSVs, emitting one
    TableMetadata per table row with its parsed ColumnMetadata attached.
    """
    # Config keys
    TABLE_FILE_LOCATION = 'table_file_location'
    COLUMN_FILE_LOCATION = 'column_file_location'
    BADGE_SEPARATOR = 'badge_separator'

    def init(self, conf: ConfigTree) -> None:
        """
        :param conf: configuration with the table/column CSV file locations
            and an optional badge separator (default ',').
        """
        self.conf = conf
        self.table_file_location = conf.get_string(CsvTableColumnExtractor.TABLE_FILE_LOCATION)
        self.column_file_location = conf.get_string(CsvTableColumnExtractor.COLUMN_FILE_LOCATION)
        # Fix: read the separator via this class's own config constant; it
        # previously referenced CsvTableBadgeExtractor's identically-valued key.
        self.badge_separator = conf.get_string(CsvTableColumnExtractor.BADGE_SEPARATOR, default=',')
        self._load_csv()

    def _get_key(self,
                 db: str,
                 cluster: str,
                 schema: str,
                 tbl: str
                 ) -> str:
        # Canonical table key used to join column rows to their table rows.
        return TableMetadata.TABLE_KEY_FORMAT.format(db=db,
                                                     cluster=cluster,
                                                     schema=schema,
                                                     tbl=tbl)

    def _load_csv(self) -> None:
        """
        Read both CSVs and build an iterator of TableMetadata records.
        """
        with open(self.column_file_location, 'r') as fin:
            self.columns = [dict(i) for i in csv.DictReader(fin)]

        # Group columns by their owning table's key.
        parsed_columns = defaultdict(list)
        for column_dict in self.columns:
            id = self._get_key(column_dict['database'],
                               column_dict['cluster'],
                               column_dict['schema'],
                               column_dict['table_name'])
            split_badges = split_badge_list(badges=column_dict['badges'],
                                            separator=self.badge_separator)
            column = ColumnMetadata(
                name=column_dict['name'],
                description=column_dict['description'],
                col_type=column_dict['col_type'],
                sort_order=int(column_dict['sort_order']),
                badges=split_badges
            )
            parsed_columns[id].append(column)

        with open(self.table_file_location, 'r') as fin:
            tables = [dict(i) for i in csv.DictReader(fin)]

        results = []
        for table_dict in tables:
            id = self._get_key(table_dict['database'],
                               table_dict['cluster'],
                               table_dict['schema'],
                               table_dict['name'])
            table = TableMetadata(database=table_dict['database'],
                                  cluster=table_dict['cluster'],
                                  schema=table_dict['schema'],
                                  name=table_dict['name'],
                                  description=table_dict['description'],
                                  # defaultdict yields [] for tables with no columns
                                  columns=parsed_columns[id],
                                  # TODO: this possibly should parse stringified booleans;
                                  # right now it only will be false for empty strings
                                  is_view=bool(table_dict['is_view']),
                                  tags=table_dict['tags']
                                  )
            results.append(table)
        self._iter = iter(results)

    def extract(self) -> Any:
        """
        Return the next TableMetadata record, or None when exhausted.
        """
        try:
            return next(self._iter)
        except StopIteration:
            return None

    def get_scope(self) -> str:
        return 'extractor.csvtablecolumn'
class CsvTableLineageExtractor(Extractor):
    """
    An Extractor that creates Table Lineage between two tables:
    one TableLineage record per row of the lineage CSV.
    """
    # Config keys
    TABLE_LINEAGE_FILE_LOCATION = 'table_lineage_file_location'

    def init(self, conf: ConfigTree) -> None:
        """
        :param conf: configuration containing 'table_lineage_file_location'.
        """
        self.conf = conf
        self.table_lineage_file_location = conf.get_string(CsvTableLineageExtractor.TABLE_LINEAGE_FILE_LOCATION)
        self._load_csv()

    def _load_csv(self) -> None:
        """
        Build an iterator of TableLineage records from the lineage CSV.
        """
        with open(self.table_lineage_file_location, 'r') as fin:
            self.table_lineage = [dict(i) for i in csv.DictReader(fin)]

        results = []
        for lineage_dict in self.table_lineage:
            # Each row links one upstream table key to one downstream table key.
            lineage = TableLineage(
                table_key=lineage_dict['source_table_key'],
                downstream_deps=[lineage_dict['target_table_key']]
            )
            results.append(lineage)
        self._iter = iter(results)

    def extract(self) -> Any:
        """
        Return the next TableLineage record, or None when exhausted.
        """
        try:
            return next(self._iter)
        except StopIteration:
            return None

    def get_scope(self) -> str:
        return 'extractor.csvtablelineage'
class CsvColumnLineageExtractor(Extractor):
    # Config keys
    COLUMN_LINEAGE_FILE_LOCATION = 'column_lineage_file_location'
    """
    An Extractor that creates Column Lineage between two columns
    """
    def init(self, conf: ConfigTree) -> None:
        """
        :param conf: configuration containing 'column_lineage_file_location'.
        """
        self.conf = conf
        self.column_lineage_file_location = conf.get_string(CsvColumnLineageExtractor.COLUMN_LINEAGE_FILE_LOCATION)
        self._load_csv()

    def _load_csv(self) -> None:
        """
        Build an iterator of ColumnLineage records from the lineage CSV.
        """
        with open(self.column_lineage_file_location, 'r') as csv_file:
            self.column_lineage = [dict(row) for row in csv.DictReader(csv_file)]

        # One ColumnLineage per CSV row: source column -> target column.
        parsed = [
            ColumnLineage(
                column_key=entry['source_column_key'],
                downstream_deps=[entry['target_column_key']]
            )
            for entry in self.column_lineage
        ]
        self._iter = iter(parsed)

    def extract(self) -> Any:
        """
        Return the next ColumnLineage record, or None once exhausted.
        """
        try:
            return next(self._iter)
        except StopIteration:
            return None
        except Exception as err:
            raise err

    def get_scope(self) -> str:
        return 'extractor.csvcolumnlineage'
class CsvTableQueryExtractor(Extractor):
    # Config keys
    TABLE_FILE_LOCATION = 'table_file_location'
    COLUMN_FILE_LOCATION = 'column_file_location'
    QUERY_FILE_LOCATION = 'query_file_location'
    USER_FILE_LOCATION = 'user_file_location'
    BADGE_SEPARATOR = 'badge_separator'
    TABLE_SEPARATOR = 'table_separator'
    # NOTE(review): the literal below is not a real class docstring because it
    # is not the first statement in the class body; __doc__ stays None.
    """
    An Extractor that combines Table, Column, User and Query CSVs.
    """
    def init(self, conf: ConfigTree) -> None:
        """
        Read the configured CSV locations/separators and eagerly load them.

        :param conf: configuration with the user/column/table/query CSV file
            locations plus optional badge and table separators (default ',').
        """
        self.conf = conf
        self.user_file_location = conf.get_string(CsvTableQueryExtractor.USER_FILE_LOCATION)
        self.column_file_location = conf.get_string(CsvTableQueryExtractor.COLUMN_FILE_LOCATION)
        self.table_file_location = conf.get_string(CsvTableQueryExtractor.TABLE_FILE_LOCATION)
        self.query_file_location = conf.get_string(CsvTableQueryExtractor.QUERY_FILE_LOCATION)
        self.badge_separator = conf.get_string(CsvTableQueryExtractor.BADGE_SEPARATOR, default=',')
        self.table_separator = conf.get_string(CsvTableQueryExtractor.TABLE_SEPARATOR, default=',')
        self._load_csv()

    def _get_key(self,
                 db: str,
                 cluster: str,
                 schema: str,
                 tbl: str
                 ) -> str:
        # Canonical table key used to join column rows and query rows to tables.
        return TableMetadata.TABLE_KEY_FORMAT.format(db=db,
                                                     cluster=cluster,
                                                     schema=schema,
                                                     tbl=tbl)

    def _load_csv(self) -> None:
        """
        Load all four CSVs and build an iterator of QueryMetadata records,
        one per query row.
        """
        # Users keyed by email; query rows reference them via their 'user' column.
        with open(self.user_file_location, 'r') as fin:
            self.users = [dict(i) for i in csv.DictReader(fin)]
        parsed_users = dict()
        for user_dict in self.users:
            user = UserMetadata(
                email=user_dict['email'],
                first_name=user_dict['first_name'],
                last_name=user_dict['last_name'],
                full_name=user_dict['full_name'],
                github_username=user_dict['github_username'],
                team_name=user_dict['team_name'],
                employee_type=user_dict['employee_type'],
                manager_email=user_dict['manager_email'],
                slack_id=user_dict['slack_id'],
                role_name=user_dict['role_name']
            )
            parsed_users[user_dict['email']] = user

        # Columns grouped by their owning table's key.
        with open(self.column_file_location, 'r') as fin:
            self.columns = [dict(i) for i in csv.DictReader(fin)]
        parsed_columns = defaultdict(list)
        for column_dict in self.columns:
            db = column_dict['database']
            cluster = column_dict['cluster']
            schema = column_dict['schema']
            table_name = column_dict['table_name']
            id = self._get_key(db, cluster, schema, table_name)
            split_badges = split_badge_list(badges=column_dict['badges'],
                                            separator=self.badge_separator)
            column = ColumnMetadata(
                name=column_dict['name'],
                description=column_dict['description'],
                col_type=column_dict['col_type'],
                sort_order=int(column_dict['sort_order']),
                badges=split_badges
            )
            parsed_columns[id].append(column)

        # Create Table Dictionary keyed by table key, with columns attached.
        with open(self.table_file_location, 'r') as fin:
            self.tables = [dict(i) for i in csv.DictReader(fin)]
        parsed_tables = dict()
        for table_dict in self.tables:
            db = table_dict['database']
            cluster = table_dict['cluster']
            schema = table_dict['schema']
            table_name = table_dict['name']
            id = self._get_key(db, cluster, schema, table_name)
            columns = parsed_columns[id]
            if columns is None:
                # Defensive only: defaultdict already yields [] for unknown keys.
                columns = []
            table = TableMetadata(database=table_dict['database'],
                                  cluster=table_dict['cluster'],
                                  schema=table_dict['schema'],
                                  name=table_dict['name'],
                                  description=table_dict['description'],
                                  columns=columns,
                                  # TODO: this possibly should parse stringified booleans;
                                  # right now it only will be false for empty strings
                                  is_view=bool(table_dict['is_view']),
                                  tags=table_dict['tags']
                                  )
            parsed_tables[id] = table

        # One QueryMetadata per query row, resolving its user by email and its
        # tables by the separator-delimited list of table keys.
        with open(self.query_file_location, 'r') as fin:
            self.queries = [dict(i) for i in csv.DictReader(fin)]
        results = []
        for query_dict in self.queries:
            sql = query_dict['sql']
            user = parsed_users[query_dict['user']]
            split_tables = split_table_list(tables=query_dict['tables'],
                                            separator=self.table_separator)
            tables = [parsed_tables[t] for t in split_tables]
            query = QueryMetadata(
                sql=sql,
                tables=tables,
                user=user)
            results.append(query)
        self._iter = iter(results)

    def extract(self) -> Any:
        """
        Return the next QueryMetadata record, or None when exhausted.
        """
        try:
            return next(self._iter)
        except StopIteration:
            return None
        except Exception as e:
            raise e

    def get_scope(self) -> str:
        return 'extractor.csvtablequery'
class CsvTableQueryJoinExtractor(Extractor):
    # Config keys
    TABLE_FILE_LOCATION = 'table_file_location'
    COLUMN_FILE_LOCATION = 'column_file_location'
    QUERY_FILE_LOCATION = 'query_file_location'
    USER_FILE_LOCATION = 'user_file_location'
    JOIN_FILE_LOCATION = 'join_file_location'
    BADGE_SEPARATOR = 'badge_separator'
    TABLE_SEPARATOR = 'table_separator'
    # NOTE(review): the literal below is not a real class docstring because it
    # is not the first statement in the class body; __doc__ stays None.
    """
    An Extractor that combines Table, Column, User and Query CSVs.
    """
    def init(self, conf: ConfigTree) -> None:
        """
        Read the configured CSV locations/separators and eagerly load them.

        :param conf: configuration with the user/column/table/query/join CSV
            locations plus optional badge and table separators (default ',').
        """
        self.conf = conf
        self.user_file_location = conf.get_string(CsvTableQueryJoinExtractor.USER_FILE_LOCATION)
        self.column_file_location = conf.get_string(CsvTableQueryJoinExtractor.COLUMN_FILE_LOCATION)
        self.table_file_location = conf.get_string(CsvTableQueryJoinExtractor.TABLE_FILE_LOCATION)
        self.query_file_location = conf.get_string(CsvTableQueryJoinExtractor.QUERY_FILE_LOCATION)
        self.join_file_location = conf.get_string(CsvTableQueryJoinExtractor.JOIN_FILE_LOCATION)
        self.badge_separator = conf.get_string(CsvTableQueryJoinExtractor.BADGE_SEPARATOR, default=',')
        self.table_separator = conf.get_string(CsvTableQueryJoinExtractor.TABLE_SEPARATOR, default=',')
        self._load_csv()

    def _get_key(self,
                 db: str,
                 cluster: str,
                 schema: str,
                 tbl: str
                 ) -> str:
        # Canonical table key used to join column/table/join rows together.
        return TableMetadata.TABLE_KEY_FORMAT.format(db=db,
                                                     cluster=cluster,
                                                     schema=schema,
                                                     tbl=tbl)

    def _load_csv(self) -> None:
        """
        Load all five CSVs and build an iterator of QueryJoinMetadata records,
        one per join row.
        """
        # Users keyed by email; query rows reference them via their 'user' column.
        with open(self.user_file_location, 'r') as fin:
            self.users = [dict(i) for i in csv.DictReader(fin)]
        parsed_users = dict()
        for user_dict in self.users:
            user = UserMetadata(
                email=user_dict['email'],
                first_name=user_dict['first_name'],
                last_name=user_dict['last_name'],
                full_name=user_dict['full_name'],
                github_username=user_dict['github_username'],
                team_name=user_dict['team_name'],
                employee_type=user_dict['employee_type'],
                manager_email=user_dict['manager_email'],
                slack_id=user_dict['slack_id'],
                role_name=user_dict['role_name']
            )
            parsed_users[user_dict['email']] = user

        with open(self.column_file_location, 'r') as fin:
            self.columns = [dict(i) for i in csv.DictReader(fin)]
        # Create Column Dictionary:
        # - parsed_columns groups columns per table key;
        # - parsed_columns_dict additionally indexes each column by
        #   '<table key>/<column name>' for the join rows'
        #   left_column/right_column references.
        parsed_columns = defaultdict(list)
        parsed_columns_dict = dict()
        for column_dict in self.columns:
            db = column_dict['database']
            cluster = column_dict['cluster']
            schema = column_dict['schema']
            table_name = column_dict['table_name']
            id = self._get_key(db, cluster, schema, table_name)
            split_badges = split_badge_list(badges=column_dict['badges'],
                                            separator=self.badge_separator)
            column = ColumnMetadata(
                name=column_dict['name'],
                description=column_dict['description'],
                col_type=column_dict['col_type'],
                sort_order=int(column_dict['sort_order']),
                badges=split_badges
            )
            parsed_columns[id].append(column)
            parsed_columns_dict[f'{ id }/{ column_dict["name"] }'] = column

        # Create Table Dictionary keyed by table key, with columns attached.
        with open(self.table_file_location, 'r') as fin:
            self.tables = [dict(i) for i in csv.DictReader(fin)]
        parsed_tables = dict()
        for table_dict in self.tables:
            db = table_dict['database']
            cluster = table_dict['cluster']
            schema = table_dict['schema']
            table_name = table_dict['name']
            id = self._get_key(db, cluster, schema, table_name)
            columns = parsed_columns[id]
            if columns is None:
                # Defensive only: defaultdict already yields [] for unknown keys.
                columns = []
            table = TableMetadata(database=table_dict['database'],
                                  cluster=table_dict['cluster'],
                                  schema=table_dict['schema'],
                                  name=table_dict['name'],
                                  description=table_dict['description'],
                                  columns=columns,
                                  # TODO: this possibly should parse stringified booleans;
                                  # right now it only will be false for empty strings
                                  is_view=bool(table_dict['is_view']),
                                  tags=table_dict['tags']
                                  )
            parsed_tables[id] = table

        # Create Query Dictionary keyed by the raw SQL text; join rows
        # reference their query via the 'join_sql' column.
        with open(self.query_file_location, 'r') as fin:
            self.queries = [dict(i) for i in csv.DictReader(fin)]
        parsed_queries = {}
        for query_dict in self.queries:
            sql = query_dict['sql']
            user = parsed_users[query_dict['user']]
            split_tables = split_table_list(tables=query_dict['tables'],
                                            separator=self.table_separator)
            tables = [parsed_tables[t] for t in split_tables]
            query = QueryMetadata(
                sql=sql,
                tables=tables,
                user=user)
            parsed_queries[sql] = query

        # One QueryJoinMetadata per join row, resolving tables by table key
        # and columns by '<table key>/<column name>'.
        with open(self.join_file_location, 'r') as fin:
            self.joins = [dict(i) for i in csv.DictReader(fin)]
        results = []
        for join_dict in self.joins:
            join = QueryJoinMetadata(
                left_table=parsed_tables[join_dict['left_table']],
                right_table=parsed_tables[join_dict['right_table']],
                left_column=parsed_columns_dict[join_dict['left_column']],
                right_column=parsed_columns_dict[join_dict['right_column']],
                join_type=join_dict['join_type'],
                join_operator=join_dict['join_operator'],
                join_sql=join_dict['join_sql'],
                query_metadata=parsed_queries[join_dict['join_sql']])
            results.append(join)
        self._iter = iter(results)

    def extract(self) -> Any:
        """
        Return the next QueryJoinMetadata record, or None when exhausted.
        """
        try:
            return next(self._iter)
        except StopIteration:
            return None
        except Exception as e:
            raise e

    def get_scope(self) -> str:
        return 'extractor.csvtablequeryjoin'
class CsvTableQueryWhereExtractor(Extractor):
    # Config keys
    TABLE_FILE_LOCATION = 'table_file_location'
    COLUMN_FILE_LOCATION = 'column_file_location'
    QUERY_FILE_LOCATION = 'query_file_location'
    USER_FILE_LOCATION = 'user_file_location'
    WHERE_FILE_LOCATION = 'where_file_location'
    BADGE_SEPARATOR = 'badge_separator'
    TABLE_SEPARATOR = 'table_separator'
    # NOTE(review): the literal below is not a real class docstring because it
    # is not the first statement in the class body; __doc__ stays None.
    """
    An Extractor that combines Table, Column, User and Query CSVs.
    """
    def init(self, conf: ConfigTree) -> None:
        """
        Read the configured CSV locations/separators and eagerly load them.

        :param conf: configuration with the user/column/table/query/where CSV
            locations plus optional badge and table separators (default ',').
        """
        self.conf = conf
        self.user_file_location = conf.get_string(CsvTableQueryWhereExtractor.USER_FILE_LOCATION)
        self.column_file_location = conf.get_string(CsvTableQueryWhereExtractor.COLUMN_FILE_LOCATION)
        self.table_file_location = conf.get_string(CsvTableQueryWhereExtractor.TABLE_FILE_LOCATION)
        self.query_file_location = conf.get_string(CsvTableQueryWhereExtractor.QUERY_FILE_LOCATION)
        self.where_file_location = conf.get_string(CsvTableQueryWhereExtractor.WHERE_FILE_LOCATION)
        self.badge_separator = conf.get_string(CsvTableQueryWhereExtractor.BADGE_SEPARATOR, default=',')
        self.table_separator = conf.get_string(CsvTableQueryWhereExtractor.TABLE_SEPARATOR, default=',')
        self._load_csv()

    def _get_key(self,
                 db: str,
                 cluster: str,
                 schema: str,
                 tbl: str
                 ) -> str:
        # Canonical table key used to join column rows and query rows to tables.
        return TableMetadata.TABLE_KEY_FORMAT.format(db=db,
                                                     cluster=cluster,
                                                     schema=schema,
                                                     tbl=tbl)

    def _load_csv(self) -> None:
        """
        Load all five CSVs and build an iterator of QueryWhereMetadata records,
        one per where-clause row.
        """
        # Users keyed by email; query rows reference them via their 'user' column.
        with open(self.user_file_location, 'r') as fin:
            self.users = [dict(i) for i in csv.DictReader(fin)]
        parsed_users = dict()
        for user_dict in self.users:
            user = UserMetadata(
                email=user_dict['email'],
                first_name=user_dict['first_name'],
                last_name=user_dict['last_name'],
                full_name=user_dict['full_name'],
                github_username=user_dict['github_username'],
                team_name=user_dict['team_name'],
                employee_type=user_dict['employee_type'],
                manager_email=user_dict['manager_email'],
                slack_id=user_dict['slack_id'],
                role_name=user_dict['role_name']
            )
            parsed_users[user_dict['email']] = user

        with open(self.column_file_location, 'r') as fin:
            self.columns = [dict(i) for i in csv.DictReader(fin)]
        # Create Column Dictionary:
        # - parsed_columns groups columns per table key;
        # - parsed_columns_dict indexes each column by
        #   '<table key>/<column name>' (not consumed by the where rows below,
        #   which mirrors the join extractor's structure).
        parsed_columns = defaultdict(list)
        parsed_columns_dict = dict()
        for column_dict in self.columns:
            db = column_dict['database']
            cluster = column_dict['cluster']
            schema = column_dict['schema']
            table_name = column_dict['table_name']
            id = self._get_key(db, cluster, schema, table_name)
            split_badges = split_badge_list(badges=column_dict['badges'],
                                            separator=self.badge_separator)
            column = ColumnMetadata(
                name=column_dict['name'],
                description=column_dict['description'],
                col_type=column_dict['col_type'],
                sort_order=int(column_dict['sort_order']),
                badges=split_badges
            )
            parsed_columns[id].append(column)
            parsed_columns_dict[f'{ id }/{ column_dict["name"] }'] = column

        # Create Table Dictionary keyed by table key, with columns attached.
        with open(self.table_file_location, 'r') as fin:
            self.tables = [dict(i) for i in csv.DictReader(fin)]
        parsed_tables = dict()
        for table_dict in self.tables:
            db = table_dict['database']
            cluster = table_dict['cluster']
            schema = table_dict['schema']
            table_name = table_dict['name']
            id = self._get_key(db, cluster, schema, table_name)
            columns = parsed_columns[id]
            if columns is None:
                # Defensive only: defaultdict already yields [] for unknown keys.
                columns = []
            table = TableMetadata(database=table_dict['database'],
                                  cluster=table_dict['cluster'],
                                  schema=table_dict['schema'],
                                  name=table_dict['name'],
                                  description=table_dict['description'],
                                  columns=columns,
                                  # TODO: this possibly should parse stringified booleans;
                                  # right now it only will be false for empty strings
                                  is_view=bool(table_dict['is_view']),
                                  tags=table_dict['tags']
                                  )
            parsed_tables[id] = table

        # Create Query Dictionary keyed by the raw SQL text; where rows
        # reference their query via the 'sql' column.
        with open(self.query_file_location, 'r') as fin:
            self.queries = [dict(i) for i in csv.DictReader(fin)]
        parsed_queries = {}
        for query_dict in self.queries:
            sql = query_dict['sql']
            user = parsed_users[query_dict['user']]
            split_tables = split_table_list(tables=query_dict['tables'],
                                            separator=self.table_separator)
            tables = [parsed_tables[t] for t in split_tables]
            query = QueryMetadata(
                sql=sql,
                tables=tables,
                user=user)
            parsed_queries[sql] = query

        # One QueryWhereMetadata per where row, inheriting the referenced
        # query's tables.
        with open(self.where_file_location, 'r') as fin:
            self.wheres = [dict(i) for i in csv.DictReader(fin)]
        results = []
        for where_dict in self.wheres:
            query_metadata = parsed_queries[where_dict['sql']]
            where = QueryWhereMetadata(
                tables=query_metadata.tables,
                where_clause=where_dict['where_clause'],
                left_arg=where_dict['left_arg'],
                right_arg=where_dict['right_arg'],
                operator=where_dict['operator'],
                query_metadata=query_metadata)
            results.append(where)
        self._iter = iter(results)

    def extract(self) -> Any:
        """
        Return the next QueryWhereMetadata record, or None when exhausted.
        """
        try:
            return next(self._iter)
        except StopIteration:
            return None
        except Exception as e:
            raise e

    def get_scope(self) -> str:
        return 'extractor.csvtablequerywhere'
class CsvTableQueryExecutionExtractor(Extractor):
    # Config keys
    TABLE_FILE_LOCATION = 'table_file_location'
    COLUMN_FILE_LOCATION = 'column_file_location'
    QUERY_FILE_LOCATION = 'query_file_location'
    USER_FILE_LOCATION = 'user_file_location'
    EXECUTION_FILE_LOCATION = 'execution_file_location'
    BADGE_SEPARATOR = 'badge_separator'
    TABLE_SEPARATOR = 'table_separator'
    # NOTE(review): the literal below is not a real class docstring because it
    # is not the first statement in the class body; __doc__ stays None.
    """
    An Extractor that combines Table, Column, User and Query CSVs.
    """
    def init(self, conf: ConfigTree) -> None:
        """
        Read the configured CSV locations/separators and eagerly load them.

        :param conf: configuration with the user/column/table/query/execution
            CSV locations plus optional badge/table separators (default ',').
        """
        self.conf = conf
        self.user_file_location = conf.get_string(CsvTableQueryExecutionExtractor.USER_FILE_LOCATION)
        self.column_file_location = conf.get_string(CsvTableQueryExecutionExtractor.COLUMN_FILE_LOCATION)
        self.table_file_location = conf.get_string(CsvTableQueryExecutionExtractor.TABLE_FILE_LOCATION)
        self.query_file_location = conf.get_string(CsvTableQueryExecutionExtractor.QUERY_FILE_LOCATION)
        self.execution_file_location = conf.get_string(CsvTableQueryExecutionExtractor.EXECUTION_FILE_LOCATION)
        self.badge_separator = conf.get_string(CsvTableQueryExecutionExtractor.BADGE_SEPARATOR, default=',')
        self.table_separator = conf.get_string(CsvTableQueryExecutionExtractor.TABLE_SEPARATOR, default=',')
        self._load_csv()

    def _get_key(self,
                 db: str,
                 cluster: str,
                 schema: str,
                 tbl: str
                 ) -> str:
        # Canonical table key used to join column rows and query rows to tables.
        return TableMetadata.TABLE_KEY_FORMAT.format(db=db,
                                                     cluster=cluster,
                                                     schema=schema,
                                                     tbl=tbl)

    def _load_csv(self) -> None:
        """
        Load all five CSVs and build an iterator of QueryExecutionsMetadata
        records, one per execution row.
        """
        # Users keyed by email; query rows reference them via their 'user' column.
        with open(self.user_file_location, 'r') as fin:
            self.users = [dict(i) for i in csv.DictReader(fin)]
        parsed_users = dict()
        for user_dict in self.users:
            user = UserMetadata(
                email=user_dict['email'],
                first_name=user_dict['first_name'],
                last_name=user_dict['last_name'],
                full_name=user_dict['full_name'],
                github_username=user_dict['github_username'],
                team_name=user_dict['team_name'],
                employee_type=user_dict['employee_type'],
                manager_email=user_dict['manager_email'],
                slack_id=user_dict['slack_id'],
                role_name=user_dict['role_name']
            )
            parsed_users[user_dict['email']] = user

        # Columns grouped by their owning table's key.
        with open(self.column_file_location, 'r') as fin:
            self.columns = [dict(i) for i in csv.DictReader(fin)]
        parsed_columns = defaultdict(list)
        for column_dict in self.columns:
            db = column_dict['database']
            cluster = column_dict['cluster']
            schema = column_dict['schema']
            table_name = column_dict['table_name']
            id = self._get_key(db, cluster, schema, table_name)
            split_badges = split_badge_list(badges=column_dict['badges'],
                                            separator=self.badge_separator)
            column = ColumnMetadata(
                name=column_dict['name'],
                description=column_dict['description'],
                col_type=column_dict['col_type'],
                sort_order=int(column_dict['sort_order']),
                badges=split_badges
            )
            parsed_columns[id].append(column)

        # Create Table Dictionary keyed by table key, with columns attached.
        with open(self.table_file_location, 'r') as fin:
            self.tables = [dict(i) for i in csv.DictReader(fin)]
        parsed_tables = dict()
        for table_dict in self.tables:
            db = table_dict['database']
            cluster = table_dict['cluster']
            schema = table_dict['schema']
            table_name = table_dict['name']
            id = self._get_key(db, cluster, schema, table_name)
            columns = parsed_columns[id]
            if columns is None:
                # Defensive only: defaultdict already yields [] for unknown keys.
                columns = []
            table = TableMetadata(database=table_dict['database'],
                                  cluster=table_dict['cluster'],
                                  schema=table_dict['schema'],
                                  name=table_dict['name'],
                                  description=table_dict['description'],
                                  columns=columns,
                                  # TODO: this possibly should parse stringified booleans;
                                  # right now it only will be false for empty strings
                                  is_view=bool(table_dict['is_view']),
                                  tags=table_dict['tags']
                                  )
            parsed_tables[id] = table

        # Create Query Dictionary keyed by the raw SQL text; execution rows
        # reference their query via the 'sql' column.
        with open(self.query_file_location, 'r') as fin:
            self.queries = [dict(i) for i in csv.DictReader(fin)]
        parsed_queries = {}
        for query_dict in self.queries:
            sql = query_dict['sql']
            user = parsed_users[query_dict['user']]
            split_tables = split_table_list(tables=query_dict['tables'],
                                            separator=self.table_separator)
            tables = [parsed_tables[t] for t in split_tables]
            query = QueryMetadata(
                sql=sql,
                tables=tables,
                user=user)
            parsed_queries[sql] = query

        # One QueryExecutionsMetadata per execution row.
        with open(self.execution_file_location, 'r') as fin:
            self.executions = [dict(i) for i in csv.DictReader(fin)]
        results = []
        for execution_dict in self.executions:
            sql = execution_dict['sql']
            execution = QueryExecutionsMetadata(
                start_time=int(execution_dict['start_time']),
                execution_count=int(execution_dict['execution_count']),
                query_metadata=parsed_queries[sql])
            results.append(execution)
        self._iter = iter(results)

    def extract(self) -> Any:
        """
        Return the next QueryExecutionsMetadata record, or None when exhausted.
        """
        try:
            return next(self._iter)
        except StopIteration:
            return None
        except Exception as e:
            raise e

    def get_scope(self) -> str:
        return 'extractor.csvtablequeryexecution'
| # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import csv
import importlib
from collections import defaultdict
from typing import Any, List
from pyhocon import ConfigTree
from databuilder.extractor.base_extractor import Extractor
from databuilder.models.badge import Badge, BadgeMetadata
from databuilder.models.table_lineage import ColumnLineage, TableLineage
from databuilder.models.table_metadata import ColumnMetadata, TableMetadata
from databuilder.models.query.query import QueryMetadata
from databuilder.models.query.query_join import QueryJoinMetadata
from databuilder.models.query.query_execution import QueryExecutionsMetadata
from databuilder.models.query.query_where import QueryWhereMetadata
from databuilder.models.user import User as UserMetadata
def split_badge_list(badges: str, separator: str) -> List[str]:
    """
    Split a separator-delimited badge string into a list, dropping any
    empty entries. A None input yields an empty list.
    """
    if badges is None:
        return []
    return list(filter(None, badges.split(separator)))
def split_table_list(tables: str, separator: str) -> List[str]:
    """
    Split a separator-delimited table string into a list, dropping any
    empty entries. A None input yields an empty list.
    """
    if tables is None:
        return []
    return list(filter(None, tables.split(separator)))
class CsvExtractor(Extractor):
    # Config keys
    FILE_LOCATION = 'file_location'
    # NOTE(review): the literal below is not a real class docstring because it
    # is not the first statement in the class body; __doc__ stays None.
    """
    An Extractor that extracts records via CSV.
    """
    def init(self, conf: ConfigTree) -> None:
        """
        :param conf: extractor configuration; requires 'file_location' and
            optionally 'model_class' (dotted path of the row model class).
        """
        self.conf = conf
        self.file_location = conf.get_string(CsvExtractor.FILE_LOCATION)
        model_class = conf.get('model_class', None)
        if model_class:
            # Resolve the model class dynamically from its dotted module path.
            module_name, class_name = model_class.rsplit(".", 1)
            mod = importlib.import_module(module_name)
            self.model_class = getattr(mod, class_name)
        self._load_csv()

    def _load_csv(self) -> None:
        """
        Build the iterator of rows (or model instances) from the CSV file.
        """
        # Only read the file once, even if _load_csv is invoked again.
        if not hasattr(self, 'results'):
            with open(self.file_location, 'r') as fin:
                self.results = [dict(i) for i in csv.DictReader(fin)]

        if hasattr(self, 'model_class'):
            # Each CSV row's columns become keyword arguments to the model.
            results = [self.model_class(**result)
                       for result in self.results]
        else:
            results = self.results
        self.iter = iter(results)

    def extract(self) -> Any:
        """
        Return the next CSV record, or None when exhausted.
        Records are model instances if a model_class was provided.
        """
        try:
            return next(self.iter)
        except StopIteration:
            return None
        except Exception as e:
            raise e

    def get_scope(self) -> str:
        return 'extractor.csv'
class CsvTableBadgeExtractor(Extractor):
    # Config keys
    TABLE_FILE_LOCATION = 'table_file_location'
    BADGE_FILE_LOCATION = 'badge_file_location'
    BADGE_SEPARATOR = 'badge_separator'
    # NOTE(review): the literal below is not a real class docstring because it
    # is not the first statement in the class body; __doc__ stays None.
    """
    An Extractor that combines Table and Badge CSVs.
    """
    def init(self, conf: ConfigTree) -> None:
        # Reads the two CSV file locations and the optional badge separator
        # (default ','), then eagerly loads and joins the files.
        self.conf = conf
        self.table_file_location = conf.get_string(CsvTableBadgeExtractor.TABLE_FILE_LOCATION)
        self.badge_file_location = conf.get_string(CsvTableBadgeExtractor.BADGE_FILE_LOCATION)
        self.badge_separator = conf.get_string(CsvTableBadgeExtractor.BADGE_SEPARATOR, default=',')
        self._load_csv()

    def _get_key(self,
                 db: str,
                 cluster: str,
                 schema: str,
                 tbl: str
                 ) -> str:
        # Canonical table key used to join badge rows to table rows.
        return TableMetadata.TABLE_KEY_FORMAT.format(db=db,
                                                     cluster=cluster,
                                                     schema=schema,
                                                     tbl=tbl)

    def _load_csv(self) -> None:
        """
        Read both CSVs and build an iterator of BadgeMetadata records,
        one per table row.
        """
        with open(self.badge_file_location, 'r') as fin:
            self.badges = [dict(i) for i in csv.DictReader(fin)]

        # Group badges by their owning table's key.
        parsed_badges = defaultdict(list)
        for badge_dict in self.badges:
            db = badge_dict['database']
            cluster = badge_dict['cluster']
            schema = badge_dict['schema']
            table_name = badge_dict['table_name']
            id = self._get_key(db, cluster, schema, table_name)
            split_badges = split_badge_list(badges=badge_dict['name'],
                                            separator=self.badge_separator)
            for badge_name in split_badges:
                badge = Badge(name=badge_name, category=badge_dict['category'])
                parsed_badges[id].append(badge)

        with open(self.table_file_location, 'r') as fin:
            tables = [dict(i) for i in csv.DictReader(fin)]

        results = []
        for table_dict in tables:
            db = table_dict['database']
            cluster = table_dict['cluster']
            schema = table_dict['schema']
            table_name = table_dict['name']
            id = self._get_key(db, cluster, schema, table_name)
            badges = parsed_badges[id]
            if badges is None:
                # Defensive only: defaultdict already yields [] for unknown keys.
                badges = []
            badge_metadata = BadgeMetadata(start_label=TableMetadata.TABLE_NODE_LABEL,
                                           start_key=id,
                                           badges=badges)
            results.append(badge_metadata)
        self._iter = iter(results)

    def extract(self) -> Any:
        """
        Return the next BadgeMetadata record, or None when exhausted.
        """
        try:
            return next(self._iter)
        except StopIteration:
            return None
        except Exception as e:
            raise e

    def get_scope(self) -> str:
        return 'extractor.csvtablebadge'
class CsvTableColumnExtractor(Extractor):
# Config keys
TABLE_FILE_LOCATION = 'table_file_location'
COLUMN_FILE_LOCATION = 'column_file_location'
BADGE_SEPARATOR = 'badge_separator'
"""
An Extractor that combines Table and Column CSVs.
"""
def init(self, conf: ConfigTree) -> None:
"""
:param conf:
"""
self.conf = conf
self.table_file_location = conf.get_string(CsvTableColumnExtractor.TABLE_FILE_LOCATION)
self.column_file_location = conf.get_string(CsvTableColumnExtractor.COLUMN_FILE_LOCATION)
self.badge_separator = conf.get_string(CsvTableBadgeExtractor.BADGE_SEPARATOR, default=',')
self._load_csv()
def _get_key(self,
db: str,
cluster: str,
schema: str,
tbl: str
) -> str:
return TableMetadata.TABLE_KEY_FORMAT.format(db=db,
cluster=cluster,
schema=schema,
tbl=tbl)
def _load_csv(self) -> None:
"""
Create an iterator to execute sql.
"""
with open(self.column_file_location, 'r') as fin:
self.columns = [dict(i) for i in csv.DictReader(fin)]
parsed_columns = defaultdict(list)
for column_dict in self.columns:
db = column_dict['database']
cluster = column_dict['cluster']
schema = column_dict['schema']
table_name = column_dict['table_name']
id = self._get_key(db, cluster, schema, table_name)
split_badges = split_badge_list(badges=column_dict['badges'],
separator=self.badge_separator)
column = ColumnMetadata(
name=column_dict['name'],
description=column_dict['description'],
col_type=column_dict['col_type'],
sort_order=int(column_dict['sort_order']),
badges=split_badges
)
parsed_columns[id].append(column)
# Create Table Dictionary
with open(self.table_file_location, 'r') as fin:
tables = [dict(i) for i in csv.DictReader(fin)]
results = []
for table_dict in tables:
db = table_dict['database']
cluster = table_dict['cluster']
schema = table_dict['schema']
table_name = table_dict['name']
id = self._get_key(db, cluster, schema, table_name)
columns = parsed_columns[id]
if columns is None:
columns = []
table = TableMetadata(database=table_dict['database'],
cluster=table_dict['cluster'],
schema=table_dict['schema'],
name=table_dict['name'],
description=table_dict['description'],
columns=columns,
# TODO: this possibly should parse stringified booleans;
# right now it only will be false for empty strings
is_view=bool(table_dict['is_view']),
tags=table_dict['tags']
)
results.append(table)
self._iter = iter(results)
def extract(self) -> Any:
"""
Yield the csv result one at a time.
convert the result to model if a model_class is provided
"""
try:
return next(self._iter)
except StopIteration:
return None
except Exception as e:
raise e
def get_scope(self) -> str:
    """Return the configuration scope key for this extractor."""
    return 'extractor.csvtablecolumn'
class CsvTableLineageExtractor(Extractor):
    """
    An Extractor that creates Table Lineage between two tables.

    Reads a CSV with ``source_table_key``/``target_table_key`` columns and
    yields one TableLineage record per row.
    """
    # Config keys
    TABLE_LINEAGE_FILE_LOCATION = 'table_lineage_file_location'

    def init(self, conf: ConfigTree) -> None:
        """
        :param conf: configuration holding the table-lineage CSV location.
        """
        self.conf = conf
        self.table_lineage_file_location = conf.get_string(
            CsvTableLineageExtractor.TABLE_LINEAGE_FILE_LOCATION)
        self._load_csv()

    def _load_csv(self) -> None:
        """Parse the lineage CSV and build the iterator of TableLineage records."""
        with open(self.table_lineage_file_location, 'r') as fin:
            self.table_lineage = [dict(row) for row in csv.DictReader(fin)]
        results = [
            TableLineage(table_key=row['source_table_key'],
                         downstream_deps=[row['target_table_key']])
            for row in self.table_lineage
        ]
        self._iter = iter(results)

    def extract(self) -> Any:
        """Return the next TableLineage record, or None when exhausted."""
        return next(self._iter, None)

    def get_scope(self) -> str:
        return 'extractor.csvtablelineage'
class CsvColumnLineageExtractor(Extractor):
    """
    An Extractor that creates Column Lineage between two columns.

    Reads a CSV with ``source_column_key``/``target_column_key`` columns and
    yields one ColumnLineage record per row.
    """
    # Config keys
    COLUMN_LINEAGE_FILE_LOCATION = 'column_lineage_file_location'

    def init(self, conf: ConfigTree) -> None:
        """
        :param conf: configuration holding the column-lineage CSV location.
        """
        self.conf = conf
        self.column_lineage_file_location = conf.get_string(
            CsvColumnLineageExtractor.COLUMN_LINEAGE_FILE_LOCATION)
        self._load_csv()

    def _load_csv(self) -> None:
        """Parse the lineage CSV and build the iterator of ColumnLineage records."""
        with open(self.column_lineage_file_location, 'r') as fin:
            self.column_lineage = [dict(row) for row in csv.DictReader(fin)]
        results = [
            ColumnLineage(column_key=row['source_column_key'],
                          downstream_deps=[row['target_column_key']])
            for row in self.column_lineage
        ]
        self._iter = iter(results)

    def extract(self) -> Any:
        """Return the next ColumnLineage record, or None when exhausted."""
        return next(self._iter, None)

    def get_scope(self) -> str:
        return 'extractor.csvcolumnlineage'
class CsvTableQueryExtractor(Extractor):
    """
    An Extractor that combines Table, Column, User and Query CSVs and yields
    one QueryMetadata record per query row.
    """
    # Config keys
    TABLE_FILE_LOCATION = 'table_file_location'
    COLUMN_FILE_LOCATION = 'column_file_location'
    QUERY_FILE_LOCATION = 'query_file_location'
    USER_FILE_LOCATION = 'user_file_location'
    BADGE_SEPARATOR = 'badge_separator'
    TABLE_SEPARATOR = 'table_separator'

    def init(self, conf: ConfigTree) -> None:
        """
        :param conf: configuration holding the four CSV file locations and
            the badge/table list separators.
        """
        self.conf = conf
        self.user_file_location = conf.get_string(CsvTableQueryExtractor.USER_FILE_LOCATION)
        self.column_file_location = conf.get_string(CsvTableQueryExtractor.COLUMN_FILE_LOCATION)
        self.table_file_location = conf.get_string(CsvTableQueryExtractor.TABLE_FILE_LOCATION)
        self.query_file_location = conf.get_string(CsvTableQueryExtractor.QUERY_FILE_LOCATION)
        self.badge_separator = conf.get_string(CsvTableQueryExtractor.BADGE_SEPARATOR, default=',')
        self.table_separator = conf.get_string(CsvTableQueryExtractor.TABLE_SEPARATOR, default=',')
        self._load_csv()

    def _get_key(self, db: str, cluster: str, schema: str, tbl: str) -> str:
        """Render the project-wide TableMetadata key format for one table."""
        return TableMetadata.TABLE_KEY_FORMAT.format(
            db=db, cluster=cluster, schema=schema, tbl=tbl)

    @staticmethod
    def _read_rows(location: str) -> list:
        """Read a CSV file into a list of plain row dicts."""
        with open(location, 'r') as fin:
            return [dict(row) for row in csv.DictReader(fin)]

    def _parse_users(self) -> dict:
        """Build UserMetadata records keyed by user email; sets self.users."""
        self.users = self._read_rows(self.user_file_location)
        return {
            row['email']: UserMetadata(email=row['email'],
                                       first_name=row['first_name'],
                                       last_name=row['last_name'],
                                       full_name=row['full_name'],
                                       github_username=row['github_username'],
                                       team_name=row['team_name'],
                                       employee_type=row['employee_type'],
                                       manager_email=row['manager_email'],
                                       slack_id=row['slack_id'],
                                       role_name=row['role_name'])
            for row in self.users
        }

    def _parse_columns(self) -> defaultdict:
        """Group ColumnMetadata records by parent table key; sets self.columns."""
        self.columns = self._read_rows(self.column_file_location)
        parsed_columns = defaultdict(list)
        for row in self.columns:
            table_key = self._get_key(row['database'], row['cluster'],
                                      row['schema'], row['table_name'])
            parsed_columns[table_key].append(
                ColumnMetadata(name=row['name'],
                               description=row['description'],
                               col_type=row['col_type'],
                               sort_order=int(row['sort_order']),
                               badges=split_badge_list(badges=row['badges'],
                                                       separator=self.badge_separator)))
        return parsed_columns

    def _parse_tables(self, parsed_columns: defaultdict) -> dict:
        """Build TableMetadata records keyed by table key; sets self.tables."""
        self.tables = self._read_rows(self.table_file_location)
        parsed_tables = dict()
        for row in self.tables:
            table_key = self._get_key(row['database'], row['cluster'],
                                      row['schema'], row['name'])
            parsed_tables[table_key] = TableMetadata(
                database=row['database'],
                cluster=row['cluster'],
                schema=row['schema'],
                name=row['name'],
                description=row['description'],
                # defaultdict yields [] for tables with no columns.
                columns=parsed_columns[table_key],
                # TODO: this possibly should parse stringified booleans;
                # right now it only will be false for empty strings
                is_view=bool(row['is_view']),
                tags=row['tags'])
        return parsed_tables

    def _load_csv(self) -> None:
        """Parse all CSV inputs and build the iterator of QueryMetadata records."""
        parsed_users = self._parse_users()
        parsed_tables = self._parse_tables(self._parse_columns())
        self.queries = self._read_rows(self.query_file_location)
        results = []
        for row in self.queries:
            table_keys = split_table_list(tables=row['tables'],
                                          separator=self.table_separator)
            results.append(QueryMetadata(sql=row['sql'],
                                         tables=[parsed_tables[key] for key in table_keys],
                                         user=parsed_users[row['user']]))
        self._iter = iter(results)

    def extract(self) -> Any:
        """Return the next QueryMetadata record, or None when exhausted."""
        return next(self._iter, None)

    def get_scope(self) -> str:
        return 'extractor.csvtablequery'
class CsvTableQueryJoinExtractor(Extractor):
    """
    An Extractor that combines Table, Column, User, Query and Join CSVs and
    yields one QueryJoinMetadata record per join row.
    """
    # Config keys
    TABLE_FILE_LOCATION = 'table_file_location'
    COLUMN_FILE_LOCATION = 'column_file_location'
    QUERY_FILE_LOCATION = 'query_file_location'
    USER_FILE_LOCATION = 'user_file_location'
    JOIN_FILE_LOCATION = 'join_file_location'
    BADGE_SEPARATOR = 'badge_separator'
    TABLE_SEPARATOR = 'table_separator'

    def init(self, conf: ConfigTree) -> None:
        """
        :param conf: configuration holding the five CSV file locations and
            the badge/table list separators.
        """
        self.conf = conf
        self.user_file_location = conf.get_string(CsvTableQueryJoinExtractor.USER_FILE_LOCATION)
        self.column_file_location = conf.get_string(CsvTableQueryJoinExtractor.COLUMN_FILE_LOCATION)
        self.table_file_location = conf.get_string(CsvTableQueryJoinExtractor.TABLE_FILE_LOCATION)
        self.query_file_location = conf.get_string(CsvTableQueryJoinExtractor.QUERY_FILE_LOCATION)
        self.join_file_location = conf.get_string(CsvTableQueryJoinExtractor.JOIN_FILE_LOCATION)
        self.badge_separator = conf.get_string(CsvTableQueryJoinExtractor.BADGE_SEPARATOR, default=',')
        self.table_separator = conf.get_string(CsvTableQueryJoinExtractor.TABLE_SEPARATOR, default=',')
        self._load_csv()

    def _get_key(self, db: str, cluster: str, schema: str, tbl: str) -> str:
        """Render the project-wide TableMetadata key format for one table."""
        return TableMetadata.TABLE_KEY_FORMAT.format(
            db=db, cluster=cluster, schema=schema, tbl=tbl)

    @staticmethod
    def _read_rows(location: str) -> list:
        """Read a CSV file into a list of plain row dicts."""
        with open(location, 'r') as fin:
            return [dict(row) for row in csv.DictReader(fin)]

    def _parse_users(self) -> dict:
        """Build UserMetadata records keyed by user email; sets self.users."""
        self.users = self._read_rows(self.user_file_location)
        return {
            row['email']: UserMetadata(email=row['email'],
                                       first_name=row['first_name'],
                                       last_name=row['last_name'],
                                       full_name=row['full_name'],
                                       github_username=row['github_username'],
                                       team_name=row['team_name'],
                                       employee_type=row['employee_type'],
                                       manager_email=row['manager_email'],
                                       slack_id=row['slack_id'],
                                       role_name=row['role_name'])
            for row in self.users
        }

    def _parse_columns(self) -> tuple:
        """
        Group ColumnMetadata by parent table key and also index each column
        by '<table key>/<column name>'; sets self.columns.
        """
        self.columns = self._read_rows(self.column_file_location)
        parsed_columns = defaultdict(list)
        columns_by_key = dict()
        for row in self.columns:
            table_key = self._get_key(row['database'], row['cluster'],
                                      row['schema'], row['table_name'])
            column = ColumnMetadata(name=row['name'],
                                    description=row['description'],
                                    col_type=row['col_type'],
                                    sort_order=int(row['sort_order']),
                                    badges=split_badge_list(badges=row['badges'],
                                                            separator=self.badge_separator))
            parsed_columns[table_key].append(column)
            columns_by_key[f"{table_key}/{row['name']}"] = column
        return parsed_columns, columns_by_key

    def _parse_tables(self, parsed_columns: defaultdict) -> dict:
        """Build TableMetadata records keyed by table key; sets self.tables."""
        self.tables = self._read_rows(self.table_file_location)
        parsed_tables = dict()
        for row in self.tables:
            table_key = self._get_key(row['database'], row['cluster'],
                                      row['schema'], row['name'])
            parsed_tables[table_key] = TableMetadata(
                database=row['database'],
                cluster=row['cluster'],
                schema=row['schema'],
                name=row['name'],
                description=row['description'],
                # defaultdict yields [] for tables with no columns.
                columns=parsed_columns[table_key],
                # TODO: this possibly should parse stringified booleans;
                # right now it only will be false for empty strings
                is_view=bool(row['is_view']),
                tags=row['tags'])
        return parsed_tables

    def _parse_queries(self, parsed_users: dict, parsed_tables: dict) -> dict:
        """Build QueryMetadata records keyed by SQL text; sets self.queries."""
        self.queries = self._read_rows(self.query_file_location)
        parsed_queries = {}
        for row in self.queries:
            table_keys = split_table_list(tables=row['tables'],
                                          separator=self.table_separator)
            parsed_queries[row['sql']] = QueryMetadata(
                sql=row['sql'],
                tables=[parsed_tables[key] for key in table_keys],
                user=parsed_users[row['user']])
        return parsed_queries

    def _load_csv(self) -> None:
        """Parse all CSV inputs and build the iterator of QueryJoinMetadata records."""
        parsed_users = self._parse_users()
        parsed_columns, columns_by_key = self._parse_columns()
        parsed_tables = self._parse_tables(parsed_columns)
        parsed_queries = self._parse_queries(parsed_users, parsed_tables)
        self.joins = self._read_rows(self.join_file_location)
        results = []
        for row in self.joins:
            results.append(QueryJoinMetadata(
                left_table=parsed_tables[row['left_table']],
                right_table=parsed_tables[row['right_table']],
                left_column=columns_by_key[row['left_column']],
                right_column=columns_by_key[row['right_column']],
                join_type=row['join_type'],
                join_operator=row['join_operator'],
                join_sql=row['join_sql'],
                query_metadata=parsed_queries[row['join_sql']]))
        self._iter = iter(results)

    def extract(self) -> Any:
        """Return the next QueryJoinMetadata record, or None when exhausted."""
        return next(self._iter, None)

    def get_scope(self) -> str:
        return 'extractor.csvtablequeryjoin'
class CsvTableQueryWhereExtractor(Extractor):
    """
    An Extractor that combines Table, Column, User, Query and Where CSVs and
    yields one QueryWhereMetadata record per where-clause row.
    """
    # Config keys
    TABLE_FILE_LOCATION = 'table_file_location'
    COLUMN_FILE_LOCATION = 'column_file_location'
    QUERY_FILE_LOCATION = 'query_file_location'
    USER_FILE_LOCATION = 'user_file_location'
    WHERE_FILE_LOCATION = 'where_file_location'
    BADGE_SEPARATOR = 'badge_separator'
    TABLE_SEPARATOR = 'table_separator'

    def init(self, conf: ConfigTree) -> None:
        """
        :param conf: configuration holding the five CSV file locations and
            the badge/table list separators.
        """
        self.conf = conf
        self.user_file_location = conf.get_string(CsvTableQueryWhereExtractor.USER_FILE_LOCATION)
        self.column_file_location = conf.get_string(CsvTableQueryWhereExtractor.COLUMN_FILE_LOCATION)
        self.table_file_location = conf.get_string(CsvTableQueryWhereExtractor.TABLE_FILE_LOCATION)
        self.query_file_location = conf.get_string(CsvTableQueryWhereExtractor.QUERY_FILE_LOCATION)
        self.where_file_location = conf.get_string(CsvTableQueryWhereExtractor.WHERE_FILE_LOCATION)
        self.badge_separator = conf.get_string(CsvTableQueryWhereExtractor.BADGE_SEPARATOR, default=',')
        self.table_separator = conf.get_string(CsvTableQueryWhereExtractor.TABLE_SEPARATOR, default=',')
        self._load_csv()

    def _get_key(self, db: str, cluster: str, schema: str, tbl: str) -> str:
        """Render the project-wide TableMetadata key format for one table."""
        return TableMetadata.TABLE_KEY_FORMAT.format(
            db=db, cluster=cluster, schema=schema, tbl=tbl)

    @staticmethod
    def _read_rows(location: str) -> list:
        """Read a CSV file into a list of plain row dicts."""
        with open(location, 'r') as fin:
            return [dict(row) for row in csv.DictReader(fin)]

    def _parse_users(self) -> dict:
        """Build UserMetadata records keyed by user email; sets self.users."""
        self.users = self._read_rows(self.user_file_location)
        return {
            row['email']: UserMetadata(email=row['email'],
                                       first_name=row['first_name'],
                                       last_name=row['last_name'],
                                       full_name=row['full_name'],
                                       github_username=row['github_username'],
                                       team_name=row['team_name'],
                                       employee_type=row['employee_type'],
                                       manager_email=row['manager_email'],
                                       slack_id=row['slack_id'],
                                       role_name=row['role_name'])
            for row in self.users
        }

    def _parse_columns(self) -> defaultdict:
        """
        Group ColumnMetadata records by parent table key; sets self.columns.

        The original also built a per-column '<table key>/<name>' index
        (``parsed_columns_dict``) that was never read — dead code, removed.
        """
        self.columns = self._read_rows(self.column_file_location)
        parsed_columns = defaultdict(list)
        for row in self.columns:
            table_key = self._get_key(row['database'], row['cluster'],
                                      row['schema'], row['table_name'])
            parsed_columns[table_key].append(
                ColumnMetadata(name=row['name'],
                               description=row['description'],
                               col_type=row['col_type'],
                               sort_order=int(row['sort_order']),
                               badges=split_badge_list(badges=row['badges'],
                                                       separator=self.badge_separator)))
        return parsed_columns

    def _parse_tables(self, parsed_columns: defaultdict) -> dict:
        """Build TableMetadata records keyed by table key; sets self.tables."""
        self.tables = self._read_rows(self.table_file_location)
        parsed_tables = dict()
        for row in self.tables:
            table_key = self._get_key(row['database'], row['cluster'],
                                      row['schema'], row['name'])
            parsed_tables[table_key] = TableMetadata(
                database=row['database'],
                cluster=row['cluster'],
                schema=row['schema'],
                name=row['name'],
                description=row['description'],
                # defaultdict yields [] for tables with no columns.
                columns=parsed_columns[table_key],
                # TODO: this possibly should parse stringified booleans;
                # right now it only will be false for empty strings
                is_view=bool(row['is_view']),
                tags=row['tags'])
        return parsed_tables

    def _parse_queries(self, parsed_users: dict, parsed_tables: dict) -> dict:
        """Build QueryMetadata records keyed by SQL text; sets self.queries."""
        self.queries = self._read_rows(self.query_file_location)
        parsed_queries = {}
        for row in self.queries:
            table_keys = split_table_list(tables=row['tables'],
                                          separator=self.table_separator)
            parsed_queries[row['sql']] = QueryMetadata(
                sql=row['sql'],
                tables=[parsed_tables[key] for key in table_keys],
                user=parsed_users[row['user']])
        return parsed_queries

    def _load_csv(self) -> None:
        """Parse all CSV inputs and build the iterator of QueryWhereMetadata records."""
        parsed_users = self._parse_users()
        parsed_tables = self._parse_tables(self._parse_columns())
        parsed_queries = self._parse_queries(parsed_users, parsed_tables)
        self.wheres = self._read_rows(self.where_file_location)
        results = []
        for row in self.wheres:
            query_metadata = parsed_queries[row['sql']]
            results.append(QueryWhereMetadata(
                tables=query_metadata.tables,
                where_clause=row['where_clause'],
                left_arg=row['left_arg'],
                right_arg=row['right_arg'],
                operator=row['operator'],
                query_metadata=query_metadata))
        self._iter = iter(results)

    def extract(self) -> Any:
        """Return the next QueryWhereMetadata record, or None when exhausted."""
        return next(self._iter, None)

    def get_scope(self) -> str:
        return 'extractor.csvtablequerywhere'
class CsvTableQueryExecutionExtractor(Extractor):
    """
    An Extractor that combines Table, Column, User, Query and Execution CSVs
    and yields one QueryExecutionsMetadata record per execution row.
    """
    # Config keys
    TABLE_FILE_LOCATION = 'table_file_location'
    COLUMN_FILE_LOCATION = 'column_file_location'
    QUERY_FILE_LOCATION = 'query_file_location'
    USER_FILE_LOCATION = 'user_file_location'
    EXECUTION_FILE_LOCATION = 'execution_file_location'
    BADGE_SEPARATOR = 'badge_separator'
    TABLE_SEPARATOR = 'table_separator'

    def init(self, conf: ConfigTree) -> None:
        """
        :param conf: configuration holding the five CSV file locations and
            the badge/table list separators.
        """
        self.conf = conf
        self.user_file_location = conf.get_string(CsvTableQueryExecutionExtractor.USER_FILE_LOCATION)
        self.column_file_location = conf.get_string(CsvTableQueryExecutionExtractor.COLUMN_FILE_LOCATION)
        self.table_file_location = conf.get_string(CsvTableQueryExecutionExtractor.TABLE_FILE_LOCATION)
        self.query_file_location = conf.get_string(CsvTableQueryExecutionExtractor.QUERY_FILE_LOCATION)
        self.execution_file_location = conf.get_string(CsvTableQueryExecutionExtractor.EXECUTION_FILE_LOCATION)
        self.badge_separator = conf.get_string(CsvTableQueryExecutionExtractor.BADGE_SEPARATOR, default=',')
        self.table_separator = conf.get_string(CsvTableQueryExecutionExtractor.TABLE_SEPARATOR, default=',')
        self._load_csv()

    def _get_key(self, db: str, cluster: str, schema: str, tbl: str) -> str:
        """Render the project-wide TableMetadata key format for one table."""
        return TableMetadata.TABLE_KEY_FORMAT.format(
            db=db, cluster=cluster, schema=schema, tbl=tbl)

    @staticmethod
    def _read_rows(location: str) -> list:
        """Read a CSV file into a list of plain row dicts."""
        with open(location, 'r') as fin:
            return [dict(row) for row in csv.DictReader(fin)]

    def _parse_users(self) -> dict:
        """Build UserMetadata records keyed by user email; sets self.users."""
        self.users = self._read_rows(self.user_file_location)
        return {
            row['email']: UserMetadata(email=row['email'],
                                       first_name=row['first_name'],
                                       last_name=row['last_name'],
                                       full_name=row['full_name'],
                                       github_username=row['github_username'],
                                       team_name=row['team_name'],
                                       employee_type=row['employee_type'],
                                       manager_email=row['manager_email'],
                                       slack_id=row['slack_id'],
                                       role_name=row['role_name'])
            for row in self.users
        }

    def _parse_columns(self) -> defaultdict:
        """Group ColumnMetadata records by parent table key; sets self.columns."""
        self.columns = self._read_rows(self.column_file_location)
        parsed_columns = defaultdict(list)
        for row in self.columns:
            table_key = self._get_key(row['database'], row['cluster'],
                                      row['schema'], row['table_name'])
            parsed_columns[table_key].append(
                ColumnMetadata(name=row['name'],
                               description=row['description'],
                               col_type=row['col_type'],
                               sort_order=int(row['sort_order']),
                               badges=split_badge_list(badges=row['badges'],
                                                       separator=self.badge_separator)))
        return parsed_columns

    def _parse_tables(self, parsed_columns: defaultdict) -> dict:
        """Build TableMetadata records keyed by table key; sets self.tables."""
        self.tables = self._read_rows(self.table_file_location)
        parsed_tables = dict()
        for row in self.tables:
            table_key = self._get_key(row['database'], row['cluster'],
                                      row['schema'], row['name'])
            parsed_tables[table_key] = TableMetadata(
                database=row['database'],
                cluster=row['cluster'],
                schema=row['schema'],
                name=row['name'],
                description=row['description'],
                # defaultdict yields [] for tables with no columns.
                columns=parsed_columns[table_key],
                # TODO: this possibly should parse stringified booleans;
                # right now it only will be false for empty strings
                is_view=bool(row['is_view']),
                tags=row['tags'])
        return parsed_tables

    def _parse_queries(self, parsed_users: dict, parsed_tables: dict) -> dict:
        """Build QueryMetadata records keyed by SQL text; sets self.queries."""
        self.queries = self._read_rows(self.query_file_location)
        parsed_queries = {}
        for row in self.queries:
            table_keys = split_table_list(tables=row['tables'],
                                          separator=self.table_separator)
            parsed_queries[row['sql']] = QueryMetadata(
                sql=row['sql'],
                tables=[parsed_tables[key] for key in table_keys],
                user=parsed_users[row['user']])
        return parsed_queries

    def _load_csv(self) -> None:
        """Parse all CSV inputs and build the iterator of QueryExecutionsMetadata records."""
        parsed_users = self._parse_users()
        parsed_tables = self._parse_tables(self._parse_columns())
        parsed_queries = self._parse_queries(parsed_users, parsed_tables)
        self.executions = self._read_rows(self.execution_file_location)
        results = []
        for row in self.executions:
            results.append(QueryExecutionsMetadata(
                start_time=int(row['start_time']),
                execution_count=int(row['execution_count']),
                query_metadata=parsed_queries[row['sql']]))
        self._iter = iter(results)

    def extract(self) -> Any:
        """Return the next QueryExecutionsMetadata record, or None when exhausted."""
        return next(self._iter, None)

    def get_scope(self) -> str:
        return 'extractor.csvtablequeryexecution'
| en | 0.718259 | # Copyright Contributors to the Amundsen project. # SPDX-License-Identifier: Apache-2.0 Splits a string of badges into a list, removing all empty badges. Splits a string of tables into a list, removing all empty tables. # Config keys An Extractor that extracts records via CSV. :param conf: Create an iterator to execute sql. Yield the csv result one at a time. convert the result to model if a model_class is provided # Config keys An Extractor that combines Table and Badge CSVs. # print("BADGES: " + str(self.badges)) Yield the csv result one at a time. convert the result to model if a model_class is provided # Config keys An Extractor that combines Table and Column CSVs. :param conf: Create an iterator to execute sql. # Create Table Dictionary # TODO: this possibly should parse stringified booleans; # right now it only will be false for empty strings Yield the csv result one at a time. convert the result to model if a model_class is provided # Config keys An Extractor that creates Table Lineage between two tables :param conf: Create an iterator to execute sql. Yield the csv result one at a time. convert the result to model if a model_class is provided # Config keys An Extractor that creates Column Lineage between two columns :param conf: Create an iterator to execute sql. Yield the csv result one at a time. convert the result to model if a model_class is provided # Config keys An Extractor that combines Table, Column, User and Query CSVs. :param conf: Create an iterator to execute sql. # Create Table Dictionary # TODO: this possibly should parse stringified booleans; # right now it only will be false for empty strings Yield the csv result one at a time. convert the result to model if a model_class is provided # Config keys An Extractor that combines Table, Column, User and Query CSVs. :param conf: Create an iterator to execute sql. 
# Create Column Dictionary # Create Table Dictionary # TODO: this possibly should parse stringified booleans; # right now it only will be false for empty strings # Create Query Dictionary Yield the csv result one at a time. convert the result to model if a model_class is provided # Config keys An Extractor that combines Table, Column, User and Query CSVs. :param conf: Create an iterator to execute sql. # Create Column Dictionary # Create Table Dictionary # TODO: this possibly should parse stringified booleans; # right now it only will be false for empty strings # Create Query Dictionary Yield the csv result one at a time. convert the result to model if a model_class is provided # Config keys An Extractor that combines Table, Column, User and Query CSVs. :param conf: Create an iterator to execute sql. # Create Table Dictionary # TODO: this possibly should parse stringified booleans; # right now it only will be false for empty strings # Create Query Dictionary Yield the csv result one at a time. convert the result to model if a model_class is provided | 2.305688 | 2 |
ml/dashboard.py | fdebrain/docker-for-datascience | 0 | 6615580 | <filename>ml/dashboard.py
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston
from bokeh.models import DataTable, TableColumn, ColumnDataSource, Select, Div
from bokeh.models import BasicTicker, ColorBar, LinearColorMapper, HoverTool
from bokeh.plotting import figure, curdoc
from bokeh.layouts import column, row
from bokeh.transform import transform
from bokeh.palettes import RdBu7 as colors


def load_data():
    '''Load Boston House price dataset.
    More details: https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html'''
    # NOTE(review): sklearn.datasets.load_boston was removed in
    # scikit-learn 1.2 -- confirm the project pins scikit-learn < 1.2.
    boston = load_boston()
    frame = pd.DataFrame(boston.data, columns=boston.feature_names)
    frame['Price'] = boston.target
    return frame


def on_feature_change(attr, old, new):
    # Select-widget callback: point the scatter source at the chosen
    # feature column and relabel the x axis accordingly.
    global source_df, figure_scatter
    source_df.data.update(dict(Feature=df[new]))
    figure_scatter.xaxis.axis_label = new


# Data - Load raw data & define source
df = load_data()
default_feature = df.columns[0]
source_df = ColumnDataSource(df)

# CREATE NEW FEATURES HERE

# Page title
title = Div(text="""<h1>Dive into the Boston House Price Dataset</h1> <br>
<p>This simple dashboard gives an overview of the capabilities of Bokeh
for data visualization. <br> Learn more about this dataset
<a href="https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html">here</a>.</p>""")

# Datatable - Display raw data
table = DataTable(source=source_df, sortable=True)
table.columns = [TableColumn(field=name, title=name) for name in df.columns]

# Data - Compute correlation matrix & define source
df_corr = df.corr()
df_corr.index.name = 'axis1'
df_corr.columns.name = 'axis2'
df_corr = df_corr.stack().rename("value").reset_index()
source_df_corr = ColumnDataSource(df_corr)

# Figure - Features correlation heatmap
figure_heatmap = figure(title="Correlation plot",
                        plot_width=600, plot_height=600,
                        x_range=list(df_corr.axis1.drop_duplicates()),
                        y_range=list(df_corr.axis2.drop_duplicates()))
mapper = LinearColorMapper(palette=colors, low=-1, high=1)
figure_heatmap.rect(x="axis1", y="axis2", width=1, height=1,
                    source=source_df_corr, line_color='black',
                    fill_color=transform('value', mapper))

# Add heatmap colorbar
color_bar = ColorBar(color_mapper=mapper, location=(0, 0),
                     ticker=BasicTicker(desired_num_ticks=10))
figure_heatmap.add_layout(color_bar, 'right')
figure_heatmap.xaxis.major_label_orientation = np.pi / 4
figure_heatmap.yaxis.major_label_orientation = np.pi / 4

# Add heatmap hovertool
hover = HoverTool(tooltips=[("feature1", "@axis1"),
                            ("feature2", "@axis2"),
                            ("correlation", "@value")])
figure_heatmap.add_tools(hover)

# Figure - Scatter plot feature vs price
figure_scatter = figure(title="Influence of feature over house price")
figure_scatter.circle(x='Feature', y='Price', source=source_df,
                      selection_color='red', nonselection_alpha=0.2)
figure_scatter.yaxis.axis_label = 'Price (in k$)'

# Select - Choose among a list of feature
select_feature = Select(title='Feature', options=list(df.columns), width=200)
select_feature.on_change('value', on_feature_change)
select_feature.value = default_feature

# Define layout and add to document
layout = row(column(title, table),
             figure_heatmap,
             column(select_feature, figure_scatter))
curdoc().add_root(layout)
| <filename>ml/dashboard.py
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston
from bokeh.models import DataTable, TableColumn, ColumnDataSource, Select, Div
from bokeh.models import BasicTicker, ColorBar, LinearColorMapper, HoverTool
from bokeh.plotting import figure, curdoc
from bokeh.layouts import column, row
from bokeh.transform import transform
from bokeh.palettes import RdBu7 as colors


def load_data():
    '''Load Boston House price dataset.
    More details: https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html'''
    # NOTE(review): sklearn.datasets.load_boston was removed in
    # scikit-learn 1.2 -- confirm the project pins scikit-learn < 1.2.
    raw = load_boston()
    dataset = pd.DataFrame(raw.data, columns=raw.feature_names)
    dataset['Price'] = raw.target
    return dataset


def on_feature_change(attr, old, new):
    # Select-widget callback: swap the scatter's x data to the chosen
    # feature and relabel the x axis.
    global source_df, figure_scatter
    source_df.data.update(dict(Feature=df[new]))
    figure_scatter.xaxis.axis_label = new


# Data - Load raw data & define source
df = load_data()
default_feature = df.columns[0]
source_df = ColumnDataSource(df)

# CREATE NEW FEATURES HERE

# Page title
title = Div(text="""<h1>Dive into the Boston House Price Dataset</h1> <br>
<p>This simple dashboard gives an overview of the capabilities of Bokeh
for data visualization. <br> Learn more about this dataset
<a href="https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html">here</a>.</p>""")

# Datatable - Display raw data
table = DataTable(source=source_df, sortable=True)
table.columns = [TableColumn(field=name, title=name) for name in df.columns]

# Data - Compute correlation matrix & define source
df_corr = df.corr()
df_corr.index.name = 'axis1'
df_corr.columns.name = 'axis2'
df_corr = df_corr.stack().rename("value").reset_index()
source_df_corr = ColumnDataSource(df_corr)

# Figure - Features correlation heatmap
figure_heatmap = figure(title="Correlation plot",
                        plot_width=600, plot_height=600,
                        x_range=list(df_corr.axis1.drop_duplicates()),
                        y_range=list(df_corr.axis2.drop_duplicates()))
mapper = LinearColorMapper(palette=colors, low=-1, high=1)
figure_heatmap.rect(x="axis1", y="axis2", width=1, height=1,
                    source=source_df_corr, line_color='black',
                    fill_color=transform('value', mapper))

# Add heatmap colorbar
color_bar = ColorBar(color_mapper=mapper, location=(0, 0),
                     ticker=BasicTicker(desired_num_ticks=10))
figure_heatmap.add_layout(color_bar, 'right')
figure_heatmap.xaxis.major_label_orientation = np.pi / 4
figure_heatmap.yaxis.major_label_orientation = np.pi / 4

# Add heatmap hovertool
hover = HoverTool(tooltips=[("feature1", "@axis1"),
                            ("feature2", "@axis2"),
                            ("correlation", "@value")])
figure_heatmap.add_tools(hover)

# Figure - Scatter plot feature vs price
figure_scatter = figure(title="Influence of feature over house price")
figure_scatter.circle(x='Feature', y='Price', source=source_df,
                      selection_color='red', nonselection_alpha=0.2)
figure_scatter.yaxis.axis_label = 'Price (in k$)'

# Select - Choose among a list of feature
select_feature = Select(title='Feature', options=list(df.columns), width=200)
select_feature.on_change('value', on_feature_change)
select_feature.value = default_feature

# Define layout and add to document
layout = row(column(title, table),
             figure_heatmap,
             column(select_feature, figure_scatter))
curdoc().add_root(layout)
| en | 0.493365 | Load Boston House price dataset. More details: https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html # Data - Load raw data & define source # CREATE NEW FEATURES HERE # Page title <h1>Dive into the Boston House Price Dataset</h1> <br> <p>This simple dashboard gives an overview of the capabilities of Bokeh for data visualization. <br> Learn more about this dataset <a href="https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html">here</a>.</p> # Datatable - Display raw data # Data - Compute correlation matrix & define source # Figure - Features correlation heatmap # Add heatmap colorbar # Add heatmap hovertool # Figure - Scatter plot feature vs price # Select - Choose among a list of feature # Define layout and add to document | 3.47089 | 3 |
test_frame/test_rabbitmq/test_os_run.py | kisshang1993/funboost | 120 | 6615581 | <filename>test_frame/test_rabbitmq/test_os_run.py
import os
import threading

# Launch eight shell workers concurrently; each thread runs the consumer
# script via os.system.
for _ in range(8):
    threading.Thread(target=os.system,
                     args=('python test_rabbitmq_consume.py',)).start()
import os
import threading

# Spawn eight concurrent consumer processes, one os.system call per thread.
for _ in range(8):
    worker = threading.Thread(target=os.system,
                              args=('python test_rabbitmq_consume.py',))
    worker.start()
api/tests/test_acceptance.py | krzysztof-adamski/emenu | 0 | 6615582 | import datetime
from datetime import timedelta
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIClient
from api.factories import MealFactory, MenuFactory, UserFactory
from api.models import Meal, Menu
from api.serializers import MenuSerializer
class MenusAcceptanceTests(TestCase):
def setUp(self):
self.client = APIClient()
self.client.cookies.load({"pl": "pl"})
Meal.objects.all().delete()
Menu.objects.all().delete()
self.user = UserFactory.create(username="Jan")
def test_return_list_menus_without_empty_meals_without_auth(self):
"""Test sprawdza czy wyswietlamy listę menu bez pustych dań bez logowania."""
MenuFactory.create()
self.menu = MenuFactory.create()
self.meal = MealFactory.create(menu=self.menu)
url = reverse("menu-list")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(Menu.objects.count(), 2)
self.assertEqual(len(response.json()), 1)
self.assertEqual(len(response.json()[0]["meals"]), 1)
def test_return_list_all_menus_with_auth(self):
"""Test sprawdza czy wyswietlamy listę wszystkich menu będąc zalogowanym z domyślnym sortowaniem po nazwie menu."""
MenuFactory.create(name="A")
self.menu = MenuFactory.create(name="B")
self.meal = MealFactory.create(menu=self.menu)
self.client.force_authenticate(user=self.user)
url = reverse("menu-list")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(Menu.objects.count(), 2)
self.assertEqual(len(response.json()), 2)
self.assertEqual(len(response.json()[1]["meals"]), 1)
def test_return_list_menus_ordering_by_count_meals(self):
"""Test sprawdza czy wyswietlamy listę menu w kolejności malejącej ilości dań w menu."""
self.menu_one = MenuFactory.create(name="Menu One")
MealFactory.create_batch(menu=self.menu_one, size=5)
self.menu_two = MenuFactory.create(name="Menu Two")
MealFactory.create_batch(menu=self.menu_two, size=3)
url = reverse("menu-list")
response = self.client.get(url, {"ordering": "-meals_count"})
self.assertEqual(response.status_code, 200)
self.assertEqual(Menu.objects.count(), 2)
self.assertEqual(len(response.json()), 2)
self.assertEqual(
len(response.json()[0]["meals"]), self.menu_one.meals.count()
)
self.assertEqual(
len(response.json()[1]["meals"]), self.menu_two.meals.count()
)
def test_return_list_menus_ordering_by_name_mealsAZ(self):
"""Test sprawdza czy wyswietlamy listę menu alfabtycznie A-Z po nazwie dań."""
self.menu_one = MenuFactory.create(name="Menu One")
MealFactory.create(menu=self.menu_one, name="Burak")
self.menu_two = MenuFactory.create(name="Menu Two")
MealFactory.create(menu=self.menu_two, name="Ananas")
url = reverse("menu-list")
response = self.client.get(url, {"ordering": "meals"})
self.assertEqual(response.status_code, 200)
self.assertEqual(Menu.objects.count(), 2)
self.assertEqual(
len(response.json()[1]["meals"]), self.menu_one.meals.count()
)
self.assertEqual(
len(response.json()[0]["meals"]), self.menu_two.meals.count()
)
def test_return_list_menus_ordering_by_name_mealsZA(self):
"""Test sprawdza czy wyswietlamy listę menu alfabtycznie Z-A po nazwie dań."""
self.menu_one = MenuFactory.create(name="Menu One")
MealFactory.create(menu=self.menu_one, name="Burak")
self.menu_two = MenuFactory.create(name="Menu Two")
MealFactory.create(menu=self.menu_two, name="Ananas")
url = reverse("menu-list")
response = self.client.get(url, {"ordering": "-meals"})
self.assertEqual(response.status_code, 200)
self.assertEqual(Menu.objects.count(), 2)
self.assertEqual(
len(response.json()[0]["meals"]), self.menu_one.meals.count()
)
self.assertEqual(
len(response.json()[1]["meals"]), self.menu_two.meals.count()
)
def test_return_list_menus_filtering_by_menu_name(self):
"""Test sprawdza czy wyswietlamy menu odfiltrowane po nazwie."""
menu_one = MenuFactory.create(name="Jajecznica")
MealFactory.create(menu=menu_one)
menu_two = MenuFactory.create(name="Polędwica")
MealFactory.create(menu=menu_two)
url = reverse("menu-list")
response = self.client.get(url, {"name": "Jajecznica"})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.json()), 1)
def test_return_list_menus_filtering_by_create_date(self):
"""Test sprawdza czy wyswietlamy menu odfiltrowane po dacie utworzenia."""
menu_one = MenuFactory.create(name="Jajecznica")
MealFactory.create(menu=menu_one)
yesterday = datetime.datetime.now() - timedelta(days=3)
menu_two = MenuFactory.create(name="Polędwica")
menu_two.created = yesterday
menu_two.save()
MealFactory.create(menu=menu_two)
search_date = yesterday.strftime("%Y-%m-%d")
url = reverse("menu-list")
response = self.client.get(url, {"created": search_date})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.json()), 1)
def test_return_list_menus_filtering_by_update_date(self):
"""Test sprawdza czy wyswietlamy menu odfiltrowane po dacie aktualizacji."""
today = datetime.datetime.today()
past = today - timedelta(days=3)
menu_two = MenuFactory.create(name="Polędwica")
menu_two.created = past
menu_two.name = "Dziczyzna"
menu_two.save()
menu_two.refresh_from_db()
MealFactory.create(menu=menu_two)
search_date = today.strftime("%Y-%m-%d")
url = reverse("menu-list")
response = self.client.get(url, {"updated": search_date})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.json()), 1)
def test_create_menu_should_return_403_without_auth(self):
"""Test sprawdza czy utworzymy menu bez logowania."""
data = {"name": "Nowe Menu", "description": "Opis menu"}
url = reverse("menu-list")
response = self.client.post(url, json=data)
self.assertEqual(response.status_code, 403)
def test_create_menu_should_return_201(self):
"""Test sprawdza czy utworzymy menu z logowaniem."""
self.client.force_authenticate(user=self.user)
data = {"name": "Nowe Menu", "description": "Opis menu"}
url = reverse("menu-list")
response = self.client.post(url, data)
self.assertEqual(response.status_code, 201)
self.assertEqual(Menu.objects.count(), 1)
self.assertTrue(
all(
[
True
for attr in ["name", "description", "id", "meals"]
if attr in response.json().keys()
]
)
)
self.assertEqual(response.json()["meals"], [])
def test_create_menu_should_return_400_with_existing_name(self):
"""Test sprawdza czy utworzymy menu z już istniejącą nazwą."""
self.client.force_authenticate(user=self.user)
MenuFactory.create(name="Nowe Menu")
data = {"name": "Nowe Menu", "description": "Opis menu"}
url = reverse("menu-list")
response = self.client.post(url, data)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.json(), {"name": ["Istnieje już menu z tą nazwą!"]}
)
def test_create_menu_should_return_400_with_long_name(self):
"""Test sprawdza czy utworzymy menu z za długą nazwą."""
self.client.force_authenticate(user=self.user)
menu_name = "N" * 60
data = {"name": menu_name, "description": "Opis menu"}
url = reverse("menu-list")
response = self.client.post(url, data)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.json(), {"name": ["Maksymalna ilość znaków: 50."]}
)
def test_detail_view_menu_should_return_404_without_auth(self):
"""Test sprawdza czy wyświetlimy menu bez autoryzacji."""
menu = MenuFactory.create(name="Menu Trzy")
url = reverse("menu-detail", kwargs={"pk": menu.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_detail_view_menu_should_return_200_with_auth(self):
"""Test sprawdza czy wyświetlimy menu z autoryzacją."""
self.client.force_authenticate(user=self.user)
menu = MenuFactory.create(name="Menu Trzy")
url = reverse("menu-detail", kwargs={"pk": menu.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()["id"], menu.id)
def test_update_patch_menu_name_should_return_200(self):
"""Test sprawdza czy zaktualizujemy nazwę menu."""
self.client.force_authenticate(user=self.user)
menu = MenuFactory.create(name="Menu Trzy")
url = reverse("menu-detail", kwargs={"pk": menu.id})
response = self.client.patch(url, {"name": "Nowa Nazwa"})
menu.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()["name"], menu.name)
def test_update_put_menu_should_return_200(self):
"""Test sprawdza czy zaktualizujemy menu."""
self.client.force_authenticate(user=self.user)
menu = MenuFactory.create(name="Menu Trzy")
url = reverse("menu-detail", kwargs={"pk": menu.id})
data = MenuSerializer(menu).data
data["name"] = "<NAME>"
response = self.client.put(url, data)
menu.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()["name"], menu.name)
def test_update_patch_no_exists_menu_should_return_404(self):
"""Test sprawdza czy zaktualizujemy nieistniejące menu."""
self.client.force_authenticate(user=self.user)
url = reverse("menu-detail", kwargs={"pk": 123})
response = self.client.patch(url, {"name": "Nowa Nazwa"})
self.assertEqual(response.status_code, 404)
class MealsAcceptanceTests(TestCase):
def setUp(self):
self.client = APIClient()
Meal.objects.all().delete()
Menu.objects.all().delete()
self.user = UserFactory.create(username="Jan")
def test_create_meal_in_no_exists_menu_return_404(self):
"""Test sprawdza czy utworzymy posiłęk w nieistniejącym menu."""
self.client.force_authenticate(user=self.user)
data = {
"name": "Jajecznica",
"description": "Super Hajecznica",
"price": 2.35,
"is_vege": True,
"prepartion_time": 10,
}
url = reverse("menus-meal-list", kwargs={"parent_lookup_menu": 2})
response = self.client.post(url, data)
self.assertEqual(response.status_code, 404)
def test_create_meal_in_exist_menu(self):
"""Test sprawdza czy utworzymy posiłęk w menu."""
self.client.force_authenticate(user=self.user)
menu = MenuFactory.create(name="Menu Jajeczne")
meal_name = "Jajecznica"
data = {
"name": meal_name,
"description": "Super Hajecznica",
"price": 2.35,
"is_vege": True,
"prepartion_time": 10,
}
url = reverse(
"menus-meal-list", kwargs={"parent_lookup_menu": menu.id}
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.json()["menu"], menu.id)
self.assertEqual(response.json()["name"], meal_name)
def test_create_meal_without_auth_403(self):
"""Test sprawdza czy utworzymy posiłęk bez logowania."""
url = reverse("menus-meal-list", kwargs={"parent_lookup_menu": 1})
response = self.client.post(url, {})
self.assertEqual(response.status_code, 403)
def test_get_detail_meal_should_return_200(self):
"""Test sprawdza czy wyświetlimy posiłek."""
self.client.force_authenticate(user=self.user)
menu = MenuFactory.create(name="Menu Jajeczne")
meal = MealFactory.create(menu=menu)
url = reverse(
"menus-meal-detail",
kwargs={"parent_lookup_menu": menu.id, "pk": meal.id},
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_update_patch_no_exists_meal_should_return_404(self):
"""Test sprawdza czy zaktualizujemy nieistniejący posiłek."""
self.client.force_authenticate(user=self.user)
menu = MenuFactory.create(name="Menu Jajeczne")
url = reverse(
"menus-meal-detail",
kwargs={"parent_lookup_menu": menu.id, "pk": 123},
)
response = self.client.patch(url, {"name": "Nowa Nazwa"})
self.assertEqual(response.status_code, 404)
def test_update_patch_exists_meal_should_return_200(self):
"""Test sprawdza czy zaktualizujemy nieistniejący posiłek."""
self.client.force_authenticate(user=self.user)
menu = MenuFactory.create(name="Menu Jajeczne")
meal = MealFactory.create(menu=menu)
menu.refresh_from_db()
meal.refresh_from_db()
url = reverse(
"menus-meal-detail",
kwargs={"parent_lookup_menu": menu.id, "pk": meal.id},
)
response = self.client.patch(url, {"name": "<NAME>"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()["name"], "<NAME>")
def test_delete_meal_return_204(self):
"""Test sprawdza czy usuniemy posiłęk z menu."""
self.client.force_authenticate(user=self.user)
menu = MenuFactory.create(name="Menu Jajeczne")
meal = MealFactory.create(menu=menu)
menu.refresh_from_db()
meal.refresh_from_db()
url = reverse(
"menus-meal-detail",
kwargs={"parent_lookup_menu": menu.id, "pk": meal.id},
)
response = self.client.delete(url)
self.assertEqual(response.status_code, 204)
self.assertEqual(Meal.objects.count(), 0)
| import datetime
from datetime import timedelta
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIClient
from api.factories import MealFactory, MenuFactory, UserFactory
from api.models import Meal, Menu
from api.serializers import MenuSerializer
class MenusAcceptanceTests(TestCase):
def setUp(self):
self.client = APIClient()
self.client.cookies.load({"pl": "pl"})
Meal.objects.all().delete()
Menu.objects.all().delete()
self.user = UserFactory.create(username="Jan")
def test_return_list_menus_without_empty_meals_without_auth(self):
"""Test sprawdza czy wyswietlamy listę menu bez pustych dań bez logowania."""
MenuFactory.create()
self.menu = MenuFactory.create()
self.meal = MealFactory.create(menu=self.menu)
url = reverse("menu-list")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(Menu.objects.count(), 2)
self.assertEqual(len(response.json()), 1)
self.assertEqual(len(response.json()[0]["meals"]), 1)
def test_return_list_all_menus_with_auth(self):
"""Test sprawdza czy wyswietlamy listę wszystkich menu będąc zalogowanym z domyślnym sortowaniem po nazwie menu."""
MenuFactory.create(name="A")
self.menu = MenuFactory.create(name="B")
self.meal = MealFactory.create(menu=self.menu)
self.client.force_authenticate(user=self.user)
url = reverse("menu-list")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(Menu.objects.count(), 2)
self.assertEqual(len(response.json()), 2)
self.assertEqual(len(response.json()[1]["meals"]), 1)
def test_return_list_menus_ordering_by_count_meals(self):
"""Test sprawdza czy wyswietlamy listę menu w kolejności malejącej ilości dań w menu."""
self.menu_one = MenuFactory.create(name="Menu One")
MealFactory.create_batch(menu=self.menu_one, size=5)
self.menu_two = MenuFactory.create(name="Menu Two")
MealFactory.create_batch(menu=self.menu_two, size=3)
url = reverse("menu-list")
response = self.client.get(url, {"ordering": "-meals_count"})
self.assertEqual(response.status_code, 200)
self.assertEqual(Menu.objects.count(), 2)
self.assertEqual(len(response.json()), 2)
self.assertEqual(
len(response.json()[0]["meals"]), self.menu_one.meals.count()
)
self.assertEqual(
len(response.json()[1]["meals"]), self.menu_two.meals.count()
)
def test_return_list_menus_ordering_by_name_mealsAZ(self):
"""Test sprawdza czy wyswietlamy listę menu alfabtycznie A-Z po nazwie dań."""
self.menu_one = MenuFactory.create(name="Menu One")
MealFactory.create(menu=self.menu_one, name="Burak")
self.menu_two = MenuFactory.create(name="Menu Two")
MealFactory.create(menu=self.menu_two, name="Ananas")
url = reverse("menu-list")
response = self.client.get(url, {"ordering": "meals"})
self.assertEqual(response.status_code, 200)
self.assertEqual(Menu.objects.count(), 2)
self.assertEqual(
len(response.json()[1]["meals"]), self.menu_one.meals.count()
)
self.assertEqual(
len(response.json()[0]["meals"]), self.menu_two.meals.count()
)
def test_return_list_menus_ordering_by_name_mealsZA(self):
"""Test sprawdza czy wyswietlamy listę menu alfabtycznie Z-A po nazwie dań."""
self.menu_one = MenuFactory.create(name="Menu One")
MealFactory.create(menu=self.menu_one, name="Burak")
self.menu_two = MenuFactory.create(name="Menu Two")
MealFactory.create(menu=self.menu_two, name="Ananas")
url = reverse("menu-list")
response = self.client.get(url, {"ordering": "-meals"})
self.assertEqual(response.status_code, 200)
self.assertEqual(Menu.objects.count(), 2)
self.assertEqual(
len(response.json()[0]["meals"]), self.menu_one.meals.count()
)
self.assertEqual(
len(response.json()[1]["meals"]), self.menu_two.meals.count()
)
def test_return_list_menus_filtering_by_menu_name(self):
"""Test sprawdza czy wyswietlamy menu odfiltrowane po nazwie."""
menu_one = MenuFactory.create(name="Jajecznica")
MealFactory.create(menu=menu_one)
menu_two = MenuFactory.create(name="Polędwica")
MealFactory.create(menu=menu_two)
url = reverse("menu-list")
response = self.client.get(url, {"name": "Jajecznica"})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.json()), 1)
def test_return_list_menus_filtering_by_create_date(self):
"""Test sprawdza czy wyswietlamy menu odfiltrowane po dacie utworzenia."""
menu_one = MenuFactory.create(name="Jajecznica")
MealFactory.create(menu=menu_one)
yesterday = datetime.datetime.now() - timedelta(days=3)
menu_two = MenuFactory.create(name="Polędwica")
menu_two.created = yesterday
menu_two.save()
MealFactory.create(menu=menu_two)
search_date = yesterday.strftime("%Y-%m-%d")
url = reverse("menu-list")
response = self.client.get(url, {"created": search_date})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.json()), 1)
def test_return_list_menus_filtering_by_update_date(self):
"""Test sprawdza czy wyswietlamy menu odfiltrowane po dacie aktualizacji."""
today = datetime.datetime.today()
past = today - timedelta(days=3)
menu_two = MenuFactory.create(name="Polędwica")
menu_two.created = past
menu_two.name = "Dziczyzna"
menu_two.save()
menu_two.refresh_from_db()
MealFactory.create(menu=menu_two)
search_date = today.strftime("%Y-%m-%d")
url = reverse("menu-list")
response = self.client.get(url, {"updated": search_date})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.json()), 1)
def test_create_menu_should_return_403_without_auth(self):
"""Test sprawdza czy utworzymy menu bez logowania."""
data = {"name": "Nowe Menu", "description": "Opis menu"}
url = reverse("menu-list")
response = self.client.post(url, json=data)
self.assertEqual(response.status_code, 403)
def test_create_menu_should_return_201(self):
"""Test sprawdza czy utworzymy menu z logowaniem."""
self.client.force_authenticate(user=self.user)
data = {"name": "Nowe Menu", "description": "Opis menu"}
url = reverse("menu-list")
response = self.client.post(url, data)
self.assertEqual(response.status_code, 201)
self.assertEqual(Menu.objects.count(), 1)
self.assertTrue(
all(
[
True
for attr in ["name", "description", "id", "meals"]
if attr in response.json().keys()
]
)
)
self.assertEqual(response.json()["meals"], [])
def test_create_menu_should_return_400_with_existing_name(self):
"""Test sprawdza czy utworzymy menu z już istniejącą nazwą."""
self.client.force_authenticate(user=self.user)
MenuFactory.create(name="Nowe Menu")
data = {"name": "Nowe Menu", "description": "Opis menu"}
url = reverse("menu-list")
response = self.client.post(url, data)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.json(), {"name": ["Istnieje już menu z tą nazwą!"]}
)
def test_create_menu_should_return_400_with_long_name(self):
"""Test sprawdza czy utworzymy menu z za długą nazwą."""
self.client.force_authenticate(user=self.user)
menu_name = "N" * 60
data = {"name": menu_name, "description": "Opis menu"}
url = reverse("menu-list")
response = self.client.post(url, data)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.json(), {"name": ["Maksymalna ilość znaków: 50."]}
)
def test_detail_view_menu_should_return_404_without_auth(self):
"""Test sprawdza czy wyświetlimy menu bez autoryzacji."""
menu = MenuFactory.create(name="Menu Trzy")
url = reverse("menu-detail", kwargs={"pk": menu.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_detail_view_menu_should_return_200_with_auth(self):
"""Test sprawdza czy wyświetlimy menu z autoryzacją."""
self.client.force_authenticate(user=self.user)
menu = MenuFactory.create(name="Menu Trzy")
url = reverse("menu-detail", kwargs={"pk": menu.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()["id"], menu.id)
def test_update_patch_menu_name_should_return_200(self):
"""Test sprawdza czy zaktualizujemy nazwę menu."""
self.client.force_authenticate(user=self.user)
menu = MenuFactory.create(name="Menu Trzy")
url = reverse("menu-detail", kwargs={"pk": menu.id})
response = self.client.patch(url, {"name": "Nowa Nazwa"})
menu.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()["name"], menu.name)
def test_update_put_menu_should_return_200(self):
"""Test sprawdza czy zaktualizujemy menu."""
self.client.force_authenticate(user=self.user)
menu = MenuFactory.create(name="Menu Trzy")
url = reverse("menu-detail", kwargs={"pk": menu.id})
data = MenuSerializer(menu).data
data["name"] = "<NAME>"
response = self.client.put(url, data)
menu.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()["name"], menu.name)
def test_update_patch_no_exists_menu_should_return_404(self):
"""Test sprawdza czy zaktualizujemy nieistniejące menu."""
self.client.force_authenticate(user=self.user)
url = reverse("menu-detail", kwargs={"pk": 123})
response = self.client.patch(url, {"name": "Nowa Nazwa"})
self.assertEqual(response.status_code, 404)
class MealsAcceptanceTests(TestCase):
def setUp(self):
self.client = APIClient()
Meal.objects.all().delete()
Menu.objects.all().delete()
self.user = UserFactory.create(username="Jan")
def test_create_meal_in_no_exists_menu_return_404(self):
"""Test sprawdza czy utworzymy posiłęk w nieistniejącym menu."""
self.client.force_authenticate(user=self.user)
data = {
"name": "Jajecznica",
"description": "Super Hajecznica",
"price": 2.35,
"is_vege": True,
"prepartion_time": 10,
}
url = reverse("menus-meal-list", kwargs={"parent_lookup_menu": 2})
response = self.client.post(url, data)
self.assertEqual(response.status_code, 404)
def test_create_meal_in_exist_menu(self):
"""Test sprawdza czy utworzymy posiłęk w menu."""
self.client.force_authenticate(user=self.user)
menu = MenuFactory.create(name="Menu Jajeczne")
meal_name = "Jajecznica"
data = {
"name": meal_name,
"description": "Super Hajecznica",
"price": 2.35,
"is_vege": True,
"prepartion_time": 10,
}
url = reverse(
"menus-meal-list", kwargs={"parent_lookup_menu": menu.id}
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.json()["menu"], menu.id)
self.assertEqual(response.json()["name"], meal_name)
def test_create_meal_without_auth_403(self):
"""Test sprawdza czy utworzymy posiłęk bez logowania."""
url = reverse("menus-meal-list", kwargs={"parent_lookup_menu": 1})
response = self.client.post(url, {})
self.assertEqual(response.status_code, 403)
def test_get_detail_meal_should_return_200(self):
"""Test sprawdza czy wyświetlimy posiłek."""
self.client.force_authenticate(user=self.user)
menu = MenuFactory.create(name="Menu Jajeczne")
meal = MealFactory.create(menu=menu)
url = reverse(
"menus-meal-detail",
kwargs={"parent_lookup_menu": menu.id, "pk": meal.id},
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_update_patch_no_exists_meal_should_return_404(self):
"""Test sprawdza czy zaktualizujemy nieistniejący posiłek."""
self.client.force_authenticate(user=self.user)
menu = MenuFactory.create(name="Menu Jajeczne")
url = reverse(
"menus-meal-detail",
kwargs={"parent_lookup_menu": menu.id, "pk": 123},
)
response = self.client.patch(url, {"name": "Nowa Nazwa"})
self.assertEqual(response.status_code, 404)
def test_update_patch_exists_meal_should_return_200(self):
"""Test sprawdza czy zaktualizujemy nieistniejący posiłek."""
self.client.force_authenticate(user=self.user)
menu = MenuFactory.create(name="Menu Jajeczne")
meal = MealFactory.create(menu=menu)
menu.refresh_from_db()
meal.refresh_from_db()
url = reverse(
"menus-meal-detail",
kwargs={"parent_lookup_menu": menu.id, "pk": meal.id},
)
response = self.client.patch(url, {"name": "<NAME>"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()["name"], "<NAME>")
def test_delete_meal_return_204(self):
"""Test sprawdza czy usuniemy posiłęk z menu."""
self.client.force_authenticate(user=self.user)
menu = MenuFactory.create(name="Menu Jajeczne")
meal = MealFactory.create(menu=menu)
menu.refresh_from_db()
meal.refresh_from_db()
url = reverse(
"menus-meal-detail",
kwargs={"parent_lookup_menu": menu.id, "pk": meal.id},
)
response = self.client.delete(url)
self.assertEqual(response.status_code, 204)
self.assertEqual(Meal.objects.count(), 0)
| pl | 0.999021 | Test sprawdza czy wyswietlamy listę menu bez pustych dań bez logowania. Test sprawdza czy wyswietlamy listę wszystkich menu będąc zalogowanym z domyślnym sortowaniem po nazwie menu. Test sprawdza czy wyswietlamy listę menu w kolejności malejącej ilości dań w menu. Test sprawdza czy wyswietlamy listę menu alfabtycznie A-Z po nazwie dań. Test sprawdza czy wyswietlamy listę menu alfabtycznie Z-A po nazwie dań. Test sprawdza czy wyswietlamy menu odfiltrowane po nazwie. Test sprawdza czy wyswietlamy menu odfiltrowane po dacie utworzenia. Test sprawdza czy wyswietlamy menu odfiltrowane po dacie aktualizacji. Test sprawdza czy utworzymy menu bez logowania. Test sprawdza czy utworzymy menu z logowaniem. Test sprawdza czy utworzymy menu z już istniejącą nazwą. Test sprawdza czy utworzymy menu z za długą nazwą. Test sprawdza czy wyświetlimy menu bez autoryzacji. Test sprawdza czy wyświetlimy menu z autoryzacją. Test sprawdza czy zaktualizujemy nazwę menu. Test sprawdza czy zaktualizujemy menu. Test sprawdza czy zaktualizujemy nieistniejące menu. Test sprawdza czy utworzymy posiłęk w nieistniejącym menu. Test sprawdza czy utworzymy posiłęk w menu. Test sprawdza czy utworzymy posiłęk bez logowania. Test sprawdza czy wyświetlimy posiłek. Test sprawdza czy zaktualizujemy nieistniejący posiłek. Test sprawdza czy zaktualizujemy nieistniejący posiłek. Test sprawdza czy usuniemy posiłęk z menu. | 2.520946 | 3 |
tf/tf_add.py | soragui/startml | 0 | 6615583 | <reponame>soragui/startml<filename>tf/tf_add.py
import tensorflow as tf
def tf_add():
a_t = tf.constant(2)
b_t = tf.constant(3)
c_t = a_t + b_t
print("Tensorflow adding: ", c_t)
with tf.Session() as sess:
c_t_value = sess.run(c_t)
print("c_t_value: ", c_t_value)
if __name__ == "__main__":
tf_add() | import tensorflow as tf
def tf_add():
a_t = tf.constant(2)
b_t = tf.constant(3)
c_t = a_t + b_t
print("Tensorflow adding: ", c_t)
with tf.Session() as sess:
c_t_value = sess.run(c_t)
print("c_t_value: ", c_t_value)
if __name__ == "__main__":
tf_add() | none | 1 | 2.88478 | 3 | |
Python/src/problem0167.py | Const-Grigoryev/LeetCode | 0 | 6615584 | # 167. Two Sum II - Input array is sorted
# ---------------------------------------
#
# Given an array of integers `numbers` that is already ***sorted in non-decreasing order***, find two numbers such that
# they add up to a specific `target` number.
#
# Return *the indices of the two numbers **(1-indexed)** as an integer array `answer` of size `2`,
# where `1 <= answer[0] < answer[1] <= numbers.length`*.
#
# The tests are generated such that there is **exactly one solution**. You **may not** use the same element twice.
#
# ### Constraints:
#
# * `2 <= numbers.length <= 3 * 10^4`
# * `-1000 <= numbers[i] <= 1000`
# * `numbers` is sorted in **non-decreasing order**.
# * `-1000 <= target <= 1000`
# * The tests are generated such that there is **exactly one solution**.
#
# Source: https://leetcode.com/problems/two-sum-ii-input-array-is-sorted/
from typing import List
class Solution:
def twoSum(self, numbers: List[int], target: int) -> List[int]:
i, j = 0, len(numbers) - 1
while i < j:
x = numbers[i]
y = numbers[j]
if x + y < target:
i += 1
elif x + y > target:
j -= 1
else:
return [i+1, j+1]
if __name__ == '__main__':
s = Solution()
# Example 1:
#
# Input: numbers = [2,7,11,15], target = 9
# Output: [1,2]
# Explanation: The sum of 2 and 7 is 9. Therefore index1 = 1, index2 = 2.
print(f"{s.twoSum(numbers=[2, 7, 11, 15], target=9)} == [1, 2]")
# Example 2:
#
# Input: numbers = [2,3,4], target = 6
# Output: [1,3]
print(f"{s.twoSum(numbers=[2, 3, 4], target=6)} == [1, 3]")
# Example 3:
#
# Input: numbers = [-1,0], target = -1
# Output: [1,2]
print(f"{s.twoSum(numbers=[-1, 0], target=-1)} == [1, 2]") | # 167. Two Sum II - Input array is sorted
# ---------------------------------------
#
# Given an array of integers `numbers` that is already ***sorted in non-decreasing order***, find two numbers such that
# they add up to a specific `target` number.
#
# Return *the indices of the two numbers **(1-indexed)** as an integer array `answer` of size `2`,
# where `1 <= answer[0] < answer[1] <= numbers.length`*.
#
# The tests are generated such that there is **exactly one solution**. You **may not** use the same element twice.
#
# ### Constraints:
#
# * `2 <= numbers.length <= 3 * 10^4`
# * `-1000 <= numbers[i] <= 1000`
# * `numbers` is sorted in **non-decreasing order**.
# * `-1000 <= target <= 1000`
# * The tests are generated such that there is **exactly one solution**.
#
# Source: https://leetcode.com/problems/two-sum-ii-input-array-is-sorted/
from typing import List
class Solution:
def twoSum(self, numbers: List[int], target: int) -> List[int]:
i, j = 0, len(numbers) - 1
while i < j:
x = numbers[i]
y = numbers[j]
if x + y < target:
i += 1
elif x + y > target:
j -= 1
else:
return [i+1, j+1]
if __name__ == '__main__':
s = Solution()
# Example 1:
#
# Input: numbers = [2,7,11,15], target = 9
# Output: [1,2]
# Explanation: The sum of 2 and 7 is 9. Therefore index1 = 1, index2 = 2.
print(f"{s.twoSum(numbers=[2, 7, 11, 15], target=9)} == [1, 2]")
# Example 2:
#
# Input: numbers = [2,3,4], target = 6
# Output: [1,3]
print(f"{s.twoSum(numbers=[2, 3, 4], target=6)} == [1, 3]")
# Example 3:
#
# Input: numbers = [-1,0], target = -1
# Output: [1,2]
print(f"{s.twoSum(numbers=[-1, 0], target=-1)} == [1, 2]") | en | 0.752835 | # 167. Two Sum II - Input array is sorted # --------------------------------------- # # Given an array of integers `numbers` that is already ***sorted in non-decreasing order***, find two numbers such that # they add up to a specific `target` number. # # Return *the indices of the two numbers **(1-indexed)** as an integer array `answer` of size `2`, # where `1 <= answer[0] < answer[1] <= numbers.length`*. # # The tests are generated such that there is **exactly one solution**. You **may not** use the same element twice. # # ### Constraints: # # * `2 <= numbers.length <= 3 * 10^4` # * `-1000 <= numbers[i] <= 1000` # * `numbers` is sorted in **non-decreasing order**. # * `-1000 <= target <= 1000` # * The tests are generated such that there is **exactly one solution**. # # Source: https://leetcode.com/problems/two-sum-ii-input-array-is-sorted/ # Example 1: # # Input: numbers = [2,7,11,15], target = 9 # Output: [1,2] # Explanation: The sum of 2 and 7 is 9. Therefore index1 = 1, index2 = 2. # Example 2: # # Input: numbers = [2,3,4], target = 6 # Output: [1,3] # Example 3: # # Input: numbers = [-1,0], target = -1 # Output: [1,2] | 3.875597 | 4 |
usure/classification/infrastructure/intertass_xml_parser.py | coraxcr/usure | 2 | 6615585 | <filename>usure/classification/infrastructure/intertass_xml_parser.py
from typing import Iterable
from usure.common import fileutils
from xml.etree import ElementTree as et
from usure.classification.core import LabeledComments, SentenceCleaner
from .basic_sentence_cleaner import BasicSentenceCleaner
class InterTassXMLParser:
def __init__(self, folderpath, filename, cleaner:SentenceCleaner):
self._name = filename
self._xml = et.parse(fileutils.join(folderpath, filename))
self._cleaner = cleaner
def get(self) -> LabeledComments:
tweets = self._xml.getroot().findall("tweet")
comments, labels = [], []
for tweet in tweets:
content = tweet.find("content").text
content = self._cleaner.clean(content)
comments.append(content)
labels.append(tweet.find("./sentiment/polarity/value").text)
return LabeledComments(self._name, comments, labels)
def change_polarity_value(self, labels:Iterable[str]):
tweets = self._xml.getroot().findall("tweet")
for i, tweet in enumerate(tweets):
tweet.find("./sentiment/polarity/value").text = labels[i]
return self._xml
def save(self, folderpath, filename, xml):
path = fileutils.join(folderpath, filename)
xml.write(path)
| <filename>usure/classification/infrastructure/intertass_xml_parser.py
from typing import Iterable
from usure.common import fileutils
from xml.etree import ElementTree as et
from usure.classification.core import LabeledComments, SentenceCleaner
from .basic_sentence_cleaner import BasicSentenceCleaner
class InterTassXMLParser:
    """Read an InterTASS XML corpus and expose its tweets as labeled comments."""
    def __init__(self, folderpath, filename, cleaner:SentenceCleaner):
        # The file name also serves as the corpus identifier.
        self._name = filename
        self._xml = et.parse(fileutils.join(folderpath, filename))
        self._cleaner = cleaner
    def get(self) -> LabeledComments:
        """Collect every cleaned tweet content and its polarity label."""
        nodes = self._xml.getroot().findall("tweet")
        cleaned = [self._cleaner.clean(node.find("content").text)
                   for node in nodes]
        polarities = [node.find("./sentiment/polarity/value").text
                      for node in nodes]
        return LabeledComments(self._name, cleaned, polarities)
    def change_polarity_value(self, labels:Iterable[str]):
        """Replace every tweet's polarity value by the label at the same index.

        Returns the modified ElementTree so it can be handed to ``save``.
        """
        nodes = self._xml.getroot().findall("tweet")
        for position, node in enumerate(nodes):
            node.find("./sentiment/polarity/value").text = labels[position]
        return self._xml
    def save(self, folderpath, filename, xml):
        """Serialize an XML tree under ``folderpath``/``filename``."""
        destination = fileutils.join(folderpath, filename)
        xml.write(destination)
| none | 1 | 2.546275 | 3 | |
python/nn/model_definition.py | PeterJackNaylor/AutomaticWSI | 0 | 6615586 | <filename>python/nn/model_definition.py
# Defines the neural network used in the WELDON model.
# Implemented in keras
import tensorflow as tf
from keras import backend as K
#config = tf.ConfigProto(log_device_placement=True)
#config.gpu_options.allow_growth = True
#K.tensorflow_backend.set_session(tf.Session(config=config))
from keras.optimizers import Adam
from keras_utils import *
from keras.layers import Add
from nn_metrics import import_metrics
### Model Names, Convention:
###
### For the following models, designated by model_n_m,
### n and m integers. We have the following architecture
### a block of n 1D convolutional layers applied to the tiles.
### Followed by a pooling layer. Followed by m fully connected
### layers. Followed by a softmax with output equal to the
### number of classes
###
def model_one_one(n_classes, hidden_btleneck=128, hidden_fcn=512,
                  weight_decay=0.0005, input_depth=2048, drop_out=0.5,
                  aggr="avg", gaussian_param=0, k=10, activation_middle="relu"):
    """
    The simplest version of model_1, variant a.

    NN architecture:
    Input layer -> bottleneck layer (size hidden_btleneck) -> 1D pooling with aggr
    -> fully connected layer (size hidden_fcn) -> softmax layer (size n_classes)

    Parameters
    ----------
    n_classes : int
        as we have a classification, this is the number of neurons (classes)
        of the final softmax layer.
    hidden_btleneck: int
        size of the first intermediate bottleneck layer.
    hidden_fcn: int
        size of the final fully connected dense layer.
    weight_decay: float
        L2 regularisation parameter.
    input_depth: int
        depth of each tile encoding fed into the neural network.
    drop_out: float
        drop out rate applied to the input tiles and to the dense layers.
    aggr: string, one of "avg", "max", "weldon", "weldon_conc", "conan_plus"
        pooling function applied to the new tile representations.
    gaussian_param: float
        if 0, no gaussian dropout; otherwise gaussian noise is added to the
        input tiles.
    k: int
        number of top/bottom instances kept by the Weldon-style poolings.
    activation_middle: string, relu, tanh, None, ...
        non linear activation function applied to the new tile encodings.

    Returns
    -------
    Object: keras.models.Model
        Uncompiled keras model (compilation happens in `load_model`).

    Raises
    ------
    ValueError
        if `aggr` is not one of the supported pooling names.
    """
    input_size = (None, input_depth)
    in_layer = Input(shape=input_size)
    if drop_out != 0:
        # noise_shape=(1, input_depth) drops the same features across tiles
        x_i = Dropout(drop_out, noise_shape=(1, input_depth))(in_layer)
    else:
        x_i = in_layer
    if gaussian_param != 0:
        x_i = GaussianDropout(gaussian_param)(x_i)
    x_i = conv_shape_bn_act_drop(x_i, hidden_btleneck, weight_decay,
                                 drop_out, input_size, name="bottleneck_1",
                                 activation=activation_middle)
    if aggr == "avg":
        x_i = GlobalAveragePooling1D()(x_i)
    elif aggr == "max":
        x_i = GlobalMaxPooling1D()(x_i)
    elif aggr == "weldon":
        x_i = WeldonPooling(x_i, k)
    elif aggr == "weldon_conc":
        # per-tile score used to pick the k top/bottom tile encodings
        s_i = conv_shape_bn_act_drop(x_i, 1, weight_decay,
                                     drop_out, (None, hidden_btleneck), name="score",
                                     activation=activation_middle)
        x_i = WeldonConcPooling(s_i, x_i, k)
        x_i = Flatten()(x_i)
    elif aggr == "conan_plus":
        # weldon-style selected encodings concatenated with the average encoding
        s_i = conv_shape_bn_act_drop(x_i, 1, weight_decay,
                                     drop_out, (None, hidden_btleneck),
                                     name="score",
                                     activation=activation_middle)
        weldon_conc = WeldonConcPooling(s_i, x_i, k)
        weldon_conc = Flatten()(weldon_conc)
        avg_encoding = GlobalAveragePooling1D()(x_i)
        x_i = Concatenate(axis=-1)([weldon_conc, avg_encoding])
    else:
        # original message only mentioned "avg or max" although five
        # pooling modes are supported
        raise ValueError("unknown aggr '{}', expected one of: avg, max, "
                         "weldon, weldon_conc, conan_plus".format(aggr))
    x_i = dense_bn_act_drop(x_i, hidden_fcn, "dense", weight_decay, drop_out)
    output_layer = Dense(n_classes, activation="softmax", use_bias=True,
                         kernel_initializer="glorot_normal",
                         bias_initializer="glorot_uniform",
                         kernel_regularizer=regularizers.l2(weight_decay))(x_i)
    model = Model(inputs=in_layer, outputs=output_layer)
    return model
def model_one_two(n_classes, hidden_btleneck=128, hidden_fcn=512,
                  weight_decay=0.0005, input_depth=2048, drop_out=0.5,
                  aggr="avg", gaussian_param=0, k=10, activation_middle="relu"):
    """
    The simplest version of model_1, variant b.

    NN architecture:
    Input layer -> bottleneck layer (size hidden_btleneck) -> 1D pooling with aggr
    -> fully connected layer (size hidden_fcn) -> fully connected layer (size hidden_fcn)
    -> softmax layer (size n_classes)

    Parameters
    ----------
    n_classes : int
        as we have a classification, this is the number of neurons (classes)
        of the final softmax layer.
    hidden_btleneck: int
        size of the first intermediate bottleneck layer.
    hidden_fcn: int
        size of the final fully connected dense layers.
    weight_decay: float
        L2 regularisation parameter.
    input_depth: int
        depth of each tile encoding fed into the neural network.
    drop_out: float
        drop out rate applied to the input tiles and to the dense layers.
    aggr: string, one of "avg", "max", "weldon", "weldon_conc", "conan_plus"
        pooling function applied to the new tile representations.
    gaussian_param: float
        if 0, no gaussian dropout; otherwise gaussian noise is added to the
        input tiles.
    k: int
        number of top/bottom instances kept by the Weldon-style poolings.
    activation_middle: string, relu, tanh, None, ...
        non linear activation function applied to the new tile encodings.

    Returns
    -------
    Object: keras.models.Model
        Uncompiled keras model (compilation happens in `load_model`).

    Raises
    ------
    ValueError
        if `aggr` is not one of the supported pooling names.
    """
    input_size = (None, input_depth)
    in_layer = Input(shape=input_size)
    if drop_out != 0:
        # noise_shape=(1, input_depth) drops the same features across tiles
        x_i = Dropout(drop_out, noise_shape=(1, input_depth))(in_layer)
    else:
        x_i = in_layer
    if gaussian_param != 0:
        x_i = GaussianDropout(gaussian_param)(x_i)
    x_i = conv_shape_bn_act_drop(x_i, hidden_btleneck, weight_decay,
                                 drop_out, input_size, name="bottleneck_1",
                                 activation=activation_middle)
    if aggr == "avg":
        x_i = GlobalAveragePooling1D()(x_i)
    elif aggr == "max":
        x_i = GlobalMaxPooling1D()(x_i)
    elif aggr == "weldon":
        x_i = WeldonPooling(x_i, k)
    elif aggr == "weldon_conc":
        # per-tile score used to pick the k top/bottom tile encodings
        s_i = conv_shape_bn_act_drop(x_i, 1, weight_decay,
                                     drop_out, (None, hidden_btleneck), name="score",
                                     activation=activation_middle)
        x_i = WeldonConcPooling(s_i, x_i, k)
        x_i = Flatten()(x_i)
    elif aggr == "conan_plus":
        # weldon-style selected encodings concatenated with the average encoding
        s_i = conv_shape_bn_act_drop(x_i, 1, weight_decay,
                                     drop_out, (None, hidden_btleneck),
                                     name="score",
                                     activation=activation_middle)
        weldon_conc = WeldonConcPooling(s_i, x_i, k)
        weldon_conc = Flatten()(weldon_conc)
        avg_encoding = GlobalAveragePooling1D()(x_i)
        x_i = Concatenate(axis=-1)([weldon_conc, avg_encoding])
    else:
        # was missing: an unknown aggr silently left the tile axis
        # unpooled, producing a per-tile output instead of a slide-level one
        raise ValueError("unknown aggr '{}', expected one of: avg, max, "
                         "weldon, weldon_conc, conan_plus".format(aggr))
    x_i = dense_bn_act_drop(x_i, hidden_fcn, "fcn_2_1",
                            weight_decay, drop_out)
    x_i = dense_bn_act_drop(x_i, hidden_fcn, "fcn_2_2",
                            weight_decay, drop_out)
    output_layer = Dense(n_classes, activation="softmax", use_bias=True,
                         kernel_initializer="glorot_normal",
                         bias_initializer="glorot_uniform",
                         kernel_regularizer=regularizers.l2(weight_decay))(x_i)
    model = Model(inputs=in_layer, outputs=output_layer)
    return model
def model_two_two(n_classes, hidden_btleneck=128, hidden_fcn=512, weight_decay=0.0005,
                  input_depth=2048, drop_out=0.5, aggr="avg", gaussian_param=0, k=10,
                  activation_middle="relu"):
    """
    The simplest version of model_1, variant c.

    NN architecture:
    Input layer -> bottleneck layer (size hidden_btleneck) ->
    fully connected layer (size hidden_fcn) ->
    bottleneck layer (size hidden_btleneck) -> 1D pooling with aggr
    -> fully connected layer (size hidden_fcn) ->
    fully connected layer (size hidden_fcn) -> softmax layer (size n_classes)

    Parameters
    ----------
    n_classes : int
        as we have a classification, this is the number of neurons (classes)
        of the final softmax layer.
    hidden_btleneck: int
        size of the intermediate bottleneck layers.
    hidden_fcn: int
        size of the fully connected dense layers.
    weight_decay: float
        L2 regularisation parameter.
    input_depth: int
        depth of each tile encoding fed into the neural network.
    drop_out: float
        drop out rate applied to the input tiles and to the dense layers.
    aggr: string, one of "avg", "max", "weldon", "weldon_conc", "conan_plus"
        pooling function applied to the new tile representations.
    gaussian_param: float
        if 0, no gaussian dropout; otherwise gaussian noise is added to the
        input tiles.
    k: int
        number of top/bottom instances kept by the Weldon-style poolings.
    activation_middle: string, relu, tanh, None, ...
        non linear activation function applied to the score layer.
        NOTE(review): unlike the other variants, the three pre-pooling conv
        blocks here use conv_shape_bn_act_drop's default activation —
        confirm this asymmetry is intentional.

    Returns
    -------
    Object: keras.models.Model
        Uncompiled keras model (compilation happens in `load_model`).

    Raises
    ------
    ValueError
        if `aggr` is not one of the supported pooling names.
    """
    input_size = (None, input_depth)
    in_layer = Input(shape=input_size)
    if drop_out != 0:
        # noise_shape=(1, input_depth) drops the same features across tiles
        x_i = Dropout(drop_out, noise_shape=(1, input_depth))(in_layer)
    else:
        x_i = in_layer
    if gaussian_param != 0:
        x_i = GaussianDropout(gaussian_param)(x_i)
    x_i = conv_shape_bn_act_drop(x_i, hidden_btleneck, weight_decay,
                                 drop_out, input_size, name="bottleneck_1")
    x_i = conv_shape_bn_act_drop(x_i, hidden_fcn, weight_decay, drop_out,
                                 (None, hidden_btleneck), "fcn_1_1")
    x_i = conv_shape_bn_act_drop(x_i, hidden_btleneck, weight_decay, drop_out,
                                 (None, hidden_fcn), "bottleneck_2")
    if aggr == "avg":
        x_i = GlobalAveragePooling1D()(x_i)
    elif aggr == "max":
        x_i = GlobalMaxPooling1D()(x_i)
    elif aggr == "weldon":
        x_i = WeldonPooling(x_i, k)
    elif aggr == "weldon_conc":
        # per-tile score used to pick the k top/bottom tile encodings
        s_i = conv_shape_bn_act_drop(x_i, 1, weight_decay,
                                     drop_out, (None, hidden_btleneck), name="score",
                                     activation=activation_middle)
        x_i = WeldonConcPooling(s_i, x_i, k)
        x_i = Flatten()(x_i)
    elif aggr == "conan_plus":
        # weldon-style selected encodings concatenated with the average encoding
        s_i = conv_shape_bn_act_drop(x_i, 1, weight_decay,
                                     drop_out, (None, hidden_btleneck),
                                     name="score",
                                     activation=activation_middle)
        weldon_conc = WeldonConcPooling(s_i, x_i, k)
        weldon_conc = Flatten()(weldon_conc)
        avg_encoding = GlobalAveragePooling1D()(x_i)
        x_i = Concatenate(axis=-1)([weldon_conc, avg_encoding])
    else:
        # was missing: an unknown aggr silently left the tile axis
        # unpooled, producing a per-tile output instead of a slide-level one
        raise ValueError("unknown aggr '{}', expected one of: avg, max, "
                         "weldon, weldon_conc, conan_plus".format(aggr))
    x_i = dense_bn_act_drop(x_i, hidden_fcn, "fcn_2_1",
                            weight_decay, drop_out)
    x_i = dense_bn_act_drop(x_i, hidden_fcn, "fcn_2_2",
                            weight_decay, drop_out)
    output_layer = Dense(n_classes, activation="softmax", use_bias=True,
                         kernel_initializer="glorot_normal",
                         bias_initializer="glorot_uniform",
                         kernel_regularizer=regularizers.l2(weight_decay))(x_i)
    model = Model(inputs=in_layer, outputs=output_layer)
    return model
def model_three_two_skip(n_classes, hidden_btleneck=128, hidden_fcn=512,
                         weight_decay=0.0005, input_depth=2048, drop_out=0.5,
                         aggr="avg", gaussian_param=0, k=10, activation_middle="relu"):
    """
    Variant d of model_1, with two concatenation skip connections.

    NN architecture with layers, skips are indicated by (sa, ea), (sb, eb):
    Input layer -> bottleneck layer (size hidden_btleneck) (sa) ->
    fully connected layer (size hidden_fcn) ->
    bottleneck layer (size hidden_btleneck) (ea) -> 1D pooling with aggr (sb)
    -> fully connected layer (size hidden_fcn) ->
    bottleneck layer (size hidden_btleneck) (eb) ->
    softmax layer (size n_classes)

    Parameters
    ----------
    n_classes : int
        as we have a classification, this is the number of neurons (classes)
        of the final softmax layer.
    hidden_btleneck: int
        size of the intermediate bottleneck layers.
    hidden_fcn: int
        size of the fully connected dense layers.
    weight_decay: float
        L2 regularisation parameter.
    input_depth: int
        depth of each tile encoding fed into the neural network.
    drop_out: float
        drop out rate applied to the input tiles and to the dense layers.
    aggr: string, one of "avg", "max", "weldon", "weldon_conc", "conan_plus"
        pooling function applied to the new tile representations.
    gaussian_param: float
        if 0, no gaussian dropout; otherwise gaussian noise is added to the
        input tiles.
    k: int
        number of top/bottom instances kept by the Weldon-style poolings.
    activation_middle: string, relu, tanh, None, ...
        non linear activation function applied to the new tile encodings.

    Returns
    -------
    Object: keras.models.Model
        Uncompiled keras model (compilation happens in `load_model`).

    Raises
    ------
    ValueError
        if `aggr` is not one of the supported pooling names.
    """
    input_size = (None, input_depth)
    in_layer = Input(shape=input_size)
    if drop_out != 0:
        # noise_shape=(1, input_depth) drops the same features across tiles
        x_i = Dropout(drop_out, noise_shape=(1, input_depth))(in_layer)
    else:
        x_i = in_layer
    if gaussian_param != 0:
        x_i = GaussianDropout(gaussian_param)(x_i)
    bn_encode_1 = conv_shape_bn_act_drop(x_i, hidden_btleneck, weight_decay,
                                         drop_out, input_size, name="bottleneck_1")
    x_i = conv_shape_bn_act_drop(bn_encode_1, hidden_fcn, weight_decay, drop_out,
                                 (None, hidden_btleneck), name="fcn_1_1")
    x_i = conv_shape_bn_act_drop(x_i, hidden_btleneck, weight_decay, drop_out,
                                 (None, hidden_fcn), name="bottleneck_2",
                                 activation=activation_middle)
    # first skip: concatenate the initial bottleneck with the refined one,
    # so the pooled representation has depth 2*hidden_btleneck
    x_i = Concatenate(axis=-1)([bn_encode_1, x_i])
    if aggr == "avg":
        x_ik = GlobalAveragePooling1D()(x_i)
    elif aggr == "max":
        x_ik = GlobalMaxPooling1D()(x_i)
    elif aggr == "weldon":
        x_ik = WeldonPooling(x_i, k)
    elif aggr == "weldon_conc":
        # per-tile score used to pick the k top/bottom tile encodings
        s_i = conv_shape_bn_act_drop(x_i, 1, weight_decay,
                                     drop_out, (None, 2*hidden_btleneck), name="score",
                                     activation=activation_middle)
        x_i = WeldonConcPooling(s_i, x_i, k)
        x_ik = Flatten()(x_i)
    elif aggr == "conan_plus":
        # weldon-style selected encodings concatenated with the average encoding
        s_i = conv_shape_bn_act_drop(x_i, 1, weight_decay,
                                     drop_out, (None, 2*hidden_btleneck),
                                     name="score",
                                     activation=activation_middle)
        weldon_conc = WeldonConcPooling(s_i, x_i, k)
        weldon_conc = Flatten()(weldon_conc)
        avg_encoding = GlobalAveragePooling1D()(x_i)
        x_ik = Concatenate(axis=-1)([weldon_conc, avg_encoding])
    else:
        # original message only mentioned "avg or max" although five
        # pooling modes are supported
        raise ValueError("unknown aggr '{}', expected one of: avg, max, "
                         "weldon, weldon_conc, conan_plus".format(aggr))
    x_i = dense_bn_act_drop(x_ik, hidden_fcn, "fcn_2_1",
                            weight_decay, drop_out)
    x_i = dense_bn_act_drop(x_i, hidden_btleneck, "bottleneck_22",
                            weight_decay, drop_out)
    # second skip: concatenate the pooled slide descriptor with its
    # bottlenecked transform before the classifier
    x_i = Concatenate(axis=-1)([x_ik, x_i])
    output_layer = Dense(n_classes, activation="softmax", use_bias=True,
                         kernel_initializer="glorot_normal",
                         bias_initializer="glorot_uniform",
                         kernel_regularizer=regularizers.l2(weight_decay))(x_i)
    model = Model(inputs=in_layer, outputs=output_layer)
    return model
def load_model(parameter_dic, options, verbose=True):
    """
    Build and compile the model selected by ``options.model``.

    Model names are ``owkin``, or ``<family>_<variant>`` where family is one
    of ``model_1S``, ``weldon_plus``, ``conan`` and variant is ``a``/``b``/
    ``c``/``d`` (one letter per architecture depth). ``weldon_plus`` forces
    the ``weldon_conc`` pooling and ``conan`` the ``conan_plus`` pooling;
    ``model_1S`` uses ``options.pooling_layer``.

    Parameters
    ----------
    parameter_dic : dict
        dictionary containing the hyperparameter values ("hidden_fcn",
        "hidden_btleneck", "gaussian_noise", "drop_out", "weight_decay",
        "learning_rate").
    options : NameSpace
        NameSpace containing arguments collected by the argumentParser.
    verbose : bool
        if True, print the keras model summary.

    Returns
    -------
    Object: keras.models.Model
        Compiled keras model.

    Raises
    ------
    ValueError
        if ``options.model`` or ``options.optimizer_name`` is unknown.
    """
    # binary target for these two tasks, 4-class otherwise
    if options.y_interest in ["Residual", "Prognostic"]:
        n_classes = 2
    else:
        n_classes = 4
    input_depth = options.input_depth
    aggr = options.pooling_layer
    k = options.k
    optimizer_name = options.optimizer_name
    hidden_fcn = parameter_dic["hidden_fcn"]
    hidden_btleneck = parameter_dic["hidden_btleneck"]
    gaussian_param = parameter_dic["gaussian_noise"]
    drop_out = parameter_dic["drop_out"]
    weight_decay = parameter_dic["weight_decay"]
    activation_middle = options.activation_middle
    learning_rate = parameter_dic["learning_rate"]
    # one builder per architecture variant letter
    builders = {"a": model_one_one, "b": model_one_two,
                "c": model_two_two, "d": model_three_two_skip}
    # pooling forced by the model family; None keeps options.pooling_layer
    forced_aggr = {"model_1S": None, "weldon_plus": "weldon_conc",
                   "conan": "conan_plus"}
    if options.model == "owkin":
        # owkin baseline: single-score bottleneck with Weldon pooling
        build = model_one_two
        aggr = "weldon"
        hidden_btleneck = 1
    else:
        family, _, variant = options.model.rpartition("_")
        build = builders.get(variant)
        if build is None or family not in forced_aggr:
            # was a silent `else: pass`, which left `model` undefined and
            # crashed later with a confusing NameError
            raise ValueError("Unknown model type with name: {}"
                             .format(options.model))
        if forced_aggr[family] is not None:
            aggr = forced_aggr[family]
    model = build(n_classes, hidden_btleneck=hidden_btleneck,
                  hidden_fcn=hidden_fcn, weight_decay=weight_decay,
                  input_depth=input_depth, drop_out=drop_out, aggr=aggr,
                  gaussian_param=gaussian_param, k=k,
                  activation_middle=activation_middle)
    if verbose:
        # Model.summary() prints itself and returns None; wrapping it in
        # print() produced a spurious trailing "None"
        model.summary()
    if optimizer_name == "Adam":
        opt = Adam(lr=learning_rate, epsilon=1e-08)
    else:
        msg = "Unknown optimizer_name type with name: {}"
        raise ValueError(msg.format(optimizer_name))
    metrics = import_metrics()
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=metrics)
    return model
| <filename>python/nn/model_definition.py
# Defines the neural network used in the WELDON model.
# Implemented in keras
import tensorflow as tf
from keras import backend as K
#config = tf.ConfigProto(log_device_placement=True)
#config.gpu_options.allow_growth = True
#K.tensorflow_backend.set_session(tf.Session(config=config))
from keras.optimizers import Adam
from keras_utils import *
from keras.layers import Add
from nn_metrics import import_metrics
### Model Names, Convention:
###
### For the following models, designated by model_n_m,
### n and m integers. We have the following architecture
### a block of n 1D convolutional layers applied to the tiles.
### Followed by a pooling layer. Followed by m fully connected
### layers. Followed by a softmax with output equal to the
### number of classes
###
def model_one_one(n_classes, hidden_btleneck=128, hidden_fcn=512,
                  weight_decay=0.0005, input_depth=2048, drop_out=0.5,
                  aggr="avg", gaussian_param=0, k=10, activation_middle="relu"):
    """
    The simplest version of model_1, variant a.

    NN architecture:
    Input layer -> bottleneck layer (size hidden_btleneck) -> 1D pooling with aggr
    -> fully connected layer (size hidden_fcn) -> softmax layer (size n_classes)

    Parameters
    ----------
    n_classes : int
        as we have a classification, this is the number of neurons (classes)
        of the final softmax layer.
    hidden_btleneck: int
        size of the first intermediate bottleneck layer.
    hidden_fcn: int
        size of the final fully connected dense layer.
    weight_decay: float
        L2 regularisation parameter.
    input_depth: int
        depth of each tile encoding fed into the neural network.
    drop_out: float
        drop out rate applied to the input tiles and to the dense layers.
    aggr: string, one of "avg", "max", "weldon", "weldon_conc", "conan_plus"
        pooling function applied to the new tile representations.
    gaussian_param: float
        if 0, no gaussian dropout; otherwise gaussian noise is added to the
        input tiles.
    k: int
        number of top/bottom instances kept by the Weldon-style poolings.
    activation_middle: string, relu, tanh, None, ...
        non linear activation function applied to the new tile encodings.

    Returns
    -------
    Object: keras.models.Model
        Uncompiled keras model (compilation happens in `load_model`).

    Raises
    ------
    ValueError
        if `aggr` is not one of the supported pooling names.
    """
    input_size = (None, input_depth)
    in_layer = Input(shape=input_size)
    if drop_out != 0:
        # noise_shape=(1, input_depth) drops the same features across tiles
        x_i = Dropout(drop_out, noise_shape=(1, input_depth))(in_layer)
    else:
        x_i = in_layer
    if gaussian_param != 0:
        x_i = GaussianDropout(gaussian_param)(x_i)
    x_i = conv_shape_bn_act_drop(x_i, hidden_btleneck, weight_decay,
                                 drop_out, input_size, name="bottleneck_1",
                                 activation=activation_middle)
    if aggr == "avg":
        x_i = GlobalAveragePooling1D()(x_i)
    elif aggr == "max":
        x_i = GlobalMaxPooling1D()(x_i)
    elif aggr == "weldon":
        x_i = WeldonPooling(x_i, k)
    elif aggr == "weldon_conc":
        # per-tile score used to pick the k top/bottom tile encodings
        s_i = conv_shape_bn_act_drop(x_i, 1, weight_decay,
                                     drop_out, (None, hidden_btleneck), name="score",
                                     activation=activation_middle)
        x_i = WeldonConcPooling(s_i, x_i, k)
        x_i = Flatten()(x_i)
    elif aggr == "conan_plus":
        # weldon-style selected encodings concatenated with the average encoding
        s_i = conv_shape_bn_act_drop(x_i, 1, weight_decay,
                                     drop_out, (None, hidden_btleneck),
                                     name="score",
                                     activation=activation_middle)
        weldon_conc = WeldonConcPooling(s_i, x_i, k)
        weldon_conc = Flatten()(weldon_conc)
        avg_encoding = GlobalAveragePooling1D()(x_i)
        x_i = Concatenate(axis=-1)([weldon_conc, avg_encoding])
    else:
        # original message only mentioned "avg or max" although five
        # pooling modes are supported
        raise ValueError("unknown aggr '{}', expected one of: avg, max, "
                         "weldon, weldon_conc, conan_plus".format(aggr))
    x_i = dense_bn_act_drop(x_i, hidden_fcn, "dense", weight_decay, drop_out)
    output_layer = Dense(n_classes, activation="softmax", use_bias=True,
                         kernel_initializer="glorot_normal",
                         bias_initializer="glorot_uniform",
                         kernel_regularizer=regularizers.l2(weight_decay))(x_i)
    model = Model(inputs=in_layer, outputs=output_layer)
    return model
def model_one_two(n_classes, hidden_btleneck=128, hidden_fcn=512,
                  weight_decay=0.0005, input_depth=2048, drop_out=0.5,
                  aggr="avg", gaussian_param=0, k=10, activation_middle="relu"):
    """
    The simplest version of model_1, variant b.

    NN architecture:
    Input layer -> bottleneck layer (size hidden_btleneck) -> 1D pooling with aggr
    -> fully connected layer (size hidden_fcn) -> fully connected layer (size hidden_fcn)
    -> softmax layer (size n_classes)

    Parameters
    ----------
    n_classes : int
        as we have a classification, this is the number of neurons (classes)
        of the final softmax layer.
    hidden_btleneck: int
        size of the first intermediate bottleneck layer.
    hidden_fcn: int
        size of the final fully connected dense layers.
    weight_decay: float
        L2 regularisation parameter.
    input_depth: int
        depth of each tile encoding fed into the neural network.
    drop_out: float
        drop out rate applied to the input tiles and to the dense layers.
    aggr: string, one of "avg", "max", "weldon", "weldon_conc", "conan_plus"
        pooling function applied to the new tile representations.
    gaussian_param: float
        if 0, no gaussian dropout; otherwise gaussian noise is added to the
        input tiles.
    k: int
        number of top/bottom instances kept by the Weldon-style poolings.
    activation_middle: string, relu, tanh, None, ...
        non linear activation function applied to the new tile encodings.

    Returns
    -------
    Object: keras.models.Model
        Uncompiled keras model (compilation happens in `load_model`).

    Raises
    ------
    ValueError
        if `aggr` is not one of the supported pooling names.
    """
    input_size = (None, input_depth)
    in_layer = Input(shape=input_size)
    if drop_out != 0:
        # noise_shape=(1, input_depth) drops the same features across tiles
        x_i = Dropout(drop_out, noise_shape=(1, input_depth))(in_layer)
    else:
        x_i = in_layer
    if gaussian_param != 0:
        x_i = GaussianDropout(gaussian_param)(x_i)
    x_i = conv_shape_bn_act_drop(x_i, hidden_btleneck, weight_decay,
                                 drop_out, input_size, name="bottleneck_1",
                                 activation=activation_middle)
    if aggr == "avg":
        x_i = GlobalAveragePooling1D()(x_i)
    elif aggr == "max":
        x_i = GlobalMaxPooling1D()(x_i)
    elif aggr == "weldon":
        x_i = WeldonPooling(x_i, k)
    elif aggr == "weldon_conc":
        # per-tile score used to pick the k top/bottom tile encodings
        s_i = conv_shape_bn_act_drop(x_i, 1, weight_decay,
                                     drop_out, (None, hidden_btleneck), name="score",
                                     activation=activation_middle)
        x_i = WeldonConcPooling(s_i, x_i, k)
        x_i = Flatten()(x_i)
    elif aggr == "conan_plus":
        # weldon-style selected encodings concatenated with the average encoding
        s_i = conv_shape_bn_act_drop(x_i, 1, weight_decay,
                                     drop_out, (None, hidden_btleneck),
                                     name="score",
                                     activation=activation_middle)
        weldon_conc = WeldonConcPooling(s_i, x_i, k)
        weldon_conc = Flatten()(weldon_conc)
        avg_encoding = GlobalAveragePooling1D()(x_i)
        x_i = Concatenate(axis=-1)([weldon_conc, avg_encoding])
    else:
        # was missing: an unknown aggr silently left the tile axis
        # unpooled, producing a per-tile output instead of a slide-level one
        raise ValueError("unknown aggr '{}', expected one of: avg, max, "
                         "weldon, weldon_conc, conan_plus".format(aggr))
    x_i = dense_bn_act_drop(x_i, hidden_fcn, "fcn_2_1",
                            weight_decay, drop_out)
    x_i = dense_bn_act_drop(x_i, hidden_fcn, "fcn_2_2",
                            weight_decay, drop_out)
    output_layer = Dense(n_classes, activation="softmax", use_bias=True,
                         kernel_initializer="glorot_normal",
                         bias_initializer="glorot_uniform",
                         kernel_regularizer=regularizers.l2(weight_decay))(x_i)
    model = Model(inputs=in_layer, outputs=output_layer)
    return model
def model_two_two(n_classes, hidden_btleneck=128, hidden_fcn=512, weight_decay=0.0005,
                  input_depth=2048, drop_out=0.5, aggr="avg", gaussian_param=0, k=10,
                  activation_middle="relu"):
    """
    The simplest version of model_1, variant c.

    NN architecture:
    Input layer -> bottleneck layer (size hidden_btleneck) ->
    fully connected layer (size hidden_fcn) ->
    bottleneck layer (size hidden_btleneck) -> 1D pooling with aggr
    -> fully connected layer (size hidden_fcn) ->
    fully connected layer (size hidden_fcn) -> softmax layer (size n_classes)

    Parameters
    ----------
    n_classes : int
        as we have a classification, this is the number of neurons (classes)
        of the final softmax layer.
    hidden_btleneck: int
        size of the intermediate bottleneck layers.
    hidden_fcn: int
        size of the fully connected dense layers.
    weight_decay: float
        L2 regularisation parameter.
    input_depth: int
        depth of each tile encoding fed into the neural network.
    drop_out: float
        drop out rate applied to the input tiles and to the dense layers.
    aggr: string, one of "avg", "max", "weldon", "weldon_conc", "conan_plus"
        pooling function applied to the new tile representations.
    gaussian_param: float
        if 0, no gaussian dropout; otherwise gaussian noise is added to the
        input tiles.
    k: int
        number of top/bottom instances kept by the Weldon-style poolings.
    activation_middle: string, relu, tanh, None, ...
        non linear activation function applied to the score layer.
        NOTE(review): unlike the other variants, the three pre-pooling conv
        blocks here use conv_shape_bn_act_drop's default activation —
        confirm this asymmetry is intentional.

    Returns
    -------
    Object: keras.models.Model
        Uncompiled keras model (compilation happens in `load_model`).

    Raises
    ------
    ValueError
        if `aggr` is not one of the supported pooling names.
    """
    input_size = (None, input_depth)
    in_layer = Input(shape=input_size)
    if drop_out != 0:
        # noise_shape=(1, input_depth) drops the same features across tiles
        x_i = Dropout(drop_out, noise_shape=(1, input_depth))(in_layer)
    else:
        x_i = in_layer
    if gaussian_param != 0:
        x_i = GaussianDropout(gaussian_param)(x_i)
    x_i = conv_shape_bn_act_drop(x_i, hidden_btleneck, weight_decay,
                                 drop_out, input_size, name="bottleneck_1")
    x_i = conv_shape_bn_act_drop(x_i, hidden_fcn, weight_decay, drop_out,
                                 (None, hidden_btleneck), "fcn_1_1")
    x_i = conv_shape_bn_act_drop(x_i, hidden_btleneck, weight_decay, drop_out,
                                 (None, hidden_fcn), "bottleneck_2")
    if aggr == "avg":
        x_i = GlobalAveragePooling1D()(x_i)
    elif aggr == "max":
        x_i = GlobalMaxPooling1D()(x_i)
    elif aggr == "weldon":
        x_i = WeldonPooling(x_i, k)
    elif aggr == "weldon_conc":
        # per-tile score used to pick the k top/bottom tile encodings
        s_i = conv_shape_bn_act_drop(x_i, 1, weight_decay,
                                     drop_out, (None, hidden_btleneck), name="score",
                                     activation=activation_middle)
        x_i = WeldonConcPooling(s_i, x_i, k)
        x_i = Flatten()(x_i)
    elif aggr == "conan_plus":
        # weldon-style selected encodings concatenated with the average encoding
        s_i = conv_shape_bn_act_drop(x_i, 1, weight_decay,
                                     drop_out, (None, hidden_btleneck),
                                     name="score",
                                     activation=activation_middle)
        weldon_conc = WeldonConcPooling(s_i, x_i, k)
        weldon_conc = Flatten()(weldon_conc)
        avg_encoding = GlobalAveragePooling1D()(x_i)
        x_i = Concatenate(axis=-1)([weldon_conc, avg_encoding])
    else:
        # was missing: an unknown aggr silently left the tile axis
        # unpooled, producing a per-tile output instead of a slide-level one
        raise ValueError("unknown aggr '{}', expected one of: avg, max, "
                         "weldon, weldon_conc, conan_plus".format(aggr))
    x_i = dense_bn_act_drop(x_i, hidden_fcn, "fcn_2_1",
                            weight_decay, drop_out)
    x_i = dense_bn_act_drop(x_i, hidden_fcn, "fcn_2_2",
                            weight_decay, drop_out)
    output_layer = Dense(n_classes, activation="softmax", use_bias=True,
                         kernel_initializer="glorot_normal",
                         bias_initializer="glorot_uniform",
                         kernel_regularizer=regularizers.l2(weight_decay))(x_i)
    model = Model(inputs=in_layer, outputs=output_layer)
    return model
def model_three_two_skip(n_classes, hidden_btleneck=128, hidden_fcn=512,
                         weight_decay=0.0005, input_depth=2048, drop_out=0.5,
                         aggr="avg", gaussian_param=0, k=10, activation_middle="relu"):
    """
    Variant d of model_1, with two concatenation skip connections.

    NN architecture with layers, skips are indicated by (sa, ea), (sb, eb):
    Input layer -> bottleneck layer (size hidden_btleneck) (sa) ->
    fully connected layer (size hidden_fcn) ->
    bottleneck layer (size hidden_btleneck) (ea) -> 1D pooling with aggr (sb)
    -> fully connected layer (size hidden_fcn) ->
    bottleneck layer (size hidden_btleneck) (eb) ->
    softmax layer (size n_classes)

    Parameters
    ----------
    n_classes : int
        as we have a classification, this is the number of neurons (classes)
        of the final softmax layer.
    hidden_btleneck: int
        size of the intermediate bottleneck layers.
    hidden_fcn: int
        size of the fully connected dense layers.
    weight_decay: float
        L2 regularisation parameter.
    input_depth: int
        depth of each tile encoding fed into the neural network.
    drop_out: float
        drop out rate applied to the input tiles and to the dense layers.
    aggr: string, one of "avg", "max", "weldon", "weldon_conc", "conan_plus"
        pooling function applied to the new tile representations.
    gaussian_param: float
        if 0, no gaussian dropout; otherwise gaussian noise is added to the
        input tiles.
    k: int
        number of top/bottom instances kept by the Weldon-style poolings.
    activation_middle: string, relu, tanh, None, ...
        non linear activation function applied to the new tile encodings.

    Returns
    -------
    Object: keras.models.Model
        Uncompiled keras model (compilation happens in `load_model`).

    Raises
    ------
    ValueError
        if `aggr` is not one of the supported pooling names.
    """
    input_size = (None, input_depth)
    in_layer = Input(shape=input_size)
    if drop_out != 0:
        # noise_shape=(1, input_depth) drops the same features across tiles
        x_i = Dropout(drop_out, noise_shape=(1, input_depth))(in_layer)
    else:
        x_i = in_layer
    if gaussian_param != 0:
        x_i = GaussianDropout(gaussian_param)(x_i)
    bn_encode_1 = conv_shape_bn_act_drop(x_i, hidden_btleneck, weight_decay,
                                         drop_out, input_size, name="bottleneck_1")
    x_i = conv_shape_bn_act_drop(bn_encode_1, hidden_fcn, weight_decay, drop_out,
                                 (None, hidden_btleneck), name="fcn_1_1")
    x_i = conv_shape_bn_act_drop(x_i, hidden_btleneck, weight_decay, drop_out,
                                 (None, hidden_fcn), name="bottleneck_2",
                                 activation=activation_middle)
    # first skip: concatenate the initial bottleneck with the refined one,
    # so the pooled representation has depth 2*hidden_btleneck
    x_i = Concatenate(axis=-1)([bn_encode_1, x_i])
    if aggr == "avg":
        x_ik = GlobalAveragePooling1D()(x_i)
    elif aggr == "max":
        x_ik = GlobalMaxPooling1D()(x_i)
    elif aggr == "weldon":
        x_ik = WeldonPooling(x_i, k)
    elif aggr == "weldon_conc":
        # per-tile score used to pick the k top/bottom tile encodings
        s_i = conv_shape_bn_act_drop(x_i, 1, weight_decay,
                                     drop_out, (None, 2*hidden_btleneck), name="score",
                                     activation=activation_middle)
        x_i = WeldonConcPooling(s_i, x_i, k)
        x_ik = Flatten()(x_i)
    elif aggr == "conan_plus":
        # weldon-style selected encodings concatenated with the average encoding
        s_i = conv_shape_bn_act_drop(x_i, 1, weight_decay,
                                     drop_out, (None, 2*hidden_btleneck),
                                     name="score",
                                     activation=activation_middle)
        weldon_conc = WeldonConcPooling(s_i, x_i, k)
        weldon_conc = Flatten()(weldon_conc)
        avg_encoding = GlobalAveragePooling1D()(x_i)
        x_ik = Concatenate(axis=-1)([weldon_conc, avg_encoding])
    else:
        # original message only mentioned "avg or max" although five
        # pooling modes are supported
        raise ValueError("unknown aggr '{}', expected one of: avg, max, "
                         "weldon, weldon_conc, conan_plus".format(aggr))
    x_i = dense_bn_act_drop(x_ik, hidden_fcn, "fcn_2_1",
                            weight_decay, drop_out)
    x_i = dense_bn_act_drop(x_i, hidden_btleneck, "bottleneck_22",
                            weight_decay, drop_out)
    # second skip: concatenate the pooled slide descriptor with its
    # bottlenecked transform before the classifier
    x_i = Concatenate(axis=-1)([x_ik, x_i])
    output_layer = Dense(n_classes, activation="softmax", use_bias=True,
                         kernel_initializer="glorot_normal",
                         bias_initializer="glorot_uniform",
                         kernel_regularizer=regularizers.l2(weight_decay))(x_i)
    model = Model(inputs=in_layer, outputs=output_layer)
    return model
def load_model(parameter_dic, options, verbose=True):
    """Build and compile the keras model selected by ``options.model``.

    Model names follow the convention ``<family>_<suffix>`` where the
    suffix ``a``/``b``/``c``/``d`` picks the architecture
    (``model_one_one`` / ``model_one_two`` / ``model_two_two`` /
    ``model_three_two_skip``) and the family fixes the pooling layer:
    ``model_1S_*`` uses ``options.pooling_layer``, ``weldon_plus_*``
    uses ``"weldon_conc"`` and ``conan_*`` uses ``"conan_plus"``.
    ``"owkin"`` is the baseline: ``model_one_two`` with a single-unit
    bottleneck and plain ``"weldon"`` pooling.

    Parameters
    ----------
    parameter_dic : dict
        Dictionary containing the hyperparameter values. Expected keys:
        ``"hidden_fcn"``, ``"hidden_btleneck"``, ``"gaussian_noise"``,
        ``"drop_out"``, ``"weight_decay"`` and ``"learning_rate"``.
    options : NameSpace
        NameSpace containing arguments collected by the argumentParser.
    verbose : bool, optional
        If True (default), print the model summary after construction.

    Returns
    -------
    Object: keras.models.Model
        Compiled keras model.

    Raises
    ------
    ValueError
        If ``options.model`` or ``options.optimizer_name`` is not known.
    """
    # Binary targets get 2 output classes, everything else 4.
    if options.y_interest in ["Residual", "Prognostic"]:
        n_classes = 2
    else:
        n_classes = 4

    hidden_fcn = parameter_dic["hidden_fcn"]
    hidden_btleneck = parameter_dic["hidden_btleneck"]
    gaussian_param = parameter_dic["gaussian_noise"]
    drop_out = parameter_dic["drop_out"]
    weight_decay = parameter_dic["weight_decay"]
    learning_rate = parameter_dic["learning_rate"]

    # Validate the model name up front so a bad configuration fails fast
    # with a clear error.  (Previously an unknown name fell through an
    # `else: pass` and crashed later with an opaque UnboundLocalError.)
    known_models = {"owkin"}
    for family in ("model_1S_", "weldon_plus_", "conan_"):
        for suffix in "abcd":
            known_models.add(family + suffix)
    if options.model not in known_models:
        raise ValueError(
            "Unknown model type with name: {}".format(options.model))

    # Architecture suffix -> builder function, shared by all families.
    arch_builders = {
        "a": model_one_one,
        "b": model_one_two,
        "c": model_two_two,
        "d": model_three_two_skip,
    }
    if options.model == "owkin":
        # Owkin baseline: forced single-unit bottleneck + weldon pooling.
        build_func = model_one_two
        aggr = "weldon"
        btleneck = 1
    else:
        build_func = arch_builders[options.model[-1]]
        if options.model.startswith("weldon_plus_"):
            aggr = "weldon_conc"
        elif options.model.startswith("conan_"):
            aggr = "conan_plus"
        else:
            # model_1S_* honours the pooling layer requested on the CLI.
            aggr = options.pooling_layer
        btleneck = hidden_btleneck

    model = build_func(n_classes, hidden_btleneck=btleneck,
                       hidden_fcn=hidden_fcn, weight_decay=weight_decay,
                       input_depth=options.input_depth, drop_out=drop_out,
                       aggr=aggr, gaussian_param=gaussian_param,
                       k=options.k,
                       activation_middle=options.activation_middle)

    if verbose:
        print(model.summary())

    if options.optimizer_name == "Adam":
        opt = Adam(lr=learning_rate, epsilon=1e-08)
    else:
        msg = "Unknown optimizer_name type with name: {}"
        raise ValueError(msg.format(options.optimizer_name))

    metrics = import_metrics()
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=metrics)
    return model
| en | 0.621999 | # Defines the neural network used in the WELDON model. # Implemented in keras #config = tf.ConfigProto(log_device_placement=True) #config.gpu_options.allow_growth = True #K.tensorflow_backend.set_session(tf.Session(config=config)) ### Model Names, Convention: ### ### For the following models, designated by model_n_m, ### n and m integers. We have the following architecture ### a block of n 1D convolutional layers applied to the tiles. ### Followed by a pooling layer. Followed by m fully connected ### layers. Followed by a softmax with output equal to the ### number of classes ### The simplest version of model_1 version a. NN architecture: Input layer -> bottleneck layer (size hidden_btleneck) -> 1D pooling with aggr -> fully connected layer (size hidden_fcn) -> softmax layer (size n_classes) Parameters ---------- n_classes : int as we have a classification, this the number of neurons (or classes) to set the final layer to. hidden_btleneck: int size of the first intermediate bottleneck layer. hidden_fcn: int size of the final fully connected dense layers. options : NameSpace NameSpace containing arguments collected by the argumentParser. weight_decay: float L2 regularisation parameter. input_depth: int size of the tile that will be fed in to the neural network. drop out: float drop out value to apply to the input tiles, and to the tissue profile Z_i vector. aggr: string, avg or max Pooling function to apply to the new tile representation. gaussian_param: float if 0, does not apply gaussian drop out, if different to 0, we had to the input tile a gaussian noise. activation_middle: string, relu, tanh, None,... Non linear activation function to apply to the new tile encodings. Returns ------- Object: keras.models.Model Compiled keras model. The simplest version of model_1 version b. 
NN architecture: Input layer -> bottleneck layer (size hidden_btleneck) -> 1D pooling with aggr -> fully connected layer (size hidden_fcn) -> fully connected layer (size hidden_fcn) -> softmax layer (size n_classes) Parameters ---------- n_classes : int as we have a classification, this the number of neurons (or classes) to set the final layer to. hidden_btleneck: int size of the first intermediate bottleneck layer. hidden_fcn: int size of the final fully connected dense layers. options : NameSpace NameSpace containing arguments collected by the argumentParser. weight_decay: float L2 regularisation parameter. input_depth: int size of the tile that will be fed in to the neural network. drop out: float drop out value to apply to the input tiles, and to the tissue profile Z_i vector. aggr: string, avg or max Pooling function to apply to the new tile representation. gaussian_param: float if 0, does not apply gaussian drop out, if different to 0, we had to the input tile a gaussian noise. activation_middle: string, relu, tanh, None,... Non linear activation function to apply to the new tile encodings. Returns ------- Object: keras.models.Model Compiled keras model. The simplest version of model_1 version c. NN architecture: Input layer -> bottleneck layer (size hidden_btleneck) -> fully connected layer (size hidden_fcn) bottleneck layer (size hidden_btleneck) -> 1D pooling with aggr -> fully connected layer (size hidden_fcn) -> fully connected layer (size hidden_fcn) -> softmax layer (size n_classes) Parameters ---------- n_classes : int as we have a classification, this the number of neurons (or classes) to set the final layer to. hidden_btleneck: int size of the first intermediate bottleneck layer. hidden_fcn: int size of the final fully connected dense layers. options : NameSpace NameSpace containing arguments collected by the argumentParser. weight_decay: float L2 regularisation parameter. input_depth: int size of the tile that will be fed in to the neural network. 
drop out: float drop out value to apply to the input tiles, and to the tissue profile Z_i vector. aggr: string, avg or max or owkin Pooling function to apply to the new tile representation. gaussian_param: float if 0, does not apply gaussian drop out, if different to 0, we had to the input tile a gaussian noise. activation_middle: string, relu, tanh, None,... Non linear activation function to apply to the new tile encodings. Returns ------- Object: keras.models.Model Compiled keras model. The simplest version of model_1. NN architecture with layers, skips are indicated by (sa, ea), (sb, eb): Input layer -> bottleneck layer (size hidden_btleneck) (sa) -> fully connected layer (size hidden_fcn) -> bottleneck layer (size hidden_btleneck) (ea) -> 1D pooling with aggr (sb) -> fully connected layer (size hidden_fcn) -> fully connected layer (size hidden_fcn) -> bottleneck layer (size hidden_btleneck) (eb) -> softmax layer (size n_classes) Parameters ---------- n_classes : int as we have a classification, this the number of neurons (or classes) to set the final layer to. hidden_btleneck: int size of the first intermediate bottleneck layer. hidden_fcn: int size of the final fully connected dense layers. options : NameSpace NameSpace containing arguments collected by the argumentParser. weight_decay: float L2 regularisation parameter. input_depth: int size of the tile that will be fed in to the neural network. drop out: float drop out value to apply to the input tiles, and to the tissue profile Z_i vector. aggr: string, avg or max Pooling function to apply to the new tile representation. gaussian_param: float if 0, does not apply gaussian drop out, if different to 0, we had to the input tile a gaussian noise. activation_middle: string, relu, tanh, None,... Non linear activation function to apply to the new tile encodings. Returns ------- Object: keras.models.Model Compiled keras model. 
Parameters ---------- parameter_dic : dict disctionary containing the hyperparameters values. options : NameSpace NameSpace containing arguments collected by the argumentParser. Returns ------- Object: keras.models.Model Compiled keras model. Raises ------ ValueError Optimizername not known # print(config.gpu_options.allow_growth, flush=True) | 3.367043 | 3 |
temp_umid.py | Riverfount/meteriologia | 0 | 6615587 | import Adafruit_DHT as DHT
import RPi.GPIO as GPIO
import psycopg2
import requests
from decouple import config
sensor = DHT.DHT11
GPIO.setmode(GPIO.BOARD)
pino_sensor = 4
url = 'https://api.thingspeak.com/update'
api_key = config('API_KEY')
umid, temp = DHT.read_retry(sensor, pino_sensor)
connect = psycopg2.connect(
host='192.168.0.115',
database='postgres',
user='postgres',
password='<PASSWORD>'
)
cursor = connect.cursor()
sql = f"""INSERT INTO dados_temp_umid (temperatura, umidade) VALUES ({temp}, {umid})"""
cursor.execute(sql)
connect.commit()
connect.close()
response = requests.get(f'{url}?api_key={api_key}&field1={temp}&field2={umid}')
| import Adafruit_DHT as DHT
import RPi.GPIO as GPIO
import psycopg2
import requests
from decouple import config
sensor = DHT.DHT11
GPIO.setmode(GPIO.BOARD)
pino_sensor = 4
url = 'https://api.thingspeak.com/update'
api_key = config('API_KEY')
umid, temp = DHT.read_retry(sensor, pino_sensor)
connect = psycopg2.connect(
host='192.168.0.115',
database='postgres',
user='postgres',
password='<PASSWORD>'
)
cursor = connect.cursor()
sql = f"""INSERT INTO dados_temp_umid (temperatura, umidade) VALUES ({temp}, {umid})"""
cursor.execute(sql)
connect.commit()
connect.close()
response = requests.get(f'{url}?api_key={api_key}&field1={temp}&field2={umid}')
| pt | 0.54899 | INSERT INTO dados_temp_umid (temperatura, umidade) VALUES ({temp}, {umid}) | 2.413624 | 2 |
tests/test_renderer.py | watarinishin/ns-dispatch-utility | 6 | 6615588 | <reponame>watarinishin/ns-dispatch-utility<gh_stars>1-10
import os
import shutil
import logging
from unittest import mock
import pytest
import toml
from nsdu import info
from nsdu import exceptions
from nsdu import renderer
class TestDispatchTemplateLoader():
def test_with_existent_template(self):
template_load_func = mock.Mock(return_value='Test text')
loader = renderer.JinjaTemplateLoader(template_load_func)
r = loader.get_source(mock.Mock(), 'Test')
assert r[0] == 'Test text'
def test_with_non_existent_template(self):
template_load_func = mock.Mock(return_value=None)
loader = renderer.JinjaTemplateLoader(template_load_func)
r = loader.get_source(mock.Mock(), 'Test')
assert r[0] == info.DEFAULT_TEMPLATE
class TestTemplateRenderer():
def test_render_with_non_existent_template(self):
template_load_func = mock.Mock(return_value=None)
ins = renderer.TemplateRenderer(template_load_func)
assert ins.render('foo', {}) == info.DEFAULT_TEMPLATE
def test_render_with_existent_template(self):
template = '{% for i in j %}{{ i }}{% endfor %}'
template_load_func = mock.Mock(return_value=template)
ins = renderer.TemplateRenderer(template_load_func)
r = ins.render('foo', {'j': [1, 2]})
assert r == '12'
def test_load_filters_and_render(self):
template = '{% for i in j %}{{ i|foo_filter }}{% endfor %}'
template_load_func = mock.Mock(return_value=template)
ins = renderer.TemplateRenderer(template_load_func)
def foo_filter(a):
return '[{}]'.format(a)
ins.load_filters({'foo_filter': foo_filter})
r = ins.render('foo', {'j': [1, 2]})
assert r == '[1][2]'
class TestLoadFiltersFromSource():
def test_with_existent_files(self):
template = '{% for i in j %}{{ i|filter1 }}{{ i|filter2(0)}}{{ i|filter3 }}{% endfor %}'
template_load_func = mock.Mock(return_value=template)
ins = renderer.TemplateRenderer(template_load_func)
renderer.load_filters_from_source(ins, ['tests/resources/filters-1.py', 'tests/resources/filters-2.py'])
r = ins.render('foo', {'j': [1,2 ]})
assert r == '[1]1and0<1>[2]2and0<2>'
def test_with_a_non_existent_file(self):
ins = renderer.TemplateRenderer(mock.Mock())
with pytest.raises(exceptions.ConfigError):
renderer.load_filters_from_source(ins, ['tests/resources/filter-1.py', 'non-existent.py'])
class TestDispatchRenderer():
def test_render(self):
template = ('{% for i in j %}[complex]{{ i|filter1 }}[/complex]'
'[complexctx][complex]{{ i|filter2(0)}}[/complex][/complexctx]'
'[complexopt opt=1]{{ i|filter3 }}[/complexopt]{% endfor %}')
template_load_func = mock.Mock(return_value=template)
simple_bb_config = {'simple1': {'format_string': '[simple1r]%(value)s[/simple1r]'}}
complex_formatter_source_path = 'tests/resources/bb_complex_formatters.py'
template_filter_paths = ['tests/resources/filters-1.py', 'tests/resources/filters-2.py']
template_vars = {'j': [1, 2], 'example': {'foo': 'cool'}}
ins = renderer.DispatchRenderer(template_load_func, simple_bb_config,
complex_formatter_source_path, template_filter_paths, template_vars)
expected = ('[simple1r][1][/simple1r][complexctxr=cool][complex]1and0[/complex][/complexctxr][complexoptr=1]<1>[/complexoptr]'
'[simple1r][2][/simple1r][complexctxr=cool][complex]2and0[/complex][/complexctxr][complexoptr=1]<2>[/complexoptr]')
assert ins.render('test1') == expected
| import os
import shutil
import logging
from unittest import mock
import pytest
import toml
from nsdu import info
from nsdu import exceptions
from nsdu import renderer
class TestDispatchTemplateLoader():
def test_with_existent_template(self):
template_load_func = mock.Mock(return_value='Test text')
loader = renderer.JinjaTemplateLoader(template_load_func)
r = loader.get_source(mock.Mock(), 'Test')
assert r[0] == 'Test text'
def test_with_non_existent_template(self):
template_load_func = mock.Mock(return_value=None)
loader = renderer.JinjaTemplateLoader(template_load_func)
r = loader.get_source(mock.Mock(), 'Test')
assert r[0] == info.DEFAULT_TEMPLATE
class TestTemplateRenderer():
def test_render_with_non_existent_template(self):
template_load_func = mock.Mock(return_value=None)
ins = renderer.TemplateRenderer(template_load_func)
assert ins.render('foo', {}) == info.DEFAULT_TEMPLATE
def test_render_with_existent_template(self):
template = '{% for i in j %}{{ i }}{% endfor %}'
template_load_func = mock.Mock(return_value=template)
ins = renderer.TemplateRenderer(template_load_func)
r = ins.render('foo', {'j': [1, 2]})
assert r == '12'
def test_load_filters_and_render(self):
template = '{% for i in j %}{{ i|foo_filter }}{% endfor %}'
template_load_func = mock.Mock(return_value=template)
ins = renderer.TemplateRenderer(template_load_func)
def foo_filter(a):
return '[{}]'.format(a)
ins.load_filters({'foo_filter': foo_filter})
r = ins.render('foo', {'j': [1, 2]})
assert r == '[1][2]'
class TestLoadFiltersFromSource():
def test_with_existent_files(self):
template = '{% for i in j %}{{ i|filter1 }}{{ i|filter2(0)}}{{ i|filter3 }}{% endfor %}'
template_load_func = mock.Mock(return_value=template)
ins = renderer.TemplateRenderer(template_load_func)
renderer.load_filters_from_source(ins, ['tests/resources/filters-1.py', 'tests/resources/filters-2.py'])
r = ins.render('foo', {'j': [1,2 ]})
assert r == '[1]1and0<1>[2]2and0<2>'
def test_with_a_non_existent_file(self):
ins = renderer.TemplateRenderer(mock.Mock())
with pytest.raises(exceptions.ConfigError):
renderer.load_filters_from_source(ins, ['tests/resources/filter-1.py', 'non-existent.py'])
class TestDispatchRenderer():
def test_render(self):
template = ('{% for i in j %}[complex]{{ i|filter1 }}[/complex]'
'[complexctx][complex]{{ i|filter2(0)}}[/complex][/complexctx]'
'[complexopt opt=1]{{ i|filter3 }}[/complexopt]{% endfor %}')
template_load_func = mock.Mock(return_value=template)
simple_bb_config = {'simple1': {'format_string': '[simple1r]%(value)s[/simple1r]'}}
complex_formatter_source_path = 'tests/resources/bb_complex_formatters.py'
template_filter_paths = ['tests/resources/filters-1.py', 'tests/resources/filters-2.py']
template_vars = {'j': [1, 2], 'example': {'foo': 'cool'}}
ins = renderer.DispatchRenderer(template_load_func, simple_bb_config,
complex_formatter_source_path, template_filter_paths, template_vars)
expected = ('[simple1r][1][/simple1r][complexctxr=cool][complex]1and0[/complex][/complexctxr][complexoptr=1]<1>[/complexoptr]'
'[simple1r][2][/simple1r][complexctxr=cool][complex]2and0[/complex][/complexctxr][complexoptr=1]<2>[/complexoptr]')
assert ins.render('test1') == expected | none | 1 | 2.419467 | 2 | |
datasets/ilsvrc.py | Callidior/semantic-embeddings | 238 | 6615589 | <reponame>Callidior/semantic-embeddings<gh_stars>100-1000
import os
try:
from keras.preprocessing.image import list_pictures
except ImportError:
import keras
from keras_preprocessing.image import list_pictures
from . import IMAGENET_MEAN, IMAGENET_STD
from .common import FileDatasetGenerator
class ILSVRCGenerator(FileDatasetGenerator):
def __init__(self, root_dir, classes = None, mean = IMAGENET_MEAN, std = IMAGENET_STD, color_mode = "rgb"):
""" ILSVRC data generator.
# Arguments:
- root_dir: Root directory of the ILSVRC dataset, containing directories "ILSVRC2012_img_train" and "ILSVRC2012_img_val", both containing
sub-directories with names of synsets and the images for each synset in the corresponding sub-directories.
- classes: List of synsets to restrict the dataset to. Numeric labels will be assigned to these synsets in ascending order.
If set to `None`, all available synsets will be used and enumerated in the lexicographical order.
- mean: Channel-wise image mean for normalization (in "RGB" order). If set to `None`, mean and standard deviation will be computed from the images.
- std: Channel-wise standard deviation for normalization (in "RGB" order). If set to `None`, standard deviation will be computed from the images.
- color_mode: Image color mode, either "rgb" or "bgr".
"""
super(ILSVRCGenerator, self).__init__(root_dir, default_target_size = 256, randzoom_range = (256, 480), color_mode = color_mode)
self.train_dir = os.path.join(self.root_dir, 'ILSVRC2012_img_train')
self.test_dir = os.path.join(self.root_dir, 'ILSVRC2012_img_val')
# Search for classes
if classes is None:
classes = []
for subdir in sorted(os.listdir(self.train_dir)):
if os.path.isdir(os.path.join(self.train_dir, subdir)):
classes.append(subdir)
self.classes = classes
self.class_indices = dict(zip(self.classes, range(len(self.classes))))
# Search for images
for lbl, subdir in enumerate(self.classes):
cls_files = sorted(list_pictures(os.path.join(self.train_dir, subdir), 'jpeg'))
self.train_img_files += cls_files
self._train_labels += [lbl] * len(cls_files)
cls_files = sorted(list_pictures(os.path.join(self.test_dir, subdir), 'jpeg'))
self.test_img_files += cls_files
self._test_labels += [lbl] * len(cls_files)
print('Found {} training and {} validation images from {} classes.'.format(self.num_train, self.num_test, self.num_classes))
# Compute mean and standard deviation
self._compute_stats(mean, std)
| import os
try:
from keras.preprocessing.image import list_pictures
except ImportError:
import keras
from keras_preprocessing.image import list_pictures
from . import IMAGENET_MEAN, IMAGENET_STD
from .common import FileDatasetGenerator
class ILSVRCGenerator(FileDatasetGenerator):
def __init__(self, root_dir, classes = None, mean = IMAGENET_MEAN, std = IMAGENET_STD, color_mode = "rgb"):
""" ILSVRC data generator.
# Arguments:
- root_dir: Root directory of the ILSVRC dataset, containing directories "ILSVRC2012_img_train" and "ILSVRC2012_img_val", both containing
sub-directories with names of synsets and the images for each synset in the corresponding sub-directories.
- classes: List of synsets to restrict the dataset to. Numeric labels will be assigned to these synsets in ascending order.
If set to `None`, all available synsets will be used and enumerated in the lexicographical order.
- mean: Channel-wise image mean for normalization (in "RGB" order). If set to `None`, mean and standard deviation will be computed from the images.
- std: Channel-wise standard deviation for normalization (in "RGB" order). If set to `None`, standard deviation will be computed from the images.
- color_mode: Image color mode, either "rgb" or "bgr".
"""
super(ILSVRCGenerator, self).__init__(root_dir, default_target_size = 256, randzoom_range = (256, 480), color_mode = color_mode)
self.train_dir = os.path.join(self.root_dir, 'ILSVRC2012_img_train')
self.test_dir = os.path.join(self.root_dir, 'ILSVRC2012_img_val')
# Search for classes
if classes is None:
classes = []
for subdir in sorted(os.listdir(self.train_dir)):
if os.path.isdir(os.path.join(self.train_dir, subdir)):
classes.append(subdir)
self.classes = classes
self.class_indices = dict(zip(self.classes, range(len(self.classes))))
# Search for images
for lbl, subdir in enumerate(self.classes):
cls_files = sorted(list_pictures(os.path.join(self.train_dir, subdir), 'jpeg'))
self.train_img_files += cls_files
self._train_labels += [lbl] * len(cls_files)
cls_files = sorted(list_pictures(os.path.join(self.test_dir, subdir), 'jpeg'))
self.test_img_files += cls_files
self._test_labels += [lbl] * len(cls_files)
print('Found {} training and {} validation images from {} classes.'.format(self.num_train, self.num_test, self.num_classes))
# Compute mean and standard deviation
self._compute_stats(mean, std) | en | 0.785557 | ILSVRC data generator. # Arguments: - root_dir: Root directory of the ILSVRC dataset, containing directories "ILSVRC2012_img_train" and "ILSVRC2012_img_val", both containing sub-directories with names of synsets and the images for each synset in the corresponding sub-directories. - classes: List of synsets to restrict the dataset to. Numeric labels will be assigned to these synsets in ascending order. If set to `None`, all available synsets will be used and enumerated in the lexicographical order. - mean: Channel-wise image mean for normalization (in "RGB" order). If set to `None`, mean and standard deviation will be computed from the images. - std: Channel-wise standard deviation for normalization (in "RGB" order). If set to `None`, standard deviation will be computed from the images. - color_mode: Image color mode, either "rgb" or "bgr". # Search for classes # Search for images # Compute mean and standard deviation | 2.607954 | 3 |
argus/model/__init__.py | NickVeld/argus | 0 | 6615590 | <gh_stars>0
from argus.model.model import Model
from argus.model.load import load_model
| from argus.model.model import Model
from argus.model.load import load_model | none | 1 | 1.11722 | 1 | |
src/Controllers/bug2.py | karanbali/CSE-568-Lab-2 | 0 | 6615591 | #!/usr/bin/env python
# Importing all dependencies
import numpy as np
import rospy
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import Odometry
import geometry_msgs
from geometry_msgs.msg import Twist
from geometry_msgs.msg import Point
import tf
import random
import math
from tf.transformations import euler_from_quaternion
# Initialzing state ('GOALSEEK' == 1 || 'WALLFOLLOW' == 0)
state = 1
# Main bug2 function
def bug2(msg):
global sate
global rang1
x = msg.pose.pose.position.x
y = msg.pose.pose.position.y
z = 0
# Calulating distance of bug from start-goal line
lndst = ln_dst(x,y)
# Calulating distance of bug from goal
goaldst = goal_dst(x,y)
# Calculating bug's position angle after converting it from 'quaternion' to 'euler'
quaternion = (msg.pose.pose.orientation.x, msg.pose.pose.orientation.y, msg.pose.pose.orientation.z, msg.pose.pose.orientation.w)
roll, pitch, yaw = tf.transformations.euler_from_quaternion(quaternion)
# Bug's position angle
robang = yaw
# Bug's angle from goal
goalang = (math.atan2((9.0-y),(4.5-x)) - robang)
#Initializing Twist
cmd_vel = Twist()
# Function to calculate forward velocity
def cmpt(sonar):
minsonar = min(sonar[90:270])
if minsonar < 0.6:
return 0
else:
return 1
# Function to calculate rotation when in "GOALSEEK"
def goalrot(ang):
global robang
if abs(ang) > np.pi/20:
return ang
else:
return 0
# Function to calculate rotation when in "WALLFOLLOW"
def wallrot(sonar):
# Initializing Left & Right sonar values
global rang1
rt = rang1[180:360]
lf = rang1[0:180]
# Minimum of left & right sonar values
minrt = min(rt)
minlf = min(lf)
# If both minrt & minlf too close than take a hard right of 90 degress
if (minrt < 0.01) and (minlf < 0.01):
return np.pi/2
# else turn according to distance b/w wall & bug
else:
turn = (np.pi/2 - ((np.pi/6)*(3 - minrt)))
return turn
# Function to detect obstacle in path
def obst(ang, sonar):
global state
global rang1
# If minimum for some fov is greater then 0.7 then return 0 else return 1
if min(rang1[90:270]) > 0.7:
return 0
else:
return 1
# Function to check the whether the goal has been reached
def atGoal(dist):
if dist < 1:
return 1
else:
return 0
# Setting linear velocity of bug
cmd_vel.linear.x = cmpt(rang1)
global state
obstck = obst(goalang, rang1)
# If in "GOALSEEK", then...
if (state == 1):
cmd_vel.angular.z = goalrot(goalang)
# If in "WALLFOLLOW", then...
if (state == 0):
cmd_vel.angular.z = wallrot(rang1)
# Changes state depending upon the condition
if (obstck == 1 and state == 1):
print("State changed to WALLFOLLOW")
state = 0
elif (obstck == 0 and state == 0):
print("State changed to GOALSEEK")
state = 1
# Final check if goal has been reached
if atGoal(goaldst) == 1:
# Stop the bug from moving
cmd_vel.linear.x = 0
cmd_vel.angular.z = 0
print("Goal Reached")
# Publishes the Bug's Twist
pub = rospy.Publisher('/robot_0/cmd_vel', Twist)
pub.publish(cmd_vel)
# Callback to get LaserScan data & store it in a Global variable 'rang1'
def cb(range):
global rang1
rang1 = range.ranges
# Function to calculatet the distance of bug from line joining intial position & goal position
def ln_dst(x,y):
global init_x
global init_y
global goal_x
global goal_y
dist = np.abs(((goal_y - init_y) * x) - ((goal_x - init_x)*y) + (goal_x * init_y) - (goal_y * init_x)) / math.sqrt(np.power((goal_y - init_y),2) + np.power((goal_x - init_x),2))
return dist
# Function to calculatet the distance of bug from goal position
def goal_dst(x,y):
global goal_x
global goal_y
dist = math.sqrt((y - goal_y)*(y - goal_y) + (x - goal_x)*(x - goal_x))
return dist
# Main function for evader
def evader_func():
global init_x
global init_y
global goal_x
global goal_y
# Getting initial position of bug
init_x = rospy.get_param("/bug2/init_x")
init_y = rospy.get_param("/bug2/init_y")
# Getting final goal position
goal_x = rospy.get_param("/bug2/goal_x")
goal_y = rospy.get_param("/bug2/goal_y")
# Initiating evader node
rospy.init_node('bug2')
# Setting the rate of cycles
rate = rospy.Rate(100) # 5Hz
# Subsciber to get LaserScan data & store it in a Global variable 'rang1'
rospy.Subscriber("/robot_0/base_scan", LaserScan, cb)
while not rospy.is_shutdown():
try:
# Subscribing to '/base_pose_ground_truth' & implement 'bug2' algo as a callback
#pos = rospy.Subscriber('/base_pose_ground_truth', Odometry, bug2, (ran_x,ran_y, pointX, pointY), queue_size=20)
pos = rospy.Subscriber('/base_pose_ground_truth', Odometry, bug2, queue_size=20)
rate.sleep()
except:
continue
if __name__ == '__main__':
try:
evader_func()
except rospy.ROSInterruptException:
pass | #!/usr/bin/env python
# Importing all dependencies
import numpy as np
import rospy
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import Odometry
import geometry_msgs
from geometry_msgs.msg import Twist
from geometry_msgs.msg import Point
import tf
import random
import math
from tf.transformations import euler_from_quaternion
# Initialzing state ('GOALSEEK' == 1 || 'WALLFOLLOW' == 0)
state = 1
# Main bug2 function
def bug2(msg):
global sate
global rang1
x = msg.pose.pose.position.x
y = msg.pose.pose.position.y
z = 0
# Calulating distance of bug from start-goal line
lndst = ln_dst(x,y)
# Calulating distance of bug from goal
goaldst = goal_dst(x,y)
# Calculating bug's position angle after converting it from 'quaternion' to 'euler'
quaternion = (msg.pose.pose.orientation.x, msg.pose.pose.orientation.y, msg.pose.pose.orientation.z, msg.pose.pose.orientation.w)
roll, pitch, yaw = tf.transformations.euler_from_quaternion(quaternion)
# Bug's position angle
robang = yaw
# Bug's angle from goal
goalang = (math.atan2((9.0-y),(4.5-x)) - robang)
#Initializing Twist
cmd_vel = Twist()
# Function to calculate forward velocity
def cmpt(sonar):
minsonar = min(sonar[90:270])
if minsonar < 0.6:
return 0
else:
return 1
# Function to calculate rotation when in "GOALSEEK"
def goalrot(ang):
global robang
if abs(ang) > np.pi/20:
return ang
else:
return 0
# Function to calculate rotation when in "WALLFOLLOW"
def wallrot(sonar):
# Initializing Left & Right sonar values
global rang1
rt = rang1[180:360]
lf = rang1[0:180]
# Minimum of left & right sonar values
minrt = min(rt)
minlf = min(lf)
# If both minrt & minlf too close than take a hard right of 90 degress
if (minrt < 0.01) and (minlf < 0.01):
return np.pi/2
# else turn according to distance b/w wall & bug
else:
turn = (np.pi/2 - ((np.pi/6)*(3 - minrt)))
return turn
# Function to detect obstacle in path
def obst(ang, sonar):
global state
global rang1
# If minimum for some fov is greater then 0.7 then return 0 else return 1
if min(rang1[90:270]) > 0.7:
return 0
else:
return 1
# Function to check the whether the goal has been reached
def atGoal(dist):
if dist < 1:
return 1
else:
return 0
# Setting linear velocity of bug
cmd_vel.linear.x = cmpt(rang1)
global state
obstck = obst(goalang, rang1)
# If in "GOALSEEK", then...
if (state == 1):
cmd_vel.angular.z = goalrot(goalang)
# If in "WALLFOLLOW", then...
if (state == 0):
cmd_vel.angular.z = wallrot(rang1)
# Changes state depending upon the condition
if (obstck == 1 and state == 1):
print("State changed to WALLFOLLOW")
state = 0
elif (obstck == 0 and state == 0):
print("State changed to GOALSEEK")
state = 1
# Final check if goal has been reached
if atGoal(goaldst) == 1:
# Stop the bug from moving
cmd_vel.linear.x = 0
cmd_vel.angular.z = 0
print("Goal Reached")
# Publishes the Bug's Twist
pub = rospy.Publisher('/robot_0/cmd_vel', Twist)
pub.publish(cmd_vel)
# Callback to get LaserScan data & store it in a Global variable 'rang1'
def cb(range):
global rang1
rang1 = range.ranges
# Function to calculatet the distance of bug from line joining intial position & goal position
def ln_dst(x,y):
global init_x
global init_y
global goal_x
global goal_y
dist = np.abs(((goal_y - init_y) * x) - ((goal_x - init_x)*y) + (goal_x * init_y) - (goal_y * init_x)) / math.sqrt(np.power((goal_y - init_y),2) + np.power((goal_x - init_x),2))
return dist
# Function to calculatet the distance of bug from goal position
def goal_dst(x,y):
global goal_x
global goal_y
dist = math.sqrt((y - goal_y)*(y - goal_y) + (x - goal_x)*(x - goal_x))
return dist
# Main function for evader
def evader_func():
global init_x
global init_y
global goal_x
global goal_y
# Getting initial position of bug
init_x = rospy.get_param("/bug2/init_x")
init_y = rospy.get_param("/bug2/init_y")
# Getting final goal position
goal_x = rospy.get_param("/bug2/goal_x")
goal_y = rospy.get_param("/bug2/goal_y")
# Initiating evader node
rospy.init_node('bug2')
# Setting the rate of cycles
rate = rospy.Rate(100) # 5Hz
# Subsciber to get LaserScan data & store it in a Global variable 'rang1'
rospy.Subscriber("/robot_0/base_scan", LaserScan, cb)
while not rospy.is_shutdown():
try:
# Subscribing to '/base_pose_ground_truth' & implement 'bug2' algo as a callback
#pos = rospy.Subscriber('/base_pose_ground_truth', Odometry, bug2, (ran_x,ran_y, pointX, pointY), queue_size=20)
pos = rospy.Subscriber('/base_pose_ground_truth', Odometry, bug2, queue_size=20)
rate.sleep()
except:
continue
if __name__ == '__main__':
try:
evader_func()
except rospy.ROSInterruptException:
pass | en | 0.734485 | #!/usr/bin/env python # Importing all dependencies # Initialzing state ('GOALSEEK' == 1 || 'WALLFOLLOW' == 0) # Main bug2 function # Calulating distance of bug from start-goal line # Calulating distance of bug from goal # Calculating bug's position angle after converting it from 'quaternion' to 'euler' # Bug's position angle # Bug's angle from goal #Initializing Twist # Function to calculate forward velocity # Function to calculate rotation when in "GOALSEEK" # Function to calculate rotation when in "WALLFOLLOW" # Initializing Left & Right sonar values # Minimum of left & right sonar values # If both minrt & minlf too close than take a hard right of 90 degress # else turn according to distance b/w wall & bug # Function to detect obstacle in path # If minimum for some fov is greater then 0.7 then return 0 else return 1 # Function to check the whether the goal has been reached # Setting linear velocity of bug # If in "GOALSEEK", then... # If in "WALLFOLLOW", then... # Changes state depending upon the condition # Final check if goal has been reached # Stop the bug from moving # Publishes the Bug's Twist # Callback to get LaserScan data & store it in a Global variable 'rang1' # Function to calculatet the distance of bug from line joining intial position & goal position # Function to calculatet the distance of bug from goal position # Main function for evader # Getting initial position of bug # Getting final goal position # Initiating evader node # Setting the rate of cycles # 5Hz # Subsciber to get LaserScan data & store it in a Global variable 'rang1' # Subscribing to '/base_pose_ground_truth' & implement 'bug2' algo as a callback #pos = rospy.Subscriber('/base_pose_ground_truth', Odometry, bug2, (ran_x,ran_y, pointX, pointY), queue_size=20) | 2.196146 | 2 |
data/contacts.py | murtazintagir/python_training | 0 | 6615592 | from model.contact import Contact
constant = [
Contact(first_name="first_name", middle_name="middle_name", last_name="last_name", nickname="nickname",
title="title", company="company", address="address", home="home", mobile="mobile", work="work", fax="fax",
email="email", email2="email2", email3="email3", homepage="homepage", address2="address2", phone2="phone2",
notes="notes", bday="9", bmonth="March", byear="1990", aday="9", amonth="March", ayear="2090")
]
| from model.contact import Contact
constant = [
Contact(first_name="first_name", middle_name="middle_name", last_name="last_name", nickname="nickname",
title="title", company="company", address="address", home="home", mobile="mobile", work="work", fax="fax",
email="email", email2="email2", email3="email3", homepage="homepage", address2="address2", phone2="phone2",
notes="notes", bday="9", bmonth="March", byear="1990", aday="9", amonth="March", ayear="2090")
]
| none | 1 | 1.936829 | 2 | |
Homework_6/F_very_simple_task/F_very_simple_task.py | dimk00z/summer_yandex_algorithmic_course | 8 | 6615593 | def calculate_time(n, x, y):
left, right = 0, (n - 1) * max(x, y)
while right > left + 1:
middle = (right + left) // 2
if (middle // x + middle // y) < n - 1:
left = middle
else:
right = middle
return right + min(x, y)
with open('input.txt') as file:
n, x, y = map(int, file.readlines()[0].split())
with open('output.txt', 'w') as file:
file.write(str(calculate_time(n, x, y)))
| def calculate_time(n, x, y):
left, right = 0, (n - 1) * max(x, y)
while right > left + 1:
middle = (right + left) // 2
if (middle // x + middle // y) < n - 1:
left = middle
else:
right = middle
return right + min(x, y)
with open('input.txt') as file:
n, x, y = map(int, file.readlines()[0].split())
with open('output.txt', 'w') as file:
file.write(str(calculate_time(n, x, y)))
| none | 1 | 3.32277 | 3 | |
fuzzyCmeans.py | felialois/iml_kmeans | 0 | 6615594 | import math
import numpy as np
import utils
def execute(data, max_iterations, clusters, epsilon):
    """Run fuzzy c-means clustering on ``data``.

    Args:
        data: list of sample rows (each a list of numeric features).
        max_iterations: hard cap on the number of update iterations.
        clusters: number of fuzzy clusters to fit.
        epsilon: convergence threshold on centroid movement.

    Returns:
        (u_matrix, cntrs): the final membership matrix (one row per sample,
        one column per cluster) and the list of cluster centroids.
    """
    # Number of rows
    rows = len(data)
    # Membership matrix, initialised with random rows normalised to sum to 1
    u_matrix = []
    # fuzziness exponent (m); 2.0 is the conventional default
    fc = 2.00
    for i in range(rows):
        vals = np.random.rand(clusters)
        s = sum(vals)
        memb = [v / s for v in vals]
        u_matrix.append(memb)
    # iteration counter
    iteration = 0
    cntrs = []
    old_cntrs = []
    # Stop when the iteration cap is reached or the centroids stop moving
    while (iteration < max_iterations) and (check_iteration(old_cntrs, cntrs, epsilon)):
        old_cntrs = cntrs
        # Update the centroids from the current memberships...
        cntrs = get_centroids(data, u_matrix, clusters, fc)
        # ...then recompute the memberships from the new centroids
        u_matrix = calculate_membership_matrix(data, cntrs, u_matrix, fc)
        iteration += 1
    return u_matrix, cntrs
# Convergence test: True means another iteration is still needed.
def check_iteration(c1, c2, epsilon):
    """Return True while the centroids have not yet converged."""
    if not (c1 and c2):
        # First pass: at least one centroid list is still empty.
        return True
    moves = [utils.distance(old, new) for old, new in zip(c1, c2)]
    return not (np.amax(moves) < epsilon)
def get_centroids(data, membership, clusters, fuzzy_constant):
    """Compute the weighted cluster centroids from the membership matrix.

    Each centroid j is sum_i(u_ij**m * x_i) / sum_i(u_ij**m), the standard
    fuzzy c-means centroid update.
    """
    centroids = []
    # Transpose so mem[i] is cluster i's membership column across all rows.
    # list() is required: in Python 3 a zip object is not subscriptable,
    # so the original bare zip(*membership) crashed with TypeError.
    mem = list(zip(*membership))
    for i in range(clusters):
        # The cluster i memberships for every row
        mm = mem[i]
        # Raise every membership to the fuzziness exponent
        mm_prod = [u ** fuzzy_constant for u in mm]
        denominator = sum(mm_prod)
        numerator = []
        # Accumulate uij**m * xi per feature column
        for j in range(len(data[0])):
            temp = []
            for k in range(len(data)):
                temp.append(mm_prod[k] * data[k][j])
            numerator.append(temp)
        numerator = [sum(x) for x in numerator]
        centroid = []
        # Normalise by the total (exponentiated) membership mass
        for n in numerator:
            centroid.append(n / denominator)
        centroids.append(centroid)
    return centroids
# Recompute the membership matrix from the current centroids (in place).
def calculate_membership_matrix(data, centroids, membership, fuzzy_constant):
    """FCM membership update: u_ik = 1 / sum_c (d_ik / d_ic)**(2/(m-1))."""
    u_matrix = membership  # mutated in place and also returned
    fc = 2 / (fuzzy_constant - 1)
    n_clusters = len(centroids)
    for i, row in enumerate(data):
        # Distance from this sample to every centroid.
        distances = [utils.distance(row, centroids[k]) for k in range(n_clusters)]
        for k in range(n_clusters):
            den = sum(math.pow(float(distances[k] / distances[c]), fc) for c in range(n_clusters))
            u_matrix[i][k] = float(1 / den)
    return u_matrix
# Hard-assign each row to the cluster with the largest membership value.
def get_labels(rows, membership):
    """Return, for each of the first ``rows`` rows, the index of its
    highest-membership cluster (first index wins on ties; -1 for an
    empty row, matching the original scan)."""
    labels = []
    for i in range(rows):
        row = membership[i]
        # max() returns the first index attaining the maximum, which is
        # exactly what the strict ">" scan produced.
        labels.append(max(range(len(row)), key=row.__getitem__) if row else -1)
    return labels
| import math
import numpy as np
import utils
def execute(data, max_iterations, clusters, epsilon):
    """Run fuzzy c-means clustering on ``data``.

    Args:
        data: list of sample rows (each a list of numeric features).
        max_iterations: hard cap on the number of update iterations.
        clusters: number of fuzzy clusters to fit.
        epsilon: convergence threshold on centroid movement.

    Returns:
        (u_matrix, cntrs): the final membership matrix (one row per sample,
        one column per cluster) and the list of cluster centroids.
    """
    # Number of rows
    rows = len(data)
    # Membership matrix, initialised with random rows normalised to sum to 1
    u_matrix = []
    # fuzziness exponent (m); 2.0 is the conventional default
    fc = 2.00
    for i in range(rows):
        vals = np.random.rand(clusters)
        s = sum(vals)
        memb = [v / s for v in vals]
        u_matrix.append(memb)
    # iteration counter
    iteration = 0
    cntrs = []
    old_cntrs = []
    # Stop when the iteration cap is reached or the centroids stop moving
    while (iteration < max_iterations) and (check_iteration(old_cntrs, cntrs, epsilon)):
        old_cntrs = cntrs
        # Update the centroids from the current memberships...
        cntrs = get_centroids(data, u_matrix, clusters, fc)
        # ...then recompute the memberships from the new centroids
        u_matrix = calculate_membership_matrix(data, cntrs, u_matrix, fc)
        iteration += 1
    return u_matrix, cntrs
# Convergence test: True means another iteration is still needed.
def check_iteration(c1, c2, epsilon):
    """Return True while the centroids have not yet converged."""
    if not (c1 and c2):
        # First pass: at least one centroid list is still empty.
        return True
    moves = [utils.distance(old, new) for old, new in zip(c1, c2)]
    return not (np.amax(moves) < epsilon)
def get_centroids(data, membership, clusters, fuzzy_constant):
    """Compute the weighted cluster centroids from the membership matrix.

    Each centroid j is sum_i(u_ij**m * x_i) / sum_i(u_ij**m), the standard
    fuzzy c-means centroid update.
    """
    centroids = []
    # Transpose so mem[i] is cluster i's membership column across all rows.
    # list() is required: in Python 3 a zip object is not subscriptable,
    # so the original bare zip(*membership) crashed with TypeError.
    mem = list(zip(*membership))
    for i in range(clusters):
        # The cluster i memberships for every row
        mm = mem[i]
        # Raise every membership to the fuzziness exponent
        mm_prod = [u ** fuzzy_constant for u in mm]
        denominator = sum(mm_prod)
        numerator = []
        # Accumulate uij**m * xi per feature column
        for j in range(len(data[0])):
            temp = []
            for k in range(len(data)):
                temp.append(mm_prod[k] * data[k][j])
            numerator.append(temp)
        numerator = [sum(x) for x in numerator]
        centroid = []
        # Normalise by the total (exponentiated) membership mass
        for n in numerator:
            centroid.append(n / denominator)
        centroids.append(centroid)
    return centroids
# Recompute the membership matrix from the current centroids (in place).
def calculate_membership_matrix(data, centroids, membership, fuzzy_constant):
    """FCM membership update: u_ik = 1 / sum_c (d_ik / d_ic)**(2/(m-1))."""
    u_matrix = membership  # mutated in place and also returned
    fc = 2 / (fuzzy_constant - 1)
    n_clusters = len(centroids)
    for i, row in enumerate(data):
        # Distance from this sample to every centroid.
        distances = [utils.distance(row, centroids[k]) for k in range(n_clusters)]
        for k in range(n_clusters):
            den = sum(math.pow(float(distances[k] / distances[c]), fc) for c in range(n_clusters))
            u_matrix[i][k] = float(1 / den)
    return u_matrix
# Hard-assign each row to the cluster with the largest membership value.
def get_labels(rows, membership):
    """Return, for each of the first ``rows`` rows, the index of its
    highest-membership cluster (first index wins on ties; -1 for an
    empty row, matching the original scan)."""
    labels = []
    for i in range(rows):
        row = membership[i]
        # max() returns the first index attaining the maximum, which is
        # exactly what the strict ">" scan produced.
        labels.append(max(range(len(row)), key=row.__getitem__) if row else -1)
    return labels
| en | 0.821483 | # Number of features # Number of rows # create the first membership matrix # fuzzy constant # create the membershipo metric using random numbers # iteration counter # Check if the number of iterations has reached the maximum or if the iterations have converged # calcualte the Centroids # calculate the membership matrix # Check if the iterations have converged # Separate the memberships by cluster number # The cluster i memberships for every row # elevate every membership by the fuzzy constant # In every column calculate the ith centroid # uij**m * xi # Calculate the centroid # Calculate a new membership function using the centroids # Calculate the distance of the row with the centroids # Calculate which cluster is the one with the highest membership for every row | 2.611024 | 3 |
src/prostatex/classification.py | piotrsobecki/PCa-CNNs2 | 1 | 6615595 | import random
import math
def train_test_split(data, train_test_percentage: float):
    """Randomly split ``data`` into (train, test) lists.

    Args:
        data: sequence of samples.
        train_test_percentage: fraction (0..1) of samples for the train set.

    Returns:
        (train_data, test_data); together they contain every input sample.
    """
    # Shuffle a copy so the caller's list is not mutated as a side effect
    # (the original shuffled the argument in place).
    shuffled = list(data)
    random.shuffle(shuffled)
    train_len = math.floor(len(shuffled) * train_test_percentage)
    return shuffled[:train_len], shuffled[train_len:]
| import random
import math
def train_test_split(data, train_test_percentage: float):
    """Randomly split ``data`` into (train, test) lists.

    Args:
        data: sequence of samples.
        train_test_percentage: fraction (0..1) of samples for the train set.

    Returns:
        (train_data, test_data); together they contain every input sample.
    """
    # Shuffle a copy so the caller's list is not mutated as a side effect
    # (the original shuffled the argument in place).
    shuffled = list(data)
    random.shuffle(shuffled)
    train_len = math.floor(len(shuffled) * train_test_percentage)
    return shuffled[:train_len], shuffled[train_len:]
| none | 1 | 2.882125 | 3 | |
chapter7/services/calculate_entropy_service.py | andreffs18/collective-intelligence | 1 | 6615596 | from math import log
from services import CountUniqueElementsService
class CalculateEntropyService:
    """Computes the Shannon entropy of the result labels in a set of rows."""

    def __init__(self, rows):
        self.rows = rows

    def call(self):
        """Return -sum(p(x) * log2(p(x))) over all distinct results in the rows."""
        counts = CountUniqueElementsService(self.rows).call()
        total = len(self.rows)
        entropy = 0.0
        for element in counts:
            p = float(counts[element]) / total
            # log(p)/log(2) is log base 2.
            entropy -= p * (log(p) / log(2))
        return entropy
| from math import log
from services import CountUniqueElementsService
class CalculateEntropyService:
    """Computes the Shannon entropy of the result labels in a set of rows."""

    def __init__(self, rows):
        self.rows = rows

    def call(self):
        """Return -sum(p(x) * log2(p(x))) over all distinct results in the rows."""
        counts = CountUniqueElementsService(self.rows).call()
        total = len(self.rows)
        entropy = 0.0
        for element in counts:
            p = float(counts[element]) / total
            # log(p)/log(2) is log base 2.
            entropy -= p * (log(p) / log(2))
        return entropy
| en | 0.708287 | Calculate the sum of p(x)*log(p(x)) across all dfferent possible results # Now calculate the entropy | 3.250562 | 3 |
gitissius/commands/update.py | mcepl/gitissius | 2 | 6615597 | <filename>gitissius/commands/update.py<gh_stars>1-10
import gitissius.commands as commands
import gitissius.gitshelve as gitshelve
class Command(commands.GitissiusCommand):
    """Pull issues from the remote repo, then push local ones."""
    name = "update"
    aliases = ['u']
    help = "Pull issues from upstream and then push"

    def _execute(self, options, args):
        # Imported here (inside the method), as in the original module.
        from pull import Command as PullCommand
        from push import Command as PushCommand
        # Each sub-command object is created and then immediately invoked.
        for command_class in (PullCommand, PushCommand):
            command_class()(None)
| <filename>gitissius/commands/update.py<gh_stars>1-10
import gitissius.commands as commands
import gitissius.gitshelve as gitshelve
class Command(commands.GitissiusCommand):
    """Pull issues from the remote repo, then push local ones."""
    name = "update"
    aliases = ['u']
    help = "Pull issues from upstream and then push"

    def _execute(self, options, args):
        # Imported here (inside the method), as in the original module.
        from pull import Command as PullCommand
        from push import Command as PushCommand
        # Each sub-command object is created and then immediately invoked.
        for command_class in (PullCommand, PushCommand):
            command_class()(None)
| en | 0.894917 | Pull issues from repo, then push # this looks funny, because we first create a Command object # and then we execute it | 2.533084 | 3 |
_mod_config.py | sildein/python_tidbits | 0 | 6615598 | # _mod_config.py
# This module is responsible for sanity checking and displaying error and help
# messages.
import os
from sys import argv, modules, version as py_version
import _mod_prettyprint as prettyprint
# Short alias for the project's pretty-printer.
pprint = prettyprint.pretty_print
# Dictionary of ID/message pairs, populated by parse_messages() on import.
msg_list = {}
def sanity_check():
    """Validate the running environment against the program's configuration.

    Reads optional flags (``py2_gtfo``, ``unix_check``) from the __main__
    module and exits with an explanatory message when the environment is
    unsupported.
    """
    ###########################################################################
    # Get variables from the main scope and check per-program configuration
    ###########################################################################
    # vars() yields the module's attribute dict; subscripting the module
    # object itself (as the original did) raises TypeError, which the
    # KeyError handlers below never caught.
    main_vars = vars(modules['__main__'])
    # Check whether the program opted out of Python 2 support.
    try:
        py2_gtfo = main_vars['py2_gtfo']
    except KeyError:
        py2_gtfo = False
    # Is the tool redundant or useless on the average *nix?
    try:
        unix_check = main_vars['unix_check']
    except KeyError:
        unix_check = False
    ###########################################################################
    # Begin enforcing per-program config
    ###########################################################################
    if py2_gtfo and py_version.startswith('2'):
        print(msg_list['python2_msg'])
        exit()
    if unix_check and os.name == 'posix':
        print(msg_list['unix_os_msg'])
        exit()
def parse_messages(msg_loc):
    """Parse a message-list file into the module-level ``msg_list`` dict.

    Format: lines starting with '#' are comments, '$<id>' opens a message,
    '$end' closes it; every other line is appended verbatim (newlines kept)
    to the current message body.
    """
    with open(msg_loc, 'r') as msg_file:
        lines = msg_file.readlines()
    current_id = ''
    body = ''
    for raw in lines:
        if raw.startswith('#'):
            continue  # comment line
        if raw.startswith('$'):
            marker = raw.strip().strip('$')
            if marker == 'end':
                msg_list[current_id] = body
                current_id = ''
                body = ''
            else:
                current_id = marker
        else:
            body += raw
###############################################################################
# Run on import: locate messages.lst next to the executable and load it into
# msg_list so error/help messages are available to sanity_check().
###############################################################################
install_root = os.path.dirname(argv[0])
msg_location = os.path.join(install_root, 'messages.lst')
parse_messages(msg_location)
| # _mod_config.py
# This module is responsible for sanity checking and displaying error and help
# messages.
import os
from sys import argv, modules, version as py_version
import _mod_prettyprint as prettyprint
# Short alias for the project's pretty-printer.
pprint = prettyprint.pretty_print
# Dictionary of ID/message pairs, populated by parse_messages() on import.
msg_list = {}
def sanity_check():
    """Validate the running environment against the program's configuration.

    Reads optional flags (``py2_gtfo``, ``unix_check``) from the __main__
    module and exits with an explanatory message when the environment is
    unsupported.
    """
    ###########################################################################
    # Get variables from the main scope and check per-program configuration
    ###########################################################################
    # vars() yields the module's attribute dict; subscripting the module
    # object itself (as the original did) raises TypeError, which the
    # KeyError handlers below never caught.
    main_vars = vars(modules['__main__'])
    # Check whether the program opted out of Python 2 support.
    try:
        py2_gtfo = main_vars['py2_gtfo']
    except KeyError:
        py2_gtfo = False
    # Is the tool redundant or useless on the average *nix?
    try:
        unix_check = main_vars['unix_check']
    except KeyError:
        unix_check = False
    ###########################################################################
    # Begin enforcing per-program config
    ###########################################################################
    if py2_gtfo and py_version.startswith('2'):
        print(msg_list['python2_msg'])
        exit()
    if unix_check and os.name == 'posix':
        print(msg_list['unix_os_msg'])
        exit()
def parse_messages(msg_loc):
    """Parse a message-list file into the module-level ``msg_list`` dict.

    Format: lines starting with '#' are comments, '$<id>' opens a message,
    '$end' closes it; every other line is appended verbatim (newlines kept)
    to the current message body.
    """
    with open(msg_loc, 'r') as msg_file:
        lines = msg_file.readlines()
    current_id = ''
    body = ''
    for raw in lines:
        if raw.startswith('#'):
            continue  # comment line
        if raw.startswith('$'):
            marker = raw.strip().strip('$')
            if marker == 'end':
                msg_list[current_id] = body
                current_id = ''
                body = ''
            else:
                current_id = marker
        else:
            body += raw
###############################################################################
# Run on import: locate messages.lst next to the executable and load it into
# msg_list so error/help messages are available to sanity_check().
###############################################################################
install_root = os.path.dirname(argv[0])
msg_location = os.path.join(install_root, 'messages.lst')
parse_messages(msg_location)
| de | 0.606786 | # _mod_config.py # This module is responsible for sanity checking and displaying error and help # messages. # Dictionary of ID/message pairs ########################################################################### # Get variables from the main scope and check per-program configuration ########################################################################### # Check to see if the user ignored the warnings about using aging shit. # Is the tool redundant or useless on the average *nix? ########################################################################### # Begin enforcing per-program config ########################################################################### ############################################################################### # Run on import ############################################################################### | 2.137834 | 2 |
src/coleta de conteudos sobre vacina/filter_dfs.py | Renatolopo/MonitorNoticia | 0 | 6615599 | import getters as get
import pandas as pd
def filter_vacine(noticia):
    """Return True when any whitespace-separated token of the text appears
    in the ``vacinas`` keyword list."""
    # NOTE(review): multi-word entries in ``vacinas`` (e.g. "Ad26 SARS-CoV-2")
    # can never match a single token -- confirm whether that is intended.
    return any(token in vacinas for token in noticia.split())

# Vaccine-related keywords: vaccine names plus Portuguese inflections.
vacinas = ["Vacina","vacina","vacinas","Vacinas","Ad26 SARS-CoV-2","mRNA 1273", "BNT162", "AZD1222", "CoronaVac", "AD5-nCov",
           "NVX-CoV2373", "Sputnik V", "Covaxin", "vacinado", "vacinando", "vacinou","imunidade", "vacinara", "Astrazeneca"]
def filter_G1():
    """Copy vaccine-related G1 headlines into the noticia_vacina table."""
    df = get.get_df('G1')
    con = get.get_mysql()
    site = 'G1'
    try:
        cursor = con.cursor()
        for i in range(len(df.ID)):
            if not filter_vacine(df["TITLE"].iloc[i]):
                continue
            link = df['LINK'].iloc[i]
            publishied = df['PUBLISHED'].iloc[i]
            title = df["TITLE"].iloc[i]
            try:
                # Parameterized INSERT -- safe against SQL injection.
                cursor.execute('INSERT INTO noticia_vacina(title, link, publishied, site)\
                VALUES(%s, %s, %s, %s)', (str(title), str(link), str(publishied), site))
                print('add...')
            except Exception as e:
                # Exception (not BaseException) so Ctrl-C / SystemExit still work.
                print(e)
                continue
        print("FIM")
        con.commit()
    finally:
        # Close the connection even if an unexpected error aborts the loop.
        con.close()
def filter_folha():
    """Copy vaccine-related Folha headlines into the noticia_vacina table."""
    df = get.get_df('Folha')
    con = get.get_mysql()
    site = 'Folha'
    try:
        cursor = con.cursor()
        for i in range(len(df.ID)):
            if not filter_vacine(df["TITLE"].iloc[i]):
                continue
            link = df['LINK'].iloc[i]
            publishied = df['PUBLISHED'].iloc[i]
            title = df["TITLE"].iloc[i]
            try:
                # Parameterized INSERT -- safe against SQL injection.
                cursor.execute('INSERT INTO noticia_vacina(title, link, publishied, site)\
                VALUES(%s, %s, %s, %s)', (str(title), str(link), str(publishied), site))
                print('add...')
            except Exception as e:
                # Exception (not BaseException) so Ctrl-C / SystemExit still work.
                print(e)
                continue
        print("FIM")
        con.commit()
    finally:
        # Close the connection even if an unexpected error aborts the loop.
        con.close()
def filter_tweets():
    """Copy vaccine-related tweets from tweet_paginas into vacina_paginas_noticia."""
    df = get.get_df('tweet_paginas')
    con = get.get_mysql()
    try:
        cursor = con.cursor()
        for i in range(len(df.tweet)):
            if not filter_vacine(df["tweet"].iloc[i]):
                continue
            pk_cod = df['pk_cod'].iloc[i]
            nome = df['nome'].iloc[i]
            tweet = df['tweet'].iloc[i]
            data = df['data'].iloc[i]
            id_tweet = df['id_tweet'].iloc[i]
            try:
                # Parameterized INSERT -- safe against SQL injection.
                cursor.execute('INSERT INTO vacina_paginas_noticia(pk_cod, nome, tweet, data, id_tweet)\
                VALUES(%s, %s, %s, %s, %s)', (str(pk_cod), str(nome), str(tweet), str(data), str(id_tweet)))
                print('add...')
            except Exception as e:
                # Exception (not BaseException) so Ctrl-C / SystemExit still work.
                print(e)
                continue
        print("FIM")
        con.commit()
    finally:
        # Close the connection even if an unexpected error aborts the loop.
        con.close()
| import getters as get
import pandas as pd
def filter_vacine(noticia):
    """Return True when any whitespace-separated token of the text appears
    in the ``vacinas`` keyword list."""
    # NOTE(review): multi-word entries in ``vacinas`` (e.g. "Ad26 SARS-CoV-2")
    # can never match a single token -- confirm whether that is intended.
    return any(token in vacinas for token in noticia.split())

# Vaccine-related keywords: vaccine names plus Portuguese inflections.
vacinas = ["Vacina","vacina","vacinas","Vacinas","Ad26 SARS-CoV-2","mRNA 1273", "BNT162", "AZD1222", "CoronaVac", "AD5-nCov",
           "NVX-CoV2373", "Sputnik V", "Covaxin", "vacinado", "vacinando", "vacinou","imunidade", "vacinara", "Astrazeneca"]
def filter_G1():
    """Copy vaccine-related G1 headlines into the noticia_vacina table."""
    df = get.get_df('G1')
    con = get.get_mysql()
    site = 'G1'
    try:
        cursor = con.cursor()
        for i in range(len(df.ID)):
            if not filter_vacine(df["TITLE"].iloc[i]):
                continue
            link = df['LINK'].iloc[i]
            publishied = df['PUBLISHED'].iloc[i]
            title = df["TITLE"].iloc[i]
            try:
                # Parameterized INSERT -- safe against SQL injection.
                cursor.execute('INSERT INTO noticia_vacina(title, link, publishied, site)\
                VALUES(%s, %s, %s, %s)', (str(title), str(link), str(publishied), site))
                print('add...')
            except Exception as e:
                # Exception (not BaseException) so Ctrl-C / SystemExit still work.
                print(e)
                continue
        print("FIM")
        con.commit()
    finally:
        # Close the connection even if an unexpected error aborts the loop.
        con.close()
def filter_folha():
    """Copy vaccine-related Folha headlines into the noticia_vacina table."""
    df = get.get_df('Folha')
    con = get.get_mysql()
    site = 'Folha'
    try:
        cursor = con.cursor()
        for i in range(len(df.ID)):
            if not filter_vacine(df["TITLE"].iloc[i]):
                continue
            link = df['LINK'].iloc[i]
            publishied = df['PUBLISHED'].iloc[i]
            title = df["TITLE"].iloc[i]
            try:
                # Parameterized INSERT -- safe against SQL injection.
                cursor.execute('INSERT INTO noticia_vacina(title, link, publishied, site)\
                VALUES(%s, %s, %s, %s)', (str(title), str(link), str(publishied), site))
                print('add...')
            except Exception as e:
                # Exception (not BaseException) so Ctrl-C / SystemExit still work.
                print(e)
                continue
        print("FIM")
        con.commit()
    finally:
        # Close the connection even if an unexpected error aborts the loop.
        con.close()
def filter_tweets():
    """Copy vaccine-related tweets from tweet_paginas into vacina_paginas_noticia."""
    df = get.get_df('tweet_paginas')
    con = get.get_mysql()
    try:
        cursor = con.cursor()
        for i in range(len(df.tweet)):
            if not filter_vacine(df["tweet"].iloc[i]):
                continue
            pk_cod = df['pk_cod'].iloc[i]
            nome = df['nome'].iloc[i]
            tweet = df['tweet'].iloc[i]
            data = df['data'].iloc[i]
            id_tweet = df['id_tweet'].iloc[i]
            try:
                # Parameterized INSERT -- safe against SQL injection.
                cursor.execute('INSERT INTO vacina_paginas_noticia(pk_cod, nome, tweet, data, id_tweet)\
                VALUES(%s, %s, %s, %s, %s)', (str(pk_cod), str(nome), str(tweet), str(data), str(id_tweet)))
                print('add...')
            except Exception as e:
                # Exception (not BaseException) so Ctrl-C / SystemExit still work.
                print(e)
                continue
        print("FIM")
        con.commit()
    finally:
        # Close the connection even if an unexpected error aborts the loop.
        con.close()
| ru | 0.258093 | #print(df) | 2.938231 | 3 |
oo/Carro.py | lfgribeiro/pythonbirds | 0 | 6615600 | """
Exemplo:
>>> # Testando motor
>>> motor = Motor()
>>> motor.velocidade
0
>>> motor.acelerar()
>>> motor.velocidade
1
>>> motor.acelerar()
>>> motor.velocidade
2
>>> motor.acelerar()
>>> motor.velocidade
3
>>> motor.frear()
>>> motor.velocidade
1
>>> motor.frear()
>>> motor.velocidade
0
>>> # Testando Direcao
>>> direcao = Direcao()
>>> direcao.valor
'Norte'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Leste'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Sul'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Oeste'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Norte'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Oeste'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Sul'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Leste'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Norte'
>>> carro = Carro(direcao, motor)
>>> carro.calcular_velocidade()
0
>>> carro.acelerar()
>>> carro.calcular_velocidade()
1
>>> carro.acelerar()
>>> carro.calcular_velocidade()
2
>>> carro.frear()
>>> carro.calcular_velocidade()
0
>>> carro.calcular_direcao()
'Norte'
>>> carro.girar_a_direita()
>>> carro.calcular_direcao()
'Leste'
>>> carro.girar_a_esquerda()
>>> carro.calcular_direcao()
'Norte'
>>> carro.girar_a_esquerda()
>>> carro.calcular_direcao()
'Oeste'
>>>
"""
class Direcao:
    """Compass heading cycling through N -> L -> S -> O with wrap-around."""
    posicao = ('N', 'L', 'S', 'O')
    posicao_tupla = 0  # index into ``posicao``

    @classmethod
    def girar_a_direita(cls):
        """Turn 90 degrees clockwise, wrapping past 'O' back to 'N'."""
        # Modulo keeps the index in range; the original unbounded increment
        # raised IndexError after a full clockwise revolution, contradicting
        # the wrap-around behaviour shown in the module doctests.
        cls.posicao_tupla = (cls.posicao_tupla + 1) % len(cls.posicao)

    @classmethod
    def girar_a_esquerda(cls):
        """Turn 90 degrees counter-clockwise, wrapping past 'N' to 'O'."""
        cls.posicao_tupla = (cls.posicao_tupla - 1) % len(cls.posicao)
class Motor:
    """Engine whose speed is tracked as a class-level counter."""
    velocidade = 0

    @classmethod
    def acelerar(cls):
        """Increase the speed by one unit."""
        cls.velocidade = cls.velocidade + 1

    @classmethod
    def frear(cls):
        """Decrease the speed by one unit, never dropping below zero."""
        if cls.velocidade:
            cls.velocidade = cls.velocidade - 1
class Carro:
    """Car composed of an engine (``mot``) and a steering direction (``d``)."""

    def __init__(self, mot, d):
        self.mot = mot
        self.d = d

    def calcular_velocidade(self):
        """Current speed, read straight from the engine."""
        return self.mot.velocidade

    def calcular_direcao(self):
        """Current heading name, resolved from the direction's index."""
        direcao = self.d
        return direcao.posicao[direcao.posicao_tupla]
# Module-level demo instance; Motor/Direcao keep their state in class
# attributes, so this Carro reflects the shared class-level state.
carro = Carro(mot=Motor(), d=Direcao())
| """
Exemplo:
>>> # Testando motor
>>> motor = Motor()
>>> motor.velocidade
0
>>> motor.acelerar()
>>> motor.velocidade
1
>>> motor.acelerar()
>>> motor.velocidade
2
>>> motor.acelerar()
>>> motor.velocidade
3
>>> motor.frear()
>>> motor.velocidade
1
>>> motor.frear()
>>> motor.velocidade
0
>>> # Testando Direcao
>>> direcao = Direcao()
>>> direcao.valor
'Norte'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Leste'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Sul'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Oeste'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Norte'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Oeste'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Sul'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Leste'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Norte'
>>> carro = Carro(direcao, motor)
>>> carro.calcular_velocidade()
0
>>> carro.acelerar()
>>> carro.calcular_velocidade()
1
>>> carro.acelerar()
>>> carro.calcular_velocidade()
2
>>> carro.frear()
>>> carro.calcular_velocidade()
0
>>> carro.calcular_direcao()
'Norte'
>>> carro.girar_a_direita()
>>> carro.calcular_direcao()
'Leste'
>>> carro.girar_a_esquerda()
>>> carro.calcular_direcao()
'Norte'
>>> carro.girar_a_esquerda()
>>> carro.calcular_direcao()
'Oeste'
>>>
"""
class Direcao:
    """Compass heading cycling through N -> L -> S -> O with wrap-around."""
    posicao = ('N', 'L', 'S', 'O')
    posicao_tupla = 0  # index into ``posicao``

    @classmethod
    def girar_a_direita(cls):
        """Turn 90 degrees clockwise, wrapping past 'O' back to 'N'."""
        # Modulo keeps the index in range; the original unbounded increment
        # raised IndexError after a full clockwise revolution, contradicting
        # the wrap-around behaviour shown in the module doctests.
        cls.posicao_tupla = (cls.posicao_tupla + 1) % len(cls.posicao)

    @classmethod
    def girar_a_esquerda(cls):
        """Turn 90 degrees counter-clockwise, wrapping past 'N' to 'O'."""
        cls.posicao_tupla = (cls.posicao_tupla - 1) % len(cls.posicao)
class Motor:
    """Engine whose speed is tracked as a class-level counter."""
    velocidade = 0

    @classmethod
    def acelerar(cls):
        """Increase the speed by one unit."""
        cls.velocidade = cls.velocidade + 1

    @classmethod
    def frear(cls):
        """Decrease the speed by one unit, never dropping below zero."""
        if cls.velocidade:
            cls.velocidade = cls.velocidade - 1
class Carro:
    """Car composed of an engine (``mot``) and a steering direction (``d``)."""

    def __init__(self, mot, d):
        self.mot = mot
        self.d = d

    def calcular_velocidade(self):
        """Current speed, read straight from the engine."""
        return self.mot.velocidade

    def calcular_direcao(self):
        """Current heading name, resolved from the direction's index."""
        direcao = self.d
        return direcao.posicao[direcao.posicao_tupla]
# Module-level demo instance; Motor/Direcao keep their state in class
# attributes, so this Carro reflects the shared class-level state.
carro = Carro(mot=Motor(), d=Direcao())
| pt | 0.654472 | Exemplo: >>> # Testando motor >>> motor = Motor() >>> motor.velocidade 0 >>> motor.acelerar() >>> motor.velocidade 1 >>> motor.acelerar() >>> motor.velocidade 2 >>> motor.acelerar() >>> motor.velocidade 3 >>> motor.frear() >>> motor.velocidade 1 >>> motor.frear() >>> motor.velocidade 0 >>> # Testando Direcao >>> direcao = Direcao() >>> direcao.valor 'Norte' >>> direcao.girar_a_direita() >>> direcao.valor 'Leste' >>> direcao.girar_a_direita() >>> direcao.valor 'Sul' >>> direcao.girar_a_direita() >>> direcao.valor 'Oeste' >>> direcao.girar_a_direita() >>> direcao.valor 'Norte' >>> direcao.girar_a_esquerda() >>> direcao.valor 'Oeste' >>> direcao.girar_a_esquerda() >>> direcao.valor 'Sul' >>> direcao.girar_a_esquerda() >>> direcao.valor 'Leste' >>> direcao.girar_a_esquerda() >>> direcao.valor 'Norte' >>> carro = Carro(direcao, motor) >>> carro.calcular_velocidade() 0 >>> carro.acelerar() >>> carro.calcular_velocidade() 1 >>> carro.acelerar() >>> carro.calcular_velocidade() 2 >>> carro.frear() >>> carro.calcular_velocidade() 0 >>> carro.calcular_direcao() 'Norte' >>> carro.girar_a_direita() >>> carro.calcular_direcao() 'Leste' >>> carro.girar_a_esquerda() >>> carro.calcular_direcao() 'Norte' >>> carro.girar_a_esquerda() >>> carro.calcular_direcao() 'Oeste' >>> | 3.375786 | 3 |
stats.py | seojaehyung/EmotionX-2019 | 8 | 6615601 | <reponame>seojaehyung/EmotionX-2019
import sys
import json
import os
def process_stats(file_path, labels):
    """Print, for each label, its count and fraction over all utterances in
    the JSON dialogue file at ``file_path``, followed by the grand total.

    Raises KeyError for an emotion not in ``labels`` and ZeroDivisionError
    for an empty corpus (matching the original behaviour).
    """
    with open(file_path, 'r') as source_file:
        dialogues = json.load(source_file)
    stats = dict.fromkeys(labels, 0)
    for dialogue in dialogues:
        for utterance in dialogue:
            stats[utterance['emotion']] += 1
    total = sum(stats[label] for label in labels)
    for label in labels:
        print("{}\t{}\t{}".format(label, stats[label], stats[label] / float(total)))
    print('Total = {}'.format(total))
if __name__ == '__main__':
    FILE_PATH = sys.argv[1]
    # Full EmotionX label set. (A narrower 4-label list previously assigned
    # here was dead code -- it was immediately overwritten -- and is removed.)
    labels = ["non-neutral", "joy", "sadness", "surprise", "anger", "fear", "disgust", "neutral"]
    process_stats(FILE_PATH, labels)
| import sys
import json
import os
def process_stats(file_path, labels):
    """Print, for each label, its count and fraction over all utterances in
    the JSON dialogue file at ``file_path``, followed by the grand total.

    Raises KeyError for an emotion not in ``labels`` and ZeroDivisionError
    for an empty corpus (matching the original behaviour).
    """
    with open(file_path, 'r') as source_file:
        dialogues = json.load(source_file)
    stats = dict.fromkeys(labels, 0)
    for dialogue in dialogues:
        for utterance in dialogue:
            stats[utterance['emotion']] += 1
    total = sum(stats[label] for label in labels)
    for label in labels:
        print("{}\t{}\t{}".format(label, stats[label], stats[label] / float(total)))
    print('Total = {}'.format(total))
if __name__ == '__main__':
    FILE_PATH = sys.argv[1]
    # Full EmotionX label set. (A narrower 4-label list previously assigned
    # here was dead code -- it was immediately overwritten -- and is removed.)
    labels = ["non-neutral", "joy", "sadness", "surprise", "anger", "fear", "disgust", "neutral"]
    process_stats(FILE_PATH, labels)
python/experiments/sing_only/tools/ioutils.py | boyuanzheng010/friends_semeval | 39 | 6615602 | import json
from experiments.sing_only.tools.mention import SingOnlyMentionNode
from structure.nodes import TokenNode
from structure.transcripts import Utterance, Scene, Episode
from util import idutils
# English plural pronoun surface forms; their consumer is not visible in this
# chunk -- presumably used to mark plural mentions (TODO confirm).
plural_forms = ["we", "us", "our", "ours", "ourselves", "yourselves", "they", "them", "their", "theirs", "themselves"]
class SpliceReader:
    def __init__(self):
        # Running mention-id counter; presumably advanced when mention nodes
        # are created (parse_mention_nodes) -- not visible in this chunk.
        self.mid = 0
def read_season_json(self, json_path):
season_mentions = []
with open(json_path, "r") as fin:
season_json = json.load(fin)
episode_jsons = season_json["episodes"]
episodes = [self.read_episode_json(episode_json, season_mentions)
for episode_json in episode_jsons]
for i in range(len(episodes) - 1):
episodes[i + 1]._previous = episodes[i]
episodes[i]._next = episodes[i + 1]
self.assign_metadata(episodes)
return episodes, season_mentions
def read_episode_json(self, episode_json, season_mentions):
episode_id = episode_json["episode_id"]
episode_num = idutils.parse_episode_id(episode_id)[-1]
scene_jsons = episode_json["scenes"]
scenes = [self.read_scene_json(scene_json, season_mentions)
for scene_json in scene_jsons]
for i in range(len(scenes) - 1):
scenes[i + 1]._previous = scenes[i]
scenes[i]._next = scenes[i + 1]
return Episode(episode_num, scenes)
def read_scene_json(self, scene_json, season_mentions):
scene_id = scene_json["scene_id"]
scene_num = idutils.parse_scene_id(scene_id)[-1]
utterance_jsons = scene_json["utterances"]
utterances = []
for i, utterance_json in enumerate(utterance_jsons):
utterance = self.read_utterance_json(utterance_json, season_mentions)
utterances.append(utterance)
for i in range(len(utterances) - 1):
utterances[i + 1]._previous = utterances[i]
utterances[i]._next = utterances[i + 1]
return Scene(scene_num, utterances)
def read_utterance_json(self, utterance_json, season_mentions):
speakers = utterance_json["speakers"]
word_forms = utterance_json["tokens"]
pos_tags = utterance_json["part_of_speech_tags"]
dep_tags = utterance_json["dependency_tags"]
dep_heads = utterance_json["dependency_heads"]
ner_tags = utterance_json["named_entity_tags"]
ref_tags = utterance_json["character_entities"]
tokens_all = self.parse_token_nodes(word_forms, pos_tags, dep_tags, dep_heads, ner_tags)
self.parse_mention_nodes(tokens_all, ref_tags, season_mentions)
return Utterance(speakers, statements=tokens_all)
def parse_token_nodes(self, word_forms, pos_tags, dep_tags, dep_heads, ner_tags):
tokens_all = []
# sentence
for word_s, pos_s, dep_s, h_dep_s, ner_s in zip(word_forms, pos_tags, dep_tags, dep_heads, ner_tags):
tokens = []
for idx, word, pos, dep, ner in zip(range(len(word_s)), word_s, pos_s, dep_s, ner_s):
token = TokenNode(idx, word, pos, ner, dep)
tokens.append(token)
for idx, hid in enumerate(h_dep_s):
tokens[idx].dep_head = tokens[hid - 1] if hid > 0 else None
tokens_all.append(tokens)
return tokens_all
def parse_mention_nodes(self, tokens, referents, season_mentions):
for token_s, ref_s in zip(tokens, referents):
# condensed referent
for condensed_mrefs in ref_s:
si, ei = condensed_mrefs[0], condensed_mrefs[1]
mrefs = condensed_mrefs[2:]
# remove Non-Entity referents b/c they don't refer to characters
# remove plural mentions b/c this tests only singular mentions
if mrefs == ["Non-Entity"] or len(mrefs) > 1 or token_s[si] in plural_forms:
continue
mention = SingOnlyMentionNode(self.mid, token_s[si:ei], mrefs[0])
season_mentions.append(mention)
def assign_metadata(self, episodes):
for episode in episodes:
for scene in episode.scenes:
scene._episode = episode
for utterance in scene.utterances:
utterance._scene = scene
for sentence in utterance.statements:
for token in sentence:
token._episode = episode
token._scene = scene
token._utterance = utterance
class StateWriter(object):
    """Serializes coreference states as "mention - gold / system" lines."""

    def __init__(self):
        # Output stream; populated by open_file().
        self.fout = None

    def open_file(self, file_path):
        """Open *file_path* for writing and keep the handle on the instance."""
        self.fout = open(file_path, "w")

    def write_states(self, states):
        """Write a header line, then every state, then close the stream."""
        self.fout.write("Mention/Gold/System\n\n")
        for state in states:
            self.write_state(state)
        self.fout.close()

    def write_state(self, state):
        """Write one line per mention: its text, gold referent, system referent."""
        for mention in state:
            line = "%s - %s / %s\n" % (str(mention), str(mention.gold_ref), str(mention.auto_ref))
            self.fout.write(line)
| import json
from experiments.sing_only.tools.mention import SingOnlyMentionNode
from structure.nodes import TokenNode
from structure.transcripts import Utterance, Scene, Episode
from util import idutils
plural_forms = ["we", "us", "our", "ours", "ourselves", "yourselves", "they", "them", "their", "theirs", "themselves"]
class SpliceReader:
    """Parses a season's transcript JSON into Episode/Scene/Utterance objects.

    While reading, every singular character mention found in the utterances
    is appended to a flat ``season_mentions`` list that is threaded through
    the per-level reader methods.
    """

    def __init__(self):
        # Mention id passed to SingOnlyMentionNode.
        # NOTE(review): this counter is never incremented anywhere in this
        # class, so every mention is created with id 0 -- confirm whether ids
        # are (re)assigned elsewhere or this is a latent bug.
        self.mid = 0

    def read_season_json(self, json_path):
        """Load a season JSON file and return ``(episodes, season_mentions)``.

        Consecutive episodes are doubly linked via ``_previous``/``_next``.
        """
        season_mentions = []
        with open(json_path, "r") as fin:
            season_json = json.load(fin)
        episode_jsons = season_json["episodes"]
        episodes = [self.read_episode_json(episode_json, season_mentions)
                    for episode_json in episode_jsons]
        # Link consecutive episodes both ways.
        for i in range(len(episodes) - 1):
            episodes[i + 1]._previous = episodes[i]
            episodes[i]._next = episodes[i + 1]
        self.assign_metadata(episodes)
        return episodes, season_mentions

    def read_episode_json(self, episode_json, season_mentions):
        """Build one Episode (with doubly linked scenes) from its JSON dict."""
        episode_id = episode_json["episode_id"]
        episode_num = idutils.parse_episode_id(episode_id)[-1]
        scene_jsons = episode_json["scenes"]
        scenes = [self.read_scene_json(scene_json, season_mentions)
                  for scene_json in scene_jsons]
        # Link consecutive scenes both ways.
        for i in range(len(scenes) - 1):
            scenes[i + 1]._previous = scenes[i]
            scenes[i]._next = scenes[i + 1]
        return Episode(episode_num, scenes)

    def read_scene_json(self, scene_json, season_mentions):
        """Build one Scene (with doubly linked utterances) from its JSON dict."""
        scene_id = scene_json["scene_id"]
        scene_num = idutils.parse_scene_id(scene_id)[-1]
        utterance_jsons = scene_json["utterances"]
        utterances = []
        for i, utterance_json in enumerate(utterance_jsons):
            utterance = self.read_utterance_json(utterance_json, season_mentions)
            utterances.append(utterance)
        # Link consecutive utterances both ways.
        for i in range(len(utterances) - 1):
            utterances[i + 1]._previous = utterances[i]
            utterances[i]._next = utterances[i + 1]
        return Scene(scene_num, utterances)

    def read_utterance_json(self, utterance_json, season_mentions):
        """Build one Utterance and harvest its singular character mentions.

        The token-level annotations (POS, dependency, NER) arrive as parallel
        per-sentence lists in the JSON.
        """
        speakers = utterance_json["speakers"]
        word_forms = utterance_json["tokens"]
        pos_tags = utterance_json["part_of_speech_tags"]
        dep_tags = utterance_json["dependency_tags"]
        dep_heads = utterance_json["dependency_heads"]
        ner_tags = utterance_json["named_entity_tags"]
        ref_tags = utterance_json["character_entities"]
        tokens_all = self.parse_token_nodes(word_forms, pos_tags, dep_tags, dep_heads, ner_tags)
        self.parse_mention_nodes(tokens_all, ref_tags, season_mentions)
        return Utterance(speakers, statements=tokens_all)

    def parse_token_nodes(self, word_forms, pos_tags, dep_tags, dep_heads, ner_tags):
        """Turn the parallel annotation lists into TokenNode lists, one per sentence."""
        tokens_all = []
        # sentence
        for word_s, pos_s, dep_s, h_dep_s, ner_s in zip(word_forms, pos_tags, dep_tags, dep_heads, ner_tags):
            tokens = []
            for idx, word, pos, dep, ner in zip(range(len(word_s)), word_s, pos_s, dep_s, ner_s):
                token = TokenNode(idx, word, pos, ner, dep)
                tokens.append(token)
            # Heads appear to be 1-based with <= 0 meaning "no head"/root,
            # given the hid - 1 / hid > 0 handling here.
            for idx, hid in enumerate(h_dep_s):
                tokens[idx].dep_head = tokens[hid - 1] if hid > 0 else None
            tokens_all.append(tokens)
        return tokens_all

    def parse_mention_nodes(self, tokens, referents, season_mentions):
        """Collect singular character mentions into *season_mentions*.

        Each condensed referent entry is ``[start, end, ref1, ref2, ...]``
        (token span plus one or more referent names).
        """
        for token_s, ref_s in zip(tokens, referents):
            # condensed referent
            for condensed_mrefs in ref_s:
                si, ei = condensed_mrefs[0], condensed_mrefs[1]
                mrefs = condensed_mrefs[2:]
                # remove Non-Entity referents b/c they don't refer to characters
                # remove plural mentions b/c this tests only singular mentions
                # NOTE(review): ``token_s[si] in plural_forms`` compares a
                # TokenNode against plain strings -- presumably TokenNode
                # defines equality against its word form; verify.
                if mrefs == ["Non-Entity"] or len(mrefs) > 1 or token_s[si] in plural_forms:
                    continue
                mention = SingOnlyMentionNode(self.mid, token_s[si:ei], mrefs[0])
                season_mentions.append(mention)

    def assign_metadata(self, episodes):
        """Backfill episode/scene/utterance back-references onto every token."""
        for episode in episodes:
            for scene in episode.scenes:
                scene._episode = episode
                for utterance in scene.utterances:
                    utterance._scene = scene
                    for sentence in utterance.statements:
                        for token in sentence:
                            token._episode = episode
                            token._scene = scene
                            token._utterance = utterance
class StateWriter(object):
    """Serializes coreference states as "mention - gold / system" lines."""

    def __init__(self):
        # Output stream; populated by open_file().
        self.fout = None

    def open_file(self, file_path):
        """Open *file_path* for writing and keep the handle on the instance."""
        self.fout = open(file_path, "w")

    def write_states(self, states):
        """Write a header line, then every state, then close the stream."""
        self.fout.write("Mention/Gold/System\n\n")
        for state in states:
            self.write_state(state)
        self.fout.close()

    def write_state(self, state):
        """Write one line per mention: its text, gold referent, system referent."""
        for mention in state:
            line = "%s - %s / %s\n" % (str(mention), str(mention.gold_ref), str(mention.auto_ref))
            self.fout.write(line)
| en | 0.926818 | # sentence # condensed referent # remove Non-Entity referents b/c they don't refer to characters # remove plural mentions b/c this tests only singular mentions | 2.591151 | 3 |
topology_shape_metrics/TSM.py | MobiZaman/orthogonal-drawing-algorithm | 0 | 6615603 | """TSM means topology-shape-metrics, one approach for generating orthogonal layout.
"""
from topology_shape_metrics.planarization import Planarization
from topology_shape_metrics.orthogonalization import Orthogonalization
from topology_shape_metrics.compaction import Compaction
from topology_shape_metrics.utils import number_of_cross, overlap_nodes, overlay_edges
import networkx as nx
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
class TSM:
    """Topology-shape-metrics pipeline driver.

    Runs planarization -> orthogonalization -> compaction on the input graph
    and keeps the resulting (possibly augmented) graph and node positions.
    """

    def __init__(self, G, init_pos=None, checkit=True, uselp=False):
        # Optionally validate the input graph before running the pipeline.
        if checkit:
            TSM.precheck(G, init_pos)
        planar = Planarization(G, init_pos)
        ortho = Orthogonalization(planar, uselp)
        compa = Compaction(ortho)
        # self.G != G, it may include additional bend nodes
        self.G = compa.planar.G
        self.pos = compa.pos

    def postcheck(self):
        """Assert the layout is orthogonal: every edge is axis-aligned."""
        for u, v in self.G.edges:
            assert self.pos[u][0] == self.pos[v][0] or self.pos[u][1] == self.pos[v][1]

    def display(self):
        """Draw the layout with matplotlib.

        Bend (dummy) nodes are grey; overlapping nodes and overlaid edges are
        highlighted in red.
        """
        # Dummy bend nodes are represented as ('bend', ...) tuples.
        bend_nodes = {node for node in self.G.nodes if type(node) == tuple and node[0] == 'bend'}
        draw_nodes_kwds = {'G': self.G, 'pos': self.pos, 'node_size': 15, "edgecolors": 'black'}
        nx.draw_networkx_nodes(node_color='white', **draw_nodes_kwds)
        # bend nodes(dummy nodes, not exist in original graph)
        nx.draw_networkx_nodes(nodelist=bend_nodes, node_color='grey', **draw_nodes_kwds)
        # overlap nodes
        nx.draw_networkx_nodes(nodelist=overlap_nodes(
            self.G, self.pos), node_color="red", **draw_nodes_kwds)
        # all edges
        nx.draw_networkx_edges(self.G, self.pos)
        # overlay edges
        nx.draw_networkx_edges(
            self.G, self.pos, edgelist=overlay_edges(self.G, self.pos), edge_color='red')
        red_patch = mpatches.Patch(color='red', label='overlay')
        grey_patch = mpatches.Patch(color='grey', label='bend node')
        plt.legend(handles=[red_patch, grey_patch])

    @staticmethod
    def precheck(G, pos=None):
        """Raise if G cannot be laid out.

        Rejects: max degree > 4, self-loops, disconnected graphs, non-planar
        graphs (when no *pos* is given) or crossing edges (when *pos* is given).
        """
        if max(degree for node, degree in G.degree) > 4:
            raise Exception(
                "Max node degree larger than 4, which is not supported currently")
        if nx.number_of_selfloops(G) > 0:
            raise Exception("G contains selfloop")
        if not nx.is_connected(G):
            raise Exception("G is not a connected graph")
        if pos is None:
            is_planar, _ = nx.check_planarity(G)
            if not is_planar:
                raise Exception("G is not a planar graph")
        else:
            if number_of_cross(G, pos) > 0:
                raise Exception("There are cross edges in pos")
| """TSM means topology-shape-metrics, one approach for generating orthogonal layout.
"""
from topology_shape_metrics.planarization import Planarization
from topology_shape_metrics.orthogonalization import Orthogonalization
from topology_shape_metrics.compaction import Compaction
from topology_shape_metrics.utils import number_of_cross, overlap_nodes, overlay_edges
import networkx as nx
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
class TSM:
    """Topology-shape-metrics pipeline driver.

    Runs planarization -> orthogonalization -> compaction on the input graph
    and keeps the resulting (possibly augmented) graph and node positions.
    """

    def __init__(self, G, init_pos=None, checkit=True, uselp=False):
        # Optionally validate the input graph before running the pipeline.
        if checkit:
            TSM.precheck(G, init_pos)
        planar = Planarization(G, init_pos)
        ortho = Orthogonalization(planar, uselp)
        compa = Compaction(ortho)
        # self.G != G, it may include additional bend nodes
        self.G = compa.planar.G
        self.pos = compa.pos

    def postcheck(self):
        """Assert the layout is orthogonal: every edge is axis-aligned."""
        for u, v in self.G.edges:
            assert self.pos[u][0] == self.pos[v][0] or self.pos[u][1] == self.pos[v][1]

    def display(self):
        """Draw the layout with matplotlib.

        Bend (dummy) nodes are grey; overlapping nodes and overlaid edges are
        highlighted in red.
        """
        # Dummy bend nodes are represented as ('bend', ...) tuples.
        bend_nodes = {node for node in self.G.nodes if type(node) == tuple and node[0] == 'bend'}
        draw_nodes_kwds = {'G': self.G, 'pos': self.pos, 'node_size': 15, "edgecolors": 'black'}
        nx.draw_networkx_nodes(node_color='white', **draw_nodes_kwds)
        # bend nodes(dummy nodes, not exist in original graph)
        nx.draw_networkx_nodes(nodelist=bend_nodes, node_color='grey', **draw_nodes_kwds)
        # overlap nodes
        nx.draw_networkx_nodes(nodelist=overlap_nodes(
            self.G, self.pos), node_color="red", **draw_nodes_kwds)
        # all edges
        nx.draw_networkx_edges(self.G, self.pos)
        # overlay edges
        nx.draw_networkx_edges(
            self.G, self.pos, edgelist=overlay_edges(self.G, self.pos), edge_color='red')
        red_patch = mpatches.Patch(color='red', label='overlay')
        grey_patch = mpatches.Patch(color='grey', label='bend node')
        plt.legend(handles=[red_patch, grey_patch])

    @staticmethod
    def precheck(G, pos=None):
        """Raise if G cannot be laid out.

        Rejects: max degree > 4, self-loops, disconnected graphs, non-planar
        graphs (when no *pos* is given) or crossing edges (when *pos* is given).
        """
        if max(degree for node, degree in G.degree) > 4:
            raise Exception(
                "Max node degree larger than 4, which is not supported currently")
        if nx.number_of_selfloops(G) > 0:
            raise Exception("G contains selfloop")
        if not nx.is_connected(G):
            raise Exception("G is not a connected graph")
        if pos is None:
            is_planar, _ = nx.check_planarity(G)
            if not is_planar:
                raise Exception("G is not a planar graph")
        else:
            if number_of_cross(G, pos) > 0:
                raise Exception("There are cross edges in pos")
| en | 0.902878 | TSM means topology-shape-metrics, one approach for generating orthogonal layout. # self.G != G, it may include additional bend nodes # bend nodes(dummy nodes, not exist in original graph) # overlap nodes # all edges # overlay edges | 3.071496 | 3 |
lsf/plugins/user/date.py | unknownkz/LynxSuperFederation | 7 | 6615604 | <gh_stars>1-10
import calendar
from datetime import datetime as wkt
from telegram import Update, ParseMode
from telegram.ext import CallbackContext
from ... import TimeZone
from ..commander import Lynxcmd
@Lynxcmd("calendar")
def tanggaldanwaktu(update: Update, context: CallbackContext):
lyn = context.bot
message = update.effective_message
m = wkt.now().month
y = wkt.now().year
d = wkt.now(TimeZone).strftime("Date : %d/%m/%Y\nTime : %H:%M WIB")
k = calendar.month(y, m, 2, 1)
bulan_waktu = (
f"<strong><i>The calendar for this month is:</i></strong>\n\n" f"<code>{k}</code>\n\n" f"<code>{d}</code>"
)
message.reply_text(
bulan_waktu,
parse_mode=ParseMode.HTML,
)
__mod_name__ = "Calendar"
__help__ = """
*Calendar*
Get current date, time and month information.
• /calendar
Date information is for one month only.
( TimeZone : UTC+07:00 (ICT) )
"""
| import calendar
from datetime import datetime as wkt
from telegram import Update, ParseMode
from telegram.ext import CallbackContext
from ... import TimeZone
from ..commander import Lynxcmd
@Lynxcmd("calendar")
def tanggaldanwaktu(update: Update, context: CallbackContext):
lyn = context.bot
message = update.effective_message
m = wkt.now().month
y = wkt.now().year
d = wkt.now(TimeZone).strftime("Date : %d/%m/%Y\nTime : %H:%M WIB")
k = calendar.month(y, m, 2, 1)
bulan_waktu = (
f"<strong><i>The calendar for this month is:</i></strong>\n\n" f"<code>{k}</code>\n\n" f"<code>{d}</code>"
)
message.reply_text(
bulan_waktu,
parse_mode=ParseMode.HTML,
)
# Plugin registration metadata consumed by the bot's module loader.
__mod_name__ = "Calendar"
__help__ = """
*Calendar*
Get current date, time and month information.
• /calendar
Date information is for one month only.
( TimeZone : UTC+07:00 (ICT) )
""" | en | 0.565168 | *Calendar* Get current date, time and month information. • /calendar Date information is for one month only. ( TimeZone : UTC+07:00 (ICT) ) | 2.866666 | 3 |
test.py | smcl/grf | 0 | 6615605 | <reponame>smcl/grf
#!/usr/bin/python
import random
from pprint import pprint
from grf import (
ClassifiedObject,
DecisionTreeGenerator,
DecisionTreeEnv,
Mutator
)
class Iris(ClassifiedObject):
    """One iris flower record; all four measurements are stored as floats."""
    def __init__(self, sepal_length, sepal_width, petal_length, petal_width):
        # Coerce every measurement so CSV string fields become numeric.
        names = ("sepal_length", "sepal_width", "petal_length", "petal_width")
        values = (sepal_length, sepal_width, petal_length, petal_width)
        for name, value in zip(names, values):
            setattr(self, name, float(value))
def parse_data(filename):
    """Parse the UCI iris CSV into a list of (Iris, classification) pairs.

    Fixes vs. the original: the file handle is now closed even on error
    (``with`` statement), and lines are stripped *before* the emptiness
    check -- previously a trailing newline-only line passed ``if line:``
    and crashed the 5-way unpack.
    """
    data = []
    with open(filename) as f:
        for raw_line in f:
            line = raw_line.strip()
            if not line:
                continue
            (sepal_length, sepal_width, petal_length, petal_width, classification) = line.split(",")
            data.append((Iris(sepal_length, sepal_width, petal_length, petal_width), classification))
    return data
# Load the full iris data set and build the GA machinery around it.
iris_data = parse_data("./iris.data")
#---------------------------------------
# usage - need to abstract fitness into own class
env = DecisionTreeEnv(iris_data)
dtg = DecisionTreeGenerator(env)
from copy import deepcopy
# Population size and number of hill-climbing generations.
num_trees = 40
num_generations = 1000
mut = Mutator(env)
# Initial random population of decision trees.
dt_population = [ dtg.generate() for i in range(num_trees) ]
# Random train/verify split: 100 training rows; verify keeps every row not
# equal to a sampled training row.
train_data = random.sample(iris_data, 100) # should be something like random.sample(test_data, 100)
verify_data = [ d for d in iris_data if d not in train_data ]
# At least a quarter of the population is replaced each generation.
cull_threshold = int(len(dt_population) * 0.25)
def calc_fitness(dt, test_data):
    """Score *dt* in place: fitness = number of correctly classified samples.

    *test_data* is a sequence of (input, expected_label) pairs.
    """
    dt.fitness = sum(
        1 for sample in test_data if dt.classify(sample[0]) == sample[1]
    )
def print_stats(dt_pop, recalc):
    """Print average/best/worst fitness of the population.

    When *recalc* is true, first sorts *dt_pop* in place, ascending by
    fitness (callers rely on this side effect).
    """
    if recalc:
        dt_pop.sort(key=lambda tree: tree.fitness)
    scores = [tree.fitness for tree in dt_pop]
    avg = sum(scores) / float(len(dt_pop))
    print("avg_fitness = %d" % avg)
    print("best_fitness = %d" % max(scores))
    print("worst_fitness = %d" % min(scores))
for generation in range(num_generations):
    # Hill-climbing step: mutate a copy of each tree and keep the mutant if
    # it scores better on the training set.
    for idx, dt in enumerate(dt_population):
        dt_mut = deepcopy(dt)
        mut.mutate(dt_mut)
        calc_fitness(dt, train_data)
        calc_fitness(dt_mut, train_data)
        if dt_mut.fitness > dt.fitness:
            # BUG FIX: the original rebound the loop variable ("dt = dt_mut"),
            # which never stored the improved mutant back into the population.
            dt_population[idx] = dt_mut
    # Best trees first, so the slice deleted below removes the worst ones.
    dt_population.sort(key=lambda tree: tree.fitness, reverse=True)
    # Replace at least the bottom quarter, and every tree with zero fitness.
    useless_count = len([x for x in dt_population if not x.fitness])
    to_cull = max(cull_threshold, useless_count)
    del dt_population[-to_cull:]
    dt_population += [dtg.generate() for _ in range(to_cull)]
print("completed")
print_stats(dt_population, True)
# print_stats(..., True) re-sorts ascending, so the fittest tree is last.
best = dt_population[-1]
calc_fitness(best, verify_data)
| #!/usr/bin/python
import random
from pprint import pprint
from grf import (
ClassifiedObject,
DecisionTreeGenerator,
DecisionTreeEnv,
Mutator
)
class Iris(ClassifiedObject):
    """One iris flower record; all four measurements are stored as floats."""
    def __init__(self, sepal_length, sepal_width, petal_length, petal_width):
        # Coerce every measurement so CSV string fields become numeric.
        names = ("sepal_length", "sepal_width", "petal_length", "petal_width")
        values = (sepal_length, sepal_width, petal_length, petal_width)
        for name, value in zip(names, values):
            setattr(self, name, float(value))
def parse_data(filename):
    """Parse the UCI iris CSV into a list of (Iris, classification) pairs.

    Fixes vs. the original: the file handle is now closed even on error
    (``with`` statement), and lines are stripped *before* the emptiness
    check -- previously a trailing newline-only line passed ``if line:``
    and crashed the 5-way unpack.
    """
    data = []
    with open(filename) as f:
        for raw_line in f:
            line = raw_line.strip()
            if not line:
                continue
            (sepal_length, sepal_width, petal_length, petal_width, classification) = line.split(",")
            data.append((Iris(sepal_length, sepal_width, petal_length, petal_width), classification))
    return data
# Load the full iris data set and build the GA machinery around it.
iris_data = parse_data("./iris.data")
#---------------------------------------
# usage - need to abstract fitness into own class
env = DecisionTreeEnv(iris_data)
dtg = DecisionTreeGenerator(env)
from copy import deepcopy
# Population size and number of hill-climbing generations.
num_trees = 40
num_generations = 1000
mut = Mutator(env)
# Initial random population of decision trees.
dt_population = [ dtg.generate() for i in range(num_trees) ]
# Random train/verify split: 100 training rows; verify keeps every row not
# equal to a sampled training row.
train_data = random.sample(iris_data, 100) # should be something like random.sample(test_data, 100)
verify_data = [ d for d in iris_data if d not in train_data ]
# At least a quarter of the population is replaced each generation.
cull_threshold = int(len(dt_population) * 0.25)
def calc_fitness(dt, test_data):
    """Score *dt* in place: fitness = number of correctly classified samples.

    *test_data* is a sequence of (input, expected_label) pairs.
    """
    dt.fitness = sum(
        1 for sample in test_data if dt.classify(sample[0]) == sample[1]
    )
def print_stats(dt_pop, recalc):
    """Print average/best/worst fitness of the population.

    When *recalc* is true, first sorts *dt_pop* in place, ascending by
    fitness (callers rely on this side effect).
    """
    if recalc:
        dt_pop.sort(key=lambda tree: tree.fitness)
    scores = [tree.fitness for tree in dt_pop]
    avg = sum(scores) / float(len(dt_pop))
    print("avg_fitness = %d" % avg)
    print("best_fitness = %d" % max(scores))
    print("worst_fitness = %d" % min(scores))
for generation in range(num_generations):
    # Hill-climbing step: mutate a copy of each tree and keep the mutant if
    # it scores better on the training set.
    for idx, dt in enumerate(dt_population):
        dt_mut = deepcopy(dt)
        mut.mutate(dt_mut)
        calc_fitness(dt, train_data)
        calc_fitness(dt_mut, train_data)
        if dt_mut.fitness > dt.fitness:
            # BUG FIX: the original rebound the loop variable ("dt = dt_mut"),
            # which never stored the improved mutant back into the population.
            dt_population[idx] = dt_mut
    # Best trees first, so the slice deleted below removes the worst ones.
    dt_population.sort(key=lambda tree: tree.fitness, reverse=True)
    # Replace at least the bottom quarter, and every tree with zero fitness.
    useless_count = len([x for x in dt_population if not x.fitness])
    to_cull = max(cull_threshold, useless_count)
    del dt_population[-to_cull:]
    dt_population += [dtg.generate() for _ in range(to_cull)]
print("completed")
print_stats(dt_population, True)
# print_stats(..., True) re-sorts ascending, so the fittest tree is last.
best = dt_population[-1]
calc_fitness(best, verify_data) | en | 0.430994 | #!/usr/bin/python #--------------------------------------- # usage - need to abstract fitness into own class # should be something like random.sample(test_data, 100) #print i #print len(dt_population) #print(dt_population[-1].fitness) #print_stats(dt_population, False) #print "culling %d" % (to_cull) #print "len(dt_population) = %d" % (len(dt_population)) | 2.851102 | 3 |
evaluation_on_simulated_150mers.py | 627oldcat/CNN_Virus | 0 | 6615606 | <gh_stars>0
import keras
from keras import models
from keras.models import Model
from keras.models import load_model
from keras import backend as K
from keras.callbacks import EarlyStopping, TensorBoard
import random
import collections
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from preprocessing import get_kmer_from_150mer,get_params_150mer,DataGenerator_from_150mer
from voting import get_final_result
# Benchmarks the pretrained CNN on simulated 150-mer reads. The model emits
# one (label, location) prediction per k-mer; predictions are grouped 101 per
# read and reduced to a single read-level prediction by voting.
#path for testing file
filepath_train="./data/ICTV_150mer_benchmarking"
#path for trained model
filepath_model="./data/pretrained_model.h5"
# Nucleotide -> integer encoding.
# NOTE(review): d_nucl is not referenced below -- presumably consumed inside
# the preprocessing helpers; confirm before removing.
d_nucl={"A":0,"C":1,"G":2,"T":3,"N":4}
f_matrix,y_true,f_pos=get_kmer_from_150mer(filepath_train)
params=get_params_150mer()
testing_generator = DataGenerator_from_150mer(f_matrix, **params)
model=load_model(filepath_model)
hist = model.predict_generator(testing_generator,
                               verbose=1
                               )
# hist[0]/hist[1]: per-k-mer probability vectors -- argmax gives the class /
# location index, max gives its confidence.
predicted_labels_list=[i.argmax(axis=-1) for i in hist[0]]
predicted_prob_list=[max(i) for i in hist[0]]
predicted_loc_list=[i.argmax(axis=-1) for i in hist[1]]
predicted_loc_prob_list=[max(i) for i in hist[1]]
final_label=[]
final_loc=[]
# Each read contributes 101 consecutive k-mer predictions; vote over each
# group to obtain one label/location per read.
num_iters=int(len(predicted_labels_list)*1.0/101)
for i in range(0,num_iters):
    tmp_label,tmp_loc=get_final_result(predicted_labels_list[i*101:(i+1)*101],predicted_prob_list[i*101:(i+1)*101],predicted_loc_list[i*101:(i+1)*101],predicted_loc_prob_list[i*101:(i+1)*101])
    final_label.append(str(tmp_label))
    final_loc.append(str(tmp_loc))
# Score the voted read-level labels against the gold labels.
y_pred=final_label
acc_kappa=cohen_kappa_score(y_true, y_pred)
precision_macro=precision_score(y_true, y_pred, average='macro')
precision_micro=precision_score(y_true, y_pred, average='micro')
recall_macro=recall_score(y_true, y_pred, average='macro')
recall_micro=recall_score(y_true, y_pred, average='micro')
f1_macro=f1_score(y_true, y_pred, average='macro')
f1_micro=f1_score(y_true, y_pred, average='micro')
print("kappa is %f" % acc_kappa)
print("precision_macro is %f" % precision_macro)
print("precision_micro is %f" % precision_micro)
print("recall_macro is %f" % recall_macro)
print("recall_micro is %f" % recall_micro)
print("f1_macro is %f" % f1_macro)
print("f1_micro is %f" % f1_micro)
| import keras
from keras import models
from keras.models import Model
from keras.models import load_model
from keras import backend as K
from keras.callbacks import EarlyStopping, TensorBoard
import random
import collections
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from preprocessing import get_kmer_from_150mer,get_params_150mer,DataGenerator_from_150mer
from voting import get_final_result
# Benchmarks the pretrained CNN on simulated 150-mer reads. The model emits
# one (label, location) prediction per k-mer; predictions are grouped 101 per
# read and reduced to a single read-level prediction by voting.
#path for testing file
filepath_train="./data/ICTV_150mer_benchmarking"
#path for trained model
filepath_model="./data/pretrained_model.h5"
# Nucleotide -> integer encoding.
# NOTE(review): d_nucl is not referenced below -- presumably consumed inside
# the preprocessing helpers; confirm before removing.
d_nucl={"A":0,"C":1,"G":2,"T":3,"N":4}
f_matrix,y_true,f_pos=get_kmer_from_150mer(filepath_train)
params=get_params_150mer()
testing_generator = DataGenerator_from_150mer(f_matrix, **params)
model=load_model(filepath_model)
hist = model.predict_generator(testing_generator,
                               verbose=1
                               )
# hist[0]/hist[1]: per-k-mer probability vectors -- argmax gives the class /
# location index, max gives its confidence.
predicted_labels_list=[i.argmax(axis=-1) for i in hist[0]]
predicted_prob_list=[max(i) for i in hist[0]]
predicted_loc_list=[i.argmax(axis=-1) for i in hist[1]]
predicted_loc_prob_list=[max(i) for i in hist[1]]
final_label=[]
final_loc=[]
# Each read contributes 101 consecutive k-mer predictions; vote over each
# group to obtain one label/location per read.
num_iters=int(len(predicted_labels_list)*1.0/101)
for i in range(0,num_iters):
    tmp_label,tmp_loc=get_final_result(predicted_labels_list[i*101:(i+1)*101],predicted_prob_list[i*101:(i+1)*101],predicted_loc_list[i*101:(i+1)*101],predicted_loc_prob_list[i*101:(i+1)*101])
    final_label.append(str(tmp_label))
    final_loc.append(str(tmp_loc))
# Score the voted read-level labels against the gold labels.
y_pred=final_label
acc_kappa=cohen_kappa_score(y_true, y_pred)
precision_macro=precision_score(y_true, y_pred, average='macro')
precision_micro=precision_score(y_true, y_pred, average='micro')
recall_macro=recall_score(y_true, y_pred, average='macro')
recall_micro=recall_score(y_true, y_pred, average='micro')
f1_macro=f1_score(y_true, y_pred, average='macro')
f1_micro=f1_score(y_true, y_pred, average='micro')
print("kappa is %f" % acc_kappa)
print("precision_macro is %f" % precision_macro)
print("precision_micro is %f" % precision_micro)
print("recall_macro is %f" % recall_macro)
print("recall_micro is %f" % recall_micro)
print("f1_macro is %f" % f1_macro)
print("f1_micro is %f" % f1_micro) | en | 0.920831 | #path for testing file #path for trained model | 2.20728 | 2 |
setup/load_to_s3.py | jbmadsen/ArXiv-Metadata-ETL | 3 | 6615607 | # Imports
import os
import boto3
import zipfile
import shutil
import progressbar
def connect(region = 'us-east-1'):
    """Create a boto3 client for the S3 service.

    Args:
        region (str, optional): AWS region for the client. Defaults to 'us-east-1'.

    Returns:
        (boto3 client object): an S3 client bound to *region*
    """
    # Single-expression construction; no other state is needed.
    return boto3.client('s3', region_name=region)
def create_bucket(s3_client, bucket_name = 'arxiv-etl'):
    """Create an S3 bucket unless one with the same name already exists.

    Args:
        s3_client (boto3 client object): A boto3 S3 client
        bucket_name (str, optional): The bucket name to create. Defaults to 'arxiv-etl'.
    """
    # Retrieve the list of existing buckets
    response = s3_client.list_buckets()
    # Skip creation if the bucket is already there. (The original kept a
    # 'bucket_exists' flag that was never set to True -- dead code removed.)
    for bucket in response['Buckets']:
        if bucket['Name'] == bucket_name:
            return
    s3_client.create_bucket(Bucket=bucket_name)
def upload_file(s3_client, bucket_name, path, folder_name, file_name):
    """Uploads file to S3 bucket, displaying a progress bar.

    The object is stored under the key ``staging/<folder_name>/<file_name>``.

    Args:
        s3_client (boto3 S3 client): A boto3 S3 client object
        bucket_name (str): S3 bucketname to upload file to
        path (str): Path of file to upload
        folder_name (str): Folder name of file to upload
        file_name (str): Filename of file to upload

    Returns:
        (object): Upload response from boto3 S3 client, or None on error
        (the exception is printed, not re-raised).
    """
    try:
        # https://stackoverflow.com/questions/41827963/track-download-progress-of-s3-file-using-boto3-and-callbacks
        full_name = os.path.join(path, folder_name, file_name)
        print("Uploading:", full_name)
        s3_path = f'staging/{folder_name}/{file_name}'
        # Create progress info sized to the file's byte count.
        statinfo = os.stat(full_name)
        up_progress = progressbar.progressbar.ProgressBar(maxval=statinfo.st_size)
        up_progress.start()
        # Helper function for progress display; boto3 calls it with the number
        # of bytes transferred since the previous callback.
        def upload_progress(chunk):
            up_progress.update(up_progress.currval + chunk)
        # Upload to S3
        response = s3_client.upload_file(full_name, bucket_name, s3_path, Callback=upload_progress)
        # Done
        # NOTE(review): finish() is skipped when upload_file raises, leaving
        # the progress bar line unterminated -- consider a finally block.
        up_progress.finish()
        return response
    except Exception as e:
        print(f"Error: {e}")
        return None
if __name__ == "__main__":
# Configurations
bucket_name = 'arxiv-etl'
arxiv_src = '../data/arxiv.zip'
data_folder_name = '../data/loading/'
# Connect and create bucket
print(f"Connecting to S3 and creating bucket: {bucket_name}")
s3_client = connect()
create_bucket(s3_client, bucket_name)
#Unzip files to new folder
# https://stackoverflow.com/questions/3451111/unzipping-files-in-python
print(f"Extracting data")
with zipfile.ZipFile(arxiv_src, 'r') as zip_ref:
zip_ref.extractall(data_folder_name)
print("Copying data")
# Move files to individual folders
src = '../data/loading/arxiv-metadata-oai-snapshot.json'
dst = '../data/loading/metadata/arxiv-metadata-oai-snapshot.json'
if os.path.exists(src):
os.makedirs(os.path.dirname('../data/loading/metadata/'), exist_ok=True)
shutil.move(src, dst)
src = '../data/loading/authors-parsed.json'
dst = '../data/loading/authors/authors-parsed.json'
if os.path.exists(src):
os.makedirs(os.path.dirname('../data/loading/authors/'), exist_ok=True)
shutil.move(src, dst)
src = '../data/loading/internal-citations.json'
dst = '../data/loading/citations/internal-citations.json'
if os.path.exists(src):
os.makedirs(os.path.dirname('../data/loading/citations/'), exist_ok=True)
shutil.move(src, dst)
src = '../data/subject-classifications.csv'
dst = '../data/loading/classifications/subject-classifications.csv'
if os.path.exists(src):
os.makedirs(os.path.dirname('../data/loading/classifications/'), exist_ok=True)
shutil.move(src, dst)
# Sync data/loaded folder to s3
# https://dev.to/razcodes/how-to-copy-files-to-s3-using-boto3-41fp
directory = os.fsencode(data_folder_name)
print("Starting upload to S3")
for folder in os.listdir(directory):
folder_name = os.fsdecode(folder)
print("Path:", folder_name)
dir = os.fsencode(os.path.join(data_folder_name, folder_name))
for file in os.listdir(dir):
file_name = os.fsdecode(file)
if file_name.endswith(".json") or file_name.endswith(".csv"):
try:
response = upload_file(s3_client, bucket_name, data_folder_name, folder_name, file_name)
if response is not None:
print("HTTPStatusCode:", response['ResponseMetadata']['HTTPStatusCode'])
except Exception as e:
print(f"Error: {e}")
# Delete data/loaded folder
print("Deleting temp folder")
shutil.rmtree(data_folder_name)
print("Done") | # Imports
import os
import boto3
import zipfile
import shutil
import progressbar
def connect(region = 'us-east-1'):
    """Create a boto3 client for the S3 service.

    Args:
        region (str, optional): AWS region for the client. Defaults to 'us-east-1'.

    Returns:
        (boto3 client object): an S3 client bound to *region*
    """
    # Single-expression construction; no other state is needed.
    return boto3.client('s3', region_name=region)
def create_bucket(s3_client, bucket_name = 'arxiv-etl'):
    """Create an S3 bucket unless one with the same name already exists.

    Args:
        s3_client (boto3 client object): A boto3 S3 client
        bucket_name (str, optional): The bucket name to create. Defaults to 'arxiv-etl'.
    """
    # Retrieve the list of existing buckets
    response = s3_client.list_buckets()
    # Skip creation if the bucket is already there. (The original kept a
    # 'bucket_exists' flag that was never set to True -- dead code removed.)
    for bucket in response['Buckets']:
        if bucket['Name'] == bucket_name:
            return
    s3_client.create_bucket(Bucket=bucket_name)
def upload_file(s3_client, bucket_name, path, folder_name, file_name):
    """Uploads file to S3 bucket, displaying a progress bar.

    The object is stored under the key ``staging/<folder_name>/<file_name>``.

    Args:
        s3_client (boto3 S3 client): A boto3 S3 client object
        bucket_name (str): S3 bucketname to upload file to
        path (str): Path of file to upload
        folder_name (str): Folder name of file to upload
        file_name (str): Filename of file to upload

    Returns:
        (object): Upload response from boto3 S3 client, or None on error
        (the exception is printed, not re-raised).
    """
    try:
        # https://stackoverflow.com/questions/41827963/track-download-progress-of-s3-file-using-boto3-and-callbacks
        full_name = os.path.join(path, folder_name, file_name)
        print("Uploading:", full_name)
        s3_path = f'staging/{folder_name}/{file_name}'
        # Create progress info sized to the file's byte count.
        statinfo = os.stat(full_name)
        up_progress = progressbar.progressbar.ProgressBar(maxval=statinfo.st_size)
        up_progress.start()
        # Helper function for progress display; boto3 calls it with the number
        # of bytes transferred since the previous callback.
        def upload_progress(chunk):
            up_progress.update(up_progress.currval + chunk)
        # Upload to S3
        response = s3_client.upload_file(full_name, bucket_name, s3_path, Callback=upload_progress)
        # Done
        # NOTE(review): finish() is skipped when upload_file raises, leaving
        # the progress bar line unterminated -- consider a finally block.
        up_progress.finish()
        return response
    except Exception as e:
        print(f"Error: {e}")
        return None
if __name__ == "__main__":
    # Configurations
    bucket_name = 'arxiv-etl'
    arxiv_src = '../data/arxiv.zip'
    data_folder_name = '../data/loading/'
    # Connect and create bucket
    print(f"Connecting to S3 and creating bucket: {bucket_name}")
    s3_client = connect()
    create_bucket(s3_client, bucket_name)
    # Unzip files to new folder
    # https://stackoverflow.com/questions/3451111/unzipping-files-in-python
    print(f"Extracting data")
    with zipfile.ZipFile(arxiv_src, 'r') as zip_ref:
        zip_ref.extractall(data_folder_name)
    print("Copying data")
    # Move each extracted file into its own staging sub-folder. The original
    # repeated the same four-line move block per file; the (src, dst) pairs
    # are now data-driven.
    moves = [
        ('../data/loading/arxiv-metadata-oai-snapshot.json',
         '../data/loading/metadata/arxiv-metadata-oai-snapshot.json'),
        ('../data/loading/authors-parsed.json',
         '../data/loading/authors/authors-parsed.json'),
        ('../data/loading/internal-citations.json',
         '../data/loading/citations/internal-citations.json'),
        ('../data/subject-classifications.csv',
         '../data/loading/classifications/subject-classifications.csv'),
    ]
    for src, dst in moves:
        if os.path.exists(src):
            os.makedirs(os.path.dirname(dst), exist_ok=True)
            shutil.move(src, dst)
    # Sync the data/loading folder to S3
    # https://dev.to/razcodes/how-to-copy-files-to-s3-using-boto3-41fp
    directory = os.fsencode(data_folder_name)
    print("Starting upload to S3")
    for folder in os.listdir(directory):
        folder_name = os.fsdecode(folder)
        print("Path:", folder_name)
        # Renamed from 'dir', which shadowed the builtin.
        folder_dir = os.fsencode(os.path.join(data_folder_name, folder_name))
        for file in os.listdir(folder_dir):
            file_name = os.fsdecode(file)
            if file_name.endswith(".json") or file_name.endswith(".csv"):
                try:
                    response = upload_file(s3_client, bucket_name, data_folder_name, folder_name, file_name)
                    if response is not None:
                        print("HTTPStatusCode:", response['ResponseMetadata']['HTTPStatusCode'])
                except Exception as e:
                    print(f"Error: {e}")
    # Delete the temporary data/loading folder
    print("Deleting temp folder")
    shutil.rmtree(data_folder_name)
print("Done") | en | 0.650729 | # Imports Creates a boto3 S3 client Args: region (str, optional): The AWS region to connect to. Defaults to 'us-east-1'. Returns: (boto3 client object): a boto3 S3 client # Create connections Creates an S3 bucket if one does not already exists Args: s3_client (boto3 client object): A boto3 S3 client bucket_name (str, optional): The bucket name to create. Defaults to 'arxiv-etl'. # Retrieve the list of existing buckets # Check if bucket already exists # Create bucket if it doesn't exist Uploads file to S3 bucket Args: s3_client (boto3 S3 client): A boto3 S3 client object bucket_name (str): S3 bucketname to upload file to path (str): Path of file to upload folder_name (str): Folder name of file to upload file_name (str): Filename of file to upload Returns: (object): Upload response from boto3 S3 client # https://stackoverflow.com/questions/41827963/track-download-progress-of-s3-file-using-boto3-and-callbacks # Create progress info # Helper function for progress display # Upload to S3 # Done # Configurations # Connect and create bucket #Unzip files to new folder # https://stackoverflow.com/questions/3451111/unzipping-files-in-python # Move files to individual folders # Sync data/loaded folder to s3 # https://dev.to/razcodes/how-to-copy-files-to-s3-using-boto3-41fp # Delete data/loaded folder | 2.953062 | 3 |
src/query/expander.py | iwasingh/Wikoogle | 8 | 6615608 | <reponame>iwasingh/Wikoogle<filename>src/query/expander.py<gh_stars>1-10
from nltk import pos_tag, word_tokenize, RegexpParser, ngrams, FreqDist
from nltk.collocations import BigramCollocationFinder
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer, PorterStemmer
from preprocessing.analyzer import ThesaurusExpansionAnalyzer, WikimediaAnalyzer
from preprocessing.utils import clean
from enum import Enum
from nltk.tree import Tree
from functools import reduce
import operator
from math import log
from whoosh.analysis import StemmingAnalyzer
from searching.fragmenter import Fragmenter, PhraseTokenizer
import re
import math
from pywsd import disambiguate, adapted_lesk
# from pke.unsupervised import TopicRank
def get_cosine(vec1, vec2):
    """Return the cosine similarity of two sparse term-weight vectors.

    Args:
        vec1 (dict): term -> weight mapping.
        vec2 (dict): term -> weight mapping.

    Returns:
        float: cosine similarity, or 0.0 when either vector has zero magnitude
        (avoids division by zero).
    """
    # Only terms present in both vectors contribute to the dot product.
    shared_terms = vec1.keys() & vec2.keys()
    numerator = sum(vec1[t] * vec2[t] for t in shared_terms)
    # Product of the Euclidean norms.
    denominator = (math.sqrt(sum(v ** 2 for v in vec1.values()))
                   * math.sqrt(sum(v ** 2 for v in vec2.values())))
    if not denominator:
        return 0.0
    return float(numerator) / denominator
class POSTag(Enum):
    """Maps Penn Treebank POS tag prefixes (as produced by nltk.pos_tag)
    to WordNet part-of-speech constants, keyed by the tag's first letter."""
    J = wordnet.ADJ   # adjectives: JJ, JJR, JJS
    V = wordnet.VERB  # verbs: VB, VBD, VBG, ...
    N = wordnet.NOUN  # nouns: NN, NNS, NNP, ...
    A = wordnet.ADV   # NOTE(review): NLTK adverb tags start with 'R' (RB*),
                      # so this member never matches in to_wordnet — confirm intent.
    # ALL = 'ALL'

    @classmethod
    def to_wordnet(cls, nltk_pos):
        """Return the member whose name prefixes *nltk_pos* (e.g. 'NN' -> N);
        unknown tags fall back to N (noun)."""
        for pos in cls:
            if nltk_pos.startswith(pos.name):
                return pos
        return cls.N  # TODO: unmatched tags are treated as nouns. Maybe None?
def lemmatizer(tokens):
    """Lemmatize *tokens*, picking each lemma form from the token's WordNet POS."""
    wnl = WordNetLemmatizer()
    lemmas = []
    for token, pos in pos_tag(tokens):
        lemmas.append(wnl.lemmatize(token, POSTag.to_wordnet(pos).value))
    return lemmas
def extract(tokens, tags=None):
    """Return the (word, tag) pairs from pos_tag(*tokens*) whose tag class
    is in *tags* (defaults to adjectives and nouns)."""
    if tags is None:
        tags = [POSTag.J, POSTag.N]
    selected = []
    for pair in pos_tag(tokens):
        if POSTag.to_wordnet(pair[1][0]) in tags:
            selected.append(pair)
    return [pair for pair in selected if pair]
def stemming(tokens):
    """Porter-stem every token and return the stems in order."""
    porter = PorterStemmer()
    stems = []
    for tok in tokens:
        stems.append(porter.stem(tok))
    return stems
def pke_key_phrase_extract(text, n=10):
    """Extract the top *n* stemmed keyphrases from *text* using pke's TopicRank.

    Args:
        text (str): document text to extract keyphrases from.
        n (int): number of highest-scored candidates to return.

    Returns:
        list: (keyphrase, score) pairs from TopicRank, stemmed.
    """
    # The module-level import of TopicRank is commented out at the top of this
    # file, so the original code raised NameError whenever this function was
    # called. Import lazily here so the rest of the module works without pke.
    from pke.unsupervised import TopicRank

    # create a TopicRank extractor
    extractor = TopicRank()
    # English stoplist; normalization via Porter stemming
    extractor.load_document(text,
                            language="en",
                            normalization='stemming')
    # candidates: the longest sequences of nouns and adjectives
    extractor.candidate_selection(pos={'NOUN', 'PROPN', 'ADJ'})
    # random-walk weighting; threshold = minimum similarity for clustering,
    # method = linkage method
    extractor.candidate_weighting(threshold=0.74,
                                  method='average')
    # return the n highest-scored candidates
    return extractor.get_n_best(n=n, stemming=True)
def thesaurus_expand(query, wikimedia, size=3, threshold=4.23):
    """
    WordNet hierarchy
    - hyponyms: concepts that are more specific (immediate), navigate down the tree
    - hypernyms: general concepts, navigate up the hierarchy
    - meronyms: components. For instance a tree has a trunk, ...and so on as meronyms
    - holonyms: things that contain meronyms (i.e. tree)

    Query expansion requires good relevance-feedback methods. Thesaurus-based query
    expansion may decrease performance and has query-drift problems with polysemic
    words. This method picks up keywords from the glosses of the disambiguated
    synsets and uses a Lesk algorithm to disambiguate terms from each other.

    :param query: free-text query string to expand
    :param wikimedia: index wrapper exposing a Whoosh-style ``reader`` used for term statistics
    :param size: NOTE(review): currently unused in this body — confirm intent
    :param threshold: minimum corpus score for an expansion term to be kept
    :return: list of expansion terms scoring at or above *threshold*
    """
    analyzer = ThesaurusExpansionAnalyzer()
    wikimedia_analyzer = WikimediaAnalyzer()
    original_tokens = [i.text for i in analyzer(query)]
    print(original_tokens)
    synonyms = set()

    # Chunk grammar used by noun_groups() to pull noun groups out of each gloss.
    rule = r"""
    NBAR: {<NN>}
          {<JJ>}
          # {<JJS>}
          {<NNS>}
          # {<NNP>}
    """
    synsets = []
    # Disambiguate each query token with adapted Lesk; keep the word, its
    # chosen synset, and the analyzed tokens of the synset's gloss.
    for w, s in disambiguate(" ".join(original_tokens), algorithm=adapted_lesk):
        if s:
            definition = s.definition()
            # NOTE(review): pke_text is never used (pke extraction is disabled).
            pke_text = definition + ' ' + ' '.join(s.lemma_names())
            tokens = [i.text for i in wikimedia_analyzer(definition)]
            synsets.append((w, wordnet.synset(s.name()), tokens))

    # Harvest candidates: noun groups from each gloss, plus the synset's own
    # lemma names, excluding tokens already present in the query.
    for word, sense, definition in synsets:
        if sense:
            synonyms = synonyms.union(noun_groups(word_tokenize(sense.definition()), chunk_size=1, rule=rule))
            text = " ".join([i.name() for i in sense.lemmas()])
            for lemma in wikimedia_analyzer(text):
                if lemma.text not in original_tokens:
                    synonyms.add(lemma.text)

    # Score each candidate against the index: max per-token ratio of total
    # term frequency to document frequency (see calc_syn_score).
    reader = wikimedia.reader
    terms_vec = {}
    for syn in synonyms:
        score = calc_syn_score(syn, reader)
        terms_vec[syn] = score
    ranked_terms = sorted(terms_vec, key=lambda c: terms_vec[c], reverse=True)
    print('***Ranked terms')
    for i in list(map(lambda q: (q, terms_vec[q]), ranked_terms)):
        print(i[0], ' ', i[1], '\n')
    # Keep only candidates whose corpus score reaches the threshold.
    return list(map(lambda q: q[0], filter(lambda v: v[1] >= threshold, terms_vec.items())))
def calc_syn_score(syn, reader):
    """Score a candidate expansion term against the index.

    For each word in *syn*, computes total term frequency divided by document
    frequency in the 'text' field; the term's score is the maximum over its
    words (0 for words absent from the index).

    Args:
        syn (str): candidate term (possibly multi-word).
        reader: index reader exposing ``doc_frequency`` and ``frequency``.

    Returns:
        float: the maximum per-word ratio, or 0 when *syn* tokenizes to
        nothing (the original raised ValueError on ``max([])``).
    """
    term_scores = []
    for word in word_tokenize(syn):
        doc_frequency = reader.doc_frequency('text', word)
        term_frequency = reader.frequency('text', word)
        if doc_frequency != 0:
            term_scores.append(term_frequency / doc_frequency)
        else:
            # Word never indexed: contributes nothing to the score.
            term_scores.append(0)
    return max(term_scores) if term_scores else 0
def noun_groups(tokens, chunk_size=2, analyzer=StemmingAnalyzer(), rule=None):
    """POS-tag *tokens*, chunk them with a regexp grammar, and return the set
    of analyzed noun groups (NBAR chunks), each as a space-joined string.

    Args:
        tokens (list[str]): word tokens to tag and chunk.
        chunk_size (int): NOTE(review): never read in this body — confirm
            whether it was meant to cap group length.
        analyzer: Whoosh analyzer applied to each chunk's words.
            NOTE(review): default is evaluated once at import time and shared
            across calls — confirm StemmingAnalyzer is safe to reuse.
        rule (str | None): optional grammar overriding the default NBAR rule.

    Returns:
        set[str]: analyzed noun groups.
    """
    # Default: a noun/adjective pair terminated by a noun.
    grammar = r"""
    NBAR: {<NN|JJ><|JJ|NN>} # Nouns and Adjectives, terminated with Nouns
    # {<NN>} # If pattern not found just a single NN is ok
    """
    if rule is not None:
        grammar = rule
    cp = RegexpParser(grammar)
    result = cp.parse(pos_tag(tokens))
    nouns = set()
    for chunk in result:
        # Matched chunks come back as Tree nodes; bare (word, tag) leaves are skipped.
        if type(chunk) == Tree:
            if chunk.label() == 'NBAR':
                words = list(map(lambda entry: entry[0], chunk.leaves()))
                # Run the chunk's words through the analyzer (stemming, etc.)
                tokens = analyzer(" ".join(words))
                nouns.add(" ".join([i.text for i in tokens]))
        else:
            continue
    return nouns
class Passage:
    """Deprecated: pairs a document with one of its passages and holds the
    concepts extracted from that passage."""

    def __init__(self, doc, passage):
        self._doc = doc
        self._passage = passage
        # Concepts found in this passage; filled in externally.
        self.concept = []

    def __repr__(self):
        preview = self._passage[0:3]
        length = len(self._passage)
        title = self._doc["title"]
        return f'{preview}...[{length}] [{title}]'
class DocStats:
    """
    In-memory bigram index for text statistics: answers frequency queries for
    single words and (order-insensitive) two-word terms over one document.
    """
    def __init__(self, tokens):
        # Builds both a unigram FreqDist (word_fd) and a bigram finder.
        self._bigram = BigramCollocationFinder.from_words(tokens)

    @staticmethod
    def _score_from_ngram(*args):
        # Passed to score_ngram as the scoring function; returning the first
        # argument yields the raw count — TODO confirm against the
        # BigramCollocationFinder.score_ngram callback signature.
        return args[0]

    def _frequency(self, gram: tuple):
        """Frequency of *gram* in either word order (forward or reversed)."""
        fd_score = self._bigram.score_ngram(self._score_from_ngram, *gram) or 0
        bd_score = self._bigram.score_ngram(self._score_from_ngram, *gram[::-1]) or 0
        return max(fd_score, bd_score)

    def frequency(self, term: str):
        """Frequency of a term: unigram count for single words, otherwise the
        maximum frequency over the term's consecutive word bigrams."""
        grams = [i for i in ngrams(term.split(" "), 2)]
        if len(grams) == 0: return self._bigram.word_fd[term]
        return max([self._frequency(gram) for gram in grams])
def __count_docs_containing(c, docs):
    """Count how many DocStats in *docs* have a positive frequency for *c*."""
    matches = 0
    for doc in docs:
        if doc.frequency(c) > 0:
            matches += 1
    return matches
def prod(products):
    """Return the product of all values in *products*.

    Uses 1 as the multiplicative identity so an empty iterable yields 1
    (the original reduce with no initializer raised TypeError on empty input).
    """
    return reduce(operator.mul, products, 1)
def _calculate_qterm_correlation(query_terms, concept, idf_c, docs):
    """Yield each query term's LCA correlation factor with *concept*.

    For term i: (y + log(f) * idf_c / log(N)) ** idf_i, where f is the
    co-occurrence frequency of the term and the concept summed over all
    documents; when f is 0 the smoothing constant y is yielded instead.

    Args:
        query_terms: iterable of (term, idf_i) pairs.
        concept (str): candidate expansion concept.
        idf_c (float): inverse document frequency of the concept.
        docs (list[DocStats]): per-document frequency statistics.

    Yields:
        float: one factor per query term, multiplied together by the caller.
    """
    for qterm, idf_i in query_terms:
        N = len(docs)
        # y is a smoothing constant (0.1) to avoid zeros in the final product.
        y = 0.1
        # f: sum over docs of term frequency times concept frequency.
        f = sum([doc_stat.frequency(qterm) * doc_stat.frequency(concept) for doc_stat in docs])
        if f == 0:
            yield y
        else:
            yield (y + (log(f) * idf_c) / log(N)) ** idf_i
def lca_expand(query, documents, size=15, passage_size=400, threshold=1.4):
    """
    Implements the Local Context Analysis algorithm to expand the query with the
    top-ranked concepts that maximize similarity to the query:

        sim(q,c) = ∏ (y + (log(f(ci,ki)) * IDFc) / log(n))^IDFi

    where:
        * f(ci, ki) quantifies the correlation between concept c and query term ki,
          given by Σ pfi_j * pfc_j, with pf(i,c)_j the frequency of term ki or
          concept c in the j-th document
        * IDFc = max(1, log_10(N/npc)/5) — inverse document frequency of concept c
        * IDFi = max(1, log_10(N/npi)/5) — inverse document frequency of query
          term i, emphasizing infrequent query terms
          (npc / npi = number of documents containing the concept / the term,
          N = number of documents)
        * y is a smoothing constant (0.1) to avoid zeros in the product

    A concept is a noun group of one, two, or three words.

    :param query: parsed query exposing ``all_tokens()``
    :param documents: top-ranked documents, each a mapping with a 'text' field
    :param size: maximum number of expansion concepts returned
    :param passage_size: maximum passage size used by the fragmenter
    :param threshold: minimum sim(q,c) for a concept to be kept
    :return: up to *size* concepts sorted by descending similarity
    """
    fragmenter = Fragmenter(max_size=passage_size)
    query_terms = set([i.text for i in query.all_tokens()])
    # NOTE(review): regex is built but never used in this body — confirm.
    regex = re.compile(r"|".join(query_terms))
    analyzer = StemmingAnalyzer()
    concepts = set()
    doc_stats = []
    # Build the candidate-concept set and per-document statistics from the
    # first three phrases of each document.
    for doc in documents:
        text = clean(doc['text']).lower()
        fragment = fragmenter.merge_fragments(PhraseTokenizer().tokenize(text)[:3])
        tokens = word_tokenize(fragment.text)
        stemmed_tokens = [i.text for i in analyzer(text)]
        key_terms = noun_groups(tokens)
        concepts = concepts.union(key_terms)
        doc_stats.append(DocStats(stemmed_tokens))
    # Precompute IDFi for each query term; terms absent from every passage get 1.
    query_terms_with_idf = list()
    for q in query_terms:
        npi = __count_docs_containing(q, doc_stats)
        if npi == 0:
            query_terms_with_idf.append((q, 1))
        else:
            query_terms_with_idf.append((q, log(len(documents) / npi, 10) / 5))
    # Remove blank entries or spurious pos_tag entries tagged as NN.
    concepts = set(filter(lambda c: len(c) > 2, concepts))
    ranking = []
    for concept in concepts:
        # Never propose a concept that is already a query term.
        if concept in query_terms: continue
        N = len(documents)
        # npc floored at 1 to avoid division by zero for unseen concepts.
        npc = __count_docs_containing(concept, doc_stats) or 1
        idf_c = max(1.0, log(N / npc, 10) / 5)
        prods = _calculate_qterm_correlation(query_terms_with_idf, concept, idf_c, doc_stats)
        sim = prod([i for i in prods])
        ranking.append((concept, sim))
    print(sorted(ranking, key=lambda c: c[1], reverse=True))
    # Keep concepts above the threshold, best first, capped at *size*.
    filtered = filter(lambda c: c[1] > threshold, ranking)
    return list(map(lambda q: q[0], sorted(filtered, key=lambda c: c[1], reverse=True)))[:size]
| from nltk import pos_tag, word_tokenize, RegexpParser, ngrams, FreqDist
from nltk.collocations import BigramCollocationFinder
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer, PorterStemmer
from preprocessing.analyzer import ThesaurusExpansionAnalyzer, WikimediaAnalyzer
from preprocessing.utils import clean
from enum import Enum
from nltk.tree import Tree
from functools import reduce
import operator
from math import log
from whoosh.analysis import StemmingAnalyzer
from searching.fragmenter import Fragmenter, PhraseTokenizer
import re
import math
from pywsd import disambiguate, adapted_lesk
# from pke.unsupervised import TopicRank
def get_cosine(vec1, vec2):
intersection = set(vec1.keys()) & set(vec2.keys())
numerator = sum([vec1[x] * vec2[x] for x in intersection])
sum1 = sum([vec1[x] ** 2 for x in vec1.keys()])
sum2 = sum([vec2[x] ** 2 for x in vec2.keys()])
denominator = math.sqrt(sum1) * math.sqrt(sum2)
if not denominator:
return 0.0
else:
return float(numerator) / denominator
class POSTag(Enum):
J = wordnet.ADJ
V = wordnet.VERB
N = wordnet.NOUN
A = wordnet.ADV
# ALL = 'ALL'
@classmethod
def to_wordnet(cls, nltk_pos):
for pos in cls:
if nltk_pos.startswith(pos.name):
return pos
return cls.N # TODO check. Not founded tag are threatened as nouns. Maybe None?
def lemmatizer(tokens):
w_lemmatizer = WordNetLemmatizer()
return [w_lemmatizer.lemmatize(token, POSTag.to_wordnet(pos).value) for (token, pos) in pos_tag(tokens)]
def extract(tokens, tags=None):
if tags is None:
tags = [POSTag.J, POSTag.N]
t = [token for token in pos_tag(tokens) if POSTag.to_wordnet(token[1][0]) in tags]
return list(filter(None, t))
def stemming(tokens):
stemmer = PorterStemmer()
return [stemmer.stem(t) for t in tokens]
def pke_key_phrase_extract(text, n=10):
# create a TopicRank extractor
extractor = TopicRank()
# load the content of the document, here in CoreNLP XML format
# the input language is set to English (used for the stoplist)
# normalization is set to stemming (computed with Porter's stemming algorithm)
extractor.load_document(text,
language="en",
normalization='stemming')
# select the keyphrase candidates, for TopicRank the longest sequences of
# nouns and adjectives
extractor.candidate_selection(pos={'NOUN', 'PROPN', 'ADJ'})
# weight the candidates using a random walk. The threshold parameter sets the
# minimum similarity for clustering, and the method parameter defines the
# linkage method
extractor.candidate_weighting(threshold=0.74,
method='average')
# print the n-highest (10) scored candidates
return extractor.get_n_best(n=n, stemming=True)
def thesaurus_expand(query, wikimedia, size=3, threshold=4.23):
"""
Wordent hierarchy
- hyponyms concepts that are more specific (immediate), navigate down to the tree
- hypernyms general concept, navigate up the hierarchy
- meronyms components. For instance a tree have trunk, ...so on as meronym
- holonyms things that contain meronyms (i.e. tree)
Query expansion require good relevance feedback methods. Using a thesaurus based query expansion might decrease
performance and has query drift problems with polysemic words. This method picks up keyword from gloss of the synsets
and uses a lesk algorithm to disambiguate terms from each other
:param query:
:return:
"""
analyzer = ThesaurusExpansionAnalyzer()
wikimedia_analyzer = WikimediaAnalyzer()
original_tokens = [i.text for i in analyzer(query)]
# original_tokens = set([i.text for i in query.all_tokens()])
print(original_tokens)
synonyms = set()
rule = r"""
NBAR: {<NN>}
{<JJ>}
# {<JJS>}
{<NNS>}
# {<NNP>}
"""
synsets = []
# for i in original_tokens:
# for s in wordnet.synsets(i):
# for h in s.hypernyms():
# print(s, h , s.wup_similarity(h))
# for i in original_tokens:
# for s in wordnet.synsets(i):
# print(s.definition())
for w, s in disambiguate(" ".join(original_tokens), algorithm=adapted_lesk):
if s:
definition = s.definition()
pke_text = definition + ' ' + ' '.join(s.lemma_names())
# print(pke_key_phrase_extract(pke_text))
tokens = [i.text for i in wikimedia_analyzer(definition)]
synsets.append((w, wordnet.synset(s.name()), tokens))
for word, sense, definition in synsets:
if sense:
synonyms = synonyms.union(noun_groups(word_tokenize(sense.definition()), chunk_size=1, rule=rule))
text = " ".join([i.name() for i in sense.lemmas()])
for lemma in wikimedia_analyzer(text):
if lemma.text not in original_tokens:
synonyms.add(lemma.text)
# vfor tok in wikimedia_analyzer(lemma.text):
# print(tok.text)
# if tok.text not in original_tokens:
# synonyms.add(tok.text)
# for token in tokens: for _, original_sense, _ in synsets: for child_synset in wordnet.synsets(token):
# if child_synset: # definition = [i.text for i in analyzer(child_synset.definition())] # pywsd. score =
# wordnet.synset(original_sense.name()).path_similarity(child_synset, simulate_root=False) print(
# child_synset, child_synset.definition(), original_sense, score)
# print(tokens)
# print([j.definition() for i, j in pywsd.disambiguate(query, algorithm=pywsd.simple_lesk)], '\n',
# [j.definition() for i, j in pywsd.disambiguate(query, algorithm=pywsd.adapted_lesk)], '\n',
# [j.definition() for i, j in pywsd.disambiguate(query, algorithm=pywsd.cosine_lesk)], '\n',
# [j.definition() for i, j in pywsd.disambiguate(query, algorithm=pywsd.max_similarity)])
# if len(_concept) > 0:
# concept, similarity_strength = _concept[0]
# if similarity_strength > 0.7:
# __retrieve_definition_groupings(synsets)
# else:
# print(__retrieve_definition_groupings(synsets))
# disambiguated_senses = disambiguate(query, algorithm=adapted_lesk)
# print(disambiguated_senses, '\n\n', simple_lesk, '\n\n', resnik_wsd(word_tokenize(query)), '\n')
# for token in original_tokens:
# senses = wordnet.synsets(token, 'n')
# if len(senses) == 1:
# synonyms = synonyms.union(set(senses[0].lemma_names()))
# else:
#
# tokens += [i.text for i in analyzer(' '.join(list(synonyms)))]
# return original_tokens + [i for i in tokens if i not in original_tokens]
reader = wikimedia.reader
terms_vec = {}
for syn in synonyms:
score = calc_syn_score(syn, reader)
terms_vec[syn] = score
# else:
# terms_vec[syn] = 0
ranked_terms = sorted(terms_vec, key=lambda c: terms_vec[c], reverse=True)
print('***Ranked terms')
for i in list(map(lambda q: (q, terms_vec[q]), ranked_terms)):
print(i[0], ' ', i[1], '\n')
return list(map(lambda q: q[0], filter(lambda v: v[1] >= threshold, terms_vec.items())))
def calc_syn_score(syn, reader):
terms_vec = []
for i in word_tokenize(syn):
doc_frequency = reader.doc_frequency('text', i)
term_frequency = reader.frequency('text', i)
if doc_frequency != 0:
terms_vec.append(term_frequency / doc_frequency)
else:
terms_vec.append(0)
return max(terms_vec)
def noun_groups(tokens, chunk_size=2, analyzer=StemmingAnalyzer(), rule=None):
grammar = r"""
NBAR: {<NN|JJ><|JJ|NN>} # Nouns and Adjectives, terminated with Nouns
# {<NN>} # If pattern not found just a single NN is ok
"""
if rule is not None:
grammar = rule
cp = RegexpParser(grammar)
result = cp.parse(pos_tag(tokens))
nouns = set()
for chunk in result:
if type(chunk) == Tree:
if chunk.label() == 'NBAR':
words = list(map(lambda entry: entry[0], chunk.leaves()))
tokens = analyzer(" ".join(words))
nouns.add(" ".join([i.text for i in tokens]))
# nouns.add(tuple([i.text for i in tokens]))
else:
continue
# print('Leaf', '\n', chunk)
return nouns
class Passage:
""" Deprecated """
def __init__(self, doc, passage):
self._doc = doc
self._passage = passage
self.concept = []
def __repr__(self):
return f'{self._passage[0:3]}...[{len(self._passage)}] [{self._doc["title"]}]'
class DocStats:
"""
In-memory bigram index for text statistics
"""
def __init__(self, tokens):
self._bigram = BigramCollocationFinder.from_words(tokens)
@staticmethod
def _score_from_ngram(*args):
return args[0]
def _frequency(self, gram: tuple):
fd_score = self._bigram.score_ngram(self._score_from_ngram, *gram) or 0
bd_score = self._bigram.score_ngram(self._score_from_ngram, *gram[::-1]) or 0
return max(fd_score, bd_score)
def frequency(self, term: str):
grams = [i for i in ngrams(term.split(" "), 2)]
if len(grams) == 0: return self._bigram.word_fd[term]
return max([self._frequency(gram) for gram in grams])
def __count_docs_containing(c, docs):
docs_containing_c = list(filter(lambda f: f > 0, [d.frequency(c) for d in docs]))
return len(docs_containing_c)
def prod(products):
return reduce(operator.mul, products)
def _calculate_qterm_correlation(query_terms, concept, idf_c, docs):
for qterm, idf_i in query_terms:
N = len(docs)
# IDFc = max(1.0, log(N / npc, 10) / 5)
# IDFi = max(1.0, log(N / npi, 10) / 5)
y = 0.1
f = sum([doc_stat.frequency(qterm) * doc_stat.frequency(concept) for doc_stat in docs])
if f == 0:
yield y
else:
# print(f, N, y, idf_c, idf_i, concept, qterm)
yield (y + (log(f) * idf_c) / log(N)) ** idf_i
# yield d
def lca_expand(query, documents, size=15, passage_size=400, threshold=1.4):
"""
Implements the Local Context Analysis algorithm to expand query based on top ranked concept that
maximize the sim to the query
sim(q,c) = ∏ (y + (log(f(ci,ki) + IDFc) / log(n))^IDFi
where:
* f(ci, ki) = quantifies the correlation between the concept c and the query term ki:
and is given by: Σ pfi_j * pfc_j where pf(i,c)_j is the frequency of term ki or concept c in the j-th doc
* IDFc = inverse document frequency of concept c calculated as max(1, log_10(N/npc)/5)
IDFi = inverse document frequency of query term i calculated as max(1, log_10(N/npi)/5) to emphasizes infrequent query terms
where npc is number of documents containing the concept c nad npi number of docs containing the query term i
and N is number of documents
IDFi
* y is a smoothing constant set to 0.1 to avoid zeros values in the product calculation
A concept is a noun group of single, two, or three words.
"""
fragmenter = Fragmenter(max_size=passage_size)
query_terms = set([i.text for i in query.all_tokens()])
regex = re.compile(r"|".join(query_terms))
analyzer = StemmingAnalyzer()
concepts = set()
doc_stats = []
for doc in documents:
text = clean(doc['text']).lower()
fragment = fragmenter.merge_fragments(PhraseTokenizer().tokenize(text)[:3])
# fragment = fragmenter.merge_fragments(
# fragmenter.calculate_phrase_ranking(
# text,
# query_terms)[:3])
tokens = word_tokenize(fragment.text)
stemmed_tokens = [i.text for i in analyzer(text)]
key_terms = noun_groups(tokens)
concepts = concepts.union(key_terms)
doc_stats.append(DocStats(stemmed_tokens))
query_terms_with_idf = list()
for q in query_terms:
npi = __count_docs_containing(q, doc_stats)
if npi == 0:
query_terms_with_idf.append((q, 1))
else:
query_terms_with_idf.append((q, log(len(documents) / npi, 10) / 5))
concepts = set(filter(lambda c: len(c) > 2, concepts)) # Removing blank entries or spurious pos_tag entries
# tagged as NN
# breakpoint()
ranking = []
for concept in concepts:
if concept in query_terms: continue
N = len(documents)
npc = __count_docs_containing(concept, doc_stats) or 1
idf_c = max(1.0, log(N / npc, 10) / 5)
prods = _calculate_qterm_correlation(query_terms_with_idf, concept, idf_c, doc_stats)
sim = prod([i for i in prods])
ranking.append((concept, sim))
print(sorted(ranking, key=lambda c: c[1], reverse=True))
filtered = filter(lambda c: c[1] > threshold, ranking)
return list(map(lambda q: q[0], sorted(filtered, key=lambda c: c[1], reverse=True)))[:size]
# return [re.sub(regex, "", term).strip() for term in top_terms] | en | 0.630296 | # from pke.unsupervised import TopicRank # ALL = 'ALL' # TODO check. Not founded tag are threatened as nouns. Maybe None? # create a TopicRank extractor # load the content of the document, here in CoreNLP XML format # the input language is set to English (used for the stoplist) # normalization is set to stemming (computed with Porter's stemming algorithm) # select the keyphrase candidates, for TopicRank the longest sequences of # nouns and adjectives # weight the candidates using a random walk. The threshold parameter sets the # minimum similarity for clustering, and the method parameter defines the # linkage method # print the n-highest (10) scored candidates Wordent hierarchy - hyponyms concepts that are more specific (immediate), navigate down to the tree - hypernyms general concept, navigate up the hierarchy - meronyms components. For instance a tree have trunk, ...so on as meronym - holonyms things that contain meronyms (i.e. tree) Query expansion require good relevance feedback methods. Using a thesaurus based query expansion might decrease performance and has query drift problems with polysemic words. 
This method picks up keyword from gloss of the synsets and uses a lesk algorithm to disambiguate terms from each other :param query: :return: # original_tokens = set([i.text for i in query.all_tokens()]) NBAR: {<NN>} {<JJ>} # {<JJS>} {<NNS>} # {<NNP>} # for i in original_tokens: # for s in wordnet.synsets(i): # for h in s.hypernyms(): # print(s, h , s.wup_similarity(h)) # for i in original_tokens: # for s in wordnet.synsets(i): # print(s.definition()) # print(pke_key_phrase_extract(pke_text)) # vfor tok in wikimedia_analyzer(lemma.text): # print(tok.text) # if tok.text not in original_tokens: # synonyms.add(tok.text) # for token in tokens: for _, original_sense, _ in synsets: for child_synset in wordnet.synsets(token): # if child_synset: # definition = [i.text for i in analyzer(child_synset.definition())] # pywsd. score = # wordnet.synset(original_sense.name()).path_similarity(child_synset, simulate_root=False) print( # child_synset, child_synset.definition(), original_sense, score) # print(tokens) # print([j.definition() for i, j in pywsd.disambiguate(query, algorithm=pywsd.simple_lesk)], '\n', # [j.definition() for i, j in pywsd.disambiguate(query, algorithm=pywsd.adapted_lesk)], '\n', # [j.definition() for i, j in pywsd.disambiguate(query, algorithm=pywsd.cosine_lesk)], '\n', # [j.definition() for i, j in pywsd.disambiguate(query, algorithm=pywsd.max_similarity)]) # if len(_concept) > 0: # concept, similarity_strength = _concept[0] # if similarity_strength > 0.7: # __retrieve_definition_groupings(synsets) # else: # print(__retrieve_definition_groupings(synsets)) # disambiguated_senses = disambiguate(query, algorithm=adapted_lesk) # print(disambiguated_senses, '\n\n', simple_lesk, '\n\n', resnik_wsd(word_tokenize(query)), '\n') # for token in original_tokens: # senses = wordnet.synsets(token, 'n') # if len(senses) == 1: # synonyms = synonyms.union(set(senses[0].lemma_names())) # else: # # tokens += [i.text for i in analyzer(' '.join(list(synonyms)))] # return 
original_tokens + [i for i in tokens if i not in original_tokens] # else: # terms_vec[syn] = 0 NBAR: {<NN|JJ><|JJ|NN>} # Nouns and Adjectives, terminated with Nouns # {<NN>} # If pattern not found just a single NN is ok # nouns.add(tuple([i.text for i in tokens])) # print('Leaf', '\n', chunk) Deprecated In-memory bigram index for text statistics # IDFc = max(1.0, log(N / npc, 10) / 5) # IDFi = max(1.0, log(N / npi, 10) / 5) # print(f, N, y, idf_c, idf_i, concept, qterm) # yield d Implements the Local Context Analysis algorithm to expand query based on top ranked concept that maximize the sim to the query sim(q,c) = ∏ (y + (log(f(ci,ki) + IDFc) / log(n))^IDFi where: * f(ci, ki) = quantifies the correlation between the concept c and the query term ki: and is given by: Σ pfi_j * pfc_j where pf(i,c)_j is the frequency of term ki or concept c in the j-th doc * IDFc = inverse document frequency of concept c calculated as max(1, log_10(N/npc)/5) IDFi = inverse document frequency of query term i calculated as max(1, log_10(N/npi)/5) to emphasizes infrequent query terms where npc is number of documents containing the concept c nad npi number of docs containing the query term i and N is number of documents IDFi * y is a smoothing constant set to 0.1 to avoid zeros values in the product calculation A concept is a noun group of single, two, or three words. # fragment = fragmenter.merge_fragments( # fragmenter.calculate_phrase_ranking( # text, # query_terms)[:3]) # Removing blank entries or spurious pos_tag entries # tagged as NN # breakpoint() # return [re.sub(regex, "", term).strip() for term in top_terms] | 2.502346 | 3 |
Basic/recv.py | julio-burgos/Rx_TX_RF24 | 0 | 6615609 | <filename>Basic/recv.py
import RPi.GPIO as GPIO
from RF24 import *
import time
import spidev
# nRF24L01 receiver: configure the radio, then loop forever printing each
# received payload and loading an acknowledgement payload for the sender.
GPIO.setmode(GPIO.BCM)
# Pipe addresses; this node reads on pipes[1].
pipes = [0xe7e7e7e7e7, 0xc2c2c2c2c2]
# CE on GPIO 25, CSN on GPIO 8 — TODO confirm wiring matches the board.
radio = RF24(25,8)
radio.begin()
#radio.setPayloadSize(32)
radio.setChannel(0x60)
radio.setDataRate(RF24_2MBPS)
radio.setPALevel(RF24_PA_MIN)
radio.setAutoAck(True)
# Dynamic payload sizes + ack payloads must match the transmitter's config.
radio.enableDynamicPayloads()
radio.enableAckPayload()
radio.openReadingPipe(1, pipes[1])
radio.printDetails()
radio.startListening()
while True:
    # Payload returned to the transmitter with the next auto-ack.
    ackPL = [1]
    # Poll every 10 ms until a payload arrives.
    while not radio.available():
        time.sleep(1/100)
    receivedMessage = radio.read(radio.getDynamicPayloadSize())
    print("Received: {}".format(receivedMessage))
    print("Translating the receivedMessage into unicode characters...")
    string = ""
    for n in receivedMessage:
        # Keep only printable ASCII bytes (32..126); everything else is dropped.
        if (n >= 32 and n <= 126):
            string += chr(n)
    print(string)
    radio.writeAckPayload(1, bytearray(ackPL))
    print("Loaded payload reply of {}".format(ackPL))
import RPi.GPIO as GPIO
from RF24 import *
import time
import spidev
GPIO.setmode(GPIO.BCM)
pipes = [0xe7e7e7e7e7, 0xc2c2c2c2c2]
radio = RF24(25,8)
radio.begin()
#radio.setPayloadSize(32)
radio.setChannel(0x60)
radio.setDataRate(RF24_2MBPS)
radio.setPALevel(RF24_PA_MIN)
radio.setAutoAck(True)
radio.enableDynamicPayloads()
radio.enableAckPayload()
radio.openReadingPipe(1, pipes[1])
radio.printDetails()
radio.startListening()
while True:
ackPL = [1]
while not radio.available():
time.sleep(1/100)
receivedMessage =radio.read(radio.getDynamicPayloadSize())
print("Received: {}".format(receivedMessage))
print("Translating the receivedMessage into unicode characters...")
string = ""
for n in receivedMessage:
# Decode into standard unicode set
if (n >= 32 and n <= 126):
string += chr(n)
print(string)
radio.writeAckPayload(1, bytearray(ackPL))
print("Loaded payload reply of {}".format(ackPL))
| en | 0.645696 | #radio.setPayloadSize(32) # Decode into standard unicode set | 2.796199 | 3 |
server.py | salvadorNT/github-users | 0 | 6615610 | <filename>server.py
# Flask entry point: builds the application via the project's app factory,
# registers a `flask test` CLI command, and serves with werkzeug when run
# directly as a script.
import unittest
from werkzeug.serving import run_simple
from app import app_factory
application = app_factory.create_app()
@application.cli.command()
def test():
    """Discover and run the unit tests under the `tests` package."""
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner().run(tests)
if __name__ == "__main__":
    # Development server only: auto-reloader and debugger enabled,
    # one worker thread per request.
    run_simple(
        '0.0.0.0', 5000,
        application,
        use_reloader=True, use_debugger=True, threaded=True)
import unittest
from werkzeug.serving import run_simple
from app import app_factory
application = app_factory.create_app()
@application.cli.command()
def test():
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner().run(tests)
if __name__ == "__main__":
run_simple(
'0.0.0.0', 5000,
application,
use_reloader=True, use_debugger=True, threaded=True) | none | 1 | 2.237645 | 2 | |
message_generator.py | matthew-a-adams/morse-practice-scripts | 0 | 6615611 | import os.path
import numpy as np
import pandas as pd
import random
import alphabet_tools as at
class Message:
    """Random Morse-practice message generator.

    A message (phrase) is a list of words; each word is a list of characters
    drawn uniformly at random from an alphabet.
    """

    # Class-level defaults; every instance overrides these in __init__.
    alphabet = []
    numberOfWords = 1
    numberOfCharacters = 1

    def __init__(self, charactersPerWord=1, wordsPerPhrase=1, alphabet=None):
        """Configure the generator.

        Args:
            charactersPerWord: number of characters in each generated word.
            wordsPerPhrase: number of words in each generated phrase.
            alphabet: characters to draw from; when empty/None, falls back to
                the full character set from alphabet_tools.
                (Default changed from a mutable `[]` to None — the falsy check
                below keeps the original behavior for both None and [].)
        """
        if alphabet:
            self.alphabet = alphabet
        else:
            self.alphabet = at.Alphabet().getCharacters()
        self.numberOfWords = wordsPerPhrase
        self.numberOfCharacters = charactersPerWord

    def generateCharacter(self):
        """Return one random character from the alphabet, as a 1-element list."""
        return random.choices(self.alphabet, k=1)

    def generateWord(self, dictionary=None):
        """Return a random word as a list of characters.

        The `dictionary` parameter is accepted for interface compatibility but
        is currently unused (default changed from a mutable `[]` to None).
        """
        word = []
        for _ in range(self.numberOfCharacters):
            # extend() instead of repeated list concatenation (was O(n^2)).
            word.extend(self.generateCharacter())
        return word

    def generate(self):
        """Return a random phrase: a list of numberOfWords random words."""
        return [self.generateWord() for _ in range(self.numberOfWords)]
| import os.path
import numpy as np
import pandas as pd
import random
import alphabet_tools as at
class Message:
alphabet = []
numberOfWords = 1
numberOfCharacters = 1
def __init__(self, charactersPerWord = 1, wordsPerPhrase = 1, alphabet = []):
if alphabet:
self.alphabet = alphabet
else:
self.alphabet = at.Alphabet().getCharacters()
self.numberOfWords = wordsPerPhrase
self.numberOfCharacters = charactersPerWord
def generateCharacter(self):
return random.choices(self.alphabet, k=1)
def generateWord(self, dictionary = []):
word = []
for character in range(0, self.numberOfCharacters, 1):
word = word + self.generateCharacter()
return word
def generate(self):
phrase = []
for word in range(0, self.numberOfWords, 1):
phrase.append(self.generateWord())
return phrase
| none | 1 | 3.148537 | 3 | |
tools/py/archive_aar.py | elmernocon/XUnityPlayer | 0 | 6615612 | <filename>tools/py/archive_aar.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from pathlib import Path
from shutil import copy
from subprocess import call
def main() -> None:
    """Build the gradle projects and publish the release AAR.

    Runs the archive tasks via build_gradle.py, then copies
    bin/Release/xplayer/com.unity3d.xplayer.aar to
    refs/xplayer/com.unity3d.xplayer_latest.aar, replacing any previous copy.
    """
    # Repo root is two directory levels above this script.
    fdp = Path(__file__).parent
    rdp = fdp.parent.parent
    # Helper script that drives the gradle builds.
    script = str(fdp / "build_gradle.py")
    # Build gradle projects.
    build_gradle(script)
    # Copy the com.unity3d.xplayer.aar file to the refs/xplayer folder.
    aar = rdp / "bin/Release/xplayer" / "com.unity3d.xplayer.aar"
    if aar.exists():
        rrxupdp = rdp / "refs/xplayer"
        rrxupdp.mkdir(parents=True, exist_ok=True)
        # Use Path.stem/.suffix instead of name.replace(suffix, '') — replace()
        # would strip every occurrence of the suffix text, not just the final
        # extension, if the stem ever contained it.
        name = f"{aar.stem}_latest{aar.suffix}"
        src = aar
        dst = rrxupdp / name
        if dst.exists():
            dst.unlink()
        copy(str(src), str(dst))
        print()
        print(f"Copied {str(src.relative_to(rdp))}")
        print(f" → {str(dst.relative_to(rdp))}")
def build_gradle(script: str) -> None:
    """Invoke the build_gradle.py helper once per gradle project.

    Each project is passed with -p and its archive tasks with -t.
    """
    project_tasks = {
        "src/xplayer": [
            ":xplayer:archiveDebug",
            ":xplayer:archiveRelease",
            ":xample:archiveDebug",
            ":xample:archiveRelease",
        ],
        "src/deeplinking": [
            ":deeplinking:archiveDebug",
            ":deeplinking:archiveRelease"
        ]
    }
    for project_dir, task_list in project_tasks.items():
        command = ["python3", script, "-p", project_dir, "-t"]
        command.extend(task_list)
        call(command)
# Run the archive workflow only when executed directly, not on import.
if __name__ == "__main__":
    main()
| <filename>tools/py/archive_aar.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from pathlib import Path
from shutil import copy
from subprocess import call
def main() -> None:
# Get paths.
fdp = Path(__file__).parent
rdp = fdp.parent.parent
# Get build_gradle.py script.
script = str(fdp / "build_gradle.py")
# Build gradle projects.
build_gradle(script)
# Copy the com.unity3d.xplayer.aar file to the refs/xplayer folder.
aar = rdp / "bin/Release/xplayer" / "com.unity3d.xplayer.aar"
if aar.exists():
rrxupdp = rdp / "refs/xplayer"
rrxupdp.mkdir(parents=True, exist_ok=True)
suffix = aar.suffix
name = f"{aar.name.replace(suffix, '')}_latest{suffix}"
src = aar
dst = rrxupdp / name
if dst.exists():
dst.unlink()
copy(str(src), str(dst))
print()
print(f"Copied {str(src.relative_to(rdp))}")
print(f" → {str(dst.relative_to(rdp))}")
def build_gradle(script: str) -> None:
projects = {
"src/xplayer": [
":xplayer:archiveDebug",
":xplayer:archiveRelease",
":xample:archiveDebug",
":xample:archiveRelease",
],
"src/deeplinking": [
":deeplinking:archiveDebug",
":deeplinking:archiveRelease"
]
}
for project, tasks in projects.items():
call([
"python3", script,
"-p", project,
"-t", *tasks])
if __name__ == "__main__":
main()
| en | 0.66346 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Get paths. # Get build_gradle.py script. # Build gradle projects. # Copy the com.unity3d.xplayer.aar file to the refs/xplayer folder. | 2.179266 | 2 |
azure-quantum/tests/unit/test_quantinuum.py | Anatoliy-Litvinenko/qdk-python | 1 | 6615613 | #!/bin/env python
# -*- coding: utf-8 -*-
##
# test_qiskit.py: Tests for Qiskit plugin
##
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
##
import unittest
import warnings
import pytest
import numpy as np
from azure.core.exceptions import HttpResponseError
from azure.quantum.job.job import Job
from azure.quantum._client.models import CostEstimate, UsageEvent
from azure.quantum.target import Quantinuum
from common import QuantumTestBase, ZERO_UID
class TestQuantinuum(QuantumTestBase):
    """Tests for the Quantinuum target: cost estimation and job submission."""

    # Name of the Job attribute patched below so playback recordings see a
    # deterministic job id.
    mock_create_job_id_name = "create_job_id"
    # Keep a reference to the real implementation before any patching.
    create_job_id = Job.create_job_id
    def get_test_job_id(self):
        # In playback mode return the fixed all-zero id so recorded sessions
        # match; otherwise generate a fresh id.
        return ZERO_UID if self.is_playback \
            else Job.create_job_id()
    def _teleport(self):
        # OpenQASM 2.0 source for a standard 3-qubit quantum-teleportation
        # circuit (entangle q0/q1, Bell-measure q2/q0, classically correct q1).
        return """OPENQASM 2.0;
include "qelib1.inc";
qreg q[3];
creg c0[1];
creg c1[1];
creg c2[1];
h q[0];
cx q[0], q[1];
x q[2];
h q[2];
cx q[2], q[0];
h q[2];
measure q[0] -> c0[0];
if (c0==1) x q[1];
measure q[2] -> c1[0];
if (c1==1) z q[1];
h q[1];
measure q[1] -> c2[0];
"""
    @pytest.mark.quantinuum
    def test_job_estimate_cost_quantinuum(self):
        # The API-validation target is free; the real device has a fixed cost
        # for this circuit at 100k shots.
        with unittest.mock.patch.object(
            Job,
            self.mock_create_job_id_name,
            return_value=self.get_test_job_id(),
        ):
            workspace = self.create_workspace()
            circuit = self._teleport()
            target = Quantinuum(workspace=workspace, name="quantinuum.hqs-lt-s1-apival")
            cost = target.estimate_cost(circuit, num_shots=100e3)
            assert cost.estimated_total == 0.0
            target = Quantinuum(workspace=workspace, name="quantinuum.hqs-lt-s1")
            cost = target.estimate_cost(circuit, num_shots=100e3)
            assert cost.estimated_total == 845.0
    @pytest.mark.quantinuum
    @pytest.mark.live_test
    def test_job_submit_quantinuum(self):
        # Submit the teleport circuit; when the provider is available, wait for
        # completion and check the measured classical registers.
        with unittest.mock.patch.object(
            Job,
            self.mock_create_job_id_name,
            return_value=self.get_test_job_id(),
        ):
            workspace = self.create_workspace()
            circuit = self._teleport()
            target = Quantinuum(workspace=workspace)
            try:
                job = target.submit(circuit)
            except HttpResponseError as e:
                # Tolerate environments where the provider is not enabled;
                # re-raise any other HTTP failure.
                if "InvalidJobDefinition" not in e.message \
                    and "The provider specified does not exist" not in e.message:
                    raise(e)
                warnings.warn(e.message)
            else:
                # Make sure the job is completed before fetching the results
                # playback currently does not work for repeated calls
                if not self.is_playback:
                    self.pause_recording()
                    self.assertEqual(False, job.has_completed())
                    try:
                        # Set a timeout for recording
                        job.wait_until_completed(timeout_secs=60)
                    except TimeoutError:
                        warnings.warn("Quantinuum execution exceeded timeout. Skipping fetching results.")
                    else:
                        # Check if job succeeded
                        self.assertEqual(True, job.has_completed())
                        assert job.details.status == "Succeeded"
                    self.resume_recording()
                # Re-fetch the job through the workspace and verify the
                # teleported measurement outcomes.
                job = workspace.get_job(job.id)
                self.assertEqual(True, job.has_completed())
                if job.has_completed():
                    results = job.get_results()
                    assert results["c0"] == ["0"]
                    assert results["c1"] == ["0"]
| #!/bin/env python
# -*- coding: utf-8 -*-
##
# test_qiskit.py: Tests for Qiskit plugin
##
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
##
import unittest
import warnings
import pytest
import numpy as np
from azure.core.exceptions import HttpResponseError
from azure.quantum.job.job import Job
from azure.quantum._client.models import CostEstimate, UsageEvent
from azure.quantum.target import Quantinuum
from common import QuantumTestBase, ZERO_UID
class TestQuantinuum(QuantumTestBase):
mock_create_job_id_name = "create_job_id"
create_job_id = Job.create_job_id
def get_test_job_id(self):
return ZERO_UID if self.is_playback \
else Job.create_job_id()
def _teleport(self):
return """OPENQASM 2.0;
include "qelib1.inc";
qreg q[3];
creg c0[1];
creg c1[1];
creg c2[1];
h q[0];
cx q[0], q[1];
x q[2];
h q[2];
cx q[2], q[0];
h q[2];
measure q[0] -> c0[0];
if (c0==1) x q[1];
measure q[2] -> c1[0];
if (c1==1) z q[1];
h q[1];
measure q[1] -> c2[0];
"""
@pytest.mark.quantinuum
def test_job_estimate_cost_quantinuum(self):
with unittest.mock.patch.object(
Job,
self.mock_create_job_id_name,
return_value=self.get_test_job_id(),
):
workspace = self.create_workspace()
circuit = self._teleport()
target = Quantinuum(workspace=workspace, name="quantinuum.hqs-lt-s1-apival")
cost = target.estimate_cost(circuit, num_shots=100e3)
assert cost.estimated_total == 0.0
target = Quantinuum(workspace=workspace, name="quantinuum.hqs-lt-s1")
cost = target.estimate_cost(circuit, num_shots=100e3)
assert cost.estimated_total == 845.0
@pytest.mark.quantinuum
@pytest.mark.live_test
def test_job_submit_quantinuum(self):
with unittest.mock.patch.object(
Job,
self.mock_create_job_id_name,
return_value=self.get_test_job_id(),
):
workspace = self.create_workspace()
circuit = self._teleport()
target = Quantinuum(workspace=workspace)
try:
job = target.submit(circuit)
except HttpResponseError as e:
if "InvalidJobDefinition" not in e.message \
and "The provider specified does not exist" not in e.message:
raise(e)
warnings.warn(e.message)
else:
# Make sure the job is completed before fetching the results
# playback currently does not work for repeated calls
if not self.is_playback:
self.pause_recording()
self.assertEqual(False, job.has_completed())
try:
# Set a timeout for recording
job.wait_until_completed(timeout_secs=60)
except TimeoutError:
warnings.warn("Quantinuum execution exceeded timeout. Skipping fetching results.")
else:
# Check if job succeeded
self.assertEqual(True, job.has_completed())
assert job.details.status == "Succeeded"
self.resume_recording()
job = workspace.get_job(job.id)
self.assertEqual(True, job.has_completed())
if job.has_completed():
results = job.get_results()
assert results["c0"] == ["0"]
assert results["c1"] == ["0"]
| en | 0.62295 | #!/bin/env python # -*- coding: utf-8 -*- ## # test_qiskit.py: Tests for Qiskit plugin ## # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. ## OPENQASM 2.0; include "qelib1.inc"; qreg q[3]; creg c0[1]; creg c1[1]; creg c2[1]; h q[0]; cx q[0], q[1]; x q[2]; h q[2]; cx q[2], q[0]; h q[2]; measure q[0] -> c0[0]; if (c0==1) x q[1]; measure q[2] -> c1[0]; if (c1==1) z q[1]; h q[1]; measure q[1] -> c2[0]; # Make sure the job is completed before fetching the results # playback currently does not work for repeated calls # Set a timeout for recording # Check if job succeeded | 1.983642 | 2 |
Cogs/DotaBase.py | kazoeru/Acinonyx-v3 | 0 | 6615614 | import discord
from discord.ext import commands
from Cogs import Utils
from Cogs import Settings
def setup(bot):
    """discord.py extension entry point: register the DotaBase cog."""
    settings_cog = bot.get_cog("Settings")
    bot.add_cog(DotaBase(bot, settings_cog))
class DotaBase(commands.Cog):
    """Placeholder cog: every DOTA 2 command is delegated to the MangoByte bot.

    Each command only logs that it was redirected; the Indonesian docstrings
    are kept verbatim because discord.py surfaces them as user-facing help text.

    Fixes versus the previous version:
    - `blog` was defined twice; the second definition silently shadowed the
      first in the class body, leaving dead code. Only one definition remains.
    - Several delegation log messages printed the wrong command name
      (neutralitems/blog/talents/fuseheroes); they now name their own command.
    """

    def __init__(self, bot, settings):
        self.bot = bot
        self.settings = settings
        # Cache sibling cogs at module level, matching the project's pattern.
        global Utils, DisplayName
        Utils = self.bot.get_cog("Utils")
        DisplayName = self.bot.get_cog("DisplayName")

    @commands.command()
    async def hero(self, ctx, *, hero = None):
        """Mencari informasi seputar hero DOTA 2.
        Kamu dapat menggunakan command ini dengan option:
        • Nama hero
        • Hero ID
        **Contoh**
        • acx hero sf
        • acx hero inker
        • acx hero furi"""
        print("Command hero dialihkan ke MangoByte")

    @commands.command()
    async def item(self, ctx, *, item = None):
        """Mencari informasi seputar item DOTA 2
        **Contoh:**
        • acx item shadow blade
        • acx item tango"""
        print("Command item dialihkan ke MangoByte")

    @commands.command()
    async def ability(self, ctx, ability = None):
        """Mencari informasi seputar ability hero DOTA 2
        **Contoh:**
        • acx ability rocket flare
        • acx ability laser
        • acx ability sprout"""
        print("Command ability dialihkan ke MangoByte")

    @commands.command()
    async def talents(self, ctx, *, hero = None):
        """Mencari informasi talent dari hero DOTA 2.
        Kamu dapat menggunakan command ini dengan option:
        • Nama hero
        • Hero ID
        **Contoh:**
        `acx talents shadow fiend`"""
        print("Command talents dialihkan ke MangoByte")

    @commands.command()
    async def lore(self, ctx, *, name = None):
        """Mencari lore/knowledge (pengetahuan) dari hero, ability, item dalam game DOTA 2
        Jika tidak memasukan [name] aku akan memberikan secara random
        **Contoh:**
        • acx lore bristleback
        • acx lore shadow blade
        • acx lore venomous gale"""
        print("Command lore dialihkan ke MangoByte")

    @commands.command()
    async def leveledstats(self, ctx, *, hero = None):
        """Melihat informasi hero DOTA 2 pada level yang ditentukan.
        Jika tidak memasukan level, aku akan memberikan status hero level 1
        **Contoh:**
        acx leveledstats tinker
        acx leveledstats shaker lvl 2
        acx leveledstats level 28 shaman"""
        print("Command leveledstats dialihkan ke MangoByte")

    @commands.command()
    async def courage(self, ctx, *, hero = None):
        """Kamu ingin menantang teman mu?
        Membuat tantangan dengan random build hero DOTA 2
        (atau kamu dapat memilihi hero) dan random set item
        **Contoh:**
        acx courage
        acx courage shadow fiend"""
        print("Command courage dialihkan ke MangoByte")

    @commands.command()
    async def herotable(self, ctx, *, table_args = None):
        """Menampilkan urutan table dari hero DOTA 2 dan statusnya.
        Table statistik hero menampilkan status nilai tertinggi yang ditentukan
        **Contoh:**
        acx herotable dps
        acx herotable health lvl 30
        acx herotable attack speed level 21 descending
        """
        print("Command herotable dialihkan ke MangoByte")

    @commands.command(aliases=["neutrals", "neutraltier"])
    async def neutralitems(self, ctx, *, tier = None):
        """Menampilkan semua neutral item DOTA 2
        Jika kamu memasukan tier yang ditentukan, aku akan menampilkan item dalam tier tersebut beserta namanya
        **Contoh:**
        acx neutralitems
        acx neutralitems tier 5
        acx neutralitems 3"""
        # Was logging "herotable" by mistake.
        print("Command neutralitems dialihkan ke MangoByte")

    @commands.command(aliases=["aghs", "ags", "aghanims", "scepter", "shard"])
    async def aghanim(self, ctx, *, name = None):
        """Melihat aghanim upgrade untuk hero atau ability DOTA 2"""
        print("Command aghanim dialihkan ke MangoByte")

    @commands.command()
    async def blog(self, ctx):
        """Berita terbaru seputar DOTA 2"""
        # Single definition (was duplicated, with both copies mislabelled).
        print("Command blog dialihkan ke MangoByte")

    @commands.command(aliases=["fuse", "fuze", "fuzeheroes"])
    async def fuseheroes(self, ctx, *, heroes = None):
        """Lihatlah apa yang terjadi jika kamu menggabungkan 2 hero DOTA 2
        Jika kamu tidak memasukan hero, aku akan memberikan secara random
        **Contoh:**
        acx fuseheroes axe chen"""
        print("Command fuseheroes dialihkan ke MangoByte")

    @commands.command()
    async def laning(self, ctx, match_id = None):
        """Membuat lanning dalam bentuk gif
        Jika kamu tidak memasukan match_id, aku akan mengambil data lastmatch dari akun mu yang telah terhubung dengan Acinonyx"""
        print("Command laning dialihkan ke MangoByte")

    @commands.command()
    async def lastmatch(self, ctx, *, matchfilter = None):
        """Mendapatkan informasi pertandingan terakhir player DOTA 2"""
        print("Command lastmatch dialihkan ke MangoByte")

    @commands.command(aliases=["whois"])
    async def profile(self, ctx, player = None):
        """Menampilkan informasi profile player DOTA 2.
        Jika kamu tidak mengisi `DotaPlayer` aku hanya akan mengambil id steam milik mu yang telah di set.
        `DotaPlayer` hanya dapat di isi dengan nomor id steam32 atau steam64, atau @mention member jika dia sudah melakukan set id steam.
        """
        print("Command profile dialihkan ke MangoByte")

    @commands.command()
    async def firstmatch(self, ctx, *, matchfilter = None):
        """Mendapatkan informasi pertandingan pertama kalinya pada saat player bermain DOTA 2"""
        print("Command firstmatch dialihkan ke MangoByte")

    @commands.command()
    async def match(self, ctx, match_id : int):
        """Melihat ringkasan pertandingan dota dengan id yang diberikan"""
        print("Command match dialihkan ke MangoByte")

    @commands.command()
    async def matchstory(self, ctx, match_id : int, perspective=None):
        """Mengetahui alur cerita dari pertandingan
        Alur cerita ditentukan dari perspective sebegai:
        • radiant
        • dire
        """
        print("Command matchstory dialihkan ke MangoByte")

    @commands.command()
    async def skillbuild(self, ctx, match_id : int):
        """Melihat ability upgrade dalam pertandingan"""
        print("Command skillbuild dialihkan ke MangoByte")

    @commands.command(aliases=["recentmatches", "recent"])
    async def matches(self, ctx, *, matchfilter = None):
        """Menampilkan list pertandinga DOTA 2 mu dalam bentuk gambar
        Tanggal/waktu berdasarkan server tempat permainan dimainkan, dan mungkin tidak akan sesuai dengan zona waktu mu
        **Catatan:**
        Kamu dapat menampilkan pertandingan mu hingga 100.
        jika kamu tidak memasukan `matchfilter` aku akan memberikan 10 list pertandingan
        **Contoh :**
        acx matches 20
        acx matches @member mid witch doctor ranked
        acx matches natures prophet
        acx matches @member riki"""
        print("Command matches dialihkan ke MangoByte")

    @commands.command()
    async def matchids(self, ctx, *, matchfilter = None):
        """Mendapatkan list pertandingan DOTA 2
        Aku akan memberikan 10 match id secara default, jika kamu tidak mengisi matchfilter
        **Contoh:**
        acx matchids 20
        acx matchids @member mid witch doctor ranked
        acx matchids natures prophet
        acx matchids @member riki"""
        print("Command matchids dialihkan ke MangoByte")

    @commands.command()
    async def aboutdotabase(self, ctx):
        """Tentang DotaBase.py dalam kategori ini"""
        msg = "Semua command ini dalam kategori *`DotaBase.py`*\n"
        msg += "*`{}help DotaBase`*\n".format(ctx.prefix)
        msg += "Merupakan karya original dari [MangoByte](https://github.com/mdiller/MangoByte/).\n\n"
        msg += "Saya (owner/pemilik) dari bot ini hanya melakukan re-work karya dari [MangoByte](https://github.com/mdiller/MangoByte/)\n"
        msg += "agar dapat berfungsi dengan baik dalam bot Acinonyx dan mengubah beberapa bahasa dari inggris ke indonesia.\n\n"
        msg += "Invite bot MangoByte link dibawah ini.\n"
        msg += "**[MangoByte Official Bot](https://discord.com/oauth2/authorize?permissions=314432&scope=bot&client_id=213476188037971968)**"
        em = discord.Embed(color = 0XFF8C00, description = msg)
        em.set_author(name = "DotaBase DISCLAIMER")
        em.set_footer(text = "{}".format(ctx.author), icon_url = "{}".format(ctx.author.avatar_url))
        await ctx.send(embed = em)
from discord.ext import commands
from Cogs import Utils
from Cogs import Settings
def setup(bot):
settings = bot.get_cog("Settings")
bot.add_cog(DotaBase(bot, settings))
class DotaBase(commands.Cog):
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
@commands.command()
async def hero(self, ctx, *, hero = None):
"""Mencari informasi seputar hero DOTA 2.
Kamu dapat menggunakan command ini dengan option:
• Nama hero
• Hero ID
**Contoh**
• acx hero sf
• acx hero inker
• acx hero furi"""
print("Command hero dialihkan ke MangoByte")
@commands.command()
async def item(self, ctx, *, item = None):
"""Mencari informasi seputar item DOTA 2
**Contoh:**
• acx item shadow blade
• acx item tango"""
print("Command item dialihkan ke MangoByte")
@commands.command()
async def ability(self, ctx, ability = None):
"""Mencari informasi seputar ability hero DOTA 2
**Contoh:**
• acx ability rocket flare
• acx ability laser
• acx ability sprout"""
print("Command ability dialihkan ke MangoByte")
@commands.command()
async def talents(self, ctx, *, hero = None):
"""Mencari informasi talent dari hero DOTA 2.
Kamu dapat menggunakan command ini dengan option:
• Nama hero
• Hero ID
**Contoh:**
`acx talents shadow fiend`"""
print("Command talent dialihkan ke MangoByte")
@commands.command()
async def lore(self, ctx, *, name = None):
"""Mencari lore/knowledge (pengetahuan) dari hero, ability, item dalam game DOTA 2
Jika tidak memasukan [name] aku akan memberikan secara random
**Contoh:**
• acx lore bristleback
• acx lore shadow blade
• acx lore venomous gale"""
print("Command lore dialihkan ke MangoByte")
@commands.command()
async def leveledstats(self, ctx, *, hero = None):
"""Melihat informasi hero DOTA 2 pada level yang ditentukan.
Jika tidak memasukan level, aku akan memberikan status hero level 1
**Contoh:**
acx leveledstats tinker
acx leveledstats shaker lvl 2
acx leveledstats level 28 shaman"""
print("Command leveledstats dialihkan ke MangoByte")
@commands.command()
async def courage(self, ctx, *, hero = None):
"""Kamu ingin menantang teman mu?
Membuat tantangan dengan random build hero DOTA 2
(atau kamu dapat memilihi hero) dan random set item
**Contoh:**
acx courage
acx courage shadow fiend"""
print("Command courage dialihkan ke MangoByte")
@commands.command()
async def herotable(self, ctx, *, table_args = None):
"""Menampilkan urutan table dari hero DOTA 2 dan statusnya.
Table statistik hero menampilkan status nilai tertinggi yang ditentukan
**Contoh:**
acx herotable dps
acx herotable health lvl 30
acx herotable attack speed level 21 descending
"""
print("Command herotable dialihkan ke MangoByte")
@commands.command(aliases=["neutrals", "neutraltier"])
async def neutralitems(self, ctx, *, tier = None):
"""Menampilkan semua neutral item DOTA 2
Jika kamu memasukan tier yang ditentukan, aku akan menampilkan item dalam tier tersebut beserta namanya
**Contoh:**
acx neutralitems
acx neutralitems tier 5
acx neutralitems 3"""
print("Command herotable dialihkan ke MangoByte")
@commands.command(aliases=["aghs", "ags", "aghanims", "scepter", "shard"])
async def aghanim(self, ctx, *, name = None):
"""Melihat aghanim upgrade untuk hero atau ability DOTA 2"""
print("Command aghanim dialihkan ke MangoByte")
@commands.command()
async def blog(self, ctx):
"""Berita terbaru seputar DOTA 2"""
print("Command aghanim dialihkan ke MangoByte")
@commands.command(aliases=["fuse", "fuze", "fuzeheroes"])
async def fuseheroes(self, ctx, *, heroes = None):
"""Lihatlah apa yang terjadi jika kamu menggabungkan 2 hero DOTA 2
Jika kamu tidak memasukan hero, aku akan memberikan secara random
**Contoh:**
acx fuseheroes axe chen"""
print("Command fuseheros dialihkan ke MangoByte")
@commands.command()
async def laning(self, ctx, match_id = None):
"""Membuat lanning dalam bentuk gif
Jika kamu tidak memasukan match_id, aku akan mengambil data lastmatch dari akun mu yang telah terhubung dengan Acinonyx"""
print("Command laning dialihkan ke MangoByte")
@commands.command()
async def blog(self,ctx):
"""Berita terbaru seputar DOTA 2"""
print("Command laning dialihkan ke MangoByte")
@commands.command()
async def lastmatch(self, ctx, *, matchfilter = None):
"""Mendapatkan informasi pertandingan terakhir player DOTA 2"""
print("Command lastmatch dialihkan ke MangoByte")
@commands.command(aliases=["whois"])
async def profile(self, ctx, player = None):
"""Menampilkan informasi profile player DOTA 2.
Jika kamu tidak mengisi `DotaPlayer` aku hanya akan mengambil id steam milik mu yang telah di set.
`DotaPlayer` hanya dapat di isi dengan nomor id steam32 atau steam64, atau @mention member jika dia sudah melakukan set id steam.
"""
print("Command profile dialihkan ke MangoByte")
@commands.command()
async def firstmatch(self, ctx, *, matchfilter = None):
"""Mendapatkan informasi pertandingan pertama kalinya pada saat player bermain DOTA 2"""
print("Command firstmatch dialihkan ke MangoByte")
@commands.command()
async def match(self, ctx, match_id : int):
"""Melihat ringkasan pertandingan dota dengan id yang diberikan"""
print("Command match dialihkan ke MangoByte")
@commands.command()
async def matchstory(self, ctx, match_id : int, perspective=None):
"""Mengetahui alur cerita dari pertandingan
Alur cerita ditentukan dari perspective sebegai:
• radiant
• dire
"""
print("Command matchstory dialihkan ke MangoByte")
@commands.command()
async def skillbuild(self, ctx, match_id : int):
"""Melihat ability upgrade dalam pertandingan"""
print("Command skillbuild dialihkan ke MangoByte")
@commands.command(aliases=["recentmatches", "recent"])
async def matches(self, ctx, *, matchfilter = None):
"""Menampilkan list pertandinga DOTA 2 mu dalam bentuk gambar
Tanggal/waktu berdasarkan server tempat permainan dimainkan, dan mungkin tidak akan sesuai dengan zona waktu mu
**Catatan:**
Kamu dapat menampilkan pertandingan mu hingga 100.
jika kamu tidak memasukan `matchfilter` aku akan memberikan 10 list pertandingan
**Contoh :**
acx matches 20
acx matches @member mid witch doctor ranked
acx matches natures prophet
acx matches @member riki"""
print("Command matches dialihkan ke MangoByte")
@commands.command()
async def matchids(self, ctx, *, matchfilter = None):
"""Mendapatkan list pertandingan DOTA 2
Aku akan memberikan 10 match id secara default, jika kamu tidak mengisi matchfilter
**Contoh:**
acx matchids 20
acx matchids @member mid witch doctor ranked
acx matchids natures prophet
acx matchids @member riki"""
print("Command matchids dialihkan ke MangoByte")
@commands.command()
async def aboutdotabase(self, ctx):
"""Tentang DotaBase.py dalam kategori ini"""
msg = "Semua command ini dalam kategori *`DotaBase.py`*\n"
msg += "*`{}help DotaBase`*\n".format(ctx.prefix)
msg += "Merupakan karya original dari [MangoByte](https://github.com/mdiller/MangoByte/).\n\n"
msg += "Saya (owner/pemilik) dari bot ini hanya melakukan re-work karya dari [MangoByte](https://github.com/mdiller/MangoByte/)\n"
msg += "agar dapat berfungsi dengan baik dalam bot Acinonyx dan mengubah beberapa bahasa dari inggris ke indonesia.\n\n"
msg += "Invite bot MangoByte link dibawah ini.\n"
msg += "**[MangoByte Official Bot](https://discord.com/oauth2/authorize?permissions=314432&scope=bot&client_id=213476188037971968)**"
em = discord.Embed(color = 0XFF8C00, description = msg)
em.set_author(name = "DotaBase DISCLAIMER")
em.set_footer(text = "{}".format(ctx.author), icon_url = "{}".format(ctx.author.avatar_url))
await ctx.send(embed = em) | id | 0.737721 | Mencari informasi seputar hero DOTA 2.
Kamu dapat menggunakan command ini dengan option:
• Nama hero
• Hero ID
**Contoh**
• acx hero sf
• acx hero inker
• acx hero furi Mencari informasi seputar item DOTA 2
**Contoh:**
• acx item shadow blade
• acx item tango Mencari informasi seputar ability hero DOTA 2
**Contoh:**
• acx ability rocket flare
• acx ability laser
• acx ability sprout Mencari informasi talent dari hero DOTA 2.
Kamu dapat menggunakan command ini dengan option:
• Nama hero
• Hero ID
**Contoh:**
`acx talents shadow fiend` Mencari lore/knowledge (pengetahuan) dari hero, ability, item dalam game DOTA 2
Jika tidak memasukan [name] aku akan memberikan secara random
**Contoh:**
• acx lore bristleback
• acx lore shadow blade
• acx lore venomous gale Melihat informasi hero DOTA 2 pada level yang ditentukan.
Jika tidak memasukan level, aku akan memberikan status hero level 1
**Contoh:**
acx leveledstats tinker
acx leveledstats shaker lvl 2
acx leveledstats level 28 shaman Kamu ingin menantang teman mu?
Membuat tantangan dengan random build hero DOTA 2
(atau kamu dapat memilihi hero) dan random set item
**Contoh:**
acx courage
acx courage shadow fiend Menampilkan urutan table dari hero DOTA 2 dan statusnya.
Table statistik hero menampilkan status nilai tertinggi yang ditentukan
**Contoh:**
acx herotable dps
acx herotable health lvl 30
acx herotable attack speed level 21 descending Menampilkan semua neutral item DOTA 2
Jika kamu memasukan tier yang ditentukan, aku akan menampilkan item dalam tier tersebut beserta namanya
**Contoh:**
acx neutralitems
acx neutralitems tier 5
acx neutralitems 3 Melihat aghanim upgrade untuk hero atau ability DOTA 2 Berita terbaru seputar DOTA 2 Lihatlah apa yang terjadi jika kamu menggabungkan 2 hero DOTA 2
Jika kamu tidak memasukan hero, aku akan memberikan secara random
**Contoh:**
acx fuseheroes axe chen Membuat lanning dalam bentuk gif
Jika kamu tidak memasukan match_id, aku akan mengambil data lastmatch dari akun mu yang telah terhubung dengan Acinonyx Berita terbaru seputar DOTA 2 Mendapatkan informasi pertandingan terakhir player DOTA 2 Menampilkan informasi profile player DOTA 2.
Jika kamu tidak mengisi `DotaPlayer` aku hanya akan mengambil id steam milik mu yang telah di set.
`DotaPlayer` hanya dapat di isi dengan nomor id steam32 atau steam64, atau @mention member jika dia sudah melakukan set id steam. Mendapatkan informasi pertandingan pertama kalinya pada saat player bermain DOTA 2 Melihat ringkasan pertandingan dota dengan id yang diberikan Mengetahui alur cerita dari pertandingan
Alur cerita ditentukan dari perspective sebegai:
• radiant
• dire Melihat ability upgrade dalam pertandingan Menampilkan list pertandinga DOTA 2 mu dalam bentuk gambar
Tanggal/waktu berdasarkan server tempat permainan dimainkan, dan mungkin tidak akan sesuai dengan zona waktu mu
**Catatan:**
Kamu dapat menampilkan pertandingan mu hingga 100.
jika kamu tidak memasukan `matchfilter` aku akan memberikan 10 list pertandingan
**Contoh :**
acx matches 20
acx matches @member mid witch doctor ranked
acx matches natures prophet
acx matches @member riki Mendapatkan list pertandingan DOTA 2
Aku akan memberikan 10 match id secara default, jika kamu tidak mengisi matchfilter
**Contoh:**
acx matchids 20
acx matchids @member mid witch doctor ranked
acx matchids natures prophet
acx matchids @member riki Tentang DotaBase.py dalam kategori ini | 2.751292 | 3 |
source/test/HelloWorld.py | Rubix982/Digital-Bibliography-Library-Project---The-DBLP-Data-Structure-Project | 0 | 6615615 | def printHello(a, b):
print("Hello, world!"); | def printHello(a, b):
print("Hello, world!"); | none | 1 | 2.072486 | 2 | |
translator/note.py | andmatand/midi-to-pico8 | 54 | 6615616 | import math
from . import MIDI_TO_PICO8_PITCH_SUBTRAHEND
class Note:
    """One note, holding both its MIDI source data and its PICO-8 tracker form."""

    def __init__(self, event=None):
        """Initialize from an optional MIDI note event.

        Args:
            event: object exposing .pitch, .channel and .velocity (a MIDI
                note-on event), or None for an empty/silent note.
        """
        # MIDI properties
        self.midiDuration = 0
        self.midiPitch = None
        self.midiChannel = None
        self.midiVelocity = None
        # PICO-8 tracker properties
        self.pitch = None
        self.volume = 0
        self.waveform = None
        self.effect = None
        self.length = 0
        # Idiom fix: compare to None with `is not`, not `!=`.
        if event is not None:
            self.midiPitch = event.pitch
            self.midiChannel = event.channel
            self.midiVelocity = event.velocity
            # Shift the MIDI pitch down into PICO-8's note range.
            self.pitch = event.pitch - MIDI_TO_PICO8_PITCH_SUBTRAHEND
            # Map MIDI velocity 0..127 onto PICO-8 volume 0..7.
            self.volume = math.floor((event.velocity / 127) * 7)
| import math
from . import MIDI_TO_PICO8_PITCH_SUBTRAHEND
class Note:
def __init__(self, event=None):
# MIDI properties
self.midiDuration = 0
self.midiPitch = None
self.midiChannel = None
self.midiVelocity = None
# PICO-8 tracker properties
self.pitch = None
self.volume = 0
self.waveform = None
self.effect = None
self.length = 0
if event != None:
self.midiPitch = event.pitch
self.midiChannel = event.channel
self.midiVelocity = event.velocity
self.pitch = event.pitch - MIDI_TO_PICO8_PITCH_SUBTRAHEND
self.volume = math.floor((event.velocity / 127) * 7)
| en | 0.558018 | # MIDI properties # PICO-8 tracker properties | 2.896578 | 3 |
musx/tools.py | ricktaube/musx | 9 | 6615617 | <reponame>ricktaube/musx
###############################################################################
"""
An assortmant of music composition tools for working with randomness,
rescaling, arithmetic, etc.
"""
import types
import math
import random
import subprocess
__pdoc__ = {
'parse_string_sequence': False
}
def isgen(x) -> bool:
    """Returns True if x is a generator (an object produced by a generator function)."""
    return isinstance(x, types.GeneratorType)
def isfunc(x) -> bool:
    """Returns True if x is a user-defined function (includes lambdas, excludes builtins)."""
    return isinstance(x, types.FunctionType)
def isseq(x) -> bool:
    """Returns True if x is a list or tuple."""
    return isinstance(x, (list, tuple))
def isnum(x, others=None) -> bool:
    """
    Returns True if x is an int or float, or one of the additional
    number types passed in via *others* (e.g. Fraction, complex).
    """
    if isinstance(x, (int, float)):
        return True
    # fall back to the caller-supplied type (or tuple of types), if any
    return isinstance(x, others) if others else False
def rescale(x, x1, x2, y1, y2, mode='lin'):
    """
    Maps the value x ranging between x1 and x2 into a proportional value
    between y1 and y2.

    Parameters
    ----------
    x : int | float
        The value to rescale.
    x1 : int | float
        The lower limit of input range.
    x2 : int | float
        The upper limit of input range.
    y1 : int | float
        The lowest limit of output range.
    y2 : int | float
        The upper limit of output range.
    mode : string
        'lin' for linear scaling, 'cos' for cosine scaling, 'exp' for
        exponential scaling and '-exp' for inverted exponential scaling.

    Returns
    -------
    The rescaled value.
    """
    # clamp values that fall outside the input range
    if x > x2:
        return y2
    if x <= x1:
        return y1
    # normalized position of x within [x1, x2]: moves from 0 to 1
    t = (x - x1) / (x2 - x1)
    if mode == 'lin':
        return y1 * (1 - t) + y2 * t
    if mode == 'cos':
        # half-cosine easing between the two endpoints
        c = (1 - math.cos(t * math.pi)) / 2
        return y1 * (1 - c) + y2 * c
    if mode in ('exp', '-exp'):
        # a base that yields a slope that is not too steep or shallow
        base = 512
        if mode == 'exp':
            return y1 + ((y2 - y1) / base) * math.pow(base, t)
        base = 1 / base
        return y1 + ((y2 - y1) / (base - 1)) * (math.pow(base, t) - 1)
    raise ValueError(f"mode {mode} is not 'lin', 'cos', 'exp', or '-exp'.")
def frange(start, stop=None, step=None):
    """
    Returns an iterator producing a series of floats from start (inclusive)
    to stop (exclusive) by step.

    Parameters
    ----------
    frange can be called with one, two or three arguments:

    * frange(stop)
    * frange(start, stop)
    * frange(start, stop, step)

    start : int | float
        The starting value of the sequence.
    stop : int | float
        The exclusive upper (or lower) bounds of the iteration.
    step : int | float
        The increment (or decrement) to move by. Defaults to 1.

    Returns
    -------
    Values ranging from start to stop (exclusive).
    """
    if stop is None:
        # single-argument form: frange(stop)
        stop, start = start, 0.0
    else:
        start = start + 0.0          # coerce to float
    step = 1.0 if step is None else step + 0.0
    origin = start                   # remember the initial offset
    n = 1
    current = start
    # a zero step (or one pointing away from stop) yields nothing
    while step != 0:
        if (step > 0 and current >= stop) or (step < 0 and current <= stop):
            break
        yield current
        # recompute from the origin rather than accumulating, which
        # avoids cumulative floating-point drift
        current = origin + step * n
        n += 1
def rand(limit):
    """
    Returns a generator that produces uniform random numbers below limit.

    Parameters
    ----------
    limit : int | float
        Sets the exclusive upper bound for random selection. If limit
        is an integer then integer values are returned otherwise float
        values are returned.
    """
    if isinstance(limit, int):
        def irand():
            while True:
                yield random.randrange(limit)
        return irand()
    elif isinstance(limit, float):
        def frand():
            while True:
                yield random.random() * limit
        return frand()
    # bug fix: the original message was not an f-string, so the literal
    # text "{limit}" appeared instead of the offending value
    raise TypeError(f"limit not an int or float: {limit}.")
def quantize(number, stepsize):
    """Rounds *number* to the nearest multiple of *stepsize*."""
    nsteps = math.floor(number / stepsize + .5)
    return nsteps * stepsize
def deltas(numbers):
    """
    Returns the changes between consecutive numbers in a list of numbers.

    Example: deltas([1,5,3]) -> [4, -2]

    Parameters
    ----------
    numbers : list
        The list of numbers to process.

    Returns
    -------
    A list containing the differences between a series of numbers.
    """
    # zip truncates at the shorter sequence, so pairing numbers[1:] with
    # numbers yields (n1, n0), (n2, n1), ...; the original sliced a full
    # copy with numbers[:] which was needless work
    return [l - r for l, r in zip(numbers[1:], numbers)]
def _expl(powr, y0, y1, base):
if powr < 0:
powr=0.0
elif powr > 1:
powr = 1.0
if base == 1.0:
return y0 + (powr * (y1 - y0))
return y0 + ( ( (y1 - y0) / (base - 1.0) ) * ((base ** powr) - 1.0) )
def _explseg(i, length, summ, powr):
    # size of the i-th of `length` exponentially scaled segments summing to `summ`
    if i >= length:
        i += -1
    # positions of this segment's two endpoints on the normalized 0..1 curve
    x1 = (i+1) / length
    x2 = i / length
    # segment size is the difference between consecutive points on the curve
    f1 = _expl(x1, 0.0, 1.0, powr)
    f2 = 0.0 if (i <= 0) else _expl( x2, 0.0, 1.0, powr)
    return summ * (f1 - f2)
def explsegs(num, summ, base=2):
    """Divides summ into num segments whose sizes follow an exponential curve."""
    return [_explseg(i, num, summ, base) for i in range(num)]
def _geoseg( i, length, summ, base):
if length == 0:
return 0.0
a = summ * ((1.0 - base) / (1.0 - (base ** length)))
return a * (base ** i)
def geosegs(num, summ, base=2):
    """Divides summ into num segments whose sizes form a geometric series."""
    return [_geoseg(i, num, summ, base) for i in range(num)]
def _map_lists(func, left, right):
if type(left) is list:
if type(right) is list:
assert len(left) == len(right), "lists are different lengths."
return [func(l,r) for l,r in zip(left,right)]
else:
return [func(l,right) for l in left]
else:
if type(right) is list:
return [func(left,r) for r in right]
else:
return func(left,right)
def multiply(left, right):
    """
    List-aware multiplication.

    Left and right can be numbers or lists; if both are lists
    they must be of the same length.
    """
    return _map_lists(lambda a, b: a * b, left, right)
def add(left, right):
    """
    List-aware addition.

    Left and right can be numbers or lists; if both are lists
    they must be of the same length.
    """
    return _map_lists(lambda a, b: a + b, left, right)
def subtract(left, right):
    """
    List-aware subtraction.

    Left and right can be numbers or lists; if both are lists
    they must be of the same length.
    """
    return _map_lists(lambda a, b: a - b, left, right)
def divide(left, right):
    """
    List-aware division.

    Left and right can be numbers or lists; if both are lists
    they must be of the same length.
    """
    return _map_lists(lambda a, b: a / b, left, right)
def _rem(x,y):
#return math.copysign(x % y, x)
mod = x % y
res = math.copysign(mod, x)
return int(res) if isinstance(mod, int) else res
def fit(num, lb, ub, mode='wrap'):
    """
    Forces a number to lie between a lower and upper bound according to mode.

    Parameters
    ----------
    num : int | float
        The number to fit.
    lb : int | float
        The lower bound.
    ub : int | float
        The upper bound.
    mode : 'reflect' | 'limit' | 'wrap'
        If mode is 'reflect' then the min and max boundaries reflect the value back into range.\
        If mode is 'wrap' then num will be the remainder of num % boundaries.

    Returns
    -------
    The value of num coerced to lie within the range lb to ub.

    Raises
    ------
    ValueError if mode is not one of the supported modes.

    Examples
    --------
    ```python
    [fit(i, 0, 10) for i in range(-20, 21, 5)]
    ```
    """
    # normalize bound order so lb <= ub
    if lb > ub:
        ub, lb = lb, ub
    if lb <= num <= ub:
        return num
    # b is the boundary that num exceeded
    b = ub if num > ub else lb
    if mode == 'limit':
        return b
    rng = ub - lb
    if mode == 'reflect':
        # shift num to 0 to compare with range
        # limit num to rng*2 (rising/reflecting)
        num = _rem(num - b, (rng * 2))
        if abs(num) > rng: # in range2
            if num >= 0:
                num = num - (rng * 2)
            else:
                num = num + (rng * 2)
        else:
            num = -num
        return num + b
    if mode == 'wrap':
        # wrap around to the opposite boundary by the overshoot remainder
        return (lb if b == ub else ub) + _rem(num - b, rng)
    raise ValueError(f"{mode} not one of ['reflect', 'limit', 'wrap'].")
'''
(defun fit (number lb ub &optional (mode :reflect))
(when (> lb ub) (rotatef lb ub))
(if (<= lb number ub)
number
(let ((b (if (> number ub) ub lb)) (r (- ub lb)))
(case mode
((:limit) b)
((:reflect)
(let* ((2r (* 2 r))
(v (rem (- number b) 2r)))
(+ (if (> (abs v) r)
(funcall (if (>= v 0) #'- #'+) v 2r)
(- v))
b)))
((:wrap) (+ (if (= b ub) lb ub) (rem (- number b) r)))
(t (error "~s is not :limit, :reflect or :wrap" mode))))))
'''
midiextensions = ('.mid', '.midi')
"""
A tuple of allowable midi file extensions. Defaults to ('.mid', '.midi').
"""
midiplayer = ''
def setmidiplayer(command):
    """
    Assign a shell command (string) that will play a midi file.

    Parameter
    ---------
    command : string
        The shell command that will play a midi file. The midi file
        name is appended to the command when playfile() invokes it.

    Example
    -------
    setmidiplayer('fluidsynth -iq -g1 /Users/taube/SoundFonts/MuseScore_General.sf2')
    """
    # store on the module-level variable consulted by playfile()
    global midiplayer
    midiplayer = command
audioextensions = ('.aiff', '.wav', '.mp3', '.mp4')
"""
A tuple of allowable audio file extensions.
Defaults to ('.aiff', '.wav', '.mp3', '.mp4').
"""
audioplayer = ''
"""
A shell command (string) that accepts one argument,
a pathname (file) to play. See: setaudioplayer().
"""
def setaudioplayer(command):
    """
    Assign a shell command (string) that will play an audio file.

    Parameter
    ---------
    command : string
        The shell command that will play an audio file. The audio file
        name is appended to the command when playfile() invokes it.

    Example
    -------
    setaudioplayer('afplay')
    """
    # store on the module-level variable consulted by playfile()
    global audioplayer
    audioplayer = command
def playfile(file, wait=False):
    """
    Plays a midi or audio file using the shell commands you have specified
    for midiplayer and audioplayer. See: setaudioplayer, setmidiplayer.

    Parameters
    ----------
    file : string
        The file to play.
    wait : bool
        If true playfile waits until the file has finished playing
        before returning, otherwise playfile returns immediately.
    """
    args = []
    kind = ''
    # choose the player command from the file extension
    if file.endswith(midiextensions):
        kind = 'midi'
        if midiplayer:
            args = midiplayer.split() + [file]
    elif file.endswith(audioextensions):
        kind = 'audio'
        if audioplayer:
            args = audioplayer.split() + [file]
    if args:
        # launch the player as a subprocess, optionally blocking until done
        p = subprocess.Popen(args)
        if wait:
            p.wait()
    else:
        # no player configured (or unrecognized extension): print a hint
        help = f"playfile(): Don't know how to play '{file}':"
        if kind:
            help += f" use musx.set{kind}player() to set a shell command to play {kind} files."
        else:
            help += f" file type not found in musx.midiextensions or musx.audioextensions."
        print(help)
def parse_string_sequence(string):
    # split string at blank spaces and expand each ',' repeat token,
    # checking for dangling and undelimited repeats.
    seq = []
    for raw in string.split():
        tok = raw.rstrip(',')
        if not tok:
            # token was only commas, e.g. ", x"
            raise SyntaxError(f"dangling repeat ',' in '{string}'.")
        if ',' in tok:
            # comma embedded inside the token, e.g. "a,b"
            raise SyntaxError(f"undelimited ',' in '{tok}'.")
        # each trailing comma repeats the token one more time
        repeats = len(raw) - len(tok) + 1
        seq.extend([tok] * repeats)
    return seq
| ###############################################################################
"""
An assortmant of music composition tools for working with randomness,
rescaling, arithmetic, etc.
"""
import types
import math
import random
import subprocess
__pdoc__ = {
'parse_string_sequence': False
}
def isgen(x):
"""Returns True if x is a generator."""
return isinstance(x, types.GeneratorType)
def isfunc(x):
"""Returns True if x is a function."""
return isinstance(x, types.FunctionType)
def isseq(x):
"""Returns True if x is a list or tuple."""
return isinstance(x, (list, tuple))
def isnum(x, others=None):
"""
Returns True if x is an int or float of one of the
other number types passed in (e.g. Fraction, complex).
"""
if isinstance(x, (int, float)):
return True
if others:
return isinstance(x, others)
return False
def rescale(x, x1, x2, y1, y2, mode='lin'):
"""
Maps the value x ranging between x1 and x2 into a proportional value
between y1 and y2.
Parameters
----------
x : int | float
The value to rescale.
x1 : int | float
The lower limit of input range.
x2 : int | float
The upper limit of input range.
y1 : int | float
The lowest limit of output range.
y2 : int | float
The upper limit of output range.
mode : string
If mode is 'lin' then linear scaling occurs, 'cos' produces cosine
scaling, 'exp' produces exponential scaling, and '-exp' produces
inverted exponential.
Returns
-------
The rescaled value.
"""
if x > x2: return y2
if x <= x1: return y1
# as x moves x1 to x2 mu moves 0 to 1
mu = (x - x1) / (x2 - x1)
if mode == 'lin':
#return (((y2 - y1) / (x2 - x1)) * (x - x1)) + y1
return (y1 * (1 - mu) + (y2 * mu))
elif mode == 'cos':
mu2 = (1 - math.cos(mu * math.pi)) / 2
return (y1 * (1 - mu2) + y2 * mu2)
elif mode in ['exp','-exp']:
# # http://www.pmean.com/10/ExponentialInterpolation.html
# if y1==0: y1=0.00001
# return y1 * ((y2/y1) ** (mu))
# #https://docs.fincad.com/support/developerfunc/mathref/Interpolation.htm
# if y1==0: y1=0.00001
# m = math.log(y2 / y1) / (x2 - x1)
# k = y1 * math.pow(math.e, -m * x1)
# return k * math.pow(math.e, m * x)
# a base that yields a slope that is not too steep or shallow...
b = 512
if mode == 'exp':
return y1 + ( ((y2 - y1) / b) * math.pow(b, mu) )
b = 1/b
return y1 + ( ((y2 - y1) / (b - 1)) * (math.pow(b, mu) - 1) )
raise ValueError(f"mode {mode} is not 'lin', 'cos', 'exp', or '-exp'.")
def frange(start, stop=None, step=None):
"""
Returns an iterator produceing a series of floats from start (inclusive)
to stop (exclusive) by step.
Parameters
----------
frange can be called with one, two or three arguments:
* frange(stop)
* frange(start, stop)
* frange(start, stop, step)
start : int | float
The starting value of the sequence.
stop : int | float
The exclusive upper (or lower) bounds of the iteration.
step : int | float
The increment (or decrement) to move by. Defaults to 1.
Returns
-------
Values ranging from start to stop (exclusive).
"""
if stop is None:
stop = start
start = 0.0
else:
start += 0.0
if step is None:
step = 1.0
else:
step += 0.0
i = 1
init = start # initial start value (offset)
# no iteration if step is 0 or reversed direction of start -> stop.
while step != 0:
if step > 0 and start >= stop:
break
if step < 0 and start <= stop:
break
yield start
# start += step # arrg! cumulative addition yields wonky numbers
start = (step * i) + init # scale by step and shift to start
i += 1
def rand(limit):
"""
Returns a generator that produces uniform random numbers below limit.
Parameters
----------
limit : int | float
Sets the exlusive upper bound for random selection. If limit
is an integer then integer values are returned otherwise float
values are returned.
"""
if isinstance(limit, int):
def irand():
while True:
yield random.randrange(limit)
return irand()
elif isinstance(limit, float):
def frand():
while True:
yield random.random() * limit
return frand()
raise TypeError("limit not an int or float: {limit}.")
def quantize(number, stepsize):
"""Quantizes number to a given step size."""
return math.floor( (number/stepsize) + .5) * stepsize
def deltas(numbers):
"""
Returns the changes between consecutive numbers in a list of numbers.
Example: deltas([1,5,3]) -> [4, -2]
Parameters
----------
numbers : list
The list of numbers to process.
Returns
-------
A list containing the differences between a series of numbers.
"""
return [l - r for l,r in zip(numbers[1:], numbers[:])]
def _expl(powr, y0, y1, base):
if powr < 0:
powr=0.0
elif powr > 1:
powr = 1.0
if base == 1.0:
return y0 + (powr * (y1 - y0))
return y0 + ( ( (y1 - y0) / (base - 1.0) ) * ((base ** powr) - 1.0) )
def _explseg(i, length, summ, powr):
if i >= length:
i += -1
x1 = (i+1) / length
x2 = i / length
f1 = _expl(x1, 0.0, 1.0, powr)
f2 = 0.0 if (i <= 0) else _expl( x2, 0.0, 1.0, powr)
return summ * (f1 - f2)
def explsegs(num, summ, base=2):
segs = []
for i in range(num):
segs.append(_explseg(i, num, summ, base))
return segs
def _geoseg( i, length, summ, base):
if length == 0:
return 0.0
a = summ * ((1.0 - base) / (1.0 - (base ** length)))
return a * (base ** i)
def geosegs(num, summ, base=2):
segs = []
for i in range(num):
segs.append(_geoseg(i, num, summ, base))
return segs
def _map_lists(func, left, right):
if type(left) is list:
if type(right) is list:
assert len(left) == len(right), "lists are different lengths."
return [func(l,r) for l,r in zip(left,right)]
else:
return [func(l,right) for l in left]
else:
if type(right) is list:
return [func(left,r) for r in right]
else:
return func(left,right)
def multiply(left, right):
"""
List-aware multiplication.
Left and right can be numbers or lists, if both are lists
they must be of the same length.
"""
return _map_lists(lambda l,r: l*r, left, right)
def add(left, right):
"""
List-aware addition.
Left and right can be numbers or lists, if both are lists
they must be of the same length.
"""
return _map_lists(lambda l,r: l+r, left, right)
def subtract(left, right):
"""
List-aware subtraction.
Left and right can be numbers or lists, if both are lists
they must be of the same length.
"""
return _map_lists(lambda l,r: l-r, left, right)
def divide(left, right):
"""
List-aware division.
Left and right can be numbers or lists, if both are lists
they must be of the same length.
"""
return _map_lists(lambda l,r: l/r, left, right)
def _rem(x,y):
#return math.copysign(x % y, x)
mod = x % y
res = math.copysign(mod, x)
return int(res) if isinstance(mod, int) else res
def fit(num, lb, ub, mode='wrap'):
"""
Forces a number to lie between a lower and upper bound according to mode.
Parameters
----------
num : int | float
The number to fit.
lb : int | float
The lower bound.
ub : int | float
The upper bound.
mode : 'reflect' | 'limit' | 'wrap'
If mode is 'reflect' then the min and max boundaries reflect the value back into range.\
If mode is 'wrap' then num will be the remainder of num % boundaries.
Returns
-------
The value of num coerced to lie within the range lb to ub.
Raises
------
ValueError if mode is not one of the supported modes.
Examples
--------
```python
[fit(i, 0, 10) for i in range(-20, 21, 5)]
```
"""
if lb > ub:
ub, lb = lb, ub
if lb <= num <= ub:
return num
b = ub if num > ub else lb
if mode == 'limit':
return b
rng = ub - lb
if mode == 'reflect':
# shift num to 0 to compare with range
# limit num to rng*2 (rising/reflecting)
num = _rem(num - b, (rng * 2))
if abs(num) > rng: # in range2
if num >= 0:
num = num - (rng * 2)
else:
num = num + (rng * 2)
else:
num = -num
return num + b
if mode == 'wrap':
return (lb if b == ub else ub) + _rem(num - b, rng)
raise ValueError(f"{mode} not one of ['reflect', 'limit', 'wrap'].")
'''
(defun fit (number lb ub &optional (mode :reflect))
(when (> lb ub) (rotatef lb ub))
(if (<= lb number ub)
number
(let ((b (if (> number ub) ub lb)) (r (- ub lb)))
(case mode
((:limit) b)
((:reflect)
(let* ((2r (* 2 r))
(v (rem (- number b) 2r)))
(+ (if (> (abs v) r)
(funcall (if (>= v 0) #'- #'+) v 2r)
(- v))
b)))
((:wrap) (+ (if (= b ub) lb ub) (rem (- number b) r)))
(t (error "~s is not :limit, :reflect or :wrap" mode))))))
'''
midiextensions = ('.mid', '.midi')
"""
A tuple of allowable midi file extensions. Defaults to ('.mid', '.midi').
"""
midiplayer = ''
def setmidiplayer(command):
"""
Assign a shell command (string) that will play a midi file.
Parameter
---------
command : string
The shell command that will play a midi file.
Example
-------
setmidiplayer('fluidsynth -iq -g1 /Users/taube/SoundFonts/MuseScore_General.sf2')
"""
global midiplayer
midiplayer = command
audioextensions = ('.aiff', '.wav', '.mp3', '.mp4')
"""
A tuple of allowable audio file extensions.
Defaults to ('.aiff', '.wav', '.mp3', '.mp4').
"""
audioplayer = ''
"""
A shell command (string) that accepts one argument,
a pathname (file) to play. See: setaudioplayer().
"""
def setaudioplayer(command):
"""
Assign a shell command (string) that will play an audio file.
Parameter
---------
command : string
The shell command that will play an audio file.
Example
-------
setaudioplayer('afplay')
"""
global audioplayer
audioplayer = command
def playfile(file, wait=False):
"""
Plays a midi or audio file using the shell commands you have specified
for midiplayer and audioplayer. See: setaudioplayer, setmidiplayer.
Parameters
----------
file : string
The file to play.
wait : bool
If true playfile waits until the file has finished playing
before returning, otherwise playfile returns immediately.
"""
args = []
kind = ''
if file.endswith(midiextensions):
kind = 'midi'
if midiplayer:
args = midiplayer.split() + [file]
elif file.endswith(audioextensions):
kind = 'audio'
if audioplayer:
args = audioplayer.split() + [file]
if args:
p = subprocess.Popen(args)
if wait:
p.wait()
else:
help = f"playfile(): Don't know how to play '{file}':"
if kind:
help += f" use musx.set{kind}player() to set a shell command to play {kind} files."
else:
help += f" file type not found in musx.midiextensions or musx.audioextensions."
print(help)
def parse_string_sequence(string):
# split string at blank spaces, replace each repeat token ',' by
# repeated value, check for dangling and undelimited repeats.
seq = []
for raw in string.split():
tok = raw.rstrip(',')
if tok:
if ',' not in tok:
for _ in range(len(raw) - len(tok) + 1):
seq.append(tok)
else:
raise SyntaxError(f"undelimited ',' in '{tok}'.")
else:
raise SyntaxError(f"dangling repeat ',' in '{string}'.")
return seq | en | 0.653624 | ############################################################################### An assortmant of music composition tools for working with randomness, rescaling, arithmetic, etc. Returns True if x is a generator. Returns True if x is a function. Returns True if x is a list or tuple. Returns True if x is an int or float of one of the other number types passed in (e.g. Fraction, complex). Maps the value x ranging between x1 and x2 into a proportional value between y1 and y2. Parameters ---------- x : int | float The value to rescale. x1 : int | float The lower limit of input range. x2 : int | float The upper limit of input range. y1 : int | float The lowest limit of output range. y2 : int | float The upper limit of output range. mode : string If mode is 'lin' then linear scaling occurs, 'cos' produces cosine scaling, 'exp' produces exponential scaling, and '-exp' produces inverted exponential. Returns ------- The rescaled value. # as x moves x1 to x2 mu moves 0 to 1 #return (((y2 - y1) / (x2 - x1)) * (x - x1)) + y1 # # http://www.pmean.com/10/ExponentialInterpolation.html # if y1==0: y1=0.00001 # return y1 * ((y2/y1) ** (mu)) # #https://docs.fincad.com/support/developerfunc/mathref/Interpolation.htm # if y1==0: y1=0.00001 # m = math.log(y2 / y1) / (x2 - x1) # k = y1 * math.pow(math.e, -m * x1) # return k * math.pow(math.e, m * x) # a base that yields a slope that is not too steep or shallow... Returns an iterator produceing a series of floats from start (inclusive) to stop (exclusive) by step. Parameters ---------- frange can be called with one, two or three arguments: * frange(stop) * frange(start, stop) * frange(start, stop, step) start : int | float The starting value of the sequence. stop : int | float The exclusive upper (or lower) bounds of the iteration. step : int | float The increment (or decrement) to move by. Defaults to 1. Returns ------- Values ranging from start to stop (exclusive). 
# initial start value (offset) # no iteration if step is 0 or reversed direction of start -> stop. # start += step # arrg! cumulative addition yields wonky numbers # scale by step and shift to start Returns a generator that produces uniform random numbers below limit. Parameters ---------- limit : int | float Sets the exlusive upper bound for random selection. If limit is an integer then integer values are returned otherwise float values are returned. Quantizes number to a given step size. Returns the changes between consecutive numbers in a list of numbers. Example: deltas([1,5,3]) -> [4, -2] Parameters ---------- numbers : list The list of numbers to process. Returns ------- A list containing the differences between a series of numbers. List-aware multiplication. Left and right can be numbers or lists, if both are lists they must be of the same length. List-aware addition. Left and right can be numbers or lists, if both are lists they must be of the same length. List-aware subtraction. Left and right can be numbers or lists, if both are lists they must be of the same length. List-aware division. Left and right can be numbers or lists, if both are lists they must be of the same length. #return math.copysign(x % y, x) Forces a number to lie between a lower and upper bound according to mode. Parameters ---------- num : int | float The number to fit. lb : int | float The lower bound. ub : int | float The upper bound. mode : 'reflect' | 'limit' | 'wrap' If mode is 'reflect' then the min and max boundaries reflect the value back into range.\ If mode is 'wrap' then num will be the remainder of num % boundaries. Returns ------- The value of num coerced to lie within the range lb to ub. Raises ------ ValueError if mode is not one of the supported modes. 
Examples -------- ```python [fit(i, 0, 10) for i in range(-20, 21, 5)] ``` # shift num to 0 to compare with range # limit num to rng*2 (rising/reflecting) # in range2 (defun fit (number lb ub &optional (mode :reflect)) (when (> lb ub) (rotatef lb ub)) (if (<= lb number ub) number (let ((b (if (> number ub) ub lb)) (r (- ub lb))) (case mode ((:limit) b) ((:reflect) (let* ((2r (* 2 r)) (v (rem (- number b) 2r))) (+ (if (> (abs v) r) (funcall (if (>= v 0) #'- #'+) v 2r) (- v)) b))) ((:wrap) (+ (if (= b ub) lb ub) (rem (- number b) r))) (t (error "~s is not :limit, :reflect or :wrap" mode)))))) A tuple of allowable midi file extensions. Defaults to ('.mid', '.midi'). Assign a shell command (string) that will play a midi file. Parameter --------- command : string The shell command that will play a midi file. Example ------- setmidiplayer('fluidsynth -iq -g1 /Users/taube/SoundFonts/MuseScore_General.sf2') A tuple of allowable audio file extensions. Defaults to ('.aiff', '.wav', '.mp3', '.mp4'). A shell command (string) that accepts one argument, a pathname (file) to play. See: setaudioplayer(). Assign a shell command (string) that will play an audio file. Parameter --------- command : string The shell command that will play an audio file. Example ------- setaudioplayer('afplay') Plays a midi or audio file using the shell commands you have specified for midiplayer and audioplayer. See: setaudioplayer, setmidiplayer. Parameters ---------- file : string The file to play. wait : bool If true playfile waits until the file has finished playing before returning, otherwise playfile returns immediately. # split string at blank spaces, replace each repeat token ',' by # repeated value, check for dangling and undelimited repeats. | 3.03836 | 3 |
cluster_tools/transformations/linear.py | constantinpape/cluster_tools | 28 | 6615618 | #! /bin/python
import os
import sys
import json
import luigi
import nifty.tools as nt
import cluster_tools.utils.volume_utils as vu
import cluster_tools.utils.function_utils as fu
from cluster_tools.cluster_tasks import SlurmTask, LocalTask, LSFTask
from cluster_tools.utils.task_utils import DummyTask
#
# linear transformation tasks
#
class LinearBase(luigi.Task):
    """Luigi base task that applies a linear (a*x + b) intensity
    transformation to a chunked volume, blockwise and in parallel.
    """
    task_name = 'linear'
    src_file = os.path.abspath(__file__)

    # input and output volumes
    input_path = luigi.Parameter()
    input_key = luigi.Parameter()
    output_path = luigi.Parameter()
    output_key = luigi.Parameter()
    # path to a json file holding the transformation coefficients
    transformation = luigi.Parameter()
    # optional mask restricting which voxels are transformed
    mask_path = luigi.Parameter(default='')
    mask_key = luigi.Parameter(default='')
    dependency = luigi.TaskParameter(default=DummyTask())

    @staticmethod
    def default_task_config():
        """Return the default task config extended with output options."""
        # we use this to get also get the common default config
        config = LocalTask.default_task_config()
        config.update({'chunks': None, 'compression': 'gzip'})
        return config

    def requires(self):
        return self.dependency

    def clean_up_for_retry(self, block_list):
        super().clean_up_for_retry(block_list)
        # TODO remove any output of failed blocks because it might be corrupted

    def run_impl(self):
        """Prepare the output dataset and schedule the blockwise jobs."""
        # get the global config and init configs
        shebang, block_shape, roi_begin, roi_end = self.global_config_values()
        self.init(shebang)

        # get shape, dtype and make block config
        with vu.file_reader(self.input_path, 'r') as f:
            shape = f[self.input_key].shape
            dtype = f[self.input_key].dtype

        # load the config
        task_config = self.get_task_config()
        compression = task_config.pop('compression', 'gzip')
        chunks = task_config.pop('chunks', None)
        if chunks is None:
            # default chunking: half the block shape in each dimension
            chunks = tuple(bs // 2 for bs in block_shape)

        if self.output_path != self.input_path:
            # require output dataset
            with vu.file_reader(self.output_path) as f:
                f.require_dataset(self.output_key, shape=shape, chunks=chunks,
                                  compression=compression, dtype=dtype)

        # update the config with input and output paths and keys
        # as well as block shape
        task_config.update({'input_path': self.input_path, 'input_key': self.input_key,
                            'output_path': self.output_path, 'output_key': self.output_key,
                            'mask_path': self.mask_path, 'mask_key': self.mask_key,
                            'block_shape': block_shape, 'transformation': self.transformation})

        if self.n_retries == 0:
            block_list = vu.blocks_in_volume(shape, block_shape, roi_begin, roi_end)
        else:
            # on retry only reprocess the previously failed blocks
            block_list = self.block_list
            self.clean_up_for_retry(block_list)
        self._write_log("scheduled %i blocks to run" % len(block_list))

        # prime and run the jobs
        n_jobs = min(len(block_list), self.max_jobs)
        self.prepare_jobs(n_jobs, block_list, task_config)
        self.submit_jobs(n_jobs)

        # wait till jobs finish and check for job success
        self.wait_for_jobs()
        self.check_jobs(n_jobs)
class LinearLocal(LinearBase, LocalTask):
    """
    Linear intensity transform on local machine
    """
    pass
class LinearSlurm(LinearBase, SlurmTask):
    """
    Linear intensity transform on slurm cluster
    """
    pass
class LinearLSF(LinearBase, LSFTask):
    """
    Linear intensity transform on lsf cluster
    """
    pass
#
# Implementation
#
def _load_transformation(trafo_file, shape):
    """Load linear transformation coefficients from a json file.

    `shape` is the full volume shape, used to validate the per-slice case.
    Returns either {'a': a, 'b': b} or a mapping {z: {'a': a, 'b': b}, ...}
    with integer slice keys.
    """
    with open(trafo_file) as f:
        trafo = json.load(f)
    # for now, we support two different transformation specifications:
    # 1.) global trafo specified as {'a': a, 'b': b}
    # 2.) transformation for each slice, specified as {'1': {'a': a, 'b': b}, ...}
    if len(trafo) == 2:
        assert set(trafo.keys()) == {'a', 'b'}
        fu.log("Found global transformation with values %f, %f" % (trafo['a'], trafo['b']))
    else:
        # one coefficient pair per z-slice; json keys are strings, so cast to int
        assert len(trafo) == shape[0]
        assert all((len(tr) == 2 for tr in trafo.values()))
        trafo = {int(k): v for k, v in trafo.items()}
        fu.log("Found transformation per slice")
    return trafo
def _transform_data(data, a, b, mask=None):
if mask is None:
data = a * data + b
else:
data[mask] = a * data[mask] + b
return data
def _transform_block(ds_in, ds_out, transformation, blocking, block_id, mask=None):
    """Apply the linear transformation to one block and write the result.

    Parameters
    ----------
    ds_in, ds_out : dataset
        Input / output datasets (may be the same object for in-place work).
    transformation : dict
        Either a global {'a': a, 'b': b} trafo or a per-slice mapping
        {z: {'a': a, 'b': b}, ...} (see _load_transformation).
    blocking : nifty blocking over the full volume shape.
    block_id : int
        Id of the block to process.
    mask : optional mask dataset; voxels outside the mask stay untouched.
    """
    fu.log("start processing block %i" % block_id)
    block = blocking.getBlock(block_id)
    bb = vu.block_to_bb(block)

    if mask is not None:
        bb_mask = mask[bb].astype('bool')
        # nothing to do if the block lies completely outside the mask
        if bb_mask.sum() == 0:
            fu.log_block_success(block_id)
            return
    else:
        bb_mask = None

    data = ds_in[bb]
    if len(transformation) == 2:
        # global transformation applied to the whole block
        data = _transform_data(data, transformation['a'], transformation['b'], bb_mask)
    else:
        # per-slice transformation; block coordinates are offset into volume z
        z_offset = block.begin[0]
        for z in range(data.shape[0]):
            trafo = transformation[z + z_offset]
            # bug fix: bb_mask is None when no mask was given; the original
            # indexed bb_mask[z] unconditionally, raising a TypeError here
            z_mask = None if bb_mask is None else bb_mask[z]
            data[z] = _transform_data(data[z], trafo['a'], trafo['b'], z_mask)

    ds_out[bb] = data
    fu.log_block_success(block_id)
def _transform_linear(ds_in, ds_out, transformation, blocking, block_list, mask=None):
    # process all blocks assigned to this job sequentially
    for block_id in block_list:
        _transform_block(ds_in, ds_out, transformation, blocking, block_id, mask)
def linear(job_id, config_path):
    """Job entry point: apply the linear intensity transform to all blocks
    assigned to this job.

    The job config holds input/output path and key, the block shape, the
    block ids to process, the transformation file and an optional mask.
    """
    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)
    with open(config_path, 'r') as f:
        config = json.load(f)
    # read the input config
    input_path = config['input_path']
    input_key = config['input_key']
    block_shape = list(config['block_shape'])
    block_list = config['block_list']
    # read the output config and path to transformation
    output_path = config['output_path']
    output_key = config['output_key']
    trafo_file = config['transformation']
    # load the mask if given; otherwise pass mask=None downstream
    # (the old code left `mask` undefined in that case -> NameError)
    mask_path = config['mask_path']
    mask_key = config['mask_key']
    if mask_path != '':
        assert mask_key != ''
        with vu.file_reader(input_path, 'r') as f:
            in_shape = f[input_key].shape
        mask = vu.load_mask(mask_path, mask_key, in_shape)
    else:
        mask = None
    same_file = input_path == output_path
    in_place = same_file and (input_key == output_key)
    # process the blocks; open one container handle when reading and
    # writing the same file, two handles otherwise
    if same_file:
        with vu.file_reader(input_path) as f:
            ds_in = f[input_key]
            ds_out = ds_in if in_place else f[output_key]
            shape = list(ds_in.shape)
            trafo = _load_transformation(trafo_file, shape)
            blocking = nt.blocking([0, 0, 0], shape, block_shape)
            _transform_linear(ds_in, ds_out, trafo, blocking, block_list, mask)
    else:
        with vu.file_reader(input_path, 'r') as f_in, vu.file_reader(output_path) as f_out:
            ds_in = f_in[input_key]
            ds_out = f_out[output_key]
            shape = list(ds_in.shape)
            trafo = _load_transformation(trafo_file, shape)
            blocking = nt.blocking([0, 0, 0], shape, block_shape)
            _transform_linear(ds_in, ds_out, trafo, blocking, block_list, mask)
    # log success
    fu.log_job_success(job_id)
if __name__ == '__main__':
    # the job config file is the single CLI argument; the job id is encoded
    # in its filename as <task>_<job_id>.<ext>
    path = sys.argv[1]
    assert os.path.exists(path), path
    job_id = int(os.path.split(path)[1].split('.')[0].split('_')[-1])
    linear(job_id, path)
| #! /bin/python
import os
import sys
import json
import luigi
import nifty.tools as nt
import cluster_tools.utils.volume_utils as vu
import cluster_tools.utils.function_utils as fu
from cluster_tools.cluster_tasks import SlurmTask, LocalTask, LSFTask
from cluster_tools.utils.task_utils import DummyTask
#
# linear transformation tasks
#
class LinearBase(luigi.Task):
    """Base class for the blockwise linear intensity transform task.

    Applies ``out = a * in + b`` per block, either with one global (a, b)
    pair or with one pair per z-slice (see ``_load_transformation``).
    """
    task_name = 'linear'
    src_file = os.path.abspath(__file__)
    # input and output volumes
    input_path = luigi.Parameter()
    input_key = luigi.Parameter()
    output_path = luigi.Parameter()
    output_key = luigi.Parameter()
    # path to the json file holding the transformation parameters
    transformation = luigi.Parameter()
    # optional mask; only voxels inside the mask are transformed
    mask_path = luigi.Parameter(default='')
    mask_key = luigi.Parameter(default='')
    dependency = luigi.TaskParameter(default=DummyTask())

    @staticmethod
    def default_task_config():
        # we use this to also get the common default config
        config = LocalTask.default_task_config()
        config.update({'chunks': None, 'compression': 'gzip'})
        return config

    def requires(self):
        return self.dependency

    def clean_up_for_retry(self, block_list):
        super().clean_up_for_retry(block_list)
        # TODO remove any output of failed blocks because it might be corrupted

    def run_impl(self):
        # get the global config and init configs
        shebang, block_shape, roi_begin, roi_end = self.global_config_values()
        self.init(shebang)
        # get shape and dtype of the input dataset
        with vu.file_reader(self.input_path, 'r') as f:
            shape = f[self.input_key].shape
            dtype = f[self.input_key].dtype
        # load the task config
        task_config = self.get_task_config()
        compression = task_config.pop('compression', 'gzip')
        chunks = task_config.pop('chunks', None)
        if chunks is None:
            chunks = tuple(bs // 2 for bs in block_shape)
        # require the output dataset whenever we are not transforming
        # strictly in-place; the previous check compared only the paths, so
        # writing to a *different key in the same file* never created the
        # dataset and the worker failed on `f[output_key]`
        if self.output_path != self.input_path or self.output_key != self.input_key:
            with vu.file_reader(self.output_path) as f:
                f.require_dataset(self.output_key, shape=shape, chunks=chunks,
                                  compression=compression, dtype=dtype)
        # update the config with input and output paths and keys
        # as well as block shape
        task_config.update({'input_path': self.input_path, 'input_key': self.input_key,
                            'output_path': self.output_path, 'output_key': self.output_key,
                            'mask_path': self.mask_path, 'mask_key': self.mask_key,
                            'block_shape': block_shape, 'transformation': self.transformation})
        if self.n_retries == 0:
            block_list = vu.blocks_in_volume(shape, block_shape, roi_begin, roi_end)
        else:
            block_list = self.block_list
            self.clean_up_for_retry(block_list)
        self._write_log("scheduled %i blocks to run" % len(block_list))
        # prime and run the jobs
        n_jobs = min(len(block_list), self.max_jobs)
        self.prepare_jobs(n_jobs, block_list, task_config)
        self.submit_jobs(n_jobs)
        # wait till jobs finish and check for job success
        self.wait_for_jobs()
        self.check_jobs(n_jobs)
class LinearLocal(LinearBase, LocalTask):
    """Run the linear intensity transform as a task on the local machine."""
class LinearSlurm(LinearBase, SlurmTask):
    """Run the linear intensity transform as a task on a slurm cluster."""
    # the previous docstring carried a stale "copy on slurm cluster" line
    # copy-pasted from another task; removed
class LinearLSF(LinearBase, LSFTask):
    """Run the linear intensity transform as a task on an LSF cluster."""
#
# Implementation
#
def _load_transformation(trafo_file, shape):
    """Load a linear intensity transformation from a json file.

    Two specifications are supported:
    1.) a single global transformation: {'a': a, 'b': b}
    2.) one transformation per z-slice: {'0': {'a': a, 'b': b}, ...}

    Returns either the global dict or a dict mapping int z-index to
    per-slice {'a': ..., 'b': ...} dicts.
    """
    with open(trafo_file) as f:
        trafo = json.load(f)
    # discriminate on the keys instead of the length: a per-slice trafo for
    # a volume with exactly two slices also has length 2 and would have
    # crashed the old `len(trafo) == 2` check
    if set(trafo.keys()) == {'a', 'b'}:
        fu.log("Found global transformation with values %f, %f" % (trafo['a'], trafo['b']))
    else:
        # per-slice: must cover every z-slice, each entry holds exactly a, b
        assert len(trafo) == shape[0]
        assert all(len(tr) == 2 for tr in trafo.values())
        trafo = {int(k): v for k, v in trafo.items()}
        fu.log("Found transformation per slice")
    return trafo
def _transform_data(data, a, b, mask=None):
if mask is None:
data = a * data + b
else:
data[mask] = a * data[mask] + b
return data
def _transform_block(ds_in, ds_out, transformation, blocking, block_id, mask=None):
    """Apply the linear transformation to one block and write the result.

    Blocks that do not intersect the (optional) mask are skipped.
    """
    fu.log("start processing block %i" % block_id)
    block = blocking.getBlock(block_id)
    bb = vu.block_to_bb(block)
    if mask is not None:
        bb_mask = mask[bb].astype('bool')
        # nothing to do if the mask does not cover this block at all
        if bb_mask.sum() == 0:
            fu.log_block_success(block_id)
            return
    else:
        bb_mask = None
    data = ds_in[bb]
    # global transformation: a single dict with keys 'a' and 'b'; checking
    # for the key is also correct for volumes with exactly two slices
    if 'a' in transformation:
        data = _transform_data(data, transformation['a'], transformation['b'], bb_mask)
    else:
        # per-slice transformation: block coordinates are global, so offset
        # the local z index by the block origin
        z_offset = block.begin[0]
        for z in range(data.shape[0]):
            trafo = transformation[z + z_offset]
            # guard against a missing mask; the old code indexed
            # `bb_mask[z]` unconditionally and crashed with `None[z]`
            slice_mask = None if bb_mask is None else bb_mask[z]
            data[z] = _transform_data(data[z], trafo['a'], trafo['b'], slice_mask)
    ds_out[bb] = data
    fu.log_block_success(block_id)
def _transform_linear(ds_in, ds_out, transformation, blocking, block_list, mask=None):
    """Sequentially transform every block listed in ``block_list``."""
    for current_block in block_list:
        _transform_block(ds_in, ds_out, transformation, blocking, current_block, mask)
def linear(job_id, config_path):
    """Job entry point: apply the linear intensity transform to all blocks
    assigned to this job.

    The job config holds input/output path and key, the block shape, the
    block ids to process, the transformation file and an optional mask.
    """
    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)
    with open(config_path, 'r') as f:
        config = json.load(f)
    # read the input config
    input_path = config['input_path']
    input_key = config['input_key']
    block_shape = list(config['block_shape'])
    block_list = config['block_list']
    # read the output config and path to transformation
    output_path = config['output_path']
    output_key = config['output_key']
    trafo_file = config['transformation']
    # load the mask if given; otherwise pass mask=None downstream
    # (the old code left `mask` undefined in that case -> NameError)
    mask_path = config['mask_path']
    mask_key = config['mask_key']
    if mask_path != '':
        assert mask_key != ''
        with vu.file_reader(input_path, 'r') as f:
            in_shape = f[input_key].shape
        mask = vu.load_mask(mask_path, mask_key, in_shape)
    else:
        mask = None
    same_file = input_path == output_path
    in_place = same_file and (input_key == output_key)
    # process the blocks; open one container handle when reading and
    # writing the same file, two handles otherwise
    if same_file:
        with vu.file_reader(input_path) as f:
            ds_in = f[input_key]
            ds_out = ds_in if in_place else f[output_key]
            shape = list(ds_in.shape)
            trafo = _load_transformation(trafo_file, shape)
            blocking = nt.blocking([0, 0, 0], shape, block_shape)
            _transform_linear(ds_in, ds_out, trafo, blocking, block_list, mask)
    else:
        with vu.file_reader(input_path, 'r') as f_in, vu.file_reader(output_path) as f_out:
            ds_in = f_in[input_key]
            ds_out = f_out[output_key]
            shape = list(ds_in.shape)
            trafo = _load_transformation(trafo_file, shape)
            blocking = nt.blocking([0, 0, 0], shape, block_shape)
            _transform_linear(ds_in, ds_out, trafo, blocking, block_list, mask)
    # log success
    fu.log_job_success(job_id)
if __name__ == '__main__':
    # the job config file is the single CLI argument; the job id is encoded
    # in its filename as <task>_<job_id>.<ext>
    path = sys.argv[1]
    assert os.path.exists(path), path
    job_id = int(os.path.split(path)[1].split('.')[0].split('_')[-1])
    linear(job_id, path)
| en | 0.708808 | #! /bin/python # # linear transformation tasks # linear base class # input and output volumes # we use this to get also get the common default config # TODO remove any output of failed blocks because it might be corrupted # get the global config and init configs # get shape, dtype and make block config # load the config # require output dataset # update the config with input and output paths and keys # as well as block shape # prime and run the jobs # wait till jobs finish and check for job success Linear intensity transform on local machine copy on slurm cluster Linear intensity transform on slurm cluster Linear intensity transform on lsf cluster # # Implementation # # for now, we support two different transformation specifications: # 1.) global trafo specified as {'a': a, 'b': b} # 2.) transformation for each slcie, specified as {'1': {'a': a, 'b': b}, ...} # read the input cofig # read the output config and path to transformation # submit blocks # log success | 1.845615 | 2 |
0x06-python-classes/0-square.py | omarcherni007/holbertonschool-higher_level_programming | 1 | 6615619 | <reponame>omarcherni007/holbertonschool-higher_level_programming
#!/usr/bin/python3
"""Module Square"""
class Square:
    """Placeholder square class; no behaviour implemented yet."""
| #!/usr/bin/python3
"""Module Square"""
class Square:
    """Placeholder square class; no behaviour implemented yet."""
main.py | HubertSienicki/TicTacToe-VoiceControll | 0 | 6615620 | <filename>main.py
from GUI.TitleScreen import *
from GUI.Grid import *
from tkinter import Tk
def main():
    """Create the main window, attach the game grid and start the Tk loop."""
    window = Tk()
    grid_view = Grid(window, True)  # builds the game grid inside the window
    window.mainloop()
if __name__ == "__main__":
    # start the GUI only when executed as a script
    main()
from GUI.TitleScreen import *
from GUI.Grid import *
from tkinter import Tk
def main():
    """Create the main window, attach the game grid and start the Tk loop."""
    window = Tk()
    grid_view = Grid(window, True)  # builds the game grid inside the window
    window.mainloop()
if __name__ == "__main__":
    # start the GUI only when executed as a script
    main()
tests/tools/test_singleton.py | jbenden/pipeline | 30 | 6615621 | <reponame>jbenden/pipeline<filename>tests/tools/test_singleton.py
"""Testing of module filter."""
# pylint: disable=no-self-use, invalid-name
import unittest
from hamcrest import assert_that, equal_to, is_not
from spline.tools.decorators import singleton
class TestSingleton(unittest.TestCase):
    """Testing of singleton decorator."""
    def test_without_parameters(self):
        """Testing singleton decorator without parameters."""
        @singleton
        class SingletonTest1(object):
            """Test singleton."""
            def __init__(self):
                """No parameters, no fields."""
                pass
        instance_a = SingletonTest1()
        instance_b = SingletonTest1()
        # every construction must yield the very same object (identity check)
        assert_that(instance_a, equal_to(instance_b))
        assert_that(id(instance_a), equal_to(id(instance_b)))
    def test_with_positional_arguments(self):
        """Testing singleton decorator with positional parameters."""
        @singleton
        class SingletonTest2(object):
            """Test singleton."""
            def __init__(self, value_a, value_b):
                """Init fields."""
                self.value_a = value_a
                self.value_b = value_b
        instance_a = SingletonTest2(10, "hello")
        instance_b = SingletonTest2(10, "hello")
        instance_c = SingletonTest2(20, "hello")
        # identical argument values -> the cached instance is reused
        assert_that(instance_a, equal_to(instance_b))
        assert_that(id(instance_a), equal_to(id(instance_b)))
        assert_that(instance_a.value_a, equal_to(instance_b.value_a))
        assert_that(instance_a.value_b, equal_to(instance_b.value_b))
        # a different first argument -> a distinct instance is created
        assert_that(instance_a, is_not(equal_to(instance_c)))
        assert_that(id(instance_a), is_not(equal_to(id(instance_c))))
        assert_that(instance_a.value_a, is_not(equal_to(instance_c.value_a)))
        assert_that(instance_a.value_b, equal_to(instance_c.value_b))
    def test_with_named_arguments(self):
        """Testing singleton decorator with named parameters."""
        @singleton
        class SingletonTest3(object):
            """Test singleton."""
            def __init__(self, value_a, value_b):
                """Init fields."""
                self.value_a = value_a
                self.value_b = value_b
        instance_a = SingletonTest3(10, value_b="hello")
        instance_b = SingletonTest3(10, value_b="hello")
        instance_c = SingletonTest3(10, "hello")
        # same keyword call form -> cached instance is reused
        assert_that(instance_a, equal_to(instance_b))
        assert_that(id(instance_a), equal_to(id(instance_b)))
        assert_that(instance_a.value_a, equal_to(instance_b.value_a))
        assert_that(instance_a.value_b, equal_to(instance_b.value_b))
        # also the field values are the same the instances are
        # different because of the different call.
        assert_that(instance_a, is_not(equal_to(instance_c)))
        assert_that(id(instance_a), is_not(equal_to(id(instance_c))))
        assert_that(instance_a.value_a, equal_to(instance_c.value_a))
        assert_that(instance_a.value_b, equal_to(instance_c.value_b))
| """Testing of module filter."""
# pylint: disable=no-self-use, invalid-name
import unittest
from hamcrest import assert_that, equal_to, is_not
from spline.tools.decorators import singleton
class TestSingleton(unittest.TestCase):
    """Testing of singleton decorator."""
    def test_without_parameters(self):
        """Testing singleton decorator without parameters."""
        @singleton
        class SingletonTest1(object):
            """Test singleton."""
            def __init__(self):
                """No parameters, no fields."""
                pass
        instance_a = SingletonTest1()
        instance_b = SingletonTest1()
        # every construction must yield the very same object (identity check)
        assert_that(instance_a, equal_to(instance_b))
        assert_that(id(instance_a), equal_to(id(instance_b)))
    def test_with_positional_arguments(self):
        """Testing singleton decorator with positional parameters."""
        @singleton
        class SingletonTest2(object):
            """Test singleton."""
            def __init__(self, value_a, value_b):
                """Init fields."""
                self.value_a = value_a
                self.value_b = value_b
        instance_a = SingletonTest2(10, "hello")
        instance_b = SingletonTest2(10, "hello")
        instance_c = SingletonTest2(20, "hello")
        # identical argument values -> the cached instance is reused
        assert_that(instance_a, equal_to(instance_b))
        assert_that(id(instance_a), equal_to(id(instance_b)))
        assert_that(instance_a.value_a, equal_to(instance_b.value_a))
        assert_that(instance_a.value_b, equal_to(instance_b.value_b))
        # a different first argument -> a distinct instance is created
        assert_that(instance_a, is_not(equal_to(instance_c)))
        assert_that(id(instance_a), is_not(equal_to(id(instance_c))))
        assert_that(instance_a.value_a, is_not(equal_to(instance_c.value_a)))
        assert_that(instance_a.value_b, equal_to(instance_c.value_b))
    def test_with_named_arguments(self):
        """Testing singleton decorator with named parameters."""
        @singleton
        class SingletonTest3(object):
            """Test singleton."""
            def __init__(self, value_a, value_b):
                """Init fields."""
                self.value_a = value_a
                self.value_b = value_b
        instance_a = SingletonTest3(10, value_b="hello")
        instance_b = SingletonTest3(10, value_b="hello")
        instance_c = SingletonTest3(10, "hello")
        # same keyword call form -> cached instance is reused
        assert_that(instance_a, equal_to(instance_b))
        assert_that(id(instance_a), equal_to(id(instance_b)))
        assert_that(instance_a.value_a, equal_to(instance_b.value_a))
        assert_that(instance_a.value_b, equal_to(instance_b.value_b))
        # also the field values are the same the instances are
        # different because of the different call.
        assert_that(instance_a, is_not(equal_to(instance_c)))
        assert_that(id(instance_a), is_not(equal_to(id(instance_c))))
        assert_that(instance_a.value_a, equal_to(instance_c.value_a))
        assert_that(instance_a.value_b, equal_to(instance_c.value_b))
modules/plotly_renderer.py | tkc-morita/ct_multiview | 0 | 6615622 | <reponame>tkc-morita/ct_multiview
# coding: utf-8
import numpy as np
from plotly.figure_factory import create_trisurf
import trimesh
import io
from PIL import Image
# shared plotly 3D scene layout: all three axes hidden, black line color
scene = {'{}axis'.format(ax):
    dict(
        title='',
        showgrid=False,
        zeroline=False,
        showticklabels=False,
        linecolor='rgb(0, 0, 0)',
        )
    for ax in 'xyz'}
# non-axis options are added to the same scene dict afterwards
scene['bgcolor'] = 'rgb(0, 0, 0)'
# keep the data's aspect ratio instead of normalizing each axis
scene['aspectmode'] = 'data'
def load_stl(path):
    """Load an STL mesh and wrap it into a plotly trisurf figure.

    The mesh is rendered plain white on the shared black ``scene`` layout.
    """
    mesh = trimesh.load_mesh(path)
    xs, ys, zs = zip(*mesh.vertices)
    figure = create_trisurf(
        x=xs, y=ys, z=zs,
        simplices=mesh.faces,
        plot_edges=False,
        colormap=['rgb(255,255,255)', 'rgb(255,255,255)'],
        backgroundcolor='rgb(0, 0, 0)',
        title='',
        show_colorbar=False,
    )
    figure.update_layout(scene=scene, margin=dict(r=0, l=0, b=0, t=0))
    figure.update_traces(lighting=dict(ambient=0.5))
    return figure
def project2D(fig, elev, azim, dist=1.8, width=512, height=512, **kwargs):
    """Render ``fig`` from a camera at ``(elev, azim)`` degrees, ``dist`` away.

    Returns the rendered view as a PIL image of size width x height.
    Extra keyword arguments are accepted but ignored.
    """
    elev_rad = np.deg2rad(elev)
    azim_rad = np.deg2rad(azim)
    # spherical -> cartesian camera position
    cam_z = dist * np.sin(elev_rad)
    radius_xy = dist * np.cos(elev_rad)
    cam_x = radius_xy * np.cos(azim_rad)
    cam_y = radius_xy * np.sin(azim_rad)
    fig.update_layout(scene_camera_eye=dict(x=cam_x, y=cam_y, z=cam_z))
    # light positioned along the camera direction, 1.5x farther out
    fig.update_traces(lightposition=dict(x=cam_x * 1.5, y=cam_y * 1.5, z=cam_z * 1.5))
    png_bytes = fig.to_image(format="png", width=width, height=height)
    return Image.open(io.BytesIO(png_bytes))
import numpy as np
from plotly.figure_factory import create_trisurf
import trimesh
import io
from PIL import Image
# shared plotly 3D scene layout: all three axes hidden, black line color
scene = {'{}axis'.format(ax):
    dict(
        title='',
        showgrid=False,
        zeroline=False,
        showticklabels=False,
        linecolor='rgb(0, 0, 0)',
        )
    for ax in 'xyz'}
# non-axis options are added to the same scene dict afterwards
scene['bgcolor'] = 'rgb(0, 0, 0)'
# keep the data's aspect ratio instead of normalizing each axis
scene['aspectmode'] = 'data'
def load_stl(path):
    """Load an STL mesh and wrap it into a plotly trisurf figure.

    The mesh is rendered plain white on the shared black ``scene`` layout.
    """
    mesh = trimesh.load_mesh(path)
    xs, ys, zs = zip(*mesh.vertices)
    figure = create_trisurf(
        x=xs, y=ys, z=zs,
        simplices=mesh.faces,
        plot_edges=False,
        colormap=['rgb(255,255,255)', 'rgb(255,255,255)'],
        backgroundcolor='rgb(0, 0, 0)',
        title='',
        show_colorbar=False,
    )
    figure.update_layout(scene=scene, margin=dict(r=0, l=0, b=0, t=0))
    figure.update_traces(lighting=dict(ambient=0.5))
    return figure
def project2D(fig, elev, azim, dist=1.8, width=512, height=512, **kwargs):
    """Render ``fig`` from a camera at ``(elev, azim)`` degrees, ``dist`` away.

    Returns the rendered view as a PIL image of size width x height.
    Extra keyword arguments are accepted but ignored.
    """
    elev_rad = np.deg2rad(elev)
    azim_rad = np.deg2rad(azim)
    # spherical -> cartesian camera position
    cam_z = dist * np.sin(elev_rad)
    radius_xy = dist * np.cos(elev_rad)
    cam_x = radius_xy * np.cos(azim_rad)
    cam_y = radius_xy * np.sin(azim_rad)
    fig.update_layout(scene_camera_eye=dict(x=cam_x, y=cam_y, z=cam_z))
    # light positioned along the camera direction, 1.5x farther out
    fig.update_traces(lightposition=dict(x=cam_x * 1.5, y=cam_y * 1.5, z=cam_z * 1.5))
    png_bytes = fig.to_image(format="png", width=width, height=height)
    return Image.open(io.BytesIO(png_bytes))
uniquindio_siclimatico/models/medicion.py | LosMejoresDelosMejores/ProyectoSebas | 0 | 6615623 | <reponame>LosMejoresDelosMejores/ProyectoSebas
# -*- coding: utf-8 -*-
# -*- encoding: utf-8 -*-
from openerp import models, fields
class Medicion(models.Model):
    # OpenERP/Odoo model: one sensor measurement taken at a station
    _name = 'uniquindio.medicion'
    # station the measurement belongs to
    estacion_id = fields.Many2one(
        string='Estacion', required=True,
        comodel_name='uniquindio.estacion', index=True)
    # sensor type that produced the value
    tipo_id = fields.Many2one(
        string='Tipo', required=True,
        comodel_name='uniquindio.tiposensor', index=True)
    # measured value; aggregated with AVG in grouped views
    valor = fields.Float('Valor', required=True,
        group_operator="avg", index=True)
    # unit of the measured value
    unidad = fields.Char('Unidad Medida', required=True, index=True)
    # measurement timestamp; optional (no required flag)
    fecha = fields.Datetime(u'Fecha Medición')
| # -*- coding: utf-8 -*-
# -*- encoding: utf-8 -*-
from openerp import models, fields
class Medicion(models.Model):
    # OpenERP/Odoo model: one sensor measurement taken at a station
    _name = 'uniquindio.medicion'
    # station the measurement belongs to
    estacion_id = fields.Many2one(
        string='Estacion', required=True,
        comodel_name='uniquindio.estacion', index=True)
    # sensor type that produced the value
    tipo_id = fields.Many2one(
        string='Tipo', required=True,
        comodel_name='uniquindio.tiposensor', index=True)
    # measured value; aggregated with AVG in grouped views
    valor = fields.Float('Valor', required=True,
        group_operator="avg", index=True)
    # unit of the measured value
    unidad = fields.Char('Unidad Medida', required=True, index=True)
    # measurement timestamp; optional (no required flag)
    fecha = fields.Datetime(u'Fecha Medición')
UServer/admin_server/admin_http_api/api/api_gateway.py | soybean217/lora-python | 0 | 6615624 | <filename>UServer/admin_server/admin_http_api/api/api_gateway.py
import json
from ..api import api, root
from userver.object.gateway import Gateway, Location
from utils.errors import KeyDuplicateError, PatchError
from .forms.form_gateway import AddGatewayForm, PatchGateway
from ..http_auth import auth
from flask import request, Response
from .forms import get_formdata_from_json_or_form
# from userver.object.statistician_gateway import Statistician
from userver.object.stat_gateway import Statistician
from binascii import unhexlify
from .decorators import gateway_exists
from admin_server.admin_data_update.object.gateway import GatewayLocation
import json
@api.route(root + 'gateways', methods=['GET', 'POST'])
@auth.auth_required
def gateways():
    """List all gateways as json, optionally filtered by `user` query arg.

    POST (gateway creation) is accepted by the route but its implementation
    is disabled; it is answered with an explicit 405 instead of falling
    through and returning None (which made flask raise a 500 before).
    """
    if request.method == 'GET':
        user_id = request.args.get('user', default=None)
        if user_id is not None:
            query = Gateway.query.filter_by(user_id=user_id)
        else:
            query = Gateway.query.order_by(Gateway.user_id)
        # the old loop shadowed the builtin `dict` and the view name `gateways`
        gateways_list = [gateway.obj_to_dict() for gateway in query]
        return json.dumps(gateways_list), 200
    # request.method == 'POST': creation is currently not supported
    return json.dumps({'errors': 'POST is not supported'}), 405
@api.route(root + 'gateways/<gateway_id>/statistician/hourly', methods=['GET', 'POST'])
@auth.auth_required
@gateway_exists
def gateway_statistician_hourly(gateway):
    """Return the hourly uplink/downlink statistics of the given gateway."""
    stats = Statistician(gateway.id)
    return json.dumps(stats.count_in_hourly()), 200
@api.route(root + 'gateways/<gateway_id>/statistician/daily', methods=['GET', 'POST'])
@auth.auth_required
@gateway_exists
def gateway_statistician_daily(gateway):
    """Return the daily uplink/downlink statistics of the given gateway."""
    stats = Statistician(gateway.id)
    return json.dumps(stats.count_in_daily()), 200
@api.route(root + 'gateways/<gateway_id>', methods=['GET', 'DELETE', 'PATCH', 'POST'])
@auth.auth_required
@gateway_exists
def gateway(gateway):
    """Single-gateway endpoint: read (GET), modify (PATCH), delete (DELETE)
    or send a command (POST; only cmd=restart is understood)."""
    if request.method == 'GET':
        return Response( status=200, response=json.dumps(gateway.obj_to_dict()))
    elif request.method == 'PATCH':
        try:
            formdata = get_formdata_from_json_or_form(request)
            PatchGateway.patch(gateway, formdata)
            # return the patched state so clients can confirm the change
            return json.dumps(gateway.obj_to_dict()), 200
        except (AssertionError, PatchError, ValueError) as error:
            return json.dumps({'errors': str(error)}), 406
    elif request.method == 'DELETE':
        gateway.delete()
        return json.dumps({'success': True}), 200
    elif request.method == 'POST':
        # POST carries a command for the gateway
        formdata = get_formdata_from_json_or_form(request)
        if formdata and formdata.get('cmd') is not None:
            if formdata['cmd'] == 'restart':
                gateway.send_restart_request()
                return '', 204
            else:
                return 'Unknown cmd %s ' % formdata['cmd'], 406
        else:
            return '', 406
# def import_gateway(user, add_gateway):
# mac_addr = add_gateway['mac_addr'].data
# name = add_gateway['name'].data
# platform = add_gateway['platform'].data
# freq_plan = add_gateway['freq_plan'].data
# location = Location(add_gateway['longitude'].data, add_gateway['latitude'].data, add_gateway['altitude'].data)
# if platform == Platform.rpi:
# model = add_gateway['model'].data
# return RaspBerryPiGateway(user.id, mac_addr, name, model, freq_plan=freq_plan, location=location)
# elif platform == Platform.ll:
# return LinkLabsGateway(user.id, mac_addr, name, freq_plan=freq_plan, location=location)
def import_gateway(user, add_gateway):
    """Build a ``Gateway`` model instance from a validated AddGatewayForm."""
    location = Location(add_gateway['longitude'].data,
                        add_gateway['latitude'].data,
                        add_gateway['altitude'].data)
    return Gateway(user.id,
                   add_gateway['mac_addr'].data,
                   add_gateway['name'].data,
                   add_gateway['platform'].data,
                   add_gateway['model'].data,
                   freq_plan=add_gateway['freq_plan'].data,
                   location=location)
@api.route(root + 'gps/gateways', methods=['GET'])
@auth.auth_required
def gateway_gps():
    """Look up gateway GPS positions by region code or by gateway id.

    Query parameters (one of them should be given):
      - code: 6 digit region code, 2 digits each for province / city / area;
        '00' for the city or area part widens the query to the whole
        province or city.
      - id: a single gateway id.
    """
    if request.method == 'GET':
        code = request.args.get('code', '')
        gateway_id = request.args.get('id', '')
        if code != '':
            # validate explicitly instead of `assert`: asserts are stripped
            # when python runs with -O and surfaced as a 500 before
            if not isinstance(code, str) or len(code) != 6:
                return 'It should be a string or the length should be 6 code=%s' % str(code), 406
            code_province_url = code[:2]
            code_city_url = code[2:4]
            code_area_url = code[4:6]
            # '00' acts as a wildcard for the city / area part
            if code_city_url == '00':
                gateways_list = GatewayLocation.query_code(code_province=code_province_url)
            else:
                if code_area_url == '00':
                    gateways_list = GatewayLocation.query_code(code_province=code_province_url,
                                                               code_city=code_city_url)
                else:
                    gateways_list = GatewayLocation.query_code(code_province=code_province_url,
                                                               code_city=code_city_url,
                                                               code_area=code_area_url)
            if len(gateways_list) != 0:
                data = [{'id': i_gateway.gateway_id, 'lat': i_gateway.latitude, 'lng': i_gateway.longitude}
                        for i_gateway in gateways_list]
                return json.dumps(data), 200
            else:
                return 'There has not any gateway belong to the code.', 204
        elif gateway_id != '':
            gateway_data = GatewayLocation.query_gateway_id(gateway_id=gateway_id)
            if gateway_data is not None:
                data = {'id': gateway_id, 'lat': gateway_data.latitude, 'lng': gateway_data.longitude}
                return json.dumps(data), 200
            else:
                return 'The id of gateway does not exit.', 204
        else:
            return 'Lack of url query key.', 406
| <filename>UServer/admin_server/admin_http_api/api/api_gateway.py
import json
from ..api import api, root
from userver.object.gateway import Gateway, Location
from utils.errors import KeyDuplicateError, PatchError
from .forms.form_gateway import AddGatewayForm, PatchGateway
from ..http_auth import auth
from flask import request, Response
from .forms import get_formdata_from_json_or_form
# from userver.object.statistician_gateway import Statistician
from userver.object.stat_gateway import Statistician
from binascii import unhexlify
from .decorators import gateway_exists
from admin_server.admin_data_update.object.gateway import GatewayLocation
import json
@api.route(root + 'gateways', methods=['GET', 'POST'])
@auth.auth_required
def gateways():
    """List all gateways as json, optionally filtered by `user` query arg.

    POST (gateway creation) is accepted by the route but its implementation
    is disabled; it is answered with an explicit 405 instead of falling
    through and returning None (which made flask raise a 500 before).
    """
    if request.method == 'GET':
        user_id = request.args.get('user', default=None)
        if user_id is not None:
            query = Gateway.query.filter_by(user_id=user_id)
        else:
            query = Gateway.query.order_by(Gateway.user_id)
        # the old loop shadowed the builtin `dict` and the view name `gateways`
        gateways_list = [gateway.obj_to_dict() for gateway in query]
        return json.dumps(gateways_list), 200
    # request.method == 'POST': creation is currently not supported
    return json.dumps({'errors': 'POST is not supported'}), 405
@api.route(root + 'gateways/<gateway_id>/statistician/hourly', methods=['GET', 'POST'])
@auth.auth_required
@gateway_exists
def gateway_statistician_hourly(gateway):
    """Return the hourly uplink/downlink statistics of the given gateway."""
    stats = Statistician(gateway.id)
    return json.dumps(stats.count_in_hourly()), 200
@api.route(root + 'gateways/<gateway_id>/statistician/daily', methods=['GET', 'POST'])
@auth.auth_required
@gateway_exists
def gateway_statistician_daily(gateway):
    """Return the daily uplink/downlink statistics of the given gateway."""
    stats = Statistician(gateway.id)
    return json.dumps(stats.count_in_daily()), 200
@api.route(root + 'gateways/<gateway_id>', methods=['GET', 'DELETE', 'PATCH', 'POST'])
@auth.auth_required
@gateway_exists
def gateway(gateway):
    """Single-gateway endpoint: read (GET), modify (PATCH), delete (DELETE)
    or send a command (POST; only cmd=restart is understood)."""
    if request.method == 'GET':
        return Response( status=200, response=json.dumps(gateway.obj_to_dict()))
    elif request.method == 'PATCH':
        try:
            formdata = get_formdata_from_json_or_form(request)
            PatchGateway.patch(gateway, formdata)
            # return the patched state so clients can confirm the change
            return json.dumps(gateway.obj_to_dict()), 200
        except (AssertionError, PatchError, ValueError) as error:
            return json.dumps({'errors': str(error)}), 406
    elif request.method == 'DELETE':
        gateway.delete()
        return json.dumps({'success': True}), 200
    elif request.method == 'POST':
        # POST carries a command for the gateway
        formdata = get_formdata_from_json_or_form(request)
        if formdata and formdata.get('cmd') is not None:
            if formdata['cmd'] == 'restart':
                gateway.send_restart_request()
                return '', 204
            else:
                return 'Unknown cmd %s ' % formdata['cmd'], 406
        else:
            return '', 406
# def import_gateway(user, add_gateway):
# mac_addr = add_gateway['mac_addr'].data
# name = add_gateway['name'].data
# platform = add_gateway['platform'].data
# freq_plan = add_gateway['freq_plan'].data
# location = Location(add_gateway['longitude'].data, add_gateway['latitude'].data, add_gateway['altitude'].data)
# if platform == Platform.rpi:
# model = add_gateway['model'].data
# return RaspBerryPiGateway(user.id, mac_addr, name, model, freq_plan=freq_plan, location=location)
# elif platform == Platform.ll:
# return LinkLabsGateway(user.id, mac_addr, name, freq_plan=freq_plan, location=location)
def import_gateway(user, add_gateway):
    """Build a ``Gateway`` model instance from a validated AddGatewayForm."""
    location = Location(add_gateway['longitude'].data,
                        add_gateway['latitude'].data,
                        add_gateway['altitude'].data)
    return Gateway(user.id,
                   add_gateway['mac_addr'].data,
                   add_gateway['name'].data,
                   add_gateway['platform'].data,
                   add_gateway['model'].data,
                   freq_plan=add_gateway['freq_plan'].data,
                   location=location)
@api.route(root + 'gps/gateways', methods=['GET'])
@auth.auth_required
def gateway_gps():
    """Look up gateway GPS positions by region code or by gateway id.

    Query parameters (one of them should be given):
      - code: 6 digit region code, 2 digits each for province / city / area;
        '00' for the city or area part widens the query to the whole
        province or city.
      - id: a single gateway id.
    """
    if request.method == 'GET':
        code = request.args.get('code', '')
        gateway_id = request.args.get('id', '')
        if code != '':
            # validate explicitly instead of `assert`: asserts are stripped
            # when python runs with -O and surfaced as a 500 before
            if not isinstance(code, str) or len(code) != 6:
                return 'It should be a string or the length should be 6 code=%s' % str(code), 406
            code_province_url = code[:2]
            code_city_url = code[2:4]
            code_area_url = code[4:6]
            # '00' acts as a wildcard for the city / area part
            if code_city_url == '00':
                gateways_list = GatewayLocation.query_code(code_province=code_province_url)
            else:
                if code_area_url == '00':
                    gateways_list = GatewayLocation.query_code(code_province=code_province_url,
                                                               code_city=code_city_url)
                else:
                    gateways_list = GatewayLocation.query_code(code_province=code_province_url,
                                                               code_city=code_city_url,
                                                               code_area=code_area_url)
            if len(gateways_list) != 0:
                data = [{'id': i_gateway.gateway_id, 'lat': i_gateway.latitude, 'lng': i_gateway.longitude}
                        for i_gateway in gateways_list]
                return json.dumps(data), 200
            else:
                return 'There has not any gateway belong to the code.', 204
        elif gateway_id != '':
            gateway_data = GatewayLocation.query_gateway_id(gateway_id=gateway_id)
            if gateway_data is not None:
                data = {'id': gateway_id, 'lat': gateway_data.latitude, 'lng': gateway_data.longitude}
                return json.dumps(data), 200
            else:
                return 'The id of gateway does not exit.', 204
        else:
            return 'Lack of url query key.', 406
| en | 0.430507 | # from userver.object.statistician_gateway import Statistician # elif request.method == 'POST': # formdata = get_formdata_from_json_or_form(request) # add_gateway = AddGatewayForm(formdata) # if add_gateway.validate(): # try: # gateway = import_gateway(user, add_gateway) # gateway.save() # new_gateway = Gateway.query.get(gateway.id) # return Response(status=201, response=json.dumps(new_gateway.obj_to_dict())) # except KeyDuplicateError as error: # errors = {'mac_addr': str(error)} # return Response(status=406, response=json.dumps({"errors": errors})) # except AssertionError as error: # return Response(status=406, response=json.dumps({"errors": {"other": str(error)}})) # else: # errors = {} # for key, value in add_gateway.errors.items(): # errors[key] = value[0] # return Response(status=406, response=json.dumps({"errors": errors})) :param dev_eui: dev_eui :return: 返回上行下行统计数据 :param dev_eui: dev_eui :return: 返回上行下行统计数据 # def import_gateway(user, add_gateway): # mac_addr = add_gateway['mac_addr'].data # name = add_gateway['name'].data # platform = add_gateway['platform'].data # freq_plan = add_gateway['freq_plan'].data # location = Location(add_gateway['longitude'].data, add_gateway['latitude'].data, add_gateway['altitude'].data) # if platform == Platform.rpi: # model = add_gateway['model'].data # return RaspBerryPiGateway(user.id, mac_addr, name, model, freq_plan=freq_plan, location=location) # elif platform == Platform.ll: # return LinkLabsGateway(user.id, mac_addr, name, freq_plan=freq_plan, location=location) | 2.14732 | 2 |
entities/lst_configuration.py | fudo-myo/LST_BBDD | 0 | 6615625 | from sqlalchemy import *
from config.base import getBase, getMetaData, getEngine
from utils.checkers import Checkers
from utils.table_names import LstTableNames
# Declare the ORM mapping only when the LST_CONFIGURATION table already exists
# in the target database; otherwise the class is skipped entirely.
if Checkers.check_table_exists(getEngine(), LstTableNames.LST_CONFIGURATION):
    class LstConfiguration(getBase()):
        """Declarative ORM mapping for the LST_CONFIGURATION table.

        Column metadata is reflected from the live database (autoload) and the
        columns used by the application are re-declared explicitly below.
        """
        # NOTE(review): __tablename__ is conventionally a string; assigning a
        # reflected Table object here relies on SQLAlchemy accepting it —
        # confirm this is intentional rather than __table__.
        __tablename__ = Table(LstTableNames.LST_CONFIGURATION, getMetaData(), autoload=True, autoload_with=getEngine())
        id_config = Column('ID_CONFIG', Integer, primary_key=True)  # surrogate primary key
        config_description = Column('CONFIG_DESCRIPTION', VARCHAR(50), nullable=True)  # human-readable label
        param_1 = Column('PARAM_1', VARCHAR(45), nullable=True)  # free-form configuration value
        param_2 = Column('PARAM_2', VARCHAR(45), nullable=True)  # free-form configuration value
        param_3 = Column('PARAM_3', VARCHAR(45), nullable=True)  # free-form configuration value
| from sqlalchemy import *
from config.base import getBase, getMetaData, getEngine
from utils.checkers import Checkers
from utils.table_names import LstTableNames
# Declare the ORM mapping only when the LST_CONFIGURATION table already exists
# in the target database; otherwise the class is skipped entirely.
if Checkers.check_table_exists(getEngine(), LstTableNames.LST_CONFIGURATION):
    class LstConfiguration(getBase()):
        """Declarative ORM mapping for the LST_CONFIGURATION table.

        Column metadata is reflected from the live database (autoload) and the
        columns used by the application are re-declared explicitly below.
        """
        # NOTE(review): __tablename__ is conventionally a string; assigning a
        # reflected Table object here relies on SQLAlchemy accepting it —
        # confirm this is intentional rather than __table__.
        __tablename__ = Table(LstTableNames.LST_CONFIGURATION, getMetaData(), autoload=True, autoload_with=getEngine())
        id_config = Column('ID_CONFIG', Integer, primary_key=True)  # surrogate primary key
        config_description = Column('CONFIG_DESCRIPTION', VARCHAR(50), nullable=True)  # human-readable label
        param_1 = Column('PARAM_1', VARCHAR(45), nullable=True)  # free-form configuration value
        param_2 = Column('PARAM_2', VARCHAR(45), nullable=True)  # free-form configuration value
        param_3 = Column('PARAM_3', VARCHAR(45), nullable=True)  # free-form configuration value
| none | 1 | 2.279302 | 2 | |
serial2scratch.py | radzick/scratch2microbit | 0 | 6615626 | <gh_stars>0
#!/usr/bin/python
# --------------------------------------
# radzick 24-12-2016
#
# works only with Scratch 1.4
# --------------------------------------
# requirements:
# - scratchpy
# - pyserial
# --------------------------------------
# installation:
# $ easy_install-x.y pyserial
# $ pip install pyserial
# --------------------------------------
# usefull stuff:
# - https://pxt.microbit.org
# - https://github.com/pilliq/scratchpy
# - https://microbit-micropython.readthedocs.io
# - https://pythonhosted.org/pyserial
# - https://github.com/mu-editor/mu
# - enter microbit console:
# $ python -m serial.tools.miniterm <>
# --------------------------------------
import scratch
import serial
import sys
# get the serial port device path (e.g. /dev/ttyACM0 or COM3) from the command line
COM_PORT = sys.argv[1]
print('debug: com port = ' + repr(COM_PORT))

# init scratch connection & serve exceptions
scr = scratch.Scratch()
scr.broadcast('init scratch')
print('debug: scratch is on')

# init serial connections & serve exceptions
# micro:bit USB serial: 115200 baud, 8 data bits, no parity, 1 stop bit,
# blocking reads (timeout=None)
ser = serial.Serial(COM_PORT, 115200, bytesize=8, parity='N', stopbits=1, timeout=None)
if ser.is_open != True:
    ser.open()
ser.flushInput()   # flush input buffer, discarding all its contents
ser.flushOutput()  # flush output buffer, aborting current output
print('debug: serial_open = ' + repr(ser.is_open))
# receive message(s) from scratch
def listen2scratch():
    """Yield up to 10 messages received from the module-level Scratch connection.

    Ends the generator early (instead of raising) when Scratch reports an
    error, e.g. when the remote side disconnects.
    """
    remaining = 10
    while remaining > 0:
        try:
            yield scr.receive()
            remaining -= 1
        except scratch.ScratchError:
            # PEP 479: 'raise StopIteration' inside a generator is an error on
            # Python 3.7+; a plain 'return' ends the generator cleanly on
            # every Python version.
            return
# receive message(s) from serial
def listen2serial():
    """Yield complete lines from the serial port while input is buffered.

    Uses the module-level ``ser`` connection and ends the generator (instead
    of raising) on a SerialException so the caller's loop keeps running.
    """
    while ser.in_waiting > 0:
        try:
            yield ser.readline()
        except serial.SerialException:
            # PEP 479: 'raise StopIteration' inside a generator is an error on
            # Python 3.7+; a plain 'return' ends the generator cleanly on
            # every Python version.
            return
# main loop: forward every serial message to Scratch as a broadcast, forever
while True:
    # parse messages from serial to scratch
    for ser_msg in listen2serial():
        print('debug: ' + ser_msg)
        # use only the first letter to drive scratch
        scr.broadcast(repr(ser_msg[0]))
| #!/usr/bin/python
# --------------------------------------
# radzick 24-12-2016
#
# works only with Scratch 1.4
# --------------------------------------
# requirements:
# - scratchpy
# - pyserial
# --------------------------------------
# installation:
# $ easy_install-x.y pyserial
# $ pip install pyserial
# --------------------------------------
# usefull stuff:
# - https://pxt.microbit.org
# - https://github.com/pilliq/scratchpy
# - https://microbit-micropython.readthedocs.io
# - https://pythonhosted.org/pyserial
# - https://github.com/mu-editor/mu
# - enter microbit console:
# $ python -m serial.tools.miniterm <>
# --------------------------------------
import scratch
import serial
import sys
# get the serial port device path (e.g. /dev/ttyACM0 or COM3) from the command line
COM_PORT = sys.argv[1]
print('debug: com port = ' + repr(COM_PORT))

# init scratch connection & serve exceptions
scr = scratch.Scratch()
scr.broadcast('init scratch')
print('debug: scratch is on')

# init serial connections & serve exceptions
# micro:bit USB serial: 115200 baud, 8 data bits, no parity, 1 stop bit,
# blocking reads (timeout=None)
ser = serial.Serial(COM_PORT, 115200, bytesize=8, parity='N', stopbits=1, timeout=None)
if ser.is_open != True:
    ser.open()
ser.flushInput()   # flush input buffer, discarding all its contents
ser.flushOutput()  # flush output buffer, aborting current output
print('debug: serial_open = ' + repr(ser.is_open))
# receive message(s) from scratch
def listen2scratch():
    """Yield up to 10 messages received from the module-level Scratch connection.

    Ends the generator early (instead of raising) when Scratch reports an
    error, e.g. when the remote side disconnects.
    """
    remaining = 10
    while remaining > 0:
        try:
            yield scr.receive()
            remaining -= 1
        except scratch.ScratchError:
            # PEP 479: 'raise StopIteration' inside a generator is an error on
            # Python 3.7+; a plain 'return' ends the generator cleanly on
            # every Python version.
            return
# receive message(s) from serial
def listen2serial():
    """Yield complete lines from the serial port while input is buffered.

    Uses the module-level ``ser`` connection and ends the generator (instead
    of raising) on a SerialException so the caller's loop keeps running.
    """
    while ser.in_waiting > 0:
        try:
            yield ser.readline()
        except serial.SerialException:
            # PEP 479: 'raise StopIteration' inside a generator is an error on
            # Python 3.7+; a plain 'return' ends the generator cleanly on
            # every Python version.
            return
# main loop: forward every serial message to Scratch as a broadcast, forever
while True:
    # parse messages from serial to scratch
    for ser_msg in listen2serial():
        print('debug: ' + ser_msg)
        # use only the first letter to drive scratch
        scr.broadcast(repr(ser_msg[0]))
dffml/skel/service/REPLACE_IMPORT_PACKAGE_NAME/misc.py | Patil2099/dffml | 0 | 6615627 | <filename>dffml/skel/service/REPLACE_IMPORT_PACKAGE_NAME/misc.py
from dffml.util.cli.arg import Arg
from dffml.util.cli.cmd import CMD
from dffml.util.entrypoint import entry_point
@entry_point("misc")
class MiscService(CMD):
    """
    Example DFFML service command: accepts a required ``-integer`` argument
    and echoes it back to stdout.
    """

    arg_integer = Arg(
        "-integer",
        type=int,
        # plain string: the original used an f-string with no placeholders
        help="Port to do nothing with",
        default=0,
        required=True,
    )

    async def run(self):
        """Print the integer supplied on the command line."""
        print(f"Your integer was: {self.integer}")
| <filename>dffml/skel/service/REPLACE_IMPORT_PACKAGE_NAME/misc.py
from dffml.util.cli.arg import Arg
from dffml.util.cli.cmd import CMD
from dffml.util.entrypoint import entry_point
@entry_point("misc")
class MiscService(CMD):
    """
    Example DFFML service command: accepts a required ``-integer`` argument
    and echoes it back to stdout.
    """

    arg_integer = Arg(
        "-integer",
        type=int,
        # plain string: the original used an f-string with no placeholders
        help="Port to do nothing with",
        default=0,
        required=True,
    )

    async def run(self):
        """Print the integer supplied on the command line."""
        print(f"Your integer was: {self.integer}")
| en | 0.740692 | Description of the DFFML related command | 2.329968 | 2 |
tests/python/prop_test.py | nelmiux/CS347-Data_Management | 4 | 6615628 | <gh_stars>1-10
from org.python.tests.identity import IdentityObject
#This test is for http://bugs.jython.org/issue1271
from org.python.tests.props import PropShadow
a = PropShadow.Derived()
# Base methods foo/bar must remain callable and not be shadowed by
# bean-style properties (regression test for jython issue 1271).
assert a.foo() == 1, 'a'
assert a.bar() == 2, 'b'

from org.python.tests.props import PropShadow

b = PropShadow.Derived()
# Getter access and property access must agree on the same value, and the
# plain method foo() must still win over the getFoo-derived property.
assert b.getBaz() == 4, 'c'
assert b.baz == 4, 'e'
assert b.getFoo() == 3, 'd'
assert b.foo() == 1, 'f'
| from org.python.tests.identity import IdentityObject
#This test is for http://bugs.jython.org/issue1271
from org.python.tests.props import PropShadow
a = PropShadow.Derived()
# Base methods foo/bar must remain callable and not be shadowed by
# bean-style properties (regression test for jython issue 1271).
assert a.foo() == 1, 'a'
assert a.bar() == 2, 'b'

from org.python.tests.props import PropShadow

b = PropShadow.Derived()
# Getter access and property access must agree on the same value, and the
# plain method foo() must still win over the getFoo-derived property.
assert b.getBaz() == 4, 'c'
assert b.baz == 4, 'e'
assert b.getFoo() == 3, 'd'
assert b.foo() == 1, 'f'
sql_gen/test/end_to_end/test_templates_API.py | vecin2/em_automation | 0 | 6615629 | import os
import pytest
from sql_gen.test.utils.app_runner import PrintSQLToConsoleAppRunner
from sql_gen.app_project import AppProject
from sql_gen.emproject import QueryRunner
@pytest.fixture
def app_runner():
    """Yield a PrintSQLToConsoleAppRunner; teardown always runs after the test."""
    app_runner = PrintSQLToConsoleAppRunner()
    yield app_runner
    # fixture finalization: executed after the consuming test completes
    app_runner.teardown()
# disabled because is a computer specific test. We need to test this part with unit tests
@pytest.mark.skip
def test_query_runner(app_runner):
    """Smoke test against a developer-machine EM project (skipped in CI)."""
    # NOTE(review): 'app' is never used — presumably AppProject() is
    # constructed for side effects on the environment; confirm before removing.
    app = AppProject(os.environ)
    app_runner.with_emproject_under(
        "/opt/em/projects/Pacificorp/trunk"
    ).saveAndExit().run_prod()
| import os
import pytest
from sql_gen.test.utils.app_runner import PrintSQLToConsoleAppRunner
from sql_gen.app_project import AppProject
from sql_gen.emproject import QueryRunner
@pytest.fixture
def app_runner():
    """Yield a PrintSQLToConsoleAppRunner; teardown always runs after the test."""
    app_runner = PrintSQLToConsoleAppRunner()
    yield app_runner
    # fixture finalization: executed after the consuming test completes
    app_runner.teardown()
# disabled because is a computer specific test. We need to test this part with unit tests
@pytest.mark.skip
def test_query_runner(app_runner):
    """Smoke test against a developer-machine EM project (skipped in CI)."""
    # NOTE(review): 'app' is never used — presumably AppProject() is
    # constructed for side effects on the environment; confirm before removing.
    app = AppProject(os.environ)
    app_runner.with_emproject_under(
        "/opt/em/projects/Pacificorp/trunk"
    ).saveAndExit().run_prod()
| en | 0.947524 | # disabled because is a computer specific test. We need to test this part with unit tests | 1.986925 | 2 |
src/account/forms.py | smujuzi/ChoreScore | 0 | 6615630 | <filename>src/account/forms.py
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import authenticate
from account.models import Account
from material import Layout, Row, Fieldset
# from protect.config import *
import logging
log = logging.getLogger(__name__)
# convert the errors to text
from django.utils.encoding import force_text
class RegistrationForm(UserCreationForm):
    """Sign-up form: Django's UserCreationForm plus a required email field."""

    email = forms.EmailField(max_length=60, help_text='Required. Add a valid email address')

    class Meta:
        model = Account
        # UserCreationForm names its password pair password1/password2; the
        # source contained a '<PASSWORD>' redaction artifact here, which would
        # make the form reference a non-existent field.
        fields = ('username', 'email', 'first_name', 'last_name', 'password1', 'password2')

    # django-material layout describing how the fields are grouped on screen
    layout = Layout(
        Fieldset('Account details',
                 'username', 'email',
                 Row('password1', 'password2')),
        Fieldset('Personal details',
                 Row('first_name', 'last_name')))
class AccountAuthenticationForm(forms.ModelForm):
    """Login form: validates the email/password pair via authenticate()."""

    password = forms.CharField(label='Password', widget=forms.PasswordInput)

    class Meta:
        model = Account
        fields = ('email', 'password')

    # django-material layout describing how the fields are grouped on screen
    layout = Layout(
        Fieldset('Please Log In',
                 'email', 'password'))

    def clean(self):
        # Only attempt authentication once the individual fields validated.
        # NOTE(review): Django's clean() conventionally returns
        # self.cleaned_data; returning None is tolerated on modern Django —
        # confirm against the project's Django version.
        if self.is_valid():
            email = self.cleaned_data['email']
            password = self.cleaned_data['password']
            if not authenticate(email=email, password=password):
                raise forms.ValidationError("Invalid login")
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import authenticate
from account.models import Account
from material import Layout, Row, Fieldset
# from protect.config import *
import logging
log = logging.getLogger(__name__)
# convert the errors to text
from django.utils.encoding import force_text
class RegistrationForm(UserCreationForm):
    """Sign-up form: Django's UserCreationForm plus a required email field."""

    email = forms.EmailField(max_length=60, help_text='Required. Add a valid email address')

    class Meta:
        model = Account
        # UserCreationForm names its password pair password1/password2; the
        # source contained a '<PASSWORD>' redaction artifact here, which would
        # make the form reference a non-existent field.
        fields = ('username', 'email', 'first_name', 'last_name', 'password1', 'password2')

    # django-material layout describing how the fields are grouped on screen
    layout = Layout(
        Fieldset('Account details',
                 'username', 'email',
                 Row('password1', 'password2')),
        Fieldset('Personal details',
                 Row('first_name', 'last_name')))
class AccountAuthenticationForm(forms.ModelForm):
    """Login form: validates the email/password pair via authenticate()."""

    password = forms.CharField(label='Password', widget=forms.PasswordInput)

    class Meta:
        model = Account
        fields = ('email', 'password')

    # django-material layout describing how the fields are grouped on screen
    layout = Layout(
        Fieldset('Please Log In',
                 'email', 'password'))

    def clean(self):
        # Only attempt authentication once the individual fields validated.
        # NOTE(review): Django's clean() conventionally returns
        # self.cleaned_data; returning None is tolerated on modern Django —
        # confirm against the project's Django version.
        if self.is_valid():
            email = self.cleaned_data['email']
            password = self.cleaned_data['password']
            if not authenticate(email=email, password=password):
                raise forms.ValidationError("Invalid login")
templates/fastApiService/zarubaServiceName/helpers/transport/test_local_rpc.py | state-alchemists/zaruba | 39 | 6615631 | <reponame>state-alchemists/zaruba<filename>templates/fastApiService/zarubaServiceName/helpers/transport/test_local_rpc.py
from typing import Any
from helpers.transport.local_rpc import LocalRPC
def test_mb():
    """Register a local RPC handler, invoke it, and verify the round trip."""
    rpc = LocalRPC()

    @rpc.handle('test_rpc')
    def handle(parameter_1: Any, parameter_2: str) -> Any:
        # the handler itself asserts the positional arguments arrive in order
        assert parameter_1 == 'hello'
        assert parameter_2 == 'world'
        return 'hello world'

    result = rpc.call('test_rpc', 'hello', 'world')
    assert result == 'hello world'
rpc.shutdown() | from typing import Any
from helpers.transport.local_rpc import LocalRPC
def test_mb():
    """Register a local RPC handler, invoke it, and verify the round trip."""
    rpc = LocalRPC()

    @rpc.handle('test_rpc')
    def handle(parameter_1: Any, parameter_2: str) -> Any:
        # the handler itself asserts the positional arguments arrive in order
        assert parameter_1 == 'hello'
        assert parameter_2 == 'world'
        return 'hello world'

    result = rpc.call('test_rpc', 'hello', 'world')
    assert result == 'hello world'
    rpc.shutdown()
fernet-write.py | MaxwellDAnderson/cryptography-sandbox | 1 | 6615632 | <filename>fernet-write.py<gh_stars>1-10
# This script requires use of the cryptography module, which can be
# downloaded with "pip install cryptography"
# For more information and documentation check out the devs' GitHub:
# https://github.com/pyca/cryptography
import string
import secrets
import cryptography
from cryptography.fernet import Fernet
# Generates a Fernet key and saves it to disk.
class key_gen:
    # NOTE: this class body executes at import time; the class is used as a
    # script namespace, never instantiated.
    key = Fernet.generate_key()
    # 'with' guarantees the file handle is closed even if the write fails
    with open("keys.key", "wb") as key_file:
        key_file.write(key)
    my_key = Fernet(key)
# Encrypts the message and saves the ciphertext to file.
class message_encrypt:
    # class body executes at import time (script namespace, not instantiated)
    plaintext = "hello motto".encode()
    f = key_gen.my_key
    ciphertext = f.encrypt(plaintext)
    # 'with' guarantees the file handle is closed even if the write fails
    with open("cipher.file", "wb") as cipherfile:
        cipherfile.write(ciphertext)
# Decrypts the message and saves it to file. NOTE: decrypted messages should
# not normally be saved in unencrypted files, such as this.
class message_decrypt:
    # class body executes at import time (script namespace, not instantiated)
    decrypted_message = message_encrypt.f.decrypt(message_encrypt.ciphertext)
    # 'with' guarantees the file handle is closed even if the write fails
    with open("decrypted.file", "wb") as message_file:
        message_file.write(decrypted_message)
| <filename>fernet-write.py<gh_stars>1-10
# This script requires use of the cryptography module, which can be
# downloaded with "pip install cryptography"
# For more information and documentation check out the devs' GitHub:
# https://github.com/pyca/cryptography
import string
import secrets
import cryptography
from cryptography.fernet import Fernet
# Generates a Fernet key and saves it to disk.
class key_gen:
    # NOTE: this class body executes at import time; the class is used as a
    # script namespace, never instantiated.
    key = Fernet.generate_key()
    # 'with' guarantees the file handle is closed even if the write fails
    with open("keys.key", "wb") as key_file:
        key_file.write(key)
    my_key = Fernet(key)
# Encrypts the message and saves the ciphertext to file.
class message_encrypt:
    # class body executes at import time (script namespace, not instantiated)
    plaintext = "hello motto".encode()
    f = key_gen.my_key
    ciphertext = f.encrypt(plaintext)
    # 'with' guarantees the file handle is closed even if the write fails
    with open("cipher.file", "wb") as cipherfile:
        cipherfile.write(ciphertext)
# Decrypts the message and saves it to file. NOTE: decrypted messages should
# not normally be saved in unencrypted files, such as this.
class message_decrypt:
    # class body executes at import time (script namespace, not instantiated)
    decrypted_message = message_encrypt.f.decrypt(message_encrypt.ciphertext)
    # 'with' guarantees the file handle is closed even if the write fails
    with open("decrypted.file", "wb") as message_file:
        message_file.write(decrypted_message)
| en | 0.8479 | # This script requires use of the cryptography module, which can be # downloaded with "pip install cryptography" # For more information and documentation check out the devs' GitHub: # https://github.com/pyca/cryptography # Generates Key and Saves Key to File: # Encrypts message and saves it to file. # Decrypts message and saves it to file. NOTE: decrypted messages should # not normally be saved in unencrypted files, such as this. | 3.185507 | 3 |
cnn_keras.py | avinashsai/cnn-rnf | 0 | 6615633 | # Copyright 2018 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cPickle, logging, argparse
import numpy as np
from keras import backend as K
from keras.models import Model
from keras.engine import Layer
from keras.layers import *
from keras.layers.core import *
from keras.layers.embeddings import *
from keras.layers.convolutional import *
from keras.utils import np_utils
from sklearn.metrics import accuracy_score
from proc_data import WordVecs
np.random.seed(1332)
logger = logging.getLogger("cnn_rnf.cnn_keras")
class ConvInputLayer(Layer):
    """
    Distribute word vectors into chunks - input for the convolution operation
    Input dim: [batch_size x sentence_len x word_vec_dim]
    Output dim: [batch_size x (sentence_len - filter_width + 1) x filter_width x word_vec_dim]
    """
    def __init__(self, filter_width, sent_len, **kwargs):
        super(ConvInputLayer, self).__init__(**kwargs)
        self.filter_width = filter_width  # n-gram window size
        self.sent_len = sent_len          # padded sentence length

    def call(self, x):
        # slide a window of width filter_width over the token axis and stack
        # the windows along a new axis 1
        chunks = []
        for i in xrange(self.sent_len - self.filter_width + 1):
            chunk = x[:, i:i+self.filter_width, :]
            chunk = K.expand_dims(chunk, 1)  # add the chunk axis
            chunks.append(chunk)
        return K.concatenate(chunks, 1)

    def compute_output_shape(self, input_shape):
        # one chunk per valid window position
        return (input_shape[0], self.sent_len - self.filter_width + 1, self.filter_width, input_shape[-1])
def train_conv_net(datasets,              # word indices of train/dev/test sentences
                   U,                     # pre-trained word embedding matrix
                   filter_type='linear',  # 'linear' (Conv1D) or 'rnf' (recurrent neural filter)
                   filter_width=5,        # filter width for n-grams
                   hidden_dim=300,        # dim of sentence vector
                   emb_dropout=0.4,       # dropout after the embedding layer
                   dropout=0.4,           # LSTM input-transform dropout (rnf only)
                   recurrent_dropout=0.4, # LSTM recurrent dropout (rnf only)
                   pool_dropout=0.,       # dropout after max pooling
                   batch_size=32,         # mini batch size
                   n_epochs=15):          # number of training epochs
    """
    train and evaluate convolutional neural network model

    Trains on the train split, tracks dev accuracy each epoch, and finally
    prints the test accuracy achieved at the best dev epoch.
    """
    # print params
    print ("PARAMS: filter_type=%s, filter_width=%d, hidden_dim=%d, emb_dropout=%.2f, dropout=%.2f, recurrent_dropout=%.2f, pool_dropout=%.2f, batch_size=%d"\
        %(filter_type, filter_width, hidden_dim, emb_dropout, dropout, recurrent_dropout, pool_dropout, batch_size))
    # prepare datasets: each row is [word indices..., label]
    train_set, dev_set, test_set = datasets
    train_set_x, dev_set_x, test_set_x = train_set[:,:-1], dev_set[:,:-1], test_set[:,:-1]
    train_set_y, dev_set_y, test_set_y = train_set[:,-1], dev_set[:,-1], test_set[:,-1]
    n_classes = np.max(train_set_y) + 1
    # one-hot encode the training labels for categorical_crossentropy
    # NOTE(review): dev/test labels stay integer-encoded while the loss expects
    # one-hot targets — confirm the installed Keras version accepts this.
    train_set_y = np_utils.to_categorical(train_set_y, n_classes)
    # build model with keras
    n_tok = len(train_set_x[0])
    vocab_size, emb_dim = U.shape
    sequence = Input(shape=(n_tok,), dtype='int32')
    inputs, train_inputs, dev_inputs, test_inputs = [sequence], [train_set_x], [dev_set_x], [test_set_x]
    # frozen pre-trained embeddings followed by dropout
    emb_layer = Embedding(vocab_size, emb_dim, weights=[U], trainable=False, input_length=n_tok)(sequence)
    emb_layer = Dropout(emb_dropout)(emb_layer)
    if filter_type == 'linear':
        # standard 1-D convolution over the token axis
        conv_layer = Conv1D(hidden_dim, filter_width, activation='relu')(emb_layer)
    elif filter_type == 'rnf':
        # recurrent neural filter: run an LSTM over each n-gram window
        emb_layer = ConvInputLayer(filter_width, n_tok)(emb_layer)
        conv_layer = TimeDistributed(LSTM(hidden_dim, dropout=dropout, recurrent_dropout=recurrent_dropout))(emb_layer)
    # max-over-time pooling produces a fixed-size sentence vector
    text_layer = GlobalMaxPooling1D()(conv_layer)
    text_layer = Dropout(pool_dropout)(text_layer)
    pred_layer = Dense(n_classes, activation='softmax')(text_layer)
    model = Model(inputs=inputs, outputs=pred_layer)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # start training
    best_dev_perf, best_test_perf = 0., 0.
    for epo in xrange(n_epochs):
        # training: one epoch at a time so we can evaluate in between
        model.fit(train_inputs, train_set_y, batch_size=batch_size, epochs=1, verbose=0)
        # evaluation: index [1] is the 'accuracy' metric
        dev_perf = model.evaluate(dev_inputs,dev_set_y, batch_size=batch_size, verbose=0)[1]
        test_perf = model.evaluate(test_inputs,test_set_y, batch_size=batch_size, verbose=0)[1]
        # early-stopping bookkeeping: remember test accuracy at the best dev epoch
        if dev_perf >= best_dev_perf:
            best_dev_perf, best_test_perf = dev_perf, test_perf
        logger.info("Epoch: %d Dev perf: %.3f Test perf: %.3f" %(epo+1, dev_perf*100, test_perf*100))
    print ("Dev perf: %.3f Test perf: %.3f" %(best_dev_perf*100, best_test_perf*100))
def get_idx_from_sent(words, word_idx_map, max_l=50, filter_width=5):
    """
    Transforms sentence into a list of indices. Pad with zeroes.

    The sentence is left-padded with ``filter_width - 1`` zeros and
    right-padded with zeros until the result is at least
    ``max_l + 2*(filter_width - 1)`` long.  Words missing from
    ``word_idx_map`` are skipped.
    """
    pad = filter_width - 1
    indices = [0] * pad
    indices.extend(word_idx_map[w] for w in words if w in word_idx_map)
    target_len = max_l + 2 * pad
    if len(indices) < target_len:
        indices.extend([0] * (target_len - len(indices)))
    return indices
def make_idx_data(revs, word_idx_map, max_l=50, filter_width=4):
    """
    Transforms sentences into a 2-d matrix.

    Each review is converted to padded word indices with its label appended
    as the last column; rows are routed to the train/dev/test matrix by the
    review's 'split' field (0/1/2).  Other split values are dropped.
    """
    buckets = {0: [], 1: [], 2: []}
    for rev in revs:
        row = get_idx_from_sent(rev['words'], word_idx_map, max_l, filter_width)
        row.append(rev['y'])
        if rev['split'] in buckets:
            buckets[rev['split']].append(row)
    train = np.array(buckets[0], dtype='int32')
    dev = np.array(buckets[1], dtype='int32')
    test = np.array(buckets[2], dtype='int32')
    return train, dev, test
def parse_args():
    """Parse and return the command-line options for the CNN-RNF trainer."""
    parser = argparse.ArgumentParser(
        description="Convolutional neural networks with recurrent neural filters")
    # optional flags: (flag, add_argument keyword arguments)
    options = [
        ('--filter-type', dict(type=str, default="linear", choices=['linear', 'rnf'],
                               help="filter type: linear or rnf")),
        ('--filter-width', dict(type=int, default=6, help="convolution filter width")),
        ('--hidden-dim', dict(type=int, default=300, help="penultimate layer dimension")),
        ('--emb-dropout', dict(type=float, default=0.4, help="dropout rate for embedding layer")),
        ('--dropout', dict(type=float, default=0.4, help="dropout rate for LSTM linear transformation layer")),
        ('--recurrent-dropout', dict(type=float, default=0.4, help="dropout rate for LSTM recurrent layer")),
        ('--pool-dropout', dict(type=float, default=0., help="dropout rate for pooling layer")),
        ('--batch-size', dict(type=int, default=32, help="mini-batch size")),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    # required positional argument
    parser.add_argument('dataset', type=str, help="processed dataset file path")
    return parser.parse_args()
def main():
    """Entry point: load the pickled dataset, then train and evaluate."""
    args = parse_args()
    logger.info("loading data...")
    # the dataset pickle holds (reviews, pre-trained word vectors, max sentence length)
    revs, wordvecs, max_l = cPickle.load(open(args.dataset,'rb'))
    logger.info("data loaded!")
    datasets = make_idx_data(revs, wordvecs.word_idx_map, max_l=max_l, filter_width=args.filter_width)
    # n_epochs is fixed at 20 here, overriding the function's default of 15
    train_conv_net(datasets,
                   wordvecs.W,
                   filter_type=args.filter_type,
                   filter_width=args.filter_width,
                   hidden_dim=args.hidden_dim,
                   emb_dropout=args.emb_dropout,
                   dropout=args.dropout,
                   recurrent_dropout=args.recurrent_dropout,
                   pool_dropout=args.pool_dropout,
                   batch_size=args.batch_size,
                   n_epochs=20)
if __name__=="__main__":
    # configure root logging before anything else emits log records
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    logger.info('begin logging')
    main()
    logger.info("end logging")
| # Copyright 2018 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cPickle, logging, argparse
import numpy as np
from keras import backend as K
from keras.models import Model
from keras.engine import Layer
from keras.layers import *
from keras.layers.core import *
from keras.layers.embeddings import *
from keras.layers.convolutional import *
from keras.utils import np_utils
from sklearn.metrics import accuracy_score
from proc_data import WordVecs
np.random.seed(1332)
logger = logging.getLogger("cnn_rnf.cnn_keras")
class ConvInputLayer(Layer):
    """
    Distribute word vectors into chunks - input for the convolution operation
    Input dim: [batch_size x sentence_len x word_vec_dim]
    Output dim: [batch_size x (sentence_len - filter_width + 1) x filter_width x word_vec_dim]
    """
    def __init__(self, filter_width, sent_len, **kwargs):
        super(ConvInputLayer, self).__init__(**kwargs)
        self.filter_width = filter_width  # n-gram window size
        self.sent_len = sent_len          # padded sentence length

    def call(self, x):
        # slide a window of width filter_width over the token axis and stack
        # the windows along a new axis 1
        chunks = []
        for i in xrange(self.sent_len - self.filter_width + 1):
            chunk = x[:, i:i+self.filter_width, :]
            chunk = K.expand_dims(chunk, 1)  # add the chunk axis
            chunks.append(chunk)
        return K.concatenate(chunks, 1)

    def compute_output_shape(self, input_shape):
        # one chunk per valid window position
        return (input_shape[0], self.sent_len - self.filter_width + 1, self.filter_width, input_shape[-1])
def train_conv_net(datasets,              # word indices of train/dev/test sentences
                   U,                     # pre-trained word embedding matrix
                   filter_type='linear',  # 'linear' (Conv1D) or 'rnf' (recurrent neural filter)
                   filter_width=5,        # filter width for n-grams
                   hidden_dim=300,        # dim of sentence vector
                   emb_dropout=0.4,       # dropout after the embedding layer
                   dropout=0.4,           # LSTM input-transform dropout (rnf only)
                   recurrent_dropout=0.4, # LSTM recurrent dropout (rnf only)
                   pool_dropout=0.,       # dropout after max pooling
                   batch_size=32,         # mini batch size
                   n_epochs=15):          # number of training epochs
    """
    train and evaluate convolutional neural network model

    Trains on the train split, tracks dev accuracy each epoch, and finally
    prints the test accuracy achieved at the best dev epoch.
    """
    # print params
    print ("PARAMS: filter_type=%s, filter_width=%d, hidden_dim=%d, emb_dropout=%.2f, dropout=%.2f, recurrent_dropout=%.2f, pool_dropout=%.2f, batch_size=%d"\
        %(filter_type, filter_width, hidden_dim, emb_dropout, dropout, recurrent_dropout, pool_dropout, batch_size))
    # prepare datasets: each row is [word indices..., label]
    train_set, dev_set, test_set = datasets
    train_set_x, dev_set_x, test_set_x = train_set[:,:-1], dev_set[:,:-1], test_set[:,:-1]
    train_set_y, dev_set_y, test_set_y = train_set[:,-1], dev_set[:,-1], test_set[:,-1]
    n_classes = np.max(train_set_y) + 1
    # one-hot encode the training labels for categorical_crossentropy
    # NOTE(review): dev/test labels stay integer-encoded while the loss expects
    # one-hot targets — confirm the installed Keras version accepts this.
    train_set_y = np_utils.to_categorical(train_set_y, n_classes)
    # build model with keras
    n_tok = len(train_set_x[0])
    vocab_size, emb_dim = U.shape
    sequence = Input(shape=(n_tok,), dtype='int32')
    inputs, train_inputs, dev_inputs, test_inputs = [sequence], [train_set_x], [dev_set_x], [test_set_x]
    # frozen pre-trained embeddings followed by dropout
    emb_layer = Embedding(vocab_size, emb_dim, weights=[U], trainable=False, input_length=n_tok)(sequence)
    emb_layer = Dropout(emb_dropout)(emb_layer)
    if filter_type == 'linear':
        # standard 1-D convolution over the token axis
        conv_layer = Conv1D(hidden_dim, filter_width, activation='relu')(emb_layer)
    elif filter_type == 'rnf':
        # recurrent neural filter: run an LSTM over each n-gram window
        emb_layer = ConvInputLayer(filter_width, n_tok)(emb_layer)
        conv_layer = TimeDistributed(LSTM(hidden_dim, dropout=dropout, recurrent_dropout=recurrent_dropout))(emb_layer)
    # max-over-time pooling produces a fixed-size sentence vector
    text_layer = GlobalMaxPooling1D()(conv_layer)
    text_layer = Dropout(pool_dropout)(text_layer)
    pred_layer = Dense(n_classes, activation='softmax')(text_layer)
    model = Model(inputs=inputs, outputs=pred_layer)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # start training
    best_dev_perf, best_test_perf = 0., 0.
    for epo in xrange(n_epochs):
        # training: one epoch at a time so we can evaluate in between
        model.fit(train_inputs, train_set_y, batch_size=batch_size, epochs=1, verbose=0)
        # evaluation: index [1] is the 'accuracy' metric
        dev_perf = model.evaluate(dev_inputs,dev_set_y, batch_size=batch_size, verbose=0)[1]
        test_perf = model.evaluate(test_inputs,test_set_y, batch_size=batch_size, verbose=0)[1]
        # early-stopping bookkeeping: remember test accuracy at the best dev epoch
        if dev_perf >= best_dev_perf:
            best_dev_perf, best_test_perf = dev_perf, test_perf
        logger.info("Epoch: %d Dev perf: %.3f Test perf: %.3f" %(epo+1, dev_perf*100, test_perf*100))
    print ("Dev perf: %.3f Test perf: %.3f" %(best_dev_perf*100, best_test_perf*100))
def get_idx_from_sent(words, word_idx_map, max_l=50, filter_width=5):
    """
    Transforms sentence into a list of indices. Pad with zeroes.

    The sentence is left-padded with ``filter_width - 1`` zeros and
    right-padded with zeros until the result is at least
    ``max_l + 2*(filter_width - 1)`` long.  Words missing from
    ``word_idx_map`` are skipped.
    """
    pad = filter_width - 1
    indices = [0] * pad
    indices.extend(word_idx_map[w] for w in words if w in word_idx_map)
    target_len = max_l + 2 * pad
    if len(indices) < target_len:
        indices.extend([0] * (target_len - len(indices)))
    return indices
def make_idx_data(revs, word_idx_map, max_l=50, filter_width=4):
    """
    Transforms sentences into a 2-d matrix.

    Each review is converted to padded word indices with its label appended
    as the last column; rows are routed to the train/dev/test matrix by the
    review's 'split' field (0/1/2).  Other split values are dropped.
    """
    buckets = {0: [], 1: [], 2: []}
    for rev in revs:
        row = get_idx_from_sent(rev['words'], word_idx_map, max_l, filter_width)
        row.append(rev['y'])
        if rev['split'] in buckets:
            buckets[rev['split']].append(row)
    train = np.array(buckets[0], dtype='int32')
    dev = np.array(buckets[1], dtype='int32')
    test = np.array(buckets[2], dtype='int32')
    return train, dev, test
def parse_args():
    """Parse and return the command-line options for the CNN-RNF trainer."""
    parser = argparse.ArgumentParser(
        description="Convolutional neural networks with recurrent neural filters")
    # optional flags: (flag, add_argument keyword arguments)
    options = [
        ('--filter-type', dict(type=str, default="linear", choices=['linear', 'rnf'],
                               help="filter type: linear or rnf")),
        ('--filter-width', dict(type=int, default=6, help="convolution filter width")),
        ('--hidden-dim', dict(type=int, default=300, help="penultimate layer dimension")),
        ('--emb-dropout', dict(type=float, default=0.4, help="dropout rate for embedding layer")),
        ('--dropout', dict(type=float, default=0.4, help="dropout rate for LSTM linear transformation layer")),
        ('--recurrent-dropout', dict(type=float, default=0.4, help="dropout rate for LSTM recurrent layer")),
        ('--pool-dropout', dict(type=float, default=0., help="dropout rate for pooling layer")),
        ('--batch-size', dict(type=int, default=32, help="mini-batch size")),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    # required positional argument
    parser.add_argument('dataset', type=str, help="processed dataset file path")
    return parser.parse_args()
def main():
    """Entry point: load the pickled dataset, then train and evaluate."""
    args = parse_args()
    logger.info("loading data...")
    # the dataset pickle holds (reviews, pre-trained word vectors, max sentence length)
    revs, wordvecs, max_l = cPickle.load(open(args.dataset,'rb'))
    logger.info("data loaded!")
    datasets = make_idx_data(revs, wordvecs.word_idx_map, max_l=max_l, filter_width=args.filter_width)
    # n_epochs is fixed at 20 here, overriding the function's default of 15
    train_conv_net(datasets,
                   wordvecs.W,
                   filter_type=args.filter_type,
                   filter_width=args.filter_width,
                   hidden_dim=args.hidden_dim,
                   emb_dropout=args.emb_dropout,
                   dropout=args.dropout,
                   recurrent_dropout=args.recurrent_dropout,
                   pool_dropout=args.pool_dropout,
                   batch_size=args.batch_size,
                   n_epochs=20)
if __name__=="__main__":
    # configure root logging before anything else emits log records
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    logger.info('begin logging')
    main()
    logger.info("end logging")
| en | 0.803995 | # Copyright 2018 Bloomberg Finance L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Distribute word vectors into chunks - input for the convolution operation Input dim: [batch_size x sentence_len x word_vec_dim] Output dim: [batch_size x (sentence_len - filter_width + 1) x filter_width x word_vec_dim] # word indices of train/dev/test sentences # pre-trained word embeddings # linear or rnf # filter width for n-grams # dim of sentence vector # mini batch size train and evaluate convolutional neural network model # print params # prepare datasets # build model with keras # start training # training # evaluation Transforms sentence into a list of indices. Pad with zeroes. Transforms sentences into a 2-d matrix. | 2.240334 | 2 |
Hackerrank/Algorithms/drawing-book.py | PROxZIMA/Competitive-Coding | 1 | 6615634 | n = int(input())
p = int(input())  # target page

# Turning from the front reaches page p in p // 2 turns; turning from the
# back takes n // 2 - p // 2 turns.  For non-negative ints
# (p - p%2) // 2 == p // 2 and (n - n%2) // 2 == n // 2, so the original
# even/odd branches computed the same values — collapse them.
front = p // 2
back = n // 2 - front
print(min(front, back))
n = int(input())  # total number of pages
p = int(input())  # target page

# Turning from the front reaches page p in p // 2 turns; turning from the
# back takes n // 2 - p // 2 turns.  For non-negative ints
# (p - p%2) // 2 == p // 2 and (n - n%2) // 2 == n // 2, so the original
# even/odd branches computed the same values — collapse them.
front = p // 2
back = n // 2 - front
print(min(front, back))
| none | 1 | 3.42419 | 3 | |
src/junction/markdown/children.py | explody/Junction | 16 | 6615635 | from typing import List, Any
from markdown import Markdown
from markdown.extensions import Extension
from markdown.blockprocessors import BlockProcessor
import re
import xml.etree.ElementTree as etree
class ChildrenExtension(Extension):
"""Markdown extension for rendering the Confluence child pages macro.
Example: `:include-children:`
"""
def extendMarkdown(self, md: Markdown) -> None:
md.registerExtension(self)
md.parser.blockprocessors.register(
ChildrenBlockProcessor(md.parser), "children", 25
)
class ChildrenBlockProcessor(BlockProcessor):
BLOCK_RE = re.compile(
r"\s*:include-children:\s*", re.MULTILINE | re.DOTALL | re.VERBOSE
)
def test(self, parent: etree.Element, block: str) -> bool:
return bool(self.BLOCK_RE.match(block))
def run(self, parent: etree.Element, blocks: List[str]) -> None:
blocks.pop(0)
etree.SubElement(
parent,
"ac:structured-macro",
{
"ac:name": "children",
"ac:schema-version": "2",
"ac:macro-id": "92c7a2c4-5cca-4ecf-81a2-946ef7388c71",
},
).tail = "\n"
def makeExtension(**kwargs: Any) -> ChildrenExtension:
return ChildrenExtension(**kwargs)
| from typing import List, Any
from markdown import Markdown
from markdown.extensions import Extension
from markdown.blockprocessors import BlockProcessor
import re
import xml.etree.ElementTree as etree
class ChildrenExtension(Extension):
"""Markdown extension for rendering the Confluence child pages macro.
Example: `:include-children:`
"""
def extendMarkdown(self, md: Markdown) -> None:
md.registerExtension(self)
md.parser.blockprocessors.register(
ChildrenBlockProcessor(md.parser), "children", 25
)
class ChildrenBlockProcessor(BlockProcessor):
BLOCK_RE = re.compile(
r"\s*:include-children:\s*", re.MULTILINE | re.DOTALL | re.VERBOSE
)
def test(self, parent: etree.Element, block: str) -> bool:
return bool(self.BLOCK_RE.match(block))
def run(self, parent: etree.Element, blocks: List[str]) -> None:
blocks.pop(0)
etree.SubElement(
parent,
"ac:structured-macro",
{
"ac:name": "children",
"ac:schema-version": "2",
"ac:macro-id": "92c7a2c4-5cca-4ecf-81a2-946ef7388c71",
},
).tail = "\n"
def makeExtension(**kwargs: Any) -> ChildrenExtension:
return ChildrenExtension(**kwargs)
| en | 0.428789 | Markdown extension for rendering the Confluence child pages macro. Example: `:include-children:` | 2.507498 | 3 |
modules/scripts/bin/set-contact-photo.py | sumnerevans/home-manager-config | 2 | 6615636 | <reponame>sumnerevans/home-manager-config
#! /usr/bin/env python3
import io
import sys
import vobject
from PIL import Image
if len(sys.argv) != 3:
print("Usage:")
print(" set-contact-photo <vcf file> <photo file>")
sys.exit(1)
with open(sys.argv[1]) as cf:
vcard = vobject.readOne(cf.read())
with open(sys.argv[2], 'rb') as imagefile:
image_data = imagefile.read()
image = Image.open(io.BytesIO(image_data))
photos = [c for c in vcard.getChildren() if c.name.lower() == "photo"]
if len(photos) == 1:
# Check if the photo is the same
if image_data == photos[0].value:
print("Photos are the same")
sys.exit(0)
while True:
print(f"Already have a photo for {vcard.fn.value}. Replace? [Yn]: ", end="")
response = input().lower()
if response == "y":
break
if response == "n":
sys.exit(0)
if len(photos) > 1:
while True:
print(
f"Multiple photos found for {vcard.fn.value}. Remove all and replace? [Yn]: ",
end="",
)
response = input().lower()
if response == "y":
break
if response == "n":
sys.exit(0)
# Remove all old photos:
for child in photos:
vcard.remove(child)
photo = vcard.add('photo')
photo.type_param = image.format
photo.encoding_param = 'b'
photo.value = image_data
with open(sys.argv[1], 'w+') as cf:
cf.write(vcard.serialize())
| #! /usr/bin/env python3
import io
import sys
import vobject
from PIL import Image
if len(sys.argv) != 3:
print("Usage:")
print(" set-contact-photo <vcf file> <photo file>")
sys.exit(1)
with open(sys.argv[1]) as cf:
vcard = vobject.readOne(cf.read())
with open(sys.argv[2], 'rb') as imagefile:
image_data = imagefile.read()
image = Image.open(io.BytesIO(image_data))
photos = [c for c in vcard.getChildren() if c.name.lower() == "photo"]
if len(photos) == 1:
# Check if the photo is the same
if image_data == photos[0].value:
print("Photos are the same")
sys.exit(0)
while True:
print(f"Already have a photo for {vcard.fn.value}. Replace? [Yn]: ", end="")
response = input().lower()
if response == "y":
break
if response == "n":
sys.exit(0)
if len(photos) > 1:
while True:
print(
f"Multiple photos found for {vcard.fn.value}. Remove all and replace? [Yn]: ",
end="",
)
response = input().lower()
if response == "y":
break
if response == "n":
sys.exit(0)
# Remove all old photos:
for child in photos:
vcard.remove(child)
photo = vcard.add('photo')
photo.type_param = image.format
photo.encoding_param = 'b'
photo.value = image_data
with open(sys.argv[1], 'w+') as cf:
cf.write(vcard.serialize()) | en | 0.458872 | #! /usr/bin/env python3 # Check if the photo is the same # Remove all old photos: | 2.790229 | 3 |
apps/athlete/migrations/0004_auto_20201031_1746.py | Akijunior/desafioCelero | 0 | 6615637 | <reponame>Akijunior/desafioCelero
# Generated by Django 3.1.1 on 2020-10-31 20:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('athlete', '0003_auto_20201030_2018'),
]
operations = [
migrations.AlterModelOptions(
name='athlete',
options={'ordering': ['athlete_id']},
),
migrations.AlterModelOptions(
name='game',
options={'ordering': ['game_name']},
),
migrations.AlterModelOptions(
name='medal',
options={'ordering': ['athlete__athlete_id']},
),
migrations.AlterModelOptions(
name='sport',
options={'ordering': ['sport']},
),
migrations.AddField(
model_name='medal',
name='game',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='athlete.game'),
preserve_default=False,
),
]
| # Generated by Django 3.1.1 on 2020-10-31 20:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('athlete', '0003_auto_20201030_2018'),
]
operations = [
migrations.AlterModelOptions(
name='athlete',
options={'ordering': ['athlete_id']},
),
migrations.AlterModelOptions(
name='game',
options={'ordering': ['game_name']},
),
migrations.AlterModelOptions(
name='medal',
options={'ordering': ['athlete__athlete_id']},
),
migrations.AlterModelOptions(
name='sport',
options={'ordering': ['sport']},
),
migrations.AddField(
model_name='medal',
name='game',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='athlete.game'),
preserve_default=False,
),
] | en | 0.759269 | # Generated by Django 3.1.1 on 2020-10-31 20:46 | 1.771019 | 2 |
backend/frameworkskills/apps.py | kush-daga/My-Portfolio | 0 | 6615638 | <reponame>kush-daga/My-Portfolio
from django.apps import AppConfig
class FrameworkskillsConfig(AppConfig):
name = 'frameworkskills'
| from django.apps import AppConfig
class FrameworkskillsConfig(AppConfig):
name = 'frameworkskills' | none | 1 | 1.117781 | 1 | |
services/dispatcher/data/summary_dao_factory.py | jizt-it/jizt-backend-microservice | 2 | 6615639 | <reponame>jizt-it/jizt-backend-microservice
# Copyright (C) 2020-2021 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# For license information on the libraries used, see LICENSE.
"""Data Access Object (DAO) Factory."""
__version__ = '0.1.3'
import logging
from summary_dao_postgresql import SummaryDAOPostgresql
class SummaryDAOFactory:
"""Summary DAO Factory."""
_instance = None
def __new__(cls,
host: str,
dbname: str,
user: str,
password: str,
log_level: int = logging.ERROR
) -> SummaryDAOPostgresql:
"""Singleton.
Args:
host (:obj:`str`):
The database host.
dbname (:obj:`str`):
The database name.
user (:obj:`str`):
The database user.
password (:obj:`str`):
The user's password.
log_level (:obj:`int`, `optional`, defaults to `logging.ERROR`):
The log level.
Returns:
:obj:`SummaryDAOFactory`: The single instance
of the DAO.
"""
if cls._instance is None:
cls._instance = SummaryDAOPostgresql(
host,
dbname,
user,
password,
log_level
)
return cls._instance
| # Copyright (C) 2020-2021 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# For license information on the libraries used, see LICENSE.
"""Data Access Object (DAO) Factory."""
__version__ = '0.1.3'
import logging
from summary_dao_postgresql import SummaryDAOPostgresql
class SummaryDAOFactory:
"""Summary DAO Factory."""
_instance = None
def __new__(cls,
host: str,
dbname: str,
user: str,
password: str,
log_level: int = logging.ERROR
) -> SummaryDAOPostgresql:
"""Singleton.
Args:
host (:obj:`str`):
The database host.
dbname (:obj:`str`):
The database name.
user (:obj:`str`):
The database user.
password (:obj:`str`):
The user's password.
log_level (:obj:`int`, `optional`, defaults to `logging.ERROR`):
The log level.
Returns:
:obj:`SummaryDAOFactory`: The single instance
of the DAO.
"""
if cls._instance is None:
cls._instance = SummaryDAOPostgresql(
host,
dbname,
user,
password,
log_level
)
return cls._instance | en | 0.775221 | # Copyright (C) 2020-2021 <NAME> <<EMAIL>> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # # For license information on the libraries used, see LICENSE. Data Access Object (DAO) Factory. Summary DAO Factory. Singleton. Args: host (:obj:`str`): The database host. dbname (:obj:`str`): The database name. user (:obj:`str`): The database user. password (:obj:`str`): The user's password. log_level (:obj:`int`, `optional`, defaults to `logging.ERROR`): The log level. Returns: :obj:`SummaryDAOFactory`: The single instance of the DAO. | 2.20295 | 2 |
stunning/exceptions.py | Ahuge/stunning-barnacle | 1 | 6615640 | class StunningError(Exception):
"""Stunning base class for exceptions"""
class ParsingError(StunningError, BufferError):
"""General Error while parsing."""
class ResolvingError(ParsingError):
"""General Error while Resolving a set of tokens to a node."""
class LexerError(StunningError, BufferError):
"""General Error while tokenizing."""
class KeyFrameError(StunningError):
"""Error related to keyframe values"""
| class StunningError(Exception):
"""Stunning base class for exceptions"""
class ParsingError(StunningError, BufferError):
"""General Error while parsing."""
class ResolvingError(ParsingError):
"""General Error while Resolving a set of tokens to a node."""
class LexerError(StunningError, BufferError):
"""General Error while tokenizing."""
class KeyFrameError(StunningError):
"""Error related to keyframe values"""
| en | 0.712762 | Stunning base class for exceptions General Error while parsing. General Error while Resolving a set of tokens to a node. General Error while tokenizing. Error related to keyframe values | 2.644334 | 3 |
CodeChef/SUPW/SUPW.py | Naga-kalyan/competitive_programming | 8 | 6615641 | n = int(input())
a = list(map(int, input().split()))
for i in reversed(range(0,n-3)):
a[i] += min(a[i+1],a[i+2],a[i+3])
print(min(a[:3]))
| n = int(input())
a = list(map(int, input().split()))
for i in reversed(range(0,n-3)):
a[i] += min(a[i+1],a[i+2],a[i+3])
print(min(a[:3]))
| none | 1 | 2.960367 | 3 | |
LMs/probe_tasks/src/training/__init__.py | baidu-research/task_space | 10 | 6615642 | <reponame>baidu-research/task_space
from src.training.utils import *
from src.training.train import *
| from src.training.utils import *
from src.training.train import * | none | 1 | 1.108005 | 1 | |
viper/web/viperweb/urls.py | Mario-Kart-Felix/mal-scrap | 0 | 6615643 | # This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
from django.conf.urls import url
from django.contrib.auth.views import login, logout
from . import views
from .forms import MyAuthenticationForm
urlpatterns = [
# login/logout (accounts)
url(r'^accounts/login/$', login,
{'template_name': 'viperweb/user_login.html', 'authentication_form': MyAuthenticationForm}, name='login'),
url(r'^accounts/logout/$', logout,
{'template_name': 'viperweb/logged_out.html'}, name='logout'),
# Main Page
url(r'^$', views.MainPageView.as_view(), name='main_page'),
url(r'^project/(?P<project>[^/]+)/$', views.MainPageView.as_view(), name='main-page-project'), # Project Page (Main view)
url(r'^about/', views.AboutView.as_view(), name='about'),
url(r'^changelog/', views.ChangelogView.as_view(), name='changelog'),
url(r'^config/$', views.ConfigView.as_view(), name='config-file'),
url(r'^create/$', views.CreateProjectView.as_view(), name='create-project'),
url(r'^project/default/cli/$', views.CliView.as_view(), name='cli-default'),
url(r'^project/(?P<project>[^/]+)/cli/$', views.CliView.as_view(), name='cli'),
url(r'^project/(?P<project>[^/]+)/file/(?P<sha256>[^/]+)/$', views.FileView.as_view(), name='file-view'), # File Page
url(r'^project/(?P<project>[^/]+)/file/$', views.FileView.as_view(), name='file-list'), # File List
url(r'^project/(?P<project>[^/]+)/file/(?P<sha256>[^/]+)/cuckoo/$', views.CuckooCheckOrSubmitView.as_view(), name='file-cuckoo-submit'),
url(r'^project/(?P<project>[^/]+)/hex/$', views.HexView.as_view(), name='hex-view'), # Hex View
url(r'^project/(?P<project>[^/]+)/module/$', views.RunModuleView.as_view(), name='run-module'), # Run Module Ajax
url(r'^search/$', views.SearchFileView.as_view(), name='search-file'), # Search
url(r'^urldownload/', views.UrlDownloadView.as_view(), name='url-download'), # Upload from URL
url(r'^yara/$', views.YaraRulesView.as_view(), name='yara-rules'), # Yara
url(r'^charts/$', views.ChartView.as_view(), name='chart-view'),
]
| # This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
from django.conf.urls import url
from django.contrib.auth.views import login, logout
from . import views
from .forms import MyAuthenticationForm
urlpatterns = [
# login/logout (accounts)
url(r'^accounts/login/$', login,
{'template_name': 'viperweb/user_login.html', 'authentication_form': MyAuthenticationForm}, name='login'),
url(r'^accounts/logout/$', logout,
{'template_name': 'viperweb/logged_out.html'}, name='logout'),
# Main Page
url(r'^$', views.MainPageView.as_view(), name='main_page'),
url(r'^project/(?P<project>[^/]+)/$', views.MainPageView.as_view(), name='main-page-project'), # Project Page (Main view)
url(r'^about/', views.AboutView.as_view(), name='about'),
url(r'^changelog/', views.ChangelogView.as_view(), name='changelog'),
url(r'^config/$', views.ConfigView.as_view(), name='config-file'),
url(r'^create/$', views.CreateProjectView.as_view(), name='create-project'),
url(r'^project/default/cli/$', views.CliView.as_view(), name='cli-default'),
url(r'^project/(?P<project>[^/]+)/cli/$', views.CliView.as_view(), name='cli'),
url(r'^project/(?P<project>[^/]+)/file/(?P<sha256>[^/]+)/$', views.FileView.as_view(), name='file-view'), # File Page
url(r'^project/(?P<project>[^/]+)/file/$', views.FileView.as_view(), name='file-list'), # File List
url(r'^project/(?P<project>[^/]+)/file/(?P<sha256>[^/]+)/cuckoo/$', views.CuckooCheckOrSubmitView.as_view(), name='file-cuckoo-submit'),
url(r'^project/(?P<project>[^/]+)/hex/$', views.HexView.as_view(), name='hex-view'), # Hex View
url(r'^project/(?P<project>[^/]+)/module/$', views.RunModuleView.as_view(), name='run-module'), # Run Module Ajax
url(r'^search/$', views.SearchFileView.as_view(), name='search-file'), # Search
url(r'^urldownload/', views.UrlDownloadView.as_view(), name='url-download'), # Upload from URL
url(r'^yara/$', views.YaraRulesView.as_view(), name='yara-rules'), # Yara
url(r'^charts/$', views.ChartView.as_view(), name='chart-view'),
]
| en | 0.694175 | # This file is part of Viper - https://github.com/viper-framework/viper # See the file 'LICENSE' for copying permission. # login/logout (accounts) # Main Page # Project Page (Main view) # File Page # File List # Hex View # Run Module Ajax # Search # Upload from URL # Yara | 1.92739 | 2 |
trainer/evaluate.py | antoniobarbalau/black-box-ripper | 22 | 6615644 | <reponame>antoniobarbalau/black-box-ripper<gh_stars>10-100
import numpy as np
import torch
import setup
def evaluate(model, dataset):
model.eval()
dataloader = dataset.test_dataloader()
accs = 0
n_samples = 0
for iter_n, batch in enumerate(dataloader):
images = batch[0].to(setup.device)
targets = batch[1].to(setup.device)
n_samples += targets.shape[0]
with torch.no_grad():
outputs = model(images)
acc = outputs.max(1)[1].eq(targets).float().sum()
acc = acc.detach().cpu()
accs += acc
print(f'{model.name} accuracy: {accs / n_samples}')
| import numpy as np
import torch
import setup
def evaluate(model, dataset):
model.eval()
dataloader = dataset.test_dataloader()
accs = 0
n_samples = 0
for iter_n, batch in enumerate(dataloader):
images = batch[0].to(setup.device)
targets = batch[1].to(setup.device)
n_samples += targets.shape[0]
with torch.no_grad():
outputs = model(images)
acc = outputs.max(1)[1].eq(targets).float().sum()
acc = acc.detach().cpu()
accs += acc
print(f'{model.name} accuracy: {accs / n_samples}') | none | 1 | 2.490356 | 2 | |
app.py | ALGOMARINE-SOLUTION/tugas | 0 | 6615645 | <reponame>ALGOMARINE-SOLUTION/tugas
from flask import Flask
from routing.home import app as home
from routing.salmaa import app as salmaa
from routing.yoas import app as yoas
from routing.nia import app as nia
from routing.farhan import app as farhan
app = Flask(__name__)
app.register_blueprint(home)
app.register_blueprint(salmaa)
app.register_blueprint(yoas)
app.register_blueprint(nia)
app.register_blueprint(farhan)
if __name__ == '__main__':
app.run() | from flask import Flask
from routing.home import app as home
from routing.salmaa import app as salmaa
from routing.yoas import app as yoas
from routing.nia import app as nia
from routing.farhan import app as farhan
app = Flask(__name__)
app.register_blueprint(home)
app.register_blueprint(salmaa)
app.register_blueprint(yoas)
app.register_blueprint(nia)
app.register_blueprint(farhan)
if __name__ == '__main__':
app.run() | none | 1 | 1.914634 | 2 | |
google_scraper.py | ruettet/google_scraper | 2 | 6615646 | from bs4 import BeautifulSoup
from requests import get
from re import sub
from sys import argv
from nltk.util import ngrams
from time import sleep
from random import choice, randint
from configparser import ConfigParser
class GoogleScraper():
def __init__(self, search_ngram, min_sleep_length=30, max_sleep_length=60, domain=".nl", next_word="<PASSWORD>", stop=1, noresults="Geen resultaten gevonden"):
self.domain = domain
first_page_url = "https://www.google" + self.domain + "/search?num=100&q=" + '"' + search_ngram.replace(" ", "+") + '"'
with open("useragents.prop") as f:
self.user_agents = f.readlines()
self.min_sleep_length = float(min_sleep_length)
self.max_sleep_length = float(max_sleep_length)
self.stop_searching_after_page = int(stop)
self.noresultssentence = noresults
self.next_word = next_word
self.list_of_urls = []
self.get_urls(first_page_url)
def get_urls(self, first_page_url):
page_count = 1
print("\tChecking search page", page_count)
next_page_url, urls = self.get_info_from_page(first_page_url)
self.list_of_urls.extend(urls)
page_count += 1
while next_page_url and page_count <= self.stop_searching_after_page:
print("\tChecking search page", page_count)
next_page_url, urls = self.get_info_from_page(next_page_url)
self.list_of_urls.extend(urls)
page_count += 1
def get_info_from_page(self, url):
sleep(randint(self.min_sleep_length, self.max_sleep_length))
soup = BeautifulSoup(get(url, headers={'User-agent': choice(self.user_agents).strip()}).text, "lxml")
for div in soup.findAll("div", {"class": "med"}):
if self.noresultssentence in (div.text):
print("\t\tNo exact matches found")
return None, []
return self.get_next_page_url(soup), self.get_search_hit_urls(soup)
def get_next_page_url(self, soup):
nav = soup.find("a", id="pnnext")
if nav:
return "https://www.google" + self.domain + nav["href"]
else:
return None
def get_search_hit_urls(self, soup):
return [sub("&sa.+", "", hit.a["href"].lstrip("/url?q=")) for hit in soup.findAll("h3", {"class": "r"})]
if __name__ == "__main__":
'$ python3 google_scraper.py settings.prop'
config = ConfigParser()
config.read("settings.prop")
urls = {}
with open(config.get("InputOutput", "infile"), "r") as f:
for line in f.readlines():
for ngram in ngrams(line.strip().split(), int(config.get("Ngram", "n"))):
search_term = " ".join(ngram)
print(search_term)
gs = GoogleScraper(search_term,
config.get("Timing", "minsleep"),
config.get("Timing", "maxsleep"),
config.get("Google", "extension"),
config.get("Google", "nextword"),
config.get("Google", "stopsearchafterpages"),
config.get("Google", "noresultssentence"))
urls[search_term] = gs.list_of_urls
out = []
for ngram in urls:
for url in urls[ngram]:
out.append(",".join([ngram, sub("https?://[w]{0,3}\.?", "", url).split("/")[0], url]))
with open(config.get("InputOutput", "outfile"), "w") as f:
f.write("\n".join(out))
| from bs4 import BeautifulSoup
from requests import get
from re import sub
from sys import argv
from nltk.util import ngrams
from time import sleep
from random import choice, randint
from configparser import ConfigParser
class GoogleScraper():
def __init__(self, search_ngram, min_sleep_length=30, max_sleep_length=60, domain=".nl", next_word="<PASSWORD>", stop=1, noresults="Geen resultaten gevonden"):
self.domain = domain
first_page_url = "https://www.google" + self.domain + "/search?num=100&q=" + '"' + search_ngram.replace(" ", "+") + '"'
with open("useragents.prop") as f:
self.user_agents = f.readlines()
self.min_sleep_length = float(min_sleep_length)
self.max_sleep_length = float(max_sleep_length)
self.stop_searching_after_page = int(stop)
self.noresultssentence = noresults
self.next_word = next_word
self.list_of_urls = []
self.get_urls(first_page_url)
def get_urls(self, first_page_url):
page_count = 1
print("\tChecking search page", page_count)
next_page_url, urls = self.get_info_from_page(first_page_url)
self.list_of_urls.extend(urls)
page_count += 1
while next_page_url and page_count <= self.stop_searching_after_page:
print("\tChecking search page", page_count)
next_page_url, urls = self.get_info_from_page(next_page_url)
self.list_of_urls.extend(urls)
page_count += 1
def get_info_from_page(self, url):
sleep(randint(self.min_sleep_length, self.max_sleep_length))
soup = BeautifulSoup(get(url, headers={'User-agent': choice(self.user_agents).strip()}).text, "lxml")
for div in soup.findAll("div", {"class": "med"}):
if self.noresultssentence in (div.text):
print("\t\tNo exact matches found")
return None, []
return self.get_next_page_url(soup), self.get_search_hit_urls(soup)
def get_next_page_url(self, soup):
nav = soup.find("a", id="pnnext")
if nav:
return "https://www.google" + self.domain + nav["href"]
else:
return None
def get_search_hit_urls(self, soup):
return [sub("&sa.+", "", hit.a["href"].lstrip("/url?q=")) for hit in soup.findAll("h3", {"class": "r"})]
if __name__ == "__main__":
'$ python3 google_scraper.py settings.prop'
config = ConfigParser()
config.read("settings.prop")
urls = {}
with open(config.get("InputOutput", "infile"), "r") as f:
for line in f.readlines():
for ngram in ngrams(line.strip().split(), int(config.get("Ngram", "n"))):
search_term = " ".join(ngram)
print(search_term)
gs = GoogleScraper(search_term,
config.get("Timing", "minsleep"),
config.get("Timing", "maxsleep"),
config.get("Google", "extension"),
config.get("Google", "nextword"),
config.get("Google", "stopsearchafterpages"),
config.get("Google", "noresultssentence"))
urls[search_term] = gs.list_of_urls
out = []
for ngram in urls:
for url in urls[ngram]:
out.append(",".join([ngram, sub("https?://[w]{0,3}\.?", "", url).split("/")[0], url]))
with open(config.get("InputOutput", "outfile"), "w") as f:
f.write("\n".join(out))
| none | 1 | 2.909158 | 3 | |
src/evaluation-criticality.py | WM-SEMERU/SecureReqNet | 5 | 6615647 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# load model
from tensorflow.keras.models import load_model
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.layers import Dot, Input, Dense, Reshape, LSTM
from tensorflow.keras.layers import Embedding, Multiply, Subtract
from tensorflow.keras.models import Sequential, Model
from tensorflow.python.keras.layers import Lambda
from string import punctuation
from tensorflow.keras.preprocessing import text
from tensorflow.keras.preprocessing.sequence import skipgrams
import itertools
import pandas as pd
import numpy as np
import re
import nltk
import matplotlib.pyplot as plt
# In[18]:
from sklearn.metrics import average_precision_score,precision_recall_curve
from sklearn.utils.fixes import signature
# In[25]:
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
# In[2]:
path = 'results[10000]/embeds20-improvement-99-0.51.hdf5'
criticality_network = load_model(path) #<----- The Model
# In[9]:
df_history_training = pd.read_csv('history_training.csv')
np_target_test_y = np.load('target_test_y.npy')
np_corpora_test_x = np.load('corpora_test_x.npy')
# In[4]:
df_history_training.head()
# In[27]:
for elem in np_target_test_y:
print(elem[0])
# In[11]:
np_corpora_test_x
# In[6]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[7]:
####Ploting Validation (overfitting assessment)
# summarize history for accuracy
plt.plot(df_history_training['accuracy'])
plt.plot(df_history_training['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# In[8]:
plt.plot(df_history_training['loss'])
plt.plot(df_history_training['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# In[12]:
#Making Evaluations
score = criticality_network.evaluate(np_corpora_test_x, np_target_test_y, verbose=1)
# In[14]:
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# In[15]:
#Making Predictions
history_predict = criticality_network.predict(x=np_corpora_test_x)
history_predict
# In[16]:
inferred_data = pd.DataFrame(history_predict,columns=list('AB'))
target_data = pd.DataFrame(np_target_test_y,columns=list('LN'))
data = target_data.join(inferred_data)
# In[17]:
data.head()
# In[20]:
y_true = list(data['L'])
y_score= list(data['A'])
average_precision = average_precision_score(y_true, y_score)
# In[21]:
print('Average precision-recall score: {0:0.2f}'.format(average_precision))
# In[22]:
precision, recall, thresholds = precision_recall_curve(y_true, y_score)
# In[24]:
# In matplotlib < 1.5, plt.fill_between does not have a 'step' argument
step_kwargs = ({'step': 'post'}
if 'step' in signature(plt.fill_between).parameters
else {})
plt.step(recall, precision, color='b', alpha=0.2,
where='post')
plt.fill_between(recall, precision, alpha=0.2, color='b', **step_kwargs)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('2-class Precision-Recall curve: AP={0:0.2f}'.format(
average_precision))
# In[26]:
#ROC Curve (all our samples are balanced)
auc = roc_auc_score(y_true, y_score)
print('AUC: %.3f' % auc)
# In[ ]:
| #!/usr/bin/env python
# coding: utf-8
# In[1]:
# load model
from tensorflow.keras.models import load_model
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.layers import Dot, Input, Dense, Reshape, LSTM
from tensorflow.keras.layers import Embedding, Multiply, Subtract
from tensorflow.keras.models import Sequential, Model
from tensorflow.python.keras.layers import Lambda
from string import punctuation
from tensorflow.keras.preprocessing import text
from tensorflow.keras.preprocessing.sequence import skipgrams
import itertools
import pandas as pd
import numpy as np
import re
import nltk
import matplotlib.pyplot as plt
# In[18]:
from sklearn.metrics import average_precision_score,precision_recall_curve
from sklearn.utils.fixes import signature
# In[25]:
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
# In[2]:
path = 'results[10000]/embeds20-improvement-99-0.51.hdf5'
criticality_network = load_model(path) #<----- The Model
# In[9]:
df_history_training = pd.read_csv('history_training.csv')
np_target_test_y = np.load('target_test_y.npy')
np_corpora_test_x = np.load('corpora_test_x.npy')
# In[4]:
df_history_training.head()
# In[27]:
for elem in np_target_test_y:
print(elem[0])
# In[11]:
np_corpora_test_x
# In[6]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[7]:
####Ploting Validation (overfitting assessment)
# summarize history for accuracy
plt.plot(df_history_training['accuracy'])
plt.plot(df_history_training['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# In[8]:
plt.plot(df_history_training['loss'])
plt.plot(df_history_training['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# In[12]:
#Making Evaluations
score = criticality_network.evaluate(np_corpora_test_x, np_target_test_y, verbose=1)
# In[14]:
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# In[15]:
#Making Predictions
history_predict = criticality_network.predict(x=np_corpora_test_x)
history_predict
# In[16]:
inferred_data = pd.DataFrame(history_predict,columns=list('AB'))
target_data = pd.DataFrame(np_target_test_y,columns=list('LN'))
data = target_data.join(inferred_data)
# In[17]:
data.head()
# In[20]:
y_true = list(data['L'])
y_score= list(data['A'])
average_precision = average_precision_score(y_true, y_score)
# In[21]:
print('Average precision-recall score: {0:0.2f}'.format(average_precision))
# In[22]:
precision, recall, thresholds = precision_recall_curve(y_true, y_score)
# In[24]:
# In matplotlib < 1.5, plt.fill_between does not have a 'step' argument
step_kwargs = ({'step': 'post'}
if 'step' in signature(plt.fill_between).parameters
else {})
plt.step(recall, precision, color='b', alpha=0.2,
where='post')
plt.fill_between(recall, precision, alpha=0.2, color='b', **step_kwargs)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('2-class Precision-Recall curve: AP={0:0.2f}'.format(
average_precision))
# In[26]:
#ROC Curve (all our samples are balanced)
auc = roc_auc_score(y_true, y_score)
print('AUC: %.3f' % auc)
# In[ ]:
| en | 0.459205 | #!/usr/bin/env python # coding: utf-8 # In[1]: # load model # In[18]: # In[25]: # In[2]: #<----- The Model # In[9]: # In[4]: # In[27]: # In[11]: # In[6]: # In[7]: ####Ploting Validation (overfitting assessment) # summarize history for accuracy # In[8]: # In[12]: #Making Evaluations # In[14]: # In[15]: #Making Predictions # In[16]: # In[17]: # In[20]: # In[21]: # In[22]: # In[24]: # In matplotlib < 1.5, plt.fill_between does not have a 'step' argument # In[26]: #ROC Curve (all our samples are balanced) # In[ ]: | 2.606856 | 3 |
node_modules/yeoman/test/test-insight.py | as3445/letsgo | 0 | 6615648 | <reponame>as3445/letsgo
#!/usr/bin/env python
import sys
import unittest
import os
sys.path.append('../bin/')
from yeomaninsight import main, Analytics
class YeomanInsightTest(unittest.TestCase):
    """Unit tests for the yeomaninsight module (Analytics init and main())."""

    def test_init_should_error_when_no_tracking_code_is_passed(self):
        self.assertRaises(Exception, Analytics, tracking_code='')

    def test_init_should_error_when_no_av_is_passed(self):
        self.assertRaises(Exception, Analytics, av='')

    def test_main_should_error_when_arguments_are_less_than_two(self):
        # main() prints before exiting; silence stdout to keep the test
        # output clean.  Restore stdout and close the devnull handle in a
        # finally block so a failing assertion can no longer leave stdout
        # redirected or leak the open file (the previous version did both).
        devnull = open(os.devnull, 'w')
        sys.stdout = devnull
        try:
            self.assertRaises(SystemExit, main, args='')
        finally:
            sys.stdout = sys.__stdout__
            devnull.close()
# Run the suite when executed directly (e.g. ``python test-insight.py``).
if __name__ == '__main__':
    unittest.main()
import sys
import unittest
import os
sys.path.append('../bin/')
from yeomaninsight import main, Analytics
class YeomanInsightTest(unittest.TestCase):
    """Unit tests for the yeomaninsight module (Analytics init and main())."""

    def test_init_should_error_when_no_tracking_code_is_passed(self):
        self.assertRaises(Exception, Analytics, tracking_code='')

    def test_init_should_error_when_no_av_is_passed(self):
        self.assertRaises(Exception, Analytics, av='')

    def test_main_should_error_when_arguments_are_less_than_two(self):
        # main() prints before exiting; silence stdout to keep the test
        # output clean.  Restore stdout and close the devnull handle in a
        # finally block so a failing assertion can no longer leave stdout
        # redirected or leak the open file (the previous version did both).
        devnull = open(os.devnull, 'w')
        sys.stdout = devnull
        try:
            self.assertRaises(SystemExit, main, args='')
        finally:
            sys.stdout = sys.__stdout__
            devnull.close()
# Run the suite when executed directly (e.g. ``python test-insight.py``).
if __name__ == '__main__':
    unittest.main()
json-wrap.py | zauberpony/json-wrap | 0 | 6615649 | #!/usr/bin/env python3
import argparse
import datetime
import json
import sys
class StoreAsDict(argparse.Action):
    """
    argparse action that collects KEY=VALUE pairs into a dict.

    Raises ValueError for a pair without '=', for a duplicate key, and
    for the reserved keys 'msg' and '@timestamp'.
    """
    def __call__(self, parser, namespace, values, option_string=None):
        d = dict()
        for pair in values:
            # Split on the FIRST '=' only, so values may themselves contain
            # '=' (e.g. url=http://x/?q=1).  str.split("=") rejected those
            # with a misleading "no equal-sign" error.
            k, sep, v = pair.partition("=")
            if not sep:
                raise ValueError("can't interpret '{}' — no equal-sign".format(pair))
            if k in d:
                raise ValueError("duplicate key '{}'".format(k))
            if k == "msg" or k == "@timestamp":
                raise ValueError("'{}' is a reserved keyword".format(k))
            d[k] = v
        setattr(namespace, self.dest, d)
def get_argparser():
    """Build the command-line parser: KEY=VALUE extras plus a -l flag."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "additional_fields",
        nargs="*",
        action=StoreAsDict,
        help="additional fields to add to the json, e.g. service_name=my-service",
    )
    parser.add_argument(
        "-l",
        action="store_true",
        help="wrap output line by line",
    )
    return parser
def wrap_msg(msg, additional_fields):
    """Serialize *msg* plus *additional_fields* as one JSON record string."""
    record = dict(additional_fields)
    record['@timestamp'] = datetime.datetime.now().isoformat()
    record['msg'] = msg
    return json.dumps(record)
def read_lines(additional_fields):
    """Read stdin line by line, emitting one wrapped JSON record per line.

    Only a trailing newline is stripped.  The previous ``line[:-1]``
    unconditionally removed the last character, corrupting a final line
    that lacks a trailing newline; this now mirrors read_full_stdin.
    """
    for line in sys.stdin:
        print(wrap_msg(line[:-1] if line.endswith("\n") else line,
                       additional_fields))
def read_full_stdin(additional_fields):
    """Wrap the whole stdin payload (minus a trailing newline) as one record."""
    text = sys.stdin.read()
    if text.endswith("\n"):
        # remove trailing newline
        text = text[:-1]
    print(wrap_msg(text, additional_fields))
def main():
    """CLI entry point: wrap piped stdin as JSON; do nothing on a TTY."""
    args = get_argparser().parse_args()
    if sys.stdin.isatty():
        return  # nothing piped in; emit nothing
    if args.l:
        read_lines(args.additional_fields)
    else:
        read_full_stdin(args.additional_fields)
# Run the CLI only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| #!/usr/bin/env python3
import argparse
import datetime
import json
import sys
class StoreAsDict(argparse.Action):
    """
    argparse action that collects KEY=VALUE pairs into a dict.

    Raises ValueError for a pair without '=', for a duplicate key, and
    for the reserved keys 'msg' and '@timestamp'.
    """
    def __call__(self, parser, namespace, values, option_string=None):
        d = dict()
        for pair in values:
            # Split on the FIRST '=' only, so values may themselves contain
            # '=' (e.g. url=http://x/?q=1).  str.split("=") rejected those
            # with a misleading "no equal-sign" error.
            k, sep, v = pair.partition("=")
            if not sep:
                raise ValueError("can't interpret '{}' — no equal-sign".format(pair))
            if k in d:
                raise ValueError("duplicate key '{}'".format(k))
            if k == "msg" or k == "@timestamp":
                raise ValueError("'{}' is a reserved keyword".format(k))
            d[k] = v
        setattr(namespace, self.dest, d)
def get_argparser():
    """Build the command-line parser: KEY=VALUE extras plus a -l flag."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "additional_fields",
        nargs="*",
        action=StoreAsDict,
        help="additional fields to add to the json, e.g. service_name=my-service",
    )
    parser.add_argument(
        "-l",
        action="store_true",
        help="wrap output line by line",
    )
    return parser
def wrap_msg(msg, additional_fields):
    """Serialize *msg* plus *additional_fields* as one JSON record string."""
    record = dict(additional_fields)
    record['@timestamp'] = datetime.datetime.now().isoformat()
    record['msg'] = msg
    return json.dumps(record)
def read_lines(additional_fields):
    """Read stdin line by line, emitting one wrapped JSON record per line.

    Only a trailing newline is stripped.  The previous ``line[:-1]``
    unconditionally removed the last character, corrupting a final line
    that lacks a trailing newline; this now mirrors read_full_stdin.
    """
    for line in sys.stdin:
        print(wrap_msg(line[:-1] if line.endswith("\n") else line,
                       additional_fields))
def read_full_stdin(additional_fields):
    """Wrap the whole stdin payload (minus a trailing newline) as one record."""
    text = sys.stdin.read()
    if text.endswith("\n"):
        # remove trailing newline
        text = text[:-1]
    print(wrap_msg(text, additional_fields))
def main():
    """CLI entry point: wrap piped stdin as JSON; do nothing on a TTY."""
    args = get_argparser().parse_args()
    if sys.stdin.isatty():
        return  # nothing piped in; emit nothing
    if args.l:
        read_lines(args.additional_fields)
    else:
        read_full_stdin(args.additional_fields)
# Run the CLI only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| en | 0.374402 | #!/usr/bin/env python3 argparse action to create dicts from command-line # remove trailing newline | 3.088271 | 3 |
usure/wordvectors/core/vectorizer.py | coraxcr/usure | 2 | 6615650 | import multiprocessing
from gensim.models import Word2Vec
from usure.wordvectors.core import CorpusRep, Corpus
from usure.common import logging
class Vectorizer:
    """Builds and trains a gensim Word2Vec model over a Corpus.

    On construction the vocabulary is built and the model / keyed-vectors
    artifacts are named after the corpus so saved files stay traceable.
    """

    def __init__(self, typename, corpus: Corpus, w2v: Word2Vec):
        # Raise TypeError instead of assert: asserts are stripped under
        # ``python -O`` and must not be used for argument validation.
        if not isinstance(w2v, Word2Vec):
            raise TypeError("It's not a Word2Vec.")
        self._typename = typename
        self._corpus = corpus
        self._w2v = w2v
        # Vocabulary must exist before train() may be called.
        self._w2v.build_vocab(sentences=corpus)
        self._w2v.name = f"{corpus.name}.{self._typename}.w2v"
        self._w2v.wv.name = f"{corpus.name}.{self._typename}.kvs"

    @classmethod
    def create_with_window(cls, corpus, max_skip_len):
        """Alternate constructor: skip-gram model with window ``max_skip_len``."""
        w2v = Vectorizer.create_word2vec(max_skip_len)
        return cls(f"{max_skip_len}_w", corpus, w2v)

    @staticmethod
    def create_word2vec(window: int):
        """Create an untrained skip-gram Word2Vec with negative sampling."""
        w2v = Word2Vec(
            negative=10,   # 10 negative samples per positive example
            sg=1,          # skip-gram architecture
            hs=0,          # negative sampling, not hierarchical softmax
            size=300,      # embedding dimensionality
            min_count=3,   # ignore words seen fewer than 3 times
            workers=multiprocessing.cpu_count(),
            window=window)
        return w2v

    @property
    def typename(self):
        return self._typename

    @property
    def w2v(self) -> Word2Vec:
        return self._w2v

    @property
    def corpus(self):
        return self._corpus

    @logging.logtime
    def train(self):
        """Train the underlying Word2Vec on the corpus for 5 epochs."""
        logging.info_time(f"CREATING EMBEDDINGS FOR CORPUS: {self.w2v.name}")
        self._w2v.train(
            sentences=self._corpus,
            total_examples=self.w2v.corpus_count,
            epochs=5)
| import multiprocessing
from gensim.models import Word2Vec
from usure.wordvectors.core import CorpusRep, Corpus
from usure.common import logging
class Vectorizer:
    """Builds and trains a gensim Word2Vec model over a Corpus.

    On construction the vocabulary is built and the model / keyed-vectors
    artifacts are named after the corpus so saved files stay traceable.
    """

    def __init__(self, typename, corpus: Corpus, w2v: Word2Vec):
        # Raise TypeError instead of assert: asserts are stripped under
        # ``python -O`` and must not be used for argument validation.
        if not isinstance(w2v, Word2Vec):
            raise TypeError("It's not a Word2Vec.")
        self._typename = typename
        self._corpus = corpus
        self._w2v = w2v
        # Vocabulary must exist before train() may be called.
        self._w2v.build_vocab(sentences=corpus)
        self._w2v.name = f"{corpus.name}.{self._typename}.w2v"
        self._w2v.wv.name = f"{corpus.name}.{self._typename}.kvs"

    @classmethod
    def create_with_window(cls, corpus, max_skip_len):
        """Alternate constructor: skip-gram model with window ``max_skip_len``."""
        w2v = Vectorizer.create_word2vec(max_skip_len)
        return cls(f"{max_skip_len}_w", corpus, w2v)

    @staticmethod
    def create_word2vec(window: int):
        """Create an untrained skip-gram Word2Vec with negative sampling."""
        w2v = Word2Vec(
            negative=10,   # 10 negative samples per positive example
            sg=1,          # skip-gram architecture
            hs=0,          # negative sampling, not hierarchical softmax
            size=300,      # embedding dimensionality
            min_count=3,   # ignore words seen fewer than 3 times
            workers=multiprocessing.cpu_count(),
            window=window)
        return w2v

    @property
    def typename(self):
        return self._typename

    @property
    def w2v(self) -> Word2Vec:
        return self._w2v

    @property
    def corpus(self):
        return self._corpus

    @logging.logtime
    def train(self):
        """Train the underlying Word2Vec on the corpus for 5 epochs."""
        logging.info_time(f"CREATING EMBEDDINGS FOR CORPUS: {self.w2v.name}")
        self._w2v.train(
            sentences=self._corpus,
            total_examples=self.w2v.corpus_count,
            epochs=5)
| none | 1 | 2.690359 | 3 |