hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
edd948bb9ec9eb83072bfce6e93f8f8d37219a11 | 3,077 | py | Python | DQM/Physics/test/ewkElecDQM_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | DQM/Physics/test/ewkElecDQM_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | DQM/Physics/test/ewkElecDQM_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z |
import FWCore.ParameterSet.Config as cms
process = cms.Process("EwkDQM")
process.load("DQM.Physics.ewkElecDQM_cfi")
process.load("DQMServices.Core.DQM_cfg")
process.load("DQMServices.Components.DQMEnvironment_cfi")
process.DQM.collectorHost = ''
#keep the logging output to a nice level
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 1
# load the full reconstruction configuration, to make sure we're getting all needed dependencies
process.load("Configuration.StandardSequences.MagneticField_cff")
#process.load("Configuration.StandardSequences.GeometryRecoDB_cff") #old one, to use for old releases
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("Configuration.StandardSequences.Reconstruction_cff")
process.GlobalTag.globaltag = 'FT_53_V21_AN6::All'
#process.GlobalTag.globaltag = 'START70_V2::All'
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
# input = cms.untracked.int32(5000)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
## '/store/relval/CMSSW_3_1_1/RelValWM/GEN-SIM-RECO/STARTUP31X_V1-v2/0002/8E5D0675-E36B-DE11-8F71-001D09F242EF.root'
# MinBias real data!
# '/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/124/196/3C9489A4-B5E8-DE11-A475-001D09F2A465.root',
#'/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/124/188/34641279-B5E8-DE11-A475-001D09F2910A.root',
# Real data
#'/store/data/Run2012B/SingleElectron/AOD/22Jan2013-v1/30000/FE93DA20-837E-E211-8A41-002481E73676.root'
# 'file:12251709-D77E-E211-96C8-003048F118FE.root' # data
# , 'file:5072427B-407E-E211-88EF-003048F237FE.root' #data
# 'file:DEC5AD62-280C-E311-89A7-002618FDA216.root'
# 'file:/tmp/andriusj/ZeePU.root'
'file:/tmp/andriusj/Data2012D_DoubleEl.root'
)
)
runOnData = False
#process.dqmEnv.subSystemFolder = 'SMP'
process.dqmSaver.producer = 'DQM'
process.dqmSaver.workflow = cms.untracked.string('/Physics/EWK/Elec')
process.dqmSaver.convention = 'Offline'
process.dqmSaver.saveByRun = cms.untracked.int32(-1)
process.dqmSaver.saveAtJobEnd = cms.untracked.bool(True)
process.dqmSaver.forceRunNumber = cms.untracked.int32(1)
if runOnData:
process.dqmSaver.saveByRun = cms.untracked.int32(1)
    process.dqmSaver.saveAtJobEnd = cms.untracked.bool(False)
process.dqmSaver.forceRunNumber = cms.untracked.int32(-1)
process.MessageLogger = cms.Service("MessageLogger",
destinations = cms.untracked.vstring('detailedInfo'),
detailedInfo = cms.untracked.PSet(
default = cms.untracked.PSet( limit = cms.untracked.int32(100) ),
threshold = cms.untracked.string('DEBUG')
#threshold = cms.untracked.string('INFO')
#threshold = cms.untracked.string('ERROR')
)
)
#process.ana = cms.EDAnalyzer("EventContentAnalyzer")
process.p = cms.Path(process.ewkElecDQM+process.dqmSaver)
| 40.486842 | 123 | 0.753331 | 362 | 3,077 | 6.350829 | 0.458564 | 0.093954 | 0.051762 | 0.089169 | 0.250979 | 0.209656 | 0.1592 | 0.118312 | 0.073945 | 0.073945 | 0 | 0.092279 | 0.116022 | 3,077 | 75 | 124 | 41.026667 | 0.752941 | 0.375366 | 0 | 0 | 0 | 0 | 0.250263 | 0.202318 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.025 | 0 | 0.025 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eddafd9744249b5f6384f3044c4d9c5bb3848404 | 4,809 | py | Python | indStudyA.py | rafaelorozco/cloudsimbuck | 5b6bc4f24343bb171bc44522244647fcdaff7bf5 | [
"MIT"
] | null | null | null | indStudyA.py | rafaelorozco/cloudsimbuck | 5b6bc4f24343bb171bc44522244647fcdaff7bf5 | [
"MIT"
] | null | null | null | indStudyA.py | rafaelorozco/cloudsimbuck | 5b6bc4f24343bb171bc44522244647fcdaff7bf5 | [
"MIT"
] | null | null | null |
#version 1
#
#
#Setup data structure
#Made timer that includes fps
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
import pyqtgraph as pg
import numpy as np
import random
#import time
from pyqtgraph.ptime import time
import functools
app = QtGui.QApplication([])
w = gl.GLViewWidget()
w.show()
g = gl.GLGridItem()
w.addItem(g)
def nearFunction(mat,i,j,k):
return mat[i+1,j,k-1] or mat[i,j+1,k-1] or mat[i,j,k-1] or \
mat[i-1,j,k] or mat[i,j-1,k] or mat[i,j,k-1] or \
mat[i+2,j,k] or mat[i,j+2,k] or \
mat[i-2,j,k] or mat[i,j-2,k] or mat[i,j,k-2]
def makeSeedRand(mat):
row, col, layer = mat.shape
for i in range(2, row-2):
for j in range(2, col-2):
for k in range(2, layer-2):
#p = 0.311
p = 0.211
randNum = random.uniform(0, 1)
if(randNum <= p):
mat[i,j,k] = 1
#matB[i,j,k] = 1
# if(1*(row/3) < i and i < 2*(row/3)): #middle third
# if(1*(col/3) < j and j < 2*(col/3)): #middle third
# if(k < 1*(layer/3)):
# #if(1*(layer/3) < k and k < 2*(layer/3)): #middle third
# randNum = random.randint(0,25)
# if(randNum <= 1):
# mat[i,j,k] = 1
# #matB[i,j,k] = 1
# else:
# randNum = random.randint(0,250)
# if(randNum <= 1):
# mat[i,j,k] = 1
def plantSeed(mat, numSeeds):
#put in the middle third of box
row, col, layer = mat.shape
for i in range(numSeeds):
rowRand = random.randint(2,row-2);
colRand = random.randint(2,col-2);
layerRand = random.randint(2,layer-2);
mat[rowRand,colRand,layerRand] = 1
def iterateForwardVector():
humCopy = hum.copy()
actCopy = act.copy()
cldCopy = cld.copy()
row, col, lay = hum.shape
hum[2:row-2, 2:col-2, 2:lay-2] = humCopy[2:row-2, 2:col-2, 2:lay-2] & (~ actCopy[2:row-2, 2:col-2, 2:lay-2])
cld[2:row-2, 2:col-2, 2:lay-2] = np.logical_or(cldCopy[2:row-2, 2:col-2, 2:lay-2] , actCopy[2:row-2, 2:col-2, 2:lay-2])
matR1 = np.roll(np.roll(act,-1,axis=0),1,axis=2) # mat[i+1,j,k-1]
matR2 = np.roll(np.roll(act,-1,axis=1),1,axis=2) # mat[i,j+1,k-1]
matR3 = np.roll(act,1,axis=2) # mat[i,j,k-1]
matR4 = np.roll(act,1,axis=0) # mat[i-1,j,k]
matR5 = np.roll(act,1,axis=1) # mat[i,j-1,k]
matR6 = np.roll(act,1,axis=2) # mat[i,j,k-1]
matR7 = np.roll(act,-2,axis=0) # mat[i+2,j,k]
matR8 = np.roll(act,-2,axis=1) # mat[i,j+2,k]
matR9 = np.roll(act,2,axis=0) # mat[i-2,j,k]
matR10 = np.roll(act,2,axis=1) # mat[i,j-2,k]
matR11 = np.roll(act,2,axis=2) # mat[i,j,k-2]
act[2:row-2, 2:col-2, 2:lay-2] = (~ actCopy[2:row-2, 2:col-2, 2:lay-2]) & humCopy[2:row-2, 2:col-2, 2:lay-2] & \
np.logical_or(matR1[2:row-2, 2:col-2, 2:lay-2],
np.logical_or(matR2[2:row-2, 2:col-2, 2:lay-2],
np.logical_or(matR3[2:row-2, 2:col-2, 2:lay-2],
np.logical_or(matR4[2:row-2, 2:col-2, 2:lay-2],
np.logical_or(matR5[2:row-2, 2:col-2, 2:lay-2],
np.logical_or(matR6[2:row-2, 2:col-2, 2:lay-2],
np.logical_or(matR7[2:row-2, 2:col-2, 2:lay-2],
np.logical_or(matR8[2:row-2, 2:col-2, 2:lay-2],
np.logical_or(matR9[2:row-2, 2:col-2, 2:lay-2],
np.logical_or(matR10[2:row-2, 2:col-2, 2:lay-2],matR11[2:row-2, 2:col-2, 2:lay-2]))))))))))
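
# Note on iterateForwardVector(): each np.roll shifts the whole lattice so a
# neighbour offset such as mat[i+1, j, k-1] can be read at index [i, j, k];
# the chained logical_or over matR1..matR11 reproduces the (otherwise unused)
# nearFunction() neighbourhood without Python loops, including its repeated
# mat[i,j,k-1] term, mirrored by matR3/matR6 being identical rolls.
# Update rules: hum' = hum & ~act;  cld' = cld | act;  act' = ~act & hum & near(act)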
lenI = 60
lenJ = 60
lenK = 60
hum = np.zeros((lenI, lenJ, lenK))
act = np.zeros((lenI, lenJ, lenK))
cld = np.zeros((lenI, lenJ, lenK))
hum = hum.astype(int)
act = act.astype(int)
cld = cld.astype(int)
makeSeedRand(hum)
plantSeed(act,2)
indexesFinal = np.array([[1,2,3]])
sp2 = gl.GLScatterPlotItem(pos=indexesFinal,size=1.5,pxMode=False)
w.addItem(sp2)
def resetVars():
global hum, act, cld, indexesFinal
hum = np.zeros((lenI, lenJ, lenK))
act = np.zeros((lenI, lenJ, lenK))
cld = np.zeros((lenI, lenJ, lenK))
hum = hum.astype(int)
act = act.astype(int)
cld = cld.astype(int)
makeSeedRand(hum)
plantSeed(act,2)
indexesFinal = np.array([[1,2,3]])
totalIterations = 80
numIteration = 0
lastTime = time()
fps = None
def update():
global numIteration, indexesFinal, lastTime, fps
if(numIteration < totalIterations) :
sp2.setData(pos=indexesFinal)
indexes = np.where(cld==1)
indexesFinal = np.array([[indexes[0][i],indexes[1][i],indexes[2][i]] for i in range(len(indexes[0]))])
iterateForwardVector()
numIteration+=1
else:
resetVars()
numIteration = 0
now = time()
dt = now - lastTime
lastTime = now
if fps is None:
fps = 1.0/dt
else:
s = np.clip(dt*3., 0, 1)
fps = fps * (1-s) + (1.0/dt) * s
print('%0.2f fps' % fps)
t = QtCore.QTimer()
t.timeout.connect(update)
t.start(5)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
    # 'PYQT_VERSION' must be a string here; the bare name was an undefined variable
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| 25.854839 | 120 | 0.602412 | 927 | 4,809 | 3.102481 | 0.166127 | 0.027816 | 0.038248 | 0.041725 | 0.438804 | 0.42733 | 0.39395 | 0.380042 | 0.370654 | 0.331015 | 0 | 0.075097 | 0.194219 | 4,809 | 185 | 121 | 25.994595 | 0.667097 | 0.140362 | 0 | 0.20339 | 0 | 0 | 0.004153 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050847 | false | 0 | 0.067797 | 0.008475 | 0.127119 | 0.008475 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eddc970d0bca10b6c7c843c88343bba235218464 | 433 | py | Python | example.py | dib-lab/pybbhash | 08a1f71fc5b1f52d450ba1f33b168241423c9047 | [
"MIT"
] | 16 | 2018-01-18T06:00:42.000Z | 2021-03-03T08:50:42.000Z | example.py | dib-lab/pybbhash | 08a1f71fc5b1f52d450ba1f33b168241423c9047 | [
"MIT"
] | 17 | 2018-01-21T22:38:37.000Z | 2021-01-01T16:26:49.000Z | example.py | dib-lab/pybbhash | 08a1f71fc5b1f52d450ba1f33b168241423c9047 | [
"MIT"
] | 3 | 2018-07-04T20:38:36.000Z | 2021-11-11T12:49:01.000Z |
import bbhash
# some collection of 64-bit (or smaller) hashes
uint_hashes = [10, 20, 50, 80]
num_threads = 1 # hopefully self-explanatory :)
gamma = 1.0 # internal gamma parameter for BBHash
mph = bbhash.PyMPHF(uint_hashes, len(uint_hashes), num_threads, gamma)
for val in uint_hashes:
print('{} now hashes to {}'.format(val, mph.lookup(val)))
# can also use 'mph.save(filename)' and 'mph = bbhash.load_mphf(filename)'.
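# A minimal persistence sketch, assuming save()/load_mphf() behave as the
# comment above describes (the filename is illustrative):
#
#   mph.save('hashes.mphf')
#   mph2 = bbhash.load_mphf('hashes.mphf')
#   assert all(mph.lookup(v) == mph2.lookup(v) for v in uint_hashes)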
| 28.866667 | 75 | 0.709007 | 67 | 433 | 4.477612 | 0.656716 | 0.133333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035813 | 0.161663 | 433 | 14 | 76 | 30.928571 | 0.790634 | 0.427252 | 0 | 0 | 0 | 0 | 0.078189 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
edddf9cd795da9fd0a04623dab549ea31d356178 | 1,618 | py | Python | setup.py | creativechain/crea-python-graphenelib | 14b0de84c47c21c8ad2f03a9ace7816135345681 | [
"MIT"
] | null | null | null | setup.py | creativechain/crea-python-graphenelib | 14b0de84c47c21c8ad2f03a9ace7816135345681 | [
"MIT"
] | null | null | null | setup.py | creativechain/crea-python-graphenelib | 14b0de84c47c21c8ad2f03a9ace7816135345681 | [
"MIT"
] | null | null | null |
#!/usr/bin/env python
from setuptools import setup
# Work around mbcs bug in distutils.
# http://bugs.python.org/issue10945
import codecs
try:
codecs.lookup('mbcs')
except LookupError:
ascii = codecs.lookup('ascii')
codecs.register(lambda name, enc=ascii: {True: enc}.get(name == 'mbcs'))
VERSION = '0.1.3'
setup(
name='crea-graphenelib',
version=VERSION,
description='Python library for graphene-based blockchains',
long_description=open('README.md').read(),
download_url='https://github.com/creativechain/crea-python-graphenelib/tarball/' + VERSION,
author='Creativechain Foundation',
author_email='info@creativechain.org',
maintainer='Creativechain Foundation',
maintainer_email='info@creativechain.org',
url='http://www.github.com/creativechain/crea-python-graphenelib',
keywords=[
'graphene',
'api',
'rpc',
'ecdsa',
'secp256k1'
],
packages=["grapheneapi",
"graphenebase",
],
install_requires=["ecdsa",
"requests",
"websocket-client",
"pylibscrypt",
"pycryptodome",
],
classifiers=['License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
include_package_data=True,
)
| 30.528302 | 95 | 0.585909 | 150 | 1,618 | 6.26 | 0.66 | 0.025559 | 0.046858 | 0.055378 | 0.091587 | 0.091587 | 0 | 0 | 0 | 0 | 0 | 0.012017 | 0.279975 | 1,618 | 52 | 96 | 31.115385 | 0.793991 | 0.055006 | 0 | 0.088889 | 0 | 0 | 0.389908 | 0.028834 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.044444 | 0 | 0.044444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ede0d5ebf66b21e6e1508ac010484457df91425a | 531 | py | Python | Kickstart/diwali-lightings.py | tushar-1728/Coding | 2df9da02cf3e5d4af5b47faf02a07ba54b3297cb | [
"MIT"
] | null | null | null | Kickstart/diwali-lightings.py | tushar-1728/Coding | 2df9da02cf3e5d4af5b47faf02a07ba54b3297cb | [
"MIT"
] | null | null | null | Kickstart/diwali-lightings.py | tushar-1728/Coding | 2df9da02cf3e5d4af5b47faf02a07ba54b3297cb | [
"MIT"
] | null | null | null |
t = int(input())
for i in range(t):
pattern = input()
lindex, rindex = map(int, input().split())
d = len(pattern)
a_list = []
r_count = 0
l_count = 0
flag = 0
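    # collect the 1-based positions of 'B' bulbs within one period, then count
    # 'B' positions <= rindex (r_count) and <= lindex (l_count); `flag` re-adds
    # lindex when it is itself a 'B' bulb, so the answer is the inclusive count
    # over [lindex, rindex]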
for j in range(d):
if pattern[j] == "B":
a_list.append(j +1)
for j in a_list:
temp = (rindex - j)//d + 1
r_count += temp
temp = (lindex - j)//d + 1
l_count += temp
if (lindex - j) % d == 0:
flag = 1
print("Case #", i+1, ": ", r_count - l_count + flag, sep="")
| 25.285714 | 64 | 0.45951 | 81 | 531 | 2.901235 | 0.37037 | 0.06383 | 0.051064 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027027 | 0.372881 | 531 | 20 | 65 | 26.55 | 0.678679 | 0 | 0 | 0 | 0 | 0 | 0.016949 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ede0d8d35a9f0d6e5afc0c244d8363190ccf8288 | 1,121 | py | Python | oteltrace/contrib/grpc/utils.py | ocelotl/opentelemetry-auto-instr-python-1 | f5c47bd1ee492ffde298794f283031c22891f60b | [
"BSD-3-Clause"
] | 2 | 2020-03-04T17:33:22.000Z | 2021-01-20T14:20:10.000Z | oteltrace/contrib/grpc/utils.py | ocelotl/opentelemetry-auto-instr-python-1 | f5c47bd1ee492ffde298794f283031c22891f60b | [
"BSD-3-Clause"
] | 4 | 2019-11-25T00:11:16.000Z | 2021-05-13T20:43:50.000Z | oteltrace/contrib/grpc/utils.py | ocelotl/opentelemetry-auto-instr-python-1 | f5c47bd1ee492ffde298794f283031c22891f60b | [
"BSD-3-Clause"
] | 3 | 2020-02-05T14:54:25.000Z | 2020-03-23T02:51:27.000Z |
# Copyright 2019, OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def parse_method_path(method_path):
""" Returns (package, service, method) tuple from parsing method path """
# unpack method path based on "/{package}.{service}/{method}"
# first remove leading "/" as unnecessary
package_service, method_name = method_path.lstrip('/').rsplit('/', 1)
# {package} is optional
package_service = package_service.rsplit('.', 1)
if len(package_service) == 2:
return package_service[0], package_service[1], method_name
return None, package_service[0], method_name
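
# Illustrative examples of the parsing above:
#   parse_method_path('/helloworld.Greeter/SayHello')
#     -> ('helloworld', 'Greeter', 'SayHello')
#   parse_method_path('/Greeter/SayHello')  # no package prefix
#     -> (None, 'Greeter', 'SayHello')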
| 40.035714 | 77 | 0.729706 | 157 | 1,121 | 5.121019 | 0.56051 | 0.156716 | 0.074627 | 0.039801 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015119 | 0.173952 | 1,121 | 27 | 78 | 41.518519 | 0.853132 | 0.668153 | 0 | 0 | 0 | 0 | 0.008571 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ede10dafbf743c6151c9253bd80b7dd3f59da855 | 3,852 | py | Python | datasetparser.py | moloned/volumetric_accelerator_toolkit | 8f5cf226a7d788e4dd4215c181db49d9568c6240 | [
"Apache-2.0"
] | 6 | 2019-02-11T14:32:23.000Z | 2021-12-07T09:49:41.000Z | datasetparser.py | moloned/volumetric_accelerator_toolkit | 8f5cf226a7d788e4dd4215c181db49d9568c6240 | [
"Apache-2.0"
] | null | null | null | datasetparser.py | moloned/volumetric_accelerator_toolkit | 8f5cf226a7d788e4dd4215c181db49d9568c6240 | [
"Apache-2.0"
] | 2 | 2018-10-11T17:29:37.000Z | 2021-09-08T12:01:40.000Z |
#!/usr/bin/env python3
"""Reads all the headers in a folder and creates a vola index.
@author Jonathan Byrne
@copyright 2018 Intel Ltd (see LICENSE file).
"""
from __future__ import print_function
import argparse
import glob
import os
import struct
import json
def main():
"""Read the headers, calc the centroids and output."""
parser = argparse.ArgumentParser()
parser.add_argument("pathname",
help="the path containing volume files", type=str)
args = parser.parse_args()
dirname = args.pathname.rstrip('/')
dataset = os.path.basename(dirname)
volaname = os.path.join(dirname, dataset) + ".vola"
vol = os.path.join(dirname, "*.vol")
infofile = os.path.join(dirname, "info.json")
print("Processing folder:", dirname, " output:", volaname)
files = []
tminx, tminy, tminz = float('inf'), float('inf'), float('inf')
tmaxx, tmaxy, tmaxz = float('-inf'), float('-inf'), float('-inf')
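    # tmin*/tmax* accumulate a running bounding box over every .vol header read below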
filenames = glob.glob(vol)
hdr = {}
for filename in filenames:
with open(filename, "rb") as f:
hdr['headersize'] = struct.unpack('I', f.read(4))[0]
hdr['version'] = struct.unpack('H', f.read(2))[0]
hdr['mode'] = struct.unpack('B', f.read(1))[0]
hdr['depth'] = struct.unpack('B', f.read(1))[0]
hdr['nbits'] = struct.unpack('I', f.read(4))[0]
hdr['crs'] = struct.unpack('I', f.read(4))[0]
hdr['lat'] = struct.unpack('d', f.read(8))[0]
hdr['lon'] = struct.unpack('d', f.read(8))[0]
minx = struct.unpack('d', f.read(8))[0]
miny = struct.unpack('d', f.read(8))[0]
minz = struct.unpack('d', f.read(8))[0]
maxx = struct.unpack('d', f.read(8))[0]
maxy = struct.unpack('d', f.read(8))[0]
maxz = struct.unpack('d', f.read(8))[0]
if minx < tminx:
tminx = minx
if miny < tminy:
tminy = miny
if minz < tminz:
tminz = minz
if maxx > tmaxx:
tmaxx = maxx
if maxy > tmaxy:
tmaxy = maxy
if maxz > tmaxz:
tmaxz = maxz
bbox = [minx, miny, minz, maxx, maxy, maxz]
sides = [maxx - minx, maxy - miny, maxz - minz]
centroid = ((minx + maxx) / 2, (miny + maxy) / 2, (minz + maxz) / 2)
files.append({
'filename': filename,
'bbox': bbox,
'centroid': centroid,
'sides': sides,
'crs': hdr['crs'],
'lat': hdr['lat'],
'lon': hdr['lon']
})
if not os.path.isfile(infofile):
print("Missing attribution info file!! Attribution is required")
exit()
else:
with open(infofile) as data_file:
infodata = json.load(data_file)
if len(infodata['license']) < 5:
print("No license information!! License is required")
exit()
vola = {}
print("Depth:", hdr['depth'])
vola['dataset'] = infodata['dataset']
vola['info'] = infodata['info']
vola['url'] = infodata['url']
vola['author'] = infodata['author']
vola['authorurl'] = infodata['authorurl']
vola['license'] = infodata['license']
vola['licenseurl'] = infodata['licenseurl']
vola['files'] = files
vola['depth'] = hdr['depth']
vola['nbits'] = hdr['nbits']
vola['crs'] = hdr['crs']
vola['mode'] = hdr['mode']
vola['bbox'] = [tminx, tminy, tminz, tmaxx, tmaxy, tmaxz]
vola['sides'] = [tmaxx - tminx, tmaxy - tminy, tmaxz - tminz]
vola['centroid'] = ((tminx + tmaxx) / 2, (tminy + tmaxy) / 2,
(tminz + tmaxz) / 2)
volafile = open(volaname, 'w')
volafile.write(json.dumps(vola, sort_keys=True, indent=2))
volafile.close()
if __name__ == '__main__':
main()
| 33.495652 | 76 | 0.537383 | 471 | 3,852 | 4.356688 | 0.295117 | 0.081871 | 0.050682 | 0.054581 | 0.157407 | 0.157407 | 0.134016 | 0.056043 | 0 | 0 | 0 | 0.014942 | 0.287643 | 3,852 | 114 | 77 | 33.789474 | 0.732872 | 0.051661 | 0 | 0.021277 | 0 | 0 | 0.131832 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010638 | false | 0 | 0.06383 | 0 | 0.074468 | 0.053191 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ede193cbc7f6dd6ed49b143d3a053602c1a03e2e | 6,324 | py | Python | chart-generator/main.py | ShironCat/covid-19-fernandopolis | f7767ed604368c27732de0b3300967bf1019e6e6 | [
"CC0-1.0"
] | 3 | 2020-06-10T02:51:38.000Z | 2021-05-14T14:37:09.000Z | chart-generator/main.py | ShironCat/covid-19-fernandopolis | f7767ed604368c27732de0b3300967bf1019e6e6 | [
"CC0-1.0"
] | 1 | 2022-03-12T01:08:07.000Z | 2022-03-12T01:08:07.000Z | chart-generator/main.py | ShironCat/covid-19-fernandopolis | f7767ed604368c27732de0b3300967bf1019e6e6 | [
"CC0-1.0"
] | 1 | 2020-06-18T21:50:11.000Z | 2020-06-18T21:50:11.000Z |
from datetime import datetime, timedelta
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.optimize as opt
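
# Glossary for the Portuguese (pt-BR) data columns and chart labels used below:
#   'Data' = date, 'Casos acumulados' = cumulative cases,
#   'Óbitos acumulados' = cumulative deaths,
#   'Curados acumulados' = cumulative recoveries, 'Novos casos' = new cases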
def area_chart(ds, dateFmt):
# create a subplot
fig, ax = plt.subplots()
# set figure size and dpi
fig.set_size_inches(10, 5)
fig.set_dpi(300)
# draw the curves
ax.fill_between(
ds['Data'],
ds['Casos acumulados'],
color='#f44336',
label='Casos totais ({})'.format(ds['Casos acumulados'].values[-1]))
ax.stackplot(
ds['Data'],
ds['Óbitos acumulados'],
ds['Curados acumulados'],
colors=['#9a9a9a', '#009688'],
labels=[
'Óbitos totais ({})'.format(ds['Óbitos acumulados'].values[-1]),
'Curados totais ({})'.format(ds['Curados acumulados'].values[-1])])
# write the total number at the end of the curves
ax.text(
ds['Data'].values[-1] + np.timedelta64(12, 'h'),
ds['Casos acumulados'].values[-1],
str(ds['Casos acumulados'].values[-1]),
color='w')
ax.text(
ds['Data'].values[-1] + np.timedelta64(12, 'h'),
ds['Curados acumulados'].values[-1],
str(ds['Curados acumulados'].values[-1]),
color='w')
ax.text(
ds['Data'].values[-1] + np.timedelta64(12, 'h'),
ds['Óbitos acumulados'].values[-1],
str(ds['Óbitos acumulados'].values[-1]),
color='w')
# set chart style
ax.xaxis.set_major_formatter(dateFmt)
ax.set_facecolor('#101010')
# set chart title
ax.title.set_text(
'Situação geral da COVID-19 em Fernandópolis - {}'
.format(ds['Data'].iloc[-1].strftime('%d/%m/%Y')))
# draw legend on the upper left corner
ax.legend(loc='upper left')
# save chart as a png
fig.savefig('../images/area_chart.png')
def bar_chart(ds, dateFmt):
# create a subplot
fig, ax = plt.subplots()
# set figure size and dpi
fig.set_size_inches(10, 5)
fig.set_dpi(300)
# calculate moving average
moving_average = ds['Novos casos'].rolling(window=14).mean()
# draw the bars
ax.bar(
ds['Data'],
ds['Novos casos'],
color='#f44336',
label='Casos novos de {} ({})'.format(
ds['Data'].iloc[-1].strftime('%d/%m/%Y'),
ds['Novos casos'].values[-1]))
ax.plot(
ds['Data'],
moving_average,
color='#f4a235',
linestyle='dashed',
label='Média móvel de casos novos ({})'.format(
int(np.trunc(moving_average.iloc[-1]))))
# write the number of cases at the top of each bar
for date in ds['Data']:
i = (date - datetime.fromisoformat('2020-03-25')).days
y = ds['Novos casos'].values[i]
if y != 0:
ax.text(
date - np.timedelta64(12, 'h'),
y + 0.25,
str(y),
color='w')
# set chart style
ax.xaxis.set_major_formatter(dateFmt)
ax.set_facecolor('#101010')
# set chart title
ax.title.set_text(
'Casos novos da COVID-19 em Fernandópolis - {}'
.format(ds['Data'].iloc[-1].strftime('%d/%m/%Y')))
# draw legend on the upper left corner
ax.legend(loc='upper left')
# save chart as a png
fig.savefig('../images/bar_chart.png')
def line_chart(ds, dateFmt):
# create a subplot
fig, ax = plt.subplots()
# set figure size and dpi
fig.set_size_inches(10, 5)
fig.set_dpi(300)
# polynomial function
def func(x, a, b, c, d, e, f, g):
params = [a, b, c, d, e, f, g]
n = len(params)
total = 0
for i in range(0, n):
total += params[n - i - 1] * np.power(x, i)
return total
    # optimized parameters for polynomial (degree-6) curve fitting of cumulative cases
optimizedParameters, _ = opt.curve_fit(
func,
ds['Data'].map(
lambda x: (x - datetime.fromisoformat('2020-03-25')).days),
ds['Casos acumulados'])
# list of days extended over 7 days
extDate = ds['Data'].copy()
for i in range(1, 8):
extDate = extDate.append(
pd.Series(
[ds['Data'].iloc[-1] + timedelta(days=i)],
index=[ds['Data'].size + i - 1]))
# draw the curves
ax.plot(
ds['Data'],
ds['Casos acumulados'],
color='#f44336',
label='Casos totais ({})'.format(ds['Casos acumulados'].values[-1]))
ax.plot(
extDate,
func(
extDate.map(
lambda x: (x - datetime.fromisoformat('2020-03-25')).days),
*optimizedParameters),
color='#f4a235',
linestyle='dashed',
label='Projeção do número de casos até {} ({:.0f})'.format(
extDate.iloc[-1].strftime('%d/%m/%Y'),
np.floor(func(
(extDate.iloc[-1] - datetime.fromisoformat('2020-03-25')).days,
*optimizedParameters))))
# write the number of cases at the end of the curve
ax.text(
ds['Data'].values[-1] + np.timedelta64(12, 'h'),
ds['Casos acumulados'].values[-1],
str(ds['Casos acumulados'].values[-1]),
color='w')
ax.text(
extDate.iloc[-1] + timedelta(hours=12),
func(
(extDate.iloc[-1] - datetime.fromisoformat('2020-03-25')).days,
*optimizedParameters),
'{:.0f}'.format(
np.floor(func(
(extDate.iloc[-1] - datetime.fromisoformat('2020-03-25')).days,
*optimizedParameters))),
color='w')
# set chart style
ax.xaxis.set_major_formatter(dateFmt)
ax.set_facecolor('#101010')
# set chart title
ax.title.set_text(
'Casos da COVID-19 em Fernandópolis - {}'
.format(ds['Data'].iloc[-1].strftime('%d/%m/%Y')))
# draw legend on the upper left corner
ax.legend(loc='upper left')
# save chart as a png
fig.savefig('../images/line_chart.png')
def main():
ds = pd.read_csv('../boletim-epidemiologico.csv')
ds['Data'] = ds['Data'].map(
lambda x: datetime.strptime(str(x), '%d/%m/%y'))
dateFmt = mdates.DateFormatter('%d/%m/%y')
area_chart(ds, dateFmt)
bar_chart(ds, dateFmt)
line_chart(ds, dateFmt)
if __name__ == '__main__':
main()
| 29.277778 | 79 | 0.553131 | 817 | 6,324 | 4.226438 | 0.22399 | 0.034752 | 0.059079 | 0.039965 | 0.631625 | 0.551984 | 0.537793 | 0.518679 | 0.511729 | 0.50362 | 0 | 0.041189 | 0.2821 | 6,324 | 215 | 80 | 29.413953 | 0.719383 | 0.112587 | 0 | 0.486842 | 0 | 0 | 0.189862 | 0.017912 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032895 | false | 0 | 0.039474 | 0 | 0.078947 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ede3596e0f595cefcd0e9bb3ee971620608011db | 5,297 | py | Python | carrier/classification/src/eda/eda.py | talk2sunil83/UpgradLearning | 70c4f993c68ce5030e9df0edd15004bbb9fc71e7 | [
"Apache-2.0"
] | null | null | null | carrier/classification/src/eda/eda.py | talk2sunil83/UpgradLearning | 70c4f993c68ce5030e9df0edd15004bbb9fc71e7 | [
"Apache-2.0"
] | null | null | null | carrier/classification/src/eda/eda.py | talk2sunil83/UpgradLearning | 70c4f993c68ce5030e9df0edd15004bbb9fc71e7 | [
"Apache-2.0"
] | null | null | null |
# %% [markdown]
'''
# Calculate suspect score for manufacturing claims
'''
# %% [markdown]
'''
# Problem statement
'''
# %% [markdown]
'''
**Author** : Sunil Yadav || yadav.sunil83@gmail.com || +91 96206 38383 ||
'''
# %% [markdown]
'''
# Solution Approach
- Check if we can correctly segregate suspected claims
- Prepare model
'''
# %% [markdown]
'''
# Solution
'''
# %% [markdown]
'''
## Lib Imports
'''
# %%
import src.utils.eda as eu
import set_base_path
import numpy as np
import pandas as pd
from IPython.display import display
import plotly.figure_factory as ff
import plotly.graph_objects as go
from enum import Enum, auto
from typing import List, Sequence, Tuple
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
import warnings
from src.constants import RAW_DATA_PATH, INTERIM_DATA_PATH, CLAIM_CAT_COLS
warnings.filterwarnings('ignore')  # ignore warnings for cleaner notebook output
# %% [markdown]
'''
## Data load
'''
# %%
# Load Data
merged_df: pd.DataFrame = pd.read_feather(INTERIM_DATA_PATH / "merged_df.feather")
claims_with_amount: pd.DataFrame = pd.read_feather(RAW_DATA_PATH / "claims_with_amount.feather")
labour: pd.DataFrame = pd.read_feather(RAW_DATA_PATH / "labour.feather")
parts_replaced: pd.DataFrame = pd.read_feather(RAW_DATA_PATH / "parts_replaced.feather")
# %% [markdown]
'''
## Pandas settings
'''
# %%
pd.options.display.max_columns = 300
pd.options.display.max_rows = 300
pd.options.display.width = None
pd.options.display.max_colwidth = 100
pd.options.display.precision = 3
# %% [markdown]
'''
# EDA
'''
# %% [markdown]
'''
## Data Overview
'''
# %% [markdown]
'''
### Merged DF
'''
# %%
# eu.get_data_frame_overview(merged_df)
# %%
# %%
pivoted_columns = list(labour['JOB_CODE'].unique()) + list(parts_replaced['INS_PART_CODE'].unique())
zeros = merged_df[pivoted_columns] == 0.
(((zeros).sum()*100)/merged_df.shape[0]).sort_values(ascending=False)
# %%
# Claims Data
eu.get_data_frame_overview(claims_with_amount)
# %%
# %% [markdown]
'''
### Univariate
'''
# %% [markdown]
'''
#### value counts
'''
# %%
eu.print_value_count_percents(claims_with_amount[CLAIM_CAT_COLS])
# %% [markdown]
'''
#### value counts plots
'''
# %%
eu.plot_univariate_categorical_columns(claims_with_amount[CLAIM_CAT_COLS], x_rotation=90, plot_limit=50)
# %% [markdown]
'''
#### distributions
'''
# %%
claims_with_amount.dtypes
# %%
# np.float is deprecated since NumPy 1.20; np.float64 (or plain float) is the durable spelling
num_cols = claims_with_amount.dtypes[claims_with_amount.dtypes == np.float].index
# %%
claims_with_amount[num_cols].isnull().sum()
# %%
eu.plot_dist(claims_with_amount[num_cols])
# %% [markdown]
'''
## Drop unwanted columns
'''
# %% [markdown]
'''
## Fix column dtypes
'''
# %% [markdown]
'''
#### Plotting numeric and categorical
'''
# %%
num_cols, CLAIM_CAT_COLS
# %%
len(num_cols), len(CLAIM_CAT_COLS)
# %% [markdown]
'''
### Bi-variate
'''
# %% [markdown]
'''
### Correlation
'''
# %%
plt.figure(figsize=(10, 10))
sns.heatmap(claims_with_amount[num_cols].corr(), annot=True)
plt.show()
# Mostly positively correlated data
# %% [markdown]
'''
#### Numeric-Numeric (Scatter plot)
'''
# %%
eu.plot_two_variables(claims_with_amount, 'CLAIMED_AMOUNT', 'CLAIM_PAID_AMOUNT')
# %%
plt.figure(figsize=(10, 10))
eu.plot_two_variables(claims_with_amount, 'UNITS_USAGE', 'CLAIM_PAID_AMOUNT')
# %% [markdown]
'''
#### Numeric-Categorical (Box and violin)
'''
# %%
new_cols_cat = CLAIM_CAT_COLS[:]
for rem_col in ["DEALER_NUMBER", "CAUSAL_REG_PART", "DEALER_CITY", "DEALER_STATE", "FAULT_LOCN", "FAULT_CODE"]:
new_cols_cat.remove(rem_col)
for col in new_cols_cat:
plt.figure(figsize=(35, 10))
print(f"\nPlotting {col} vs CLAIM_PAID_AMOUNT\n")
eu.plot_two_variables(claims_with_amount, col, 'CLAIM_PAID_AMOUNT', x_rotation=90, legend=False)
# %% [markdown]
'''
#### Categorical-Categorical (Cross Table)
'''
# %%
pd.crosstab(claims_with_amount['CLAIM_TYPE'], claims_with_amount['CLAIM_STATE'])
# %%
# crosstab expects Series (or a list of Series) as its second argument, not a
# multi-column DataFrame; a list of Series is the documented form for
# multi-level cross tables and resolves the data-type error noted in the TODO
pd.crosstab(claims_with_amount['CLAIM_TYPE'],
            [claims_with_amount[c] for c in
             ['CLAIM_STATE', 'APPLICABLE_POLICY', 'DEALER_NUMBER',
              'DEALER_CITY', 'DEALER_STATE', 'DEALER_COUNTRY',
              'CAUSAL_REG_PART', 'FAULT_CODE', 'FAULT_LOCN',
              'REG_PRODUCT_FAMILY_NAME', 'REG_SERIES_NAME',
              'MODEL_NAME', 'REG_MODEL_CODE', 'VARIANT']])
# %% [markdown]
'''
Print a data frame with color
'''
# %% [markdown]
'''
- Drop columns: single valued
- Drop rows
'''
| 23.542222 | 111 | 0.600906 | 590 | 5,297 | 5.116949 | 0.372881 | 0.059622 | 0.095396 | 0.041736 | 0.186154 | 0.127526 | 0.108976 | 0.07519 | 0.040411 | 0.040411 | 0 | 0.011887 | 0.25354 | 5,297 | 224 | 112 | 23.647321 | 0.751644 | 0.11516 | 0 | 0.030303 | 0 | 0 | 0.133457 | 0.018763 | 0 | 0 | 0 | 0.004464 | 0 | 1 | 0 | false | 0 | 0.257576 | 0 | 0.257576 | 0.030303 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ede87d5f9bacdbbf74448b95d151644f8502d5f0 | 5,532 | py | Python | vlnce_baselines/common/ddppo_alg.py | Felix2048/VLN-CE | 4ea21f2af0d869ae65dd6677a53e788233f93761 | [
"MIT"
] | 106 | 2020-05-11T00:47:23.000Z | 2022-03-31T13:15:18.000Z | vlnce_baselines/common/ddppo_alg.py | Felix2048/VLN-CE | 4ea21f2af0d869ae65dd6677a53e788233f93761 | [
"MIT"
] | 30 | 2020-08-01T02:43:32.000Z | 2022-03-31T21:20:30.000Z | vlnce_baselines/common/ddppo_alg.py | Felix2048/VLN-CE | 4ea21f2af0d869ae65dd6677a53e788233f93761 | [
"MIT"
] | 36 | 2020-06-16T01:18:20.000Z | 2022-03-09T17:15:48.000Z |
from typing import Tuple
import torch
from habitat_baselines.rl.ddppo.algo.ddppo import DDPPO
from torch.functional import Tensor
from torch.nn.functional import l1_loss
class WDDPPO(DDPPO):
"""Differences with DD-PPO:
- expands entropy calculation and tracking to three variables
- adds a regularization term to the offset prediction
"""
def __init__(
self,
*args,
offset_regularize_coef: float = 0.0,
pano_entropy_coef: float = 1.0,
offset_entropy_coef: float = 1.0,
distance_entropy_coef: float = 1.0,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.offset_regularize_coef = offset_regularize_coef
self.pano_entropy_coef = pano_entropy_coef
self.offset_entropy_coef = offset_entropy_coef
self.distance_entropy_coef = distance_entropy_coef
def get_advantages(self, rollouts) -> Tensor:
advantages = rollouts.returns[:-1] - rollouts.value_preds[:-1]
if not self.use_normalized_advantage:
return advantages
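        # otherwise normalize to zero mean / unit variance; the 1e-5 guards
        # against division by zero when the advantages are near-constant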
return (advantages - advantages.mean()) / (advantages.std() + 1e-5)
    def update(self, rollouts) -> Tuple[float, float, float, float, float, float]:
advantages = self.get_advantages(rollouts)
value_loss_epoch = 0.0
action_loss_epoch = 0.0
entropy_loss_epoch = 0.0
pano_entropy_epoch = 0.0
offset_entropy_epoch = 0.0
distance_entropy_epoch = 0.0
for _e in range(self.ppo_epoch):
data_generator = rollouts.recurrent_generator(
advantages, self.num_mini_batch
)
for sample in data_generator:
(
obs_batch,
recurrent_hidden_states_batch,
actions_batch,
prev_actions_batch,
value_preds_batch,
return_batch,
masks_batch,
old_action_log_probs_batch,
adv_targ,
) = sample
# Reshape to do in a single forward pass for all steps
(
values,
action_log_probs,
entropy,
_,
) = self.actor_critic.evaluate_actions(
obs_batch,
recurrent_hidden_states_batch,
prev_actions_batch,
masks_batch,
actions_batch,
)
entropy_loss = (
self.pano_entropy_coef * entropy["pano"]
+ self.offset_entropy_coef * entropy["offset"]
+ self.distance_entropy_coef * entropy["distance"]
).mean() * self.entropy_coef
ratio = torch.exp(
action_log_probs - old_action_log_probs_batch
)
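                # PPO clipped surrogate objective: take the pessimistic minimum
                # of the unclipped and clipped importance-weighted advantages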
surr1 = ratio * adv_targ
surr2 = (
torch.clamp(
ratio, 1.0 - self.clip_param, 1.0 + self.clip_param
)
* adv_targ
)
action_loss = -torch.min(surr1, surr2).mean()
if self.use_clipped_value_loss:
value_pred_clipped = value_preds_batch + (
values - value_preds_batch
).clamp(-self.clip_param, self.clip_param)
value_losses = (values - return_batch).pow(2)
value_losses_clipped = (
value_pred_clipped - return_batch
).pow(2)
value_loss = (
0.5
* torch.max(value_losses, value_losses_clipped).mean()
)
else:
value_loss = 0.5 * (return_batch - values).pow(2).mean()
value_loss = value_loss * self.value_loss_coef
# slight regularization to the offset
offset_loss = 0.0
if "offset" in actions_batch:
offset_loss = self.offset_regularize_coef * l1_loss(
self.actor_critic.net.offset_to_continuous(
actions_batch["offset"]
),
torch.zeros_like(actions_batch["offset"]),
)
self.optimizer.zero_grad()
loss = value_loss + action_loss + offset_loss - entropy_loss
self.before_backward(loss)
loss.backward()
self.after_backward(loss)
self.before_step()
self.optimizer.step()
self.after_step()
value_loss_epoch += value_loss.item()
action_loss_epoch += action_loss.item()
entropy_loss_epoch += entropy_loss.item()
pano_entropy_epoch += entropy["pano"].mean().item()
offset_entropy_epoch += entropy["offset"].mean().item()
distance_entropy_epoch += entropy["distance"].mean().item()
num_updates = self.ppo_epoch * self.num_mini_batch
return (
value_loss_epoch / num_updates,
action_loss_epoch / num_updates,
entropy_loss_epoch / num_updates,
pano_entropy_epoch / num_updates,
offset_entropy_epoch / num_updates,
distance_entropy_epoch / num_updates,
)
| 36.88 | 78 | 0.525669 | 549 | 5,532 | 4.947177 | 0.238616 | 0.052651 | 0.015464 | 0.018778 | 0.086892 | 0.025037 | 0 | 0 | 0 | 0 | 0 | 0.012983 | 0.401302 | 5,532 | 149 | 79 | 37.127517 | 0.807065 | 0.041576 | 0 | 0.081301 | 0 | 0 | 0.010223 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02439 | false | 0 | 0.04065 | 0 | 0.097561 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
edea2cfe56a56fb79fd1fce518faeebadbd65eee | 1,791 | py | Python | main.py | Jackson-Kang/Speech-dataset-generator | 7d73ea59f2fb0420cfcbd66afe9352a4eecbac9d | [
"MIT"
] | 4 | 2020-11-19T09:28:40.000Z | 2020-12-10T10:56:38.000Z | main.py | Jackson-Kang/Speech-dataset-generator | 7d73ea59f2fb0420cfcbd66afe9352a4eecbac9d | [
"MIT"
] | null | null | null | main.py | Jackson-Kang/Speech-dataset-generator | 7d73ea59f2fb0420cfcbd66afe9352a4eecbac9d | [
"MIT"
] | null | null | null |
import sys
import configs as cfg
from video2wav import Video2Wav_Converter
from segment_speech import Segment_Speech
from transcribe_speech import Transcribe_Speech
from utils import create_dir
def convert_video_to_wav():
create_dir(cfg.preprocessed_wav_savepath)
create_dir(cfg.extracted_wav_savepath)
v2w = Video2Wav_Converter(input_video_dataset_path=cfg.input_video_data_path,
input_file_format=cfg.input_video_format,
extracted_wav_savepath=cfg.extracted_wav_savepath,
acodec=cfg.acodec,
sampling_rate=cfg.wav_extraction_output_sampling_rate)
v2w.do()
def segment_speech():
create_dir(cfg.preprocessed_wav_savepath)
create_dir(cfg.segmented_wav_savepath)
ss = Segment_Speech(in_unsegmented_wav_path=cfg.unsegmented_input_wav_path,
out_wav_savepath = cfg.segmented_wav_savepath,
input_file_format = cfg.segmentation_input_wav_format,
sampling_rate = cfg.segmentation_source_sampling_rate,
resampling_rate = cfg.segmentation_output_resampling_rate,
min_silence_len=400,
keep_silence=100,
silence_chunk_len=100,
silence_thresh=-40,
skip_idx=0)
ss.do()
def transcribe_speech():
ts = Transcribe_Speech(in_segmented_wav_path = cfg.segmented_input_wav_path,
out_meta_filename = cfg.meta_name,
input_file_format = cfg.transcription_input_wav_format,
sampling_rate = cfg.transcription_audio_sampling_rate,
wav_channel = cfg.wav_channel,
language_code=cfg.language_code)
ts.do()
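
# Usage (per the dispatch below): python main.py <option>
#   0 -> extract wav audio from the input videos
#   1 -> segment speech into utterances
#   2 -> transcribe the segmented speech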
if __name__ == "__main__":
assert len(sys.argv) == 2, "[ERROR] option must be provided!"
if sys.argv[1] in [0, "0"]:
convert_video_to_wav()
elif sys.argv[1] in [1, "1"]:
segment_speech()
elif sys.argv[1] in [2, "2"]:
transcribe_speech()
| 27.553846 | 79 | 0.757119 | 249 | 1,791 | 5.004016 | 0.305221 | 0.070626 | 0.038523 | 0.043339 | 0.144462 | 0.12199 | 0.075441 | 0.075441 | 0.075441 | 0 | 0 | 0.018 | 0.162479 | 1,791 | 64 | 80 | 27.984375 | 0.812667 | 0 | 0 | 0.044444 | 0 | 0 | 0.024009 | 0 | 0 | 0 | 0 | 0 | 0.022222 | 1 | 0.066667 | false | 0 | 0.133333 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
edeef0d9d796972bf70b21cd812c5bf7a74c376d | 216 | py | Python | cgh_practical_ml/b_pandas.py | bm2-lab/MLClass | 50e12d58aa56c25feefaa18af2351148052c4c22 | [
"Apache-2.0"
] | 2 | 2017-05-18T08:01:10.000Z | 2017-06-07T06:23:11.000Z | cgh_practical_ml/b_pandas.py | bm2-lab/MLClass | 50e12d58aa56c25feefaa18af2351148052c4c22 | [
"Apache-2.0"
] | null | null | null | cgh_practical_ml/b_pandas.py | bm2-lab/MLClass | 50e12d58aa56c25feefaa18af2351148052c4c22 | [
"Apache-2.0"
] | null | null | null |
import pandas as pd
dfm = pd.read_csv('h3.bed', sep='\t', header=None, index_col=None)
dfm.columns = ['chrom', 'start', 'end']
dfm['length'] = dfm['end'] - dfm['start']  # BED intervals are half-open [start, end)
dfm.to_csv('h3.tsv', sep='\t', index=None)
| 21.6 | 66 | 0.62037 | 37 | 216 | 3.540541 | 0.594595 | 0.076336 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010582 | 0.125 | 216 | 10 | 67 | 21.6 | 0.68254 | 0 | 0 | 0 | 0 | 0.198157 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
edef28264d82bcd62dedd4c32a8425656c175820 | 7,762 | py | Python | scripts/cros_oobe_autoconfig.py | khromiumos/chromiumos-chromite | a42a85481cdd9d635dc40a04585e427f89f3bb3f | [
"BSD-3-Clause"
] | null | null | null | scripts/cros_oobe_autoconfig.py | khromiumos/chromiumos-chromite | a42a85481cdd9d635dc40a04585e427f89f3bb3f | [
"BSD-3-Clause"
] | 2 | 2021-03-26T00:29:32.000Z | 2021-04-30T21:29:33.000Z | scripts/cros_oobe_autoconfig.py | khromiumos/chromiumos-chromite | a42a85481cdd9d635dc40a04585e427f89f3bb3f | [
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provision a recovery image for OOBE autoconfiguration.
This script populates the OOBE autoconfiguration data
(/stateful/unencrypted/oobe_auto_config/config.json) with the given parameters.
Additionally, it marks the image as being "hands-free", i.e. requiring no
physical user interaction to remove the recovery media before rebooting after
the recovery procedure has completed.
Any parameters prefixed with --x (e.g. --x-demo-mode) correspond directly to
generated elements in the configuration expected by OOBE.
"""
from __future__ import print_function
import json
import os
import sys
import uuid
from chromite.lib import commandline
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import image_lib
from chromite.lib import osutils
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
# OOBE auto-config parameters as they appear in
# chrome/browser/chromeos/login/configuration_keys.h
# Please keep the keys grouped in the same order as the source file.
_CONFIG_PARAMETERS = (
('demo-mode', bool, 'Whether the device should be placed into demo mode.'),
('network-onc', str, 'ONC blob for network configuration.'),
('network-auto-connect', bool,
'Whether the network screen should automatically proceed with '
'connected network.'),
('eula-send-statistics', bool,
'Whether the device should send usage statistics.'),
('eula-auto-accept', bool,
'Whether the EULA should be automatically accepted.'),
('update-skip', bool,
'Whether the udpate check should be skipped entirely (it may be '
'required for future version pinning).'),
('wizard-auto-enroll', bool,
'Whether the wizard should automatically start enrollment at the '
'appropriate moment.'),
)
# Set of flags to specify when building with --generic.
_GENERIC_FLAGS = {
'network-auto-connect': True,
'eula-send-statistics': True,
'eula-auto-accept': True,
'update-skip': True,
}
# Mapping of flag type to argparse kwargs.
_ARG_TYPES = {
str: {},
bool: {'action': 'store_true'},
}
# Name of the OOBE directory in unencrypted/.
_OOBE_DIRECTORY = 'oobe_auto_config'
# Name of the configuration file in the recovery image.
_CONFIG_PATH = 'config.json'
# Name of the file containing the enrollment domain.
_DOMAIN_PATH = 'enrollment_domain'
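
# Illustrative invocation (flags per ParseArguments below; the script/wrapper
# name is assumed):
#   cros_oobe_autoconfig recovery_image.bin --generic \
#       --wifi-ssid GuestNetwork --enrollment-domain example.com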
def SanitizeDomain(domain):
"""Sanitized |domain| for use in recovery.
Args:
domain: (str) The original string.
Returns:
(str) The sanitized domain name, possibly using punycode to disambiguate.
"""
# Encode using punycode ("idna" here) to prevent homograph attacks.
# Once that's been normalized to ASCII, normalize to lowercase.
return domain.encode('idna').decode('utf-8').lower()
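
# Illustrative behaviour: SanitizeDomain('EXAMPLE.com') -> 'example.com', and a
# Unicode name such as 'bücher.ch' -> 'xn--bcher-kva.ch' (punycode), so
# homograph lookalikes become visually distinguishable.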
def GetConfigContent(opts):
"""Formats OOBE autoconfiguration from commandline namespace.
Args:
opts: A commandline namespace containing OOBE autoconfig opts.
Returns:
A JSON string representation of the requested configuration.
"""
conf = {}
for flag, _, _ in _CONFIG_PARAMETERS:
conf[flag] = getattr(opts, 'x_' + flag.replace('-', '_'))
if opts.wifi_ssid:
conf['network-onc'] = {
'GUID': str(uuid.uuid4()),
'Name': opts.wifi_ssid,
'Type': 'WiFi',
'WiFi': {
'AutoConnect': True,
'HiddenSSID': False,
'SSID': opts.wifi_ssid,
'Security': 'None',
},
}
if opts.use_ethernet:
conf['network-onc'] = {
'GUID': str(uuid.uuid4()),
'Name': 'Ethernet',
'Type': 'Ethernet',
'Ethernet': {
'Authentication': 'None',
},
}
return json.dumps(conf)
def PrepareImage(path, content, domain=None):
"""Prepares a recovery image for OOBE autoconfiguration.
Args:
path: Path to the recovery image.
content: The content of the OOBE autoconfiguration.
domain: Which domain to enroll to.
"""
with osutils.TempDir() as tmp, \
image_lib.LoopbackPartitions(path, tmp) as image:
stateful_mnt = image.Mount((constants.CROS_PART_STATEFUL,),
mount_opts=('rw',))[0]
# /stateful/unencrypted may not exist at this point in time on the
# recovery image, so create it root-owned here.
unencrypted = os.path.join(stateful_mnt, 'unencrypted')
osutils.SafeMakedirs(unencrypted, mode=0o755, sudo=True)
# The OOBE autoconfig directory must be owned by the chronos user so
# that we can delete the config file from it from Chrome.
oobe_autoconf = os.path.join(unencrypted, _OOBE_DIRECTORY)
osutils.SafeMakedirsNonRoot(oobe_autoconf, user='chronos')
# Create the config file to be owned by the chronos user, and write the
# given data into it.
config = os.path.join(oobe_autoconf, _CONFIG_PATH)
osutils.WriteFile(config, content, sudo=True)
cros_build_lib.sudo_run(['chown', 'chronos:chronos', config])
# If we have a plaintext domain name, write it.
if domain:
domain_path = os.path.join(oobe_autoconf, _DOMAIN_PATH)
osutils.WriteFile(domain_path, SanitizeDomain(domain), sudo=True)
cros_build_lib.sudo_run(['chown', 'chronos:chronos', domain_path])
def ParseArguments(argv):
"""Returns a namespace for the CLI arguments."""
parser = commandline.ArgumentParser(description=__doc__)
parser.add_argument('image', help='Path of recovery image to populate.')
# Prefix raw config elements with --x.
for flag, flag_type, help_text in _CONFIG_PARAMETERS:
parser.add_argument('--x-%s' % flag, help=help_text,
**_ARG_TYPES[flag_type])
parser.add_argument('--generic', action='store_true',
help='Set defaults for common configuration options.')
parser.add_argument('--dump-config', action='store_true',
help='Dump generated configuration file to stdout.')
parser.add_argument('--config', type='path', required=False,
help='Path to pre-generated configuration file to use, '
'overriding other flags set.')
parser.add_argument('--wifi-ssid', type=str, required=False,
help='If specified, generates an ONC for auto-connecting '
'to the given SSID. The network must not use any '
'security (i.e. be an open network), or the device '
'will fail to connect.')
parser.add_argument('--use-ethernet', action='store_true',
help='If specified, generates an ONC for auto-connecting '
'via ethernet.')
parser.add_argument('--enrollment-domain', type=str, required=False,
help='Text to visually identify the enrollment token in '
'recovery.')
opts = parser.parse_args(argv)
if opts.use_ethernet and opts.wifi_ssid:
parser.error('cannot specify --wifi-ssid and --use-ethernet together')
if opts.generic:
for opt, val in _GENERIC_FLAGS.items():
setattr(opts, 'x_' + opt.replace('-', '_'), val)
opts.Freeze()
return opts
def main(argv):
cros_build_lib.AssertInsideChroot()
opts = ParseArguments(argv)
if opts.config:
config_content = osutils.ReadFile(opts.config)
else:
config_content = GetConfigContent(opts)
logging.info('Using config: %s', config_content)
if opts.dump_config:
print(config_content)
PrepareImage(opts.image, config_content, opts.enrollment_domain)
| 33.747826 | 80 | 0.676887 | 988 | 7,762 | 5.213563 | 0.315789 | 0.013978 | 0.026403 | 0.024461 | 0.114735 | 0.072607 | 0.048923 | 0.048923 | 0.035721 | 0.017861 | 0 | 0.002799 | 0.21747 | 7,762 | 229 | 81 | 33.895197 | 0.845242 | 0.289616 | 0 | 0.046512 | 0 | 0 | 0.295049 | 0 | 0 | 0 | 0 | 0 | 0.015504 | 1 | 0.03876 | false | 0 | 0.085271 | 0 | 0.147287 | 0.015504 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
edf065b3b9d813bd45d5a9f2000c563da0552f93 | 524 | py | Python | delivrable.py | minidfx/Cloud-Python- | c9e4741c4c4f7de77f439e2786cca7f03f70cad9 | [
"MIT"
] | null | null | null | delivrable.py | minidfx/Cloud-Python- | c9e4741c4c4f7de77f439e2786cca7f03f70cad9 | [
"MIT"
] | null | null | null | delivrable.py | minidfx/Cloud-Python- | c9e4741c4c4f7de77f439e2786cca7f03f70cad9 | [
"MIT"
] | null | null | null |
import os
import sys
from Amazon import Amazon
from OpenStack import OpenStack
# tuple comparison correctly enforces the 2.7 minimum (the original
# `major < 2 and minor < 7` check never fired on Python 2.x)
if sys.version_info < (2, 7):
    raise Exception("Python version 2.7 minimum is required for running this script.")
clouds = [OpenStack(), Amazon()]
for cloud in clouds:
cloud.create()
print('Press \'A\' to destroy instances created.')
consoleInput = os.read(0, 1)
while consoleInput != b'A':
consoleInput = os.read(0, 1)
for cloud in clouds:
cloud.destroy()
print("Delivrable terminated.")
| 20.153846 | 86 | 0.717557 | 76 | 524 | 4.921053 | 0.565789 | 0.053476 | 0.074866 | 0.085562 | 0.219251 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018433 | 0.171756 | 524 | 25 | 87 | 20.96 | 0.843318 | 0 | 0 | 0.25 | 0 | 0 | 0.234733 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
edf0710ec6bce13e2d9a52d1a1948bbc1d362eb2 | 11,466 | py | Python | tests/test_algebra_meta_onnx.py | adrinjalali/sklearn-onnx | 160200eb19880b4ded0acdd0c1e1a5ecd45c7b74 | [
"MIT"
] | null | null | null | tests/test_algebra_meta_onnx.py | adrinjalali/sklearn-onnx | 160200eb19880b4ded0acdd0c1e1a5ecd45c7b74 | [
"MIT"
] | null | null | null | tests/test_algebra_meta_onnx.py | adrinjalali/sklearn-onnx | 160200eb19880b4ded0acdd0c1e1a5ecd45c7b74 | [
"MIT"
] | null | null | null |
import os
import unittest
from distutils.version import StrictVersion
from io import StringIO
import contextlib
import numpy
from numpy.testing import assert_almost_equal
import onnx
import onnxruntime
from onnx import numpy_helper, helper
from skl2onnx.algebra.onnx_ops import dynamic_class_creation
from skl2onnx.algebra import OnnxOperator
from skl2onnx.proto import onnx_proto
class TestMetaOnnx(unittest.TestCase):
def setUp(self):
self._algebra = dynamic_class_creation()
def test_dynamic_class_creation(self):
res = self._algebra
for cl in res:
assert hasattr(cl, '__init__')
assert hasattr(cl, '__doc__')
def test_mul(self):
from skl2onnx.algebra.onnx_ops import OnnxMul
assert OnnxMul.operator_name == 'Mul'
assert isinstance(OnnxMul('a', 'b'), OnnxOperator)
@unittest.skipIf(StrictVersion(onnx.__version__) < StrictVersion("1.5.0"),
reason="too unstable with older versions")
@unittest.skipIf(StrictVersion(onnxruntime.__version__) <
StrictVersion("0.5.0"),
reason="too unstable with older versions")
def test_onnx_spec(self):
untested = {'AveragePool', # issue with ceil_mode
'BitShift', # opset 11
'Cast', # unsupported type
'Compress', # shape inference fails
'CumSum', # opset 11
# Input X must be 4-dimensional. X: {1,1,3}
'ConvInteger',
'ConvTranspose',
'DepthToSpace', # opset 11
'DequantizeLinear',
'Equal', # opset 11
'Expand', # shape inference fails
'GatherElements', # opset 11
'MatMulInteger',
'MaxPool', # issue with ceil_mode
'Mod',
'QLinearConv',
'QLinearMatMul',
"QuantizeLinear",
"Round", # opset 11
'Scan', # Graph attribute inferencing returned type
# information for 2 outputs. Expected 1
# Node () has input size 5 not in range [min=1, max=1].
'ScatterElements', # opset 11
'Unique', # opset 11
"Upsample",
}
folder = os.path.dirname(onnx.__file__)
folder = os.path.join(folder, "backend", "test", "data", "node")
subs = os.listdir(folder)
for sub in subs:
path = os.path.join(folder, sub)
model = os.path.join(path, "model.onnx")
if not os.path.exists(model):
continue
dataset = os.path.join(path, "test_data_set_0")
inps = [os.path.join(dataset, "input_0.pb")]
outs = [os.path.join(dataset, "output_0.pb")]
if not os.path.exists(inps[0]) or not os.path.exists(outs[0]):
continue
for d in range(1, 9):
name = os.path.join(dataset, "input_%d.pb" % d)
if os.path.exists(name):
inps.append(name)
else:
break
for d in range(1, 9):
name = os.path.join(dataset, "output_%d.pb" % d)
if os.path.exists(name):
outs.append(name)
else:
break
tests = dict(model=model, inputs=inps, outputs=outs)
try:
op_type, success, reason = self._check_algebra_onnxruntime(
untested=untested, **tests)
except Exception as e:
raise Exception(
"Unable to handle operator '{}'".format(model)) from e
if __name__ == "__main__":
if not success:
print("-", op_type, " Failure", reason.split('\n')[0])
def _load_data(self, name):
tensor = onnx.TensorProto()
with open(name, 'rb') as fid:
content = fid.read()
tensor.ParseFromString(content)
return tensor
def _load_data_test(self, name, test):
try:
return self._load_data(name)
except Exception as e:
raise RuntimeError(
"Unable to load data '{}' for test '{}'"
".".format(name, test)) from e
def _check_algebra_onnxruntime(self, untested=None, model=None,
inputs=None, outputs=None):
if untested is None:
untested = {}
name = os.path.split(os.path.split(model)[0])[-1]
try:
onx = onnx.load(model)
except Exception as e:
raise RuntimeError(
"Unable to load model '{}' - '{}'.".format(name, model)) from e
inps = [self._load_data_test(input, name) for input in inputs]
outs = [self._load_data_test(output, name) for output in outputs]
if len(onx.graph.node) != 1:
op_type = ",".join([n.op_type for n in onx.graph.node])
return (op_type, False,
"The graph contains more than one node. Not tested.")
# get the operator to test
node = onx.graph.node[0]
op_class = self._algebra.get("Onnx" + node.op_type, None)
if op_class is None:
raise RuntimeError(
"Unable to find the corresponding operator in the algebra "
"'{}'.".format(node.op_type))
atts = {}
if node.attribute:
for att in node.attribute:
atts[att.name] = helper.get_attribute_value(att)
if len(node.input) != len(inps):
if node.op_type in untested:
return (node.op_type, False,
"unexpected number of inputs {} != {}".format(
                            len(node.input), len(inps)))
raise RuntimeError(
"'{}': unexpected number of inputs {} != {}.".format(
node.op_type, len(node.input), len(inps)))
if len(node.output) < len(outs):
raise RuntimeError(
"'{}': unexpected number of inputs {} != {}.".format(
node.op_type, len(node.output), len(outs)))
# See file onnx-ml.proto.
if inps[0].data_type in (onnx_proto.TensorProto.FLOAT16, ):
# not supported
return (node.op_type, False,
"Unsupported type {}".format(inps[0].data_type))
expected_data_type = (onnx_proto.TensorProto.UINT8,
onnx_proto.TensorProto.INT32,
onnx_proto.TensorProto.INT64,
onnx_proto.TensorProto.FLOAT,
onnx_proto.TensorProto.DOUBLE,
onnx_proto.TensorProto.BOOL,
onnx_proto.TensorProto.STRING)
if inps[0].data_type not in expected_data_type:
if node.op_type in untested:
return (node.op_type, False,
"unexpected data_type {} not in {}".format(
inps[0].data_type, expected_data_type))
raise NotImplementedError(
"Unexpected data_type {}: {}\n---\n{}\n---".format(
inps[0].data_type, node.op_type, inps[0]))
# prepare the inputs
inp_arrays = [numpy_helper.to_array(inp) for inp in inps]
out_arrays = [numpy_helper.to_array(out) for out in outs]
for i in range(len(inp_arrays)):
inp_array = inp_arrays[i]
if inp_array.dtype == numpy.float64:
inp_arrays[i] = inp_array.astype(numpy.float32)
inps[i] = numpy_helper.from_array(inp_arrays[i])
        # check that the reference test from onnx runs correctly.
import onnxruntime as ort
monx = onx.SerializeToString()
try:
sess = ort.InferenceSession(monx)
except RuntimeError as e:
if node.op_type in untested:
return (node.op_type, False,
"cannot load ONNX model {}".format(e))
raise RuntimeError(
"'{}': cannot load(1) due to {}.".format(node.op_type, e))
names = [i.name for i in sess.get_inputs()]
ort_inputs = {name: inp_array for name,
inp_array in zip(names, inp_arrays)}
try:
Y = sess.run(None, ort_inputs)
except RuntimeError as e:
if node.op_type in untested:
return (node.op_type, False,
"cannot load skl2onnx model {}".format(e))
raise RuntimeError(
"'{}': cannot run(1) due to {}.".format(node.op_type, e))
for exp, got in zip(out_arrays, Y):
try:
assert_almost_equal(exp, got, decimal=4)
except TypeError:
pass
# instantiate the operator
for i, inp in enumerate(inps):
inp.name = 'I%d' % i
op = op_class(*[inp.name for inp in inps],
output_names=[out.name for out in outs],
**atts)
st = StringIO()
with contextlib.redirect_stdout(st):
with contextlib.redirect_stderr(st):
ort_inputs = {'I%d' % i: inp for i, inp in enumerate(inps)}
try:
onx2 = op.to_onnx(ort_inputs)
except (RuntimeError, NotImplementedError, TypeError) as e:
if node.op_type in untested:
return (node.op_type, False,
"cannot load skl2onnx model {}".format(e))
raise NotImplementedError(
"Unable to continue {}\n{}\n{}".format(
inp_array.dtype, st.getvalue(), ort_inputs)) from e
# test with onnxruntime
monx2 = onx2.SerializeToString()
try:
sess = ort.InferenceSession(monx2)
except RuntimeError as e:
if node.op_type in untested:
return (node.op_type, False,
"cannot load skl2onnx model {}".format(e))
raise RuntimeError("'{}': cannot load(2) due to {}\n"
"---ONNX--\n{}\n---SKL2ONNX---\n{}".format(
node.op_type, e, onx, onx2))
names = [i.name for i in sess.get_inputs()]
ort_inputs = {name: inp_array for name,
inp_array in zip(names, inp_arrays)}
try:
Y = sess.run(None, ort_inputs)
except RuntimeError as e:
if node.op_type in untested:
return (node.op_type, False,
"cannot load skl2onnx model {}".format(e))
raise RuntimeError("'{}': cannot run(2) due to {}\n"
"---ONNX--\n{}\n---SKL2ONNX---\n{}".format(
node.op_type, e, onx, onx2))
for exp, got in zip(out_arrays, Y):
try:
assert_almost_equal(exp, got, decimal=4)
except (TypeError, AssertionError):
pass
return node.op_type, True, ""
if __name__ == "__main__":
unittest.main()
| 41.846715 | 79 | 0.513257 | 1,252 | 11,466 | 4.555911 | 0.191693 | 0.031557 | 0.043829 | 0.025245 | 0.380084 | 0.318724 | 0.294004 | 0.286466 | 0.245792 | 0.229313 | 0 | 0.012133 | 0.381825 | 11,466 | 273 | 80 | 42 | 0.792607 | 0.045788 | 0 | 0.3361 | 0 | 0 | 0.112689 | 0.006047 | 0 | 0 | 0 | 0 | 0.033195 | 1 | 0.029046 | false | 0.008299 | 0.062241 | 0 | 0.145228 | 0.004149 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
edf26090d854080fb9b45549474f48ba0c37c05d | 7,526 | py | Python | moztrap/model/environments/api.py | mbeko/moztrap | db75e1f8756ef2c0c39652a66302b19c8afa0256 | [
"BSD-2-Clause"
] | null | null | null | moztrap/model/environments/api.py | mbeko/moztrap | db75e1f8756ef2c0c39652a66302b19c8afa0256 | [
"BSD-2-Clause"
] | null | null | null | moztrap/model/environments/api.py | mbeko/moztrap | db75e1f8756ef2c0c39652a66302b19c8afa0256 | [
"BSD-2-Clause"
] | null | null | null | from tastypie import fields
from tastypie import http
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from tastypie.exceptions import ImmediateHttpResponse
from ..mtapi import MTResource, MTAuthorization
from .models import Profile, Environment, Element, Category
import logging
logger = logging.getLogger(__name__)
class EnvironmentAuthorization(MTAuthorization):
"""Atypically named permission."""
@property
def permission(self):
"""This permission should be checked by is_authorized."""
return "environments.manage_environments"
class ProfileResource(MTResource):
"""Create, Read, Update, and Delete capabilities for Profile."""
class Meta(MTResource.Meta):
queryset = Profile.objects.all()
fields = ["id", "name"]
authorization = EnvironmentAuthorization()
ordering = ["id", "name"]
filtering = {
"name": ALL,
}
@property
def model(self):
"""Model class related to this resource."""
return Profile
class CategoryResource(MTResource):
"""Create, Read, Update and Delete capabilities for Category."""
elements = fields.ToManyField(
"moztrap.model.environments.api.ElementResource",
"elements",
full=True,
readonly=True
)
class Meta(MTResource.Meta):
queryset = Category.objects.all()
fields = ["id", "name"]
authorization = EnvironmentAuthorization()
ordering = ["id", "name"]
filtering = {
"name": ALL,
}
@property
def model(self):
"""Model class related to this resource."""
return Category
class ElementResource(MTResource):
"""Create, Read, Update and Delete capabilities for Element."""
category = fields.ForeignKey(CategoryResource, "category")
class Meta(MTResource.Meta):
queryset = Element.objects.all()
fields = ["id", "name", "category"]
authorization = EnvironmentAuthorization()
filtering = {
"category": ALL_WITH_RELATIONS,
"name": ALL,
}
ordering = ["id", "name"]
@property
def model(self):
"""Model class related to this resource."""
return Element
@property
def read_create_fields(self):
"""List of fields that are required for create
but read-only for update."""
return ["category"]
class EnvironmentResource(MTResource):
"""Create, Read and Delete capabilities for environments"""
elements = fields.ToManyField(ElementResource, "elements")
# an environment is not required to be associated with a profile
profile = fields.ForeignKey(ProfileResource, "profile", null=True)
class Meta(MTResource.Meta):
queryset = Environment.objects.all()
list_allowed_methods = ['get', 'post', 'patch']
detail_allowed_methods = ['get', 'put', 'delete']
fields = ["id", "profile", "elements"]
filtering = {
"elements": ALL,
"profile": ALL_WITH_RELATIONS,
}
ordering = ["id", "profile"]
@property
def model(self):
"""Model class related to this resource."""
return Environment
def hydrate_m2m(self, bundle):
"""Validate the elements,
which should each belong to separate categories."""
bundle = super(EnvironmentResource, self).hydrate_m2m(bundle)
elem_categories = [elem.data['category'] for elem in
bundle.data['elements']]
if len(set(elem_categories)) != len(bundle.data['elements']):
error_msg = "Elements must each belong to a different Category."
logger.error(error_msg)
raise ImmediateHttpResponse(
response=http.HttpBadRequest(error_msg))
return bundle
def patch_list(self, request, **kwargs):
"""
Since there is no RESTful way to do what we want to do, and since
``PATCH`` is poorly defined with regards to RESTfulness, we are
overloading ``PATCH`` to take a single request that performs
combinatorics and creates multiple objects.
"""
import itertools
from django.db import transaction
from tastypie.utils import dict_strip_unicode_keys
deserialized = self.deserialize(
request,
request.raw_post_data,
format=request.META.get('CONTENT_TYPE', 'application/json'))
# verify input
categories = deserialized.pop('categories', [])
if not categories or not isinstance(categories, list):
error_msg = "PATCH request must contain categories list."
logger.error(error_msg)
raise ImmediateHttpResponse(
response=http.HttpBadRequest(error_msg))
# do the combinatorics
elem_lists = []
for cat in categories:
# do some type validation / variation
if isinstance(cat, basestring):
# simple case of create all the combinations
cat = Category.objects.filter(id=self._id_from_uri(cat))
elem_list = Element.objects.filter(category=cat)
elif isinstance(cat, dict):
# we must be working with at least one partial category
category = Category.objects.filter(
id=self._id_from_uri(cat['category']))
elem_list = Element.objects.filter(category=category)
if 'exclude' in cat:
# exclude some element(s) from the combinations
exclude_uris = cat['exclude']
exclude_ids = [int(
self._id_from_uri(x)) for x in exclude_uris]
elem_list = [elem for elem in elem_list
if elem.id not in exclude_ids]
elif 'include' in cat:
# include only a few elements in the combinations
include_uris = cat['include']
include_ids = [int(
self._id_from_uri(x)) for x in include_uris]
elem_list = [elem for elem in elem_list
if elem.id in include_ids]
else:
# don't worry about this,
# it'll act like a list of categories
pass # pragma: no cover
else:
error_msg = "categories list must contain resource uris or hashes."
logger.error(error_msg)
raise ImmediateHttpResponse(
response=http.HttpBadRequest(error_msg))
# save off the elements from this category that will be used
elem_lists.append(elem_list)
# create all the combinations of elements from categories
combinatorics = itertools.product(*elem_lists)
# do the creation
with transaction.commit_on_success():
for combo in combinatorics:
deserialized['elements'] = combo
bundle = self.build_bundle(
data=dict_strip_unicode_keys(deserialized))
bundle.request.META['REQUEST_METHOD'] = 'PATCH'
self.is_valid(bundle, request)
self.obj_create(bundle, request=request)
# don't try to reply with data, the request doesn't
# really match the results.
return http.HttpAccepted()
| 34.209091 | 83 | 0.59806 | 774 | 7,526 | 5.715762 | 0.284238 | 0.016275 | 0.018083 | 0.0217 | 0.292722 | 0.259268 | 0.22717 | 0.22717 | 0.193264 | 0.175633 | 0 | 0.000387 | 0.313978 | 7,526 | 219 | 84 | 34.365297 | 0.856479 | 0.1924 | 0 | 0.304348 | 0 | 0 | 0.087052 | 0.013134 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057971 | false | 0.007246 | 0.072464 | 0 | 0.282609 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
edf492afe84acc1713a2081782233e25be267de7 | 890 | py | Python | examples/failbot/failbot/writer_options.py | Tallisado/DbBot | cfdea98a5770d86e886205fb2c8b9198c2d6be20 | [
"Apache-2.0"
] | 1 | 2021-11-22T14:35:22.000Z | 2021-11-22T14:35:22.000Z | examples/failbot/failbot/writer_options.py | Tallisado/DbBot | cfdea98a5770d86e886205fb2c8b9198c2d6be20 | [
"Apache-2.0"
] | null | null | null | examples/failbot/failbot/writer_options.py | Tallisado/DbBot | cfdea98a5770d86e886205fb2c8b9198c2d6be20 | [
"Apache-2.0"
] | null | null | null | from os.path import exists
from sys import argv
from dbbot import CommandLineOptions
class WriterOptions(CommandLineOptions):
@property
def output_file_path(self):
return self._options.output_file_path
def _add_parser_options(self):
super(WriterOptions, self)._add_parser_options()
self._parser.add_option('-o', '--output',
dest='output_file_path',
help='path to the resulting html file',
)
def _get_validated_options(self):
if len(argv) < 2:
self._exit_with_help()
options = super(WriterOptions, self)._get_validated_options()
if not options.output_file_path:
self._parser.error('output html filename is required')
if not exists(options.db_file_path):
            self._parser.error('database %s does not exist' % options.db_file_path)
return options
| 30.689655 | 79 | 0.668539 | 110 | 890 | 5.118182 | 0.4 | 0.085258 | 0.099467 | 0.063943 | 0.159858 | 0.092362 | 0 | 0 | 0 | 0 | 0 | 0.001486 | 0.24382 | 890 | 28 | 80 | 31.785714 | 0.835067 | 0 | 0 | 0 | 0 | 0 | 0.124719 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.136364 | 0.045455 | 0.409091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
edf4bcfd3616b9eb20798b538246c06d4982fdb4 | 223 | py | Python | Solving_Problems/max_common_divisor.py | mingzhangyang/learning_pandas | 6ec0ef09839d87a28dbf3beaa7c61e89f4346a36 | [
"Apache-2.0"
] | null | null | null | Solving_Problems/max_common_divisor.py | mingzhangyang/learning_pandas | 6ec0ef09839d87a28dbf3beaa7c61e89f4346a36 | [
"Apache-2.0"
] | null | null | null | Solving_Problems/max_common_divisor.py | mingzhangyang/learning_pandas | 6ec0ef09839d87a28dbf3beaa7c61e89f4346a36 | [
"Apache-2.0"
] | 1 | 2017-10-10T15:09:38.000Z | 2017-10-10T15:09:38.000Z | #!/usr/bin/python
#coding:utf8
# mcd: max common divisor (i.e. the greatest common divisor)
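# Brute-force search below; Euclid's algorithm would be faster for large
# inputs, e.g. (sketch):
#   def mcd_euclid(a, b):
#       while b:
#           a, b = b, a % b
#       return a
# Example: mcd(12, 18) -> 6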
def mcd(a, b):#a and b are natural numbers.
if a == b:
return a
t = min(a, b)
cd = [i for i in range(1, t+1) if a % i == 0 and b % i == 0]
m = max(cd)
return m | 17.153846 | 61 | 0.587444 | 50 | 223 | 2.58 | 0.56 | 0.046512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029762 | 0.246637 | 223 | 13 | 62 | 17.153846 | 0.738095 | 0.340807 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
edf5ff589947e9a4cdd842f130ed6198e9f67912 | 1,129 | py | Python | Tarefas RNAs/rna_mpl.py | Jovioluiz/IA | 35247c782747a972e73a723608e71faa70cb6916 | [
"MIT"
] | null | null | null | Tarefas RNAs/rna_mpl.py | Jovioluiz/IA | 35247c782747a972e73a723608e71faa70cb6916 | [
"MIT"
] | null | null | null | Tarefas RNAs/rna_mpl.py | Jovioluiz/IA | 35247c782747a972e73a723608e71faa70cb6916 | [
"MIT"
] | null | null | null | # task 4
#Jóvio L. Giacomolli
import numpy as np
# sigmoid function
def sigmoid(x):
return 1/(1 + np.exp(-x))
# MLP architecture
n_input = 3
n_hidden = 4
n_output = 2
# input vector (arbitrary values)
x = np.array([1, 2, 3])
# hidden layer weights
weights_in_hidden = np.array([[0.2, 0.1, -0.9, 0.03],
[0.6, -0.8,0.9, 0.02],
[0.5, -0.6, 0.1, 0.01]])
# output layer weights
weights_hidden_out = np.array([[-0.18, 0.11],
[-0.09, 0.05],
[-0.04, 0.05],
[-0.02, 0.07]])
# forward pass through the network
# hidden layer
# compute the linear combination of inputs and synaptic weights
# hidden layer input
hidden_layer_in = np.dot(x, weights_in_hidden)
# hidden layer output
hidden_layer_out = sigmoid(hidden_layer_in)
# output layer
output_layer_in = np.dot(hidden_layer_out, weights_hidden_out)
# apply the activation function
output_layer_out = sigmoid(output_layer_in)
print('As saídas da rede são {}' .format(output_layer_out)) | 25.659091 | 63 | 0.591674 | 174 | 1,129 | 3.683908 | 0.425287 | 0.074883 | 0.046802 | 0.071763 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.07472 | 0.288751 | 1,129 | 44 | 64 | 25.659091 | 0.723537 | 0.282551 | 0 | 0 | 0 | 0 | 0.03183 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.052632 | 0.052632 | 0.157895 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
edf8b9d24eb17e49b5ccc0a21211628f48bd98dd | 3,273 | py | Python | codestosort/CloudComputing/reports/hw3/src/run.py | jimmy-academia/Deeper-Learnings | ac363efe5450dd2751c0c1bea0ee7af457f7ac24 | [
"MIT"
] | 2 | 2019-09-30T04:57:11.000Z | 2020-04-06T04:27:46.000Z | codestosort/CloudComputing/reports/hw3/src/run.py | jimmy-academia/Deeper-Learnings | ac363efe5450dd2751c0c1bea0ee7af457f7ac24 | [
"MIT"
] | null | null | null | codestosort/CloudComputing/reports/hw3/src/run.py | jimmy-academia/Deeper-Learnings | ac363efe5450dd2751c0c1bea0ee7af457f7ac24 | [
"MIT"
] | null | null | null | from thrift.transport import TSocket,TTransport
from thrift.protocol import TBinaryProtocol
from hbase import Hbase
from hbase.ttypes import ColumnDescriptor
from hbase.ttypes import Mutation
import csv
import os
import time
import logging
from tqdm import tqdm
# table: station, column: attr, row: date
def main():
socket = TSocket.TSocket('127.0.0.1',9090)
socket.setTimeout(5000)
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = Hbase.Client(protocol)
socket.open()
table_list = client.getTableNames()
start = time.time()
logging.basicConfig(format='%(asctime)s | %(levelname)s | %(message)s',
level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
logging.info('Initiating task: Taiwan Air Quality!')
Attributes = ['AMB_TEMP','CO','NO','NO2','NOx','O3','PM10','PM2.5','RAINFALL','RAIN_COND','UVB',
'RH','SO2','WD_HR','WIND_DIREC','WIND_SPEED','WS_HR','CH4','NMHC','THC','PH_RAIN']
csvfiles = [filename for filename in os.listdir(os.getcwd()) if filename.endswith('.csv')]
logging.info(str(csvfiles))
InsertCounts = 0
for file in csvfiles:
with open(file, newline='') as f:
frames = csv.reader(f)
table_Name = ''
logging.info("Start reading {0}".format(file))
Column_Descriptors = []
ctr = 0
# length = sum(1 for row in frames)
#
# for frame in tqdm(frames, total=length):
for frame in tqdm(frames):
if ctr == 0:
ctr += 1
continue
elif ctr == 1:
ctr += 1
                    # Build an ASCII-safe table name from the (possibly
                    # non-ASCII) station name: strip the b'...' wrapper of the
                    # encoded bytes, then drop the backslashes of the escapes.
                    raw_name = str(frame[1].encode('utf-8'))
                    table_Name = raw_name[2:-1].replace('\\', '')
if table_Name not in table_list:
for type in Attributes:
Column_Descriptors.append(ColumnDescriptor(name=type))
client.createTable(table_Name,Column_Descriptors)
logging.info('Build Table : {0}'.format(table_Name))
else:
logging.info('Table {0} already exist, no need to create'.format(table_Name))
# ['2018/01/02', 'iilan', 'NOx', '5.1', '4.4', '3.5', '2.1', '2.5', '3.2', '4.6', '15',
# '13', '11', '7', '6.8', '7.1', '13', '13', '12', '13', '16', '24', '23', '20', '24', '18', '13']
for i in range(3,26):
qualifier = i-2
value = frame[i]
row = frame[0] # date
column = frame[2] # attr
mutate = Mutation(column=column+':'+str(qualifier),value=value)
client.mutateRow(table_Name, frame[0], [mutate])
InsertCounts += 1
end = time.time()
logging.info("================Insert Done================\n")
logging.info("totalInsertCount: {0}, totalTimeSpend: {1}\n".format(InsertCounts,end-start))
logging.info(client.getTableNames())
if __name__ == '__main__':
main() | 36.775281 | 114 | 0.534983 | 377 | 3,273 | 4.562334 | 0.416446 | 0.057558 | 0.017442 | 0.024419 | 0.052326 | 0 | 0 | 0 | 0 | 0 | 0 | 0.04367 | 0.307363 | 3,273 | 89 | 115 | 36.775281 | 0.715042 | 0.094409 | 0 | 0.03125 | 0 | 0 | 0.132273 | 0.014885 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015625 | false | 0 | 0.15625 | 0 | 0.171875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
edfad664d6522de1e57decf992ec9921d32421ab | 873 | py | Python | tests/functional/create_key.py | maxwolfe/autocsr | 6c8295c0796f597c8780658de1570f9951b3d846 | [
"MIT"
] | null | null | null | tests/functional/create_key.py | maxwolfe/autocsr | 6c8295c0796f597c8780658de1570f9951b3d846 | [
"MIT"
] | null | null | null | tests/functional/create_key.py | maxwolfe/autocsr | 6c8295c0796f597c8780658de1570f9951b3d846 | [
"MIT"
] | null | null | null | """Create PKCS11 Key."""
import pkcs11
from pkcs11.util.ec import encode_named_curve_parameters
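# The library path, token label, and PIN below assume a SoftHSM2 test setup.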
if __name__ == "__main__":
lib = pkcs11.lib("/usr/lib/softhsm/libsofthsm2.so")
token = lib.get_token(token_label="token")
with token.open(rw=True, user_pin="1234") as session:
session.generate_keypair(
pkcs11.KeyType.RSA, 2048, label="small_rsa_key", store=True
)
session.generate_keypair(
pkcs11.KeyType.RSA, 4096, label="big_rsa_key", store=True
)
session.generate_keypair(pkcs11.KeyType.DSA, 2048, label="dsa_key", store=True)
ecparams = session.create_domain_parameters(
pkcs11.KeyType.EC,
{pkcs11.Attribute.EC_PARAMS: encode_named_curve_parameters("secp256r1")},
local=True,
)
ecparams.generate_keypair(store=True, label="ec_key")
| 33.576923 | 87 | 0.662085 | 107 | 873 | 5.121495 | 0.439252 | 0.109489 | 0.120438 | 0.153285 | 0.257299 | 0.257299 | 0.182482 | 0.182482 | 0.182482 | 0 | 0 | 0.057185 | 0.218786 | 873 | 25 | 88 | 34.92 | 0.746334 | 0.020619 | 0 | 0.105263 | 0 | 0 | 0.110718 | 0.036514 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.105263 | 0 | 0.105263 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
edfc9ce9d519343ae32bf3b714e11e2e15706541 | 2,353 | py | Python | model/losses.py | TomHacker/faster-rcnn | 313e51f76814cfceb5c2f24fed6d596bebcbd13f | [
"Apache-2.0"
] | 1 | 2019-06-10T00:47:53.000Z | 2019-06-10T00:47:53.000Z | model/losses.py | TomHacker/faster-rcnn | 313e51f76814cfceb5c2f24fed6d596bebcbd13f | [
"Apache-2.0"
] | null | null | null | model/losses.py | TomHacker/faster-rcnn | 313e51f76814cfceb5c2f24fed6d596bebcbd13f | [
"Apache-2.0"
] | null | null | null | from keras import backend as K
from keras.objectives import categorical_crossentropy
import tensorflow as tf
lambda_rpn_regr=1.0
lambda_rpn_class=1.0
lambda_cls_regr=1.0
lambda_cls_class=1.0
epsilon=1e-4
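# Smooth L1 loss for box regression: 0.5 * x^2 where |x| <= 1, and |x| - 0.5
# otherwise; the first half of y_true masks anchors that do not contribute.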
def rpn_loss_regr(num_anchors):
def rpn_loss_regr_fixed_num(y_true,y_pred):
x=y_true[:,:,:,4*num_anchors:]-y_pred
x_abs=K.abs(x)
x_bool=K.cast(K.less_equal(x_abs,1.0),tf.float32)
return lambda_rpn_regr * K.sum(
y_true[:, :, :, :4 * num_anchors] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(
epsilon + y_true[:, :, :, :4 * num_anchors])
return rpn_loss_regr_fixed_num
def rpn_loss_cls(num_anchors):
def rpn_loss_cls_fixed_num(y_true, y_pred):
if K.image_dim_ordering() == 'tf':
return lambda_rpn_class * K.sum(y_true[:, :, :, :num_anchors] * K.binary_crossentropy(y_pred[:, :, :, :],
y_true[:, :, :,
num_anchors:])) / K.sum(
epsilon + y_true[:, :, :, :num_anchors])
else:
return lambda_rpn_class * K.sum(y_true[:, :num_anchors, :, :] * K.binary_crossentropy(y_pred[:, :, :, :],
y_true[:,
num_anchors:, :,
:])) / K.sum(
epsilon + y_true[:, :num_anchors, :, :])
return rpn_loss_cls_fixed_num
def class_loss_regr(num_classes):
def class_loss_regr_fixed_num(y_true, y_pred):
x = y_true[:, :, 4 * num_classes:] - y_pred
x_abs = K.abs(x)
x_bool = K.cast(K.less_equal(x_abs, 1.0), 'float32')
return lambda_cls_regr * K.sum(
y_true[:, :, :4 * num_classes] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(
epsilon + y_true[:, :, :4 * num_classes])
return class_loss_regr_fixed_num
def class_loss_cls(y_true, y_pred):
return lambda_cls_class * K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :]))
| 48.020408 | 122 | 0.486188 | 301 | 2,353 | 3.431894 | 0.156146 | 0.082285 | 0.03485 | 0.052275 | 0.655373 | 0.478219 | 0.460794 | 0.42788 | 0.42788 | 0.42788 | 0 | 0.02481 | 0.38334 | 2,353 | 48 | 123 | 49.020833 | 0.687112 | 0 | 0 | 0.047619 | 0 | 0 | 0.003825 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.071429 | 0.02381 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
edfd77060965954e9fe35eddd7f4bb0c750e7c30 | 4,597 | py | Python | visualization.py | johnrickman/UnpairedImageTranslation | d1d5e1386babacceabb4fe45841592bc7b6c3baa | [
"MIT"
] | null | null | null | visualization.py | johnrickman/UnpairedImageTranslation | d1d5e1386babacceabb4fe45841592bc7b6c3baa | [
"MIT"
] | null | null | null | visualization.py | johnrickman/UnpairedImageTranslation | d1d5e1386babacceabb4fe45841592bc7b6c3baa | [
"MIT"
] | null | null | null | import os
import chainer
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
from chainer import Variable,cuda
import numpy as np
import chainer.functions as F
import losses
from chainer.training import extensions
import warnings
# assumes input in [-1, 1]; rescales it to [0, 1)
def postprocess(var):
img = var.data.get()
img = (img + 1.0) / 2.0 # [0, 1)
img = img.transpose(0, 2, 3, 1)
return img
class VisEvaluator(extensions.Evaluator):
name = "myval"
def __init__(self, *args, **kwargs):
params = kwargs.pop('params')
super(VisEvaluator, self).__init__(*args, **kwargs)
self.vis_out = params['vis_out']
self.slice = params['slice']
if self.slice:
self.num_s = len(self.slice)
else:
self.num_s = 1
self.count = 0
warnings.filterwarnings("ignore", category=UserWarning)
def evaluate(self):
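        # Lay out a 3-column grid per sample (and per slice):
        # input, translated image, and cycle-reconstructed image.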
batch_x = self._iterators['testA'].next()
batch_y = self._iterators['testB'].next()
models = self._targets
if self.eval_hook:
self.eval_hook(self)
fig = plt.figure(figsize=(9, 3 * self.num_s*(len(batch_x)+ len(batch_y))))
gs = gridspec.GridSpec( self.num_s*(len(batch_x)+ len(batch_y)), 3, wspace=0.1, hspace=0.1)
x = Variable(self.converter(batch_x, self.device))
y = Variable(self.converter(batch_y, self.device))
with chainer.using_config('train', False):
with chainer.function.no_backprop_mode():
if len(models)>2:
x_y = models['dec_y'](models['enc_x'](x))
#x_y_x = models['dec_x'](models['enc_x'](x)) ## X => Z => X
x_y_x = models['dec_x'](models['enc_y'](x_y)) ## X => Y => X
else:
x_y = models['gen_g'](x)
x_y_x = models['gen_f'](x_y)
# for i, var in enumerate([x, x_y]):
for i, var in enumerate([x, x_y, x_y_x]):
imgs = postprocess(var).astype(np.float32)
for j in range(len(imgs)):
                if self.slice is not None:
for k in self.slice:
ax = fig.add_subplot(gs[j*len(self.slice)+k,i])
ax.imshow(imgs[j,:,:,k], interpolation='none',cmap='gray',vmin=0,vmax=1)
ax.set_xticks([])
ax.set_yticks([])
else:
ax = fig.add_subplot(gs[j,i])
ax.imshow(imgs[j], interpolation='none',vmin=0,vmax=1)
ax.set_xticks([])
ax.set_yticks([])
with chainer.using_config('train', False):
with chainer.function.no_backprop_mode():
if len(models)>2:
y_x = models['dec_x'](models['enc_y'](y))
#y_x_y = models['dec_y'](models['enc_y'](y)) ## Y => Z => Y
y_x_y = models['dec_y'](models['enc_x'](y_x)) ## Y => X => Y
else: # (gen_g, gen_f)
y_x = models['gen_f'](y)
y_x_y = models['gen_g'](y_x)
# for i, var in enumerate([y, y_y]):
for i, var in enumerate([y, y_x, y_x_y]):
imgs = postprocess(var).astype(np.float32)
for j in range(len(imgs)):
                if self.slice is not None:
for k in self.slice:
ax = fig.add_subplot(gs[(j+len(batch_x))*len(self.slice)+k,i])
ax.imshow(imgs[j,:,:,k], interpolation='none',cmap='gray',vmin=0,vmax=1)
ax.set_xticks([])
ax.set_yticks([])
else:
ax = fig.add_subplot(gs[j+len(batch_x),i])
ax.imshow(imgs[j], interpolation='none',vmin=0,vmax=1)
ax.set_xticks([])
ax.set_yticks([])
gs.tight_layout(fig)
plt.savefig(os.path.join(self.vis_out,'count{:0>4}.jpg'.format(self.count)), dpi=200)
self.count += 1
plt.close()
cycle_y_l1 = F.mean_absolute_error(y,y_x_y)
# cycle_y_l2 = F.mean_squared_error(y,y_x_y)
cycle_x_l1 = F.mean_absolute_error(x,x_y_x)
# id_xy_grad = losses.loss_grad(x,x_y)
result = {"myval/cycle_y_l1":cycle_y_l1, "myval/cycle_x_l1":cycle_x_l1}
return result
## obsolete: unused stub kept for reference; the extension below is a no-op
def visualize(models,test_image_folder, test_A_iter, test_B_iter):
@chainer.training.make_extension()
def visualization(trainer):
updater = trainer.updater
return visualization
| 39.290598 | 99 | 0.530998 | 636 | 4,597 | 3.630503 | 0.238994 | 0.019922 | 0.015591 | 0.010394 | 0.486358 | 0.438718 | 0.426159 | 0.408835 | 0.388913 | 0.320052 | 0 | 0.015489 | 0.325865 | 4,597 | 116 | 100 | 39.62931 | 0.72959 | 0.080052 | 0 | 0.326316 | 0 | 0 | 0.042766 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.105263 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6101e8e012fece4c920c8244350e3a04fbec14a7 | 4,469 | py | Python | perfkitbenchmarker/linux_packages/memcached_server.py | pierre-emmanuelJ/PerfKitBenchmarker | 3ef6acfd54d4e3d1f074ef40b3fc5b3a3f855f69 | [
"Apache-2.0"
] | 1 | 2016-12-07T19:49:58.000Z | 2016-12-07T19:49:58.000Z | perfkitbenchmarker/linux_packages/memcached_server.py | pierre-emmanuelJ/PerfKitBenchmarker | 3ef6acfd54d4e3d1f074ef40b3fc5b3a3f855f69 | [
"Apache-2.0"
] | null | null | null | perfkitbenchmarker/linux_packages/memcached_server.py | pierre-emmanuelJ/PerfKitBenchmarker | 3ef6acfd54d4e3d1f074ef40b3fc5b3a3f855f69 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing memcached server installation and cleanup functions."""
import logging
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import INSTALL_DIR
FLAGS = flags.FLAGS
DOWNLOAD_URL = 'http://memcached.org/files/memcached-1.4.33.tar.gz'
MEMCACHED_DIR_NAME = 'memcached'
MEMCACHED_DIR = '%s/%s' % (INSTALL_DIR, MEMCACHED_DIR_NAME)
MEMCACHED_PORT = 11211
flags.DEFINE_integer('memcached_size_mb', 64,
'Size of memcached cache in megabytes.')
def _Install(vm):
"""Installs the memcached server on the VM."""
vm.Install('build_tools')
vm.Install('event')
vm.RemoteCommand('cd {0} && wget {1} -O memcached.tar.gz'.format(
INSTALL_DIR, DOWNLOAD_URL))
out, _ = vm.RemoteCommand('cd %s && tar -xzvf memcached.tar.gz' % INSTALL_DIR)
# The directory name should be the first line of stdout
memcached_dir = out.split('\n', 1)[0]
# Rename the directory to a standard name
vm.RemoteCommand('cd {0} && mv {1} {2}'.format(
INSTALL_DIR, memcached_dir, MEMCACHED_DIR_NAME))
# Make memcached
vm.RemoteCommand('cd {0} && ./configure && make'.format(MEMCACHED_DIR))
def YumInstall(vm):
"""Installs the memcache package on the VM."""
_Install(vm)
def AptInstall(vm):
"""Installs the memcache package on the VM."""
_Install(vm)
@vm_util.Retry(poll_interval=5, timeout=300,
               retryable_exceptions=(errors.Resource.RetryableCreationError,))
def _WaitForServerUp(server):
"""Block until the memcached server is up and responsive.
Will timeout after 5 minutes, and raise an exception. Before the timeout
expires any exceptions are caught and the status check is retried.
We check the status of the server by issuing a 'stats' command. This should
return many lines of form 'STAT <name> <value>\\r\\n' if the server is up and
running.
Args:
server: VirtualMachine memcached has been installed on.
Raises:
errors.Resource.RetryableCreationError when response is not as expected or
if there is an error connecting to the port or otherwise running the
remote check command.
"""
address = server.internal_ip
port = MEMCACHED_PORT
logging.info("Trying to connect to memcached at %s:%s", address, port)
try:
out, _ = server.RemoteCommand(
'(echo -e "stats\n" ; sleep 1)| netcat %s %s' % (address, port))
if out.startswith('STAT '):
logging.info("memcached server stats received. Server up and running.")
return
except errors.VirtualMachine.RemoteCommandError as e:
raise errors.Resource.RetryableCreationError(
"memcached server not up yet: %s." % str(e))
else:
raise errors.Resource.RetryableCreationError(
"memcached server not up yet. Expected 'STAT' but got '%s'." % out)
def ConfigureAndStart(server):
"""Prepare the memcached server on a VM.
Args:
server: VirtualMachine to install and start memcached on.
"""
server.Install('memcached_server')
for scratch_disk in server.scratch_disks:
server.RemoteCommand('sudo umount %s' % scratch_disk.mount_point)
server.RemoteCommand('cd {mcdir}; ./memcached -m {size} '
'&> /dev/null &'.format(
mcdir=MEMCACHED_DIR, size=FLAGS.memcached_size_mb))
_WaitForServerUp(server)
logging.info("memcached server configured and started.")
def StopMemcached(server):
out, _ = server.RemoteCommand(
'(echo -e "quit\n" ; sleep 1)| netcat %s %s' %
(server.internal_ip, MEMCACHED_PORT))
def FlushMemcachedServer(ip, port):
vm_util.IssueCommand(
'(echo -e "flush_all\n" ; sleep 1)| netcat %s %s' % (ip, port))
def Uninstall(vm):
vm.RemoteCommand('pkill memcached')
vm.RemoteCommand('rm -rf %s' % MEMCACHED_DIR)
| 33.601504 | 80 | 0.707765 | 601 | 4,469 | 5.179701 | 0.382696 | 0.043367 | 0.021844 | 0.017347 | 0.101189 | 0.083842 | 0.069386 | 0.069386 | 0.069386 | 0.028269 | 0 | 0.009655 | 0.188857 | 4,469 | 132 | 81 | 33.856061 | 0.849103 | 0.358022 | 0 | 0.095238 | 0 | 0 | 0.258516 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.126984 | false | 0 | 0.079365 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6101fbe36a07e3eb66e44044a1570bf0f15fcbb4 | 582 | py | Python | tests/test_test_framework.py | mvaleev/asyncpgsa | 19b6b9f49cd8a6e63c79695fcb995a59964f694e | [
"Apache-2.0"
] | 419 | 2016-07-22T20:08:05.000Z | 2022-03-03T14:39:28.000Z | tests/test_test_framework.py | mvaleev/asyncpgsa | 19b6b9f49cd8a6e63c79695fcb995a59964f694e | [
"Apache-2.0"
] | 89 | 2016-09-16T17:28:14.000Z | 2021-04-30T08:16:47.000Z | tests/test_test_framework.py | mvaleev/asyncpgsa | 19b6b9f49cd8a6e63c79695fcb995a59964f694e | [
"Apache-2.0"
] | 63 | 2016-08-05T15:46:24.000Z | 2022-03-31T13:33:54.000Z | # Testing our tests!!
from asyncpgsa.testing import MockPG
async def test_use_fetchrow():
pg = MockPG()
pg.set_database_results({'sqrt': 3})
result = await pg.fetchrow('SELECT * FROM sqrt(16);')
assert result['sqrt'] == 3
async def test_use_fetchval():
pg = MockPG()
pg.set_database_results(3)
result = await pg.fetchval('SELECT * FROM sqrt(16);')
assert result == 3
async def test_use_fetch():
pg = MockPG()
pg.set_database_results([{'sqrt': 3}])
result = await pg.fetch('SELECT * FROM sqrt(16);')
assert result[0]['sqrt'] == 3
| 25.304348 | 57 | 0.646048 | 82 | 582 | 4.439024 | 0.317073 | 0.054945 | 0.098901 | 0.123626 | 0.648352 | 0.56044 | 0.252747 | 0.252747 | 0.252747 | 0.252747 | 0 | 0.027957 | 0.201031 | 582 | 22 | 58 | 26.454545 | 0.754839 | 0.032646 | 0 | 0.1875 | 0 | 0 | 0.151515 | 0 | 0 | 0 | 0 | 0 | 0.1875 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
610352de23c24c1211593fc045bfabda52ab33ba | 3,784 | py | Python | tests.py | dantheta/norm | 0048dc66686e24d08ae3d01fda8d719abc09f276 | [
"BSD-3-Clause"
] | null | null | null | tests.py | dantheta/norm | 0048dc66686e24d08ae3d01fda8d719abc09f276 | [
"BSD-3-Clause"
] | null | null | null | tests.py | dantheta/norm | 0048dc66686e24d08ae3d01fda8d719abc09f276 | [
"BSD-3-Clause"
] | null | null | null |
import NORM
import NORM.utils
import psycopg2
import unittest
import logging
logging.basicConfig(level = logging.WARN)
class Person(NORM.DBObject):
TABLE = 'people'
FIELDS = ['firstname','surname','age']
class FakeCursor(object):
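    # Minimal psycopg2-cursor stand-in: records executed SQL on the fake
    # connection and returns canned rows for SELECT/INSERT statements.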
def __init__(self, conn):
self.conn = conn
def execute(self, sql, args = []):
logging.info("%s: %s", sql, args)
self.conn.append(sql, args)
self.sql = sql
self.args = args
def fetchone(self):
if self.sql.lower().startswith('select'):
if len(self.args) == 0 or self.args[0] == 1:
return {'firstname': 'joe','surname':'bloggs','age':27,'id':1}
else:
return {'firstname': 'jason','surname':'connery','age':52,'id':2}
elif self.sql.lower().startswith('insert'):
return {'newid': 2}
def __iter__(self):
yield self.fetchone()
def close(self):
pass
class FakeConnection(object):
def __init__(self):
self.statements = []
def cursor(self, cursor_factory = None):
return FakeCursor(self)
def append(self, sql, args):
self.statements.append( (sql, args) )
class NormTest(unittest.TestCase):
def setUp(self):
self.conn = FakeConnection()
#self.conn = psycopg2.connect('dbname=normtest')
def testDelete(self):
person = Person(self.conn, 2)
person.delete()
self.assertEquals(
self.conn.statements[-1],
('delete from people where id = %s', [2])
)
def testLoad(self):
person = Person(self.conn, 1)
self.assertEquals(person['firstname'], 'joe')
self.assertEquals(person['surname'], 'bloggs')
self.assertEquals(person['age'], 27)
self.assertEquals(person['id'], 1)
if hasattr(self.conn, 'statements'):
self.assertEquals(
self.conn.statements[-1],
('select * from people where id = %s', [1])
)
def testLimit(self):
people = Person.select_all(self.conn, _limit = 10)
self.assertIn(' LIMIT 10', self.conn.statements[-1][0])
person = Person.select_all(self.conn, _limit = (10, 10))
self.assertIn(' LIMIT 10 OFFSET 10', self.conn.statements[-1][0])
def testSelect(self):
people = Person.select_all(self.conn)
self.assertEquals(len(people), 1)
if hasattr(self.conn, 'statements'):
self.assertEquals(
self.conn.statements[-1],
('select * from people', [])
)
def testUpdate(self):
person = Person(self.conn, 1)
person['age'] = 28
person.store()
if hasattr(self.conn, 'statements'):
sql, args = self.conn.statements[-1]
self.assertIn('age = %(age)s', sql)
self.assertIn('firstname = %(firstname)s', sql)
self.assertIn('surname = %(surname)s', sql)
self.assertEquals(args,
{'firstname': 'joe','surname':'bloggs','age':28,'id':1}
)
def testCreate(self):
person = Person(self.conn)
person.update({
'firstname': 'jason',
'surname':'connery',
'age':52,
})
person.store()
if hasattr(self.conn, 'statements'):
sql, args = self.conn.statements[-1]
self.assertRegexpMatches(sql, '^insert into people')
self.assertIn('age', sql)
self.assertIn('firstname', sql)
self.assertIn('surname', sql)
self.assertIn('%(age)s', sql)
self.assertIn('%(firstname)s', sql)
self.assertIn('%(surname)s', sql)
self.assertIn('returning id as newid', sql)
self.assertEquals(args, {
'firstname': 'jason',
'surname': 'connery',
'age': 52,
'id': None,
})
self.assertEquals(person['id'], 2)
class UtilsTest(unittest.TestCase):
def testEncodeWhere(self):
wherestr, args = NORM.utils.encode_where({'age': 20})
self.assertEquals(wherestr, 'age = %(age)s')
self.assertIn('age', args)
self.assertEquals(args['age'], 20)
def testEncodeWhereCmp(self):
wherestr, args = NORM.utils.encode_where({'age' : ('>', 20)})
self.assertEquals(wherestr, 'age > %(age)s')
        self.assertIn('age', args)
self.assertEquals(args['age'], 20)
if __name__ == '__main__':
    unittest.main()
| 24.101911 | 69 | 0.656184 | 490 | 3,784 | 5.026531 | 0.195918 | 0.074706 | 0.08039 | 0.053999 | 0.481527 | 0.394641 | 0.2838 | 0.213561 | 0.213561 | 0.213561 | 0 | 0.018886 | 0.160412 | 3,784 | 156 | 70 | 24.25641 | 0.756374 | 0.012421 | 0 | 0.235294 | 0 | 0 | 0.15743 | 0 | 0 | 0 | 0 | 0 | 0.252101 | 1 | 0.142857 | false | 0.008403 | 0.042017 | 0.008403 | 0.277311 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61037aee09d2dd1ca60025b574f0aaaa3bfd465f | 4,012 | py | Python | logger/tensorboard_logger.py | system123/SOMatch | 6f10cf28f506998a5e430ccd3faab3076fe350d5 | [
"MIT"
] | 22 | 2020-09-25T05:10:57.000Z | 2022-03-16T08:16:00.000Z | logger/tensorboard_logger.py | system123/SOMatch | 6f10cf28f506998a5e430ccd3faab3076fe350d5 | [
"MIT"
] | 14 | 2020-10-09T14:12:08.000Z | 2021-05-18T12:55:18.000Z | logger/tensorboard_logger.py | system123/SOMatch | 6f10cf28f506998a5e430ccd3faab3076fe350d5 | [
"MIT"
] | 15 | 2020-11-02T02:01:58.000Z | 2022-03-30T08:00:17.000Z | import os
import torch
import numpy as np
import torchvision.utils as vutils
from tensorboardX import SummaryWriter
from datetime import datetime
from utils.helpers import get_learning_rate
class TensorboardLogger:
def __init__(self, log_every=10, log_params=False, log_dir=None, log_images=False, log_grads=False, **kwargs):
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
self.log_dir = os.path.join(log_dir, "runs", current_time)
self.writer = SummaryWriter(log_dir=self.log_dir)
self.counters = {"evaluate": 0, "train": 0, "test": 0}
self.epochs = {"evaluate": 0, "train": 0, "test": 0}
self.log_every = log_every
self.log_params = log_params if isinstance(log_params, bool) else False
self.log_images = log_images if isinstance(log_images, bool) else False
self.log_grads = log_grads if isinstance(log_grads, bool) else False
print(f"Logger: Log parameters={log_params}, Log gradients={log_grads}")
# def state_dict(self):
# state = {}
# state['counters'] = self.counters
# state['epochs'] = self.epochs
# return {'state': state}
def fast_forward(self, last_epoch=0, step_per_epoch=0):
step = (last_epoch+1)*step_per_epoch
self.counters = {"evaluate": step, "train": step, "test": step}
self.epochs = {"evaluate": last_epoch+1, "train": last_epoch+1, "test": last_epoch+1}
def teardown(self):
self.writer.export_scalars_to_json(os.path.join(self.log_dir, "all_scalars.json"))
self.writer.close()
def add_embedding(self, features, images, phase="train", stage="epoch"):
step = self.epochs[phase] if stage == "epoch" else self.counters[phase]
self.writer.add_embedding(features, label_img=images, global_step=step)
def _plot_metrics(self, metrics, phase, step):
for m_name, m_val in metrics.items():
self.writer.add_scalar("{}/{}".format(phase, m_name), m_val, step)
def log_gradients(self, tag, model, phase="train", log_every=1000):
if (self.log_grads is True) and (self.counters[phase] % log_every == 0):
for name, param in model.named_parameters():
if param.grad is not None:
self.writer.add_histogram("{}_{}".format(tag, name), param.grad.data.cpu().numpy(), self.counters[phase])
def log_preactivations(self, module, phase="train"):
classname = module.__class__.__name__
def _log_preactivations(input, output):
self.writer.add_histogram("{}_{}".format(classname, "forward"), output.data.cpu().numpy(), self.counters[phase])
if classname.find('Conv') != -1 or classname.find('Linear') != -1:
module.register_forward_hook(_log_preactivations)
def log_image_grid(self, name, images, phase="train", normalize=True):
if self.log_images is True:
x_rg = vutils.make_grid(images, normalize=normalize, scale_each=True)
self.writer.add_image(name, x_rg, self.counters[phase])
    # Method missing: delegate unknown attribute lookups to the SummaryWriter
def __getattr__(self, method_name):
log_fn = getattr(self.writer, method_name, None)
if log_fn:
return log_fn
else:
raise AttributeError(method_name)
def log_iteration(self, engine, phase="train", models=None, optims=None):
# other_metrics = {}
if optims:
for name, optim in optims.items():
lr = get_learning_rate(optim)[0]
self.writer.add_scalar("{}/{}_lr".format(phase, name), lr, self.counters[phase])
if self.counters[phase] % self.log_every == 0:
self._plot_metrics(engine.state.metrics, phase, self.counters[phase])
# self._plot_metrics(other_metrics, phase, self.counters[phase])
self.counters[phase] += 1
def log_epoch(self, engine, phase="train", models=None, optims=None):
self._plot_metrics(engine.state.metrics, phase, self.counters[phase])
if phase == "train" and self.log_params is True:
for m_name, model in models.items():
for name, param in model.named_parameters():
self.writer.add_histogram("{}_{}".format(m_name, name), param.data.cpu().numpy(), self.epochs[phase])
if phase == "evaluate":
self.epochs[phase] += 1
else:
self.epochs[phase] = engine.state.epoch | 38.951456 | 115 | 0.720588 | 593 | 4,012 | 4.671164 | 0.241147 | 0.06065 | 0.067509 | 0.030325 | 0.189531 | 0.144765 | 0.110469 | 0.068592 | 0.039711 | 0.039711 | 0 | 0.007184 | 0.132602 | 4,012 | 103 | 116 | 38.951456 | 0.788793 | 0.068046 | 0 | 0.084507 | 0 | 0 | 0.069954 | 0.012061 | 0 | 0 | 0 | 0 | 0 | 1 | 0.169014 | false | 0 | 0.098592 | 0 | 0.295775 | 0.014085 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6103dca99223e2064971d08bcfcec2f45746107b | 870 | py | Python | challenges/week_1/bus_fare_challenge.py | sling254/python | c49c2c63a5fe92f07d24bbb28c3a176d516816da | [
"MIT"
] | null | null | null | challenges/week_1/bus_fare_challenge.py | sling254/python | c49c2c63a5fe92f07d24bbb28c3a176d516816da | [
"MIT"
] | null | null | null | challenges/week_1/bus_fare_challenge.py | sling254/python | c49c2c63a5fe92f07d24bbb28c3a176d516816da | [
"MIT"
] | null | null | null | # WRITE YOUR CODE SOLUTION HERE
from datetime import datetime
# Get today's date and store it in the variable 'date'
date = datetime.now()
"""
Use today's date to get the name of the day of the week in short form with
the first letter capitalized (e.g. 'Fri' if today were Friday), and assign
it to the variable 'day'.
"""
day = datetime.date(date).strftime('%a')
"""
Use an if statement to determine today's fare following this bus fare schedule:
Monday - Friday --> 100
Saturday --> 60
Sunday --> 80
Print the results in this exact format:
Date: 2021-01-05
Day:Tue
Fare:100
"""
if day == "Mon" or day == "Tue" or day == "Wen" or day =="Thu" or day == "Fri":
fare = 100
elif day == "Sat":
fare = 60
else:
fare = 80
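# A table-driven alternative (hypothetical sketch):
# fare = {"Sat": 60, "Sun": 80}.get(day, 100)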
print("Date:", date.date())
print("Day:" + day)
print("Fare:", fare)
| 20.714286 | 80 | 0.636782 | 136 | 870 | 4.073529 | 0.544118 | 0.057762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037707 | 0.237931 | 870 | 41 | 81 | 21.219512 | 0.797888 | 0.089655 | 0 | 0 | 0 | 0 | 0.096317 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6103de3de6f757d0d0039c05b3e7ed32ecf1a76c | 572 | py | Python | TaskManager/forms.py | farahaulita/pbp-tk | fabf8e07ed0e1270d3e98a3d1bdd46267a1a4d6c | [
"Unlicense"
] | null | null | null | TaskManager/forms.py | farahaulita/pbp-tk | fabf8e07ed0e1270d3e98a3d1bdd46267a1a4d6c | [
"Unlicense"
] | null | null | null | TaskManager/forms.py | farahaulita/pbp-tk | fabf8e07ed0e1270d3e98a3d1bdd46267a1a4d6c | [
"Unlicense"
] | null | null | null | from django.forms import ModelForm
from django import forms
from login.models import Task, Submissions
class DateTimeInput(forms.DateTimeInput):
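    # Render as an HTML5 <input type="datetime-local"> so browsers provide a
    # native date/time picker.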
input_type = 'datetime-local'
input_value = ""
class AddTaskForm(ModelForm):
class Meta:
model = Task
fields = ['Name', 'Description', 'deadline']
widgets = {
'deadline' : DateTimeInput(),
}
class GraderForm(ModelForm):
class Meta:
model = Submissions
fields = ['comment', 'nilai'] | 26 | 58 | 0.655594 | 59 | 572 | 6.322034 | 0.525424 | 0.080429 | 0.096515 | 0.123324 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.243007 | 572 | 22 | 59 | 26 | 0.861432 | 0 | 0 | 0.111111 | 0 | 0 | 0.099476 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.611111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61044666403f7fed0ad63dd4accb5ea22bf27e14 | 12,740 | py | Python | spira/yevon/geometry/nets/net.py | qedalab/spira | 32e4d2096e298b9fcc5952abd654312dc232a259 | [
"MIT"
] | 10 | 2018-07-13T09:46:21.000Z | 2021-06-22T13:34:50.000Z | spira/yevon/geometry/nets/net.py | qedalab/spira | 32e4d2096e298b9fcc5952abd654312dc232a259 | [
"MIT"
] | 8 | 2018-09-09T11:32:40.000Z | 2019-10-08T07:47:31.000Z | spira/yevon/geometry/nets/net.py | qedalab/spira | 32e4d2096e298b9fcc5952abd654312dc232a259 | [
"MIT"
] | 7 | 2019-01-17T18:50:17.000Z | 2022-01-13T20:27:52.000Z | import numpy as np
import networkx as nx
from copy import deepcopy
from spira.core.parameters.variables import GraphParameter, StringParameter
from spira.core.parameters.descriptor import Parameter, RestrictedParameter
from spira.yevon.geometry.coord import Coord
from spira.yevon.vmodel.geometry import GeometryParameter
from spira.yevon.geometry.ports.base import __Port__
from spira.core.parameters.restrictions import RestrictType
from spira.yevon.process import get_rule_deck
RDD = get_rule_deck()
__all__ = ['Net', 'NetParameter']
ELM_TYPE = {1: 'line', 2: 'triangle'}
from spira.core.transformable import Transformable
from spira.core.parameters.initializer import ParameterInitializer
class __Net__(Transformable, ParameterInitializer):
""" """
@property
def count(self):
return nx.number_of_nodes(self.g)
class Net(__Net__):
"""
Constructs a graph from the physical geometry
generated from the list of elements.
"""
# g = GraphParameter()
g = Parameter()
mesh_data = Parameter(fdef_name='create_mesh_data')
geometry = GeometryParameter(allow_none=True, default=None)
branch_nodes = Parameter(fdef_name='create_branch_nodes')
lines = Parameter(fdef_name='create_lines')
triangles = Parameter(fdef_name='create_triangles')
physical_triangles = Parameter(fdef_name='create_physical_triangles')
physical_lines = Parameter(fdef_name='create_physical_lines')
name = StringParameter(default='no_name')
def __init__(self, **kwargs):
super().__init__(**kwargs)
if 'g' in kwargs:
self.g = kwargs['g']
else:
self.g = nx.Graph()
self._generate_mesh_graph()
def __repr__(self):
if self.geometry is None:
class_string = "[SPiRA: Net] (name \'{}\', nodes {})"
return class_string.format(self.name, self.count)
else:
class_string = "[SPiRA: Net] (name \'{}\', nodes {}, geometry {})"
return class_string.format(self.name, self.count, self.geometry.process.symbol)
def __str__(self):
return self.__repr__()
def _generate_mesh_graph(self):
""" Create a graph from the meshed geometry. """
ll = len(self.mesh_data.points)
A = np.zeros((ll, ll), dtype=np.int64)
for n, triangle in enumerate(self.triangles):
self._add_edges(n, triangle, A)
for n, triangle in enumerate(self.triangles):
self._add_positions(n, triangle)
def _add_edges(self, n, tri, A):
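        # Two triangles sharing a mesh edge become connected graph nodes;
        # A is a vertex-adjacency matrix used to detect the shared edges.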
def update_adj(self, t1, adj_mat, v_pair):
if (adj_mat[v_pair[0]][v_pair[1]] != 0):
t2 = adj_mat[v_pair[0]][v_pair[1]] - 1
self.g.add_edge(t1, t2, label=None)
else:
adj_mat[v_pair[0]][v_pair[1]] = t1 + 1
adj_mat[v_pair[1]][v_pair[0]] = t1 + 1
v1 = [tri[0], tri[1], tri[2]]
v2 = [tri[1], tri[2], tri[0]]
for v_pair in list(zip(v1, v2)):
update_adj(self, n, A, v_pair)
def _add_positions(self, n, triangle):
from spira import settings
pp = self.mesh_data.points
grids_per_unit = settings.get_grids_per_unit()
n1, n2, n3 = pp[triangle[0]], pp[triangle[1]], pp[triangle[2]]
x = (n1[0] + n2[0] + n3[0]) / 3
y = (n1[1] + n2[1] + n3[1]) / 3
x = x * grids_per_unit
y = y * grids_per_unit
self.g.node[n]['vertex'] = triangle
self.g.node[n]['position'] = Coord(x, y)
self.g.node[n]['display'] = RDD.DISPLAY.STYLE_SET[RDD.PLAYER.METAL]
def create_mesh_data(self):
return self.geometry.mesh_data
def add_new_node(self, n, D, polygon, position, display):
num = self.g.number_of_nodes()
self.g.add_node(num+1, position=position, device_reference=D, process_polygon=polygon, display=display)
self.g.add_edge(n, num+1)
def create_triangles(self):
if 'triangle' not in self.mesh_data.cells:
raise ValueError('Triangle not found in cells')
return self.mesh_data.cells['triangle']
def create_lines(self):
if 'line' not in self.mesh_data.cells:
raise ValueError('Line not found in cells')
return self.mesh_data.cells['line']
def create_physical_triangles(self):
if 'triangle' not in self.mesh_data.cell_data:
raise ValueError('Triangle not in meshio cell_data')
if 'gmsh:physical' not in self.mesh_data.cell_data['triangle']:
raise ValueError('Physical not found in meshio triangle')
return self.mesh_data.cell_data['triangle']['gmsh:physical'].tolist()
def create_physical_lines(self):
if 'line' not in self.mesh_data.cell_data:
raise ValueError('Line not in meshio cell_data')
if 'gmsh:physical' not in self.mesh_data.cell_data['line']:
raise ValueError('Physical not found in meshio triangle')
return self.mesh_data.cell_data['line']['gmsh:physical'].tolist()
def process_triangles(self):
"""
Arguments
---------
tri : list
The surface_id of the triangle
corresponding to the index value.
name -> 5_0_1 (layer_datatype_polyid)
value -> [1 2] (1=surface_id 2=triangle)
"""
triangles = {}
for name, value in self.mesh_data.field_data.items():
for n in self.g.nodes():
surface_id = value[0]
if self.physical_triangles[n] == surface_id:
layer = int(name.split('_')[0])
datatype = int(name.split('_')[1])
key = (layer, datatype)
if key in triangles:
triangles[key].append(n)
else:
triangles[key] = [n]
return triangles
def process_lines(self):
"""
Arguments
---------
tri : list
The surface_id of the triangle
corresponding to the index value.
name -> 5_0_1 (layer_datatype_polyid)
value -> [1 2] (1=surface_id 2=triangle)
"""
lines = {}
for name, value in self.mesh_data.field_data.items():
# print(name, value)
# print(self.physical_lines)
for n in self.physical_lines:
line_id = value[0]
if n == line_id:
# print(name)
# print(value)
# print('')
polygon_string = name.split('*')[0]
polygon_hash = name.split('*')[1]
polygon_uid = int(name.split('*')[2])
key = (polygon_string, polygon_hash, polygon_uid)
if key in lines:
lines[key].append(n)
else:
lines[key] = [n]
return lines
def get_triangles_connected_to_line(self):
"""
Labeling of an edge line:
polygon_uid_i [line elm_type]
[SPiRA: Polygon 'M5']_17_0 [2 1]
Labeling of triangle:
layer datatype [triangle elm_type]
50_1_0_0 [1 2]
"""
# lines = []
# for v in self.process_lines().values():
# lines.extend(v)
# print(lines)
# triangles = {}
# for n in nodes:
# for node, triangle in enumerate(self.triangles):
# if n == node:
# triangles[n] = triangle
# return triangles
def triangle_nodes(self):
""" Get triangle field_data in list form. """
nodes = []
for v in self.process_triangles().values():
nodes.extend(v)
triangles = {}
for n in nodes:
for node, triangle in enumerate(self.triangles):
if n == node:
triangles[n] = triangle
return triangles
def transform(self, transformation):
for n in self.g.nodes():
self.g.node[n]['position'] = transformation.apply_to_coord(self.g.node[n]['position'])
return self
def create_branch_nodes(self):
""" Nodes that defines different conducting branches. """
from spira.yevon.gdsii.sref import SRef
from spira.yevon.geometry.ports import Port
branch_nodes = list()
for n in self.g.nodes():
if 'device_reference' in self.g.node[n]:
D = self.g.node[n]['device_reference']
if isinstance(D, SRef):
branch_nodes.append(n)
if isinstance(D, Port):
branch_nodes.append(n)
return branch_nodes
def st_nodes(self):
""" Nodes that defines different conducting branches.
All nodes are ports. Chek port purposes.
"""
from spira.yevon.gdsii.sref import SRef
from spira.yevon.geometry.ports import Port
branch_nodes = list()
for n in self.g.nodes():
if 'device_reference' in self.g.node[n]:
D = self.g.node[n]['device_reference']
P = self.g.node[n]['process_polygon']
# FIXME: Maybe implement node operators (__and__, etc)
# if (D.purpose.symbol == 'B') and (P.layer.purpose.symbol == 'DEVICE_METAL'):
# branch_nodes.append(n)
if D.purpose.symbol == 'C':
branch_nodes.append(n)
elif D.purpose.symbol == 'D':
branch_nodes.append(n)
# elif D.purpose.symbol == 'P':
# branch_nodes.append(n)
elif D.purpose.symbol == 'T':
branch_nodes.append(n)
# elif (D.purpose.symbol == 'P') and (D.name[1] != 'E'):
# branch_nodes.append(n)
return branch_nodes
def convert_to_branch_node(self, n, uid):
pass
def del_branch_attrs(self):
""" Reset the branch attrs for new branch node creation. """
for n in self.g.nodes():
if 'branch_node' in self.g.node[n]:
del self.g.node[n]['branch_node']
return self
def convert_pins(self):
""" Remove pin node attrs with more than 1 edge connected to it. """
for n in self.g.nodes():
if 'device_reference' in self.g.node[n]:
D = self.g.node[n]['device_reference']
if D.purpose.symbol == 'P':
if len(self.g.edges(n)) > 0:
del self.g.node[n]['device_reference']
return self
def convert_device(self):
""" Convert a device metal node to a dummy port.
Has to be connected to atleast 1 PEdge node. """
from spira.yevon.geometry.ports import Port
for n in self.g.nodes():
convert = False
P = self.g.node[n]['process_polygon']
if P.layer.purpose.symbol == 'DEVICE_METAL':
for i in self.g.neighbors(n):
if 'device_reference' in self.g.node[i]:
D = self.g.node[i]['device_reference']
# print(D)
if D.purpose.symbol == 'P':
convert = True
if convert is True:
port = Port(
name='Djj{}'.format(n),
midpoint=P.center,
process=P.layer.process,
)
self.g.node[n]['device_reference'] = port
return self
def remove_nodes(self):
"""
Nodes to be removed:
1. Are not a branch node.
2. Are not a device node.
3. Branch nodes must equal the branch id.
"""
from spira.yevon.gdsii.sref import SRef
from spira.yevon.geometry.ports import Port
locked_nodes = []
remove_nodes = []
for n in self.g.nodes():
if 'branch_node' in self.g.node[n]:
D = self.g.node[n]['branch_node']
if isinstance(D, Port):
locked_nodes.append(n)
elif 'device_reference' in self.g.node[n]:
D = self.g.node[n]['device_reference']
if isinstance(D, (Port, SRef)):
locked_nodes.append(n)
for n in self.g.nodes():
if n not in locked_nodes:
remove_nodes.append(n)
self.g.remove_nodes_from(remove_nodes)
def NetParameter(local_name=None, restriction=None, **kwargs):
R = RestrictType(Net) & restriction
return RestrictedParameter(local_name, restriction=R, **kwargs)
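# Illustration of the shared-edge bookkeeping in _add_edges (a minimal sketch
# added for clarity, not part of the library): for triangles t0 = (0, 1, 2)
# and t1 = (1, 2, 3), processing t0 stores t0 + 1 = 1 in A[1][2] and A[2][1];
# when t1 later visits its vertex pair (1, 2), A[1][2] is non-zero, so
# update_adj adds the graph edge (t1, t0). Triangles that share a mesh edge
# therefore become adjacent nodes in the netlist graph.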
| 35.388889 | 111 | 0.557614 | 1,581 | 12,740 | 4.329538 | 0.150538 | 0.030679 | 0.030241 | 0.030679 | 0.443974 | 0.39065 | 0.351497 | 0.332067 | 0.269832 | 0.207597 | 0 | 0.011103 | 0.328414 | 12,740 | 359 | 112 | 35.487465 | 0.78892 | 0.141758 | 0 | 0.267544 | 0 | 0 | 0.081076 | 0.004357 | 0 | 0 | 0 | 0.002786 | 0 | 1 | 0.118421 | false | 0.004386 | 0.087719 | 0.013158 | 0.337719 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6106d1e77ba2c189d3335415eaec9708cfc5663a | 337 | py | Python | main.py | vsalvino/pyinstaller-demo | 0abfd197bb5aaafc894d3f48848d2c919ad62792 | [
"Unlicense"
] | null | null | null | main.py | vsalvino/pyinstaller-demo | 0abfd197bb5aaafc894d3f48848d2c919ad62792 | [
"Unlicense"
] | null | null | null | main.py | vsalvino/pyinstaller-demo | 0abfd197bb5aaafc894d3f48848d2c919ad62792 | [
"Unlicense"
] | null | null | null | """
Runs list_files on the current directory (".")
"""
from util import list_files
def main() -> None:
    path = "."
    files = list_files(path)
    for f in files:
        print(
            "d" if f.isdir else "f",
            f" {f.human_readable_bytes:<12}",
            f.path
        )


if __name__ == "__main__":
    main()
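# Assumed interface (a sketch; the project-local `util` module is not shown
# in this snapshot): list_files is expected to yield objects exposing .path,
# .isdir and .human_readable_bytes, roughly like:
#
#     @dataclass
#     class FileInfo:
#         path: str
#         isdir: bool
#         human_readable_bytes: str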
| 16.047619 | 46 | 0.51632 | 43 | 337 | 3.744186 | 0.627907 | 0.167702 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009009 | 0.341246 | 337 | 20 | 47 | 16.85 | 0.716216 | 0.136499 | 0 | 0 | 0 | 0 | 0.141343 | 0.09894 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.166667 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6107e1c219772ea1245d3f4b2f2a7463443f4c29 | 11,846 | py | Python | bin/NormalizeReadCounts.py | DSchreyer/crisprquant | ffebb979064fed2d4f65ce6dc1c703b829ff23e7 | [
"MIT"
] | 1 | 2021-03-19T09:50:48.000Z | 2021-03-19T09:50:48.000Z | bin/NormalizeReadCounts.py | DSchreyer/crisprquant | ffebb979064fed2d4f65ce6dc1c703b829ff23e7 | [
"MIT"
] | 2 | 2021-03-19T09:43:20.000Z | 2021-06-23T07:22:43.000Z | bin/NormalizeReadCounts.py | DSchreyer/crisprquant | ffebb979064fed2d4f65ce6dc1c703b829ff23e7 | [
"MIT"
] | 3 | 2021-03-18T15:03:18.000Z | 2021-06-26T19:09:35.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 13 09:23:51 2017

@author: philipp
"""
# Analyze count distribution
# =======================================================================
# Imports
from __future__ import division # floating point division by default
import sys
import yaml
import os
import glob
import pandas
import scipy.stats.mstats as sc
import numpy
import time


def Normalization():
    # ------------------------------------------------
    # Print header
    # ------------------------------------------------
    print('++++++++++++++++++++++++++++++++++++++++++++++++')
    start = time.time()

    # ------------------------------------------------
    # Get parameters
    # ------------------------------------------------
    configFile = open('configuration.yaml','r')
    config = yaml.safe_load(configFile)
    configFile.close()
    ScriptsDir = config['ScriptsDir']
    sgRNAReadCountDir = config['sgRNAReadCountDir']
    GeneReadCountDir = config['GeneReadCountDir']
    delta = config['delta']
    norm = config['Normalization']
    RoundCount = config['RoundCount']
    NormSuffix = '_normalized.txt'
    N0 = 1000000
    eps = 0.001

    # ------------------------------------------------
    # Get files
    # ------------------------------------------------
    os.chdir(sgRNAReadCountDir)
    FileNames_u = glob.glob('*_GuideCounts.txt')
    colnames_u = ['sgRNA','gene','counts']
    os.chdir(GeneReadCountDir)
    FileNames_g = glob.glob('*_GeneCounts.txt')
    colnames_g = ['gene','counts']

    # ------------------------------------------------
    # Normalization to counts per million
    # ------------------------------------------------
    if norm == 'cpm':
        print('Normalizing to counts per million reads ...')
        # sgRNA counts
        os.chdir(sgRNAReadCountDir)
        for filename in FileNames_u:
            print('Processing file '+filename+' ...')
            GuideCounts = pandas.read_table(filename,sep='\t',names=colnames_u)
            L = len(GuideCounts)
            sgIDs = list(GuideCounts['sgRNA'])
            geneIDs = list(GuideCounts['gene'])
            ReadsPerGuide = list(GuideCounts['counts'])
            N = sum(ReadsPerGuide)
            if RoundCount:
                ReadsPerGuide_0 = [int(numpy.round(ReadsPerGuide[k]/N * N0)) for k in range(L)]
            else:
                ReadsPerGuide_0 = [ReadsPerGuide[k]/N * N0 for k in range(L)]
            GuideCounts0_Filename = filename[0:-4] + NormSuffix
            GuideCounts0 = pandas.DataFrame()
            GuideCounts0['sgID'] = sgIDs
            GuideCounts0['geneID'] = geneIDs
            GuideCounts0['Norm. Read Counts'] = ReadsPerGuide_0
            GuideCounts0.to_csv(GuideCounts0_Filename, sep = '\t', index = False, header = False)
        # gene counts
        os.chdir(GeneReadCountDir)
        for filename in FileNames_g:
            print('Processing file '+filename+' ...')
            GeneCounts = pandas.read_table(filename,sep='\t',names=colnames_g)
            G = len(GeneCounts)
            geneIDs = list(GeneCounts['gene'])
            ReadsPerGene = list(GeneCounts['counts'])
            N = sum(ReadsPerGene)
            if RoundCount:
                ReadsPerGene_0 = [int(numpy.round(ReadsPerGene[j]/N * N0)) for j in range(G)]
            else:
                ReadsPerGene_0 = [ReadsPerGene[j]/N * N0 for j in range(G)]
            GeneCounts0_Filename = filename[0:-4] + NormSuffix
            GeneCounts0 = pandas.DataFrame()
            GeneCounts0['geneID'] = geneIDs
            GeneCounts0['Norm. Read Counts'] = ReadsPerGene_0
            GeneCounts0.to_csv(GeneCounts0_Filename, sep = '\t', index = False, header = False)

    # ------------------------------------------------------------
    # Normalization to mean total read count across replicates
    # ------------------------------------------------------------
    elif norm == 'total':
        print('Normalizing to mean total read count ...')
        os.chdir(sgRNAReadCountDir)
        TotalCounts = list()
        for filename in FileNames_u:
            SampleFile = pandas.read_table(filename, sep='\t',names=colnames_u)
            x = list(SampleFile['counts'])
            TotalCounts.append(numpy.sum(x))
        MeanCount = numpy.mean(TotalCounts)
        # sgRNA counts
        os.chdir(sgRNAReadCountDir)
        for filename in FileNames_u:
            print('Processing file '+filename+' ...')
            GuideCounts = pandas.read_table(filename,sep='\t',names=colnames_u)
            L = len(GuideCounts)
            sgIDs = list(GuideCounts['sgRNA'])
            geneIDs = list(GuideCounts['gene'])
            ReadsPerGuide = list(GuideCounts['counts'])
            N = sum(ReadsPerGuide)
            if RoundCount:
                ReadsPerGuide_0 = [int(numpy.round(ReadsPerGuide[k]/N * MeanCount)) for k in range(L)]
            else:
                ReadsPerGuide_0 = [ReadsPerGuide[k]/N * MeanCount for k in range(L)]
            GuideCounts0_Filename = filename[0:-4] + NormSuffix
            GuideCounts0 = pandas.DataFrame()
            GuideCounts0['sgID'] = sgIDs
            GuideCounts0['geneID'] = geneIDs
            GuideCounts0['Norm. Read Counts'] = ReadsPerGuide_0
            GuideCounts0.to_csv(GuideCounts0_Filename, sep = '\t', index = False, header = False)
        # gene counts
        os.chdir(GeneReadCountDir)
        for filename in FileNames_g:
            print('Processing file '+filename+' ...')
            GeneCounts = pandas.read_table(filename,sep='\t',names=colnames_g)
            G = len(GeneCounts)
            geneIDs = list(GeneCounts['gene'])
            ReadsPerGene = list(GeneCounts['counts'])
            N = sum(ReadsPerGene)
            if RoundCount:
                ReadsPerGene_0 = [int(numpy.round(ReadsPerGene[j]/N * MeanCount)) for j in range(G)]
            else:
                ReadsPerGene_0 = [ReadsPerGene[j]/N * MeanCount for j in range(G)]
            GeneCounts0_Filename = filename[0:-4] + NormSuffix
            GeneCounts0 = pandas.DataFrame()
            GeneCounts0['geneID'] = geneIDs
            GeneCounts0['Norm. Read Counts'] = ReadsPerGene_0
            GeneCounts0.to_csv(GeneCounts0_Filename, sep = '\t', index = False, header = False)

    # ------------------------------------------------------------
    # Normalization by size-factor (Love et al., Genome Biol 2014)
    # ------------------------------------------------------------
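    # Worked illustration of the median-of-ratios idea (hypothetical counts):
    # for guide counts s1 = [100, 200, 400] and s2 = [50, 100, 200], the
    # per-guide geometric means are [~70.7, ~141.4, ~282.8]; every ratio
    # s1/geomean is ~1.41 and every ratio s2/geomean is ~0.71, so the sample
    # size factors (medians of those ratios) are ~1.41 and ~0.71, and dividing
    # each sample by its factor puts both on a common scale.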
    elif norm == 'size':
        print('Normalizing by size-factors ...')
        # Establish data frame
        os.chdir(sgRNAReadCountDir)
        filename = FileNames_u[0]
        SampleFile = pandas.read_table(filename, sep='\t',names=colnames_u)
        sgIDs = list(SampleFile['sgRNA'])
        geneIDs = list(SampleFile['gene'])
        L = len(sgIDs)
        RawCounts = pandas.DataFrame(data = {'sgRNA': [sgIDs[k] for k in range(L)],
                                             'gene': [geneIDs[k] for k in range(L)]},
                                     columns = ['sgRNA','gene'])
        SizeFactors = pandas.DataFrame(data = {'sgRNA': [sgIDs[k] for k in range(L)],
                                               'gene': [geneIDs[k] for k in range(L)]},
                                       columns = ['sgRNA','gene'])
        # Compute geometric means for all sgRNAs
        print('Computing geometric means ...')
        for filename in FileNames_u:
            sample = filename[0:-16]
            SampleFile = pandas.read_table(filename, sep='\t',names=colnames_u)
            x = list(SampleFile['counts'])
            RawCounts[sample] = x
            SizeFactors[sample] = [x[k] if x[k]>0 else x[k]+eps for k in range(L)]
        geomean = [sc.gmean(list(SizeFactors.iloc[k,2:])) for k in range(L)]
        SizeFactors['Geom mean'] = geomean
        # Compute size-factors for each sgRNA and each sample
        print('Computing sgRNA size-factors ...')
        for filename in FileNames_u:
            sample = filename[0:-16]
            x = SizeFactors[sample]
            g0 = SizeFactors['Geom mean']
            x0_k = [x[k]/g0[k] for k in range(L)]
            SizeFactors[sample+' sgRNA size-factors'] = [x0_k[k] for k in range(L)]
        # Compute size-factor for each sample
        print('Computing sample size-factors ...')
        for filename in FileNames_u:
            sample = filename[0:-16]
            SizeFactors[sample+' size-factor'] = numpy.median(SizeFactors[sample+' sgRNA size-factors'])
        # Write size-factor dataframe
        SizeFactors.to_csv('Size-factors.txt',sep='\t',index=False)
        # Write normalized counts dataframe
        print('Writing normalized read counts ...')
        # sgRNA counts
        for filename in FileNames_u:
            sample = filename[0:-16]
            if RoundCount:
                ReadsPerGuide_0 = [int(numpy.round(RawCounts[sample][k]/SizeFactors[sample+' size-factor'][k])) \
                                   for k in range(L)]
            else:
                ReadsPerGuide_0 = [RawCounts[sample][k]/SizeFactors[sample+' size-factor'][k] for k in range(L)]
            GuideCounts0_Filename = filename[0:-4] + NormSuffix
            GuideCounts0 = pandas.DataFrame()
            GuideCounts0['sgID'] = sgIDs
            GuideCounts0['geneID'] = geneIDs
            GuideCounts0['Norm. Read Counts'] = ReadsPerGuide_0
            GuideCounts0.to_csv(GuideCounts0_Filename, sep = '\t', index = False, header = False)
        # gene counts
        os.chdir(GeneReadCountDir)
        for filename in FileNames_g:
            sample = filename[0:-15]
            GeneCounts = pandas.read_table(filename,sep='\t',names=colnames_g)
            G = len(GeneCounts)
            geneIDs = list(GeneCounts['gene'])
            ReadsPerGene = list(GeneCounts['counts'])
            if RoundCount:
                ReadsPerGene_0 = [int(numpy.round(ReadsPerGene[j]/SizeFactors[sample+' size-factor'][j])) \
                                  for j in range(G)]
            else:
                ReadsPerGene_0 = [ReadsPerGene[j]/SizeFactors[sample+' size-factor'][j] for j in range(G)]
            GeneCounts0_Filename = filename[0:-4] + NormSuffix
            GeneCounts0 = pandas.DataFrame()
            GeneCounts0['geneID'] = geneIDs
            GeneCounts0['Norm. Read Counts'] = ReadsPerGene_0
            GeneCounts0.to_csv(GeneCounts0_Filename, sep = '\t', index = False, header = False)

    # ------------------------------------------------------------
    # Spelling error catch
    # ------------------------------------------------------------
    else:
        print('### ERROR: Check spelling of Normalization parameter in configuration file! ###')

    # --------------------------------------
    # Time stamp
    # --------------------------------------
    os.chdir(ScriptsDir)
    end = time.time()
    # Final time stamp
    print('------------------------------------------------')
    print('Script completed.')
    sec_elapsed = end - start
    if sec_elapsed < 60:
        time_elapsed = sec_elapsed
        print('Time elapsed (Total) [secs]: ' + '%.3f' % time_elapsed +'\n')
    elif sec_elapsed < 3600:
        time_elapsed = sec_elapsed/60
        print('Time elapsed (Total) [mins]: ' + '%.3f' % time_elapsed +'\n')
    else:
        time_elapsed = sec_elapsed/3600
        print('Time elapsed (Total) [hours]: ' + '%.3f' % time_elapsed +'\n')


if __name__ == "__main__":
    Normalization()
| 46.637795 | 123 | 0.516968 | 1,125 | 11,846 | 5.356444 | 0.168889 | 0.023233 | 0.027879 | 0.025556 | 0.652838 | 0.625788 | 0.615831 | 0.609359 | 0.606372 | 0.579489 | 0 | 0.016597 | 0.29301 | 11,846 | 253 | 124 | 46.822134 | 0.702925 | 0.140132 | 0 | 0.553299 | 0 | 0 | 0.121547 | 0.009471 | 0 | 0 | 0 | 0 | 0 | 1 | 0.005076 | false | 0 | 0.045685 | 0 | 0.050761 | 0.091371 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
610dcc6aa683bc18e852da17456d9fb2df99e847 | 8,761 | py | Python | main.py | francescofraternali/CityLearn | 0338dcd81a856638a163bbc88401fa93543b1e05 | [
"MIT"
] | 1 | 2020-07-21T22:30:54.000Z | 2020-07-21T22:30:54.000Z | main.py | francescofraternali/CityLearn | 0338dcd81a856638a163bbc88401fa93543b1e05 | [
"MIT"
] | null | null | null | main.py | francescofraternali/CityLearn | 0338dcd81a856638a163bbc88401fa93543b1e05 | [
"MIT"
] | null | null | null | from citylearn import CityLearn, building_loader, auto_size
from energy_models import HeatPump, EnergyStorage, Building
import matplotlib.pyplot as plt
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import collections
import gym
from gym.utils import seeding
from gym import core, spaces
import os
import ptan
import time
import argparse
import model, common
from matplotlib.pyplot import figure
import numpy as np


class AgentD4PG(ptan.agent.BaseAgent):
    """
    Deterministic agent that adds Gaussian exploration noise to its actions.
    """
    def __init__(self, net, device="cpu", epsilon=1.0):
        self.net = net
        self.device = device
        self.epsilon = epsilon

    def __call__(self, states, agent_states):
        states_v = ptan.agent.float32_preprocessor(states).to(self.device)
        mu_v = self.net(states_v)
        actions = mu_v.data.cpu().numpy()
        actions += self.epsilon * np.random.normal(size=actions.shape)
        actions = np.clip(actions, -1, 1)
        return actions, agent_states


class DDPGActor(nn.Module):
    def __init__(self, obs_size, act_size):
        super(DDPGActor, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(obs_size, 4),
            nn.ReLU(),
            nn.Linear(4, 4),
            nn.ReLU(),
            nn.Linear(4, act_size),
            nn.Tanh()
        )

    def forward(self, x):
        return self.net(x)


class DDPGCritic(nn.Module):
    def __init__(self, obs_size, act_size):
        super(DDPGCritic, self).__init__()
        self.obs_net = nn.Sequential(
            nn.Linear(obs_size, 8),
            nn.BatchNorm1d(8),
            nn.ReLU(),
        )
        self.out_net = nn.Sequential(
            nn.Linear(8 + act_size, 6),
            nn.BatchNorm1d(6),
            nn.ReLU(),
            nn.Linear(6, 1)
        )

    def forward(self, x, a):
        obs = self.obs_net(x)
        return self.out_net(torch.cat([obs, a], dim=1))
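# Quick shape sanity check (a sketch, not part of the original script):
#
#     actor = DDPGActor(obs_size=3, act_size=1)
#     critic = DDPGCritic(obs_size=3, act_size=1)
#     s = torch.zeros(2, 3)   # batch of 2 states
#     a = actor(s)            # -> (2, 1), values squashed into [-1, 1] by Tanh
#     q = critic(s, a)        # -> (2, 1) Q-value estimates
#
# Note the BatchNorm1d layers: the critic needs batches of size > 1 in
# training mode, or .eval() for single-sample evaluation.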
from pathlib import Path
data_folder = Path("data/")
demand_file = data_folder / "AustinResidential_TH.csv"
weather_file = data_folder / 'Austin_Airp_TX-hour.csv'

#building_ids = [4, 5, 9, 16, 21, 26, 33, 36, 49, 59]
building_ids = [4]
heat_pump, heat_tank, cooling_tank = {}, {}, {}

#Ref: Assessment of energy efficiency in electric storage water heaters (2008 Energy and Buildings)
loss_factor = 0.19/24
buildings = {}
for uid in building_ids:
    heat_pump[uid] = HeatPump(nominal_power = 9e12, eta_tech = 0.22, t_target_heating = 45, t_target_cooling = 10)
    heat_tank[uid] = EnergyStorage(capacity = 9e12, loss_coeff = loss_factor)
    cooling_tank[uid] = EnergyStorage(capacity = 9e12, loss_coeff = loss_factor)
    buildings[uid] = Building(uid, heating_storage = heat_tank[uid], cooling_storage = cooling_tank[uid], heating_device = heat_pump[uid], cooling_device = heat_pump[uid])
    buildings[uid].state_action_space(np.array([24.0, 40.0, 1.001]), np.array([1.0, 17.0, -0.001]), np.array([0.5]), np.array([-0.5]))

building_loader(demand_file, weather_file, buildings)
auto_size(buildings, t_target_heating = 45, t_target_cooling = 10)

env = {}
for uid in building_ids:
    env[uid] = CityLearn(demand_file, weather_file, buildings = {uid: buildings[uid]}, time_resolution = 1, simulation_period = (3500,6000))
    env[uid](uid)

if __name__ == "__main__":
    N_AGENTS = 2
    GAMMA = 0.99
    BATCH_SIZE = 5000
    LEARNING_RATE_ACTOR = 1e-4
    LEARNING_RATE_CRITIC = 1e-3
    REPLAY_SIZE = 5000
    REPLAY_INITIAL = 100
    TEST_ITERS = 120
    EPSILON_DECAY_LAST_FRAME = 1000
    EPSILON_START = 1.2
    EPSILON_FINAL = 0.02

    device = torch.device("cpu")
    act_net, crt_net, tgt_act_net, tgt_crt_net, agent, exp_source, buffer, act_opt, crt_opt, frame_idx = {}, {}, {}, {}, {}, {}, {}, {}, {}, {}
    rew_last_1000, rew, track_loss_critic, track_loss_actor = {}, {}, {}, {}
    # for uid in buildings:
    #     env[uid].reset()
    for uid in building_ids:
        #Create as many actor and critic nets as number of agents
        #Actor: states_agent_i -> actions_agent_i
        act_net[uid] = DDPGActor(buildings[uid].observation_spaces.shape[0], buildings[uid].action_spaces.shape[0]).to(device)
        #Critic: states_all_agents + actions_all_agents -> Q-value_agent_i [1]
        crt_net[uid] = DDPGCritic(buildings[uid].observation_spaces.shape[0], buildings[uid].action_spaces.shape[0]).to(device)
        tgt_act_net[uid] = ptan.agent.TargetNet(act_net[uid])
        tgt_crt_net[uid] = ptan.agent.TargetNet(crt_net[uid])
        agent[uid] = model.AgentD4PG(act_net[uid], device=device)
        exp_source[uid] = ptan.experience.ExperienceSourceFirstLast(env[uid], agent[uid], gamma=GAMMA, steps_count=1)
        buffer[uid] = ptan.experience.ExperienceReplayBuffer(exp_source[uid], buffer_size=REPLAY_SIZE)
        act_opt[uid] = optim.Adam(act_net[uid].parameters(), lr=LEARNING_RATE_ACTOR)
        crt_opt[uid] = optim.Adam(crt_net[uid].parameters(), lr=LEARNING_RATE_CRITIC)
        frame_idx[uid] = 0
        rew_last_1000[uid], rew[uid], track_loss_critic[uid], track_loss_actor[uid] = [], [], [], []

    batch, states_v, actions_v, rewards_v, dones_mask, last_states_v, q_v, last_act_v, q_last_v, q_ref_v, critic_loss_v, cur_actions_v, actor_loss_v = {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}
    cost, price_list, buffer_reward = {}, {}, {}
    for uid in buildings:
        cost[uid] = []
        price_list[uid] = []
        buffer_reward[uid] = []

    while not env[building_ids[-1]]._terminal():
        if frame_idx[4]%100 == 0:
            print(frame_idx[uid])
        for uid in buildings:
            # print(env[uid].time_step)
            agent[uid].epsilon = max(EPSILON_FINAL, EPSILON_START - frame_idx[uid] / EPSILON_DECAY_LAST_FRAME)
            frame_idx[uid] += 1
            buffer[uid].populate(1)
            # print(buffer[uid].buffer[-1])
            # print(env[uid].buildings[uid].time_step)
            price = env[uid].total_electric_consumption[-1]*3e-5 + 0.045
            price_list[uid].append(price)
        for uid in buildings:
            buffer_reward[uid].append(buffer[uid].buffer[-1].reward)
            electricity_cost = buffer[uid].buffer[-1].reward*price
            cost[uid].append(-electricity_cost)
            buffer[uid].buffer[-1] = buffer[uid].buffer[-1]._replace(reward=electricity_cost)
        if len(buffer[uid]) < REPLAY_INITIAL:
            continue
        for uid in buildings:
            for k in range(6):
                batch[uid] = buffer[uid].sample(BATCH_SIZE)
                states_v[uid], actions_v[uid], rewards_v[uid], dones_mask[uid], last_states_v[uid] = common.unpack_batch_ddqn(batch[uid], device)

                # TRAIN CRITIC
                crt_opt[uid].zero_grad()
                #Obtaining Q' using critic net with parameters teta_Q'
                q_v[uid] = crt_net[uid](states_v[uid], actions_v[uid])
                #Obtaining estimated optimal actions a|teta_mu from target actor net and from s_i+1.
                last_act_v[uid] = tgt_act_net[uid].target_model(last_states_v[uid]) #<----- Actor to train Critic
                #Obtaining Q'(s_i+1, a|teta_mu) from critic net Q'
                q_last_v[uid] = tgt_crt_net[uid].target_model(last_states_v[uid], last_act_v[uid])
                q_last_v[uid][dones_mask[uid]] = 0.0
                #Q_target used to train critic net Q'
                q_ref_v[uid] = rewards_v[uid].unsqueeze(dim=-1) + q_last_v[uid] * GAMMA
                critic_loss_v[uid] = F.mse_loss(q_v[uid], q_ref_v[uid].detach())
                critic_loss_v[uid].backward()
                crt_opt[uid].step()

                # TRAIN ACTOR
                act_opt[uid].zero_grad()
                #Obtaining estimated optimal current actions a|teta_mu from actor net and from s_i
                cur_actions_v[uid] = act_net[uid](states_v[uid])
                #Actor loss = mean{ -Q_i'(s_i, a|teta_mu) }
                actor_loss_v[uid] = -crt_net[uid](states_v[uid], cur_actions_v[uid]) #<----- Critic to train Actor
                actor_loss_v[uid] = actor_loss_v[uid].mean()
                #Find gradient of the loss and backpropagate to perform the updates of teta_mu
                actor_loss_v[uid].backward()
                act_opt[uid].step()

                if frame_idx[uid] % 1 == 0:
                    tgt_act_net[uid].alpha_sync(alpha=1 - 0.1)
                    tgt_crt_net[uid].alpha_sync(alpha=1 - 0.1)

    from matplotlib.pyplot import figure
    #Plotting all the individual actions
    print(env)
    figure(figsize=(18, 6))
    for uid in buildings:
        print(env[uid].buildings[uid].time_step)
        plt.plot(env[uid].action_track[uid][2400:2500])
    plt.show()
| 38.091304 | 201 | 0.629266 | 1,240 | 8,761 | 4.194355 | 0.220968 | 0.021534 | 0.013843 | 0.019612 | 0.271679 | 0.185157 | 0.13632 | 0.104211 | 0.062296 | 0.042684 | 0 | 0.028924 | 0.242324 | 8,761 | 229 | 202 | 38.257642 | 0.754595 | 0.12179 | 0 | 0.101266 | 0 | 0 | 0.00862 | 0.006138 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037975 | false | 0 | 0.126582 | 0.006329 | 0.202532 | 0.018987 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
610ddbb5e092cf2175ef5db86499670928275f5e | 2,041 | py | Python | main.py | ErikBavenstrand/Neural-Network-Implementation | 01652abd972139367c45ce991d228f2a1c125c07 | [
"MIT"
] | null | null | null | main.py | ErikBavenstrand/Neural-Network-Implementation | 01652abd972139367c45ce991d228f2a1c125c07 | [
"MIT"
] | 5 | 2019-11-20T13:29:21.000Z | 2022-03-12T00:05:57.000Z | main.py | ErikBavenstrand/Neural-Network-Implementation | 01652abd972139367c45ce991d228f2a1c125c07 | [
"MIT"
] | null | null | null | import pickle
import sys

from mnist import MNIST
from NeuralNetwork import *
import numpy as np
from PIL import Image


def vectorizeResult(x):
    e = np.zeros((10, 1))
    e[x] = 1.0
    return e


def getImageArray(fileName):
    ls = []
    for p in np.invert(Image.open(fileName).convert('L')).ravel():
        ls.append([p])
    return np.array(ls)/255


def createNeuralNetwork(layers, name):
    layers = list(map(int, layers))
    NN = NeuralNetwork(layers)
    data = MNIST('Data')
    trainingInput, trainingOutput = data.load_training()
    testingInput, testingOutput = data.load_testing()
    trainingInput = np.array(trainingInput)/255
    testingInput = np.array(testingInput)/255
    trainingInput = [np.reshape(x, (layers[0], 1)) for x in trainingInput]
    trainingOutput = [vectorizeResult(x) for x in trainingOutput]
    trainingData = list(zip(trainingInput, trainingOutput))
    testingInput = [np.reshape(x, (layers[0], 1)) for x in testingInput]
    testingData = list(zip(testingInput, testingOutput))
    NN.stochasticGradientDescent(trainingData, 50, 30, 2.0, testingData)
    binaryFile = open(name, mode='wb')
    pickle.dump(NN, binaryFile)
    binaryFile.close()
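# Usage sketch (inferred from the argument handling below; file names are
# hypothetical):
#
#     python main.py 784 30 10 net.bin   # train a 784-30-10 network, pickle it to net.bin
#     python main.py net.bin             # load net.bin and classify <name>.png files on demand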
if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("Creating a neural network...")
        createNeuralNetwork(sys.argv[1:-1], sys.argv[-1])
        print("Done")
    else:
        fileName = sys.argv[1]
        NN = pickle.load(open(fileName, 'rb'))
        while True:
            numberFile = input("What file would you like to read? ")
            if numberFile == '':
                break
            elif numberFile == 'all':
                for i in range(10):
                    f = str(i) + '.png'
                    val = np.argmax(NN.propagate(getImageArray(f)))
                    print("written number: {0}. Network finds a: {1}. {2}".format(i, val, val == i))
            else:
                numberFile += '.png'
                print(np.argmax(NN.propagate(getImageArray(numberFile))))
| 31.4 | 100 | 0.610975 | 241 | 2,041 | 5.13278 | 0.414938 | 0.022635 | 0.014551 | 0.025869 | 0.090542 | 0.038804 | 0.038804 | 0.038804 | 0.038804 | 0 | 0 | 0.022442 | 0.257717 | 2,041 | 64 | 101 | 31.890625 | 0.794059 | 0 | 0 | 0.038462 | 0 | 0 | 0.068594 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057692 | false | 0 | 0.115385 | 0 | 0.211538 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
610f4efe5e37318e7fc086def5a33639b6de24e4 | 1,286 | py | Python | JM_exerc/dao/Back_dao.py | matheusschuetz/TrabalhoPython | 953957898de633f8f2776681a45a1a15b68e80b9 | [
"MIT"
] | 1 | 2020-01-21T11:43:12.000Z | 2020-01-21T11:43:12.000Z | JM_exerc/dao/Back_dao.py | matheusschuetz/TrabalhoPython | 953957898de633f8f2776681a45a1a15b68e80b9 | [
"MIT"
] | null | null | null | JM_exerc/dao/Back_dao.py | matheusschuetz/TrabalhoPython | 953957898de633f8f2776681a45a1a15b68e80b9 | [
"MIT"
] | null | null | null | import MySQLdb
import sys
sys.path.append('C:/Users/900152/Documents/Dados/TrabalhoPython/JM_exerc')
from model.Back_model import BackEnd


class BackDb:
    def select_all(self):
        comand = 'SELECT * FROM topskills01.02_JM_BackEnd;'
        selectcomand = self.cursor.execute(comand)
        return selectcomand

    def select_by_id(self, id):
        comand = f"SELECT * FROM topskills01.02_JM_BackEnd WHERE ID={id}"
        idcomand = self.cursor.execute(comand)
        return idcomand

    def update(self, back: BackEnd):
        comand = f"UPDATE topskills01.02_JM_BackEnd SET Nome = '{back.Nome}', Descricao = '{back.Descricao}', Versao = '{back.Versao}' WHERE ID = {back.id}"
        self.cursor.execute(comand)
        self.conexao.commit()

    def save(self, back: BackEnd):
        comand = f"""INSERT INTO topskills01.02_JM_BackEnd
        (
            Nome
            ,Descricao
            ,Versao
        )
        VALUES(
            '{back.Nome}'
            ,'{back.Descricao}'
            ,'{back.Versao}'
        )"""
        savecomand = self.cursor.execute(comand)
        return savecomand

    def delete(self, id):
        comand = f"DELETE FROM topskills01.02_JM_BackEnd WHERE ID={id}"
        deletecomand = self.cursor.execute(comand)
        return deletecomand
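# Usage sketch (hypothetical wiring; this class assumes `cursor` and `conexao`
# attributes are provided elsewhere, e.g. by an __init__ or base class not
# shown in this file):
#
#     conexao = MySQLdb.connect(host='localhost', user='user', passwd='...', db='topskills01')
#     dao = BackDb()
#     dao.conexao = conexao
#     dao.cursor = conexao.cursor()
#     dao.select_all()
#
# Note that f-string interpolation into SQL is injection-prone; MySQLdb's
# cursor.execute(sql, params) supports parameterized queries instead.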
| 31.365854 | 155 | 0.608087 | 146 | 1,286 | 5.253425 | 0.335616 | 0.084746 | 0.097784 | 0.143416 | 0.349413 | 0.140808 | 0.091265 | 0.091265 | 0 | 0 | 0 | 0.028261 | 0.284603 | 1,286 | 40 | 156 | 32.15 | 0.805435 | 0 | 0 | 0 | 0 | 0.029412 | 0.437792 | 0.140747 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147059 | false | 0 | 0.088235 | 0 | 0.382353 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
611288649e75ce5d1bb3366ed4efae6440380a9d | 1,079 | py | Python | code/dataSource.py | youkaisteve/Population | bfda0b4b8dc510726911f5e5dd7ef6c7863634b1 | [
"MIT"
] | null | null | null | code/dataSource.py | youkaisteve/Population | bfda0b4b8dc510726911f5e5dd7ef6c7863634b1 | [
"MIT"
] | null | null | null | code/dataSource.py | youkaisteve/Population | bfda0b4b8dc510726911f5e5dd7ef6c7863634b1 | [
"MIT"
] | null | null | null | import re
import xlrd

DATA_BASE_PATH = '../data/population-migration-all/'


def get_files(file_path):
    """Collect the last-column value of every row whose '来源' (source)
    column equals '中华人民共和国人口统计资料汇编' (Compilation of Population
    Statistics of the People's Republic of China).

    Keyword arguments:
    file_path -- path of the index workbook
    """
    result = []
    work_book = xlrd.open_workbook(file_path)
    first_table = work_book.sheet_by_index(0)
    cols = first_table.ncols
    title_row = first_table.row_values(0)
    source_col_index = title_row.index('来源')
    for i in range(first_table.nrows):
        row_values = first_table.row_values(i)
        if row_values[source_col_index] == '中华人民共和国人口统计资料汇编':
            result.append(row_values[cols - 1])
    return result


def get_file_content(file_path):
    work_book = xlrd.open_workbook(file_path)
    table = work_book.sheet_by_index(0)
    area = get_area(table.row_values(0, 0, 1)[0])[0]
    data_list = []
    for i in range(7, table.nrows):
        year = table.row_values(i, 0, 1)[0]
        if year.isdigit():
            data_list.append(table.row_values(i))
    return area, data_list


def get_area(line):
    return re.findall(r'年(.*?)历年', line, re.U | re.I)
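# Usage sketch (hypothetical file names):
#
#     paths = get_files(DATA_BASE_PATH + 'index.xls')
#     area, rows = get_file_content(paths[0])
#
# get_area extracts the region name between '年' ("year") and '历年' ("over
# the years") from a sheet title such as '1995年北京历年...', yielding '北京'.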
| 22.957447 | 61 | 0.653383 | 164 | 1,079 | 4.030488 | 0.341463 | 0.108926 | 0.1059 | 0.068079 | 0.175492 | 0.175492 | 0.175492 | 0 | 0 | 0 | 0 | 0.015476 | 0.221501 | 1,079 | 46 | 62 | 23.456522 | 0.771429 | 0.048193 | 0 | 0.074074 | 0 | 0 | 0.05754 | 0.032738 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.074074 | 0.037037 | 0.296296 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61155cc8647d3a04287a744c3fe45ab20382fb37 | 3,635 | py | Python | rest-server/bin/engines.py | soft-super/harness | 540f7648fd0702c1b71f0f1c41b71a870c9420fe | [
"Apache-2.0"
] | 1 | 2020-12-17T11:22:42.000Z | 2020-12-17T11:22:42.000Z | rest-server/bin/engines.py | soft-super/harness | 540f7648fd0702c1b71f0f1c41b71a870c9420fe | [
"Apache-2.0"
] | null | null | null | rest-server/bin/engines.py | soft-super/harness | 540f7648fd0702c1b71f0f1c41b71a870c9420fe | [
"Apache-2.0"
] | 1 | 2019-03-26T20:43:23.000Z | 2019-03-26T20:43:23.000Z | #!/usr/bin/env python3
from harness import EnginesClient, HttpError
from common import *

engine_client = EnginesClient(
    url=url,
    user_id=client_user_id,
    user_secret=client_user_secret
)

if args.action == 'create':
    with open(args.config) as data_file:
        config = json.load(data_file)
    try:
        res = engine_client.create(config)
        print_success(res, 'Created new engine: \n')
    except HttpError as err:
        print_failure(err, 'Error creating new engine\n')
elif args.action == 'update':
    engine_id, config = id_and_config()
    # print("Engine-id: " + engine_id)
    # print("Json config: \n" + str(config))
    try:
        res = engine_client.update(engine_id=engine_id, import_path=args.import_path, update_type="configs", data=config)
        # print_success_string('Updating engine-id: {} \n'.format(engine_id))
        print_success(res, 'Updating engine: \n')
    except HttpError as err:
        print_failure(err, 'Error updating engine-id: {}\n'.format(engine_id))
    # with open(args.config) as data_file:
    #     config = json.load(data_file)
    # engine_id = config.engine_id
    # try:
    #     res = engine_client.update(config)
    #     print_success(res, 'Updating engine: ')
    # except HttpError as err:
    #     print_failure(err, 'Error updating engine\n')
    # engine_id, config = id_or_config()
    # try:
    #     res = engine_client.update(engine_id, config, args.delete, args.force, args.input)
    #     print_success(res, 'Updating existing engine. Success:\n')
    # except HttpError as err:
    #     print_failure(err, 'Error updating engine.')
elif args.action == 'import':
    engine_id = args.engine_id
    # print("Import path: {}".format(args.import_path))
    try:
        res = engine_client.update(engine_id=engine_id, import_path=args.import_path, update_type="imports", data={})
        print_success(res, 'Importing to engine: {}\n'.format(engine_id))
    except HttpError as err:
        print_failure(err, 'Error importing to engine-id: {} from {}\n'.format(engine_id, args.import_path))
    # else:
    #     print_failure(None, "Error: no input for import command.")
elif args.action == 'train':
    engine_id = args.engine_id
    # print("Import path: {}".format(args.import_path))
    try:
        res = engine_client.update(engine_id=engine_id, import_path=args.import_path, update_type="jobs", data={})
        print_success(res, 'Asking engine: {} to train\n'.format(engine_id))
    except HttpError as err:
        print_failure(err, 'Error requesting engine: {} to train\n'.format(engine_id))
    # else:
    #     print_failure(None, "Error: no input for import command.")
elif args.action == 'delete':
    engine_id, config = id_or_config()
    try:
        res = engine_client.delete(engine_id=engine_id)
        print_success_string('Deleted engine-id: {} \n'.format(engine_id))
    except HttpError as err:
        print_failure(err, 'Error deleting engine-id: {}\n'.format(engine_id))
elif args.action == 'status':
    engine_id = args.engineid
    try:
        if engine_id is not None:
            res = engine_client.get(engine_id=engine_id)
            # print(str(res))
            print_success(res, 'Status for engine-id: {}\n'.format(engine_id))
        else:
            res = engine_client.get(engine_id=None)
            # print(str(res))
            print_success(res, 'Status for all Engines:\n')
    except HttpError as err:
        print_failure(err, 'Error getting status.\n')
else:
    print_warning("Unknown action: {}".format(args.action))
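# Invocation sketch (argument names inferred from the attributes used above;
# the actual parser lives in `common`, which is not shown here):
#
#     ./engines.py create --config engine.json
#     ./engines.py import --engine_id my_engine --import_path events.json
#     ./engines.py train --engine_id my_engine
#     ./engines.py status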
| 37.864583 | 121 | 0.646217 | 480 | 3,635 | 4.697917 | 0.164583 | 0.141907 | 0.059867 | 0.059867 | 0.655432 | 0.597783 | 0.550776 | 0.511308 | 0.471397 | 0.453215 | 0 | 0.000356 | 0.22696 | 3,635 | 95 | 122 | 38.263158 | 0.802135 | 0.282806 | 0 | 0.285714 | 0 | 0 | 0.16699 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61185f9554e6fdad4742b175bf8931b9e3aa29a8 | 1,817 | py | Python | protlearn/dimreduction/pca.py | tadorfer/ProtClass | da1a01ea9abd3c367b3389dfed683c6a9dfa6afd | [
"MIT"
] | 24 | 2020-09-17T10:35:44.000Z | 2022-03-09T19:19:01.000Z | protlearn/dimreduction/pca.py | tadorfer/ProtClass | da1a01ea9abd3c367b3389dfed683c6a9dfa6afd | [
"MIT"
] | 14 | 2020-08-09T18:23:01.000Z | 2020-11-19T05:48:14.000Z | protlearn/dimreduction/pca.py | tadorfer/ProtClass | da1a01ea9abd3c367b3389dfed683c6a9dfa6afd | [
"MIT"
] | 3 | 2021-03-07T23:41:17.000Z | 2022-02-25T18:48:37.000Z | # Author: Thomas Dorfer <thomas.a.dorfer@gmail.com>
import warnings
import numpy as np
from sklearn.decomposition import PCA


def pca(X, *, thres=.9, whiten=False):
    """Principal component analysis.

    PCA is defined as an orthogonal linear transformation that transforms the
    data to a new coordinate system such that the greatest variance by some
    scalar projection of the data comes to lie on the first coordinate (called
    the first principal component), the second greatest variance on the second
    coordinate, and so on.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features_pre)
        Feature matrix.

    thres : float, default=.9
        Specify the desired explained variance.

    whiten : bool, default=False
        Whether to whiten the components (passed through to scikit-learn's PCA).

    Returns
    -------
    arr : ndarray of shape (n_samples, n_features_post)
        Array containing the PCA components comprising the specified variance.

    Notes
    -----
    For the output to be meaningful, the number of samples should be larger
    than the number of features.

    Examples
    --------
    >>> from protlearn.dimreduction import pca
    >>> features.shape  # from a larger dataset (not shown here)
    (1000, 575)
    >>> reduced = pca(features, thres=.9)
    >>> reduced.shape
    (1000, 32)
    """
    # check input dimensionality
    if X.shape[0] < X.shape[1]:
        warnings.warn("The number of samples (%i) is less than the number of "
                      "features (%i). Therefore, the PCA output may not be "
                      "meaningful." % (X.shape[0], X.shape[1]))

    # fit and transform PCA
    pca = PCA(whiten=whiten).fit(X)
    var = pca.explained_variance_ratio_[0]
    comp = 1
    while var <= thres:
        var += pca.explained_variance_ratio_[comp]
        comp += 1
    arr = pca.transform(X)
    return arr[:,:comp] | 28.390625 | 80 | 0.636214 | 239 | 1,817 | 4.786611 | 0.476987 | 0.031469 | 0.038462 | 0.026224 | 0.167832 | 0.078671 | 0.054196 | 0 | 0 | 0 | 0 | 0.017306 | 0.268575 | 1,817 | 64 | 81 | 28.390625 | 0.843491 | 0.58448 | 0 | 0 | 0 | 0 | 0.184543 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.1875 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
611baf35e81592e930584d66af2ff718199af1d7 | 600 | py | Python | base/lib/pythonbin/urwid/tests/test_doctests.py | threefoldtech/sandbox_osx | e2a5ea812c3789dea40113719dbad6d6ee7cd720 | [
"Apache-2.0"
] | 4 | 2021-10-14T21:22:25.000Z | 2022-03-12T19:58:48.000Z | base/lib/pythonbin/urwid/tests/test_doctests.py | threefoldtech/sandbox_osx | e2a5ea812c3789dea40113719dbad6d6ee7cd720 | [
"Apache-2.0"
] | 3 | 2020-06-05T18:53:36.000Z | 2021-06-10T20:47:05.000Z | base/lib/pythonbin/urwid/tests/test_doctests.py | threefoldtech/sandbox_osx | e2a5ea812c3789dea40113719dbad6d6ee7cd720 | [
"Apache-2.0"
] | 1 | 2022-03-15T22:52:53.000Z | 2022-03-15T22:52:53.000Z | import unittest
import doctest

import urwid


def load_tests(loader, tests, ignore):
    module_doctests = [
        urwid.widget,
        urwid.wimp,
        urwid.decoration,
        urwid.display_common,
        urwid.main_loop,
        urwid.monitored_list,
        urwid.raw_display,
        'urwid.split_repr', # override function with same name
        urwid.util,
        urwid.signals,
        urwid.graphics,
    ]
    for m in module_doctests:
        tests.addTests(doctest.DocTestSuite(m,
            optionflags=doctest.ELLIPSIS | doctest.IGNORE_EXCEPTION_DETAIL))
    return tests
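# `load_tests` is the standard unittest protocol hook: when unittest discovers
# this module it calls load_tests(loader, standard_tests, pattern) and runs
# whatever suite is returned, so the doctests above execute under e.g.:
#
#     python -m unittest urwid.tests.test_doctests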
| 25 | 76 | 0.64 | 65 | 600 | 5.753846 | 0.630769 | 0.074866 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.285 | 600 | 23 | 77 | 26.086957 | 0.871795 | 0.053333 | 0 | 0 | 0 | 0 | 0.028269 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.142857 | 0 | 0.238095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
611e0ce498d0d6daa68a1e298efb23c3efe69b01 | 425 | py | Python | authentication/urls.py | NoMariusz/Praeteritum | c32fa017e23de7255224fcf72cd04abdfc3ebff4 | [
"MIT"
] | 3 | 2021-03-07T21:43:55.000Z | 2021-09-21T08:24:26.000Z | authentication/urls.py | NoMariusz/Praeteritum | c32fa017e23de7255224fcf72cd04abdfc3ebff4 | [
"MIT"
] | null | null | null | authentication/urls.py | NoMariusz/Praeteritum | c32fa017e23de7255224fcf72cd04abdfc3ebff4 | [
"MIT"
] | null | null | null | from django.urls import path
from django.views.decorators.csrf import csrf_exempt

from .views import UserView, RegisterUser, LoginUser, LogoutUser, \
    CheckAuthenticated

urlpatterns = [
    path('', UserView.as_view()),
    path('register', csrf_exempt(RegisterUser.as_view())),
    path('login', LoginUser.as_view()),
    path('logout', LogoutUser.as_view()),
    path('isAuthenticated', CheckAuthenticated.as_view())
]
| 32.692308 | 67 | 0.729412 | 48 | 425 | 6.3125 | 0.4375 | 0.09901 | 0.132013 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.131765 | 425 | 12 | 68 | 35.416667 | 0.821138 | 0 | 0 | 0 | 0 | 0 | 0.08 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.272727 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6122f59b015b8f42249ec2c010138d836ac0f35e | 1,541 | py | Python | research/develop/2016-12-08-irio-invalid-cnpj-or-cpf.py | SuccessionEcologicalServices/serenata-de-amor | 718a74e031ea0a4b020bf42801e1d23353e6bc34 | [
"MIT"
] | 59 | 2018-10-03T18:46:31.000Z | 2022-01-05T22:39:17.000Z | research/develop/2016-12-08-irio-invalid-cnpj-or-cpf.py | SuccessionEcologicalServices/serenata-de-amor | 718a74e031ea0a4b020bf42801e1d23353e6bc34 | [
"MIT"
] | 16 | 2018-10-03T21:36:50.000Z | 2021-04-12T22:10:16.000Z | research/develop/2016-12-08-irio-invalid-cnpj-or-cpf.py | SuccessionEcologicalServices/serenata-de-amor | 718a74e031ea0a4b020bf42801e1d23353e6bc34 | [
"MIT"
] | 20 | 2018-10-03T19:14:57.000Z | 2021-04-12T20:50:44.000Z |
# coding: utf-8

# # Invalid CNPJ or CPF
#
# `cnpj_cpf` is the column identifying the company or individual who received the payment made by the congressperson. Having this value empty should mean that it's an expense made outside Brazil, with a company (or person) without a Brazilian ID.

# In[1]:

import numpy as np
import pandas as pd

dataset = pd.read_csv('../data/2016-11-19-reimbursements.xz',
                      dtype={'applicant_id': np.str,
                             'cnpj_cpf': np.str,
                             'congressperson_id': np.str,
                             'subquota_number': np.str},
                      low_memory=False)
dataset.shape


# In[2]:

from pycpfcnpj import cpfcnpj

def validate_cnpj_cpf(cnpj_or_cpf):
    return (cnpj_or_cpf == None) | cpfcnpj.validate(cnpj_or_cpf)
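# For illustration (hypothetical inputs): cpfcnpj.validate takes a digit
# string and checks its length and check digits, so validate_cnpj_cpf(None)
# is True (no Brazilian ID to validate), while a repeated-digit string such
# as '11111111111' is typically rejected by CPF validators.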
cnpj_cpf_list = dataset['cnpj_cpf'].astype(np.str).replace('nan', None)
dataset['valid_cnpj_cpf'] = np.vectorize(validate_cnpj_cpf)(cnpj_cpf_list)


# `document_type` 2 means expenses made abroad.

# In[3]:

keys = ['year',
        'applicant_id',
        'document_id',
        'total_net_value',
        'cnpj_cpf',
        'supplier',
        'document_type']
dataset.query('document_type != 2').loc[~dataset['valid_cnpj_cpf'], keys]


# With 1,532,491 records in the dataset and just 10 with invalid CNPJ/CPF, we can probably assume that the Chamber of Deputies has a validation in the tool where the congressperson requests reimbursements. These represent a mistake in the implemented algorithm.

# In[ ]:
| 28.018182 | 265 | 0.659961 | 216 | 1,541 | 4.550926 | 0.532407 | 0.078332 | 0.036623 | 0.02645 | 0.032553 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019591 | 0.238157 | 1,541 | 54 | 266 | 28.537037 | 0.817717 | 0.401038 | 0 | 0 | 0 | 0 | 0.238148 | 0.039691 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.136364 | 0.045455 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6123c525e5a5da797d3ca93718ec18aa3078da5c | 5,170 | py | Python | examples/convolutional_vae.py | twiecki/edward | 1ac2eeb7f5163915848afd3b027c714255459de3 | [
"Apache-2.0"
] | 4 | 2016-05-09T18:48:21.000Z | 2018-03-01T22:50:42.000Z | examples/convolutional_vae.py | twiecki/edward | 1ac2eeb7f5163915848afd3b027c714255459de3 | [
"Apache-2.0"
] | null | null | null | examples/convolutional_vae.py | twiecki/edward | 1ac2eeb7f5163915848afd3b027c714255459de3 | [
"Apache-2.0"
] | 3 | 2016-07-05T14:19:08.000Z | 2019-09-04T13:48:59.000Z | #!/usr/bin/env python
"""
Convolutional variational auto-encoder for MNIST data. The model is
written in TensorFlow, with neural networks using Pretty Tensor.

Probability model
    Prior: Normal
    Likelihood: Bernoulli parameterized by convolutional NN
Variational model
    Likelihood: Mean-field Normal parameterized by convolutional NN
"""
from __future__ import print_function
import os

import edward as ed
import prettytensor as pt
import tensorflow as tf

from convolutional_vae_util import deconv2d
from edward import Variational, Normal
from progressbar import ETA, Bar, Percentage, ProgressBar
from scipy.misc import imsave
from tensorflow.examples.tutorials.mnist import input_data

flags = tf.flags
logging = tf.logging
flags.DEFINE_integer("num_vars", 10, "Number of latent variables.")
flags.DEFINE_integer("n_iter_per_epoch", 1000, "Number of iterations per epoch.")
flags.DEFINE_integer("n_epoch", 100, "Maximum number of epochs.")
flags.DEFINE_integer("n_data", 128, "Mini-batch size for data subsampling.")
flags.DEFINE_string("data_directory", "data/mnist", "Directory to store data.")
flags.DEFINE_string("img_directory", "img", "Directory to store sampled images.")
FLAGS = flags.FLAGS


def mapping(self, x):
    """
    lambda = phi(x)
    """
    with pt.defaults_scope(activation_fn=tf.nn.elu,
                           batch_normalize=True,
                           learned_moments_update_rate=0.0003,
                           variance_epsilon=0.001,
                           scale_after_normalization=True):
        params = (pt.wrap(x).
                  reshape([FLAGS.n_data, 28, 28, 1]).
                  conv2d(5, 32, stride=2).
                  conv2d(5, 64, stride=2).
                  conv2d(5, 128, edges='VALID').
                  dropout(0.9).
                  flatten().
                  fully_connected(self.num_vars * 2, activation_fn=None)).tensor

    mean = params[:, :self.num_vars]
    stddev = tf.sqrt(tf.exp(params[:, self.num_vars:]))
    return [mean, stddev]


def sample_noise(self, size):
    """
    eps = sample_noise() ~ s(eps)
    s.t. z = reparam(eps; lambda) ~ q(z | lambda)
    """
    return tf.random_normal(size)


Normal.mapping = mapping
Normal.sample_noise = sample_noise
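# With these two methods patched onto edward's Normal family above, the
# variational sample is presumably formed via the reparameterization trick:
# [mean, stddev] = phi(x) and z = mean + stddev * eps with eps ~ N(0, I),
# which keeps z differentiable with respect to the inference network.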
class NormalBernoulli:
    def __init__(self, num_vars):
        self.num_vars = num_vars

    def mapping(self, z):
        """
        p = varphi(z)
        """
        with pt.defaults_scope(activation_fn=tf.nn.elu,
                               batch_normalize=True,
                               learned_moments_update_rate=0.0003,
                               variance_epsilon=0.001,
                               scale_after_normalization=True):
            return (pt.wrap(z).
                    reshape([FLAGS.n_data, 1, 1, self.num_vars]).
                    deconv2d(3, 128, edges='VALID').
                    deconv2d(5, 64, edges='VALID').
                    deconv2d(5, 32, stride=2).
                    deconv2d(5, 1, stride=2, activation_fn=tf.nn.sigmoid).
                    flatten()).tensor

    def log_likelihood(self, x, z):
        """
        log p(x | z) = log Bernoulli(x | p = varphi(z))
        """
        p = self.mapping(z)
        return x * tf.log(p + 1e-8) + (1.0 - x) * tf.log(1.0 - p + 1e-8)

    def sample_prior(self, size):
        """
        p ~ some complex distribution induced by
        z ~ N(0, 1), p = phi(z)
        """
        z = tf.random_normal(size)
        return self.mapping(z)


class Data:
    def __init__(self, data):
        self.mnist = data

    def sample(self, size):
        x_batch, _ = self.mnist.train.next_batch(size)
        return x_batch


ed.set_seed(42)
model = NormalBernoulli(FLAGS.num_vars)
# TODO This family is not currently amenable to the variational construction.
variational = Normal(FLAGS.num_vars)

if not os.path.exists(FLAGS.data_directory):
    os.makedirs(FLAGS.data_directory)
mnist = input_data.read_data_sets(FLAGS.data_directory, one_hot=True)
data = Data(mnist)

inference = ed.VAE(model, variational, data)
sess = inference.initialize(n_data=FLAGS.n_data)
with tf.variable_scope("model", reuse=True) as scope:
    p_rep = model.sample_prior([FLAGS.n_data, FLAGS.num_vars])

for epoch in range(FLAGS.n_epoch):
    avg_loss = 0.0

    widgets = ["epoch #%d|" % epoch, Percentage(), Bar(), ETA()]
    pbar = ProgressBar(FLAGS.n_iter_per_epoch, widgets=widgets)
    pbar.start()
    for t in range(FLAGS.n_iter_per_epoch):
        pbar.update(t)
        loss = inference.update(sess)
        avg_loss += loss

    # Take average of all ELBOs during the epoch.
    avg_loss = avg_loss / FLAGS.n_iter_per_epoch
    # Take average over each data point (pixel), where each image has
    # 28*28 pixels.
    avg_loss = avg_loss / (28 * 28 * FLAGS.n_data)
    # Print a lower bound to the average marginal likelihood for a single pixel.
    print("log p(x) >= %f" % avg_loss)

    imgs = sess.run(p_rep)
    for b in range(FLAGS.n_data):
        if not os.path.exists(FLAGS.img_directory):
            os.makedirs(FLAGS.img_directory)
        imsave(os.path.join(FLAGS.img_directory, '%d.png') % b,
               imgs[b].reshape(28, 28))
| 33.571429 | 81 | 0.624758 | 698 | 5,170 | 4.467049 | 0.312321 | 0.024695 | 0.019243 | 0.016677 | 0.115459 | 0.09814 | 0.084028 | 0.084028 | 0.084028 | 0.084028 | 0 | 0.026199 | 0.261702 | 5,170 | 153 | 82 | 33.79085 | 0.790673 | 0.160155 | 0 | 0.103093 | 0 | 0 | 0.072309 | 0 | 0 | 0 | 0 | 0.006536 | 0 | 1 | 0.082474 | false | 0 | 0.103093 | 0 | 0.268041 | 0.020619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61244fae3cb1d570e8f892707e02d30830b9dab4 | 4,998 | py | Python | cadnano/views/outlinerview/cnoutlineritem.py | mctrinh/cadnano2.5 | d8254f24eef5fd77b4fb2b1a9642a8eea2e3c736 | [
"BSD-3-Clause"
] | 1 | 2022-03-27T14:37:32.000Z | 2022-03-27T14:37:32.000Z | cadnano/views/outlinerview/cnoutlineritem.py | mctrinh/cadnano2.5 | d8254f24eef5fd77b4fb2b1a9642a8eea2e3c736 | [
"BSD-3-Clause"
] | null | null | null | cadnano/views/outlinerview/cnoutlineritem.py | mctrinh/cadnano2.5 | d8254f24eef5fd77b4fb2b1a9642a8eea2e3c736 | [
"BSD-3-Clause"
] | 1 | 2021-01-22T02:29:38.000Z | 2021-01-22T02:29:38.000Z | from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QTreeWidgetItem

from cadnano.gui.palette import getBrushObj
from . import outlinerstyles as styles

NAME_COL = 0
LOCKED_COL = 1
VISIBLE_COL = 2
COLOR_COL = 3

LEAF_FLAGS = (Qt.ItemIsSelectable | Qt.ItemIsEditable |
              Qt.ItemIsDragEnabled |
              Qt.ItemIsUserCheckable | Qt.ItemIsEnabled)    # 1|2|4|16|32 = 55
DISABLE_FLAGS = Qt.NoItemFlags  # 0
ROOT_FLAGS = (Qt.ItemIsDragEnabled | Qt.ItemIsDropEnabled |
              Qt.ItemIsUserCheckable | Qt.ItemIsEnabled)    # 4|8|16|32 = 60
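# For reference, the Qt.ItemFlag bit values behind the totals above:
# ItemIsSelectable=1, ItemIsEditable=2, ItemIsDragEnabled=4,
# ItemIsDropEnabled=8, ItemIsUserCheckable=16, ItemIsEnabled=32.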
class CNOutlinerItem(QTreeWidgetItem):
    PROPERTIES = {'name': NAME_COL, 'is_locked': LOCKED_COL, 'is_visible': VISIBLE_COL, 'color': COLOR_COL}
    CAN_NAME_EDIT = True

    def __init__(self, cn_model, parent):
        super(QTreeWidgetItem, self).__init__(parent, QTreeWidgetItem.UserType)
        self._cn_model = cn_model
        name = cn_model.getName()
        color = cn_model.getColor()
        self.setData(NAME_COL, Qt.EditRole, name)
        self.setData(LOCKED_COL, Qt.EditRole, False)    # is_locked
        self.setData(VISIBLE_COL, Qt.EditRole, True)    # is_visible
        self.setData(COLOR_COL, Qt.EditRole, color)
    # end def

    ### PRIVATE SUPPORT METHODS ###
    def __hash__(self):
        """ Necessary because CNOutlinerItem's base class is unhashable
        (a consequence of the __init__ arg differences), so hash on the
        wrapped cn_model instead.
        """
        return hash(self._cn_model)

    ### PUBLIC SUPPORT METHODS ###
    def itemType(self):
        pass
    # end def

    def cnModel(self):
        return self._cn_model
    # end def

    def getColor(self):
        return self._cn_model.getProperty('color')
    # end def

    def createRootPartItem(self, item_name, parent):
        """ use this for sub-lists for part items
        """
        return RootPartItem(self._cn_model, item_name, parent)
    # end def

    def updateCNModel(self):
        # this works only for color. uncomment below to generalize to properties
        # print("outliner %s - updateCNModel" % (str(type(self))))
        cn_model = self._cn_model
        name = self.data(NAME_COL, Qt.DisplayRole)
        color = self.data(COLOR_COL, Qt.DisplayRole)
        is_visible = self.data(VISIBLE_COL, Qt.DisplayRole)
        mname, mcolor, mvisible = cn_model.getOutlineProperties()
        if name is not None and name != mname:
            cn_model.setProperty('name', name)
        if color is not None and color != mcolor:
            cn_model.setProperty('color', color)
        if is_visible is not None and is_visible != mvisible:
            cn_model.setProperty('is_visible', is_visible)
    # end def

    def setValue(self, key, value):
        # cn_model = self._model_part
        if key == 'name':
            name = self.data(NAME_COL, Qt.DisplayRole)
            if name != value:
                # print("setting name", self.isSelected())
                self.setData(NAME_COL, Qt.EditRole, value)
        elif key == 'color':
            color = self.data(COLOR_COL, Qt.DisplayRole)
            if color != value:
                self.setData(COLOR_COL, Qt.EditRole, value)
        elif key == 'is_locked':
            is_locked = self.data(LOCKED_COL, Qt.DisplayRole)
            if is_locked != value:
                self.setData(LOCKED_COL, Qt.EditRole, value)
        elif key == 'is_visible':
            is_visible = self.data(VISIBLE_COL, Qt.DisplayRole)
            if is_visible != value:
                self.setData(VISIBLE_COL, Qt.EditRole, value)
        else:
            pass  # property not supported
            # raise KeyError("No property %s in cn_model" % (key))
    # end def

    def activate(self):
        self.setBackground(NAME_COL, getBrushObj(styles.ACTIVE_COLOR))
        self.is_active = True
    # end def

    def deactivate(self):
        # print("should deactivate outliner Part")
        self.setBackground(NAME_COL, getBrushObj(styles.INACTIVE_COLOR))
        self.is_active = False
    # end def
# end class


class RootPartItem(QTreeWidgetItem):
    def __init__(self, model_part, item_name, parent):
        super(QTreeWidgetItem, self).__init__(parent, QTreeWidgetItem.UserType)
        self._cn_model = model_part
        self.item_name = item_name
        self.setData(NAME_COL, Qt.EditRole, item_name)
        self.setData(LOCKED_COL, Qt.EditRole, False)    # is_locked
        self.setData(VISIBLE_COL, Qt.EditRole, True)    # is_visible
        self.setData(COLOR_COL, Qt.EditRole, "#ffffff") # color
        # self.setFlags(self.flags() & ~Qt.ItemIsSelectable)
        self.setFlags(ROOT_FLAGS)
        self.setExpanded(True)
    # end def

    def __repr__(self):
        return "RootPartItem %s: for %s" % (self.item_name,
                                            self._cn_model.getProperty('name'))
    # end def

    def part(self):
        return self._cn_model

    def getColor(self):
        return "#ffffff"
| 35.7 | 107 | 0.622449 | 593 | 4,998 | 5.037099 | 0.227656 | 0.04687 | 0.052226 | 0.024104 | 0.325075 | 0.295949 | 0.214931 | 0.152662 | 0.125879 | 0.098427 | 0 | 0.003893 | 0.280512 | 4,998 | 139 | 108 | 35.956835 | 0.826752 | 0.146259 | 0 | 0.197802 | 0 | 0 | 0.034064 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0.010989 | 0.043956 | 0.054945 | 0.318681 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
612569303782cf9c7b7179cfa384ea54e28fb8c1 | 13,579 | py | Python | data/dedupe.py | mcguinlu/COVID_suicide_living | 81ac106065b1113706f2df26051e0d73efe382aa | [
"MIT"
] | 1 | 2020-11-29T18:42:53.000Z | 2020-11-29T18:42:53.000Z | data/dedupe.py | L-ENA/SR_automation_LSR | c9b5d3a121e4e141485b4ad0f2e3975217861a3b | [
"MIT"
] | 1 | 2020-06-24T18:48:56.000Z | 2020-06-24T18:48:56.000Z | data/dedupe.py | L-ENA/SR_automation_LSR | c9b5d3a121e4e141485b4ad0f2e3975217861a3b | [
"MIT"
] | 3 | 2020-03-30T13:55:38.000Z | 2020-10-27T20:38:49.000Z | import pandas as pd
import re

from fuzzywuzzy import fuzz
from tqdm import tqdm
from datetime import date
import os

#os.chdir("C:\\Users\\lm16564\\OneDrive - University of Bristol\\Documents\\rrr\\COVID_suicide_living")


def fuzzymatch(a, b, min_match):
    if fuzz.ratio(a, b) > min_match:  # matching more than specified ratio
        # print("-------match to {} ratio---------".format(min_match))
        # print(a)
        # print(b)
        # print(fuzz.ratio(a, b))
        return True
    return False  # match is less, therefore text is too different
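# Illustration (hypothetical strings): fuzz.ratio returns a 0-100 similarity
# score, so near-identical titles such as "suicide risk" vs "suicide risks"
# score in the high 90s, while unrelated titles typically fall well below the
# thresholds passed in as min_match_title and min_match_abstract.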
def rowmatch(row, indexes, mydict, min_match_title, min_match_abstrct):
try:
t1 = row["title"].strip().lower() # remove trailing spaces and lower the letters
if t1=="":
return False, None
except:
return False, None
try:
a1 = row["abstract"].strip().lower()[:495]
except:
a1 = ""
match = False
index = None # save location of the duplicate in master df
if t1 != "": # only attempt matching if there is a title to start with.
for i in indexes: # attempt to match this title with every title in the master frame
try:
t2 = mydict["title"][i].strip().lower() # remove trailing spaces and lower the letters
except:
t2 = ""
match = fuzzymatch(t1, t2, min_match_title)
if match: # continue only if titles are matching
if a1 != "":
try:
a2 = mydict["abstract"][i].strip().lower()[:495]
except:
a2 = ""
# print("matched title but found no second abstract")
# print(t1)
# print(t2)
index = i
break
match = fuzzymatch(a1, a2, min_match_abstrct)
if match:
# print("Matched on full record")
# print(t1)
# print(t2)
# print(a1)
# print(a2)
index = i
break
else:
index = None
else:
# print("Matched title, but found no first abstract, returning True")#for e.g. dblp records there are no abstracts, but we still want to deduplicate and get rid of them!
# print(t1)
# print(t2)
# print("-------")
index = i
break
    return match, index  # True if a match was found and the loop was broken; False if every row was checked and fuzzy matching stayed below the threshold
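
# Minimal sketch of the expected inputs (the records are hypothetical): `mydict`
# is a DataFrame converted via .to_dict(), i.e. {column: {index: value}}.
# row = {"title": "Covid-19 and suicide risk", "abstract": "We study the effect."}
# mydict = {"title": {0: "COVID-19 and suicide risk"}, "abstract": {0: "We study the effect."}}
# match, index = rowmatch(row, [0], mydict, 95, 90)  # -> (True, 0)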
def dedupe_loop_within(wos, name, min_match_title, min_match_abstract):
wos_orig = wos.copy()
wos_orig["Deduplication_Notes"] = ["" for d in wos_orig["title"].values] # has no abstracts
orig_length = wos.shape[0]
print("Deduplicating {} data".format(name))
new_rows = []
counter = 0
masterdf = pd.DataFrame(columns=wos.columns.values)
#
disagreements=[]
all_dupes=[]
pd.set_option("display.max_colwidth", 5000)
with tqdm(total=wos.shape[0]) as pbar:
for i, row in wos.iterrows():
mydict = masterdf.to_dict()
indexes = list(masterdf.index.values) # iterate over dict rather than df for 6 times speedup!
match, index = rowmatch(row, indexes, mydict, min_match_title, min_match_abstract)
if match:
all_dupes.append(row)
all_dupes.append(masterdf.loc[index])
# print(index)
# print(masterdf.at[index, "Deduplication_Notes"])
init1=False
init2=False
if row["initial_decision"] == "Include" or pd.notna(row["expert_decision"]):
init1=True
if masterdf.loc[index]["initial_decision"] == "Include" or pd.notna(masterdf.loc[index]["expert_decision"]):
init2 = True
if init1 != init2:
#print("Mismatch!")
disagreements.append(row)
disagreements.append(masterdf.loc[index])
# wos_orig.at[i, "Deduplication_Notes"] = "{} CHECK DUPLICATE STATUS [SOURCE:{} {}]".format(
# str(wos_orig.at[index, "Deduplication_Notes"]), str(masterdf.loc[index]["source"]),
# re.sub(r"\s+", " ",
# masterdf.loc[index].to_string().replace("\n", "; "))).strip() # modift masterdf in place
# print(masterdf.at[index, "Deduplication_Notes"])
counter += 1
else:
                masterdf = masterdf.append(row, ignore_index=True)  # note: DataFrame.append was removed in pandas 2.0; use pd.concat on newer pandas
# print(masterdf.head())
pbar.update(1)
print(
"Adding {} rows out of {} to master data and identified {} as duplicates".format(masterdf.shape[0], orig_length,
counter))
print("Writing disagreements...")
dis=pd.DataFrame(disagreements, columns=wos.columns.values)
dis.to_csv("data//results//disagreements.csv")
print("Writing full deduplication of previous data frame (danger!)...")
dis = pd.DataFrame(all_dupes, columns=wos.columns.values)
dis.to_csv("data//results//dupes_previous.csv")
# masterdf.to_csv("all_results.csv")
# wos_orig.to_csv( "all_results_with_duplicates-{}.csv".format(date.today())) # save version that has dupes in it
# masterdf.to_csv(os.path.join("results", "all_results.csv"))
# wos_orig.to_csv(os.path.join("results", "all_results_with_duplicates-{}.csv".format(
# date.today()))) # save version that has dupes in it
return masterdf
def dedupe_loop_additional(original, new, name, min_match_title, min_match_abstract):
#
#Function to loop the new data, and add columns of new data only if they are not a duplicate already inside the data frame
#Also stores duplicates in a de-duplication master list, if they are not 100% replications of a previous duplicate.
#
#
print("Deduping additional dataframe")
new_rows = []
counter = 0
equals=0
masterdf = original.copy()
new_deduped=pd.DataFrame(columns=list(new.columns))
#
dupe_list=[]
new=new.fillna("")
masterdf = masterdf.fillna("")
pd.set_option("display.max_colwidth", 5000)#otherwise cell contents are cut away
print("Iterating {} rows of new data to find duplicates".format(new.shape[0]))
with tqdm(total=new.shape[0]) as pbar:
for i, row in new.iterrows():
mydict = masterdf.to_dict()
indexes = list(masterdf.index.values) # iterate over dict rather than df for 6 times speedup!
# print(row.to_string())
match, index = rowmatch(row, indexes, mydict, min_match_title, min_match_abstract)
if match:
def dupe_report(new, orig):
id=orig["ID"]
source_orig = str(orig["source"]).lower()
source_new = str(new["source"]).lower()
title_orig=str(orig["title"]).strip()
title_new = str(new["title"]).strip()
abstract_new = str(new["abstract"]).strip()
abstract_orig = str(orig["abstract"]).strip()
author_new = str(new["authors"]).strip()
author_orig = str(orig["authors"]).strip()
link_new = str(new["link"]).strip()
link_orig = str(orig["link"]).strip()
date_added=date.today()
#decision_orig=orig["initial_decision"]
if id== "nan" or id =="NaN" or id == "" or pd.isna(id) or id == "NA":#do not append this value, its already added to the new results, but has no ID assigned yet.
return "equal"
if source_new == source_orig and title_new == title_orig and abstract_new == abstract_orig and link_new == link_orig:#exact duplicates are not needed
#print("Direct duplicate: {} {}; {} {}; {} {}".format(source_orig,source_new,link_orig,link_new,title_orig,title_new))
return "equal"
else:
                        return pd.Series([rec_id, source_orig, source_new, title_orig, title_new, abstract_orig, abstract_new, author_orig, author_new, link_orig, link_new, date_added], index=["ID", "source original", "source new", "title original", "title new", "abstract original", "abstract new", "author original", "author new", "link original", "link new", "date added"])
ret= dupe_report(row, masterdf.loc[index])
if type(ret) == pd.Series:
dupe_list.append(ret)#add a duplication report to the list
counter += 1
else:
equals += 1
else:
                masterdf = masterdf.append(row, ignore_index=True)  # not a duplicate: add the new entry to the master data (DataFrame.append was removed in pandas 2.0)
                new_deduped = new_deduped.append(row, ignore_index=True)  # also add it to a data frame that holds just the new entries
# print(masterdf.head())
pbar.update(1)
print("Adding {} rows out of {} to master data and identified {} as duplicates with minor differences (the other {} were exactly identical and are discarded)".format(new_deduped.shape[0], new.shape[0],counter, equals))
print("Replacing NA with empty spaces...")
new_deduped= new_deduped.fillna("")
new_deduped['link'] = new_deduped['link'].apply(lambda x: re.sub("https://www.doi.org", "https://doi.org", x))
new_deduped.to_csv(name)
print("Saved the new, deduplicated rows as {}".format(name))
#################Deduplication report: append new duplicates to it
dup_df=pd.read_csv("data\\results\\dedupe_report.csv")
dup_df=dup_df.replace("NA", "")
dup_df = dup_df.fillna("")
counter=0
#print(len(dupe_list))
print("Checking if any new results need to be added to the deduplication master list:")
with tqdm(total=len(dupe_list)) as pbar:
for e in dupe_list:
dup_df = dup_df.fillna("")
ddf= dup_df[(dup_df['ID']==e[0]) & (dup_df['source original'] == e[1]) & (dup_df['source new'] == e[2]) & (dup_df['abstract new'] == e[6]) & (dup_df['title new'] == e[4])]
if ddf.shape[0]== 0:#checking if this record is already stored as duplicate - since many records are retrieved over and over again
dup_df = dup_df.append(e, ignore_index=True)
counter += 1
# print("found new: {}".format(e[0]))
# print("Test: {}".format(dup_df[dup_df['ID']==e[0]].shape[0]))
# print(e[0])
# print(dup_df['ID'].values[:100])
#else:
#print("Duplicate in dup_df")
pbar.update(1)
dup_df.to_csv("data\\results\\dedupe_report.csv",index=False)
print("Added {} records to the dedupe_report.csv".format(counter))
def dedupe_me(path, match_title, match_abstract, path_2=""):
df = pd.read_csv(path)
df = df.replace("NA", "")
df = df.replace("nan", "")
df= df.fillna("")
print("Reading the file all_results_tmp.csv that contains the previous results. It has {} records, and its {} column names are {}".format(df.shape[0], len(list(df.columns)), list(df.columns)))
if path_2 != "":
df_toadd = pd.read_csv(path_2)
df_toadd = df_toadd.replace("NA", "")
df_toadd = df_toadd.replace("nan", "")
df_toadd = df_toadd.fillna("")
print("Reading the file new_results.csv that contains the new results. It has {} records, and its {} column names are {}".format(df_toadd.shape[0], len(list(df.columns)), list(df_toadd.columns)))
dedupe_loop_additional(df, df_toadd, "data\\results\\new_and_deduped.csv", match_title, match_abstract)
else:
        # use this to deduplicate results within a single spreadsheet - not needed for the LSR app, since deduplication happens against an already-deduplicated database plus newly added records
dedupe_loop_within(df, "data\\results\\new_and_deduped.csv", match_title, match_abstract)
path = "data\\results\\all_results_tmp.csv"#is previous results but with some replacements
path_new = "data\\results\\new_results.csv"
if not os.path.exists("data\\results\\dedupe_report.csv"):
dupes=pd.DataFrame(columns=["ID","source original", "source new","title original","title new","abstract original","abstract new","author original","author new","link original","link new", "date added"])
dupes.to_csv("data\\results\\dedupe_report.csv",index=False)
# alternative: if you have problems with relative and absolute paths, the os module can grab the current working directory's absolute path
dedupe_me(path, 95, 90, path_new) # use this when adding data. creates the file "results/all_results_updated.csv"
#
# Code below finds screener conflicts and the total number of previous duplicates. Note: the column 'initial_decision' from all_results must be retained in order to run this code!
#
# print("Finding screener-conflicts within all_results_tmp.csv...")
# wos=pd.read_csv("data\\results\\all_results_tmp.csv")
# dedupe_loop_within(wos, "data\\results\\new_and_deduped.csv", 95, 90)

# ==== transforms/detection/functional.py (repo: qixuxiang/Pytorch_Lightweight_Network, license: MIT) ====
from typing import List, Dict, Sequence, Union, Tuple
from numbers import Number
import random
import numpy as np
from toolz import curry
from toolz.curried import get
from common import _tuple
__all__ = [
"resize", "resized_crop", "center_crop", "drop_boundary_bboxes",
"to_absolute_coords", "to_percent_coords", "hflip", "hflip2",
"vflip", "vflip2", "random_sample_crop", "move"
]
def iou_1m(box, boxes):
r"""
Calculates one-to-many ious.
Parameters
----------
box : ``Sequences[Number]``
A bounding box.
boxes : ``array_like``
Many bounding boxes.
Returns
-------
ious : ``array_like``
IoUs between the box and boxes.
"""
xi1 = np.maximum(boxes[..., 0], box[0])
yi1 = np.maximum(boxes[..., 1], box[1])
xi2 = np.minimum(boxes[..., 2], box[2])
yi2 = np.minimum(boxes[..., 3], box[3])
xdiff = xi2 - xi1
ydiff = yi2 - yi1
inter_area = xdiff * ydiff
box_area = (box[2] - box[0]) * (box[3] - box[1])
boxes_area = (boxes[..., 2] - boxes[..., 0]) * \
(boxes[..., 3] - boxes[..., 1])
union_area = boxes_area + box_area - inter_area
iou = inter_area / union_area
iou[xdiff < 0] = 0
iou[ydiff < 0] = 0
return iou
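
# Worked example: boxes [0, 0, 2, 2] and [1, 1, 3, 3] overlap on a 1x1 square,
# so IoU = 1 / (4 + 4 - 1) = 1/7 ≈ 0.143. Uncomment to try:
# iou_1m(np.array([0, 0, 2, 2]), np.array([[1, 1, 3, 3]]))  # -> array([0.14285714])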
def random_sample_crop(anns, size, min_iou, min_ar, max_ar, max_attempts=50):
    """
    Randomly sample a crop window over the image and keep the annotations whose
    centers fall inside it.
    A window of random size (0.3 to 1.0 of the original edge lengths) and random
    aspect ratio is sampled repeatedly; a sample is accepted only if every box
    overlaps it with IoU >= `min_iou` and at least one box center lies inside.
    Returns ``(kept_anns, l, t, w, h)``, or None if no valid crop is found.
Parameters
----------
anns : ``List[Dict]``
Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
size : ``Sequence[int]``
Size of the original image.
min_iou : ``float``
Minimal iou between the objects and the cropped image.
min_ar : ``Number``
Minimal aspect ratio.
max_ar : ``Number``
Maximum aspect ratio.
    max_attempts : ``int``
        Maximum number of sampling attempts before giving up.
"""
width, height = size
bboxes = np.stack([ann['bbox'] for ann in anns])
bboxes[:, 2:] += bboxes[:, :2]
    for _ in range(max_attempts):
w = random.uniform(0.3 * width, width)
h = random.uniform(0.3 * height, height)
if h / w < min_ar or h / w > max_ar:
continue
l = random.uniform(0, width - w)
t = random.uniform(0, height - h)
r = l + w
b = t + h
patch = np.array([l, t, r, b])
ious = iou_1m(patch, bboxes)
if ious.min() < min_iou:
continue
centers = (bboxes[:, :2] + bboxes[:, 2:]) / 2.0
mask = (l < centers[:, 0]) & (centers[:, 0] < r) & (
t < centers[:, 1]) & (centers[:, 1] < b)
if not mask.any():
continue
indices = np.nonzero(mask)[0].tolist()
return get(indices, anns), l, t, w, h
return None
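
# Hedged sketch (the annotation is made up): boxes arrive as [l, t, w, h], the
# window is retried up to `max_attempts` times, and None is a possible result.
# anns = [{"bbox": [10, 10, 40, 40]}]
# result = random_sample_crop(anns, size=(100, 100), min_iou=0.5, min_ar=0.5, max_ar=2.0)
# if result is not None:
#     kept_anns, l, t, w, h = result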
@curry
def resized_crop(anns, left, upper, width, height, output_size, min_area_frac):
anns = crop(anns, left, upper, width, height, min_area_frac)
size = (width, height)
# if drop:
# anns = drop_boundary_bboxes(anns, size)
anns = resize(anns, size, output_size)
return anns
@curry
def drop_boundary_bboxes(anns, size):
r"""
Drop bounding boxes whose centers are out of the image boundary.
Parameters
----------
anns : ``List[Dict]``
Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
size : ``Sequence[int]``
Size of the original image.
"""
width, height = size
new_anns = []
for ann in anns:
l, t, w, h = ann['bbox']
        x = l + w / 2.  # center of a [l, t, w, h] box, not (l + w) / 2
        y = t + h / 2.
if 0 <= x <= width and 0 <= y <= height:
new_anns.append({**ann, "bbox": [l, t, w, h]})
return new_anns
@curry
def center_crop(anns, size, output_size):
r"""
Crops the bounding boxes of the given PIL Image at the center.
Parameters
----------
anns : ``List[Dict]``
Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
size : ``Sequence[int]``
Size of the original image.
output_size : ``Union[Number, Sequence[int]]``
Desired output size of the crop. If size is an int instead of sequence like (w, h),
a square crop (size, size) is made.
"""
output_size = _tuple(output_size, 2)
output_size = tuple(int(x) for x in output_size)
w, h = size
    tw, th = output_size  # output_size is (w, h), matching the docstring
    upper = int(round((h - th) / 2.))
    left = int(round((w - tw) / 2.))
    return crop(anns, left, upper, tw, th)  # crop expects (..., width, height)
@curry
def crop(anns, left, upper, width, height, minimal_area_fraction=0.25):
r"""
Crop the bounding boxes of the given PIL Image.
Parameters
----------
anns : ``List[Dict]``
Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
left: ``int``
Left pixel coordinate.
upper: ``int``
Upper pixel coordinate.
width: ``int``
Width of the cropped image.
height: ``int``
Height of the cropped image.
minimal_area_fraction : ``int``
Minimal area fraction requirement.
"""
new_anns = []
for ann in anns:
l, t, w, h = ann['bbox']
area = w * h
l -= left
t -= upper
if l + w >= 0 and l <= width and t + h >= 0 and t <= height:
if l < 0:
w += l
l = 0
if t < 0:
h += t
t = 0
w = min(width - l, w)
h = min(height - t, h)
if w * h < area * minimal_area_fraction:
continue
new_anns.append({**ann, "bbox": [l, t, w, h]})
return new_anns
@curry
def resize(anns, size, output_size):
"""
Parameters
----------
anns : List[Dict]
Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
size : Sequence[int]
Size of the original image.
output_size : Union[Number, Sequence[int]]
Desired output size. If size is a sequence like (w, h), the output size will be matched to this.
If size is an int, the smaller edge of the image will be matched to this number maintaing
the aspect ratio. i.e, if width > height, then image will be rescaled to
(output_size * width / height, output_size)
"""
w, h = size
if isinstance(output_size, int):
if (w <= h and w == output_size) or (h <= w and h == output_size):
return anns
if w < h:
ow = output_size
sw = sh = ow / w
else:
oh = output_size
sw = sh = oh / h
else:
ow, oh = output_size
sw = ow / w
sh = oh / h
new_anns = []
for ann in anns:
bbox = list(ann['bbox'])
bbox[0] *= sw
bbox[1] *= sh
bbox[2] *= sw
bbox[3] *= sh
new_anns.append({**ann, "bbox": bbox})
return new_anns
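
# Worked example: resizing from (200, 100) to (100, 50) scales by (0.5, 0.5),
# so a bbox [20, 10, 40, 30] becomes [10.0, 5.0, 20.0, 15.0]:
# resize([{"bbox": [20, 10, 40, 30]}], size=(200, 100), output_size=(100, 50))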
@curry
def to_percent_coords(anns, size):
r"""
    Convert absolute coordinates of the bounding boxes to percent coordinates.
Parameters
----------
anns : ``List[Dict]``
Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
size : ``Sequence[int]``
Size of the original image.
"""
w, h = size
new_anns = []
for ann in anns:
bbox = list(ann['bbox'])
bbox[0] /= w
bbox[1] /= h
bbox[2] /= w
bbox[3] /= h
new_anns.append({**ann, "bbox": bbox})
return new_anns
@curry
def to_absolute_coords(anns, size):
r"""
    Convert percent coordinates of the bounding boxes to absolute coordinates.
Parameters
----------
anns : ``List[Dict]``
Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
size : ``Sequence[int]``
Size of the original image.
"""
w, h = size
new_anns = []
for ann in anns:
bbox = list(ann['bbox'])
bbox[0] *= w
bbox[1] *= h
bbox[2] *= w
bbox[3] *= h
new_anns.append({**ann, "bbox": bbox})
return new_anns
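
# Round-trip sanity check: the percent and absolute conversions are inverses.
# anns = [{"bbox": [20, 10, 40, 30]}]
# pct = to_percent_coords(anns, (200, 100))  # bbox -> [0.1, 0.1, 0.2, 0.3]
# to_absolute_coords(pct, (200, 100))        # bbox -> [20.0, 10.0, 40.0, 30.0]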
@curry
def hflip(anns, size):
"""
Horizontally flip the bounding boxes of the given PIL Image.
Parameters
----------
anns : ``List[Dict]``
Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
size : ``Sequence[int]``
Size of the original image.
"""
w, h = size
new_anns = []
for ann in anns:
bbox = list(ann['bbox'])
bbox[0] = w - (bbox[0] + bbox[2])
new_anns.append({**ann, "bbox": bbox})
return new_anns
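
# Worked example: for image width 100, a box with l=10, w=30 flips to
# l = 100 - (10 + 30) = 60; t, w and h are unchanged.
# hflip([{"bbox": [10, 5, 30, 20]}], (100, 50))  # -> [{"bbox": [60, 5, 30, 20]}]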
@curry
def hflip2(anns, size):
"""
Horizontally flip the bounding boxes of the given PIL Image.
Parameters
----------
anns : ``List[Dict]``
Sequences of annotation of objects, containing `bbox` of [l, t, r, b].
size : ``Sequence[int]``
Size of the original image.
"""
w, h = size
new_anns = []
for ann in anns:
bbox = list(ann['bbox'])
l = bbox[0]
bbox[0] = w - bbox[2]
bbox[2] = w - l
new_anns.append({**ann, "bbox": bbox})
return new_anns
@curry
def vflip(anns, size):
"""
Vertically flip the bounding boxes of the given PIL Image.
Parameters
----------
anns : ``List[Dict]``
Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
size : ``Sequence[int]``
Size of the original image.
"""
w, h = size
new_anns = []
for ann in anns:
bbox = list(ann['bbox'])
bbox[1] = h - (bbox[1] + bbox[3])
new_anns.append({**ann, "bbox": bbox})
return new_anns
@curry
def vflip2(anns, size):
r"""
Vertically flip the bounding boxes of the given PIL Image.
Parameters
----------
anns : ``List[Dict]``
        Sequences of annotation of objects, containing `bbox` of [l, t, r, b].
size : ``Sequence[int]``
Size of the original image.
"""
w, h = size
new_anns = []
for ann in anns:
bbox = list(ann['bbox'])
t = bbox[1]
bbox[1] = h - bbox[3]
bbox[3] = h - t
new_anns.append({**ann, "bbox": bbox})
return new_anns
@curry
def move(anns, x, y):
r"""
Move the bounding boxes by x and y.
Parameters
----------
anns : ``List[Dict]``
Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
x : ``Number``
How many to move along the horizontal axis.
y : ``Number``
How many to move along the vertical axis.
"""
new_anns = []
for ann in anns:
bbox = list(ann['bbox'])
bbox[0] += x
bbox[1] += y
new_anns.append({**ann, "bbox": bbox})
return new_anns

# ==== src/robust_laplacian/core.py (repo: nmwsharp/robust-laplacians-py, license: MIT) ====
import numpy as np
import robust_laplacian_bindings as rlb
def mesh_laplacian(verts, faces, mollify_factor=1e-5):
## Validate input
if type(verts) is not np.ndarray:
raise ValueError("`verts` should be a numpy array")
if (len(verts.shape) != 2) or (verts.shape[1] != 3):
raise ValueError("`verts` should have shape (V,3), shape is " + str(verts.shape))
if type(faces) is not np.ndarray:
raise ValueError("`faces` should be a numpy array")
if (len(faces.shape) != 2) or (faces.shape[1] != 3):
raise ValueError("`faces` should have shape (F,3), shape is " + str(faces.shape))
## Call the main algorithm from the bindings
L, M = rlb.buildMeshLaplacian(verts, faces, mollify_factor)
## Return the result
return L, M
def point_cloud_laplacian(points, mollify_factor=1e-5, n_neighbors=30):
## Validate input
if type(points) is not np.ndarray:
raise ValueError("`points` should be a numpy array")
if (len(points.shape) != 2) or (points.shape[1] != 3):
raise ValueError("`points` should have shape (V,3), shape is " + str(points.shape))
## Call the main algorithm from the bindings
L, M = rlb.buildPointCloudLaplacian(points, mollify_factor, n_neighbors)
## Return the result
return L, M
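
# Hedged usage sketch (assumes the compiled bindings are importable); both
# helpers return the sparse Laplacian L and mass matrix M:
# import numpy as np
# points = np.random.rand(1000, 3)
# L, M = point_cloud_laplacian(points)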

# ==== sparkdq/models/dbscan/DBSCAN.py (repo: PasaLab/SparkDQ, license: Apache-2.0) ====
from operator import add
import numpy as np
from pyspark.sql.types import StructField, StructType, IntegerType
from scipy.spatial.distance import euclidean
import sklearn.cluster as skc
from sparkdq.conf.Context import Context
from sparkdq.models.CommonUtils import DEFAULT_CLUSTER_COL, DEFAULT_INDEX_COL
from sparkdq.models.dbscan.ClusterAggregator import ClusterAggregator
from sparkdq.models.dbscan.KDPartitioner import KDPartitioner
class DBSCAN:
def __init__(self, eps=0.5, min_pts=5, dist_type="euclidean", max_partitions=5, prediction_col=DEFAULT_CLUSTER_COL):
self._eps = eps
self._min_pts = min_pts
self._dist_type = dist_type
self._max_partitions = max_partitions
self._prediction_col = prediction_col
def set_params(self, eps=0.5, min_pts=5, dist_type="euclidean", max_partitions=5,
prediction_col=DEFAULT_CLUSTER_COL):
self._eps = eps
self._min_pts = min_pts
self._dist_type = dist_type
self._max_partitions = max_partitions
self._prediction_col = prediction_col
def transform(self, data, columns, index_col=DEFAULT_INDEX_COL):
total_columns = [index_col] + columns
index_type = data.schema[index_col]
rdd = data.select(*total_columns).rdd.map(lambda row: (row[0], np.array(row[1:])))
partitioner = KDPartitioner(rdd, max_partitions=self._max_partitions)
bounding_boxes = partitioner.get_bounding_boxes()
expanded_boxes = {}
# create neighbors
neighbors = {}
new_data = rdd.context.emptyRDD()
for label, box in bounding_boxes.items():
expanded_box = box.expand(2 * self._eps)
expanded_boxes[label] = expanded_box
            # bind the loop variables via default args: Spark transformations are lazy,
            # so a plain closure would capture only the final `expanded_box` and `label`
            neighbors[label] = rdd.filter(lambda row, box=expanded_box: box.contains(row[1])) \
                .map(lambda row, lb=label: ((row[0], lb), row[1]))
new_data = new_data.union(neighbors[label])
rdd = new_data
rdd = rdd.map(lambda row: (row[0][1], (row[0][0], row[1])))\
.partitionBy(len(partitioner.get_partitions()))\
.map(lambda row: ((row[1][0], row[0]), row[1][1]))
if self._dist_type == "euclidean":
params = {"eps": self._eps, "min_samples": self._min_pts, "metric": euclidean}
else:
raise Exception("unsupported metric type {}".format(self._dist_type))
rdd = rdd.mapPartitions(lambda iterable: dbscan_partition(iterable, params))
# remap cluster ids
labeled_points = rdd.groupByKey()
labeled_points.cache()
mapper = labeled_points.aggregate(ClusterAggregator(), add, add)
bc_forward_mapper = rdd.context.broadcast(mapper.forward)
rdd = labeled_points.map(lambda x: map_cluster_id(x, bc_forward_mapper)).sortByKey()
# convert rdd to df
tmp_schema = StructType([
index_type,
StructField(DEFAULT_CLUSTER_COL, IntegerType(), False)
])
tmp_df = Context().spark.createDataFrame(rdd, tmp_schema)
return data.join(tmp_df, on=index_col, how="inner")
def dbscan_partition(iterable, params):
"""
Perform a DBSCAN on a given partition
:param iterable:
:param params:
:return:
"""
data = []
for x in iterable:
data.append(x)
if len(data) > 0:
x = np.array([row[1] for row in data])
parts = [row[0][1] for row in data]
y = np.array([row[0][0] for row in data])
model = skc.DBSCAN(**params)
c = model.fit_predict(x)
for i in range(len(c)):
yield (y[i], (parts[i], c[i]))
def map_cluster_id(row_id_labels, bc_forward_mapper):
row_id = int(row_id_labels[0])
labels = []
for label in row_id_labels[1]:
labels.append(label)
cluster_id = next(iter(labels))
cluster_dict = bc_forward_mapper.value
if (cluster_id[1] != -1) and (cluster_id in cluster_dict):
return row_id, int(cluster_dict[cluster_id])
else:
return row_id, -1
if __name__ == "__main__":
pass
# spark = Context().spark
# rdd = spark.sparkContext.parallelize([
# (1, "A", 19, 181, 67),
# (2, "C", 17, 179, 67),
# (3, 'E', 18, 180, 68),
# (4, 'E', 29, 180, 68),
# (5, 'E', 18, 180, 68),
# (6, 'E', 18, 180, 68),
# (7, 'E', 18, 180, 68),
# (8, 'E', 18, -180, 68),
# (9, 'F', 28, 21, 7),
# (10, 'F', 28, 22, 8),
# (11, 'F', 28, 22, 8),
# (12, 'F', 28, 22, 8),
# (13, 'F', 28, 22, 8),
# (14, 'F', 28, 23, 7),
# ])
# from pyspark.sql.types import StructType, StructField, LongType, StringType, IntegerType
#
# schema = StructType([
# StructField("id", LongType(), True),
# StructField("name", StringType(), True),
# StructField("age", LongType(), True),
# StructField("height", IntegerType(), True),
# StructField("weight", IntegerType(), True)
# ])
# df = spark.createDataFrame(rdd, schema)
#
# db = DBSCAN(max_partitions=3)
# db.fit(df, ["height", "weight"], "id")
# print(db.detect())

# ==== hycu-demo/hycu-centos-8.py (repo: halsayed/calm, license: MIT) ====
from calm.dsl.builtins import basic_cred, CalmTask, action
from calm.dsl.builtins import SimpleDeployment, SimpleBlueprint
from calm.dsl.builtins import read_provider_spec
from calm.dsl.builtins import CalmVariable
from calm.dsl.store import Secret
CENTOS = basic_cred('nutanix', 'nutanix/4u', name='CENTOS', default=True)
HYCU_CRED = basic_cred('admin', 'admin', name='HYCU_CRED', default=False)
class CentosDeployment(SimpleDeployment):
provider_spec = read_provider_spec('specs/centos-8.yaml')
os_type = 'Linux'
@action
def __create__(self):
CalmTask.Exec.escript(name='add_vm_to_hycu', filename='scripts/add_vm_to_hycu.py')
@action
def __install__(self):
# CalmTask.Exec.ssh(name='Update CentOS', script='sudo yum -y --quiet update')
CalmTask.Exec.ssh(name='Update CentOS', script='echo "hello world"')
class HYCUCentOS8(SimpleBlueprint):
credentials = [CENTOS, HYCU_CRED]
deployments = [CentosDeployment]
VM_NAME = CalmVariable.Simple.string('CentOS-VM', label='VM Name', runtime=True)
# HYCU IP address, assuming default port for API access (8443)
HYCU_IP = CalmVariable.Simple.string('10.21.21.100', runtime=False, is_hidden=True)
HYCU_PORT = CalmVariable.Simple.string('8443', runtime=False, is_hidden=True)
def main():
print(HYCUCentOS8.json_dumps(pprint=True))
if __name__ == '__main__':
    main()

# ==== lc/0101_SymmetricTree.py (repo: xiangshiyin/coding-challenge, license: Apache-2.0) ====
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
# class Solution:
# def isSymmetric(self, root: TreeNode) -> bool:
# '''
# long, iterative solution
# '''
# if not root.left and not root.right:
# return True
# elif not root.left or not root.right:
# return False
# # BFS search left and right, compare the results
# from collections import deque
# q1 = deque()
# q2 = deque()
# q1.append(root.left)
# q2.append(root.right)
# while q1 and q2:
# n1 = q1.popleft()
# n2 = q2.popleft()
# if n1.val != n2.val or len(q1)!=len(q2):
# return False
# # add children
# if n1.right:
# q1.append(n1.right)
# elif n1.val != 200:
# q1.append(TreeNode(200))
# if n1.left:
# q1.append(n1.left)
# elif n1.val != 200:
# q1.append(TreeNode(200))
# if n2.left:
# q2.append(n2.left)
# elif n2.val != 200:
# q2.append(TreeNode(200))
# if n2.right:
# q2.append(n2.right)
# elif n2.val != 200:
# q2.append(TreeNode(200))
# if len(q1) != len(q2):
# return False
# return True
# class Solution:
# def isSymmetric(self, root: TreeNode) -> bool:
# '''
# recursive solution
# '''
# def mirror(n1, n2):
# if not n1 and not n2:
# return True
# if not n1 or not n2:
# return False
# return (n1.val == n2.val) & mirror(n1.right, n2.left) & mirror(n1.left, n2.right)
# return mirror(root, root)
class Solution:
def isSymmetric(self, root: TreeNode) -> bool:
'''
another iterative solution
'''
from collections import deque
q = deque()
q.append(root)
q.append(root)
while q:
n1 = q.popleft()
n2 = q.popleft()
if not n1 and not n2:
continue
if not n1 or not n2:
return False
if n1.val != n2.val:
return False
q.append(n1.left)
q.append(n2.right)
q.append(n1.right)
q.append(n2.left)
return True
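
# Quick commented check (uncomment the TreeNode definition above first):
# root = TreeNode(1, TreeNode(2, None, TreeNode(3)), TreeNode(2, TreeNode(3)))
# print(Solution().isSymmetric(root))  # True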

# ==== checkboxes2.py (repo: PiyushKumar186/programming, license: MIT) ====
#!/usr/bin/python2
from Tkinter import *
class Checkbar(Frame):
def __init__(self,parent=None,picks=[],side=LEFT,anchor=W):
Frame.__init__(self,parent)
self.vars = []
for pick in picks:
var = IntVar()
chk = Checkbutton(self,text=pick,variable=var)
chk.pack(side=side,anchor=anchor,expand=YES)
self.vars.append(var)
def state(self):
return map((lambda var:var.get()),self.vars)
if __name__ == '__main__':
root = Tk()
lng = Checkbar(root,['Python','Ruby','Perl','C++'])
tgl = Checkbar(root,['English','German'])
lng.pack(side=TOP,fill=X)
tgl.pack(side=TOP)
lng.config(relief=GROOVE,bd=2)
def allstates():
print(list(lng.state()),list(tgl.state()))
Button(root,text='Quit',command=root.quit).pack(side=RIGHT)
Button(root, text='Peek',command=allstates).pack(side=RIGHT)
    root.mainloop()

# ==== code/Experiments/Lasagne_examples/modelzoo/cifar10_nin.py (repos: matthijsvk/convNets, nestyme/Recipes; licenses: Apache-2.0, MIT) ====
# Network in Network CIFAR10 Model
# Original source: https://gist.github.com/mavenlin/e56253735ef32c3c296d
# License: unknown
# Download pretrained weights from:
# https://s3.amazonaws.com/lasagne/recipes/pretrained/cifar10/model.pkl
from lasagne.layers import InputLayer, DropoutLayer, FlattenLayer
from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
from lasagne.layers import Pool2DLayer as PoolLayer
def build_model():
net = {}
net['input'] = InputLayer((None, 3, 32, 32))
net['conv1'] = ConvLayer(net['input'],
num_filters=192,
filter_size=5,
pad=2,
flip_filters=False)
net['cccp1'] = ConvLayer(
net['conv1'], num_filters=160, filter_size=1, flip_filters=False)
net['cccp2'] = ConvLayer(
net['cccp1'], num_filters=96, filter_size=1, flip_filters=False)
net['pool1'] = PoolLayer(net['cccp2'],
pool_size=3,
stride=2,
mode='max',
ignore_border=False)
net['drop3'] = DropoutLayer(net['pool1'], p=0.5)
net['conv2'] = ConvLayer(net['drop3'],
num_filters=192,
filter_size=5,
pad=2,
flip_filters=False)
net['cccp3'] = ConvLayer(
net['conv2'], num_filters=192, filter_size=1, flip_filters=False)
net['cccp4'] = ConvLayer(
net['cccp3'], num_filters=192, filter_size=1, flip_filters=False)
net['pool2'] = PoolLayer(net['cccp4'],
pool_size=3,
stride=2,
mode='average_exc_pad',
ignore_border=False)
net['drop6'] = DropoutLayer(net['pool2'], p=0.5)
net['conv3'] = ConvLayer(net['drop6'],
num_filters=192,
filter_size=3,
pad=1,
flip_filters=False)
net['cccp5'] = ConvLayer(
net['conv3'], num_filters=192, filter_size=1, flip_filters=False)
net['cccp6'] = ConvLayer(
net['cccp5'], num_filters=10, filter_size=1, flip_filters=False)
net['pool3'] = PoolLayer(net['cccp6'],
pool_size=8,
mode='average_exc_pad',
ignore_border=False)
net['output'] = FlattenLayer(net['pool3'])
return net
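
# Hedged usage sketch (assumes Lasagne with cuDNN layers is available; the pickle
# layout with a 'param values' key is an assumption about the file linked above):
# import pickle
# import lasagne
# net = build_model()
# with open('model.pkl', 'rb') as f:
#     values = pickle.load(f)['param values']
# lasagne.layers.set_all_param_values(net['output'], values)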

# ==== src/backend/models/placeModel.py (repo: oasis-art-project/oasis-server, license: Artistic-2.0) ====
# -*- coding: utf-8 -*-
"""
Part of the OASIS ART PROJECT - https://github.com/orgs/oasis-art-project
Copyright (c) 2019-22 TEAM OASIS
License Artistic-2.0
"""
from marshmallow import fields, validate, post_dump
from sqlalchemy.types import ARRAY
from src.backend.extensions import db
from src.backend.models.model import SurrogatePK, BaseSchema
from src.backend.models.userModel import UserSchema, User
from src.backend.controllers.controller import build_image_list
class Place(SurrogatePK, db.Model):
__tablename__ = 'places'
host_id = db.Column(db.Integer, db.ForeignKey('users.id'))
name = db.Column(db.String(100), nullable=False)
description = db.Column(db.String(1000), nullable=True)
address = db.Column(db.String(300), nullable=False)
location = db.Column(db.String(12), nullable=True)
homepage = db.Column(db.String(100), nullable=True)
instagram = db.Column(db.String(30), nullable=True)
facebook = db.Column(db.String(30), nullable=True)
matterport_link = db.Column(db.String(15), nullable=True)
active = db.Column(db.Boolean, nullable=True)
creation_date = db.Column(db.TIMESTAMP, server_default=db.func.current_timestamp(), nullable=False)
host = db.relationship('User', backref=db.backref('places'))
def __init__(self, **kwargs):
super(Place, self).__init__(**kwargs)
class PlaceSchema(BaseSchema):
# Overwritten fields
host = fields.Nested(UserSchema, only=('id',), required=True)
name = fields.Str(required=True, validate=validate.Length(max=100))
description = fields.Str(validate=validate.Length(max=1000))
address = fields.Str(required=True, validate=validate.Length(max=300))
location = fields.Str(allow_none=True, validate=validate.Length(max=12))
homepage = fields.Str(allow_none=True, validate=validate.Length(max=100))
instagram = fields.Str(allow_none=True, validate=validate.Length(max=30))
facebook = fields.Str(allow_none=True, validate=validate.Length(max=30))
matterport_link = fields.Str(validate=validate.Length(max=15))
active = fields.Boolean(allow_none=True)
class Meta:
# BaseSchema automatically generates fields based on the model
model = Place
# Since according to Nested schema loading is only with ID,
# dump loads other non-sensitive data from DB, enumerated below
@post_dump
def get(self, data):
if 'host' in data:
host = User.get_by_id(data['host']['id'])
if not host:
raise ValueError
d = UserSchema(only=('id', 'tags', 'firstName', 'lastName', 'bio', 'files', 'homepage', 'instagram', 'youtube', 'showChat', 'confirmed', 'active')).dump(host).data
data['host'] = d
if 'files' in data:
data['fullImages'] = build_image_list('place', data['id'], data['files'], 'f')
data['prevImages'] = build_image_list('place', data['id'], data['files'], 'p')
return data

# ==== torcharc/module/perceiver_io/preprocessor.py (repo: kengz/torcharc, license: MIT) ====
from einops import repeat, rearrange
from torch import nn
from torcharc import net_util
import math
import pydash as ps
import sys
import torch
def build_learned_pos_encoding(max_seq_len: int, embed_dim: int):
'''Build learned positional encoding with Deepmind's init'''
# learned position encoding
pos_encoding = nn.Parameter(torch.empty(max_seq_len, embed_dim))
nn.init.trunc_normal_(pos_encoding, mean=0.0, std=0.02) # Deepmind's init
return pos_encoding
class Identity(nn.Identity):
def __init__(self, in_shape: list):
super().__init__()
self.in_shape = in_shape
self.out_shape = in_shape
class TextPreprocessor(nn.Module):
'''Standard text preprocessing for transformer by embedding a tokenized tensor, then adding a learned position encoding.'''
def __init__(self, vocab_size: int, embed_dim: int, max_seq_len: int = 512, **_kwargs):
super().__init__()
self.embedding = nn.Embedding(vocab_size, embed_dim)
# learned position encoding
self.pos_encoding = build_learned_pos_encoding(max_seq_len, embed_dim)
self.scale = embed_dim ** 0.5
self.out_shape = [max_seq_len, embed_dim]
def forward(self, x: torch.Tensor) -> torch.Tensor:
batch, seq_len = x.shape
pe = repeat(self.pos_encoding[:seq_len], '... -> b ...', b=batch) # repeat for batch
return self.embedding(x) * self.scale + pe
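
# Shape sketch (the sizes are hypothetical): token ids of shape (batch, seq_len)
# come out embedded as (batch, seq_len, embed_dim).
# tp = TextPreprocessor(vocab_size=1000, embed_dim=64, max_seq_len=128)
# out = tp(torch.randint(0, 1000, (2, 16)))  # -> shape (2, 16, 64)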
class FourierPreprocessor(nn.Module):
'''
Spatial input preprocessor for PerceiverEncoder using Fourier positional encoding for any dimensions of spatial tensor with channels, i.e. shape (x, y, ..., c)
This builds Fourier pos_encoding for coordinates of N-dimensional spatial data as a meshgrid
e.g. for image of shape (x, y, c) -> get a meshgrid of shape (x, y, d=2), where each slice at d is a meshgrid of a dimension
then generate the sin, cos frequencies, stack [pos, sin, cos],
then flatten the meshgrid's spatial dimension into 1D to get the final pos_encoding of shape (x*y*..., d*(2*num_freq_bands+1)).
When encoding, this flattens the spatial dimensions of input into 1D, e.g. (x, y, ..., c) into (x*y*..., c), then concat it with the pos_encoding, so the final output tensor is a stack of the [flattened input with channels, pos_encoding with d*(2*num_freq_bands+1).
The output shape is (x*y*..., out_dim), where out_dim = (c+d*(2*num_freq_bands+1))
@example
batch = 2
in_shape = [64, 3]
num_freq_bands = 32
x = torch.rand(batch, *in_shape)
module = FourierPreprocessor(in_shape, num_freq_bands)
out = module(x)
assert [math.prod(in_shape[:-1]), module.out_dim] == module.out_shape
assert list(out.shape) == [batch, *module.out_shape]
'''
def __init__(self, in_shape: list, num_freq_bands: int, max_reso: list = None, cat_pos: bool = True):
super().__init__()
*self.spatial_shape, num_c = self.in_shape = list(in_shape) # shape excluding batch
self.num_freq_bands = num_freq_bands
self.cat_pos = cat_pos
# create fourier positional encoding
pos = self.build_positions()
self.pos_encoding = self.build_pos_encoding(pos, max_reso=max_reso)
flat_dim = math.prod(in_shape[:-1])
self.out_dim = num_c + self.get_pos_encoding_dim() # in_dim to PerceiverEncoder; we stack pos_encoding with top of channels
self.out_shape = [flat_dim, self.out_dim]
def build_positions(self, start: float = -1.0, end: float = 1.0):
'''Build spatial coordinates as a meshgrid, i.e. coordinates laid out such that values along the channel is a point in coordinate, e.g. shape = (x, y, 2)'''
x_y = [torch.linspace(start, end, steps=s) for s in self.spatial_shape]
return torch.stack(torch.meshgrid(*x_y), dim=len(self.spatial_shape))
def build_pos_encoding(self, pos: torch.Tensor, max_reso: list = None) -> torch.Tensor:
'''
Generate a Fourier frequency position encoding with linear spacing.
@param pos: meshgrid position coordinates of shape (x, y, d=len(shape)), e.g. (x, y, 2), or (x, y, z, 3) etc. in general
@param max_reso: maximum resolution (pixels) per dimension. Useful when input such as picture varies in size
@param cat_pos: whether to concat pos before the fourier encoding
@return position encodings tensor of shape (x, y,... d*(2*num_freq_bands+1))
'''
max_reso = max_reso or pos.shape[:-1]
assert len(max_reso) == len(pos.shape[:-1]), f'max_reso len(shape) must match pos len(shape), but got {len(max_reso)} instead of {len(pos.shape[:-1])}'
freq_bands = torch.stack([torch.linspace(1.0, max_r / 2.0, steps=self.num_freq_bands) for max_r in max_reso])
pos_freqs = rearrange(torch.einsum('...d,df->d...f', pos, freq_bands), 'd ... f -> ... (d f)')
encodings = [pos] if self.cat_pos else []
encodings += [torch.sin(math.pi * pos_freqs), torch.cos(math.pi * pos_freqs)]
spatial_encoding = torch.cat(encodings, dim=-1) # shape (x, y,... d*(2*num_freq_bands+1))
# flatten spatial dimensions into 1D
pos_encoding = rearrange(spatial_encoding, '... c -> (...) c')
return nn.Parameter(pos_encoding)
def get_pos_encoding_dim(self) -> int:
return len(self.spatial_shape) * (2 * self.num_freq_bands + int(self.cat_pos))
def forward(self, x: torch.Tensor) -> torch.Tensor:
batch, *x_in_shape = x.shape
assert x_in_shape == self.in_shape, f'input shape {x_in_shape} != expected {self.in_shape}'
pos_encoding = repeat(self.pos_encoding, '... -> b ...', b=batch) # repeat for batch
x = rearrange(x, 'b ... c -> b (...) c') # flatten spatial dimensions into 1D
return torch.cat([x, pos_encoding], dim=-1) # stack 1D input with pos_encoding
class MultimodalPreprocessor(nn.Module):
'''
Multimodal preprocessor for multimodal input {mode: x}
This recursively builds a preprocessor for each mode, and applies them to the multimodal inputs in order.
To combine the multimodal preprocessed outputs,
first note that each output is a 2D array of (max_seq_len, channel) or (M, C) of Perceiver input array.
They are padded with trainable position encoding (1 position per mode, broadcasted) to have the same common_channels (max_channels + pad_channels), before getting concatenated along the sequences for transformer to attend to.
The output shape is [total_seq_len, common_channels]
'''
def __init__(self, in_shapes: dict, arc: dict, pad_channels: int = 2):
super().__init__()
self.preprocessors = nn.ModuleDict({
mode: net_util.build_component(arc, {'in_shape': in_shape}, mode, sys.modules[__name__])
for mode, in_shape in in_shapes.items()
})
self.out_shapes = {mode: preprocessor.out_shape for mode, preprocessor in self.preprocessors.items()}
total_seq_len = ps.sum_by(self.out_shapes, ps.head)
max_channels = ps.max_by(self.out_shapes, ps.last)[-1]
common_channels = max_channels + pad_channels
self.pos_encodings = nn.ParameterDict({
mode: build_learned_pos_encoding(1, common_channels - out_shape[-1])
for mode, out_shape in self.out_shapes.items()
})
self.out_shape = [total_seq_len, common_channels]
def pos_encoding_pad(self, mode: str, out: torch.Tensor) -> torch.Tensor:
'''
Pad output to ensure they result in shape [batch, seq_len, common_channels]
The padding channels ensured by pad_channels are used to stack learned pos_encoding of shape [1, common_channels - out_dim] (broadcasted) for each mode,
i.e. each mode has 1 encoded position for transformer to differentiate
'''
pos_encoding = self.pos_encodings[mode]
batch, seq_len, _channel = out.shape
padding = pos_encoding.broadcast_to((batch, seq_len, pos_encoding.shape[-1]))
return torch.cat([out, padding], dim=2) # concat along channel to result in common_channels
def forward(self, xs: dict) -> torch.Tensor:
outs = []
for mode, x in xs.items():
out = self.preprocessors[mode](x)
padded_out = self.pos_encoding_pad(mode, out)
outs.append(padded_out)
# NOTE concat along seq_len to result in [total_seq_len, common_channels] since transformer attention is along seq_len, not channel
return torch.cat(outs, dim=1)

# ==== utils/datafields.py (repo: edgeless634/bilibili_spider, license: MIT) ====
import os
import random
import logging
import threading
base_path = os.path.dirname(os.path.dirname(__file__))
base_path = os.path.join(base_path, "datafield")
if not os.path.exists(base_path):
os.mkdir(base_path)
def get_path(fieldname):
return os.path.join(base_path, fieldname)
class DataField:
'''
    Object for operating on the data under datafield/
'''
def __init__(self):
self.fields = set()
self.field_cache = {}
self.field_sent = {}
self.load_field()
self.lock = threading.Lock()
def load_field(self):
'''
        Load the fields that need to be read
'''
for filename in os.listdir(base_path):
if os.path.isfile(get_path(filename)):
continue
self.fields.add(filename)
def new_field(self, fieldname):
'''
        Create a new field
'''
if fieldname in self.fields:
return
abs_path = get_path(fieldname)
if not os.path.exists(abs_path):
os.mkdir(abs_path)
self.fields.add(fieldname)
def load_field_data(self, fieldname):
'''
        Read the data in a field.
        Note: this is not thread-safe; do not use it to read a field that may change.
'''
ret = []
for root, _, files in os.walk(get_path(fieldname)):
for file in files:
with open(os.path.join(root, file), "r", encoding="utf-8") as f:
s = f.read()
ret.append(s)
return ret
def get_field_data(self, fieldname):
'''
        Read data from a field.
        Note: a random line is returned on each call.
'''
with self.lock:
if fieldname not in self.field_sent:
self.field_sent[fieldname] = set()
if fieldname not in self.field_cache or self.field_cache[fieldname] == []:
self.field_cache[fieldname] = "\n".join(self.load_field_data(fieldname)).split("\n")
random.shuffle(self.field_cache[fieldname])
                self.field_cache[fieldname] = [i for i in self.field_cache[fieldname] if i != "" and i not in self.field_sent[fieldname]]  # fixed: membership must be checked in the per-field set, not in the dict of sets
if self.field_cache[fieldname] == []:
return None
ret = self.field_cache[fieldname].pop()
self.field_sent[fieldname].add(ret)
return ret
def save_to_field(self, fieldname, s, filename=None, mode="w"):
'''
        Save a string to a file inside the field.
        Note: append the trailing \n manually.
'''
if filename is None:
for i in range(16, len(s)):
filename = f"{s[:i].__hash__()}.txt"
path = os.path.join(get_path(fieldname), filename)
                if not os.path.exists(path):  # fixed: the os module has no `exist`; use os.path.exists
break
else:
path = os.path.join(get_path(fieldname), filename)
with open(path, mode, encoding="utf-8") as f:
f.write(s)
datafields = DataField()
if __name__ == '__main__':
print(datafields.fields)
print(datafields.get_field_data("up_mid"))
print(datafields.get_field_data("up_mid"))
    print(datafields.get_field_data("up_mid"))

# ==== 0.mccntt/domain-wide/gmail_quickstart.py (repo: mccntt/googleworkspace-python-samples, license: Apache-2.0) ====
# https://docs.microsoft.com/en-us/windows/python/beginners
# https://developers.google.com/identity/protocols/oauth2/service-account#python
from __future__ import print_function
from pathlib import Path
from googleapiclient.discovery import build
from google.oauth2 import service_account
SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']
HOME_PATH = str(Path.home())
SERVICE_ACCOUNT_FILE = HOME_PATH + '/devkey/devhkmci-gmaildomainwide-1d7640a0c6d2.json'
def main():
DELEGATE='aaron.ko@dev.hkmci.com' # Service account will impersonate this user. Must have proper admin privileges in G Suite.
# TARGET='dev.hkmci.com' # Service account wants to access data from this.
credentials = service_account.Credentials.from_service_account_file(SERVICE_ACCOUNT_FILE, scopes=SCOPES)
credentials_delegated = credentials.with_subject(DELEGATE)
service = build('gmail', 'v1', credentials=credentials_delegated)
# Call the Gmail API
results = service.users().getProfile(userId='me').execute()
print(results)
results = service.users().labels().list(userId='me').execute()
print(results)
# labels = results.get('labels', [])
# for label in labels:
# print(label['name'])
# if not labels:
# print('No labels found.')
# else:
# print('Labels:')
# for label in labels:
# print(label['name'])
if __name__ == '__main__':
main()
# [END gmail_quickstart]
| 28.784314 | 130 | 0.706403 | 180 | 1,468 | 5.605556 | 0.505556 | 0.111001 | 0.053518 | 0.035679 | 0.178394 | 0.075322 | 0.075322 | 0.075322 | 0.075322 | 0 | 0 | 0.009016 | 0.168937 | 1,468 | 50 | 131 | 29.36 | 0.818033 | 0.372616 | 0 | 0.111111 | 0 | 0 | 0.151885 | 0.079823 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.222222 | 0 | 0.277778 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |

# ==== code/dpp/distributions/logistic_mixture.py (repo: bsouhaib/qf-tpp, license: MIT) ====
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as td
from torch.distributions import constraints
from dpp.nn import BaseModule, Hypernet
from dpp.utils import clamp_preserve_gradients
def inverse_sigmoid(x):
# Clamp tiny values (<1e-38 for float32)
finfo = torch.finfo(x.dtype)
x = x.clamp(min=finfo.tiny, max=1. - finfo.eps)
return torch.log(x) - torch.log1p(-x)
def logistic_sample(means, log_scales):
if means.shape != log_scales.shape:
raise ValueError("Shapes of means and scales don't match.")
z = torch.rand(means.shape)
return torch.exp(log_scales) * inverse_sigmoid(z) + means
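
# This is inverse-transform sampling: for U ~ Uniform(0, 1),
# mean + scale * log(U / (1 - U)) follows Logistic(mean, scale).
# samples = logistic_sample(torch.zeros(1000), torch.zeros(1000))  # scale = exp(0) = 1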
# Credit: https://github.com/aravindsrinivas/flowpp/blob/master/flows/logistic.py
def logistic_logpdf(x, mean, log_scale):
z = (x - mean) * torch.exp(-log_scale)
return z - log_scale - 2 * F.softplus(z)
def logistic_logcdf(x, mean, log_scale):
z = (x - mean) * torch.exp(-log_scale)
return F.logsigmoid(z)
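
# Sanity check: at x == mean we get z = 0 and logsigmoid(0) = log(0.5),
# i.e. the logistic CDF equals 0.5 at its location parameter.
# logistic_logcdf(torch.tensor(0.), torch.tensor(0.), torch.tensor(0.)).exp()  # -> 0.5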
def mixlogistic_logpdf(x, prior_logits, means, log_scales):
    log_prior = F.log_softmax(prior_logits, dim=-1)
    return torch.logsumexp(
        log_prior + logistic_logpdf(x.unsqueeze(-1), means, log_scales),
        dim=-1
    )

def mixlogistic_logcdf(x, prior_logits, means, log_scales):
    log_prior = F.log_softmax(prior_logits, dim=-1)
    return torch.logsumexp(
        log_prior + logistic_logcdf(x.unsqueeze(-1), means, log_scales),
        dim=-1
    )
class LogisticMixtureDistribution(BaseModule):
    def __init__(self, config, n_components=32, hypernet_hidden_sizes=[64], min_clip=-5., max_clip=3.):
        super().__init__()
        self.n_components = n_components
        self.use_history(config.use_history)
        self.use_embedding(config.use_embedding)
        self.min_clip = min_clip
        self.max_clip = max_clip
        self.hypernet = Hypernet(config,
                                 hidden_sizes=hypernet_hidden_sizes,
                                 param_sizes=[n_components, n_components, n_components])

    def get_params(self, h, emb):
        """Generate model parameters based on the history and embeddings.

        Args:
            h: history embedding, shape [*, rnn_hidden_size]
            emb: sequence embedding, shape [*, embedding_size]

        Returns:
            prior_logits: shape [*, n_components]
            means: shape [*, n_components]
            log_scales: shape [*, n_components]
        """
        if not self.using_history:
            h = None
        if not self.using_embedding:
            emb = None
        prior_logits, means, log_scales = self.hypernet(h, emb)
        # Clamp values that go through exp for numerical stability
        prior_logits = clamp_preserve_gradients(prior_logits, self.min_clip, self.max_clip)
        log_scales = clamp_preserve_gradients(log_scales, self.min_clip, self.max_clip)
        return prior_logits, means, log_scales

    def log_prob(self, y, h=None, emb=None):
        prior_logits, means, log_scales = self.get_params(h, emb)
        return mixlogistic_logpdf(y, prior_logits, means, log_scales)

    def log_cdf(self, y, h=None, emb=None):
        prior_logits, means, log_scales = self.get_params(h, emb)
        return mixlogistic_logcdf(y, prior_logits, means, log_scales)

    def sample(self, n_samples, h=None, emb=None):
        """Draw samples from the model.

        Args:
            n_samples: number of samples to generate.
            h: hidden state, shape [*, rnn_hidden_size]
            emb: sequence embedding, shape [*, embedding_size]

        Returns:
            samples: shape [*, n_samples]
        """
        with torch.no_grad():
            prior_logits, means, log_scales = self.get_params(h, emb)
            # model parameters should have two dimensions for bmm to work
            # first dimensions will be restored later
            prior_logits = prior_logits.view(-1, self.n_components)
            means = means.view(-1, self.n_components)
            log_scales = log_scales.view(-1, self.n_components)

            categorical = td.Categorical(logits=prior_logits)
            z = categorical.sample([n_samples])
            # z has shape [n_samples, *], convert to [*, n_samples]
            dim_order = np.arange(len(prior_logits.shape))
            dim_order = tuple(np.concatenate([dim_order[1:], [0]]))
            z = z.permute(dim_order).contiguous()

            # z_oh has shape [*, n_samples, n_components]
            # convert it to [*, n_components, n_samples] for bmm to work
            z_oh = F.one_hot(z, num_classes=self.n_components).float().transpose(-2, -1)

            # add extra dim to means and log_scales for bmm to work
            means.unsqueeze_(-2)
            log_scales.unsqueeze_(-2)

            # select the correct component for each sample
            means_select = torch.bmm(means, z_oh)
            log_scales_select = torch.bmm(log_scales, z_oh)
            means_select.squeeze_(-2)
            log_scales_select.squeeze_(-2)
            # means_select and log_scales_select have shape [*, n_samples]
            samples = logistic_sample(means_select, log_scales_select)

            # reshape the samples back to the original shape
            if (h is not None):
                first_dims = h.shape[:-1]
            elif (emb is not None):
                first_dims = emb.shape[:-1]
            else:
                first_dims = torch.Size()
            shape = first_dims + torch.Size([n_samples])
            return samples.reshape(shape)
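# Illustrative usage (shapes only; `config` must be a config object compatible
# with BaseModule/Hypernet from this package):
#   dist = LogisticMixtureDistribution(config)
#   log_p = dist.log_prob(y, h=h, emb=emb)  # same leading shape as y
#   samples = dist.sample(100, h=h)         # shape h.shape[:-1] + (100,)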
| 38.986014 | 103 | 0.636233 | 741 | 5,575 | 4.561404 | 0.240216 | 0.069231 | 0.049704 | 0.050592 | 0.277515 | 0.243787 | 0.230769 | 0.20355 | 0.17574 | 0.17574 | 0 | 0.008049 | 0.264574 | 5,575 | 142 | 104 | 39.260563 | 0.816341 | 0.210583 | 0 | 0.125 | 0 | 0 | 0.009161 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.090909 | 0 | 0.340909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b61fd88f4b3a01a3aa6ca746cfeb284296cf724d | 15,173 | py | Python | register/urls.py | LucasHiago/pede_ja | 62609a32d045b167a96be79cc93113d32dcfe917 | [
"MIT"
] | null | null | null | register/urls.py | LucasHiago/pede_ja | 62609a32d045b167a96be79cc93113d32dcfe917 | [
"MIT"
] | null | null | null | register/urls.py | LucasHiago/pede_ja | 62609a32d045b167a96be79cc93113d32dcfe917 | [
"MIT"
] | null | null | null | from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from .views import *
urlpatterns = [
    # Urls for authentication on noruh web
    path('change_password/', RecoverPasswordByApi.as_view(), name='change_password'),
    path('reset_password/complete/', RecoverPasswordByApiComplete.as_view(), name='reset_password_complete'),
    path('login/', Login.as_view(), name='login'),
    path('user/logged/', ListUserLogged.as_view(), name='user_logged_detail'),
    path('user/logged/alter/<int:pk>/', AlterUser.as_view(), name='user_logged_alter'),
    path('', Home.as_view(), name='home'),
    path('terms_and_conditions/', TermsAndContions.as_view(), name='terms_and_conditions'),
    # Url's for Establishments and configurations
    path('establishment/create/', CreateEstablishment.as_view(),
         name='establishment_create'),
    path('establishment/list/all/', ListAllEstablishment.as_view(),
         name='establishment_list_all'),
    path('establishment/search/list/', ListSearchEstablishment.as_view(),
         name='establishment_search_list'),
    path('establishment/configurations/<int:id>/', ConfigurationsEstablishment.as_view(),
         name='establishment_configurations'),
    path('establishment/base/<int:id>/', BaseEstablishment.as_view(),
         name='establishment_base'),
    path('establishment/update/location/<int:pk>/', UpdateEstablismentLocation.as_view(),
         name='establishment_update_location'),
    path('establishment/update/description/<int:pk>/', UpdateEstablismentDescription.as_view(),
         name='establishment_update_description'),
    path('establishment/update/amenities/<int:pk>/', UpdateEstablismentAmenities.as_view(),
         name='establishment_update_amenities'),
    path('establishment/update/gps_restriction/<int:pk>/', UpdateGPSRestrictionEstablishment.as_view(),
         name='establishment_update_gps_restriction'),
    path('establishment/update/featured/<int:pk>/', UpdateFeaturedEstablishment.as_view(),
         name='establishment_update_featured'),
    path('establishment/update/enable/<int:pk>/', DisableEstablishment.as_view(),
         name='establishment_update_enable'),
    path('establishment/update/taxes/<int:pk>/', UpdateEstablismentTaxes.as_view(),
         name='establishment_update_taxes'),
    path('establishment/update/pays_payment_tax/<int:pk>/', UpdateEstablishmentPaysPaymentTax.as_view(),
         name='establishment_update_pays_payment_tax'),
    path('establishment/update/couvert/<int:pk>/', UpdateEstablismentCouvert.as_view(),
         name='establishment_update_couvert'),
    path('establishment/update/offer_range_value/<int:pk>/', UpdateEstablismentOfferRangeValue.as_view(),
         name='establishment_update_offer_range_value'),
    path('establishment/update/open_close/<int:pk>/', UpdateOpenOrCloseEstablishment.as_view(),
         name='establishment_update_open_close'),
    path('establishment/delete/<int:pk>/', DeleteEstablishment.as_view(),
         name='establishment_delete'),
    path('establishment/create/photo/<int:establishment_id>/', AddPhotoOnEstablishment.as_view(),
         name='establishment_add_photo'),
    path('establishment/photo/delete/<int:pk>/', DeletePhotoFromEstablishment.as_view(),
         name='establishment_delete_photo'),
    path('dashboard/', DashboardAllEstablishments.as_view(), name='dashboard_all_establishments'),
    path('establishment/dashboard/<int:establishment_id>/', DashboardEstablishment.as_view(),
         name='establishment_dashboard'),
    path('establishment/dashboard/items_more_requested/<int:establishment_id>/', ListAllItemsMoreRequested.as_view(),
         name='establishment_items_more_requested'),
    # Url's for when Request a Waiter
    path('establishment/requests/list/<int:establishment_id>/',
         RequestWaiter.as_view(), name='request_list'),
    path('establishment/requests/accept/<int:pk>/',
         AcceptRequestWaiter.as_view(), name='accept_request'),
    path('establishment/requests/accept/all/<int:establishment_id>/',
         AcceptAllRequestWaiter.as_view(), name='accept_all_requests'),
    # Url's for Establishment Evaluations
    path('establishment/evaluation/list/<int:establishment_id>/',
         ListEvaluation.as_view(), name='evaluation_list'),
    path('establishment/evaluation/answer/<int:evaluation_id>/',
         CreateAnswerToEvaluation.as_view(), name='answer_evaluation'),
    path('establishment/evaluation/delete/<int:pk>/',
         DeleteEvaluation.as_view(), name='evaluation_delete'),
    path('establishment/evaluation/answer/delete/<int:pk>/',
         DeleteAnswerEvaluation.as_view(), name='evaluation_answer_delete'),
    # Url's for Employees
    path('establishment/employee/create/', CreateEmployee.as_view(),
         name='employee_create'),
    path('establishment/employee/list/<int:establishment_id>/',
         ListEmployeeEstablishment.as_view(), name='employee_list_establishment'),
    path('establishment/employee/list/<int:establishment_id>/search/',
         ListSearchEmployeeEstablishment.as_view(), name='employee_list_search_establishment'),
    path('establishment/employee/list/', ListEmployeeAll.as_view(),
         name='employee_list_all'),
    path('establishment/employee/list/search/', ListSearchEmployee.as_view(),
         name='employee_search_list'),
    path('establishment/employee/detail/<int:pk>/',
         DetailEmployee.as_view(), name='employee_detail'),
    path('establishment/employee/alter/<int:pk>/',
         AlterEmployee.as_view(), name='employee_alter'),
    path('establishment/employee/delete/<int:pk>/',
         DeleteEmployee.as_view(), name='employee_delete'),
    # Url's for Menu, MenuItem, ItemCategory, Observation
    path('menu/list/<int:establishment_id>/', ListMenuFromEstablishment.as_view(),
         name='menu_list_from_establishment'),
    path('menu/list/<int:establishment_id>/search/', ListMenuSearchFromEstablishment.as_view(),
         name='menu_list_search_from_establishment'),
    # Items from Menu
    path('menu/add/item/<int:establishment_id>/',
         CreateItemOnMenu.as_view(), name='menu_create_item'),
    path('menu/list/item/<int:establishment_id>/',
         ListMenuItems.as_view(), name='menu_item_list'),
    path('menu/list/item/<int:establishment_id>/search/',
         ListMenuItemsSearch.as_view(), name='menu_item_list_search'),
    path('menu/list/item/update/<int:pk>/',
         UpdateItemOnMenu.as_view(), name='menu_item_update'),
    path('menu/list/item/delete/<int:pk>/',
         DeleteItemOnMenu.as_view(), name='menu_item_delete'),
    # Category from Menu
    path('menu/category/create/<int:establishment_id>/',
         CreateCategory.as_view(), name='menu_category_create'),
    path('menu/category/list/<int:establishment_id>/',
         ListCategory.as_view(), name='menu_category_list'),
    path('menu/category/update/<int:pk>/',
         UpdateCategory.as_view(), name='menu_category_update'),
    path('menu/category/delete/<int:pk>/',
         DeleteCategory.as_view(), name='menu_category_delete'),
    # Observations from Menu
    path('menu/observation/create/<int:establishment_id>',
         CreateObservationItem.as_view(), name='menu_observation_item_create'),
    path('menu/observation/list/<int:establishment_id>/',
         ListObservationItem.as_view(), name='menu_observation_list'),
    path('menu/observation/update/<int:pk>/',
         UpdateObservationItem.as_view(), name='menu_observation_update'),
    path('menu/observation/delete/<int:pk>/',
         DeleteObservationItem.as_view(), name='menu_observation_delete'),
    # Menu Offers
    path('menu/offer/create/<int:establishment_id>',
         CreateMenuOffer.as_view(), name='menu_offer_create'),
    path('menu/offer/list/<int:establishment_id>/',
         ListMenuOffers.as_view(), name='menu_offer_list'),
    path('menu/offer/delete/<int:pk>/',
         DeleteMenuOffer.as_view(), name='menu_offer_delete'),
    path('menu/offer/update/<int:pk>/',
         UpdateMenuOffer.as_view(), name='menu_offer_update'),
    # Url's for Orders, Bills and Tables
    path('orders/list/<int:establishment_id>/',
         ListOrders.as_view(), name='orders_list'),
    path('orders/list/kitchen/pending/<int:establishment_id>/',
         ListOrdersPendingKitchen.as_view(), name='orders_list_kitchen_pending'),
    path('orders/list/kitchen/preparing/<int:establishment_id>/',
         ListOrdersPreparingKitchen.as_view(), name='orders_list_kitchen_preparing'),
    path('orders/list/kitchen/done/<int:establishment_id>/',
         ListOrdersDoneKitchen.as_view(), name='orders_list_kitchen_done'),
    # Cancel Orders Button
    path('order/cancel_from_list_orders/<int:order_id>/',
         CancelOrderOnListOrders.as_view(), name='order_cancel_button_on_list_orders'),
    path('order/cancel_from_bill/<int:order_id>/',
         CancelOrderOnListBill.as_view(), name='order_cancel_button_on_list_bills'),
    # Url's for Views for Kitchen List Orders
    path('orders/list/kitchen/done/<int:establishment_id>/search/user/',
         ListSearchDoneOrdersByUsers.as_view(), name='orders_kitchen_done_search_user'),
    path('orders/list/kitchen/done/<int:establishment_id>/search/table/',
         ListFilterOrdersByTableDone.as_view(), name='orders_kitchen_done_search_table'),
    path('orders/list/<int:establishment_id>/search/',
         ListSearchOrders.as_view(), name='orders_search_list'),
    path('orders/list/filter/category/<int:establishment_id>/search/',
         KitchenFilterOrdersByCategory.as_view(), name='orders_kitchen_category_filter'),
    path('orders/list/<int:establishment_id>/filter_by_table/',
         ListFilterOrdersByTable.as_view(), name='orders_filter_by_table'),
    path('orders/list/items/to/order/<int:establishment_id>/',
         ListItemsToOrder.as_view(), name='list_items_to_order'),
    path('orders/create/<int:establishment_id>/',
         CreateOrder.as_view(), name='order_create'),
    path('orders/update/<int:pk>/',
         UpdateOrder.as_view(), name='orders_update'),
    path('orders/kitchen_accepted_at/<int:order_id>/',
         KitchenAcceptOrder.as_view(), name='order_kitchen_accepted_at'),
    path('orders/kitchen_done_order/<int:order_id>/',
         KitchenDoneOrder.as_view(), name='order_kitchen_done'),
    path('orders/kitchen_cancel_order/<int:order_id>/',
         KitchenCancelOrder.as_view(), name='order_kitchen_cancel'),
    # Url's for Bills and BillPayment
    path('bill/list/<int:establishment_id>/',
         ListBillsOpened.as_view(), name='bill_list'),
    path('bill/list/closed/<int:establishment_id>/',
         ListBillsClosed.as_view(), name='bill_list_closed'),
    path('bill/list/<int:establishment_id>/search/',
         ListSearchBills.as_view(), name='bill_search_list'),
    path('bill/list/search/closed/<int:establishment_id>/search/',
         ListSearchBillsClosed.as_view(), name='bill_search_list_closed'),
    path('bill/payment/create/<int:bill_id>/',
         CreatePaymentAllBill.as_view(), name='bill_payment_create'),
    path('bill/payment/create/bill_member/<int:bill_member_id>/',
         CreatePaymentOnBillMember.as_view(), name='bill_member_payment_create'),
    path('bill/payment/aprove_or_reject/<int:bill_payment_id>/',
         ApproveOrRejectPayment.as_view(), name='bill_payment_aprove_or_reject'),
    path('bill/payment/reject/<int:bill_payment_id>/',
         RejectPayment.as_view(), name='bill_payment_reject'),
    path('bill/bill_members/list/<int:bill_id>/',
         ListBillMembersOnBill.as_view(), name='bill_member_on_bill_list'),
    path('ajax/load_bill_members/',
         LoadBillMembers.as_view(), name='ajax_load_bill_members'),
    path('bill/orders/list/<int:bill_id>/',
         ListOrdersFromBill.as_view(), name='orders_from_bill'),
    # Url's for Tables and TableZone
    path('table_zone/create/<int:establishment_id>/',
         CreateTableZone.as_view(), name='table_zone_create'),
    path('table_zone/list/<int:establishment_id>/',
         ListTableZone.as_view(), name='table_zone_list'),
    path('table_zone/update/<int:pk>/',
         UpdateTableZone.as_view(), name='table_zone_update'),
    path('table_zone/delete/<int:pk>/',
         DeleteTableZone.as_view(), name='table_zone_delete'),
    path('table_zone/update/active_or_desactive/<int:pk>/',
         DesactiveTableZone.as_view(), name='table_zone_active_or_desactive'),
    path('table/create/<int:table_zone_id>/<int:establishment_id>/',
         CreateTable.as_view(), name='table_create'),
    path('table/update/<int:pk>/', UpdateTable.as_view(), name='table_update'),
    path('table/update/enabled/<int:pk>/',
         UpdateTableEnableOrDesable.as_view(), name='table_update_enabled'),
    path('table/delete/<int:pk>/', DeleteTable.as_view(), name='table_delete'),
    # Url's for Operating Hours
    path('operating_hours/create/<int:establishment_id>/',
         CreateOperatingHours.as_view(), name='operating_hour_create'),
    path('operating_hours/delete/<int:pk>/',
         DeleteOperatingHour.as_view(), name='operating_hour_delete'),
    # Url's for Promocodes
    path('promocode/create/<int:establishment_id>/',
         CreatePromoCode.as_view(), name='promocode_create'),
    path('promocode/update/<int:pk>/',
         UpdatePromoCodes.as_view(), name='promocode_update'),
    path('promocode/delete/<int:pk>/',
         DeletePromocodes.as_view(), name='promocode_delete'),
    # Url's for Events
    path('events/create/<int:establishment_id>/',
         CreateEvents.as_view(), name='events_create'),
    path('events/update/<int:pk>/',
         UpdateEvents.as_view(), name='events_update'),
    path('events/delete/<int:pk>/',
         DeleteEvents.as_view(), name='events_delete'),
    # Url's for Wirecard Payment
    path('wirecard/create/<int:establishment_id>/',
         CreateWirecard.as_view(), name='wirecard_create'),
    path('wirecard/company/create/<int:establishment_id>/',
         CreateCompanyWirecard.as_view(), name='wirecard_company_create'),
    path('wirecard/detail/<int:pk>/',
         DetailWirecard.as_view(), name='wirecard_detail'),
    # Url's for offline Compensations
    path('offline/compensations/', ListCompensations.as_view(),
         name='offline_compensations'),
    path('offline/compensations/check_month/<int:month>/<int:year>/<int:establishment_id>/',
         CreateCompensation.as_view(), name='offline_compensations_check_month'),
    path('offline/compensations/generate_report/<int:month>/<int:year>/<int:establishment_id>/', GenerateCSVReport.as_view(),
         name='offline_compensations_generate_report'),
]
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
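# Illustrative reverse() lookup (assumes these patterns are mounted at the
# project root; the pk value is made up):
#   from django.urls import reverse
#   reverse('menu_item_update', kwargs={'pk': 3})  # -> '/menu/list/item/update/3/'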
| 55.375912 | 126 | 0.701114 | 1,690 | 15,173 | 6.02426 | 0.149112 | 0.067184 | 0.111973 | 0.047441 | 0.254199 | 0.087418 | 0.049209 | 0.020332 | 0.009626 | 0 | 0 | 0 | 0.148685 | 15,173 | 273 | 127 | 55.578755 | 0.788186 | 0.037699 | 0 | 0 | 0 | 0 | 0.464815 | 0.394719 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.008811 | 0.017621 | 0 | 0.017621 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b61fdd250445e3eab4d4df963d8cfba91ce0bd48 | 6,220 | py | Python | model/utils/config_helper.py | aashiqmuhamed/transformer-gan | 1ccc9f251c1b1d054c1acc8be36c1da7bf8cf11c | [
"Apache-2.0"
] | 32 | 2021-06-11T02:03:03.000Z | 2022-03-23T16:12:49.000Z | model/utils/config_helper.py | aashiqmuhamed/transformer-gan | 1ccc9f251c1b1d054c1acc8be36c1da7bf8cf11c | [
"Apache-2.0"
] | 3 | 2021-11-11T06:08:37.000Z | 2022-02-20T14:09:30.000Z | model/utils/config_helper.py | aashiqmuhamed/transformer-gan | 1ccc9f251c1b1d054c1acc8be36c1da7bf8cf11c | [
"Apache-2.0"
] | 7 | 2021-06-11T01:19:56.000Z | 2022-02-17T03:52:15.000Z | from yacs.config import CfgNode as CN
def model(cfg):
    # For model
    cfg.MODEL = CN()
    cfg.MODEL.num_layers = 6
    cfg.MODEL.num_heads = 10
    cfg.MODEL.units = 500
    cfg.MODEL.inner_size = 1000
    cfg.MODEL.dropout = 0.1
    cfg.MODEL.tie_embedding = True
    cfg.MODEL.tie_proj = False
    cfg.MODEL.attention_dropout = 0.1
    cfg.MODEL.pre_lnorm = False
    cfg.MODEL.clamp_len = -1
    cfg.MODEL.same_length = False
    return cfg
def train(cfg):
    # For training
    cfg.TRAIN = CN()
    cfg.TRAIN.load_from_previous = "Null"
    cfg.TRAIN.batch_size = 200
    cfg.TRAIN.batch_chunk = 1
    cfg.TRAIN.tgt_length = 500
    cfg.TRAIN.mem_length = 50
    cfg.TRAIN.seed = 1111
    cfg.TRAIN.optim = "adam"
    cfg.TRAIN.lr = 0.00025 / 4.0
    cfg.TRAIN.lr_min = 0.0
    cfg.TRAIN.scheduler = "cosine"
    cfg.TRAIN.warmup_step = 0
    cfg.TRAIN.decay_rate = 0.5
    cfg.TRAIN.patience = 10
    cfg.TRAIN.clip = 0.25
    cfg.TRAIN.max_step = 200000
    cfg.TRAIN.log_interval = 200
    cfg.TRAIN.eval_interval = 4000
    cfg.TRAIN.pad_type = "model"  # model or anything else
    cfg.TRAIN.use_mle = True
    cfg.TRAIN.random_crop = False
    cfg.TRAIN.replace_start_with_pad = False
    cfg.TRAIN.weight_decay = 0.0  # Weight decay for adam or lamb
    cfg.TRAIN.append_note_status = False  # Append status to event representation
    return cfg
def discriminator(cfg):
    # For discriminator
    # Discriminator related (used only if)
    cfg.DISCRIMINATOR = CN()
    cfg.DISCRIMINATOR.start_iter = 100  # To control when we start training critic
    cfg.DISCRIMINATOR.dis_loss_freq = 50  # How often to use loss from discriminator
    cfg.DISCRIMINATOR.gen_loss_freq = 10
    cfg.DISCRIMINATOR.eval_loss_freq = 10  # How often to use loss from discriminator during eval
    cfg.DISCRIMINATOR.freeze_discriminator = True
    cfg.DISCRIMINATOR.truncate_backprop = False  # while sampling do not propagate gradients beyond current token
    cfg.DISCRIMINATOR.sample_chunks_mem = 1
    cfg.DISCRIMINATOR.beta_max = 100.  # TODO: temperature decay
    cfg.DISCRIMINATOR.adapt = 'no'
    cfg.DISCRIMINATOR.type = "Null"  # or cnn or Null for no discriminator or 'bert' for BERT discriminator
    cfg.DISCRIMINATOR.dis_steps = 1  # dis_step per gen_step (default 1 for bert and 5 for cnn)
    cfg.DISCRIMINATOR.tgt_len = 64
    cfg.DISCRIMINATOR.mem_len = 64
    cfg.DISCRIMINATOR.gen_loss_factor = 30  # Multiplying factor for mmd/gan loss component in generator
    cfg.DISCRIMINATOR.dis_loss_factor = 1  # Multiplying factor for mmd/gan loss component in discriminator
    cfg.DISCRIMINATOR.batch_chunk = 1
    cfg.DISCRIMINATOR.context_len = 5  # Randomly sample context length tokens from real data and use as context.
    cfg.DISCRIMINATOR.backprop_outside = True
    cfg.DISCRIMINATOR.src_mem_len = 200
    # If 0 uses first token in real data
    cfg.DISCRIMINATOR.gen_scheduler = "constant"
    cfg.DISCRIMINATOR.gen_lr_min = 0.0
    cfg.DISCRIMINATOR.gen_warmup_step = 0
    cfg.DISCRIMINATOR.gen_decay_rate = 0.5
    cfg.DISCRIMINATOR.gen_patience = 10
    cfg.DISCRIMINATOR.gen_lr = 0.00025 / 4.0
    cfg.DISCRIMINATOR.dis_scheduler = "constant"
    cfg.DISCRIMINATOR.dis_lr_min = 0.0
    cfg.DISCRIMINATOR.dis_warmup_step = 0
    cfg.DISCRIMINATOR.dis_decay_rate = 0.5
    cfg.DISCRIMINATOR.dis_patience = 10
    cfg.DISCRIMINATOR.dis_lr = 0.00025 / 4.0
    # Bert params
    cfg.DISCRIMINATOR.BERT = CN()
    cfg.DISCRIMINATOR.BERT.learning_rate = 1e-5  # Decrease learning rate since we're fine tuning
    cfg.DISCRIMINATOR.BERT.weight_decay = 0.0
    cfg.DISCRIMINATOR.BERT.adam_epsilon = 1e-8
    cfg.DISCRIMINATOR.BERT.max_grad_norm = 1.0
    cfg.DISCRIMINATOR.BERT.model_type = "bert_lm"  # or "bert_cls"
    cfg.DISCRIMINATOR.BERT.loss_type = "rsgan"  # or 'standard', 'JS', 'KL', 'hinge', 'tv', 'rsgan', 'wgan-gp', 'mmd', 'ppo', 'ppo-gp'
    cfg.DISCRIMINATOR.BERT.model_path = "../BERT/checkpoint-1969000"
    cfg.DISCRIMINATOR.BERT.freeze_layers = []  # Total layers ['0', '1', '2', '3', '4']
    cfg.DISCRIMINATOR.BERT.random_weights = False  # only implemented for bert_lm
    # CNN params (Relgan)
    cfg.DISCRIMINATOR.CNN = CN()
    cfg.DISCRIMINATOR.CNN.learning_rate = 1e-4
    cfg.DISCRIMINATOR.CNN.embed_dim = 64
    cfg.DISCRIMINATOR.CNN.hidden_dim = 64
    cfg.DISCRIMINATOR.CNN.num_rep = 64
    cfg.DISCRIMINATOR.CNN.init = "uniform"
    cfg.DISCRIMINATOR.CNN.loss_type = "rsgan"  # or 'standard', 'JS', 'KL', 'hinge', 'tv', 'rsgan', 'wgan-gp', 'mmd', 'ppo-gp'
    return cfg
def metric(cfg):
    # Metrics
    cfg.METRICS = CN()
    cfg.METRICS.use_bleu = False  # outdated
    cfg.METRICS.use_self_bleu = False  # outdated
    cfg.METRICS.CLASSIFIER = CN()
    cfg.METRICS.CLASSIFIER.use_classifier = False
    cfg.METRICS.CLASSIFIER.gen_batch_size = 128
    cfg.METRICS.CLASSIFIER.gen_seq_len = 2048
    cfg.METRICS.CLASSIFIER.gen_num_samples = 256
    cfg.METRICS.CLASSIFIER.block_size = 128  # For training classifier
    cfg.METRICS.CLASSIFIER.bert_batch_size = 20  # For passing into bert
    cfg.METRICS.CLASSIFIER.model_path = "../BERT/checkpoint-1969000"
    return cfg
def init(cfg):
    # For initialization
    cfg.INITIALIZER = CN()
    cfg.INITIALIZER.base_init = ["normal", 0.01]
    cfg.INITIALIZER.embed_init = ["normal", 0.01]
    # For evaluation
    cfg.EVALUATE = CN()
    cfg.EVALUATE.batch_size = 10
    cfg.EVALUATE.tgt_length = 128
    cfg.EVALUATE.mem_length = 128
    # Event type related
    cfg.DATASET = CN()
    cfg.DATASET.event_type = "magenta"  # or 'newevent'
    cfg.DATASET.trim_padding = False
    # Classifier related
    cfg.PPO = CN()  # For ppo loss type
    cfg.PPO.dis_D_lr = 0.00025 / 4.0
    cfg.PPO.dis_D_update_D0_freq = 20  # Should be multiple of gen_loss_freq
    cfg.PPO.dis_D_type = "bert"  # bert or cnn
    cfg.PPO.clip_param = 0.4
    cfg.PPO.dis_D_num_rep = 1
    # For Problem Type
    cfg.PROBLEM = CN()
    cfg.PROBLEM.type = 'Null'  # time extension: Null
    cfg.PROBLEM.melody_len = 1024
    return cfg
def get_default_cfg_training():
    cfg = CN()
    cfg = init(cfg)
    cfg = model(cfg)
    cfg = train(cfg)
    cfg = discriminator(cfg)
    cfg = metric(cfg)
    cfg.freeze()
    return cfg
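# Illustrative override flow (the YAML filename is hypothetical):
#   cfg = get_default_cfg_training()
#   cfg.defrost()                           # the cfg above is returned frozen
#   cfg.merge_from_file("experiment.yaml")  # standard yacs override mechanism
#   cfg.freeze()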
| 37.926829 | 133 | 0.694855 | 906 | 6,220 | 4.625828 | 0.257174 | 0.190885 | 0.047721 | 0.00859 | 0.161059 | 0.093295 | 0.060129 | 0.043904 | 0.024338 | 0.024338 | 0 | 0.040677 | 0.201608 | 6,220 | 163 | 134 | 38.159509 | 0.803262 | 0.213826 | 0 | 0.044444 | 0 | 0 | 0.029703 | 0.010726 | 0 | 0 | 0 | 0.006135 | 0 | 1 | 0.044444 | false | 0 | 0.007407 | 0 | 0.096296 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b62114fe26c6e23da2c727e699637285d222ecc8 | 832 | py | Python | examples/yaml/main.py | pseeth/argbind | 1b953e370065d9f3c91dee5c93cc6447b72e3744 | [
"MIT"
] | 19 | 2020-10-14T00:00:13.000Z | 2022-02-20T23:21:18.000Z | examples/yaml/main.py | pseeth/argbind | 1b953e370065d9f3c91dee5c93cc6447b72e3744 | [
"MIT"
] | 3 | 2021-03-30T15:56:55.000Z | 2022-03-21T20:52:56.000Z | examples/yaml/main.py | pseeth/argbind | 1b953e370065d9f3c91dee5c93cc6447b72e3744 | [
"MIT"
] | 1 | 2021-04-13T18:51:29.000Z | 2021-04-13T18:51:29.000Z | import argbind
import typing
@argbind.bind()
def func(
    arg1 : str = 'default',
    arg2 : str = 'default',
    arg3 : str = 'default',
    arg4 : str = 'default',
    arg5 : typing.List[str] = ['default'],
):
"""Dummy function for binding.
Parameters
----------
arg1 : str, optional
Argument 1, by default 'default'
arg2 : str, optional
Argument 2, by default 'default'
arg3 : str, optional
Argument 3, by default 'default'
arg4 : str, optional
Argument 4, by default 'default'
"""
    print(
        f"Argument 1: {arg1}\n"
        f"Argument 2: {arg2}\n"
        f"Argument 3: {arg3}\n"
        f"Argument 4: {arg4}\n"
        f"Argument 5: {arg5}"
    )

if __name__ == "__main__":
    args = argbind.parse_args()
    with argbind.scope(args):
        func()
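# Illustrative invocation (argbind exposes each bound keyword argument as a
# --<function>.<arg> flag and can load values from YAML via --args.load):
#   python main.py --func.arg1 hello --args.load conf.yml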
| 22.486486 | 42 | 0.550481 | 99 | 832 | 4.535354 | 0.373737 | 0.111359 | 0.169265 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.039792 | 0.305288 | 832 | 36 | 43 | 23.111111 | 0.737024 | 0.338942 | 0 | 0 | 0 | 0 | 0.282565 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.095238 | 0 | 0.142857 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6215c1441983e96ac508f482bf4dc70d993cca3 | 2,585 | py | Python | handlers/article.py | armaaar/Multi-Users-Blog | 8b28b2816337d8f023bc6c1741e91c86d3127874 | [
"MIT"
] | null | null | null | handlers/article.py | armaaar/Multi-Users-Blog | 8b28b2816337d8f023bc6c1741e91c86d3127874 | [
"MIT"
] | null | null | null | handlers/article.py | armaaar/Multi-Users-Blog | 8b28b2816337d8f023bc6c1741e91c86d3127874 | [
"MIT"
] | null | null | null | from handlers import tables, helper, Handler
import time
class ArticleHandler(Handler):
    def __init__(self, *args, **kwargs):
        super(ArticleHandler, self).__init__(*args, **kwargs)
        self.body_class = 'article-page'

    def get(self, article_id):
        if not article_id.isdigit():
            self.page_redirect("/")
        else:
            article = tables.articles.get(article_id)
            comments = tables.comments.get_comments(article_id)
            self.render('article.jinja', handler=self,
                        article=article, comments=comments)

    def post(self, article_id):
        if not article_id.isdigit() or not self.is_loggedin():
            self.page_redirect("/")
        else:
            like = self.request.get("like")
            new_comment = self.request.get("new-comment")
            delete_comment = self.request.get("delete-comment")
            edit_comment = self.request.get("edit-comment")
            if like:
                username = self.get_cookie("username")
                article = tables.articles.get(article_id)
                if self.is_loggedin() and username != article.user:
                    if tables.likes.exist(article_id, username):
                        tables.likes.delete(article_id, username)
                    else:
                        tables.likes.add(article_id, username)
                self.page_redirect("/article/%s/#like" % article_id)
            elif new_comment:
                new_comment = self.request.get("comment")
                username = self.get_cookie("username")
                if self.is_loggedin():
                    tables.comments.add(article_id, username, new_comment)
                self.page_redirect("/article/%s/#comments" % article_id)
            elif delete_comment:
                comment_id = self.request.get("comment-id")
                comment = tables.comments.get(comment_id)
                if self.is_loggedin() == comment.user:
                    tables.comments.delete(comment_id)
                self.page_redirect("/article/%s/#comments" % article_id)
            elif edit_comment:
                comment_id = self.request.get("comment-id")
                comment = self.request.get("comment")
                com = tables.comments.get(comment_id)
                if self.is_loggedin() == com.user:
                    tables.comments.edit(comment_id, comment)
                self.page_redirect("/article/%s/#comments" % article_id)
            else:
                self.page_redirect("/article/%s/" % article_id)
| 41.693548 | 74 | 0.56441 | 276 | 2,585 | 5.09058 | 0.177536 | 0.102491 | 0.079715 | 0.074733 | 0.464769 | 0.323843 | 0.276868 | 0.276868 | 0.22847 | 0 | 0 | 0 | 0.324565 | 2,585 | 61 | 75 | 42.377049 | 0.804696 | 0 | 0 | 0.288462 | 0 | 0 | 0.081238 | 0.024371 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057692 | false | 0 | 0.038462 | 0 | 0.115385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b621e93761f39072896a2d33479068491b0d86fd | 428 | py | Python | Alignment/MuonAlignmentAlgorithms/python/MuonAlignmentPreFilter_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | Alignment/MuonAlignmentAlgorithms/python/MuonAlignmentPreFilter_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | Alignment/MuonAlignmentAlgorithms/python/MuonAlignmentPreFilter_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
MuonAlignmentPreFilter = cms.EDFilter("MuonAlignmentPreFilter",
    tracksTag = cms.InputTag("ALCARECOMuAlCalIsolatedMu:GlobalMuon"),
    minTrackPt = cms.double(20.),
    minTrackP = cms.double(0.),
    minTrackerHits = cms.int32(10),
    minDTHits = cms.int32(6),
    minCSCHits = cms.int32(4),
    allowTIDTEC = cms.bool(True),
    minTrackEta = cms.double(-2.4),
    maxTrackEta = cms.double(2.4)
)
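# Summary of the selection configured above: global muons from the MuAl
# calibration stream with pT > 20 GeV, at least 10 tracker hits, DT/CSC hit
# minima of 6/4, and -2.4 < eta < 2.4. (How the DT and CSC minima combine is
# decided inside the C++ filter, not in this config.)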
| 28.533333 | 67 | 0.728972 | 49 | 428 | 6.367347 | 0.612245 | 0.115385 | 0.064103 | 0.070513 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045576 | 0.128505 | 428 | 14 | 68 | 30.571429 | 0.790885 | 0 | 0 | 0 | 0 | 0 | 0.135831 | 0.135831 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6239675b28fbe08cb92d202a432a29c5c6dfd60 | 13,299 | py | Python | widgets/KeyEvents.py | iubica/wx-portfolio | 12101986db72bcaffd9b744d514d6f9f651ad5a1 | [
"MIT"
] | 3 | 2018-03-19T07:57:10.000Z | 2021-07-05T08:55:14.000Z | widgets/KeyEvents.py | iubica/wx-portfolio | 12101986db72bcaffd9b744d514d6f9f651ad5a1 | [
"MIT"
] | 6 | 2020-03-24T15:40:18.000Z | 2021-12-13T19:46:09.000Z | widgets/KeyEvents.py | iubica/wx-portfolio | 12101986db72bcaffd9b744d514d6f9f651ad5a1 | [
"MIT"
] | 4 | 2018-03-29T21:59:55.000Z | 2019-12-16T14:56:38.000Z | #!/usr/bin/env python
import wx
import wx.lib.mixins.listctrl as listmix
from six import unichr
#----------------------------------------------------------------------
keyMap = {
    wx.WXK_BACK : "WXK_BACK",
    wx.WXK_TAB : "WXK_TAB",
    wx.WXK_RETURN : "WXK_RETURN",
    wx.WXK_ESCAPE : "WXK_ESCAPE",
    wx.WXK_SPACE : "WXK_SPACE",
    wx.WXK_DELETE : "WXK_DELETE",
    wx.WXK_START : "WXK_START",
    wx.WXK_LBUTTON : "WXK_LBUTTON",
    wx.WXK_RBUTTON : "WXK_RBUTTON",
    wx.WXK_CANCEL : "WXK_CANCEL",
    wx.WXK_MBUTTON : "WXK_MBUTTON",
    wx.WXK_CLEAR : "WXK_CLEAR",
    wx.WXK_SHIFT : "WXK_SHIFT",
    wx.WXK_ALT : "WXK_ALT",
    wx.WXK_MENU : "WXK_MENU",
    wx.WXK_PAUSE : "WXK_PAUSE",
    wx.WXK_CAPITAL : "WXK_CAPITAL",
    #wx.WXK_PRIOR : "WXK_PRIOR",
    #wx.WXK_NEXT : "WXK_NEXT",
    wx.WXK_END : "WXK_END",
    wx.WXK_HOME : "WXK_HOME",
    wx.WXK_LEFT : "WXK_LEFT",
    wx.WXK_UP : "WXK_UP",
    wx.WXK_RIGHT : "WXK_RIGHT",
    wx.WXK_DOWN : "WXK_DOWN",
    wx.WXK_SELECT : "WXK_SELECT",
    wx.WXK_PRINT : "WXK_PRINT",
    wx.WXK_EXECUTE : "WXK_EXECUTE",
    wx.WXK_SNAPSHOT : "WXK_SNAPSHOT",
    wx.WXK_INSERT : "WXK_INSERT",
    wx.WXK_HELP : "WXK_HELP",
    wx.WXK_NUMPAD0 : "WXK_NUMPAD0",
    wx.WXK_NUMPAD1 : "WXK_NUMPAD1",
    wx.WXK_NUMPAD2 : "WXK_NUMPAD2",
    wx.WXK_NUMPAD3 : "WXK_NUMPAD3",
    wx.WXK_NUMPAD4 : "WXK_NUMPAD4",
    wx.WXK_NUMPAD5 : "WXK_NUMPAD5",
    wx.WXK_NUMPAD6 : "WXK_NUMPAD6",
    wx.WXK_NUMPAD7 : "WXK_NUMPAD7",
    wx.WXK_NUMPAD8 : "WXK_NUMPAD8",
    wx.WXK_NUMPAD9 : "WXK_NUMPAD9",
    wx.WXK_MULTIPLY : "WXK_MULTIPLY",
    wx.WXK_ADD : "WXK_ADD",
    wx.WXK_SEPARATOR : "WXK_SEPARATOR",
    wx.WXK_SUBTRACT : "WXK_SUBTRACT",
    wx.WXK_DECIMAL : "WXK_DECIMAL",
    wx.WXK_DIVIDE : "WXK_DIVIDE",
    wx.WXK_F1 : "WXK_F1",
    wx.WXK_F2 : "WXK_F2",
    wx.WXK_F3 : "WXK_F3",
    wx.WXK_F4 : "WXK_F4",
    wx.WXK_F5 : "WXK_F5",
    wx.WXK_F6 : "WXK_F6",
    wx.WXK_F7 : "WXK_F7",
    wx.WXK_F8 : "WXK_F8",
    wx.WXK_F9 : "WXK_F9",
    wx.WXK_F10 : "WXK_F10",
    wx.WXK_F11 : "WXK_F11",
    wx.WXK_F12 : "WXK_F12",
    wx.WXK_F13 : "WXK_F13",
    wx.WXK_F14 : "WXK_F14",
    wx.WXK_F15 : "WXK_F15",
    wx.WXK_F16 : "WXK_F16",
    wx.WXK_F17 : "WXK_F17",
    wx.WXK_F18 : "WXK_F18",
    wx.WXK_F19 : "WXK_F19",
    wx.WXK_F20 : "WXK_F20",
    wx.WXK_F21 : "WXK_F21",
    wx.WXK_F22 : "WXK_F22",
    wx.WXK_F23 : "WXK_F23",
    wx.WXK_F24 : "WXK_F24",
    wx.WXK_NUMLOCK : "WXK_NUMLOCK",
    wx.WXK_SCROLL : "WXK_SCROLL",
    wx.WXK_PAGEUP : "WXK_PAGEUP",
    wx.WXK_PAGEDOWN : "WXK_PAGEDOWN",
    wx.WXK_NUMPAD_SPACE : "WXK_NUMPAD_SPACE",
    wx.WXK_NUMPAD_TAB : "WXK_NUMPAD_TAB",
    wx.WXK_NUMPAD_ENTER : "WXK_NUMPAD_ENTER",
    wx.WXK_NUMPAD_F1 : "WXK_NUMPAD_F1",
    wx.WXK_NUMPAD_F2 : "WXK_NUMPAD_F2",
    wx.WXK_NUMPAD_F3 : "WXK_NUMPAD_F3",
    wx.WXK_NUMPAD_F4 : "WXK_NUMPAD_F4",
    wx.WXK_NUMPAD_HOME : "WXK_NUMPAD_HOME",
    wx.WXK_NUMPAD_LEFT : "WXK_NUMPAD_LEFT",
    wx.WXK_NUMPAD_UP : "WXK_NUMPAD_UP",
    wx.WXK_NUMPAD_RIGHT : "WXK_NUMPAD_RIGHT",
    wx.WXK_NUMPAD_DOWN : "WXK_NUMPAD_DOWN",
    #wx.WXK_NUMPAD_PRIOR : "WXK_NUMPAD_PRIOR",
    wx.WXK_NUMPAD_PAGEUP : "WXK_NUMPAD_PAGEUP",
    #wx.WXK_NUMPAD_NEXT : "WXK_NUMPAD_NEXT",
    wx.WXK_NUMPAD_PAGEDOWN : "WXK_NUMPAD_PAGEDOWN",
    wx.WXK_NUMPAD_END : "WXK_NUMPAD_END",
    wx.WXK_NUMPAD_BEGIN : "WXK_NUMPAD_BEGIN",
    wx.WXK_NUMPAD_INSERT : "WXK_NUMPAD_INSERT",
    wx.WXK_NUMPAD_DELETE : "WXK_NUMPAD_DELETE",
    wx.WXK_NUMPAD_EQUAL : "WXK_NUMPAD_EQUAL",
    wx.WXK_NUMPAD_MULTIPLY : "WXK_NUMPAD_MULTIPLY",
    wx.WXK_NUMPAD_ADD : "WXK_NUMPAD_ADD",
    wx.WXK_NUMPAD_SEPARATOR : "WXK_NUMPAD_SEPARATOR",
    wx.WXK_NUMPAD_SUBTRACT : "WXK_NUMPAD_SUBTRACT",
    wx.WXK_NUMPAD_DECIMAL : "WXK_NUMPAD_DECIMAL",
    wx.WXK_NUMPAD_DIVIDE : "WXK_NUMPAD_DIVIDE",
    wx.WXK_WINDOWS_LEFT : "WXK_WINDOWS_LEFT",
    wx.WXK_WINDOWS_RIGHT : "WXK_WINDOWS_RIGHT",
    wx.WXK_WINDOWS_MENU : "WXK_WINDOWS_MENU",
    wx.WXK_SPECIAL1 : "WXK_SPECIAL1",
    wx.WXK_SPECIAL2 : "WXK_SPECIAL2",
    wx.WXK_SPECIAL3 : "WXK_SPECIAL3",
    wx.WXK_SPECIAL4 : "WXK_SPECIAL4",
    wx.WXK_SPECIAL5 : "WXK_SPECIAL5",
    wx.WXK_SPECIAL6 : "WXK_SPECIAL6",
    wx.WXK_SPECIAL7 : "WXK_SPECIAL7",
    wx.WXK_SPECIAL8 : "WXK_SPECIAL8",
    wx.WXK_SPECIAL9 : "WXK_SPECIAL9",
    wx.WXK_SPECIAL10 : "WXK_SPECIAL10",
    wx.WXK_SPECIAL11 : "WXK_SPECIAL11",
    wx.WXK_SPECIAL12 : "WXK_SPECIAL12",
    wx.WXK_SPECIAL13 : "WXK_SPECIAL13",
    wx.WXK_SPECIAL14 : "WXK_SPECIAL14",
    wx.WXK_SPECIAL15 : "WXK_SPECIAL15",
    wx.WXK_SPECIAL16 : "WXK_SPECIAL16",
    wx.WXK_SPECIAL17 : "WXK_SPECIAL17",
    wx.WXK_SPECIAL18 : "WXK_SPECIAL18",
    wx.WXK_SPECIAL19 : "WXK_SPECIAL19",
}
if 'wxMac' in wx.PlatformInfo:
    keyMap[wx.WXK_RAW_CONTROL] = 'WXK_RAW_CONTROL'
    keyMap[wx.WXK_CONTROL] = "WXK_CONTROL"
    keyMap[wx.WXK_COMMAND] = "WXK_COMMAND"
else:
    keyMap[wx.WXK_COMMAND] = "WXK_COMMAND"
    keyMap[wx.WXK_CONTROL] = "WXK_CONTROL"
#----------------------------------------------------------------------
class KeySink(wx.Window):
    def __init__(self, parent):
        wx.Window.__init__(self, parent, -1, style=wx.WANTS_CHARS
                           #| wx.RAISED_BORDER
                           #| wx.SUNKEN_BORDER
                           , name="sink")
        self.SetBackgroundColour(wx.BLUE)
        self.haveFocus = False
        self.callSkip = True
        self.logKeyDn = True
        self.logKeyUp = True
        self.logChar = True

        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)
        self.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
        self.Bind(wx.EVT_MOUSE_EVENTS, self.OnMouse)
        self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
        self.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
        self.Bind(wx.EVT_CHAR, self.OnChar)

    def SetCallSkip(self, skip):
        self.callSkip = skip

    def SetLogKeyUp(self, val):
        self.logKeyUp = val

    def SetLogKeyDn(self, val):
        self.logKeyDn = val

    def SetLogChar(self, val):
        self.logChar = val

    def OnPaint(self, evt):
        dc = wx.PaintDC(self)
        rect = self.GetClientRect()
        dc.SetTextForeground(wx.WHITE)
        dc.DrawLabel("Click here and then press some keys",
                     rect, wx.ALIGN_CENTER | wx.ALIGN_TOP)
        if self.haveFocus:
            dc.SetTextForeground(wx.GREEN)
            dc.DrawLabel("Have Focus", rect, wx.ALIGN_RIGHT | wx.ALIGN_BOTTOM)
        else:
            dc.SetTextForeground(wx.RED)
            dc.DrawLabel("Need Focus!", rect, wx.ALIGN_RIGHT | wx.ALIGN_BOTTOM)

    def OnSetFocus(self, evt):
        self.haveFocus = True
        self.Refresh()

    def OnKillFocus(self, evt):
        self.haveFocus = False
        self.Refresh()

    def OnMouse(self, evt):
        if evt.ButtonDown():
            self.SetFocus()

    def OnKeyDown(self, evt):
        if self.logKeyDn:
            self.GetParent().keylog.LogKeyEvent("KeyDown", evt)
        if self.callSkip:
            evt.Skip()

    def OnKeyUp(self, evt):
        if self.logKeyUp:
            self.GetParent().keylog.LogKeyEvent("KeyUp", evt)
        if self.callSkip:
            evt.Skip()

    def OnChar(self, evt):
        if self.logChar:
            self.GetParent().keylog.LogKeyEvent("Char", evt)
#----------------------------------------------------------------------
class KeyLog(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin):

    colHeaders = [ "Event Type",
                   "Key Name",
                   "Key Code",
                   "Modifiers",
                   "Unicode",
                   "UniChr",
                   "RawKeyCode",
                   "RawKeyFlags",
                   ]

    def __init__(self, parent):
        wx.ListCtrl.__init__(self, parent, -1,
                             style = wx.LC_REPORT|wx.LC_VRULES|wx.LC_HRULES)
        listmix.ListCtrlAutoWidthMixin.__init__(self)

        for idx, header in enumerate(self.colHeaders):
            self.InsertColumn(idx, header)
        idx += 1
        self.InsertColumn(idx, "")

        for x in range(idx):
            self.SetColumnWidth(x, wx.LIST_AUTOSIZE_USEHEADER)

        self.SetColumnWidth(1, 125)

    def LogKeyEvent(self, evType, evt):
        keycode = evt.GetKeyCode()
        keyname = keyMap.get(keycode, None)
        if keyname is None:
            if keycode < 256:
                if keycode == 0:
                    keyname = "NUL"
                elif keycode < 27:
                    keyname = u"Ctrl-%s" % unichr(ord('A') + keycode-1)
                else:
                    keyname = u"\"%s\"" % unichr(keycode)
            else:
                keyname = u"(%s)" % keycode

        UniChr = ''
        if "unicode" in wx.PlatformInfo:
            UniChr = "\"" + unichr(evt.GetUnicodeKey()) + "\""

        modifiers = ""
        for mod, ch in [(evt.ControlDown(), 'C'),
                        (evt.AltDown(), 'A'),
                        (evt.ShiftDown(), 'S'),
                        (evt.MetaDown(), 'M'),
                        (evt.RawControlDown(), 'R'),]:
            if mod:
                modifiers += ch
            else:
                modifiers += '-'

        id = self.InsertItem(self.GetItemCount(), evType)
        self.SetItem(id, 1, keyname)
        self.SetItem(id, 2, str(keycode))
        self.SetItem(id, 3, modifiers)
        self.SetItem(id, 4, str(evt.GetUnicodeKey()))
        self.SetItem(id, 5, UniChr)
        self.SetItem(id, 6, str(evt.GetRawKeyCode()))
        self.SetItem(id, 7, str(evt.GetRawKeyFlags()))
        self.EnsureVisible(id)

    def ClearLog(self):
        self.DeleteAllItems()

    def CopyLog(self):
        # build a newline and tab delimited string to put into the clipboard
        if "unicode" in wx.PlatformInfo:
            st = u""
        else:
            st = ""
        for h in self.colHeaders:
            st += h + "\t"
        st += "\n"
        for idx in range(self.GetItemCount()):
            for col in range(self.GetColumnCount()):
                item = self.GetItem(idx, col)
                st += item.GetText() + "\t"
            st += "\n"
        data = wx.TextDataObject()
        data.SetText(st)
        if wx.TheClipboard.Open():
            wx.TheClipboard.SetData(data)
            wx.TheClipboard.Close()
        else:
            wx.MessageBox("Unable to open the clipboard", "Error")
#----------------------------------------------------------------------
class TestPanel(wx.Panel):
    def __init__(self, parent, log):
        self.log = log
        wx.Panel.__init__(self, parent, -1, style=0)
        self.keysink = KeySink(self)
        self.keysink.SetMinSize((100, 65))
        self.keylog = KeyLog(self)

        btn = wx.Button(self, -1, "Clear", style=wx.BU_EXACTFIT)
        self.Bind(wx.EVT_BUTTON, self.OnClearBtn, btn)
        btn.SetToolTip("Clear the items from the log window")

        btn2 = wx.Button(self, -1, "Copy", style=wx.BU_EXACTFIT)
        self.Bind(wx.EVT_BUTTON, self.OnCopyBtn, btn2)
        btn2.SetToolTip("Copy the contents of the log window to the clipboard")

        cb1 = wx.CheckBox(self, -1, "Call evt.Skip in Key* events")
        self.Bind(wx.EVT_CHECKBOX, self.OnSkipCB, cb1)
        cb1.SetValue(True)

        cb2 = wx.CheckBox(self, -1, "KEY_UP")
        self.Bind(wx.EVT_CHECKBOX, self.OnKeyUpCB, cb2)
        cb2.SetValue(True)

        cb3 = wx.CheckBox(self, -1, "KEY_DOWN")
        self.Bind(wx.EVT_CHECKBOX, self.OnKeyDnCB, cb3)
        cb3.SetValue(True)

        cb4 = wx.CheckBox(self, -1, "CHAR")
        self.Bind(wx.EVT_CHECKBOX, self.OnCharCB, cb4)
        cb4.SetValue(True)

        buttons = wx.BoxSizer(wx.HORIZONTAL)
        buttons.Add(btn, 0, wx.ALL, 4)
        buttons.Add(btn2, 0, wx.ALL, 4)
        buttons.Add(cb1, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 6)
        buttons.Add(cb2, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT, 6)
        buttons.Add(cb3, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT, 6)
        buttons.Add(cb4, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT, 6)

        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.keysink, 0, wx.GROW)
        sizer.Add(buttons)
        sizer.Add(self.keylog, 1, wx.GROW)
        self.SetSizer(sizer)

    def OnClearBtn(self, evt):
        self.keylog.ClearLog()

    def OnCopyBtn(self, evt):
        self.keylog.CopyLog()

    def OnSkipCB(self, evt):
        self.keysink.SetCallSkip(evt.GetInt())

    def OnKeyUpCB(self, evt):
        self.keysink.SetLogKeyUp(evt.GetInt())

    def OnKeyDnCB(self, evt):
        self.keysink.SetLogKeyDn(evt.GetInt())

    def OnCharCB(self, evt):
        self.keysink.SetLogChar(evt.GetInt())
#----------------------------------------------------------------------
def runTest(frame, nb, log):
    win = TestPanel(nb, log)
    return win
#----------------------------------------------------------------------
overview = """<html><body>
<h2><center>wxKeyEvents</center></h2>
This demo simply catches all key events and prints info about them.
It is meant to be used as a compatibility test for cross platform work.
</body></html>
"""
if __name__ == '__main__':
    import sys, os
    import run
    run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
| 31 | 79 | 0.577788 | 1,672 | 13,299 | 4.356459 | 0.221292 | 0.08855 | 0.040774 | 0.023202 | 0.110242 | 0.086079 | 0.046266 | 0.035008 | 0.021691 | 0.021691 | 0 | 0.024121 | 0.264306 | 13,299 | 428 | 80 | 31.07243 | 0.72036 | 0.050756 | 0 | 0.075075 | 0 | 0 | 0.158775 | 0.005076 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072072 | false | 0 | 0.015015 | 0 | 0.102102 | 0.003003 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b624ed925fddaa73c150d0b98d5fda740868dd65 | 4,071 | py | Python | app/movies/tests/test_view.py | NicolefAvella/ApiMovie | 4860b312f62dee73de6015c3029e75a6045f79a1 | [
"MIT"
] | null | null | null | app/movies/tests/test_view.py | NicolefAvella/ApiMovie | 4860b312f62dee73de6015c3029e75a6045f79a1 | [
"MIT"
] | null | null | null | app/movies/tests/test_view.py | NicolefAvella/ApiMovie | 4860b312f62dee73de6015c3029e75a6045f79a1 | [
"MIT"
] | null | null | null | from django.urls import reverse
from rest_framework.test import APITestCase, APIClient
from rest_framework.views import status
from movies.models import Movies
from movies.serializers import MoviesSerializer
from user.models import User
import json
class BaseViewTest(APITestCase):
    client = APIClient()

    def create_movie(self, title="", genre="", cast="", director=""):
        """Create a movie; all fields must be non-empty."""
        if title != "" and genre != "" and cast != "" and director != "":
            return Movies.objects.create(title=title, genre=genre, cast=cast, director=director)
        else:
            print("complete data")

    def movie_request(self, kind="post", **kwargs):
        """Create (POST) or update (PUT) a movie."""
        if kind == "post":
            return self.client.post(reverse("movies-all"),
                                    data=json.dumps(kwargs["data"]),
                                    content_type='application/json'
                                    )
        elif kind == "put":
            return self.client.put(
                reverse(
                    "movies-detail",
                    kwargs={"pk": kwargs["id"]}
                ),
                data=json.dumps(kwargs["data"]),
                content_type='application/json'
            )
        else:
            return None

    def retrieve_movie(self, pk=0):
        return self.client.get(
            reverse(
                "movies-detail",
                kwargs={"pk": pk}
            )
        )

    def delete_movie(self, pk=0):
        return self.client.delete(
            reverse(
                "movies-detail",
                kwargs={"pk": pk}
            )
        )

    def setUp(self):
        """Add test data"""
        self.movie_1 = self.create_movie(title="Fast_and_Furious", genre="Action", cast="Dwayne_Johnson", director="flata")
        self.create_movie(title="The_lion_king", genre="Drama", cast="Donal_Glover", director='st')
        self.create_movie(title="The_mummy", genre="Horror", cast="Brendan_Fraser", director='md')
        self.valid_movie_id = self.movie_1.id
        self.invalid_movie_id = 50
        # create a user
        self.user = User.objects.create_superuser(
            username="test",
            email="test@gmail.com",
            password="test123",
            first_name="first name",
            last_name="last name",
            is_active=True,
        )
        url = reverse('user:login')
        data = {
            "email": "test@gmail.com",
            "password": "test123",
        }
        res = self.client.post(url, data=data, format='json')
        self.assertEqual(res.status_code, status.HTTP_200_OK, res.content)
        token = res.json().get('token')
        self.client.credentials(HTTP_AUTHORIZATION='Bearer {0}'.format(token))
class GetAllMoviesTest(BaseViewTest):

    def test_get_all_movies(self):
        """
        This test ensures that all movies added in the setUp method
        exist when we make a GET request to the movies/ endpoint
        """
        # self.login_client("test@gmail.com", "test123")
        response = self.client.get(
            reverse("movies-all")
        )
        expected = Movies.objects.all()
        serialized = MoviesSerializer(expected, many=True)
        self.assertEqual(response.data, serialized.data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
class GetASingleMovieTest(BaseViewTest):

    def test_get_a_movie(self):
        """Test retrieving a movie by id, for both existing and missing ids."""
        # self.login_client("test@gmail.com", "test123")
        response = self.retrieve_movie(self.valid_movie_id)
        expected = Movies.objects.get(pk=self.valid_movie_id)
        serialized = MoviesSerializer(expected)
        self.assertEqual(response.data, serialized.data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        response = self.retrieve_movie(self.invalid_movie_id)
        self.assertEqual(
            response.data["message"],
            "Movie with id: 50 does not exist"
        )
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
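# Note: setUp() above authenticates through the JWT login endpoint and attaches
# the token with HTTP_AUTHORIZATION='Bearer <token>', so every request in these
# test classes runs as the superuser it creates.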
| 33.368852 | 123 | 0.590273 | 455 | 4,071 | 5.151648 | 0.287912 | 0.029863 | 0.058874 | 0.03413 | 0.325939 | 0.264505 | 0.226536 | 0.156997 | 0.156997 | 0.075939 | 0 | 0.011383 | 0.28789 | 4,071 | 121 | 124 | 33.644628 | 0.797171 | 0.07099 | 0 | 0.2 | 0 | 0 | 0.102695 | 0 | 0 | 0 | 0 | 0 | 0.077778 | 1 | 0.077778 | false | 0.022222 | 0.077778 | 0.022222 | 0.266667 | 0.011111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b625420fbcf257af05779c352e7795a2abfb2733 | 5,426 | py | Python | examples/ConsumptionSaving/example_TractableBufferStockModel.py | HsinYiHung/HARK_HY | 086c46af5bd037fe1ced6906c6ea917ed58b134f | [
"Apache-2.0"
] | null | null | null | examples/ConsumptionSaving/example_TractableBufferStockModel.py | HsinYiHung/HARK_HY | 086c46af5bd037fe1ced6906c6ea917ed58b134f | [
"Apache-2.0"
] | null | null | null | examples/ConsumptionSaving/example_TractableBufferStockModel.py | HsinYiHung/HARK_HY | 086c46af5bd037fe1ced6906c6ea917ed58b134f | [
"Apache-2.0"
] | null | null | null | # %%
import numpy as np # numeric Python
from HARK.utilities import plotFuncs # basic plotting tools
from HARK.ConsumptionSaving.ConsMarkovModel import (
MarkovConsumerType,
) # An alternative, much longer way to solve the TBS model
from time import process_time # timing utility
from HARK.ConsumptionSaving.TractableBufferStockModel import TractableConsumerType
do_simulation = True
# %%
# Define the model primitives
base_primitives = {
    "UnempPrb": 0.00625,   # Probability of becoming unemployed
    "DiscFac": 0.975,      # Intertemporal discount factor
    "Rfree": 1.01,         # Risk-free interest factor on assets
    "PermGroFac": 1.0025,  # Permanent income growth factor (uncompensated)
    "CRRA": 1.0,           # Coefficient of relative risk aversion
}
# %%
# Define a dictionary to be used in case of simulation
simulation_values = {
    "aLvlInitMean": 0.0,  # Mean of log initial assets for new agents
    "aLvlInitStd": 1.0,   # Stdev of log initial assets for new agents
    "AgentCount": 10000,  # Number of agents to simulate
    "T_sim": 120,         # Number of periods to simulate
    "T_cycle": 1,         # Number of periods in the cycle
}
# %%
# Make and solve a tractable consumer type
ExampleType = TractableConsumerType(**base_primitives)
t_start = process_time()
ExampleType.solve()
t_end = process_time()
print(
    "Solving a tractable consumption-savings model took "
    + str(t_end - t_start)
    + " seconds."
)
# %%
# Plot the consumption function and whatnot
m_upper = 1.5 * ExampleType.mTarg
conFunc_PF = lambda m: ExampleType.h * ExampleType.PFMPC + ExampleType.PFMPC * m
# plotFuncs([ExampleType.solution[0].cFunc,ExampleType.mSSfunc,ExampleType.cSSfunc],0,m_upper)
plotFuncs([ExampleType.solution[0].cFunc, ExampleType.solution[0].cFunc_U], 0, m_upper)
# %%
if do_simulation:
    ExampleType(**simulation_values)  # Set attributes needed for simulation
    ExampleType.track_vars = ["mLvlNow"]
    ExampleType.makeShockHistory()
    ExampleType.initializeSim()
    ExampleType.simulate()
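# Note: where the tracked "mLvlNow" history lands depends on the HARK version,
# either ExampleType.history["mLvlNow"] or an mLvlNow_hist attribute, with one
# row per simulated period and one column per agent.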
# %%
# Now solve the same model using backward induction rather than the analytic method of TBS.
# The TBS model is equivalent to a Markov model with two states, one of them absorbing (permanent unemployment).
MrkvArray = np.array(
    [[1.0 - base_primitives["UnempPrb"], base_primitives["UnempPrb"]], [0.0, 1.0]]
)  # Define the two state, absorbing unemployment Markov array
init_consumer_objects = {
    "CRRA": base_primitives["CRRA"],
    "Rfree": np.array(
        2 * [base_primitives["Rfree"]]
    ),  # Interest factor (same in both states)
    "PermGroFac": [
        np.array(
            2 * [base_primitives["PermGroFac"] / (1.0 - base_primitives["UnempPrb"])]
        )
    ],  # Unemployment-compensated permanent growth factor
    "BoroCnstArt": None,  # Artificial borrowing constraint
    "PermShkStd": [0.0],  # Permanent shock standard deviation
    "PermShkCount": 1,  # Number of shocks in discrete permanent shock distribution
    "TranShkStd": [0.0],  # Transitory shock standard deviation
    "TranShkCount": 1,  # Number of shocks in discrete transitory shock distribution
    "T_cycle": 1,  # Number of periods in cycle
    "UnempPrb": 0.0,  # Unemployment probability (not used, as the unemployment here is *permanent*, not transitory)
    "UnempPrbRet": 0.0,  # Unemployment probability when retired (irrelevant here)
    "T_retire": 0,  # Age at retirement (turned off)
    "IncUnemp": 0.0,  # Income when unemployed (irrelevant)
    "IncUnempRet": 0.0,  # Income when unemployed and retired (irrelevant)
    "aXtraMin": 0.001,  # Minimum value of assets above minimum in grid
    "aXtraMax": ExampleType.mUpperBnd,  # Maximum value of assets above minimum in grid
    "aXtraCount": 48,  # Number of points in assets grid
    "aXtraExtra": [None],  # Additional points to include in assets grid
    "aXtraNestFac": 3,  # Degree of exponential nesting when constructing assets grid
    "LivPrb": [np.array([1.0, 1.0])],  # Survival probability
    "DiscFac": base_primitives["DiscFac"],  # Intertemporal discount factor
    "AgentCount": 1,  # Number of agents in a simulation (irrelevant)
    "tax_rate": 0.0,  # Tax rate on labor income (irrelevant)
    "vFuncBool": False,  # Whether to calculate the value function
    "CubicBool": True,  # Whether to use cubic splines (False --> linear splines)
    "MrkvArray": [MrkvArray],  # State transition probabilities
}
MarkovType = MarkovConsumerType(**init_consumer_objects)  # Make a basic consumer type
employed_income_dist = [
    np.ones(1),
    np.ones(1),
    np.ones(1),
]  # Income distribution when employed
unemployed_income_dist = [
    np.ones(1),
    np.ones(1),
    np.zeros(1),
]  # Income distribution when permanently unemployed
MarkovType.IncomeDstn = [
    [employed_income_dist, unemployed_income_dist]
]  # set the income distribution in each state
MarkovType.cycles = 0
# %%
# Solve the "Markov TBS" model
t_start = process_time()
MarkovType.solve()
t_end = process_time()
MarkovType.unpackcFunc()
# %%
print(
    'Solving the same model "the long way" took ' + str(t_end - t_start) + " seconds."
)
# plotFuncs([ExampleType.solution[0].cFunc,ExampleType.solution[0].cFunc_U],0,m_upper)
plotFuncs(MarkovType.cFunc[0], 0, m_upper)
diffFunc = lambda m: ExampleType.solution[0].cFunc(m) - MarkovType.cFunc[0][0](m)
print("Difference between the (employed) consumption functions:")
plotFuncs(diffFunc, 0, m_upper)
| 41.419847 | 116 | 0.71397 | 685 | 5,426 | 5.578102 | 0.354745 | 0.005758 | 0.031405 | 0.039257 | 0.200995 | 0.145512 | 0.132426 | 0.075373 | 0.075373 | 0.035069 | 0 | 0.021525 | 0.178032 | 5,426 | 130 | 117 | 41.738462 | 0.835202 | 0.441946 | 0 | 0.126214 | 0 | 0 | 0.181174 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.048544 | 0 | 0.048544 | 0.029126 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b625948601304a37edf120d20921eb82fe58c66b | 3,299 | py | Python | util/utils.py | tanghaotommy/Self-supervised-Fewshot-Medical-Image-Segmentation | 9ff8cd2421ee2f7c038d8eec15b0296b365e0c46 | [
"MIT"
] | 176 | 2020-09-10T16:32:16.000Z | 2022-03-30T12:06:02.000Z | util/utils.py | tanghaotommy/Self-supervised-Fewshot-Medical-Image-Segmentation | 9ff8cd2421ee2f7c038d8eec15b0296b365e0c46 | [
"MIT"
] | 14 | 2020-09-18T02:56:53.000Z | 2022-03-16T00:31:12.000Z | util/utils.py | tanghaotommy/Self-supervised-Fewshot-Medical-Image-Segmentation | 9ff8cd2421ee2f7c038d8eec15b0296b365e0c46 | [
"MIT"
] | 29 | 2020-09-13T20:00:00.000Z | 2022-02-11T00:40:00.000Z | """Util functions
Extended from original PANet code
TODO: move part of dataset configurations to data_utils
"""
import random
import torch
import numpy as np
import operator
def set_seed(seed):
    """
    Set the random seed
    """
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
CLASS_LABELS = {
    'SABS': {
        'pa_all': set([1, 2, 3, 6]),
        0: set([1, 6]),  # upper_abdomen: spleen + liver as training, kidneys are testing
        1: set([2, 3]),  # lower_abdomen
    },
    'C0': {
        'pa_all': set(range(1, 4)),
        0: set([2, 3]),
        1: set([1, 3]),
        2: set([1, 2]),
    },
    'CHAOST2': {
        'pa_all': set(range(1, 5)),
        0: set([1, 4]),  # upper_abdomen, leaving kidneys as testing classes
        1: set([2, 3]),  # lower_abdomen
    },
}
def get_bbox(fg_mask, inst_mask):
    """
    Get the ground truth bounding boxes
    """
    fg_bbox = torch.zeros_like(fg_mask, device=fg_mask.device)
    bg_bbox = torch.ones_like(fg_mask, device=fg_mask.device)

    inst_mask[fg_mask == 0] = 0
    area = torch.bincount(inst_mask.view(-1))
    cls_id = area[1:].argmax() + 1
    cls_ids = np.unique(inst_mask)[1:]

    mask_idx = np.where(inst_mask[0] == cls_id)
    y_min = mask_idx[0].min()
    y_max = mask_idx[0].max()
    x_min = mask_idx[1].min()
    x_max = mask_idx[1].max()
    fg_bbox[0, y_min:y_max+1, x_min:x_max+1] = 1

    for i in cls_ids:
        mask_idx = np.where(inst_mask[0] == i)
        y_min = max(mask_idx[0].min(), 0)
        y_max = min(mask_idx[0].max(), fg_mask.shape[1] - 1)
        x_min = max(mask_idx[1].min(), 0)
        x_max = min(mask_idx[1].max(), fg_mask.shape[2] - 1)
        bg_bbox[0, y_min:y_max+1, x_min:x_max+1] = 0
    return fg_bbox, bg_bbox
def t2n(img_t):
    """
    torch to numpy regardless of whether tensor is on gpu or memory
    """
    if img_t.is_cuda:
        return img_t.data.cpu().numpy()
    else:
        return img_t.data.numpy()

def to01(x_np):
    """
    normalize a numpy to 0-1 for visualize
    """
    return (x_np - x_np.min()) / (x_np.max() - x_np.min() + 1e-5)

def compose_wt_simple(is_wce, data_name):
    """
    Weights for cross-entropy loss
    """
    if is_wce:
        if data_name in ['SABS', 'SABS_Superpix', 'C0', 'C0_Superpix', 'CHAOST2', 'CHAOST2_Superpix']:
            return torch.FloatTensor([0.05, 1.0]).cuda()
        else:
            raise NotImplementedError
    else:
        return torch.FloatTensor([1.0, 1.0]).cuda()
class CircularList(list):
"""
Helper for spliting training and validation scans
Originally: https://stackoverflow.com/questions/8951020/pythonic-circular-list/8951224
"""
def __getitem__(self, x):
if isinstance(x, slice):
return [self[x] for x in self._rangeify(x)]
index = operator.index(x)
try:
return super().__getitem__(index % len(self))
except ZeroDivisionError:
raise IndexError('list index out of range')
def _rangeify(self, slice):
start, stop, step = slice.start, slice.stop, slice.step
if start is None:
start = 0
if stop is None:
stop = len(self)
if step is None:
step = 1
return range(start, stop, step)
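# Illustrative usage (not part of the original module):
#   scans = CircularList(range(5))   # -> [0, 1, 2, 3, 4]
#   scans[6]                         # -> 1, indices wrap around
#   scans[3:7]                       # -> [3, 4, 0, 1], slices wrap as well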
| 27.722689 | 102 | 0.585935 | 501 | 3,299 | 3.668663 | 0.309381 | 0.038085 | 0.026115 | 0.006529 | 0.129489 | 0.100109 | 0.080522 | 0.025027 | 0.025027 | 0.025027 | 0 | 0.040316 | 0.270688 | 3,299 | 118 | 103 | 27.957627 | 0.723608 | 0.173992 | 0 | 0.0625 | 0 | 0 | 0.04084 | 0 | 0 | 0 | 0 | 0.008475 | 0 | 1 | 0.0875 | false | 0 | 0.05 | 0 | 0.2625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b626f7b18fa5d92ee89efc8f742da215c496b617 | 663 | py | Python | src/my_project/medium_problems/from1to50/final_prices_with_special_discount_shop.py | ivan1016017/LeetCodeAlgorithmProblems | 454284b76634cc34ed41f7fa30d857403cedf1bf | [
"MIT"
] | null | null | null | src/my_project/medium_problems/from1to50/final_prices_with_special_discount_shop.py | ivan1016017/LeetCodeAlgorithmProblems | 454284b76634cc34ed41f7fa30d857403cedf1bf | [
"MIT"
] | 1 | 2021-09-22T12:26:14.000Z | 2021-09-22T12:26:14.000Z | src/my_project/medium_problems/from1to50/final_prices_with_special_discount_shop.py | ivan1016017/LeetCodeAlgorithmProblems | 454284b76634cc34ed41f7fa30d857403cedf1bf | [
"MIT"
] | null | null | null | from typing import List
class Solution:
def finalPrices(self, prices: List[int]) -> List[int]:
# initialize variables
solution = list()
len_prices = len(prices)
flag = -1
for i in range(len_prices):
flag = -1
for j in range(i+1, len_prices):
if prices[j] <= prices[i]:
solution.append(prices[i]-prices[j])
flag = 1
break
            if flag == -1:
                solution.append(prices[i])
return solution
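# A minimal O(n) alternative using a monotonic stack (illustrative sketch,
# not part of the original submission): indices still waiting for a discount
# are kept on a stack and resolved as soon as a smaller-or-equal price appears.
class SolutionStack:
    def finalPrices(self, prices: List[int]) -> List[int]:
        result = list(prices)
        stack = []  # indices whose discount has not been found yet
        for j, price in enumerate(prices):
            while stack and prices[stack[-1]] >= price:
                i = stack.pop()
                result[i] = prices[i] - price
            stack.append(j)
        return result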
solution = Solution()
print(solution.finalPrices(prices = [1,2,3,4,5])) | 26.52 | 58 | 0.503771 | 76 | 663 | 4.355263 | 0.421053 | 0.108761 | 0.07855 | 0.084592 | 0.102719 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02457 | 0.386124 | 663 | 25 | 59 | 26.52 | 0.788698 | 0.030166 | 0 | 0.105263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.052632 | 0 | 0.210526 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b627c5785e80c08378e3b966c7612558816085f7 | 23,226 | py | Python | gammapy/estimators/ts_map.py | vikasj78/gammapy | 46deb872bbcbf36748df71e659dc3fa592f6dc27 | [
"BSD-3-Clause"
] | null | null | null | gammapy/estimators/ts_map.py | vikasj78/gammapy | 46deb872bbcbf36748df71e659dc3fa592f6dc27 | [
"BSD-3-Clause"
] | null | null | null | gammapy/estimators/ts_map.py | vikasj78/gammapy | 46deb872bbcbf36748df71e659dc3fa592f6dc27 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions to compute TS images."""
import functools
import logging
import warnings
import numpy as np
import scipy.optimize
from astropy.coordinates import Angle
from gammapy.datasets.map import MapEvaluator
from gammapy.maps import Map, WcsGeom
from gammapy.modeling.models import PointSpatialModel, PowerLawSpectralModel, SkyModel
from gammapy.stats import (
amplitude_bounds_cython,
cash,
cash_sum_cython,
f_cash_root_cython,
x_best_leastsq,
)
from gammapy.utils.array import shape_2N, symmetric_crop_pad_width
from .core import Estimator
__all__ = ["TSMapEstimator"]
log = logging.getLogger(__name__)
FLUX_FACTOR = 1e-12
MAX_NITER = 20
RTOL = 1e-3
def round_up_to_odd(f):
return int(np.ceil(f) // 2 * 2 + 1)
def _extract_array(array, shape, position):
"""Helper function to extract parts of a larger array.
    Simple implementation of an array extract function, because
    `~astropy.nddata.utils.extract_array` introduces too much overhead.
Parameters
----------
array : `~numpy.ndarray`
The array from which to extract.
shape : tuple or int
The shape of the extracted array.
position : tuple of numbers or number
The position of the small array's center with respect to the
large array.
"""
x_width = shape[1] // 2
y_width = shape[0] // 2
y_lo = position[0] - y_width
y_hi = position[0] + y_width + 1
x_lo = position[1] - x_width
x_hi = position[1] + x_width + 1
return array[y_lo:y_hi, x_lo:x_hi]
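# Illustrative example (not part of the original module):
#   >>> arr = np.arange(25).reshape(5, 5)
#   >>> _extract_array(arr, (3, 3), (2, 2))
#   array([[ 6,  7,  8],
#          [11, 12, 13],
#          [16, 17, 18]])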
def f_cash(x, counts, background, model):
"""Wrapper for cash statistics, that defines the model function.
Parameters
----------
x : float
Model amplitude.
counts : `~numpy.ndarray`
Count image slice, where model is defined.
background : `~numpy.ndarray`
Background image slice, where model is defined.
model : `~numpy.ndarray`
Source template (multiplied with exposure).
"""
return cash_sum_cython(
counts.ravel(), (background + x * FLUX_FACTOR * model).ravel()
)
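# For reference (illustrative, assuming mu > 0 everywhere): cash_sum_cython
# evaluates the Cash fit statistic, which a plain-numpy sketch would compute as
#   2.0 * np.sum(mu - counts * np.log(mu))
# with mu = background + x * FLUX_FACTOR * model.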
class TSMapEstimator(Estimator):
r"""Compute TS map from a MapDataset using different optimization methods.
    The map is computed by fitting a single amplitude parameter per position. The fit is
    simplified by finding roots of the derivative of the fit statistics using
    various root-finding algorithms. The approach is described in Appendix A
in Stewart (2009).
Parameters
----------
model : `~gammapy.modeling.model.SkyModel`
Source model kernel. If set to None, assume point source model, PointSpatialModel.
kernel_width : `~astropy.coordinates.Angle`
Width of the kernel to use: the kernel will be truncated at this size
downsampling_factor : int
Sample down the input maps to speed up the computation. Only integer
values that are a multiple of 2 are allowed. Note that the kernel is
not sampled down, but must be provided with the downsampled bin size.
    method : str ('root brentq')
The following options are available:
* ``'root brentq'`` (default)
            Fit amplitude by finding the roots of the derivative of the fit
statistics using the brentq method.
* ``'root newton'``
            Fit amplitude by finding the roots of the derivative of the fit
statistics using Newton's method.
* ``'leastsq iter'``
            Fit the amplitude by an iterative least-squares fit that can be solved
analytically.
error_method : ['covar', 'conf']
Error estimation method.
error_sigma : int (1)
Sigma for flux error.
ul_method : ['covar', 'conf']
Upper limit estimation method.
ul_sigma : int (2)
Sigma for flux upper limits.
threshold : float (None)
If the TS value corresponding to the initial flux estimate is not above
this threshold, the optimizing step is omitted to save computing time.
rtol : float (0.001)
Relative precision of the flux estimate. Used as a stopping criterion for
the amplitude fit.
Notes
-----
    Negative :math:`TS` values are defined as follows:
.. math::
TS = \left \{
\begin{array}{ll}
-TS \text{ if } F < 0 \\
TS \text{ else}
\end{array}
\right.
Where :math:`F` is the fitted flux amplitude.
References
----------
[Stewart2009]_
"""
tag = "TSMapEstimator"
def __init__(
self,
model=None,
kernel_width="0.2 deg",
downsampling_factor=None,
method="root brentq",
error_method="covar",
error_sigma=1,
ul_method="covar",
ul_sigma=2,
threshold=None,
rtol=0.001,
):
if method not in ["root brentq", "root newton", "leastsq iter"]:
raise ValueError(f"Not a valid method: '{method}'")
if error_method not in ["covar", "conf"]:
raise ValueError(f"Not a valid error method '{error_method}'")
self.kernel_width = Angle(kernel_width)
if model is None:
model = SkyModel(
spectral_model=PowerLawSpectralModel(),
spatial_model=PointSpatialModel(),
)
self.model = model
self.downsampling_factor = downsampling_factor
self.parameters = {
"method": method,
"error_method": error_method,
"error_sigma": error_sigma,
"ul_method": ul_method,
"ul_sigma": ul_sigma,
"threshold": threshold,
"rtol": rtol,
}
def get_kernel(self, dataset):
"""Set the convolution kernel for the input dataset.
Convolves the model with the PSFKernel at the center of the dataset.
        If no PSFMap or PSFKernel is found in the dataset, the model is used without convolution.
"""
# TODO: further simplify the code below
geom = dataset.counts.geom
if self.downsampling_factor:
geom = geom.downsample(self.downsampling_factor)
model = self.model.copy()
model.spatial_model.position = geom.center_skydir
binsz = np.mean(geom.pixel_scales)
width_pix = self.kernel_width / binsz
npix = round_up_to_odd(width_pix.to_value(""))
axis = dataset.exposure.geom.get_axis_by_name("energy_true")
geom = WcsGeom.create(
skydir=model.position, proj="TAN", npix=npix, axes=[axis], binsz=binsz
)
exposure = Map.from_geom(geom, unit="cm2 s1")
exposure.data += 1.0
# We use global evaluation mode to not modify the geometry
evaluator = MapEvaluator(model, evaluation_mode="global")
evaluator.update(exposure, dataset.psf, dataset.edisp, dataset.counts.geom)
kernel = evaluator.compute_npred().sum_over_axes()
kernel.data /= kernel.data.sum()
if (self.kernel_width > geom.width).any():
raise ValueError(
"Kernel shape larger than map shape, please adjust"
" size of the kernel"
)
return kernel
@staticmethod
def flux_default(dataset, kernel):
"""Estimate default flux map using a given kernel.
Parameters
----------
dataset : `~gammapy.cube.MapDataset`
Input dataset.
kernel : `~numpy.ndarray`
Source model kernel.
Returns
-------
flux_approx : `~gammapy.maps.WcsNDMap`
Approximate flux map (2D).
"""
flux = dataset.counts - dataset.npred()
flux = flux.sum_over_axes(keepdims=False)
flux /= dataset.exposure.sum_over_axes(keepdims=False)
flux /= np.sum(kernel ** 2)
return flux.convolve(kernel)
@staticmethod
def mask_default(exposure, background, kernel):
"""Compute default mask where to estimate TS values.
Parameters
----------
exposure : `~gammapy.maps.Map`
Input exposure map.
background : `~gammapy.maps.Map`
Input background map.
kernel : `~numpy.ndarray`
Source model kernel.
Returns
-------
mask : `gammapy.maps.WcsNDMap`
Mask map.
"""
mask = np.zeros(exposure.data.shape, dtype=int)
# mask boundary
slice_x = slice(kernel.shape[1] // 2, -kernel.shape[1] // 2 + 1)
slice_y = slice(kernel.shape[0] // 2, -kernel.shape[0] // 2 + 1)
mask[slice_y, slice_x] = 1
# positions where exposure == 0 are not processed
mask &= exposure.data > 0
        # some images contain pixels that have exposure but zero background,
        # which doesn't make sense and makes the TS computation fail;
        # masking them out is a temporary fix
mask[background.data == 0] = 0
return exposure.copy(data=mask.astype("int"), unit="")
@staticmethod
def sqrt_ts(map_ts):
r"""Compute sqrt(TS) map.
Compute sqrt(TS) as defined by:
.. math::
\sqrt{TS} = \left \{
\begin{array}{ll}
-\sqrt{-TS} & : \text{if} \ TS < 0 \\
\sqrt{TS} & : \text{else}
\end{array}
\right.
Parameters
----------
map_ts : `gammapy.maps.WcsNDMap`
Input TS map.
Returns
-------
sqrt_ts : `gammapy.maps.WcsNDMap`
Sqrt(TS) map.
"""
with np.errstate(invalid="ignore", divide="ignore"):
ts = map_ts.data
sqrt_ts = np.where(ts > 0, np.sqrt(ts), -np.sqrt(-ts))
return map_ts.copy(data=sqrt_ts)
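    # Illustrative values: TS = 25 gives sqrt_ts = 5; TS = -9 gives sqrt_ts = -3.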
def run(self, dataset, steps="all"):
"""
Run TS map estimation.
Requires a MapDataset with counts, exposure and background_model
properly set to run.
Parameters
----------
dataset : `~gammapy.datasets.MapDataset`
Input MapDataset.
steps : list of str or 'all'
Which maps to compute. Available options are:
* "ts": estimate delta TS and significance (sqrt_ts)
* "flux-err": estimate symmetric error on flux.
* "flux-ul": estimate upper limits on flux.
By default all steps are executed.
Returns
-------
maps : dict
Dictionary containing result maps. Keys are:
* ts : delta TS map
* sqrt_ts : sqrt(delta TS), or significance map
* flux : flux map
* flux_err : symmetric error map
* flux_ul : upper limit map
"""
p = self.parameters
# First create 2D map arrays
counts = dataset.counts.sum_over_axes(keepdims=False)
background = dataset.npred().sum_over_axes(keepdims=False)
exposure = dataset.exposure.sum_over_axes(keepdims=False)
kernel = self.get_kernel(dataset)
if dataset.mask is not None:
mask = counts.copy(data=(dataset.mask.sum(axis=0) > 0).astype("int"))
else:
mask = counts.copy(data=np.ones_like(counts).astype("int"))
if self.downsampling_factor:
shape = counts.data.shape
pad_width = symmetric_crop_pad_width(shape, shape_2N(shape))[0]
counts = counts.pad(pad_width).downsample(
self.downsampling_factor, preserve_counts=True
)
background = background.pad(pad_width).downsample(
self.downsampling_factor, preserve_counts=True
)
exposure = exposure.pad(pad_width).downsample(
self.downsampling_factor, preserve_counts=False
)
mask = mask.pad(pad_width).downsample(
self.downsampling_factor, preserve_counts=False
)
mask.data = mask.data.astype("int")
mask.data &= self.mask_default(exposure, background, kernel.data).data
if steps == "all":
steps = ["ts", "sqrt_ts", "flux", "flux_err", "flux_ul", "niter"]
result = {}
for name in steps:
data = np.nan * np.ones_like(counts.data)
result[name] = counts.copy(data=data)
flux_map = self.flux_default(dataset, kernel.data)
if p["threshold"] or p["method"] == "root newton":
flux = flux_map.data
else:
flux = None
# prepare dtype for cython methods
counts_array = counts.data.astype(float)
background_array = background.data.astype(float)
exposure_array = exposure.data.astype(float)
# Compute null statistics per pixel for the whole image
c_0 = cash(counts_array, background_array)
error_method = p["error_method"] if "flux_err" in steps else "none"
ul_method = p["ul_method"] if "flux_ul" in steps else "none"
wrap = functools.partial(
_ts_value,
counts=counts_array,
exposure=exposure_array,
background=background_array,
c_0=c_0,
kernel=kernel.data,
flux=flux,
method=p["method"],
error_method=error_method,
threshold=p["threshold"],
error_sigma=p["error_sigma"],
ul_method=ul_method,
ul_sigma=p["ul_sigma"],
rtol=p["rtol"],
)
x, y = np.where(np.squeeze(mask.data))
positions = list(zip(x, y))
results = list(map(wrap, positions))
# Set TS values at given positions
j, i = zip(*positions)
for name in ["ts", "flux", "niter"]:
result[name].data[j, i] = [_[name] for _ in results]
if "flux_err" in steps:
result["flux_err"].data[j, i] = [_["flux_err"] for _ in results]
if "flux_ul" in steps:
result["flux_ul"].data[j, i] = [_["flux_ul"] for _ in results]
# Compute sqrt(TS) values
if "sqrt_ts" in steps:
result["sqrt_ts"] = self.sqrt_ts(result["ts"])
if self.downsampling_factor:
for name in steps:
order = 0 if name == "niter" else 1
result[name] = result[name].upsample(
factor=self.downsampling_factor, preserve_counts=False, order=order
)
result[name] = result[name].crop(crop_width=pad_width)
# Set correct units
if "flux" in steps:
result["flux"].unit = flux_map.unit
if "flux_err" in steps:
result["flux_err"].unit = flux_map.unit
if "flux_ul" in steps:
result["flux_ul"].unit = flux_map.unit
return result
def __repr__(self):
p = self.parameters
info = self.__class__.__name__
info += "\n\nParameters:\n\n"
for key in p:
info += f"\t{key:13s}: {p[key]}\n"
return info
def _ts_value(
position,
counts,
exposure,
background,
c_0,
kernel,
flux,
method,
error_method,
error_sigma,
ul_method,
ul_sigma,
threshold,
rtol,
):
"""Compute TS value at a given pixel position.
Uses approach described in Stewart (2009).
Parameters
----------
position : tuple (i, j)
Pixel position.
counts : `~numpy.ndarray`
Counts image
background : `~numpy.ndarray`
Background image
exposure : `~numpy.ndarray`
Exposure image
kernel : `astropy.convolution.Kernel2D`
Source model kernel
flux : `~numpy.ndarray`
Flux image. The flux value at the given pixel position is used as
starting value for the minimization.
Returns
-------
TS : float
TS value at the given pixel position.
"""
# Get data slices
counts_ = _extract_array(counts, kernel.shape, position)
background_ = _extract_array(background, kernel.shape, position)
exposure_ = _extract_array(exposure, kernel.shape, position)
c_0_ = _extract_array(c_0, kernel.shape, position)
model = exposure_ * kernel
c_0 = c_0_.sum()
if threshold is not None:
with np.errstate(invalid="ignore", divide="ignore"):
amplitude = flux[position]
c_1 = f_cash(amplitude / FLUX_FACTOR, counts_, background_, model)
# Don't fit if pixel significance is low
if c_0 - c_1 < threshold:
result = {}
result["ts"] = (c_0 - c_1) * np.sign(amplitude)
result["flux"] = amplitude
result["niter"] = 0
result["flux_err"] = np.nan
result["flux_ul"] = np.nan
return result
if method == "root brentq":
amplitude, niter = _root_amplitude_brentq(
counts_, background_, model, rtol=rtol
)
elif method == "root newton":
amplitude, niter = _root_amplitude(
counts_, background_, model, flux[position], rtol=rtol
)
elif method == "leastsq iter":
amplitude, niter = _leastsq_iter_amplitude(
counts_, background_, model, rtol=rtol
)
else:
raise ValueError(f"Invalid method: {method}")
with np.errstate(invalid="ignore", divide="ignore"):
c_1 = f_cash(amplitude, counts_, background_, model)
result = {}
result["ts"] = (c_0 - c_1) * np.sign(amplitude)
result["flux"] = amplitude * FLUX_FACTOR
result["niter"] = niter
if error_method == "covar":
flux_err = _compute_flux_err_covar(amplitude, counts_, background_, model)
result["flux_err"] = flux_err * error_sigma
elif error_method == "conf":
flux_err = _compute_flux_err_conf(
amplitude, counts_, background_, model, c_1, error_sigma
)
result["flux_err"] = FLUX_FACTOR * flux_err
if ul_method == "covar":
result["flux_ul"] = result["flux"] + ul_sigma * result["flux_err"]
elif ul_method == "conf":
flux_ul = _compute_flux_err_conf(
amplitude, counts_, background_, model, c_1, ul_sigma
)
result["flux_ul"] = FLUX_FACTOR * flux_ul + result["flux"]
return result
def _leastsq_iter_amplitude(counts, background, model, maxiter=MAX_NITER, rtol=RTOL):
"""Fit amplitude using an iterative least squares algorithm.
Parameters
----------
counts : `~numpy.ndarray`
Slice of counts image
background : `~numpy.ndarray`
Slice of background image
model : `~numpy.ndarray`
Model template to fit.
maxiter : int
Maximum number of iterations.
rtol : float
Relative flux error.
Returns
-------
amplitude : float
Fitted flux amplitude.
niter : int
Number of function evaluations needed for the fit.
"""
bounds = amplitude_bounds_cython(counts, background, model)
amplitude_min, amplitude_max, amplitude_min_total = bounds
if not counts.sum() > 0:
return amplitude_min_total, 0
weights = np.ones(model.shape)
x_old = 0
for i in range(maxiter):
x = x_best_leastsq(counts, background, model, weights)
if abs((x - x_old) / x) < rtol:
return max(x / FLUX_FACTOR, amplitude_min_total), i + 1
else:
weights = x * model + background
x_old = x
    return max(x / FLUX_FACTOR, amplitude_min_total), maxiter
def _root_amplitude(counts, background, model, flux, rtol=RTOL):
"""Fit amplitude by finding roots using newton algorithm.
See Appendix A Stewart (2009).
Parameters
----------
counts : `~numpy.ndarray`
Slice of count image
background : `~numpy.ndarray`
Slice of background image
model : `~numpy.ndarray`
Model template to fit.
flux : float
Starting value for the fit.
Returns
-------
amplitude : float
Fitted flux amplitude.
niter : int
Number of function evaluations needed for the fit.
"""
args = (counts, background, model)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
return (
scipy.optimize.newton(
f_cash_root_cython, flux, args=args, maxiter=MAX_NITER, tol=rtol
),
0,
)
except RuntimeError:
# Where the root finding fails NaN is set as amplitude
return np.nan, MAX_NITER
def _root_amplitude_brentq(counts, background, model, rtol=RTOL):
"""Fit amplitude by finding roots using Brent algorithm.
See Appendix A Stewart (2009).
Parameters
----------
counts : `~numpy.ndarray`
Slice of count image
background : `~numpy.ndarray`
Slice of background image
model : `~numpy.ndarray`
Model template to fit.
Returns
-------
amplitude : float
Fitted flux amplitude.
niter : int
Number of function evaluations needed for the fit.
"""
# Compute amplitude bounds and assert counts > 0
bounds = amplitude_bounds_cython(counts, background, model)
amplitude_min, amplitude_max, amplitude_min_total = bounds
if not counts.sum() > 0:
return amplitude_min_total, 0
args = (counts, background, model)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
result = scipy.optimize.brentq(
f_cash_root_cython,
amplitude_min,
amplitude_max,
args=args,
maxiter=MAX_NITER,
full_output=True,
rtol=rtol,
)
return max(result[0], amplitude_min_total), result[1].iterations
except (RuntimeError, ValueError):
# Where the root finding fails NaN is set as amplitude
return np.nan, MAX_NITER
def _compute_flux_err_covar(x, counts, background, model):
"""
Compute amplitude errors using inverse 2nd derivative method.
"""
with np.errstate(invalid="ignore", divide="ignore"):
stat = (model ** 2 * counts) / (background + x * FLUX_FACTOR * model) ** 2
return np.sqrt(1.0 / stat.sum())
def _compute_flux_err_conf(amplitude, counts, background, model, c_1, error_sigma):
"""
Compute amplitude errors using likelihood profile method.
"""
def ts_diff(x, counts, background, model):
return (c_1 + error_sigma ** 2) - f_cash(x, counts, background, model)
args = (counts, background, model)
amplitude_max = amplitude + 1e4
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
result = scipy.optimize.brentq(
ts_diff,
amplitude,
amplitude_max,
args=args,
maxiter=MAX_NITER,
rtol=1e-3,
)
return result - amplitude
except (RuntimeError, ValueError):
# Where the root finding fails NaN is set as amplitude
return np.nan
| 31.514247 | 94 | 0.592267 | 2,749 | 23,226 | 4.858858 | 0.157512 | 0.027551 | 0.034589 | 0.020214 | 0.30995 | 0.264356 | 0.22243 | 0.199221 | 0.161189 | 0.161189 | 0 | 0.008213 | 0.308017 | 23,226 | 736 | 95 | 31.557065 | 0.82286 | 0.331654 | 0 | 0.201681 | 0 | 0 | 0.060615 | 0 | 0 | 0 | 0 | 0.001359 | 0 | 1 | 0.047619 | false | 0 | 0.033613 | 0.005602 | 0.151261 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b62a7fea18f8f4556139383b37d4d565e04f0ab2 | 2,195 | py | Python | reporter/factories/slack.py | itsdkey/workreporter | daea921a03f4798c9acd689fc9bc6010e72cf886 | [
"MIT"
] | null | null | null | reporter/factories/slack.py | itsdkey/workreporter | daea921a03f4798c9acd689fc9bc6010e72cf886 | [
"MIT"
] | 21 | 2020-04-04T11:08:20.000Z | 2021-01-29T07:58:40.000Z | reporter/factories/slack.py | itsdkey/workreporter | daea921a03f4798c9acd689fc9bc6010e72cf886 | [
"MIT"
] | null | null | null | import string
from factory import Dict, DictFactory, Faker, List
from factory.fuzzy import FuzzyChoice, FuzzyText
from reporter.apps import __version__
class SectionButtonFactory(DictFactory):
"""A factory for a section with a button."""
type = 'section'
accessory = Dict({
'text': {
'emoji': True,
'text': 'Review Now',
'type': 'plain_text',
},
'type': 'button',
'url': FuzzyText(
prefix='https://bitbucket.org/example/example_repos/pull-requests/',
length=4,
chars=string.digits,
),
})
text = Dict({
'text': FuzzyText(prefix='<@', suffix='>', length=2, chars=string.digits),
'type': 'mrkdwn',
})
class SectionBlockFactory(DictFactory):
"""A factory for a section block."""
type = 'section'
text = Dict({
'text': Dict({
'text': Faker('sentence'),
'type': FuzzyChoice(['mrkdwn', 'plain_text']),
}),
})
class ContextBlockFactory(DictFactory):
"""A factory for a context block."""
type = 'context'
elements = List([
Dict({'text': '*Author:* dave', 'type': 'mrkdwn'}),
Dict({'text': f'*version:* {__version__}', 'type': 'mrkdwn'}),
])
class DividerBlockFactory(DictFactory):
"""A factory for a divider block."""
type = 'divider'
class BlockFactory(DictFactory):
"""A factory for a block used in slack messages."""
text = Dict({
'text': Dict({
'text': Faker('sentence'),
'type': FuzzyChoice(['mrkdwn', 'plain_text']),
'emoji': FuzzyChoice([True, False]),
}),
})
type = FuzzyChoice(['section', 'divider', 'context'])
class SlackMessageFactory(DictFactory):
"""
A factory for a slack message.
This message is built via block kits that is a UI framework designed for slack.
Support url:
https://api.slack.com/block-kit
"""
blocks = List([
SectionBlockFactory(),
ContextBlockFactory(),
DividerBlockFactory(),
SectionButtonFactory(),
SectionButtonFactory(),
DividerBlockFactory(),
])
| 24.662921 | 83 | 0.569021 | 208 | 2,195 | 5.947115 | 0.375 | 0.051738 | 0.092158 | 0.10671 | 0.224737 | 0.150364 | 0.101859 | 0.101859 | 0.101859 | 0.101859 | 0 | 0.001263 | 0.278815 | 2,195 | 88 | 84 | 24.943182 | 0.780164 | 0.153531 | 0 | 0.403509 | 0 | 0 | 0.175623 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.070175 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b62c3785b8faee0ef4f6c5a2aca7da2f7a1f610d | 4,040 | py | Python | pybrain/inspect_ops.py | Kevinfu510/TridentFrame | 9766b3642ad065662ca428212bfe3f3dca25139d | [
"MIT"
] | null | null | null | pybrain/inspect_ops.py | Kevinfu510/TridentFrame | 9766b3642ad065662ca428212bfe3f3dca25139d | [
"MIT"
] | null | null | null | pybrain/inspect_ops.py | Kevinfu510/TridentFrame | 9766b3642ad065662ca428212bfe3f3dca25139d | [
"MIT"
] | null | null | null | import os
import string
import math
from random import choices
from pprint import pprint
from urllib.parse import urlparse
from PIL import Image
from apng import APNG
from colorama import init, deinit
from hurry.filesize import size, alternative
from .config import IMG_EXTS, STATIC_IMG_EXTS, ANIMATED_IMG_EXTS
def _inspect_image(animage_path):
"""Returns information of an animted GIF/APNG"""
abspath = os.path.abspath(animage_path)
filename = str(os.path.basename(abspath))
ext = str.lower(os.path.splitext(filename)[1])
frame_count = 0
fps = 0
avg_delay = 0
fsize = size(os.stat(abspath).st_size, system=alternative)
# fsize = 0
width = height = 0
loop_duration = 0
extension = ''
if ext == '.gif':
try:
gif: Image = Image.open(abspath)
except Exception:
raise Exception(f'The chosen file ({filename}) is not a valid GIF image')
if gif.format != 'GIF' or not gif.is_animated:
raise Exception(f"The chosen GIF ({filename}) is not an animated GIF!")
width, height = gif.size
frame_count = gif.n_frames
# pprint(gif.info)
delays = []
for f in range(0, gif.n_frames):
gif.seek(f)
delays.append(gif.info['duration'])
avg_delay = sum(delays) / len(delays)
fps = round(1000.0 / avg_delay, 3)
loop_duration = round(frame_count / fps, 3)
extension = 'GIF'
elif ext == '.png':
try:
apng: APNG = APNG.open(abspath)
except Exception:
raise Exception(f'The chosen file ({filename}) is not a valid PNG image')
frames = apng.frames
frame_count = len(frames)
if frame_count <= 1:
raise Exception(f"The chosen PNG ({filename}) is not an animated PNG!")
png_one, controller_one = frames[0]
# pprint(png_one.__dict__)
# pprint(controller_one.__dict__)
extension = 'APNG'
width = png_one.width
height = png_one.height
avg_delay = sum([f[1].delay for f in frames]) / frame_count
fps = round(1000.0 / avg_delay, 3)
loop_duration = round(frame_count / fps, 3)
image_info = {
"name": filename,
"fps": fps,
"avg_delay": round(avg_delay / 1000, 3),
"fsize": fsize,
"extension": extension,
"frame_count": frame_count,
"absolute_url": abspath,
"width": width,
"height": height,
"loop_duration": loop_duration,
}
return image_info
def _inspect_sequence(image_paths):
"""Returns information of a selected sequence of images"""
abs_image_paths = [os.path.abspath(ip) for ip in image_paths if os.path.exists(ip)]
img_paths = [f for f in abs_image_paths if str.lower(os.path.splitext(f)[1][1:]) in STATIC_IMG_EXTS]
# raise Exception("imgs", imgs)
print("imgs count", len(img_paths))
# pprint(imgs)
if not img_paths:
raise Exception("No images selected. Make sure the path to them are correct")
first_img_name = os.path.splitext(img_paths[0])[0]
filename = os.path.basename(first_img_name.split('_')[0] if '_' in first_img_name else first_img_name)
# apngs = [apng for apng in (APNG.open(i) for i in imgs) if len(apng.frames) > 1]
# gifs = [gif for gif in (Image.open(i) for i in imgs) if gif.format == "GIF" and gif.is_animated]
static_imgs = [i for i in img_paths if len(APNG.open(i).frames) == 1 and Image.open(i).format != "GIF"]
sequence_size = size(sum([os.stat(i).st_size for i in static_imgs]), system=alternative)
print("statics count", len(static_imgs))
if not static_imgs:
raise Exception("The images choosen must be static images, not animted GIFs or PNGs!")
# pprint(apngs)
# pprint(gifs)
# if any(APNG.open(i) for i in imgs)):
sequence_info = {
"name": filename,
"total": len(static_imgs),
"sequences": static_imgs,
"size": sequence_size,
}
return sequence_info
| 35.752212 | 107 | 0.630446 | 572 | 4,040 | 4.298951 | 0.225524 | 0.0366 | 0.0122 | 0.02928 | 0.181781 | 0.125661 | 0.125661 | 0.102481 | 0.102481 | 0.102481 | 0 | 0.012317 | 0.256436 | 4,040 | 112 | 108 | 36.071429 | 0.806258 | 0.114851 | 0 | 0.113636 | 0 | 0 | 0.136709 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022727 | false | 0 | 0.125 | 0 | 0.170455 | 0.034091 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b62dc7b9f4a6677f9a7cc3ff035bfd06aa2b42eb | 468 | py | Python | mlrun/data_types/__init__.py | yanburman/mlrun | f6d2bb1d99d163ab47774f15b86008bfd76f6ba1 | [
"Apache-2.0"
] | null | null | null | mlrun/data_types/__init__.py | yanburman/mlrun | f6d2bb1d99d163ab47774f15b86008bfd76f6ba1 | [
"Apache-2.0"
] | null | null | null | mlrun/data_types/__init__.py | yanburman/mlrun | f6d2bb1d99d163ab47774f15b86008bfd76f6ba1 | [
"Apache-2.0"
] | null | null | null | # flake8: noqa - this is until we take care of the F401 violations with respect to __all__ & sphinx
from .data_types import ValueType, pd_schema_to_value_type, InferOptions
from .infer import DFDataInfer
class BaseDataInfer:
infer_schema = None
get_preview = None
get_stats = None
def get_infer_interface(df) -> BaseDataInfer:
if hasattr(df, "rdd"):
from .spark import SparkDataInfer
return SparkDataInfer
return DFDataInfer
| 24.631579 | 100 | 0.737179 | 61 | 468 | 5.42623 | 0.721311 | 0.042296 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01084 | 0.211538 | 468 | 18 | 101 | 26 | 0.886179 | 0.209402 | 0 | 0 | 0 | 0 | 0.008152 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.909091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b62e64b939d1bd9c03a4b5b970f6b1625a5fffd7 | 7,942 | py | Python | sanity_test.py | C2SM/clim-sanity-checker | 3d5d610b16ca7e87c841ef7ad06a94d0399b4773 | [
"MIT"
] | null | null | null | sanity_test.py | C2SM/clim-sanity-checker | 3d5d610b16ca7e87c841ef7ad06a94d0399b4773 | [
"MIT"
] | 3 | 2021-07-29T11:26:20.000Z | 2021-07-29T16:01:54.000Z | sanity_test.py | C2SM/clim-sanity-checker | 3d5d610b16ca7e87c841ef7ad06a94d0399b4773 | [
"MIT"
] | null | null | null | # standard modules
import argparse
import os
# aliased standard modules
import pandas as pd
# modules of sanity checker
import add_exp_to_ref
import lib.paths as paths
import lib.utils as utils
import perform_test
import process_data
import lib.logger_config as logger_config
import lib.test_config as test_config
# aliased modules of sanity checker
import lib.plot_mean_std as plt
# standalone imports
from lib.logger_config import log
'''
Script to test sanity of climate models. It contains:
- main: process model output, perform tests and plot results;
  each function called by main() can also be run standalone
  as its own main(). Prior to execution, paths_init.py
  needs to be executed.
Note that this script requires user input at some stages,
so it cannot be run as a batched job.
Help: python sanity_test.py --help
# C.Siegenthaler, 2019
# J.Jucker, 2020
'''
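# Example invocation (illustrative values; the flags are defined in the
# argparse section at the bottom of this script):
#   python sanity_test.py -e my_exp_01 --p_raw_files /path/to/raw --tests welch rmse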
def main(new_exp,
p_raw_files,
raw_f_subfold,
p_stages,
p_ref_csv_files,
wrk_dir,
f_vars_to_extract,
f_pattern_ref,
tests,
spinup,
lclean,
ltestsuite,
lverbose):
# init logger
logger_config.init_logger(lverbose,__file__)
log.banner('Start sanity checker')
# make all paths from user to absolute paths
wrk_dir = utils.abs_path(wrk_dir)
p_stages = utils.abs_path(p_stages)
p_ref_csv_files = utils.abs_path(p_ref_csv_files)
f_pattern_ref = utils.abs_path(f_pattern_ref)
# create directories
os.makedirs(p_stages,exist_ok=True)
os.makedirs(wrk_dir,exist_ok=True)
# go to working directory
os.chdir((wrk_dir))
log.info('Working directory is {}'.format(wrk_dir))
# data processing takes a while, check that no step is done twice
actions = utils.determine_actions_for_data_processing(new_exp,
tests,
p_stages,
lclean)
# create dataframe out of raw data
results_data_processing = process_data.main(
new_exp,
actions,
tests,
spinup,
p_raw_files=p_raw_files,
p_stages=p_stages,
raw_f_subfold=raw_f_subfold,
f_vars_to_extract=f_vars_to_extract,
f_pattern_ref=f_pattern_ref)
results_test, references = perform_test.main(
new_exp,
results_data_processing=results_data_processing,
p_stages=p_stages,
tests=tests,
p_ref_csv_files=p_ref_csv_files,
ltestsuite=ltestsuite,
f_vars_to_extract=f_vars_to_extract)
if 'welch' in tests:
test = 'welch'
plt.plt_welchstest(
references[test].append(results_data_processing[test],
sort=False),
new_exp,
results_test[test],
p_stages=p_stages)
# Add experiment to the reference pool
#--------------------------------------------------------------------
log.banner('')
log.banner('Check results again before adding to reference pool')
log.banner('')
for test in tests:
test_cfg = test_config.get_config_of_current_test(test)
utils.print_warning_if_testresult_is_bad(
test,
results_test[test],
test_cfg.metric_threshold,test_cfg.metric)
if ltestsuite:
asw = 'YES'
else:
        asw = input('If you are happy with this experiment, '
                    'do you want to add it to the reference pool? '
                    '(yes/[No])\n')
    if asw.strip().upper() in ('YES', 'Y'):
add_exp_to_ref.main(new_exp,
tests,
p_stages=p_stages,
ltestsuite=ltestsuite,
p_ref_csv_files=p_ref_csv_files)
else:
args_for_manual_execution = \
utils.derive_arguments_for_add_exp_to_ref(new_exp,
tests,
p_stages,
p_ref_csv_files)
log.info('The experiment {} is NOT added to '
'the reference pool \n'.format(new_exp))
        log.info('If you want to add the experiment {} '
                 'to the reference pool later on, type '
                 'the following line when you are ready:'
                 .format(new_exp))
log.info('')
log.info('python add_exp_to_ref.py {}'
.format(args_for_manual_execution))
log.banner('')
log.banner('Sanity test finished')
log.banner('')
if __name__ == '__main__':
# parsing arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--exp','-e', dest='exp',
required=True,
help='exp to proceed')
parser.add_argument('--p_raw_files', dest='p_raw_files',
default=paths.p_raw_files,
help='absolute path to raw files')
parser.add_argument('--p_stages', dest='p_stages',
default=paths.p_stages,
help='relative or absolute path '
'to write csv files of the testresults')
parser.add_argument('--raw_f_subfold', dest='raw_f_subfold',
default='',
                        help='Subfolder where the raw data are located')
parser.add_argument('--wrkdir','-w', dest='wrk_dir',
default=paths.p_wrkdir,
help='relative or absolute path to working directory')
parser.add_argument('--p_ref_csv_files', dest='p_ref_csv_files',
default=paths.p_ref_csv_files,
help='relative or absolute path to reference files')
parser.add_argument('--f_vars_to_extract',dest='f_vars_to_extract',
default='vars_echam-hammoz.csv',
                        help='File containing variables to analyse')
parser.add_argument('--verbose','-v', dest='lverbose',
action='store_true',
help='Debug output')
parser.add_argument('--clean','-c', dest='lclean',
action='store_true',
help='Redo all processing steps')
parser.add_argument('--testsuite','-ts', dest='ltestsuite',
action='store_true',
help='Run of testsuite')
parser.add_argument('--spinup', dest='spinup',
type=int,
default=3,
                        help='Number of leading months of the data '
                             'to skip due to model spinup')
parser.add_argument('--tests','-t', dest='tests',
default=['welch','fldcor','rmse','emi'],
nargs='+',
help='Tests to apply on your data')
parser.add_argument('--f_pattern_ref', dest='f_pattern_ref',
default='',
help='Absolute or relative path to reference '
'netCDF for spatial correlation tests')
args = parser.parse_args()
main(new_exp=args.exp,
p_raw_files=args.p_raw_files,
raw_f_subfold=args.raw_f_subfold,
wrk_dir=args.wrk_dir,
p_stages=args.p_stages,
p_ref_csv_files=args.p_ref_csv_files,
f_vars_to_extract=args.f_vars_to_extract,
f_pattern_ref=args.f_pattern_ref,
tests=args.tests,
spinup=args.spinup,
lclean=args.lclean,
ltestsuite=args.ltestsuite,
lverbose=args.lverbose)
| 33.510549 | 78 | 0.559179 | 936 | 7,942 | 4.470085 | 0.251068 | 0.031788 | 0.02175 | 0.037285 | 0.131692 | 0.094407 | 0.039436 | 0.024857 | 0 | 0 | 0 | 0.001733 | 0.346134 | 7,942 | 236 | 79 | 33.652542 | 0.803967 | 0.05515 | 0 | 0.187879 | 0 | 0 | 0.179425 | 0.003005 | 0 | 0 | 0 | 0 | 0 | 1 | 0.006061 | false | 0 | 0.072727 | 0 | 0.078788 | 0.006061 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6353c0bdb47d9dde56dcc48c5df873e0f1636bc | 1,278 | py | Python | api/rqst_getter.py | Maziar110/api_client_test | 52e5a2ffb0b46be71f34452132b13e5e941ae327 | [
"MIT"
] | null | null | null | api/rqst_getter.py | Maziar110/api_client_test | 52e5a2ffb0b46be71f34452132b13e5e941ae327 | [
"MIT"
] | null | null | null | api/rqst_getter.py | Maziar110/api_client_test | 52e5a2ffb0b46be71f34452132b13e5e941ae327 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from flask import Flask, request
from datetime import datetime
from flask_opentracing import FlaskTracing
from jaeger_client import Config
app = Flask(__name__)
config = Config(config=
{
'sampler': {'type': 'const', 'param': 1},
'local_agent':
{'reporting_host': '172.2.1.5'}
},
service_name='api_rst_getter'
)
jaeger_tracer = config.initialize_tracer()
tracing = FlaskTracing(jaeger_tracer, True, app)
@app.route('/', methods=['GET', 'POST'])
def get_header():
now = datetime.now()
print(now)
file = open('./api_header.log', 'a')
req_header = request.headers.values()
time = '\n' + str(now) + '\n'
file.write(time)
req_body = request.values
for items in req_header:
file.write(' - ')
print(items)
file.write(items)
file.write('\n')
for items in req_body:
file.write(' - ')
print(items, ': ', req_body[items])
item = str(items)+': '
file.write(item)
file.write(req_body[items])
file.close()
return "it is what it is"
@app.route('/test')
def test():
print("This is a test method")
return ('Yooohooo, you\'re connected to backend\nv2')
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=False)
| 22.034483 | 57 | 0.618153 | 171 | 1,278 | 4.444444 | 0.473684 | 0.082895 | 0.055263 | 0.034211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013065 | 0.22144 | 1,278 | 57 | 58 | 22.421053 | 0.750754 | 0.016432 | 0 | 0.046512 | 0 | 0 | 0.144338 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0 | 0.093023 | 0 | 0.186047 | 0.093023 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b636dd98793502ba5f717594cef6b13dafcec083 | 799 | py | Python | packages/core/minos-microservice-common/tests/test_common/test_model/test_abc.py | sorasful/minos-python | 1189330eebf6444627a2af6b29f347670f95a4dd | [
"MIT"
] | 247 | 2022-01-24T14:55:30.000Z | 2022-03-25T12:06:17.000Z | packages/core/minos-microservice-common/tests/test_common/test_model/test_abc.py | sorasful/minos-python | 1189330eebf6444627a2af6b29f347670f95a4dd | [
"MIT"
] | 400 | 2021-04-03T08:51:40.000Z | 2022-01-28T11:51:22.000Z | packages/core/minos-microservice-common/tests/test_common/test_model/test_abc.py | sorasful/minos-python | 1189330eebf6444627a2af6b29f347670f95a4dd | [
"MIT"
] | 21 | 2022-02-06T17:25:58.000Z | 2022-03-27T04:50:29.000Z | import unittest
from collections.abc import (
Mapping,
)
from uuid import (
UUID,
uuid4,
)
from minos.common import (
DeclarativeModel,
Field,
Model,
)
from tests.model_classes import (
FooBar,
)
class TestModel(unittest.TestCase):
def test_base(self):
self.assertTrue(issubclass(Model, Mapping))
def test_fields(self):
uuid = uuid4()
model = FooBar(uuid)
self.assertEqual({"identifier": Field("identifier", UUID, uuid)}, model.fields)
def test_eq_reversing(self):
class _Fake(DeclarativeModel):
def __eq__(self, other):
return True
self.assertEqual(FooBar(uuid4()), _Fake())
self.assertEqual(_Fake(), FooBar(uuid4()))
if __name__ == "__main__":
unittest.main()
| 19.487805 | 87 | 0.628285 | 85 | 799 | 5.670588 | 0.435294 | 0.043568 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006711 | 0.254068 | 799 | 40 | 88 | 19.975 | 0.802013 | 0 | 0 | 0 | 0 | 0 | 0.035044 | 0 | 0 | 0 | 0 | 0 | 0.129032 | 1 | 0.129032 | false | 0 | 0.16129 | 0.032258 | 0.387097 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b63706705437012c6dcf007e355dcfa0951e03d3 | 7,197 | py | Python | twitterBattleGame/twitterbattlegame.py | ferrithemaker/makertrends-twitter | 6055a2437cf567f14aa513a906615488f7c35549 | [
"MIT"
] | null | null | null | twitterBattleGame/twitterbattlegame.py | ferrithemaker/makertrends-twitter | 6055a2437cf567f14aa513a906615488f7c35549 | [
"MIT"
] | null | null | null | twitterBattleGame/twitterbattlegame.py | ferrithemaker/makertrends-twitter | 6055a2437cf567f14aa513a906615488f7c35549 | [
"MIT"
] | null | null | null | from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import json
import threading
import sys
import pygame
import os
if len(sys.argv) == 3:
search_strings = [sys.argv[1],sys.argv[2]]
else:
print("Usage: twitterbattlegame.py [TREND1_STRING] [TREND2_STRING]")
sys.exit(0)
# Go to http://apps.twitter.com and create an app.
# The consumer key and secret will be generated for you after
consumer_key=""
consumer_secret=""
# After the step above, you will be redirected to your app's page.
# Create an access token under the "Your access token" section
access_token=""
access_token_secret=""
# This is the string to search in the twitter feed
# May be a word, an #hashtag or a @username
twitterText = ""
text_x = 30
color = 1
dwarfGo = False
gladiatorGo = False
finish = False
# final animation
dwarfdirection = -1
dwarfmove = 0
gladiatordirection = -1
gladiatormove = 0
def startTwitter():
l = StdOutListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, l)
stream.filter(track=search_strings)
class StdOutListener(StreamListener):
def on_data(self, data):
global twitterText
global first
global text_x
global color
global dwarfGo
global gladiatorGo
        data = json.loads(data)
        # some stream events (e.g. deletions) carry no 'text' field
        if 'text' not in data:
            return True
        twitterText = data['text'].lower()
if search_strings[0] in twitterText:
dwarfGo = True
if search_strings[1] in twitterText:
gladiatorGo = True
return True
def on_error(self, status):
return False
def get_sprite(image, x, y, width, height):
sprite = pygame.Surface([width, height], pygame.SRCALPHA, 32).convert_alpha()
sprite.blit(image, (0, 0), (x, y, width, height))
return sprite
def chunkstring(string, length):
return (string[0+i:length+i] for i in range(0, len(string), length))
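# Illustrative example (chunkstring is currently an unused helper):
#   list(chunkstring("abcdefg", 3)) -> ['abc', 'def', 'g']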
twitterThread = threading.Thread(target = startTwitter)
twitterThread.start()
pygame.init()
clock = pygame.time.Clock()
size = width, height = 1056, 672
screen = pygame.display.set_mode(size)
# fonts
font = pygame.font.Font('./assets/PressStart2P-Regular.ttf', 16)
fontTitles = pygame.font.Font('./assets/PressStart2P-Regular.ttf', 32)
# default text from twitter
text = font.render(twitterText.encode('utf-8'), True, (0,0,0))
textRect = text.get_rect()
# info texts
textTile = fontTitles.render("Twitter #hashtags battle!", True, (100,250,80))
textTileRect = textTile.get_rect()
textTileRect.center = (520,40)
hashtagText = fontTitles.render(sys.argv[1]+" VS "+sys.argv[2], True, (250,50,250))
hashtagTextRect = hashtagText.get_rect()
hashtagTextRect.center = (520,100)
# set background
background = pygame.image.load("./assets/bulkhead-wallsx3.png")
backgroundRect = background.get_rect()
# set dwarf sprites
dwarfSpritesSheet = pygame.image.load("./assets/Dwarf_Sprite_Sheet1.2v-4x.png")
dwarfSprites = []
dwarfSpritesNumber = 4
for i in range(dwarfSpritesNumber):
dwarfSprites.append(get_sprite(dwarfSpritesSheet,150 * i,640,150,100))
dwarfRect = pygame.Rect(50,470,128,128)
dwarfSpritePos = 0
# set gladiator sprites
gladiatorSpritesSheet = pygame.image.load("./assets/Gladiator-Sprite Sheet-Left4x.png")
gladiatorSprites = []
gladiatorSpritesNumber = 5
for i in range(gladiatorSpritesNumber):
gladiatorSprites.append(get_sprite(gladiatorSpritesSheet,128 * i,0,128,128))
gladiatorRect = pygame.Rect(874,430,128,128)
gladiatorSpritePos = 0
# set key
collectablesSpritesSheet = pygame.image.load("./assets/Dungeon Collectables4x.png")
keySprites = []
keySpritesNumber = 12
for i in range(keySpritesNumber):
keySprites.append(get_sprite(collectablesSpritesSheet,64 * i,260,64,64))
keyRect = pygame.Rect(496,490,64,64)
keySpritePos = 0
# set box and money
box = pygame.image.load("./assets/box.png")
boxRect = box.get_rect()
boxRect.center = (523,510)
money = pygame.image.load("./assets/money.png")
moneyRect = money.get_rect()
moneyRect.center = (523,520)
while True:
clock.tick(24)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
os._exit(1)
if event.type == pygame.KEYDOWN:
# key control (for testing)
if event.key == pygame.K_LEFT:
dwarfRect = dwarfRect.move(-10,0)
dwarfSpritePos -= 1
if dwarfSpritePos < 0:
dwarfSpritePos = dwarfSpritesNumber - 1
if event.key == pygame.K_RIGHT:
dwarfRect = dwarfRect.move(10,0)
dwarfSpritePos += 1
if dwarfSpritePos > dwarfSpritesNumber -1:
dwarfSpritePos = 0
if event.key == pygame.K_a:
gladiatorRect = gladiatorRect.move(-10,0)
gladiatorSpritePos -= 1
if gladiatorSpritePos < 0:
gladiatorSpritePos = gladiatorSpritesNumber - 1
if event.key == pygame.K_s:
gladiatorRect = gladiatorRect.move(10,0)
gladiatorSpritePos += 1
if gladiatorSpritePos > gladiatorSpritesNumber -1:
gladiatorSpritePos = 0
# draw background
screen.blit(background, backgroundRect)
# automated sprites movement
if dwarfGo == True and finish == False:
#print("ENTRAAAAA")
dwarfGo = False
dwarfRect = dwarfRect.move(10,0)
dwarfSpritePos += 1
if dwarfSpritePos > dwarfSpritesNumber -1:
dwarfSpritePos = 0
# render text
text = font.render(str(twitterText.encode('utf-8'))[:60]+"...", True, (255,0,0))
textRect = text.get_rect()
textRect.x = 20
textRect.y = dwarfRect.y - 200
if gladiatorGo == True and finish == False:
gladiatorGo = False
gladiatorRect = gladiatorRect.move(-10,0)
gladiatorSpritePos -= 1
if gladiatorSpritePos < 0:
gladiatorSpritePos = gladiatorSpritesNumber - 1
# render text
text = font.render(str(twitterText.encode('utf-8'))[:60]+"...", True, (0,0,255))
textRect = text.get_rect()
textRect.x = 20
textRect.y = gladiatorRect.y - 100
# draw tweet
if finish == False:
screen.blit(text,textRect)
# draw texts
screen.blit(textTile,textTileRect)
screen.blit(hashtagText,hashtagTextRect)
# draw box
screen.blit(box,boxRect)
# game ending
if dwarfRect.right > keyRect.left:
# draw money and box
finish = True
screen.blit(money,moneyRect)
dwarfRect = dwarfRect.move(0,dwarfdirection)
if dwarfmove > 10:
dwarfdirection = dwarfdirection * -1
dwarfmove = 0
dwarfmove += 1
winText = fontTitles.render(sys.argv[1]+" WINS!!", True, (0,0,255))
winTextRect = winText.get_rect()
winTextRect.center = (520,200)
screen.blit(winText,winTextRect)
if gladiatorRect.left < keyRect.right:
# draw money and box
finish = True
screen.blit(money,moneyRect)
gladiatorRect = gladiatorRect.move(0,gladiatordirection)
if gladiatormove > 10:
gladiatordirection = gladiatordirection * -1
gladiatormove = 0
gladiatormove += 1
winText = fontTitles.render(sys.argv[2]+" WINS!!", True, (0,0,255))
winTextRect = winText.get_rect()
winTextRect.center = (520,200)
screen.blit(winText,winTextRect)
# draw key
if finish == False:
screen.blit(keySprites[keySpritePos],keyRect)
keySpritePos += 1
if keySpritePos > keySpritesNumber -1:
keySpritePos = 0
# draw dwarf
screen.blit(dwarfSprites[dwarfSpritePos],dwarfRect)
# draw gladiator
screen.blit(gladiatorSprites[gladiatorSpritePos],gladiatorRect)
pygame.display.flip()
| 25.888489 | 87 | 0.725163 | 923 | 7,197 | 5.605634 | 0.266522 | 0.023193 | 0.017395 | 0.024353 | 0.263433 | 0.232315 | 0.209123 | 0.192888 | 0.192888 | 0.153073 | 0 | 0.042928 | 0.155204 | 7,197 | 277 | 88 | 25.981949 | 0.808059 | 0.096707 | 0 | 0.247312 | 0 | 0 | 0.057368 | 0.024432 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026882 | false | 0 | 0.043011 | 0.010753 | 0.096774 | 0.005376 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b63aa4a552b83a7cbc88ec12fcd658dfebd4cd02 | 9,346 | py | Python | reachyAudio/reachyAudioAnswering.py | sizingservers/ReachyAudio | af91ed57015d693cc942620495541b482728a513 | [
"MIT"
] | 3 | 2021-04-28T15:16:50.000Z | 2021-11-01T17:36:09.000Z | reachyAudio/reachyAudioAnswering.py | sizingservers/Reachy.Audio | 7e515459b72f2bdc05ee73f159d6bcaaabaef6f5 | [
"MIT"
] | null | null | null | reachyAudio/reachyAudioAnswering.py | sizingservers/Reachy.Audio | 7e515459b72f2bdc05ee73f159d6bcaaabaef6f5 | [
"MIT"
] | 2 | 2021-11-22T13:43:37.000Z | 2022-03-03T09:44:16.000Z | """This module defines the ReachyAudioAnswering class."""
import nltk
import json
import torch
import random
import pickle
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()
CONFIDENCE_THRESHOLD = 0.7
class ReachyAudioAnswering():
"""ReachyAudioAnswering class.
This class implements a small neural network allowing Reachy to answer to
simple questions. To make it flexible, it uses sentence tokenizing and word
stemming such that the network can provide answers to sentences different
    from the ones used for training. Input sentences still have to remain
    close to the training sentences, however.
"""
def __init__(self):
"""Train the model of the network or load it if it already exists."""
print("Initializing Reachy answering model...")
# Load the json file containing the training data
with open("utils/intents.json") as myFile:
self.data = json.load(myFile)
# Load the data necessary to the initialization
# of the network if the training has already been
# done before, create it otherwise
try:
with open("utils/data.pickle", "rb") as f:
                self.words, self.labels, \
                    train_input, train_target = pickle.load(f)
        except Exception:
# Contain all the different stemmed words constituing the patterns
self.words = []
# Contain all the different intents of the input sentences
self.labels = []
# Contain the training sentences of the network
docs_x = []
# Contain the corresponding intent of a tokenized pattern
docs_y = []
# Contain the training inputs of the network
train_input = []
# Contain the expected output for the training of the network
train_target = []
# Extract the data from the json file
for intent in self.data["intents"]:
for pattern in intent["patterns"]:
wrds = nltk.word_tokenize(pattern)
self.words.extend(wrds)
docs_x.append(pattern)
docs_y.append(intent["tag"])
if intent["tag"] not in self.labels:
self.labels.append(intent["tag"])
# Apply word stemming i.e. find the root of the word
# (ex: happened -> happen)
self.words = [stemmer.stem(w.lower()) for w in self.words
if w != "?"]
# transform to set to remove doublons
self.words = sorted(list(set(self.words)))
self.labels = sorted(self.labels)
out_empty = [0 for _ in range(len(self.labels))]
# Transform each training sentence into a bag of words (an input
# for the network) and compute the corresponding expected output
for x, doc in enumerate(docs_x):
bag = self.bag_of_words(doc)
# Expected output
output_row = out_empty[:]
output_row[self.labels.index(docs_y[x])] = 1
# We add the input and the expected output to the training set
train_input.append(bag)
train_target.append(output_row)
# We store the computed training set for future uses
with open("utils/data.pickle", "wb") as f:
pickle.dump((self.words, self.labels,
train_input, train_target), f)
# Load the model if it already exists, train it otherwise
try:
self.model = torch.load('utils/model.pth')
        except Exception:
self.model = torch.nn.Sequential(
torch.nn.Linear(len(train_input[0]), 8),
torch.nn.Linear(8, 8),
torch.nn.Linear(8, len(train_target[0])),
torch.nn.Softmax(dim=-1))
self.train_model(torch.Tensor(train_input),
torch.Tensor(train_target))
torch.save(self.model, 'utils/model.pth')
print("Done")
def train_model(self, train_input, train_target, nb_epochs=500,
show_metric=False):
"""Train the model of the network.
:param data_input: The inputs of the training set.
:param data_target: The corresponding outputs of the training set.
:param nb_epochs: The number of times that the learning algorithm will
work through the entire training dataset.
        :param show_metric: Whether to print the performance of the model
                            during its training.
"""
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(self.model.parameters())
for e in range(nb_epochs):
# Compute the output of the model (forward pass)
output = self.model(train_input)
# Compute the error between the predicted output and the ground
# truth
loss = criterion(output, train_target)
# Reset the sum of the gradients (the previous epoch should not
# influence the current epoch)
self.model.zero_grad()
# Apply a backward pass
loss.backward()
# Update the parameters of the model with respect to the backward
# pass previously done
optimizer.step()
# Compute the error of the current state of the network's model
# with respect to the training set
if show_metric:
with torch.no_grad():
print("Epoch {} -> Train error = {:.02f} %".format(
e, self.compute_nb_errors(train_input, train_target) /
train_input.size(0) * 100))
def compute_nb_errors(self, data_input, data_target):
"""Compute the number of classification errors of our network's model.
:param data_input: The inputs of the testing set.
:param data_target: The corresponding outputs of the testing set.
:return: The number of classification errors made on the testing set.
"""
nb_data_errors = 0
# Compute the output of the model
output = self.model(data_input)
# Take the most confident output as the result
predicted_classes = torch.argmax(output, 1)
expected_classes = torch.argmax(data_target, 1)
# Compare the prediction of the model with the ground truth
for predicted_classe, expected_classe in zip(predicted_classes,
expected_classes):
if predicted_classe != expected_classe:
nb_data_errors = nb_data_errors + 1
return nb_data_errors
def bag_of_words(self, input_sentence):
"""Compute a bag of words that will be used as input for the network.
A bag of words is a vector whose length correspond to the "vocabulary"
known by the network (all the different words composing the sentences
of the training set). For each word of the vocabulary, if this word is
present in the input sentence, then the vector contains a 1, otherwise
it contains a 0.
:param input_sentence: The sentence to be answered.
:return: The bag of word corresponding to the input sentence.
"""
bag = []
# Tokenize the input sentence and apply word stemming
# on each of the tokenized words
sentence_words = nltk.word_tokenize(input_sentence)
stemmed_words = [stemmer.stem(word.lower()) for word in sentence_words]
# Fill the vector
for w in self.words:
if w in stemmed_words:
bag.append(1)
else:
bag.append(0)
return bag
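    # Illustrative example with a hypothetical vocabulary (assuming the stemmer
    # leaves these words unchanged): if self.words == ['hello', 'how', 'you'],
    # then bag_of_words("hello, how are you?") returns [1, 1, 1]
    # and bag_of_words("goodbye") returns [0, 0, 0].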
def answer(self, input_sentence):
"""Allow Reachy to answer to a question.
:param input_sentence: The sentence to be answered.
:return: The detected intent of the input sentence
(None if the intent could not be detected).
:return: The answer to the input sentence.
"""
# Compute the output of the model with respect to the input sentence
results = self.model(torch.Tensor(self.bag_of_words(input_sentence)))
# Take the most confident output as the result
results_index = torch.argmax(results)
intent = self.labels[results_index]
# Provide an answer only if the network
# was confident enough about his output
if results[results_index] > CONFIDENCE_THRESHOLD:
for tg in self.data["intents"]:
if tg["tag"] == intent:
# The response is picked randomly among the ones
# related to the detected input sentence intent
responses = tg["responses"]
answer = random.choice(responses)
return intent, answer
return None, "I didn't get that, can you try again ?"
| 39.770213 | 80 | 0.581639 | 1,123 | 9,346 | 4.75512 | 0.247551 | 0.023408 | 0.01573 | 0.01573 | 0.148127 | 0.113858 | 0.09176 | 0.064794 | 0.035955 | 0.018727 | 0 | 0.00462 | 0.351594 | 9,346 | 234 | 81 | 39.940171 | 0.876568 | 0.398673 | 0 | 0.038462 | 0 | 0 | 0.048153 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048077 | false | 0 | 0.057692 | 0 | 0.153846 | 0.028846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b63abdbbcdf468494ec4d6e1649a366257180326 | 4,937 | py | Python | mlp/mlp.py | sovrasov/mlp_sample | c27aa4893960e3531fb3135148a26fdf75a2f1d2 | [
"MIT"
] | null | null | null | mlp/mlp.py | sovrasov/mlp_sample | c27aa4893960e3531fb3135148a26fdf75a2f1d2 | [
"MIT"
] | null | null | null | mlp/mlp.py | sovrasov/mlp_sample | c27aa4893960e3531fb3135148a26fdf75a2f1d2 | [
"MIT"
] | null | null | null | import numpy as np
def softmax(x):
    # NOTE: this is softmax applied to -x; the flipped sign is consistent with
    # the `expected - outputs` delta and the `-=` weight updates used below,
    # so training still performs gradient descent on the cross-entropy loss.
    ex = np.exp(-x)
    return ex / np.sum(ex)
def relu(x):
return x * (x > 0.)
def relu_der(x):
return np.ones_like(x) * (x > 0.)
class MLP:
def __init__(self, lr, bs, momentum, verbose, max_iters, eps=0., hidden_dims=[10]):
self.layers = []
self.labels_ = []
self.lr = lr
self.batch_size = bs
self.momentum = momentum
self.verbose = verbose
self.max_iters = max_iters
self.eps = eps
assert len(hidden_dims) > 0
self.hidden_dims = hidden_dims
def _create_layer(self, num_inputs, num_outputs, activate=True):
return {'w':np.random.rand(num_inputs, num_outputs), 'b': np.random.rand(num_outputs), 'a':activate,
'batch_grad_w':np.zeros((num_inputs, num_outputs), dtype=np.float32),
'w_v':np.zeros((num_inputs, num_outputs), dtype=np.float32),
'batch_grad_b':np.zeros(num_outputs, dtype=np.float32),
'b_v':np.zeros(num_outputs, dtype=np.float32)}
def init_layers_(self, num_inputs, num_labels):
np.random.seed(0)
self.layers = []
self.layers.append(self._create_layer(num_inputs, self.hidden_dims[0], True))
for i in range(1, len(self.hidden_dims)):
self.layers.append(self._create_layer(self.hidden_dims[i - 1], self.hidden_dims[i], True))
self.layers.append(self._create_layer(self.hidden_dims[-1], num_labels, False))
def forward_(self, x, train=False):
signal = x
for layer in self.layers:
if train: layer['input'] = np.copy(signal)
signal = np.matmul(np.transpose(layer['w']), signal) + layer['b']
if layer['a']:
if train: layer['pre_output'] = signal
signal = relu(signal)
return signal
def backward_(self, expected, outputs):
for i in reversed(range(len(self.layers))):
current_layer = self.layers[i]
if i == len(self.layers) - 1: # handle the last layer
errors = expected - outputs
current_layer['delta'] = errors
if current_layer['a']:
current_layer['delta'] *= relu_der(current_layer['pre_output'])
else:
next_layer = self.layers[i + 1]
current_layer['delta'] = np.matmul(next_layer['w'], next_layer['delta']) * \
relu_der(current_layer['pre_output'])
current_layer['batch_grad_b'] += current_layer['delta']
current_layer['batch_grad_w'] += np.matmul(current_layer['input'].reshape(-1, 1),
current_layer['delta'].reshape(1, -1))
def update_weights_(self):
for i in reversed(range(len(self.layers))):
current_layer = self.layers[i]
current_layer['b_v'] = self.momentum * current_layer['b_v'] + (self.lr / self.batch_size) * current_layer['batch_grad_b']
current_layer['w_v'] = self.momentum * current_layer['w_v'] + (self.lr / self.batch_size) * current_layer['batch_grad_w']
current_layer['b'] -= current_layer['b_v']
current_layer['w'] -= current_layer['w_v']
def init_train_iter_(self):
for layer in self.layers:
layer['batch_grad_b'] *= 0.
layer['batch_grad_w'] *= 0.
def fit(self, x, y):
num_samples = len(x)
assert num_samples > 0
assert num_samples == len(y)
num_inputs = len(x[0])
assert num_inputs > 0
self.labels_ = np.unique(y)
num_labels = len(self.labels_)
assert num_labels > 0
x = np.array(x)
y = np.array(y)
self.init_layers_(num_inputs, num_labels)
np.random.seed(1)
for i in range(self.max_iters):
            batch_indices = np.random.randint(0, num_samples, self.batch_size)  # upper bound exclusive: 0..num_samples-1
batch_x = x[batch_indices]
batch_y = y[batch_indices]
self.init_train_iter_()
for j in range(self.batch_size):
outputs = softmax(self.forward_(batch_x[j], train=True))
expected = (self.labels_ == batch_y[j]).astype(np.int8)
self.backward_(expected, outputs)
self.update_weights_()
def predict(self, x):
predictions = np.zeros(len(x))
for i in range(len(x)):
probs = softmax(self.forward_(x[i]))
idx = np.argmax(probs)
predictions[i] = self.labels_[idx]
return predictions
def score(self, x, y):
assert len(x) == len(y)
y = np.array(y).reshape(-1)
predictions = self.predict(x)
num_correct = np.sum(predictions == y)
return float(num_correct) / y.shape[0]
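# Illustrative usage sketch (an assumption, not part of the original module;
# shapes follow fit()/predict() above: one flat feature vector per sample).
if __name__ == '__main__':
    rng = np.random.RandomState(42)
    x = rng.rand(64, 4)                    # 64 samples, 4 features
    y = (x.sum(axis=1) > 2.0).astype(int)  # binary labels derived from x
    clf = MLP(lr=0.1, bs=8, momentum=0.9, verbose=False, max_iters=200)
    clf.fit(x, y)
    print('train accuracy:', clf.score(x, y))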
| 38.570313 | 133 | 0.567956 | 652 | 4,937 | 4.075153 | 0.162577 | 0.103877 | 0.031615 | 0.028604 | 0.288671 | 0.24012 | 0.228453 | 0.164848 | 0.136244 | 0.072262 | 0 | 0.010715 | 0.300587 | 4,937 | 127 | 134 | 38.874016 | 0.758761 | 0.004254 | 0 | 0.075472 | 0 | 0 | 0.0407 | 0 | 0 | 0 | 0 | 0 | 0.056604 | 1 | 0.122642 | false | 0 | 0.009434 | 0.028302 | 0.207547 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b63d8c3c7c6fd356106b5b059b25964eee3e6080 | 4,858 | py | Python | cap/path.py | ArashLab/CAP | 9e6d413e000ebfcade3020985fdedd9aa703d68a | [
"MIT"
] | null | null | null | cap/path.py | ArashLab/CAP | 9e6d413e000ebfcade3020985fdedd9aa703d68a | [
"MIT"
] | 8 | 2021-06-24T06:08:27.000Z | 2021-07-22T03:47:11.000Z | cap/path.py | ArashLab/CAP | 9e6d413e000ebfcade3020985fdedd9aa703d68a | [
"MIT"
] | null | null | null | import os
import subprocess
from munch import Munch
from .logutil import *
from .decorators import *
if __name__ == '__main__':
print('This module is not executable.')
exit(0)
FileSystems = [
'file',
'hdfs',
's3',
'gs',
'mysql',
'http',
'https'
]
# If a path could match more than one entry there is uncertainty in the outcome
extensionMapper = {
'.mt': ('mt', None),
'.ht': ('ht', None),
'.vcf': ('vcf', None),
'.vcf.gz': ('vcf', 'gz'),
'.vcf.bgz': ('vcf', 'bgz'),
'.tsv': ('tsv', None),
'.tsv.gz': ('tsv', 'gz'),
'.tsv.bgz': ('tsv', 'bgz'),
'.csv': ('csv', None),
'.csv.gz': ('csv', 'gz'),
'.csv.bgz': ('csv', 'bgz'),
'.json': ('json', None),
'.json.gz': ('json', 'gz'),
'.json.bgz': ('json', 'bgz'),
'.yaml': ('yaml', None),
'.yaml.gz': ('yaml', 'gz'),
'.yaml.bgz': ('yaml', 'bgz'),
'.bed': ('bed', None),
'.bim': ('bim', None),
'.fam': ('fam', None),
'.parquet': ('parquet', None)
}
class Path:
# If true, remove file system prefix (i.e. 'file://' or 'hdfs://') of the defaultFileSystem.
# For example, if 'defaultFileSystem=local' it removes the 'file://' from the path
__defaultMode = True
@classmethod
def SetDefaultMode(cls, defaultMode):
cls.__defaultMode = defaultMode
@classmethod
def GetDefaultMode(cls):
return cls.__defaultMode
# If the path does not have a file system prefix (i.e. 'file://' or 'hdfs://') adds the prefix based on the default file system
__defaultFileSystem = 'file'
@classmethod
def SetDefaultFileSystem(cls, defaultFileSystem):
if defaultFileSystem in FileSystems:
cls.__defaultFileSystem = defaultFileSystem
else:
LogException(f'File system `{defaultFileSystem}` is not supported')
@classmethod
def GetDefaultFileSystem(cls):
return cls.__defaultFileSystem
def __init__(self, path=None):
self.__path = None
self.__raw = None
if path:
self.path = path
def __repr__(self):
rep = dict()
for k in ['raw', 'path', 'fileSystem', 'format', 'compression']:
rep[k] = getattr(self,k)
return str(rep)
@property
def path(self):
if self.GetDefaultMode():
if self.__fileSystem == self.GetDefaultFileSystem():
return self.__path
return '://'.join([self.__fileSystem, self.__path])
@property
def local(self):
return self.__path
@property
def fileSystem(self):
return self.__fileSystem
@property
def raw(self):
return self.__raw
@property
def format(self):
return self.__format
@property
def compression(self):
return self.__compression
@path.setter
def path(self, path):
if isinstance(path, str):
self.__raw = str(path)
self.Processes()
else:
LogExceptionType(path, expectedType='str')
def Processes(self):
# Identify the file system and extract it from the path
rawPath = os.path.expandvars(self.__raw)
if ':' in rawPath:
parts = rawPath.split(':')
if len(parts) > 2:
LogException(f'Path `{rawPath}` has more than one `:`')
elif not parts[0]:
LogException(f'Path `{rawPath}` starts with `:`')
elif parts[0] not in FileSystems:
LogException(f'File system `{parts[0]}` in path `{rawPath}` not supported.')
else:
self.__fileSystem = parts[0]
path = self.Trim(parts[1])
else:
self.__fileSystem = self.GetDefaultFileSystem()
path = rawPath
self.__path = self.Trim(path)
self.Absolute()
self.InferFormat()
@classmethod
def Trim(cls, path, char='/'):
while True:
if path.startswith(char*2):
path = path[1:]
else:
break
return path
def Absolute(self):
fs = self.fileSystem
if fs not in ['file']:
LogException(f'File system `{fs}` is not supported')
elif fs == 'file':
self.__path = os.path.abspath(self.__path)
    def InferFormat(self):
        # Default to unknown format/compression when no known extension matches
        self.__format, self.__compression = None, None
        for ext in extensionMapper:
            if self.local.endswith(ext):
                self.__format, self.__compression = extensionMapper[ext]
                break
def Exist(self):
fs = self.fileSystem
if fs not in ['file', 'hdfs']:
LogException(f'File system `{fs}` is not supported')
elif fs == 'file':
return os.path.exists(self.local)
        elif fs == 'hdfs':
            # `hdfs dfs -test -e` exits with 0 when the path exists
            return not subprocess.run(['hdfs', 'dfs', '-test', '-e', self.path]).returncode
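# Example behaviour (illustrative only; running this module directly exits
# early because of the guard at the top of the file):
#   p = Path('/tmp/data.vcf.gz')
#   p.fileSystem   -> 'file'
#   p.format       -> 'vcf'
#   p.compression  -> 'gz'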
| 27.446328 | 132 | 0.550638 | 535 | 4,858 | 4.872897 | 0.257944 | 0.033755 | 0.026851 | 0.03529 | 0.084388 | 0.084388 | 0.084388 | 0.084388 | 0.062908 | 0.037591 | 0 | 0.002959 | 0.304446 | 4,858 | 176 | 133 | 27.602273 | 0.768571 | 0.087485 | 0 | 0.183099 | 0 | 0 | 0.140501 | 0.004744 | 0 | 0 | 0 | 0 | 0 | 1 | 0.126761 | false | 0 | 0.035211 | 0.049296 | 0.274648 | 0.007042 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b640cb56865053f7246a487959ec18a980db1340 | 1,823 | py | Python | main.py | Vivektp/Image-UploadBot-1 | 01d70d4425d082639e46d954d0b900d478ad29c9 | [
"MIT"
] | null | null | null | main.py | Vivektp/Image-UploadBot-1 | 01d70d4425d082639e46d954d0b900d478ad29c9 | [
"MIT"
] | null | null | null | main.py | Vivektp/Image-UploadBot-1 | 01d70d4425d082639e46d954d0b900d478ad29c9 | [
"MIT"
] | 1 | 2021-01-07T02:26:26.000Z | 2021-01-07T02:26:26.000Z | from pyrogram import Client, filters
import os, shutil
from creds import my
from telegraph import upload_file
import logging
logging.basicConfig(level=logging.INFO)
TGraph = Client(
"Image upload bot",
bot_token = my.BOT_TOKEN,
api_id = my.API_ID,
api_hash = my.API_HASH
)
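# BOT_TOKEN / API_ID / API_HASH are read from the local `creds` module (the
# `my` object imported above); they must be provisioned separately.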
@TGraph.on_message(filters.command("start"))
async def start(client, message):
await message.reply_text(f"<b>Hello {message.from_user.first_name}, My Name Is MeG Telegraph Bot 🥳\n\nI'm A <u>Telegraph Uploader Bot.</u>\n\nSend Me Any <u>Image</u>& I'll Upload It To Telegra.ph & Send You Back A Link\n\n🙂 Join & Support Us Via 👉 @MeGLeech.\n\n 🌟 Powered By @MeGBots</b>", True)
@TGraph.on_message(filters.command("help"))
async def help(client, message):
await message.reply_text(f"<b> 💁 Hey Its Not Tough To Ise Me...!!!\n\n Just Follow These Steps\n\n ▪️ Send Me Any Image (or) GIF (or) MP4 Below 5MB \n ▪️ Wait For To Generate Link For U\n\n 🌟 Powered By @MeGBots || @MeGLeech</b>", True)
@TGraph.on_message(filters.photo)
async def getimage(client, message):
tmp = os.path.join("downloads",str(message.chat.id))
if not os.path.isdir(tmp):
os.makedirs(tmp)
imgdir = tmp + "/" + str(message.message_id) +".jpg"
dwn = await message.reply_text("Downloading Please Wait...🤗", True)
await client.download_media(
message=message,
file_name=imgdir
)
await dwn.edit_text("Starting Upload...🤗")
try:
response = upload_file(imgdir)
except Exception as error:
await dwn.edit_text(f"Oops something went wrong\n{error}")
return
await dwn.edit_text(f"https://telegra.ph{response[0]}")
shutil.rmtree(tmp,ignore_errors=True)
TGraph.run()
| 37.204082 | 302 | 0.64893 | 281 | 1,823 | 4.170819 | 0.459075 | 0.008532 | 0.038396 | 0.056314 | 0.199659 | 0.139932 | 0.061433 | 0.061433 | 0 | 0 | 0 | 0.002116 | 0.222161 | 1,823 | 48 | 303 | 37.979167 | 0.815938 | 0 | 0 | 0 | 0 | 0.052632 | 0.345352 | 0.017465 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.131579 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b64465faae2a9d77dbcd14ac084106824ac896e5 | 1,237 | py | Python | action-server/covidflow/utils/geocoding.py | nuecho/covidflow | 050665c629ea46bfebc0920ba1dba841c2268d08 | [
"MIT"
] | 7 | 2020-05-23T07:07:26.000Z | 2021-11-29T05:58:51.000Z | action-server/covidflow/utils/geocoding.py | dialoguemd/covidflow | b159b76dc68462f272614db4cbf716844872ebca | [
"MIT"
] | 210 | 2020-04-13T17:21:55.000Z | 2021-04-20T15:46:26.000Z | action-server/covidflow/utils/geocoding.py | dialoguemd/covidflow | b159b76dc68462f272614db4cbf716844872ebca | [
"MIT"
] | 3 | 2020-04-09T14:38:09.000Z | 2020-07-29T15:06:11.000Z | import os
from typing import Any, Dict, Optional
import googlemaps
import structlog
from geopy.point import Point
logger = structlog.get_logger()
DEFAULT_COUNTRY = "CA"
GOOGLE_API_KEY_ENV = "GOOGLE_GEOCODING_API_KEY"
GEOMETRY = "geometry"
LOCATION = "location"
LATITUDE = "lat"
LONGITUDE = "lng"
class Geocoding:
def __init__(self):
key = os.environ[GOOGLE_API_KEY_ENV]
self.client = googlemaps.Client(key=key)
def get_from_address(self, address: str) -> Optional[Point]:
request = {"address": address}
return self._get_geocode(request)
def get_from_postal_code(self, postal_code: str) -> Optional[Point]:
request = {
"components": {"postal_code": postal_code, "country": DEFAULT_COUNTRY}
}
return self._get_geocode(request)
def _get_geocode(self, request: Dict[str, Any]) -> Optional[Point]:
geocode_result = self.client.geocode(**request)
        if len(geocode_result) == 0:
return None
location = geocode_result[0].get(GEOMETRY, {}).get(LOCATION, {})
if LATITUDE not in location or LONGITUDE not in location:
return None
return Point([location[LATITUDE], location[LONGITUDE]])
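# Minimal usage sketch (an assumption, not part of the original module;
# requires the GOOGLE_GEOCODING_API_KEY environment variable to hold a valid key):
if __name__ == "__main__":
    geocoder = Geocoding()
    point = geocoder.get_from_postal_code("H2Y 1C6")  # hypothetical postal code
    if point is not None:
        print(point.latitude, point.longitude)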
| 26.319149 | 82 | 0.669361 | 149 | 1,237 | 5.33557 | 0.328859 | 0.050314 | 0.030189 | 0.037736 | 0.083019 | 0.083019 | 0.083019 | 0 | 0 | 0 | 0 | 0.002077 | 0.221504 | 1,237 | 46 | 83 | 26.891304 | 0.823468 | 0 | 0 | 0.125 | 0 | 0 | 0.067098 | 0.019402 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.15625 | 0 | 0.46875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b646b8cf155b631e43329c320bbdd520d22b745c | 5,319 | py | Python | calculadora.py | WelberthyGustavo/Calculadora | 2d01dba2db06796c8d237302f3ad024c8be359ea | [
"MIT"
] | 4 | 2020-04-21T01:42:30.000Z | 2020-10-26T01:59:33.000Z | calculadora.py | WelberthyGustavo/Calculadora | 2d01dba2db06796c8d237302f3ad024c8be359ea | [
"MIT"
] | null | null | null | calculadora.py | WelberthyGustavo/Calculadora | 2d01dba2db06796c8d237302f3ad024c8be359ea | [
"MIT"
] | null | null | null | from functools import partial
from tkinter import *
#program by~ Welberthy Gustavo Developer
sinal = None  # no operator selected yet; lets igual() report 'Error!' instead of crashing if '=' is pressed first
def calc(btn):
if btn['text'].isdigit() or btn['text'] == '.':
lbl['text'] += btn['text']
def soma():
global sinal
sinal = 'soma'
global valor1
valor1 = lbl['text']
lbl['text'] = ''
def sub():
global sinal
sinal = 'sub'
global valor1
valor1 = lbl['text']
lbl['text'] = ''
def mult():
global sinal
sinal = 'mult'
global valor1
valor1 = lbl['text']
lbl['text'] = ''
def div():
global sinal
sinal = 'div'
global valor1
valor1 = lbl['text']
lbl['text'] = ''
def raiz():
global sinal
sinal = 'raiz'
global valor1
valor1 = lbl['text']
lbl['text'] = '√'
def elev():
global sinal
sinal = 'elev'
global valor1
valor1 = lbl['text']
lbl['text'] = ''
def porc():
global sinal
sinal = 'porc'
global valor1
valor1 = lbl['text']
lbl['text'] = '%'
def ac():
lbl['text'] = ''
def igual():
if sinal == 'soma':
valor2 = lbl['text']
lbl['text'] = ''
soma = float(valor1) + float(valor2)
lbl['text'] = float(soma)
elif sinal == 'sub':
valor2 = lbl['text']
lbl['text'] = ''
subt = float(valor1) - float(valor2)
lbl['text'] = float(subt)
elif sinal == 'mult':
valor2 = lbl['text']
lbl['text'] = ''
multi = float(valor1) * float(valor2)
lbl['text'] = float(multi)
    elif sinal == 'div':
        valor2 = lbl['text']
        lbl['text'] = ''
        try:
            divi = float(valor1) / float(valor2)
            lbl['text'] = float(divi)
        except ZeroDivisionError:
            lbl['text'] = 'Error!'
elif sinal == 'raiz':
lbl['text'] = ''
rai = float(valor1) ** 0.5
lbl['text'] = float(rai)
elif sinal == 'elev':
valor2 = lbl['text']
lbl['text'] = ''
eleva = float(valor1) ** float(valor2)
lbl['text'] = float(eleva)
elif sinal == 'porc':
lbl['text'] = ''
porcen = float(valor1) / 100
lbl['text'] = float(porcen)
else:
lbl['text'] = 'Error!'
janela = Tk()
janela.title('Calculadora')
janela.iconbitmap('calculadoraProject/cal.ico')
janela['bg'] = 'gainsboro'
janela.geometry('400x450+400+100')
janela.resizable(0,0)
lbl = Label(janela,width=15, height=1, font='Arial 30', bd=1, relief='solid', justify=RIGHT, anchor=E, padx=15, pady=10)
lbl.place(x=100,y=100)
lbl.pack(side=TOP)
#Others buttons
btnab = Button(janela,width=8, height=2, font='Arial 11 bold', text='√', bg='gray80', command=raiz)
btnab.place(x=15,y=90)
btnfe = Button(janela,width=8, height=2, font='Arial 11 bold', text='x¹', bg='gray80', command=elev)
btnfe.place(x=110,y=90)
btnpor = Button(janela,width=8, height=2, font='Arial 11 bold', text='%', bg='gray80', command=porc)
btnpor.place(x=205,y=90)
btnac = Button(janela,width=8, height=2, font='Arial 11 bold', text='AC', bg='gray80', command=ac)
btnac.place(x=300,y=90)
#Numbers buttons
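# functools.partial binds each button object to the shared `calc` callback,
# so a single handler can read the pressed button's label via btn['text'].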
btn7 = Button(janela,width=8, height=2, font='Arial 12', text='7')
btn7['command'] = partial(calc, btn7)
btn7.place(x=15,y=160)
btn8 = Button(janela,width=8, height=2, font='Arial 12', text='8')
btn8['command'] = partial(calc, btn8)
btn8.place(x=110,y=160)
btn9 = Button(janela,width=8, height=2, font='Arial 12', text='9')
btn9['command'] = partial(calc, btn9)
btn9.place(x=205,y=160)
btn4 = Button(janela,width=8, height=2, font='Arial 12', text='4')
btn4['command'] = partial(calc, btn4)
btn4.place(x=15,y=230)
btn5 = Button(janela,width=8, height=2, font='Arial 12', text='5')
btn5['command'] = partial(calc, btn5)
btn5.place(x=110,y=230)
btn6 = Button(janela,width=8, height=2, font='Arial 12', text='6')
btn6['command'] = partial(calc, btn6)
btn6.place(x=205,y=230)
btn3 = Button(janela,width=8, height=2, font='Arial 12', text='3')
btn3['command'] = partial(calc, btn3)
btn3.place(x=15,y=300)
btn2 = Button(janela,width=8, height=2, font='Arial 12', text='2')
btn2['command'] = partial(calc, btn2)
btn2.place(x=110,y=300)
btn1 = Button(janela,width=8, height=2, font='Arial 12', text='1')
btn1['command'] = partial(calc, btn1)
btn1.place(x=205,y=300)
btn0 = Button(janela,width=8, height=2, font='Arial 12', text='0')
btn0['command'] = partial(calc, btn0)
btn0.place(x=15,y=370)
#Score button
btnp = Button(janela,width=8, height=2, font='Arial 11 bold', text='.')
btnp['command'] = partial(calc, btnp)
btnp.place(x=110,y=370)
#Equals button
btnig = Button(janela,width=8, height=2, font='Arial 11 bold', text='=', bg='blue2', fg='white', command=igual)
btnig.place(x=205,y=370)
#Operators button
btndiv = Button(janela,width=8, height=2, font='Arial 11 bold', text='÷', bg='gray80', command=div)
btndiv.place(x=300,y=160)
btnmul = Button(janela,width=8, height=2, font='Arial 11 bold', text='x',bg='gray80', command=mult)
btnmul.place(x=300,y=230)
btnsub = Button(janela,width=8, height=2, font='Arial 11 bold', text='-',bg='gray80', command=sub)
btnsub.place(x=300,y=300)
btnad = Button(janela,width=8, height=2, font='Arial 11 bold', text='+', bg='gray80', command=soma)
btnad.place(x=300,y=370)
janela.mainloop() | 29.065574 | 121 | 0.588644 | 764 | 5,319 | 4.102094 | 0.175393 | 0.080408 | 0.108488 | 0.114869 | 0.457243 | 0.438098 | 0.438098 | 0.395341 | 0.328334 | 0.328334 | 0 | 0.078087 | 0.217522 | 5,319 | 183 | 122 | 29.065574 | 0.674195 | 0.020493 | 0 | 0.280822 | 0 | 0 | 0.131396 | 0.005176 | 0.006849 | 0 | 0 | 0 | 0 | 1 | 0.068493 | false | 0 | 0.013699 | 0 | 0.082192 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6473eeb720250834546c75004a0f9e6557be8db | 1,928 | py | Python | fastfood/exc.py | enterstudio/fastfood | 6e18500b2d08698f6fa8d9d54daee6aa78f9efd0 | [
"Apache-2.0"
] | null | null | null | fastfood/exc.py | enterstudio/fastfood | 6e18500b2d08698f6fa8d9d54daee6aa78f9efd0 | [
"Apache-2.0"
] | null | null | null | fastfood/exc.py | enterstudio/fastfood | 6e18500b2d08698f6fa8d9d54daee6aa78f9efd0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=invalid-name
"""Fastfood Exceptions."""
import re
# python 2 vs. 3 string types
try:
basestring
except NameError:
basestring = str
_SPLITCASE_RE = re.compile(r'[A-Z][^A-Z]*')
class FastfoodError(Exception):
"""Base class for all exceptions raised by fastfood."""
class FastfoodStencilSetNotListed(FastfoodError):
"""Stencil set specified was not listed in the templatepack manifest."""
class FastfoodStencilSetInvalidPath(FastfoodError):
"""Specified path to stencil set does not exist."""
class FastfoodStencilSetMissingManifest(FastfoodError):
"""Stencil set is missing a manifest.json file."""
class FastfoodTemplatePackAttributeError(AttributeError, FastfoodError):
"""Invalid stencilset request from TemplatePack."""
def get_friendly_title(err):
"""Turn class, instance, or name (str) into an eyeball-friendly title.
E.g. FastfoodStencilSetNotListed --> 'Stencil Set Not Listed'
"""
if isinstance(err, basestring):
string = err
else:
try:
string = err.__name__
except AttributeError:
string = err.__class__.__name__
split = _SPLITCASE_RE.findall(string)
if not split:
split.append(string)
if len(split) > 1 and split[0] == 'Fastfood':
split.pop(0)
return " ".join(split)
| 26.054054 | 76 | 0.708506 | 240 | 1,928 | 5.616667 | 0.554167 | 0.04451 | 0.019288 | 0.023739 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008393 | 0.196577 | 1,928 | 73 | 77 | 26.410959 | 0.861846 | 0.528008 | 0 | 0.08 | 0 | 0 | 0.024706 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.04 | 0 | 0.32 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6476924d1d5ed2df7e1b8fbabacbac62cb195f4 | 2,320 | py | Python | script.py | Freakwill/nb-combination | 716227ba22f6c0c404898a00c18362a41ae3c701 | [
"MIT"
] | null | null | null | script.py | Freakwill/nb-combination | 716227ba22f6c0c404898a00c18362a41ae3c701 | [
"MIT"
] | null | null | null | script.py | Freakwill/nb-combination | 716227ba22f6c0c404898a00c18362a41ae3c701 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from nb_comb import *
from sklearn.naive_bayes import *
from sklearn.tree import *
from sklearn.neural_network import *
from sklearn.model_selection import *
import pandas as pd
data = pd.read_csv('dataset.csv', index_col=0)
X, Y = data.iloc[:, :-1], data.iloc[:, -1].values
for i, y in enumerate(Y):
if y>600:
Y[i]=2
elif y>500:
Y[i]=1
else:
Y[i]=0
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3)
import numpy as np
keys = data.columns
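# key1/key2/key3 group the dataset's column names (kept verbatim, in Chinese):
# roughly binary flags, count-style features, and monetary features.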
key1=['用户实名制是否通过核实', '是否大学生客户', '是否黑名单客户', '是否4G不健康客户', '缴费用户当前是否欠费缴费',
'是否经常逛商场的人', '当月是否逛过福州仓山万达', '当月是否到过福州山姆会员店', '当月是否看电影', '当月是否景点游览', '当月是否体育场馆消费']
key2 = ['用户年龄', '用户话费敏感度', '用户当月账户余额(元)', '近三个月月均商场出现次数',
'当月物流快递类应用使用次数', '当月飞机类应用使用次数', '当月火车类应用使用次数', '当月旅游资讯类应用使用次数',
'用户网龄(月)', '用户最近一次缴费距今时长(月)', '当月通话交往圈人数']
key3 = ['缴费用户最近一次缴费金额(元)', '用户近6个月平均消费值(元)', '用户账单当月总费用(元)',
'当月网购类应用使用次数', '当月金融理财类应用使用总次数', '当月视频播放类应用使用次数']
import time
estimators = [('bernoulli', BernoulliNB()), ('multinomial', MultinomialNB()), ('gauss', GaussianNB())]
nba1 = NBAdditive(estimators=estimators)
estimators = [('bernoulli', BernoulliNB()), ('tree', DecisionTreeClassifier()), ('gauss', GaussianNB())]
nba2 = NBAdditive(estimators=estimators)
estimators = [('bernoulli', BernoulliNB()), ('tree', DecisionTreeClassifier()), ('mlp', MLPClassifier(hidden_layer_sizes=(5,), max_iter=2000))]
nba3 = NBAdditive(estimators=estimators)
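# NBAdditive (from nb_comb) appears to combine the listed base estimators
# additively; in the loop below, models whose display name starts with 'NB'
# are fitted with inds=[key1, key2, key3] so each base estimator receives
# its own feature group.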
models = [('NB combo 0 (NB)', nba1), ('NB combo 1 (non-NB)', nba2), ('NB combo 2 (non-NB)', nba3),
          ('Gaussian NB', GaussianNB()), ('Multinomial NB', MultinomialNB()), ('Decision tree', DecisionTreeClassifier()), ('Neural network', MLPClassifier(hidden_layer_sizes=(8,), max_iter=2000))]
np.random.seed(1001)
perf = []
for name, model in models:
dts = []
for _ in range(2):
time1 = time.perf_counter()
if name.startswith('NB'):
model.fit(X_train, Y_train, inds=[key1, key2, key3])
else:
model.fit(X_train, Y_train)
time2 = time.perf_counter()
dt = time2 - time1
dts.append(dt)
perf.append([name, model.score(X_train, Y_train), model.score(X_test, Y_test), np.mean(dts)])
p = pd.DataFrame(data=perf, columns=('name', 'train-score', 'test-score', 'time'))
print(p)
| 31.351351 | 152 | 0.64181 | 284 | 2,320 | 5.137324 | 0.482394 | 0.06854 | 0.046607 | 0.024674 | 0.145305 | 0.145305 | 0.117889 | 0.117889 | 0 | 0 | 0 | 0.027055 | 0.171552 | 2,320 | 73 | 153 | 31.780822 | 0.73205 | 0.018534 | 0 | 0.04 | 0 | 0 | 0.194897 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.16 | 0 | 0.16 | 0.02 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b64a9935e9810f6c5f1a61a7b125688afb12a906 | 3,073 | py | Python | corehq/blobs/tests/test_export.py | roboton/commcare-hq | 3ccbe59508d98dd1963ca87cf249dd2df8af8ecc | [
"BSD-3-Clause"
] | null | null | null | corehq/blobs/tests/test_export.py | roboton/commcare-hq | 3ccbe59508d98dd1963ca87cf249dd2df8af8ecc | [
"BSD-3-Clause"
] | 1 | 2021-06-02T04:45:16.000Z | 2021-06-02T04:45:16.000Z | corehq/blobs/tests/test_export.py | roboton/commcare-hq | 3ccbe59508d98dd1963ca87cf249dd2df8af8ecc | [
"BSD-3-Clause"
] | null | null | null | import os
import uuid
from io import BytesIO
from tempfile import NamedTemporaryFile
from zipfile import ZipFile
from django.test import TestCase
from corehq.apps.hqmedia.models import CommCareAudio, CommCareVideo, CommCareImage
from corehq.blobs import CODES, get_blob_db
from corehq.blobs.export import EXPORTERS
from corehq.blobs.tests.util import TemporaryFilesystemBlobDB, new_meta
class TestBlobExport(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.db = TemporaryFilesystemBlobDB()
assert get_blob_db() is cls.db, (get_blob_db(), cls.db)
data = b'binary data not valid utf-8 \xe4\x94'
cls.blob_metas = []
cls.not_found = set()
        cls.domain_name = str(uuid.uuid4())
for type_code in [CODES.form_xml, CODES.multimedia, CODES.data_export]:
for domain in (cls.domain_name, str(uuid.uuid4())):
meta = cls.db.put(BytesIO(data), meta=new_meta(domain=domain, type_code=type_code))
lost = new_meta(domain=domain, type_code=type_code, content_length=42)
cls.blob_metas.append(meta)
cls.blob_metas.append(lost)
lost.save()
cls.not_found.add(lost.key)
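                # `lost` rows exist only as metadata (no payload in the blob
                # db), so exports are expected to skip these keys.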
@classmethod
def tearDownClass(cls):
for blob in cls.blob_metas:
blob.delete()
cls.db.close()
super().tearDownClass()
def test_migrate_all(self):
expected = {
m.key for m in self.blob_metas
if m.domain == self.domain_name and m.key not in self.not_found
}
with NamedTemporaryFile() as out:
exporter = EXPORTERS['all_blobs'](self.domain_name)
exporter.migrate(out.name, force=True)
with ZipFile(out.name, 'r') as zip:
self.assertEqual(expected, set(zip.namelist()))
def test_migrate_multimedia(self):
image_path = os.path.join('corehq', 'apps', 'hqwebapp', 'static', 'hqwebapp', 'images',
'commcare-hq-logo.png')
with open(image_path, 'rb') as f:
image_data = f.read()
files = (
(CommCareImage, self.domain_name, image_data),
(CommCareAudio, self.domain_name, b'fake audio'),
(CommCareVideo, self.domain_name, b'fake video'),
(CommCareAudio, 'other_domain', b'fake audio 1'),
)
blob_keys = []
for doc_class, domain, data in files:
obj = doc_class.get_by_data(data)
obj.attach_data(data)
obj.add_domain(domain)
self.addCleanup(obj.delete)
self.assertEqual(data, obj.get_display_file(False))
blob_keys.append(obj.blobs[obj.attachment_id].key)
expected = set(blob_keys[:-1])
with NamedTemporaryFile() as out:
exporter = EXPORTERS['multimedia'](self.domain_name)
exporter.migrate(out.name, force=True)
with ZipFile(out.name, 'r') as zip:
self.assertEqual(expected, set(zip.namelist()))
| 37.024096 | 99 | 0.617312 | 379 | 3,073 | 4.860158 | 0.324538 | 0.043431 | 0.045603 | 0.017372 | 0.248643 | 0.228013 | 0.153094 | 0.153094 | 0.115092 | 0.115092 | 0 | 0.00449 | 0.275301 | 3,073 | 82 | 100 | 37.47561 | 0.822631 | 0 | 0 | 0.144928 | 0 | 0 | 0.052392 | 0 | 0 | 0 | 0 | 0 | 0.057971 | 1 | 0.057971 | false | 0 | 0.144928 | 0 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b64f13ebbb17fadf2e674f33e8566118f8aa3dfa | 922 | py | Python | telescope/utils/annotation.py | froy0212/telescope | 05f6f058d8106c86cb4eb62239800ab2261eaaad | [
"MIT"
] | 25 | 2019-05-31T23:27:56.000Z | 2022-03-11T07:43:59.000Z | telescope/utils/annotation.py | jianguozhouzunyimedicaluniversity/telescope | 6cd55256c6016feffdbfe10346bfecfcb1e30965 | [
"MIT"
] | 24 | 2018-12-10T16:44:59.000Z | 2022-03-20T19:58:37.000Z | telescope/utils/annotation.py | jianguozhouzunyimedicaluniversity/telescope | 6cd55256c6016feffdbfe10346bfecfcb1e30965 | [
"MIT"
] | 8 | 2019-09-04T13:45:08.000Z | 2022-03-15T15:57:22.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
__author__ = 'Matthew L. Bendall'
__copyright__ = "Copyright (C) 2019 Matthew L. Bendall"
def get_annotation_class(annotation_class_name):
""" Get Annotation class matching provided name
Args:
annotation_class_name (str): Name of annotation class.
Returns:
Annotation class with data structure and function(s) for finding
overlaps
"""
if annotation_class_name == 'htseq':
raise NotImplementedError('"htseq" is not compatible.')
# from ._annotation_htseq import _AnnotationHTSeq
# return _AnnotationHTSeq
elif annotation_class_name == 'intervaltree':
from ._annotation_intervaltree import _AnnotationIntervalTree
return _AnnotationIntervalTree
else:
raise NotImplementedError('Choices are "htseq" or "intervaltree".')
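# Illustrative usage:
#   Annotation = get_annotation_class('intervaltree')
#   # Annotation is _AnnotationIntervalTree; construct it per the wider API.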
| 34.148148 | 75 | 0.719089 | 96 | 922 | 6.53125 | 0.53125 | 0.191388 | 0.121212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006849 | 0.208243 | 922 | 26 | 76 | 35.461538 | 0.852055 | 0.321041 | 0 | 0 | 0 | 0 | 0.22973 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.25 | 0 | 0.416667 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b64f934d0ea49d49f24b9f5e245749b3e6460dfb | 6,012 | py | Python | web/frontend/views/config.py | tcsvn/activity-assistant | eeb0ef72a046a8a781ff31b384edec8243dd22a7 | [
"MIT"
] | 45 | 2020-11-06T20:31:13.000Z | 2022-03-24T06:14:18.000Z | web/frontend/views/config.py | tcsvn/activity-assistant | eeb0ef72a046a8a781ff31b384edec8243dd22a7 | [
"MIT"
] | 10 | 2020-12-14T00:17:11.000Z | 2022-02-06T19:39:01.000Z | web/frontend/views/config.py | tcsvn/activity-assistant | eeb0ef72a046a8a781ff31b384edec8243dd22a7 | [
"MIT"
] | 3 | 2020-12-15T22:50:09.000Z | 2022-03-13T21:12:28.000Z | from backend.models import *
from django.views.generic import TemplateView
from django.shortcuts import render, redirect
import os
import hass_api.rest as hass_rest
from frontend.util import get_server, refresh_hass_token, \
get_device_names, get_activity_names, get_person_hass_names, \
get_person_names, input_is_empty
import frontend.experiment as experiment
LOCAL_URL_PROVIDED = 'server_local_url_provided'
INVALID_ADDRESS_PROVIDED = 'server_invalid_address_provided'
class ConfigView(TemplateView):
def get_context(self, add_to_context):
srv = get_server()
person_list = Person.objects.all()
act_list = Activity.objects.all()
url = 'config'
exp_active = experiment.is_active()
refresh_hass_token()
# get hass devices
hass_devices = hass_rest.get_device_list(
settings.HASS_API_URL , srv.hass_api_token)
dev_list = get_device_names()
hass_devices = list(set(hass_devices).difference(set(dev_list)))
# get hass users
hass_users = hass_rest.get_user_names(
settings.HASS_API_URL, srv.hass_api_token,)
hass_users = list(set(hass_users).difference(set(get_person_hass_names())))
context = {'server': srv,
'url': url,
'person_list':person_list,
'hass_dev_list' : hass_devices,
'aa_dev_list' : dev_list,
'activity_list' : act_list,
'hass_user_list' : hass_users,
'aa_user_list' : person_list,
'poll_int_list' : settings.POLL_INTERVAL_LST,
'experiment_active':exp_active,
}
context.update(add_to_context)
return context
def get(self, request, *args, **kwargs):
context = self.get_context({})
return render(request, 'config.html', context)
def post(self, request):
from_section = request.POST.get("from", "")
add_to_context = {}
assert from_section in ["conf_devices", "conf_persons",\
"conf_activities", "conf_server", "debug"]
if from_section == 'conf_devices':
conf_devices(request)
elif from_section == 'conf_persons':
conf_persons(request)
elif from_section == 'conf_activities':
conf_activities(request)
elif from_section == 'conf_server':
success, reason = conf_server(request)
            if not success and reason:
                add_to_context[reason] = True
elif from_section == 'debug':
debug(request)
context = self.get_context(add_to_context)
return render(request, 'config.html', context)
def debug(request):
from frontend.util import collect_data_from_hass
collect_data_from_hass()
def conf_server(request):
""" sets server related stuff
"""
logger.error('test')
srv = get_server()
try:
pol_int = request.POST.get("poll_interval", "")
srv.poll_interval = pol_int
except:
pass
srv.save()
try:
address = request.POST.get("address", "")
if input_is_valid_address(address):
if input_is_local_address(address):
return False, LOCAL_URL_PROVIDED
address = url_strip_appendix(address)
srv.server_address = address
srv.save()
return (True, None)
else:
return False, INVALID_ADDRESS_PROVIDED
except:
return (True, None)
def url_strip_appendix(url):
""" removes trailing stuff behind a url definition
"""
lst = url.split('/')
return lst[0] + '//' + lst[2]
def input_is_valid_address(address):
""" checks whether the given address is either a valid ipv4 or a valid url
"""
from django.core.validators import URLValidator
try:
URLValidator()(address)
return True
except:
return False
def input_is_local_address(address):
return '.local' in address
def conf_devices(request):
intent = request.POST.get("intent","")
assert intent in ['track', 'remove']
dev_lst = request.POST.getlist('devices')
if intent == 'track':
lst = request.POST.getlist('hass_select')
if len(lst) == 1 and input_is_empty(lst[0]):
return
for name in lst:
Device(name=name).save()
else:
lst = request.POST.getlist('act_assist_select')
if len(lst) == 1 and input_is_empty(lst[0]):
return
for name in lst:
Device.objects.get(name=name).delete()
def conf_activities(request):
intent = request.POST.get("intent", "")
assert intent in ['add', 'delete']
if intent == 'delete':
for name in request.POST.getlist('act_select'):
Activity.objects.get(name=name).delete()
else:
name = request.POST.get("name", "")
if name not in get_activity_names() and not input_is_empty(name):
Activity(name=name).save()
def conf_persons(request):
intent = request.POST.get("intent","")
assert intent in ['track', 'remove', 'add']
dev_lst = request.POST.getlist('devices')
if intent == 'track':
lst = request.POST.getlist('hass_select')
if len(lst) == 1 and input_is_empty(lst[0]):
return
for hass_name in lst:
name = hass_name.split('.')[1]
Person(name=name, hass_name=hass_name).save()
elif intent == 'remove':
lst = request.POST.getlist('act_assist_select')
if len(lst) == 1 and input_is_empty(lst[0]):
return
for col in lst:
name = col.split(' ')[0]
Person.objects.get(name=name).delete()
else:
name = request.POST.get("name", "")
if name not in get_person_names() and not input_is_empty(name):
Person(name=name, hass_name='person.' + name).save()
| 33.966102 | 83 | 0.614604 | 740 | 6,012 | 4.747297 | 0.178378 | 0.046968 | 0.031882 | 0.035867 | 0.356106 | 0.299744 | 0.281526 | 0.266154 | 0.221178 | 0.2078 | 0 | 0.002989 | 0.276447 | 6,012 | 176 | 84 | 34.159091 | 0.804598 | 0.03177 | 0 | 0.304054 | 0 | 0 | 0.088778 | 0.009654 | 0 | 0 | 0 | 0 | 0.027027 | 1 | 0.074324 | false | 0.006757 | 0.060811 | 0.006757 | 0.243243 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b650cf8f96e44c66b3acac463da66cefb0635f96 | 1,843 | py | Python | File System/main.py | IRIDIUM-SUB/Sys_Course_Design | 52ec96378e9f9c8d7dc366efcba154df3f1cbc67 | [
"MIT"
] | null | null | null | File System/main.py | IRIDIUM-SUB/Sys_Course_Design | 52ec96378e9f9c8d7dc366efcba154df3f1cbc67 | [
"MIT"
] | null | null | null | File System/main.py | IRIDIUM-SUB/Sys_Course_Design | 52ec96378e9f9c8d7dc366efcba154df3f1cbc67 | [
"MIT"
] | null | null | null | import os
from toolbox import *
import pickle
import logging
import commandresolve
def console(data:dict,logger):
'''
Main console program
'''
consoleobj=commandresolve.commandresolve(data,logger)
    flag = True  # marks when it is time to exit
    while flag:
rawcommand=input(">")
flag=consoleobj.resolvecommand(rawcommand)
#Exit now
logger.info("Exit Successfully")
return #NOTE data should be saved in exit
if __name__=="__main__":
#Mainloop
#Search for file
filename="simdisk.bin"
'''
Setup logger
'''
    logger = logging.getLogger()  # create the root logger object
    logger.setLevel(logging.INFO)  # set the base display level
    # Create handlers
    # Console handler
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.INFO)
# Formatter
formatter = logging.Formatter('%(asctime)s [%(levelname)s] \t %(message)s')
consoleHandler.setFormatter(formatter)
    # Attach the handler to the logger
logger.addHandler(consoleHandler)
if not os.path.isfile(filename):
logger.warning("File not exist. Trying to create...")
createfile(filename,20000000)
with open(filename,'wb') as p:
pickle.dump({},p)
#Build simlink
data=dict()
with open(filename,"rb") as f:
data=pickle.load(f)
    if data != {}:
        logger.info("Found existing file data, trying to resolve...")
    if not isinstance(data, dict):
        print(data)
        logger.error("File structure is unable to resolve")
    else:
        logger.info("File structure is resolved successfully")
        logger.info("Jumping to command line...")
        console(data, logger)
| 26.710145 | 79 | 0.62344 | 203 | 1,843 | 5.62069 | 0.472906 | 0.052585 | 0.039439 | 0.031551 | 0.168273 | 0.168273 | 0.168273 | 0.168273 | 0.168273 | 0.168273 | 0 | 0.005908 | 0.265328 | 1,843 | 68 | 80 | 27.102941 | 0.83678 | 0.098209 | 0 | 0.186047 | 0 | 0 | 0.202989 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0 | 0.116279 | 0 | 0.162791 | 0.023256 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6511db93d9ed485759c7b0e96ca84109e977890 | 1,428 | py | Python | benchmarks/evaluate.py | benetech/Winnow2.0 | bc428d7f74bd7db71b6d70ab15dc7a5c37786c46 | [
"MIT"
] | 26 | 2019-12-16T21:22:14.000Z | 2022-03-25T16:05:32.000Z | benchmarks/evaluate.py | benetech/Winnow2.0 | bc428d7f74bd7db71b6d70ab15dc7a5c37786c46 | [
"MIT"
] | 325 | 2019-10-28T16:24:45.000Z | 2022-03-31T13:12:15.000Z | benchmarks/evaluate.py | benetech/Winnow2.0 | bc428d7f74bd7db71b6d70ab15dc7a5c37786c46 | [
"MIT"
] | 9 | 2019-10-09T16:20:38.000Z | 2021-12-22T18:44:45.000Z | import pandas as pd
from glob import glob
from utils import evaluate_augmented_dataset, evaluate_landmarks, evaluate_scene_detection
import os
from winnow.utils.config import resolve_config
import click
import numpy as np
import json
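# Silence pandas' SettingWithCopyWarning globally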
pd.options.mode.chained_assignment = None
@click.command()
@click.option("--benchmark", "-b", help="name of the benchmark to evaluated", default="augmented_dataset")
@click.option(
"--force-download",
"-fd",
help="Force download of the dataset (even if an existing directory for the dataset has been detected",
default=False,
is_flag=True,
)
@click.option(
"--overwrite",
"-o",
help="Force feature extraction, even if we detect that signatures have already been processed.",
default=False,
is_flag=True,
)
def main(benchmark, force_download, overwrite):
config_path = os.path.join("benchmarks", benchmark, "config.yml")
config = resolve_config(config_path)
if benchmark == "augmented_dataset":
evaluate_augmented_dataset(config, force_download, overwrite, config_path)
elif benchmark == "landmarks":
evaluate_landmarks(config, force_download, overwrite, config_path)
elif benchmark == "scene_detection":
evaluate_scene_detection(config, force_download, overwrite, config_path)
else:
print(f"Please review the dataset (@ {config.sources.root})")
if __name__ == "__main__":
main()
| 26.444444 | 106 | 0.72549 | 179 | 1,428 | 5.592179 | 0.435754 | 0.077922 | 0.087912 | 0.111888 | 0.215784 | 0.13986 | 0.101898 | 0.101898 | 0 | 0 | 0 | 0 | 0.171569 | 1,428 | 53 | 107 | 26.943396 | 0.846154 | 0 | 0 | 0.157895 | 0 | 0 | 0.278711 | 0.015406 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.210526 | 0 | 0.236842 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b653a28ba11c9bc2e835fdedaf5686ad56584df6 | 909 | py | Python | Symmetric/Stream-Cipher/LFSR/script.py | killua4564/Symmetric | 183ea2ec1d1342e9124e710a2de0fcad8b399f3d | [
"MIT"
] | 1 | 2021-05-05T14:03:10.000Z | 2021-05-05T14:03:10.000Z | Symmetric/Stream-Cipher/LFSR/script.py | killua4564/Symmetric | 183ea2ec1d1342e9124e710a2de0fcad8b399f3d | [
"MIT"
] | null | null | null | Symmetric/Stream-Cipher/LFSR/script.py | killua4564/Symmetric | 183ea2ec1d1342e9124e710a2de0fcad8b399f3d | [
"MIT"
] | null | null | null | from itertools import combinations
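# NOTE: `flag_enc` (the encrypted flag bytes) is used below but never defined
# in this snippet; it must be provided before the script runs.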
class LFSR:
def __init__(self, register, taps):
self.register = register
self.taps = taps
def next(self):
new = 0
ret = self.register[0]
for i in self.taps:
new ^= self.register[i]
self.register = self.register[1:] + [new]
return ret
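# The known plaintext prefix b'flag' XORed with the ciphertext yields the
# first 32 keystream bits; the loop below brute-forces 5 tap positions over a
# 16-bit register until the LFSR reproduces that whole keystream.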
register = list(map(int, ('{:08b}'.format(i ^ j) for i, j in zip(b'flag', flag_enc))))
print('register: ', register)
for i in combinations(list(range(16)), 5):
lfsr = LFSR(register[:16], list(i))
if all(bit == lfsr.next() for bit in register):
taps = list(i)
break
print('taps: ', taps)
lfsr = LFSR(register[:16], taps)
flag = []
for char in flag_enc:
dec_char = 0
for binary in '{:08b}'.format(char):
dec_char <<= 1
dec_char += int(binary) ^ lfsr.next()
flag.append(dec_char)
print(bytes(flag).decode())
| 24.567568 | 86 | 0.583058 | 130 | 909 | 4 | 0.338462 | 0.138462 | 0.023077 | 0.069231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023916 | 0.264026 | 909 | 36 | 87 | 25.25 | 0.753363 | 0 | 0 | 0 | 0 | 0 | 0.035204 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.034483 | 0 | 0.172414 | 0.103448 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b65951eb0ef82ffdc947697f22310dd635865642 | 4,122 | py | Python | src/mapping/cartographer.py | ThomasRanvier/map_maker | e36ddcc7d5959957d83fae778d8ef715c79712e7 | [
"MIT"
] | null | null | null | src/mapping/cartographer.py | ThomasRanvier/map_maker | e36ddcc7d5959957d83fae778d8ef715c79712e7 | [
"MIT"
] | null | null | null | src/mapping/cartographer.py | ThomasRanvier/map_maker | e36ddcc7d5959957d83fae778d8ef715c79712e7 | [
"MIT"
] | null | null | null | from utils.utils import bresenham_line
from math import hypot, cos, sin
from utils.position import Position
class Cartographer:
"""
Class that implements a Cartographer, used to update the map of the environment using the lasers echoes.
"""
def __init__(self, lasers_distance = 0.15, min_increment = 0.015, increment = 0.15, max_distance = 40, safe_distance_obstacle = 5, safe_distance_empty = 10):
"""
Instantiates a Cartographer.
:param lasers_distance: Offset of the lasers in regard of the robot.
:type lasers_distance: float
:param min_increment: Minimal increment for update of the cells of the map.
:type min_increment: float
:param increment: Increment for update of the cells of the map.
:type increment: float
:param max_distance: Maximum distance of the echoes.
:type max_distance: float
:param safe_distance_obstacle: Used to be more precise on echoes readings.
:type safe_distance_obstacle: float
        :param safe_distance_empty: Used to be more precise on echoes readings.
        :type safe_distance_empty: float
"""
self.__lasers_distance = lasers_distance
self.__max_distance = max_distance
self.__min_increment = min_increment
self.__increment = increment
self.__safe_distance_obstacle = safe_distance_obstacle
self.__safe_distance_empty = safe_distance_empty
def update(self, robot_map, robot_pos, lasers):
"""
Function used to update the map by analyzing the lasers echoes, it uses the Bresenham line algorithm (implemented in utils.utils) to update lines.
:param robot_map: The map to update.
:type robot_map: Map
:param robot_pos: Robot position in the real world.
:type robot_pos: Position
        :param lasers: The laser readings.
:type lasers: A list of Laser objects.
:return: The map updated.
:rtype: Map
"""
lasers_pos_x = robot_pos.x + self.__lasers_distance * cos(robot_pos.angle)
lasers_pos_y = robot_pos.y + self.__lasers_distance * sin(robot_pos.angle)
lasers_cell = robot_map.to_grid_pos(Position(lasers_pos_x, lasers_pos_y))
real_lasers_cell = robot_map.to_real_pos(lasers_cell)
for laser in lasers:
angle = robot_pos.angle + laser.angle
laser_hit = Position(lasers_pos_x + laser.echoe * cos(angle), lasers_pos_y + laser.echoe * sin(angle))
hit_cell = robot_map.to_grid_pos(laser_hit)
cells = bresenham_line(lasers_cell.x, lasers_cell.y, hit_cell.x, hit_cell.y)
for cell in cells:
if robot_map.is_in_bound(cell):
if cell.x == hit_cell.x and cell.y == hit_cell.y:
if laser.echoe < self.__max_distance - self.__safe_distance_obstacle:
inc_iro_certainty = self.__min_increment if robot_map.is_empty(cell) else self.__increment
inc_factor_iro_dist = (1.0 - (laser.echoe / self.__max_distance))
robot_map.grid[cell.x][cell.y] += inc_factor_iro_dist * inc_iro_certainty
if robot_map.grid[cell.x][cell.y] > 1.0:
robot_map.grid[cell.x][cell.y] = 1.0
else:
real_cell = robot_map.to_real_pos(cell)
distance = hypot(real_cell.x - real_lasers_cell.x, real_cell.y - real_lasers_cell.y)
if distance < self.__max_distance - self.__safe_distance_empty:
inc_iro_certainty = self.__min_increment if robot_map.is_obstacle(cell) else self.__increment
inc_factor_iro_dist = (1.0 - (distance / self.__max_distance))
robot_map.grid[cell.x][cell.y] -= inc_factor_iro_dist * inc_iro_certainty
if robot_map.grid[cell.x][cell.y] < 0.0:
robot_map.grid[cell.x][cell.y] = 0.0
return robot_map
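# Minimal usage sketch (illustrative; Map, Position and Laser come from the
# wider project):
#   carto = Cartographer()
#   robot_map = carto.update(robot_map, robot_pos, lasers)
# Cells hit by an echo are pushed toward 1.0 (obstacle); cells along the ray
# are pushed toward 0.0 (free), weighted by distance and current certainty.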
| 56.465753 | 161 | 0.63246 | 551 | 4,122 | 4.408348 | 0.170599 | 0.05599 | 0.065871 | 0.039522 | 0.361054 | 0.333059 | 0.272952 | 0.272952 | 0.272952 | 0.251544 | 0 | 0.009266 | 0.293062 | 4,122 | 72 | 162 | 57.25 | 0.824297 | 0.2705 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.075 | 0 | 0.175 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b659d814fb65cdd70ff97f52711483193c63f987 | 5,106 | py | Python | demosys/opengl/texture.py | Contraz/demosys-py | 0479e0f3b0a3901f601bffd2d11e155f97b47555 | [
"0BSD"
] | 70 | 2017-03-31T12:01:41.000Z | 2022-01-05T06:30:57.000Z | demosys/opengl/texture.py | Contraz/demosys-py | 0479e0f3b0a3901f601bffd2d11e155f97b47555 | [
"0BSD"
] | 69 | 2017-06-18T22:37:46.000Z | 2020-01-23T04:02:22.000Z | demosys/opengl/texture.py | Contraz/demosys-py | 0479e0f3b0a3901f601bffd2d11e155f97b47555 | [
"0BSD"
] | 9 | 2017-05-13T21:13:02.000Z | 2020-10-01T18:09:49.000Z | """
Draw methods for textures and depth textures
"""
import moderngl
from demosys import context, geometry
class TextureHelper:
"""Draw methods for textures and depth textures"""
_quad = None
_texture2d_shader = None # Type: moderngl.Program
_texture2d_sampler = None # Type: moderngl.Sampler
_depth_shader = None # Type: moderngl.Program
_depth_sampler = None # Type: moderngl.Sampler
    def __init__(self):
        self._init_texture2d_draw()
        self._init_depth_texture_draw()
    def init(self):
        """(Re)create the shared quad, shader programs and samplers"""
        self._init_texture2d_draw()
        self._init_depth_texture_draw()
@property
def initialized(self):
return self._quad is not None
@property
def ctx(self):
return context.ctx()
def draw(self, texture, pos=(0.0, 0.0), scale=(1.0, 1.0)):
"""
Draw texture using a fullscreen quad.
        By default this will cover the entire screen.
:param pos: (tuple) offset x, y
:param scale: (tuple) scale x, y
"""
if not self.initialized:
self.init()
self._texture2d_shader["offset"].value = (pos[0] - 1.0, pos[1] - 1.0)
self._texture2d_shader["scale"].value = (scale[0], scale[1])
texture.use(location=0)
self._texture2d_sampler.use(location=0)
self._texture2d_shader["texture0"].value = 0
self._quad.render(self._texture2d_shader)
self._texture2d_sampler.clear(location=0)
def draw_depth(self, texture, near, far, pos=(0.0, 0.0), scale=(1.0, 1.0)):
"""
Draw depth buffer linearized.
By default this will draw the texture as a full screen quad.
A sampler will be used to ensure the right conditions to draw the depth buffer.
:param near: Near plane in projection
:param far: Far plane in projection
:param pos: (tuple) offset x, y
:param scale: (tuple) scale x, y
"""
if not self.initialized:
self.init()
self._depth_shader["offset"].value = (pos[0] - 1.0, pos[1] - 1.0)
self._depth_shader["scale"].value = (scale[0], scale[1])
self._depth_shader["near"].value = near
self._depth_shader["far"].value = far
self._depth_sampler.use(location=0)
texture.use(location=0)
self._depth_shader["texture0"].value = 0
self._quad.render(self._depth_shader)
self._depth_sampler.clear(location=0)
def _init_texture2d_draw(self):
"""Initialize geometry and shader for drawing FBO layers"""
if not TextureHelper._quad:
TextureHelper._quad = geometry.quad_fs()
# Shader for drawing color layers
TextureHelper._texture2d_shader = context.ctx().program(
vertex_shader="""
#version 330
in vec3 in_position;
in vec2 in_uv;
out vec2 uv;
uniform vec2 offset;
uniform vec2 scale;
void main() {
uv = in_uv;
gl_Position = vec4((in_position.xy + vec2(1.0, 1.0)) * scale + offset, 0.0, 1.0);
}
""",
fragment_shader="""
#version 330
out vec4 out_color;
in vec2 uv;
uniform sampler2D texture0;
void main() {
out_color = texture(texture0, uv);
}
"""
)
TextureHelper._texture2d_sampler = self.ctx.sampler(
filter=(moderngl.LINEAR, moderngl.LINEAR),
)
def _init_depth_texture_draw(self):
"""Initialize geometry and shader for drawing FBO layers"""
if not TextureHelper._quad:
TextureHelper._quad = geometry.quad_fs()
# Shader for drawing depth layers
TextureHelper._depth_shader = context.ctx().program(
vertex_shader="""
#version 330
in vec3 in_position;
in vec2 in_uv;
out vec2 uv;
uniform vec2 offset;
uniform vec2 scale;
void main() {
uv = in_uv;
gl_Position = vec4((in_position.xy + vec2(1.0, 1.0)) * scale + offset, 0.0, 1.0);
}
""",
fragment_shader="""
#version 330
out vec4 out_color;
in vec2 uv;
uniform sampler2D texture0;
uniform float near;
uniform float far;
void main() {
float z = texture(texture0, uv).r;
float d = (2.0 * near) / (far + near - z * (far - near));
out_color = vec4(d);
}
"""
)
TextureHelper._depth_sampler = self.ctx.sampler(
filter=(moderngl.LINEAR, moderngl.LINEAR),
compare_func='',
)
helper = TextureHelper()
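# Typical usage (illustrative): inside a render loop with a bound context,
#   helper.draw(texture)                      # blit a color texture fullscreen
#   helper.draw_depth(depth_tex, 0.1, 100.0)  # linearized depth visualisation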
| 32.316456 | 102 | 0.526244 | 560 | 5,106 | 4.633929 | 0.1875 | 0.01079 | 0.009249 | 0.006166 | 0.601156 | 0.506358 | 0.506358 | 0.455491 | 0.426204 | 0.383815 | 0 | 0.034266 | 0.377007 | 5,106 | 157 | 103 | 32.522293 | 0.781515 | 0.159812 | 0 | 0.480769 | 0 | 0.028846 | 0.385503 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067308 | false | 0 | 0.028846 | 0.019231 | 0.173077 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b65c498fb47fab313371a80e39143108433be373 | 1,136 | py | Python | avancado/POO/metaclasses.py | Nataliaartini/cursoPython | 01dc9cafd5cef1252ca84503e7a9011bd709ef46 | [
"MIT"
] | null | null | null | avancado/POO/metaclasses.py | Nataliaartini/cursoPython | 01dc9cafd5cef1252ca84503e7a9011bd709ef46 | [
"MIT"
] | null | null | null | avancado/POO/metaclasses.py | Nataliaartini/cursoPython | 01dc9cafd5cef1252ca84503e7a9011bd709ef46 | [
"MIT"
] | null | null | null | class Meta(type):
def __new__(mcs, name, bases, namespace):
print(name)
if name == "A":
return type.__new__(mcs, name, bases, namespace)
if "attr_classe" in namespace:
print(f"{name} tentou sobrescrever o atributo attr_classe")
del namespace["attr_classe"] # excluindo attr_classe da classe B
print(namespace)
if "b_fala" not in namespace:
print(f"você precisa criar o metodo de fala em {name}")
else:
if not callable(namespace["b_fala"]):
print(f"b_fala precisa ser um metodo, não atributo em {name}")
return type.__new__(mcs, name, bases, namespace)
class A(metaclass=Meta):
def fala(self):
self.b_fala()
attr_classe = "valor A" # para não ser sobrescrito estou tratando na metaclasse
class B(A):
# b_fala = "olá"
def b_fala(self):
print("oi")
attr_classe = "valor B"
b = B()
b.b_fala()
print(b.attr_classe)
C = type("C", (), {"attr": "olá Mundo!"}) #nome da classe, de quem ela está herdando e o que tem nela.
c = C()
print(c.attr)
print(type(c))
| 25.818182 | 102 | 0.601232 | 164 | 1,136 | 4.006098 | 0.365854 | 0.106545 | 0.045662 | 0.068493 | 0.14003 | 0.103501 | 0.103501 | 0 | 0 | 0 | 0 | 0 | 0.277289 | 1,136 | 43 | 103 | 26.418605 | 0.800244 | 0.141725 | 0 | 0.066667 | 0 | 0 | 0.218557 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0 | 0 | 0.333333 | 0.3 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b65c6b66aba642829f6360c17136a6c5c24bf822 | 1,787 | py | Python | local_telegramListener/main.py | pratijayguha/AutomatedLightingControl | 0ce3b275b2734deb1695a28e43417784184dde84 | [
"MIT"
] | null | null | null | local_telegramListener/main.py | pratijayguha/AutomatedLightingControl | 0ce3b275b2734deb1695a28e43417784184dde84 | [
"MIT"
] | null | null | null | local_telegramListener/main.py | pratijayguha/AutomatedLightingControl | 0ce3b275b2734deb1695a28e43417784184dde84 | [
"MIT"
] | null | null | null | from utils import *
from bot import telegram_chatbot
from bulb import *
bot = telegram_chatbot(CONFIG_LOCATION)
print('Initialized Bot')
bulb = bulb(IP_RANGE)
print('Connected to bulb. IP address: {}'.format(bulb.address))
while True:
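    # `update_id` is expected to come from `utils` (star import above) on the
    # first pass; it is then refreshed from each processed update below.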
updates = bot.get_updates(offset=update_id)
updates = updates["result"]
if updates:
for item in updates:
update_id = item["update_id"]
from_ = item["message"]["from"]["id"]
try:
message_type = item['message']['entities'][0]['type']
message = item['message']['text']
except:
message_type = None
message = None
if message_type=='bot_command':
if message=='/lighton':
# Turn light on
bulb.toggle('ON')
reply = 'Lights have been turned on'
elif message=='/lightoff':
# Turn light off
bulb.toggle('OFF')
reply = 'Lights have been turned off'
elif message=='/getstatus':
# display status of light
status = bulb.getStatus()
                    if status:
reply = 'Lights are on.'
else:
reply = 'Lights are off.'
else:
reply = 'This is not a valid bot command. Please reach out to the developer for assistance.'
bot.send_message(reply, from_)
print(item)
else:
reply = 'Input is not a valid bot command. Please retry'
bot.send_message(reply, from_)
# === engines/email_engine.py (repo: dho-IOD/futu_algo, license: Apache-2.0) ===
# Futu Algo: Algorithmic High-Frequency Trading Framework
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Written by Bill Chan <billpwchan@hotmail.com>, 2021
# Copyright (c) billpwchan - All Rights Reserved
# the first step is always the same: import all necessary components:
import smtplib
import ssl
from datetime import datetime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from socket import gaierror
from util import logger
from util.global_vars import *
class Email:
    def __init__(self):
        """
        Email Engine Constructor
        """
        self.config = config
        self.port = self.config['Email'].get('Port')
        self.smtp_server = self.config['Email'].get('SmtpServer')
        self.sender = self.config['Email'].get('Sender')
        self.login = self.config['Email'].get('Login')
        self.password = self.config['Email'].get('Password')

        # Create a secure SSL context
        self.context = ssl.create_default_context()

        self.default_logger = logger.get_logger("email")

    def write_daily_stock_filter_email(self, receiver: str, filter_name: str, message_content: dict):
        message = MIMEMultipart("alternative")
        message["Subject"] = f"Daily Selected Stock List - {datetime.today().strftime('%Y-%m-%d')} - {filter_name}"
        message["From"] = self.sender
        message["To"] = receiver

        text = "Please kindly review today's chosen stock list! "
        html = """\
        <style>
            * {
                font-family: sans-serif; /* Change your font family */
            }
            .content-table {
                border-collapse: collapse;
                margin: 25px 0;
                font-size: 0.9em;
                min-width: 400px;
                border-radius: 5px 5px 0 0;
                overflow: hidden;
                box-shadow: 0 0 20px rgba(0, 0, 0, 0.15);
            }
            .content-table thead tr {
                background-color: #009879;
                color: #ffffff;
                text-align: left;
                font-weight: bold;
            }
            .content-table th,
            .content-table td {
                padding: 12px 15px;
            }
            .content-table tbody tr {
                border-bottom: 1px solid #dddddd;
            }
            .content-table tbody tr:nth-of-type(even) {
                background-color: #f3f3f3;
            }
            .content-table tbody tr:last-of-type {
                border-bottom: 2px solid #009879;
            }
            .content-table tbody tr.active-row {
                font-weight: bold;
                color: #009879;
            }
        </style>
        <table class="content-table">
            <thead>
                <tr>
                    <th>Stock Code</th>
                    <th>Company Name</th>
                    <th>Last Close</th>
                    <th>Day's Range</th>
                    <th>Market Cap</th>
                    <th>Beta (5Y Monthly)</th>
                    <th>PE (Trailing/Forward)</th>
                    <th>EPS (Trailing/Forward)</th>
                    <th>Volume</th>
                </tr>
            </thead>
            <tbody>\n
        """
        for equity, values in message_content.items():
            html += f"""\
                <tr>
                    <td>{equity}</td>
                    <td>{values['longName']}</td>
                    <td>{values['previousClose']}</td>
                    <td>{values['dayRange']}</td>
                    <td>{values['marketCap']}</td>
                    <td>{values['beta']}</td>
                    <td>{values['PE(Trailing/Forward)']}</td>
                    <td>{values['EPS(Trailing/Forward)']}</td>
                    <td>{values['volume']}</td>
                </tr>\n
            """
        html += """\
            </tbody>
        </table>
        """

        # Turn these into plain/html MIMEText objects
        part1 = MIMEText(text, "plain")
        part2 = MIMEText(html, "html")

        # Add HTML/plain-text parts to MIMEMultipart message
        # The email client will try to render the last part first
        message.attach(part1)
        message.attach(part2)

        try:
            # send your message with credentials specified above
            with smtplib.SMTP(self.smtp_server, self.port) as server:
                server.starttls(context=self.context)  # Secure the connection
                server.login(self.login, self.password)
                server.sendmail(self.sender, receiver, message.as_string())
            self.default_logger.info(f'Email Sent: {receiver}')
        except (gaierror, ConnectionRefusedError):
            self.default_logger.info('Failed to connect to the server. Bad connection settings?')
        except smtplib.SMTPServerDisconnected:
            self.default_logger.info('Failed to connect to the server. Wrong user/password?')
        except smtplib.SMTPException as e:
            self.default_logger.info('SMTP error occurred: ' + str(e))
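# Hedged usage sketch (not in the original module): how a caller might invoke
# the engine. The receiver address and all field values below are made up; the
# dict keys mirror exactly what write_daily_stock_filter_email reads.
if __name__ == '__main__':
    sample = {
        "0700.HK": {
            "longName": "Tencent Holdings",
            "previousClose": 350.0,
            "dayRange": "345.0 - 356.2",
            "marketCap": "3.35T",
            "beta": 0.8,
            "PE(Trailing/Forward)": "20.1 / 18.7",
            "EPS(Trailing/Forward)": "17.4 / 18.8",
            "volume": "21.4M",
        }
    }
    Email().write_daily_stock_filter_email("user@example.com", "SampleFilter", sample)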
# === ex03/ex03.py (repo: cheng10/PythonExerciseBook, license: MIT) ===
import string
import random
import redis
alpha = string.ascii_uppercase
l = []
while len(l) < 100:
    res = ''
    for i in range(16):
        a = random.choice(alpha)
        n = str(random.randrange(10))
        rand = random.choice([a, n])
        res += rand
    if res not in l:
        l.append(res)
        # print(res)

print(len(l))
print(l)

r = redis.StrictRedis(host='localhost', port=6379, db=0)
for item in l:
    r.set(item, True)
print("Showing data from redis:")
print(r.keys())
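# Hedged aside (not part of the original exercise): reading the values back.
# decode_responses=True is a standard redis-py option that returns str instead
# of bytes; shown here as a small sketch.
r2 = redis.StrictRedis(host='localhost', port=6379, db=0, decode_responses=True)
for key in r2.keys():
    print(key, r2.get(key))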
# === setup.py (repo: hoogamaphone/world-manager, license: MIT) ===
from setuptools import setup, find_packages
with open('requirements.txt') as f:
    requirements = f.read()

setup(
    name='World-Manager-CLI',
    version='0.1.0',
    packages=find_packages(),
    include_package_data=True,
    install_requires=requirements,
    entry_points="""
        [console_scripts]
        world-manager=cli.cli:cli
    """,
)
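# Hedged sketch of the cli/cli.py module that the entry point above assumes;
# the real implementation is not included in this snippet, and the use of
# click is an assumption based on the common `module:callable` pattern. With a
# group like this, `pip install -e .` exposes a `world-manager` command.
import click

@click.group()
def cli():
    """World Manager CLI (illustrative stub)."""

@cli.command()
def version():
    """Print an illustrative version string."""
    click.echo('World-Manager-CLI 0.1.0')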
# === main.py (repo: Jelloeater/8266_web-relay, license: MIT) ===
import socket
import ure as re
import time
import machine


def run():
    # Standard socket stuff:
    host = ''
    port = 80
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind((host, port))
    sock.listen(1)  # don't queue up any requests

    while True:
        csock, caddr = sock.accept()
        print("\nConnection from: " + str(caddr))
        req = csock.recv(1024)  # get the request, 1kB max
        get_req = str(req).split('GET /')[1].split('HTTP')[0]
        print('Req RAW:')
        print(req)
        output = parse_req(get_req)
        # Note: the blank line after Content-Type is required to separate the
        # HTTP headers from the response body.
        csock.sendall("""HTTP/1.0 200 OK
Content-Type: text/html

<html>
<head>
</head>
<body>
<form action="" method="get">
<button name="pin1" value="True">P1-On</button>
</form>
<form action="" method="get">
<button name="pin1" value="False">P1-Off</button>
</form>
<br>
<form action="" method="get">
<button name="pin2" value="True">P2-On</button>
</form>
<form action="" method="get">
<button name="pin2" value="False">P2-Off</button>
</form>
<br>
OUTPUT:
{0}
</body>
</html>
""".format(str(output)))
        csock.close()


def parse_req(get_req):
    print('Get Req:')
    print(get_req)
    if 'favicon.ico' not in get_req:
        get_req = get_req[1:]  # strip the leading '?' from the query string
        data = get_req.split('=')
        print(data)
        return pin_logic(data)


def pin_logic(data):
    if 'pin1' in data[0]:
        machine.Pin(5, machine.Pin.OUT).on() if 'True' in data[1] else machine.Pin(5, machine.Pin.OUT).off()
    if 'pin2' in data[0]:
        machine.Pin(2, machine.Pin.OUT).on() if 'True' in data[1] else machine.Pin(2, machine.Pin.OUT).off()
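# Hedged helper (not in the original script): the ESP8266 must already be on
# the network before run() binds the socket; boot.py usually handles this.
# The function below is an illustrative sketch using MicroPython's network
# module and is never called by the code above.
def connect_wifi(ssid, password):
    import network
    sta = network.WLAN(network.STA_IF)
    sta.active(True)
    if not sta.isconnected():
        sta.connect(ssid, password)
        while not sta.isconnected():
            time.sleep(1)
    return sta.ifconfig()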
try:
    run()
except Exception:
    # any crash (including a failed socket bind) triggers a device reset
    time.sleep(3)
    machine.reset()
# === src_para/params.py (repo: david-yoon/detecting-incongruity, license: MIT) ===
class Params:

    ################################
    # dataset
    ################################

    DATA_DIR = '../data/'

    DATA_TRAIN_TITLE = 'train/train_title.npy'
    DATA_TRAIN_BODY = 'train/train_body.npy'
    DATA_TRAIN_LABEL = 'train/train_label.npy'

    DATA_DEV_TITLE = 'dev/dev_title.npy'
    DATA_DEV_BODY = 'dev/dev_body.npy'
    DATA_DEV_LABEL = 'dev/dev_label.npy'

    DATA_TEST_TITLE_BODY = 'test/data_para_test.pkl'
    DATA_TEST_LABEL = 'test/test_label.npy'

    DATA_DEBUG_TITLE_BODY = 'debug/data_para_debug.pkl'

    VOCA_FILE_NAME = 'dic_mincutN.pkl'
    GLOVE_FILE_NAME = 'W_embedding.npy'

    ################################
    # train
    ################################

    till_max_epoch = False
    num_till_max_epoch = 8

    CAL_ACCURACY_FROM = 0
    MAX_EARLY_STOP_COUNT = 10
    EPOCH_PER_VALID_FREQ = 0.3

    is_embeddign_train = True  # True is better

    dr_text_in = 0.3   # 0.3 naacl-18
    dr_text_out = 1.0
    dr_con_in = 1.0    # 1.0 naacl-18
    dr_con_out = 1.0

    ################################
    # model
    ################################

    reverse_bw = True

    is_text_encoding_bidir = False
    is_chunk_encoding_bidir = True

    is_text_residual = False
    is_chunk_residual = False

    add_attention = True

    add_LTC = False
    LTC_topic_size = 3
    LTC_memory_dim = 256
    LTC_dr_prob = 0.8

    ################################
    # etc
    ################################

    IS_DEBUG = False  # use short dataset
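# Hedged usage sketch (not part of the original file): training code is
# expected to read these class attributes directly; the prints below just show
# how the data paths resolve.
if __name__ == '__main__':
    print(Params.DATA_DIR + Params.DATA_TRAIN_TITLE)
    print(Params.DATA_DIR + Params.DATA_TRAIN_LABEL)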
# === baselayer/services/webpack.py (repo: yaowenxi/cesium, license: BSD-3-Clause) ===
# encoding: utf-8
from baselayer.app.env import load_env
import subprocess
import sys
import time
import os
from pathlib import Path

env, cfg = load_env()

bundle = Path(os.path.dirname(__file__)) / '../../static/build/bundle.js'


def run(cmd):
    print("Starting")
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    for line in p.stdout:
        print(f'[service/webpack] {line.decode()}', end="")
        sys.stdout.flush()
    return p


if env.debug:
    print("[service/webpack]: debug mode detected, launching webpack monitor")
    p = run(['./node_modules/.bin/webpack', '--watch'])
    sys.exit(p.returncode)
elif bundle.is_file():
    print("[service/webpack]: bundle.js already built, exiting")
    # Run for a few seconds so that supervisor knows the service was
    # successful
    time.sleep(3)
    sys.exit(0)
else:
    print("[service/webpack]: bundle.js not found, building")
    p = run(['./node_modules/.bin/webpack'])
    time.sleep(1)
    sys.exit(p.returncode)
# === tests/test_interfaces/test_to_binary.py (repo: softwareunderground/subsurface, license: Apache-2.0) ===
import imageio
import pytest
from subsurface.reader.read_netcdf import read_unstruct
import json

try:
    import geopandas as gpd
    GEOPANDAS_IMPORTED = True
except ImportError:
    GEOPANDAS_IMPORTED = False

import numpy as np

from subsurface import UnstructuredData, TriSurf, StructuredData
from subsurface.reader.profiles.profiles_core import create_mesh_from_trace
from subsurface.visualization import to_pyvista_mesh, pv_plot, \
    to_pyvista_mesh_and_texture


@pytest.fixture(scope='module')
def unstruct(data_path):
    us = read_unstruct(data_path + '/interpolator_meshes.nc')
    return us


@pytest.fixture(scope='module')
def wells(data_path):
    us = read_unstruct(data_path + '/wells.nc')
    return us


def test_wells_to_binary(wells):
    bytearray_le, header = wells.to_binary()
    print(header)
    with open('well_f.json', 'w') as outfile:
        json.dump(header, outfile)
    with open("wells_f.le", "wb") as new_file:
        new_file.write(bytearray_le)


@pytest.mark.skipif(GEOPANDAS_IMPORTED is False, reason="Geopandas is not imported")
def test_profile_to_binary(data_path):
    traces = gpd.read_file(data_path + '/profiles/Traces.shp')
    v, e = create_mesh_from_trace(traces.loc[0, 'geometry'], traces.loc[0, 'zmax'],
                                  traces.loc[0, 'zmin'])
    unstruct_temp = UnstructuredData.from_array(v, e)

    cross = imageio.imread(data_path + '/profiles/Profil1_cropped.png')
    struct = StructuredData.from_numpy(np.array(cross))
    texture_binary, texture_header = struct.to_binary()

    origin = [traces.loc[0, 'geometry'].xy[0][0],
              traces.loc[0, 'geometry'].xy[1][0],
              int(traces.loc[0, 'zmin'])]
    point_u = [traces.loc[0, 'geometry'].xy[0][-1],
               traces.loc[0, 'geometry'].xy[1][-1],
               int(traces.loc[0, 'zmin'])]
    point_v = [traces.loc[0, 'geometry'].xy[0][0],
               traces.loc[0, 'geometry'].xy[1][0],
               int(traces.loc[0, 'zmax'])]
    texture_header['texture_origin'] = origin
    texture_header['texture_point_u'] = point_u
    texture_header['texture_point_v'] = point_v

    ts = TriSurf(
        mesh=unstruct_temp,
        texture=struct,
        texture_origin=origin,
        texture_point_u=point_u,
        texture_point_v=point_v
    )
    _, uv = to_pyvista_mesh_and_texture(ts)

    import pandas as pd
    unstruct = UnstructuredData.from_array(v, e, vertex_attr=pd.DataFrame(uv, columns=['u', 'v']))
    mesh_binary, mesh_header = unstruct.to_binary()

    with open('mesh_uv.json', 'w') as outfile:
        json.dump(mesh_header, outfile)
    with open('texture.json', 'w') as outfile:
        json.dump(texture_header, outfile)
    with open("mesh_uv_f.le", "wb") as new_file:
        new_file.write(mesh_binary)
    with open("texture_f.le", "wb") as new_file:
        new_file.write(texture_binary)

    return mesh_binary
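# Hedged sketch (not an original test): reading an exported little-endian
# buffer back with numpy. The dtype here is an assumption; the real value
# would come from the JSON header written alongside each .le file.
def _read_le_buffer(path="wells_f.le", dtype="float32"):
    with open(path, "rb") as f:
        return np.frombuffer(f.read(), dtype=np.dtype(dtype).newbyteorder("<"))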
# === ticketsplease/ticketsplease/modules/adfs/envelope/sct.py (repo: secureworks/whiskeysamlandfriends, license: Apache-2.0) ===
# Copyright 2021 Secureworks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import logging
from os import urandom
from uuid import uuid4
from typing import Dict
from xml.etree import ElementTree
from ticketsplease.modules.adfs.envelope.utils import (
    NAMESPACES,
    send_envelope,
    get_psha1,
    derive_wstrustkey,
    decrypt_wstrust_cipherdata,
    create_soap_envelope,
)


class SCT_ENVELOPE:
    def _create_sct_envelope(
        self,
        key: bytes,
        clientSecret: bytes,
        context: bytes,
        keyIdentifier: bytes,
        server: str,
    ):
        """Build an SCT envelope.

        Arguments:
            key: security key from parsed RSTR
            clientSecret: generated random bytes
            context: security context from parsed RSTR
            keyIdentifier: key identifier from parsed RSTR
            server: ip_address|hostname of ADFS server

        Returns:
            SCT envelope
        """
        # https://github.com/Gerenios/AADInternals/blob/master/ADFS_utils.ps1#L627
        payload = f'<t:RequestSecurityToken xmlns:t="http://schemas.xmlsoap.org/ws/2005/02/trust"><t:TokenType>http://schemas.xmlsoap.org/ws/2005/02/sc/sct</t:TokenType><t:RequestType>http://schemas.xmlsoap.org/ws/2005/02/trust/Issue</t:RequestType><t:Entropy><t:BinarySecret Type="http://schemas.xmlsoap.org/ws/2005/02/trust/Nonce" u:Id="uuid-{uuid4()}">{base64.b64encode(clientSecret).decode()}</t:BinarySecret></t:Entropy><t:KeySize>256</t:KeySize></t:RequestSecurityToken>'
        action = "http://schemas.xmlsoap.org/ws/2005/02/trust/RST/SCT"

        envelope = create_soap_envelope(
            key,
            context,
            keyIdentifier,
            server,
            payload,
            action,
        )
        return envelope

    def _parse_sct_envelope(
        self,
        envelope: bytes,
        key: bytes,
        clientSecret: bytes,
    ) -> Dict:
        """Parse the SCT response envelope.

        Arguments:
            envelope: SCT response envelope
            key: security key from the RST exchange
            clientSecret: client entropy used to compute the session key

        Returns:
            parsed SCT envelope (context, key, key identifier)
        """
        try:
            tree = ElementTree.fromstring(envelope)
            # https://github.com/Gerenios/AADInternals/blob/master/ADFS_utils.ps1#L709
            # nonce0 = tree.findall(".//c:DerivedKeyToken", NAMESPACES["c"])[0][3].text
            # cipher0 = tree.findall(".//e:EncryptedData", NAMESPACES["e"])[0][2][0].text
            nonce1 = base64.b64decode(
                tree.findall(".//c:DerivedKeyToken", NAMESPACES["c"])[1][1].text
            )
            cipher1 = base64.b64decode(
                tree.findall(".//e:EncryptedData", NAMESPACES["e"])[1][2][0].text
            )
        except Exception as e:
            logging.error(str(e))
            raise TypeError("server responded with malformed SCT envelope") from e

        derivedKey = derive_wstrustkey(key, nonce1, 32)
        logging.debug(f"\tNonce: {base64.b64encode(nonce1)}")
        logging.debug(f"\tDerived key: {base64.b64encode(derivedKey)}")

        logging.info("\tDecrypting WSTrust Cipher Text")
        # https://github.com/Gerenios/AADInternals/blob/master/ADFS_utils.ps1#L727
        # Decrypt the cipher data
        bPlainText = decrypt_wstrust_cipherdata(cipher1, derivedKey)
        logging.debug(f"\tDecrypted SCT Data:\n{bPlainText.decode().strip()}\n")

        # https://github.com/Gerenios/AADInternals/blob/master/ADFS_utils.ps1#L651
        # Now parse the decrypted data from the outer SCT envelope
        try:
            tree = ElementTree.fromstring(bPlainText)
        except Exception as e:
            logging.error(str(e))
            logging.error(f"invalid xml:\n{bPlainText}")
            raise TypeError("failed to parse decrypted SCT envelope data") from e

        token = tree.find(".//t:BinarySecret", NAMESPACES["t"]).text
        # https://github.com/Gerenios/AADInternals/blob/master/ADFS_utils.ps1#L653
        serverSecret = base64.b64decode(token)
        computedKey = get_psha1(clientSecret, serverSecret, 32)

        # fmt: off
        # https://github.com/Gerenios/AADInternals/blob/c255cd66a3731c32cfbdf9fdb17f2b03c7665b72/ADFS_utils.ps1#L656
        context = tree.find(".//t:RequestedSecurityToken", NAMESPACES["t"])[0]
        context = context.attrib["{%s}Id" % NAMESPACES["u"]["u"]]
        keyIdentifier = tree.find(".//t:RequestedSecurityToken", NAMESPACES["t"])[0][0].text.split(":")[2]
        # fmt: on

        logging.debug(f"\tServer secret: {base64.b64encode(serverSecret)}")
        logging.debug(f"\tComputed key: {base64.b64encode(computedKey)}")
        logging.debug(f"\tContext: {context}")
        logging.debug(f"\tIdentifier: {keyIdentifier}")

        # https://github.com/Gerenios/AADInternals/blob/master/ADFS_utils.ps1#L665
        # Construct the return value
        retVal = {
            "Context": context,
            "Key": computedKey,
            "Identifier": keyIdentifier,
        }
        return retVal

    @classmethod
    def run(
        cls,
        adfs_host: str,
        rstr: Dict[str, bytes],
    ):
        """Generate and send an SCT envelope to the target ADFS server.

        Receive the SCT response and parse the message for the context,
        key, and key identifier.

        Arguments:
            adfs_host: target ADFS server
            rstr: parsed RST response object

        Returns:
            dictionary of parsed SCT response data (context, key,
            key identifier)
        """
        logging.info("[ * ] Building and sending SCT envelope to the ADFS server")
        clientSecret = urandom(32)

        # Build the SCT envelope to request the configuration
        sct_envelope = cls._create_sct_envelope(
            cls,
            rstr["Key"],
            clientSecret,
            rstr["Context"],
            rstr["Identifier"],
            adfs_host,
        )
        logging.debug(f"\tSCT Envelope:\n{sct_envelope.strip()}")

        # Send the SCT envelope
        response = send_envelope(adfs_host, sct_envelope)
        logging.debug(f"\tRST Response Status: {response}")
        logging.debug(f"\tRST Response:\n{response.content}")

        if response.status_code == 200:
            logging.info("[ * ] Parsing SCT envelope response")
            sct_data = cls._parse_sct_envelope(
                cls,
                response.content,
                rstr["Key"],
                clientSecret,
            )
        else:
            raise ValueError(f"Bad response from ADFS server: {response.status_code}")

        return sct_data
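# Hedged usage sketch (illustrative, not part of the module): run() expects
# the dict produced by the preceding RST exchange. The key names below mirror
# those accessed above; the host and values are placeholders.
#
# rstr = {"Context": "...", "Key": b"...", "Identifier": "..."}
# sct = SCT_ENVELOPE.run("adfs.example.com", rstr)
# print(sct["Identifier"])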