id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
#!/usr/bin/python3
# Map the first letters of each word of the mnemonic sentence to the atomic
# number of the corresponding chemical element (H=1 ... Ca=20).
s = "Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can."
# 1-based word positions whose element symbol is a single letter (H, B, C, N, O, F, P, S, K).
one = [1, 5, 6, 7, 8, 9, 15, 16, 19]
# wlen[i] is the symbol length (1 or 2) for word i+1.
wlen = []
for i in range(1, 21):
    if i in one:
        wlen.append(1)
    else:
        wlen.append(2)
# Map each symbol prefix to its (1-based) word position == atomic number.
ret = {}
for idx, w in enumerate(s.split()):
    ret[w[0:wlen[idx]]] = idx + 1
print(ret)
| StarcoderdataPython |
def rot_90_clock(strng):
    """Rotate a rectangular text block 90 degrees clockwise."""
    rows = strng.split('\n')
    rotated = []
    for col in range(len(rows[0])):
        column = ''.join(row[col] for row in rows)
        rotated.append(column[::-1])
    return '\n'.join(rotated)
def diag_1_sym(strng):
    """Return the transpose of a rectangular text block (main-diagonal symmetry)."""
    rows = strng.split('\n')
    transposed = []
    for col in range(len(rows[0])):
        transposed.append(''.join(row[col] for row in rows))
    return '\n'.join(transposed)
def selfie_and_diag1(strng):
    """Join each row of the block with the matching row of its transpose, '|'-separated.

    Iterates over len(first row) rows, i.e. assumes a square block (same as
    the original implementation).
    """
    rows = strng.split('\n')
    out = []
    for i in range(len(rows[0])):
        mirror = ''.join(row[i] for row in rows)
        out.append(rows[i] + '|' + mirror)
    return '\n'.join(out)
def oper(fct, s):
    """Apply the transformation *fct* to the string *s* and return the result."""
    result = fct(s)
    return result
def rot_90_clock2(strng):
    """Rotate a text block 90 degrees clockwise via zip over the reversed rows."""
    reversed_rows = strng.split('\n')[::-1]
    return '\n'.join(map(''.join, zip(*reversed_rows)))
def diag_1_sym2(strng):
    """Transpose a text block using zip."""
    columns = zip(*strng.split('\n'))
    return '\n'.join(map(''.join, columns))
def selfie_and_diag12(strng):
    """Pair each original row with its transposed counterpart, '|'-separated."""
    originals = strng.split('\n')
    mirrored = diag_1_sym(strng).split('\n')
    return '\n'.join(o + '|' + m for o, m in zip(originals, mirrored))
def oper2(fct, s):
    """Apply *fct* to *s* (same contract as :func:`oper`)."""
    return fct(s)
| StarcoderdataPython |
24150 | <reponame>gconine88/MATH_6204<filename>hw10_conine/thomas.py
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 20 23:06:49 2017
@author: Grant
An implementation of the Thomas algorithm in Python, using just-in-time
compiling from numba for additional speed
"""
import numpy as np
from numba import njit, f8
def solve(A, d):
    '''Solve A x = d for a tridiagonal matrix A via the Thomas algorithm.

    Extracts the three diagonals of A, converts everything to numba float64
    so the jitted solver can run in nopython mode, validates that A really
    is tridiagonal, and delegates to solve_body.
    '''
    # Cast to numba f8 dtype arrays so solve_body can execute in nopython
    # mode, which yields maximum speed.
    a = f8(np.diagonal(A, offset=0))    # main diagonal
    b = f8(np.diagonal(A, offset=1))    # superdiagonal
    c = f8(np.diagonal(A, offset=-1))   # subdiagonal
    dfloat = f8(d)
    # Rebuild a matrix from the extracted diagonals; if it is not 'close
    # enough' to A, then A had entries outside the three diagonals.
    D = np.diag(a, 0) + np.diag(b, 1) + np.diag(c, -1)
    if not np.allclose(A, D):
        raise Exception('The given A is not tridiagonal')
    # pass to thomas algorithm solver
    x = solve_body(a, b, c, dfloat)
    return x
# njit forces nopython compilation for maximum speed. The downside is less
# flexible input handling, so solve() above formats the data first.
@njit
def solve_body(a, b, c, d):
    ''' Thomas algorithm to solve a tridiagonal system of equations

    INPUTS
    ========
    a: numpy array
        the diagonal entries (length n)
    b: numpy array
        the superdiagonal entries (length n-1)
    c: numpy array
        the subdiagonal entries (length n-1)
    d: numpy array
        the right-hand side of the system of equations (length n)

    RETURNS
    ========
    The solution for the given tri-diagonal system of equations.
    '''
    n = len(a)  # number of equations in the system
    # initialize the modified diagonal (alpha) and right-hand side (beta)
    alpha = np.zeros(n)
    beta = np.zeros(n)
    alpha[0] = a[0]
    beta[0] = d[0]
    # Forward sweep: eliminate the subdiagonal. b and c are 0-indexed over
    # 0..n-2 (not 1..n-1), hence the i-1 offsets.
    for i in range(1, n, 1):
        alpha[i] = a[i] - (b[i-1] * c[i-1]) / alpha[i-1]
        beta[i] = d[i] - (beta[i-1] * c[i-1]) / alpha[i-1]
    # initialize the solution and set the last unknown
    x = np.zeros(n)
    x[n-1] = beta[n-1] / alpha[n-1]
    # Backward substitution: row j couples to x[j+1] through the
    # superdiagonal entry b[j]. (Bug fix: the original used b[j-1], which
    # is only accidentally correct for n == 2 via negative indexing.)
    for j in range(n-2, -1, -1):
        x[j] = (beta[j] - b[j] * x[j+1]) / alpha[j]
    return x
| StarcoderdataPython |
5120236 | <reponame>james7132/rTouhouModBot<gh_stars>0
import os
from util import *
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.ext.declarative import declarative_base
db_path = "/db/r_touhou_mod.db"
engine = create_engine('sqlite:///{0}'.format(db_path))
Session = sessionmaker(bind=engine)
Base = declarative_base()


class User(Base):
    """A reddit user tracked by the bot."""
    __tablename__ = 'users'
    # User ids are assigned externally (reddit), hence no autoincrement.
    id = Column(String, primary_key=True, autoincrement=False)


class Post(Base):
    """A submission processed by the bot."""
    __tablename__ = 'posts'
    id = Column(String, primary_key=True, autoincrement=False)
    date = Column(DateTime)
    # Decision comes from the star import of `util` -- presumably an Enum of
    # moderation outcomes; confirm in util.
    status = Column(Enum(Decision))
    author_id = Column(String, ForeignKey('users.id'))
    reported = Column(Boolean)
    flair_warned = Column(Boolean)


# Relationships are attached after both classes exist so each side can
# reference the other.
Post.author = relationship('User', back_populates='posts')
User.posts = relationship('Post', order_by=Post.date, back_populates='author')

# Ensure the database directory and schema exist before first use.
db_dir = os.path.dirname(db_path)
if not os.path.exists(db_dir):
    os.makedirs(db_dir)
if not os.path.isfile(db_path):
    Base.metadata.create_all(engine)
| StarcoderdataPython |
9642756 | from .testutils import VerminTest
class VerminExclusionsTests(VerminTest):
    """Verify that configured exclusions suppress the minimum-version
    requirements the excluded features would otherwise impose."""

    def test_module(self):
        # email.parser.FeedParser normally requires 2.4 / 3.0.
        visitor = self.visit("from email.parser import FeedParser")
        self.assertEqual([(2, 4), (3, 0)], visitor.minimum_versions())
        # Excluding the member drops the requirement entirely.
        self.config.add_exclusion("email.parser.FeedParser")
        visitor = self.visit("from email.parser import FeedParser")
        self.assertEqual([(0, 0), (0, 0)], visitor.minimum_versions())

    def test_kwarg(self):
        # ArgumentParser(allow_abbrev=...) requires 3.5.
        visitor = self.visit("from argparse import ArgumentParser\nArgumentParser(allow_abbrev=False)")
        self.assertEqual([None, (3, 5)], visitor.minimum_versions())
        self.config.add_exclusion("argparse")  # module
        self.config.add_exclusion("argparse.ArgumentParser(allow_abbrev)")  # kwarg
        visitor = self.visit("from argparse import ArgumentParser\nArgumentParser(allow_abbrev=False)")
        self.assertEqual([(0, 0), (0, 0)], visitor.minimum_versions())

    def test_codecs_error_handler(self):
        # The 'surrogateescape' error handler requires 3.1, whether passed
        # positionally or by keyword.
        visitor = self.visit("import codecs\ncodecs.encode('test', 'utf-8', 'surrogateescape')")
        self.assertEqual([None, (3, 1)], visitor.minimum_versions())
        visitor = self.visit("import codecs\ncodecs.encode('test', 'utf-8', errors='surrogateescape')")
        self.assertEqual([None, (3, 1)], visitor.minimum_versions())
        self.config.add_exclusion("ceh=surrogateescape")
        visitor = self.visit("import codecs\ncodecs.encode('test', 'utf-8', 'surrogateescape')")
        self.assertEqual([(2, 4), (3, 0)], visitor.minimum_versions())
        visitor = self.visit("import codecs\ncodecs.encode('test', 'utf-8', errors='surrogateescape')")
        self.assertEqual([(2, 4), (3, 0)], visitor.minimum_versions())

    def test_codecs_encoding(self):
        # The 'koi8_t' codec requires 3.5.
        visitor = self.visit("import codecs\ncodecs.encode('test', 'koi8_t')")
        self.assertEqual([None, (3, 5)], visitor.minimum_versions())
        visitor = self.visit("import codecs\ncodecs.encode('test', data_encoding='koi8_t')")
        self.assertEqual([None, (3, 5)], visitor.minimum_versions())
        self.config.add_exclusion("ce=koi8_t")
        visitor = self.visit("import codecs\ncodecs.encode('test', 'koi8_t')")
        self.assertEqual([(2, 4), (3, 0)], visitor.minimum_versions())
        visitor = self.visit("import codecs\ncodecs.encode('test', data_encoding='koi8_t')")
        self.assertEqual([(2, 4), (3, 0)], visitor.minimum_versions())

    def test_long(self):
        # The `long` builtin exists only on Python 2.
        visitor = self.visit("a = long(1)")
        self.assertEqual([(2, 0), None], visitor.minimum_versions())
        self.config.add_exclusion("long")
        visitor = self.visit("a = long(1)")
        self.assertEqual([(0, 0), (0, 0)], visitor.minimum_versions())
| StarcoderdataPython |
from distutils.core import setup

# NOTE(review): distutils is deprecated and removed in Python 3.12;
# setuptools.setup would be a drop-in replacement here.
setup(
    name='rss2producer',
    version='0.1.1',
    description="Simplifies the process of creating an RSS 2.0 feed.",
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/nathan-osman/rss2producer',
    license='MIT',
    packages=['rss2producer'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Topic :: Internet :: WWW/HTTP'
    ]
)
| StarcoderdataPython |
4883 | # -*- coding: utf-8 -*-
import os,sys
from PyQt4 import QtGui,QtCore
dataRoot = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,'histdata'))
sys.path.append(dataRoot)
import dataCenter as dataCenter
from data.mongodb.DataSourceMongodb import Mongodb
import datetime as dt
def getSymbols():
    """Return the list of instrument codes to download.

    The codes are copy-pasted verbatim (one per line) into ``codesStr``;
    any exchange suffix after a '.' is stripped.
    """
    # 1) Codes copy-pasted from Excel, one symbol per line.
    codesStr = """
    XAGUSD
    """
    # 2) str.split() drops all newlines and whitespace; keep only the part
    #    before an optional '.' suffix (e.g. '000021.SZ' -> '000021').
    codeList = [code.split('.')[0] for code in codesStr.split()]
    return codeList
def subMain():
    """Download daily ('D') history for all configured symbols from MT5 into MongoDB."""
    DC = dataCenter.dataCenter()
    remoteDataSourceType = 'mt5'
    localStorageType = 'mongodb'
    periodType = 'D'
    timeStart = dt.datetime(2000, 10, 20)
    timeEnd = dt.datetime.now()
    # 1) get the instrument codes
    codeList = getSymbols()
    # 2) download history data into local storage
    dataDict = DC.downloadHistData(providerType=remoteDataSourceType, storageType=localStorageType, periodType=periodType,
                                   codeList=codeList, timeFrom=timeStart, timeTo=timeEnd)


if __name__ == '__main__':
    # A Qt application object is not needed for a plain download run, so the
    # QApplication lines stay commented out.
    #app = QtGui.QApplication(sys.argv)
    subMain()
    #sys.exit(app.exec_())
3318523 | <reponame>synapticarbors/mpire<gh_stars>100-1000
class StopWorker(Exception):
    """Raised to terminate a worker process from the main process."""
class CannotPickleExceptionError(Exception):
    """Raised when Pickle fails to pickle the actual exception object."""
| StarcoderdataPython |
5068993 | '''
Created on 13.06.2014
@author: schaffrr
'''
import csv
import sys
import getopt
class EnsemblAnnotation():
def __init__(self, inputFile):
self._inputFile = inputFile
def readAndModifyEnsembleAnnotation(self, ouputFile):
r = csv.reader(open(self._inputFile), delimiter="\t") # Here your csv file
writer = csv.writer(open(ouputFile, "wb"), delimiter = '\t')
firstRow = r.next() #header line
# print firstRow
transcriptID_idx = firstRow.index("transcript_id")
start_idx = firstRow.index("start")
end_idx = firstRow.index("end")
strand_idx = firstRow.index("strand")
width_idx = firstRow.index("width")
#optional argument therefore try to find it otherwise add it for comfort
exon_number_present = True
try:
exon_number_idx = firstRow.index("exon_number")
except ValueError:
exon_number_present = False
print "adding exon_number column"
firstRow.append("exon_number")
exon_number_idx = firstRow.index("exon_number")
type_idx = firstRow.index("type")
previous = ""
writer.writerow(firstRow)
insertCount = 0
exonCount = 1
for row in r:
if row:
if not exon_number_present:
row.append(1)
if row[type_idx] == "exon":
current = row
if previous == "":
previous = current
if current[transcriptID_idx] == previous[transcriptID_idx]:
exonCount = exonCount + 1
if not exon_number_present:
row[exon_number_idx] = exonCount
#checking strandness and adjusting insertStart and insertEnd
if previous[strand_idx] == "+":
insertStart = int(previous[end_idx]) + 1
insertEnd = int(current[start_idx])-1
elif previous[strand_idx] == "-":
insertEnd = int(previous[start_idx]) - 1
insertStart = int(current[end_idx]) + 1
if insertStart <= insertEnd:
insertCount = insertCount + 1
insert = list(current)
toChangeIdx = [ start_idx, end_idx, width_idx, type_idx, exon_number_idx]
toChangeVect = [insertStart, insertEnd, insertEnd-insertStart+1, "intron",insertCount]
for i,j in zip(toChangeIdx,range(0,len(toChangeVect))):
insert[i] = toChangeVect[j]
writer.writerow(insert)
else:
insertCount = 0
exonCount = 0
previous = current
writer.writerow(row)
def main(argv):
inputfile = ''
outputfile = ''
try:
opts, args = getopt.getopt(argv, "hi:o:", ["help","ifile=", "ofile="])
except getopt.GetoptError:
print 'ModifyEnsemblAnnotation.py -i <inputfile> -o <outputfile>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'help'
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
if inputfile == "":
print "input filename must not be empty! see -h for parameters"
sys.exit()
if outputfile == "":
print "no output filename, choosing <input>.mod"
outputfile = inputfile + ".mod"
modifyEnsemblAnnotation = EnsemblAnnotation(inputfile)
print "calling readAndModify"
modifyEnsemblAnnotation.readAndModifyEnsembleAnnotation(outputfile)
# print "finished correctly " + str(finValid)
if __name__ == "__main__":
main(sys.argv[1:])
| StarcoderdataPython |
5023269 | import sys
sys.path.append('/home/dhe/hiwi/Exercises/Pretrained_Models_NMT/')
import onmt.Markdown
import argparse
from pretrain_module.roberta_tokenization_ch import FullTokenizer
from transformers import RobertaTokenizer
# Command-line options: paths of the raw source/target corpora to tokenize.
parser = argparse.ArgumentParser(description='preprocess.py')
onmt.Markdown.add_md_help_argument(parser)
parser.add_argument('-src_data', default="",
                    help="Path to the source data")
parser.add_argument('-tgt_data', default="",
                    help="Path to the target data")
opt = parser.parse_args()

# Vocabulary file for the Chinese RoBERTa tokenizer.
vocab_file = "/project/student_projects2/dhe/BERT/experiments/pytorch_pretrained_models/roberta-base-layer12-zh/bert-base-chinese-vocab.txt"
def tokenize_data(raw_data, tokenizer, lang):
    """Tokenize every line of ``raw_data`` and write the result to
    ``<raw_data>.roberta.tok``, one space-joined sentence per line.

    Only the source side ('en') gets explicit <s>/</s> sentence markers here;
    the target side ('zh') is left as-is because the preprocessing step adds
    BOS/EOS markers for the target later.
    """
    with open(raw_data, "r", encoding="utf-8") as f_raw:
        tokenized_sents = []
        for line in f_raw:
            sent = line.strip()
            tokenized_sent = tokenizer.tokenize(sent)
            # Mind the special symbols: wrap only English sentences.
            if lang == "en":
                tokenized_sent.insert(0, "<s>")
                tokenized_sent.append("</s>")
            tokenized_sents.append(tokenized_sent)
    new_data = raw_data + ".roberta.tok"
    with open(new_data, "w", encoding="utf-8") as f_tok:
        for sent in tokenized_sents:
            f_tok.write(" ".join(sent))
            f_tok.write('\n')
def main():
    """Tokenize the configured source (en) and/or target (zh) corpora."""
    tokenizer_zh = FullTokenizer(vocab_file)
    # FullTokenizer is the end-to-end tokenizer; its wordpiece_tokenizer
    # attribute is only one stage of the pipeline, so use the full tokenizer.
    tokenizer_en = RobertaTokenizer.from_pretrained('roberta-base')
    src_lang = "en"
    tgt_lang = "zh"
    # Bug fix: the original compared with `is not ""` (identity, not
    # equality) -- unreliable and a SyntaxWarning on Python >= 3.8.
    if opt.src_data != "":
        print("tokenzize src data")
        tokenize_data(opt.src_data, tokenizer_en, src_lang)
    if opt.tgt_data != "":
        print("tokenzize tgt data")
        tokenize_data(opt.tgt_data, tokenizer_zh, tgt_lang)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3541957 | import unittest
from ..src.sfbulkapiv2 import Bulk
import uuid
import os
class BulkTest(unittest.TestCase):
    """Live integration tests for the Salesforce Bulk API v2 wrapper.

    Requires USERNAME, PASSWORD, CLIENT_ID and CLIENT_SECRET environment
    variables and a reachable (sandbox) org.
    """

    # unittest only discovers methods whose names start with "test"
    def connect_test_org(self):
        """Build a Bulk client from credentials in the environment."""
        # set the below environment variables before running the tests
        username = os.environ["USERNAME"]
        password = os.environ["PASSWORD"]
        client_id = os.environ["CLIENT_ID"]
        client_secret = os.environ["CLIENT_SECRET"]
        blk = Bulk(client_id, client_secret, username, password, False)
        return blk

    def clear_test_data(self):
        """Delete contacts created by previous test runs."""
        bulk = self.connect_test_org()
        testDataToDelete = bulk.query(
            """select id from contact where lastName like '%test%'"""
        )
        bulk.delete("contact", testDataToDelete)
        print("test data deleted")

    def test_bulkQuery(self):
        bulk = self.connect_test_org()
        results = bulk.query("select id from account limit 2")
        print(results)
        # header + 2 records + trailing newline -> 4 lines
        lines = results.split("\n")
        self.assertEqual(len(lines), 4)

    def test_insert(self):
        bulk = self.connect_test_org()
        # A random last name guarantees the query below only matches this run.
        contactLastName = uuid.uuid4()
        content = """firstName,LastName
john,{}""".format(
            contactLastName
        )
        bulk.insert("contact", content)
        print(content)
        results = bulk.query(
            """select id from contact where name like'%{}%'""".format(contactLastName)
        )
        lines = results.split("\n")
        self.assertEqual(len(lines), 3)

    def test_upsert(self):
        # clear test data
        self.clear_test_data()
        # insert a contact record
        bulk = self.connect_test_org()
        content = """firstName,LastName,Customer_ID__c
smith,test,X9999998
"""
        bulk.insert("contact", content)
        # upsert: one existing record (update) plus one new record (insert)
        upsertData = """firstName,LastName,Customer_ID__c
smith,test,X9999998
will,test,X9999999
"""
        bulk.upsert("contact", "Customer_ID__c", upsertData)
        # get the count and verify
        query = """select id,name from contact where lastName like '%test%'"""
        result = bulk.query(query)
        print(result)
        lines = result.split("\n")
        print(lines)
        self.assertEqual(len(lines), 4)

    def test_delete(self):
        bulk = self.connect_test_org()
        # Create the test contact record
        testContactLastName = uuid.uuid4()
        content = """firstName,LastName
smith,{}""".format(
            testContactLastName
        )
        bulk.insert("contact", content)
        # Query the test contact record
        query = """select id from contact where lastName='{}'""".format(
            testContactLastName
        )
        result = bulk.query(query)
        print(result)
        bulk.delete("contact", result)
        result = bulk.query(query)
        lines = result.split("\n")
        print(len(lines))
        # After deletion only the header and trailing newline remain.
        self.assertEqual(len(lines), 2)
# python3 -m unittest BulkTest.py
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
9704758 | <gh_stars>1-10
from .connection import set_db_config_file_path
from .connection import DIALECT_MYSQL, DRIVER_PYMYSQL
from .connection import CURW_FCST_HOST, CURW_FCST_PORT, CURW_FCST_DATABASE, CURW_FCST_USERNAME, CURW_FCST_PASSWORD
from .connection import CURW_OBS_HOST, CURW_OBS_PORT, CURW_OBS_DATABASE, CURW_OBS_USERNAME, CURW_OBS_PASSWORD
from .connection import CURW_SIM_HOST, CURW_SIM_PORT, CURW_SIM_DATABASE, CURW_SIM_USERNAME, CURW_SIM_PASSWORD
from .connection import HOST, PORT, DATABASE, USERNAME, PASSWORD
from .common import COMMON_DATE_TIME_FORMAT
| StarcoderdataPython |
3248565 | <reponame>rishusingh022/My-Journey-of-Data-Structures-and-Algorithms<filename>Project Euler Problems/Problem30.py
def check_self_behaviour(num, pow):
    """Return True if num equals the sum of the pow-th powers of its digits."""
    digit_power_sum = 0
    for digit_char in str(num):
        digit_power_sum += int(digit_char) ** pow
    return num == digit_power_sum
# Project Euler 30: sum every number (below one million) that equals the sum
# of the fifth powers of its digits. 1 is excluded as a trivial sum, so the
# search starts at 2.
final_ans = 0
for i in range(2, 1000000):
    if check_self_behaviour(i, 5):
        final_ans += i
print(final_ans)
6442323 | <reponame>AnimeThemes/animethemes-batch-encoder
from ._bitrate_mode import BitrateMode
class EncodingConfig:
    """Typed view over the [Encoding] / [VideoFilters] sections of the batch
    encoder config file, with defaults for every setting."""

    # Config keys
    config_allowed_filetypes = 'AllowedFileTypes'
    config_encoding_modes = 'EncodingModes'
    config_crfs = 'CRFs'
    config_include_unfiltered = 'IncludeUnfiltered'
    # Default stream config keys
    config_default_video_stream = 'DefaultVideoStream'
    config_default_audio_stream = 'DefaultAudioStream'
    # Default config values
    default_allowed_filetypes = '.avi,.m2ts,.mkv,.mp4,.wmv'
    default_encoding_modes = f'{BitrateMode.VBR.name},{BitrateMode.CBR.name}'
    default_crfs = '12,15,18,21,24'
    default_include_unfiltered = True
    default_video_filters = {'filtered': 'hqdn3d=0:0:3:3,gradfun,unsharp',
                             'lightdenoise': 'hqdn3d=0:0:3:3',
                             'heavydenoise': 'hqdn3d=1.5:1.5:6:6',
                             'unsharp': 'unsharp'}

    def __init__(self, allowed_filetypes, encoding_modes, crfs, include_unfiltered, video_filters, default_video_stream,
                 default_audio_stream):
        self.allowed_filetypes = allowed_filetypes
        self.encoding_modes = encoding_modes
        self.crfs = crfs
        self.include_unfiltered = include_unfiltered
        self.video_filters = video_filters
        self.default_video_stream = default_video_stream
        self.default_audio_stream = default_audio_stream

    @classmethod
    def from_config(cls, config):
        """Build an EncodingConfig from a parsed ConfigParser object,
        falling back to the class-level defaults for missing keys."""
        allowed_filetypes = config['Encoding'].get(EncodingConfig.config_allowed_filetypes,
                                                   EncodingConfig.default_allowed_filetypes).split(',')
        encoding_modes = config['Encoding'].get(EncodingConfig.config_encoding_modes,
                                                EncodingConfig.default_encoding_modes).split(',')
        crfs = config['Encoding'].get(EncodingConfig.config_crfs, EncodingConfig.default_crfs).split(',')
        include_unfiltered = config.getboolean('Encoding', EncodingConfig.config_include_unfiltered,
                                               fallback=EncodingConfig.default_include_unfiltered)
        # NOTE(review): ConfigParser.items(section, ...) takes (raw, vars)
        # after the section name, not a fallback value -- passing the default
        # dict here sets `raw` truthy rather than providing defaults. Confirm
        # the intended behavior when [VideoFilters] is customized.
        video_filters = config.items('VideoFilters', EncodingConfig.default_video_filters)
        default_video_stream = config['Encoding'].get(EncodingConfig.config_default_video_stream)
        default_audio_stream = config['Encoding'].get(EncodingConfig.config_default_audio_stream)
        return cls(allowed_filetypes, encoding_modes, crfs, include_unfiltered, video_filters, default_video_stream,
                   default_audio_stream)

    def get_default_stream(self, stream_type):
        """Return the configured default stream for 'video' or 'audio'; None otherwise."""
        if stream_type == 'video':
            return self.default_video_stream
        elif stream_type == 'audio':
            return self.default_audio_stream
        return None
3547390 | <gh_stars>1-10
"""
Models thermodynamic properties of the metal such as
generalized coordination number dependent dependent binding energies
"""
import numpy as np
import os
class metal:
    '''
    Properties of a metal: GCN-dependent OH*/OOH* binding energies fitted to
    DFT data, with optional (optionally correlated) uncertainty sampling.
    '''

    def __init__(self, met_name):
        '''
        Load cohesive energy, PBE lattice constant and DFT binding-energy
        data for the named metal ('Pt' or 'Au').

        Pt DFT data from Tables S2 and S3, and Au data from Figure S22, of
        Calle-Vallejo et al., Science 350, 185 (2015).
        '''
        self.name = met_name
        if met_name == 'Pt':
            self.E_coh = 4.5222          # cohesive energy (eV)
            self.lc_PBE = 3.968434601    # lattice constant for the PBE functional
            self.load_DFT_data('Pt_BEs.npy')
        elif met_name == 'Au':
            self.E_coh = 2.3645
            self.lc_PBE = 4.155657928
            #self.load_DFT_data('Au_BEs.npy')
            self.load_DFT_data('Au_Nature_BEs.npy')
        else:
            raise ValueError(met_name + ' not found in metal database.')

    def load_DFT_data(self, np_fname):
        '''
        Load the DFT table and fit linear BE-vs-GCN models for OH* and OOH*.

        :param np_fname: Name of the numpy file with binding energy data.
            Format: (n, 3) array; columns are site GCN, OH* BE and OOH* BE,
            referenced to OH(g) and OOH(g).
        '''
        base_dir = os.path.dirname(__file__)  # renamed from `dir` to avoid shadowing the builtin
        np_fname = os.path.join(base_dir, np_fname)
        BEs = np.load(np_fname)

        # Regress OH BE vs. GCN and keep the residual spread.
        self.OH_slope, self.OH_int = np.polyfit(BEs[:, 0], BEs[:, 1], 1)
        BE_OH_pred = BEs[:, 0] * self.OH_slope + self.OH_int
        res_OH = BEs[:, 1] - BE_OH_pred           # residuals
        self.sigma_OH_BE = np.std(res_OH)         # spread of residuals
        self.res_OH = res_OH

        # Regress OOH BE vs. GCN.
        self.OOH_slope, self.OOH_int = np.polyfit(BEs[:, 0], BEs[:, 2], 1)
        BE_OOH_pred = BEs[:, 0] * self.OOH_slope + self.OOH_int
        res_OOH = BEs[:, 2] - BE_OOH_pred
        self.sigma_OOH_BE = np.std(res_OOH)
        self.res_OOH = res_OOH

        # PCA on the joint residuals so that correlated noise can be sampled
        # later in get_BEs().
        data = np.transpose(np.vstack([res_OH, res_OOH]))
        eigenvectors, eigenvalues, V = np.linalg.svd(data.T, full_matrices=False)
        projected_data = np.dot(data, eigenvectors)
        sigma = projected_data.std(axis=0)
        self.pca_mat = eigenvectors
        self.pca_inv = np.linalg.inv(self.pca_mat)
        self.sigma_pca_1 = sigma[0]
        self.sigma_pca_2 = sigma[1]

    def get_BEs(self, GCN, uncertainty=False, correlations=True):
        '''
        :param GCN: generalized coordination number of the site
        :param uncertainty: if True, add random noise reflecting the error
            in the GCN relation
        :param correlations: if True, sample the noise from the PCA joint
            distribution of the residuals
        :returns: [OH* BE, OOH* BE]
        '''
        OH_BE = self.OH_slope * GCN + self.OH_int
        OOH_BE = self.OOH_slope * GCN + self.OOH_int
        if uncertainty:
            if correlations:
                pca1 = self.sigma_pca_1 * np.random.normal()
                pca2 = self.sigma_pca_2 * np.random.normal()
                BE_errors = np.matmul(np.array([pca1, pca2]), self.pca_mat)
                OH_BE_error = BE_errors[0]
                OOH_BE_error = BE_errors[1]
            else:
                OH_BE_error = self.sigma_OH_BE * np.random.normal()
                OOH_BE_error = self.sigma_OOH_BE * np.random.normal()
            OH_BE += OH_BE_error
            OOH_BE += OOH_BE_error
        return [OH_BE, OOH_BE]
# Read a count line (unused) and a line of integers, then print the numbers
# in reverse order, each followed by a space, with no final newline.
input()
arr = list(map(int, input().split()))
arr.reverse()
for num in arr:
    print(f"{num} ", end="")
5098924 | from typing import Union
from flask import Blueprint, current_app, jsonify, request, json
from app import SessionKey, socketio
from app.models import KeyLookupTable
from app.utils import error_respond
from app.utils.decorators import session_verify, master_password_verify
from app.utils.master_password import MasterPassword
from app.utils.misc import base64_decode, base64_encode
bp = Blueprint('api.password', __name__, url_prefix='/api/password')
def simple_decrypt_then_json(master_password: MasterPassword, data: Union[str, bytes]) -> Union[dict, list]:
    """Base64-decode ``data`` (if given as str), simple-decrypt it with the
    master password, and parse the plaintext as JSON."""
    if isinstance(data, str):
        data = base64_decode(data)
    return json.loads(master_password.simple_decrypt(data).decode())
def simple_encrypt_from_dict(master_password: MasterPassword, data: Union[dict, list]) -> bytes:
    """Serialize ``data`` to JSON and simple-encrypt it with the master password."""
    return master_password.simple_encrypt(json.dumps(data))
@bp.route('/')
@master_password_verify
def get_table():
    """Return the table of entries with decrypted metadata.

    The ``hidden`` query parameter includes hidden entries. Entries whose
    metadata cache is empty are back-filled from encrypted storage (with the
    password field stripped) before the table is returned.
    """
    hidden = request.args.get('hidden')
    master_password: MasterPassword = current_app.config['MASTER_PASSWORD']
    storage = current_app.config['STORAGE']
    if hidden:
        entries = KeyLookupTable.query.all()
    else:
        entries = KeyLookupTable.query.filter_by(hidden=False).all()
    # Entries without a cached metadata blob require extra decrypt/encrypt
    # work, which counts double toward master-password expiry.
    new_entries = [entry for entry in entries if entry.meta_data == '']
    if master_password.check_expire(len(entries) + len(new_entries) * 2):
        error_respond.master_password_expired()
    for new_entry in new_entries:
        encrypted: str = storage.get(new_entry.key)
        if encrypted:
            metadata = json.loads(
                master_password.decrypt(
                    base64_decode(encrypted),
                    new_entry.key).decode())
            # Never cache the password itself in the lookup table.
            del metadata['password']
            metadata = base64_encode(simple_encrypt_from_dict(master_password, metadata))
            new_entry.meta_data = metadata
    KeyLookupTable.query.session.commit()
    entries = [
        {
            'key': entry.key,
            'hidden': entry.hidden,
            'metadata': simple_decrypt_then_json(master_password, entry.meta_data)
        } for entry in entries
    ]
    return SessionKey().encrypt_response(entries)
@bp.route('/persistent/', methods=['POST'])
@session_verify
# FIXME: this may not need to verify the master password
@master_password_verify
def persistent():
    """Return whether the entry for the posted key is persistent."""
    data = json.loads(request.decrypted_data.decode())
    try:
        key = data["key"]
    except KeyError:
        return error_respond.invalid_post_data()
    # storage.get(key, True) presumably returns a (value, persistence) pair
    # when the second argument is truthy -- confirm with the STORAGE
    # implementation; only the persistence flag is used here.
    persistence = current_app.config['STORAGE'].get(key, True)[1]
    if persistence is not None:
        return jsonify(result=persistence)
    else:
        return error_respond.key_not_found()
@bp.route('/get/', methods=['POST'])
@session_verify
@master_password_verify
def get():
    """Return the decrypted password payload for the posted key."""
    master_password: MasterPassword = current_app.config['MASTER_PASSWORD']
    data = json.loads(request.decrypted_data.decode())
    try:
        key = data["key"]
    except KeyError:
        return error_respond.invalid_post_data()
    get_password = current_app.config['STORAGE'].get(key)
    if get_password is not None:
        password_entry = base64_decode(get_password)
        # Decrypt with the entry key as context, re-encrypt for the session.
        return SessionKey().encrypt_response(master_password.decrypt(password_entry, key))
    else:
        return error_respond.key_not_found()
# FIXME: catch database error
@bp.route('/new/', methods=['POST'])
@session_verify
@master_password_verify(2)
def new():
    """Create a new password entry and return the generated key."""
    master_password: MasterPassword = current_app.config['MASTER_PASSWORD']
    data = json.loads(request.decrypted_data.decode())
    try:
        # The lookup-table metadata copy must not contain the password.
        del data['password']
    except KeyError:
        # ignore it if the `password` entry is not provided
        pass
    entry = KeyLookupTable.new_entry(base64_encode(simple_encrypt_from_dict(master_password, data)))
    # The full (password-bearing) payload goes into encrypted storage.
    current_app.config['STORAGE'].add(entry.key,
                                      base64_encode(
                                          master_password.encrypt(request.decrypted_data.decode(), entry.key)))
    return SessionKey().encrypt_response({'key': entry.key})
@bp.route('/modify/', methods=['POST'])
@session_verify
@master_password_verify(4)
def modify():
    """Apply the posted 'modified' fields to an existing entry.

    Updates both the lookup-table metadata (password never stored there)
    and the encrypted storage payload.
    """
    master_password: MasterPassword = current_app.config['MASTER_PASSWORD']
    data = json.loads(request.decrypted_data.decode())
    modify_key = data.get('key')
    if modify_key is None or 'modified' not in data:
        error_respond.invalid_post_data()
    entry = KeyLookupTable.query.get(modify_key)
    if entry is None:
        error_respond.key_not_found()
    # The password is applied only to the encrypted payload, never to the
    # cached metadata.
    password = data['modified'].get('password')
    if password:
        del data['modified']['password']
    # modify metadata
    metadata = simple_decrypt_then_json(master_password, entry.meta_data)
    for k in data['modified']:
        metadata[k] = data['modified'][k]
    entry.meta_data = base64_encode(simple_encrypt_from_dict(master_password, metadata))
    KeyLookupTable.query.session.commit()
    # modify storage
    storage = current_app.config['STORAGE']
    encrypted = storage.get(modify_key)
    if encrypted is None:
        # Lookup table and storage are out of sync; fail like delete() does
        # instead of crashing on base64_decode(None).
        error_respond.key_not_found()
    password_data = json.loads(
        master_password.decrypt(
            base64_decode(encrypted),
            modify_key).decode())
    for k in data['modified']:
        password_data[k] = data['modified'][k]
    if password:
        password_data['password'] = password
    storage.add(modify_key,
                base64_encode(
                    master_password.encrypt(json.dumps(password_data), modify_key)))
    return SessionKey().encrypt_response({'key': modify_key})
@bp.route('/delete/', methods=['POST'])
@session_verify
@master_password_verify
def delete():
    """Remove a password entry from both storage and the lookup table."""
    storage = current_app.config['STORAGE']
    data = json.loads(request.decrypted_data.decode())
    key = data.get('key')
    if key is None:
        error_respond.invalid_post_data()
    if storage.get(key) is None:
        error_respond.key_not_found()
    current_app.config['STORAGE'].delete(key)
    KeyLookupTable.query.filter_by(key=key).delete()
    KeyLookupTable.query.session.commit()
    return jsonify(message='Success')
@bp.route('/mark/', methods=['POST'])
@session_verify
@master_password_verify
def mark():
    """Set the hidden flag of an entry to the posted 'hidden' value."""
    data = json.loads(request.decrypted_data.decode())
    key = data.get('key')
    if key is None:
        error_respond.invalid_post_data()
    if 'hidden' not in data:
        error_respond.invalid_post_data()
    entry = KeyLookupTable.query.get(key)
    if entry is None:
        error_respond.key_not_found()
    entry.hidden = data['hidden']
    KeyLookupTable.query.session.commit()
    return jsonify(message='Success')
8161971 | <reponame>supercatex/Machine_Learning
import numpy as np
from keras.models import Sequential
from keras import layers
from keras import activations
from keras import optimizers
from keras import losses
from keras import metrics
from keras.callbacks import EarlyStopping
import matplotlib.pyplot as plt
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Build a single-neuron linear model and fit it to y = x.
model = Sequential()

# Input Layer: one input feature -> one linear output unit.
input_layer = layers.Dense(
    input_dim=1,
    units=1,
    activation=activations.linear
)
model.add(input_layer)

# Compile. Bug fix: the optimizer must be an instance (or string name), not
# the class object itself, for Keras to interpret it.
model.compile(
    optimizer=optimizers.SGD(),
    loss=losses.mean_squared_error,
    metrics=[metrics.mean_squared_error]
)
model.summary()

X = np.array([[1], [2], [3], [4], [5], [6], [7], [8], [9]])
y = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])

# Stop training once the loss stops improving for 5 consecutive epochs.
earlyStopping = EarlyStopping(monitor='loss', min_delta=1e-10, patience=5, verbose=1, mode='auto')
history = model.fit(
    X, y,
    epochs=1000,
    verbose=1,
    batch_size=128,
    shuffle=True,
    callbacks=[earlyStopping]
)

# Report the learned line y = w*x + b and a sample prediction.
print("Layer 1 weight thetas: ", input_layer.get_weights()[0])
print("Layer 1 weight biases: ", input_layer.get_weights()[1])
print("y = %f * x + %f" % (input_layer.get_weights()[0][0][0], input_layer.get_weights()[1][0]))
print("Predicts: ", model.predict(np.array([[10]])))

plt.plot(history.history['loss'])
plt.plot(history.history['mean_squared_error'])
plt.title('Model training history')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.show()
| StarcoderdataPython |
12847250 | # Copyright (c) 2019, MD2K Center of Excellence
# - <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
from typing import List
class KafkaOffsetsHandler:
    """Persists Kafka topic offsets through the host class's ``execute``
    helper. Expects ``self.kafkaOffsetsTable``, ``self.study_name`` and
    ``self.execute`` to be provided by the mixing-in class."""

    def store_or_update_Kafka_offset(self, topic_partition: str, offset_start: str, offset_until: str) -> bool:
        """
        Store or Update kafka topic offsets. Offsets are used to track what messages have been processed.

        Args:
            topic_partition (str): kafka topic partition
            offset_start (str): starting of offset
            offset_until (str): last processed offset

        Raises:
            ValueError: All params are required.
            Exception: Cannot add/update kafka offsets because ERROR-MESSAGE

        Returns:
            bool: returns True if offsets are add/updated or throws an exception.
        """
        # Bug fix: the original joined the checks with `and`, which only
        # raised when ALL params were missing; each param is required.
        if not topic_partition or not offset_start or not offset_until:
            raise ValueError("All params are required.")
        try:
            qry = "REPLACE INTO " + self.kafkaOffsetsTable + " (topic, topic_partition, offset_start, offset_until) VALUES(%s, %s, %s, %s)"
            vals = str(self.study_name), str(topic_partition), str(offset_start), json.dumps(offset_until)
            self.execute(qry, vals, commit=True)
            return True
        except Exception as e:
            raise Exception("Cannot add/update kafka offsets because " + str(e))

    def get_kafka_offsets(self) -> List[dict]:
        """
        Get last stored kafka offsets for this study's topic, newest first.

        Returns:
            list[dict]: list of kafka offsets. Empty when the topic does not
            exist and/or no offset is stored for it.
        """
        results = []
        qry = "SELECT * from " + self.kafkaOffsetsTable + " where topic = %(topic)s order by id DESC"
        vals = {'topic': str(self.study_name)}
        rows = self.execute(qry, vals)
        if rows:
            for row in rows:
                results.append(row)
            return results
        else:
            return []
| StarcoderdataPython |
1779256 | # Copyright 2020 AppScale Systems, Inc
# SPDX-License-Identifier: BSD-2-Clause
from awscli_plugin_logs_tail.tail import TailCommand
def awscli_initialize(cli):
    """AWS CLI plugin entry point: hook the build of the `logs` command table."""
    build_event = 'building-command-table.logs'
    cli.register(build_event, inject_tail_command)
def inject_tail_command(command_table, session, **kwargs):
    # Event handler: add a `tail` subcommand (backed by TailCommand) to the
    # `aws logs` command table. Extra event kwargs are ignored.
    command_table['tail'] = TailCommand(session)
| StarcoderdataPython |
1657415 | import warnings
import numpy as np
import pandas as pd
from scipy import optimize
from autocnet.camera import camera
from autocnet.camera import utils as camera_utils
from autocnet.utils.utils import make_homogeneous, normalize_vector
try:
import cv2
cv2_avail = True
except: # pragma: no cover
cv_avail = False
def compute_epipolar_lines(F, x, index=None):
    """
    Compute the epipolar lines F.x for a fundamental matrix and a set of
    homogeneous points.

    Parameters
    ----------
    F : ndarray
        of shape (3,3) that represents the fundamental matrix

    x : ndarray
        of shape (n, 3) of homogeneous coordinates

    index : iterable, optional
        if given, the lines are returned as a pd.DataFrame with this index

    Returns
    -------
    lines : ndarray or pd.DataFrame
        of shape (n,3) of epipolar lines in standard form (a, b, c)
    """
    if isinstance(x, pd.DataFrame):
        x = x.values

    if not x.shape[1] == 3:
        raise ValueError('The input points must be homogenous with shape (n,3)')

    # Compute the unnormalized epipolar lines; np.inner yields shape (3, n)
    lines = np.inner(F, x)

    # Normalize so that (a, b) has unit length.
    # BUG FIX: the original wrapped the division in a bare `except` intending
    # to fall back to a scale of 1, but NumPy does not raise on
    # divide-by-zero (it returns inf with a RuntimeWarning), so degenerate
    # lines silently became inf/nan.  Scale only the lines with a non-zero
    # norm and leave degenerate ones untouched, which is what the fallback
    # intended.
    norm_sq = lines[0] ** 2 + lines[1] ** 2
    scale = np.ones(norm_sq.shape, dtype=float)
    nonzero = norm_sq > 0
    scale[nonzero] = 1 / np.sqrt(norm_sq[nonzero])

    # Inner transposes the result, so transpose back into the 3 column form
    lines = (lines * scale).T

    if index is not None:
        lines = pd.DataFrame(lines, columns=['a', 'b', 'c'], index=index)

    return lines
def epipolar_distance(lines, pts):
    """
    Given a set of epipolar lines and a set of points, compute the euclidean
    distance between each point and the corresponding epipolar line

    Parameters
    ----------
    lines : ndarray
            of shape (n,3) of epipolar lines in standard form (a, b, c)
    pts : ndarray
          of shape (n, 3) of homogeneous coordinates
    """
    # Point-line distance: |a*x + b*y + c| / sqrt(a^2 + b^2)
    a, b, c = lines[:, 0], lines[:, 1], lines[:, 2]
    numerator = np.abs(a * pts[:, 0] + b * pts[:, 1] + c)
    denominator = np.sqrt(a ** 2 + b ** 2)
    return numerator / denominator
def compute_reprojection_error(F, x, x1, index=None):
    r"""
    Given a set of matches and a known fundamental matrix,
    compute distance between match points and the associated
    epipolar lines.

    The distance between a point and the associated epipolar
    line is computed as: :math:`d = \frac{\lvert ax_{0} + by_{0} + c \rvert}{\sqrt{a^{2} + b^{2}}}`.

    Parameters
    ----------
    F : ndarray
        (3,3) Fundamental matrix

    x : arraylike
        (n,2) or (n,3) array of homogeneous coordinates

    x1 : arraylike
        (n,2) or (n,3) array of homogeneous coordinates with the same
        length as argument x

    index : iterable, optional
        if given, the errors are returned as a pd.Series with this index

    Returns
    -------
    F_error : ndarray or pd.Series
              n,1 vector of reprojection errors
    """
    # NOTE: the docstring above is a raw string on purpose -- the original
    # non-raw version turned "\frac" into a form-feed escape character.
    if isinstance(x, (pd.Series, pd.DataFrame)):
        x = x.values

    if isinstance(x1, (pd.Series, pd.DataFrame)):
        x1 = x1.values

    # Promote (n,2) pixel coordinates to homogeneous (n,3).
    if x.shape[1] != 3:
        x = make_homogeneous(x)
    if x1.shape[1] != 3:
        x1 = make_homogeneous(x1)

    # Compute the epipolar lines
    lines1 = compute_epipolar_lines(F,x)
    lines2 = compute_epipolar_lines(F.T, x1)

    # Compute the euclidean distance from the pt to the line
    d1 = epipolar_distance(lines2, x)
    d2 = epipolar_distance(lines1, x1)

    # Grab the max err from either reprojection
    err = np.max(np.column_stack((d1,d2)), axis=1)
    if index is not None:
        err = pd.Series(err, index=index)
    return err
def compute_fundamental_error(F, x, x1):
    r"""
    Compute the fundamental error using the idealized error metric.

    Ideal error is defined by :math:`x1^{\intercal}Fx = 0`, i.e. every
    correspondence should lie exactly on its epipolar line, so a perfect
    fit yields a vector of zeros.

    This method assumes that x and x1 are ordered such that x[0]
    corresponds to x1[0].

    Parameters
    ----------
    F : ndarray
        (3,3) Fundamental matrix

    x : arraylike
        (n,2) or (n,3) array of homogeneous coordinates

    x1 : arraylike
        (n,2) or (n,3) array of homogeneous coordinates with the same
        length as argument x

    Returns
    -------
    F_error : ndarray
              n,1 vector of idealized errors
    """
    if x.shape[1] != 3:
        x = make_homogeneous(x)
    if x1.shape[1] != 3:
        x1 = make_homogeneous(x1)

    if isinstance(x, pd.DataFrame):
        x = x.values
    if isinstance(x1, pd.DataFrame):
        x1 = x1.values

    # Vectorized form of err[i] = x1[i].T @ F @ x[i]; resolves the old
    # "can this be vectorized?" TODO without changing the result.
    err = np.einsum('ij,jk,ik->i', x1, F, x).astype(float)
    return err
def update_fundamental_mask(F, x1, x2, threshold=1.0, index=None, method='reprojection'):
    """
    Given a Fundamental matrix and two sets of points, compute the
    error between x1 and x2.  A mask is returned with all
    errors greater than the threshold set to False.

    Parameters
    ----------
    F : ndarray
        (3,3) Fundamental matrix

    x1 : arraylike
        (n,2) or (n,3) array of homogeneous coordinates

    x2 : arraylike
        (n,2) or (n,3) array of homogeneous coordinates

    threshold : float
        The new upper limit for error.  If using
        reprojection this is measured in pixels (the default).  If
        using fundamental, the idealized error is 0.  Values +- 0.05
        should be good.

    index : ndarray
        Optional index for mapping between reprojective error
        and an associated dataframe (e.g., an indexed matches dataframe).

    method : {'reprojection', 'fundamental'}
        Which error metric to use.

    Returns
    -------
    mask : pd.DataFrame
        Single boolean column named 'fundamental'.

    Raises
    ------
    ValueError
        If `method` is not a recognized error metric.
    """
    if method == 'reprojection':
        error = compute_reprojection_error(F, x1, x2)
    elif method == 'fundamental':
        error = compute_fundamental_error(F, x1, x2)
    else:
        # BUG FIX: the original only emitted a warning here and then crashed
        # with a NameError on the undefined `error`; fail fast instead.
        raise ValueError('Unknown error method. Options are "reprojection" or "fundamental".')

    # `index` is applied directly by the constructor; the original set it a
    # second time afterwards, which was redundant.
    return pd.DataFrame(np.abs(error) <= threshold, index=index, columns=['fundamental'])
def enforce_singularity_constraint(F):
    """
    The fundamental matrix should be rank 2. In instances when it is not,
    the singularity constraint should be enforced. This forces epipolar
    lines to be coincident.

    Parameters
    ----------
    F : ndarray
        (3,3) Fundamental Matrix

    Returns
    -------
    F : ndarray
        (3,3) Singular Fundamental Matrix

    References
    ----------
    .. [Hartley2003]
    """
    # Already rank 2: nothing to enforce.
    if np.linalg.matrix_rank(F) == 2:
        return F
    # Project onto the nearest rank-2 matrix by zeroing the smallest
    # singular value.
    u, d, vt = np.linalg.svd(F)
    d[2] = 0
    return u.dot(np.diag(d)).dot(vt)
def compute_fundamental_matrix(kp1, kp2, method='mle', reproj_threshold=2.0,
                               confidence=0.99, mle_reproj_threshold=0.5):
    """
    Given two arrays of keypoints compute the fundamental matrix.

    Parameters
    ----------
    kp1 : arraylike
          (n, 2) of coordinates from the source image

    kp2 : ndarray
          (n, 2) of coordinates from the destination image

    method : {'mle', 'ransac', 'lmeds', 'normal', '8point'}
             The outlier detection algorithm.  'mle' seeds with RANSAC and
             then refines F with a non-linear (gold standard) optimization.

    reproj_threshold : float
                       The maximum distance in pixels a reprojected point
                       can be from the epipolar line to be considered an inlier

    confidence : float
                 [0, 1] that the estimated matrix is correct

    mle_reproj_threshold : float
                           Pixel threshold used to recompute the inlier mask
                           after the MLE refinement step.

    Returns
    -------
    F : ndarray
        A 3x3 fundamental matrix

    mask : pd.Series
           A boolean mask identifying those points that are valid.

    Notes
    -----
    While the method is user definable, if the number of input points
    is < 7, normal outlier detection is automatically used, if 7 > n > 15,
    least medians is used, and if 7 > 15, ransac can be used.
    """
    if method == 'mle':
        # Grab an initial estimate using RANSAC, then apply MLE
        method_ = cv2.FM_RANSAC
    elif method == 'ransac':
        method_ = cv2.FM_RANSAC
    elif method == 'lmeds':
        method_ = cv2.FM_LMEDS
    elif method == 'normal':
        method_ = cv2.FM_7POINT
    elif method == '8point':
        method_ = cv2.FM_8POINT
    else:
        # BUG FIX: the original message advertised 'lme' instead of 'mle'.
        raise ValueError("Unknown estimation method. Choices are: 'mle', 'ransac', 'lmeds', '8point', or 'normal'.")

    if len(kp1) == 0 or len(kp2) == 0:
        warnings.warn("F-matix computation failed. One of the keypoint args is empty. kp1:{}, kp2:{}.".format(len(kp1), len(kp2)))
        return None, None

    # OpenCV wants arrays; the keyword names changed across versions.
    try:  # OpenCV < 3.4.1
        F, mask = cv2.findFundamentalMat(np.asarray(kp1),
                                         np.asarray(kp2),
                                         method_,
                                         param1=reproj_threshold,
                                         param2=confidence)
    except TypeError:  # OpenCV >= 3.4.1 renamed the keyword arguments
        F, mask = cv2.findFundamentalMat(np.asarray(kp1),
                                         np.asarray(kp2),
                                         method_,
                                         ransacReprojThreshold=reproj_threshold,
                                         confidence=confidence)
    if F is None:
        warnings.warn("F computation failed with no result. Returning None.")
        return None, None
    if F.shape != (3,3):
        warnings.warn('F computation fell back to 7-point algorithm, not setting F.')
        return None, None

    # Ensure that the singularity constraint is met
    F = enforce_singularity_constraint(F)

    try:
        mask = mask.astype(bool).ravel()  # Enforce dimensionality
    except Exception:
        # BUG FIX: the original bare `return` produced a single None while
        # every other failure path returns a (None, None) pair.
        return None, None  # pragma: no cover

    if method == 'mle':
        # Now apply the gold standard algorithm to refine F
        if kp1.shape[1] != 3:
            kp1 = make_homogeneous(kp1)
        if kp2.shape[1] != 3:
            kp2 = make_homogeneous(kp2)
        # Generate an idealized and to be updated camera model
        p1 = camera.camera_from_f(F)
        p = camera.idealized_camera()
        if kp1[mask].shape[0] <= 12 or kp2[mask].shape[0] <= 12:
            warnings.warn("Unable to apply MLE. Not enough correspondences. Returning with a RANSAC computed F matrix.")
            return F, mask
        # Apply Levenberg-Marquardt to perform a non-linear lst. squares fit
        # to minimize triangulation error (this is a local bundle)
        result = optimize.least_squares(camera.projection_error, p1.ravel(),
                                        args=(p, kp1[mask].T, kp2[mask].T),
                                        method='lm')
        gold_standard_p = result.x.reshape(3, 4)  # SciPy Lst. Sq. requires a vector, camera is 3x4
        gold_standard_f = camera_utils.crossform(gold_standard_p[:,3]).dot(gold_standard_p[:,:3])
        F = gold_standard_f
        mask = update_fundamental_mask(F, kp1, kp2,
                                       threshold=mle_reproj_threshold).values
    return F, mask
| StarcoderdataPython |
6483077 | <filename>WebMirror/management/rss_parser_funcs/feed_parse_extractYoursiteCom.py
def extractYoursiteCom(item):
    '''
    Parser for 'yoursite.com'
    Note: Feed returns incorrect URLs! Actual site is pbsnovel.rocks

    Returns a release message for "Peerless Battle Spirit" chapters, None for
    items without chapter/volume info (or previews), and False for anything
    that does not match the expected title/tag structure.
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or "preview" in item['title'].lower():
        return None
    # Rewrite the bogus feed host onto the real site.
    item['guid'] = item['guid'].replace('http://yoursite.com/', 'http://pbsnovel.rocks/')
    item['linkUrl'] = item['linkUrl'].replace('http://yoursite.com/', 'http://pbsnovel.rocks/')
    if not item['title'].startswith("Chapter"):
        return False
    if len(item['tags']) != 1:
        return False
    chp_tag = item['tags'][0]
    # The items right now have titles that start with "Chapter", and a single tag with the format "int-int"
    # Validate that structure before assuming it's a tag for PBS
    try:
        chp_1, chp_2 = chp_tag.split("-")
        int(chp_1)
        int(chp_2)
        return buildReleaseMessageWithType(item, "Peerless Battle Spirit", vol, chp, frag=frag, postfix=postfix, tl_type='translated')
    except:
        # NOTE(review): the bare except also swallows unrelated errors raised
        # by buildReleaseMessageWithType -- consider catching ValueError only.
        return False
    return False  # unreachable: both try/except branches above return
3548850 | # Generated by Django 2.1.5 on 2020-02-07 14:20
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: renames the misspelled field
    # 'tutorial_tittle' to 'tutorial_title' on the Tutorial model.
    dependencies = [
        ('main', '0002_auto_20200207_1349'),
    ]
    operations = [
        migrations.RenameField(
            model_name='tutorial',
            old_name='tutorial_tittle',
            new_name='tutorial_title',
        ),
    ]
| StarcoderdataPython |
3260694 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import QueueBase
from ssdb.connection import BlockingConnectionPool
from ssdb import SSDB
import json
class QueueSSDB(QueueBase.QueueBase):
    """FIFO queue backed by an SSDB list (qpush_back / qpop_front)."""
    def __init__(self, name, host='localhost', port=8888, **kwargs):
        QueueBase.QueueBase.__init__(self, name, host, port)
        # Blocking connection pool: callers wait for a free connection
        # instead of failing when the pool is exhausted.
        self.__conn = SSDB(connection_pool=BlockingConnectionPool(host=self.host, port=self.port))
    @QueueBase.catch
    def put(self, value, *args, **kwargs):
        # dicts/lists are stored JSON-encoded; other values are pushed as-is.
        return self.__conn.qpush_back(self.name,
                                      json.dumps(value) if isinstance(value, dict) or isinstance(value, list) else value)
    @QueueBase.catch
    def get(self, *args, **kwargs):
        # qpop_front presumably returns a list of popped items (or a falsy
        # value when the queue is empty) -- TODO confirm against the ssdb
        # client; only the first popped item is returned here.
        value = self.__conn.qpop_front(self.name)
        return value[0] if value else value
    @QueueBase.catch
    def size(self, *args, **kwargs):
        return self.__conn.qsize(self.name)
| StarcoderdataPython |
387123 | <filename>tests/test_Vitodens200W.py
import unittest
from tests.ViCareServiceMock import ViCareServiceMock
from PyViCare.PyViCareGazBoiler import GazBoiler
from PyViCare.PyViCare import PyViCareNotSupportedFeatureError
import PyViCare.Feature
class Vitodens200W(unittest.TestCase):
    """Tests for the Vitodens 200-W gas boiler, replayed against a canned
    JSON API response instead of a live ViCare service."""

    def setUp(self):
        self.service = ViCareServiceMock('response_Vitodens200W.json', 0)
        self.device = GazBoiler(None, None, None, 0, 0, self.service)
        # Default mode for these tests: unsupported features raise.
        PyViCare.Feature.raise_exception_on_not_supported_device_feature = True

    def test_getBurnerActive(self):
        self.assertEqual(self.device.getBurnerActive(), False)

    def test_getBurnerStarts(self):
        self.assertEqual(self.device.getBurnerStarts(), 17169)

    def test_getBurnerHours(self):
        self.assertEqual(self.device.getBurnerHours(), 1589.3)

    def test_getBurnerModulation(self):
        self.assertEqual(self.device.getBurnerModulation(), 0)

    def test_getPowerConsumptionDays_fails(self):
        self.assertRaises(PyViCareNotSupportedFeatureError, self.device.getPowerConsumptionDays)

    def test_getPrograms(self):
        # BUG FIX: this test was defined twice with identical expectations;
        # the second definition silently shadowed the first.  Deduplicated.
        expected_programs = ['active', 'comfort', 'eco', 'external', 'holiday', 'normal', 'reduced', 'standby']
        self.assertListEqual(self.device.getPrograms(), expected_programs)

    def test_getModes(self):
        expected_modes = ['standby', 'dhw', 'dhwAndHeating', 'forcedReduced', 'forcedNormal']
        self.assertListEqual(self.device.getModes(), expected_modes)

    def test_ensure_old_behavior_non_supported_feature_returns_error(self):
        PyViCare.Feature.raise_exception_on_not_supported_device_feature = False
        self.assertEqual(self.device.getPowerConsumptionDays(), "error")

    def test_getFrostProtectionActive(self):
        self.assertEqual(self.device.getFrostProtectionActive(), False)

    def test_getDomesticHotWaterCirculationPumpActive(self):
        self.assertEqual(self.device.getDomesticHotWaterCirculationPumpActive(), True)
| StarcoderdataPython |
8045116 | #!/usr/bin/env python3
from data_loader import DataLoader
from keras import backend as K
import keras as ker
from keras.models import Sequential, Model
from keras.layers import Dense, Conv2D, Flatten, MaxPool2D, Reshape
from keras.layers import Conv2DTranspose
from keras.layers import ZeroPadding2D, ZeroPadding3D
from keras.layers import Input, Lambda
from keras.losses import mse, binary_crossentropy
import keras.regularizers as reg
import keras.optimizers as opt
import matplotlib.pyplot as plt
# HDF5 file with labeled ATTPC event images (proton / carbon / junk / noise).
# NOTE(review): hard-coded absolute path -- only resolves on the author's machine.
file_location = "/home/solli-comphys/github/VAE-event-classification/data/real/packaged/x-y/proton-carbon-junk-noise.h5"
X_train, y_train, X_test, y_test = DataLoader(file_location)
def rgb2gray(rgb):
    """Collapse an (N, H, W, 3) RGB batch to (N, H, W) grayscale using the
    ITU-R BT.601 luma weights."""
    weights = (0.2989, 0.5870, 0.1140)
    return sum(w * rgb[:, :, :, channel] for channel, w in enumerate(weights))
# Convert RGB to grayscale and scale 8-bit pixel values into [0, 1].
X_train = rgb2gray(X_train) / 255
X_test = rgb2gray(X_test) / 255
# Append a trailing channel axis: (N, H, W) -> (N, H, W, 1) for Conv2D.
X_train = X_train.reshape(X_train.shape + (1,))
X_test = X_test.reshape(X_test.shape + (1,))
def sampling(args):
    """Reparameterization trick by sampling from an isotropic unit Gaussian.

    # Arguments:
        args (tensor): mean and log of variance of Q(z|X)

    # Returns:
        z (tensor): sampled latent vector
    """
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    # z = mu + sigma * eps, with sigma = exp(log_var / 2)
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
# --- Convolutional encoder -------------------------------------------------
kernel_size = 4
filters = 20
latent_dim = 10
num_layers = 2
in_layer = Input(shape=(128, 128, 1))
h1 = in_layer
shape = K.int_shape(h1)
for i in range(1, num_layers + 1):
    # Double the filter count at each stride-2 downsampling stage.
    filters *= 2
    h1 = Conv2D(
        filters,
        kernel_size,
        activation="relu",
        strides=2,
        padding="same",
        use_bias=True,
        kernel_regularizer=reg.l2(0.01),
        bias_regularizer=reg.l2(0.01),
    )(h1)
# Remember the final conv feature-map shape; the decoder mirrors it.
shape = K.int_shape(h1)
h1 = Flatten()(h1)
h1 = Dense(16, activation="relu")(h1)
# Latent Gaussian parameters (`var` actually holds the log-variance).
mean = Dense(latent_dim)(h1)
var = Dense(latent_dim)(h1)
sample = Lambda(sampling, output_shape=(latent_dim,))([mean, var])
encoder = Model(in_layer, [mean, var, sample], name="encoder")
encoder.summary()
# %%
# --- Deconvolutional decoder -----------------------------------------------
latent_inputs = Input(shape=(latent_dim,), name="z_sampling")
de1 = Dense(shape[1] * shape[2] * shape[3], activation="relu")(latent_inputs)
de1 = Reshape((shape[1], shape[2], shape[3]))(de1)
for i in reversed(range(1, num_layers + 1)):
    de1 = Conv2DTranspose(
        filters=filters,
        kernel_size=kernel_size,
        activation="relu",
        strides=2,
        padding="same",
        use_bias=True,
        kernel_regularizer=reg.l2(0.01),
        bias_regularizer=reg.l2(0.01),
    )(de1)
    # Halve the filter count on the way back up to the image resolution.
    filters //= 2
# Sigmoid output reconstructs a single-channel image in [0, 1].
outputs = Conv2DTranspose(
    filters=1,
    kernel_size=kernel_size,
    activation="sigmoid",
    padding="same",
    use_bias=True,
    kernel_regularizer=reg.l2(0.01),
    bias_regularizer=reg.l2(0.01),
    name="decoder_output",
)(de1)
# NOTE(review): `input=`/`output=` are deprecated Keras kwargs; the rest of
# this file uses the positional form -- confirm against the pinned version.
decoder = Model(input=latent_inputs, output=outputs)
# End-to-end VAE: encode, take the sampled z (index 2), decode.
outputs = decoder(encoder(in_layer)[2])
vae = Model(in_layer, outputs, name="vae")
def vae_loss(y_true, y_pred):
    """VAE loss: reconstruction cross-entropy plus KL divergence to N(0, I)."""
    # NOTE(review): the 784 scale factor looks copied from a 28x28 MNIST
    # example; the images here are 128x128 -- confirm it is intentional.
    xent_loss = binary_crossentropy(K.flatten(y_true), K.flatten(y_pred)) * 784
    kl_loss = -0.5 * K.sum(1 + var - K.square(mean) - K.exp(var), axis=-1)
    vae_loss = K.mean(xent_loss + kl_loss)
    return vae_loss
vae.compile(optimizer="adam", loss=[vae_loss])
# %%
# Stop as soon as validation loss fails to improve by min_delta, restoring
# the best weights seen so far.
earlystop = ker.callbacks.EarlyStopping(
    monitor="val_loss",
    min_delta=2,
    patience=0,
    verbose=0,
    mode="auto",
    restore_best_weights=True,
)
tensorboard = ker.callbacks.TensorBoard(
    log_dir="./Graph", write_graph=True, histogram_freq=0, write_images=True
)
# Autoencoder training: the inputs are also the reconstruction targets.
vae.fit(
    X_train,
    X_train,
    validation_data=(X_test, X_test),
    epochs=20,
    batch_size=100,
    callbacks=[earlystop, tensorboard],
)
vae.save("/home/solli-comphys/github/VAE-event-classification/models/attpc_vae.h5")
encoder.save("/home/solli-comphys/github/VAE-event-classification/models/attpc_enc.h5")
decoder.save("/home/solli-comphys/github/VAE-event-classification/models/attpc_dec.h5")
4897912 | import os
import logging
from logging.handlers import RotatingFileHandler
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_moment import Moment
from flask_login import LoginManager
from config import Config
# Extension singletons; bound to an app instance in create_app()
# (the Flask application-factory pattern).
db = SQLAlchemy()
migrate = Migrate()
moment = Moment()
login_manager = LoginManager()
# Endpoint users are redirected to when @login_required fails.
login_manager.login_view = 'auth.login'
def create_app(test_config=None):
    """Application factory: create and configure the Flask app.

    Args:
        test_config: optional mapping that overrides values from the Config
            object (used by the test suite).

    Returns:
        The configured Flask application.
    """
    # create and configure the app
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_object(Config)
    if test_config:
        # load the test config if passed in
        app.config.from_mapping(test_config)
    # a simple page that says hello
    @app.route('/hello')
    def hello():
        return 'Hello, World!'
    # Bind the module-level extension singletons to this app instance.
    db.init_app(app)
    migrate.init_app(app, db)
    moment.init_app(app)
    login_manager.init_app(app)
    # Blueprints are imported here, not at module level, to avoid
    # circular imports between the package and its views.
    from . import auth
    app.register_blueprint(auth.bp)
    from . import blog
    app.register_blueprint(blog.bp)
    app.add_url_rule('/', endpoint='index')
    # Production logging: rotate a local log file unless logs go to stdout.
    if not app.debug and not app.testing:
        if not app.config['LOG_TO_STDOUT']:
            if not os.path.exists('logs'):
                os.mkdir('logs')
            file_handler = RotatingFileHandler('logs/flaskr.log',
                maxBytes=10240, backupCount=10)
            file_handler.setFormatter(logging.Formatter(
                '%(asctime)s %(levelname)s: %(message)s '
                '[in %(pathname)s:%(lineno)d]'))
            file_handler.setLevel(logging.INFO)
            app.logger.addHandler(file_handler)
            app.logger.setLevel(logging.INFO)
            app.logger.info('Flaskr startup')
    return app
| StarcoderdataPython |
12810833 | # -*- coding: utf-8 -*-
import sys
import versioneer
# Refuse to install on Python 2.
# NOTE(review): the classifiers below advertise 3.6/3.7; consider adding
# python_requires='>=3.6' so pip can enforce it at resolution time.
if sys.version_info < (3, 0):
    print('\nInstaMsg requires at least Python 3.0!')
    sys.exit(1)
from setuptools import setup, find_packages
__version__ = versioneer.get_version()
cmdclass = versioneer.get_cmdclass()
with open('README.md') as f:
    readme = f.read()
# NOTE(review): `license` shadows the builtin, and setuptools expects a short
# license identifier for the `license` field, not the full LICENSE text --
# confirm intent before releasing.
with open('LICENSE') as f:
    license = f.read()
setup(
    name='InstaMsg',
    version=__version__,
    description='InstaMsg python library for IoT devices.',
    long_description=readme,
    long_description_content_type='text/markdown',
    author='SenseGrow Inc.',
    author_email='<EMAIL>',
    url='https://www.sensegrow.com',
    license=license,
    packages=find_packages('src'),
    package_dir={'': 'src'},
    install_requires=[
        'websocket-client>=0.54.0'
    ],
    classifiers=[
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    cmdclass=cmdclass,
)
6610841 | # -*- coding: UTF-8 -*-
import csv
import math
import operator
import random
import numpy as np
import pandas as pd
import os
from math import exp
import math
import sklearn
from sklearn import svm
from sklearn.model_selection import train_test_split
# Directory containing this module; used to resolve the "standard/" output path.
basedir = os.path.abspath(os.path.dirname(__file__))
def fit(weight_temp, train_ar, status):
    """Fitness of each candidate weight vector: the classification accuracy of
    the sign of the linear score against the label column.

    Args:
        weight_temp: (n_candidates, status) array of candidate weight vectors.
        train_ar: (n_samples, status + 1) array; the first `status` columns
            are features, the last column holds the class label (1 or 2).
        status: number of feature columns.

    Returns:
        ndarray of shape (n_candidates,) with the accuracy of each candidate.

    Note: the original iterated a hard-coded range(10) triple loop (and
    computed an unused per-term product); this version vectorizes the same
    computation and generalizes to any number of candidates.
    """
    weights = np.asarray(weight_temp, dtype=float)
    data = np.asarray(train_ar, dtype=float)
    features = data[:, :status]
    # Labels 1/2 map to -0.5/+0.5, so a prediction is correct iff
    # score * sign > 0 (a score of exactly 0 counts as wrong, as before).
    signs = data[:, status] - 1.5
    # (n_samples, n_candidates) score matrix replaces the inner loops.
    scores = features @ weights.T
    correct = scores * signs[:, None] > 0
    return correct.mean(axis=0)
def SCA(generation,filename):
    """Sine Cosine Algorithm: evolve 10 linear classifiers for `generation`
    iterations on the z-scored spreadsheet `data_train/<filename>` and return
    the best weight vector with its fitness appended as the last element.

    Side effects: writes the per-column means/stds to `standard/<name>` and
    prints progress each generation.
    """
    path = "data_train/" + filename
    datafile = path # parameter initialisation
    data = pd.read_excel(datafile, header = None) # read the data
    data1 = (data - data.mean())/data.std() # zero-mean (z-score) normalisation
    train_ar = np.array(data1)
    #train_ar.dtype = 'float64'
    train_ar1 = train_ar
    # The last column is the class label; the rest are features.
    status = np.shape(train_ar)[1]
    status = status - 1
    mean = data.mean()
    std = data.std()
    mest = np.zeros((2,status))
    ## support vector machine baseline (kept commented out for reference)
    # x,y=np.split(train_ar1,indices_or_sections=(status,),axis=1) # x is the data, y the labels
    # train_data,test_data,train_label,test_label =sklearn.model_selection.train_test_split(x,y, random_state=1, train_size=0.8,test_size=0.2)
    # train_label.dtype = 'int64'
    # test_label.dtype = 'int64'
    # classifier=svm.SVC(C=2,kernel='rbf',gamma=10,decision_function_shape='ovo')
    # classifier.fit(train_data,train_label.ravel())
    # print("training set:",classifier.score(train_data,train_label))
    # print("test set:",classifier.score(test_data,test_label))
    ##
    # Persist the normalisation parameters so inference can reuse them.
    for i in range(0,status):
        mest[0][i] = mean[i]
    for i in range(0,status):
        mest[1][i] = std[i]
    file_name = filename.split('.')[0]
    mestpath = basedir + "/standard/" + file_name
    np.savetxt(mestpath,mest)
    # Random initial population: 10 weight vectors with entries in [-1, 1).
    weight_origin = 2 * np.random.random((10,status)) - 1
    #print(train_ar)
    fitness = fit(weight_origin , train_ar,status)
    best = 0
    max = 0  # NOTE(review): shadows the builtin max()
    for i in range(len(fitness)):
        if fitness[i] > max:
            max = fitness[i]
            best = i
    # for i in range(0,23):
    #     weight_origin[9] = weight_origin[best]
    T = generation
    # population_best holds the best weights plus their fitness at index `status`.
    population_best = np.zeros(status + 1)#
    for i in range(0,status):
        population_best[i] = weight_origin[best][i]
    population_best[status] = fitness[best]
    weight_temp = weight_origin
    for FEs in range(T):
        a1 = 5
        if FEs < 100:
            r1= a1-2 * a1 /(pow(math.e,FEs/T)); # r1 decreases linearly from a to 0
        else:
            r1= a1-FEs*( (a1) / T );
        for i in range(0,10):
            # if i != best:
            for j in range(0,status):#
                r2 = random.uniform(0,3.1415926)
                r3 = random.uniform(0, 2)
                r4 = random.random()
                # Move each coordinate towards/around the best solution along
                # a sine or cosine path (the core SCA update rule).
                if r4 >= 0.5:
                    weight_temp[i][j] = weight_temp[i][j] + r1*(math.sin(r2)) * abs((r3*population_best[j])-weight_temp[i][j])
                else:
                    weight_temp[i][j] = weight_temp[i][j] + r1*(math.cos(r2)) * abs((r3*population_best[j])-weight_temp[i][j])
        fitness = fit(weight_temp , train_ar,status)
        max = 0
        for l in range(len(fitness)):
            if fitness[l] >= max:
                max = fitness[l]
                best = l
        # NOTE(review): the stored fitness is overwritten unconditionally two
        # lines below, so population_best does not strictly track the
        # best-so-far across generations -- confirm this is intended.
        for i in range(0,status):
            if population_best[status] < fitness[best]:
                population_best[i] = weight_temp[best][i]
        population_best[status] = fitness[best]
        print( population_best[status] , best )
        # print(population_best)
        #print(fitness[best], best, sep = ",")
    weight_final = np.zeros(status)
    for i in range(0,status):
        weight_final[i] = weight_temp[best][i]
    return population_best
def train(generation, filename):
    """Convenience wrapper: run the SCA optimiser and return its result
    (best weight vector with the fitness appended)."""
    return SCA(generation, filename)
def Run(testSet):
    """Score a single sample with the trained weight vector; returns 1 or 0.

    NOTE(review): this function is currently broken -- `train()` is called
    without its required (generation, filename) arguments and will raise
    TypeError.  Also, the vector returned by train()/SCA has status+1 entries
    (the last being the fitness), while the loop below multiplies it against
    all testLength entries of testSet -- confirm the intended alignment.
    """
    traingroup = []
    testLength = len(testSet)
    for x in range(testLength):
        testSet[x] = float(testSet[x])
    # Index 10 is presumably a feature on a different scale -- TODO confirm.
    testSet[10] = round(testSet[10]*0.1,1)
    weight = train()
    result_sum = 0
    for i in range(testLength):
        result_sum =result_sum + (weight[i] * testSet[i])
    if result_sum > 0 :
        result = 1
    else:
        result = 0
    return result
| StarcoderdataPython |
11265584 | <reponame>DunnCreativeSS/cash_carry_leveraged_futures_arbitrageur
'''
Copyright (C) 2017-2020 <NAME> - <EMAIL>
Please see the LICENSE file for the terms and conditions
associated with this software.
'''
import logging
from decimal import Decimal
from sortedcontainers import SortedDict as sd
from yapic import json
from cryptofeed.defines import BID, ASK, BUY, HITBTC, L2_BOOK, SELL, TICKER, TRADES
from cryptofeed.feed import Feed
from cryptofeed.standards import pair_exchange_to_std, timestamp_normalize
# Shared logger used by all cryptofeed feed handlers.
LOG = logging.getLogger('feedhandler')
class HitBTC(Feed):
    """Websocket feed handler for the HitBTC exchange (API v2)."""
    id = HITBTC

    def __init__(self, pairs=None, channels=None, callbacks=None, **kwargs):
        super().__init__('wss://api.hitbtc.com/api/2/ws',
                         pairs=pairs,
                         channels=channels,
                         callbacks=callbacks,
                         **kwargs)

    async def _ticker(self, msg: dict, timestamp: float):
        # `timestamp` is the local receipt time; the exchange time is taken
        # from the message payload.
        await self.callback(TICKER, feed=self.id,
                            pair=pair_exchange_to_std(msg['symbol']),
                            bid=Decimal(msg['bid']),
                            ask=Decimal(msg['ask']),
                            timestamp=timestamp_normalize(self.id, msg['timestamp']),
                            receipt_timestamp=timestamp)

    async def _book(self, msg: dict, timestamp: float):
        """Apply an incremental L2 book update; a size of 0 deletes the level."""
        delta = {BID: [], ASK: []}
        pair = pair_exchange_to_std(msg['symbol'])
        for side in (BID, ASK):
            for entry in msg[side]:
                price = Decimal(entry['price'])
                size = Decimal(entry['size'])
                if size == 0:
                    if price in self.l2_book[pair][side]:
                        del self.l2_book[pair][side][price]
                        delta[side].append((price, 0))
                else:
                    self.l2_book[pair][side][price] = size
                    delta[side].append((price, size))
        await self.book_callback(self.l2_book[pair], L2_BOOK, pair, False, delta, timestamp, timestamp)

    async def _snapshot(self, msg: dict, timestamp: float):
        """Replace the book for this pair with a full snapshot."""
        pair = pair_exchange_to_std(msg['symbol'])
        self.l2_book[pair] = {ASK: sd(), BID: sd()}
        for side in (BID, ASK):
            for entry in msg[side]:
                price = Decimal(entry['price'])
                size = Decimal(entry['size'])
                self.l2_book[pair][side][price] = size
        await self.book_callback(self.l2_book[pair], L2_BOOK, pair, True, None, timestamp, timestamp)

    async def _trades(self, msg: dict, timestamp: float):
        pair = pair_exchange_to_std(msg['symbol'])
        for update in msg['data']:
            price = Decimal(update['price'])
            quantity = Decimal(update['quantity'])
            side = BUY if update['side'] == 'buy' else SELL
            order_id = update['id']
            # BUG FIX: the original rebound the `timestamp` parameter to the
            # exchange time here, so `receipt_timestamp` was also reported as
            # the exchange time and the true receipt time was lost (compare
            # _ticker, which keeps the two distinct).
            trade_timestamp = timestamp_normalize(self.id, update['timestamp'])
            await self.callback(TRADES, feed=self.id,
                                pair=pair,
                                side=side,
                                amount=quantity,
                                price=price,
                                order_id=order_id,
                                timestamp=trade_timestamp,
                                receipt_timestamp=timestamp)

    async def message_handler(self, msg: str, timestamp: float):
        """Dispatch a raw websocket message to the appropriate handler."""
        msg = json.loads(msg, parse_float=Decimal)

        if 'method' in msg:
            if msg['method'] == 'ticker':
                await self._ticker(msg['params'], timestamp)
            elif msg['method'] == 'snapshotOrderbook':
                await self._snapshot(msg['params'], timestamp)
            elif msg['method'] == 'updateOrderbook':
                await self._book(msg['params'], timestamp)
            elif msg['method'] == 'updateTrades' or msg['method'] == 'snapshotTrades':
                await self._trades(msg['params'], timestamp)
            else:
                LOG.warning("%s: Invalid message received: %s", self.id, msg)
        elif 'channel' in msg:
            if msg['channel'] == 'ticker':
                await self._ticker(msg['data'], timestamp)
            else:
                LOG.warning("%s: Invalid message received: %s", self.id, msg)
        else:
            # Subscription acknowledgements: only log failures.
            if 'error' in msg or not msg['result']:
                LOG.error("%s: Received error from server: %s", self.id, msg)

    async def subscribe(self, websocket):
        """Send one subscription request per (channel, pair)."""
        for channel in self.channels if not self.config else self.config:
            for pair in self.pairs if not self.config else self.config[channel]:
                await websocket.send(
                    json.dumps({
                        "method": channel,
                        "params": {
                            "symbol": pair
                        },
                        "id": 123
                    }))
| StarcoderdataPython |
12864748 | <reponame>uw-it-aca/course-roster-lti<gh_stars>0
from .base_settings import *
INSTALLED_APPS += [
    'course_roster.apps.CourseRosterConfig',
    'compressor',
]
# django-compressor: compile LESS to CSS and precompress assets offline.
COMPRESS_ROOT = '/static/'
COMPRESS_PRECOMPILERS = (('text/less', 'lessc {infile} {outfile}'),)
COMPRESS_OFFLINE = True
STATICFILES_FINDERS += ('compressor.finders.CompressorFinder',)
# Local development runs with DEBUG and no photo cache; every other
# environment caches ID card photos.
if os.getenv('ENV', 'localdev') == 'localdev':
    DEBUG = True
    RESTCLIENTS_DAO_CACHE_CLASS = None
else:
    RESTCLIENTS_DAO_CACHE_CLASS = 'course_roster.cache.IDCardPhotoCache'
COURSE_ROSTER_PER_PAGE = 50
# Expiry times below are in seconds (2 hours).
IDCARD_PHOTO_EXPIRES = 60 * 60 * 2
IDCARD_TOKEN_EXPIRES = 60 * 60 * 2
| StarcoderdataPython |
135038 | import abc
import numpy as np
import torch
from utils.rbf import *
from utils.normalizer import *
class IEncoder(nn.Module):
    """Base class for state encoders.

    `cfg` supplies 'history_count' (stacked frames per observation) and
    'history_features' (per-step feature width used by recurrent encoders).

    NOTE(review): the abstract methods use @abc.abstractmethod without an
    ABCMeta-based metaclass (nn.Module does not provide one), so
    instantiating an incomplete subclass is NOT actually prevented.
    """
    def __init__(self, cfg):
        super().__init__()
        self.n_history = cfg['history_count']
        self.n_features = cfg['history_features']
    @abc.abstractmethod
    def out_size(self):
        # Width of a single encoded state.
        pass
    def count(self):
        # Number of stacked history states per observation.
        return self.n_history
    def total_size(self):
        # Flattened width of the full observation (history x encoded state).
        return self.out_size() * self.count()
    @abc.abstractmethod
    def forward(self, states, history):
        pass
    def has_features(self):
        # True only for recurrent encoders that carry features between calls.
        return False
    def extract_features(self, states):
        # Run the encoder with an all-zero feature tensor of shape
        # (batch, 1, 1, n_features) -- presumably the RNN feature layout;
        # confirm against the recurrent encoder implementations.
        feats = torch.zeros(len(states), 1, 1, self.n_features)
        return self.forward(states, feats)
# better to rethink design of this ~ beacuse of RNN ~ features, multiple ? dont over-engineer though...
class StackedEncoder(IEncoder):
    """Composes two encoders: `encoder_a` maps raw states of width `size_in`
    and its output is fed into `encoder_b`."""
    def __init__(self, cfg, size_in, encoder_a, encoder_b):
        super().__init__({ 'history_count' : 1, 'history_features' : cfg['history_features'] }) # well this is questionable .. rethink .. redo
        self.size = size_in
        self.encoder_a = encoder_a
        self.encoder_b = encoder_b
        # BUG FIX: both original asserts tested encoder_a twice; this first
        # check is meant to reject *both* stages being recurrent.
        assert not self.encoder_a.has_features() or not self.encoder_b.has_features(), "only one RNN is allowed in encoder!"
        assert not self.encoder_a.has_features(), "Currently RNN can be only *last* layer of encoder!!"
    def out_size(self):
        return self.encoder_b.out_size()
    def has_features(self):
        # BUG FIX: the original ORed encoder_a with itself, so a recurrent
        # encoder_b was never reported.
        return self.encoder_a.has_features() or self.encoder_b.has_features()
    def forward(self, states, history):
        size = states.size(0)
        # Flatten any stacked-history dimension into the batch for stage A,
        # then restore the batch dimension before stage B.
        states, history = self.encoder_a(states.reshape(-1, self.size), history)
        states = states.reshape(size, -1)
        return self.encoder_b(states, history)
    def extract_features(self, states):
        states, features_a = self.encoder_a.extract_features(states)
        states, features_b = self.encoder_b.extract_features(states)
        return states, features_b if self.encoder_b.has_features() else features_a
class IdentityEncoder(IEncoder):
    """No-op encoder: states (and history) pass through unchanged."""
    def __init__(self, cfg, size):
        super().__init__(cfg)
        self.size = size  # width of the raw state vector
    def out_size(self):
        return self.size
    def forward(self, states, history):
        return states, history
class RBFEncoder(IEncoder):
    """Radial-basis-function featurizer: projects raw environment states
    through utils.rbf.RbfState fitted on samples from `env`."""
    def __init__(self, cfg, env, gamas, components, sampler = None):
        super().__init__(cfg)
        # Raw state width is taken from a sample observation.
        self.size = len(env.reset())
        self.encoder = RbfState(env, gamas, components, sampler)
    def out_size(self):
        return self.encoder.size
    def forward(self, states, history):
        size = states.size(0)
        # RbfState.transform works on flat numpy batches; fold the history
        # dimension into the batch, transform, then restore it as a tensor.
        states = self.encoder.transform(states.reshape(-1, self.size))
        return torch.from_numpy(states.reshape(size, -1)), history
class BatchNormalizer2D(IEncoder):
    """BatchNorm over flat (batch, history * state_size) observations."""
    def __init__(self, cfg, state_size):
        super().__init__(cfg)
        self.size = state_size * cfg['history_count']
        self.bn = nn.BatchNorm1d(self.size)
    def out_size(self):
        return self.size
    def forward(self, states, history):
        if states.size(0) > 1:
            return self.bn(states), history
        # BatchNorm1d cannot compute batch statistics from a single sample;
        # temporarily use eval mode so the running estimates are applied.
        # BUG FIX: the original called self.train() unconditionally afterwards,
        # forcing the module back into training mode even when it had been in
        # eval mode; save and restore the previous mode instead.
        was_training = self.training
        self.eval()
        out = self.bn(states)
        self.train(was_training)
        return out, history
class BatchNormalizer3D(IEncoder):
    """BatchNorm applied per state feature over (batch, state_size, history)."""
    def __init__(self, cfg, state_size):
        super().__init__(cfg)
        self.bn = nn.BatchNorm1d(state_size)
        self.size = state_size
    def out_size(self):
        return self.size
    def forward(self, states, history):
        full_shape = states.shape
        # View the observation as (batch, features, history-steps).
        states = states.reshape(states.size(0), self.size, -1)
        if states.size(0) > 1:
            return self.bn(states).reshape(full_shape), history
        # Single sample: use the running statistics (eval mode).
        # BUG FIX: restore the previous training mode instead of the original
        # unconditional self.train(), which broke inference-time callers.
        was_training = self.training
        self.eval()
        out = self.bn(states).reshape(full_shape)
        self.train(was_training)
        return out, history
class GlobalNormalizerWGrads(IEncoder):
    # Online running-statistics normalizer (see Normalizer); unlike
    # GlobalNormalizer below, its parameters keep requires_grad enabled.
    def __init__(self, cfg, state_size):
        super().__init__(cfg)
        self.bn = Normalizer(state_size)
    def out_size(self):
        return self.bn.size
    def forward(self, states, history):
        full_shape = states.shape
        # History frames are flattened so every frame contributes to the
        # running statistics before normalization.
        states = states.reshape(-1, self.bn.size)
        self.bn.update(states)
        return self.bn.normalize(states).reshape(full_shape), history
class GlobalNormalizer(GlobalNormalizerWGrads):
    # Same as GlobalNormalizerWGrads but with the normalizer's parameters
    # frozen, i.e. pure running-statistics normalization with no gradients.
    def __init__(self, cfg, state_size):
        super().__init__(cfg, state_size)
        for p in self.bn.parameters():
            p.requires_grad = False
| StarcoderdataPython |
1749753 | from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class SettingsDeviceSupportBundlesGridRemote(RemoteModel):
    """One row of the NetMRI "device support bundles" settings grid.

    All attributes are delivered by the NetMRI API as strings.  The
    ``*_ind`` fields are indicator flags describing which data-collection
    capabilities the bundle implements.

    | ``id``: bundle identifier
    | ``name``: bundle name
    | ``version``: bundle version
    | ``author``: bundle author
    | ``neighbor_ind``: neighbor-collection indicator
    | ``inventory_ind``: inventory-collection indicator
    | ``environmental_ind``: environmental-data indicator
    | ``cpu_ind``: CPU-metrics indicator
    | ``memory_ind``: memory-metrics indicator
    | ``vlan_ind``: VLAN-collection indicator
    | ``forwarding_ind``: forwarding-table indicator
    | ``port_ind``: port-data indicator
    | ``config_ind``: configuration-collection indicator
    | ``valid_ind``: bundle-validity indicator
    | ``unit_tests``: unit-test information
    | ``status``: bundle status

    All attribute types are ``string``.
    """
    properties = ("id",
                  "name",
                  "version",
                  "author",
                  "neighbor_ind",
                  "inventory_ind",
                  "environmental_ind",
                  "cpu_ind",
                  "memory_ind",
                  "vlan_ind",
                  "forwarding_ind",
                  "port_ind",
                  "config_ind",
                  "valid_ind",
                  "unit_tests",
                  "status",
                  )
| StarcoderdataPython |
3407411 | # Generated by Django 2.2.9 on 2020-02-06 23:52
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a free-text ``special_requests`` field (max 140 chars, defaults
    # to empty) to the Ride model.

    # Must be applied after the migration that added ``pass_num``.
    dependencies = [
        ('Ride_Share', '0033_registeredsharer_pass_num'),
    ]

    operations = [
        migrations.AddField(
            model_name='ride',
            name='special_requests',
            field=models.TextField(default='', max_length=140),
        ),
    ]
| StarcoderdataPython |
3451979 | # ---------------------------------------------------------------------
# Span handler
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import operator
import datetime
# NOC modules
from .base import BaseCard
from noc.core.clickhouse.connect import connection
class Span(object):
    """One span row from the ClickHouse ``span`` table.

    All constructor arguments arrive as raw string row values and are
    coerced here.  ``children``, ``level``, ``left`` and ``width`` are
    layout fields filled in later by ``SpanCard`` while building and
    laying out the span tree.
    """

    @staticmethod
    def _clean(text):
        """Turn escaped line breaks into ``<br>`` and drop leftover backslashes."""
        return text.replace("\\r", "<br>").replace("\\n", "<br>").replace("\\", "")

    def __init__(
        self,
        ts,
        id,
        parent,
        server,
        service,
        client,
        duration,
        sample,
        error_code,
        error_text,
        in_label,
        out_label,
    ):
        self.ts = datetime.datetime.strptime(ts, "%Y-%m-%d %H:%M:%S")
        self.id = int(id)
        # An empty/None parent marks the root span of the trace context.
        self.parent = int(parent) if parent else None
        self.server = server
        self.service = service
        self.client = client
        self.duration = int(duration)
        self.sample = int(sample)
        self.error_code = error_code
        # The three text fields share identical unescaping rules (the chain
        # was previously duplicated inline three times).
        self.error_text = self._clean(error_text)
        self.in_label = self._clean(in_label)
        self.out_label = self._clean(out_label)
        # Layout fields, populated by SpanCard.
        self.children = []
        self.level = 0
        self.left = 0
        self.width = 0
class SpanCard(BaseCard):
    # Card that renders one distributed-tracing context (``ctx``) as a
    # Gantt-style span tree.
    name = "span"
    default_template_name = "span"
    # Total width of the bar area in rendering units (presumably pixels —
    # confirm against the "span" template).
    GRAPH_WIDTH = 400

    def get_data(self):
        """Load all spans of the context, build the tree and compute layout."""
        # Get span data
        ch = connection()
        data = [
            Span(*r)
            for r in ch.execute(
                """
                SELECT
                    ts, id, parent, server, service, client,
                    duration, sample, error_code,
                    error_text, in_label, out_label
                FROM span
                WHERE ctx = %s""",
                [int(self.id)],
            )
        ]
        # Build hierarchy: link every span to its parent; the span without
        # a parent is the root of the context.
        smap = {s.id: s for s in data}
        root = None
        for s in data:
            if s.parent:
                smap[s.parent].children += [s]
            else:
                root = s
        # Set width/left offset of each bar proportionally to the root span.
        for s in data:
            if s.parent:
                # Offset of this span's start relative to the root, in µs.
                d = s.ts - root.ts
                dt = d.seconds * 1000000 + d.microseconds
                s.left = self.GRAPH_WIDTH * dt // root.duration
                s.width = int(float(self.GRAPH_WIDTH) / (float(root.duration) / float(s.duration)))
            else:
                s.left = 0
                s.width = self.GRAPH_WIDTH
        # Flatten the tree into render order (depth-first, children by ts).
        spans = self.flatten_spans(root)
        #
        return {"context": int(self.id), "root": root, "spans": spans}

    def flatten_spans(self, span, level=0):
        """Depth-first flatten of the span tree, recording nesting level."""
        span.level = level
        r = [span]
        for c in sorted(span.children, key=operator.attrgetter("ts")):
            r += self.flatten_spans(c, level + 1)
        return r
| StarcoderdataPython |
1750273 | <filename>src/NFixedPointQuery.py<gh_stars>0
# NFixedPointQuery.py
# <NAME>
# Modified from
# DoubleFixedPointQuery.py
# MIT LICENSE 2016
# <NAME>
from DSGRN.Query.FixedPointTables import *
import os, sys
class HiddenPrints:
    """Context manager that temporarily redirects stdout to the null device.

    Used to silence noisy third-party calls (e.g. ``MatchQuery``).  It swaps
    ``sys.stdout`` globally, so it is neither reentrant nor thread-safe.
    """

    def __enter__(self):
        self._original_stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')

    def __exit__(self, exc_type, exc_val, exc_tb):
        devnull = sys.stdout
        sys.stdout = self._original_stdout
        devnull.close()
class NFixedPointQuery:
    # Queries a DSGRN database for Morse graphs that contain N fixed points,
    # one per supplied bound, by intersecting per-bound match tables.
    def __init__ (self, database, *bounds):
        self.database = database
        c = database.conn.cursor()
        N = len(bounds)
        for i in range(N):
            FPs = bounds[i]
            table = 'Matches' + str(i)
            # MatchQuery prints progress; run it silenced.
            with HiddenPrints():
                MatchQuery(FPs, table, database)
            # NOTE(review): MatchQuery is invoked a second time here with
            # identical arguments — this looks like an unintentional
            # duplicate (or a leftover from removing HiddenPrints); confirm
            # whether the second call is needed before removing it.
            MatchQuery(FPs, table, database)
            # set<i>: distinct MorseGraphIndex values matching bound i.
            c.execute('create temp table set' + str(i) + ' as select MorseGraphIndex from ' + table + ' group by MorseGraphIndex;')
            # match<i>: running intersection of set0 .. set<i>.
            if i==0:
                c.execute('create temp table match0 as select * from set0')
            else:
                c.execute('create temp table match' + str(i) + ' as select * from (select * from match' + str(i-1) + ' intersect select * from set' + str(i) + ')')
        # Morse graphs matching every bound simultaneously.
        self.set_of_matches = set([ row[0] for row in c.execute('select MorseGraphIndex from match' + str(N-1))])
        # Fixed-point labels shared by two different bounds indicate that
        # the bounds overlap (which makes the intersection ambiguous).
        overlap = set()
        for i in range(N):
            for j in range(N):
                if j != i:
                    overlap1 = set([ row[0] for row in c.execute('select Label from (select Label from Matches' + str(i) + ' intersect select Label from Matches' + str(j) + ')')])
                    overlap = overlap.union(overlap1)
        self.overlap = overlap
        # Drop all temporary tables created above.
        for i in range(N):
            c.execute('drop table Matches' + str(i))
            c.execute('drop table set' + str(i))
            c.execute('drop table match' + str(i))
    def matches(self):
        """
        Return entire set of matches if bounds do not overlap, otherwise return overlaping FP
        """
        # ANSI color codes for the error banner (red-on-white, black-on-white).
        CRED = '\033[1;31;47m '
        CEND = '\033[1;30;47m '
        if self.overlap == set():
            return self.set_of_matches
        else:
            print(CRED + 'ERROR:' + CEND + 'overlapping bounds for ', self.overlap)
            return self.overlap
    def matches_with_PI(self):
        # Like matches(), but pairs each Morse graph with its parameter
        # indices from the Signatures table, returned as
        # (MorseGraphIndex, ParameterIndex, ...) tuples.
        CRED = '\033[1;31;47m '
        CEND = '\033[1;30;47m '
        database = self.database
        c = database.conn.cursor()
        PGI1 = set()
        if self.overlap == set():
            for i in self.set_of_matches:
                c.execute('create temp table C' + str(i) + ' as select * from Signatures where MorseGraphIndex =' + str(i) )
                set_of_matches = set([ (row[2],row[0],row[1]) for row in c.execute('select * from C' + str(i))])
                PGI1 = PGI1.union(set_of_matches)
                c.execute('drop table C' + str(i))
            return PGI1
        else:
            print(CRED + 'ERROR:' + CEND + 'overlapping bounds for ', self.overlap)
            return self.overlap
    def __call__ (self, morsegraphindex ):
        """
        Test if a single mgi is in the set of matches
        """
        return morsegraphindex in self.set_of_matches
| StarcoderdataPython |
1939591 | import sys
import logging
import semver
import anymarkup
import reconcile.queries as queries
import reconcile.openshift_base as ob
import reconcile.openshift_resources_base as orb
from utils.openshift_resource import OpenshiftResource as OR
from utils.openshift_resource import ConstructResourceError
from utils.defer import defer
from utils.openshift_acme import (ACME_DEPLOYMENT,
ACME_ROLE,
ACME_ROLEBINDING,
ACME_SERVICEACCOUNT)
QONTRACT_INTEGRATION = 'openshift-acme'
QONTRACT_INTEGRATION_VERSION = semver.format_version(0, 2, 0)
def process_template(template, values):
    """Render a %%-style manifest template and wrap it as an OpenshiftResource.

    Raises ConstructResourceError when the template references a key that is
    missing from ``values``.
    """
    try:
        rendered = template % values
        parsed = anymarkup.parse(rendered, force_types=None)
        return OR(parsed,
                  QONTRACT_INTEGRATION,
                  QONTRACT_INTEGRATION_VERSION)
    except KeyError as e:
        raise ConstructResourceError(
            f'could not process template: missing key {e}')
def construct_resources(namespaces):
    # Render the openshift-acme manifests (Deployment, ServiceAccount, Role,
    # RoleBinding and optionally the acme-account Secret) for every namespace
    # and attach them under namespace["resources"].  Returns the mutated list.
    for namespace in namespaces:
        namespace_name = namespace["name"]
        acme = namespace.get("openshiftAcme", {})

        # Get the linked acme schema settings
        acme_config = acme.get("config", {})
        image = acme_config.get("image")
        acme_overrides = acme_config.get("overrides", {})

        # Resource names default to 'openshift-acme' unless overridden.
        default_name = 'openshift-acme'
        default_rbac_api_version = 'authorization.openshift.io/v1'

        deployment_name = \
            acme_overrides.get('deploymentName') or default_name
        serviceaccount_name = \
            acme_overrides.get('serviceaccountName') or default_name
        role_name = \
            acme_overrides.get('roleName') or default_name
        # NOTE(review): this reads 'roleName' again; following the naming
        # pattern of the other overrides it probably should be
        # 'rolebindingName' — confirm against the openshiftAcme schema
        # before changing.
        rolebinding_name = \
            acme_overrides.get('roleName') or default_name
        rbac_api_version = \
            acme_overrides.get('rbacApiVersion') or default_rbac_api_version

        # Create the resources and append them to the namespace
        namespace["resources"] = []
        namespace["resources"].append(
            process_template(ACME_DEPLOYMENT, {
                'deployment_name': deployment_name,
                'image': image,
                'serviceaccount_name': serviceaccount_name
            })
        )
        namespace["resources"].append(
            process_template(ACME_SERVICEACCOUNT, {
                'serviceaccount_name': serviceaccount_name
            })
        )
        namespace["resources"].append(
            process_template(ACME_ROLE, {
                'role_name': role_name,
                'role_api_version': rbac_api_version
            })
        )
        namespace["resources"].append(
            process_template(ACME_ROLEBINDING, {
                'role_name': role_name,
                'rolebinding_name': rolebinding_name,
                'rolebinding_api_version': rbac_api_version,
                'serviceaccount_name': serviceaccount_name,
                'namespace_name': namespace_name
            })
        )

        # If acme-account Secret is defined, add it to the namespace
        acme_account_secret = acme.get("accountSecret", {})
        if acme_account_secret:
            namespace["resources"].append(
                orb.fetch_provider_vault_secret(
                    acme_account_secret['path'],
                    acme_account_secret['version'],
                    'acme-account',
                    labels={'kubernetes.io/acme.type': 'account'},
                    annotations={},
                    type='Opaque',
                    integration=QONTRACT_INTEGRATION,
                    integration_version=QONTRACT_INTEGRATION_VERSION
                )
            )

    return namespaces
def add_desired_state(namespaces, ri, oc_map):
    """Register every rendered resource as desired state in ``ri``.

    Namespaces whose cluster has no reachable OC client in ``oc_map`` are
    skipped entirely.
    """
    for ns in namespaces:
        cluster_name = ns['cluster']['name']
        if not oc_map.get(cluster_name):
            continue
        for res in ns["resources"]:
            ri.add_desired(
                cluster_name,
                ns['name'],
                res.kind,
                res.name,
                res,
            )
@defer
def run(dry_run=False, thread_pool_size=10, internal=None,
        use_jump_host=True, defer=None):
    # Entry point of the openshift-acme integration: reconcile the rendered
    # acme manifests against the current cluster state.  Exits with status 1
    # on any failure so the scheduler can detect it.
    try:
        # Only namespaces that opted in via the openshiftAcme schema field.
        namespaces = [
            namespace_info for namespace_info
            in queries.get_namespaces()
            if namespace_info.get('openshiftAcme')
        ]

        namespaces = construct_resources(namespaces)

        ri, oc_map = ob.fetch_current_state(
            namespaces=namespaces,
            thread_pool_size=thread_pool_size,
            integration=QONTRACT_INTEGRATION,
            integration_version=QONTRACT_INTEGRATION_VERSION,
            override_managed_types=[
                'Deployment',
                'Role',
                'RoleBinding',
                'ServiceAccount',
                'Secret'],
            internal=internal,
            use_jump_host=use_jump_host)
        add_desired_state(namespaces, ri, oc_map)

        # Ensure OC clients are cleaned up when the function exits
        # (the @defer decorator runs this callback on return/raise).
        defer(lambda: oc_map.cleanup())

        ob.realize_data(dry_run, oc_map, ri)

        if ri.has_error_registered():
            sys.exit(1)

    except Exception as e:
        msg = 'There was problem running openshift acme reconcile.'
        msg += ' Exception: {}'
        msg = msg.format(str(e))
        logging.error(msg)
        sys.exit(1)
| StarcoderdataPython |
3383943 | <filename>api_tools/urls.py
from .utils import DefaultModelSerializer
import debug_toolbar
from django.conf import settings
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token, verify_jwt_token
# Routing: admin/jet UI, markdown editor, JWT auth endpoints, plus all
# model routes generated by DefaultModelSerializer.
urlpatterns = [
    url(r'^jet/', include('jet.urls', 'jet')),
    url(r'^jet/dashboard/', include('jet.dashboard.urls', 'jet-dashboard')),
    url(r'^admin/', admin.site.urls),
    url(r'^draceditor/', include('draceditor.urls')),
    # JWT lifecycle: obtain, refresh and verify tokens.
    url(r'^api/auth/auth$', obtain_jwt_token, name='get_token'),
    url(r'^api/auth/refresh$', refresh_jwt_token, name='refresh_token'),
    url(r'^api/auth/check$', verify_jwt_token, name='check_token'),
] + DefaultModelSerializer().urls()
# Optionally mount a project-specific URLconf at the root; projects that do
# not define settings.PROJECT_URLCONF simply skip this step.
try:
    urlpatterns = [url(r'^', include(settings.PROJECT_URLCONF))] + urlpatterns
except AttributeError:
    pass
if settings.DEBUG:
    urlpatterns += [url(r'^__debug__/', include(debug_toolbar.urls))]
3218634 | import discord
from discord.ext import commands
from discord.ext.commands import has_permissions, BucketType, cooldown
from datetime import datetime, timedelta
import asyncio
import json
import pymongo
# Moderation cog: mute/unmute, kick/ban/unban, warn, purge, nuke, nickname,
# slowmode, channel lock and welcome-channel configuration.
# (Intentionally documented with '#' comments rather than docstrings, since
# discord.py surfaces command docstrings as user-visible help text.)
class Moderation(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    # Mute a member by assigning the "Muted" role, optionally for a limited
    # number of minutes.  Creates the role (deny send, allow read) on first use.
    @commands.command(aliases=['m'])
    @commands.has_permissions(manage_roles = True)
    async def mute(self, ctx, user:discord.Member = None, time:int = None):
        if not user:
            await ctx.send("Please specify whom to mute")
        else:
            role = discord.utils.get(ctx.guild.roles, name="Muted")
            if not role:
                perms = discord.Permissions(send_messages=False, read_messages=True)
                await ctx.guild.create_role(name="Muted", permissions = perms)
                role = discord.utils.get(ctx.guild.roles, name="Muted")
            await user.add_roles(role)
            if not time:
                embed = discord.Embed(description = "Has been muted!" )
                embed.set_author(name= user, url=user.avatar_url, icon_url=user.avatar_url)
                embed.set_footer(text=f'Requested by {ctx.author}')
                await ctx.send(embed=embed, delete_after=5)
            else:
                embed1 = discord.Embed(description = f"Has been muted for {time} minutes!" )
                embed1.set_author(name= user, url=user.avatar_url, icon_url=user.avatar_url)
                embed1.set_footer(text=f'Requested by {ctx.author}')
                await ctx.send(embed=embed1, delete_after=5)
                # Timed mute: sleep, then lift the role and announce it.
                if role in user.roles:
                    await asyncio.sleep(time*60)
                    await user.remove_roles(role)
                    embed2 = discord.Embed(description = f"Has been unmuted after {time} minutes!" )
                    embed2.set_author(name= user, url=user.avatar_url, icon_url=user.avatar_url)
                    embed2.set_footer(text=f'Requested by {ctx.author}')
                    await ctx.send(embed=embed2, delete_after=5)
        # Remove the invoking command message after a short delay.
        await asyncio.sleep(4)
        Message = ctx.message
        await Message.delete()

    # Remove the "Muted" role from a member, if present.
    @commands.command(aliases=['um'])
    @commands.has_permissions(manage_roles = True)
    async def unmute(self, ctx, user:discord.Member = None):
        role = discord.utils.get(ctx.guild.roles, name="Muted")
        if not user:
            await ctx.send("Please mention whom to unmute!")
        else:
            if role in user.roles:
                await user.remove_roles(role)
                embed = discord.Embed(description = "Has been unmuted!" )
                embed.set_author(name= user, url=user.avatar_url, icon_url=user.avatar_url)
                embed.set_footer(text=f'Requested by {ctx.author}')
                await ctx.send(embed=embed, delete_after=5)
            else:
                embed = discord.Embed(description = "Hasn't been muted yet!" )
                embed.set_author(name= user, url=user.avatar_url, icon_url=user.avatar_url)
                embed.set_footer(text=f'Requested by {ctx.author}')
                await ctx.send(embed=embed, delete_after=5)
        # Remove the invoking command message after a short delay.
        await asyncio.sleep(4)
        Message = ctx.message
        await Message.delete()

    # Kick a member and try to DM them the reason.
    @commands.command(aliases=['k'])
    @has_permissions(kick_members = True)
    async def kick(self, ctx, user:discord.Member = None,*, reason = "No Reason Specified"):
        if not user:
            await ctx.send("Please specify whom to kick!")
        else:
            await user.kick(reason=reason)
            embed = discord.Embed(description = f"Because {reason}")
            embed.set_author(name= f"{user} has been kick", url=user.avatar_url, icon_url=user.avatar_url)
            embed.set_footer(text=f'Requested by {ctx.author}')
            await ctx.send(embed=embed)
            # DMs may be closed — fall back to a channel notice.
            try:
                await user.send(f"You have been kicked. Reason {reason}")
            except Exception:
                await ctx.send(f"{user.mention}'s DM is closed!", delete_after = 5)

    # Ban a member and try to DM them the reason.
    @commands.command(aliases=['b'])
    @commands.has_permissions(ban_members=True)
    async def ban(self, ctx, user: discord.Member, *, reason=None):
        await user.ban(reason=reason)
        embed = discord.Embed(description = f"Has been Banned, because {reason}")
        embed.set_author(name= user, url=user.avatar_url, icon_url=user.avatar_url)
        embed.set_footer(text=f'Requested by {ctx.author}')
        await ctx.send(embed=embed)
        # NOTE: the ban has already happened, so this DM usually fails
        # unless the user shares another server with the bot.
        try:
            await user.send(f"You have been banned. Reason {reason}")
        except Exception:
            await ctx.send(f"{user.mention}'s DM is closed!", delete_after = 5)

    # The below code unbans player.
    # Expects "Name#1234" and matches it against the guild's ban list.
    @commands.command(aliases=['ub'])
    @commands.has_permissions(administrator=True)
    async def unban(self, ctx, *, member):
        banned_users = await ctx.guild.bans()
        try:
            member_name, member_discriminator = member.split("#")
        except Exception:
            await ctx.send("Use command properly! eg: `.unban MEE6#4876`")
        # NOTE(review): if the split above failed, member_name is undefined
        # and the comparison below raises NameError — an early return in the
        # except branch would be safer; confirm before changing behavior.
        for ban_entry in banned_users:
            user = ban_entry.user
            if (user.name, user.discriminator) == (member_name, member_discriminator):
                await ctx.guild.unban(user)
                await ctx.send(f'Unbanned {user.mention}')

    # Warn a member publicly and try to DM them the reason.
    @commands.command(aliases=['w'])
    @commands.has_permissions(kick_members = True)
    async def warn(self, ctx, user: discord.Member = None, *, reason = "No reason provided"):
        if not user:
            await ctx.send("DUMBASS mention whom to warn!")
        else:
            embed = discord.Embed(description = f"Because {reason}")
            embed.set_author(name= f"{user} <NAME> warned", url=user.avatar_url, icon_url=user.avatar_url)
            embed.set_footer(text=f'Requested by {ctx.author}')
            await ctx.send(embed=embed)
            try:
                await user.send(f"You have been warned. Reason {reason}")
            except Exception:
                await ctx.send(f"{user.mention}'s DM is closed!", delete_after = 5)

    @commands.command(name='purge')
    async def purge(self, ctx, num_messages: int = 10, user:discord.Member = None):
        """
        Clear <n> messages from current channel
        """
        # With a user argument only that user's messages are purged.
        if user:
            channel = ctx.message.channel
            def check(msg):
                return msg.author.id == user.id
            await ctx.message.delete()
            await channel.purge(limit=num_messages, check=check, before=None)
            await ctx.send(f"`{num_messages} messages from {user} deleted!`", delete_after=5)
            return
        channel = ctx.message.channel
        await ctx.message.delete()
        await channel.purge(limit=num_messages, check=None, before=None)
        await ctx.send(f"`{num_messages} messages has been deleted!`", delete_after=5)

    # Clone the given channel and delete the original ("nuke"), wiping its
    # history.  Owner-only, rate-limited, with a double "yes" confirmation.
    @commands.command()
    @cooldown(1, 300, BucketType.user)
    @commands.is_owner()
    async def nuke(self, ctx, channels : discord.TextChannel=None):
        if channels == None:
            await ctx.send('Give a channel')
            return
        if ctx.author != ctx.guild.owner:
            await ctx.send('Only **{}** Can use this Command'.format(ctx.guild.owner))
        else:
            verif = await ctx.send('Are you sure!')
            await ctx.send('Type in `yes`. To proceed')
            # Confirmation must come from the invoking user and be exactly "yes".
            def check(m):
                user = ctx.author
                return m.author.id == user.id and m.content == 'yes'
            msg = await self.bot.wait_for('message', check=check)
            await ctx.channel.send('Theres no going back!\n**Are you sure.** \n Type in `yes` to proceed!')
            msg = await self.bot.wait_for('message', check=check)
            new = await channels.clone()
            await channels.delete()
            await new.send('https://media1.tenor.com/images/6c485efad8b910e5289fc7968ea1d22f/tenor.gif?itemid=5791468')
            await asyncio.sleep(2)
            await new.send(f'**{self.bot.user.name}** has nuked this channel!')

    # Set a member's nickname to the remaining arguments joined by spaces.
    @commands.command(aliases=['nick'])
    @commands.has_guild_permissions(manage_nicknames=True)
    async def nickname(self, ctx, member : discord.Member, *args):
        if member == None:
            await ctx.send('Give me a user dumbass')
        elif member == ctx.guild.owner:
            await ctx.send('You cant name the owner!')
        else:
            x = ' '.join(map(str, args))
            await member.edit(nick=f'{x}')
            await ctx.send(f'{member.name} has been changed to {x}')

    # Set the current channel's slowmode delay (0..21600 seconds).
    @commands.command()
    @commands.has_guild_permissions(manage_channels=True)
    @commands.cooldown(1, 60, BucketType.user)
    async def slowmode(self, ctx, time : int=0):
        if time < 0:
            await ctx.send('Give a positive number.')
            return
        try:
            if time > 21600:
                await ctx.send('Number is too large. You can only have a maximum time of `21600` seconds (6 Hours)')
            else:
                await ctx.channel.edit(slowmode_delay=time)
                await ctx.send(f'The channel {ctx.channel.name} now has a slowmode of {time} seconds')
        except Exception:
            await ctx.send('Not a number!')

    # Toggle the @everyone send-messages overwrite on a channel
    # (acts as lock when currently unlocked, and unlock otherwise).
    @commands.command()
    @commands.has_permissions(manage_channels=True)
    async def lock(self, ctx, channel: discord.TextChannel=None):
        channel = channel or ctx.channel
        if ctx.guild.default_role not in channel.overwrites:
            # No @everyone overwrite yet: create one that denies sending.
            overwrites = {
                ctx.guild.default_role: discord.PermissionOverwrite(send_messages=False)
            }
            await channel.edit(overwrites=overwrites)
            # NOTE(review): messages below report ctx.channel.name even when a
            # different target channel was passed — confirm intent.
            await ctx.send("**The channel `{}` has successfully been locked!**".format(ctx.channel.name))
        elif channel.overwrites[ctx.guild.default_role].send_messages == True or channel.overwrites[ctx.guild.default_role].send_messages == None:
            # Overwrite exists but sending is allowed/neutral: deny it.
            overwrites = channel.overwrites[ctx.guild.default_role]
            overwrites.send_messages = False
            await channel.set_permissions(ctx.guild.default_role, overwrite=overwrites)
            await ctx.send("**The channel `{}` has successfully been locked!**".format(ctx.channel.name))
        else:
            # Sending is currently denied: re-allow it (unlock).
            overwrites = channel.overwrites[ctx.guild.default_role]
            overwrites.send_messages = True
            await channel.set_permissions(ctx.guild.default_role, overwrite=overwrites)
            await ctx.send('**The channel `{}` has now been unlocked!**'.format(ctx.channel.name))

    # Persist the guild's welcome channel id in ./Other/json/welcome.json.
    @commands.command(aliases=['sw', 'setwelcome', 'set_w'])
    async def set_welcome(self, ctx, channel : discord.TextChannel=None):
        if channel == None:
            await ctx.send('You havent provided a valid channel!')
        else:
            with open('./Other/json/welcome.json', 'r') as f:
                welcome_id = json.load(f)
            welcome_id[str(ctx.guild.id)] = f'{channel.id}'
            with open('./Other/json/welcome.json', 'w') as f:
                json.dump(welcome_id, f, indent=4)
            await ctx.send(f'The welcomes channel has been set as `{channel.name}`.')

    # Disable welcome messages for the guild by storing the 'Not Set' marker.
    @commands.command(aliases=['rw', 'remove_w', 'r_welcome', 'removewelcome', 'rwelcome'])
    async def remove_welcome(self, ctx):
        with open('./Other/json/welcome.json', 'r') as f:
            welcome_id = json.load(f)
        welcome_id[str(ctx.guild.id)] = f'Not Set'
        with open('./Other/json/welcome.json', 'w') as f:
            json.dump(welcome_id, f, indent=4)
        await ctx.send(f'You have removed the welcome messages!')
def setup(bot):
    # discord.py extension entry point: called by bot.load_extension().
    bot.add_cog(Moderation(bot))
| StarcoderdataPython |
"""Clip an SRTM DTM to a site AOI and reproject it onto a regular grid.

NOTE(review): this file is clearly a Jupyter-notebook export (it contained
the ``%matplotlib inline`` magic, which is a SyntaxError in a plain .py
module).  The magic is preserved below as a comment.
"""
import elevation
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import geopandas as gpd
# '%matplotlib inline' is an IPython magic and invalid in a .py file;
# keep it only when running inside a notebook.
# %matplotlib inline
from rasterio.transform import from_bounds, from_origin
from rasterio.warp import reproject, Resampling
import rasterio as rio

# Bounding box of the site of interest, taken from the AOI shapefile.
bounds = gpd.read_file('E:/Msc/Dissertation/Code/Data/Input/Site_AOIs/Humberstone_AOI.shp').bounds
west, south, east, north = bounds = bounds.loc[0]

# Download SRTM1 elevation clipped to the AOI into this GeoTIFF.
dtm = "E:/Msc/Dissertation/Code/Data/Generated/Humberstone_slope_dd.tif"
elevation.clip(bounds=bounds, output=dtm, product='SRTM1')

# NOTE(review): the raster opened here is a *different* local DTM, not the
# file just downloaded above — confirm which input is intended.
dem_raster = rio.open("E:/Msc/Dissertation/Code/Data/Input/DTM/Dales_Nidderdale_Moorland_Line_DTM_5m.tif")
src_crs = dem_raster.crs
src_shape = src_height, src_width = dem_raster.shape
src_transform = from_bounds(west, south, east, north, src_width, src_height)
source = dem_raster.read(1)

# Destination grid: UTM zone 16N at 250 m resolution, NaN-initialised.
dst_crs = {'init': 'EPSG:32616'}
dst_transform = from_origin(268000.0, 5207000.0, 250, 250)
dem_array = np.zeros((451, 623))
dem_array[:] = np.nan

reproject(source,
          dem_array,
          src_transform=src_transform,
          src_crs=src_crs,
          dst_transform=dst_transform,
          dst_crs=dst_crs,
          resampling=Resampling.bilinear)

# Optional nicer terrain colormap; fall back to matplotlib's Spectral_r
# when pycpt is unavailable or the remote palette cannot be fetched.
# (Was a bare 'except:', which would also swallow KeyboardInterrupt.)
try:
    import pycpt
    topocmap = pycpt.load.cmap_from_cptcity_url('wkp/schwarzwald/wiki-schwarzwald-cont.cpt')
except Exception:
    topocmap = 'Spectral_r'

# Display range (metres) used by downstream plotting.
vmin = 180
vmax = 575
| StarcoderdataPython |
3464149 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from dataclasses import dataclass
from pathlib import Path
from typing import Tuple
from airflow_breeze.branch_defaults import AIRFLOW_BRANCH
from airflow_breeze.console import console
from airflow_breeze.global_constants import AVAILABLE_INTEGRATIONS, get_airflow_version
from airflow_breeze.utils.host_info_utils import get_host_group_id, get_host_user_id, get_stat_bin
from airflow_breeze.utils.path_utils import AIRFLOW_SOURCE, BUILD_CACHE_DIR, SCRIPTS_CI_DIR
from airflow_breeze.utils.run_utils import get_filesystem_type, run_command
@dataclass
class ShellBuilder:
    """Parameters and derived values for launching a Breeze CI shell.

    Bundles the CLI options together with computed image names, docker-compose
    file lists and cache settings used to start the containerized environment.
    """
    python_version: str  # check in cache
    build_cache_local: bool
    build_cache_pulled: bool
    build_cache_disabled: bool
    backend: str  # check in cache
    integration: Tuple[str]  # check in cache
    postgres_version: str  # check in cache
    mssql_version: str  # check in cache
    mysql_version: str  # check in cache
    force_build: bool
    extra_args: Tuple
    use_airflow_version: str = ""
    install_airflow_version: str = ""
    tag: str = "latest"
    github_repository: str = "apache/airflow"
    skip_mounting_local_sources: bool = False
    mount_all_local_sources: bool = False
    # NOTE(review): declared as str with default "false"; any non-empty
    # string (including "false") is truthy, so the check in compose_files
    # would always fire unless callers pass "" / a real bool — confirm how
    # this field is populated.
    forward_credentials: str = "false"
    airflow_branch: str = AIRFLOW_BRANCH
    executor: str = "KubernetesExecutor"  # check in cache
    start_airflow: str = "false"
    skip_twine_check: str = ""
    use_packages_from_dist: str = "false"
    github_actions: str = ""
    issue_id: str = ""
    num_runs: str = ""
    version_suffix_for_pypi: str = ""
    version_suffix_for_svn: str = ""

    @property
    def airflow_version(self):
        """Airflow version as read from the local source tree."""
        return get_airflow_version()

    @property
    def airflow_version_for_production_image(self):
        """Airflow version baked into the prod image (queried via docker run)."""
        cmd = ['docker', 'run', '--entrypoint', '/bin/bash', f'{self.airflow_prod_image_name}']
        cmd.extend(['-c', 'echo "${AIRFLOW_VERSION}"'])
        output = run_command(cmd, capture_output=True, text=True)
        return output.stdout.strip()

    @property
    def host_user_id(self):
        return get_host_user_id()

    @property
    def host_group_id(self):
        return get_host_group_id()

    @property
    def airflow_image_name(self) -> str:
        """Base ghcr.io image path for this repository."""
        image = f'ghcr.io/{self.github_repository.lower()}'
        return image

    @property
    def airflow_ci_image_name(self) -> str:
        """Construct CI image link"""
        image = f'{self.airflow_image_name}/{self.airflow_branch}/ci/python{self.python_version}'
        return image

    @property
    def airflow_ci_image_name_with_tag(self) -> str:
        """CI image name plus the optional ``:tag`` suffix."""
        image = self.airflow_ci_image_name
        return image if not self.tag else image + f":{self.tag}"

    @property
    def airflow_prod_image_name(self) -> str:
        """Production image name for this branch/python combination."""
        image = f'{self.airflow_image_name}/{self.airflow_branch}/prod/python{self.python_version}'
        return image

    @property
    def airflow_image_kubernetes(self) -> str:
        """Image name used for Kubernetes tests."""
        image = f'{self.airflow_image_name}/{self.airflow_branch}/kubernetes/python{self.python_version}'
        return image

    @property
    def airflow_sources(self):
        return AIRFLOW_SOURCE

    @property
    def docker_cache(self) -> str:
        """Cache strategy string: 'local', 'disabled' or (default) 'pulled'."""
        if self.build_cache_local:
            docker_cache = "local"
        elif self.build_cache_disabled:
            docker_cache = "disabled"
        else:
            docker_cache = "pulled"
        return docker_cache

    @property
    def mount_selected_local_sources(self) -> bool:
        """True when only selected sources are mounted (neither all nor none)."""
        mount_selected_local_sources = True
        if self.mount_all_local_sources or self.skip_mounting_local_sources:
            mount_selected_local_sources = False
        return mount_selected_local_sources

    @property
    def enabled_integrations(self) -> str:
        """Space-separated integration list; 'all' expands to every one."""
        if "all" in self.integration:
            enabled_integration = " ".join(AVAILABLE_INTEGRATIONS)
        elif len(self.integration) > 0:
            enabled_integration = " ".join(self.integration)
        else:
            enabled_integration = ""
        return enabled_integration

    @property
    def the_image_type(self) -> str:
        the_image_type = 'CI'
        return the_image_type

    @property
    def image_description(self) -> str:
        image_description = 'Airflow CI'
        return image_description

    @property
    def md5sum_cache_dir(self) -> Path:
        """Directory holding md5 fingerprints used to detect rebuild needs."""
        cache_dir = Path(BUILD_CACHE_DIR, self.airflow_branch, self.python_version, self.the_image_type)
        return cache_dir

    @property
    def backend_version(self) -> str:
        """Version of the selected database backend ('' for sqlite/unknown)."""
        version = ''
        if self.backend == 'postgres':
            version = self.postgres_version
        if self.backend == 'mysql':
            version = self.mysql_version
        if self.backend == 'mssql':
            version = self.mssql_version
        return version

    @property
    def sqlite_url(self) -> str:
        sqlite_url = "sqlite:////root/airflow/airflow.db"
        return sqlite_url

    def print_badge_info(self):
        """Print a summary of the selected environment to the console."""
        console.print(f'Use {self.the_image_type} image')
        console.print(f'Branch Name: {self.airflow_branch}')
        console.print(f'Docker Image: {self.airflow_ci_image_name_with_tag}')
        console.print(f'Airflow source version:{self.airflow_version}')
        console.print(f'Python Version: {self.python_version}')
        console.print(f'Backend: {self.backend} {self.backend_version}')
        console.print(f'Airflow used at runtime: {self.use_airflow_version}')

    @property
    def compose_files(self):
        """Colon-separated list of docker-compose files for this configuration.

        Selection depends on backend, source-mounting mode, credential
        forwarding, runtime airflow version and enabled integrations.
        """
        compose_ci_file = []
        main_ci_docker_compose_file = f"{str(SCRIPTS_CI_DIR)}/docker-compose/base.yml"
        backend_docker_compose_file = f"{str(SCRIPTS_CI_DIR)}/docker-compose/backend-{self.backend}.yml"
        backend_port_docker_compose_file = (
            f"{str(SCRIPTS_CI_DIR)}/docker-compose/backend-{self.backend}-port.yml"
        )
        local_docker_compose_file = f"{str(SCRIPTS_CI_DIR)}/docker-compose/local.yml"
        local_all_sources_docker_compose_file = f"{str(SCRIPTS_CI_DIR)}/docker-compose/local-all-sources.yml"
        files_docker_compose_file = f"{str(SCRIPTS_CI_DIR)}/docker-compose/files.yml"
        remove_sources_docker_compose_file = f"{str(SCRIPTS_CI_DIR)}/docker-compose/remove-sources.yml"
        forward_credentials_docker_compose_file = (
            f"{str(SCRIPTS_CI_DIR)}/docker-compose/forward-credentials.yml"
        )
        # mssql based check have to be added
        if self.backend == 'mssql':
            # mssql cannot keep its volume on tmpfs: pick bind vs docker volume.
            docker_filesystem = get_filesystem_type('.')
            if docker_filesystem == 'tmpfs':
                compose_ci_file.append(f"{str(SCRIPTS_CI_DIR)}/docker-compose/backend-mssql-bind-volume.yml")
            else:
                compose_ci_file.append(
                    f"{str(SCRIPTS_CI_DIR)}/docker-compose/backend-mssql-docker-volume.yml"
                )
        compose_ci_file.extend(
            [main_ci_docker_compose_file, backend_docker_compose_file, files_docker_compose_file]
        )
        if self.mount_selected_local_sources:
            compose_ci_file.extend([local_docker_compose_file, backend_port_docker_compose_file])
        if self.mount_all_local_sources:
            compose_ci_file.extend([local_all_sources_docker_compose_file, backend_port_docker_compose_file])
        if self.forward_credentials:
            compose_ci_file.append(forward_credentials_docker_compose_file)
        if len(self.use_airflow_version) > 0:
            compose_ci_file.append(remove_sources_docker_compose_file)
        if "all" in self.integration:
            integrations = AVAILABLE_INTEGRATIONS
        else:
            integrations = self.integration
        if len(integrations) > 0:
            for integration in integrations:
                compose_ci_file.append(f"{str(SCRIPTS_CI_DIR)}/docker-compose/integration-{integration}.yml")
        return ':'.join(compose_ci_file)

    @property
    def command_passed(self):
        """First extra CLI argument (the command to run), or None."""
        cmd = None
        if len(self.extra_args) > 0:
            cmd = str(self.extra_args[0])
        return cmd

    @property
    def get_stat_bin(self):
        return get_stat_bin()
| StarcoderdataPython |
192002 | <reponame>Farbfetzen/Advent_of_Code
# https://adventofcode.com/2020/day/22
from collections import deque
from copy import deepcopy
from itertools import islice
from src.util.types import Data, Solution
def prepare_data(data: str) -> list[deque[int]]:
    """Parse the puzzle input into one deque of card values per player.

    The first line of each blank-line-separated chunk ("Player N:") is
    discarded; the remaining lines are the cards, top of deck first.
    """
    decks = []
    for chunk in data.split("\n\n"):
        _header, *cards = chunk.splitlines()
        decks.append(deque(int(card) for card in cards))
    return decks
def calculate_score(cards):
    """Score a deck: the bottom card is worth 1x its value, the next 2x, etc."""
    n = len(cards)
    return sum((n - position) * value for position, value in enumerate(cards))
def part_1(decks):
    """Play regular Combat on copies of the decks and return the winner's score.

    Each round the higher card wins both cards (winner's card placed first).
    The input decks are left untouched (deep-copied before play).
    """
    deck_a, deck_b = deepcopy(decks)
    while deck_a and deck_b:
        card_a = deck_a.popleft()
        card_b = deck_b.popleft()
        if card_a > card_b:
            deck_a.extend((card_a, card_b))
        else:
            deck_b.extend((card_b, card_a))
    winner = deck_a or deck_b
    # Score: bottom card worth 1x its value, next 2x, and so on.
    return sum(i * card for i, card in enumerate(reversed(winner), start=1))
def part_2(decks, return_index=False):
    """Play Recursive Combat (mutating `decks`) and return the winner's score.

    When `return_index` is True (used by the recursive sub-games), the
    winning player's index (0 or 1) is returned instead of the score.
    A repeated deck configuration within a game awards that game to player 1.
    """
    seen_configurations = set()
    game_winner = None
    while game_winner is None:
        snapshot = (tuple(decks[0]), tuple(decks[1]))
        if snapshot in seen_configurations:
            # Infinite-game prevention rule: player 1 wins immediately.
            game_winner = 0
            break
        seen_configurations.add(snapshot)
        top0, top1 = decks[0].popleft(), decks[1].popleft()
        if len(decks[0]) >= top0 and len(decks[1]) >= top1:
            # Both players can recurse: copy the next `top` cards of each deck.
            sub_decks = [deque(islice(decks[0], top0)),
                         deque(islice(decks[1], top1))]
            round_winner = part_2(sub_decks, True)
        else:
            round_winner = 0 if top0 > top1 else 1
        if round_winner == 0:
            decks[0].extend((top0, top1))
            if not decks[1]:
                game_winner = 0
        else:
            decks[1].extend((top1, top0))
            if not decks[0]:
                game_winner = 1
    if return_index:
        return game_winner
    winning_deck = decks[game_winner]
    return sum(pos * card for pos, card in enumerate(reversed(winning_deck), start=1))
def solve(data: Data) -> Solution:
    """Run both parts on the sample and the real input, returning a Solution."""
    result = Solution()
    sample_decks = prepare_data(data.samples[0])
    # part_1 copies its input; part_2 mutates it, so keep this call order.
    result.samples_part_1.append(part_1(sample_decks))
    result.samples_part_2.append(part_2(sample_decks))
    real_decks = prepare_data(data.input)
    result.part_1 = part_1(real_decks)
    result.part_2 = part_2(real_decks)
    return result
| StarcoderdataPython |
6428962 | <reponame>lukemshannonhill/LeetCode_Daily_Problem_Solutions
# https://leetcode.com/problems/minimum-domino-rotations-for-equal-row/
from typing import List  # fix: `List` was used in annotations but never imported


class Solution:
    def minDominoRotations(self, A: List[int], B: List[int]) -> int:
        """Return the minimum number of domino rotations needed to make every
        value in the top row (A) or the bottom row (B) equal, or -1 if
        impossible.

        Only two candidate values can possibly work: A[0] and B[0] (the first
        domino must contribute the unified value either from its top or its
        bottom). The previous implementation's trivial-case check compared a
        one-element list to an int (`j == len(A)`), which could never be true;
        that dead code and the six-way counting tables are replaced by a
        direct two-candidate scan.
        """

        def rotations_for(target: int) -> int:
            # Rotations needed to make the TOP row all `target`, or -1 if some
            # domino contains `target` on neither side.
            top_swaps = 0      # dominoes that must rotate target up into A
            bottom_swaps = 0   # dominoes that must rotate target down into B
            for a, b in zip(A, B):
                if a != target and b != target:
                    return -1
                if a != target:
                    top_swaps += 1
                elif b != target:
                    bottom_swaps += 1
            # Unifying the bottom row instead costs `bottom_swaps`.
            return min(top_swaps, bottom_swaps)

        result = rotations_for(A[0])
        if result != -1:
            return result
        return rotations_for(B[0])
| StarcoderdataPython |
6446876 | <filename>recbole/model/knowledge_aware_recommender/kgnnls.py
# -*- coding: utf-8 -*-
# @Time : 2020/10/3
# @Author : <NAME>
# @Email : <EMAIL>
r"""
KGNNLS
################################################
Reference:
Hongwei Wang et al. "Knowledge-aware Graph Neural Networks with Label Smoothness Regularization
for Recommender Systems." in KDD 2019.
Reference code:
https://github.com/hwwang55/KGNN-LS
"""
import torch
import torch.nn as nn
import numpy as np
import random
from recbole.utils import InputType
from recbole.model.abstract_recommender import KnowledgeRecommender
from recbole.model.loss import BPRLoss, EmbLoss
from recbole.model.init import xavier_normal_initialization
class KGNNLS(KnowledgeRecommender):
    r"""KGNN-LS is a knowledge-based recommendation model.
    KGNN-LS transforms the knowledge graph into a user-specific weighted graph and then apply a graph neural network to
    compute personalized item embeddings. To provide better inductive bias, KGNN-LS relies on label smoothness
    assumption, which posits that adjacent items in the knowledge graph are likely to have similar user relevance
    labels/scores. Label smoothness provides regularization over the edge weights and it is equivalent to a label
    propagation scheme on a graph.
    """
    input_type = InputType.PAIRWISE

    def __init__(self, config, dataset):
        super(KGNNLS, self).__init__(config, dataset)

        # load parameters info
        self.embedding_size = config['embedding_size']
        self.neighbor_sample_size = config['neighbor_sample_size']
        self.aggregator_class = config['aggregator']  # which aggregator to use
        # number of iterations when computing entity representation
        self.n_iter = config['n_iter']
        self.reg_weight = config['reg_weight']  # weight of l2 regularization
        # weight of label Smoothness regularization
        self.ls_weight = config['ls_weight']

        # define embedding
        self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)
        self.entity_embedding = nn.Embedding(
            self.n_entities, self.embedding_size)
        self.relation_embedding = nn.Embedding(
            self.n_relations + 1, self.embedding_size)

        # sample neighbors and construct interaction table
        kg_graph = dataset.kg_graph(form='coo', value_field='relation_id')
        adj_entity, adj_relation = self.construct_adj(kg_graph)
        self.adj_entity, self.adj_relation = adj_entity.to(
            self.device), adj_relation.to(self.device)

        inter_feat = dataset.dataset.inter_feat.values
        pos_users = torch.from_numpy(inter_feat[:, 0])
        pos_items = torch.from_numpy(inter_feat[:, 1])
        pos_label = torch.ones(pos_items.shape)
        pos_interaction_table, self.offset = self.get_interaction_table(
            pos_users, pos_items, pos_label)
        self.interaction_table = self.sample_neg_interaction(
            pos_interaction_table, self.offset)

        # define function
        self.softmax = nn.Softmax(dim=-1)
        self.linear_layers = torch.nn.ModuleList()
        for i in range(self.n_iter):
            self.linear_layers.append(nn.Linear(
                self.embedding_size if not self.aggregator_class == 'concat' else self.embedding_size * 2,
                self.embedding_size))
        self.ReLU = nn.ReLU()
        self.Tanh = nn.Tanh()
        self.bce_loss = nn.BCEWithLogitsLoss()
        self.l2_loss = EmbLoss()

        # parameters initialization
        self.apply(xavier_normal_initialization)

    def get_interaction_table(self, user_id, item_id, y):
        r"""Get interaction_table that is used for fetching user-item interaction label in LS regularization.

        Args:
            user_id(torch.Tensor): the user id in user-item interactions, shape: [n_interactions, 1]
            item_id(torch.Tensor): the item id in user-item interactions, shape: [n_interactions, 1]
            y(torch.Tensor): the label in user-item interactions, shape: [n_interactions, 1]

        Returns:
            tuple:
                - interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
                - offset(int): The offset that is used for calculating the key(index) in interaction_table
        """
        offset = len(str(self.n_entities))
        offset = 10 ** offset
        keys = user_id * offset + item_id
        # Fix: cast to int64 (.long) instead of int32 (.int). The composite key
        # user_id * offset + item_id overflows 32 bits on large datasets, and
        # label_smoothness_predict computes its lookup keys in int64 — a dtype
        # mismatch made those lookups silently fall back to the 0.5 default.
        keys = keys.long().cpu().numpy().tolist()
        values = y.float().cpu().numpy().tolist()
        interaction_table = dict(zip(keys, values))
        return interaction_table, offset

    def sample_neg_interaction(self, pos_interaction_table, offset):
        r"""Sample neg_interaction to construct train data.

        Args:
            pos_interaction_table(dict): the interaction_table that only contains pos_interaction.
            offset(int): The offset that is used for calculating the key(index) in interaction_table

        Returns:
            interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
        """
        pos_num = len(pos_interaction_table)
        neg_num = 0
        neg_interaction_table = {}
        while neg_num < pos_num:
            # Fix: random.randint's upper bound is inclusive, so the previous
            # randint(0, self.n_users) could sample the out-of-range id
            # `n_users` (and likewise `n_items`); valid ids are 0..n-1.
            user_id = random.randint(0, self.n_users - 1)
            item_id = random.randint(0, self.n_items - 1)
            keys = user_id * offset + item_id
            if keys not in pos_interaction_table:
                neg_interaction_table[keys] = 0.
                neg_num += 1
        interaction_table = {**pos_interaction_table, **neg_interaction_table}
        return interaction_table

    def construct_adj(self, kg_graph):
        r"""Get neighbors and corresponding relations for each entity in the KG.

        Args:
            kg_graph(scipy.sparse.coo_matrix): an undirected graph

        Returns:
            tuple:
                - adj_entity (torch.LongTensor): each line stores the sampled neighbor entities for a given entity,
                  shape: [n_entities, neighbor_sample_size]
                - adj_relation (torch.LongTensor): each line stores the corresponding sampled neighbor relations,
                  shape: [n_entities, neighbor_sample_size]
        """
        # self.logger.info('constructing knowledge graph ...')
        # treat the KG as an undirected graph
        kg_dict = dict()
        for triple in zip(kg_graph.row, kg_graph.data, kg_graph.col):
            head = triple[0]
            relation = triple[1]
            tail = triple[2]
            if head not in kg_dict:
                kg_dict[head] = []
            kg_dict[head].append((tail, relation))
            if tail not in kg_dict:
                kg_dict[tail] = []
            kg_dict[tail].append((head, relation))

        # self.logger.info('constructing adjacency matrix ...')
        # each line of adj_entity stores the sampled neighbor entities for a given entity
        # each line of adj_relation stores the corresponding sampled neighbor relations
        entity_num = kg_graph.shape[0]
        adj_entity = np.zeros(
            [entity_num, self.neighbor_sample_size], dtype=np.int64)
        adj_relation = np.zeros(
            [entity_num, self.neighbor_sample_size], dtype=np.int64)
        for entity in range(entity_num):
            if entity not in kg_dict.keys():
                # Isolated entity: self-loop with the padding relation 0.
                adj_entity[entity] = np.array(
                    [entity] * self.neighbor_sample_size)
                adj_relation[entity] = np.array(
                    [0] * self.neighbor_sample_size)
                continue

            neighbors = kg_dict[entity]
            n_neighbors = len(neighbors)
            # Sample without replacement when enough neighbors exist,
            # otherwise sample with replacement to fill the fixed-size row.
            if n_neighbors >= self.neighbor_sample_size:
                sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
                                                   replace=False)
            else:
                sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
                                                   replace=True)
            adj_entity[entity] = np.array(
                [neighbors[i][0] for i in sampled_indices])
            adj_relation[entity] = np.array(
                [neighbors[i][1] for i in sampled_indices])

        return torch.from_numpy(adj_entity), torch.from_numpy(adj_relation)

    def get_neighbors(self, items):
        r"""Get neighbors and corresponding relations for each entity in items from adj_entity and adj_relation.

        Args:
            items(torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ]

        Returns:
            tuple:
                - entities(list): Entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
                  dimensions of entities: {[batch_size, 1],
                  [batch_size, n_neighbor],
                  [batch_size, n_neighbor^2],
                  ...,
                  [batch_size, n_neighbor^n_iter]}
                - relations(list): Relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for
                  entities. Relations have the same shape as entities.
        """
        items = torch.unsqueeze(items, dim=1)
        entities = [items]
        relations = []
        for i in range(self.n_iter):
            index = torch.flatten(entities[i])
            neighbor_entities = torch.reshape(torch.index_select(
                self.adj_entity, 0, index), (self.batch_size, -1))
            neighbor_relations = torch.reshape(torch.index_select(
                self.adj_relation, 0, index), (self.batch_size, -1))
            entities.append(neighbor_entities)
            relations.append(neighbor_relations)
        return entities, relations

    def aggregate(self, user_embeddings, entities, relations):
        r"""For each item, aggregate the entity representation and its neighborhood representation into a single vector.

        Args:
            user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size, embedding_size]
            entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
                dimensions of entities: {[batch_size, 1],
                [batch_size, n_neighbor],
                [batch_size, n_neighbor^2],
                ...,
                [batch_size, n_neighbor^n_iter]}
            relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
                relations have the same shape as entities.

        Returns:
            item_embeddings(torch.FloatTensor): The embeddings of items, shape: [batch_size, embedding_size]
        """
        entity_vectors = [self.entity_embedding(i) for i in entities]
        relation_vectors = [self.relation_embedding(i) for i in relations]

        for i in range(self.n_iter):
            entity_vectors_next_iter = []
            for hop in range(self.n_iter - i):
                shape = (self.batch_size, -1,
                         self.neighbor_sample_size, self.embedding_size)
                self_vectors = entity_vectors[hop]
                neighbor_vectors = torch.reshape(
                    entity_vectors[hop + 1], shape)
                neighbor_relations = torch.reshape(
                    relation_vectors[hop], shape)

                # mix_neighbor_vectors
                user_embeddings = torch.reshape(user_embeddings,
                                                (self.batch_size, 1, 1, self.embedding_size))  # [batch_size, 1, 1, dim]
                user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
                                                  dim=-1)  # [batch_size, -1, n_neighbor]
                user_relation_scores_normalized = torch.unsqueeze(self.softmax(user_relation_scores),
                                                                  dim=-1)  # [batch_size, -1, n_neighbor, 1]
                neighbors_agg = torch.mean(user_relation_scores_normalized * neighbor_vectors,
                                           dim=2)  # [batch_size, -1, dim]

                if self.aggregator_class == 'sum':
                    output = torch.reshape(
                        self_vectors + neighbors_agg, (-1, self.embedding_size))  # [-1, dim]
                elif self.aggregator_class == 'neighbor':
                    output = torch.reshape(
                        neighbors_agg, (-1, self.embedding_size))  # [-1, dim]
                elif self.aggregator_class == 'concat':
                    # [batch_size, -1, dim * 2]
                    output = torch.cat([self_vectors, neighbors_agg], dim=-1)
                    output = torch.reshape(
                        output, (-1, self.embedding_size * 2))  # [-1, dim * 2]
                else:
                    raise Exception("Unknown aggregator: " +
                                    self.aggregator_class)

                output = self.linear_layers[i](output)
                # [batch_size, -1, dim]
                output = torch.reshape(
                    output, [self.batch_size, -1, self.embedding_size])

                # Tanh on the final iteration, ReLU on intermediate hops.
                if i == self.n_iter - 1:
                    vector = self.Tanh(output)
                else:
                    vector = self.ReLU(output)
                entity_vectors_next_iter.append(vector)
            entity_vectors = entity_vectors_next_iter

        res = torch.reshape(
            entity_vectors[0], (self.batch_size, self.embedding_size))
        return res

    def label_smoothness_predict(self, user_embeddings, user, entities, relations):
        r"""Predict the label of items by label smoothness.

        Args:
            user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size*2, embedding_size],
            user(torch.FloatTensor): the index of users, shape: [batch_size*2]
            entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
                dimensions of entities: {[batch_size*2, 1],
                [batch_size*2, n_neighbor],
                [batch_size*2, n_neighbor^2],
                ...,
                [batch_size*2, n_neighbor^n_iter]}
            relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
                relations have the same shape as entities.

        Returns:
            predicted_labels(torch.FloatTensor): The predicted label of items, shape: [batch_size*2]
        """
        # calculate initial labels; calculate updating masks for label propagation
        entity_labels = []
        # True means the label of this item is reset to initial value during label propagation
        reset_masks = []
        holdout_item_for_user = None

        for entities_per_iter in entities:
            users = torch.unsqueeze(user, dim=1)  # [batch_size, 1]
            user_entity_concat = users * self.offset + \
                entities_per_iter  # [batch_size, n_neighbor^i]

            # the first one in entities is the items to be held out
            if holdout_item_for_user is None:
                holdout_item_for_user = user_entity_concat

            def lookup_interaction_table(x, _):
                x = int(x)
                # Unknown user-item pairs default to the uncertain label 0.5.
                label = self.interaction_table.setdefault(x, 0.5)
                return label

            initial_label = user_entity_concat.clone().cpu().double()
            initial_label.map_(initial_label, lookup_interaction_table)
            initial_label = initial_label.float().to(self.device)

            # False if the item is held out
            holdout_mask = (holdout_item_for_user - user_entity_concat).bool()
            # True if the entity is a labeled item
            reset_mask = (initial_label - 0.5).bool()
            reset_mask = torch.logical_and(
                reset_mask, holdout_mask)  # remove held-out items
            initial_label = holdout_mask.float() * initial_label + torch.logical_not(
                holdout_mask).float() * 0.5  # label initialization

            reset_masks.append(reset_mask)
            entity_labels.append(initial_label)
        # we do not need the reset_mask for the last iteration
        reset_masks = reset_masks[:-1]

        # label propagation
        relation_vectors = [self.relation_embedding(i) for i in relations]
        for i in range(self.n_iter):
            entity_labels_next_iter = []
            for hop in range(self.n_iter - i):
                masks = reset_masks[hop]
                self_labels = entity_labels[hop]
                neighbor_labels = torch.reshape(entity_labels[hop + 1],
                                                [self.batch_size, -1, self.neighbor_sample_size])
                neighbor_relations = torch.reshape(relation_vectors[hop],
                                                   [self.batch_size, -1, self.neighbor_sample_size,
                                                    self.embedding_size])

                # mix_neighbor_labels
                user_embeddings = torch.reshape(user_embeddings,
                                                [self.batch_size, 1, 1, self.embedding_size])  # [batch_size, 1, 1, dim]
                user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
                                                  dim=-1)  # [batch_size, -1, n_neighbor]
                user_relation_scores_normalized = self.softmax(
                    user_relation_scores)  # [batch_size, -1, n_neighbor]

                neighbors_aggregated_label = torch.mean(user_relation_scores_normalized * neighbor_labels,
                                                        dim=2)  # [batch_size, -1, dim] # [batch_size, -1]
                output = masks.float() * self_labels + torch.logical_not(masks).float() * \
                    neighbors_aggregated_label

                entity_labels_next_iter.append(output)
            entity_labels = entity_labels_next_iter

        predicted_labels = entity_labels[0].squeeze(-1)
        return predicted_labels

    def forward(self, user, item):
        """Compute user embeddings and KG-aggregated item embeddings for a batch."""
        self.batch_size = item.shape[0]
        # [batch_size, dim]
        user_e = self.user_embedding(user)
        # entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items. dimensions of entities:
        # {[batch_size, 1], [batch_size, n_neighbor], [batch_size, n_neighbor^2], ..., [batch_size, n_neighbor^n_iter]}
        entities, relations = self.get_neighbors(item)
        # [batch_size, dim]
        item_e = self.aggregate(user_e, entities, relations)

        return user_e, item_e

    def calculate_ls_loss(self, user, item, target):
        r"""Calculate label smoothness loss.

        Args:
            user(torch.FloatTensor): the index of users, shape: [batch_size*2],
            item(torch.FloatTensor): the index of items, shape: [batch_size*2],
            target(torch.FloatTensor): the label of user-item, shape: [batch_size*2],

        Returns:
            ls_loss: label smoothness loss
        """
        user_e = self.user_embedding(user)
        entities, relations = self.get_neighbors(item)

        predicted_labels = self.label_smoothness_predict(
            user_e, user, entities, relations)
        ls_loss = self.bce_loss(predicted_labels, target)
        return ls_loss

    def calculate_loss(self, interaction):
        user = interaction[self.USER_ID]
        pos_item = interaction[self.ITEM_ID]
        neg_item = interaction[self.NEG_ITEM_ID]

        # First half of the doubled batch are positives (label 1), second half negatives.
        target = torch.zeros(
            len(user) * 2, dtype=torch.float32).to(self.device)
        target[:len(user)] = 1

        users = torch.cat((user, user))
        items = torch.cat((pos_item, neg_item))

        user_e, item_e = self.forward(users, items)
        predict = torch.mul(user_e, item_e).sum(dim=1)
        rec_loss = self.bce_loss(predict, target)

        ls_loss = self.calculate_ls_loss(users, items, target)
        l2_loss = self.l2_loss(user_e, item_e)

        loss = rec_loss + self.ls_weight * ls_loss + self.reg_weight * l2_loss
        return loss

    def predict(self, interaction):
        user = interaction[self.USER_ID]
        item = interaction[self.ITEM_ID]
        user_e, item_e = self.forward(user, item)
        return torch.mul(user_e, item_e).sum(dim=1)

    def full_sort_predict(self, interaction):
        user_index = interaction[self.USER_ID]
        item_index = torch.tensor(range(self.n_items)).to(self.device)

        # Cross every user in the batch with every item, then flatten so
        # forward() sees one long (user, item) batch.
        user = torch.unsqueeze(user_index, dim=1).repeat(
            1, item_index.shape[0])
        user = torch.flatten(user)
        item = torch.unsqueeze(item_index, dim=0).repeat(
            user_index.shape[0], 1)
        item = torch.flatten(item)

        user_e, item_e = self.forward(user, item)
        score = torch.mul(user_e, item_e).sum(dim=1)
        return score.view(-1)
| StarcoderdataPython |
1788392 | import datetime
from django.db import models
from django.utils import timezone
class Location(models.Model):
    """A place where artifacts can be discovered."""
    # Photo of the location; falls back to the question-mark placeholder image.
    image = models.ImageField(upload_to = 'artifacts/uploads', default = 'artifacts/uploads/qmark.png')
    # Display name of the location.
    text = models.CharField(max_length=200)
    def __str__(self):
        # Human-readable representation used by the admin and shell.
        return self.text
class Artifact(models.Model):
    """An artifact found at a Location."""
    # Owning location; deleting the Location cascades to its artifacts.
    # NOTE(review): default=1 assumes a seed Location with pk 1 exists — confirm fixtures.
    location = models.ForeignKey(Location, on_delete=models.CASCADE, default = 1)
    # Short display name of the artifact.
    text = models.CharField(max_length=200)
    # Longer free-form description; may be left empty in forms.
    info = models.TextField(blank = True)
    # Name of the discoverer; defaults to "unknown".
    discovered_by = models.CharField(max_length=200, default="unknown")
    # Publication date; defaults to today at row creation.
    pub_date = models.DateField('Date Published', default=datetime.date.today)
    # Photo of the artifact; falls back to the question-mark placeholder image.
    image = models.ImageField(upload_to = 'artifacts/uploads', default = 'artifacts/uploads/qmark.png')
    def __str__(self):
        # Name plus truncated description for admin/shell listings.
        return f'{self.text} : {self.info[:25]}...'
| StarcoderdataPython |
1792995 | <filename>scrapy/contrib/ibl/extraction/similarity.py
"""
Similarity calculation for Instance based extraction algorithm.
"""
from itertools import izip, count
from operator import itemgetter
from heapq import nlargest
def common_prefix_length(a, b):
    """Calculate the length of the common prefix in both sequences passed.

    For example, the common prefix in this example is [1, 3]
    >>> common_prefix_length([1, 3, 4], [1, 3, 5, 1])
    2

    If there is no common prefix, 0 is returned
    >>> common_prefix_length([1], [])
    0
    """
    # Fix: the original used itertools.izip, which no longer exists on
    # Python 3; the builtin zip behaves identically here (it stops at the
    # shorter sequence) and also works on Python 2.
    i = -1
    for i, (x, y) in enumerate(zip(a, b)):
        if x != y:
            return i
    # Loop finished without a mismatch: the whole shorter sequence matched,
    # so the prefix length is the last index + 1 (or 0 for empty input).
    return i + 1
def common_prefix(*sequences):
    """determine the common prefix of all sequences passed

    For example:
    >>> common_prefix('abcdef', 'abc', 'abac')
    ['a', 'b']
    """
    # Fix: the original used itertools.izip, which no longer exists on
    # Python 3; the builtin zip behaves identically here.
    prefix = []
    for sample in zip(*sequences):
        first = sample[0]
        if all(x == first for x in sample[1:]):
            prefix.append(first)
        else:
            break
    return prefix
def longest_unique_subsequence(to_search, subsequence, range_start=0,
        range_end=None):
    """Find the longest unique subsequence of items in a list or array. This
    searches the to_search list or array looking for the longest overlapping
    match with subsequence. If the largest match is unique (there is no other
    match of equivalent length), the index and length of match is returned. If
    there is no match, (None, None) is returned.

    Please see section 3.2 of Extracting Web Data Using Instance-Based
    Learning by <NAME> and <NAME>

    For example, the longest match occurs at index 2 and has length 3
    >>> to_search = [6, 3, 2, 4, 3, 2, 5]
    >>> longest_unique_subsequence(to_search, [2, 4, 3])
    (2, 3)

    When there are two equally long subsequences, it does not generate a match
    >>> longest_unique_subsequence(to_search, [3, 2])
    (None, None)

    range_start and range_end specify a range in which the match must begin
    >>> longest_unique_subsequence(to_search, [3, 2], 3)
    (4, 2)
    >>> longest_unique_subsequence(to_search, [3, 2], 0, 2)
    (1, 2)
    """
    startval = subsequence[0]
    if range_end is None:
        range_end = len(to_search)

    # the comparison to startval ensures only matches of length >= 1 and
    # reduces the number of calls to the common_length function
    # Fix: the original used xrange, which no longer exists on Python 3;
    # range is the equivalent (and also valid Python 2).
    matches = ((i, common_prefix_length(to_search[i:], subsequence))
            for i in range(range_start, range_end) if startval == to_search[i])

    # Only the top two candidates matter: one winner, plus a possible tie.
    best2 = nlargest(2, matches, key=itemgetter(1))
    # if there is a single unique best match, return that
    if len(best2) == 1 or len(best2) == 2 and best2[0][1] != best2[1][1]:
        return best2[0]
    return None, None
def similar_region(extracted_tokens, template_tokens, labelled_region,
        range_start=0, range_end=None):
    """Given a labelled section in a template, identify a similar region
    in the extracted tokens.

    The start and end index of the similar region in the extracted tokens
    is returned.

    This will return a tuple containing:
    (match score, start index, end index)
    where match score is the sum of the length of the matching prefix and
    suffix. If there is no unique match, (0, None, None) will be returned.

    start_index and end_index specify a range in which the match must begin
    """
    data_length = len(extracted_tokens)
    if range_end is None:
        range_end = data_length
    # calculate the prefix score by finding a longest subsequence in
    # reverse order
    reverse_prefix = template_tokens[labelled_region.start_index::-1]
    reverse_tokens = extracted_tokens[::-1]
    (rpi, pscore) = longest_unique_subsequence(reverse_tokens, reverse_prefix,
            data_length - range_end, data_length - range_start)
    # None means nothing extracted. Index 0 means there cannot be a suffix.
    # (A reversed-index of 0 is the last token, so nothing can follow it —
    # falsy check deliberately covers both None and 0.)
    if not rpi:
        return 0, None, None
    # convert to an index from the start instead of in reverse
    prefix_index = len(extracted_tokens) - rpi - 1
    # An open-ended region (no end_index) only needs the prefix match.
    if labelled_region.end_index is None:
        return pscore, prefix_index, None
    suffix = template_tokens[labelled_region.end_index:]

    # if it's not a paired tag, use the best match between prefix & suffix
    if labelled_region.start_index == labelled_region.end_index:
        (match_index, sscore) = longest_unique_subsequence(extracted_tokens,
            suffix, prefix_index, range_end)
        # Prefix and suffix agree on the same position: combine their scores.
        if match_index == prefix_index:
            return (pscore + sscore, prefix_index, match_index)
        elif pscore > sscore:
            return pscore, prefix_index, prefix_index
        elif sscore > pscore:
            return sscore, match_index, match_index
        # Equal but conflicting scores: ambiguous, treat as no match.
        return 0, None, None

    # calculate the suffix match on the tokens following the prefix. We could
    # consider the whole page and require a good match.
    (match_index, sscore) = longest_unique_subsequence(extracted_tokens,
        suffix, prefix_index + 1, range_end)
    if match_index is None:
        return 0, None, None
    return (pscore + sscore, prefix_index, match_index)
| StarcoderdataPython |
1736835 | import pandas as pd
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import LabelEncoder
import Dataset
import text_normalization
from pickle import dump, load
from sklearn.model_selection import train_test_split
def loadTrainValData(batchsize=16, num_worker=2, pretraine_path="bert-base-uncased"):
    """Split training_data.csv 80/20 (stratified on sarcasm+sentiment), clean
    the tweets and return (train_loader, dev_loader)."""
    full_frame = pd.read_csv('data/training_data.csv', delimiter=',')
    train_frame, dev_frame = train_test_split(
        full_frame, test_size=0.2, stratify=full_frame[['sarcasm', 'sentiment']],
        random_state=42, shuffle=True)
    # Persist the raw (pre-cleaning) dev split for later inspection.
    dev_frame.to_csv('data/dev_set.csv')
    train_frame['tweet'] = train_frame['tweet'].apply(text_normalization.clean)
    dev_frame['tweet'] = dev_frame['tweet'].apply(text_normalization.clean)
    print(f'Training data size {train_frame.shape}')
    print(f'Validation data size {dev_frame.shape}')
    train_loader = DataLoader(dataset=Dataset.TrainDataset(train_frame, pretraine_path),
                              batch_size=batchsize, shuffle=True, num_workers=num_worker)
    dev_loader = DataLoader(dataset=Dataset.TrainDataset(dev_frame, pretraine_path),
                            batch_size=batchsize, shuffle=False, num_workers=num_worker)
    return train_loader, dev_loader
def loadTestData(batchsize=16, num_worker=2, pretraine_path="bert-base-uncased"):
    """Read the test CSV, clean the tweets and wrap them in a non-shuffled DataLoader."""
    test_frame = pd.read_csv('data/test_set.csv', delimiter=',')
    print(f'Test data size {test_frame.shape}')
    test_frame['tweet'] = test_frame['tweet'].apply(text_normalization.clean)
    test_dataset = Dataset.TestDataset(test_frame, pretraine_path)
    return DataLoader(dataset=test_dataset, batch_size=batchsize, shuffle=False,
                      num_workers=num_worker)
def loadTrainValData_v2(batchsize=16, num_worker=2, pretraine_path="bert-base-uncased"):
    """Load the pre-split ArSarcasm train/test CSVs, clean the tweets and
    return (train_loader, dev_loader)."""
    train_frame = pd.read_csv('data/ArSarcasm_train.csv', delimiter=',')
    train_frame['tweet'] = train_frame['tweet'].apply(text_normalization.clean)
    dev_frame = pd.read_csv('data/ArSarcasm_test.csv', delimiter=',')
    dev_frame['tweet'] = dev_frame['tweet'].apply(text_normalization.clean)
    print(f'Training data size {train_frame.shape}')
    print(f'Validation data size {dev_frame.shape}')
    train_loader = DataLoader(dataset=Dataset.TrainDataset(train_frame, pretraine_path),
                              batch_size=batchsize, shuffle=True, num_workers=num_worker)
    dev_loader = DataLoader(dataset=Dataset.TrainDataset(dev_frame, pretraine_path),
                            batch_size=batchsize, shuffle=False, num_workers=num_worker)
    return train_loader, dev_loader
| StarcoderdataPython |
314058 | <gh_stars>10-100
import requests
from bs4 import BeautifulSoup, NavigableString, Tag
import time
import urllib
import pickle
# Fetch the IMSDb index page that lists every script.
res = requests.get('http://www.imsdb.com/all%20scripts/').text
soup = BeautifulSoup(res, 'html5lib')
# The third top-aligned table cell holds the per-movie <p> entries —
# NOTE(review): this assumes the site layout is unchanged; confirm before reuse.
movies = soup.find_all('td', {'valign': 'top'})[2].find_all('p')
base_url = 'http://www.imsdb.com'
# Build absolute, URL-quoted links to each movie's detail page.
movie_urls = [
    base_url +
    urllib.parse.quote(
        m.find('a')['href']) for m in movies]
all_meta = []
# all_meta = pickle.load(open('meta_dicts.pkl', 'rb'))
# NOTE(review): the [:3] slice limits the crawl to three movies — presumably a
# debugging cap; remove it to crawl the full list.
for i, url in enumerate(movie_urls[:3]):
    print(i)
    res = requests.get(url).text
    soup = BeautifulSoup(res, 'html5lib')
    script_details = soup.find('table', {'class': 'script-details'})
    title = script_details.find('h1').text.strip()
    split_details = script_details.find_all('td')[2]
    meta_data = {'title': title}
    # Each <b> element is a field label; its following siblings hold the value
    # (plain text and/or <a> links, joined with ';').
    for t in split_details.find_all('b'):
        sibling_data = ''
        for s in t.next_siblings:
            if isinstance(s, NavigableString):
                # First meaningful text node ends this field's value.
                if len(str(s).strip()) > 1:
                    sibling_data += str(s).strip()
                break
            elif isinstance(s, Tag):
                try:
                    if s.name == 'a':
                        sibling_data += s.text + ';'
                except:
                    # NOTE(review): bare except swallows every error here —
                    # consider narrowing to the exception actually expected.
                    pass
                # The next <b> starts a new field.
                if s.name == 'b':
                    break
        meta_data[t.text] = sibling_data
    all_meta.append(meta_data)
    # Download the script HTML when the details table links to a "Read" page.
    if "Read" in script_details.find_all('a')[-1].text:
        script_link = base_url + \
            urllib.parse.quote(script_details.find_all('a')[-1]['href'])
        script_path = "scripts/" + title + '.html'
        with open(script_path, 'w') as f:
            f.write(requests.get(script_link).text)
    else:
        script_path = "NA"
    meta_data['script_path'] = script_path
    # Persist progress after every movie so a crash loses at most one entry.
    pickle.dump(all_meta, open('meta_dicts.pkl', 'wb'))
    # Be polite to the server between requests.
    time.sleep(1)
| StarcoderdataPython |
6621660 | """An easy-to-use wrapper for NTFS-3G on macOS."""
__version__ = "1.1.1"
| StarcoderdataPython |
1988790 | # Zadání:
#########
#
# Pro výpočet směrodatné odchylky, potřebujete znát střední hodnotu, kterou
# spočtete jako aritmetický průměr hodnot v poli. Napište funkci getMean,
# která vypočte střední hodnotu zadaného pole.
#
# Napište funkci getDeviation, která vypočte směrodatnou odchylku, ve funkci
# použijte volání funkce getMean
#
# Směrodatná odchylka je: σ^2 = 1/n * sum_(i=i)^n (x_i − E(x) ) kde E(x) je
# střední hodnota vypočtená funkcí getMean.
###############################################################################
import math
# Split the input line into a list of integers (space-separated values).
numbers = list( map( int, input("Hodnoty:").split(" ") ) )
def getMean( array ):
    """
    Return the arithmetic mean of the numbers in the given list.

    Parameters:
    ----------
    array - list of numbers to process

    Returns:
    ------
    The arithmetic mean of the numbers (0 for an empty list)
    """
    # Guard against division by zero on empty input.
    if not array:
        return 0
    # Accumulate the values without shadowing the builtin `sum`.
    total = 0
    for value in array:
        total += value
    return total / len(array)
def getDeviation( numbers ):
    """
    Compute the population standard deviation of the given numbers.

    Parameters:
    ----------
    numbers - list of numbers to process

    Returns:
    ------
    The population standard deviation (0 for an empty list)
    """
    # Number of elements
    n = len(numbers)
    # Fix: guard against ZeroDivisionError on empty input; returning 0
    # mirrors getMean's behaviour for an empty list.
    if n == 0:
        return 0
    # Sum of squared deviations from the mean
    deviation = 0
    # Mean value computed by the shared helper (required by the assignment)
    mean = getMean( numbers )
    for x in numbers:
        deviation += ( ( x - mean) ** 2)
    # The result is the square root of 1/n times the sum
    return math.sqrt( (1/n) * deviation )
print( getDeviation(numbers) ) | StarcoderdataPython |
237706 | <filename>Emall/loggings.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# @Time : 2020/5/9 8:50
# @Author : 司云中
# @File : loggings.py
# @Software: PyCharm
import abc
import logging
# Factory pattern
class AbstractLoggerFactory(metaclass=abc.ABCMeta):
    """Abstract factory pairing a shared common logger with a role-specific one."""
    @abc.abstractmethod
    def common_logger(self):
        # Return the logger shared by every role.
        pass
    @abc.abstractmethod
    def specific_logger(self):
        # Return the role-specific logger.
        pass
class ConsumerFactory(AbstractLoggerFactory):
    """the factory with consumer"""
    def common_logger(self):
        # Fix: the class is named DjangoLogger; the lowercase `django_logger`
        # was an unresolved name and raised NameError at call time.
        return DjangoLogger().create_logger()
    def specific_logger(self):
        # Consumer-specific logger.
        return Consumer().create_logger()
class ShopperFactory(AbstractLoggerFactory):
    """the factory with shopper"""
    def common_logger(self):
        # Fix: the class is named DjangoLogger; the lowercase `django_logger`
        # was an unresolved name and raised NameError at call time.
        return DjangoLogger().create_logger()
    def specific_logger(self):
        # Shopper-specific logger.
        return Shopper().create_logger()
class AppBase(metaclass=abc.ABCMeta):
    """Abstract base for objects that can hand out a configured logger."""
    @abc.abstractmethod
    def create_logger(self):
        # Return the logging.Logger this wrapper manages.
        pass
class DjangoLogger(AppBase):
    """AppBase implementation exposing the shared 'django' logger."""
    def __init__(self, logger_name='django'):
        """Resolve (or create) the named logger from the logging registry."""
        self.logger = logging.getLogger(logger_name)
    def create_logger(self):
        """Return the logger resolved at construction time."""
        return self.logger
class Consumer(AppBase):
    """AppBase implementation backed by the 'consumer_' logger."""
    def __init__(self, logger_name='consumer_'):
        """Resolve (or create) the consumer logger from the logging registry."""
        self.logger = logging.getLogger(logger_name)
    def create_logger(self):
        """Return the logger resolved at construction time."""
        return self.logger
class Shopper(AppBase):
    """AppBase implementation backed by the 'shopper_' logger."""
    def __init__(self, logger_name='shopper_'):
        """Resolve (or create) the shopper logger from the logging registry."""
        self.logger = logging.getLogger(logger_name)
    def create_logger(self):
        """Return the logger resolved at construction time."""
        return self.logger
class InterfaceLogger:
    """Facade that materialises both loggers from a factory up front."""
    def __init__(self, factory):
        """Eagerly build the common and role-specific loggers from `factory`."""
        self.common_logger = factory.common_logger()
        self.specific_logger = factory.specific_logger()
    def get_common_logger(self):
        """Logger shared across roles."""
        return self.common_logger
    def get_specific_logger(self):
        """Role-specific logger."""
        return self.specific_logger
# Singleton pattern
class Logging:
    """Process-wide logger cache: at most one cached logger object per name."""

    # Shared cache mapping logger name -> logging.Logger.
    _instance = {}

    @classmethod
    def get_logger(cls, logger_name):
        """Return the cached logger; raises KeyError if logger() was never
        called for this name."""
        return cls._instance[logger_name]

    @classmethod
    def logger(cls, logger_name):
        """Return the cached logger for `logger_name`, creating it on first use.

        Fix: the previous code called `_instance.setdefault(logger_name, None)`,
        which inserted a None placeholder into the cache on every call before
        immediately overwriting it; a plain membership test avoids that
        needless mutation and the confusing truthiness check.
        """
        if logger_name not in cls._instance:
            cls._instance[logger_name] = logging.getLogger(logger_name)
        return cls._instance[logger_name]
| StarcoderdataPython |
class Solution:
    def rob(self, nums: List[int]) -> int:
        """House Robber: maximum sum of non-adjacent elements of nums.

        Recurrence: best(i) = max(nums[i] + best(i-2), best(i-1)).
        Tracked with two rolling values instead of the full `hr` table the
        original kept; the final value equals max(hr) since the sequence is
        non-decreasing.
        """
        best_two_back = 0  # best(i-2)
        best_one_back = 0  # best(i-1); also the answer for an empty list
        for value in nums:
            best_two_back, best_one_back = (
                best_one_back,
                max(best_two_back + value, best_one_back),
            )
        return best_one_back
| StarcoderdataPython |
98170 | <gh_stars>0
from PyQt5.QtCore import *
class Factorial(QObject):
    """Qt-callable factorial helper, exposed to QML/JS through pyqtSlot."""

    @pyqtSlot(int, result=int)
    def factorial(self, n):
        # Recursive definition: 0! = 1! = 1, otherwise n! = n * (n-1)!.
        # (Kept exactly as the original: values outside {0, 1} that are
        # negative would recurse without bound.)
        return 1 if n in (0, 1) else n * self.factorial(n - 1)
| StarcoderdataPython |
312604 | '''
Licensing Information: Please do not distribute or publish solutions to this
project. You are free to use and extend Driverless Car for educational
purposes. The Driverless Car project was developed at Stanford, primarily by
<NAME> (<EMAIL>). It was inspired by the Pacman projects.
'''
from engine.const import Const
import util, math, random, collections
# Class: ExactInference
# ---------------------
# Maintain and update a belief distribution over the probability of a car
# being in a tile using exact updates (correct, but slow times).
class ExactInference(object):
    """Exact belief tracking for one car over a numRows x numCols tile grid.

    Maintains a full posterior (util.Belief) over every tile and updates it
    with the HMM forward algorithm: observe() applies the emission model,
    elapseTime() applies the learned transition model.
    """

    def __init__(self, numRows, numCols):
        # Grader hook: when True, elapseTime() is a no-op so the observe step
        # can be graded in isolation.
        self.skipElapse = False  ### ONLY USED BY GRADER.PY in case problem 3 has not been completed
        self.belief = util.Belief(numRows, numCols)
        self.transProb = util.loadTransProb()

    def observe(self, agentX, agentY, observedDist):
        """Condition the belief on a noisy sonar distance reading.

        agentX/agentY give our own car's position; observedDist is the true
        distance to the tracked car plus mean-zero Gaussian noise with
        standard deviation Const.SONAR_STD.  (Assignment stub -- the solution
        is intentionally left unimplemented.)
        """
        # BEGIN_YOUR_CODE (our solution is 6 lines of code, but don't worry if you deviate from this)
        raise Exception("Not implemented yet")
        # END_YOUR_CODE

    def elapseTime(self):
        """Propagate the belief one time step through self.transProb.

        (Assignment stub -- the solution is intentionally left unimplemented.)
        """
        if self.skipElapse: return  ### ONLY FOR THE GRADER TO USE IN Problem 2
        # BEGIN_YOUR_CODE (our solution is 7 lines of code, but don't worry if you deviate from this)
        raise Exception("Not implemented yet")
        # END_YOUR_CODE

    def getBelief(self):
        """Return the current util.Belief over tiles (probabilities sum to 1)."""
        return self.belief
# Class: Particle Filter
# ----------------------
# Maintain and update a belief distribution over the probability of a car
# being in a tile using a set of particles.
class ParticleFilter(object):
    """Approximate belief tracking for one car using a particle set.

    Instead of the full posterior kept by ExactInference, the belief is
    represented by NUM_PARTICLES samples stored as a {(row, col): count}
    dictionary, which keeps each update O(number of particles).
    """

    NUM_PARTICLES = 200

    def __init__(self, numRows, numCols):
        self.belief = util.Belief(numRows, numCols)
        # Transition model as a nested dict:
        # self.transProbDict[oldTile][newTile] -> probability (missing pairs
        # are implicitly zero via defaultdict(int)).
        self.transProb = util.loadTransProb()
        self.transProbDict = dict()
        for (oldTile, newTile) in self.transProb:
            if not oldTile in self.transProbDict:
                self.transProbDict[oldTile] = collections.defaultdict(int)
            self.transProbDict[oldTile][newTile] = self.transProb[(oldTile, newTile)]
        # Initialize the particles uniformly at random over known tiles.
        # BUG FIX: dict.keys() is a non-indexable view object on Python 3, so
        # the original `potentialParticles[particleIndex]` raised TypeError;
        # materializing the keys as a list works on both Python 2 and 3.
        self.particles = collections.defaultdict(int)
        potentialParticles = list(self.transProbDict.keys())
        for i in range(self.NUM_PARTICLES):
            particleIndex = int(random.random() * len(potentialParticles))
            self.particles[potentialParticles[particleIndex]] += 1
        self.updateBelief()

    def updateBelief(self):
        """Recompute self.belief (normalized) from the current particle counts."""
        newBelief = util.Belief(self.belief.getNumRows(), self.belief.getNumCols(), 0)
        for tile in self.particles:
            newBelief.setProb(tile[0], tile[1], self.particles[tile])
        newBelief.normalize()
        self.belief = newBelief

    def observe(self, agentX, agentY, observedDist):
        """Reweight the particles by the sonar emission model, then resample
        NUM_PARTICLES new particles from the reweighted distribution.

        agentX/agentY: our own car's position; observedDist: true distance
        plus mean-zero Gaussian noise with std Const.SONAR_STD.
        (Assignment stub -- the solution is intentionally left unimplemented.)
        """
        # BEGIN_YOUR_CODE (our solution is 12 lines of code, but don't worry if you deviate from this)
        raise Exception("Not implemented yet")
        # END_YOUR_CODE
        self.updateBelief()

    def elapseTime(self):
        """Advance every particle one time step through self.transProbDict.

        (Assignment stub -- the solution is intentionally left unimplemented.)
        """
        # BEGIN_YOUR_CODE (our solution is 6 lines of code, but don't worry if you deviate from this)
        raise Exception("Not implemented yet")
        # END_YOUR_CODE

    def getBelief(self):
        """Return the particle-based util.Belief (probabilities sum to 1)."""
        return self.belief
| StarcoderdataPython |
4812640 | from core.settings import SITE_BASE_URL
from weedid.models import Dataset, WeedidUser
from weedid.utils import send_email
from textwrap import dedent
def upload_notification(upload_id):
    """Email every staff member that a new dataset upload awaits review.

    Args:
        upload_id: primary key (upload_id field) of the Dataset just uploaded.
    """
    upload_entity = Dataset.objects.get(upload_id=upload_id)
    uploader = upload_entity.user
    # NOTE(review): assumes metadata always carries a 'name' key -- confirm
    # the upload pipeline guarantees this.
    email_body = f"""\
    User {uploader.username} <{uploader.email}> has uploaded a dataset {upload_entity.metadata['name']} to review.
    {'Please review it now by accessing the dataset here: ' + SITE_BASE_URL + '/datasets/' + upload_id}.
    """
    # Notify all staff accounts.
    staff_recipients = [
        staff.email for staff in WeedidUser.objects.filter(is_staff=True)
    ]
    send_email(
        f"New upload ({upload_entity.metadata['name']})",
        dedent(email_body),
        staff_recipients,
    )
def review_notification(message, upload_id):
    """Email the uploader with the outcome of the review.

    Args:
        message: outcome word inserted into the mail; the body branches on
            the exact string 'approved' (anything else is treated as a
            rejection).
        upload_id: primary key (upload_id field) of the reviewed Dataset.
    """
    upload_entity = Dataset.objects.get(upload_id=upload_id)
    uploader = upload_entity.user
    email_body = f"""\
    Dear {uploader.username},
    Many thanks for contributing to a growing community and repository of weed image datasets.
    Your dataset upload {upload_entity.metadata['name']} has been {message} after review.
    {'Congratulations! You can now view the entire dataset online from ' + SITE_BASE_URL + '/datasets/' + upload_id if message == 'approved' else 'Unfortunately at this stage your dataset has not been approved. Please contact <EMAIL> for further information.'}.
    Regards,
    Weed-AI Team
    """
    send_email(
        f"Your upload ({upload_entity.metadata['name']}) has been {message}",
        dedent(email_body),
        [uploader.email],
    )
| StarcoderdataPython |
1809805 | <filename>InfrastructureManager/utils/utils.py
"""
A collection of common utility functions which can be used by any
module within the AppScale Infrastructure Manager implementation.
"""
import os
import sys
import time
import uuid
__author__ = 'hiranya'
__email__ = '<EMAIL>'
def get_secret(filename='/etc/appscale/secret.key'):
    """Return the AppScale secret key read from the given file.

    Args:
      filename  Path of the key file (optional; defaults to
                /etc/appscale/secret.key).
    Returns:
      The secret key string with its trailing newline chomped.
    Raises:
      IOError  If the file does not exist (propagated from read_file).
    """
    key_path = os.path.abspath(filename)
    return read_file(key_path, chomp=True)
def read_file(location, chomp=True):
    """
    Read the specified file and return the contents. Optionally
    the file content could be subjected to a chomp operation
    before returning.

    Args:
      location  Location of the file that needs to be read
      chomp     True if a single trailing newline should be stripped
                prior to returning (optional, defaults to True)

    Raises:
      IOError  If the specified file does not exist
    """
    # Use a context manager so the handle is closed even if read() raises
    # (the original leaked the handle on error).
    with open(location, 'r') as file_handle:
        contents = file_handle.read()
    if chomp:
        return contents.rstrip('\n')
    else:
        return contents
def write_key_file(location, content):
    """
    Write the specified content to the file locations in the given list
    and set the file permissions to 0600.

    Args:
      location  A file name (string) or a list of file names
      content   Content of the cryptographic key
    """
    # isinstance is the idiomatic (and subclass-safe) form of the original
    # `type(location) == type('')` check.
    if isinstance(location, str):
        location = [location]
    for entry in location:
        path = os.path.abspath(entry)
        # Context manager guarantees the handle is closed on error.
        with open(path, 'w') as file_handle:
            file_handle.write(content)
        # BUG FIX: the original used the Python-2-only octal literal `0600`,
        # which is a SyntaxError on Python 3; `0o600` is valid on 2.6+ and 3.
        os.chmod(path, 0o600)
def log(msg):
    """
    Log the specified message to the stdout and flush the stream.

    Args:
      msg  Message to be logged
    """
    # BUG FIX: the original used the Python-2-only `print msg` statement;
    # the call form works identically on both Python 2 and 3.
    print(msg)
    sys.stdout.flush()
def get_random_alphanumeric(length=10):
    """Return a random lowercase-hex string of the given length.

    Args:
      length  Desired length (optional, defaults to 10).
    Returns:
      The first `length` characters of a dashless UUID4.
    """
    # uuid4().hex is exactly str(uuid4()) with the dashes removed.
    token = uuid.uuid4().hex
    return token[:length]
def flatten(the_list):
    """
    Flatten all the elements in the given list into a single list.
    For an example if the input list is [1, [2,3], [4,5,[6,7]]],
    the resulting list will be [1,2,3,4,5,6,7].

    Args:
      the_list  A list of items where each member item could be a list

    Returns:
      A single list with no lists as its elements
    """
    result = []
    for entry in the_list:
        # BUG FIX: on Python 3 strings (and bytes) have __iter__, so the
        # original recursed into them character by character and then
        # recursed forever on each 1-char string.  Treat them as atoms,
        # which matches the original Python 2 behaviour.
        if hasattr(entry, '__iter__') and not isinstance(entry, (str, bytes)):
            result.extend(flatten(entry))
        else:
            result.append(entry)
    return result
def has_parameter(param, params):
    """
    Checks whether the parameter param is present in the params map.

    Args:
      param   A parameter name
      params  A dictionary of parameters

    Returns:
      True if params contains param, False otherwise.
    """
    # BUG FIX: dict.has_key() was removed in Python 3; the `in` operator is
    # equivalent and works on both Python 2 and 3.
    return param in params
def diff(list1, list2):
    """
    Return the entries that are present in list1 but not in list2,
    preserving their order in list1.

    Args:
      list1  A list of elements
      list2  Another list of elements

    Returns:
      A list of elements unique to list1
    """
    return [item for item in list1 if item not in list2]
def obscure_string(input_string):
    """
    Mask all but the last 4 characters of input_string with '*'.
    Useful for obscuring security credentials, credit card numbers etc.

    Args:
      input_string  A string of characters

    Returns:
      A new string where all but the last 4 characters have been replaced
      by '*'.  None and strings shorter than 4 characters are returned
      unchanged (a 4-character string also comes back unchanged, since its
      masked prefix is empty).
    """
    if input_string is None or len(input_string) < 4:
        return input_string
    return '*' * (len(input_string) - 4) + input_string[-4:]
def sleep(seconds):
    """
    Sleep and delay for the specified number of seconds.

    Thin wrapper over time.sleep -- presumably kept as a module-level seam
    so callers can stub it out in tests (TODO confirm).

    Args:
      seconds Number of seconds to sleep
    """
    time.sleep(seconds)
| StarcoderdataPython |
5193984 | <filename>tools/test_PoseEstimation.py
import numpy as np
import cv2
from loadYamlData import *
# Left-camera intrinsics: camera matrix K and distortion coefficients D.
data = loadYamlData("cam_left_1.yml")
K = data["K"]
D = data["D"]
"""
data = loadYamlData("cam_stereo_1.yml")
K1 = data["K1"]
D1 = data["D1"]
K2 = data["K2"]
D2 = data["D2"]
R = data["R"]
T = data["T"]
E = data["E"]
"""
# Focal length (pixels) and principal point used by the commented-out
# cv2.recoverPose call at the bottom of this script.
# NOTE(review): hard-coded -- presumably copied from K; confirm they match.
focal = 837.69
pp = (332.9648, 220.3798)
def convertKeyPoint(kp):
    """Convert a sequence of cv2.KeyPoint-like objects (anything with a .pt
    pair) into an (N, 2) numpy array of (x, y) coordinates."""
    coords = [[keypoint.pt[0], keypoint.pt[1]] for keypoint in kp]
    return np.asarray(coords)
def convertMatchedKeyPoint(kp, matches):
    """For every match, look up the keypoint at index match.imgIdx in `kp`
    and collect its (x, y) position into an (N, 2) numpy array.

    NOTE(review): indexing by imgIdx (rather than queryIdx/trainIdx) mirrors
    the original code -- confirm that is the intended field.
    """
    coords = [list(kp[match.imgIdx].pt) for match in matches]
    return np.asarray(coords)
def reconstruct3Dfrom2D():
    """Placeholder for 3D reconstruction from two 2D views -- currently only
    prints its name; the projection matrices sketched below are not built."""
    # reconstruct 3D from 2D image
    # P1= K1 * [I3 |0]
    # P2 = K2 * {R12 |t12]
    print("reconstruct3Dfrom2D")
# params for ShiTomasi corner detection
feature_params = dict( maxCorners = 100,
                       qualityLevel = 0.3,
                       minDistance = 7,
                       blockSize = 7 )
# Parameters for lucas kanade optical flow
lk_params = dict( winSize  = (15,15),
                  maxLevel = 2,
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
# Create some random colors
color = np.random.randint(0,255,(100,3))
# NOTE(review): `old_gray` is never defined anywhere in this script, so this
# line raises NameError at import time -- this looks like an incomplete copy
# of the OpenCV Lucas-Kanade tutorial; confirm before relying on it.
p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
def opticalFlowTracking(old_gray, frame_gray, p0):
    """Track the feature points p0 from old_gray to frame_gray with pyramidal
    Lucas-Kanade and draw the motion tracks.

    NOTE(review): this function is broken as written -- `mask` and `frame`
    are read before any assignment, `lk_params`/`color` come from module
    globals, and the updated `old_frame`/`p0` are locals whose values are
    discarded on return.  It appears to be a half-pasted tutorial snippet;
    do not call it without fixing those references.
    """
    # calculate optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
    # Select good points
    good_new = p1[st==1]
    good_old = p0[st==1]
    # draw the tracks
    for i,(new,old) in enumerate(zip(good_new,good_old)):
        a,b = new.ravel()
        c,d = old.ravel()
        mask = cv2.line(mask, (a,b),(c,d), color[i].tolist(), 2)
        frame = cv2.circle(frame,(a,b),5,color[i].tolist(),-1)
    old_frame = frame_gray.copy()
    p0 = good_new.reshape(-1,1,2)
# Load two frames, detect ORB features, brute-force match them and estimate
# the fundamental matrix between the views.
img1 = cv2.imread("L1.jpg")
img1 = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)
#img1 = cv2.undistort(img1, K,D, None)
# Initiate SIFT detector
orb = cv2.ORB_create()
# create BFMatcher object
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
missedFrame = 0
DCM = []
img2 = cv2.imread("L6.jpg")
# img2 = cv2.undistort(img2, K,D, None)
img2 = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
# find the keypoints and descriptors with SIFT
kp1, des1 = orb.detectAndCompute(img1,None)
kp2, des2 = orb.detectAndCompute(img2,None)
print("len(kp1) = ", len(kp1))
print("len(kp2) = ", len(kp2))
# Match descriptors.
if not kp1 or not kp2 or len(kp1) < 5 or len(kp2)<5:
    # Too few keypoints to attempt matching on this frame pair.
    missedFrame = missedFrame + 1
    print("could not capture keypoints", missedFrame)
else:
    matches = bf.match(des1,des2)
    print("matched points = ", len(matches))
    #for k in range(0, len(matches)):
    #    print(matches[k].queryIdx, matches[k].trainIdx, matches[k].imgIdx, matches[k].distance)
    # Sort them in the order of their distance.
    matches = sorted(matches, key = lambda x:x.distance)
    # Draw first 10 matches.
    matchNumber = 499
    imgOut = cv2.drawMatches(img1 , kp1, img2, kp2, matches[:matchNumber], 2)
    #cv2.imshow("matching",imgOut)
    # cv2.drawKeypoints(img2, kp2, imgOut)
    # im_with_keypoints = cv2.drawKeypoints(img2, kp2, np.array([]))
    # cv2.imshow("matching",im_with_kypoints)
    point1 = convertMatchedKeyPoint(kp1, matches)
    point2 = convertMatchedKeyPoint(kp2, matches)
    print(point1)
    print(point2)
    # Fundamental matrix from the matched point sets.
    F,msk = cv2.findFundamentalMat(point1, point2)
    print(F)
    #retval,R,t, mask = cv2.recoverPose(F, kp1, kp2, focal, pp)
    # print("t= ", t)
cv2.waitKey(0)
cv2.destroyAllWindows()
| StarcoderdataPython |
11233383 | <filename>app/manage.py
import os
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from .models import Ideas
# Flask application wired up with SQLAlchemy, Flask-Migrate and Flask-Script.
app = Flask(__name__, static_folder="", static_url_path="")
app.config.from_object(__name__)
app.config.update(
    SQLALCHEMY_DATABASE_URI=os.environ.get("DATABASE_URL"),
    SQLALCHEMY_TRACK_MODIFICATIONS=False,
)
# initialize the database connection
db = SQLAlchemy(app)
app.config.from_object(__name__)
MIGRATION_DIR = os.path.join("app", "migrations")
# BUG FIX: the keyword was misspelled `dirctory=`, which Flask-Migrate
# silently ignored, so migrations went to the default "migrations" folder
# instead of app/migrations.
migrate = Migrate(app, db, directory=MIGRATION_DIR)
manager = Manager(app)
manager.add_command("db", MigrateCommand)
if __name__ == "__main__":
    manager.run()
| StarcoderdataPython |
# Console banner ("PESO IDEAL" = "ideal weight"; pt-BR program).
print('='*50)
print('PESO IDEAL'.center(50))
print('='*50)
# ANSI-colored separator line reused inside peso().
linha = '\033[1;96m=\033[m' * 50
def peso():
    """Prompt for sex (1 = man, 2 = woman) and height in metres, then print
    the ideal weight: 72.7*h - 58 for men, 62.1*h - 44.7 for women.

    NOTE(review): any option other than 1 or 2 silently prints nothing, and
    non-numeric input raises ValueError -- presumably acceptable for this
    exercise; confirm if hardening is wanted.
    """
    sexo = int(input('''Você é:
[ 1 ] Homem
[ 2 ] Mulher
Digite a número da sua opção: '''))
    print('='*50)
    altura = float(input('\nDigite a sua altura: '))
    print(f'\n{linha}')
    if sexo == 1:
        print(f'\nO peso ideal para você é: {(72.7*altura) - 58:.3f}Kg')
    elif sexo == 2:
        print(f'\nO peso ideal para você é: {(62.1*altura) - 44.7:.3f}Kg')
    print(f'\n{linha}')
peso()
8064807 | <filename>viewsets/__init__.py
try:
    from django.core.exceptions import ImproperlyConfigured
except ImportError:
    # Outside a Django install, alias the name so the except-clause below
    # still has a usable exception type.
    ImproperlyConfigured = ImportError
try:
    from .base import ViewSet
    from .model import ModelViewSet
# Allows to see module metadata outside of a Django project
# (including setup.py).
except (ImportError, ImproperlyConfigured):
    pass
from .patterns import PK, SLUG
# Package metadata (read by setup.py and introspection tools).
__author__ = '<NAME>'
__credits__ = ('<NAME>',)
__license__ = 'BSD License'
__version__ = '0.2.0'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = '3 - Alpha'
1602336 | # import sys
# sys.path.append('../../')
import numpy as np
import pandas as pd
import json
import copy
from plot_helper import coloring_legend, df_col_replace
from constant import REPORTDAYS, HEADER_NAME, COLUMNS_TO_DROP, FIRST_ROW_AFTER_BURNIN
def single_setting_IQR_json_generator(fpath_pattern_list, outfile_dir, outfile_stgy_tag, threshold):
    """Compute T01 interquartile statistics for one strategy and dump them
    to three JSON files (<tag>_25p.json, <tag>_median.json, <tag>_75p.json).

    T01 is the first reporting day (from REPORTDAYS) on which a genotype
    frequency exceeds `threshold`; it is float('inf') when never crossed.

    Args:
        fpath_pattern_list: six '%d'-style filename patterns, one per set in
            the fixed order (set3, set4, set7, set8, set11, set12); the
            placeholder is the run index 1..100.
        outfile_dir: directory for the output files (existence is checked by
            the calling notebook, not here).
        outfile_stgy_tag: filename prefix identifying the strategy.
        threshold: genotype-frequency threshold defining T01.

    The original was a 5-row x 6-set wall of copy-pasted code; it is
    collapsed here into data-driven loops with identical outputs.
    """

    def _t01_quartiles_triple(dflist, pattern, threshold):
        """[25th, 50th, 75th] percentile over 100 runs of the first day the
        summed frequency of genotype columns matching regex `pattern`
        exceeds `threshold` (inf if it never does)."""
        all_100_T01s = []  # in days
        for onerun in dflist:
            combined_geno_freq = onerun.filter(regex=pattern, axis=1).sum(axis=1).values
            T01_this_run = float('inf')
            for idx, val in enumerate(combined_geno_freq):
                if val > threshold:
                    T01_this_run = REPORTDAYS[idx]
                    break
            all_100_T01s.append(T01_this_run)
        assert len(all_100_T01s) == 100
        return np.quantile(all_100_T01s, [0.25, 0.5, 0.75])

    def _t01_quartiles_double(dflist_arg, drug, threshold):
        """Same quartiles for the most dangerous double mutant of `drug`
        ('2-2' for DHA-PPQ, '2-4' otherwise), using df_col_replace to fold
        genotype columns per drug."""
        option = 1
        most_dang_double_tag = '2-2' if drug == 'DHA-PPQ' else '2-4'
        all_100_T01s = []  # in days
        # deep-copy: df_col_replace renames/aggregates columns in place.
        dflist = copy.deepcopy(dflist_arg)
        for i in range(len(dflist)):
            dflist[i] = df_col_replace(dflist[i], drug, option)
            combined_geno_freq = dflist[i][most_dang_double_tag].values
            T01_this_run = float('inf')
            for idx, val in enumerate(combined_geno_freq):
                if val > threshold:
                    T01_this_run = REPORTDAYS[idx]
                    break
            all_100_T01s.append(T01_this_run)
        assert len(all_100_T01s) == 100
        return np.quantile(all_100_T01s, [0.25, 0.5, 0.75])

    # Load the 100 runs of every set, preserving the given set order.
    all_dflists = []
    for fpath in fpath_pattern_list:
        all_dflists.append([
            pd.read_csv(fpath % i, index_col=False, names=HEADER_NAME, sep='\t')
              .drop(columns=COLUMNS_TO_DROP)
            for i in range(1, 101)
        ])

    # (row key, quartile function, pattern-or-drug argument) -- same rows as
    # the original row1..row5 blocks.
    row_specs = [
        ('row1', _t01_quartiles_triple, 'TYY..Y2.'),
        ('row2', _t01_quartiles_triple, 'KNF..Y2.'),
        ('row3', _t01_quartiles_double, 'DHA-PPQ'),
        ('row4', _t01_quartiles_double, 'ASAQ'),
        ('row5', _t01_quartiles_double, 'AL'),
    ]

    iqr_median, iqr_25p, iqr_75p = {}, {}, {}
    for row_key, quartile_fn, arg in row_specs:
        iqr_25p[row_key], iqr_median[row_key], iqr_75p[row_key] = [], [], []
        for dflist in all_dflists:
            q25, q50, q75 = quartile_fn(dflist, arg, threshold)
            iqr_25p[row_key].append(q25)
            iqr_median[row_key].append(q50)
            iqr_75p[row_key].append(q75)

    # Context managers close the handles (the original leaked them).
    with open(outfile_dir + outfile_stgy_tag + '_median.json', 'w') as outfile:
        json.dump(iqr_median, outfile)
    with open(outfile_dir + outfile_stgy_tag + '_25p.json', 'w') as outfile:
        json.dump(iqr_25p, outfile)
    with open(outfile_dir + outfile_stgy_tag + '_75p.json', 'w') as outfile:
        json.dump(iqr_75p, outfile)
| StarcoderdataPython |
1747313 | <gh_stars>1-10
#© Copyright IBM Corporation [2018], [2019] [<NAME>]
#LICENSE: [Apache License 2.0 (Apache-2.0) http://www.apache.org/licenses/LICENSE-2.0]
import sys
import requests
import json
import datetime
from datetime import date,timedelta
def main(dict):
    """IBM Cloud Function entry point: build a text report of HackerOne
    reports that have exceeded their severity-based SLA (counted in
    week-days since group assignment).

    Returns a dict with a 'text' payload on success, or an error body
    carrying the HTTP status of the last failed request.
    """
    #required parameters - actual secrets are entered in the IBM Function action's parameter section. DO NOT PUT SECRETS IN CODE
    # NOTE(review): top_info is read but never used below — confirm it is still needed.
    top_info = dict['top_info']
    h1_get_all_triaged_prod = dict['h1_get_all_triaged_prod']
    h1_get_report = dict['h1_get_report']
    h1_api_name = dict['h1_api_name']
    h1_api_key = dict['h1_api_key']
    # SLA thresholds (in week-days) per severity rating.
    sev_critical = dict['sev_critical']
    sev_high = dict['sev_high']
    sev_medium = dict['sev_medium']
    sev_low = dict['sev_low']
    past_due_report = "\n\n---------------PAST DUE REPORTS---------------\n"
    #counter — NOTE(review): past_due_count is set but never read afterwards.
    past_due_count = 0
    # NOTE(review): the original comment said "fetches 2 pages (200) reports",
    # but the loop below visits pages 1..3 (up to 300 reports) — confirm intent.
    a = 1
    while a <= 3:
        #convert number to string
        page_number = str(a)
        #get new reports
        r = requests.get(h1_get_all_triaged_prod + page_number, auth=(h1_api_name, h1_api_key))
        h1_data = json.loads(r.text)
        for item in h1_data['data']:
            #grab the h1_asset_id type
            h1_asset_id = item['relationships']['structured_scope']['data']['attributes']['asset_identifier']
            #only consider reports scoped to websites
            if h1_asset_id == 'IBM Websites':
                #ignore reports that have no assignee at all
                if 'assignee' in item['relationships']:
                    #verify it isn't assigned to an individual username (group-assigned only)
                    if 'username' not in item['relationships']['assignee']['data']['attributes']:
                        r = requests.get(h1_get_report + item['id'], auth=(h1_api_name, h1_api_key))
                        h1_data2 = json.loads(r.text)
                        # Walk the activity feed until the group-assignment entry is found.
                        found_activity = 0
                        b = 0
                        while found_activity == 0:
                            if h1_data2['data']['relationships']['activities']['data'][b]['type'] == 'activity-group-assigned-to-bug':
                                h1_owner = h1_data2['data']['relationships']['activities']['data'][b]['relationships']['actor']['data']['attributes']['username']
                                #slice the ISO timestamp of that activity into year/month/day strings
                                h1_comment_year_created = (h1_data2['data']['relationships']['activities']['data'][b]['attributes']['created_at'])[0:4]
                                h1_comment_month_created = (h1_data2['data']['relationships']['activities']['data'][b]['attributes']['created_at'])[5:7]
                                h1_comment_day_created = (h1_data2['data']['relationships']['activities']['data'][b]['attributes']['created_at'])[8:10]
                                #converting string to int
                                h1_comment_year_created = int(h1_comment_year_created)
                                h1_comment_month_created = int(h1_comment_month_created)
                                h1_comment_day_created = int(h1_comment_day_created)
                                #calculate today's date
                                todays_date = date.today()
                                h1_date_created = date(h1_comment_year_created,h1_comment_month_created,h1_comment_day_created)
                                todays_date = date(todays_date.year,todays_date.month,todays_date.day)
                                #getting days_elapsed between created and today's date, excluding weekends
                                day_generator = (h1_date_created + timedelta(x + 1) for x in range((todays_date - h1_date_created).days))
                                days_elapsed = sum(1 for day in day_generator if day.weekday() < 5)
                                h1_report_severity = h1_data2['data']['relationships']['severity']['data']['attributes']['rating']
                                #grab last activity date (index 0 = most recent activity)
                                last_activity_date = (h1_data2['data']['relationships']['activities']['data'][0]['attributes']['created_at'])[0:10]
                                # Append a report entry whenever elapsed week-days exceed the severity's SLA.
                                if h1_report_severity == "critical" and days_elapsed >= sev_critical:
                                    missed_days = days_elapsed - sev_critical
                                    missed_days = str(missed_days)
                                    past_due_report = past_due_report + "\nReport: https://hackerone.com/reports/" + h1_data2['data']['id'] + "\nMissed Days: " + missed_days + "\nSeverity: " + h1_report_severity + "\nLast Activity on: " + last_activity_date + "\nOwner: " + h1_owner + "\n"
                                    past_due_count = 1
                                elif h1_report_severity == "high" and days_elapsed >= sev_high:
                                    missed_days = days_elapsed - sev_high
                                    missed_days = str(missed_days)
                                    past_due_report = past_due_report + "\nReport: https://hackerone.com/reports/" + h1_data2['data']['id'] + "\nMissed Days: " + missed_days + "\nSeverity: " + h1_report_severity + "\nLast Activity on: " + last_activity_date + "\nOwner: " + h1_owner + "\n"
                                    past_due_count = 1
                                elif h1_report_severity == "medium" and days_elapsed >= sev_medium:
                                    missed_days = days_elapsed - sev_medium
                                    missed_days = str(missed_days)
                                    past_due_report = past_due_report + "\nReport: https://hackerone.com/reports/" + h1_data2['data']['id'] + "\nMissed Days: " + missed_days + "\nSeverity: " + h1_report_severity + "\nLast Activity on: " + last_activity_date + "\nOwner: " + h1_owner + "\n"
                                    past_due_count = 1
                                elif h1_report_severity == "low" and days_elapsed >= sev_low:
                                    missed_days = days_elapsed - sev_low
                                    missed_days = str(missed_days)
                                    past_due_report = past_due_report + "\nReport: https://hackerone.com/reports/" + h1_data2['data']['id'] + "\nMissed Days: " + missed_days + "\nSeverity: " + h1_report_severity + "\nLast Activity on: " + last_activity_date + "\nOwner: " + h1_owner + "\n"
                                    past_due_count = 1
                                else:
                                    # Within SLA — stop scanning this report's activities.
                                    break;
                                found_activity = 1
                            else:
                                b += 1
        a += 1
    # NOTE(review): only the status of the LAST request issued above is checked here.
    if r.status_code != 200:
        return {
            'statusCode': r.status_code,
            'headers': { 'Content-Type': 'application/json'},
            'body': {'message': 'Error processing your request'}
        }
    else:
        return {
            'text': past_due_report
        }
| StarcoderdataPython |
5012347 | <filename>tests/test_cli.py
import unittest
import outputs
from smb.cli import ps_cmd
from smb.cli.smb_log import get_logger, log
from infi.execute import execute_assert_success, execute
from smb.cli.ibox_connect import InfiSdkObjects
# Fixture data shared by the ordered tests below: share names (including ones
# with spaces and long names to exercise display truncation), a quota-limited
# share, and filesystem names.
share_names = ['share1', 'share 2', 'long_share_3_and more']
limited_share = 'limited_share'
fs_names = ['fs1', 'fs2', 'fs3']
# TODO: add new log for tests
logger = get_logger()
class TestCli(unittest.TestCase):
    """End-to-end tests for the ``smbmgr`` CLI (filesystem/share lifecycle).

    Tests are order-dependent (``test_01_...`` .. ``test_09_...``) and
    drive real cluster operations via subprocess calls.
    """
    @classmethod
    def setUpClass(cls):
        # make sure we are on the Active Node to start with
        cmd = ['smbmgr', 'fs', 'query']
        result = execute(cmd)
        if outputs.not_active_node in result.get_stdout():
            ps_cmd._perform_cluster_failover()
    @classmethod
    def tearDownClass(cls):
        """Best-effort cleanup of all filesystems/volumes the tests created."""
        log(logger, "Starting Teardown")
        sdk = InfiSdkObjects()
        for fs in fs_names + ['fs_test_for_shares']:
            # Bare except is deliberate best-effort cleanup, but it hides real
            # failures — consider narrowing to the expected exception type.
            try:
                cmd = ['smbmgr', 'fs', 'delete', '--name={}'.format(fs), '--yes']
                execute(cmd)
            except:
                pass
        try:
            for fs in fs_names + ['fs_test_for_shares']:
                vol = sdk.volumes.choose(name=fs)
                # NOTE(review): 'vol.delete' is an attribute access, not a call —
                # the volume is likely never deleted; confirm 'vol.delete()' was intended.
                vol.delete
        except:
            pass
    def _get_random_size(self):
        """Return a random size string, e.g. '512GB', across all unit suffixes."""
        import random
        size_unit = random.sample(['MB', 'MiB', 'GB', 'GiB', 'TB', 'TiB'], 1)[0]
        if 'M' in size_unit:
            return str(random.randrange(1000, 100000)) + size_unit
        if 'G' in size_unit:
            return str(random.randrange(1, 1000)) + size_unit
        if 'T' in size_unit:
            return str(1) + size_unit
    def test_fs_query(self):
        # Query works on the active node, then fails after a failover.
        cmd = ['smbmgr', 'fs', 'query']
        result = execute(cmd)
        self.assertIn(outputs.fs_query_header, result.get_stdout())
        ps_cmd._perform_cluster_failover()
        result = execute(cmd)
        result_out = result.get_stdout()
        self.assertNotIn(outputs.fs_query_header, result_out)
        self.assertIn(outputs.not_active_node, result_out)
        # Fail back so subsequent tests run on the active node again.
        ps_cmd._perform_cluster_failover()
    def test_01_fs_create(self):
        for fs in fs_names:
            # size = self._get_random_size()
            size = "1GB"
            cmd = ['smbmgr', 'fs', 'create', '--name={}'.format(fs), '--size={}'.format(size)]
            result = execute_assert_success(cmd).get_stdout()
            # NOTE(review): bare 'raise' with no active exception raises
            # RuntimeError, not the intended failure — consider self.fail().
            if outputs.fs_delete in result:
                raise
        cmd = ['smbmgr', 'fs', 'query']
        result_out = execute_assert_success(cmd).get_stdout()
        for fs in fs_names:
            self.assertIn('{}'.format(fs), result_out)
    def test_02_fs_delete(self):
        for fs in fs_names:
            cmd = ['smbmgr', 'fs', 'delete', '--name={}'.format(fs), '--yes']
            result = execute_assert_success(cmd)
            self.assertIn(outputs.fs_delete.format('{}'.format(fs)), result.get_stdout())
    def test_03_fs_detach_attach(self):
        # Round-trip: create -> detach -> attach -> delete must all succeed.
        cmd = ['smbmgr', 'fs', 'create', '--name=detachable_fs', '--size={}'.format(self._get_random_size())]
        execute_assert_success(cmd)
        cmd = ['smbmgr', 'fs', 'detach', '--name=detachable_fs', '--yes']
        execute_assert_success(cmd)
        cmd = ['smbmgr', 'fs', 'attach', '--name=detachable_fs', '--yes']
        execute_assert_success(cmd)
        cmd = ['smbmgr', 'fs', 'delete', '--name=detachable_fs', '--yes']
        execute_assert_success(cmd)
    def test_04_share_create(self):
        cmd = ['smbmgr', 'fs', 'create', '--name=fs_test_for_shares', '--size=1GB']
        result = execute(cmd)
        for share in share_names:
            cmd = ['smbmgr', 'share', 'create', '--name={}'.format(share),
                   '--path=g:\\fs_test_for_shares\\{}'.format(share), '--mkdir']
            result = execute(cmd).get_stdout()
            self.assertIn(outputs.share_created.format(share), result)
        # Also create one quota-limited share for the resize test below.
        cmd = ['smbmgr', 'share', 'create', '--name={}'.format(limited_share),
               '--path=g:\\fs_test_for_shares\\{}'.format(limited_share),'--size=100MB', '--mkdir']
        result = execute(cmd).get_stdout()
        self.assertIn(outputs.share_created.format(limited_share), result)
    def test_05_share_query(self):
        cmd = ['smbmgr', 'share', 'query']
        result = execute_assert_success(cmd).get_stdout()
        self.assertIn(outputs.share_query_header, result)
        self.assertIn('share1', result)
        self.assertIn('share 2', result)
        # Long names are truncated with an ellipsis in the query output.
        self.assertIn('long_share_3...', result)
    def test_06_share_resize(self):
        # Resizing unlimited shares must be rejected with one of two messages.
        for share in share_names:
            cmd = ['smbmgr', 'share', 'resize', '--name={}'.format(share),
                   '--size={}'.format(self._get_random_size()), '--yes']
            result = execute(cmd).get_stdout()
            if (outputs.bad_share_resize in result) or (outputs.share_limited in result):
                pass
            else:
                self.assertTrue(False) # Test Failure
    def test_07_share_delete(self):
        for share in share_names + [limited_share]:
            cmd = ['smbmgr', 'share', 'delete', '--name={}'.format(share), '--yes']
            result = execute(cmd).get_stdout()
            self.assertIn(outputs.share_deleted.format(share), result)
        cmd = ['smbmgr', 'fs', 'delete', '--name=fs_test_for_shares', '--yes']
        execute(cmd)
    def test_08_share_query(self):
        # After deletion, the query must report that no shares exist.
        cmd = ['smbmgr', 'share', 'query']
        result = execute_assert_success(cmd).get_stdout()
        self.assertIn(outputs.no_shares, result)
    def test_09_config_set(self):
        cmd = ['smbmgr', 'config', 'set', 'TempDriveLetter=v:\\']
        result = execute_assert_success(cmd).get_stdout()
        self.assertIn(outputs.config_set, result)
        cmd = ['smbmgr', 'config', 'set', 'TempDriveLetter=Z:\\']
        # NOTE(review): no assertion follows the second config set — the file
        # appears truncated here; confirm the missing assertIn.
        result = execute_assert_success(cmd).get_stdout()
| StarcoderdataPython |
5174085 | <reponame>tor4z/shinypy<filename>tests/test_msg.py
import json
import random
from shiny.message import Message, Status, Method
from shiny.util import randstr
def test_str_msg_parser():
    """Message parses equivalently from a dict and from its JSON string form."""
    payload = {}
    for _ in range(10):
        payload[randstr(5)] = randstr(5)
    reason = randstr(10)
    raw = {'status': Status.SUCCESS, 'reason': reason, 'data': payload}

    def verify(message):
        assert message.data == payload
        assert message.status == Status.SUCCESS
        assert message.reason == reason
        assert message.keys == payload.keys()
        for key in payload.keys():
            assert message.value(key) == payload.get(key)

    # Construct once from the raw dict, once from its JSON serialization.
    verify(Message(raw))
    verify(Message(json.dumps(raw)))
def test_msg_get():
    """query() with a single key produces a GET message wrapping that key."""
    lookup_key = randstr(5)
    message = Message()
    message.query(lookup_key)
    expected = {'method': Method.GET,
                'data': {'keys': [lookup_key]}}
    assert message.raw == expected
def test_msg_get_multi_keys():
    """query() with a list of keys embeds the whole list in one GET message."""
    lookup_keys = [randstr(5) for _ in range(random.randint(10, 20))]
    message = Message()
    message.query(lookup_keys)
    expected = {'method': 'GET',
                'data': {'keys': lookup_keys}}
    assert message.raw == expected
| StarcoderdataPython |
11256029 | <filename>airflow/migrations/versions/2e541a1dcfed_task_duration.py
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""task_duration
Revision ID: 2e541a<PASSWORD>
Revises: <PASSWORD>
Create Date: 2015-10-28 20:38:41.266143
"""
# revision identifiers, used by Alembic.
revision = '2e541a1dcfed'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    """Widen ``task_instance.duration`` from INTEGER to Float.

    ``batch_alter_table`` is used so the migration also works on SQLite,
    which cannot ALTER COLUMN in place.
    """
    with op.batch_alter_table("task_instance") as batch_op:
        batch_op.alter_column(
            'duration',
            existing_type=mysql.INTEGER(display_width=11),
            type_=sa.Float(),
            existing_nullable=True,
        )
def downgrade():
    """Intentionally irreversible: the INTEGER -> Float widening is kept."""
    pass
| StarcoderdataPython |
3366777 | from .custom_map_tests import *
| StarcoderdataPython |
6619257 | """
Copyright 2018 Accelize
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import requests
class WSListFunction(object):
    """HTTP client for the Accelize metering / licensing web service.

    Thin wrappers around the ``/auth/...`` REST endpoints: OAuth2 token
    retrieval (client-credentials grant), metering queries, license
    generation, CSP configuration and IP/HDK management.  All calls except
    the token request authenticate with a cached bearer token.
    """
    def __init__(self, url=None, login=None, password=None, token=None):
        """Store connection settings; no network traffic happens here.

        Fixed: the default for ``password`` was a syntactically invalid
        redaction placeholder — it is now ``None``.
        """
        self.url = url
        self.login = login
        self.password = password
        self.token = token
    def _get_user_token_raw(self):
        """POST a client-credentials grant; return (parsed body, status code).

        The body is returned as the raw string when it cannot be parsed as JSON.
        """
        r = requests.post(self.url + '/o/token/?grant_type=client_credentials',
                          auth=(self.login, self.password),
                          headers={'Content-Type': 'application/json'})
        # The service may answer with single-quoted pseudo-JSON; normalize
        # quotes before parsing.
        json_acceptable_string = r.content.decode("latin1").replace("'", "\"")
        try:
            text = json.loads(json_acceptable_string)
        except ValueError:  # json.JSONDecodeError subclasses ValueError
            text = json_acceptable_string
        return text, r.status_code
    def _get_user_token(self):
        """Fetch a bearer token and cache it on ``self.token``."""
        text, status_code = self._get_user_token_raw()
        assert status_code == 200
        assert 'access_token' in text
        self.token = text['access_token']
    def _authentifed_call(self, method, url, data=None, headers=None):
        """Issue an authenticated JSON request; return (parsed body, status).

        ``headers`` now defaults to None instead of a shared mutable dict
        (the old default was mutated on every call); a caller-supplied dict
        is copied so the caller's object is never modified.
        """
        headers = {} if headers is None else dict(headers)
        headers['Authorization'] = "Bearer " + str(self.token)
        headers['Content-Type'] = "application/json"
        r = requests.request(method, self.url + url, data=json.dumps(data), headers=headers)
        try:
            text = json.loads(r.content)
        except ValueError:
            text = r.content
        return text, r.status_code
    def _download_authentifed_call(self, method, url, data, headers=None):
        """Issue an authenticated streaming request; return (raw body, status)."""
        headers = {} if headers is None else dict(headers)
        headers['Authorization'] = "Bearer " + str(self.token)
        r = requests.request(method, self.url + url, data=data, headers=headers, stream=True)
        return r.content, r.status_code
    def get_authentification_token(self):
        """Return the currently cached bearer token (may be None)."""
        return self.token
    def application_create(self, data):
        # url(r'^auth/createapplication/', APIMetering.create_application),
        response, status = self._authentifed_call("POST", "/auth/createapplication/", data=data)
        return response, status
    def application_list(self, data):
        # url(r'^auth/listapplication/', APIMetering.list_application),
        response, status = self._authentifed_call("POST", "/auth/listapplication/", data=data)
        return response, status
    def application_delete(self, data):
        # url(r'^auth/deleteapplication/', APIMetering.delete_application),
        response, status = self._authentifed_call("POST", "/auth/deleteapplication/", data=data)
        return response, status
    def metering_information(self, data):
        # url(r'^auth/meteringinformation/', APIMetering.user_metering),
        response, status = self._authentifed_call("POST", "/auth/meteringinformation/", data=data)
        return response, status
    def floatingallinformation(self, data):
        # url(r'^auth/meteringinformation/', APIMetering.user_metering),
        response, status = self._authentifed_call("POST", "/auth/floatingallinformation/", data=data)
        return response, status
    def vendorallinformation(self, data):
        # url(r'^auth/vendorallinformation/', APIMetering.user_metering),
        response, status = self._authentifed_call("POST", "/auth/vendorallinformation/", data=data)
        return response, status
    def vendorcheckinformation(self, data):
        # url(r'^auth/vendorcheckinformation/', APIMetering.user_metering),
        response, status = self._authentifed_call("POST", "/auth/vendorcheckinformation/", data=data)
        return response, status
    def nodelock_information(self, data):
        # url(r'^auth/meteringinformation/', APIMetering.nodelockassociated),
        response, status = self._authentifed_call("POST", "/auth/nodelockassociated/", data=data)
        return response, status
    def metering_get_license_timeout(self):
        response, status = self._authentifed_call("GET", "/auth/getlicensetimeout/")
        return response, status
    def metering_lastinformation(self, data):
        # url(r'^auth/lastmeteringinformation/', APIMetering.last_metering),
        response, status = self._authentifed_call("POST", "/auth/lastmeteringinformation/", data=data)
        return response, status
    def metering_getlicense(self, data):
        # url(r'^auth/metering/genlicense/', APIMetering.get_license ),
        response, status = self._authentifed_call("POST", "/auth/metering/genlicense/", data=data)
        return response, status
    def metering_getlicense_random(self, data):
        # url(r'^auth/metering/genlicense/', APIMetering.get_license ),
        response, status = self._authentifed_call("POST", "/auth/tests/genlicense/", data=data)
        return response, status
    def nodelock_getlicense(self, data):
        response, status = self._authentifed_call("POST", "/auth/metering/genlicense/", data=data)
        return response, status
    def configuration_list(self):
        # url(r'^auth/getlastcspconfiguration/', APIMetering.get_last_configuration),
        response, status = self._authentifed_call("GET", "/auth/getlastcspconfiguration/")
        return response, status
    def configuration_create(self, data):
        # url(r'^auth/createcspconfiguration/', APIMetering.configuration),
        response, status = self._authentifed_call("POST", "/auth/cspconfiguration/", data=data)
        return response, status
    def configuration_delete(self, data):
        # url(r'^auth/createcspconfiguration/', APIMetering.configuration),
        response, status = self._authentifed_call("DELETE", "/auth/cspconfiguration/", data=data)
        return response, status
    def user_update_list(self):
        # url(r'^auth/updateuserlist/', APIMetering.update_user_list_from_accelstore),
        response, status = self._authentifed_call("GET", "/auth/updateuserlist/")
        return response, status
    def remove_test_session(self):
        # url(r'^auth/updateuserlist/', APIMetering.update_user_list_from_accelstore),
        response, status = self._authentifed_call("GET", "/auth/metering/rmsession/")
        return response, status
    def clear_token(self):
        # url(r'^auth/admin/clear_token/', APIMetering.clear_token),
        response, status = self._authentifed_call("GET", "/auth/admin/clear_token/")
        return response, status
    def user_single_user(self, data):
        # url(r'^auth/userupdate/', APIMetering.user_update),
        response, status = self._authentifed_call("POST", "/auth/userupdate/", data=data)
        return response, status
    def user_single_user_card(self, data):
        # url(r'^auth/usercardupdate/', APIMetering.user_card_update),
        response, status = self._authentifed_call("POST", "/auth/usercardupdate/", data=data)
        return response, status
    def ip_create(self, data):
        # url(r'^auth/ip/create/', APIMetering.CreateIP),
        response, status = self._authentifed_call("POST", "/auth/ip/create/", data=data)
        return response, status
    def ip_delete(self, data):
        # url(r'^auth/ip/create/', APIMetering.CreateIP),
        response, status = self._authentifed_call("POST", "/auth/ip/delete/", data=data)
        return response, status
    def ip_get_hdk(self, data):
        # url(r'^auth/ip/hdk/', APIMetering.get_HDK),
        response, status = self._download_authentifed_call("POST", "/auth/ip/hdk/", data=data)
        return response, status
    def ip_create_get_hdk(self, data):
        # url(r'^auth/ip/hdk/', APIMetering.get_HDK),
        response, status = self._download_authentifed_call("POST", "/auth/ip/get_create_hdk/", data=data)
        return response, status
    def server_get_version(self):
        # url(r'^version/', APIMetering.get_version),
        response, status = self._authentifed_call("GET", "/version/")
        return response, status
    def metering_synips(self):
        # url(r'^auth/metering/syncips/', APIMetering.sync_IP_with_LGDN),
        response, status = self._authentifed_call("GET", "/auth/metering/syncips/")
        return response, status
    def remove_product_information(self, data):
        # url(r'^auth/metering/rmthissession/', APIMetering.remove_product_information),
        response, status = self._authentifed_call("POST", "/auth/metering/archiveduserproductinfo/", data=data)
        return response, status
    def get_user_token(self, email):
        # url(r'^auth/admin/get_token/', APIMetering.remove_product_information),
        response, status = self._authentifed_call("POST", "/auth/admin/get_token/", data={"email":email})
        return response, status
    def object_manager(self, api_object, method, data, urlsuffix=''):
        """Generic CRUD call against /auth/objects/, tagging the request origin."""
        if urlsuffix == '' or urlsuffix.endswith('/'):
            urlsuffix = urlsuffix+"?from=drmportalpreview.accelize.com"
        else:
            urlsuffix = urlsuffix+"&from=drmportalpreview.accelize.com"
        response, status = self._authentifed_call(method, "/auth/objects/%s/%s" % (api_object, urlsuffix), data=data)
        return response, status
| StarcoderdataPython |
6524511 | <reponame>bycristhian/psp
# Django
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from django.shortcuts import render
# Views
from psp.views import DashboardView, IndexView, HomeView, DashboardConfigurationView
from users.views import CalendarAdminView, DateDeliveryView
# Libraries
from decouple import config
# Root URL configuration for the psp project: admin, dashboard views,
# i18n switching, calendar pages, and the per-app URL modules.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('home/', HomeView.as_view(), name='dashboard'),
    path('i18n/', include('django.conf.urls.i18n')),
    path('settings/', DashboardConfigurationView.as_view(), name='configuration'),
    path('', IndexView.as_view(), name='index'),
    # Calendar Admin
    path('calendar/', CalendarAdminView.as_view(), name='calendar'),
    path('dates-delivery/', DateDeliveryView.as_view(), name='date_delivery'),
    # URL APP users
    path('users/', include(('users.urls', 'users'), namespace='users')),
    # URL APP projects
    path('projects/', include(('projects.urls', 'projects'), namespace='projects')),
    # URL APP Programs
    path('', include(('programs.urls', 'programs'), namespace='programs')),
    # URL APP Logs
    path('', include(('logs.urls', 'logs'), namespace='logs')),
]
# Serve media/static files through Django itself only outside production;
# a real web server is expected to serve them in production.
if not config('DJANGO_PRODUCTION_ENV', default=False, cast=bool):
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| StarcoderdataPython |
350536 | import os
import git
import gnupg
import shutil
from loguru import logger
def sync_password_store(repo_url=None, repo_dir='~/.password-store') -> git.Repo:
    """Ensure a pass(1)-style password store exists at *repo_dir* and is synced.

    Pulls the existing repository, or clones it from *repo_url* when the
    directory is missing or an empty non-repo directory.

    Raises ValueError on uncommitted local changes, a non-empty non-repo
    directory, or a missing *repo_url* when a clone is required.
    Returns the git.Repo handle.
    """
    # Fix: expand '~' up front — os.makedirs/git do NOT expand it, so the old
    # code created a literal directory named '~' in the current working dir.
    repo_dir = os.path.expanduser(repo_dir)
    try:
        repo = git.Repo(repo_dir)
        logger.info(f'Password store at `{repo_dir}`')
        if repo.git.diff():
            raise ValueError('There are uncommitted changes')
        logger.info(f'Pulling changes from `{repo.remote().url}`')
        repo.git.pull()
    except git.NoSuchPathError:
        logger.info(f'Directory does not exist, creating `{repo_dir}`')
        os.makedirs(repo_dir)
        # Recurse once: the directory now exists and is empty, so the
        # InvalidGitRepositoryError branch will clone into it.
        return sync_password_store(repo_url, repo_dir)
    except git.InvalidGitRepositoryError:
        if not os.path.isdir(repo_dir):
            raise ValueError(f'Not a directory: `{repo_dir}`')
        if os.listdir(repo_dir):
            raise ValueError(f'Directory is not empty: `{repo_dir}`')
        if not repo_url:
            raise ValueError('Repository url is not specified')
        logger.info(f'Directory is not a git repository, cloning from `{repo_url}`')
        repo = git.Repo.clone_from(repo_url, repo_dir)
    return repo
def get_gnupg():
    """Return a GPG wrapper bound to the first GnuPG binary found on PATH."""
    candidates = ('gpg2', 'gpg')
    for binary in candidates:
        if shutil.which(binary) is None:
            continue
        logger.info(f'Using binary `{binary}`')
        return gnupg.GPG(gpgbinary=binary)
    raise ValueError('GNUPG is not installed or not in the %PATH%')
def sync_gpg_keys(password_store: git.Repo, filename='.gpg-keys'):
    """Ensure a local GPG secret key exists and its public half is shared
    through *filename* inside the password store.

    - Generates a 2048-bit key (named after the repo's git user) when no
      secret key exists yet.
    - Imports and ultimately trusts every public key listed in *filename*.
    - Appends our own public key to *filename*, then commits and pushes it
      when it was not listed yet.
    """
    gpg = get_gnupg()
    secret_keys = gpg.list_keys(secret=True)
    if not secret_keys:
        input_data = gpg.gen_key_input(
            name_real=password_store.git.config('--get', 'user.name'),
            name_email=password_store.git.config('--get', 'user.email'),
            key_length=2048
        )
        logger.info(f'No gpg secret keys found, generating new one:\n{input_data}')
        res = gpg.gen_key(input_data)
        if not res.fingerprint:
            raise ValueError(res.stderr)
        secret_keys = gpg.list_keys(secret=True)
    uid = secret_keys[0]["uids"][0]
    # NOTE(review): the message says "will use" but this raises instead of
    # continuing — confirm whether a warning was intended.
    if len(secret_keys) > 1:
        raise ValueError(f'Multiple secret keys found, will use `{uid}`')
    # 'a+' creates the file when missing; seek(0) rewinds to read it back.
    with open(os.path.join(password_store.working_dir, filename), 'a+') as f:
        f.seek(0)
        res = gpg.import_keys(f.read())
        gpg.trust_keys(res.fingerprints, 'TRUST_ULTIMATE')
        # Export our pubkey only when none of our secret-key fingerprints is
        # already present in the imported set.
        if not set(res.fingerprints).intersection(
                set(map(lambda x: x['fingerprint'], secret_keys))):
            logger.info(f'Your fingerprint is absent, exporting pubkey `{uid}`')
            f.write(gpg.export_keys([uid], secret=False))
    password_store.index.add([filename])
    if password_store.git.diff():
        password_store.index.commit(f'Add gpg key: {uid}')
        password_store.git.push()
| StarcoderdataPython |
4869380 | from pytest import mark
from audit_log.api.serializers import AuditLogSerializer
# Canonical audit-log payload used by the serializer test below: a READ
# operation on a Profile, performed by its owner, reported by the apartment
# application service.
_common_fields = {
    "audit_event": {
        "origin": "APARTMENT_APPLICATION_SERVICE",
        "status": "SUCCESS",
        "date_time_epoch": 1590969600000,
        "date_time": "2020-06-01T00:00:00.000Z",
        "actor": {
            "role": "OWNER",
            "profile_id": "73aa0891-32a3-42cb-a91f-284777bf1d7f",
        },
        "operation": "READ",
        "target": {
            "id": "73aa0891-32a3-42cb-a91f-284777bf1d7f",
            "type": "Profile",
        },
    }
}
@mark.django_db
def test_audit_log_serializer_create():
    """The serializer accepts a well-formed payload and persists a log entry."""
    log_serializer = AuditLogSerializer(data=_common_fields)
    assert log_serializer.is_valid()
    saved_entry = log_serializer.save()
    assert saved_entry
    assert saved_entry.id
| StarcoderdataPython |
133747 | import requests
import json
import click
import datetime
import logging
# Global logging setup; lines look like "[*] 2020-01-01 00:00:00 INFO: msg".
logging.basicConfig(level=logging.INFO,
                    format='[*] %(asctime)s %(levelname)s: %(message)s', datefmt="%Y-%m-%d %H:%M:%S")
@click.command()
@click.option('--net/--no-net', help='use the online data', default=True, show_default=True)
@click.option('-f', '--filename', 'filename', help='local data', type=str, default="exams.json", show_default=True)
@click.option('-s', '--startY', 'startY', help='gen file start year', type=int, default=2019, show_default=True)
@click.option('-e', '--endY', 'endY', help='gen file end year', type=int, default=datetime.datetime.now().year, show_default=True)
@click.option('-o', '--out', 'out', help='output filename', type=str, default="problems.json", show_default=True)
def main(filename, net, startY, endY, out):
    """Aggregate GPE exam problems from [startY, endY], deduplicate by pid,
    attach a heuristic difficulty rating, and write the result to *out*."""
    uniq_problems = {}
    if net:
        response = requests.get(
            "https://raw.githubusercontent.com/setsal/GPE-Helper/master/frontend/public/exams.json")
        data = response.json()
    else:
        with open(filename) as f:
            data = json.load(f)
    logging.info('Load data successfully')
    for key in data:
        # Keep exams whose timestamp falls inside [startY, endY].
        if data[key]['timestamp'] > datetime.datetime(startY, 1, 1).timestamp() and data[key]['timestamp'] < datetime.datetime(endY, 12, 31).timestamp():
            for problem in data[key]['problems']:
                # Count appearances and remember the latest exam timestamp.
                if problem['pid'] in uniq_problems:
                    uniq_problems[problem['pid']
                                  ]['Appearance'] = uniq_problems[problem['pid']]['Appearance'] + 1
                    if (uniq_problems[problem['pid']]['LastAppearance'] < data[key]['timestamp']):
                        logging.info("PID: %d duplicate and update timestamp %s to %s",
                                     problem['pid'], uniq_problems[problem['pid']]['LastAppearance'], data[key]['timestamp'])
                        uniq_problems[problem['pid']
                                      ]['LastAppearance'] = data[key]['timestamp']
                else:
                    uniq_problems[problem['pid']] = problem
                    uniq_problems[problem['pid']
                                  ]['LastAppearance'] = data[key]['timestamp']
                    uniq_problems[problem['pid']]['Appearance'] = 1
    logging.info('Fetch %d uniq problems', len(uniq_problems))
    # sort by pid for stable output ordering
    uniq_problems_sort = sorted(uniq_problems.keys())
    # dump to array json for frontend
    uniq_problems_array = []
    # Accumulate averages used by the intuition-based rating heuristic below.
    ac_avg = 0
    onsite_avg = 0
    access_avg = 0
    for key in uniq_problems_sort:
        uniq_problems_array.append(uniq_problems[key])
        ac_avg = ac_avg + uniq_problems[key]['AcceptRate']
        onsite_avg = onsite_avg + uniq_problems[key]['onsite']
        access_avg = access_avg + uniq_problems[key]['access']
    p_len = len(uniq_problems_array)
    ac_avg = ac_avg/p_len
    onsite_avg = onsite_avg/p_len
    access_avg = access_avg/p_len
    # Ratings follow intuition; a better evaluation method should exist.
    for problem in uniq_problems_array:
        problem['LastAppearance'] = datetime.datetime.fromtimestamp(
            problem['LastAppearance']).strftime('%Y-%m-%d') # convert here so the frontend need not render/convert timestamps
        # Weighted deviation from the averages decides the rating bucket (0..3).
        rating_val = problem['AcceptRate']/ac_avg*0.8 + \
            problem['onsite']/onsite_avg*0.6 + problem['access']/access_avg*0.3
        if rating_val > 2:
            problem['rating'] = 3
        elif rating_val > 1.4:
            problem['rating'] = 2
        elif rating_val > 0.9:
            problem['rating'] = 1
        else:
            problem['rating'] = 0
        problem['favorite'] = 0 # default flag for the frontend, initialised here for convenience
    # write file
    with open(out, 'w') as outfile:
        json.dump(uniq_problems_array, outfile, indent=4)
    logging.info('Write to file %s successfully', out)
| StarcoderdataPython |
64449 | # Lib
from setuptools import setup, find_packages
exec(open('methylprep/version.py').read())
test_requirements = [
'methylcheck', # 'git+https://github.com/FoxoTech/methylcheck.git@feature/v0.7.7#egg=methylcheck',
'pytest',
'pytest_mock',
'matplotlib',
'scikit-learn', # openpyxl uses this, and forcing it to install the best version, not sklearn 0.0
'openpyxl',
'coverage'
]
setup(
name='methylprep',
version=__version__,
description='Python-based Illumina methylation array preprocessing software',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
project_urls = {
"Documentation": "https://life-epigenetics-methylprep.readthedocs-hosted.com/en/latest/",
"Source": "https://github.com/FOXOBioScience/methylprep/",
"Funding": "https://FOXOBioScience.com/"
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Medical Science Apps.',
'Framework :: Jupyter',
'Intended Audience :: Science/Research',
'Intended Audience :: Financial and Insurance Industry',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
],
keywords='methylation dna data processing epigenetics illumina',
url='https://github.com/FOXOBioScience/methylprep',
license='MIT',
author='Life Epigenetics',
author_email='<EMAIL>',
packages=find_packages(),
include_package_data=True,
package_data={"":["*.txt.gz"]},
install_requires=[
'pyparsing > 3.0',
'numpy',
'pandas >=1.3.0',
'scipy',
'statsmodels',
'tqdm',
'bs4',
'lxml',
'requests',
],
extras_require={
'dev': test_requirements
},
setup_requires=['pytest-runner'],
tests_require= test_requirements,
entry_points='''
[console_scripts]
methylprep-cli=methylprep.cli:app
''',
)
| StarcoderdataPython |
6440398 | # -*- coding:utf-8 -*-
import os
from sqlalchemy import create_engine
from pandas.io.pytables import HDFStore
import tushare as ts
def csv():
    """Export OHLC history for stock 000875 to a CSV file.

    NOTE: the function name shadows the stdlib ``csv`` module.
    """
    history = ts.get_hist_data('000875')
    history.to_csv('c:/day/000875.csv', columns=['open','high','low','close'])
def xls():
    """Export history for stock 000875 to an Excel sheet with an offset."""
    history = ts.get_hist_data('000875')
    # Save directly, placing the table at row 2 / column 5.
    history.to_excel('c:/day/000875.xlsx', startrow=2, startcol=5)
def hdf():
    """Persist history for stock 000875 into an HDF5 store under key '000875'."""
    history = ts.get_hist_data('000875')
    depot = HDFStore('c:/day/store.h5')
    depot['000875'] = history
    depot.close()
def json():
    """Dump history for 000875 as record-oriented JSON (to file and stdout).

    NOTE: the function name shadows the stdlib ``json`` module.
    """
    history = ts.get_hist_data('000875')
    history.to_json('c:/day/000875.json', orient='records')
    # Alternatively, use the JSON string directly:
    print(history.to_json(orient='records'))
def appends():
    """Concatenate histories for several stocks into one growing CSV file."""
    target = 'c:/day/bigfile.csv'
    for code in ['000875', '600848', '000981']:
        history = ts.get_hist_data(code)
        # Write the header only on the first pass, then append headerless.
        if not os.path.exists(target):
            history.to_csv(target)
        else:
            history.to_csv(target, mode='a', header=None)
def db():
    """Append one day of tick data for 600848 into MySQL via SQLAlchemy.

    NOTE: credentials are hard-coded in the connection URL below.
    """
    ticks = ts.get_tick_data('600848', date='2014-12-22')
    engine = create_engine('mysql://root:jimmy1@127.0.0.1/mystock?charset=utf8')
    ticks.to_sql('tick_data', engine, if_exists='append')
def nosql():
    """Insert one day of tick data for 600848 into MongoDB (db.tickdata)."""
    import pymongo
    import json
    # NOTE: pymongo.Connection is the legacy pre-3.0 API (now MongoClient).
    conn = pymongo.Connection('127.0.0.1', port=27017)
    ticks = ts.get_tick_data('600848', date='2014-12-22')
    records = ticks.to_json(orient='records')
    print(records)
    conn.db.tickdata.insert(json.loads(records))
# Script entry point: run the MongoDB example by default.
if __name__ == '__main__':
    nosql()
78487 | <gh_stars>1-10
# encoding: utf-8
# page show record size
""" show_cnt = 15
"""
# msyql dababase connection info
""" mysqldb_conn = {
'host' : 'localhost',
'user' : 'root',
'password' : '',
'db' : '',
'charset' : 'utf8'
}
"""
# Whether to save the HTTP response content to the database (disabled here).
""" save_content = True """
# http map filenames to MIME types
# https://docs.python.org/2/library/mimetypes.html
http_mimes = ['text', 'image', 'application', 'video', 'message', 'audio']
# File extensions treated as static resources (skipped by the fuzzer).
# Keep swf? (flash files are deliberately not listed here)
static_ext = ['js', 'css', 'ico','txt','svg','flv','jpg','png','jpeg','gif','pdf','ss3','rar','zip','avi','mp4','wmi','exe','mpeg','wav','mp3','json','appcache','cache']
# media resource files type
media_types = ['image', 'video', 'audio']
# MIME types treated as static resources; commented entries are disabled.
static_files = [
    'text/css',
    # 'application/javascript',
    # 'application/x-javascript',
    'application/msword',
    'application/vnd.ms-excel',
    'application/vnd.ms-powerpoint',
    'application/x-ms-wmd',
    'application/x-shockwave-flash',
    # 'image/x-cmu-raster',
    # 'image/x-ms-bmp',
    # 'image/x-portable-graymap',
    # 'image/x-portable-bitmap',
    # 'image/jpeg',
    # 'image/gif',
    # 'image/x-xwindowdump',
    # 'image/png',
    # 'image/vnd.microsoft.icon',
    # 'image/x-portable-pixmap',
    # 'image/x-xpixmap',
    # 'image/ief',
    # 'image/x-portable-anymap',
    # 'image/x-rgb',
    # 'image/x-xbitmap',
    # 'image/tiff',
    # 'video/mpeg',
    # 'video/x-sgi-movie',
    # 'video/mp4',
    # 'video/x-msvideo',
    # 'video/quicktime'
    # 'audio/mpeg',
    # 'audio/x-wav',
    # 'audio/x-aiff',
    # 'audio/basic',
    # 'audio/x-pn-realaudio',
]
# Listener endpoint of the snow_listener service.
snow_listener_url = "http://localhost:8083/listener"
# Root domain used for SSRF / blind-SQLi / blind-command-execution callbacks,
# e.g. vscode.baidu.com.
blind_reverse_domain = "pz35ac.ceye.io"
#sqlmap api server address
sqlmap_api_address = 'http://127.0.0.1:8775'
# API endpoint used to check for blind-injection callbacks.
# NOTE(review): the ceye.io token below is a hard-coded secret — move it to
# configuration/environment instead of source code.
blind_reverse_api = 'http://api.ceye.io/v1/records?token=0c28dc05dc90d6ecaab7fa1f28d09d9b&type=%s&filter=%s'
# Detecting every class is overkill; blind command-execution detection is
# slow — consider trimming this list.
detect_types = ['xss','dom_xss','url_redirect','file_download','file_read','pass_by','sqli','ssrf','xxe','ssi','ssti','crlf','command_exec']
def logo():
    # Print the ASCII-art startup banner (Python 2 print statement; the
    # banner text is runtime output and must not be altered).
    print '''\n
   _____                _____________
  /  _  \ ___  ___ ____\_   _____/_ __________________ ___________
 /  /_\  \\  \/  // __ \|    __)|  |  \___   /\___   // __ \_  __ \
/    |    \>    <\  ___/|     \ |  |  //    /  /    /\  ___/|  | \/
\____|__  /__/\_ \\___  >___  / |____//_____ \/_____ \\___  >__|
        \/      \/    \/    \/              \/      \/    \/
    [+]axeproxy v1.1 based on ring04h@wyproxy@mitmproxy, thx all.
    [+]AxeFuzzer v4.0 based on <EMAIL>)
    '''
3545291 | <reponame>baklanovp/pystella
import numpy as np
from pystella.rf import band
from pystella.rf.ts import TimeSeries, SetTimeSeries
__author__ = 'bakl'
class LightCurve(TimeSeries):
    """A light curve: magnitudes vs. time in a single photometric band.

    Extends TimeSeries with a band object and an additional additive
    magnitude shift (mshift) applied on top of the inherited time shift.
    """
    def __init__(self, b, time, mags, errs=None, tshift=0., mshift=0.):
        """Creates a Light Curve instance. Required parameters: b (band), time, mags."""
        if isinstance(b, str):  # convert band-name to band instance
            if band.is_exist(b):
                self._b = band.band_by_name(b)
            else:
                raise ValueError("No such band: {}".format(b))
        else:
            self._b = b
        super().__init__(self._b.Name, time, mags, errs, tshift=tshift)
        self._mshift = mshift
        self._attrs = {}  # free-form per-curve metadata, see attrs()
    @property
    def Mag(self):
        # Magnitudes with the magnitude shift applied.
        return self.V + self.mshift
    @property
    def M(self):
        # Raw (unshifted) magnitudes; alias of the inherited V.
        return self.V
    @M.setter
    def M(self, v):
        self.V = v
    @property
    def MagErr(self):
        # Magnitude errors; alias of the inherited Err.
        return self.Err
    @property
    def Band(self):
        return self._b
    @property
    def BName(self):
        # Shortcut for the band name.
        return self.Band.Name
    @property
    def mshift(self):
        # Additive magnitude shift used by Mag.
        return self._mshift
    @mshift.setter
    def mshift(self, shift):
        self._mshift = shift
    @property
    def TimeLcMax(self):
        # Time of light-curve maximum, i.e. the brightest (minimum) magnitude.
        idx = np.argmin(self.Mag)
        return self.Time[idx]
    def attrs(self, nm, *val):
        # Combined getter/setter: attrs(key) reads, attrs(key, v) writes.
        # NOTE(review): the setter stores the whole *val tuple, not val[0];
        # confirm this asymmetry is what callers expect.
        if not val:
            return self._attrs[nm]
        else:
            self._attrs[nm] = val
    def toarray(self, is_err=True):
        # Return rows of (time, mag[, err]); errors are included only when
        # requested AND actually present.
        if is_err and self.IsErr:
            res = np.array([self.Time, self.Mag, self.MagErr])
        else:
            res = np.array([self.Time, self.Mag])
        return res.T
    def copy_tlim(self, tlim=None):
        # Copy the curve, optionally keeping only points with tlim[0] <= t <= tlim[1].
        errs = None
        if tlim is not None:
            is_good = np.where((self.Time >= tlim[0]) & (self.Time <= tlim[1]))
            time = self.T[is_good]
            mags = self.V[is_good]
            if self.IsErr:
                errs = self.Err[is_good]
        else:
            time = self.T
            mags = self.V
            if self.IsErr:
                errs = self.Err
        lc = LightCurve(self.Band, time, mags, errs)
        lc.tshift = self.tshift
        lc.mshift = self.mshift
        return lc
    def copy(self, name=None, f=None):
        # Copy via the parent implementation, then carry over mshift
        # (the parent knows nothing about the magnitude shift).
        lc = super(type(self), self).copy(name=name, f=f)
        lc.mshift = self.mshift
        return lc
    def clone(self, t=None, m=None, err=None):
        # Build a new curve, optionally replacing time/mag/err arrays; each
        # replacement must have the same length as the original.
        # Returns (new_curve, tshift, mshift) — shifts are NOT applied to the clone.
        errs = None
        tt = self.Time
        mm = self.Mag
        if self.IsErr:
            errs = self.Err
        if t is not None:
            if len(t) != self.Length:
                raise ValueError('Len(t)[{}] should be the same as origin [{}]'.format(len(t), self.Length))
            tt = t
        if m is not None:
            if len(m) != self.Length:
                raise ValueError('Len(m)[{}] should be the same as origin [{}]'.format(len(m), self.Length))
            mm = m
        if err is not None:
            if len(err) != self.Length:
                raise ValueError('Len(err)[{}] should be the same as origin [{}]'.format(len(err), self.Length))
            errs = err
        return LightCurve(self.Band, tt, mm, errs), self.tshift, self.mshift
    def sorted_time(self, order=None):
        # Return a copy of the curve with points sorted by time.
        ind = np.argsort(self.Time, order=order)
        time = self.T[ind]
        mags = self.V[ind]
        errs = None
        if self.IsErr:
            errs = self.Err[ind]
        lc = LightCurve(self.Band, time, mags, errs)
        lc.tshift = self.tshift
        lc.mshift = self.mshift
        return lc
    @classmethod
    def Merge(cls, lc1, lc2):
        # Merge two curves of the SAME band into one, time-sorted; errors are
        # kept only when both inputs carry them.
        if lc1.Band.Name != lc2.Band.Name:
            raise ValueError("Merging is possible only for the same filters: {} VS {}".
                             format(lc1.Band.Name, lc2.Band.Name))
        bname = lc1.Band.Name
        t = np.concatenate((lc1.Time, lc2.Time))
        m = np.concatenate((lc1.Mag, lc2.Mag))
        sorti = np.argsort(t)
        time = t[sorti]
        mags = m[sorti]
        errs = None
        if lc1.IsErr and lc2.IsErr:
            e = np.concatenate((lc1.Err, lc2.Err))
            errs = e[sorti]
        res = LightCurve(bname, time, mags, errs=errs)
        return res
def LC_interp(orig, time, is_spline=True):
    """Resample the light curve *orig* onto a new time grid.

    Both magnitudes and (when present) magnitude errors are interpolated
    linearly: either via a k=1 InterpolatedUnivariateSpline (which
    extrapolates beyond the data) or np.interp (which clamps to the end
    values). The tshift/mshift of *orig* are intentionally not carried over.
    """
    def _resample(values):
        # One interpolation path for both magnitudes and errors.
        if is_spline:
            from scipy.interpolate import InterpolatedUnivariateSpline
            spline = InterpolatedUnivariateSpline(orig.Time, values, k=1)
            return spline(time)
        return np.interp(time, orig.Time, values)

    mags = _resample(orig.Mag)
    if orig.IsErr:
        return LightCurve(orig.Band, time, mags, _resample(orig.MagErr))
    return LightCurve(orig.Band, time, mags)
class SetLightCurve(SetTimeSeries):
    """Set of the Light Curves, keyed by band name."""
    def __init__(self, name=''):
        """Creates a Set of Light Curves."""
        super().__init__(name)
        # self._loop = 0
    @property
    def Bands(self):
        # Generator over the band objects of the stored curves.
        if len(self.Set) == 0:
            raise ValueError('There are no bands in SetLightCurve.')
        # for name, lc in self.Set.items():
        #     yield lc.Band
        res = (lc.Band for name, lc in self.Set.items())
        return res
    @property
    def BandNames(self):
        # List of band names for all stored curves.
        res = [b.Name for b in self.Bands]
        return res
    def IsBand(self, bname):
        # True if a curve for band *bname* is present.
        return bname in self.BandNames
    def add(self, lc):
        # Insert/replace the curve for its band.
        self._set[lc.Band.Name] = lc
    def get(self, bn, default=None):
        # Look up a curve by band name; *default* when absent.
        for n, lc in self.Set.items():
            if lc.Band.Name == bn:
                return lc
        return default
    # def __getattr__(self, attr):
    #     lc = self.get(attr, None)
    #     if lc is None:
    #         raise AttributeError(attr)
    #     return lc
    #
    def is_band(self, bn):
        # Lowercase alias of IsBand.
        return bn in self.BandNames
    def set_mshift(self, mshift):
        # Apply the same magnitude shift to every curve in the set.
        for n, lc in self.Set.items():
            lc.mshift = mshift
    def clone(self, name=None, t=None, m=None, err=None):
        # Clone every curve; t/m/err may be arrays (shared) or dicts keyed by
        # band name (per-curve replacements).
        def key_set(bn, nm, v):
            if isinstance(v, dict):
                return v[bn]
            else:
                return v
        if name is None:
            name = self.Name
        res = SetLightCurve(name)
        for lc in self:
            kwargs = {'t': key_set(lc.Band.Name, 'm', t),
                      'm': key_set(lc.Band.Name, 'm', m),
                      'err': key_set(lc.Band.Name, 'err', err),
                      }
            clone, tshift, mshift = lc.clone(**kwargs)
            res.add(clone)
        return res
    def sorted_time(self, order=None):
        # New set with every curve time-sorted.
        res = SetLightCurve(self.Name)
        for lc in self:
            clone = lc.sorted_time(order=order)
            res.add(clone)
        return res
    def copy(self, name=None, f=None):
        # New set with a (possibly filtered, via f) copy of every curve.
        if name is None:
            name = self.Name
        res = SetLightCurve(name)
        for lc in self:
            cp = lc.copy(f=f)
            res.add(cp)
        return res
    def copy_tmlim(self, tlim=None, mlim=None):
        """
        Copy SetLightCurve to other SetLightCurve
        :param tlim: time limits, default None
        :param mlim: magnitude limits, default None
        :return:
        """
        if tlim is not None:
            res = self.copy(f=lambda x: (tlim[0] <= x.Time) & (x.Time <= tlim[1]))
        else:
            res = self.copy()
        if mlim is not None:
            res = res.copy(f=lambda x: (mlim[0] >= x.Mag) & (x.Mag >= mlim[1]))
        return res
    def merge(self, curves2, name=None):
        # Instance-level convenience wrapper around the Merge classmethod.
        res = SetLightCurve.Merge(self, curves2, name)
        return res
    @classmethod
    def Merge(cls, curves1, curves2, name=None):
        # Merge two sets band-by-band; curves present in both sets are
        # combined via LightCurve.Merge. Either argument may be None.
        if curves1 is None:
            return curves2
        if curves2 is None:
            return curves1
        if name is None:
            name = "{}+{}".format(curves1.Name, curves2.Name)
        res = SetLightCurve(name)
        # Add Light Curves from the first set
        for lc1 in curves1:
            lc2 = curves2.get(lc1.Band.Name)
            if lc2 is None:
                res.add(lc1)
            else:
                lc = LightCurve.Merge(lc1, lc2)
                res.add(lc)
        # Add remaining Light Curves from the second set
        for lc in curves2:
            if not res.IsBand(lc.Band.Name):
                res.add(lc)
        return res
| StarcoderdataPython |
8016198 | <filename>market/market.py<gh_stars>0
import json, hmac, hashlib, time, requests, base64
from requests.auth import AuthBase
from websocket import create_connection
class Market(AuthBase):
    """Base class for an exchange market connection.

    Implements the Coinbase/GDAX-style CB-ACCESS HMAC request signing as a
    ``requests`` auth hook, and holds a websocket handle for streaming data.
    """
    def __init__(self, api_key, secret_key, passphrase, api_url, ws_url, name):
        self.api_key = api_key
        self.secret_key = secret_key
        # Fixed: this assignment had been mangled to a placeholder token
        # (a syntax error); store the passphrase argument.
        self.passphrase = passphrase
        self.api_url = api_url
        self.ws_url = ws_url
        self.name = name
        self.ws = None

    def __call__(self, request):
        """Sign an outgoing request (``requests`` auth hook).

        Adds the CB-ACCESS-* headers expected by the exchange REST API:
        the signature is HMAC-SHA256 over timestamp+method+path+body, keyed
        with the base64-decoded secret, and itself base64-encoded.
        """
        timestamp = str(time.time())
        # NOTE(review): request.body can be bytes for some requests —
        # confirm callers always send str/JSON bodies.
        message = timestamp + request.method + request.path_url + (request.body or '')
        hmac_key = base64.b64decode(self.secret_key)
        # Fixed for Python 3: hmac.new requires bytes, and the old code used
        # the Python-2-only str.encode('base64') for the digest.
        signature = hmac.new(hmac_key, message.encode('utf-8'), hashlib.sha256)
        signature_b64 = base64.b64encode(signature.digest()).decode('ascii')
        request.headers.update({
            'CB-ACCESS-SIGN': signature_b64,
            'CB-ACCESS-TIMESTAMP': timestamp,
            'CB-ACCESS-KEY': self.api_key,
            'CB-ACCESS-PASSPHRASE': self.passphrase,
            'Content-Type': 'application/json'
        })
        return request

    def connectToWebsocket(self):
        """Subscribe to a websocket (to be implemented by subclasses)."""
        pass

    def showStream(self, N=2):
        """Print the next *N* messages received on the websocket stream."""
        for _ in range(N):
            temp = self.ws.recv()
            print(temp)

    def disconnect(self):
        """Close all websocket connections."""
        # Fixed: print was given the format string and the value as two
        # separate arguments instead of %-formatting them.
        print("Closing connection to %s" % self.ws_url)
        self.ws.close()

    def getCurrentPrices(self):
        """Returns the best bid and ask prices as a tuple (to be implemented)."""
        pass
9744566 | <gh_stars>10-100
# Copyright (c) 2015 <NAME>
#
# See the file license.txt for copying permission.
from __future__ import annotations
import logging
import random
import yaml
import typing
if typing.TYPE_CHECKING:
from amqtt.session import Session
logger = logging.getLogger(__name__)
def format_client_message(
    session: Session | None = None,
    address: str | None = None,
    port: int | None = None,
) -> str:
    """Return a human-readable identifier for a client, for log messages.

    Precedence: a truthy *session* wins; otherwise *address* AND *port*
    together; otherwise a generic placeholder. The annotations were fixed
    to be explicitly Optional (PEP 484 deprecates implicit Optional).
    """
    if session:
        return "(client id=%s)" % session.client_id
    if address is not None and port is not None:
        return "(client @=%s:%d)" % (address, port)
    return "(unknown client)"
def gen_client_id() -> str:
    """Generate a random client ID with the "amqtt/" prefix.

    The 16 random characters are drawn from the ASCII range '0'..'z'
    (codes 48-122), matching the historical behaviour.
    """
    suffix = "".join(chr(random.randint(0, 74) + 48) for _ in range(16))
    return "amqtt/" + suffix
def read_yaml_config(config_file: str) -> dict:
    """Load a YAML configuration file.

    Returns None (after logging an error) when the file contains invalid
    YAML; other errors — e.g. a missing file — propagate to the caller,
    matching the original behaviour.
    """
    config = None
    try:
        with open(config_file) as stream:
            # yaml.full_load exists on PyYAML >= 5.1; fall back for older versions.
            loader = yaml.full_load if hasattr(yaml, "full_load") else yaml.load
            config = loader(stream)
    except yaml.YAMLError as exc:
        logger.error("Invalid config_file %s: %r", config_file, exc)
    return config
| StarcoderdataPython |
238215 | <reponame>QualiSystems/OpenStack-Shell<filename>package/cloudshell/cp/openstack/command/operations/connectivity_operation.py
from cloudshell.cp.openstack.domain.services.connectivity.vlan_connectivity_service import VLANConnectivityService
class ConnectivityOperation(object):
    """Thin command wrapper that delegates connectivity changes to a service."""

    # Attribute name used for the public IP value on deployed resources.
    public_ip = "Public IP"

    def __init__(self, connectivity_service):
        """
        :param connectivity_service: object implementing perform_apply_connectivity
        """
        self.connectivity_service = connectivity_service

    def apply_connectivity(self, openstack_session, cp_resource_model, conn_request, logger):
        """Parse the connectivity request JSON and apply it via the service.

        :param keystoneauth1.session.Session openstack_session:
        :param OpenStackResourceModel cp_resource_model:
        :param str conn_request: Connectivity Request JSON
        :param logging.Logger logger:
        :return DriverResponseRoot:
        """
        service = self.connectivity_service
        return service.perform_apply_connectivity(
            openstack_session=openstack_session,
            cp_resource_model=cp_resource_model,
            connection_request=conn_request,
            logger=logger,
        )
| StarcoderdataPython |
1779555 | <filename>stix2-jailbreak.py
#!/usr/bin/env python3
# Code based off of https://github.com/mvt-project/mvt
import sys
import os
from stix2.v21 import (Indicator, Malware, Relationship, Bundle, DomainName)
if __name__ == "__main__":
if os.path.isfile("jailbreak.stix2"):
os.remove("jailbreak.stix2")
with open("filenames.txt") as f:
filenames = list(set([a.strip() for a in f.read().split()]))
with open("processes.txt") as f:
processes = list(set([a.strip() for a in f.read().split()]))
res = []
malware = Malware(name="jailbreak", is_family=False, description="IOCs for checkra1n jailbreak")
res.append(malware)
for f in filenames:
i = Indicator(indicator_types=["malicious-activity"], pattern="[file:name='{}']".format(f), pattern_type="stix")
res.append(i)
res.append(Relationship(i, 'indicates', malware))
for p in processes:
i = Indicator(indicator_types=["malicious-activity"], pattern="[process:name='{}']".format(p), pattern_type="stix")
res.append(i)
res.append(Relationship(i, 'indicates', malware))
bundle = Bundle(objects=res)
with open("jailbreak.stix2", "w+") as f:
f.write(str(bundle))
print("jailbreak.stix2 file created") | StarcoderdataPython |
47823 | <filename>tests/test_bloomfilter.py
# -*- coding: utf-8 -*-
import unittest
import redis
import src.bloomfilter as bf
import src.exceptions as ep
class BloomFilterTest(unittest.TestCase):
    """Integration tests for RedisBloomFilter; require a reachable Redis server."""
    # Connection settings for the Redis instance used by the tests.
    redis_host = ''
    redis_port = 6379
    redis_db = 0
    redis_client = None
    # Name and sizing of the throwaway filter created for each test.
    name = 'bloom_for_test'
    bloom_filter = None
    insertions = 10000
    error_rate = 0.001
    def setup_redis_client(self):
        # Fresh client per test run, using the class-level connection settings.
        self.redis_client = redis.StrictRedis(host=self.redis_host, port=self.redis_port, db=self.redis_db)
    def setUp(self):
        # A fresh filter per test; destroyed again in tearDown.
        self.setup_redis_client()
        self.bloom_filter = bf.RedisBloomFilter(self.name, self.insertions, self.error_rate, self.redis_client)
        self.bloom_filter.initialize()
    def tearDown(self):
        self.bloom_filter.destroy()
    def test_bit_offsets(self):
        # Each hash yields one non-negative bit offset.
        key = "abcefg"
        offsets = bf.bit_offsets(key, self.bloom_filter.number_of_hashes, self.bloom_filter.bits_number)
        self.assertTrue(len(offsets) == self.bloom_filter.number_of_hashes, 'offsets must be equal with number of hashes')
        for offset in offsets:
            self.assertGreater(offset, -1, 'offset must be greater than -1')
    def test_bloom_do_not_reuse_exists(self):
        # With auto_use_exists=False, initializing over an existing filter must fail.
        anther_bloom_filter = bf.RedisBloomFilter(self.name, self.insertions, self.error_rate,
                                                  self.redis_client, auto_use_exists=False)
        self.assertRaises(ep.BloomFilterAlreadyExists, anther_bloom_filter.initialize)
    def test_put_none_or_empty(self):
        # None and '' are accepted as keys.
        self.assertTrue(self.bloom_filter.put(None), 'put none must be true')
        self.assertTrue(self.bloom_filter.put(''), 'put empty must be true')
    def test_contains_none_or_empty(self):
        # None and '' are always reported as present.
        self.assertTrue(self.bloom_filter.contains(None), 'contains none is true')
        self.assertTrue(self.bloom_filter.contains(''), 'contains empty is true')
    def test_put_and_contains(self):
        # Round-trip a key; a distinct key should not collide at this fill level.
        key = 'hello tests'
        not_exists_key = 'hello tests gone'
        self.assertTrue(self.bloom_filter.put(key), 'put must success')
        self.assertTrue(self.bloom_filter.contains(key), 'must contains exists')
        self.assertFalse(self.bloom_filter.contains(not_exists_key),
                         'no exists key may not be report exists for only one data')
        self.assertTrue(key in self.bloom_filter, '__contains__ must work')
    def test_count(self):
        # The approximate element count becomes positive after one insert.
        key = 'hello tests'
        self.assertTrue(self.bloom_filter.put(key))
        self.assertTrue(self.bloom_filter.count() > 0)
if __name__ == '__main__':
unittest.main() | StarcoderdataPython |
5150428 | import unittest
from my_list import MyList
class TestStringMethods(unittest.TestCase):
    """
    Run 3 unit tests for each of the following list requirements:
    We need to get the size of the list
    We need to clear the list
    We need to add Items
    We need to be able to check if an item exists
    We need to get elements by index
    We need to search the index of an object
    We need to remove an item by index
    """
    @classmethod
    def setUpClass(self):
        # NOTE(review): the first parameter of a classmethod is conventionally
        # named cls. These fixtures are class-level and shared by all tests.
        self.myList1 = MyList((4, 5, 6,))
        self.myList2 = MyList(tuple())
        self.myList3 = MyList(("x",4,"que","Hola",))
        self.myList4 = MyList(("x",4,"que",))
        self.myList5 = MyList(("x",))
        self.myList6 = MyList((4, 4, 4, 4, 4, 4,))
        self.myList11 = MyList(("x", 4, "que",))
        self.myList12 = MyList(("x",))
        self.myList31 = MyList(("hola","x","como","estas",))
        self.myList32 = MyList((3, "x", "como", ))
        self.myList33 = MyList(( 5,))
        self.myList34 = MyList(("hola", "x", "como", "x",))
    def test_size(self):
        # len() must report the number of stored elements.
        self.assertEqual(len(self.myList1), 3)  # Using modified __len__ function
        self.assertEqual(len(self.myList2), 0)  # Using modified __len__ function
        self.assertEqual(len(self.myList3), 4)  # Using modified __len__ function
    def test_clear(self):
        # clear() empties the list and resets its head pointer.
        self.myList4.clear()
        self.assertEqual(len(self.myList4), 0)
        self.assertTrue(self.myList4._head is None)
        self.myList5.clear()
        self.assertEqual(len(self.myList5), 0)
        self.assertTrue(self.myList5._head is None)
        self.myList6.clear()
        self.assertEqual(len(self.myList6), 0)
        self.assertTrue(self.myList6._head is None)
    def test_get(self):
        # get() returns the element at an index, or None when out of range.
        self.assertEqual(self.myList1.get(-1), None)
        self.assertEqual(self.myList1.get(0), 4)
        self.assertEqual(self.myList1.get(2), 6)
        self.assertEqual(self.myList1.get(3), None)
    def test_add(self):
        # add() appends at the end, including None values.
        new = 9
        self.myList11.add(new)
        self.assertEqual(len(self.myList11), 4)
        self.assertEqual(self.myList11.get(3), new)
        new = "yo"
        self.myList12.add(new)
        self.assertEqual(len(self.myList12), 2)
        self.assertEqual(self.myList12.get(1), new)
        new = None
        self.myList12.add(new)
        self.assertEqual(len(self.myList12), 3)
        self.assertEqual(self.myList12.get(2), new)
    def test_exists(self):
        # exists() is a plain membership check.
        self.assertEqual(self.myList1.exists(4), True)
        self.assertEqual(self.myList1.exists(5), True)
        self.assertEqual(self.myList1.exists(6), True)
        self.assertEqual(self.myList1.exists(8), False)
        self.assertEqual(self.myList1.exists("XX"), False)
        self.assertEqual(self.myList1.exists(None), False)
    def test_get_index_of(self):
        # get_index_of() returns the first index of a value, -1 when absent.
        self.assertEqual(self.myList1.get_index_of(4), 0)
        self.assertEqual(self.myList1.get_index_of(5), 1)
        self.assertEqual(self.myList1.get_index_of(6), 2)
        self.assertEqual(self.myList1.get_index_of(8), -1)
        self.assertEqual(self.myList1.get_index_of("XX"), -1)
        self.assertEqual(self.myList1.get_index_of(None), -1)
    def test_remove(self):
        # remove() deletes by index and returns the removed value, or None
        # (and leaves the list untouched) for an out-of-range index.
        self.assertEqual(self.myList31.exists("x"), True)
        self.assertEqual(len(self.myList31), 4)
        self.assertEqual(self.myList31.remove(1), "x")
        self.assertEqual(len(self.myList31), 3)
        self.assertEqual(self.myList31.exists("x"), False)
        self.assertEqual(self.myList32.exists(3), True)
        self.assertEqual(len(self.myList32), 3)
        self.assertEqual(self.myList32.remove(7), None)
        self.assertEqual(len(self.myList32), 3)
        self.assertEqual(self.myList32.exists(3), True)
        self.assertEqual(self.myList34.get_index_of("x"), 1)
        self.assertEqual(len(self.myList34), 4)
        self.assertEqual(self.myList34.remove(1), "x")
        self.assertEqual(len(self.myList34), 3)
        self.assertEqual(self.myList34.get_index_of("x"), 2)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
1720662 | <gh_stars>1-10
""":mod:`getpost.hogwarts` --- Controller module of getpost
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from collections import namedtuple
from flask import Blueprint, render_template
from ..orm import Session
ACCOUNT_PER_PAGE = 20
# Permissions
SELF_NONE = 0x0
SELF_READ = 0x1
SELF_WRIT = 0x2
SELF_REWR = 0x3
ALLL_READ = 0x4
ALLL_WRIT = 0x5
ALLL_REWR = 0x6
def update_model(model, identifier_key, identifier_value, **kwargs):
    """Update model instance.

    Args:
        model (:cls:`getpost.orm.ReprBase`): Model to update.
        identifier_key (str): Key to query model.
        identifier_value (str): Value to query model.
        **kwargs: attribute keys and values to update.

    Returns:
        row_count (int): number of rows affected by the update.
    """
    session = Session()
    query = session.query(model).filter_by(**{identifier_key: identifier_value})
    # synchronize_session=False: bulk UPDATE without reconciling in-session objects.
    affected = query.update(kwargs, synchronize_session=False)
    session.commit()
    return affected
def generate_student_permissions():
    """Build the per-attribute permission table for student records.

    Every attribute currently shares the same policy: students may read
    their own record; employees and administrators may read/write all.
    """
    Permission = namedtuple(
        'Permission',
        ['student', 'employee', 'administrator', ]
    )
    attribute_names = (
        'first_name',
        'last_name',
        'ocmr',
        't_number',
        'alternative_name',
        'email_address',
    )
    return {
        name: Permission(student=SELF_READ, employee=ALLL_REWR, administrator=ALLL_REWR)
        for name in attribute_names
    }
# Module-level permission table, built once at import time.
STUDENT_PERMISSIONS = generate_student_permissions()

# Blueprint mounted at the application root.
hogwarts_blueprint = Blueprint('hogwarts', __name__, url_prefix='')

@hogwarts_blueprint.route('/')
def hogwarts_index():
    """Render the landing page."""
    return render_template('hogwarts.html')

@hogwarts_blueprint.route('/ping')
def ping():
    """Trivial liveness endpoint."""
    return 'What the brangan.'
| StarcoderdataPython |
3220559 | import torch
import torch.nn as nn
from torch import sigmoid
from torch.nn.init import xavier_uniform_, zeros_, kaiming_uniform_
import torchvision as tv
def conv(in_planes, out_planes, kernel_size=3):
    """Stride-2 downsampling block: Conv2d (same-style padding) + ReLU."""
    padding = (kernel_size - 1) // 2
    layers = [
        nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, padding=padding, stride=2),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
def upconv(in_planes, out_planes):
    """2x upsampling block: stride-2 transposed conv followed by ReLU."""
    layers = [
        nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4, stride=2, padding=1),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
class PoseSep(nn.Module):
    """Per-reference 6-DoF pose regressor.

    Each (target, reference) pair is encoded independently and regressed to
    a 6-vector pose; explainability masks are not produced by this variant
    (output_exp must stay False).
    """

    def __init__(self, nb_ref_imgs=2, output_exp=False, encoder='conv'):
        super(PoseSep, self).__init__()
        assert output_exp == False
        self.nb_ref_imgs = nb_ref_imgs
        self.output_exp = output_exp
        planes = [16, 32, 64, 128, 256, 256, 256]
        if encoder == 'conv':
            # Seven stride-2 conv blocks; first two use larger kernels.
            stages = [conv(6, planes[0], kernel_size=7),
                      conv(planes[0], planes[1], kernel_size=5)]
            stages.extend(conv(planes[k], planes[k + 1]) for k in range(1, 6))
            self.encoder = nn.Sequential(*stages)
            self.pose_pred = nn.Conv2d(planes[6], 6, kernel_size=1, padding=0)
        elif encoder == 'resnet':
            backbone = tv.models.resnet18(pretrained=False)
            self.encoder = nn.Sequential(*(list(backbone.children())[:-2]))
            # Widen the stem to accept the 6-channel target+reference stack.
            self.encoder[0] = nn.Conv2d(6, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
            self.pose_pred = nn.Conv2d(512, 6, kernel_size=1, padding=0)

    def init_weights(self):
        """(Re-)initialize conv weights with Kaiming-uniform; biases to zero."""
        for module in self.modules():
            if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
                kaiming_uniform_(module.weight.data)
                if module.bias is not None:
                    zeros_(module.bias)

    def forward(self, target_image, ref_imgs):
        assert len(ref_imgs) == self.nb_ref_imgs
        per_ref = []
        for ref in ref_imgs:
            # Encode the concatenated 6-channel pair, then global-average
            # the per-pixel pose prediction and scale it down.
            feats = self.encoder(torch.cat([target_image, ref], 1))
            raw = self.pose_pred(feats)
            pooled = raw.mean(3).mean(2)
            per_ref.append(0.01 * pooled.view(pooled.size(0), 1, 6))
        pose = torch.cat(per_ref, 1)
        # Explainability masks are not predicted by this variant.
        if self.training:
            return [None, None, None, None], pose
        else:
            return None, pose
| StarcoderdataPython |
304552 | <filename>rough_surfaces/__init__.py
from . import analyse
from . import generate
from . import params
from . import contact
from . import surface
from . import plot
| StarcoderdataPython |
5171934 | <filename>nn_init.py
import os
def main():
    """Interactively reset the network configuration directories.

    On a 'Y'/'y' answer, delete every regular file under clean/, models/
    and pickles/ (per-file errors are printed and skipped).
    """
    print('Would you like to reset the network configurations?')
    answer = input()
    if answer not in ('Y', 'y'):
        return
    for folder in ('clean/', 'models/', 'pickles/'):
        for entry in os.listdir(folder):
            path = os.path.join(folder, entry)
            try:
                if os.path.isfile(path):
                    os.unlink(path)
            except Exception as exc:
                print(exc)
    print('Directories have been cleaned.')
    print('Ready to receive new inputs.')
    print('Remember to edit the .csv files accordingly.')
if __name__ == '__main__':
main() | StarcoderdataPython |
11390694 | import json
from pprint import pprint
# Open data file and load into data dict
with open('data.json', 'r') as data_file:
    data = json.load(data_file)
pprint(data)
# Just for fun, to try JSON dump
m = {'id': 2, 'name': 'hussain'}
n = json.dumps(m)
# NOTE(review): Python 2 print statement — under Python 3 this line is a
# SyntaxError even though the rest of the script is version-neutral.
print type(n)
4929042 | <gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
###########THE FUNCTIONS###########################################
####Task1 Equation (dy/dt=-2x)####################################
def f(x, T):
    """Right-hand side of dx/dt = -2x (T is unused; kept for the solver API)."""
    return x * (-2)
#####Task 1 analytical soltution##################################
def f1():
    """Analytical solution of dx/dt = -2x with x(0) = 1 on t in [0, 5)."""
    initial = 1
    # Change the stop value of np.arange below to change the end time.
    times = np.arange(0, 5, 0.01)
    values = initial * np.exp(-2 * times)
    return times, values
######Task2 Equation (dy/dt=2-2X-e^-4t)############################
def f2(x, T):
    """Right-hand side of dx/dt = 2 - 2x - exp(-4t)."""
    decay = np.exp(-4 * T)
    return 2 - 2 * x - decay
######Task 2 analytical soltution################################
def f3():
    """Analytical solution of dx/dt = 2 - 2x - exp(-4t) with x(0) = 1."""
    # Change the stop value of np.arange below to change the end time.
    times = np.arange(0, 5, 0.01)
    values = 1 + 0.5 * np.exp(-4 * times) - 0.5 * np.exp(-2 * times)
    return times, values
####################################################################
#This 'euler' function will require a function which are displayed above,
#A initial condition (in this case x(0)= 1)
#A start point, which is currently 0
# The amount of NUMERICAL PLOTS. (in this case it ranges from 50-5000 points)
def euler(f, x0, S, T, npoints):
    """Solve dx/dt = f(x, t), x(S) = x0, with the forward (explicit) Euler method.

    :param f: right-hand side, called as f(x, t)
    :param x0: initial condition at t = S
    :param S: start time
    :param T: end time
    :param npoints: number of grid points (including both endpoints)
    :return: (x, t) arrays of the numerical solution and its time grid
    """
    # np.linspace spacing is (T - S)/(npoints - 1), so the step size must
    # match it. (Fixed: the original used (T - S)/npoints, which made the
    # numerical step inconsistent with the plotted time axis.)
    t = np.linspace(S, T, npoints)
    h = (T - S) / (npoints - 1)
    x = np.zeros(len(t))
    x[0] = x0
    for i in range(1, len(t)):
        # Forward Euler advances using the slope at the *previous* grid
        # point. (Fixed: the original evaluated f at t[i], which is wrong
        # for time-dependent right-hand sides such as f2.)
        x[i] = x[i - 1] + f(x[i - 1], t[i - 1]) * h
    # Figure creation kept here because the calling script relies on it.
    plt.figure(figsize=(12, 7))
    return x, t
# The solver is invoked for both test problems below. euler() arguments:
#   function, initial condition x(0), start time, end time, number of points.
# (To change the end time of the analytical curves, edit the second argument
# of np.arange inside f1()/f3().)
f1plot,t1, = euler(f,1,0,5,50)
f2plot,t2 = euler(f2,1,0,5,50)
# Plot the two numerical solutions produced above.
plt.plot(t1,f1plot,'o--',t2,f2plot,'o--',label=r'$f(x)$')
# Evaluate the analytical solutions on their own time grids.
t_t1, an_plot = f1()
t_t2, an2_plot = f3()
# Overlay the analytical curves.
plt.plot(t_t1,an_plot,'r--',t_t2,an2_plot,'g--')
plt.legend(('f(x)|1','f(x)|2','An_sol|1','An_sol|2'),loc='upper right')
# Axis labels.
plt.ylabel('X AS A FUNCTION OF T')
plt.xlabel('time')
# Draw the x and y axes through the origin.
plt.axhline(color='black')
plt.axvline(color ='black')
# Background grid.
plt.grid(linewidth=2)
| StarcoderdataPython |
3216675 | <filename>0023/driver.py
#!/usr/bin/python
import cProfile
def d(n):
    """Return the sum of proper divisors (aliquot sum) of n.

    Divisors are found in pairs (x, n // x) up to sqrt(n); a perfect-square
    root is counted once. d(0) and d(1) are defined as 0.
    """
    from math import isqrt
    if n in (0, 1):
        return 0
    # Fixed: int(n ** 0.5) can be off by one for large n due to float
    # rounding; math.isqrt is exact.
    root = isqrt(n)
    divisors = [1]
    upper = root + 1
    if root * root == n:
        # Perfect square: count the root once and exclude it from the loop.
        divisors.append(root)
        upper = root
    for x in range(2, upper):
        if n % x == 0:
            # Fixed: use exact integer division (the original int(n / x)
            # goes through a float and can lose precision for huge n).
            divisors.extend([x, n // x])
    return sum(divisors)
def method1():
    """Project Euler 23 via a sieve of abundant-pair sums below 28124."""
    abundant = [n for n in range(28124) if d(n) > n]
    representable = [False] * 28124
    for a in abundant:
        for b in abundant:
            total = a + b
            if total > 28123:
                # abundant is ascending, so every later b also overflows.
                break
            representable[total] = True
    return sum(k for k in range(28124) if not representable[k])
def method2():
    """Project Euler 23 via set-membership lookups of abundant complements."""
    abundants = {i for i in range(1, 28124) if d(i) > i}

    def is_abundant_sum(value):
        return any((value - a) in abundants for a in abundants)

    return sum(value for value in range(1, 28124) if not is_abundant_sum(value))
if __name__ == '__main__':
    # Profile both implementations. cProfile.run executes its statement in
    # the __main__ namespace, so `m` (the loop variable) and `answer` (set
    # by the profiled statement) both resolve at module scope when this
    # file is run as a script.
    for m in (method1, method2):
        cProfile.run('answer = m()')
        print('Answer = {0}'.format(answer))
| StarcoderdataPython |
9719019 | <reponame>PJ-Schulz/reddish
from dataclasses import dataclass
from hiredis import ReplyError
class Ok:
    """Pydantic-compatible marker type for a Redis simple-string "OK" reply."""
    @classmethod
    def __get_validators__(cls):
        # Pydantic v1 hook: yield the validator callables for this type.
        yield cls._validate
    @classmethod
    def _validate(cls, value):
        if isinstance(value, cls):
            return value
        elif b'OK' == value:
            # NOTE(review): returns the class object itself rather than an
            # instance (cls()) — looks like a sentinel usage, but confirm
            # against call sites.
            return cls
        else:
            raise ValueError('value is not a valid redis OK response')
@dataclass(frozen=True)
class ErrorMessage:
    """Structured form of a Redis error reply, e.g. "ERR unknown command"."""
    # First whitespace-separated token of the reply (the error code).
    code: str
    # Remainder of the reply text.
    message: str
    @classmethod
    def __get_validators__(cls):
        # Pydantic v1 hook: yield the validator callables for this type.
        yield cls._validate
    @classmethod
    def _validate(cls, value):
        if isinstance(value, cls):
            return value
        if isinstance(value, ReplyError):
            # Redis error strings are "<CODE> <message...>"; split once.
            code, message = str(value).split(maxsplit=1)
            return cls(code, message)
        else:
            raise TypeError('value is not a valid redis error reply')
| StarcoderdataPython |
3378130 |
# Test fixture: expected parser output mapping each VLAN ID to its VXLAN
# network identifier (VNI).
expected_output = {
    'vlan':
        {'100': {'vni': '8100'},
         '1000': {'vni': '9100'},
         '1005': {'vni': '9105'},
         '1006': {'vni': '9106'},
         '1007': {'vni': '9107'},
         '1008': {'vni': '9108'},
         '1009': {'vni': '9109'},
         '101': {'vni': '8101'},
         '103': {'vni': '8103'},
         '105': {'vni': '8105'},
         '106': {'vni': '8106'},
         '107': {'vni': '8107'},
         '108': {'vni': '8108'},
         '109': {'vni': '8109'},
         '110': {'vni': '8110'},
         '111': {'vni': '8111'},
         '112': {'vni': '8112'},
         '113': {'vni': '8113'},
         '114': {'vni': '8114'}
         }
    }
| StarcoderdataPython |
1801325 | <reponame>LawrenceDior/thetis
"""
Test GridInterpolator object
"""
from thetis.interpolation import GridInterpolator
import numpy as np
from scipy.interpolate import griddata
import pytest
def do_interpolation(dataset='random', plot=False):
"""
Compare GridInterpolator against scipy.griddata
"""
np.random.seed(2)
# fabricate dataset
x_scale = 100.
ndata = 35
x = np.linspace(0, x_scale, ndata)
y = np.linspace(0, x_scale, ndata)
xx, yy = np.meshgrid(x, y)
xy = np.vstack((xx.ravel(), yy.ravel())).T
if dataset == 'sin':
zz = np.sin(2*np.pi*xx/x_scale)*np.sin(1.5*2*np.pi*yy/x_scale)
elif dataset == 'gauss':
zz = np.exp(-(((xx - 50.)/20.)**2 + ((yy - 50.)/40.)**2))
else:
zz = np.random.rand(*xx.shape)
z = zz.ravel()
# generate 2D mesh points
x_lim = [20., 70.]
y_lim = [10., 90.]
npoints = 120
mesh_x = (x_lim[1] - x_lim[0])*np.random.rand(npoints) + x_lim[0]
mesh_y = (y_lim[1] - y_lim[0])*np.random.rand(npoints) + y_lim[0]
mesh_xy = np.vstack((mesh_x, mesh_y)).T
# interpolate with scipy
result = griddata(xy, z, mesh_xy, method='linear')
# interpolate with GridInterpolator
interp = GridInterpolator(xy, mesh_xy)
result2 = interp(z)
assert np.allclose(result, result2)
if plot:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.pcolorfast(x, y, zz, cmap=plt.get_cmap('RdBu_r'))
ax.plot(mesh_x, mesh_y, 'k.')
plt.show()
@pytest.mark.parametrize('dataset', ['random', 'sin', 'gauss'])
def test_gridinterpolator(dataset):
do_interpolation(dataset=dataset)
if __name__ == '__main__':
do_interpolation(dataset='sin', plot=True)
| StarcoderdataPython |
1925320 | import json
from math import ceil, degrees
from System.Net import WebClient
from pyrevit import revit, DB
from fetchbim import settings
from fetchbim.family import Family, GroupedFamily
from fetchbim.attributes import Parameter
def isclose(a, b, rel_tol=1e-9, abs_tol=0.0):
    """Approximate float equality (backport of math.isclose, PEP 485).

    Delegates to math.isclose, which implements exactly the formula the
    original hand-rolled version used —
    abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol) —
    but additionally handles infinities correctly (the manual formula
    returned False for isclose(inf, inf) because inf - inf is NaN).
    """
    import math
    return math.isclose(a, b, rel_tol=rel_tol, abs_tol=abs_tol)
def normalize_number(number):
if isclose(number, number // 1, abs_tol=0.001):
return number // 1
elif isclose(number, ceil(number), abs_tol=0.001):
return ceil(number)
else:
return round(number, 4)
def normalize_angle(angle):
    """Round *angle* (degrees) to the nearest whole degree in [0, 360)."""
    whole = round(angle, 0)
    return int(whole % 360)
# Built-in Revit parameter names kept even though they are BuiltInParameter
# values; all other built-in parameters are filtered out in the loop below.
BUILTIN_KEEP = ["Width", "Depth", "Height"]
# ROUND_DIGITS = 3
# Script body: pick a Revit model group, measure its bounding box, serialize
# its member families (with their parameters and host relationships) into a
# fetchbim Family/GroupedFamily tree, and POST it to the fetchbim API.
if settings.BIM_KEY:
    builtInCat = DB.BuiltInCategory.OST_IOSModelGroups
    group = revit.selection.pick_element_by_category(
        builtInCat, message="select a model group"
    )
    if group:
        # Read the category and display name from the group type, falling
        # back to placeholder values when the parameters are absent.
        group_type = group.GroupType
        group_type_params = group_type.Parameters
        category_name = "Model Group/Test"
        group_name = "Enter Group Name"
        category_param = [
            x for x in group_type_params if x.Definition.Name == "CategoryName"
        ]
        group_name_param = [
            x for x in group_type_params if x.Definition.Name == "Group Name"
        ]
        if category_param:
            category_name = category_param[0].AsString()
        if group_name_param:
            group_name = group_name_param[0].AsString()
        group_members = group.GetMemberIds()
        members = [revit.doc.GetElement(id) for id in group_members]
        # Aggregate the axis-aligned bounding box of all member families.
        min_list = []
        max_list = []
        for family in members:
            bbox = family.get_BoundingBox(None)
            min_list.append((bbox.Min.X, bbox.Min.Y, bbox.Min.Z))
            max_list.append((bbox.Max.X, bbox.Max.Y, bbox.Max.Z))
        minX = min([x[0] for x in min_list])
        minY = min([x[1] for x in min_list])
        minZ = min([x[2] for x in min_list])
        maxX = max([x[0] for x in max_list])
        maxY = max([x[1] for x in max_list])
        maxZ = max([x[2] for x in max_list])
        Width = normalize_number(maxX - minX)
        Depth = normalize_number(maxY - minY)
        Height = normalize_number(maxZ - minZ)
        # The first grouped family is a synthetic bounding-box placeholder.
        GroupedFamilies = []
        bbox_family = GroupedFamily(
            settings.BOUNDING_BOX_ID,
            settings.BOUNDING_BOX_TYPE_ID,
            Parameters=[
                Parameter("Width", Width, DataType="Length"),
                Parameter("Depth", Depth, DataType="Length"),
                Parameter("Height", Height, DataType="Length"),
                Parameter("Group Name", group_name, DataType="Text"),
                Parameter("ENTER_Shape Number", 1, DataType="Integer"),
            ],
        )
        bbox_family.ProjectId = None
        GroupedFamilies.append(bbox_family)
        ChildFamilies = []
        print(group.Name)
        print("Width={}, Depth={}, Height={}".format(Width, Depth, Height))
        print("Instances: ")
        # Serialize each top-level member (nested shared families, which have
        # a SuperComponent, are skipped here).
        for family in members:
            Parameters = []
            parent_family = family.SuperComponent
            if not parent_family:
                host = family.Host
                rot = normalize_angle(degrees(family.Location.Rotation))
                print("\t{}: {}".format(family.Symbol.FamilyName, family.Name))
                print(
                    "\t\tX={}, Y={}, Rotation={}".format(
                        normalize_number(family.Location.Point.X - minX),
                        normalize_number(family.Location.Point.Y - maxY),
                        rot,
                    )
                )
                # Collect writable instance parameters: user-defined ones
                # (BuiltInParameter == INVALID) plus the whitelisted built-ins,
                # excluding internal names that start with "z".
                instance_params = family.Parameters
                for param in instance_params:
                    if param.IsReadOnly is False:
                        if (
                            param.Definition.BuiltInParameter
                            == DB.BuiltInParameter.INVALID
                            or param.Definition.Name in BUILTIN_KEEP
                        ):
                            if param.Definition.Name.startswith("z") == False:
                                # print(param.Definition.Name, param.Definition.ParameterType)
                                if (
                                    param.Definition.ParameterType
                                    == DB.ParameterType.Length
                                ):
                                    p = Parameter(
                                        param.Definition.Name,
                                        normalize_number(param.AsDouble()),
                                        DataType="Length",
                                    )
                                    Parameters.append(p)
                                elif (
                                    param.Definition.ParameterType
                                    == DB.ParameterType.Integer
                                ):
                                    p = Parameter(
                                        param.Definition.Name,
                                        param.AsInteger(),
                                        DataType="Integer",
                                    )
                                    Parameters.append(p)
                                elif (
                                    param.Definition.ParameterType
                                    == DB.ParameterType.YesNo
                                ):
                                    p = Parameter(
                                        param.Definition.Name,
                                        param.AsInteger(),
                                        DataType="Boolean",
                                    )
                                    Parameters.append(p)
                # Every serialized family must carry fetchbim identifiers on
                # its type; fail loudly when they are missing.
                type_params = family.Symbol.Parameters
                ssgfid = [
                    x.AsString() for x in type_params if x.Definition.Name == "SSGFID"
                ]
                ssgtid = [
                    x.AsString() for x in type_params if x.Definition.Name == "SSGTID"
                ]
                if ssgfid:
                    print("\t\tSSGFID: {}".format(ssgfid[0]))
                else:
                    raise AttributeError(
                        "{} is missing SSGFID parameter".format(
                            family.Symbol.FamilyName
                        )
                    )
                if ssgtid:
                    # NOTE(review): the printed label "SSTFID" looks like a typo
                    # for "SSGTID"; runtime string left unchanged in this pass.
                    print("\t\tSSTFID: {}".format(ssgtid[0]))
                else:
                    raise AttributeError(
                        "{} is missing SSGTID parameter".format(
                            family.Symbol.FamilyName
                        )
                    )
                if host:
                    # Hosted family: record it as a child of its host, resolved
                    # after the loop (or immediately when double-nested).
                    print("\t\tHosted to {}".format(host.Symbol.FamilyName))
                    # NOTE(review): normalize_number is applied twice to the
                    # Width/Depth offsets here -- redundant but harmless;
                    # confirm intent.
                    child_fam = GroupedFamily(
                        ssgfid[0],
                        ssgtid[0],
                        Width=normalize_number(
                            normalize_number(family.Location.Point.X - minX)
                        ),
                        Depth=normalize_number(
                            normalize_number(family.Location.Point.Y - maxY)
                        ),
                        Rotation=rot,
                        Parameters=Parameters,
                    )
                    # print(repr(Parameters))
                    child_fam.HostProjectId = host.Id.IntegerValue
                    child_fam.ProjectId = family.Id.IntegerValue
                    double_nested = False
                    for child in ChildFamilies:
                        if child.ProjectId == child_fam.HostProjectId:
                            child.ChildModelGroups.append(child_fam)
                            double_nested = True
                    if double_nested is False:
                        ChildFamilies.append(child_fam)
                else:
                    # Unhosted family: goes straight into the group's list.
                    fam = GroupedFamily(
                        ssgfid[0],
                        ssgtid[0],
                        Width=normalize_number(
                            normalize_number(family.Location.Point.X - minX)
                        ),
                        Depth=normalize_number(
                            normalize_number(family.Location.Point.Y - maxY)
                        ),
                        Rotation=rot,
                        Parameters=Parameters,
                    )
                    # print(repr(Parameters))
                    fam.ProjectId = family.Id.IntegerValue
                    GroupedFamilies.append(fam)
        # Attach each hosted child to its host family, matched by project id.
        for child in ChildFamilies:
            for grouped_fam in GroupedFamilies:
                if child.HostProjectId == grouped_fam.ProjectId:
                    grouped_fam.ChildModelGroups.append(child)
        model_group = Family(
            group.Name,
            LoadMethod=1,
            CategoryName=category_name,
            FamilyObjectType="ModelGroup",
            GroupedFamilies=GroupedFamilies,
        )
        data = model_group.to_json()
        # POST the serialized group to the fetchbim API via .NET WebClient.
        print("Publishing Group")
        url = settings.POST_FAMILY
        client = WebClient()
        client.Headers.Add("Authorization", "Bearer " + settings.BIM_KEY)
        client.Headers.Add("Accept", "application/json")
        client.Headers.Add("Content-Type", "application/json")
        response = client.UploadString(url, "POST", data)
        response_dict = json.loads(response)
        print("\tSSGFID: {}".format(response_dict["Id"]))
else:
    print("Authorization key needed to run this script")
| StarcoderdataPython |
1743965 | import math
from functools import partial
from keras import backend as K
from keras.callbacks import ModelCheckpoint, CSVLogger, LearningRateScheduler, ReduceLROnPlateau, EarlyStopping
from keras.models import load_model
from legacy.unet3dlegacy.metrics import (dice_coefficient, dice_coefficient_loss, dice_coef, dice_coef_loss,
weighted_dice_coefficient_loss, weighted_dice_coefficient)
K.set_image_data_format('channels_first')
# learning rate schedule
def step_decay(epoch, initial_lrate, drop, epochs_drop):
    """Step-wise learning-rate schedule.

    The learning rate starts at *initial_lrate* and is multiplied by
    *drop* once every *epochs_drop* epochs (*epoch* is 0-based).
    """
    n_drops = math.floor((1 + epoch) / float(epochs_drop))
    return initial_lrate * math.pow(drop, n_drops)
def get_callbacks(model_file, initial_learning_rate=0.0001, learning_rate_drop=0.5, learning_rate_epochs=None,
                  learning_rate_patience=50, logging_file="training.log", verbosity=1,
                  early_stopping_patience=None):
    """Assemble the Keras callbacks used during training.

    Always includes best-model checkpointing and CSV logging. When
    *learning_rate_epochs* is set, the LR follows the fixed step_decay
    schedule; otherwise it is reduced on a validation-loss plateau.
    Early stopping is appended only when *early_stopping_patience* is set.
    """
    checkpoint = ModelCheckpoint(model_file, save_best_only=True)
    csv_log = CSVLogger(logging_file, append=True)
    if learning_rate_epochs:
        schedule = partial(step_decay, initial_lrate=initial_learning_rate,
                           drop=learning_rate_drop, epochs_drop=learning_rate_epochs)
        lr_callback = LearningRateScheduler(schedule)
    else:
        lr_callback = ReduceLROnPlateau(factor=learning_rate_drop, patience=learning_rate_patience,
                                        verbose=verbosity)
    callbacks = [checkpoint, csv_log, lr_callback]
    if early_stopping_patience:
        callbacks.append(EarlyStopping(verbose=verbosity, patience=early_stopping_patience))
    return callbacks
def load_old_model(model_file):
    """Load a previously saved Keras model, re-registering custom objects.

    The project's custom dice losses/metrics (and InstanceNormalization,
    when keras-contrib is installed) must be passed to load_model or
    deserialization fails.
    """
    print("Loading pre-trained model")
    custom_objects = {'dice_coefficient_loss': dice_coefficient_loss, 'dice_coefficient': dice_coefficient,
                      'dice_coef': dice_coef, 'dice_coef_loss': dice_coef_loss,
                      'weighted_dice_coefficient': weighted_dice_coefficient,
                      'weighted_dice_coefficient_loss': weighted_dice_coefficient_loss}
    try:
        from keras_contrib.layers import InstanceNormalization
        custom_objects["InstanceNormalization"] = InstanceNormalization
    except ImportError:
        # keras-contrib is optional; only models that use it need the layer.
        pass
    try:
        return load_model(model_file, custom_objects=custom_objects)
    except ValueError as error:
        if 'InstanceNormalization' in str(error):
            # Re-raise with an actionable install hint for the missing layer.
            raise ValueError(str(error) + "\n\nPlease install keras-contrib to use InstanceNormalization:\n"
                                          "'pip install git+https://www.github.com/keras-team/keras-contrib.git'")
        else:
            raise error
def train_model(model, model_file, training_generator, validation_generator, steps_per_epoch, validation_steps,
                initial_learning_rate=0.001, learning_rate_drop=0.5, learning_rate_epochs=None, n_epochs=500,
                learning_rate_patience=20, early_stopping_patience=None):
    """
    Train a Keras model.
    :param early_stopping_patience: If set, training will end early if the validation loss does not improve after the
    specified number of epochs.
    :param learning_rate_patience: If learning_rate_epochs is not set, the learning rate will decrease if the validation
    loss does not improve after the specified number of epochs. (default is 20)
    :param model: Keras model that will be trained.
    :param model_file: Where to save the Keras model.
    :param training_generator: Generator that iterates through the training data.
    :param validation_generator: Generator that iterates through the validation data.
    :param steps_per_epoch: Number of batches that the training generator will provide during a given epoch.
    :param validation_steps: Number of batches that the validation generator will provide during a given epoch.
    :param initial_learning_rate: Learning rate at the beginning of training.
    :param learning_rate_drop: How much at which to the learning rate will decay.
    :param learning_rate_epochs: Number of epochs after which the learning rate will drop.
    :param n_epochs: Total number of epochs to train the model.
    :return: None; the best model (by validation loss) is written to model_file by the ModelCheckpoint callback.
    """
    # Checkpointing, LR scheduling, CSV logging, and early stopping are all
    # delegated to the callbacks built by get_callbacks().
    model.fit_generator(generator=training_generator,
                        steps_per_epoch=steps_per_epoch,
                        epochs=n_epochs,
                        validation_data=validation_generator,
                        validation_steps=validation_steps,
                        callbacks=get_callbacks(model_file,
                                                initial_learning_rate=initial_learning_rate,
                                                learning_rate_drop=learning_rate_drop,
                                                learning_rate_epochs=learning_rate_epochs,
                                                learning_rate_patience=learning_rate_patience,
                                                early_stopping_patience=early_stopping_patience))
| StarcoderdataPython |
296205 | import argparse
import logging
import os
import warnings
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from scipy.ndimage import gaussian_gradient_magnitude
from skimage import feature, morphology
from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS
warnings.filterwarnings("ignore")
# Root logging configuration; the module-level logger is used throughout.
logging.basicConfig(
    format='%(asctime)s %(levelname)-8s %(message)s',
    level=logging.WARNING,
    datefmt='%Y-%m-%d %H:%M:%S'
)
_logger = logging.getLogger(__name__)
# Command-line interface for the word-cloud generator; mirrored by the
# keyword arguments of do_wordcloud below.
parser = argparse.ArgumentParser(description="Generate a word cloud")
parser.add_argument("--text-path", required=True, help="path to a file containing text")
parser.add_argument("--image-path", required=True, help="path to a file containing an image")
parser.add_argument("--output-path", default="output/imagecloud.png", help="path to a output the word cloud image")
parser.add_argument("--downsample", type=int, default=1, help="amount by which to divide input image's pixels")
parser.add_argument("--seed", type=int, default=None, help="randomness seed")
parser.add_argument("--no-detect-edges", action='store_true', default=False, help="skip edge detection")
parser.add_argument("--edge-sigma", type=float, default=2,
                    help="standard deviations of the Gaussian filter used for edge detection")
parser.add_argument("--edge-threshold", type=float, default=0.08,
                    help="cutoff of gaussian filter above which edge is detected")
parser.add_argument("--extra-stopwords", default=None, help="comma-separated list of stopwords to add to default")
parser.add_argument("--max-font-size", type=int, default=None, help="maximum font size of a word in the cloud")
parser.add_argument("--max-words", type=int, default=200, help="maximum number of words in cloud")
parser.add_argument("--relative-scaling", type=float, default=0.5,
                    help="relative importance of frequency vs. rank for word size. 0 is completely rank. 1 is"
                         "completely frequency.")
parser.add_argument("--no-plot", action="store_true", default=False, help="skip plotting")
parser.add_argument("--log-level", default=logging.INFO, help="log level (DEBUG, INFO, WARNING, or ERROR)")
parser.add_argument("--edge-strategy", choices=("gaussian", "canny"), default="canny", help="how to detect edges")
parser.add_argument("--small-object-size", type=int, default=None, help="size in pixels of small objects to remove from"
                    " edge detection")
# Mask sentinels: fully transparent pixels and excluded ("white") mask areas.
_ALPHA_TRANSPARENT = 0
_MASK_EXCLUDE = 255
def do_wordcloud(
    text_path,
    image_path,
    output_path="output/imagecloud.png",
    downsample_ratio=1,
    random_seed=None,
    do_detect_edges=True,
    edge_sigma=2,
    edge_threshold=0.8,
    extra_stopwords=None,
    max_font_size=None,
    max_words=200,
    relative_scaling=0.5,
    do_plot=True,
    edge_strategy="canny",
    small_object_size=None,
):
    """
    Generate a word cloud shaped and colored by an image and write it to disk.

    :param text_path: path to file containing text
    :param image_path: path to file containing image for masking and coloring
    :param output_path: where to output the WordCloud as an image
    :param downsample_ratio: divide image pixels by this
    :param random_seed: the randomness seed for the WordCloud
    :param do_detect_edges: whether to do edge detection
    :param edge_sigma: standard deviations of the Gaussian filter used for edge detection
    :param edge_threshold: threshold of Gaussian used for edge detection
    :param extra_stopwords: extra stopwords. If missing, defaults are used.
    :param max_font_size: maximum font size of any word
    :param max_words: maximum number of words
    :param relative_scaling: relative importance of frequency vs. rank for word size. 0 is completely rank. 1 is
        completely frequency.
    :param do_plot: whether to show informative plots in addition to saving image
    :param edge_strategy: how to detect edges: gaussian or canny
    :param small_object_size: the size in pixels of small objects to remove from edge detection.
    """
    # NOTE(review): the default edge_threshold here (0.8) differs from the CLI
    # default (0.08) -- confirm which is intended; left unchanged to keep the
    # public interface stable.
    # Fix: read the text with a context manager so the file handle is closed
    # deterministically (previously `open(text_path).read()` leaked it).
    with open(text_path) as text_file:
        text = text_file.read()
    image = Image.open(image_path)
    image_data = np.array(image)
    if len(image_data.shape) < 3:
        # Masking and coloring below require a color (H, W, C) array.
        raise Exception("image_data needs three dimensions. (did you provide a color image?)")
    if downsample_ratio != 1:
        _logger.debug("downsampling by %d", downsample_ratio)
        image_data = image_data[::downsample_ratio, ::downsample_ratio]
    # create mask: white (255) means "masked out" for WordCloud.
    mask = image_data.copy()
    mask[mask.sum(axis=2) == _ALPHA_TRANSPARENT] = _MASK_EXCLUDE
    edges = None
    if do_detect_edges:
        _logger.debug("calculating edges")
        # Average per-channel edge strength into a single 2-D edge map.
        if edge_strategy == "gaussian":
            edges = np.mean([gaussian_gradient_magnitude(image_data[:, :, i] / 255., edge_sigma) for i in range(3)], axis=0)
        elif edge_strategy == "canny":
            edges = np.mean([feature.canny(image_data[:, :, i] / 255., sigma=edge_sigma) for i in range(3)], axis=0)
        if small_object_size:
            _logger.debug("removing objects smaller than %d pixels", small_object_size)
            without_objects = morphology.remove_small_objects(edges > edge_threshold, small_object_size)
            edges = without_objects.astype(int) * edges
        _logger.debug("calculated edges")
        # Exclude strong edges from the drawable area so words avoid them.
        mask[edges > edge_threshold] = _MASK_EXCLUDE
    stopwords = STOPWORDS if extra_stopwords is None else STOPWORDS | set(extra_stopwords.split(","))
    wc = WordCloud(
        max_words=max_words,
        mask=mask,
        max_font_size=max_font_size,
        random_state=random_seed,
        # relative_scaling=0 means the frequencies in the data are reflected less
        # accurately but it makes a better picture
        relative_scaling=relative_scaling,
        mode="RGBA",
        stopwords=stopwords,
    )
    _logger.debug("generating word cloud")
    wc.generate(text)
    _logger.debug("generated word cloud")
    if do_plot:
        plt.title("WordCloud")
        _logger.debug("plotting WordCloud")
        plt.imshow(wc)
    # create coloring from the image and recolor the cloud to match it.
    image_colors = ImageColorGenerator(image_data)
    wc.recolor(color_func=image_colors)
    output_dir = os.path.dirname(output_path)
    Path(output_dir).mkdir(parents=True, exist_ok=True)
    _logger.debug("writing file")
    wc.to_file(output_path)
    if do_plot:
        plt.figure(figsize=(10, 10))
        plt.title("WordCloud Recolored")
        _logger.debug("plotting WordCloud Recolored")
        plt.imshow(wc, interpolation="bilinear")
        plt.figure(figsize=(10, 10))
        plt.title("Original Image")
        _logger.debug("plotting Original Image")
        plt.imshow(image_data)
        plt.figure(figsize=(10, 10))
        plt.title("Mask")
        _logger.debug("plotting Mask")
        plt.imshow(mask)
        if edges is not None:
            plt.figure(figsize=(10, 10))
            plt.title("Edges")
            _logger.debug("plotting Edges")
            plt.imshow(edges)
        _logger.debug("showing plot")
        plt.show()
if __name__ == "__main__":
    # CLI entry point: parse args, apply the requested verbosity, and run.
    args = parser.parse_args()
    _logger.setLevel(logging.getLevelName(args.log_level))
    do_wordcloud(
        text_path=args.text_path,
        image_path=args.image_path,
        output_path=args.output_path,
        downsample_ratio=args.downsample,
        random_seed=args.seed,
        do_detect_edges=not args.no_detect_edges,
        edge_sigma=args.edge_sigma,
        edge_threshold=args.edge_threshold,
        extra_stopwords=args.extra_stopwords,
        max_font_size=args.max_font_size,
        max_words=args.max_words,
        relative_scaling=args.relative_scaling,
        do_plot=not args.no_plot,
        edge_strategy=args.edge_strategy,
        small_object_size=args.small_object_size,
    )
| StarcoderdataPython |
109319 | # -*- coding: utf-8 -*-
import unittest
from converter import Converter, ConverterRequest, ConverterResponse
from datetime import datetime
from .rate_providers import RateProviderInterface
class TestConverter(unittest.TestCase):
    """Unit tests for Converter."""

    def test_constructor_raises_when_invalid_rate_provider_is_given(self):
        """A non-RateProvider argument must be rejected at construction."""
        with self.assertRaises(Exception):
            Converter("not a RateProvider")

    def test_convert(self):
        """Conversion multiplies the amount by the provider's rate."""
        sut = Converter(MockRateProvider())
        cases = [
            (15.0, "EUR", "EUR", "2020-01-13", ConverterResponse(15.0, "EUR")),
            (15.0, "EUR", "USD", "2020-01-14", ConverterResponse(30.0, "USD")),
        ]
        for amount, src, dest, date, expected in cases:
            request = ConverterRequest(amount, src, dest, date)
            response = sut.convert(request)
            self.assertEqual(expected.serialize(), response.serialize())
class MockRateProvider(RateProviderInterface):
    """Test double that always reports a fixed exchange rate of 2.0."""
    def get_rate(self, src_currency: str, dest_currency: str, reference_date: datetime) -> float:
        """Return a constant rate of 2.0, ignoring all arguments."""
        return 2.0
| StarcoderdataPython |
1789446 | <reponame>andremartins746/OpenCV_Python-Processamento_de_imagens
import numpy as np
import cv2
# Paths of the input video and of the filtered output video.
VIDEO_SOURSE = 'videos/Cars.mp4'
VIDEO_OUT = 'videos/results/filtragem_mediana_temporal.avi'
# Open the input video.
cap = cv2.VideoCapture(VIDEO_SOURSE)
# Read the first frame (its shape sizes the output writer below).
hasFrame, frame = cap.read()
# Show the characteristics of the video.
#print(hasFrame, frame.shape)
# Choose the output codec and create the writer (grayscale output).
fourcc = cv2.VideoWriter_fourcc(* 'XVID')
writer = cv2.VideoWriter(VIDEO_OUT,fourcc, 25, (frame.shape[1], frame.shape[0]), False)
# Show how many frames the video has.
#print(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# Draw 25 frame indices uniformly at random across the whole video.
#print(np.random.uniform(size=25))
framesIds = cap.get(cv2.CAP_PROP_FRAME_COUNT) * np.random.uniform(size=25)
#print(framesIds)
# Show one of the 25 randomly chosen frames.
#108, 2000
#hasFrame, frame = cap.read()
#cv2.imshow('teste', frame)
#cv2.waitKey(0)
# Grab the sampled frames.
frames = []
for fid in framesIds:
    cap.set(cv2.CAP_PROP_POS_FRAMES, fid)
    hasFrame, frame = cap.read()
    frames.append(frame)
#print(np.asarray(frames).shape)
#print(frames[0])
#print(frames[1])
#for frame in frames:
#cv2.imshow('frame', frame)
#  cv2.waitKey(0)
# Pixel-wise temporal median of the sampled frames: a background model.
medianFrame = np.median(frames, axis=0).astype(dtype=np.uint8)
#print(frame[0])
#print(medianFrame)
#cv2.imshow("median frame",medianFrame)
#cv2.waitKey(0)
cv2.imwrite('model_median_frame.jpg', medianFrame)
# Rewind to the first frame before streaming through the video again.
cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
grayMedianFrame = cv2.cvtColor(medianFrame, cv2.COLOR_BGR2GRAY)
#cv2.imshow('gray',grayMedianFrame)
#cv2.waitKey(0)
while (True):
    hasFrame, frame = cap.read()
    if not hasFrame:
        print('Error')
        break
    frameGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Absolute difference against the background model highlights motion.
    dframe = cv2.absdiff(frameGray, grayMedianFrame)
    #th, dframe = cv2.threshold(dframe, 70, 255, cv2.THRESH_BINARY)
    # Otsu's method picks the binarization threshold automatically.
    th, dframe = cv2.threshold(dframe, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    print(th)
    cv2.imshow('frame', dframe)
    writer.write(dframe)
    if(cv2.waitKey(1) & 0xFF == ord('q')):
        break
writer.release()
cap.release()
6552708 | # Python edgegrid module
""" Copyright 2015 Akamai Technologies, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
************************************************************************
* Media Services CLI module by <NAME> (<EMAIL>) & <NAME> (<EMAIL>)*
************************************************************************
"""
import sys
import os
import requests
import logging
import json
import texttable as tt
from akamai.edgegrid import EdgeGridAuth, EdgeRc
from config import EdgeGridConfig
if sys.version_info[0] >= 3:
# python3
from urllib import parse
else:
# python2.7
import urlparse as parse
logger = logging.getLogger(__name__)
def formatOutputStreamList(streamList, output_type):
    """Print the stream list as indented JSON or as a summary text table."""
    if output_type == "json":
        print(json.dumps(streamList, indent=2))
    if output_type == "text":
        # One row per stream with its key identifying fields.
        table = tt.Texttable()
        table.set_cols_width([15, 30, 15, 15, 40])
        table.set_cols_align(['c'] * 5)
        table.set_cols_valign(['m'] * 5)
        table.header(['ID', 'Name', 'Format', 'CPcode', 'Origin'])
        for stream in streamList['streams']:
            table.add_row([stream["id"], stream["name"], stream["format"],
                           stream['cpcode'], stream['originHostName']])
        print(table.draw())
def formatOutputLiveOriginList(liveOriginList, output_type):
    """Print the live-origin list as indented JSON or as a text table."""
    if output_type == "json":
        print(json.dumps(liveOriginList, indent=2))
    if output_type == "text":
        table = tt.Texttable()
        table.set_cols_width([45, 10, 10, 15, 12])
        table.set_cols_align(['c'] * 5)
        table.set_cols_valign(['m'] * 5)
        table.header(['HostName', 'Id', 'CPCode', 'Encoder', 'Status'])
        for origin in liveOriginList:
            table.add_row([origin["hostName"], origin["id"], origin["cpcode"],
                           origin["location"], origin['status']])
        print(table.draw())
def formatOutputGetStream(streamInfo, output_type):
    """Print one stream's details as indented JSON or as a key/value table."""
    if output_type == "json":
        print(json.dumps(streamInfo, indent=2))
    if output_type == "text":
        table = tt.Texttable()
        table.set_cols_width([15, 60])
        table.set_cols_align(['c', 'c'])
        table.set_cols_valign(['m', 'm'])
        keys = ['id', 'name', 'format', 'cpcode', 'origin',
                'createdDate', 'modifiedDate', 'storagecpcode', 'encoderZone',
                'primaryPublishingUrl', 'backupPublishingUrl', 'allowedIps']
        for key in keys:
            # Two keys are looked up in nested structures rather than directly.
            if key == 'origin':
                value = streamInfo[key]['hostName']
            elif key == 'storagecpcode':
                value = streamInfo['storageGroup']['cpcode']
            else:
                value = streamInfo[key]
            table.add_row([key, value])
        print(table.draw())
def formatOutputOrigin(originInfo, output_type):
    """Print a live origin's details as indented JSON or as a key/value table.

    :param originInfo: dict of origin attributes from the API
    :param output_type: "json" for raw JSON, "text" for a two-column table
    """
    if output_type == "json":
        # Let's print the JSON
        print(json.dumps(originInfo, indent=2))
    if output_type == "text":
        ParentTable = tt.Texttable()
        ParentTable.set_cols_width([15, 60])
        ParentTable.set_cols_align(['c', 'c'])
        ParentTable.set_cols_valign(['m', 'm'])
        # Fix: 'activeVersion' was listed twice in the original key list,
        # producing a duplicated row in the table; it is emitted once now.
        headerList = ['id', 'type', 'cpcode', 'encoderZone',
                      'backupEncoderZone', 'hostName', 'backupHostName',
                      'status', 'activeVersion', 'amdProperties', 'modifiedDate']
        for key in headerList:
            ParentTable.add_row([key, originInfo[key]])
        print(ParentTable.draw())
def formatOutputContract(contractInfo, output_type):
    """Print contract records as indented JSON or as a text table."""
    if output_type == "json":
        print(json.dumps(contractInfo, indent=2))
    if output_type == "text":
        table = tt.Texttable()
        table.set_cols_width([30, 15, 15])
        table.set_cols_align(['c'] * 3)
        table.set_cols_valign(['m'] * 3)
        table.header(['Contract Name', 'Contract ID', 'Account ID'])
        for contract in contractInfo:
            table.add_row([contract["contractName"], contract["contractId"],
                           contract["accountId"]])
        print(table.draw())
def formatOutputEncoderLocationsList(encoderLocationsList, output_type):
    """Print encoder locations as indented JSON or as a one-column table."""
    if output_type == "json":
        print(json.dumps(encoderLocationsList, indent=2))
    if output_type == "text":
        table = tt.Texttable()
        table.set_cols_width([25])
        table.set_cols_align(['c'])
        table.set_cols_valign(['m'])
        table.header(['Locations'])
        for entry in encoderLocationsList:
            table.add_row([entry["location"]])
        print(table.draw())
def formatOutputvodOriginsList(vodOriginsList, output_type):
    """Print VOD origins as indented JSON or as a text table."""
    if output_type == "json":
        print(json.dumps(vodOriginsList, indent=2))
    if output_type == "text":
        table = tt.Texttable()
        table.set_cols_width([30, 10, 10])
        table.set_cols_align(['c'] * 3)
        table.set_cols_valign(['m'] * 3)
        table.header(['Name', 'CPCode', 'StreamCount'])
        for origin in vodOriginsList:
            table.add_row([origin["name"], origin["cpcode"], origin["streamCount"]])
        print(table.draw())
def formatOutputCPCodeLiveOrigin(cpcodeOriginList, output_type):
    """Print live-origin CP codes as indented JSON or as a text table."""
    if output_type == "json":
        print(json.dumps(cpcodeOriginList, indent=2))
    if output_type == "text":
        table = tt.Texttable()
        table.set_cols_width([15, 20])
        table.set_cols_align(['c', 'c'])
        table.set_cols_valign(['m', 'm'])
        table.header(['CPCode', 'Name'])
        for cpcode in cpcodeOriginList:
            table.add_row([cpcode["id"], cpcode["name"]])
        print(table.draw())
def formatOutputCDNList(cdnList, output_type):
    """Print the CDN list as indented JSON or as a text table."""
    if output_type == "json":
        print(json.dumps(cdnList, indent=2))
    if output_type == "text":
        table = tt.Texttable()
        table.set_cols_width([15, 15])
        table.set_cols_align(['c', 'c'])
        table.set_cols_valign(['m', 'm'])
        table.header(['Code', 'CDN Name'])
        for cdn in cdnList:
            table.add_row([cdn["code"], cdn["name"]])
        print(table.draw())
def formatOutputGeneric(list):
    """Pretty-print any JSON-serializable object as indented JSON."""
    # NOTE(review): the parameter name shadows the built-in `list`; renaming
    # it would change the keyword-argument interface, so it is kept as-is.
    print(json.dumps(list, indent=2))
def formatOutputCPCodelist(cpcodelist, output_type):
    """Print CP codes as indented JSON or as a text table."""
    if output_type == "json":
        print(json.dumps(cpcodelist, indent=2))
    if output_type == "text":
        table = tt.Texttable()
        table.set_cols_width([15, 28, 20])
        table.set_cols_align(['c'] * 3)
        table.set_cols_valign(['m'] * 3)
        table.header(['CPcode', 'Name', 'ContractId'])
        for cpcode in cpcodelist:
            # contractIds is a collection; render it via str() like the rest.
            table.add_row([cpcode["id"], cpcode["name"], str(cpcode["contractIds"])])
        print(table.draw())
6537222 | <gh_stars>1-10
import ujson as json
import asyncpg
from fuzzywuzzy import process
from collections import deque
import random
class Server:
    @classmethod
    async def create(cls, settings):
        """Async factory: build a Server with an asyncpg pool and SQL templates.

        :param settings: mapping with 'sql' = (user, host, password) and the
            database name under 'codexDB'.
        :return: a fully initialized Server instance.
        """
        self = Server()
        # settings['sql'] packs (user, host, password); index 1 is the host.
        credentials = {"user": settings['sql'][0], "password": settings['sql'][2], "database": settings['codexDB'], "host": settings['sql'][1]}
        self.pool = await asyncpg.create_pool(**credentials)
        self.commands = {}
        # NOTE(review): table/column names are interpolated directly into SQL;
        # safe only while they come from trusted internal code, never users.
        self.commands['get_specific_row'] = lambda x,y: "SELECT * FROM "+x+" WHERE "+y+" = $1;"
        self.commands['get_all_of_entry'] = 'SELECT {} FROM {};'
        self.commands['get_all'] = 'SELECT * FROM {};'
        return self
    async def close(self):
        """Close the underlying asyncpg connection pool."""
        await self.pool.close()
@staticmethod
def limited_words(limit, message, keys, processFun, maxFun):
test = []
messlength = len(message.split(' '))
#For all possible word lengths
for i in range(0, min(limit, messlength)):
#Make a list of all keys with that word length.
minikeys = [j for j in keys if j.count(' ')==i]
#Peel off that number of words from the message
testkey = ' '.join(message.split(' ', i+1)[:i+1])
#Put the results into test
processFun(test, testkey, minikeys, i)
#Parse the final results
return maxFun(test)
@staticmethod
def indTest(keys, schema, name, message):
if schema[name] == 'flat':
return process.extractOne(message,keys)
else:
return Server.limited_words(int(json.loads(schema[name])[0]), message, keys,
lambda test, singlekey, minikeys, wordLength: test.append(process.extractOne(singlekey, minikeys)),
lambda test: max(test, key=lambda y:y[1]))
@staticmethod
def array_to_dict(arr):
d = {}
for i in arr:
d[i[0]] = i[1]
return d
    async def dive(self, conn, oMessage, codex, table, keys, schema):
        """Walk a nested collection schema, narrowing *table* step by step.

        Each schema step is one of:
        - a digit string "N": fuzzy-match up to N leading words of the
          message against *keys* and descend into the matched row's table;
        - "<subtable-default>": resolve a named sub-entry (or the default)
          from the current table mapping and load its JSON payload;
        - a JSON object "{...}": pick extra embed fields named in the
          remaining message and return the assembled embed dict.
        """
        message = oMessage
        for i in schema:
            if all(j in '1234567890' for j in i):
                # Fuzzy-match 1..int(i) leading words of the message.
                test = self.limited_words(int(i), message, keys,
                    lambda test, singlekey, minikeys, wordLength: test.append((process.extractOne(singlekey, minikeys)), wordLength)) if False else self.limited_words(int(i), message, keys,
                    lambda test, singlekey, keylist, wordLength: test.append((process.extractOne(singlekey, keylist), wordLength)),
                    lambda x: max(x, key=lambda y:y[0][1]))
                if test[0][1] > 80:
                    table = await conn.fetchval(self.commands['get_specific_row'](table, 'id'), test[0][0], column=1)
                    # NOTE(review): the matched prefix has test[1]+1 words but
                    # only test[1] words are stripped here (a 1-word match
                    # strips nothing) -- confirm this off-by-one is intended.
                    message = message.split(' ', test[1])[test[1]]
                else:
                    # '{}' is filled in with the user's name by the caller.
                    return "Sorry {}, I couldn't find a good match."
            elif i.startswith('<') and i.endswith('>'):
                newTable, default = i[1:-1].split('-', 1)
                mapping = self.array_to_dict(table)
                if len(message) == 0:
                    # Nothing left to match: fall back to the default sub-entry.
                    data = await conn.fetchval(self.commands['get_specific_row'](codex+'_'+newTable,'id'), mapping[default], column=1)
                else:
                    data = await conn.fetch(self.commands['get_all_of_entry'].format('id', codex+'_'+newTable))
                    data = [i['id'] for i in data]
                    if message.find(' ') >-1:
                        k, rest = message.split(' ', 1)
                    else:
                        k, rest = message, ''
                    test = process.extractOne(k, mapping.keys())
                    if test[1] > 80:
                        data = await conn.fetchval(self.commands['get_specific_row'](codex+'_'+newTable,'id'), mapping[test[0]], column=1)
                        message = rest
                    else:
                        data = await conn.fetchval(self.commands['get_specific_row'](codex+'_'+newTable,'id'), mapping[default], column=1)
                data = json.loads(data)
            elif i.startswith('{') and i.endswith('}'):
                # NOTE(review): this branch reads `data`, which is only bound
                # by a preceding '<...>' step -- a schema that starts with
                # '{...}' would raise NameError; confirm step ordering.
                j = json.loads(i)
                keys = list(data['extra_fields'].keys())+list(j.keys())
                searches = [k.strip() for m in message.split(',') for k in m.split()]
                # Keep only confident fuzzy matches (score > 80), de-duplicated.
                test = {k for k in [process.extractOne(k, keys) for k in searches] if k[1]>80}
                extra = []
                for k in test:
                    if k[0] in data['extra_fields']:
                        extra.append(data['extra_fields'][k[0]])
                    elif type(j[k[0]]) != str:
                        # Alias group: a non-string schema value lists field names.
                        for l in j[k[0]]:
                            extra.append(data['extra_fields'][l])
                d = data['init']
                if len(extra) > 0:
                    d['fields'] = list(extra)
                else:
                    # No specific fields requested: include all extra fields.
                    d['fields'] = []
                    for k in data['extra_fields']:
                        d['fields'].append(data['extra_fields'][k])
                return d
            else:
                raise Exception('Not sure how I got here.')
@staticmethod
def initial_data(codex, message, collections, oSchema):
schema = {i['id']:i['schema'] for i in oSchema}
names = {}
for i in collections:
if i != 'schema':
names[i] = codex + '_' + i
if message.find(' ')>-1:
coll, rest = message.split(' ', 1)
coll = coll.lower()
else:
coll, rest = '', message
return (schema, names, coll, rest)
async def random(self, codex, coll):
    """Return the embed dict of a uniformly random entry from a flat category.

    Parameters:
        codex -- codex name (tables are stored as '<codex>_<collection>').
        coll  -- category to draw from.

    Returns the decoded embed dict, or a "Sorry {}" template string (the
    caller formats in the user's name) when the category is missing or not
    flat.
    """
    async with self.pool.acquire() as conn:
        # Look up the category's schema; unknown category -> apology template.
        schema = await conn.fetch(self.commands['get_all'].format(codex+'_schema'))
        try:
            schema = next(i['schema'] for i in schema if i['id'] == coll)
        except StopIteration:
            return "Sorry {}, that category doesn't exist in the current codex."
        # Reject non-flat categories BEFORE fetching entries (the old code
        # fetched and picked an entry first, then threw the work away; it
        # also left debug print()s in, removed here).
        if schema != 'flat':
            return "Sorry {}, random selections don't work for non-flat categories (yet!)"
        entries = await conn.fetch(self.commands['get_all_of_entry'].format('id', codex+'_'+coll))
        # NOTE: random.choice raises IndexError on an empty category, as before.
        entry = random.choice(entries)['id']
        result = await conn.fetchrow(self.commands['get_specific_row'](codex+'_'+coll, 'id'), entry)
        return json.loads(result['embed'])
async def lookup(self, codex, message):
    """Resolve a free-text query against a codex and return its embed.

    First tries to read a collection name off the front of the query; failing
    that, fuzzy-scores the whole query against every collection's entry ids.
    Returns a decoded embed dict, the result of dive() for nested categories,
    or a "Sorry {}" template string (the caller formats in the user's name).
    """
    async with self.pool.acquire() as conn:
        #Get list of tables for codex.
        collections = await conn.fetchval(self.commands['get_specific_row']('system_data', 'Name'), codex, column=1)
        #Get schema for codex
        schema = await conn.fetch(self.commands['get_all'].format(codex+'_schema'))
        #Parse initial data
        schema, names, coll, rest = self.initial_data(codex, message, collections, schema)
        #If coll matches a collection name
        if coll in collections:
            #Get the names of the entries in that collection
            keys = await conn.fetch(self.commands['get_all_of_entry'].format('id',names[coll]))
            #Put them into strings
            keys = [i['id'] for i in keys]
            #If that collection's schema is flat
            if schema[coll] == 'flat':
                #Get one key; only accept it above the 80 score cutoff.
                result = process.extractOne(rest, keys)
                if result[1] > 80:
                    result = await conn.fetchrow(self.commands['get_specific_row'](names[coll], 'id'), result[0])
                    return json.loads(result['embed'])
                return "Sorry {}, I couldn't find a good match."
            #Otherwise, start the dive through the nested schema.
            result = await self.dive(conn, rest, codex, names[coll], keys, json.loads(schema[coll]))
            return result
        else:
            # No collection prefix: score the query against every collection
            # and keep the best (score, collection) candidate.
            test = []
            for i in collections:
                if i != 'schema':
                    keys = await conn.fetch(self.commands['get_all_of_entry'].format('id',names[i]))
                    keys = [i['id'] for i in keys]
                    val = self.indTest(keys, schema, i, message)
                    if val != None:
                        test += [(val,i)]
            # NOTE(review): max() raises ValueError if indTest returned None
            # for every collection -- confirm that cannot happen in practice.
            result = max(test, key=lambda x:x[0][1])
            if result[0][1] <= 80:
                return "Sorry {}, I couldn't find a good match."
            if schema[result[1]] == 'flat':
                result = await conn.fetchrow(self.commands['get_specific_row'](names[result[1]], 'id'), result[0][0])
                return json.loads(result['embed'])
            # Nested category: strip the matched words off the query and dive
            # from the matched entry's alias table, skipping the schema's
            # leading numeric step (hence the [1:] below).
            message = message.split(' ', result[0][0].count(' ')+1)
            if len(message) > result[0][0].count(' ')+1:
                message = message[-1]
            else:
                message = ''
            table = await conn.fetchrow(self.commands['get_specific_row'](names[result[1]], 'id'), result[0][0])
            result = await self.dive(conn, message, codex, table['alias_assoc'], None, json.loads(schema[result[1]])[1:])
            return result
async def ref(self, codex, message):
    """Delegate to lookup(); same arguments, same return value."""
    return await self.lookup(codex, message)
async def schema(self, codex, oMessage):
    """Describe a codex's schema, or the schema of one category/entry.

    With an empty message, lists every category and its structure.  With a
    category name (optionally followed by an entry path), drills down much
    like lookup()/dive() and describes the sub-entries and sub-fields
    available at that point.  Returns a tab-indented description string or
    a "Sorry {}" template (the caller formats in the user's name).
    """
    def reverse_alias(alias_assoc):
        # Invert (alias, row-id) pairs into {row-id: [aliases...]}.
        reversed_assoc = {}
        for i in alias_assoc:
            if not(i[1] in reversed_assoc):
                reversed_assoc[i[1]] = []
            reversed_assoc[i[1]].append(i[0])
        return reversed_assoc
    async with self.pool.acquire() as conn:
        message = oMessage.strip()
        schema = await conn.fetch(self.commands['get_all'].format(codex+'_schema'))
        if message != '':
            collections = await conn.fetchval(self.commands['get_specific_row']('system_data', 'Name'), codex, column=1)
            schema, names, category, rest = self.initial_data(codex, message, collections, schema)
            if rest == '':
                # Bare category name: describe each step of its schema.
                res = category + '\n'
                for i in schema[category]:
                    if i.startswith('<') and i.endswith('>'):
                        s = i[1:-1].split('-')
                        res += '\tSubentry: '+s[0]+'\n'
                        res += '\t\tDefault Key: '+s[1]+'\n'
                    elif i.startswith('{') and i.endswith('}'):
                        res += '\tPartial Fields\n'
                        res += '\t\tCheck individual entries for a list of relevant subfields.'
                return res
            if category in collections:
                keys = await conn.fetch(self.commands['get_all_of_entry'].format('id',names[category]))
                # NOTE(review): `i` looks unbound on this path (the loop above
                # only runs in the rest=='' branch, which returns) -- probably
                # `category` was intended; confirm against indTest's signature.
                entryKey = self.indTest(keys, schema, i, message)
            else:
                # No category prefix: score the message against every
                # collection's entry ids and keep the best candidate.
                testRes = []
                for i in collections:
                    if i!='schema':
                        keys = map(lambda x: x['id'], await conn.fetch(self.commands['get_all_of_entry'].format('id',names[i])))
                        testRes += [(self.indTest(keys, schema, i, message),i)]
                entryKey, category = max(testRes, key=lambda x:x[0][1])
            if schema[category] == 'flat':
                return 'Elements of the requested category are flat.'
            # Treat the schema as a queue of steps to consume while drilling.
            schema[category] = deque(json.loads(schema[category]))
            if entryKey[1] > 80:
                entryKey = entryKey[0]
                table = await conn.fetchval(self.commands['get_specific_row'](codex+'_'+category, 'id'), entryKey, column=1)
                # Strip the words consumed by the matched entry key.
                message = message.split(' ', entryKey.count(' ')+1)
                if len(message)<=entryKey.count(' ')+1:
                    message = ''
                else:
                    message = message[entryKey.count(' ')+1].lstrip()
                nextTest = ''
                if message != '':
                    schema[category].popleft()
                    # Follow '<...>' sub-entry steps while words keep matching.
                    while message != '' and len(schema[category])>0 and schema[category][0].startswith('<') and schema[category][0].endswith('>'):
                        s = schema[category][0][1:-1].split('-')
                        if message.find(' ') > -1:
                            k, r = message.split(' ', 1)
                            # NOTE(review): likely meant `r = r.strip()`; also
                            # `message` is never advanced to `r`, so this loop
                            # re-matches the same leading word each pass.
                            r = message.strip()
                        else:
                            k, r = message, ''
                        mapping = self.array_to_dict(table)
                        minitest = process.extractOne(k, mapping.keys())
                        if minitest[1] > 80:
                            nextTest += ' -> '+minitest[0]
                            table = await conn.fetchval(self.commands['get_specific_row'](codex+'_'+s[0], 'id'), mapping[minitest[0]], column=1)
                            schema[category].popleft()
                        else:
                            break
                # Describe the remaining (unconsumed) schema steps.
                res = 'Best Result for ' + oMessage +':\n'
                res += entryKey + nextTest + '\n'
                for i in schema[category]:
                    if i.startswith('<') and i.endswith('>'):
                        s = i[1:-1].split('-')
                        res += '\tSubentries: '+s[0]+'\n'
                        ar = self.array_to_dict(table)
                        ra = reverse_alias(table)
                        for j in ra:
                            res += '\t\t'+j+' with aliases '+', '.join([k for k in ra[j] if k!=j])+'\n'
                        res += '\tDetails for default entry '+s[1]+':\n'
                        table = await (conn.fetchval(self.commands['get_specific_row'](codex+'_'+s[0], 'id'), ar[s[1]], column=1))
                    elif i.startswith('{') and i.endswith('}'):
                        s = json.loads(i)
                        res += '\tSubfields:\n'
                        res += '\t\tSimple Subfields:\n'
                        entry = json.loads(table)
                        for j in entry['extra_fields']:
                            res += '\t\t\t'+j+'\n'
                        res += '\t\tGroup Subfields:\n'
                        for j in s:
                            res += '\t\t\t'+j
                            if s[j] == 'default':
                                res += ': returns all Simple Subfields\n'
                            else:
                                res += ': returns the following Simple Subfields:\n'
                                for k in s[j]:
                                    res += '\t\t\t\t'+k+'\n'
                return res
            else:
                return "Sorry {}, I couldn't find a good match for that query."
        # Empty message: list every category in the codex.
        res = ''
        # NOTE(review): this sorts the DB record objects themselves --
        # presumably a sort by 'id' was intended; confirm records compare.
        schema = sorted(i for i in schema)
        for i in schema:
            res += i['id'] + '\n'
            if i['schema'] == 'flat':
                res += "\tFlat\n"
            else:
                # Skip the leading numeric word-count step.
                entry = json.loads(i['schema'])[1:]
                for j in entry:
                    if j.startswith('<') and j.endswith('>'):
                        s = j[1:-1].split('-')
                        res += '\tSubentry: '+s[0]+'\n'
                        res += '\t\tDefault Key: '+s[1]+'\n'
                    elif j.startswith('{') and j.endswith('}'):
                        res += '\tPartial Fields:\n'
                        res += '\t\tCheck individual entries for a list of relevant subfields.\n'
        return res
async def top(self, codex, number, message, logger):
    """Return the top-N fuzzy matches for a query as a formatted report.

    Parameters:
        codex   -- codex name.
        number  -- how many matches to report.
        message -- user query; may start with a collection name.
        logger  -- callable used to log raw scores in the cross-collection path.

    Returns (report_string, collection_or_None); matches above the 80 score
    cutoff are listed under "Strong Matches", the rest under "Weak Matches".
    """
    def test(keys, schema_entry, mess, num):
        # Score `mess` against `keys` for one collection.
        if schema_entry == 'flat':
            return process.extract(mess, keys, limit=num)
        else:
            # NOTE(review): `parsed` is unused, and the non-flat branch uses
            # the closed-over `message` rather than the `mess` parameter --
            # confirm whether `mess` was intended here.
            parsed = json.loads(schema_entry)
            return self.limited_words(int(json.loads(schema_entry)[0]),message, keys,
                lambda test, singlekey, minikeys, wordLength: test.extend(process.extract(singlekey, minikeys, limit=num)),
                lambda test: sorted(test, key=lambda x: x[1], reverse=True)[:num])
    async with self.pool.acquire() as conn:
        collections = await conn.fetchval(self.commands['get_specific_row']('system_data', 'Name'), codex, column=1)
        schema = await conn.fetch(self.commands['get_all'].format(codex+'_schema'))
        schema, names, coll, rest = self.initial_data(codex, message, collections, schema)
        if coll in collections:
            # Query names a collection: rank the remainder inside it.
            keys = await conn.fetch(self.commands['get_all_of_entry'].format('id',names[coll]))
            keys = [i['id'] for i in keys]
            results = test(keys, schema[coll], rest, number)
            res = ''
            mode = 0
            # mode tracks which section header has been emitted:
            # 0 = none yet, 1 = "Strong", 2 = "Weak".
            for i in results:
                if mode == 0 and i[1] > 80:
                    res += "Strong Matches:\n"
                    mode = 1
                elif mode < 2 and i[1] <= 80:
                    res += "Weak Matches:\n"
                    mode = 2
                res += f'\t"{i[0]}" ({i[1]})\n'
            return (res, coll)
        else:
            # No collection prefix: rank the whole query against every
            # collection and merge to the overall top `number`.
            testRes = []
            for i in collections:
                if i != 'schema':
                    keys = await conn.fetch(self.commands['get_all_of_entry'].format('id',names[i]))
                    keys = [i['id'] for i in keys]
                    testRes += list(map(lambda x:(x,i), test(keys, schema[i], message, number)))
            results = sorted(testRes, key=lambda x:x[0][1], reverse=True)[:number]
            res = ''
            mode = 0
            for i in results:
                logger(i[0][1])
                if mode == 0 and i[0][1] > 80:
                    res += "Strong Matches:\n"
                    mode = 1
                elif mode < 2 and i[0][1] <= 80:
                    res += "Weak Matches:\n"
                    mode = 2
                res += f'\t"{i[0][0]}" in {i[1]} ({i[0][1]})\n'
            return (res, None)
if __name__ == "__main__":
    import asyncio
    import sys

    async def run():
        """Command-line smoke test: look up sys.argv[2] in codex sys.argv[1]."""
        s = await Server.create()
        print(await s.lookup(sys.argv[1], sys.argv[2]))

    # asyncio.run() is the supported entry point; get_event_loop()
    # .run_until_complete() outside a running loop is deprecated (3.10+).
    asyncio.run(run())
| StarcoderdataPython |
class A1Stock:
    '''
    A1Stock: a stock solution of one component ("A") dissolved in one solvent.
    (A class named AB2 would hold Components A and B and 2 solvents.)

    Minimum information:
        components_dict -- maps role names to Component objects:
            e.g. {'A': Component, 'solvent1': Component}
        init_dict -- maps the same role names to concentrations; how the
            numbers are interpreted is chosen by init_method:
            'wtf' (weight fractions) or 'molarity' (molar concentrations).
        density -- float; currently ignored in favor of solvent1.density
            when init_method == 'wtf'.  # NOTE(review): parameter is unused;
            # confirm whether it should override the solvent approximation.

    TODO:
    - A1Stock method which estimates density based on component volumes, or
      interpolates density from a dataset of density vs. wt.f Component:solvent.
    - Function which manages volume/position of the stock, i.e. assign
      multiple stocks and have the robot switch to a new stock position
      when necessary.
    '''
    def __init__(self, components_dict=None, init_dict=None, init_method='wtf', density=None):
        # Use None sentinels instead of mutable {} defaults (shared-state bug).
        components_dict = {} if components_dict is None else components_dict
        self.componentA = components_dict['A']
        self.solvent1 = components_dict['solvent1']
        # init_dict is a generic key:concentration mapping, regardless of how
        # concentration is specified.
        self.init_dict = {} if init_dict is None else init_dict
        # BUG FIX: the original built this dict from init_wtf()/init_molarity()
        # CALL RESULTS, running both initializers eagerly (so a 'molarity'
        # stock still executed init_wtf and crashed on missing wtf keys).
        # Store bound-method references and invoke only the selected one.
        self.init_dict_methods = {
            'wtf': self.init_wtf, 'molarity': self.init_molarity
        }
        self.init_dict_methods[init_method]()
    def init_wtf(self):
        # Interpret init_dict values as weight fractions.
        self.wtf_dict = self.init_dict
        self.componentA_wtf = self.wtf_dict['A']
        self.solvent1_wtf = self.wtf_dict['solvent1']
        self.density = self.solvent1.density #Approximate density as density of solvent1 for now.
    def init_molarity(self):
        # Interpret init_dict values as molar concentrations.
        # NOTE(review): this path never sets self.density, so wtf_to_molarity
        # would fail on a molarity-initialized stock -- confirm intended use.
        self.molarity_dict = self.init_dict
    def wtf_to_volf(self):
        """Convert weight fractions to volume fractions (needs both densities).

        Sets componentA_volf and solvent1_volf (they sum to 1.0).
        """
        try:
            (self.componentA.density)
        except AttributeError:
            print(self.componentA.name, " has no density.")
        try:
            (self.solvent1.density)
        except AttributeError:
            print(self.solvent1.name, " has no density.")
        self.componentA_volf = (self.componentA_wtf/self.componentA.density)/((self.componentA_wtf/self.componentA.density) + (self.solvent1_wtf/self.solvent1.density))
        self.solvent1_volf = 1.0 - self.componentA_volf
    def wtf_to_molarity(self):
        """Compute componentA molarity (mol/L) from weight fraction.

        Uses stock density (g/mL) * wtf * 1000 / molecular weight.
        """
        try:
            (self.solvent1.density)
        except AttributeError:
            print(self.solvent1.name, " has no density.")
        self.componentA_molarity = self.density*self.componentA_wtf*1000/self.componentA.mw
        print("From A1stock componentA_molarity = ", self.componentA_molarity)
    def real_init(self):
        # Placeholder retained from the original implementation.
        pass
# Print (sum of 0..100) squared minus the sum of squares of 0..100.
total = sum(range(101))
sum_of_squares = sum(k * k for k in range(101))
result = total ** 2 - sum_of_squares
print(result)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.