seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
40811770527 | from django.conf.urls import url, include
from rest_framework import routers
from django.contrib import admin
from thaifood.viewset import *
# Routers provide an easy way of automatically determining the URL conf.
# (FoodViewSet & co. come from the wildcard thaifood.viewset import above.)
router = routers.DefaultRouter()
# router.register(r'users', UserViewSet)
router.register(r'foods', FoodViewSet)
router.register(r'ingredients', IngredientViewSet)
router.register(r'elements', ElementViewSet)
router.register(r'diseases', DiseaseViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
# NOTE(review): the r'^' include is listed first; the later patterns are only
# reached when 'thaifood.urls' does not match the request — confirm this
# ordering is intended.
urlpatterns = [
    url(r'^', include('thaifood.urls')),
    url(r'^embedded', include('light_sensor.urls')),
    url(r'^api/', include(router.urls)),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
| ohmini/thaifoodapi | mysite/urls.py | urls.py | py | 861 | python | en | code | 0 | github-code | 90 |
1859427275 | # Program which counts the appearances of each word in a file
import sys
import string

# Count the appearances of each word in a user-specified file.
FILE_NAME = input("Enter the file's name: ")
counts = dict()  # word -> number of occurrences
try:
    f_in = open(FILE_NAME, 'r')  # open file handler
except FileNotFoundError:
    print('File not found:', FILE_NAME)
    sys.exit()
# BUG FIX: the file handle was never closed; the with-block guarantees it
# is released even if an error occurs while reading.
with f_in:
    for line in f_in:
        line = line.rstrip()  # eliminate all types of blank spaces at the right
        # maketrans('', '', deletechars) builds a translation table mapping
        # every punctuation character to None, so translate() removes them
        # all in a single pass.
        line = line.translate(line.maketrans('', '', string.punctuation))
        line = line.lower()  # case-insensitive counting
        for word in line.split():
            counts[word] = counts.get(word, 0) + 1
print(counts)
| rubengr16/OSSU | ComputerScience/1_PY4E_Python_for_Everybody/10_Dictionaries/count_words.py | count_words.py | py | 1,279 | python | en | code | 0 | github-code | 90 |
10299382975 | import numpy as np
import scipy as s
import scipy.special as special
from .basic_distributions import Distribution
from ... import config
class Gamma(Distribution):
    """
    Class to define Gamma distributions

    Equations:
    p(x|a,b) = (1/Gamma(a)) * b^a * x^(a-1) * e^(-b*x)
    log p(x|a,b) = -log(Gamma(a)) + a*log(b) + (a-1)*log(x) - b*x
    E[x] = a/b
    var[x] = a/b^2
    E[ln(x)] = digamma(a) - ln(b)
    H[x] = ln(Gamma(a)) - (a-1)*digamma(a) - ln(b) + a
    """

    def __init__(self, dim, a, b, E=None, lnE=None):
        """dim: shape of the parameter arrays; a, b: shape/rate parameters
        (scalars are broadcast to `dim`); E/lnE: optional precomputed
        expectations — both must be given to skip updateExpectations()."""
        Distribution.__init__(self, dim)

        # Initialise parameters (broadcast scalars to the full dimension)
        if isinstance(a, (int, float)):
            a = np.ones(dim) * a
        if isinstance(b, (int, float)):
            b = np.ones(dim) * b
        self.params = { 'a':a, 'b':b }

        # Initialise expectations
        if (E is None) or (lnE is None):
            self.updateExpectations()
        else:
            self.expectations = { 'E':np.ones(dim)*E, 'lnE':np.ones(dim)*lnE }

        # float64 -> float32
        if config.use_float32: self.to_float32()

        # Check that dimensionalities match
        self.CheckDimensionalities()

    def updateExpectations(self):
        """Refresh E[x] = a/b and E[ln x] = digamma(a) - ln(b) from params."""
        E = self.params['a']/self.params['b']
        lnE = special.digamma(self.params['a']) - np.log(self.params['b'])
        self.expectations = { 'E':E, 'lnE':lnE }

    def density(self, x):
        """Joint density: product of independent Gamma pdfs evaluated at x."""
        assert x.shape == self.dim, "Problem with the dimensionalities"
        return np.prod( (1/special.gamma(self.params['a'])) * self.params['b']**self.params['a'] * x**(self.params['a']-1) * np.exp(-self.params['b']*x) )

    def loglik(self, x):
        """Joint log-likelihood: sum of independent Gamma log-pdfs at x."""
        assert x.shape == self.dim, "Problem with the dimensionalities"
        return np.sum( -np.log(special.gamma(self.params['a'])) + self.params['a']*np.log(self.params['b']) + (self.params['a']-1)*np.log(x) -self.params['b']*x )

    def sample(self, n=1):
        """Draw samples using numpy's shape/scale parametrisation.

        BUG FIX: `n` used to be ignored and a single draw (of shape dim)
        was always returned.  n == 1 keeps that behaviour for backward
        compatibility; n > 1 now returns n stacked draws of shape (n,)+dim.
        """
        k = self.params['a']
        theta = 1./self.params['b']  # scale = 1 / rate
        if n == 1:
            return np.random.gamma(k, scale=theta)
        return np.random.gamma(k, scale=theta, size=(n,) + np.shape(k))
| Starlitnightly/omicverse | omicverse/mofapy2/core/distributions/gamma.py | gamma.py | py | 2,061 | python | en | code | 119 | github-code | 90 |
4398456486 | import pandas as pd
# Demo: build a small pandas DataFrame from row lists and persist it as CSV.
rows = [["tom", 10], ["nick", 15], ["juli", 14]]

# Construct the DataFrame with named columns.
df = pd.DataFrame(rows, columns=["Name", "Age"])

# Show it on stdout, then write it (index included) to names.csv.
print(df)
df.to_csv("names.csv")
73979124455 |
import os
import glob
import hydra
import torch
import numpy as np
import pytorch_lightning as pl
from lib.trainer import SNARFModel
@hydra.main(config_path="config", config_name="config")
def main(opt):
    """Evaluate a trained SNARF model: restore the requested checkpoint and
    run the lightning test loop, saving bbox/surface IoU metrics to a file.

    `opt` is the composed hydra config.  The side effects below (seeding,
    thread limit, datamodule setup) are order-sensitive.
    """
    print(opt.pretty())
    pl.seed_everything(42, workers=True)
    torch.set_num_threads(10)

    # Build the datamodule (and optional data processor) from the config.
    datamodule = hydra.utils.instantiate(opt.datamodule, opt.datamodule)
    datamodule.setup(stage='test')
    data_processor = None
    if 'processor' in opt.datamodule:
        data_processor = hydra.utils.instantiate(opt.datamodule.processor,
                                                 opt.datamodule.processor,
                                                 meta_info=datamodule.meta_info)
    trainer = pl.Trainer(**opt.trainer)

    # Resolve the checkpoint: either the 'last' snapshot, or the newest file
    # matching the requested epoch number.
    if opt.epoch == 'last':
        checkpoint_path = './checkpoints/last.ckpt'
    else:
        checkpoint_path = glob.glob('./checkpoints/epoch=%d*.ckpt'%opt.epoch)[-1]
    print(checkpoint_path)

    model = SNARFModel.load_from_checkpoint(
        checkpoint_path=checkpoint_path,
        opt=opt.model,
        meta_info=datamodule.meta_info,
        data_processor=data_processor
    )
    # model.deformer.init_bones = np.arange(24)
    # model.deformer.init_bones_cuda = torch.tensor(model.deformer.init_bones).cuda().int()
    results = trainer.test(model, datamodule=datamodule, verbose=True)
    # Persist the two IoU metrics, keyed by dataset/subject/epoch.
    np.savetxt('./results_%s_%s_%s.txt'%(os.path.basename(opt.datamodule.dataset_path),opt.datamodule.subject, str(opt.epoch)), np.array([results[0]['bbox_iou'], results[0]['surf_iou']]))

if __name__ == '__main__':
    main()
21538130275 | """
需求:
获取悟空问答平台特定关键字搜索答案保存为excel文件
如搜python会跳转到:https://www.wukong.com/search/?keyword=python
保存为:悟空问答_python.xlsx
"""
import os
import requests
import time
import xlsxwriter # excel读写 xlsxwriter
from collections import OrderedDict
# Excel header row (Chinese column titles), written once per worksheet.
TITLE_NAMES = ["问题pid", "问题", "提问时间", "提问者名称", "提问者uid", "提问者头像链接",
               "问题被收藏数", "好的回答数", "普通回答数", "解答者", "解答者id", "答案id", "答案"]
# Desktop-Chrome User-Agent so the API treats requests like a normal browser.
HEADERS = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
def timestamp_2_str(time_stamp):
    """Convert a unix timestamp to a 'YYYY-MM-DD HH:MM:SS' local-time string."""
    return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time_stamp))
# Parse one page of API response data.
def parse_data(rsp_json):
    """Parse a Wukong search API response page.

    Args:
        rsp_json: decoded JSON payload; must be a dict with err_no == 0.

    Returns:
        (has_more, data_list): whether the server reports further pages,
        and a list with one OrderedDict per question (the first answer,
        when present, is flattened into the same row).
    """
    has_more = False
    data_list = []
    if not isinstance(rsp_json, dict):
        print("待解析数据格式非法:%s" % (rsp_json))
        return has_more, data_list
    if rsp_json.get("err_no", None) != 0:
        print("获取数据非法,:%s" % (rsp_json.get("err_tips")))
        return has_more, data_list

    data = rsp_json.get("data", None)
    if data:
        # BUG FIX: the default used to be the *string* "false", which is
        # truthy and could make the pagination loop in main() never stop
        # when the key is missing; a boolean False is the safe default.
        has_more = data.get("has_more", False)
        feed_questions = data.get("feed_question", [])
        for entry in feed_questions:
            q_info = OrderedDict()
            ans_list = entry.get("ans_list", [])
            question = entry.get("question", None)
            if not question:
                break  # preserves original behaviour: stop at first malformed entry
            q_info["qid"] = question["qid"]
            q_info["title"] = question["title"]
            q_info["create_time_human"] = timestamp_2_str(int(question["create_time"]))
            q_info["uname"] = question["user"]["uname"]
            q_info["user_id"] = question["user"]["user_id"]
            q_info["avatar_url"] = question["user"]["avatar_url"]
            q_info["follow_count"] = question["follow_count"]
            q_info["nice_ans_count"] = question["nice_ans_count"]
            q_info["normal_ans_count"] = question["normal_ans_count"]
            if ans_list:
                first_answer = ans_list[0]
                q_info["ans_user"] = first_answer["user"]["uname"]
                q_info["ans_user_id"] = first_answer["user"]["user_id"]
                q_info["ansid"] = first_answer["ansid"]
                q_info["abstract_text"] = first_answer["abstract_text"]
            data_list.append(q_info)
    return has_more, data_list
# Write parsed rows into the workbook.
def write_data_to_excel(work_book, data_list, row_num):
    """Append rows to the '悟空爬虫数据' worksheet, creating it (plus a
    header row) on first use; returns the index of the next free row."""
    worksheet_name = '悟空爬虫数据'
    work_sheet = work_book.get_worksheet_by_name(worksheet_name)
    if not work_sheet:
        # First call: create the sheet and emit the header row.
        work_sheet = work_book.add_worksheet(worksheet_name)
        work_sheet.write_row(0, 0, TITLE_NAMES)
        row_num = 1
    for record in data_list:
        work_sheet.write_row(row_num, 0, record.values())
        row_num += 1
    return row_num
# Crawl one result page for the given search URL and persist it.
def save_search_url_data(work_book, search_url, row_num):
    """Fetch one page of search results, write it to the workbook, and
    return (has_more, next_free_excel_row)."""
    response = requests.get(search_url, headers=HEADERS).json()
    # Decode the response dict into flat rows, then append them to excel.
    has_more, records = parse_data(response)
    return has_more, write_data_to_excel(work_book, records, row_num)
def main(keyword):
    """Crawl every result page for `keyword` from the Wukong Q&A site and
    save the rows to output/悟空问答_<keyword>.xlsx."""
    os.makedirs('output', exist_ok=True)
    filename = os.path.join('output', '悟空问答_%s.xlsx' % keyword)
    base_url = 'https://www.wukong.com/wenda/web/search/loadmore/?search_text=%s&offset=%s&count=%s'
    excel_work_book = xlsxwriter.Workbook(filename)  # could be made a context manager
    count = 20       # items requested per page
    offset = 0       # current offset: page 1 -> 0, page 2 -> 20, ...
    row_num = 0      # next free excel row
    has_more = True  # server-reported "more pages available" flag
    while has_more:
        search_url = base_url % (keyword, offset, count)
        # Returns whether another page exists and the new excel cursor.
        has_more, row_num = save_search_url_data(excel_work_book, search_url, row_num)
        # row_num includes the header row, hence the -1 in the progress log.
        print("已爬取%s条....." % (row_num - 1))
        offset += count
    excel_work_book.close()

if __name__ == "__main__":
    keyword = input('请输入关键字:')
    main(keyword)
| MisterZZL/web_Crawler | xlsxwriter.py | xlsxwriter.py | py | 4,600 | python | en | code | 0 | github-code | 90 |
27493717799 | #!/usr/bin/env python3
import unittest
import pandas as pd
from pandas.testing import assert_frame_equal
from colwiseproportion import migrate_params, render
from cjwmodule.testing.i18n import i18n_message
class MigrateParamsTest(unittest.TestCase):
    """migrate_params() must upgrade legacy param formats to the current one."""

    def test_v0_no_colnames(self):
        # v0 stored colnames as a comma-separated string; empty -> empty list
        self.assertEqual(migrate_params({
            'colnames': '',
        }), {
            'colnames': [],
        })

    def test_v0(self):
        # v0 comma-separated string -> list of column names
        self.assertEqual(migrate_params({
            'colnames': 'A,B',
        }), {
            'colnames': ['A', 'B'],
        })

    def test_v1(self):
        # current (list) format passes through unchanged
        self.assertEqual(migrate_params({
            'colnames': ['A', 'B'],
        }), {
            'colnames': ['A', 'B'],
        })
class RenderTest(unittest.TestCase):
    """render() appends a 'percent_<col>' column for each selected column."""

    def test_no_params(self):
        # No columns selected: the table must be returned untouched.
        table = pd.DataFrame({'A': [1, 2], 'B': [2, 3]})
        expected = table.copy()
        result = render(table, {'colnames': []})
        assert_frame_equal(result, expected)

    def test_basic(self):
        table = pd.DataFrame({'A': [1, 2], 'B': [2, 3], 'C': [3, 4]})
        result = render(table, {'colnames': ['A', 'B']})
        # Each percent value is cell / column sum; unselected C is untouched.
        expected = pd.DataFrame({
            'A': [1, 2],
            'B': [2, 3],
            'C': [3, 4],
            'percent_A': [1/3, 2/3],
            'percent_B': [0.4, 0.6],
        })
        assert_frame_equal(result['dataframe'], expected)
        self.assertEqual(result['column_formats'],
                         {'percent_A': '{:,.1%}', 'percent_B': '{:,.1%}'})

    def test_divide_by_0(self):
        # A zero column sum must yield an i18n error, not a division error.
        table = pd.DataFrame({'A': [-1, 1]})
        result = render(table, {'colnames': ['A']})
        self.assertEqual(
            result,
            i18n_message("badData.columnSum.isZero", {"column": "A"})
        )

    def test_overwrite_colname(self):
        # A pre-existing 'percent_A' column is replaced, not duplicated.
        table = pd.DataFrame({'A': [1, 3], 'percent_A': ['x', 'y']})
        result = render(table, {'colnames': ['A']})
        expected = pd.DataFrame({
            'A': [1, 3],
            'percent_A': [0.25, 0.75],
        })
        assert_frame_equal(result['dataframe'], expected)
        self.assertEqual(result['column_formats'], {'percent_A': '{:,.1%}'})
4306326679 | # Import socket module
from socket import *
import sys  # In order to terminate the program

# Create a TCP client socket
# (AF_INET is used for IPv4 protocols)
# (SOCK_STREAM is used for TCP)
clientSocket = socket(AF_INET, SOCK_STREAM)

# Command-line arguments: server host, server port, file to request
serverName = sys.argv[1]
serverPort = int(sys.argv[2])
filename = sys.argv[3]

# Build the HTTP GET request line for the requested file
message = 'GET {} HTTP/1.1'.format(filename)

# Set up a new connection to the server
clientSocket.connect((serverName, serverPort))

# Send the request bytes to the server
clientSocket.send(message.encode())

# Receive the response 1 KiB at a time until the server closes the
# connection (recv then returns an empty bytes object)
modifiedMessage = clientSocket.recv(1024)
response = ''
while len(modifiedMessage) > 0:
    response += modifiedMessage.decode()
    modifiedMessage = clientSocket.recv(1024)
print(response)

# Close the client socket
clientSocket.close()
sys.exit()  # Terminate the program
| Opty-BSc/CN | Lab_4/weblab/client/webclient.py | webclient.py | py | 990 | python | en | code | 0 | github-code | 90 |
43843763680 | """
给定一个字符串数组,将字母异位词组合在一起。字母异位词指字母相同,但排列不同的字符串。
示例:
输入: ["eat", "tea", "tan", "ate", "nat", "bat"],
输出:
[
["ate","eat","tea"],
["nat","tan"],
["bat"]
]
说明:
所有输入均为小写字母。
不考虑答案输出的顺序。
"""
# Approach: anagrams share the same sorted character sequence, so that
# sorted string is used as the grouping key; dict insertion order keeps
# the groups in first-appearance order.
class Solution:
    def groupAnagrams(self, strs):
        """
        :type strs: List[str]
        :rtype: List[List[str]]
        """
        groups = {}
        for word in strs:
            groups.setdefault("".join(sorted(word)), []).append(word)
        return list(groups.values())
| wtrnash/LeetCode | python/049字母异位词分组/049字母异位词分组.py | 049字母异位词分组.py | py | 918 | python | zh | code | 2 | github-code | 90 |
43739889256 | print("\n*****************\nQUIZ\n*****************\n")
# Yes/no intro menu (user-facing strings intentionally kept in Portuguese).
print("Realmente deseja jogar?\n 1- SIM\n 2- NÃO\n")
decisao_inicial = int(input("Informe a sua decisão: "))
if decisao_inicial == 1:
    print("Perfeito!!!\n\nIniciando o game...\n")
elif decisao_inicial == 2:
    print("És um peidão kkkkkkk\n\nDeseja realmente encerrar o jogo?\n 1-SIM\n 2-NÃO\n")
    decisao_inicial2 = int(input("Informe a sua decisão: "))
    if decisao_inicial2 == 1:
        print("És um peidão kkkkkkk\n")
        quit()
    elif decisao_inicial2 == 2:
        print("Perfeito!!!\n\nIniciando o game...")
else:
    print("\nInforme uma decisão válida!\n")
    # BUG FIX: the old test `decisao_inicial != 1 or 2` was always true
    # (the literal 2 is truthy) and the loop broke after one iteration;
    # now we keep prompting until the user enters a valid option.
    while decisao_inicial not in (1, 2):
        decisao_inicial = int(input("Informe a sua decisão: "))
70019423656 | '''
Optional SSH server based on Twisted Conch if you don't want to use OpenSSH.
'''
from twisted.cred.portal import Portal
from twisted.cred.checkers import FilePasswordDB
from twisted.conch.checkers import SSHPublicKeyDatabase
from twisted.conch.ssh.factory import SSHFactory
from twisted.internet import reactor
from twisted.conch.ssh.keys import Key
from twisted.conch.interfaces import IConchUser
from twisted.conch.avatar import ConchUser
from twisted.conch.ssh.channel import SSHChannel
from twisted.cred.checkers import ICredentialsChecker
from twisted.cred.credentials import IUsernamePassword, ISSHPrivateKey
from twisted.cred.error import UnauthorizedLogin, UnhandledCredentials
from twisted.python import components
from twisted.conch.ssh import session
from twisted.cred import portal
from twisted.internet import reactor
from twisted.internet.error import ProcessExitedAlready
from zope.interface import implements, providedBy
import os
import pwd
import sys
import subprocess
from . import tools
class settings: pass
# Mutable module-level namespace: main() stores username_get and gid here so
# KeySession.execCommand can read them later.
settings = settings()

def nothing():
    # No-op logout callable returned by KeyRealm.requestAvatar.
    pass
class KeyConchUser(ConchUser):
    """Conch avatar whose avatarId is the (username, key fingerprint) pair
    produced by KeyChecker.requestAvatarId."""
    def __init__(self, avatarId):
        ConchUser.__init__(self)
        self.avatarId = avatarId
        # Route 'session' channel requests to a standard SSH session; it is
        # adapted to KeySession via components.registerAdapter further down.
        self.channelLookup['session'] = session.SSHSession
class KeySession:
    """ISession implementation that runs every SSH exec request through the
    gitjoin.git_auth helper, optionally setuid-ing to the target user."""
    implements(session.ISession)

    def __init__(self, avatar):
        self.avatar = avatar
        self.proc = None  # spawned git_auth process, set in execCommand

    def execCommand(self, proto, cmd):
        """Spawn `python -m gitjoin.git_auth <key_id>` with the original SSH
        command exposed through SSH_ORIGINAL_COMMAND."""
        username, key_id = self.avatar.avatarId
        environ = {}
        # Are we supposed to setuid?
        if settings.username_get:
            uid = settings.username_get(username)
            home = pwd.getpwuid(uid).pw_dir
            environ['PYTHONPATH'] = os.environ['PYTHONPATH']
            environ['HOME'] = home
            environ['PATH'] = os.environ['PATH']
            setuid_args = [uid, settings.gid]
        else:
            # Run as the current user, from the project root directory.
            environ.update(os.environ)
            home = os.path.abspath(os.path.dirname(sys.argv[0]) + '/..')
            setuid_args = []
        environ['SSH_ORIGINAL_COMMAND'] = cmd
        environ['CALLED_WITH_CUSTOM_SSHD'] = '1'
        # key_id is generated by get_ssh_key_fingerprint, so interpolating it
        # into the single-quoted shell argument below is safe.
        assert "'" not in key_id
        self.proc = reactor.spawnProcess(ProcessExitWorkaroundWrapper(proto),
            '/bin/sh', ['sh', '-c', "python -m gitjoin.git_auth '%s'" % key_id], environ, home,
            *setuid_args)
        self.avatar.conn.transport.transport.setTcpNoDelay(1)

    def eofReceived(self):
        # Client sent EOF: close the child's stdin so it can finish.
        if self.proc:
            self.proc.closeStdin()

    def closed(self):
        # Best-effort shutdown of the child process when the channel closes.
        try:
            self.proc.signalProcess('HUP')
        except (OSError, ProcessExitedAlready):
            pass
        self.proc.loseConnection()
class ProcessExitWorkaroundWrapper(object):
    '''
    Process seems to call processExited long before processEnded.
    However SSHSessionProcessProtocol closes channel on processEnded.
    '''
    def __init__(self, obj):
        self._obj = obj

    def __getattr__(self, name):
        # Delegate every other attribute to the wrapped protocol.
        return getattr(self._obj, name)

    def processExited(self, reason=None):
        # Treat early exit as the end of the process (see class docstring).
        return self.processEnded(reason)

    def childDataReceived(self, a, data):
        return self._obj.childDataReceived(a, data)

# Make ISession(avatar) on a KeyConchUser yield a KeySession.
components.registerAdapter(KeySession, KeyConchUser, session.ISession)
class KeyRealm(object):
    """IRealm returning a KeyConchUser avatar and a no-op logout callable."""
    implements(portal.IRealm)

    def requestAvatar(self, avatarId, mind, *interfaces):
        r = interfaces[0], KeyConchUser(avatarId), nothing
        return r
class KeyChecker(object):
    """Maps an SSH public-key credential to (username, key fingerprint).

    NOTE(review): no signature verification or authorization happens here —
    presumably that is deferred to gitjoin.git_auth, which receives the
    fingerprint; confirm before relying on this for access control.
    """
    implements(ICredentialsChecker)
    credentialInterfaces = (ISSHPrivateKey,)

    def requestAvatarId(self, credentials):
        pubkey = tools.reformat_ssh_key('ssh-rsa ' + credentials.blob.encode('base64'))
        id = tools.get_ssh_key_fingerprint(pubkey)
        return (credentials.username, id)
def main(keys_path, username_get=None, gid=None, port=2022):
    """Start the custom SSH daemon.

    Args:
        keys_path: directory holding (or receiving) the host RSA key pair.
        username_get: optional callable mapping username -> uid; when set,
            spawned commands are setuid'ed (see KeySession.execCommand).
        gid: group id used together with username_get.
        port: TCP port to listen on (default 2022).
    """
    settings.username_get = username_get
    settings.gid = gid
    key_path = keys_path + '/id_rsa'
    # Generate a host key pair on first run.
    if not os.path.exists(key_path):
        subprocess.check_call(['ssh-keygen', '-f', key_path,
                               '-t', 'rsa', '-N', ''])
    with open(key_path) as privateBlobFile:
        privateBlob = privateBlobFile.read()
        privateKey = Key.fromString(data=privateBlob)

    with open(key_path + '.pub') as publicBlobFile:
        publicBlob = publicBlobFile.read()
        publicKey = Key.fromString(data=publicBlob)

    factory = SSHFactory()
    factory.privateKeys = {'ssh-rsa': privateKey}
    factory.publicKeys = {'ssh-rsa': publicKey}
    factory.portal = Portal(KeyRealm())
    factory.portal.registerChecker(KeyChecker())
    reactor.listenTCP(port, factory)
    reactor.run()  # blocks until the reactor is stopped

if __name__ == '__main__':
    main('.')
| zielmicha/gitjoin | gitjoin/sshd.py | sshd.py | py | 4,822 | python | en | code | 1 | github-code | 90 |
18409114277 | # 2 – Crie um dicionário em que suas chaves serão os números 1, 4, 5, 6, 7, e 9
# Exercise: map the numbers 1, 4, 5, 6, 7 and 9 (kept in a list) to their
# squares in a dictionary.
l = [1, 4, 5, 6, 7, 9]
numeros_ao_quadrado = {numero: numero ** 2 for numero in l}
print(numeros_ao_quadrado)

print("=" * 69)

# b - Map every integer in [1, 10] to its square.
dicionario = {numero: numero ** 2 for numero in range(1, 11)}
print(dicionario)
| lucasmbrute2/Blue_mod1 | Aula11/Exercício01.py | Exercício01.py | py | 562 | python | pt | code | 0 | github-code | 90 |
6425979426 | from lab1.tokenizer import to_tokens
import pandas as pd
import pickle
import copy
import os
import re
import numpy as np
import nltk
nltk.download('stopwords', quiet=True)
from nltk.corpus import stopwords
stop_words = set(stopwords.words("english"))
def freq_vectorizer(text):
    """Turn raw text into a PCA-reduced mean-frequency vector, using the
    pickled dictionary and PCA model stored under ../assets."""
    tokenized_text = to_tokens(text)
    with open("../assets/dictionary", "rb") as file:
        dictionary = pickle.load(file)
    with open("../assets/pca_model", "rb") as file:
        pca = pickle.load(file)
    # Rows are tokens seen in the text, columns are dictionary tokens.
    freq_matrix = pd.DataFrame(columns=dictionary.keys())
    for sentence in tokenized_text:
        for token in sentence:
            # init 0 0 0...
            # NOTE(review): this re-zeroes the token's whole row for every
            # sentence containing it, so counts do NOT accumulate across
            # sentences — confirm this per-sentence reset is intentional.
            freq_matrix.loc[token] = 0
        for token in sentence:
            try:
                freq_matrix.at[token, token] += 1
            except:
                # token has no matching column in the pruned dictionary
                pass
    # Mean per dictionary column, then project through the fitted PCA.
    vector = []
    for token in dictionary:
        vector.append(round(freq_matrix[token].mean(), 3))
    vector = pca.transform(np.array(vector).reshape(1, -1))[0]
    return vector
def main():
    """Build corpus artefacts from ../assets/annotated-corpus/: the sentence
    list (for word2vec), a frequency-pruned dictionary, and a term-document
    matrix — each pickled into ../assets."""
    tokens = []  # for freq dictionary
    tokens_in_files = dict()  # for termin-document matrix
    all_sentences = []  # for word2vec fitting
    stop_words = set(stopwords.words("english"))
    data = '../assets/annotated-corpus/'
    for label in os.listdir(data):
        for file in os.listdir(data + label):
            token_list = []  # for building tokens_in_files dictionary
            token_list_by_sentence = []  # for building all_sentences list
            with open(data + label + '/' + file) as tsv:
                for row in tsv:
                    if row == '\n':
                        # blank line terminates the current sentence
                        token = '\n'
                        all_sentences.append(token_list_by_sentence)
                        token_list_by_sentence = []
                    else:
                        split = re.split(r'\t', row)
                        token = split[0]
                        # drop tokens containing digits or punctuation
                        if re.fullmatch(r'.*[0-9!?#$%^&*\]\[()|~{}\\+=<>\-\",_@`].*|'
                                        r'[_|+`%#=~;@?<>&*:\",./^\'!()\\\-\[\]{}]+', token):
                            continue
                    if token not in stop_words:
                        tokens.append(token.lower())
                        if token != '\n':
                            token_list.append(token.lower())
                            token_list_by_sentence.append(token.lower())
            tokens_in_files[label + '/' + file] = token_list
    with open('../assets/all_sentences', 'wb') as file:
        pickle.dump(all_sentences, file)

    # need dictionary (sentence separators excluded)
    raw_tokens = [token for token in tokens if token != '\n']
    frequency_dict = dict()
    for token in raw_tokens:
        frequency_dict[token] = frequency_dict.get(token, 0) + 1
    # filtering low frequency (tokens appearing <= 6 times are dropped)
    dictionary = copy.copy(frequency_dict)
    for token in frequency_dict:
        if frequency_dict[token] <= 6:
            del dictionary[token]
    del frequency_dict
    # save dictionary
    with open('../assets/dictionary', 'wb') as file:
        pickle.dump(dictionary, file)
    with open('../assets/dictionary', 'rb') as file:
        dictionary = pickle.load(file)

    # make termin-document matrix: one row per corpus file, one column per
    # dictionary token
    data = '../assets/annotated-corpus/'
    termin_document = pd.DataFrame(columns=dictionary.keys())
    for label in os.listdir(data):
        for file in os.listdir(data + label):
            print(file)
            termin_document.loc[label + '/' + file] = 0
            for token in tokens_in_files[label + '/' + file]:
                try:
                    termin_document.at[label + '/' + file, token] += 1
                except:
                    # token is not in the pruned dictionary
                    pass
    with open('../assets/termin_document', 'wb') as file:
        pickle.dump(termin_document, file)

if __name__ == "__main__":
    main()
| MANASLU8/nlp-22-autumn | projects/petrenko-lab/lab4/main.py | main.py | py | 3,881 | python | en | code | 0 | github-code | 90 |
class Dice:
    """A six-sided die tracked by the values currently on each face."""

    def __init__(self, top, south, east, west, north, bottom):
        self.top = top
        self.south = south
        self.east = east
        self.west = west
        self.north = north
        self.bottom = bottom

    def toN(self):
        """Roll the die northwards (south face comes up)."""
        self.top, self.south, self.bottom, self.north = (
            self.south, self.bottom, self.north, self.top)

    def toE(self):
        """Roll the die eastwards (west face comes up)."""
        self.top, self.west, self.bottom, self.east = (
            self.west, self.bottom, self.east, self.top)

    def toS(self):
        """Roll the die southwards (north face comes up)."""
        self.top, self.north, self.bottom, self.south = (
            self.north, self.bottom, self.south, self.top)

    def toW(self):
        """Roll the die westwards (east face comes up)."""
        self.top, self.east, self.bottom, self.west = (
            self.east, self.bottom, self.west, self.top)

    def printTop(self):
        """Print the value currently facing up."""
        print(self.top)
# Read the six face values (order: top south east west north bottom) and a
# string of roll commands, apply each roll, then print the face ending on top.
n = input().split()
dice = Dice(int(n[0]), int(n[1]), int(n[2]), int(n[3]), int(n[4]), int(n[5]))
order = input()
for o in order:
    if o == "N":
        dice.toN()
    elif o == "E":
        dice.toE()
    elif o == "S":
        dice.toS()
    elif o == "W":
        dice.toW()
dice.printTop()
| Aasthaengg/IBMdataset | Python_codes/p02383/s987087544.py | s987087544.py | py | 1,230 | python | en | code | 0 | github-code | 90 |
36397467004 | from __future__ import print_function
import time
import numpy as np
import logging
class ReidentificationOutput(object):
    """Result of Reidentifier.identify().

    Attributes:
        votes: mapping ID -> number of gallery features that lie closer
            than reid_threshold to the query.
        ID: identity decided for the query (-1 when unknown).
        distances: list of (ID, distance, index_in_gallery) tuples sorted
            by increasing distance from the query; index_in_gallery indexes
            Reidentifier.features / .ids / .images.
        time: wall-clock seconds spent on the re-identification.
    """

    def __init__(self):
        """Initialise an empty (no-match) result."""
        self.votes = {}
        self.ID = -1
        self.distances = []
        self.time = 0
class Reidentifier(object):
    """Class holding a gallery of features and perform reidentification by
    comparing input to all features of the gallery.

    If the Euclidian distance between 2 features is < reid_threshold,
    the 2 features are considered as same ID.

    If the Euclidian distance between 2 features of the same
    identity is < keep_threshold, only one of them is stored.
    """
    def __init__(self,
                 keep_threshold=0.1,
                 reid_threshold=0.35,
                 min_nb_features_per_id=20,
                 max_nb_features_per_id=200):
        """
        Args:
            keep_threshold: threshold below which a feature is not added
                            (too similar to others)
            reid_threshold: threshold below which two features are considered
                            of the same ID when reidentifying
            min_nb_features_per_id: number below which an ID is removed
                                    by clean()
            max_nb_features_per_id: number above which new features are
                                    not added
        """
        self.logger = logging.getLogger("reidentifier")
        self.ids = None       # Id corresponding to features (np array, row-aligned)
        self.features = None  # Features (one row per gallery entry)
        self.images = []      # Corresponding images for debugging
        self.keep_threshold = keep_threshold
        self.reid_threshold = reid_threshold
        self.min_nb_features_per_id = min_nb_features_per_id
        self.max_nb_features_per_id = max_nb_features_per_id

    def identify(self, row128):
        """Identify the input by comparing it to the gallery.

        Returns:
            votes: Number of votes for each identity with
                   distance < reid_threshold
        """
        output = ReidentificationOutput()
        if self.features is None: return output
        tic = time.time()
        # Euclidean distance of the query against every gallery row at once.
        distances = np.linalg.norm(self.features - row128, axis=1)
        sorted_indices = np.argsort(distances)
        for i, index_in_gallery in enumerate(sorted_indices):
            distance = distances[index_in_gallery]
            ID = self.ids[index_in_gallery]
            if distance < self.reid_threshold:
                if ID not in output.votes: output.votes[ID] = 0
                output.votes[ID] += 1
                output.distances.append((ID, distance, index_in_gallery))
            else:
                # Since sorted array, all remaining are farther
                break
        toc = time.time()
        output.time = toc - tic
        # print("[reidentifier] Reid {} took {}".format(self.features.shape,
        #                                               toc-tic))
        # Print for debug
        for i, index_in_gallery in enumerate(sorted_indices):
            distance = distances[index_in_gallery]
            ID = self.ids[index_in_gallery]
            self.logger.debug("ID {} at row {} at distance {}". \
                              format(ID, index_in_gallery, distance))
            if i > 15: break
        # output.ID will be the one with higher number of votes, or in
        # case of tie, the one with closest distance (element 0 from
        # votes, which may not be the one with highest votes... Maybe
        # fix that)
        maxi = -1
        ids = []
        for k, v in output.votes.items():
            if v == maxi:
                ids.append(k)
            elif v > maxi:
                ids = [k]
                maxi = v
        if len(ids) == 1:
            output.ID = ids[0]
        elif len(ids) > 1:
            output.ID = output.distances[0][0]
        return output

    def add_row(self, row128, ID, image=None):
        """Add a new feature vector (a row) to matrix 'gallery' with identity
        ID. No check is performed on the row.

        Args:
            row128 : openface features of size 128
            ID : corresponding ID
            image : optional image kept alongside for debugging
        """
        if self.features is None:
            # First entry: the gallery is created from this row.
            self.features = row128
            self.ids = np.array([ID])
            if image is not None:
                self.images = [image]
        else:
            self.features = np.vstack((self.features, row128))
            self.ids = np.append(self.ids, ID)
            if image is not None:
                self.images.append(image)
        # if image is not None:
        #     if len(self.images) == len(seld.ids) - 1:
        #         self.images.append(image)
        #     else:
        #         self.logger.warning("Mismatch in gallery size")
        self.logger.debug("Person {} gallery {}". \
                          format(ID, self.features.shape))

    def add(self, row128, ID, image=None):
        """Add a new feature vector (a row) to matrix 'gallery' for identity
        ID, unless the ID already has enough features or the new row is too
        close (< keep_threshold) to an existing one.

        Args:
            row128 : openface features of size 128
            ID : corresponding ID

        Returns:
            1 if the row was inserted, 0 otherwise.
        """
        is_added = 0
        if self.features is None or self.features.size == 0:
            self.add_row(row128, ID, image)
            # self.features = row128
            # self.ids = np.array([ID])
            is_added = 1
            self.logger.debug("Adding features for {} (gallery {})". \
                              format(ID, self.features.shape))
        elif np.count_nonzero(self.ids == ID) >= self.max_nb_features_per_id:
            # This ID already reached its feature quota.
            is_added = 0
            self.logger.debug("Enough features for {} (gallery {})". \
                              format(ID, self.features.shape))
        else:
            # If the new row is too close from existing ones, do not insert it
            distances = np.linalg.norm(self.features - row128, axis=1)
            sorted_indices = np.argsort(distances)
            # print(sorted_indices)
            # print(len(sorted_indices))
            distance = distances[sorted_indices[0]]
            if distance < self.keep_threshold:
                self.logger.debug("Not inserting person {} {:.3f} < {:.3f}". \
                                  format(ID, distance, self.keep_threshold))
            else:
                # Count how many features per ID
                # occurences = {}
                # for i in self.ids:
                #     if i not in occurences: occurences[i] = 0
                #     occurences[i] += 1
                self.add_row(row128, ID, image)
                is_added = 1
                # # if ID not in occurences:
                # self.features = np.vstack((self.features, row128))
                # self.ids = np.append(self.ids, ID)
                # self.logger.debug("Person {}: add, gallery {}". \
                #                   format(ID, self.features.shape))
        return is_added

    def merge(self, new_id, list_to_merge):
        """Set all the ids of 'list_to_merge' to the same value.

        This function is meant for situations when 2 IDs are actually
        the same person.
        """
        self.logger.info("Merging {} into {}".format(list_to_merge, new_id))
        for theid in list_to_merge:
            self.ids[self.ids == theid] = new_id

    def get_available_ids(self):
        """Return the set of IDs which are currently in the gallery."""
        s = set([])
        if self.ids is not None:
            s = set(self.ids)
        return s

    def get_images_for_id(self, ID):
        """Return a list of images for the given ID.

        If self.images is empty (or misaligned with ids), it returns an
        empty list.
        """
        images = []
        n_images = len(self.images)
        if self.ids is not None and n_images > 0:
            n_ids = self.ids.shape[0]
            if n_images > 0 and n_images == n_ids:
                for i in range(n_images):
                    if self.ids[i] == ID:
                        images.append(self.images[i])
        return images

    def get_nb_features_per_id(self):
        """Return how many features are stored in the gallery per ID"""
        distrib = {}
        for i in self.ids:
            if i not in distrib:
                distrib[i] = 0
            distrib[i] += 1
        return distrib

    def print_status(self):
        """Print the number of elements in the gallery"""
        if self.ids is not None:
            self.logger.info("Gallery contains {} elements"
                             .format(len(self.ids)))
            self.logger.info("Nb of features per ID {}"
                             .format(self.get_nb_features_per_id()))
        else:
            self.logger.info("The gallery is empty")

    def remove_indices(self, indices):
        """Remove the elements of ids, features, and images with input
        indices.

        These are the indices in the list/arrays. Not the IDs of the
        persons.

        Args:
            indices: A list with index to remove
        """
        self.logger.debug("IDs {}".format(self.ids.shape))
        self.logger.debug("Features {}".format(self.features.shape))
        self.logger.debug("Images {}".format(len(self.images)))
        self.logger.debug("Removing {} elements".format(len(indices)))
        self.ids = np.delete(self.ids, indices, 0)
        self.features = np.delete(self.features, indices, 0)
        self.images = [ self.images[i] for i in range(len(self.images))
                        if i not in indices]
        self.logger.debug("IDs {}".format(self.ids.shape))
        self.logger.debug("Features {}".format(self.features.shape))
        self.logger.debug("Images {}".format(len(self.images)))

    def clean(self):
        """Remove IDs which do not have enough features

        An ID is removed (features, ids, and images) when the number
        of features is lower than min_nb_features_per_id
        """
        if self.ids is None:
            return
        distrib = self.get_nb_features_per_id()
        ids_to_remove = [ ID for ID, count in distrib.items()
                          if count < self.min_nb_features_per_id ]
        self.logger.info("Removing IDs {}".format(ids_to_remove))
        indices_to_remove = [ i for i, idx in enumerate(self.ids)
                              if idx in ids_to_remove ]
        self.remove_indices(indices_to_remove)
| idiap/pytopenface | pytopenface/reidentifier.py | reidentifier.py | py | 11,493 | python | en | code | 2 | github-code | 90 |
25587193744 | from __future__ import print_function
import argparse
import json
import uuid
from apiclient import discovery
from apiclient.errors import HttpError
import httplib2
from oauth2client.client import GoogleCredentials
# 30 days in milliseconds
_EXPIRATION_MS = 30 * 24 * 60 * 60 * 1000
NUM_RETRIES = 3
def create_big_query():
    """Authenticate with Application Default Credentials and return a
    BigQuery v2 service object built via the API discovery client.
    """
    creds = GoogleCredentials.get_application_default()
    return discovery.build(
        "bigquery", "v2", credentials=creds, cache_discovery=False
    )
def create_dataset(biq_query, project_id, dataset_id):
    """Create ``dataset_id`` under ``project_id``; return True on success.

    A 409 response (dataset already exists) is reported as a warning and
    still counts as success.
    """
    request_body = {
        "datasetReference": {"projectId": project_id, "datasetId": dataset_id}
    }
    try:
        biq_query.datasets().insert(
            projectId=project_id, body=request_body
        ).execute(num_retries=NUM_RETRIES)
    except HttpError as http_error:
        if http_error.resp.status != 409:
            # Note: For more debugging info, print "http_error.content"
            print(
                "Error in creating dataset: %s. Err: %s"
                % (dataset_id, http_error)
            )
            return False
        print("Warning: The dataset %s already exists" % dataset_id)
    return True
def create_table(
    big_query, project_id, dataset_id, table_id, table_schema, description
):
    """Create a (non-partitioned) table.

    ``table_schema`` is an iterable of (name, type, description) tuples;
    delegates to ``create_table2`` and returns its success flag.
    """
    fields = [
        {"name": name, "type": kind, "description": note}
        for (name, kind, note) in table_schema
    ]
    return create_table2(
        big_query, project_id, dataset_id, table_id, fields, description
    )
def create_partitioned_table(
    big_query,
    project_id,
    dataset_id,
    table_id,
    table_schema,
    description,
    partition_type="DAY",
    expiration_ms=_EXPIRATION_MS,
):
    """Creates a partitioned table.

    By default a date-partitioned table is created, each partition expiring
    30 days (``_EXPIRATION_MS``) after it was last modified.

    Args:
        big_query: service object, e.g. from ``create_big_query``.
        project_id, dataset_id, table_id: fully-qualified table location.
        table_schema: iterable of (name, type, description) tuples.
        description: table description string.
        partition_type: BigQuery time-partitioning type (e.g. "DAY").
        expiration_ms: per-partition expiration in milliseconds.

    Returns:
        True on success (see ``create_table2``).
    """
    fields = [
        {
            "name": field_name,
            "type": field_type,
            "description": field_description,
        }
        for (field_name, field_type, field_description) in table_schema
    ]
    return create_table2(
        big_query,
        project_id,
        dataset_id,
        table_id,
        fields,
        description,
        partition_type,
        expiration_ms,
    )
def create_table2(
    big_query,
    project_id,
    dataset_id,
    table_id,
    fields_schema,
    description,
    partition_type=None,
    expiration_ms=None,
):
    """Create a table from an explicit fields schema.

    Returns True unless the API reports an error other than 409
    (table already exists). Time-partitioning is added only when both
    ``partition_type`` and ``expiration_ms`` are truthy.
    """
    table_body = {
        "description": description,
        "schema": {"fields": fields_schema},
        "tableReference": {
            "datasetId": dataset_id,
            "projectId": project_id,
            "tableId": table_id,
        },
    }
    if partition_type and expiration_ms:
        table_body["timePartitioning"] = {
            "type": partition_type,
            "expirationMs": expiration_ms,
        }
    try:
        res = big_query.tables().insert(
            projectId=project_id, datasetId=dataset_id, body=table_body
        ).execute(num_retries=NUM_RETRIES)
    except HttpError as http_error:
        if http_error.resp.status == 409:
            print("Warning: Table %s already exists" % table_id)
            return True
        print(
            "Error in creating table: %s. Err: %s" % (table_id, http_error)
        )
        return False
    print('Successfully created %s "%s"' % (res["kind"], res["id"]))
    return True
def patch_table(big_query, project_id, dataset_id, table_id, fields_schema):
    """Patch an existing table's schema with ``fields_schema``.

    Returns True on success, False if the API call failed.
    """
    is_success = True
    body = {
        "schema": {"fields": fields_schema},
        "tableReference": {
            "datasetId": dataset_id,
            "projectId": project_id,
            "tableId": table_id,
        },
    }
    try:
        table_req = big_query.tables().patch(
            projectId=project_id,
            datasetId=dataset_id,
            tableId=table_id,
            body=body,
        )
        res = table_req.execute(num_retries=NUM_RETRIES)
        print('Successfully patched %s "%s"' % (res["kind"], res["id"]))
    except HttpError as http_error:
        # NOTE(review): message says "creating" but this call patches —
        # looks copied from create_table2; consider rewording.
        print("Error in creating table: %s. Err: %s" % (table_id, http_error))
        is_success = False
    return is_success
def insert_rows(big_query, project_id, dataset_id, table_id, rows_list):
    """Stream ``rows_list`` into the table via tabledata().insertAll.

    Returns False when the HTTP call fails or when the response carries
    per-row ``insertErrors``; True otherwise.
    """
    try:
        res = big_query.tabledata().insertAll(
            projectId=project_id,
            datasetId=dataset_id,
            tableId=table_id,
            body={"rows": rows_list},
        ).execute(num_retries=NUM_RETRIES)
    except HttpError as http_error:
        print("Error inserting rows to the table %s" % table_id)
        print("Error message: %s" % http_error)
        return False
    if res.get("insertErrors", None):
        print("Error inserting rows! Response: %s" % res)
        return False
    return True
def sync_query_job(big_query, project_id, query, timeout=5000):
    """Run ``query`` synchronously (timeout in ms).

    Returns the job response dict, or None if the request failed.
    """
    request_body = {"query": query, "timeoutMs": timeout}
    try:
        return (
            big_query.jobs()
            .query(projectId=project_id, body=request_body)
            .execute(num_retries=NUM_RETRIES)
        )
    except HttpError as http_error:
        print("Query execute job failed with error: %s" % http_error)
        print(http_error.content)
        return None
# List of (column name, column type, description) tuples
def make_row(unique_row_id, row_values_dict):
    """Build a BigQuery insertAll row entry.

    ``row_values_dict`` maps column names to column values.
    """
    row = {"insertId": unique_row_id, "json": row_values_dict}
    return row
| grpc/grpc | tools/gcp/utils/big_query_utils.py | big_query_utils.py | py | 5,951 | python | en | code | 39,468 | github-code | 90 |
18348312289 | import sys
sys.setrecursionlimit(10000)
n=int(input())
a=[[0]*(n-1) for i in range(n)]
id=[[-1]*(n) for i in range(n)]
MAXV=n*(n-1)//2
to=[[]*(n) for i in range(MAXV)]
def toId(i,j):
    """Return the vertex number assigned (in ``id``) to the unordered pair {i, j}.

    The ``id`` table is only filled for i < j, so swap the arguments into
    ascending order first.
    """
    if (i>j):
        i,j=j,i
    return id[i][j]
visited=[False]*MAXV
calculated=[False]*MAXV
dp=[0]*MAXV
def dfs(v):
    """Return the longest-path length (in vertices) starting at ``v`` in the
    graph ``to``, memoized in ``dp``; return -1 if a cycle is reachable.

    A vertex that is ``visited`` but not yet ``calculated`` is still on the
    recursion stack, so reaching it again means the graph has a cycle.
    """
    if visited[v]:
        if not calculated[v]:
            return -1
        return dp[v]
    visited[v]=True
    dp[v]=1
    for u in to[v]:
        res = dfs(u)
        if res==-1:
            return -1
        dp[v]=max(dp[v], res+1)
    calculated[v]=True
    return dp[v]
def main():
    """Each player i lists the opponents to face in order; every match (an
    unordered pair of players) becomes a vertex, and each player's ordering
    adds a dependency edge. Print the length of the longest dependency chain
    among matches, or -1 if the orderings are contradictory (cyclic).
    """
    # Read each player's opponent list (converted to 0-based indices).
    for i in range(n):
        a[i]=list(map(lambda x:int(x)-1,input().split()))
    # Assign a unique vertex number to every unordered pair (i, j), i < j.
    v=0
    for i in range(n):
        for j in range(n):
            if i<j:
                id[i][j]=v
                v+=1
    # Replace opponents by match-vertex numbers, then add an edge from each
    # match to the one the same player must play immediately before it.
    for i in range(n):
        for j in range(n-1):
            a[i][j]=toId(i,a[i][j])
        for j in range(n-2):
            to[a[i][j+1]].append(a[i][j])
    # Answer is the longest chain over all matches; -1 signals a cycle.
    ans=0
    for i in range(v):
        res=dfs(i)
        if res==-1:
            print(-1)
            exit(0)
        ans=max(ans, res)
    print(ans)
if __name__=='__main__':
main() | Aasthaengg/IBMdataset | Python_codes/p02925/s025834459.py | s025834459.py | py | 1,032 | python | en | code | 0 | github-code | 90 |
30828066791 | import discord
import asyncio
import datetime
import json
from discord.ext import commands
f = open("rules.txt", "r")
rules = f.readlines()
class main_cog(commands.Cog):
    """General-purpose command cog: help listing, moderation (kick, ban,
    temp-ban, unban, clear), server rules, and user/server info commands.

    User-facing strings are intentionally in Spanish; each command's
    ``help=`` kwarg documents it for discord.py's built-in help system.
    """
    def __init__(self, bot):
        self.bot = bot
        self.help_message = """
```
Comandos Generales:
!Help - Despliega todos los comandos disponibles
!Regla <num> - Envía la regla específica
!Reglas - Despliega las reglas del server
!Info <user> - Depliega la información del usuario
!Server - Despliega la información del servidor
!Avatar <user> - Manda la foto de perfil del usuario
Comandos para Admins:
!K <user> - Expulsa a un usuario
!Tempban <user> <cantidad> <unidad de tiempo> - Ban temporal a un usuario
!Ban <user> - Banea a un usuario
!Unban <user y id> - Desbanea a un usuario
!C <num> - Borra un cantidad de mensajes específica
Comandos de la calculadora:
!S <num1> <num2> - Suma 2 cantidades
!R <num1> <num2> - Resta 2 cantidades
!X <num1> <num2> - Multiplica 2 cantidades
!D <num1> <num2> - Divide 2 cantidades
!E <num1> <num2> - Exponencia un número en una cantidad deseada
!E2 <num> - Exponencia al cuadrado un numero
!E3 <num> - Exponencia al cubo un numero
Music commands:
No disponible
Otros comandos:
!Golpe <user> - Manda un gif a ese usuario
!Apreton <user> - Manda un gif a ese usuario
!Azar <pregunta> - Responde al azar una pregunta
!Moneda - Tira una moneda
!Dado6 - Tira un dado de 6 caras
!DobleDado - Tira 2 dados de 6 caras
!Carta - Manda una carta al azar
!Ctm
!Mlp
!Zokram
!Skmlla
!Yossef
!Over
```
"""
        self.text_channel_list = []
    # Command list
    @commands.command(name="Help", aliases=['Ayuda', 'ayuda', 'help'], help="Desplega todos los comandos disponibles")
    async def help(self, ctx):
        """Send the full command listing to the invoking channel."""
        await ctx.send(self.help_message)
    async def send_to_all(self, msg):
        """Broadcast ``msg`` to every channel in ``self.text_channel_list``."""
        for text_channel in self.text_channel_list:
            await text_channel.send(msg)
    # Kick a user
    @commands.command(name="Expulsar", aliases=['k', 'K', 'expulsar '], help="Expulsa a un usuario")
    @commands.has_permissions(kick_members=True)
    async def expulsar(self, ctx, member: discord.Member, *, reason="Sin ninguna razón en particular"):
        """DM the member the kick reason (if possible), then kick them."""
        try:
            await member.send(
                "Regresa a fornite fan de la CQ te kickeamos porque: " + reason)
        except:  # NOTE(review): bare except — consider catching discord.Forbidden/HTTPException.
            await ctx.send("El miembro tiene sus Dm's cerrados")
        await member.kick(reason=reason)
    # Temporary ban
    class DurationConverter(commands.Converter):
        """Parse strings like '10s' or '5m' into an (amount, unit) tuple."""
        async def convert(self, ctx, argument):
            amount = argument[:-1]
            unit = argument[-1]
            if amount.isdigit() and unit in ['s', 'm']:
                return int(amount), unit
            raise commands.BadArgument(message='Duracion no valida')
    @commands.command(name="Tempban", aliases=['tempban', 'banTemp', 'Bantemp'], help="Ban temporal a un usuario")
    async def tempban(self, ctx, member: commands.MemberConverter, duration: DurationConverter):
        """Ban a member, sleep for the requested duration, then unban."""
        # Seconds per unit: 's' = seconds, 'm' = minutes.
        multiplier = {'s': 1, 'm': 60}
        amount, unit = duration
        await ctx.guild.ban(member)
        await ctx.send(f'{member} has sido baneado temporalmente por {amount}{unit}.')
        await asyncio.sleep(amount * multiplier[unit])
        await ctx.guild.unban(member)
    # Ban a user
    @commands.command(name="Ban", aliases=['ban', 'Banamex', 'banamex'], help="Banea a un usuario")
    @commands.has_permissions(ban_members=True)
    async def ban(self, ctx, member: discord.Member, *,reason="Sin ninguna razón en particular"):
        """DM the member the ban reason, then ban them."""
        await member.send(member.name + "Regresa a fornite fan de la CQ te baneamos porque: " + reason)
        await member.ban(reason=reason)
    # Remove a user's ban
    @commands.command(name="Unban", aliases=['unban', 'unb', 'Unb'], help="Desbanea a un usuario")
    @commands.has_permissions(ban_members=True)
    async def unban(self, ctx, *, member):
        """Unban by 'name#discriminator', searching the guild's ban list."""
        banned_users = await ctx.guild.bans()
        member_name, member_disc = member.split('#')
        for banned_entry in banned_users:
            user = banned_entry.user
            if (user.name, user.discriminator) == (member_name, member_disc):
                await ctx.guild.unban(user)
                await ctx.send(member_name + " ha sido desbaneado")
                return
        await ctx.send(member_name + " no fue encontrado")
    # Mute a user (disabled)
    # @commands.command(aliases=['m'])
    # @commands.has_permissions(kick_members=True)
    # async def mute(self, ctx, member: discord.Member):
    #     muted_role = ctx.guild.get_role(900403284181917726)
    #     await member.add_roles(muted_role)
    #     await ctx.send(member.mention + " ha sido muteado")
    # Unmute a user (disabled)
    # @commands.command(name='unmute', aliases=['unm'])
    # @commands.has_permissions(kick_members=True)
    # async def unmute(self, ctx, member: discord.Member):
    #     muted_role = ctx.guild.get_role(900403284181917726)
    #     await member.remove_roles(muted_role)
    #     await ctx.send(member.mention + " ha sido desmuteado")
    # Delete messages
    @commands.command(name="Clear", aliases=['c', 'C', 'clear'], help="Borra un cantidad de mensajes específica")
    @commands.has_permissions(ban_members=True)
    async def clear(self, ctx, arg):
        """Purge ``arg`` messages from the channel (default 5 on bad input)."""
        #extract the amount to clear
        amount = 5
        try:
            amount = int(arg)
        except Exception: pass
        await ctx.channel.purge(limit=amount)
    # Specific rule
    @commands.command(name="Regla", aliases=['regla', 'Rule', 'rule'], help="Envía la regla específica")
    async def regla(self, ctx, *, number):
        """Send rule ``number`` (1-based) from the ``rules`` file lines."""
        await ctx.send(rules[int(number) - 1])
    # Rules
    @commands.command(name="Reglas", aliases=['reglas', 'Rules', 'rules'], help="Despliega las reglas del server")
    async def reglas(self, ctx):
        """Send the hard-coded server rules as an embed."""
        response = 'Estas son las reglas del server' \
                   '\n\n:one: Mandar pack obligatorio para entrar en confianza. ' \
                   '\n:two: Usar NGE en el Nombre tanto en DL como en Discord gei el que no lo use.' \
                   '\n:three: Mandar a chingar a su madre a U7 y al zeroTG ' \
                   '\n:four: Evitar salirse del grupo general, de lo contrario serán acreedores a una Sanción.'
        categorias_embed = discord.Embed(title=' ', description=f" ", color=discord.Color.blue())
        categorias_embed.add_field(name="Reglas | :exclamation:", value=f"{response}")
        await ctx.send(embed=categorias_embed)
    # User information
    @commands.command(name="User", aliases=['user', 'Info', 'info'], help="Depliega la información del usuario")
    async def info(self, ctx, member: discord.Member):
        """Send an embed with the member's name, mention, ID and avatar."""
        embed = discord.Embed(title=member.name, description=member.mention, color=discord.Color.green())
        embed.add_field(name="ID", value=member.id, inline=True)
        embed.set_thumbnail(url=member.avatar_url)
        embed.set_footer(icon_url=ctx.author.avatar_url, text=f"Requested by {ctx.author.name}")
        await ctx.send(embed=embed)
    # Server info
    @commands.command(name="Discord", aliases=['discord', 'Server', 'server'], help="Despliega la información del servidor")
    async def server(self, ctx):
        """Send an embed with the guild's creation date, region, ID and icon."""
        embed = discord.Embed(title=f"{ctx.guild.name}",timestamp=datetime.datetime.utcnow(), color=discord.Color.blue())
        embed.add_field(name="Server created at", value=f"{ctx.guild.created_at}")
        embed.add_field(name="Server Region", value=f"{ctx.guild.region}")
        embed.add_field(name="Server ID", value=f"{ctx.guild.id}")
        embed.set_thumbnail(url=ctx.guild.icon_url)
        await ctx.send(embed=embed)
    # Profile avatar
    @commands.command(name="Avatar", aliases=['avatar', 'Perfil', 'perfil'], help="Manda la foto de perfil del usuario")
    async def avatar(self, ctx):
        """Send avatars: the author's (no args), each mentioned member's,
        or the guild icon when called with 'server'/'guild'."""
        args = ctx.message.content.split(" ")[1:]
        embed = discord.Embed()
        embed.colour = discord.Color.from_rgb(0, 255, 255)
        if len(args) == 0:
            embed.title = ctx.author.name
            embed.set_image(url=ctx.author.avatar_url)
            await ctx.send(embed=embed)
        elif len(ctx.message.mentions) > 0:
            for member in ctx.message.mentions:
                embed.title = member.name
                embed.set_image(url=member.avatar_url)
                await ctx.send(embed=embed)
        elif args[0] in ("server", "guild"):
            embed.title = ctx.guild.name
            embed.set_image(url=ctx.guild.icon_url)
            await ctx.send(embed=embed)
| MarioVirgilio/Bot | main_cog.py | main_cog.py | py | 8,694 | python | es | code | 0 | github-code | 90 |
20018105582 | f = open("/home/t18476nt/db/GO/test.txt")
lines = f.readlines()
f.close()
len_f = len(lines)
class term:
def __init__(self, term):
self.term = term
self.pos = [i for i in range(
len_f) if "id: {}".format(term) in lines[i]][0]
self.name = lines[self.pos].split("name: ")[1].split("\t")[0]
tmp = lines[self.pos].split("\t")
if len(tmp) >= 3:
self.anc = {"GO:" + elm.split("GO:")[1].split(" !")[0]
for elm in tmp[2:]}
if self.term in self.anc:
self.anc.remove(self.term)
else:
self.anc = None
def tree(self):
funcs = [self.name]
query = set(self.anc)
searched = set()
while len(query) > 0:
ins = term(query.pop())
funcs.append(ins.name.replace("\n", ""))
if ins.anc is not None:
if ins.term not in searched:
query = query | ins.anc
searched.add(ins.term)
return funcs
| TANEO-bio/archaeal_core | UniProt_to_GO.py | UniProt_to_GO.py | py | 1,047 | python | en | code | 0 | github-code | 90 |
44680211184 | #!/usr/bin/env python
import sys
while True:
    line = sys.stdin.readline()
    if not line:
        break
    # Each input line holds two whitespace-separated tokens, read as char lists.
    str1,str2 = map(list,line.split())
    # Greedy scan: consume characters of str1 in order as they appear in str2.
    for each in str2:
        if each == str1[0]:
            str1.pop(0)
            # Stop once str1 is fully matched (also guards str1[0] above).
            if len(str1) == 0:
                break
    # str1 fully consumed <=> the first string is a subsequence of the second.
    if len(str1) == 0:
        print('Yes')
    else:
        print("No")
| Lzeyuan/Algorithm | 洛谷/Lg_python/UVA10340/UVA10340.PY | UVA10340.PY | py | 356 | python | en | code | 0 | github-code | 90 |
72577205096 | """ This is a LED Flash program on Raspberry Pi 3 onboard (LED0). """
# -*- coding: utf-8 -*-
import time
FLASH_TIMES = 5
FLASH_INTERVAL = 0.2
FILEPATH = '/sys/class/leds/led0/brightness'
def led_on():
    """Turn the Pi's onboard LED on by writing '1' to its sysfs brightness file.

    Uses a context manager so the file handle is closed even if the write
    fails (the original leaked the handle on error).
    """
    with open(FILEPATH, 'w') as f:
        f.write('1')
def led_off():
    """Turn the Pi's onboard LED off by writing '0' to its sysfs brightness file.

    Uses a context manager so the file handle is closed even if the write
    fails (the original leaked the handle on error).
    """
    with open(FILEPATH, 'w') as f:
        f.write('0')
def led_flash():
    """Blink the LED FLASH_TIMES times, FLASH_INTERVAL seconds on then off."""
    for i in range(FLASH_TIMES):
        led_on()
        time.sleep(FLASH_INTERVAL)
        led_off()
        time.sleep(FLASH_INTERVAL)
if __name__ == "__main__":
led_flash()
| kikuzo/sakuraio-filedownload | led_flash.py | led_flash.py | py | 543 | python | en | code | 0 | github-code | 90 |
34856035203 | #Reverse a string in Python
def rev_str(str):
    """Print each character of ``str`` back to front, then the full reversed
    string (the per-character prints mirror the original step-by-step output).
    """
    newstr = ""
    for ch in reversed(str):
        print(ch)
        newstr = newstr + ch
    print(newstr)
rev_str("Mukul") | mukulverma2408/PracticeGeeksforGeeks | PythonPracticeQuestion-Part2/PracticeExample-3.py | PracticeExample-3.py | py | 193 | python | en | code | 0 | github-code | 90 |
3115205439 |
"""
Purpose: Ulam number algorithm
Date created: 2020-01-05
URI: https://en.wikipedia.org/wiki/Ulam_number
Contributor(s):
Mark M.
From Wikipedia:
An Ulam number is a member of an integer sequence devised by and named
after Stanislaw Ulam, who introduced it in 1964.
The standard Ulam sequence (the (1, 2)-Ulam sequence) starts with U1 = 1
and U2 = 2. Then for n > 2, Un is defined to be the smallest integer that
is the sum of two distinct earlier terms in exactly one way and larger than
all earlier terms.
Terms must be distinct. For example, 4 is a member since:
4 = 3 + 1
But, we can ignore 2 + 2 since those aren't distinct values.
We would skip 5 since it is representable in two ways
5 = 1 + 4
5 = 2 + 3
Notes:
Wikipedia entry:
https://en.wikipedia.org/wiki/Ulam_number
OEIS entry and some script examples:
https://oeis.org/A002858
Wolfram entry:
http://mathworld.wolfram.com/UlamSequence.html
"""
### First 60 verified results (n = 340)
### Per: https://oeis.org/A002858
valid_list = [1, 2, 3, 4, 6, 8, 11, 13, 16, 18, 26, 28, 36, 38, 47, 48, 53,
57, 62, 69, 72, 77, 82, 87, 97, 99, 102, 106, 114, 126, 131,
138, 145, 148, 155, 175, 177, 180, 182, 189, 197, 206, 209, 219,
221, 236, 238, 241, 243, 253, 258, 260, 273, 282, 282, 309, 316,
319, 324, 339]
#######################
### Final algorithm ###
def sequence_gen(current_n: int, iterable: list):
    """Yield ``current_n`` iff it is the sum of exactly one pair of distinct
    values from ``iterable`` (the Ulam-number membership test).

    Yields nothing otherwise, so ``len(list(sequence_gen(n, seen)))`` is 1
    exactly when ``n`` qualifies. Fixes the original's duplicated
    ``sum_count`` initialisation and needless sorting of each pair, and
    bails out early once a second representation is found.
    """
    sum_count = 0
    for i in iterable:
        for j in iterable:
            # i < j counts each unordered pair of distinct values once.
            if i < j and i + j == current_n:
                sum_count += 1
                if sum_count > 1:
                    # More than one representation: not an Ulam number.
                    return
    if sum_count == 1:
        yield current_n
def ulam_sequence(n_digits: int) -> list:
    """Return every Ulam number n with 1 <= n <= n_digits (seeds 1 and 2)."""
    ulam_digits: list = []
    for n in range(1, n_digits + 1):
        # 1 and 2 are seeds; beyond that, membership requires exactly one
        # representation as a sum of two distinct earlier terms.
        if n < 3 or len(list(sequence_gen(n, ulam_digits))) == 1:
            ulam_digits.append(n)
    return ulam_digits
##############################################################################
### Class format
class UlamSequence:
    """Compute the (1, 2)-Ulam sequence up to ``max_number``.

    An Ulam number (after the seeds 1 and 2) is an integer that is the sum
    of two distinct earlier terms in exactly one way. Fixes the typo in the
    validation message, replaces the convoluted internal generator with a
    plain pair counter, and makes ``run()`` always return a list (the
    original returned None when the result was empty).
    """

    def __init__(self, max_number: int):
        self.max_number = max_number
        self.ulam_digits: list = list()

    def __repr__(self):
        if self.max_number:
            return f"<Ulam Sequence class; max_number = {self.max_number}>"
        else:
            return f"<Ulam Sequence class>"

    @property
    def max_number(self) -> int:
        return self.__max_number

    @max_number.setter
    def max_number(self, value) -> None:
        # Reject non-integer bounds up front so run() cannot misbehave later.
        if isinstance(value, int):
            self.__max_number = value
        else:
            raise ValueError("Please pass integer value for max_number.")

    def _count_representations(self, n: int) -> int:
        """Count unordered pairs of distinct earlier terms summing to ``n``."""
        count = 0
        for i in self.ulam_digits:
            for j in self.ulam_digits:
                if i < j and i + j == n:
                    count += 1
        return count

    def run(self) -> list:
        """(Re)compute and return the Ulam numbers in [1, max_number]."""
        self.ulam_digits = []
        for n in range(1, self.max_number + 1):
            # 1 and 2 are seeds; otherwise require a unique representation.
            if n < 3 or self._count_representations(n) == 1:
                self.ulam_digits.append(n)
        return self.ulam_digits
## Samples
useq = UlamSequence(10)
res = useq.run()
print(res)
# Set new max_number and run
useq.max_number = 100
res = useq.run()
print(res)
# Set new max_number and run
useq.max_number = 100
res = useq.run()
print(res)
######################################################################
##########################################################################
##############################################################################
######### Work area
ulam_sequence(10)
ulam_sequence(100)
ulam_sequence(340)
sum_count: int # Aggregate count of sum of ulam numbers
current_pair: tuple
n_digits: int = 10
ulam_digits: list = [] # Keep a list of approved numbers
def sequence_gen(current_n: int, iterable: list):
    """Work-area duplicate of the ``sequence_gen`` defined earlier in this
    file: yields ``current_n`` iff exactly one pair of distinct values in
    ``iterable`` sums to it. Being the later definition, this is the one in
    effect at import time.
    """
    sum_count: int = 0
    current_pair: tuple
    sum_count: int = 0  # NOTE(review): duplicate initialisation (harmless).
    for i in iterable:
        for j in iterable:
            if i < j:
                current_pair = sorted((j, i))
                if sum(current_pair) == current_n:
                    sum_count += 1
                    # print(f"{current_pair}\t{sum_count}")
    if sum_count == 1:
        yield current_n
# ### Generator testing
# init_n = 6
# init_list = [1, 2, 3, 4,]
# [i for i in sequence_gen(init_n, init_list)]
def ulam_sequence(n_digits: int) -> list:
    """Work-area duplicate of the ``ulam_sequence`` defined earlier in this
    file: build the Ulam sequence up to ``n_digits`` using ``sequence_gen``
    as the unique-representation test.
    """
    tmp: list
    n: int = 0
    ulam_digits: list = list()
    while n < n_digits:
        n += 1
        if n < 3:
            # Seeds: 1 and 2 are Ulam numbers by definition.
            ulam_digits.append(n)
        else:
            tmp = [i for i in sequence_gen(n, ulam_digits)]
            if len(tmp) == 1:
                ulam_digits.append(n)
    return ulam_digits
ulam_sequence(10)
ulam_sequence(100)
ulam_sequence(340)
# def ulam_sequence(n_digits: int):
# max_n = float(n_digits)
# n: float = 0.
# while n <= max_n:
# n += 1.
# if n < 3.:
# print(n)
# ulams.append(n)
# else:
# ulam_generator
# sum_count = 0.
# for i in ulams:
# for j in ulams:
# if i < j:
# current_pair = sorted((j, i))
# if sum(current_pair) == n:
# sum_count += 1.
# if sum_count == 1.:
# print(n)
# ulams.append(n)
#############################################################
### Function with working algo.
# def ulam_sequence(n_digits: int):
# sum_count: float # Aggregate count of sum of ulam numbers
# current_pair: tuple
# n_digits: float = 10.
# n: float = 0.
# ulams: list = [] # Keep a list of approved numbers
# while n <= n_digits:
# n += 1.
# if n < 3.:
# print(n)
# ulams.append(n)
# else:
# sum_count = 0.
# for i in ulams:
# for j in ulams:
# if i < j:
# current_pair = sorted((j, i))
# if sum(current_pair) == n:
# sum_count += 1.
# if sum_count == 1.:
# print(n)
# ulams.append(n)
| MarkMoretto/python-examples-main | algorithms/sequences/ulam.py | ulam.py | py | 7,011 | python | en | code | 1 | github-code | 90 |
3825962468 | from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
#sample text
text = '''Data science is an interdisciplinary field of scientific methods, processes, algorithms and systems to extract knowledge
or insights from data in various forms, either structured or unstructured, similar to data mining.
Data science is a "concept to unify statistics, data analysis, machine learning and their related methods" in order to
"understand and analyze actual phenomena" with data.It employs techniques and theories drawn from many fields within
the broad areas of mathematics, statistics, information science, and computer science.
Turing award winner Jim Gray imagined data science as a "fourth paradigm" of science (empirical, theoretical, computational
and now data-driven) and asserted that "everything about science is changing because of the impact of information technology"
and the data deluge. When Harvard Business Review called it "The Sexiest Job of the 21st Century" the term became
a buzzword, and is now often applied to business analytics,business intelligence, predictive modeling, any arbitrary use
of data, or used as a sexed-up term for statistics. In many cases, earlier approaches and solutions are now simply
rebranded as "data science" to be more attractive, which can cause the term to "dilute beyond usefulness." While many
university programs now offer a data science degree, there exists no consensus on a definition or curriculum contents.
Because of the current popularity of this term, there are many "advocacy efforts" surrounding it.'''
print(text)
#nltk.download('stopwords')
#nltk.download('punkt')
#stopwords do not add value to the meaning of a sentence eg the, a, of...
stopWords = set(stopwords.words("english")) # predefined NLTK English stopwords
words = word_tokenize(text) # split the text into word tokens
# Build a word -> frequency table, ignoring stopwords.
freqTable = dict()
for word in words:
    word = word.lower()
    if word in stopWords: # skip stopwords
        continue
    if word in freqTable:
        freqTable[word] += 1
    else:
        freqTable[word] = 1
sentences = sent_tokenize(text) # split the text into sentences
sentenceValue = dict() # sentence -> accumulated score
for sentence in sentences:
    # NOTE(review): `index` is the word's 1-based *enumeration position* in
    # freqTable, not its frequency. Scoring by position looks like a bug —
    # `freqTable[wordValue]` was probably intended. Confirm before changing.
    for index, wordValue in enumerate(freqTable, start=1):
        if wordValue in sentence.lower():
            if sentence in sentenceValue:
                sentenceValue[sentence] += index
                #print(sentenceValue)
            else:
                sentenceValue[sentence] = index
                #print(sentenceValue)
sumValues = 0
for sentence in sentenceValue:
    sumValues += sentenceValue[sentence] # total score over all scored sentences
average = int(sumValues/ len(sentenceValue)) # average sentence score
#print('average ' + str(average))
# Emit sentences scoring strictly above average as the summary.
summary = ''
for sentence in sentences:
    if sentence in sentenceValue and sentenceValue[sentence] > (average):
        summary += " " + sentence
print(summary)
class Solution(object):
    """LeetCode 6 — ZigZag Conversion."""

    def convert(self, s, numRows):
        """Write ``s`` in a zigzag over ``numRows`` rows and read row by row."""
        if numRows == 1: return s
        buckets = ['' for _ in range(numRows)]
        row, step = 0, 1
        for ch in s:
            buckets[row] += ch
            # Bounce the row index between the top and bottom rows.
            if row == 0:
                step = 1
            elif row == numRows - 1:
                step = -1
            row += step
        return ''.join(buckets)
var=Solution()
print(var.convert("PAYPALISHIRING", 3)) | codejigglers/leetcodes | leetcode/zigzag_iterator.py | zigzag_iterator.py | py | 419 | python | en | code | 0 | github-code | 90 |
35810116263 | # import the necessary packages
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam,SGD
from keras.preprocessing.image import img_to_array
import keras
from keras import layers
from keras import models
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from imutils import paths
import numpy as np
import random
import pickle
import cv2
import os
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
"""
path='dataset'
# initialize the number of epochs to train for, initial learning rate,
# batch size, and image dimensions
EPOCHS = 2
INIT_LR = 1e-3
BS = 12
IMAGE_DIMS = (224, 224, 3)
# grab the image paths and randomly shuffle them
print("loading images...")
imagePaths = sorted(list(paths.list_images(path)))
random.seed(42)
random.shuffle(imagePaths)
# initialize the data and labels
data = []
labels = []
# loop over the input images
for imagePath in imagePaths:
# load the image, pre-process it, and store it in the data list
image = cv2.imread(imagePath)
image = cv2.resize(image, (IMAGE_DIMS[1], IMAGE_DIMS[0]))
image = img_to_array(image)
data.append(image)
# extract set of class labels from the image path and update the
# labels list
l = label = imagePath.split(os.path.sep)[-2].split("_")
labels.append(l)
print(labels)
# scale the raw pixel intensities to the range [0, 1]
data = np.array(data, dtype="float") / 255.0
labels = np.array(labels)
# binarize the labels using scikit-learn's special multi-label
# binarizer implementation
print("class labels:")
mlb = MultiLabelBinarizer()
labels = mlb.fit_transform(labels)
print(labels)
# loop over each of the possible class labels and show them
for (i, label) in enumerate(mlb.classes_):
print("{}. {}".format(i + 1, label))
# partition the data into training and testing splits using 80% of
# the data for training and the remaining 20% for testing
(trainX, testX, trainY, testY) = train_test_split(data,
labels, test_size=0.30, random_state=42)
print("Train X = ",trainX.shape)
print("Test X = ",testX.shape)
print("Train Y = ",trainY.shape)
print("Test Y = ",testY.shape)
# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=25, width_shift_range=0.1,
height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,
horizontal_flip=True, fill_mode="nearest")
from models import alexnet
model = alexnet((224,224,3),2)
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="binary_crossentropy", optimizer=opt,
metrics=["accuracy"])
# train the network
print(" training network...")
H = model.fit_generator(
aug.flow(trainX, trainY, batch_size=BS),
validation_data=(testX, testY),
steps_per_epoch=len(trainX) // BS,
epochs=EPOCHS, verbose=1)
model.save('facial_model.h5')
import matplotlib.pyplot as plt
# plot the training loss and accuracy
plt.style.use("ggplot")
plt.figure()
N = EPOCHS
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), H.history["acc"], label="train_acc")
plt.plot(np.arange(0, N), H.history["val_acc"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="upper left")
plt.savefig('facial_plot.png')
plt.show()
"""
# -*- coding: utf-8 -*-
import cv2
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
from keras.models import Model
path_train='train data/'
#train_datagen = ImageDataGenerator(rescale=1./255,shear_range=0.2,zoom_range=0.2,horizontal_flip=True,rotation_range=25, width_shift_range=0.1,
# height_shift_range=0.1,fill_mode="nearest")
train_datagen = ImageDataGenerator(rescale=1./255,validation_split=0.30,rotation_range=15)
test_datagen = ImageDataGenerator(rescale=1./255)
#train generator and validation generator
train_generator = train_datagen.flow_from_directory(path_train,target_size=(224, 224),subset = 'training',
batch_size=32,class_mode='categorical')
validation_generator = train_datagen.flow_from_directory(path_train,target_size=(224, 224),subset='validation',
batch_size=32,class_mode='categorical',shuffle = False)
#test_generator = test_datagen.flow_from_directory(path_train,target_size=(224, 224),
# batch_size=32,class_mode='categorical',shuffle = False)
image_size = 224
IMG_SHAPE = (image_size, image_size, 3)
batch_size = 32
from models import alexnet,our_model,VGG_16,inception_v3,VGG_19,EfficientNetB0
from VGG_Models import VGG16,VGG19
#Create the base model from the pre-trained model MobileNet V2
#model = alexnet(IMG_SHAPE,2)
model = EfficientNetB0(IMG_SHAPE,10) # image size and Number of classes
#compiling model
model.compile(optimizer=keras.optimizers.SGD(), #Adam(lr=0.00001)
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Early stopping on validation loss. NOTE(review): patience=200 can never
# trigger within the 50 training epochs below, so it is effectively disabled.
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=200)
# Checkpoint the best model (by validation accuracy) to Trained_Model.h5.
mc = ModelCheckpoint('Trained_Model.h5', monitor='val_accuracy', mode='max', verbose=1, save_best_only=True)
callbacks=[es, mc]
# Training the model
epochs = 50
steps_per_epoch = 32
validation_steps = 32
history = model.fit_generator(train_generator,
                              steps_per_epoch = steps_per_epoch,
                              epochs=epochs,
                              validation_data=validation_generator,
                              validation_steps=validation_steps,
                              callbacks = callbacks)
# Save the final model as an HDF5 file.
# NOTE(review): this overwrites the best checkpoint written by ModelCheckpoint
# above with the *last* epoch's weights — confirm that is intended.
model.save('Trained_Model.h5') # creates a HDF5 file 'my_model.h5'
print('model saved!')
#plotting the graphs
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, 'b-+', label='Training Accuracy')
plt.plot(val_acc,'r-*', label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()),1])
plt.title('Training and Validation Accuracy')
plt.savefig("Training and Validation Accuracy Graph")
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0,max(plt.ylim())])
plt.title('Training and Validation Loss')
plt.savefig("Training and Validation Loss")
plt.show()
from sklearn.metrics import classification_report, confusion_matrix
num_of_test_samples = len(validation_generator)
target_names = ["gazella","giraffe ","hartebeest","hippopotamus","Lion","wildbeest","Buffalo","elephant","warthog ","zebra"]
#num_of_test_samples=1785
#Confution Matrix and Classification Report
Y_pred = model.predict_generator(validation_generator)
y_pred = np.argmax(Y_pred, axis=1)
print(len(y_pred))
print('Confusion Matrix')
cm = confusion_matrix(validation_generator.classes, y_pred)
print(cm)
print('Classification Report')
print(classification_report(validation_generator.classes, y_pred, target_names=target_names))
import matplotlib.pyplot as plt
import matplotlib.pyplot
from mlxtend.plotting import plot_confusion_matrix
plt.figure(figsize=(20, 20))
fig, ax = plot_confusion_matrix(conf_mat=cm,
colorbar=True,
show_absolute=True,
show_normed=False,
class_names=target_names)
plt.savefig("Confusion Matrix_2nd_iteration")
plt.show()
import seaborn as sns
import pandas as pd
cmn = cm.astype('float') / cm.sum(axis=1)#neaxwis used to conver row into column
fig, ax = plt.subplots(figsize=(20,7))
sns.heatmap(cmn, center=0, annot=True, fmt='.2f', linewidths=1, xticklabels=target_names, yticklabels=target_names)
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show(block=False)
plt.savefig("2nd Confusion Matrix_2nd_iteration")
plt.show()
| Adnan8622/Wild-Animal-Classification | Main.py | Main.py | py | 8,358 | python | en | code | 0 | github-code | 90 |
def main():
    """Read an h x w grid with n marked cells from stdin and print, for each
    j in 1..9, how many 3x3 windows contain exactly j marks (plus the count
    of empty windows first)."""
    import sys
    input=sys.stdin.readline
    h,w,n=map(int,input().split())
    ans=[0]*10
    # p: encoded centers of 3x3 windows touched by at least one mark;
    # q: encoded positions of the marks themselves.
    # Positions are packed into one int as row * 10**10 + col.
    p=set()
    q=set()
    for i in range(n):
        a,b=map(int,input().split())
        for j in range(-1,2):
            for k in range(-1,2):
                # Only window centers fully inside the grid are valid.
                if 2<=a+j<=h-1 and 2<=b+k<=w-1:
                    r=(a+j)*(10**10)+b+k
                    p.add(r)
        q.add(a*(10**10)+b)
    p=list(p)
    # For every touched window, count the marks inside its 3x3 area.
    for x in p:
        cnt=0
        for j in range(-1,2):
            for k in range(-1,2):
                if x+j*(10**10)+k in q:
                    cnt+=1
        ans[cnt]+=1
    # Windows with zero marks = all windows minus the touched ones.
    print((h-2)*(w-2)-sum(ans[1:]))
    for i in range(1,10):
        print(ans[i])
if __name__ == '__main__':
    main()
18541405869 | # coding: utf-8
# Competitive-programming task: base price 700 plus 100 for every 'o' in the
# input string (toppings marked 'o' vs 'x').
a = input()
# str.count replaces the original manual dict-lookup loop; output is identical.
print(700 + 100 * a.count("o"))
12597985287 | import boto3
def lambda_handler(event, context):
    """AWS Lambda entry point: create a fixed-name SQS queue and report it.

    `event` and `context` are supplied by the Lambda runtime but unused.
    """
    queue_name = 'my-sqs-queue'
    # Boto3 resource handle for SQS; create_queue is idempotent for an
    # existing queue with identical attributes.
    sqs_resource = boto3.resource('sqs')
    created_queue = sqs_resource.create_queue(QueueName=queue_name)
    return {
        'statusCode': 200,
        'body': f'Successfully created SQS queue {queue_name}'
    }
| hogtai/Public_Repo | Lambda_Python_Scripts/Create_SQS_Queue.py | Create_SQS_Queue.py | py | 484 | python | en | code | 4 | github-code | 90 |
28135229000 | # -*- coding:utf-8 -*-
''' SQL Table
[Basic] = ID, Name, Abstraction, Structure, Status, Description, Extended_Description, Background_Detail, Likelihood_Of_Exploit, Functional_Area, Affected_Resource
'''
import xml.etree.ElementTree as elemTree
from xml.etree.ElementTree import parse
import xmltodict
import json
if __name__ == '__main__':
    """Parse cwec_v4.1.xml and emit INSERT statements for CWE_Weakness_Basic."""
    result = list()
    tree = elemTree.parse('cwec_v4.1.xml')
    root = tree.getroot()
    # i = [Weaknesses, Categories, Views, External_References]
    for i in root:
        # j = [Weakness, Category, View, External_Reference]
        for j in i:
            if "Weakness" in j.tag:
                ID = j.attrib.get('ID')
                Name = j.attrib.get('Name').replace("'", '')
                # Bug fix: look the values up by attribute *name*. The old
                # keys ('Base', 'Simple', 'Incomplete') were attribute
                # values, so .get() always returned None.
                Abstraction = j.attrib.get('Abstraction')
                Structure = j.attrib.get('Structure')
                Status = j.attrib.get('Status')
                for k in j:
                    # k = [Description, Background_Details, Likelihood_Of_Exploit, Functional_Area, Affected_Resource, ...]
                    if "Description" in k.tag:
                        if ("Extended_Description") in k.tag:
                            try:
                                Extended_Description = k.text.replace('\n ','').replace("'","").replace('\n\t ','').replace(' ','').replace('\n','').replace(' ','')
                            except AttributeError:
                                # k.text can be None; fall back to the raw
                                # value. (Previously the raw value clobbered
                                # the cleaned one unconditionally.)
                                Extended_Description = k.text
                        else:
                            Description = k.text.replace('\n\t\t\t ','').replace('\n','').replace("'",'')
                    if "Background_Details" in k.tag:
                        Background_Detail = list()
                        for l in k:
                            Background_Detail.append(l.text.replace('\n ','').replace("'",''))
                    if "Likelihood_Of_Exploit" in k.tag:
                        Likelihood_Of_Exploit = k.text
                    if "Functional_Area" in k.tag:
                        Functional_Area = k.text.replace('\n ','')
                    if "Affected_Resource" in k.tag:
                        Affected_Resource = k.text.replace('\n ','')
                try:
                    # NOTE(review): values are interpolated into the SQL text
                    # unescaped. Acceptable for trusted CWE data written to a
                    # .sql file, but never reuse this with untrusted input.
                    for z in range(len(Background_Detail)):
                        query = "INSERT INTO Vulnerability_DB.CWE_Weakness_Basic (ID, Name, Abstraction, Structure, Status, Description, Extended_Description, Background_Detail, Likelihood_Of_Exploit, Functional_Area, Affected_Resource) VALUES ('{}','{}','{}','{}','{}','{}','{}', '{}', '{}', '{}', '{}');".format(ID, Name, Abstraction, Structure, Status, Description, Extended_Description, Background_Detail[z], Likelihood_Of_Exploit, Functional_Area, Affected_Resource)
                        result.append(query)
                except Exception as e:
                    # A Weakness without the optional fields simply emits no
                    # row, matching the original behaviour.
                    pass
    with open('Vulnerability_DB.CWE_Weakness_Basic.sql', 'a+') as f:
        for y in range(len(result)):
            f.write(result[y])
            f.write('\n')
17964212569 | import math
import copy
from operator import mul
from functools import reduce
from collections import defaultdict
from collections import Counter
from collections import deque
# 直積 A={a, b, c}, B={d, e}:のとき,A×B={(a,d),(a,e),(b,d),(b,e),(c,d),(c,e)}: product(A, B)
from itertools import product
# 階乗 P!: permutations(seq), 順列 {}_len(seq) P_n: permutations(seq, n)
from itertools import permutations
# 組み合わせ {}_len(seq) C_n: combinations(seq, n)
from itertools import combinations
# 一次元累積和
from itertools import accumulate
from bisect import bisect_left, bisect_right
import re
# import numpy as np
# from scipy.misc import comb
# 再帰がやばいとき
# import sys
# sys.setrecursionlimit(10**9)
def inside(y, x, H, W):
    """Return True when cell (y, x) lies within an H-by-W grid."""
    row_ok = 0 <= y < H
    col_ok = 0 <= x < W
    return row_ok and col_ok
# 四方向: 右, 下, 左, 上
dy = [0, -1, 0, 1]
dx = [1, 0, -1, 0]
# Fast-input helpers: one int, one line of ints, and x lines of one int each.
def i_inpl(): return int(input())
def l_inpl(): return list(map(int, input().split()))
def line_inpl(x): return [i_inpl() for _ in range(x)]
INF = int(1e18)
MOD = int(1e9)+7 # 10^9 + 7
# field[H][W]
def create_grid(H, W, value=0):
    """Build an H-row by W-column grid with every cell set to `value`.

    Rows are independent lists, so mutating one row never affects another.
    """
    grid = []
    for _ in range(H):
        grid.append([value] * W)
    return grid
########
def main():
    """Domino-colouring count (mod 1e9+7).

    Reads N and the two rows S1/S2 from stdin, compresses the tiling into a
    sequence of vertical ("X") and horizontal ("Y") domino columns, then
    multiplies the number of colour choices allowed by each transition.
    """
    N = i_inpl()
    S1, S2 = input(), input()
    l = []
    i = 0
    while True:
        if S1[i] == S2[i]:
            l.append("X")
        else:
            l.append("Y")
            i += 1
        i += 1
        if i >= N:
            break
    # First domino: 3 colour choices if vertical, 6 (3*2) if horizontal.
    if l[0] == "X":
        ans = 3
    else:
        ans = 6
    for i in range(1, len(l)):
        if l[i-1] == "X" and l[i] == "Y":
            ans *= 2
        elif l[i-1] == "X" and l[i] == "X":
            ans *= 2
        elif l[i-1] == "Y" and l[i] == "Y":
            ans *= 3
        # "Y" -> "X" allows exactly one choice, so no multiplication.
        # (The original had an unreachable duplicate of the first branch
        # multiplying by 1; removed.)
        ans %= MOD
    print(ans)
# Run only when executed as a script.
if __name__ == "__main__":
    main()
| Aasthaengg/IBMdataset | Python_codes/p03626/s217698698.py | s217698698.py | py | 1,869 | python | en | code | 0 | github-code | 90 |
42297516787 |
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
import torch
from torch.autograd import Variable
import torch.nn.functional as nnf
from torch.utils.data import random_split
from torch.optim import SGD
from torch.distributions import constraints
import torchvision as torchv
import torchvision.transforms as torchvt
from torch import nn
import torchvision.transforms as transforms
from torch.autograd import grad
import scipy.stats as st
from sklearn.preprocessing import StandardScaler
from copy import deepcopy
import time
from utils.parameters import *
torch.manual_seed(1)
class DNN(nn.Module):
    """Fully-connected torch network with a sklearn-style fit/predict API.

    The actual layer stack is produced by ``build_architecture`` (imported
    from utils.parameters) from the attributes set in ``__init__``.
    """
    def __init__(self,
                 n_dim=1,
                 dropout_prob=0.0,
                 dropout_active=False,
                 num_layers=2,
                 num_hidden=200,
                 output_size=1,
                 activation="Tanh",
                 mode="Regression"
                 ):
        super(DNN, self).__init__()
        # Layout/hyper-parameters consumed by build_architecture below.
        self.n_dim = n_dim
        self.num_layers = num_layers
        self.num_hidden = num_hidden
        self.mode = mode
        self.activation = activation
        self.device = torch.device('cpu') # Make this an option
        self.output_size = output_size
        self.dropout_prob = dropout_prob
        self.dropout_active = dropout_active
        self.model = build_architecture(self)
    def fit(self, X, y, learning_rate=1e-3, loss_type="MSE", batch_size=100, num_iter=500, verbosity=False):
        """Train with Adam on mini-batches sampled with replacement.

        :param X: array reshapeable to (-1, n_dim)
        :param y: targets; assumed reshapeable to (batch, n_dim) — TODO confirm
        :param loss_type: key into loss_dict (only "MSE" currently)
        """
        self.X = torch.tensor(X.reshape((-1, self.n_dim))).float()
        self.y = torch.tensor(y).float()
        loss_dict = {"MSE": torch.nn.MSELoss}
        self.loss_fn = loss_dict[loss_type](reduction='mean')
        self.loss_trace = []
        # Never sample a batch larger than the dataset itself.
        batch_size = np.min((batch_size, X.shape[0]))
        optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)
        for _ in range(num_iter):
            batch_idx = np.random.choice(list(range(X.shape[0])), batch_size )
            y_pred = self.model(self.X[batch_idx, :])
            self.loss = self.loss_fn(y_pred.reshape((batch_size, self.n_dim)), self.y[batch_idx].reshape((batch_size, self.n_dim)))
            # Keep a per-iteration loss trace for later inspection/plotting.
            self.loss_trace.append(self.loss.detach().numpy())
            if verbosity:
                print("--- Iteration: %d \t--- Loss: %.3f" % (_, self.loss.item()))
            self.model.zero_grad()
            optimizer.zero_grad() # clear gradients for this training step
            self.loss.backward() # backpropagation, compute gradients
            optimizer.step()
    def predict(self, X, numpy_output=True):
        """Forward pass; returns a numpy array unless numpy_output=False."""
        X = torch.tensor(X.reshape((-1, self.n_dim))).float()
        if numpy_output:
            prediction = self.model(X).detach().numpy()
        else:
            prediction = self.model(X)
        return prediction
| AlaaLab/deep-learning-uncertainty | models/base_models.py | base_models.py | py | 3,228 | python | en | code | 561 | github-code | 90 |
19966422451 | import tensorflow as tf
import numpy as np
import random
random_seed = 1
np.random.seed(random_seed)
random.seed(random_seed)
tf.random.set_seed(random_seed)
import flwr as fl
import common
# Federated-learning server configuration.
num_clients=2
num_rounds=10
fraction_fit=1.0
# Per-round server-side evaluation results, appended by evaluate() below.
losses=[]
accuracies=[]
def get_evaluate_fn(model):
    """Return an evaluation function for server-side evaluation."""
    # Load test data here to avoid the overhead of doing it in `evaluate` itself
    _, test = tf.keras.datasets.mnist.load_data()
    test_data, test_labels = test
    # preprocessing
    test_data, test_labels = common.preprocess(test_data, test_labels)
    # The `evaluate` function will be called after every round
    def evaluate(self,weights: fl.common.NDArrays,config):
        # NOTE(review): Flower invokes evaluate_fn as (server_round,
        # parameters, config); the first positional argument here is the
        # round number despite being named `self` — confirm before renaming.
        model.set_weights(weights) # Update model with the latest parameters
        loss, accuracy = model.evaluate(test_data, test_labels)
        losses.append(loss)
        accuracies.append(accuracy)
        return loss, {"accuracy": accuracy}
    return evaluate
# Build and compile the global model the server evaluates each round.
model = common.create_cnn_model()
loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
model.compile("sgd", loss=loss, metrics=["accuracy"])
strategy = fl.server.strategy.FedAvg(
    fraction_fit=fraction_fit,
    min_available_clients=num_clients,
    evaluate_fn=get_evaluate_fn(model),
    initial_parameters=fl.common.ndarrays_to_parameters(model.get_weights()),
)
# Blocks until num_rounds of federated training complete.
fl.server.start_server(
    server_address="0.0.0.0:8080",
    strategy=strategy,
    config=fl.server.ServerConfig(num_rounds=num_rounds),
)
print("server losses:")
print(losses)
print("server accuracies")
print(accuracies)
18499702799 | from cmath import exp
from math import pi
# Given two adjacent vertices (x1,y1) -> (x2,y2) of a square (counter-clockwise),
# recover the remaining two vertices by rotating the edge vector 90 degrees.
x1, y1, x2, y2 = map(int, input().split())
v = x2 - x1 + (y2 - y1) * 1j
# Multiplying by 1j rotates by exactly 90 degrees: (a+bj)*1j == -b+aj with no
# rounding error, unlike the original exp(pi/2*1j) which is only approximately i.
v_ = v * 1j
x3 = round(x2 + v_.real)
y3 = round(y2 + v_.imag)
x4 = round(x1 + v_.real)
y4 = round(y1 + v_.imag)
print(x3, y3, x4, y4)
10148880249 | # -*- coding: utf-8 -*-
"""
Created on Wed May 1 22:51:13 2019
@author: xiong
"""
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
# Export every frame of an h264 capture as a grayscale JPG next to the source.
PATH='C:/Users/xiong/OneDrive - McMaster University/Data and files/algae_project/0514/'
FILENAME='2_fl'
cap=cv2.VideoCapture(PATH + FILENAME + '.h264')
current_frame = 0
while True:
    ret, frame = cap.read()
    # Bug fix: stop cleanly at end of stream. The original ignored `ret` and
    # crashed inside cvtColor once cap.read() returned (False, None).
    if not ret:
        break
    cv2.imshow('frame', frame)
    name = PATH + FILENAME + str(current_frame) + '.jpg'
    print(current_frame)
    gray_0 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imwrite(name, cv2.multiply(gray_0, 1))
    # Allow early exit with 'q' while previewing.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    current_frame += 1
cap.release()
cv2.destroyAllWindows()
32900265405 | import os
from datetime import datetime
from flask import Flask, request, make_response
from json import JSONDecoder
from controllers import DatabaseController
from finq import FINQ, Identity
from constants import Fields, Errors, quote_fields
from controllers import MailController
from finq_extensions import extract_key
from controllers import ImageController
from utils import datetime_segment_day_intersections
app = Flask(__name__)
app.config.from_object('config')
# Timestamp format used for every booking datetime exchanged with clients.
DATEFORMAT = '%H:%M %d.%m.%Y'
HOSTNAME = os.environ.get('HOSTNAME', '--no-hostname--')
# Shared JSON decoder for request bodies and the sensible-data file.
decoder = JSONDecoder()
# Wire the controllers to the Flask app at import time.
DatabaseController.load(app)
MailController.load(app, DATEFORMAT, HOSTNAME)
def read_sensible_data():
    """Load and decode ./sensible_data.json into a Python object."""
    # read() keeps the file content verbatim; the previous
    # '\n'.join(f.readlines()) doubled every newline because readlines()
    # already keeps the trailing '\n' on each line.
    with open('./sensible_data.json', 'rt') as sensible_data_stream:
        raw = sensible_data_stream.read()
    return decoder.decode(raw)
def get_restaurant(restaurant):
    """Serialize one restaurant row into the API's quoted-dict shape."""
    payload = {
        Fields.Id: restaurant.id,
        Fields.WorkdayStart: str(restaurant.workday_start),
        Fields.WorkdayEnd: str(restaurant.workday_end),
        Fields.RestaurantName: restaurant.restaurant_name,
    }
    return quote_fields(payload)
@app.route("/restaurants", methods=['GET'])
def get_restaurants():
    """GET /restaurants -> mapping of restaurant id to serialized restaurant."""
    serialized = FINQ(DatabaseController.get_restaurants()).map(get_restaurant)
    return serialized.to_dict(lambda r: r[Fields.Id.value], Identity)
def get_table(table):
    """Serialize one table row into the API's quoted-dict shape."""
    payload = {
        Fields.Id: table.id,
        Fields.SeatCount: table.seat_count,
        Fields.TableNumber: table.table_number,
        Fields.RestaurantId: table.restaurant_id,
    }
    return quote_fields(payload)
def get_table_with_bookings(table, start: datetime, end: datetime):
    """Serialize a table plus its bookings whose days intersect [start, end]."""
    serialized_bookings = (FINQ(table.bookings)
                           .filter(datetime_segment_day_intersections(start, end))
                           .map(lambda b: b.id)
                           .map(get_booking)
                           .to_list())
    return quote_fields({
        Fields.Id: table.id,
        Fields.SeatCount: table.seat_count,
        Fields.TableNumber: table.table_number,
        Fields.RestaurantId: table.restaurant_id,
        Fields.Bookings: serialized_bookings})
@app.route("/tables", methods=['GET'])
def get_tables():
    """GET /tables -> mapping of table id to serialized table."""
    serialized = FINQ(DatabaseController.get_tables()).map(get_table)
    return serialized.to_dict(lambda t: t[Fields.Id.value], Identity)
@app.route("/restaurants/<identifier>/tables", methods=['GET'])
def get_tables_at_restaurant(identifier):
    """List one restaurant's tables keyed by table id."""
    tables = DatabaseController.get_restaurant(identifier).tables
    return FINQ(tables).map(get_table).to_dict(lambda t: t[Fields.Id.value], Identity)
@app.route("/restaurants/<restaurant_identifier>/tables/<table_number>", methods=['GET'])
def get_table_info(restaurant_identifier, table_number):
    """GET one table by its number within a restaurant."""
    found = DatabaseController.get_table_from_number_and_restaurant(table_number, restaurant_identifier)
    return get_table(found)
def book_table_at_restaurant(restaurant_identifier, table_number):
    """Pre-book a table from the POSTed JSON body and email a confirmation code.

    Body keys: booking start/end datetimes (DATEFORMAT) and the user's email.
    Returns a quoted dict with Success plus either BookingId or Error.
    """
    restaurant = DatabaseController.get_restaurant(restaurant_identifier)
    table = DatabaseController.get_table_from_number_and_restaurant(table_number, restaurant_identifier)
    book_info = decoder.decode(request.data.decode("utf8"))
    startTime = datetime.strptime(book_info[Fields.BookingStartDatetime.value], DATEFORMAT)
    endTime = datetime.strptime(book_info[Fields.BookingEndDatetime.value], DATEFORMAT)
    email = book_info[Fields.Email.value]
    # pre_book_table returns (success, confirmation-code-or-error, booking id).
    booked, result, booking_id = DatabaseController.pre_book_table(startTime, endTime, email, table.id)
    if booked:
        # Deliver the confirmation code; the booking stays pending until
        # /bookings/<id>/confirm is called with that code.
        MailController.send_confirmation_code(email, result, booking_id, startTime, endTime, table_number,
                                              restaurant.restaurant_name)
        return quote_fields({Fields.Success: True, Fields.BookingId: booking_id})
    else:
        return quote_fields({Fields.Success: False, Fields.Error: str(result)})
@app.route("/restaurants/<restaurant_identifier>/tables/<table_number>/book", methods=['POST'])
def book_table_at_restaurant_req(restaurant_identifier, table_number):
    """POST wrapper adding the dev-frontend CORS header to the booking result."""
    body = book_table_at_restaurant(restaurant_identifier, table_number)
    response = make_response(body)
    response.headers["Access-Control-Allow-Origin"] = "http://localhost:3000"
    return response
@app.route("/bookings/<booking_id>", methods=['GET'])
def get_booking(booking_id):
    """Serialize one booking by id, or a NoBooking error when it is unknown.

    Also called directly (not only as a route) by the table serializers.
    """
    booking = DatabaseController.get_booking(booking_id)
    if booking:
        return quote_fields({Fields.Id: booking.id,
                             Fields.BookingStartDatetime: booking.booking_start_datetime.strftime(DATEFORMAT),
                             Fields.BookingEndDatetime: booking.booking_end_datetime.strftime(DATEFORMAT),
                             Fields.TableId: booking.table_id,
                             Fields.TableNumber: booking.table.table_number})
    return quote_fields({Fields.Id: booking_id, Fields.Error: Errors.NoBooking})
@app.route("/restaurants/<restaurant_identifier>/tables/<table_number>/bookings", methods=['GET'])
def get_table_bookings(restaurant_identifier, table_number):
    """List every booking of one table, or a NoTable error when it is unknown."""
    table = DatabaseController.get_table_from_number_and_restaurant(table_number, restaurant_identifier)
    if not table:
        return quote_fields({Fields.Success: False, Fields.Error: Errors.NoTable})
    serialized_bookings = FINQ(table.bookings).map(lambda b: b.id).map(get_booking).to_list()
    return quote_fields({Fields.Success: True, Fields.Bookings: serialized_bookings})
def confirm_booking(booking_id):
    """Validate the emailed confirmation code (query param 'code') for a booking."""
    if 'code' in request.args:
        # confirm_booking returns a falsy value on success, an error otherwise.
        err = DatabaseController.confirm_booking(booking_id, int(request.args['code']))
        if err:
            return quote_fields({Fields.Success: False, Fields.Error: str(err)})
        else:
            return quote_fields({Fields.Success: True, Fields.Booking: get_booking(booking_id)})
    return quote_fields({Fields.Success: False, Fields.Error: Errors.NoConfirmationCode})
@app.route("/bookings/<booking_id>/confirm", methods=['POST'])
def confirm_booking_req(booking_id):
    """POST wrapper around confirm_booking adding the dev-frontend CORS header."""
    body = confirm_booking(booking_id)
    response = make_response(body)
    response.headers["Access-Control-Allow-Origin"] = "http://localhost:3000"
    return response
def zip_restaurant(rt):
    """Attach an already-serialized table list to a serialized restaurant."""
    restaurant, serialized_tables = rt
    payload = get_restaurant(restaurant)
    payload[Fields.Tables] = serialized_tables
    return quote_fields(payload)
def search_suitable_tables():
    """Find free tables with enough seats in the requested window, grouped by
    restaurant. Helper for /search; reads start/end/seatsCount from request args."""
    start = datetime.strptime(request.args["startDatetime"], DATEFORMAT)
    end = datetime.strptime(request.args["endDatetime"], DATEFORMAT)
    seat_count = int(request.args["seatsCount"])
    # Group matching tables by restaurant, serialize each table with its
    # bookings, then zip the restaurant header onto every group.
    return {Fields.Restaurants.value: DatabaseController.search_tables(start, end, seat_count)
            .group_by(lambda t: t.restaurant_id)
            .map(lambda l: FINQ(l).map(lambda t: (t.restaurant_id, get_table_with_bookings(t, start, end)))
            .self(extract_key))
            .map(lambda kl: (DatabaseController.get_restaurant(kl[0]), kl[1]))
            .map(zip_restaurant)
            .to_list()}
def zip_table(t):
    """Serialize a (table, bookings) pair, embedding the bookings list."""
    table, bookings = t
    payload = get_table(table)
    payload[Fields.Bookings] = bookings.map(lambda b: b.id).map(get_booking).to_list()
    return quote_fields(payload)
def search_all_tables():
    """Like search_suitable_tables but returns every table (with bookings),
    not only the free ones. Helper for /search when searchAll=true."""
    start = datetime.strptime(request.args["startDatetime"], DATEFORMAT)
    end = datetime.strptime(request.args["endDatetime"], DATEFORMAT)
    seat_count = int(request.args["seatsCount"])
    # search_all_tables yields (table, bookings) pairs; zip_table embeds the
    # bookings before the restaurant header is attached.
    return {Fields.Restaurants.value: DatabaseController.search_all_tables(start, end, seat_count)
            .group_by(lambda t: t[0].restaurant_id)
            .map(lambda l: FINQ(l).map(lambda t: (t[0].restaurant_id, zip_table(t)))
            .self(extract_key))
            .map(lambda kl: (DatabaseController.get_restaurant(kl[0]), kl[1]))
            .map(zip_restaurant)
            .to_list()}
@app.route("/search", methods=['GET'])
def search_tables():
    """GET /search: table search across restaurants.

    searchAll=true returns every table; otherwise only free ones.
    """
    if 'restaurants' not in request.args:
        search_all = request.args["searchAll"] == "true"
        # (Removed leftover debug print of the flag.)
        if search_all:
            response = make_response(search_all_tables())
        else:
            response = make_response(search_suitable_tables())
        response.headers["Access-Control-Allow-Origin"] = "http://localhost:3000"
        return response
    # NOTE(review): the 'restaurants' branch is unimplemented and returns an
    # empty body without the CORS header.
    return ""
@app.route("/static/restaurant/image")
def get_restaurant_image():
    """Serve a restaurant's image, selected by the restaurant_id query param."""
    rid = request.args["restaurant_id"]
    return ImageController.get_image("images/restaurants/", rid)
| FacelessLord/web-dhtml-project | server/app.py | app.py | py | 8,388 | python | en | code | 0 | github-code | 90 |
31972016191 |
# Similar to the count-words lab: skip the file header, then parse daily
# rainfall readings and report the wettest day and the wettest year.
from datetime import datetime
with open('rain_data.txt') as f:
    rows = f.readlines()
# Advance past the dashed separator line that ends the header.
offset = 0
for row in rows:
    offset += 1
    if set(row.strip()) == set('-'):
        break
# Parse "<date> <total>" rows; rows with '-' placeholders fail int()/strptime
# with ValueError and are skipped.
datapoints = []
for row in rows[offset:]:
    fields = row.split()
    try:
        parsed = {'date': datetime.strptime(fields[0], '%d-%b-%Y'),
                  'total_rain': int(fields[1])}
    except ValueError:
        continue
    datapoints.append(parsed)
max_rain = max(datapoints, key=lambda day: day['total_rain'])
print(max_rain)
# Accumulate totals per calendar year.
yearly_rainfall = {}
for point in datapoints:
    year = point['date'].year
    yearly_rainfall[year] = yearly_rainfall.get(year, 0) + point['total_rain']
print(yearly_rainfall.items())
print(yearly_rainfall)
max_year_rain = max(yearly_rainfall.items(), key=lambda pair: pair[1])
print(max_year_rain)
import random
import random
class Gerar():
    """Builders for random binary populations used by the genetic algorithm."""
    @staticmethod
    def gera(num_populacao, num_cromossomo):
        """Return `num_populacao` chromosomes, each a list of
        `num_cromossomo` random bits (0 or 1).

        Declared @staticmethod (the original omitted both `self` and the
        decorator, so calling it on an instance passed the instance as
        `num_populacao`); Gerar.gera(a, b) keeps working unchanged.
        """
        return [[random.randint(0, 1) for _ in range(num_cromossomo)]
                for _ in range(num_populacao)]
18174414549 | # でつoO(YOU PLAY WITH THE CARDS YOU'RE DEALT..)
import sys
import sys
def main(N, K, A):
    """Print the smallest piece length X in (0, 10**9] such that cutting
    every log in A into pieces of length <= X needs at most K cuts."""
    ng, ok = 0, 10**9
    while ok - ng > 1:
        mid = (ng + ok) // 2
        # Cuts needed for length a at piece size mid: ceil(a/mid) - 1,
        # which equals (a - 1) // mid.
        cuts = sum((a - 1) // mid for a in A)
        if cuts <= K:
            ok = mid
        else:
            ng = mid
    print(ok)
if __name__ == '__main__':
    input = sys.stdin.readline
    N, K = map(int, input().split())
    *A, = map(int, input().split())
    main(N, K, A)
13738553358 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ############################################################################
#
# gpx2csv.py
# 04/17/2020 (c) Juan M. Casillas <juanm.casillas@gmail.com>
#
# read a gpx, generate a CSV list, and do some funky works on this
#
# ############################################################################
import sys
import argparse
import csv
import os
import pyproj
import numpy as np
from smooth import smooth_gpx
from geoid import GeoidHeight
from raster import RasterManager
from mtn import MTN
if __name__ == "__main__":
    # Parse CLI arguments: input GPX, output CSV, optional smoothing filter.
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", "--verbose", help="Show data about file and processing", action="count")
    parser.add_argument("-o", "--optimize", help="Optimize GPX input(filter)", action="store_true")
    parser.add_argument("gpx_file", help="GPX file to load")
    parser.add_argument("csv_out", help="Output CSV file")
    args = parser.parse_args()
    points, bounds, length = smooth_gpx(args.gpx_file, optimize=args.optimize, output=None)
    geoid = GeoidHeight()
    rasman = RasterManager()
    pd = []
    i = 1
    mtn = []
    for p in points:
        # N: geoid undulation at the point; elevation-N converts ellipsoidal
        # height to (presumably) orthometric height — TODO confirm convention.
        N = geoid.get(p.latitude, p.longitude)
        pd.append((p.longitude,p.latitude,p.elevation, p.elevation-N, N, i))
        # Record every distinct MTN50 map sheet the track crosses.
        r = MTN.where(p.longitude,p.latitude)
        if r['MTN50'][0][1] not in mtn:
            mtn.append(r['MTN50'][0][1])
        i+=1
    # Reproject the accumulated rows (adds x/y columns).
    pd = rasman.bulk_reproj(pd)
    # stdout
    # writer = csv.writer(sys.stdout, lineterminator=os.linesep)
    # writer = csv.writer(f)
    print("MTN Sheets: ", list(mtn))
    header = [ 'lon', 'lat', 'x', 'y', 'elev_ellip', 'elev_orto', 'N', 'index' ]
    with open(args.csv_out, 'w', newline='') as f:
        writer = csv.writer(f, lineterminator=os.linesep)
        writer.writerow(i for i in header)
        writer.writerows(pd)
| juanmcasillas/RoadTools | roadtools/core/gpx2csv.py | gpx2csv.py | py | 1,906 | python | en | code | 1 | github-code | 90 |
71044972776 | from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from rest_framework.response import Response
from django.http import HttpResponseRedirect
from rest_framework import status
from accounts.models import UserAccount
# def index(request):
# return render(request,'index.html')
@csrf_exempt
def logout(request):
    """Clear the refresh-token cookie on POST; report an error otherwise."""
    if request.method != 'POST':
        content = {'message': 'Something went wrong in space'}
        return Response(content)
    response = HttpResponseRedirect('')
    response.delete_cookie('refresh_token')
    return response
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import api_view, permission_classes
@csrf_exempt
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def get_user_data(request):
    """Return the authenticated user's basic account fields."""
    if request.method == 'GET':
        print(request.user.id)  # TODO(review): debug print; switch to logging
        _user_data_ = UserAccount.objects.filter(id=request.user.id).values('id','email','first_name','last_name','is_active','profile_pic')
        return Response(_user_data_)
    else:
        content = {'message': 'Something went wrong in space'}
        return Response(content)
@csrf_exempt
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def update_profile(request):
    """Work-in-progress endpoint: receives profile-picture uploads but does
    not persist them yet (see the commented update call below)."""
    print(request.user.id)
    images = request.FILES.getlist('profile_pic')
    print(images)
    # MyTable.objects.filter(pk=some_value).update(field1='some value')
    # profile_pic
    content = {'message': 'good'}
    return Response(content)
| Roshankc682/Mood_Music_player | core/views.py | views.py | py | 1,531 | python | en | code | 0 | github-code | 90 |
5501568848 |
import openslide
import os
import argparse
import glob
import torch
from scipy.io import loadmat
import numpy as np
import cv2
from tqdm.autonotebook import tqdm
import time
from rasterio import features
from shapely.geometry import shape
from shapely.geometry import mapping
import geojson
from util.util import hover_accumulate_instance_masks as accumulate_masks
from util.util import convolve_iter
from util.util import convolve_gaussian_iter
# +
#not the most elegant way, should do it without torch
if(torch.cuda.is_available()):
import cupy as xp
import cupyx.scipy as sp
from cupyx.scipy.signal import convolve2d
from cupy import asnumpy
else:
import numpy as xp
import scipy as sp
from scipy.signal import convolve2d
from numpy import asarray as asnumpy
# +
#TODO: make these arguments
#in_dir contents expected to be HoverNet output
in_dir_default = "../in_dir/"
out_dir_default = "../out_dir/"
kernel_size_default = 1000
parser = argparse.ArgumentParser(description='Make output for entire image using Unet')
parser.add_argument('--in_dir', help="input directory, expected to have HoverNet output structure",
default=in_dir_default, type=str)
parser.add_argument('--out_dir', help="output directory",
default=out_dir_default, type=str)
parser.add_argument('--kernel_type', help="type of kernel to use: 'flat', 'gaussian'",
default="gauss", type=str)
parser.add_argument('--kernel_size', help="size for flat kernel",
default=kernel_size_default, type=int)
parser.add_argument('--gauss_sigma', help="sigma for gauss kernel",
default=100, type=int)
args, unknown = parser.parse_known_args()
in_dir = args.in_dir
out_dir = args.out_dir
kernel_size = args.kernel_size
kernel_type = args.kernel_type
gauss_sigma = args.gauss_sigma
threshold = 0.1
inst_type = 1 #type of cells of interesst
#preparing output directories
if not os.path.exists(out_dir+"mask"):
os.makedirs(out_dir+"mask")
if not os.path.exists(out_dir+"blurr"):
os.makedirs(out_dir+"blurr")
if not os.path.exists(out_dir+"json"):
os.makedirs(out_dir+"json")
mat_files = glob.glob(in_dir+"mat/*.mat")
# +
kernel = xp.ones((kernel_size,kernel_size))/(kernel_size*kernel_size)
print(len(mat_files), "files to process")
for mat_file in mat_files:
name = os.path.basename(mat_file)
print("processing", name)
mat = loadmat(mat_file)
#create mask of all cancer cells
cancer_ids = [ mat["inst_uid"][i][0] for i in range(len(mat["inst_type"])) if mat["inst_type"][i] == inst_type]
mask = accumulate_masks(mat["inst_map"], cancer_ids)
cv2.imwrite(out_dir + "mask/" + name + ".png", mask.astype(np.uint8)*255)
if kernel_type == "flat":
mask_blurr = convolve_iter(mask, kernel, 2)
cv2.imwrite(out_dir + f"blurr/{name}_conv_{kernel_size}.png", mask_blurr*255)
else:
mask_blurr = convolve_gaussian_iter(mask, gauss_sigma, 2)
cv2.imwrite(out_dir + f"blurr/{name}_gauss_{gauss_sigma}.png", mask_blurr*255)
mask_regions = mask_blurr > threshold
cv2.imwrite(out_dir + f"blurr/{name}_t={threshold}.png", mask_regions*255)
#create polygons from mask and write them to QuPath compatile json
all_polygons = []
for s, value in features.shapes(mask_regions.astype(np.uint8), mask_regions):
poly = shape(s)
all_polygons.append(poly)
json_dicts = []
for poly in all_polygons:
json_dicts.append({
"type": "Feature",
"geometry": mapping(poly)})
with open(out_dir + f"json/{name}_t={threshold}.json", 'w') as outfile:
geojson.dump(json_dicts,outfile)
# -
| phyranja/DNA_estimation | regions_conv.py | regions_conv.py | py | 3,762 | python | en | code | 0 | github-code | 90 |
14614597587 | from datetime import datetime
import logging
import gspread
from gspread_dataframe import get_as_dataframe, set_with_dataframe
from oauth2client.service_account import ServiceAccountCredentials
from pytz import timezone
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class GSheetAPI(object):
    """
    Easily and efficiently manage a Google Sheet.
    Notes:
        - Credentials required are the Google Service account json file
        - Sheet must be shared with edit access to the `client_email` found in the service account
        - Credentials can be entered as a dictionary or a file path to a json file (entered as a string)
    Usage Example:
        gsheet = GSheetAPI(..)        # initialize the class
        gsheet.sheet_to_df(..)        # import data from the sheet to a Pandas DataFrame
        gsheet.change_gsheet(..)      # switch to a new Google Sheet to work off of
        gsheet.get_cell(..)           # get the contents of a single cell
        gsheet.change_tab(..)         # switch to a new tab in the current working Google Sheet
        gsheet.set_cell(..)           # set the value of a single cell
        gsheet.df_to_sheet(..)        # export a Pandas DataFrame to the current working sheet
        gsheet.timestamp_to_cell(..)  # export a timestamp to a single cell in the sheet
    """
    def __init__(self, credentials, sheet_id, tab_name):
        """
        Initializes the GSheetAPI and sets the current working sheet and current working tab

        :param dict|str credentials: Google Service Account json blob as a dict, or a file
            path to such a json file entered as a string
        :param str sheet_id: Sheet id found in the Google Sheet URL
        :param str tab_name: Name of the tab to use within the Google Sheet
        :raises TypeError: when credentials is neither a dict nor a str
        """
        # Read-only feeds scope is sufficient for gspread's spreadsheet access.
        scope = ['https://spreadsheets.google.com/feeds']
        if isinstance(credentials, str):
            creds = ServiceAccountCredentials.from_json_keyfile_name(credentials, scope)
        elif isinstance(credentials, dict):
            creds = ServiceAccountCredentials.from_json_keyfile_dict(credentials, scope)
        else:
            raise TypeError("Credentials must be either a dictionary or a file path to a .json file")
        self.gs = gspread.authorize(creds)
        # wks: the spreadsheet; sht: the active worksheet (tab) within it.
        self.wks = self.gs.open_by_key(sheet_id)
        self.sht = self.wks.worksheet(tab_name)
        logger.info('Working sheet set to tab `{}` of `{}`'.format(tab_name, self.wks.title))
def change_gsheet(self, sheet_id, tab_name):
"""
Changes the current working sheet
:param str sheet_id: Sheet id found in the Google Sheet URL
:param str tab_name: Name of the tab to use within the Google Sheet
"""
self.wks = self.gs.open_by_key(sheet_id)
self.sht = self.wks.worksheet(tab_name)
logger.info('Working sheet set to tab `{}` of `{}`'.format(tab_name, self.wks.title))
def change_tab(self, tab_name):
"""
Changes the working tab name within the current sheet
:param str tab_name: Name of the tab to use within the Google Sheet
"""
self.sht = self.wks.worksheet(tab_name)
logger.info('Working tab set to `{}`'.format(tab_name))
    def df_to_sheet(self, df_input, row_start, col_start,
                    clear_sheet=False, clear_range=None, allow_formulas=True,
                    include_index=False, resize=False, include_column_header=True):
        """
        Exports a Pandas DataFrame to a Google Sheet

        :param pandas.DataFrame df_input: DataFrame to export
        :param int row_start: Integer value of the row (starting at 1)
        :param int col_start: Integer value of the column (starting at 1)
        :param bool clear_sheet: (default False) If the entire sheet should be cleared before dumping data
        :param bool clear_range: (default None) A range that should be cleared (eg. 'A1:B12')
        :param bool allow_formulas: (default True) Whether or not formulas should be executed or shown as a raw string
        :param bool include_index: (default False) Whether or not the DataFrame index should be shown in the sheet
        :param bool resize: (default False) If the sheet should resize when the data is too large to fit in the sheet
        :param bool include_column_header: (default True) If the data header should be dumped into the sheet
        """
        # NOTE(review): these are the *last* row/column indices touched, not
        # counts, although the log line below reports them as counts
        col_num = df_input.shape[1] + col_start - 1
        row_num = df_input.shape[0] + row_start - 1
        if clear_sheet:
            self.sht.clear()
        elif clear_range:
            # Blank out only the requested range, in one batched update
            cell_range = self.sht.range(clear_range)
            for cell in cell_range:
                cell.value = ''
            self.sht.update_cells(cell_range)
        set_with_dataframe(worksheet=self.sht,
                           dataframe=df_input,
                           row=row_start,
                           col=col_start,
                           include_index=include_index,
                           include_column_header=include_column_header,
                           resize=resize,
                           allow_formulas=allow_formulas)
        logger.info('{} rows and {} columns exported to `{}` starting in row {} and '
                    'column {}'.format(row_num, col_num, self.sht.title, row_start, col_start))
    def sheet_to_df(self, col_list, header, evaluate_formulas):
        """
        Grabs data from a Google Sheet and returns it as a Pandas DataFrame

        :param list col_list: List of ints representing the column number to extract (eg. range(0, 4) for [0, 1, 2, 3])
        :param int header: Integer of the row to use as a column (0 indexed)
        :param bool evaluate_formulas: Whether or not the formulas should be computed before extracting the cell content
        :return pandas.DataFrame: The sheet contents as a DataFrame object
        """
        # NOTE(review): 'parse_date' looks like a typo for pandas' 'parse_dates'
        # keyword (extra options are forwarded to the pandas parser) -- verify
        # against the gspread_dataframe documentation
        df = get_as_dataframe(self.sht, parse_date=True, usecols=col_list, header=header,
                              evaluate_formulas=evaluate_formulas).dropna(how='all')
        # Rows that are entirely empty are dropped above
        return df
def timestamp_to_cell(self, cell, fmt="%Y-%m-%d %H:%M %Z", tz='US/Central'):
"""
Exports a timestamp to a single cell in the Google Sheet
:param str cell:
:param str fmt: (default "%Y-%m-%d %H:%M %Z")
:param str tz: (default "US/Central")
"""
now_time = datetime.now(timezone(tz))
self.sht.update_acell(cell, now_time.strftime(fmt))
str_time = now_time.strftime(fmt)
logger.info("`{}` exported to cell `{}` of `{}`".format(str_time, cell, self.sht.title))
def set_cell(self, cell, cell_content):
"""
Sets a cell to a given value
:param str cell: Cell reference (eg. 'A2')
:param str cell_content: Desired cell content
"""
self.sht.update_acell(cell, cell_content)
logger.info("Cell `{cell}` change to `{cell_content}`".format(**locals()))
def get_cell(self, cell):
"""
Gets the current value of a given cell in the sheet
:param str cell: Cell reference (eg. 'A2')
:return: The current contents of the cell
"""
return self.sht.acell(cell).value
| brandenc40/gsheet-api | gsheet_api/gsheet_api.py | gsheet_api.py | py | 7,292 | python | en | code | 4 | github-code | 90 |
3692533087 | import argparse
import logging
import subprocess
import os
import re
from tempfile import NamedTemporaryFile
from lxml import etree
from .exceptions import SysException
logger = logging.getLogger(__name__)
def cmd_parser():
    """Build the command-line argument parser for the MySQL→MongoDB replicator.

    All options are boolean flags (``store_true``) that default to False.

    :return: configured :class:`argparse.ArgumentParser`
    """
    parser = argparse.ArgumentParser(description='Replicate a MySQL database to MongoDB')
    # (option flag, dest attribute, help text) -- fixes the 'mmongodb' typo in
    # the original --mysqldump-schema help string
    flags = [
        ('--mysqldump-schema', 'mysqldump_schema',
         'Run mysqldump to create new databases on mongodb, but not import any data '
         'so you can review mongodb schema before importing data'),
        ('--mysqldump-data', 'mysqldump_data', 'Run mysqldump to import only data'),
        ('--mysqldump-complete', 'mysqldump_complete', 'Run mysqldump to import schema and data'),
        ('--start', 'start', 'Start the daemon process'),
        ('--stop', 'stop', 'Stop the daemon process'),
        ('--restart', 'restart', 'Restart the daemon process'),
        ('--status', 'status', 'Status of the daemon process'),
    ]
    for option, dest, help_text in flags:
        parser.add_argument(option, dest=dest, action='store_true',
                            help=help_text, default=False)
    return parser
def run_mysqldump(dump_type, conf, mongodb):
    """Dump each configured database with mysqldump and load it into MongoDB.

    :param str dump_type: 'schema', 'data' or 'complete'
    :param dict conf: mysql settings, incl. comma-separated 'databases'
    :param mongodb: MongoDB wrapper handed to the parsers
    :return: True when every database was processed
    """
    for db in conf['databases'].split(','):
        try:
            dump_file = mysqldump_cmd(conf, db, dump_type=dump_type)
        except Exception as e:
            raise SysException(e)
        # 'complete' runs the schema pass first, then the data pass -- same
        # order as handling each dump type separately
        parsers = []
        if dump_type in ('schema', 'complete'):
            parsers.append(mysqldump_parser_schema)
        if dump_type in ('data', 'complete'):
            parsers.append(mysqldump_parser_data)
        for parser in parsers:
            try:
                parser(dump_file, mongodb)
            except Exception as e:
                raise SysException(e)
    return True
def process_data_buffer(buf, table, db, mongodb):
    """Parse one <row> XML fragment and insert it as a document into MongoDB.

    :param str buf: XML text of a single mysqldump <row> element
    :param str table: destination collection name
    :param str db: destination database name
    :param mongodb: MongoDB wrapper exposing insert(doc, db, table)
    """
    xml_parser = etree.XMLParser(recover=True)
    row_node = etree.fromstring(buf, parser=xml_parser)
    # One document per row: each <field name="..."> becomes a key/value pair
    doc = {child.attrib['name']: child.text
           for child in row_node if child.tag == 'field'}
    try:
        mongodb.insert(doc, db, table)
    except Exception as e:
        raise SysException(e)
    del row_node
def process_schema_buffer(buf, table, db, mongodb):
    """Parse one <table_structure> fragment and store the table's primary key.

    :param str buf: XML text of a single mysqldump <table_structure> element
    :param str table: table name the structure belongs to
    :param str db: database name the table belongs to
    :param mongodb: MongoDB wrapper exposing insert_primary_key(doc)
    """
    xml_parser = etree.XMLParser(recover=True)
    structure_node = etree.fromstring(buf, parser=xml_parser)
    doc = {
        '_id': db + '.' + table,
        # Columns flagged as primary key in the dumped structure
        'primary_key': [child.attrib['Field']
                        for child in structure_node
                        if child.tag == 'field' and child.attrib['Key'] == 'PRI'],
        'table': table,
        'db': db,
    }
    try:
        mongodb.insert_primary_key(doc)
    except Exception as e:
        raise SysException(e)
    del structure_node
def mysqldump_parser_data(dump_file, mongodb):
    """Stream a mysqldump XML file line by line and load every row into MongoDB.

    Rows are buffered between <row> and </row> markers and handed to
    process_data_buffer(); the binlog position from the CHANGE MASTER comment
    is persisted so replication can resume from the dump point.

    :param str dump_file: path of the XML dump produced by mysqldump_cmd()
    :param mongodb: MongoDB wrapper (drop_db, write_log_pos, make_db_as_parsed)
    """
    inputbuffer = ''
    # Line classifiers for the mysqldump XML structure
    db_start = re.compile(r'.*<database name.*', re.IGNORECASE)
    tb_start = re.compile(r'.*<table_data.*', re.IGNORECASE)
    row_start = re.compile(r'.*<row>.*', re.IGNORECASE)
    row_end = re.compile(r'.*</row>.*', re.IGNORECASE)
    master_log = re.compile(r'.*CHANGE MASTER.*', re.IGNORECASE)
    db = ''
    table = ''
    log_file = None
    log_pos = None
    with open(dump_file, 'r') as inputfile:
        # append is True while we are inside a <row>...</row> block
        append = False
        for line in inputfile:
            if row_start.match(line):
                # Start buffering a new row
                inputbuffer = line
                append = True
            elif row_end.match(line):
                # Row complete: parse it and insert into the current db/table
                inputbuffer += line
                append = False
                process_data_buffer(inputbuffer, table, db, mongodb)
                inputbuffer = None
                del inputbuffer
            elif append:
                # Interior line of the current row
                inputbuffer += line
            elif db_start.match(line):
                # New database section: drop any stale copy before reloading
                db = re.findall('name="(.*?)"', line, re.DOTALL)[0]
                try:
                    mongodb.drop_db(db)
                except Exception as e:
                    raise SysException(e)
            elif tb_start.match(line):
                # New table section: remember the target collection name
                table = re.findall('name="(.*?)"', line, re.DOTALL)[0]
            elif master_log.match(line):
                # Binlog coordinates from the --master-data=2 comment
                log_file = re.findall("MASTER_LOG_FILE='(.*?)'", line, re.DOTALL)[0]
                log_pos = re.findall("MASTER_LOG_POS=(.*?);", line, re.DOTALL)[0]
            # NOTE(review): this runs on *every* line once the coordinates are
            # known, rewriting the same position repeatedly -- confirm intended
            if log_file is not None and log_pos is not None:
                try:
                    mongodb.write_log_pos(log_file, log_pos)
                except Exception as e:
                    raise SysException(e)
    try:
        mongodb.make_db_as_parsed(db, 'data')
    except Exception as e:
        logger.error('Cannot insert db ' + db + ' as parsed')
def mysqldump_parser_schema(dump_file, mongodb):
    """Stream a mysqldump XML file and record each table's primary key.

    Buffers <table_structure>...</table_structure> blocks and hands them to
    process_schema_buffer().

    :param str dump_file: path of the XML dump produced by mysqldump_cmd()
    :param mongodb: MongoDB wrapper (insert_primary_key, make_db_as_parsed)
    """
    inputbuffer = ''
    # Line classifiers for the schema sections of the dump
    db_start = re.compile(r'.*<database name.*', re.IGNORECASE)
    tb_start = re.compile(r'.*<table_structure.*', re.IGNORECASE)
    tb_end = re.compile(r'.*</table_structure.*', re.IGNORECASE)
    db = ''
    table = ''
    with open(dump_file, 'r') as inputfile:
        # append is True while inside a <table_structure> block
        append = False
        for line in inputfile:
            if tb_start.match(line):
                # Start buffering a new table structure
                inputbuffer = line
                append = True
                table = re.findall('name="(.*?)"', line, re.DOTALL)[0]
            elif tb_end.match(line):
                # Structure complete: extract and store the primary key
                inputbuffer += line
                append = False
                process_schema_buffer(inputbuffer, table, db, mongodb)
                inputbuffer = None
                del inputbuffer
            elif append:
                # Interior line of the current table structure
                inputbuffer += line
            elif db_start.match(line):
                db = re.findall('name="(.*?)"', line, re.DOTALL)[0]
    try:
        mongodb.make_db_as_parsed(db, 'schema')
    except Exception as e:
        logger.error('Cannot insert db ' + db + ' as parsed')
    # TODO add index from mysql schema
def mysqldump_cmd(conf, db, dump_type):
    """Run mysqldump for one database and capture the XML output in a temp file.

    :param dict conf: connection settings ('user', 'host', 'port', 'password')
    :param str db: database name to dump
    :param str dump_type: 'schema' (DDL only), 'data' (rows only) or 'complete'
    :return str: path of the temporary file holding the dump
    """
    # delete=False: the caller parses (and keeps) the file after we return
    dump_file = NamedTemporaryFile(delete=False)
    # --master-data=2 embeds the binlog position as a comment in the dump
    dumpcommand = ['mysqldump',
                   '--user=' + conf['user'],
                   '--host=' + conf['host'],
                   '--port=' + conf['port'],
                   '--force',
                   '--xml',
                   '--master-data=2']
    if conf['password'] != '':
        dumpcommand.append('--password=' + conf['password'])
    if dump_type == 'schema':
        dumpcommand.append('--no-data')
    elif dump_type == 'data':
        dumpcommand.append('--no-create-db')
        dumpcommand.append('--no-create-info')
    dumpcommand.append(db)
    logger.debug('executing: {0}'.format(' '.join(dumpcommand)))
    # Unbuffered binary file so mysqldump's stdout lands directly on disk
    with open(dump_file.name, 'wb', 0) as f:
        try:
            p1 = subprocess.Popen(dumpcommand, stdout=f)
        except Exception as e:
            raise SysException(e)
        # NOTE(review): the exit code is not checked; with --force a failed
        # dump may leave a partial file -- confirm best-effort is intended
        p1.wait()
    return dump_file.name
# TODO save log_pos to mongo to start from here with replicator
class LoggerWriter:
    """File-like adapter that forwards write() calls to a logger.

    Useful for redirecting sys.stdout/sys.stderr into the logging system.
    """
    def __init__(self, logger, level):
        # Target logger and the level every message is emitted at
        self.logger = logger
        self.level = level
    def write(self, message):
        # Skip the bare newlines print() emits between messages
        if message != '\n':
            self.logger.log(self.level, message)
def flush(self):
return True | njordr/mymongo | mymongolib/utils.py | utils.py | py | 8,821 | python | en | code | 6 | github-code | 90 |
32409536696 | '''
通过肢体姿态检测,得到左右手label
'''
import os
import cv2
from json_tools import make_json_head
from pose_tool import PoseLandmark, landmark_to_box, bb_iou
import numpy as np
import json
from tqdm import tqdm
from convert_coco_format import convert_coco_format_left_label
from copy import deepcopy
mode = 'val'  # dataset split to process (e.g. 'train' or 'val')
# E:\whole_body_data\annotations\person_keypoints_{mode}2017.json
json_dir = fr'E:\left_hand_label_data\detect\{mode}_badcase.json'  # input annotations
txt_dir = fr"E:\left_hand_label_data\pose\record_{mode}.txt"  # record file (appears unused in this script)
SAVE_PATH = r"E:\left_hand_label_data\pose"  # output directory for the labelled json
JSON_NAME = f'{mode}.json'  # output file name
debug = 1  # non-zero: show each labelled image in an OpenCV window
def init():
    """Prepare the debug window and output directory; return json head and pose model."""
    if debug:
        cv2.namedWindow("aa", cv2.WINDOW_NORMAL)
    if not os.path.exists(SAVE_PATH):
        os.makedirs(SAVE_PATH)
        print('path of %s is build' % SAVE_PATH)
    return make_json_head(), PoseLandmark()
def main():
    """Label each annotation as left/right hand by matching its keypoint box
    against the hand boxes predicted by the body-pose model."""
    json_head, hand_mode = init()
    with open(json_dir, 'r') as f:
        json_data = json.load(f)
    images = json_data['images']
    annotations = json_data['annotations']
    assert len(images) == len(annotations)
    for sample_idx in tqdm(range(len(images))):
        image_info = images[sample_idx]
        annotation_info = annotations[sample_idx]
        image = cv2.imread(image_info['image_dir'])
        # Hand box derived from the annotated keypoints (21 x (x, y, score))
        json_landmarks = np.array(annotation_info['keypoints']).reshape(21, 3)
        json_box = landmark_to_box(json_landmarks)
        # Hand boxes (with left/right type) predicted by the pose model
        boxes, _ = hand_mode(image)
        if not boxes:
            # No pose detection for this image: nothing to match against
            continue
        # BUGFIX: the original appended the bb_iou *function object* instead of
        # its return value, and then used max(iou_list) as an index. Compute
        # the IoU values and take the index of the best-overlapping box.
        iou_list = [bb_iou(json_box, box['box']) for box in boxes]
        best_idx = int(np.argmax(iou_list))
        hand_type = boxes[best_idx]['type']
        if debug:
            img = cv2.putText(image, f'{hand_type}', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1)
            cv2.imshow('aa', img)
            cv2.waitKey(0) & 0xFF
        convert_coco_format_left_label(json_head, deepcopy(image_info), deepcopy(annotation_info), hand_type,
                                       hand_type)
    json_path = os.path.join(SAVE_PATH, JSON_NAME)
    with open(json_path, 'w') as fw:
        json.dump(json_head, fw)
    print(f"{json_path} have succeed to write")
if __name__ == '__main__':
main() | Daming-TF/HandData | scripts/Tools/Mark_Tools/Left_label_data/run_pose.py | run_pose.py | py | 2,448 | python | en | code | 1 | github-code | 90 |
41915684581 | from django.core.management.base import BaseCommand
from django.utils import timezone
from pokemon import utilities
class Command(BaseCommand):
    """Management command that populates the database from an evolution-chain id."""
    # NOTE(review): this help string does not match what the command does
    # (it populates the database, not display the time) -- consider updating it
    help = 'Displays current time'

    def add_arguments(self, parser):
        # Positional argument: one or more evolution chain ids. nargs='+'
        # always produces a list, hence the [0] access in handle()
        parser.add_argument('evolutionChainRangeID',
                            nargs='+',
                            type=int,
                            default=1,
                            help='EvolutionChainRangeID parameter')

    def handle(self, *args, **kwargs):
        """Fetch the chain for the first given id and report whether it was created."""
        range_chain_id = kwargs['evolutionChainRangeID']
        time = timezone.now().strftime('%X')
        self.stdout.write(self.style.WARNING("Begin.... %s" % time))
        self.stdout.write(self.style.WARNING("Begin.... Range chain ID ... %s" % range_chain_id))
        utilities_pokemon = utilities.UtilitiesPokemon()
        # Only the first id of the list is processed
        status = utilities_pokemon.searchChainByRangeID(range_chain_id[0])
        if status:
            self.stdout.write(self.style.SUCCESS("Register created.... %s" % time))
        else:
            self.stdout.write(self.style.WARNING("Register already exists.... %s" % time))
        time = timezone.now().strftime('%X')
self.stdout.write(self.style.SUCCESS("Success.... %s" % time)) | andresRah/PokemonDjango | pokemon/management/commands/poblate_database.py | poblate_database.py | py | 1,227 | python | en | code | 0 | github-code | 90 |
19115465322 | from http import HTTPStatus
from uuid import UUID
from fastapi import APIRouter, Depends, HTTPException
from app.constants import Role
from app.exceptions import (
AccountNotFoundError,
)
from app.schemas import AccountSchema, TransactionSchema, UserSchema
from app.use_cases.accounts import (
GetTransactionsUseCase,
GetUserAccountsUseCase,
GetUserAccountUseCase,
)
from app.use_cases.users import get_current_user
router = APIRouter()
@router.get("/accounts", response_model=list[AccountSchema])
async def get_accounts(
    user: UserSchema = Depends(get_current_user),
    use_case: GetUserAccountsUseCase = Depends(GetUserAccountsUseCase),
) -> list[AccountSchema]:
    """List every account; restricted to accountant and administrator roles."""
    allowed_roles = (Role.ADMIN, Role.ACCOUNTANT)
    if user.role not in allowed_roles:
        raise HTTPException(
            status_code=HTTPStatus.FORBIDDEN,
            detail="Only accountants and administrators can see all accounts",
        )
    return await use_case.execute()
@router.get("/accounts/{account_id}", response_model=AccountSchema)
async def get_account(
    account_id: UUID | str,
    user: UserSchema = Depends(get_current_user),
    use_case: GetUserAccountUseCase = Depends(GetUserAccountUseCase),
):
    """Fetch a single account visible to the current user; 404 if unknown."""
    try:
        account = await use_case.execute(account_id, user)
    except AccountNotFoundError:
        raise HTTPException(
            status_code=HTTPStatus.NOT_FOUND,
            detail="Account not found",
        )
    return account
@router.get(
    "/accounts/{account_id}/transactions", response_model=list[TransactionSchema]
)
async def get_account_transactions(
    account_id: UUID | str,
    user: UserSchema = Depends(get_current_user),
    use_case: GetTransactionsUseCase = Depends(GetTransactionsUseCase),
):
    """Return the transactions of one account visible to the current user; 404 if unknown."""
    try:
        transactions = await use_case.execute(account_id, user)
    except AccountNotFoundError:
        raise HTTPException(
            status_code=HTTPStatus.NOT_FOUND,
            detail="Account not found",
        )
    return transactions
| nikvst/async-arch-course | accounting/app/api/accounts.py | accounts.py | py | 1,925 | python | en | code | 0 | github-code | 90 |
16001510475 | from picozero import pico_led, LED, Switch
from time import sleep
# Blink the on-board Pico LED once at startup
pico_led.on()
sleep(1)
pico_led.off()

luciole = LED(13)  # firefly LED on GP13
interrupteur = Switch(18)  # switch on GP18

# Flash the firefly LED while the switch is closed; keep it off otherwise
while True:
    if interrupteur.is_closed:  # the switch is closed (connected)
        luciole.on()
        sleep(0.5)
        luciole.off()
        sleep(2.3)
    else:  # the switch is open (not connected)
        luciole.off()
        sleep(0.1)
| raspberrypilearning/led-firefly | fr-FR/solutions/led_firefly_complete.py | led_firefly_complete.py | py | 474 | python | fr | code | 2 | github-code | 90 |
2481395631 | from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.mail import send_mail
from django.views.generic import ListView
from django.db.models import Count
# from django.contrib.postgres.search import SearchVector, SearchQuery, SearchRank
from django.contrib.postgres.search import TrigramSimilarity
from taggit.models import Tag
from .models import Post, Comment
from .forms import EmailPostForm, CommentForm, SearchForm
class PostListView(ListView):
    """Class-based alternative to post_list: paginated list of published posts."""
    queryset = Post.published.all()
    context_object_name = 'posts'
    paginate_by = 3
    template_name = 'blog/post/list.html'
def post_list(request, tag_slug=None):
    """Paginated list of published posts, optionally filtered by tag."""
    object_list = Post.published.all()
    tag = None
    if tag_slug:
        tag = get_object_or_404(Tag, slug=tag_slug)
        object_list = object_list.filter(tags__in=[tag])
    paginator = Paginator(object_list, 3)  # 3 posts per page
    page = request.GET.get('page')
    try:
        posts = paginator.page(page)
    except PageNotAnInteger:
        # If the page parameter is not an integer, return the first page
        posts = paginator.page(1)
    except EmptyPage:
        # If the page number is beyond the last page, return the last page
        posts = paginator.page(paginator.num_pages)
    return render(request, 'blog/post/list.html', {'page': page, 'posts': posts, 'tag': tag})
def post_detail(request, year, month, day, post):
    """Detail page for a published post: comments, comment form and similar posts."""
    post = get_object_or_404(Post, slug=post, status='published', publish__year=year,
                             publish__month=month, publish__day=day)
    # Active comments for this post
    comments = post.comments.filter(active=True)
    new_comment = None
    if request.method == 'POST':
        # A visitor submitted a comment
        comment_form = CommentForm(data=request.POST)
        if comment_form.is_valid():
            # Create the comment object but don't save it to the database yet
            new_comment = comment_form.save(commit=False)
            # Attach the comment to the current post
            new_comment.post = post
            # Persist the comment
            new_comment.save()
    else:
        comment_form = CommentForm()
    # Build the list of similar posts (sharing the most tags, newest first)
    post_tags_ids = post.tags.values_list('id', flat=True)
    similar_posts = Post.published.filter(tags__in = post_tags_ids).exclude(id=post.id)
    similar_posts = similar_posts.annotate(same_tags=Count('tags')).order_by('-same_tags',
                                                                            '-publish')[:4]
    return render(request, 'blog/post/detail.html', {'post': post,
                                                     'comments': comments,
                                                     'new_comment': new_comment,
                                                     'comment_form': comment_form,
                                                     'similar_posts': similar_posts})
def post_share(request, post_id):
    """Share a published post by e-mail through a contact form.

    GET renders an empty share form; POST validates it and sends the e-mail.
    """
    # Retrieve the post by id; only published posts can be shared
    post = get_object_or_404(Post, id=post_id, status='published')
    sent = False
    if request.method == 'POST':
        # The form was submitted
        form = EmailPostForm(request.POST)
        if form.is_valid():
            # All form fields passed validation
            cd = form.cleaned_data
            # Build an absolute URL to the post and send the e-mail
            post_url = request.build_absolute_uri(post.get_absolute_url())
            subject = '{} ({}) recommends you reading "{}"'.format(cd['name'],
                                                                   cd['email'], post.title)
            message = 'Read "{}" at {}\n\n{}\'s comments:{}'.format(post.title,
                                                                    post_url,
                                                                    cd['name'], cd['comments'])
            # BUGFIX: send_mail expects from_email as a string, not a list
            # (the original passed [cd['email']] as the from address)
            send_mail(subject, message, cd['email'], [cd['to']])
            sent = True
    else:
        form = EmailPostForm()
    return render(request, 'blog/post/share.html', {'post': post, 'form': form, 'sent': sent})
def post_search(request):
    """Full-text search over post titles using PostgreSQL trigram similarity."""
    form = SearchForm()
    query = None
    results = []
    if 'query' in request.GET:
        form = SearchForm(request.GET)
        if form.is_valid():
            query = form.cleaned_data['query']
            # Previous weighted SearchVector/SearchRank implementation kept for
            # reference:
            # search_vector = SearchVector('title', weight='A') + SearchVector('body', weight='B')
            # search_query = SearchQuery(query)
            # results = Post.objects.annotate(
            #     search=search_vector,
            #     rank=SearchRank(search_vector, search_query)
            # ).filter(rank__gte=0.3).order_by('-rank')
            # Trigram similarity on the title, best matches first
            results = Post.objects.annotate(
                similarity=TrigramSimilarity('title', query),
            ).filter(similarity__gt=0.3).order_by('-similarity')
    return render(request, 'blog/post/search.html', {
        'form': form,
        'query': query,
        'results':results
    })
| koluchiynick/django_first_blog | blog/views.py | views.py | py | 5,606 | python | ru | code | 0 | github-code | 90 |
A, B, C = map(int, input().split())
N = int(input())

# Depth-first search over all ways to spend the N doublings on A, B or C
pending = [(A, B, C, N)]
found = False
while pending and not found:
    a, b, c, remaining = pending.pop()
    if a < b < c:
        found = True
    elif remaining > 0:
        pending.append((a * 2, b, c, remaining - 1))
        pending.append((a, b * 2, c, remaining - 1))
        pending.append((a, b, c * 2, remaining - 1))

print('Yes' if found else 'No')
| Aasthaengg/IBMdataset | Python_codes/p02601/s993853045.py | s993853045.py | py | 309 | python | en | code | 0 | github-code | 90 |
74380440937 | #+----------------------------------------------------+
#| 23/03/2019 - Report page for teachers
#| Created by Sahar Hosseini - Roman Blond
#| Shows a chart description and the report data as a table and chart,
#| and enables teachers to apply coherence, weight and penalty settings
#+----------------------------------------------------+
from PyQt5.QtGui import QFont
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget
from PyQt5.QtWidgets import QSlider, QGroupBox, QComboBox, QScrollArea, QTableWidget, QTableWidgetItem, \
QHBoxLayout
from Controller.pdfExport import PDFExport
from Controller.readAMC import *
from Controller.studentData import StudentData
from View.Charts import PlotCanvas
from View.coherence_page.coherence import *
from View.inputDate import DateInput
from View.report_page.pageStudents import FirstQuestion
from View.setting_page.pageSetting import Settings
#+--------------main class
class ReportPage(QWidget):
    """Main teacher report page: score table, per-question weight sliders and chart."""
    # Index into comboOptions used on startup (4 = Histogram)
    DEFAULT_CHART_INDEX = 4
    def __init__(self, parent=None):
        super(ReportPage, self).__init__(parent)
        self.controller = StudentData()
        self.plot = PlotCanvas(self.controller)
    def initUI(self, mainWindow):
        """Build the layout and install this page as the window's central widget."""
        self.mainWindow = mainWindow
        mainWindow.title = 'AMC Report'
        self.createGridLayout()
        windowLayout = QVBoxLayout()
        windowLayout.addWidget(self.horizontalGroupBox)
        self.setLayout(windowLayout)
        mainWindow.setCentralWidget(self)
    def createGridLayout(self):
        """Assemble the 2x3 grid: buttons/mean/combo on top, table/sliders/chart below."""
        self.horizontalGroupBox = QGroupBox("AMC report for teachers")
        # ---------------------grid layout --------------------
        layout = QGridLayout()
        for i in range(3):
            layout.setColumnStretch(i, 1)
        # ---------------------Mean and STD --------------------
        horizLayout = QHBoxLayout()
        row = QWidget(self.horizontalGroupBox)
        self.meanLabel = QLabel()
        self.stdLabel = QLabel()
        horizLayout.addWidget(self.meanLabel)
        horizLayout.addWidget(self.stdLabel)
        row.setLayout(horizLayout)
        layout.addWidget(row, 0, 1)
        # ---------------------Font of Mean and STD --------------------
        meanFont = QFont()
        meanFont.setBold(True)
        meanFont.setPointSize(13.0)
        self.meanLabel.setStyleSheet('QLabel {color: #128700;}')
        self.stdLabel.setStyleSheet('QLabel {color: #4a006b;}')
        self.meanLabel.setFont(meanFont)
        self.stdLabel.setFont(meanFont)
        # ---------------------text boxes --------------------
        # Put all of the choices together, linking the name and the function to call
        self.comboOptions = [
            ["Box Chart", self.plot.plot_box],
            ["Violin Chart", self.plot.plot_violin],
            ["Line Chart", self.plot.plot_line],
            ["Pie Chart", self.plot.plot_pie],
            ["Histogram", self.plot.plot_histo],
        ]
        self.cbChart = QComboBox()
        for elt in self.comboOptions:
            self.cbChart.addItem(elt[0])
        self.cbChart.setCurrentIndex(self.DEFAULT_CHART_INDEX)
        self.cbChart.resize(140, 30)
        layout.addWidget(self.cbChart, 0, 2)
        layout.addWidget(self.createBtnGroup(), 0, 0)
        self.cbChart.currentIndexChanged.connect(self.OnChangeCbChart)
        # ---------------------table view --------------------
        # scroll = QScrollArea()
        self.table = QTableWidget()
        # scroll.setWidget(table)
        layout.addWidget(self.table, 1, 0)
        self.initData()
        self.setTable()
        self.computeMeanAndSTD()
        # ---------------------slider weight --------------------
        numberOfQuestions = self.getNumberOfQuestions()
        arrCorrectAns = self.getPercentage()
        scrollArea = QScrollArea()
        scrollArea.setWidgetResizable(True)
        self.buildSlider = BuildSlider(self.refreshInterface, arrCorrectAns=arrCorrectAns,numberOfQuestions=numberOfQuestions)
        scrollArea.setWidget(self.buildSlider)
        layout.addWidget(scrollArea, 1, 1)
        # ---------------------chart view --------------------
        self.plot.plot_histo()
        self.selectedChart = self.DEFAULT_CHART_INDEX
        layout.addWidget(self.plot, 1, 2)
        self.horizontalGroupBox.setLayout(layout)
    def getNumberOfQuestions(self):
        """Number of distinct questions present in the boxes data."""
        listQuestions = self.boxes['question'].unique()
        numberOfQuestions = len(listQuestions)
        return numberOfQuestions
    def getPercentage(self):
        """Per-question correctness percentage: earned points / (weight * #students)."""
        listStudents = self.boxes['student'].unique()
        listQuestions = self.boxes['question'].unique()
        listQuestions.sort(axis = 0)
        numberOfStudents = len(listStudents)
        weights = ReadAMC.getWeights()
        correctAns = []
        for question in listQuestions:
            sumPointsOneQuestion = 0
            weight = weights.loc[weights['question'] == question, 'weight'].item()
            for student in self.scoreTable.index:
                sumPointsOneQuestion += self.scoreTable.loc[student, question]
            correctAns.append(round((sumPointsOneQuestion / (weight*numberOfStudents)) * 100, 2))
        return correctAns
    def computeMeanAndSTD(self):
        """Show mean and standard deviation of the last score-table column."""
        mean = self.scoreTable.iloc[:,-1].mean()
        std = self.scoreTable.iloc[:,-1].std()
        self.meanLabel.setText('Mean: {0}'.format(round(mean,2)))
        self.stdLabel.setText('STD: {0}'.format(round(std,2)))
    def sortTable(self):
        """Sort rows by index and columns: numeric question ids first, then labels."""
        self.scoreTable.sort_index(inplace=True)
        columns = self.scoreTable.columns
        toSort = []
        notes = []
        for value in columns:
            if isinstance(value, int):
                toSort.append(value)
            else:
                notes.append(value)
        newColumns = sorted(toSort) + notes
        self.scoreTable = self.scoreTable[newColumns]
    # Calls directly the good function in the array self.comboOptions
    def OnChangeCbChart(self,i):
        self.comboOptions[i][1]()
        self.selectedChart = i
    def initData(self):
        """Load boxes and scores from AMC (first load)."""
        boxes, resultatsPoints = ReadAMC.computeData()
        self.scoreTable = resultatsPoints.T
        self.boxes = boxes
    def updateData(self):
        """Reload boxes and scores from AMC (refresh)."""
        boxes, resultatsPoints = ReadAMC.updateData()
        self.scoreTable = resultatsPoints.T
        self.boxes = boxes
    def setTable(self):
        """Fill the QTableWidget from the score DataFrame (values rounded to 2 dp)."""
        self.sortTable()
        nbIndex = len(self.scoreTable.index)
        nbColumns = len(self.scoreTable.columns)
        colName = [] # column name
        rowName = [] # row name
        self.table.setColumnCount(nbColumns)
        self.table.setRowCount(nbIndex)
        for i in range(nbIndex):
            colName.append(str(self.scoreTable.index[i]))
            # NOTE(review): rowName gets the column labels appended once per
            # row, so it contains duplicates; Qt only uses the first nbColumns
            for j in range(nbColumns):
                rowName.append(str(self.scoreTable.columns[j]))
                value = self.scoreTable.iloc[i, j]
                if not isinstance(value, str):
                    value = str(round(value, 2))
                self.table.setItem(i, j, QTableWidgetItem(value))
        self.table.setHorizontalHeaderLabels(rowName)
        self.table.setVerticalHeaderLabels(colName)
        self.table.resizeRowsToContents()
        self.table.resizeColumnsToContents()
    def refreshInterface(self):
        """Reload data and redraw table, chart, mean/std and slider labels."""
        self.updateData()
        self.setTable()
        self.plot.refresh()
        self.comboOptions[self.selectedChart][1]()
        self.computeMeanAndSTD()
        arrCorrectAns = self.getPercentage()
        for i, label in enumerate(self.buildSlider.listOfLabels):
            label.setText("Question " + str(i+1) +" correctness: " + str(arrCorrectAns[i]) + " %")
    def createBtnGroup(self):
        """Build the top navigation/export button bar."""
        groupBox = QGroupBox()
        btnHome = QPushButton("Home")
        btnCoherence = QPushButton("Coherence")
        btnSettings = QPushButton("Settings")
        btnPDF = QPushButton("Export as Markdown")
        btnCSV = QPushButton("Export as CSV")
        btnCSV.setEnabled(False)
        btnStudent = QPushButton("Student report")
        btnHome.clicked.connect(self.showHome)
        btnCoherence.clicked.connect(self.showCoherence)
        btnSettings.clicked.connect(self.showSettings)
        btnPDF.clicked.connect(self.exportPDF)
        # btnCSV.clicked.connect(self.exportCSV)
        btnStudent.clicked.connect(self.showStudentReport)
        hbox = QHBoxLayout()
        hbox.addWidget(btnHome)
        hbox.addWidget(btnCoherence)
        hbox.addWidget(btnSettings)
        hbox.addWidget(btnPDF)
        hbox.addWidget(btnCSV)
        hbox.addWidget(btnStudent)
        hbox.addStretch(1)
        groupBox.setLayout(hbox)
        return groupBox
    def showHome(self):
        """Navigate back to the home page."""
        # Do not move this import, or the program crashes
        from View.home_page.home import HomePage
        home = HomePage(self.mainWindow)
        home.initUI(self.mainWindow)
    def showStudentReport(self):
        """Open the per-student report dialog."""
        studentDialog = FirstQuestion(self.mainWindow, self.boxes, self.scoreTable)
        studentDialog.exec_()
    def showCoherence(self):
        """Open the coherence dialog; refresh if it was accepted."""
        coherencePage = CoherencePage(self.mainWindow)
        n = coherencePage.exec_()
        if n == 1:
            self.refreshInterface()
    def showSettings(self):
        """Open the settings dialog; refresh if it was accepted."""
        settingsPage = Settings(self.mainWindow, fetchData=True)
        n = settingsPage.exec_()
        if n == 1:
            self.refreshInterface()
    def exportPDF(self):
        """Ask for a date, then export the report and confirm to the user."""
        dateInput = DateInput()
        n = dateInput.exec_()
        if n == 1:
            pdfExport = PDFExport()
            pdfExport.export()
            QMessageBox.information(self, 'Export done', 'Export done', QMessageBox.Ok)
#+--------------builder slider has been written by Arthur Lecert
class BuildSlider(QWidget):
    """Scrollable column of per-question weight sliders with a save button."""
    # NOTE(review): arrCorrectAns=[] is a mutable default argument; it is only
    # read here, but passing the list explicitly is safer
    def __init__(self, onValidate, parent=None, initialValue=1.0, arrCorrectAns=[], numberOfQuestions=1):
        super(BuildSlider, self).__init__(parent)
        self.onValidate = onValidate
        # ---------------------weight
        self.layout = QVBoxLayout()
        self.listOfQuestions = []
        self.listOfLabels = []
        for i in range(numberOfQuestions):
            currentLabel = QLabel("Question " + str(i+1) +" correctness: " + str(arrCorrectAns[i]) + " %")
            self.addSlider(currentLabel, QLabel(str(initialValue)), initialValue)
            self.listOfLabels.append(currentLabel)
        self.b1 = QPushButton("Save weight")
        self.b1.clicked.connect(self.writeWeights)
        self.layout.addWidget(self.b1)
        self.setLayout(self.layout)
        self.setWindowTitle("Module AMC")
    def addSlider(self, title, weightText, initialValue):
        """Append one labelled DoubleSlider (float range 0..2) to the layout."""
        title.setAlignment(Qt.AlignCenter)
        self.layout.addWidget(title)
        weightText.setAlignment(Qt.AlignCenter)
        self.layout.addWidget(weightText)
        slider = DoubleSlider(Qt.Horizontal)
        slider.setMinimum(0.0)
        slider.setMaximum(2.0)
        slider.setValue(initialValue)
        slider.setTickPosition(QSlider.TicksBelow)
        slider.setTickInterval(20)
        self.layout.addWidget(slider)
        slider.valueChanged.connect(lambda: self.valuechange(weightText, slider))
        self.listOfQuestions.append(slider)
    def valuechange(self, weightText, slider):
        # Mirror the slider's float value in its companion label
        weightText.setText(str(slider.value()))
    def writeWeights(self):
        """Persist every slider's weight (questions are 1-indexed) and notify the caller."""
        for i, slider in enumerate(self.listOfQuestions):
            changeWeight(i + 1, slider.value())
        self.onValidate()
class DoubleSlider(QSlider):
    """QSlider subclass that maps the integer slider range onto floats.

    The underlying QSlider works on integers 0..10**decimals; value() and
    setValue() translate between that range and [minimum(), maximum()].
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.decimals = 2
        self._max_int = 10 ** self.decimals
        super().setMinimum(0)
        super().setMaximum(self._max_int)
        self._min_value = 0.0
        self._max_value = 1.0

    @property
    def _value_range(self):
        # Width of the float interval the integer range is mapped onto
        return self._max_value - self._min_value

    def value(self):
        """Return the current slider position as a float in [minimum, maximum]."""
        return float(super().value()) / self._max_int * self._value_range + self._min_value

    def setValue(self, value):
        """Set the slider position from a float in [minimum, maximum]."""
        super().setValue(int((value - self._min_value) / self._value_range * self._max_int))

    def setMinimum(self, value):
        if value > self._max_value:
            raise ValueError("Minimum limit cannot be higher than maximum")
        self._min_value = value
        self.setValue(self.value())

    def setMaximum(self, value):
        if value < self._min_value:
            # BUGFIX: the original raised with the setMinimum error message here
            raise ValueError("Maximum limit cannot be lower than minimum")
        self._max_value = value
        self.setValue(self.value())

    def minimum(self):
        return self._min_value

    def maximum(self):
        return self._max_value
if __name__ == '__main__':
    # Standalone entry point: launch the report page in its own window
    app = QApplication(sys.argv)
    window = QMainWindow()
    report = ReportPage(window)
    report.initUI(window)
    window.showMaximized()
    sys.exit(app.exec_())
| Arthlec/AMC | Project/View/report_page/pageReport.py | pageReport.py | py | 12,829 | python | en | code | 1 | github-code | 90 |
17955254869 | import sys
input = sys.stdin.readline
# sys.setrecursionlimit(100000)
def main():
    """Read N integers and return how many values appear an odd number of times."""
    N = int(input().strip())
    seen = set()
    for _ in range(N):
        # XOR with a singleton set toggles membership
        seen ^= {int(input().strip())}
    return len(seen)


if __name__ == "__main__":
    print(main())
| Aasthaengg/IBMdataset | Python_codes/p03607/s095112277.py | s095112277.py | py | 337 | python | en | code | 0 | github-code | 90 |
7155690346 | import logging
import logging.config
import time
from typing import Callable
logger = logging.getLogger()
class Timer(object):
    """Mixin driving a one-shot timer from ``self.metadata['timer']``.

    The host class is expected to provide ``metadata`` and ``setState``.
    """

    # Just for type hinting
    metadata: dict
    setState: Callable

    def __init__(self):
        super().__init__()
        self.timerRunning = False

    def runTimer(self, mqttclient):
        """Turn the device on while the configured timer window is active."""
        try:
            timer_conf = self.metadata['timer']
            start = timer_conf['initialTimestamp']
            duration = timer_conf['duration']
        except KeyError:
            # No timer configured: nothing to do
            return

        active = int(time.time()) < start + duration
        if active and not self.timerRunning:
            self.timerRunning = True
            self.setState(mqttclient, True)
        elif not active and self.timerRunning:
            self.timerRunning = False
            self.setState(mqttclient, False)
| csanz91/IotCloud | python-modules2/source/timer.py | timer.py | py | 1,017 | python | en | code | 3 | github-code | 90 |
73405790695 | from fastapi import APIRouter, HTTPException, Depends
from server import auth
from rest.model.buy import BuyReq, BuyResp
from backend.buy import buy_products
buy = APIRouter(
prefix = '/buy',
tags = ['buy'],
)
@buy.post('',
    status_code = 200,
    description = 'Buy Products',
    response_model = BuyResp,
)
async def buying(req: BuyReq, user: str = Depends(auth.buyer)):
    """POST /buy: purchase products on behalf of the authenticated buyer.

    Delegates to backend.buy.buy_products; any backend failure is surfaced
    as HTTP 400 with the original error message embedded in the detail.
    """
    try:
        return buy_products(req, user)
    except Exception as x:
        raise HTTPException(
            status_code = 400,
            detail = f'Buy Error ! {x}',
        )
| louis-riviere-xyz/vending | rest/routes/buy.py | buy.py | py | 583 | python | en | code | 0 | github-code | 90 |
3350074566 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import random
from Train.data_gen import Generator
from tqdm import tqdm
def train(net=None, model_name_and_path='./Models_weights/model',device= torch.device("cuda:0" if torch.cuda.is_available() else "cpu")):
    """Train `net` on batches assembled from Generator files and checkpoint the best model.

    net: torch.nn.Module to train (must not be None).
    model_name_and_path: checkpoint path prefix; '.pt' is appended on save.
    device: device the batches are moved to before the forward pass.

    Each "batch" is built by concatenating 16 consecutive files starting at a
    random file index. Training runs 15 epochs of 3500 batches with a step
    learning-rate schedule (0.01 -> 0.001 at epoch 5 -> 0.0001 at epoch 10).
    After the last batch of every epoch, 750 validation batches are scored and
    the model is saved whenever the average validation L1 loss improves.
    """
    optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
    # optimizer = optim.Adam(net.parameters(), lr=0.01, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.0005, amsgrad=False)
    gen = Generator()
    criterion = nn.L1Loss()
    for epoch in range(1,16): # loop over the dataset multiple times
        print('epoch:',epoch)
        running_loss = 0.0
        i = 0
        # decreasing the learning rate after 5 epochs
        if epoch == 5:
            for g in optimizer.param_groups:
                g['lr'] = 0.001
        elif epoch == 10:
            for g in optimizer.param_groups:
                g['lr'] = 0.0001
        for i in tqdm(range(0,3500)):
            # get the inputs; data is a list of [inputs, labels]
            k=0
            # NOTE(review): file_no can reach 60016 here, overlapping the
            # validation range that starts at 60013 -- confirm intended.
            file_no = random.randint(0, 60000)
            for k in range (0,16):
                file_no = file_no + 1
                if k == 0 :
                    inputs, labels = gen.get_data(file_no)
                if k > 0 :
                    inputs_stack, labels_stack = gen.get_data(file_no)
                    inputs = torch.cat([inputs, inputs_stack])
                    labels = torch.cat([labels, labels_stack])
            INN = inputs.to(device)
            OUT = labels.to(device)
            optimizer.zero_grad()
            outputs = net(INN)
            loss = criterion(outputs, OUT)
            loss.backward()
            optimizer.step()
            # print statistics
            running_loss += loss.item()
            # LOSS accumulates validation loss below; it is re-zeroed every
            # batch, and only the value set at i == 3499 is ever used.
            LOSS = 0.0
            if i == 3499: # print every 2000 mini-batches# CHANGE THIS VALUE
                print('[%d, %5d] loss: %.5f' %
                    (epoch , i + 1, running_loss / 3500))
                running_loss = 0.0
                g = 0
                # Validation pass over 750 random 16-file batches.
                # NOTE(review): runs without net.eval()/torch.no_grad(), so
                # autograd state is still tracked -- confirm intended.
                for g in range(0,750):
                    k=0
                    file_no = random.randint(60012, 72730)
                    for k in range (0,16):
                        file_no = file_no + 1
                        if k == 0 :
                            inputs, labels = gen.get_data(file_no)
                        if k > 0 :
                            inputs_stack, labels_stack = gen.get_data(file_no)
                            inputs = torch.cat([inputs, inputs_stack])
                            labels = torch.cat([labels, labels_stack])
                    INN = inputs.to(device)
                    OUT = labels.to(device)
                    # zero the parameter gradients
                    optimizer.zero_grad()
                    # forward + backward + optimize
                    outputs = net(INN)
                    loss = criterion(outputs, OUT)
                    LOSS += loss.item()
                LOSS = LOSS/750
                print('Avergae_validation_loss was='+str(LOSS) )
                running_loss = 0.0
                # Save unconditionally on the first epoch, afterwards only on improvement.
                if epoch ==1 :
                    best_loss = LOSS
                    torch.save(net, model_name_and_path +str('.pt'))
                if LOSS < best_loss:
                    best_loss = LOSS
                    torch.save(net, model_name_and_path +str('.pt'))
                LOSS = 0
                running_loss = 0.0
    print('Finished Training')
| ALI7861111/Hand-Pose-Estimation | Train/trainer_CNN.py | trainer_CNN.py | py | 3,698 | python | en | code | 0 | github-code | 90 |
16548487004 | from django.urls import path
from . import views
# URL namespace used when reversing, e.g. {% url "main:Home" %}.
app_name = "main"
urlpatterns = [
    # Public pages
    path("", views.homepage, name="Home"),
    path("discussion/<str:dis_t>", views.discussion, name="Discussion"),
    path("newmessage", views.newmessage_request, name="NewMessage"),
    # Account handling
    path("login", views.login_request, name="Login"),
    path("logout", views.logout_request, name="Logout"),
    path("register", views.register, name="Register"),
    path("profile", views.profile, name="Profile"),
    # Discussion management (create / delete / edit, addressed by title)
    path("newdiscussion", views.newdiscussion_request, name="NewDiscussion"),
    path("deletediscussion/<str:dis_t>", views.deletedis_request, name="DeleteDiscussion"),
    path("editdiscussion/<str:old_title>", views.editdiscussion_request, name="EditDiscussion"),
    path("editprofile", views.editprofile_request, name="EditProfile"),
    path("editcredentials", views.editcredentials_request, name="EditCredentials"),
    path("rules", views.rules_page, name="Rules"),
]
| SmauDistribution/Fortheum | main/urls.py | urls.py | py | 984 | python | en | code | 0 | github-code | 90 |
31313692565 | """
Homework 5: In this assignment we are creating a tool that trains Decision tree, K nearest neighbors and neural network
machine learning models to perform the task of classifying online reviews.
"""
# Importing libraries
import joblib
import json
import math
import nltk
import requests
from sklearn import model_selection, metrics
from sklearn import tree, neighbors, neural_network
import textblob
import time
import warnings
warnings.filterwarnings("ignore")
nltk.download("punkt")
# Below code is responsible for loading data, preparing and transforming input and output data.
# Performing splitting and training 3 models
# Printing the accuracy scores
# Saving the best performing classifier (in this case it is "Neural Network") in file.
response = requests.get("https://dgoldberg.sdsu.edu/515/appliance_reviews.json")
# requests.Response is truthy only for 2xx/3xx status codes.
if response:
    print("\nLoading data...")
    # Using time module to print time taken in execution process
    start_time = time.time()
    data = json.loads(response.text)
    end_time = time.time()
    time_elapsed = end_time - start_time
    print("Completed in", time_elapsed, "seconds.")
    print("\nIdentifying unique words...")
    start_time = time.time()
    # "review_text" will store all the reviews in string.
    # Using textblob module to store words in lower case.
    review_text = ''
    for line in data:
        review_text += line['Review']
    blob = textblob.TextBlob(review_text)
    words = blob.words.lower()
    # Using set() and list(), to store unique words in list.
    unique_words = list(set(words))
    end_time = time.time()
    time_elapsed = end_time - start_time
    print("Completed in", time_elapsed, "seconds.")
    print("\nGenerating relevance scores...")
    start_time = time.time()
    # calculating the relevance score using given formula and storing a pair of word, score in dictionary.
    # A/B: hazard/non-hazard reviews containing the word; C/D: hazard/non-hazard reviews without it.
    # NOTE(review): `word in line["Review"]` is a substring test, so e.g.
    # "cat" also matches "category" -- confirm this is acceptable.
    rel_score = {}
    for word in unique_words:
        # if word == "dangerous":
        score = 0
        A, B, C, D = 0, 0, 0, 0
        for line in data:
            if word in line["Review"]:
                if line["Safety hazard"]:
                    A += 1
                else:
                    B += 1
            if word not in line["Review"]:
                if line["Safety hazard"]:
                    C += 1
                else:
                    D += 1
        # To handle zero division error exception using try-except
        try:
            val = (math.sqrt((A + B) * (C + D)))
            score = ((math.sqrt(A + B + C + D)) * ((A * D) - (C * B))) / val
        except ZeroDivisionError:
            score = 0
        # Filtering the score vy given threshold (4000)
        if score >= 4000:
            # adding word and score in dictionary
            rel_score[word] = score
    for key, val in rel_score.items():
        print(key,"\t",val)
    end_time = time.time()
    time_elapsed = end_time - start_time
    print("Completed in", time_elapsed, "seconds.")
    print("\nFormatting 2D list...")
    start_time = time.time()
    # Preparing input and output variable
    # Preparing 2D input and 1D output
    # Each review becomes a binary feature vector: 1 if a relevant word occurs in it.
    x, y = [], []
    for line in data:
        review = line["Review"]
        hazard = line["Safety hazard"]
        temp = []
        for word, score in rel_score.items():
            if word in review:
                temp.append(1)
            else:
                temp.append(0)
        x.append(temp)
        y.append(hazard)
    end_time = time.time()
    time_elapsed = end_time - start_time
    print("Completed in", time_elapsed, "seconds.")
    print("\nTraining machine learning models...")
    start_time = time.time()
    # Splitting the input and output data in train data set and test data set
    x_train, x_test, y_train, y_test = model_selection.train_test_split(x, y)
    # Training Decision Tree Classifier and calculating the accuracy score
    clf_dt = tree.DecisionTreeClassifier()
    clf_dt = clf_dt.fit(x_train, y_train)
    prediction_dt = clf_dt.predict(x_test)
    accuracy_dt = metrics.accuracy_score(y_test, prediction_dt)
    # Training 7 Nearest Neighbor Classifier and calculating the accuracy score
    clf_knn = neighbors.KNeighborsClassifier(7)
    clf_knn = clf_knn.fit(x_train, y_train)
    prediction_knn = clf_knn.predict(x_test)
    accuracy_knn = metrics.accuracy_score(y_test, prediction_knn)
    # Training Multi Layer perceptron Classifier and calculating the accuracy score
    clf_nn = neural_network.MLPClassifier()
    clf_nn = clf_nn.fit(x_train, y_train)
    prediction_nn = clf_nn.predict(x_test)
    accuracy_nn = metrics.accuracy_score(y_test, prediction_nn)
    end_time = time.time()
    time_elapsed = end_time - start_time
    print("Completed in", time_elapsed, "seconds.\n")
    # Printing the accuracy score of Decision Tree, KNN and Neural Network.
    print(accuracy_dt)
    print(accuracy_knn)
    print(accuracy_nn)
    # Saving the trained MLP classifier in file for future use.
    joblib.dump(clf_nn, "neural_classifier.joblib")
else:
    print("Connection Error")
print("\n\nThank you for using Online Review Classification tool..")
| nishusingh11/MIS-515_Object-Oriented-Programming-for-Business-Applications | Assignment5/Assignment5.py | Assignment5.py | py | 5,155 | python | en | code | 0 | github-code | 90 |
18714456105 | import random
def twoNums():
    """Ask the player for an inclusive range and pick a secret number inside it."""
    low = int(input('Введи 1е число: '))
    high = int(input('Введи 2е число: '))
    return random.randint(low, high)
def answer():
    """Announce that the computer is 'thinking' and read the player's guess."""
    print('Thinking of number... ')
    return int(input('Как ты думаешь какое число я выбрал?: '))
def correctAns(answer, compNum):
    """Keep prompting with higher/lower hints until the guess matches."""
    while answer != compNum:
        if answer > compNum:
            answer = int(input('Больше загаданного, попробуй ещё раз: \n'))
        else:
            answer = int(input('Меньше загаданного, попробуй ещё раз: \n'))
    print('Верно, ты угадал!')
compNum = twoNums()
answer = answer()  # NOTE(review): rebinds the name, shadowing the answer() function after this call
correctAns(answer, compNum)
33702808077 | # 문제 : 계란으로 계린치기
# Read the egg list for BOJ 16987: N eggs, each given as "durability weight".
N = int(input())
array = [[0]*N for _ in range(N)]  # placeholder rows; each is replaced below
for i in range(N):
    array[i] = list(map(int,input().split()))
result = 0  # best (maximum) number of broken eggs found so far

def dfs(level, array):
    """Try every order of striking eggs and record the maximum broken count.

    array[i] = [durability, weight]; an egg with durability <= 0 is broken.
    Updates the module-level `result` and also returns it (backward
    compatible: the original returned None, which no caller used).
    """
    global result
    if level == len(array):
        # Every egg took its turn: count the broken ones.
        broken = sum(1 for hp, _ in array if hp <= 0)
        result = max(result, broken)
        return result
    if array[level][0] <= 0:
        # Egg in hand is already broken -- skip its turn.
        dfs(level + 1, array)
    else:
        struck_any = False
        for i in range(len(array)):
            if i == level or array[i][0] <= 0:
                continue
            struck_any = True
            # Strike egg i with the egg in hand: both take the other's weight.
            array[level][0] -= array[i][1]
            array[i][0] -= array[level][1]
            dfs(level + 1, array)
            # Backtrack.
            array[level][0] += array[i][1]
            array[i][0] += array[level][1]
        if not struck_any:
            # BUGFIX: no intact target exists, but the outcome must still be
            # counted. In the original, `flag` was reset inside the loop and
            # the `continue` skipped the check, so this branch never ran and
            # some terminal states were silently dropped.
            dfs(level + 1, array)
    return result
dfs(0,array)
print(result) | kimujinu/python_PS | 16987.py | 16987.py | py | 911 | python | en | code | 0 | github-code | 90 |
28712034857 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 20 21:45:29 2022
@author: yanbing_wang
Get the statistics of a collection
- tmin/tmax/xymin/xymax/# trajectories
Compare raw and reconciled (unsupervised)
- what fragments are filtered out
- unmatched fragments
- (done)y deviation
- (done)speed distribution
- (done)starting / ending x distribution
- (done)collision
- length, width, height distribution
- density? flow?
- (done)lane distribution
Examine problematic stitching
- plot a list of fragments
- plot the reconciled trajectories
Statics output write to
- DB (?)
- file
- log.info(extra={})
TODO
1. make traj_eval faster using MongoDB projection instead of python
"""
from i24_database_api import DBClient
import matplotlib.pyplot as plt
from bson.objectid import ObjectId
import pprint
import json
import numpy as np
from collections import defaultdict
from multiprocessing.pool import ThreadPool
import time
# local functions
def _get_duration(traj):
return traj["last_timestamp"] - traj["first_timestamp"]
def _get_x_traveled(traj):
x = abs(traj["ending_x"] - traj["starting_x"])
return x
def _get_y_traveled(traj):
return max(traj["y_position"]) - min(traj["y_position"])
def _get_max_vx(traj):
dx = np.diff(traj["x_position"]) * traj["direction"]
dt = np.diff(traj["timestamp"])
try: return max(dx/dt)
except: return np.nan
def _get_min_vx(traj):
dx = np.diff(traj["x_position"]) * traj["direction"]
dt = np.diff(traj["timestamp"])
try: return min(dx/dt)
except: return np.nan
def _get_backward_cars(traj):
dx = np.diff(traj["x_position"]) * traj["direction"]
if np.any(dx < 0):
return str(traj['_id'])
return None
def _get_max_vy(traj):
dy = np.diff(traj["y_position"])
dt = np.diff(traj["timestamp"])
try: return max(dy/dt)
except: return np.nan
def _get_min_vy(traj):
dy = np.diff(traj["y_position"])
dt = np.diff(traj["timestamp"])
try: return min(dy/dt)
except: return np.nan
def _get_avg_vx(traj):
dx = np.diff(traj["x_position"])
dt = np.diff(traj["timestamp"])
try: return np.abs(np.average(dx/dt))
except: return np.nan
def _get_avg_vy(traj):
dy = np.diff(traj["y_position"])
dt = np.diff(traj["timestamp"])
try: return np.average(dy/dt)
except: return np.nan
def _get_avg_ax(traj):
    # NOTE(review): dead code -- an identical _get_avg_ax is re-defined later
    # in this module and shadows this one; consider deleting one of the two.
    ddx = np.diff(traj["x_position"], 2)
    dt = np.diff(traj["timestamp"])[:-1]
    try: return np.mean(ddx/(dt**2))
    except: return np.nan
def _get_min_ax(traj):
ddx = np.diff(traj["x_position"], 2)
dt = np.diff(traj["timestamp"])[:-1]
try: return min(ddx/(dt**2))
except: return np.nan
def _get_max_ax(traj):
ddx = np.diff(traj["x_position"], 2)
dt = np.diff(traj["timestamp"])[:-1]
try: return max(ddx/(dt**2))
except: return np.nan
def _get_avg_ax(traj):
ddx = np.diff(traj["x_position"], 2)
dt = np.diff(traj["timestamp"])[:-1]
try: return np.mean(ddx/(dt**2))
except: return np.nan
def _get_ax(traj):
'''
return point-wise acceleration
'''
ddx = np.diff(traj["x_position"], 2)
dt = np.diff(traj["timestamp"])[:-1]
return ddx/(dt**2)
def _get_vx(traj):
'''
return point-wise acceleration
'''
dy = np.diff(traj["x_position"])
dt = np.diff(traj["timestamp"])
return np.abs(dy/dt)
def _get_residual(traj):
try:
return traj["x_score"]
except: # field is not available
return 0
def _get_lane_changes(traj, lanes = [i*12 for i in range(-1,12)]):
'''
count number of times y position is at another lane according to lane marks
'''
lane_idx = np.digitize(traj["y_position"], lanes)
lane_change = np.diff(lane_idx)
# count-nonzeros
return np.count_nonzero(lane_change)
def doOverlap(pts1, pts2, xpad=0, ypad=0):
    '''
    Axis-aligned rectangle overlap test (separating-axis theorem).

    pts: [lefttop_x, lefttop_y, bottomright_x, bottomright_y], with y
    increasing upward (lefttop_y >= bottomright_y). xpad/ypad expand the
    second box before the test.

    BUGFIX: the original ignored BOTH pads whenever xpad == 0, so passing a
    nonzero ypad alone had no effect. The padded comparison below reduces
    exactly to the unpadded one when both pads are 0.
    '''
    return not (pts1[0] > xpad + pts2[2]
                or pts1[1] + ypad < pts2[3]
                or pts1[2] + xpad < pts2[0]
                or pts1[3] > pts2[1] + ypad)
def calc_space_gap(pts1, pts2):
    '''
    Bumper-to-bumper gap between two boxes if they share a lane, else None.

    Boxes are [lefttop_x, lefttop_y, bottomright_x, bottomright_y]; "same
    lane" means their vertical centers differ by less than 6.
    '''
    center_diff = abs(pts1[1] + pts1[3] - pts2[1] - pts2[3]) / 2
    if center_diff >= 6:
        return None
    return max(pts2[0] - pts1[2], pts1[0] - pts2[2])
def _get_min_spacing(time_doc, lanes = [i*12 for i in range(-1,12)]):
    '''
    get the minimum x-difference at all lanes for a given timestamp
    Returns the sentinel 10e6 when no lane holds two or more vehicles.
    (The mutable `lanes` default is never mutated, so sharing it is safe.)
    TODO: consider vehicle dimension for space gap
    '''
    # get the lane assignments
    veh_ids = np.array(time_doc['id'])
    x_pos = np.array([pos[0] for pos in time_doc["position"]])
    y_pos = np.array([pos[1] for pos in time_doc["position"]])
    lane_asg = np.digitize(y_pos, lanes) # lane_id < 6: east
    # for each lane, sort by x - This is not the best way to calculate!
    lane_dict = defaultdict(list) # key: lane_id, val: x_pos
    for lane_id in np.unique(lane_asg):
        in_lane_idx = np.where(lane_asg==lane_id)[0] # idx of vehs that are in lane_id
        in_lane_ids = veh_ids[in_lane_idx]
        in_lane_xs = x_pos[in_lane_idx]
        sorted_idx = np.argsort(in_lane_xs)
        sorted_xs = in_lane_xs[sorted_idx]
        sorted_ids = in_lane_ids[sorted_idx] # apply the same sequence to ids
        lane_dict[lane_id] = [sorted_xs, sorted_ids]
    # get x diff for each lane
    # pprint.pprint(lane_dict)
    min_spacing = 10e6
    for lane_id, vals in lane_dict.items():
        try:
            sorted_xs, sorted_ids = vals
            delta_x = np.diff(sorted_xs)
            # ValueError from argmin on an empty diff (single-vehicle lane)
            # is swallowed below so such lanes are simply skipped.
            min_idx = np.argmin(delta_x)
            min_spacing_temp = delta_x[min_idx]
            if min_spacing_temp < min_spacing:
                min_spacing = min_spacing_temp
                # NOTE(review): min_pair is computed but never returned or
                # read -- confirm it can be removed.
                min_pair = (sorted_ids[min_idx], sorted_ids[min_idx+1])
        except ValueError:
            pass
    return min_spacing
class UnsupervisedEvaluator():
    def __init__(self, config, collection_name=None, num_threads=100):
        '''
        Connect to the raw and time-indexed copies of a collection, building
        the time-indexed copy first if it does not exist yet.

        Parameters
        ----------
        config : Dictionary
            store all the database-related parameters (host, port, username,
            password, database_name).
        collection_name : str
            name of the collection to evaluate.
        num_threads : int
            size of the thread pool used by the evaluation passes.
        '''
        # print(config)
        self.collection_name = collection_name
        client = DBClient(**config)
        db_time = client.client["transformed"]
        # NOTE(review): db_raw / db_rec are unused below -- presumably kept
        # only for the commented-out diagnostics; confirm before removing.
        db_raw = client.client["trajectories"]
        db_rec = client.client["reconciled"]
        # print("N collections before transformation: {} {} {}".format(len(db_raw.list_collection_names()),len(db_rec.list_collection_names()),len(db_time.list_collection_names())))
        # start transform trajectory-indexed collection to time-indexed collection if not already exist
        # this will create a new collection in the "transformed" database with the same collection name as in "trajectory" database
        if collection_name not in db_time.list_collection_names(): # always overwrite
            print("Transform to time-indexed collection first")
            client.transform(read_database_name=config["database_name"],
                              read_collection_name=collection_name)
        # print("N collections after transformation: {} {} {}".format(len(db_raw.list_collection_names()),len(db_rec.list_collection_names()),len(db_time.list_collection_names())))
        # print(config,collection_name)
        self.dbr_v = DBClient(**config, collection_name = collection_name)
        self.dbr_t = DBClient(host=config["host"], port=config["port"], username=config["username"], password=config["password"],
                              database_name = "transformed", collection_name = collection_name)
        print("connected to pymongo client")
        self.res = defaultdict(dict) # min, max, avg, stdev
        self.num_threads = num_threads
        self.res["collection"] = self.collection_name
        self.res["traj_count"] = self.dbr_v.count()
        self.res["timestamp_count"] = self.dbr_t.count()
def __del__(self):
try:
del self.dbr_v
del self.dbr_t
except:
pass
def thread_pool(self, func, iterable = None):
if iterable is None:
iterable = self.dbr_v.collection.find({})
pool = ThreadPool(processes=self.num_threads)
res = []
for item in iterable:
async_result = pool.apply_async(func, (item,)) # tuple of args for foo
res.append(async_result)
pool.close()
pool.join()
res = [r.get() for r in res] # non-blocking
return res
    def traj_evaluate(self):
        '''
        Results aggregated by evaluating each trajectory document.

        For every metric helper, populates self.res[<metric>] with
        min/max/median/avg/stdev plus the raw per-trajectory values; then
        stores the ids of backward-moving trajectories under
        self.res["backward_cars"].
        '''
        # distributions - all the functions that return a single value
        # TODO: put all functions in a separate script
        functions = [_get_duration, _get_x_traveled,
                     _get_y_traveled, _get_max_vx, _get_min_vx,
                     _get_max_vy, _get_min_vy,_get_max_ax,_get_min_ax,_get_avg_vx,_get_avg_vy,_get_avg_ax,_get_residual,
                     _get_vx, _get_ax,
                     _get_lane_changes]
        # functions = [_get_lane_changes]
        for fcn in functions:
            traj_cursor = self.dbr_v.collection.find({})
            res = self.thread_pool(fcn, iterable=traj_cursor) # cursor cannot be reused
            attr_name = fcn.__name__[5:]  # strip the "_get_" prefix
            print(f"Evaluating {attr_name}...")
            if attr_name in ["vx", "ax"]:
                res = [item for sublist in res for item in sublist] # flatten the nested list
            self.res[attr_name]["min"] = np.nanmin(res).item()
            self.res[attr_name]["max"] = np.nanmax(res).item()
            self.res[attr_name]["median"] = np.nanmedian(res).item()
            self.res[attr_name]["avg"] = np.nanmean(res).item()
            self.res[attr_name]["stdev"] = np.nanstd(res).item()
            self.res[attr_name]["raw"] = res
        # get ids - all the functions that return other information
        functions = [_get_backward_cars]
        for fcn in functions:
            traj_cursor = self.dbr_v.collection.find({})
            res = self.thread_pool(fcn, iterable=traj_cursor) # cursor cannot be reused
            attr_name = fcn.__name__[5:]
            print(f"Evaluating {attr_name}...")
            # keep only the non-None ids
            self.res[attr_name] = [r for r in res if r]
        return
def time_evaluate(self, sample_rate=10):
'''
Evaluate using time-indexed collection
sample_rate: (int) select every sample_rate timestamps to evaluate
'''
# matries to convert [x,y,len,wid] to [lefttop_x, lefttop_y, bottomright_x, bottomright_y]
east_m = np.array([[1, 0,0,0], [0,1,0,0.5], [1,0,1,0], [0,1,0,-0.5]]).T
west_m = np.array([[1,0,-1,0], [0,1,0,0.5], [1, 0,0,0], [0,1,0,-0.5]]).T
def _get_overlaps(time_doc):
'''
Calculate pair-wise overlaps and space gap at a given timestamp
'''
veh_ids = time_doc['id']
pos = time_doc["position"]
try:
dims = time_doc["dimensions"]
time_doc_dict = {veh_ids[i]: pos[i] + dims[i][:2] for i in range(len(veh_ids))}
has_dimension = True
except KeyError:
time_doc_dict = {veh_ids[i]: pos[i] for i in range(len(veh_ids))}
has_dimension = False
if has_dimension:
pipeline = [
{"$match": {"$and" : [{"_id": {"$in": veh_ids}}, {"direction": {"$eq":1}}]}},
{'$project':{ '_id': 1 } },
]
query = self.dbr_v.collection.aggregate(pipeline)
east_ids = [doc["_id"] for doc in query]
west_ids = list(set(veh_ids) - set(east_ids))
east_b = np.array([time_doc_dict[veh_id] for veh_id in east_ids])
west_b = np.array([time_doc_dict[veh_id] for veh_id in west_ids])
else:
east_pipeline = [
{"$match": {"$and" : [{"_id": {"$in": veh_ids}}, {"direction": {"$eq":1}}]}},
{'$project':{ '_id': 1, 'length':1, 'width': 1 } },
]
east_query = self.dbr_v.collection.aggregate(east_pipeline)
east_dict = {doc["_id"]: [doc["length"], doc["width"]] for doc in east_query} # get dimension info
west_pipeline = [
{"$match": {"$and" : [{"_id": {"$in": veh_ids}}, {"direction": {"$eq":-1}}]}},
{'$project':{ '_id': 1, 'length':1, 'width': 1 } },
]
west_query = self.dbr_v.collection.aggregate(west_pipeline)
west_dict = {doc["_id"]: [doc["length"], doc["width"]] for doc in west_query} # get dimension info
east_ids = list(east_dict.keys())
west_ids = list(west_dict.keys())
east_b = np.array([time_doc_dict[veh_id]+east_dict[veh_id] for veh_id in east_ids])
west_b = np.array([time_doc_dict[veh_id]+west_dict[veh_id] for veh_id in west_ids])
overlap = []
# space_gap = []
# east_pts = M*east_b, where east_pts=[lx, ly, rx, ry], east_b =[x,y,len,wid]
# vectorize to all vehicles: A = M*B
try:
east_pts = np.matmul(east_b, east_m)
except ValueError: # ids are empty
east_pts = []
for i, pts1 in enumerate(east_pts):
for j, pts2 in enumerate(east_pts[i+1:]):
# check if two boxes overlap, if so append the pair ids
if doOverlap(pts1, pts2):
overlap.append((str(east_ids[i]),str(east_ids[j])))
# west bound
try:
west_pts = np.matmul(west_b, west_m)
except ValueError:
west_pts = []
for i, pts1 in enumerate(west_pts):
for j, pts2 in enumerate(west_pts[i+1:]):
# check if two boxes overlap
if doOverlap(pts1, pts2):
overlap.append((str(west_ids[i]),str(west_ids[j])))
return overlap
# start thread_pool for each timestamp
functions = [_get_min_spacing, _get_overlaps]
# functions = [_get_min_spacing]
for fcn in functions:
time_cursor = self.dbr_t.collection.find({})
attr_name = fcn.__name__[5:]
print(f"Evaluating {attr_name}...")
if "overlap" in attr_name:
overlaps = set()
count = 0
for time_doc in time_cursor:
if count % sample_rate == 0:
overlap_t = _get_overlaps(time_doc)
for pair in overlap_t:
overlaps.add(pair)
count += 1
self.res[attr_name] = list(overlaps)
else:
res = self.thread_pool(fcn, iterable=time_cursor)
self.res[attr_name]["min"] = np.nanmin(res).item()
self.res[attr_name]["max"] = np.nanmax(res).item()
self.res[attr_name]["median"] = np.nanmedian(res).item()
self.res[attr_name]["avg"] = np.nanmean(res).item()
self.res[attr_name]["stdev"] = np.nanstd(res).item()
self.res[attr_name]["raw"] = res
return
def print_res(self):
pprint.pprint(self.res, width = 1)
def save_res(self):
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.floating):
return float(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
return super(NpEncoder, self).default(obj)
with open(f"res_{self.collection_name}.json", "w") as f:
json.dump(self.res, f, indent=4, sort_keys=False,cls=NpEncoder)
print("saved.")
def plot_histogram(data, title=""):
    """Show a histogram of ``data`` with an adaptive bin count (at most 100)."""
    n_bins = min(len(data) // 10, 100)
    plt.figure()
    plt.hist(data, bins=n_bins)
    plt.title(title)
    plt.show()
def call(db_param,collection):
    """Run both evaluation passes over `collection` and return the stats dict.

    db_param: DB connection parameters forwarded to UnsupervisedEvaluator.
    """
    ue = UnsupervisedEvaluator(db_param, collection_name=collection, num_threads=200)
    t1 = time.time()
    ue.traj_evaluate()
    ue.time_evaluate(sample_rate = 25)
    t2 = time.time()
    print("time: ", t2-t1)
    #ue.print_res()
    #ue.save_res()
    return ue.res
if __name__ == '__main__':
    # with open('config.json') as f:
    #     config = json.load(f)
    collection = "pristine_stork--RAW_GT1__mumbles"
    # collection = "morose_panda--RAW_GT1__juxtaposes"
    # Collections containing "__" live in the reconciled database.
    if "__" in collection:
        database_name = "reconciled"
    else:
        # BUGFIX: was `databas_name` (typo), which left `database_name`
        # undefined and raised NameError for raw collections.
        database_name = "trajectories"
    param = {
      "host": "10.2.218.56",
      "port": 27017,
      "username": "i24-data",
      "password": "mongodb@i24",
      "database_name": database_name # db that the collection to evaluate is in
    }
    # Reuse cached statistics if present; otherwise run the full evaluation.
    try:
        with open(f"res_{collection}.json", "rb") as f:
            res = json.load(f)
        print("loaded res from local json file")
    except (OSError, json.JSONDecodeError):  # BUGFIX: was a bare except
        res = call(param, collection)
    # %% plot
    plot_histogram(res["vx"]["raw"], "vx")
    # %% examine large accelerations
    # dbc = DBClient(**param, collection_name = collection)
    # col = dbc.collection
    # for doc in col.find():
    #     r = _get_avg_ax(doc)
    #     if r and r < -50:
    #         print(doc["_id"])
| DerekGloudemans/trajectory-eval-toolkit | unsup_statistics2.py | unsup_statistics2.py | py | 18,493 | python | en | code | 1 | github-code | 90 |
22390803940 |
''' Aptitude test -- to qualify one must be
at least 18 years old, at least 1.75 in height,
and weigh at least 60 kg. '''
idade = int(input('Escreva sua idade: '))
altura = float(input('Esceva sua altura: '))  # NOTE(review): prompt typo 'Esceva' is in the original runtime string
peso = float(input('Escreva seu peso: '))
# All three criteria must hold simultaneously.
if idade >= 18 and peso >= 60 and altura >= 1.75:
    print('Você está apto')
else:
    print('Você não está apto')
| KesslerBarreto/curso-python-solyd | Teste de aptidão.py | Teste de aptidão.py | py | 418 | python | pt | code | 0 | github-code | 90 |
22323486690 | import json
import telebot
from telebot import types,util
from decouple import config
from googletrans import Translator
BOT_TOKEN = config("BOT_TOKEN")
bot= telebot.TeleBot(BOT_TOKEN)
bot_data={
"name" : ["stone","حجر"]
}
text_messages={
"welcome": "welcome to stone بوت مجموعة تليجرام ☺",
"welcomeNewMember" :
u"اهلا بك {name} في مجموعتنا الخاصة 🙋♂️",
"saying goodbye":
u"العضو {name} غادر المجموعة 🥺",
"leave":"لقد تم اضافتي الى مجموعة غير المجموعة التي صممت لها , وداعاً 🧐",
"call" : "كيف يمكنني المساعدة ؟ 😀",
"warn": u"❌ لقد استعمل {name} احد الكلمات المحظورة ❌\n"
u" 🔴 تبقى لديك {safeCounter} فرص اذا تم تجاوز العدد سيتم طردك 🔴",
"kicked": u"👮♂️⚠ لقد تم طرد العضو {name} صاحب المعرف {username} بسبب مخالفته لاحد قواعد المجموعة 👮♂️⚠"
}
text_list={
"offensive":["cat","puppy"]
}
commands = {
"translate":["translate","trans","ترجم","ترجملي"]
}
def handleNewUserData(message):
    """Persist a newly joined member (id, name, username) into data.json.

    Every new user starts with safeCounter = 5 warning credits. The file is
    rewritten even when the user already exists (matching the original flow).
    """
    user = message.new_chat_member.user
    user_id = str(user.id)
    with open("data.json", "r") as store:
        data = json.load(store)
    users = data["users"]
    if user_id not in users:
        print("new user detected !")
        users[user_id] = {
            "safeCounter": 5,
            "username": user.username,
            "name": user.first_name,
        }
        print("new user data saved !")
    data["users"] = users
    with open("data.json", "w") as store:
        json.dump(data, store, indent=3)
def handleOffensiveMessage(message):
    """Penalise the sender of a banned word: decrement their safeCounter,
    warn them, kick them once the counter hits zero, and finally delete
    the offending message."""
    id = str(message.from_user.id)
    name = message.from_user.first_name
    username = message.from_user.username
    with open("data.json","r") as jsonFile:
        data = json.load(jsonFile)
        jsonFile.close()  # redundant: the with-block already closes the file
    users = data["users"]
    # Lazily register users who joined before the bot was added.
    if id not in users:
        print("new user detected !")
        users[id] = {"safeCounter":5}
        users[id]["username"] = username
        users[id]["name"] = name
        print("new user data saved !")
    for index in users:
        if index == id :
            print("guilty user founded !")
            users[id]["safeCounter"] -= 1
            safeCounterFromJson = users[id]["safeCounter"]
            if safeCounterFromJson == 0:
                bot.kick_chat_member(message.chat.id,id)
                # NOTE(review): popping while iterating `users` raises
                # RuntimeError on the next loop step -- confirm, and consider
                # a direct `if id in users:` lookup instead of this loop.
                users.pop(id)
                bot.send_message(message.chat.id,text_messages["kicked"].format(name=name , username = username))
            else:
                bot.send_message(message.chat.id,text_messages["warn"].format(name=name , safeCounter = safeCounterFromJson))
    data["users"] = users
    with open("data.json","w") as editedFile:
        json.dump(data,editedFile,indent=3)
        editedFile.close()  # redundant: the with-block already closes the file
    return bot.delete_message(message.chat.id,message.message_id)
@bot.message_handler(commands=["start","help"])
def startBot(message):
    # Reply to /start and /help with the welcome text.
    bot.send_message(message.chat.id,text_messages["welcome"])
#* saying Welcome to joined members
#* saying goodbye to left members
@bot.chat_member_handler()
def handleUserUpdates(message:types.ChatMemberUpdated):
    """Greet members who join and announce members who leave."""
    newResponse = message.new_chat_member
    if newResponse.status == "member":
        # Persist the newcomer before greeting them.
        handleNewUserData(message=message)
        bot.send_message(message.chat.id,text_messages["welcomeNewMember"].format(name=newResponse.user.first_name))
    if newResponse.status == "left":
        bot.send_message(message.chat.id,text_messages["saying goodbye"].format(name=newResponse.user.first_name))
#* leave anychat thats not mine
@bot.my_chat_member_handler()
def leave(message:types.ChatMemberUpdated):
    """Leave any chat the bot gets added to (it is built for one private group)."""
    update = message.new_chat_member
    if update.status == "member":
        bot.send_message(message.chat.id,text_messages["leave"])
        bot.leave_chat(message.chat.id)
#* listening to group messages
#* respond to bot name
@bot.message_handler(func=lambda m:True)
def reply(message):
    """Catch-all text handler: answer to the bot's name, translate on
    command, and screen every word against the offensive-word list."""
    # NOTE(review): words[0] raises IndexError on an empty/whitespace
    # message -- confirm Telegram never delivers one to this handler.
    words = message.text.split()
    if words[0] in bot_data["name"]:
        bot.reply_to(message,text_messages["call"])
    #* adding googletrans api
    #* translating word to arabic
    #* translating sentence to arabic
    if words[0] in commands["translate"]:
        translator = Translator()
        translation = translator.translate(" ".join(words[1:]),dest="ar")
        bot.reply_to(message,translation.text)
    for word in words:
        if word in text_list["offensive"]:
            handleOffensiveMessage(message=message)
#* : checking if any word in message is offensive print("offensive")
#* : creating a data json file reading/writing
#* : saving users info from message (id,name,username)
#* : adding safeCounter data to each user safeCounter = TRIES
#* : kick chat member that break the rules
bot.infinity_polling(allowed_updates=util.update_types) | morethancoder/telebot-groupchat | #3/main.py | main.py | py | 5,144 | python | en | code | 7 | github-code | 90 |
72805147176 | from http.server import HTTPServer, BaseHTTPRequestHandler
from json import dumps, loads
from urllib.parse import urlparse, parse_qs
from cowpy import cow
ADDRESS = ('127.0.0.1', 3000)
INDEX = b'''<!DOCTYPE html>
<html>
<head>
<title> cowsay </title>
</head>
<body>
<header>
<nav>
<ul>
<li><a href="/cowsay">cowsay</a></li>
</ul>
</nav>
<header>
<main>
This website provides an api linked above. It can be used to generate cowsay
messages in raw test or JSON.
</main>
</body>
</html>
'''
COWSAY = b'''<!DOCTYPE html>
<html>
<head>
<title> cowsay api docs </title>
</head>
<body>
<header>
<nav>
<ul>
<li><a href="..">home</a></li>
</ul>
</nav>
<header>
<main>
<div>
One endpoint is provided at the following path: /cow[?msg=message]. If the
message is not provided a default one will be inserted. A POST at the endpoint
will respond with a json document of the following form:
<code>{"content": string response from GET}</code>
</div>
<div>
examples bolow:
</div>
<ul>
<li>
<a href="/cow?msg=text">/cow?msg=text
<iframe src="/cow?msg=text"></iframe>
</a>
</li>
<li>
<a href="/cow?msg=Hello user!">/cow?msg=Hello user!
<iframe src="/cow?msg=Hello user!"></iframe>
</a>
</li>
<li>
<a href="/cow">cow
<iframe src="/cow"></iframe>
</a>
</li>
</ul>
</main>
</body>
</html>
'''
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
    """Request handler serving the cowsay pages and the /cow endpoint."""
    # One shared cow renderer, created once at class-definition time.
    COW = cow.get_cow()()
    # Kept as a single-element list so it matches parse_qs() values (which
    # are always lists); callers index [0].
    DEFAULT_MSG = ['You should speak up for yourself.']
    def get_index(self, parsed_path):
        """
        Handle `/` path get request.
        """
        self.send_response(200)
        self.end_headers()
        self.wfile.write(INDEX)
    def get_cowsay(self, parsed_path):
        """
        Handle `/cowsay` path get request.
        """
        self.send_response(200)
        self.end_headers()
        self.wfile.write(COWSAY)
    def get_cow(self, parsed_path):
        """
        Handle `/cow[?msg=<message>]` path get request.
        Responds with the raw cowsay text.
        """
        parsed_qs = parse_qs(parsed_path.query)
        msg = parsed_qs.get('msg', self.DEFAULT_MSG)[0]
        self.send_response(200)
        self.end_headers()
        self.wfile.write(self.COW.milk(msg).encode())
    def post_cow(self, parsed_path):
        """
        Handle `/cow[?msg=<message>]` path post request.
        Responds with JSON: {"content": <cowsay text>}.
        """
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length)
        msg = loads(post_data)
        # NOTE(review): assumes the POST body is a JSON object; a JSON
        # array/string would make .get() raise AttributeError -- confirm.
        msg = msg.get('msg', self.DEFAULT_MSG[0])
        self.send_response(200)
        self.end_headers()
        self.wfile.write(dumps({"content": self.COW.milk(msg)}).encode())
    def do_GET(self):
        """
        Dispatch get to known paths or handle 404 status.
        """
        parsed_path = urlparse(self.path)
        if parsed_path.path == '/':
            return self.get_index(parsed_path)
        if parsed_path.path == '/cowsay':
            return self.get_cowsay(parsed_path)
        if parsed_path.path == '/cow':
            return self.get_cow(parsed_path)
        self.send_response(404)
        self.end_headers()
        self.wfile.write(b'Not Found')
    def do_POST(self):
        """
        Dispatch post to known paths or handle 404 status.
        """
        parsed_path = urlparse(self.path)
        if parsed_path.path == '/cow':
            return self.post_cow(parsed_path)
        self.send_response(404)
        self.end_headers()
        self.wfile.write(b'Not Found')
def create_server():
"""
Initialize a default server for cowsay.
"""
return HTTPServer(ADDRESS, SimpleHTTPRequestHandler)
def main():
"""
Entry point for server application.
"""
with create_server() as server:
print(f'Starting server on port { ADDRESS[1] }')
server.serve_forever()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
finally:
print(f'Stopping server on port { ADDRESS[1] }')
| asakatida/http-server | src/server.py | server.py | py | 4,327 | python | en | code | 0 | github-code | 90 |
36659354717 | import numpy as np
import math
def coord(debut,fin, df):
cord=[]
for debut in range(fin):
cord.append(df.iloc[debut,1,2])
return cord
#fonction pour matrice euclidienne
def matrice_euclide(dataframe1,dataframe2, len_lig, len_col):
#leng_lig= dataframe1.shape[0]
#leng_col= dataframe2.shape[0]
i=0
j=0
#len_mat=200
euc_mat=np.empty((len_lig,len_col))
for i in range(len_lig):
for j in range(len_col):
Xpow = pow((dataframe1.iloc[i,3]) - (dataframe2.iloc[j,3]),2)
Ypow = pow((dataframe1.iloc[i,4]) - (dataframe2.iloc[j,4]),2)
racine = math.sqrt(Xpow+Ypow)
euc_mat[i,j] = racine
return euc_mat
def MatriceFrechet(euclideMatrice, len_lig, len_col):
# initialisation de la matrice
fre_mat = np.zeros((len_lig, len_col))
fre_mat[0,0] = euclideMatrice[0,0]
# remplir la première ligne
for i in range(1, len_col):
fre_mat[0, i] = max(euclideMatrice[0, i], fre_mat[0, i-1])
# remplir la première colomne
for j in range(1, len_lig):
fre_mat[j,0] = max(euclideMatrice[j,0], fre_mat[j-1, 0])
# remplir le reste de la matrice
for x in range(1, len_lig):
for y in range(1, len_col):
fre_mat[x,y] = max(min(fre_mat[x,y-1], fre_mat[x-1,y-1], fre_mat[x-1,y]), euclideMatrice[x,y])
return fre_mat
#fonction pour distance de frechet
def distance_frechet(fre_mat, len_lig, len_col):
i=len_lig -1
j=len_col -1
listdesmin=[]
listdesmin.append((fre_mat[i,j], i, j))
while (i>0 and j>0):
h=i-1
hgb=j-1
lvoisinF=[fre_mat[h,j],fre_mat[h,hgb],fre_mat[i,hgb]]
coord=[(h,j),(h,hgb),(i,hgb)]
lvoisinE=[fre_mat[h,j],fre_mat[h,hgb],fre_mat[i,hgb]]
indices = [i for i, x in enumerate(lvoisinF) if x ==min(lvoisinF)]
if(len(indices)==1):
#acceder au coordonnées dans coord
i=coord[indices[0]][0]
j=coord[indices[0]][1]
if(len(indices)==2):
minE=min(lvoisinE[indices[0]],lvoisinE[indices[1]])
coordE=[i for i, x in enumerate(lvoisinF) if x == minE]
i=coord[coordE[0]][0]
j=coord[coordE[0]][1]
if(len(indices)==3):
minE = min(lvoisinE[indices[0]], lvoisinE[indices[1]],lvoisinE[indices[2]])
coordE = [i for i, x in enumerate(lvoisinF) if x == minE]
i = coord[coordE[0]][0]
j = coord[coordE[0]][1]
#indices = [i for i, x in enumerate(lvoisinF) if x == min(lvoisinF)]
listdesmin.append((min(lvoisinF),i,j))
return listdesmin
| didadya/Distance_Frechet | FrechetDiscret.py | FrechetDiscret.py | py | 2,629 | python | fr | code | 0 | github-code | 90 |
19292510278 | import tensorflow as tf
import numpy as np
################################################################################################### Define Edge Network
class EdgeNet(tf.keras.layers.Layer):
def __init__(self, name='EdgeNet', hid_dim=10):
super(EdgeNet, self).__init__(name=name)
self.layer = tf.keras.Sequential([
tf.keras.Input(shape=(hid_dim+3)*2,),
tf.keras.layers.Dense(hid_dim, activation='tanh'),
tf.keras.layers.Dense(1, activation='sigmoid'),
])
def call(self,X, Ri, Ro):
bo = tf.matmul(Ro,X,transpose_a=True)
bi = tf.matmul(Ri,X,transpose_a=True)
# Shape of B = N_edges x 6 (2x (3 coordinates))
# each row consists of two node that are possibly connected.
B = tf.concat([bo, bi], axis=1) # n_edges x 6, 3-> r,phi,z
return self.layer(B)
# Define Node Network
class NodeNet(tf.keras.layers.Layer):
def __init__(self, name='NodeNet', hid_dim=10):
super(NodeNet, self).__init__(name=name)
self.layer = tf.keras.Sequential([
tf.keras.Input(shape=(hid_dim+3)*3,),
tf.keras.layers.Dense(hid_dim, activation='tanh'),
tf.keras.layers.Dense(hid_dim, activation='sigmoid'),
])
def call(self,X, e, Ri, Ro):
bo = tf.matmul(Ro, X, transpose_a=True)
bi = tf.matmul(Ri, X, transpose_a=True)
Rwo = Ro * e[:,0]
Rwi = Ri * e[:,0]
# changin the order to test something !!!!!!!!! DONT FORGET TO LOOK BACK!!!
mi = tf.matmul(Rwi, bo)
mo = tf.matmul(Rwo, bi)
# Shape of M = N_nodes x 9 (3x (3 coordinates))
# each row consists of a node and its 2 possible neigbours
M = tf.concat([mi, mo, X], axis=1)
return self.layer(M)
##################################################################################################
class GNN(tf.keras.Model):
def __init__(self):
# Network definitions here
super(GNN, self).__init__(name='GNN')
self.InputNet = tf.keras.Sequential([
tf.keras.layers.Dense(GNN.config['hid_dim'], input_shape=(3,), activation='sigmoid')
],name='InputNet')
self.EdgeNet = EdgeNet(name='EdgeNet', hid_dim=GNN.config['hid_dim'])
self.NodeNet = NodeNet(name='NodeNet', hid_dim=GNN.config['hid_dim'])
self.n_iters = GNN.config['n_iters']
def call(self, graph_array):
X, Ri, Ro = graph_array # decompose the graph array
H = self.InputNet(X) # execute InputNet to produce hidden dimensions
H = tf.concat([H,X],axis=1) # add new dimensions to original X matrix
for i in range(self.n_iters): # recurrent iteration of the network
e = self.EdgeNet(H, Ri, Ro) # execute EdgeNet
H = self.NodeNet(H, e, Ri, Ro) # execute NodeNet using the output of EdgeNet
H = tf.concat([H,X],axis=1) # update H with the output of NodeNet
e = self.EdgeNet(H, Ri, Ro) # execute EdgeNet one more time to obtain edge predictions
return e # return edge prediction array
| QTrkX/qtrkx-gnn-tracking | qnetworks/CGNN.py | CGNN.py | py | 3,249 | python | en | code | 12 | github-code | 90 |
11585001158 | from __future__ import annotations
import uuid
from typing import TypedDict
from data_zipcaster import __version__
DEFAULT_USER_AGENT = f"data_zipcaster/{__version__}"
# From S3S
class NAMESPACES:
STATINK = uuid.UUID("b3a2dbf5-2c09-4792-b78c-00b548b70aeb")
class Mode(TypedDict):
name: str
key: str
properties: list[str]
_id: int
class MODES:
TURF_WAR = Mode(
name="Turf War", key="regular", properties=["turf_war"], _id=1
)
ANARCHY_SERIES = Mode(
name="Anarchy Series",
key="bankara_challenge",
properties=["anarchy"],
_id=2,
)
X_BATTLE = Mode(
name="X Battle", key="xbattle", properties=["xbattle"], _id=3
)
LEAGUE_BATTLE = Mode(
name="Challenge Battle",
key="league",
properties=[],
_id=4,
)
PRIVATE_BATTLE = Mode(
name="Private Battle", key="private", properties=["private"], _id=5
)
ANARCHY_OPEN = Mode(
name="Anarchy Open", key="bankara_open", properties=["anarchy"], _id=51
)
SPLATFEST = Mode(
name="Splatfest",
key="splatfest_open",
properties=["splatfest", "turf_war"],
_id=6,
)
SPLATFEST_PRO = Mode(
name="Splatfest Pro",
key="splatfest_challenge",
properties=["splatfest", "turf_war", "pro"],
_id=7,
)
SPLATFEST_TRICOLOR = Mode(
name="Splatfest Tricolor",
key="splatfest_open",
properties=["splatfest", "turf_war", "tricolor"],
_id=8,
)
@staticmethod
def get_modes() -> list[Mode]:
return [
getattr(MODES, attr)
for attr in dir(MODES)
if isinstance(getattr(MODES, attr), dict)
]
@staticmethod
def get_mode_by_id(mode_id: int | str) -> Mode:
if isinstance(mode_id, str):
mode_id = int(mode_id)
modes = MODES.get_modes()
for mode in modes:
if mode["_id"] == mode_id:
return mode
raise ValueError(f"Invalid mode ID: {mode_id}")
@staticmethod
def get_mode_by_key(mode_key: str) -> Mode:
modes = MODES.get_modes()
for mode in modes:
if mode["key"] == mode_key:
return mode
raise ValueError(f"Invalid mode key: {mode_key}")
@staticmethod
def get_mode_by_name(mode_name: str) -> Mode:
modes = MODES.get_modes()
for mode in modes:
if mode["name"] == mode_name:
return mode
raise ValueError(f"Invalid mode name: {mode_name}")
RANKS = [
"C-",
"C",
"C+",
"B-",
"B",
"B+",
"A-",
"A",
"A+",
"S",
"S+",
]
MATCH_MULTIPLIERS = {
"NORMAL": 1,
"DECUPLE": 10,
"DRAGON": 100,
"DOUBLE_DRAGON": 333,
}
| cesaregarza/DataZipcaster | data_zipcaster/constants.py | constants.py | py | 2,812 | python | en | code | 4 | github-code | 90 |
37820735401 |
# ________
# /
# \ /
# \ /
# \/
# Main reference: Hoff and Niu (2012)
# Hoff, P. and Niu, X., A Covariance Regression Model.
# Statistica Sinica, Institute of Statistical Science, 2012, 22(2), 729–753.
import textwrap
import numpy as np
import group_lasso
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import SGDRegressor, LassoLars, Lars
from mpl_toolkits.mplot3d import Axes3D
np.random.seed(0)  # fix the global RNG seed so the random initialisation in cov_reg_given_mean is reproducible
sns.set(style='darkgrid')  # module-wide seaborn styling for all matplotlib figures produced below
def b(knots, time, degree):
    """
    Evaluate a single B-spline basis function via the Cox-de Boor recursion.

    Parameters
    ----------
    knots : real ndarray
        Knot vector (or the subset relevant at the current recursion depth);
        a degree-d basis function uses d + 2 knots.
    time : real ndarray
        Points at which the basis function is evaluated.
    degree : positive integer
        Degree of the basis spline to be constructed.

    Returns
    -------
    real ndarray
        Basis spline of degree 'degree' evaluated over 'time'.

    Notes
    -----
    The recursion subsets the knot vector by one knot at each level until the
    degree-0 base case (a half-open interval indicator) is reached.
    """
    if degree == 0:
        # Base case: indicator of knots[0] <= t < knots[1], cast to float.
        return ((knots[0] <= time) & (time < knots[1])) * 1.0

    ones = np.ones_like(time)
    # Linear blending weights of the two lower-degree basis functions.
    left_weight = (time - knots[0] * ones) / (knots[-2] * ones - knots[0] * ones)
    right_weight = (knots[-1] * ones - time) / (knots[-1] * ones - knots[1] * ones)
    return (left_weight * b(knots[0:-1], time, degree - 1) +
            right_weight * b(knots[1:], time, degree - 1))
def cubic_b_spline(knots, time):
    """
    Build the cubic B-spline basis matrix for a given knot vector.

    Parameters
    ----------
    knots : real ndarray
        Knot points to be used (not necessarily evenly spaced).
    time : real ndarray
        Time grid over which each basis function is evaluated.

    Returns
    -------
    real ndarray
        Array of shape (len(knots) - 4, len(time)); each row holds one cubic
        basis spline. The matrix is sparse in the sense that at most four
        bases are non-zero at any time point.

    Notes
    -----
    With this matrix 'B' and a series 's', a coefficient vector 'c' minimising
    ||(B^T)c - s||^2 yields a smooth spline approximation of 's'.
    """
    n_bases = len(knots) - 4  # a cubic spline has 4 fewer coefficients than knots
    basis_matrix = np.zeros((n_bases, len(time)))
    for idx in range(n_bases):
        # Each cubic basis function is supported on 5 consecutive knots.
        basis_matrix[idx, :] = b(knots[idx:(idx + 5)], time, 3)
    return basis_matrix
def calc_B_Psi(m, v, x, y, basis, A_est, technique, alpha, l1_ratio_or_reg, group_reg, max_iter, groups,
               test_lasso=False):
    """
    This follows the calculation at the bottom of page 10 and top of page 11 in Hoff and Niu (2012).

    Parameters
    ----------
    m : real ndarray
        Column vector of shape (n x 1) of means in random effects model with 'n' being number of observations.

    v : real ndarray
        Column vector of shape (n x 1) of variances in random effects model with 'n' being number of observations.

    x : real ndarray
        Matrix of shape (m x n) of covariates with 'm' being number of covariates
        and 'n' being number of observations.

    y : real ndarray
        Matrix of shape (p x n) of dependent variables with 'p' being number of dependent variables
        and 'n' being number of observations.

    basis : real ndarray
        Basis matrix used to estimate local mean - (A_est^T * basis) approximates local mean of y matrix.

    A_est : real ndarray
        Coefficient matrix used to estimate local mean - (A_est^T * basis) approximates local mean of y matrix.

    technique : string
        'direct' : Direct calculation method used in Hoff and Niu (2012).
            beta = [(x_tilda^T * x_tilda)^(-1)] * (x_tilda^T * y)

        'lasso' : Least Absolute Shrinkage and Selection Operator (LASSO) Regression.
            Minimize: (1 / (2 * n)) * ||y_tilda - x_tilda * beta||^2_2 +
                      alpha * ||beta||_1

        'ridge' :
            Minimize: ||y_tilda - x_tilda * beta||^2_2 + alpha * ||beta||^2_2
            Equivalent to: beta = [(x_tilda^T * x_tilda + alpha * I)^(-1)] * (x_tilda^T * y)

        'elastic-net' :
            Minimize: (1 / (2 * n)) * ||y_tilda - x_tilda * beta||^2_2 +
                      alpha * l1_ratio * ||beta||_1 + 0.5 * alpha * (1 - l1_ratio) * ||beta||^2_2

            l1_ratio = 1 equivalent to 'lasso'
            l1_ratio = 0 and alpha = 2 equivalent to 'ridge'

        'group-lasso' :
            With G being the grouping of the covariates the objective function is given below.
            Minimize: ||∑g∈G[X_g * beta_g] - y||^2_2 + alpha * ||w||_1 + lambda_group * ∑g∈G||beta_g||_2

        'sub-gradient' :
            Minimize: ||beta||_1
            subject to: x_tilda * beta^T = y
            iterate by: B_{k+1} = B_k - alpha_k(I_p - X^T * (X * X^T)^{-1} * X * sign(B_k))

    alpha : float
        Constant used in chosen regression to multiply onto weights.

    l1_ratio_or_reg : float
        Least Absolute Shrinkage and Selection Operator (LASSO) ratio for elastic-net regression and
        LASSO regulator for group LASSO regression.

    group_reg : float
        Group LASSO regulator for group LASSO regression.

    max_iter : positive integer
        Maximum number of iterations to perform in chosen regression.

    groups : real ndarray (integer ndarray)
        Groups to be used in 'group-lasso' regression.

    test_lasso : bool
        If True, then given 'alpha' value is disregarded at each iteration and an optimal 'alpha' is calculated.

    Returns
    -------
    B_est : real ndarray
        Coefficients for covariates explaining attributable covariance.

    Psi_est : real ndarray
        Base unattributable covariance present in model.

    Notes
    -----
    Group LASSO regression and Subgradient optimisation are experimental and need to be improved to stop possible
    breaking of correlation structure or nonsensical results.

    """
    # Augmented design/response of the weighted regression (Hoff & Niu 2012, p. 10-11):
    # top half is m-weighted covariates against de-meaned responses,
    # bottom half is sqrt(v)-weighted covariates against zeros.
    x_tilda = np.vstack([m * x.T, (v ** (1 / 2)) * x.T])
    y_tilda = np.vstack(((y.T - np.matmul(A_est.T, basis).T), np.zeros_like(y.T)))

    if technique == 'direct':
        # Ordinary least squares via the normal equations; fall back to the
        # pseudo-inverse when (x_tilda^T x_tilda) is singular.
        try:
            B_est = \
                np.matmul(y_tilda.T,
                          np.matmul(x_tilda, np.linalg.inv(np.matmul(x_tilda.T, x_tilda).astype(np.float64))))
        except:
            B_est = \
                np.matmul(y_tilda.T,
                          np.matmul(x_tilda, np.linalg.pinv(np.matmul(x_tilda.T, x_tilda).astype(np.float64))))
    elif technique == 'lasso':
        if test_lasso:
            # Grid-search an optimal alpha over 10^-12 .. 10^0, overriding the caller's value.
            parameters = {'alpha': 10 ** (np.linspace(-12, 0, 121))}
            reg_lasso = linear_model.Lasso()
            clf = GridSearchCV(reg_lasso, parameters)
            clf.fit(x_tilda, y_tilda)
            alpha = np.asarray(clf.cv_results_['param_alpha'])[clf.best_index_]
        # MultiTaskLasso couples the sparsity pattern across all dependent variables.
        reg_lasso = linear_model.MultiTaskLasso(alpha=alpha, fit_intercept=False, max_iter=max_iter)
        reg_lasso.fit(x_tilda, y_tilda)
        B_est = reg_lasso.coef_
    elif technique == 'ridge':
        # Closed-form ridge solution; fall back to sklearn's iterative solver on failure.
        try:
            B_est = \
                np.matmul(np.matmul(y_tilda.T, x_tilda),
                          np.linalg.inv(np.matmul(x_tilda.T, x_tilda).astype(np.float64) +
                                        alpha * np.identity(np.shape(x_tilda)[1])))
        except:
            reg_ridge = linear_model.Ridge(alpha=alpha, fit_intercept=False, max_iter=max_iter)
            reg_ridge.fit(x_tilda, y_tilda)
            B_est = reg_ridge.coef_
    elif technique == 'elastic-net':
        reg_elas_net = linear_model.ElasticNet(alpha=alpha, fit_intercept=False, l1_ratio=l1_ratio_or_reg,
                                               max_iter=max_iter)
        reg_elas_net.fit(x_tilda, y_tilda)
        B_est = reg_elas_net.coef_
    elif technique == 'group-lasso':
        ################################################################
        # possibly breaks correlation structure when doing column-wise #
        ################################################################
        # https://group-lasso.readthedocs.io/en/latest/
        # https://group-lasso.readthedocs.io/en/latest/auto_examples/index.html
        # Fits one group-LASSO problem per dependent variable (column-wise);
        # see Notes above about the risk to the joint correlation structure.
        B_est = np.zeros((np.shape(y_tilda)[1], np.shape(x_tilda)[1]))
        for covariate in range(np.shape(y_tilda)[1]):
            reg_group_lasso = group_lasso.GroupLasso(groups=groups, old_regularisation=True, supress_warning=True,
                                                     fit_intercept=False, group_reg=group_reg, l1_reg=l1_ratio_or_reg)
            reg_group_lasso.fit(x_tilda, y_tilda[:, covariate].reshape(-1, 1))
            B_est[covariate, :] = reg_group_lasso.coef_[:, 0]
            print(B_est[covariate, :])
    elif technique == 'sub-gradient':
        ################################################################
        # possibly breaks correlation structure when doing column-wise #
        ################################################################
        # B_est = np.zeros((np.shape(y_tilda)[1], np.shape(x_tilda)[1]))
        # for covariate in range(np.shape(y_tilda)[1]):
        #     # https://github.com/scikit-learn/scikit-learn/blob/main/sklearn/linear_model/_sgd_fast.pyx
        #     reg_sgd = SGDRegressor()
        #     reg_sgd.fit(x_tilda, y_tilda[:, covariate])
        #     B_est[covariate, :] = reg_sgd.coef_
        #     print(B_est[covariate, :])
        B_est = subgrad_opt(x_tilda, y_tilda, alpha=alpha, max_iter=max_iter)

    # Residual covariance: stack mean coefficients on top of B, regress the
    # extended design against the extended response, and average the outer
    # product of residuals over the number of observations.
    C_est = np.vstack((A_est, B_est.T))

    x_tilda_extend = np.hstack((np.vstack((basis.T, np.zeros_like(basis.T))), x_tilda))
    y_tilda_extend = np.vstack((y.T, np.zeros_like(y.T)))

    const = (y_tilda_extend - np.matmul(x_tilda_extend, C_est))
    # np.shape(x)[1] fails when x is 1-D; fall back to len(x) in that case.
    try:
        Psi_est = np.matmul(const.T, const) / np.shape(x)[1]
    except:
        Psi_est = np.matmul(const.T, const) / len(x)

    return B_est.astype(np.float64), Psi_est.astype(np.float64)
def gamma_v_m_error(errors, x, Psi, B):
    """
    Calculate the mean and variance of the scalar random effect in the
    random-error formulation, following the calculation at the bottom of
    page 9 in Hoff and Niu (2012).

    Parameters
    ----------
    errors : real ndarray
        Residuals (dependent variables minus their estimated mean), shape (p x n).
    x : real ndarray
        Independent variable matrix, shape (m x n).
    Psi : real ndarray
        Base unattributable covariance present in model, shape (p x p).
    B : real ndarray
        Coefficients for covariates explaining attributable covariance;
        shaped so that Psi^{-1} * B^T * x is well-defined.

    Returns
    -------
    m : real ndarray
        Mean of random error formulation, one value per observation.
    v : real ndarray
        Variance of random error formulation, one value per observation.

    Notes
    -----
    Psi may be singular or severely ill-conditioned, hence the progressive
    fallback when computing Psi^{-1} * B^T: exact solve -> least squares ->
    least squares on the normal equations. The excepts are narrowed to
    ``np.linalg.LinAlgError`` (the documented failure mode of ``solve`` and
    ``lstsq``) instead of bare ``except:`` so that unrelated errors —
    including programming mistakes and KeyboardInterrupt — are not swallowed.
    """
    try:
        const = np.matmul(np.linalg.solve(Psi.astype(np.float64), B.T.astype(np.float64)), x)
    except np.linalg.LinAlgError:
        try:
            const = np.matmul(np.linalg.lstsq(Psi.astype(np.float64), B.T.astype(np.float64), rcond=None)[0], x)
        except np.linalg.LinAlgError:
            # Normal-equation least squares: solve (Psi Psi^T) z = Psi B^T instead.
            const = np.matmul(np.linalg.lstsq(Psi.astype(np.float64).dot(Psi.astype(np.float64).T),
                                              Psi.astype(np.float64).dot(B.T.astype(np.float64)), rcond=None)[0], x)

    # v_i = (1 + x_i^T B Psi^{-1} B^T x_i)^{-1};  m_i = v_i * e_i^T Psi^{-1} B^T x_i.
    v = np.abs((1 + (x * np.matmul(B, const)).sum(0)) ** (-1))
    m = v * sum(errors * const)

    return m.astype(np.float64), v.astype(np.float64)
# define covariance regression function with mean given
def cov_reg_given_mean(A_est, basis, x, y, iterations=10, technique='direct', alpha=1.0, l1_ratio_or_reg=0.1,
                       group_reg=1e-6, max_iter=10000, groups=np.arange(76), test_lasso=False, LARS=False,
                       true_coefficients=np.zeros((5, 15))):
    """
    Calculate Psi and B matrices of covariance regression as in Hoff and Niu (2012) except that A_est and basis
    are now given as inputs allowing for customisable definition of "mean" or "trend".

    Parameters
    ----------
    A_est : real ndarray
        Matrix of coefficients corresponding to 'basis' to estimate mean of dependent variables.

    basis : real ndarray
        Matrix of basis functions corresponding to 'A_est' to estimate mean of dependent variables.

    x : real ndarray
        Matrix of independent variables.

    y : real ndarray
        Matrix of dependent variables.

    iterations : positive integer
        Number of iterations of the Covariance Regression algorithm.

    technique : string
        'direct' : Direct calculation method used in Hoff and Niu (2012).
            beta = [(x_tilda^T * x_tilda)^(-1)] * (x_tilda^T * y)

        'lasso' : Least Absolute Shrinkage and Selection Operator (LASSO) Regression.
            Minimize: (1 / (2 * n)) * ||y_tilda - x_tilda * beta||^2_2 +
                      alpha * ||beta||_1

        'ridge' :
            Minimize: ||y_tilda - x_tilda * beta||^2_2 + alpha * ||beta||^2_2
            Equivalent to: beta = [(x_tilda^T * x_tilda + alpha * I)^(-1)] * (x_tilda^T * y)

        'elastic-net' :
            Minimize: (1 / (2 * n)) * ||y_tilda - x_tilda * beta||^2_2 +
                      alpha * l1_ratio * ||beta||_1 + 0.5 * alpha * (1 - l1_ratio) * ||beta||^2_2

            l1_ratio = 1 equivalent to 'lasso'
            l1_ratio = 0 and alpha = 2 equivalent to 'ridge'

        'group-lasso' :
            With G being the grouping of the covariates the objective function is given below.
            Minimize: ||∑g∈G[X_g * beta_g] - y||^2_2 + alpha * ||w||_1 + lambda_group * ∑g∈G||beta_g||_2

        'sub-gradient' :
            Minimize: ||beta||_1
            subject to: x_tilda * beta^T = y
            iterate by: B_{k+1} = B_k - alpha_k(I_p - X^T * (X * X^T)^{-1} * X * sign(B_k))

    alpha : float
        Lambda value used to weight coefficients.

    l1_ratio_or_reg : float
        Ratio of l1 normalisation in elastic-net regression.

    group_reg : float
        Lambda weighting for group lasso regression.

    max_iter : positive integer
        Maximum number of iterations in regularised regression.

    groups : real ndarray (consisting of non-negative integers)
        Vector of groups to be used in group LASSO regression.

    test_lasso : boolean
        Whether to optimise alpha value in LASSO regression.

    LARS : boolean
        If True, fit Least Angle Regression (LassoLars) first and plot the
        coefficient paths before running the covariance-regression iterations.

    true_coefficients : real ndarray
        Known coefficients used only to annotate the LARS coefficient-path plots.

    Returns
    -------
    B_est : real ndarray
        Coefficients for covariates explaining attributable covariance.

    Psi_est : real ndarray
        Base unattributable covariance present in model.

    Notes
    -----
    When iterations == 0 no estimate exists and a NameError is raised, matching
    the requirement of at least one pass of the algorithm.

    """
    if LARS:
        # Diagnostic plot of LASSO-LARS coefficient paths:
        # https://scikit-learn.org/stable/auto_examples/linear_model/plot_lasso_lars.html#sphx-glr-auto-examples-linear-model-plot-lasso-lars-py
        reg = LassoLars(normalize=False, alpha=1e-06)
        reg.fit(X=x.T, y=y.T)
        coef_paths = reg.coef_path_
        for path in range(len(coef_paths)):
            for row in range(np.shape(coef_paths[path])[0]):
                # Normalised cumulative l1 norm of the coefficients along the path.
                xx = np.sum(np.abs(coef_paths[path]), axis=0)
                xx /= xx[-1]
                plt.plot(xx[:len(coef_paths[path][row, :])], coef_paths[path][row, :],
                         label=f'Structure: {int(row + 1)} coef: {int(true_coefficients[path, row])}')
                if np.abs(true_coefficients[path, row]) > 0:
                    # Highlight paths of genuinely non-zero coefficients.
                    # NOTE: matplotlib kwargs are case-sensitive — must be 'linewidth', not 'Linewidth'.
                    plt.plot(xx[:len(coef_paths[path][row, :])], coef_paths[path][row, :], ':', linewidth=3)
                plt.vlines(xx[:len(coef_paths[path][row, :])], min(coef_paths[path][:, -1]),
                           max(coef_paths[path][:, -1]), linestyle="dashed")
            plt.legend(loc='upper left', fontsize=6)
            plt.show()

    m = (np.random.normal(0, 1, np.shape(y)[1])).reshape(-1, 1)  # initialise random-effect means
    v = np.ones_like(m)  # initialise random-effect variances

    mean = np.matmul(A_est.T, basis)  # fixed mean estimate used throughout the iterations

    # Alternate between estimating (B, Psi) and updating the random-effect
    # moments (m, v), as in the EM-style algorithm of Hoff and Niu (2012).
    # Loop variable renamed from 'iter' to avoid shadowing the builtin.
    for _ in range(iterations):
        B_est, Psi_est = calc_B_Psi(m=m, v=v, x=x, y=y, basis=basis, A_est=A_est, technique=technique,
                                    l1_ratio_or_reg=l1_ratio_or_reg, group_reg=group_reg, alpha=alpha,
                                    max_iter=max_iter, groups=groups, test_lasso=test_lasso)
        m, v = gamma_v_m_error(errors=(y - mean), x=x, Psi=Psi_est, B=B_est.T)
        m = m.reshape(-1, 1)
        v = v.reshape(-1, 1)

    B_est = B_est.T

    return B_est.astype(np.float64), Psi_est.astype(np.float64)
# sub-gradient optimisation
def subgrad_opt(x_tilda, y_tilda, max_iter, alpha=1e-12):
    """
    Subgradient optimisation of coefficients.

    Parameters
    ----------
    x_tilda : real ndarray
        Matrix of independent variables.
    y_tilda : real ndarray
        Matrix of dependent variables.
    max_iter : positive integer
        Maximum number of iterations.
    alpha : float
        Scale to be used in square summable, but not summable Polyak step size.

    Returns
    -------
    B_est : real ndarray
        Coefficients for covariates explaining attributable covariance.

    Notes
    -----
    Convergence results do not apply if applied to skinny matrices.
    Starting point of algorithm can be changed and optimised.
    """
    # will not necessarily converge if not satisfied
    # if np.shape(x_tilda)[0] > np.shape(x_tilda)[1]:
    #     raise ValueError('Matrix cannot be skinny/thin.')

    # least-squares starting point: B_sol = ((X^T X)^+ X^T y) written via the pseudo-inverse
    B_sol = np.matmul(y_tilda.T, np.matmul(x_tilda, np.linalg.pinv(np.matmul(x_tilda.T, x_tilda).astype(np.float64)))).T
    B_k = B_sol.copy()

    # per-column l1 objective at the start and best value observed so far
    f_star = np.sum(np.abs(B_sol), axis=0)
    f_best = 1e12 * np.ones_like(f_star)

    for k in range(1, int(max_iter + 1)):
        # projected subgradient step with square-summable (but not summable) step size alpha / k;
        # the projector (I - X^T (X X^T)^+ X) keeps iterates in the least-squares solution set
        B_k_1 = B_k - (alpha / k) * np.matmul((np.identity(np.shape(x_tilda)[1]) -
                                               np.matmul(np.matmul(x_tilda.T,
                                                                   np.linalg.pinv(np.matmul(x_tilda, x_tilda.T))),
                                                         x_tilda)), np.sign(B_k))
        f_potential = np.sum(np.abs(B_k_1), axis=0)
        # accept the candidate column only when it improves the best objective so far
        # (removed leftover debug print of the iteration counter)
        for i in range(len(f_potential)):
            if f_potential[i] <= f_best[i]:
                f_best[i] = f_potential[i]
                B_k[:, i] = B_k_1[:, i]

    B_est = B_k.copy().T

    return B_est
if __name__ == "__main__":
    # --- 3D "L2 norm wells" illustration used to motivate ridge regression paths ---
    dt = 0.025
    x = np.linspace(0, 10, 401)
    y = np.linspace(0, 10, 401)
    Z, Y = np.meshgrid(x, y)
    # noisy cosine surface standing in for the squared-error objective over two coefficients
    X = (10 * np.ones(np.shape(Z)) - (Z / 2) + (Z / 2) * np.cos((1 / 2) * Z / (5 / (2 * np.pi)))
         - (Y / 2) + (Y / 2) * np.cos((1 / 2) * Y / (5 / (2 * np.pi))) + np.random.normal(0, 0.1, np.shape(Z)) + 5) / 1.5
    X_new = X.copy()
    # mask a circular region so the interior of the wells is visible
    X_new[np.asarray(np.sqrt((Z - 10) ** 2 + Y ** 2) <= 7)] = np.nan
    fig = plt.figure()
    fig.set_size_inches(8, 6)
    ax = plt.axes(projection='3d')
    ax.view_init(30, -70)
    ax.set_title(r'L$_2$ Norm Wells')
    # require vmin and vmax as np.nan causes error of colour bar
    cov_plot_1 = ax.plot_surface(Z, Y, X_new, rstride=1, cstride=1, cmap='gist_rainbow', edgecolor='none',
                                 antialiased=False, shade=True, alpha=1, zorder=2, vmin=2.8, vmax=10)
    # draw thin concentric rings (radii 3..7, thickness 2*dt) on top of the surface
    for i in np.arange(3, 8, 1):
        Z_radius = Z * np.asarray(np.sqrt(Z ** 2 + (Y - 10) ** 2) <= (i + dt)) * np.asarray(np.sqrt(Z ** 2 + (Y - 10) ** 2) >= (i - dt))
        Z_radius[Z_radius == 0] = np.nan
        Y_radius = Y * np.asarray(np.sqrt(Z ** 2 + (Y - 10) ** 2) <= (i + dt)) * np.asarray(np.sqrt(Z ** 2 + (Y - 10) ** 2) >= (i - dt))
        Y_radius[Y_radius == 0] = np.nan
        X_radius = 0.1 + X * np.asarray(np.sqrt(Z ** 2 + (Y - 10) ** 2) <= (i + dt)) * np.asarray(np.sqrt(Z ** 2 + (Y - 10) ** 2) >= (i - dt))
        X_radius[X_radius == 0] = np.nan
        cov_plot_2 = ax.plot_surface(Z_radius, Y_radius, X_radius, rstride=1, cstride=1,
                                     edgecolor='black', antialiased=False, shade=True, alpha=1, zorder=1)
    # annotate the ridge regression path and its minima
    plt.plot(x, y, -10 * np.ones_like(y), 'k-', label='Ridge regressions')
    ax.scatter(0.2, 7, 7.2, s=100, label='ridge minimum', zorder=10, c='grey')
    ax.scatter(2.6, 7, 6.2, s=100, zorder=10, c='grey')
    ax.scatter(3.6, 7, 4.6, s=100, zorder=10, c='grey')
    ax.scatter(4.5, 7, 3.2, s=100, zorder=10, c='grey')
    ax.scatter(5.0, 7, 2.2, s=100, zorder=10, c='grey')
    ax.set_xticks(ticks=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    ax.set_xticklabels(labels=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], fontsize=8, ha="left", rotation_mode="anchor")
    ax.set_xlabel(r'$|\beta_1|^2$', fontsize=8)
    ax.set_yticks(ticks=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    ax.set_yticklabels(labels=[10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0], rotation=0, fontsize=8)
    ax.set_ylabel(r'$|\beta_2|^2$', fontsize=8)
    ax.set_zticks(ticks=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    ax.set_zticklabels([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], fontsize=8)
    ax.set_zlabel(r'$||f(\beta_1, \beta_2; t) - g(t)||^2_2$', fontsize=8)
    cbar = plt.colorbar(cov_plot_1)
    box_0 = ax.get_position()
    ax.set_zlim(0, 10)
    ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width, box_0.height])
    ax.legend(loc='best', fontsize=8)
    plt.savefig('aas_figures/norm_well_example.png')
    plt.show()
    # load raw data
    raw_data = pd.read_csv('Peter_Hoff_Data/peter_hoff_data', header=0)
    raw_data = np.asarray(raw_data)
    # prepare data: each raw row is a single fixed-width string parsed into [age, fev, height]
    peter_hoff_data = np.zeros((654, 3))
    for row in range(654):
        if row < 309:
            peter_hoff_data[row, 0] = int(raw_data[row, 0][2])
        else:
            peter_hoff_data[row, 0] = int(raw_data[row, 0][1:3])
        if peter_hoff_data[row, 0] == 3:  # original paper groups those aged 3 into age 4
            peter_hoff_data[row, 0] = 4
        elif peter_hoff_data[row, 0] == 19:  # original paper groups those aged 19 into age 18
            peter_hoff_data[row, 0] = 18
        peter_hoff_data[row, 1] = float(raw_data[row, 0][4:10])  # fev values always 6 text values
        peter_hoff_data[row, 2] = float(raw_data[row, 0][11:15])  # height values always 4 text values
    peter_hoff_data = pd.DataFrame(peter_hoff_data, columns=['age', 'fev', 'height'])
    # knots and time used in original paper
    spline_basis = cubic_b_spline(knots=np.linspace(-17, 39, 9), time=np.linspace(4, 18, 15))
    spline_basis = np.vstack((spline_basis, np.linspace(4, 18, 15)))
    age_vector = np.asarray(peter_hoff_data['age'])
    # map each subject's age onto the matching basis column (ages 4..18 -> columns 0..14)
    spline_basis_transform = np.zeros((6, 654))
    for col in range(len(age_vector)):
        spline_basis_transform[:, col] = spline_basis[:, int(age_vector[col] - 4)]
    # least-squares spline coefficients for the mean FEV and mean height curves
    coef_fev = np.linalg.lstsq(spline_basis_transform.transpose(), np.asarray(peter_hoff_data['fev']), rcond=None)
    coef_fev = coef_fev[0]
    mean_fev = np.matmul(coef_fev, spline_basis)
    coef_height = np.linalg.lstsq(spline_basis_transform.transpose(), np.asarray(peter_hoff_data['height']), rcond=None)
    coef_height = coef_height[0]
    mean_height = np.matmul(coef_height, spline_basis)
    # covariance regression covariates per subject: [1, sqrt(age), age]
    x_cov = np.vstack((np.ones((1, 654)), (age_vector ** (1 / 2)).reshape(1, 654), age_vector.reshape(1, 654)))
    y = np.vstack((np.asarray(peter_hoff_data['fev']).reshape(1, 654),
                   np.asarray(peter_hoff_data['height']).reshape(1, 654)))
    # mean = np.vstack((np.matmul(coef_fev, spline_basis_transform), np.matmul(coef_height, spline_basis_transform)))
    A_est = np.hstack((coef_fev.reshape(6, 1), coef_height.reshape(6, 1)))
    # direct (unregularised) covariance regression fit
    B_est, Psi_est = cov_reg_given_mean(A_est=A_est, basis=spline_basis_transform, x=x_cov, y=y, iterations=100)
    # the same covariates evaluated on the 15-point age grid used for plotting
    mod_x_cov = np.vstack((np.ones((1, 15)),
                           (np.linspace(4, 18, 15) ** (1 / 2)).reshape(1, 15),
                           np.linspace(4, 18, 15).reshape(1, 15)))
    # mean and covariance plots: Sigma(age) = Psi + B^T x x^T B evaluated per age
    cov_3d = np.zeros((2, 2, 15))
    for depth in range(np.shape(cov_3d)[2]):
        cov_3d[:, :, depth] = Psi_est + np.matmul(np.matmul(B_est.T, mod_x_cov[:, depth]).reshape(2, -1),
                                                  np.matmul(mod_x_cov[:, depth].T, B_est).reshape(-1, 2))
fig, axs = plt.subplots(1, 2, figsize=(10, 6))
fig.suptitle('Rank 1 Figure 5 in Hoff and Niu (2012)')
axs[0].scatter(peter_hoff_data['age'], peter_hoff_data['fev'], facecolor='none', edgecolor='black')
axs[0].plot(np.linspace(4, 18, 15), mean_fev, linewidth=3, c='k')
axs[0].plot(np.linspace(4, 18, 15), mean_fev + 2 * np.sqrt(cov_3d[0, 0, :]), c='grey')
axs[0].plot(np.linspace(4, 18, 15), mean_fev - 2 * np.sqrt(cov_3d[0, 0, :]), c='grey')
axs[0].set_xlabel('age')
axs[0].set_ylabel('FEV')
axs[0].set_xticks([4, 6, 8, 10, 12, 14, 16, 18])
axs[0].set_yticks([1, 2, 3, 4, 5, 6])
axs[1].scatter(peter_hoff_data['age'], peter_hoff_data['height'], facecolor='none', edgecolor='black')
axs[1].plot(np.linspace(4, 18, 15), mean_height, linewidth=3, c='k')
axs[1].plot(np.linspace(4, 18, 15), mean_height + 2 * np.sqrt(cov_3d[1, 1, :]), c='grey')
axs[1].plot(np.linspace(4, 18, 15), mean_height - 2 * np.sqrt(cov_3d[1, 1, :]), c='grey')
axs[1].set_xlabel('age')
axs[1].set_ylabel('height')
axs[1].set_xticks([4, 6, 8, 10, 12, 14, 16, 18])
axs[1].set_yticks([45, 50, 55, 60, 65, 70, 75])
plt.savefig('aas_figures/Hoff_Figure_5')
plt.show()
    # --- reproduce Figure 6 of Hoff and Niu (2012): fitted vs empirical (co)variances ---
    fig, axs = plt.subplots(1, 3, figsize=(10, 6))
    plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.3, hspace=None)
    fig.suptitle('Rank 1 Figure 6 in Hoff and Niu (2012)')
    axs[0].plot(np.linspace(4, 18, 15), cov_3d[0, 0, :], c='grey')
    # empirical per-age variance of FEV for comparison
    fev_var = np.zeros_like(np.linspace(4, 18, 15))
    for i, age in enumerate(range(4, 19)):
        fev_var[i] = np.var(np.asarray(peter_hoff_data['fev'])[np.asarray(peter_hoff_data['age']) == age])
    axs[0].scatter(np.linspace(4, 18, 15), fev_var, facecolor='none', edgecolor='black')
    axs[0].set_xlabel('age')
    axs[0].set_ylabel('Var(FEV)')
    axs[0].set_xticks([4, 6, 8, 10, 12, 14, 16, 18])
    axs[0].set_yticks([0.2, 0.4, 0.6, 0.8, 1.0])
    axs[1].plot(np.linspace(4, 18, 15), cov_3d[1, 1, :], c='grey')
    # empirical per-age variance of height
    height_var = np.zeros_like(np.linspace(4, 18, 15))
    for i, age in enumerate(range(4, 19)):
        height_var[i] = np.var(np.asarray(peter_hoff_data['height'])[np.asarray(peter_hoff_data['age']) == age])
    axs[1].scatter(np.linspace(4, 18, 15), height_var, facecolor='none', edgecolor='black')
    axs[1].set_xlabel('age')
    axs[1].set_ylabel('Var(height)')
    axs[1].set_xticks([4, 6, 8, 10, 12, 14, 16, 18])
    axs[1].set_yticks([4, 6, 8, 10, 12])
    # fitted correlation curve vs empirical per-age correlation of FEV and height
    axs[2].plot(np.linspace(4, 18, 15), cov_3d[0, 1, :] / (np.sqrt(cov_3d[0, 0, :]) * np.sqrt(cov_3d[1, 1, :])), c='grey')
    fev_height_cov = np.zeros_like(np.linspace(4, 18, 15))
    for i, age in enumerate(range(4, 19)):
        fev_height_cov[i] = np.corrcoef(np.asarray(peter_hoff_data['fev'])[np.asarray(peter_hoff_data['age']) == age],
                                        np.asarray(peter_hoff_data['height'])[
                                            np.asarray(peter_hoff_data['age']) == age])[0, 1]
    axs[2].scatter(np.linspace(4, 18, 15), fev_height_cov, facecolor='none', edgecolor='black')
    axs[2].set_xlabel('age')
    axs[2].set_ylabel('Cor(FEV,height)')
    axs[2].set_xticks([4, 6, 8, 10, 12, 14, 16, 18])
    axs[2].set_yticks([0.5, 0.6, 0.7, 0.8, 0.9])
    plt.savefig('aas_figures/Hoff_Figure_6')
    plt.show()
    # additions - ridge, lasso: refit the covariance regression with each regularisation technique
    A_est = np.hstack((coef_fev.reshape(6, 1), coef_height.reshape(6, 1)))
    B_est_ridge, Psi_est_ridge = cov_reg_given_mean(A_est=A_est, basis=spline_basis_transform, x=x_cov, y=y,
                                                    iterations=100, technique='ridge')
    B_est_lasso, Psi_est_lasso = cov_reg_given_mean(A_est=A_est, basis=spline_basis_transform, x=x_cov, y=y,
                                                    iterations=100, technique='lasso', alpha=0.05)
    B_est_net, Psi_est_net = cov_reg_given_mean(A_est=A_est, basis=spline_basis_transform, x=x_cov, y=y,
                                                iterations=100, technique='elastic-net', alpha=0.01,
                                                l1_ratio_or_reg=0.1)
    B_est_sub, Psi_est_sub = cov_reg_given_mean(A_est=A_est, basis=spline_basis_transform, x=x_cov, y=y,
                                                iterations=10, technique='sub-gradient', alpha=0.01, max_iter=10)
    B_est_group, Psi_est_group = cov_reg_given_mean(A_est=A_est, basis=spline_basis_transform, x=x_cov, y=y,
                                                    iterations=100, technique='group-lasso', alpha=0.01, group_reg=1e-9,
                                                    l1_ratio_or_reg=1e-6, groups=np.asarray([0, 1, 1]).reshape(-1, 1))
    # mean and covariance plots: evaluate Sigma(age) = Psi + B^T x x^T B for every technique
    cov_3d_ridge = np.zeros((2, 2, 15))
    cov_3d_lasso = np.zeros((2, 2, 15))
    cov_3d_net = np.zeros((2, 2, 15))
    cov_3d_sub = np.zeros((2, 2, 15))
    cov_3d_group = np.zeros((2, 2, 15))
    for depth in range(np.shape(cov_3d)[2]):
        cov_3d_ridge[:, :, depth] = \
            Psi_est_ridge + np.matmul(np.matmul(B_est_ridge.T, mod_x_cov[:, depth]).reshape(2, -1),
                                      np.matmul(mod_x_cov[:, depth].T, B_est_ridge).reshape(-1, 2))
        cov_3d_lasso[:, :, depth] = \
            Psi_est_lasso + np.matmul(np.matmul(B_est_lasso.T, mod_x_cov[:, depth]).reshape(2, -1),
                                      np.matmul(mod_x_cov[:, depth].T, B_est_lasso).reshape(-1, 2))
        cov_3d_net[:, :, depth] = \
            Psi_est_net + np.matmul(np.matmul(B_est_net.T, mod_x_cov[:, depth]).reshape(2, -1),
                                    np.matmul(mod_x_cov[:, depth].T, B_est_net).reshape(-1, 2))
        cov_3d_sub[:, :, depth] = \
            Psi_est_sub + np.matmul(np.matmul(B_est_sub.T, mod_x_cov[:, depth]).reshape(2, -1),
                                    np.matmul(mod_x_cov[:, depth].T, B_est_sub).reshape(-1, 2))
        cov_3d_group[:, :, depth] = \
            Psi_est_group + np.matmul(np.matmul(B_est_group.T, mod_x_cov[:, depth]).reshape(2, -1),
                                      np.matmul(mod_x_cov[:, depth].T, B_est_group).reshape(-1, 2))
fig, axs = plt.subplots(1, 2, figsize=(8, 5))
fig.suptitle('Rank 1 Figure 5 in Hoff and Niu (2012)')
axs[0].scatter(peter_hoff_data['age'], peter_hoff_data['fev'], facecolor='none', edgecolor='black')
axs[0].plot(np.linspace(4, 18, 15), mean_fev, linewidth=3, c='k')
axs[0].plot(np.linspace(4, 18, 15), mean_fev + 2 * np.sqrt(cov_3d[0, 0, :]), c='grey')
axs[0].plot(np.linspace(4, 18, 15), mean_fev - 2 * np.sqrt(cov_3d[0, 0, :]), c='grey')
axs[0].plot(np.linspace(4, 18, 15), mean_fev + 2 * np.sqrt(cov_3d_ridge[0, 0, :]), c='red')
axs[0].plot(np.linspace(4, 18, 15), mean_fev - 2 * np.sqrt(cov_3d_ridge[0, 0, :]), c='red')
axs[0].plot(np.linspace(4, 18, 15), mean_fev + 2 * np.sqrt(cov_3d_lasso[0, 0, :]), c='green')
axs[0].plot(np.linspace(4, 18, 15), mean_fev - 2 * np.sqrt(cov_3d_lasso[0, 0, :]), c='green')
axs[0].plot(np.linspace(4, 18, 15), mean_fev + 2 * np.sqrt(cov_3d_net[0, 0, :]), c='blue')
axs[0].plot(np.linspace(4, 18, 15), mean_fev - 2 * np.sqrt(cov_3d_net[0, 0, :]), c='blue')
axs[0].plot(np.linspace(4, 18, 15), mean_fev + 2 * np.sqrt(cov_3d_sub[0, 0, :]), c='cyan')
axs[0].plot(np.linspace(4, 18, 15), mean_fev - 2 * np.sqrt(cov_3d_sub[0, 0, :]), c='cyan')
axs[0].plot(np.linspace(4, 18, 15), mean_fev + 2 * np.sqrt(cov_3d_group[0, 0, :]), c='magenta')
axs[0].plot(np.linspace(4, 18, 15), mean_fev - 2 * np.sqrt(cov_3d_group[0, 0, :]), c='magenta')
axs[0].set_xlabel('age')
axs[0].set_ylabel('FEV')
axs[0].set_xticks([4, 6, 8, 10, 12, 14, 16, 18])
axs[0].set_yticks([1, 2, 3, 4, 5, 6])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width, box_0.height])
axs[1].scatter(peter_hoff_data['age'], peter_hoff_data['height'], facecolor='none', edgecolor='black')
axs[1].plot(np.linspace(4, 18, 15), mean_height, linewidth=3, c='k')
axs[1].plot(np.linspace(4, 18, 15), mean_height + 2 * np.sqrt(cov_3d[1, 1, :]), c='grey',
label=textwrap.fill('Direct estimation', 11))
axs[1].plot(np.linspace(4, 18, 15), mean_height - 2 * np.sqrt(cov_3d[1, 1, :]), c='grey')
axs[1].plot(np.linspace(4, 18, 15), mean_height + 2 * np.sqrt(cov_3d_ridge[1, 1, :]), c='red',
label=textwrap.fill('Ridge regression', 11))
axs[1].plot(np.linspace(4, 18, 15), mean_height - 2 * np.sqrt(cov_3d_ridge[1, 1, :]), c='red')
axs[1].plot(np.linspace(4, 18, 15), mean_height + 2 * np.sqrt(cov_3d_lasso[1, 1, :]), c='green',
label=textwrap.fill('LASSO regression', 11))
axs[1].plot(np.linspace(4, 18, 15), mean_height - 2 * np.sqrt(cov_3d_lasso[1, 1, :]), c='green')
axs[1].plot(np.linspace(4, 18, 15), mean_height + 2 * np.sqrt(cov_3d_net[1, 1, :]), c='blue',
label=textwrap.fill('Elastic-net regression', 11))
axs[1].plot(np.linspace(4, 18, 15), mean_height - 2 * np.sqrt(cov_3d_net[1, 1, :]), c='blue')
axs[1].plot(np.linspace(4, 18, 15), mean_height + 2 * np.sqrt(cov_3d_sub[1, 1, :]), c='cyan',
label=textwrap.fill('Subgradient optimization', 12))
axs[1].plot(np.linspace(4, 18, 15), mean_height - 2 * np.sqrt(cov_3d_sub[1, 1, :]), c='cyan')
axs[1].plot(np.linspace(4, 18, 15), mean_height + 2 * np.sqrt(cov_3d_group[1, 1, :]), c='magenta',
label=textwrap.fill('Group LASSO regression', 11))
axs[1].plot(np.linspace(4, 18, 15), mean_height - 2 * np.sqrt(cov_3d_group[1, 1, :]), c='magenta')
axs[1].set_xlabel('age')
axs[1].set_ylabel('height')
axs[1].set_xticks([4, 6, 8, 10, 12, 14, 16, 18])
axs[1].set_yticks([45, 50, 55, 60, 65, 70, 75])
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width, box_1.height])
axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
plt.savefig('aas_figures/Hoff_Figure_5_RCR')
plt.show()
    # --- Figure 6 variant comparing fitted (co)variances across all techniques ---
    fig, axs = plt.subplots(1, 3, figsize=(8, 5))
    plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.3, hspace=None)
    fig.suptitle('Rank 1 Figure 6 in Hoff and Niu (2012)')
    axs[0].plot(np.linspace(4, 18, 15), cov_3d[0, 0, :], c='grey')
    axs[0].plot(np.linspace(4, 18, 15), cov_3d_ridge[0, 0, :], c='red')
    axs[0].plot(np.linspace(4, 18, 15), cov_3d_lasso[0, 0, :], c='green')
    axs[0].plot(np.linspace(4, 18, 15), cov_3d_net[0, 0, :], c='blue')
    axs[0].plot(np.linspace(4, 18, 15), cov_3d_sub[0, 0, :], c='cyan')
    axs[0].plot(np.linspace(4, 18, 15), cov_3d_group[0, 0, :], c='magenta')
    # empirical per-age variance of FEV
    fev_var = np.zeros_like(np.linspace(4, 18, 15))
    for i, age in enumerate(range(4, 19)):
        fev_var[i] = np.var(np.asarray(peter_hoff_data['fev'])[np.asarray(peter_hoff_data['age']) == age])
    axs[0].scatter(np.linspace(4, 18, 15), fev_var, facecolor='none', edgecolor='black')
    axs[0].set_xlabel('age', fontsize=8)
    axs[0].set_ylabel('Var(FEV)', fontsize=8)
    axs[0].set_xticks([4, 6, 8, 10, 12, 14, 16, 18])
    axs[0].set_yticks([0.2, 0.4, 0.6, 0.8, 1.0])
    plt.setp(axs[0].get_xticklabels(), fontsize=8)
    plt.setp(axs[0].get_yticklabels(), fontsize=8)
    box_0 = axs[0].get_position()
    axs[0].set_position([box_0.x0 - 0.051, box_0.y0, box_0.width, box_0.height])
    axs[1].plot(np.linspace(4, 18, 15), cov_3d[1, 1, :], c='grey')
    axs[1].plot(np.linspace(4, 18, 15), cov_3d_ridge[1, 1, :], c='red')
    axs[1].plot(np.linspace(4, 18, 15), cov_3d_lasso[1, 1, :], c='green')
    axs[1].plot(np.linspace(4, 18, 15), cov_3d_net[1, 1, :], c='blue')
    axs[1].plot(np.linspace(4, 18, 15), cov_3d_sub[1, 1, :], c='cyan')
    axs[1].plot(np.linspace(4, 18, 15), cov_3d_group[1, 1, :], c='magenta')
    # empirical per-age variance of height
    height_var = np.zeros_like(np.linspace(4, 18, 15))
    for i, age in enumerate(range(4, 19)):
        height_var[i] = np.var(np.asarray(peter_hoff_data['height'])[np.asarray(peter_hoff_data['age']) == age])
    axs[1].scatter(np.linspace(4, 18, 15), height_var, facecolor='none', edgecolor='black')
    axs[1].set_xlabel('age', fontsize=8)
    axs[1].set_ylabel('Var(height)', fontsize=8)
    axs[1].set_xticks([4, 6, 8, 10, 12, 14, 16, 18])
    axs[1].set_yticks([4, 6, 8, 10, 12])
    plt.setp(axs[1].get_xticklabels(), fontsize=8)
    plt.setp(axs[1].get_yticklabels(), fontsize=8)
    box_1 = axs[1].get_position()
    axs[1].set_position([box_1.x0 - 0.051, box_1.y0, box_1.width, box_1.height])
    # fitted correlation curves per technique (covariance normalised by both sds)
    axs[2].plot(np.linspace(4, 18, 15), cov_3d[0, 1, :] / (np.sqrt(cov_3d[0, 0, :]) * np.sqrt(cov_3d[1, 1, :])),
                c='grey', label=textwrap.fill('Direct estimation', 11))
    axs[2].plot(np.linspace(4, 18, 15), cov_3d_ridge[0, 1, :] / (np.sqrt(cov_3d_ridge[0, 0, :]) * np.sqrt(cov_3d_ridge[1, 1, :])),
                c='red', label=textwrap.fill('Ridge regression', 11))
    axs[2].plot(np.linspace(4, 18, 15),
                cov_3d_lasso[0, 1, :] / (np.sqrt(cov_3d_lasso[0, 0, :]) * np.sqrt(cov_3d_lasso[1, 1, :])),
                c='green', label=textwrap.fill('LASSO regression', 11))
    axs[2].plot(np.linspace(4, 18, 15),
                cov_3d_net[0, 1, :] / (np.sqrt(cov_3d_net[0, 0, :]) * np.sqrt(cov_3d_net[1, 1, :])),
                c='blue', label=textwrap.fill('Elastic-net regression', 11))
    axs[2].plot(np.linspace(4, 18, 15),
                cov_3d_sub[0, 1, :] / (np.sqrt(cov_3d_sub[0, 0, :]) * np.sqrt(cov_3d_sub[1, 1, :])),
                c='cyan', label=textwrap.fill('Subgradient optimization', 12))
    axs[2].plot(np.linspace(4, 18, 15),
                cov_3d_group[0, 1, :] / (np.sqrt(cov_3d_group[0, 0, :]) * np.sqrt(cov_3d_group[1, 1, :])),
                c='magenta', label=textwrap.fill('Group LASSO regression', 11))
    # empirical per-age correlation of FEV and height
    fev_height_cov = np.zeros_like(np.linspace(4, 18, 15))
    for i, age in enumerate(range(4, 19)):
        fev_height_cov[i] = np.corrcoef(np.asarray(peter_hoff_data['fev'])[np.asarray(peter_hoff_data['age']) == age],
                                        np.asarray(peter_hoff_data['height'])[
                                            np.asarray(peter_hoff_data['age']) == age])[0, 1]
    axs[2].scatter(np.linspace(4, 18, 15), fev_height_cov, facecolor='none', edgecolor='black')
    axs[2].set_xlabel('age', fontsize=8)
    axs[2].set_ylabel('Cor(FEV,height)', fontsize=8)
    axs[2].set_xticks([4, 6, 8, 10, 12, 14, 16, 18])
    axs[2].set_yticks([0.5, 0.6, 0.7, 0.8, 0.9])
    plt.setp(axs[2].get_xticklabels(), fontsize=8)
    plt.setp(axs[2].get_yticklabels(), fontsize=8)
    box_2 = axs[2].get_position()
    axs[2].set_position([box_2.x0 - 0.051, box_2.y0, box_2.width, box_2.height])
    axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
    plt.savefig('aas_figures/Hoff_Figure_6_RCR')
    plt.show()
| Cole-vJ/CovRegpy | CovRegpy_RCR.py | CovRegpy_RCR.py | py | 39,406 | python | en | code | 1 | github-code | 90 |
3349980416 | import os
from stackclass import stack
from stackclasswithLL import stack as LLstack
import copy
class bcolors:  # ANSI escape sequences used to colour console output
    """ANSI terminal escape codes; ENDC resets all styling."""
    PURPLE = '\033[95m'
    BLUE = '\033[94m'
    CYAN = '\033[96m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def clearConsole():
    """Clear the terminal screen: 'cls' on Windows/DOS, 'clear' elsewhere."""
    command = 'cls' if os.name in ('nt', 'dos') else 'clear'
    os.system(command)
def printmenu():
    """Print the main menu banner and the numbered options to the console."""
    print((bcolors.RED + " president party " + bcolors.ENDC).center(110,"~"))
    print("\n")
    print("1.add a car".center(100," "))
    print("2.show cars after you".center(100," "))
    print("3.show all cars".center(100," "))
    print("(for exit type \"exit\")".center(100," "))
def run(isStackWithLL):
    """Interactive main loop: show the menu and dispatch on the user's choice until "exit".

    isStackWithLL selects the stack implementation backing the parking model
    (False -> array-based stack, True -> linked-list stack).
    """
    myqu1 = q1(isStackWithLL)
    menunumber="0"
    isloop = True
    while isloop:
        if menunumber=="1":  ## add a plate to the parking lot
            clearConsole()
            myqu1.addPlate()
            menunumber = "0"
        elif menunumber=="2":  ## show plates parked after the user's car
            clearConsole()
            myqu1.printplatesafter()
            menunumber = "0"
        elif menunumber=="3":  ## show all plates in the parking lot
            clearConsole()
            myqu1.printAllPalates()
            menunumber = "0"
        elif menunumber=="exit":  ## leave the loop and end the program
            isloop = False
        else:
            clearConsole()  ## "0" or any unknown input: redraw the menu and read a choice
            printmenu()
            menunumber = input("".center(50," "))
class q1:
    """Parking-lot model: licence plates stored LIFO on a stack (array or linked-list backed)."""
    def __init__(self,isStackWithLL):
        # isStackWithLL: False -> array-based stack, True -> linked-list stack
        self.isStackWithLL = isStackWithLL
        if isStackWithLL==False: ## Implementation with array stack
            self.mystack = stack()
        else: ## Implementation with linked list stack
            self.mystack = LLstack()
        try:
            datafile = open("data.txt", "r") ## seed the lot with plates from data.txt (one per line)
            platenumbers = datafile.read().splitlines()
            for plate in platenumbers:
                self.mystack.push(plate)
            datafile.close()
            del platenumbers
        except:
            # NOTE(review): bare except makes seeding best-effort -- a missing or
            # unreadable data.txt (and any other error here) is silently ignored
            pass
    def addPlate(self): ## add a plate to the stack
        """Prompt for a plate number and push it unless it is already parked."""
        print((bcolors.RED + " add car menu " + bcolors.ENDC).center(110,"~"))
        print("\n")
        print("please enter a plate number : ".center(100," "))
        temp = input("".center(47," "))
        # serach returns -1 when the plate is absent (works on a deep copy, so mystack is untouched)
        if self.serach(self.mystack,temp)==-1:
            self.mystack.push(temp)
            print((bcolors.GREEN + f'{self.mystack.peek()} added to parking successfuly.' + bcolors.ENDC).center(110," "))
            print("press enter to continue...".center(100," "))
            input("".center(47," "))
        else:
            print((bcolors.RED + f'cant add plate {temp} to parking list.' + bcolors.ENDC).center(110," "))
            print((bcolors.RED + "this plate is already exist!!" + bcolors.ENDC).center(110," "))
            print("press enter to continue...".center(100," "))
            input("".center(47," "))
    def printplatesafter(self): ## print the plates parked after (on top of) the entered plate
        """Show every plate pushed after the user's plate, i.e. the cars blocking it."""
        print((bcolors.RED + " print car menu " + bcolors.ENDC).center(110,"~"))
        print("\n")
        print("please enter a plate number : ".center(100," "))
        mycarplate = input("".center(47," "))
        mystack = copy.deepcopy(self.mystack)
        ans = self.serach(mystack,mycarplate)
        if ans == -1:
            print((bcolors.RED + "your car isn`t in parking" + bcolors.ENDC).center(110," "))
        else:
            self.printstack(ans)
        print("press enter to continue...".center(100," "))
        input("".center(47," "))
    def printstack(self,instack):
        """Print instack's contents between decorative rules, without consuming the caller's stack."""
        print(((bcolors.BLUE +"~" + bcolors.ENDC)*100))
        tempstack = copy.deepcopy(instack)
        temp=[]
        if (not tempstack.isEmpty()):
            # relies on the stack exposing topIndex == number of stored items - 1
            for i in range(0,tempstack.topIndex+1):
                temp.append(tempstack.pop())
        # NOTE(review): temp is always a list here, so this check is effectively always true
        if temp != None:
            for plate in temp:
                print(plate.center(100," "))
        print(((bcolors.BLUE +"~" + bcolors.ENDC)*100))
    def serach(self,instack,mycarplate): ## search the stack for a plate (sic: "serach")
        """Return a stack of the plates from the top down to mycarplate (match on top), or -1 if absent."""
        mystack = copy.deepcopy(instack)
        index = mystack.topIndex  # NOTE(review): unused local
        if self.isStackWithLL==False: ## Implementation with array stack
            tempstack = stack()
        else: ## Implementation with linked list stack
            tempstack = LLstack()
        isfind = False
        # pop from the copy until the plate is found; popped items accumulate in tempstack
        while (not mystack.isEmpty()) and (not isfind):
            temp = mystack.pop()
            if temp == mycarplate:
                tempstack.push(temp)
                isfind = True
                return tempstack
            else:
                tempstack.push(temp)
        return -1
    def printAllPalates(self): ## print every plate currently in the lot
        """Print all parked plates (reversed into bottom-to-top order before printing)."""
        print((bcolors.RED + " print all car menu " + bcolors.ENDC).center(110,"~"))
        print("\n")
        mystack = copy.deepcopy(self.mystack)
        index = mystack.topIndex  # NOTE(review): unused local
        if self.isStackWithLL==False: ## Implementation with array stack
            tempstack = stack()
        else: ## Implementation with linked list stack
            tempstack = LLstack()
        # popping everything into tempstack reverses the order (oldest car ends on top)
        while not mystack.isEmpty():
            temp = mystack.pop()
            tempstack.push(temp)
        self.printstack(tempstack)
        print("press enter to continue...".center(100," "))
        input("".center(47," "))
| ali79hm/DSfinalproject | DSfinalproject/question1.py | question1.py | py | 5,503 | python | en | code | 0 | github-code | 90 |
27069636458 | # STEP4 文字を翻訳する
# https://github.com/DeepLcom/deepl-python
import deepl
import os
API_KEY = os.environ["API_KEY"]
def translate(lang: int, txt: str) -> str:
    """Translate txt with DeepL: lang == 0 means English -> Japanese, anything else Japanese -> (US) English."""
    translator = deepl.Translator(API_KEY)
    # 0 selects Japanese output; any other value selects US-English output
    target = "JA" if lang == 0 else "EN-US"
    result = translator.translate_text(txt, target_lang=target)
    return result.text
| YutoKatsuno/image_and_language | translate.py | translate.py | py | 493 | python | ja | code | 1 | github-code | 90 |
import pyglet
# Single-window pyglet demo: typed text is shown red-on-green in a small layout.
window = pyglet.window.Window()
# Create a document to hold the input text
document = pyglet.text.document.FormattedDocument()
# Set the color of the text and the background
document.set_style(0, len(document.text), dict(color=(255, 0, 0, 255), background_color=(0, 255, 0, 255)))
# Create a layout to display the document
layout = pyglet.text.layout.TextLayout(document, width=200, height=200, multiline=False)
@window.event
def on_draw():
    # Redraw handler: clear the window and render the current layout.
    window.clear()
    layout.draw()
# Update the document when the input field is updated
@window.event
def on_text(text):
    # NOTE(review): on_text delivers only the newly typed character(s); assigning
    # replaces the whole document instead of appending -- confirm this is intended.
    document.text = text
    # Set the color of the text and the background
    document.set_style(0, len(document.text), dict(color=(255, 0, 0, 255), background_color=(0, 255, 0, 255)))
# Set the focus to the input field
@window.event
def on_mouse_press(x, y, button, modifiers):
    if button == pyglet.window.mouse.LEFT:
        # NOTE(review): a plain TextLayout has no `caret` attribute -- a Caret is normally
        # created around an IncrementalTextLayout, so this line likely raises AttributeError.
        layout.caret.on_mouse_press(x, y, button, modifiers)
pyglet.app.run()
| themangokid/gen_dev_slides | pyglet_testing.py | pyglet_testing.py | py | 989 | python | en | code | 0 | github-code | 90 |
13840490905 | from prism import __version__
from discord.ext import commands
# Command class
class Version(commands.Cog):
    """Cog exposing the /version slash command with git build information."""
    def __init__(self, bot) -> None:
        self.bot = bot
        self.core = bot.core  # shared helpers: fetch_output (shell) and embed (builder)
        self.github_repo = "https://github.com/ii-Python/Prism-v3"
    @commands.slash_command(description = "Show Prism's version.")
    async def version(self, ctx) -> any:
        """Respond with the bot version, current git branch, and recent commit history."""
        # Grab git information (last five commit subjects/authors, branch, last commit date)
        last_commits = self.core.fetch_output(["git", "log", "-n", "5", "--oneline", "--pretty=format:'%s <%an>'"]).replace("'", "")
        curnt_branch = self.core.fetch_output("git branch --show-current")
        lastmodified = self.core.fetch_output("git log -1 --pretty=format:%cd")
        # Construct embed
        embed = self.core.embed(
            title = f"Prism v{__version__} - Build Info",
            description = f"Current branch: `{curnt_branch}`",
            url = self.github_repo,
            footer = ctx
        )
        embed.add_field(name = "Last 5 commits", value = f"```\n{last_commits}\n```", inline = False)
        embed.add_field(name = "Last modified", value = f"```\n{lastmodified}\n```", inline = False)
        return await ctx.respond(embed = embed)
# Link
def setup(bot) -> None:
    """Extension entry point: register the Version cog on the bot."""
    # do not return add_cog's result -- the declared return type is None
    bot.add_cog(Version(bot))
| iiPythonx/Prism-v3 | prism/cmds/general/version.py | version.py | py | 1,266 | python | en | code | 3 | github-code | 90 |
40655365787 | import logging
import os
import sqlalchemy
import unittest
from studi import app
from studi import sqlalchemy_orm, upload, util, custom_error
from studi.sqlalchemy_orm import Notes, Clauses, ClausePoints
def gen_logger(test_name):
    """Return a DEBUG-level logger writing to <TEST_LOG_DIR>/<test_name>.log.

    logging.getLogger returns the same logger object for the same name, so this
    now guards against attaching a second FileHandler (the original re-added a
    handler on every call, duplicating every log line).
    """
    logger = logging.getLogger(test_name)
    logger.setLevel(logging.DEBUG)
    if not logger.handlers:  # only attach a handler the first time this name is requested
        logger_handler = logging.FileHandler(os.path.join(app.config['TEST_LOG_DIR'], test_name + '.log'))
        logger_handler.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        logger_handler.setFormatter(formatter)
        logger.addHandler(logger_handler)
    return logger
test_name = "test_delete"
logger = gen_logger(test_name)
class TestDeleteDB(unittest.TestCase):
    """Deletion tests for Notes/Clauses/ClausePoints against a throwaway sqlite DB.

    To skip an individual test, decorate it with @unittest.skip("skipping").
    """
    def setUp(self):
        self.app = app.test_client()
        app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db/test_studi.db'
        # create a fresh testing DB and insert three identical dummy notes
        if os.path.exists("../studi/db/test_studi.db"):
            sqlalchemy_orm.drop_db(False)
        sqlalchemy_orm.create_db(False)
        upload.insert_csv_to_db(False) # note_id : 1
        upload.insert_csv_to_db(False) # note_id : 2
        upload.insert_csv_to_db(False) # note_id : 3
    # Delete one Note (with related clause, clausePoints Data)
    # @unittest.skip("skipping")
    def test_delete_one_note(self):
        with app.app_context():
            try:
                sqlalchemy_orm.delete_note_and_related_data_from_db(3)
                # Check if result is empty
                note_result = sqlalchemy_orm.get_item_from_db(Notes, {'note_id' : 3})
                clauses_result = sqlalchemy_orm.get_item_from_db(Clauses, {'note_id' : 3})
                clausePoints_result = sqlalchemy_orm.get_item_from_db(ClausePoints, {'note_id' : 3})
                self.assertEqual(note_result, [])
                self.assertEqual(clauses_result, [])
                self.assertEqual(clausePoints_result, [])
            # each handler wraps the failure in a project error (which logs it) and re-raises
            # so the test still fails
            except sqlalchemy.exc.SQLAlchemyError:
                error_message = util.traceback_custom_error()
                error = custom_error.SQLAlchemyError(error_message)
                error.set_logger(logger)
                error.to_dict()
                raise
            except AssertionError:
                error_message = util.traceback_custom_error()
                error = custom_error.AssertionError(error_message)
                error.set_logger(logger)
                error.to_dict()
                raise
            except:
                error_message = util.traceback_custom_error()
                error = custom_error.UnExpectedError(error_message)
                error.set_logger(logger)
                error.to_dict()
                raise
    # Delete multiple Note (with related clause, clausePoints Data)
    # @unittest.skip("skipping")
    def test_delete_multiple_note(self):
        with app.app_context():
            try:
                sqlalchemy_orm.delete_data_from_db(Notes, {'note_id' : [1, 2]})
                # Check if result is empty
                note_result = sqlalchemy_orm.get_item_from_db(Notes, {'note_id' : [1, 2]})
                self.assertEqual(note_result, [])
            except sqlalchemy.exc.SQLAlchemyError:
                error_message = util.traceback_custom_error()
                error = custom_error.SQLAlchemyError(error_message)
                error.set_logger(logger)
                error.to_dict()
                raise
            except AssertionError:
                error_message = util.traceback_custom_error()
                error = custom_error.AssertionError(error_message)
                error.set_logger(logger)
                error.to_dict()
                raise
            except:
                error_message = util.traceback_custom_error()
                error = custom_error.UnExpectedError(error_message)
                error.set_logger(logger)
                error.to_dict()
                raise
    # Delete Clause (with related clausePoints)
    # @unittest.skip("skipping")
    def test_delete_clause(self):
        with app.app_context():
            try:
                sqlalchemy_orm.delete_data_from_db(Clauses, {'clause_id' : 11})
                clause_result = sqlalchemy_orm.get_item_from_db(Clauses, {'clause_id' : 11})
                clause_point_result = sqlalchemy_orm.get_item_from_db(ClausePoints, {'clause_id' : 11})
                # Check if result is empty
                self.assertEqual(clause_result,[])
                self.assertEqual(clause_point_result, [])
            except sqlalchemy.exc.SQLAlchemyError:
                error_message = util.traceback_custom_error()
                error = custom_error.SQLAlchemyError(error_message)
                error.set_logger(logger)
                error.to_dict()
                raise
            except AssertionError:
                error_message = util.traceback_custom_error()
                error = custom_error.AssertionError(error_message)
                error.set_logger(logger)
                error.to_dict()
                raise
            except:
                error_message = util.traceback_custom_error()
                error = custom_error.UnExpectedError(error_message)
                error.set_logger(logger)
                error.to_dict()
                raise
if __name__ == "__main__":
unittest.main()
| GTedHa/studi | tests/test_delete.py | test_delete.py | py | 5,511 | python | en | code | 0 | github-code | 90 |
#!/usr/bin/env python
# Mirror web-config subtrees and working dirs to the USB backup disk via rsync.
from WxMAP2 import *
ropt=''
#ropt='norun'
# ropt appears to be a run toggle passed to mf.runcmd ('norun' presumably = dry run) -- confirm
brOpt='-alv '
# -- web-config
#
# NOTE(review): 'tceps' is listed twice, so that directory is synced twice -- confirm intended
wdirs=['config','configa','tceps','jtdiag','tcact','tcdiag','tceps','tcgen']
wsdir='/data/w22/web-config'
usdir='/usb1/w22/web-config'
for wdir in wdirs:
    Source='%s/%s/'%(wsdir,wdir)
    Target='%s/%s/'%(usdir,wdir)
    # skip 2019-2021 data everywhere except tcact, which is synced in full
    exOpt='''%s --exclude "*2021*" --exclude "*2020*" --exclude "*2019*"'''%(brOpt)
    if(mf.find(wdir,'tcact')): exOpt=brOpt
    cmd="rsync %s %s %s "%(exOpt,Source,Target)
    mf.runcmd(cmd,ropt)
# -- dotfiles (cp -n: copy only if not already present)
#
cmd="cp -n /home/fiorino/dotfiles.tar /usb1/."
mf.runcmd(cmd,ropt)
exOpt=brOpt
Source='/usb1/ptmp/'
Target='/dat1/ptmp/'
# -- ptmp first
#
cmd="rsync %s %s %s "%(exOpt,Source,Target)
mf.runcmd(cmd,ropt)
# remaining trees: source dir -> backup dir; w22 excludes web-config (already synced above)
sdirs={
    '/data/w22':'/usb1/w22',
    '/dat1/w21-git':'/usb1/w21-git',
}
for sdir in sdirs.keys():
    Source=sdir
    Target=sdirs[sdir]
    exOpt=brOpt
    if(mf.find(sdir,'w22')):
        exOpt="%s --exclude web-config"%(brOpt)
    cmd="rsync %s %s/ %s/ "%(exOpt,Source,Target)
    mf.runcmd(cmd,ropt)
| tenkiman/wxmap2 | etc/p-rsync-w22-usb1.py | p-rsync-w22-usb1.py | py | 1,099 | python | en | code | 0 | github-code | 90 |
11726597443 | import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
def C2F(c):
    """Convert a temperature from degrees Celsius to degrees Fahrenheit."""
    fahrenheit = c * 9 / 5 + 32
    return fahrenheit
# Read the whole tab-separated data file (first line is a header row).
with open("timeTemp.txt", 'r') as f:
    lines = f.readlines()

# Echo the first few raw lines as a sanity check.
print(lines[0:5])

# create arrays for time and temperature
t = []
T = []
for i in lines[1:]: # do every line except the first
    # print(i)
    # split each line (columns are tab-delimited: time, temperature)
    a, b = i.strip().split('\t')
    # convert from string to numbers
    t.append(float(a))
    T.append(float(b))

# Convert the Python lists to NumPy float arrays for the statistics below.
t = np.array(t, float)
T = np.array(T, float)

print(f't: {t}')
print(f'T: {T}')

# Summary statistics of the temperature series (in degrees Celsius).
avgT = np.average(T)
maxT = np.max(T)
skewT = stats.skew(T)

# Report each statistic in both Celsius and Fahrenheit (via C2F above).
print(f"Avg T: {avgT} °C, {C2F(avgT)} °F")
print(f"Max T: {maxT} °C, {C2F(maxT)} °F")
print(f"Skew T: {skewT} ")

# graph: temperature vs. time
fig, ax = plt.subplots()
ax.plot(t, T)
plt.show()
30377004903 | from datetime import datetime
from aiogram import types
from aiogram.dispatcher import FSMContext
from tortoise import timezone
from datetime import timezone as tz
from data import messages
from keyboards.default.events import get_event_buttons
from keyboards.inline.events import created_event_buttons
from models.base import Event
from states.EventStates import EventStates
from states.ReportStates import ReportStates
from utils.event_utils import event_to_text
from utils.misc import chat_bot
from utils.time_utils import from_dt_to_hours_minutes, utc_to_local, get_dates_from_str
async def create_event(message: types.Message, state: FSMContext):
    """Create a new event for the sender and confirm it with an inline keyboard."""
    user_id = message.from_user.id
    event = await Event.create(client_id=user_id)
    markup = await created_event_buttons(event.id)
    await EventStates.event_in_process.set()
    started_at = utc_to_local(event.start_at).strftime('%H:%M:%S - %d/%m/%Y')
    text = messages.CREATED_EVENT.format(event.id, started_at)
    await chat_bot.send_message(user_id, text, reply_markup=markup)
async def event_get_name(message: types.Message, state: FSMContext):
    """Finish the active event: save its name and elapsed time, then report the duration."""
    state_data = await state.get_data()
    event = await Event.get(id=state_data['event_id'])
    # The message text becomes the event name; close the event now.
    event.name = message.text
    event.end_at = timezone.now()
    event.sum_time = (event.end_at - event.start_at).seconds
    await event.save()
    hours, minutes, seconds = await from_dt_to_hours_minutes(event.sum_time)
    await state.finish()
    reply_text = messages.END_EVENT_WITH_NAME.format(event.name, hours, minutes, seconds)
    keyboard = await get_event_buttons()
    await chat_bot.send_message(message.from_user.id, reply_text, reply_markup=keyboard)
async def report_custom(message: types.Message, state: FSMContext):
    """Ask the user to enter a custom date range for the report."""
    user_id = message.from_user.id
    await ReportStates.get_dates.set()
    await chat_bot.send_message(user_id, text=messages.CUSTOM_DATE_REPORT)
async def dates_report_custom(message: types.Message, state: FSMContext):
    """Build and send a report of all events that fall inside the user-supplied date range."""
    start, end = await get_dates_from_str(message.text)
    if start is None:
        # Unparseable input: ignore, exactly as before.
        return
    events = await Event.filter(start_at__gte=start, end_at__lte=end)
    parts = []
    total_seconds = 0
    for event in events:
        parts.append(await event_to_text(event) + "\n")
        total_seconds += event.sum_time
    hours, minutes, seconds = await from_dt_to_hours_minutes(total_seconds)
    parts.append(f"Всё заняло: <i>{hours} часов, {minutes} минут, {seconds} секунд</i>")
    report = "".join(parts)
    keyboard = await get_event_buttons()
    await chat_bot.send_message(message.from_user.id, report, reply_markup=keyboard)
| ghostnoop/BotTimeSpentManager | handlers/text.py | text.py | py | 2,676 | python | en | code | 0 | github-code | 90 |
#!/usr/bin/env python3
#web3fusion
"""Fetch one Fusion mainnet transaction and report how many transactions share its block."""
from web3fsnpy import Fsn

# Connection settings for the public Fusion mainnet WebSocket gateway.
# For testnet use gateway 'wss://testnetpublicgateway1.fusionnetwork.io:10001'.
link_to_chain = {
    'network': 'mainnet',      # One of 'testnet', or 'mainnet'
    'provider': 'WebSocket',   # One of 'WebSocket', 'HTTP', or 'IPC'
    'gateway': 'wss://mainnetpublicgateway1.fusionnetwork.io:10001',
}

web3fsn = Fsn(link_to_chain)

# Transaction hash to look up.
tx_id = 0x25557995e5ed36cc41c156e7f8fa7be10c6d45e25e37cd2e1f8d2d443480b2f2
tx = web3fsn.getTransaction(tx_id)
print(tx)  # the full transaction record

tx_count = web3fsn.getBlockTransactionCount(tx.blockNumber)
print('There were ', tx_count, ' transactions in block number ', tx.blockNumber)
5499279655 | #! /usr/bin/python
## \file scr_handler.py
# \brief holds all functions and variables related to the screen
# \author John Stone
# \date 2016
# \version 1.0.1
#
import pygame
from menu import *
# Offsets applied to the background image when it is blit to the screen
# each pass through the main loop (negative values shift it up/left).
# bg_image_pos_x = -45
# bg_image_pos_y = -100
bg_image_pos_x = 0
bg_image_pos_y = -35

# Used to scale the background image if necessary.
bg_image_scale_x = 960
bg_image_scale_y = 788

# List of all possible states the game can exist within
#
# states 0 - 9 correspond to state possilities from the start menu
# states 10 - 13 correspond to the state possibilities from the character selection menu
# states 20 - 24 correspond to the state possibilities from the battle screen
# states 30 - 34 correspond to the state possibilities from the move list screen
# states 40 - 44 correspond to the state possibilities from the move description screen
# states 50 - 59 correspond to animations to be displayed on the screen
#
# state 99 corresponds to a state that involves having a blank menu present so text can be displayed in its place
# space left for possible expansion of state list
#
# BUGFIX: the original list contained 68 twice; the duplicate has been removed.
# NOTE(review): animate_states lists 50-59 but only 50 appears here — confirm
# whether 51-59 should also be members of list_of_states.
list_of_states = [-1, 0, 1, 2, 3, 4, 5, 6,
                  7, 8, 9, 10, 11, 12, 13,
                  20, 21, 22, 23, 24, 25, 26,
                  27, 30, 31, 32, 33, 34, 40,
                  41, 42, 43, 44, 50, 60, 61, 62,
                  63, 64, 65, 66, 67, 68, 69, 70, 71,
                  99]

# Per-screen state groups (subsets of list_of_states).
start_states = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
character_select_states = [10, 11, 12, 13]
battle_states = [20, 21, 22, 23, 24]
battle_done_states = [25, 26, 27]
other_states = [30, 31, 32, 33, 34, 40, 41, 42, 43, 44]
option_states = [60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71]
animate_states = [50, 51, 52, 53, 54, 55, 56, 57, 58, 59]
instruction_state = -1
null_state = 99
## Draws the battlefield (health read-outs, sprites and health bars) while
## both combatants are still alive; once either health total reaches zero
## nothing is drawn here and the victory/defeat screen is handled elsewhere.
def display_field(boss_health, hero_health, screen, font, hero, villain, hit_points):
    # Guard clause: stop drawing as soon as either side has been defeated.
    if boss_health <= 0 or hero_health <= 0:
        return None

    # Numeric health read-outs (boss on top, hero below).
    screen.blit(font.render(str(boss_health), True, (255, 0, 0)), (0, 0))
    screen.blit(font.render(str(hero_health), True, (255, 0, 0)), (0, 50))

    # Combatant sprites and the hit-point label.
    screen.blit(hero, (0, 100))
    screen.blit(villain, (550, -50))
    screen.blit(hit_points, (185, 180))

    # Health bars whose widths track the remaining health values.
    pygame.draw.rect(screen, RED, pygame.Rect(200, 200, hero_health, 30))
    pygame.draw.rect(screen, RED, pygame.Rect(650, 0, boss_health, 30))
    return None
| jojoC0de/CapstoneProject2016 | Capstone Spring 2016/scr_handler.py | scr_handler.py | py | 2,688 | python | en | code | 1 | github-code | 90 |
7581904632 | import sys
from PyQt5.QtWidgets import *
from PyQt5 import uic
#UI파일 연결
#단, UI파일은 Python 코드 파일과 같은 디렉토리에 위치해야한다.
form_class = uic.loadUiType("plus.ui")[0]
class WindowClass(QMainWindow, form_class):
    """Main window: reads integers from line edits le1 and le2 and, when the
    push button pb is clicked, writes their sum into le3 (widgets come from
    the plus.ui form loaded at module level)."""

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        # Wire the push button to the addition handler.
        self.pb.clicked.connect(self.pb_clicked)

    def pb_clicked(self):
        """Add the two entered values and display the result in le3.

        BUGFIX: the original raised an unhandled ValueError when either
        field was empty or non-numeric; such input now clears the result
        instead of crashing the slot.
        """
        a = self.le1.text()
        b = self.le2.text()
        try:
            c = int(a) + int(b)
        except ValueError:
            self.le3.setText('')
            return
        self.le3.setText(str(c))
if __name__ == '__main__':
    # QApplication: the class that runs the application
    app = QApplication(sys.argv)
    # Create an instance of WindowClass
    myWindow = WindowClass()
    # Show the program window
    myWindow.show()
    # Enter the event loop (start the program running)
    app.exec_()
70213348456 | import copy
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_wxagg \
import FigureCanvasWxAgg as FigureCanvas
import wx
from wx.lib.agw import aui
import wx.lib.agw.floatspin as agwfs
from cebl import ml
from cebl import util
from cebl.rt import widgets
from .standard import StandardConfigPanel, StandardBCIPage
class WelchConfigPanel(wx.Panel):
    """Configuration panel for the 'Welch Power' feature method.

    Exposes the Welch window span, a log-transform toggle, and the low/high
    frequency cutoffs, writing each value into pg.welchConfig.
    """

    def __init__(self, parent, pg, cp, *args, **kwargs):
        """Build the panel.

        Args:
            parent: parent wx window.
            pg:     owning page (holds welchConfig and requireRetrain).
            cp:     the ConfigPanel; controls are registered in
                    cp.offlineControls so they are disabled while running.
        """
        wx.Panel.__init__(self, parent=parent, *args, **kwargs)
        self.pg = pg
        self.cp = cp
        self.sizer = wx.BoxSizer(orient=wx.VERTICAL)
        self.SetSizer(self.sizer)
        self.initFeatures()
        self.initFreqs()
        self.Layout()
    def initFeatures(self):
        """Create the Welch-span spinner and the log-transform radio pair."""
        featureSizer = wx.BoxSizer(orient=wx.HORIZONTAL)
        spanControlBox = widgets.ControlBox(self, label='Welch Span', orient=wx.VERTICAL)
        self.spanFloatSpin = agwfs.FloatSpin(self, min_val=0.1, max_val=3.0,
                increment=0.05, value=self.pg.welchConfig.span)
        self.spanFloatSpin.SetFormat('%f')
        self.spanFloatSpin.SetDigits(3)
        self.Bind(agwfs.EVT_FLOATSPIN, self.setSpan, self.spanFloatSpin)
        self.cp.offlineControls += [self.spanFloatSpin]
        spanControlBox.Add(self.spanFloatSpin, proportion=1,
                flag=wx.ALL | wx.EXPAND, border=10)
        featureSizer.Add(spanControlBox, proportion=1,
                flag=wx.LEFT | wx.BOTTOM | wx.RIGHT | wx.EXPAND, border=10)
        # radio buttons for turning log transform on and off
        logTransControlBox = widgets.ControlBox(self, label='Log Trans', orient=wx.HORIZONTAL)
        logTransOnRbtn = wx.RadioButton(self, label='On', style=wx.RB_GROUP)
        self.Bind(wx.EVT_RADIOBUTTON, self.setLogTransOn, logTransOnRbtn)
        logTransControlBox.Add(logTransOnRbtn, proportion=0, flag=wx.ALL, border=10)
        self.cp.offlineControls += [logTransOnRbtn]
        logTransOffRbtn = wx.RadioButton(self, label='Off')
        self.Bind(wx.EVT_RADIOBUTTON, self.setLogTransOff, logTransOffRbtn)
        logTransControlBox.Add(logTransOffRbtn, proportion=0, flag=wx.ALL, border=10)
        self.cp.offlineControls += [logTransOffRbtn]
        # reflect the current config value in the radio buttons
        if self.pg.welchConfig.logTrans:
            logTransOnRbtn.SetValue(True)
        else:
            logTransOffRbtn.SetValue(True)
        featureSizer.Add(logTransControlBox, proportion=1,
                flag=wx.BOTTOM | wx.RIGHT | wx.EXPAND, border=10)
        self.sizer.Add(featureSizer, proportion=0, flag=wx.EXPAND)
    def setSpan(self, event):
        """Store the new Welch span and mark the classifier as needing retraining."""
        self.pg.welchConfig.span = self.spanFloatSpin.GetValue()
        self.pg.requireRetrain()
    def setLogTransOn(self, event=None):
        # NOTE(review): unlike setSpan/setLowFreq/setHighFreq this does not
        # call pg.requireRetrain() — confirm whether that is intentional.
        self.pg.welchConfig.logTrans = True
    def setLogTransOff(self, event=None):
        # NOTE(review): see setLogTransOn — no requireRetrain() call here either.
        self.pg.welchConfig.logTrans = False
    def initFreqs(self):
        """Create the low/high frequency cutoff spinners."""
        lowHighFreqSizer = wx.BoxSizer(orient=wx.HORIZONTAL)
        lowFreqControlBox = widgets.ControlBox(self, label='Low Freq', orient=wx.VERTICAL)
        self.lowFreqFloatSpin = agwfs.FloatSpin(self, min_val=0.25, max_val=100.0,
                increment=1/4.0, value=self.pg.welchConfig.lowFreq)
        self.lowFreqFloatSpin.SetFormat('%f')
        self.lowFreqFloatSpin.SetDigits(4)
        self.Bind(agwfs.EVT_FLOATSPIN, self.setLowFreq, self.lowFreqFloatSpin)
        self.cp.offlineControls += [self.lowFreqFloatSpin]
        lowFreqControlBox.Add(self.lowFreqFloatSpin, proportion=1,
                flag=wx.ALL | wx.EXPAND, border=10)
        lowHighFreqSizer.Add(lowFreqControlBox, proportion=1,
                flag=wx.LEFT | wx.BOTTOM | wx.RIGHT | wx.EXPAND, border=10)
        highFreqControlBox = widgets.ControlBox(self, label='High Freq', orient=wx.VERTICAL)
        self.highFreqFloatSpin = agwfs.FloatSpin(self, min_val=0.25, max_val=100.0,
                increment=1/4.0, value=self.pg.welchConfig.highFreq)
        self.highFreqFloatSpin.SetFormat('%f')
        self.highFreqFloatSpin.SetDigits(4)
        self.Bind(agwfs.EVT_FLOATSPIN, self.setHighFreq, self.highFreqFloatSpin)
        self.cp.offlineControls += [self.highFreqFloatSpin]
        highFreqControlBox.Add(self.highFreqFloatSpin, proportion=1,
                flag=wx.ALL | wx.EXPAND, border=10)
        lowHighFreqSizer.Add(highFreqControlBox, proportion=1,
                flag=wx.BOTTOM | wx.RIGHT | wx.EXPAND, border=10)
        self.sizer.Add(lowHighFreqSizer, proportion=0, flag=wx.EXPAND)
    def setLowFreq(self, event):
        """Store the new low-frequency cutoff and require retraining."""
        self.pg.welchConfig.lowFreq = self.lowFreqFloatSpin.GetValue()
        self.pg.requireRetrain()
    def setHighFreq(self, event):
        """Store the new high-frequency cutoff and require retraining."""
        self.pg.welchConfig.highFreq = self.highFreqFloatSpin.GetValue()
        self.pg.requireRetrain()
class AutoregConfigPanel(wx.Panel):
    """Placeholder configuration panel for the Autoregressive method (no options yet)."""

    def __init__(self, parent, pg, cp, *args, **kwargs):
        wx.Panel.__init__(self, parent=parent, *args, **kwargs)
        # keep references to the owning page and config panel
        self.pg = pg
        self.cp = cp
        # empty vertical sizer; options may be added here later
        sizer = wx.BoxSizer(orient=wx.VERTICAL)
        self.sizer = sizer
        self.SetSizer(self.sizer)
        self.Layout()
class TDENetConfigPanel(wx.Panel):
    """Placeholder configuration panel for the Time Embedded Net method (no options yet)."""

    def __init__(self, parent, pg, cp, *args, **kwargs):
        wx.Panel.__init__(self, parent=parent, *args, **kwargs)
        # keep references to the owning page and config panel
        self.pg = pg
        self.cp = cp
        # empty vertical sizer; options may be added here later
        sizer = wx.BoxSizer(orient=wx.VERTICAL)
        self.sizer = sizer
        self.SetSizer(self.sizer)
        self.Layout()
class ConvNetConfigPanel(wx.Panel):
    """Placeholder configuration panel for the Convolutional Net method (no options yet)."""

    def __init__(self, parent, pg, cp, *args, **kwargs):
        wx.Panel.__init__(self, parent=parent, *args, **kwargs)
        # keep references to the owning page and config panel
        self.pg = pg
        self.cp = cp
        # empty vertical sizer; options may be added here later
        sizer = wx.BoxSizer(orient=wx.VERTICAL)
        self.sizer = sizer
        self.SetSizer(self.sizer)
        self.Layout()
class ConfigPanel(StandardConfigPanel):
    """Main configuration panel for the MentalTasks page.

    Exposes the choice list, trial counts/durations, timing, gain and the
    feature-extraction method (with one sub-panel per method).
    """

    def __init__(self, *args, **kwargs):
        StandardConfigPanel.__init__(self, *args, **kwargs)
        self.initChoices()
        self.initNTrial()
        self.initTrialSecs()
        self.initTiming()
        self.initGain()
        self.initMethod()
        self.initLayout()

    def initChoices(self):
        """Text control holding the comma-separated list of mental-task choices."""
        choiceControlBox = widgets.ControlBox(self, label='Choices', orient=wx.VERTICAL)
        self.choiceTextCtrl = wx.TextCtrl(parent=self, value=', '.join(self.pg.choices),
            style=wx.TE_PROCESS_ENTER)
        # commit on enter and on focus loss
        self.Bind(wx.EVT_TEXT_ENTER, self.setChoices, self.choiceTextCtrl)
        self.choiceTextCtrl.Bind(wx.EVT_KILL_FOCUS, self.setChoices, self.choiceTextCtrl)
        self.offlineControls += [self.choiceTextCtrl]
        choiceControlBox.Add(self.choiceTextCtrl, proportion=1,
                flag=wx.ALL | wx.EXPAND, border=10)
        self.sizer.Add(choiceControlBox, proportion=0, flag=wx.ALL | wx.EXPAND, border=10)

    def setChoices(self, event):
        """Parse the choice list, update the page and pie menu, and invalidate training."""
        choiceString = self.choiceTextCtrl.GetValue()
        choices = [c.strip() for c in choiceString.split(',')]
        if len(choices) < 2:
            wx.LogError('Page %s: Cannot use less than 2 choices.' % self.name)
        else:
            self.pg.choices = choices
            self.pg.confusion = np.zeros((len(choices), len(choices)))
            self.pg.pieMenu.setChoices(choices, refresh=False)
            # two choices sit left/right; otherwise spread slices evenly
            if len(choices) == 2:
                self.pg.pieMenu.setRotation(-np.pi/2.0)
            else:
                self.pg.pieMenu.setRotation(np.pi/len(choices)+np.pi/2.0)
            self.pg.setTrained(False)
        #event.Skip()

    def initNTrial(self):
        """Spinners for the number of training and testing trials."""
        trialSizer = wx.BoxSizer(orient=wx.HORIZONTAL)
        trainTrialControlBox = widgets.ControlBox(self, label='Train Trials', orient=wx.VERTICAL)
        self.trainTrialSpinCtrl = wx.SpinCtrl(self, #style=wx.SP_WRAP,
                value=str(self.pg.nTrainTrial), min=2, max=100)
        self.Bind(wx.EVT_SPINCTRL, self.setNTrainTrial, self.trainTrialSpinCtrl)
        self.offlineControls += [self.trainTrialSpinCtrl]
        trainTrialControlBox.Add(self.trainTrialSpinCtrl, proportion=1,
                flag=wx.ALL | wx.EXPAND, border=10)
        trialSizer.Add(trainTrialControlBox, proportion=1,
                flag=wx.LEFT | wx.BOTTOM | wx.RIGHT | wx.EXPAND, border=10)
        testTrialControlBox = widgets.ControlBox(self, label='Test Trials', orient=wx.VERTICAL)
        self.testTrialSpinCtrl = wx.SpinCtrl(self, #style=wx.SP_WRAP,
                value=str(self.pg.nTestTrial), min=1, max=100)
        self.Bind(wx.EVT_SPINCTRL, self.setNTestTrial, self.testTrialSpinCtrl)
        self.offlineControls += [self.testTrialSpinCtrl]
        testTrialControlBox.Add(self.testTrialSpinCtrl, proportion=1,
                flag=wx.ALL | wx.EXPAND, border=10)
        trialSizer.Add(testTrialControlBox, proportion=1,
                flag=wx.BOTTOM | wx.RIGHT | wx.EXPAND, border=10)
        self.sizer.Add(trialSizer, proportion=0, flag=wx.EXPAND)

    def setNTrainTrial(self, event):
        self.pg.nTrainTrial = self.trainTrialSpinCtrl.GetValue()
        self.pg.setTrained(False)

    def setNTestTrial(self, event):
        self.pg.nTestTrial = self.testTrialSpinCtrl.GetValue()

    def initTrialSecs(self):
        """Spinners for training-trial duration and inter-trial pause."""
        secsSizer = wx.BoxSizer(orient=wx.HORIZONTAL)
        trainTrialSecsControlBox = widgets.ControlBox(self,
                label='Train Trial Secs', orient=wx.VERTICAL)
        self.trainTrialSecsFloatSpin = agwfs.FloatSpin(self, min_val=2.00, max_val=60.0,
                increment=1/4.0, value=self.pg.trainTrialSecs)
        self.trainTrialSecsFloatSpin.SetFormat('%f')
        self.trainTrialSecsFloatSpin.SetDigits(3)
        self.Bind(agwfs.EVT_FLOATSPIN, self.setTrialSecs, self.trainTrialSecsFloatSpin)
        self.offlineControls += [self.trainTrialSecsFloatSpin]
        trainTrialSecsControlBox.Add(self.trainTrialSecsFloatSpin, proportion=1,
                flag=wx.ALL | wx.EXPAND, border=10)
        secsSizer.Add(trainTrialSecsControlBox, proportion=1,
                flag=wx.LEFT | wx.BOTTOM | wx.RIGHT | wx.EXPAND, border=10)
        pauseSecsControlBox = widgets.ControlBox(self, label='Pause Secs', orient=wx.VERTICAL)
        self.pauseSecsFloatSpin = agwfs.FloatSpin(self, min_val=0.25, max_val=10.0,
                increment=1/4.0, value=self.pg.pauseSecs)
        self.pauseSecsFloatSpin.SetFormat('%f')
        self.pauseSecsFloatSpin.SetDigits(3)
        self.Bind(agwfs.EVT_FLOATSPIN, self.setPauseSecs, self.pauseSecsFloatSpin)
        self.offlineControls += [self.pauseSecsFloatSpin]
        pauseSecsControlBox.Add(self.pauseSecsFloatSpin, proportion=1,
                flag=wx.ALL | wx.EXPAND, border=10)
        secsSizer.Add(pauseSecsControlBox, proportion=1,
                flag=wx.BOTTOM | wx.RIGHT | wx.EXPAND, border=10)
        self.sizer.Add(secsSizer, proportion=0, flag=wx.EXPAND)

    def setTrialSecs(self, event):
        self.pg.trainTrialSecs = self.trainTrialSecsFloatSpin.GetValue()
        self.pg.setTrained(False)

    def setPauseSecs(self, event):
        self.pg.pauseSecs = self.pauseSecsFloatSpin.GetValue()
        self.pg.setTrained(False)

    def initTiming(self):
        """Spinners for the analysis window width and decision interval."""
        timingSizer = wx.BoxSizer(orient=wx.HORIZONTAL)
        widthControlBox = widgets.ControlBox(self, label='Width Secs', orient=wx.VERTICAL)
        self.widthFloatSpin = agwfs.FloatSpin(self, min_val=0.2, max_val=5.0,
                increment=0.05, value=self.pg.width)
        self.widthFloatSpin.SetFormat('%f')
        self.widthFloatSpin.SetDigits(3)
        self.Bind(agwfs.EVT_FLOATSPIN, self.setWidth, self.widthFloatSpin)
        self.offlineControls += [self.widthFloatSpin]
        widthControlBox.Add(self.widthFloatSpin, proportion=1,
                flag=wx.ALL | wx.EXPAND, border=10)
        timingSizer.Add(widthControlBox, proportion=1,
                flag=wx.LEFT | wx.BOTTOM | wx.RIGHT | wx.EXPAND, border=10)
        decisionSecsControlBox = widgets.ControlBox(self, label='Decision Secs', orient=wx.VERTICAL)
        self.decisionSecsFloatSpin = agwfs.FloatSpin(self, min_val=0.025, max_val=5.0,
                increment=0.025, value=self.pg.decisionSecs)
        self.decisionSecsFloatSpin.SetFormat('%f')
        self.decisionSecsFloatSpin.SetDigits(4)
        self.Bind(agwfs.EVT_FLOATSPIN, self.setDecisionSecs, self.decisionSecsFloatSpin)
        self.offlineControls += [self.decisionSecsFloatSpin]
        decisionSecsControlBox.Add(self.decisionSecsFloatSpin, proportion=1,
                flag=wx.ALL | wx.EXPAND, border=10)
        timingSizer.Add(decisionSecsControlBox, proportion=1,
                flag=wx.BOTTOM | wx.RIGHT | wx.EXPAND, border=10)
        self.sizer.Add(timingSizer, proportion=0, flag=wx.EXPAND)

    def setDecisionSecs(self, event):
        self.pg.decisionSecs = self.decisionSecsFloatSpin.GetValue()
        self.pg.initOverlap()
        self.pg.requireRetrain()

    def setWidth(self, event):
        self.pg.width = self.widthFloatSpin.GetValue()
        self.pg.initOverlap()
        self.pg.requireRetrain()

    def initGain(self):
        """Slider (1-100 -> 0.01-1.00) controlling the selection gain."""
        gainSizer = wx.BoxSizer(orient=wx.VERTICAL)
        gainControlBox = widgets.ControlBox(self, label='Gain', orient=wx.HORIZONTAL)
        self.gainText = wx.StaticText(self, label='%0.2f' % self.pg.gain)
        gainTextSizer = wx.BoxSizer(orient=wx.VERTICAL)
        gainTextSizer.Add(self.gainText, proportion=1, flag=wx.EXPAND)
        self.gainSlider = wx.Slider(self, style=wx.SL_HORIZONTAL,
                value=int(self.pg.gain*100.0), minValue=1, maxValue=100)
        self.Bind(wx.EVT_SLIDER, self.setGain, self.gainSlider)
        gainControlBox.Add(gainTextSizer, proportion=0, flag=wx.ALL | wx.EXPAND, border=10)
        gainControlBox.Add(self.gainSlider, proportion=1, flag=wx.ALL | wx.EXPAND, border=10)
        gainSizer.Add(gainControlBox,
                flag=wx.LEFT | wx.BOTTOM | wx.RIGHT | wx.EXPAND, border=10)
        self.sizer.Add(gainSizer, proportion=0, flag=wx.EXPAND)

    def setGain(self, event):
        self.pg.gain = self.gainSlider.GetValue() / 100.0
        self.gainText.SetLabel('%0.2f' % self.pg.gain)

    def initMethod(self):
        """Combo box selecting the feature method, plus one config sub-panel per method."""
        methodControlBox = widgets.ControlBox(self, label='Method', orient=wx.VERTICAL)
        self.methodComboBox = wx.ComboBox(self, value=self.pg.method,
                style=wx.CB_READONLY, choices=('Welch Power', 'Autoregressive', 'Time Embedded Net', 'Convolutional Net'))
        self.Bind(wx.EVT_COMBOBOX, self.setMethod, self.methodComboBox)
        self.offlineControls += [self.methodComboBox]
        methodControlBox.Add(self.methodComboBox, proportion=0,
                flag=wx.ALL | wx.EXPAND, border=10)
        self.sizer.Add(methodControlBox, proportion=0,
                flag=wx.LEFT | wx.BOTTOM | wx.RIGHT | wx.EXPAND, border=10)
        self.methodConfigSizer = wx.BoxSizer(orient=wx.VERTICAL)
        self.welchPanel = WelchConfigPanel(self, pg=self.pg, cp=self)
        self.methodConfigSizer.Add(self.welchPanel, proportion=1, flag=wx.EXPAND)
        self.autoregPanel = AutoregConfigPanel(self, pg=self.pg, cp=self)
        self.methodConfigSizer.Add(self.autoregPanel, proportion=1, flag=wx.EXPAND)
        # BUGFIX: was constructed as ConvNetConfigPanel (copy-paste error),
        # leaving TDENetConfigPanel unused.
        self.tdeNetPanel = TDENetConfigPanel(self, pg=self.pg, cp=self)
        self.methodConfigSizer.Add(self.tdeNetPanel, proportion=1, flag=wx.EXPAND)
        self.convNetPanel = ConvNetConfigPanel(self, pg=self.pg, cp=self)
        self.methodConfigSizer.Add(self.convNetPanel, proportion=1, flag=wx.EXPAND)
        self.sizer.Add(self.methodConfigSizer, proportion=1, flag=wx.EXPAND)
        self.methodConfig = self.welchPanel

    def setMethod(self, event):
        """Swap the visible method config panel to match the combo-box selection."""
        method = self.methodComboBox.GetValue()
        self.pg.method = method
        self.methodConfig.Hide()
        if method == 'Welch Power':
            self.methodConfig = self.welchPanel
        elif method == 'Autoregressive':
            self.methodConfig = self.autoregPanel
        elif method == 'Time Embedded Net':
            self.methodConfig = self.tdeNetPanel
        elif method == 'Convolutional Net':
            self.methodConfig = self.convNetPanel
        else:
            raise RuntimeError('Unknown method: ' + str(method))
        self.methodConfig.Show()
        self.FitInside()
        self.pg.requireRetrain()

    def initLayout(self):
        self.initStandardLayout()
        self.FitInside()
        # BUGFIX: hide every inactive method panel, not just the autoreg one;
        # previously the TDE and ConvNet panels were shown at startup.
        self.autoregPanel.Hide()
        self.tdeNetPanel.Hide()
        self.convNetPanel.Hide()
        self.FitInside()
class PlotPanel(wx.Panel):
    """Stimulus panel that toggles between the pie menu and a feature plot."""

    def __init__(self, parent, choices, *args, **kwargs):
        wx.Panel.__init__(self, parent=parent, *args, **kwargs)
        self.initPieMenu(choices)
        self.initFeatureCanvas()
        self.initLayout()

    def initPieMenu(self, choices):
        """Create the pie menu with one slice per choice."""
        self.pieMenu = widgets.PieMenu(self, choices=choices,
                rotation=np.pi/len(choices)+np.pi/2.0,
                #colors=('red', (50,220,50), 'yellow', 'blue'))
                colors=('turquoise', 'red', 'blue violet', 'orange',
                        'blue', 'yellow'))

    def initFeatureCanvas(self):
        """Create the matplotlib figure/canvas used for the feature plot."""
        self.featureFig = plt.Figure()
        ##self.featureFig.subplots_adjust(hspace=0.32, wspace=0.02,
        ##    left=0.065, right=0.95, top=0.97, bottom=0.18)
        self.featureAx = self.featureFig.add_subplot(1,1,1)
        self.featureCanvas = FigureCanvas(parent=self, id=wx.ID_ANY, figure=self.featureFig)

    def initLayout(self):
        """Stack both views in one sizer; only one is shown at a time."""
        plotSizer = wx.BoxSizer(orient=wx.VERTICAL)
        plotSizer.Add(self.pieMenu, proportion=1, flag=wx.EXPAND | wx.ALL)
        plotSizer.Add(self.featureCanvas, proportion=1, flag=wx.EXPAND | wx.ALL)
        self.SetSizer(plotSizer)
        self.featureCanvas.Hide()
        self.Layout()

    def showPieMenu(self):
        """Show the pie menu and hide the feature plot."""
        self.featureCanvas.Hide()
        self.pieMenu.Show()
        self.Layout()

    def showFeatureCanvas(self):
        """Show the feature plot and hide the pie menu."""
        self.featureCanvas.Show()
        self.pieMenu.Hide()
        self.Layout()

    def plotFeatures(self, trainData, freqs, choices, chanNames):
        """Plot the per-class mean power features, channel blocks side by side.

        Args:
            trainData: one (nObs, nFeature) array per class.
            freqs:     frequencies of each feature within one channel block.
            choices:   class labels for the legend.
            chanNames: channel names used to label the per-channel blocks.
        """
        self.featureAx.cla()
        meanFeat = [np.mean(cls, axis=0) for cls in trainData]
        for cls, choice in zip(meanFeat, choices):
            self.featureAx.plot(cls, label=choice, marker='o', linewidth=2)
        self.featureAx.set_xlabel(r'Frequency ($Hz$)')
        self.featureAx.set_ylabel(r'Mean Log$_{10}$ Power $(uV^2 / Hz)^{\frac{1}{2}}$')
        self.featureAx.legend()
        nFreq = len(freqs)
        mn = np.min(np.concatenate(meanFeat))
        mx = np.max(np.concatenate(meanFeat))
        # draw a divider and channel label between consecutive channel blocks
        for i,cn in enumerate(chanNames):
            if i > 0:
                self.featureAx.vlines(i*float(nFreq), mn, mx, linestyle='--')
            self.featureAx.text((i+0.25)*float(nFreq), mx-0.1*(mx-mn), cn, fontsize=14)
        # tick every third of each channel block, labeled with its frequency
        tickStride = int(np.ceil(nFreq/3.0))
        tickFreqs = freqs[::tickStride]
        tickPlaces = np.arange(nFreq)[::tickStride]
        tickLocs = np.concatenate(
            [tickPlaces+nFreq*i for i,c in enumerate(chanNames)])
        # BUGFIX: np.int was removed in NumPy 1.24; use the builtin int dtype.
        tickLabels = np.round(np.tile(tickFreqs, len(chanNames))).astype(int)
        self.featureAx.set_xticks(tickLocs)
        self.featureAx.set_xticklabels(tickLabels)
        self.featureAx.autoscale(tight=True)
        self.featureFig.tight_layout()
        self.showFeatureCanvas()
class MentalTasks(StandardBCIPage):
    def __init__(self, *args, **kwargs):
        # Configuration must exist before the base class builds the
        # ConfigPanel, which reads pg.* attributes set in initConfig.
        self.initConfig()
        StandardBCIPage.__init__(self, name='MentalTasks',
            configPanelClass=ConfigPanel, *args, **kwargs)
        self.initPlots()
        self.initLayout()
    def initConfig(self):
        """Set the default configuration for trials, timing, gain and methods."""
        # trial structure
        self.nTrainTrial = 8
        self.trainTrialSecs = 10.0
        self.pauseSecs = 1.0
        self.nTestTrial = 5
        # analysis window width and decision interval (seconds)
        self.width = 2.0
        self.decisionSecs = 2.0
        self.initOverlap()
        self.gain = 0.25
        self.method = 'Welch Power'
        #self.choices = ['Song', 'Right', 'Count', 'Left']
        self.choices = ['Song', 'Right', 'Cube', 'Left']
        # Welch power-spectrum feature settings
        self.welchConfig = util.Holder(
            classifierKind = 'Linear Discrim',
            span = 0.2,
            logTrans = True,
            lowFreq = 1.0,
            highFreq = 40.0
        )
        # autoregression config
        self.autoregConfig = util.Holder(
            horizon = 1
        )
        # no classifier or training data until a session has been recorded
        self.classifier = None
        self.trainCap = None
        self.confusion = np.zeros((len(self.choices), len(self.choices)))
    def initOverlap(self):
        # Fraction of overlap between consecutive analysis windows so that a
        # decision is produced every decisionSecs.
        self.overlap = 1.0 - (self.decisionSecs/float(self.width))
    def initPlots(self):
        """Create the stimulus panel and keep a shortcut to its pie menu."""
        self.plotPanel = PlotPanel(self, self.choices)
        self.pieMenu = self.plotPanel.pieMenu
    def initLayout(self):
        """Dock the stimulus panel in the center AUI pane."""
        self.initStandardLayout()
        stimPaneAuiInfo = aui.AuiPaneInfo().Name('stim').Caption(self.name + ' Stimulus').CenterPane()
        self.auiManager.AddPane(self.plotPanel, stimPaneAuiInfo)
        self.auiManager.Update()
    def beforeStart(self):
        # Lock the configuration while a session is running.
        self.configPanel.disable()
    def afterStop(self):
        # Re-enable configuration once the session has stopped.
        self.configPanel.enable()
#def getCap(self, secs):
# cap = self.src.getEEGSecs(secs)
# # hack, should be handled in filter chain XXX - idfah
# if cap.getSampRate() > 256.0:
# cap = cap.demean().bandpass(0.0, 100.0, order=3)
# decimationFactor = int(np.round(cap.getSampRate()/256.0))
# cap = cap.downsample(decimationFactor)
# #cap = cap.car()
# #cap.bandpass(0.5, 40).downsample(3)
# return cap
    def saveCap(self):
        """Prompt for a file name and save the session's raw (unfiltered) EEG."""
        cap = self.src.getEEGSecs(self.getSessionTime(), filter=False)
        saveDialog = wx.FileDialog(self, message='Save EEG data.',
                        wildcard='Pickle (*.pkl)|*.pkl|All Files|*',
                        style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
        try:
            # user cancelled: nothing to save (finally still destroys the dialog)
            if saveDialog.ShowModal() == wx.ID_CANCEL:
                return
            cap.saveFile(saveDialog.GetPath())
        except Exception:
            # re-raise after logging so the caller still sees the failure
            wx.LogError('Save failed!')
            raise
        finally:
            saveDialog.Destroy()
    def saveResultText(self, resultText):
        """Prompt for a file name and write the training-result text to it.

        Args:
            resultText: plain-text training summary to write.
        """
        saveDialog = wx.FileDialog(self, message='Save Result Text.',
                        wildcard='Text (*.txt)|*.txt|All Files|*',
                        style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
        try:
            # user cancelled: nothing to save (finally still destroys the dialog)
            if saveDialog.ShowModal() == wx.ID_CANCEL:
                return
            with open(saveDialog.GetPath(), 'w') as fd:
                fd.write(resultText)
        except Exception:
            # re-raise after logging so the caller still sees the failure
            wx.LogError('Save failed!')
            raise
        finally:
            saveDialog.Destroy()
    def powerize(self, segs):
        """Convert segmented EEG into Welch power features per class.

        Args:
            segs: one segmented-EEG object per class.
        Returns:
            (freqs, powers): the retained frequency values and, per class, a
            (nSeg, nChan*nFreq) feature matrix with channels embedded.
        """
        # generate PSD object for each seg in each class
        psds = [cls.psd(method='welch', span=self.welchConfig.span) for cls in segs]
        #psds = [cls.psd(method='raw') for cls in segs]
        # get freqencies, should be same for all classes
        freqs = psds[0][0].getFreqs()
        # mask where freqencies fall between lowFreq and highFreq
        freqMask = (freqs > self.welchConfig.lowFreq) & (freqs < self.welchConfig.highFreq)
        freqs = freqs[freqMask]
        # extract powers into a single matrix for each class
        powers = [np.array([segPsd.getPowers().T for segPsd in cls]) for cls in psds]
        # grab powers between lowFreq and highFreq
        powers = [cls[:,:,freqMask] for cls in powers]
        if self.welchConfig.logTrans:
            # use log power to put near amplitude units
            powers = [np.log10(cls) for cls in powers]
        # embed channels
        powers = [cls.reshape(cls.shape[0], -1) for cls in powers]
        return freqs, powers
    def beforeTrain(self):
        """Reset trial bookkeeping and clear the source marker before training."""
        self.curTrial = 0
        self.curChoices = []
        self.src.setMarker(0.0)
    def afterTrain(self, earlyStop):
        """Capture and save the session's EEG unless training was stopped early."""
        if not earlyStop:
            self.trainCap = self.src.getEEGSecs(self.getSessionTime()).copy(dtype=np.float32) # get rid of copy and dtype after implementing option in source XXX - idfah
            self.saveCap()
    def trainEpoch(self):
        """Present the next training cue: highlight a choice and set its marker."""
        # start a fresh shuffled pass over the choices when the last is used up
        if len(self.curChoices) == 0:
            self.curChoices = copy.copy(self.choices)
            np.random.shuffle(self.curChoices)
            self.curTrial += 1
        choice = self.curChoices.pop()
        self.pieMenu.highlight(choice, style='pop')
        # markers are 1-based indices into self.choices (0.0 means "no cue")
        self.src.setMarker(self.choices.index(choice)+1.0)
        wx.CallLater(int(1000 * self.trainTrialSecs), self.trainClearTrial)
    def trainClearTrial(self, event=None):
        """End the current cue; after the pause either continue or finish training."""
        self.pieMenu.clearAllHighlights()
        self.src.setMarker(0.0)
        if self.curTrial == self.nTrainTrial and len(self.curChoices) == 0:
            wx.CallLater(int(1000 * self.pauseSecs), self.endTrain)
        else:
            wx.CallLater(int(1000 * self.pauseSecs), self.runTrainEpoch)
    def trainClassifier(self):
        """Segment the captured EEG by marker and train with the selected method.

        Raises:
            RuntimeError: if no training data is available or the method is invalid.
        """
        if self.trainCap is None:
            raise RuntimeError('No data available for training.')
        # one segment per cue, then one group of segments per choice/marker
        segmented = self.trainCap.segment(start=0.0, end=self.trainTrialSecs)
        segs = [segmented.selectNear(i+1.0) for i in range(len(self.choices))]
        # NOTE(review): assert is stripped under python -O; consider raising instead
        for cls in segs:
            assert cls.getNSeg() == self.nTrainTrial
        # split segments into overlapping analysis windows of length self.width
        segs = [cls.split(self.width, self.overlap) for cls in segs]
        ##print('nSplit segs: ', segs[0].getNSeg())
        if self.method == 'Welch Power':
            self.trainWelch(segs)
        elif self.method == 'Autoregressive':
            self.trainAutoreg(segs)
        elif self.method == 'Time Embedded Net':
            self.trainTDENet(segs)
        elif self.method == 'Convolutional Net':
            self.trainConvNet(segs)
        else:
            raise RuntimeError('Invalid method: %s.' % str(self.method))
        self.plotPanel.showPieMenu()
    def trainWelch(self, segs):
        """Extract Welch power features, plot them, and train the chosen classifier.

        Raises:
            RuntimeError: if welchConfig.classifierKind is unrecognized.
        """
        freqs, trainData = self.powerize(segs)
        # show the per-class mean features before training
        self.plotPanel.plotFeatures(trainData, freqs, self.choices,
                self.trainCap.getChanNames())
        if self.welchConfig.classifierKind == 'Linear Discrim':
            self.trainWelchLDA(trainData)
        elif self.welchConfig.classifierKind == 'Neural Net':
            self.trainWelchNN(trainData)
        else:
            raise RuntimeError('Invalid classifier kind: %s.' % str(self.welchConfig.classifierKind))
def trainWelchLDA(self, trainData):
self.stand = ml.ClassStandardizer(trainData)
trainDataStd = self.stand.apply(trainData)
#penalties = np.insert(np.power(10.0, np.linspace(-3.0, 0.0, 50)), 0, 0.0)
penalties = np.linspace(0.0, 1.0, 51)
nFold = self.nTrainTrial
trnCA = np.zeros((nFold, penalties.size))
valCA = np.zeros((nFold, penalties.size))
dialog = wx.ProgressDialog('Training Classifier',
'Featurizing', maximum=nFold+1,
style=wx.PD_ELAPSED_TIME | wx.PD_SMOOTH)
for fold, trnData, valData in ml.part.classStratified(trainDataStd, nFold=nFold):
dialog.Update(fold, 'Validation Fold: %d' % fold)
for i, penalty in enumerate(penalties):
classifier = ml.LDA(trnData, shrinkage=penalty)
trnCA[fold,i] = classifier.ca(trnData)
valCA[fold,i] = classifier.ca(valData)
dialog.Update(nFold, 'Training Final Classifier')
meanTrnCA = np.mean(trnCA, axis=0)
meanValCA = np.mean(valCA, axis=0)
bestPenaltyIndex = np.argmax(meanValCA)
bestPenalty = penalties[bestPenaltyIndex]
bestMeanTrnCA = meanTrnCA[bestPenaltyIndex]
bestMeanValCA = meanValCA[bestPenaltyIndex]
self.classifier = ml.LDA(trainDataStd, shrinkage=bestPenalty)
trainCA = self.classifier.ca(trainDataStd)
trainConfusion = np.round(100*self.classifier.confusion(trainDataStd))
dialog.Destroy()
resultText = (('Best Shrinkage: %f\n' % bestPenalty) +
('Best Mean Training CA: %f\n' % bestMeanTrnCA) +
('Best Mean Validation CA: %f\n' % bestMeanValCA) +
('Final Training CA: %f\n' % trainCA) +
('Confusion Matrix:\n' + str(trainConfusion) + '\n') +
('Choices: ' + str(self.choices)))
wx.MessageBox(message=resultText,
caption='Training Completed!',
style=wx.OK | wx.ICON_INFORMATION)
self.saveResultText(resultText)
def trainWelchNN(self, trainData):
maxIter = 250
nHidden = 10
seed = np.random.randint(0, 1000000)
self.stand = ml.ClassStandardizer(trainData)
trainDataStd = self.stand.apply(trainData)
nFold = self.nTrainTrial
trnCA = np.zeros((nFold, maxIter+1))
valCA = np.zeros((nFold, maxIter+1))
dialog = wx.ProgressDialog('Training Classifier',
'Featurizing', maximum=nFold+1,
style=wx.PD_ELAPSED_TIME | wx.PD_SMOOTH)
for fold, trnData, valData in ml.part.classStratified(trainDataStd, nFold=nFold):
dialog.Update(fold, 'Validation Fold: %d' % fold)
stand = ml.ClassStandardizer(trnData)
trnData = stand.apply(trnData)
valData = stand.apply(valData)
def valTraceCB(optable, iteration, paramTrace, errorTrace, success=True):
if success:
trnCA[fold,valTraceCB.it] = optable.ca(trnData)
valCA[fold,valTraceCB.it] = optable.ca(valData)
valTraceCB.it += 1
valTraceCB.it = 0
np.random.seed(seed)
classifier = ml.FNS(trnData, accuracy=0.0, precision=0.0,
nHidden=nHidden, maxIter=maxIter, optimFunc=ml.optim.scg,
callback=valTraceCB, eTrace=True, verbose=False)
dialog.Update(nFold, 'Training Final Classifier')
meanTrnCA = np.mean(trnCA, axis=0)
meanValCA = np.mean(valCA, axis=0)
bestIter = np.argmax(meanValCA)
bestMeanTrnCA = meanTrnCA[bestIter]
bestMeanValCA = meanValCA[bestIter]
np.random.seed(seed)
self.classifier = ml.FNS(trainDataStd, accuracy=0.0, precision=0.0,
nHidden=nHidden, maxIter=bestIter, optimFunc=ml.optim.scg,
eTrace=False, verbose=False)
trainCA = self.classifier.ca(trainDataStd)
trainConfusion = np.round(100*self.classifier.confusion(trainDataStd))
dialog.Destroy()
resultText = (('Best Num Iterations: %f\n' % bestIter) +
('Best Mean Training CA: %f\n' % bestMeanTrnCA) +
('Best Mean Validation CA: %f\n' % bestMeanValCA) +
('Final Training CA: %f\n' % trainCA) +
('Confusion Matrix:\n' + str(trainConfusion) + '\n') +
('Choices: ' + str(self.choices)))
wx.MessageBox(message=resultText,
caption='Training Completed!',
style=wx.OK | wx.ICON_INFORMATION)
self.saveResultText(resultText)
def trainAutoreg(self, segs):
trainData = [seg.data for seg in segs]
self.trainAutoregRR(trainData)
    def trainAutoregRR(self, trainData):
        """Train an autoregressive classifier, picking the AR model order
        by stratified cross-validation over self.nTrainTrial folds.
        """
        self.stand = ml.ClassSegStandardizer(trainData)
        trainDataStd = self.stand.apply(trainData)
        # Candidate AR model orders to evaluate.
        orders = np.arange(2,30)
        nFold = self.nTrainTrial
        trnCA = np.zeros((nFold, orders.size))
        valCA = np.zeros((nFold, orders.size))
        dialog = wx.ProgressDialog('Training Classifier',
                    'Featurizing', maximum=nFold+1,
                    style=wx.PD_ELAPSED_TIME | wx.PD_SMOOTH)
        for fold, trnData, valData in ml.part.classStratified(trainDataStd, nFold=nFold):
            dialog.Update(fold, 'Validation Fold: %d' % fold)
            for i, order in enumerate(orders):
                classifier = ml.ARC(trnData, order=order)
                trnCA[fold,i] = classifier.ca(trnData)
                valCA[fold,i] = classifier.ca(valData)
        dialog.Update(nFold, 'Training Final Classifier')
        # Refit on all data at the order with the best mean validation CA.
        meanTrnCA = np.mean(trnCA, axis=0)
        meanValCA = np.mean(valCA, axis=0)
        bestOrderIndex = np.argmax(meanValCA)
        bestOrder = orders[bestOrderIndex]
        bestMeanTrnCA = meanTrnCA[bestOrderIndex]
        bestMeanValCA = meanValCA[bestOrderIndex]
        self.classifier = ml.ARC(trainDataStd, order=bestOrder)
        trainCA = self.classifier.ca(trainDataStd)
        trainConfusion = np.round(100*self.classifier.confusion(trainDataStd))
        dialog.Destroy()
        resultText = (('Best Order: %f\n' % bestOrder) +
                      ('Best Mean Training CA: %f\n' % bestMeanTrnCA) +
                      ('Best Mean Validation CA: %f\n' % bestMeanValCA) +
                      ('Final Training CA: %f\n' % trainCA) +
                      ('Confusion Matrix:\n' + str(trainConfusion) + '\n') +
                      ('Choices: ' + str(self.choices)))
        wx.MessageBox(message=resultText,
            caption='Training Completed!',
            style=wx.OK | wx.ICON_INFORMATION)
        self.saveResultText(resultText)
    def trainTDENet(self, segs):
        """Train a time-delay-embedding convolutional net on raw segments."""
        trainData = [seg.data for seg in segs]
        convs = ((25,7),) # first session
        maxIter = 400
        nHidden = None
        poolSize = 1
        poolMethod = 'average'
        self.stand = ml.ClassSegStandardizer(trainData)
        trainDataStd = self.stand.apply(trainData)
        dialog = wx.ProgressDialog('Training Classifier',
                    'Featurizing', maximum=maxIter+2,
                    style=wx.PD_ELAPSED_TIME | wx.PD_SMOOTH)
        def progressCB(optable, iteration, *args, **kwargs):
            dialog.Update(iteration, 'Iteration: %d/%d' % (iteration, maxIter))
        # Single fit over all standardized data; no validation split here.
        self.classifier = ml.CNA(trainDataStd, convs=convs, nHidden=nHidden, maxIter=maxIter,
                                 optimFunc=ml.optim.scg, accuracy=0.0, precision=0.0,
                                 poolSize=poolSize, poolMethod=poolMethod,
                                 verbose=False, callback=progressCB)
        trainCA = self.classifier.ca(trainDataStd)
        trainConfusion = np.round(100*self.classifier.confusion(trainDataStd))
        dialog.Destroy()
        resultText = (('Final Training CA: %f\n' % trainCA) +
                      ('Confusion Matrix:\n' + str(trainConfusion) + '\n') +
                      ('Choices: ' + str(self.choices)))
        wx.MessageBox(message=resultText,
            caption='Training Completed!',
            style=wx.OK | wx.ICON_INFORMATION)
        self.saveResultText(resultText)
    def trainConvNet(self, segs):
        """Train a convolutional network classifier on raw segments."""
        trainData = [seg.data for seg in segs]
        #convs = ((16,9), (8,9))
        #maxIter = 400 # 49
        convs = ((8,3), (8,3), (8,3))
        maxIter = 1000
        nHidden = None
        poolSize = 1
        poolMethod = 'average'
        self.stand = ml.ClassSegStandardizer(trainData)
        trainDataStd = self.stand.apply(trainData)
        dialog = wx.ProgressDialog('Training Classifier',
                    'Featurizing', maximum=maxIter+2,
                    style=wx.PD_ELAPSED_TIME | wx.PD_SMOOTH)
        def progressCB(optable, iteration, *args, **kwargs):
            dialog.Update(iteration, 'Iteration: %d/%d' % (iteration, maxIter))
        # Single fit over all standardized data; no validation split here.
        self.classifier = ml.CNA(trainDataStd, convs=convs, nHidden=nHidden, maxIter=maxIter,
                                 optimFunc=ml.optim.scg, accuracy=0.0, precision=0.0,
                                 poolSize=poolSize, poolMethod=poolMethod,
                                 verbose=False, callback=progressCB)
        trainCA = self.classifier.ca(trainDataStd)
        trainConfusion = np.round(100*self.classifier.confusion(trainDataStd))
        dialog.Destroy()
        resultText = (('Final Training CA: %f\n' % trainCA) +
                      ('Confusion Matrix:\n' + str(trainConfusion) + '\n') +
                      ('Choices: ' + str(self.choices)))
        wx.MessageBox(message=resultText,
            caption='Training Completed!',
            style=wx.OK | wx.ICON_INFORMATION)
        self.saveResultText(resultText)
    def beforeTest(self):
        """Reset per-trial state and the confusion matrix before testing."""
        self.curTrial = 0
        self.curChoices = []
        # -1 marks "no decision yet" for the current trial.
        self.curDecision = -1
        self.confusion[...] = 0.0
        self.src.setMarker(0.0)
    def afterTest(self, earlyStop):
        """Report test accuracy (unless stopped early) and reset the menu."""
        if not earlyStop:
            self.saveCap()
            # Diagonal of the confusion matrix counts correct selections.
            ca = np.mean(np.diag(self.confusion))/self.nTestTrial
            confusion = np.round(100*self.confusion/self.nTestTrial)
            resultText = (('Test Selection CA: %f\n' % ca) +
                          ('Confusion Matrix:\n' + str(confusion) + '\n') +
                          ('Choices: ' + str(self.choices)))
            wx.MessageBox(message=resultText,
                caption='Testing Complete',
                style=wx.OK | wx.ICON_INFORMATION)
            self.saveResultText(resultText)
        else:
            self.pieMenu.zeroBars(refresh=False)
            self.pieMenu.clearAllHighlights()
    def testEpoch(self):
        """Run one step of a test trial: cue a target first, then schedule
        classification of the recorded segment."""
        if self.curDecision == -1:
            self.src.setMarker(0.0)
            self.highlightTestTarget()
            self.curDecision += 1
            wx.CallLater(int(1000 * self.width * 1.1), self.runTestEpoch)
        else:
            # a little extra at the end to make sure we get the last segment
            wx.CallLater(int(1000 * self.decisionSecs * 1.1), self.testClassify)
    def highlightTestTarget(self):
        """Pick the next target choice (reshuffling once per pass over all
        choices) and cue it on the pie menu."""
        if len(self.curChoices) == 0:
            self.curChoices = copy.copy(self.choices)
            np.random.shuffle(self.curChoices)
            self.curTrial += 1
        self.curChoice = self.curChoices.pop()
        self.pieMenu.highlight(self.curChoice, style='pop')
        # Marker encodes the cued class as 10 * (index + 1).
        self.src.setMarker(10.0*(self.choices.index(self.curChoice)+1.0))
    def testClassify(self):
        """Classify the most recent EEG segment, grow the winner's bar and
        finalize the trial once the pie menu reports a selection."""
        # a little extra (0.9*self.pauseSecs) for edge effects in filter XXX - idfah
        cap = self.src.getEEGSecs(self.width+0.9*self.pauseSecs).copy(dtype=np.float32) # get rid of copy and dtype after implementing in source XXX - idfah
        cap = cap.trim(start=0.9*self.pauseSecs)
        seg = cap.segmentSingle()
        # Featurize exactly as during training.
        if self.method == 'Welch Power':
            freqs, testData = self.powerize((seg,))
        else:
            testData = (seg.data,)
        testDataStd = self.stand.apply(testData)[0]
        label = self.classifier.label(testDataStd)[0]
        selection = self.choices[label]
        self.pieMenu.growBar(selection, self.gain, refresh=True)
        self.curDecision += 1
        finalSelection = self.pieMenu.getSelection()
        if finalSelection is None:
            wx.CallAfter(self.runTestEpoch)
        else:
            self.pieMenu.clearAllHighlights(refresh=False)
            self.pieMenu.highlight(finalSelection, style='jump', secs=self.pauseSecs)
            finalLabel = self.choices.index(finalSelection)
            self.src.incrementMarker(finalLabel+1)
            # Rows: selected class; columns: cued class.
            self.confusion[finalLabel, self.choices.index(self.curChoice)] += 1.0
            wx.CallLater(int(1000 * self.pauseSecs), self.testClearTrial)
    def testClearTrial(self):
        """Reset menu state, then continue to the next trial or end the test."""
        self.pieMenu.zeroBars(refresh=False)
        self.pieMenu.clearAllHighlights()
        self.curDecision = -1
        # Done when the last pass through all choices has been exhausted.
        if self.curTrial == self.nTestTrial and len(self.curChoices) == 0:
            wx.CallLater(int(1000 * self.pauseSecs), self.endTest)
        else:
            wx.CallLater(int(1000 * self.pauseSecs), self.runTestEpoch)
| idfah/cebl | cebl/rt/pages/mentaltasks.py | mentaltasks.py | py | 39,727 | python | en | code | 10 | github-code | 90 |
40239750028 | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 31 18:57:54 2020
@author: SethHarden
"""
import math
# Add any extra import statements you may need here
# Add any helper functions you may need here
# Split an array into two subsequences (a, b)
# to see if the sum of of the integers in both are ==
def findSplitPoint(arr, n):
    """Return the first index s (1 <= s <= n) such that
    sum(arr[:s]) == sum(arr[s:n]), or -1 if no such split exists.

    Runs in O(n) using a running prefix sum against the total, instead
    of the original O(n^2) re-summation of the right side.
    """
    total = sum(arr[:n])
    leftSum = 0
    for i in range(n):
        leftSum += arr[i]
        # Right side is everything after index i: total - leftSum.
        if leftSum == total - leftSum:
            return i + 1
    return -1

def balancedSplitExists(arr):
    """Return True if arr splits into a non-empty prefix and non-empty
    suffix with equal sums, False otherwise.

    Fixes the original, which could fall through and implicitly return
    None, and which printed debug output on success.
    """
    n = len(arr)
    split = findSplitPoint(arr, n)
    # split == -1: no balanced point; split == n: right side would be empty.
    return split != -1 and split != n
# These are the tests we use to determine if the solution is correct.
# You can add your own at the bottom, but they are otherwise not editable!
def printString(string):
    """Echo *string* wrapped in ["..."], with no trailing newline."""
    print('["{}"]'.format(string), end='')
test_case_number = 1
def check(expected, output):
    """Compare *output* with *expected* and print one pass/fail line.

    Advances the module-level test counter after every call.
    """
    global test_case_number
    passed = expected == output
    if passed:
        print('\u2713', 'Test #', test_case_number, sep='')
    else:
        print('\u2717', 'Test #', test_case_number, ': Expected ', sep='', end='')
        printString(expected)
        print(' Your output: ', end='')
        printString(output)
        print()
    test_case_number += 1
if __name__ == "__main__":
    # Self-checks: the first array balances (2 + 1 + 2 == 5), the
    # second cannot be balanced.
    arr_1 = [2, 1, 2, 5]
    expected_1 = True
    output_1 = balancedSplitExists(arr_1)
    check(expected_1, output_1)
    arr_2 = [3, 6, 3, 4, 4]
    expected_2 = False
    output_2 = balancedSplitExists(arr_2)
    check(expected_2, output_2)
    # Add your own test cases here
18172676809 | import sys
def main():
k = int(input())
a =7%k
if a == 0:
print(1)
sys.exit()
for i in range(2,10**7):
a = (10*a+7) % k
if a == 0:
print(i)
sys.exit()
print(-1)
main() | Aasthaengg/IBMdataset | Python_codes/p02596/s405067634.py | s405067634.py | py | 251 | python | en | code | 0 | github-code | 90 |
10876473985 | import pygame
class spritesheet(object):
def __init__(self, filename, blocksize):
self.sheet = pygame.image.load(filename)#.convert_alpha()
self.blocksize = blocksize
def image_at_block(self, pos, colorkey = None):
xStart = pos[0] * self.blocksize[0]
yStart = pos[1] * self.blocksize[1]
return self.image_at((xStart, yStart, self.blocksize[0], self.blocksize[1]), colorkey)
def image_at_custom(self, pos, size, colorkey = None):
xStart = pos[0] * self.blocksize[0]
yStart = pos[1] * self.blocksize[1]
return self.image_at((xStart, yStart, size[0], size[1]), colorkey)
def image_at(self, rectangle, colorkey = None):
rect = pygame.Rect(rectangle)
image = pygame.Surface(rect.size, pygame.SRCALPHA).convert_alpha()
image.blit(self.sheet, (0, 0), rect)
if colorkey != None:
if colorkey == -1:
colorkey = image.get_at((0,0))
image.set_colorkey(colorkey, pygame.RLEACCEL)
return image | stojanov/dungeon-crawler | Spritesheet.py | Spritesheet.py | py | 1,069 | python | en | code | 0 | github-code | 90 |
34727386037 | #!/usr/bin/python3
"""
Rectangle class definition.
"""
from .base import Base
class Rectangle(Base):
    """Define a rectangle with validated width/height and x/y offsets."""
    def __init__(self, width, height, x=0, y=0, id=None):
        """Initialize rectangle."""
        super().__init__(id)
        self.width = width
        self.height = height
        self.x = x
        self.y = y
    @staticmethod
    def _validate_dimension(name, value):
        """Raise unless *value* is a strictly positive int (width/height)."""
        if type(value) is not int:
            raise TypeError("{} must be an integer".format(name))
        elif value <= 0:
            raise ValueError("{} must be > 0".format(name))
    @staticmethod
    def _validate_offset(name, value):
        """Raise unless *value* is a non-negative int (x/y)."""
        if type(value) is not int:
            raise TypeError("{} must be an integer".format(name))
        elif value < 0:
            raise ValueError("{} must be >= 0".format(name))
    @property
    def width(self):
        """Return width."""
        return self.__width
    @width.setter
    def width(self, value):
        """Set width (positive integer)."""
        self._validate_dimension("width", value)
        self.__width = value
    @property
    def height(self):
        """Return height."""
        return self.__height
    @height.setter
    def height(self, value):
        """Set height (positive integer)."""
        self._validate_dimension("height", value)
        self.__height = value
    @property
    def x(self):
        """Return x."""
        return self.__x
    @x.setter
    def x(self, value):
        """Set x (non-negative integer)."""
        self._validate_offset("x", value)
        self.__x = value
    @property
    def y(self):
        """Return y."""
        return self.__y
    @y.setter
    def y(self, value):
        """Set y (non-negative integer)."""
        self._validate_offset("y", value)
        self.__y = value
    def __str__(self):
        """Display basic information."""
        return "[Rectangle] ({}) {}/{} - {}/{}".format(
            self.id, self.x, self.y, self.width, self.height)
    def area(self):
        """Return area."""
        return self.width * self.height
    def display(self):
        """Print the rectangle as '#' characters (offset by x/y) and
        return the printed text."""
        y_offset = '\n' * self.y
        x_offset = ' ' * self.x
        row = (x_offset + '#' * self.width + '\n')
        text_rectangle = y_offset + (row * self.height)
        # Remove trailing newline
        print(text_rectangle, end='')
        return text_rectangle
    def update(self, *args, **kwargs):
        """Update attributes positionally (id, width, height, x, y) via
        *args; fall back to named **kwargs when no positional args given."""
        if len(args):
            # Set default values for attributes
            new_vals = [self.id, self.width, self.height, self.x, self.y]
            # Replace default values with values provided from 'args', if any
            for i, arg in enumerate(args):
                new_vals[i] = arg
            self.id, self.width, self.height, self.x, self.y = new_vals
        else:
            for attr, value in kwargs.items():
                if hasattr(self, attr):
                    setattr(self, attr, value)
    def to_dictionary(self):
        """Return this object's dictionary of attributes."""
        keys = ['id', 'width', 'height', 'x', 'y']
        return {key: getattr(self, key) for key in keys}
22201247708 | import json
from scholarly import ProxyGenerator, scholarly
# Set up a ProxyGenerator object to use free proxies
# This needs to be done only once per session
print("Setting up proxy generator...")
pg = ProxyGenerator()
pg.FreeProxies()
scholarly.use_proxy(pg)
# Google Scholar profile to scrape.
author_ID = "KLIjERgAAAAJ"
scholar_sections = ["basics", "indices", "counts", "coauthors", "publications", "public_access"]
# Retrieve the author's data, fill-in, and print
# Get an iterator for the author results
print("Searching for author ID: " + author_ID)
search_query = scholarly.search_author_id(author_ID)
# Retrieve all the details for the author
print("Retrieving author details...")
author = scholarly.fill(search_query, sections=scholar_sections, sortby="year")
# Retrieve journal of each publication and fill
# NOTE(review): one network round-trip per publication; this can be slow
# and free proxies are unreliable.
print("Retrieving journal details...")
for pub in author["publications"]:
    scholarly.fill(pub)
# Save the author's data to a json file
print("Saving author data to file...")
with open("scholar_data.json", "w") as f:
    f.write(json.dumps(author, indent=4))
print("Done!")
| eurunuela/eurunuela.github.io | workflows/fetch_from_scholar.py | fetch_from_scholar.py | py | 1,067 | python | en | code | 0 | github-code | 90 |
29683751557 | from use_cases.rent_use_cases import RentUseCases
from repositories.rent_repository import RentRepository
from repositories.movie_repository import MovieRepository
from flask import Blueprint, redirect, session
rent_bp = Blueprint("rent", __name__)
@rent_bp.route("/rent/<int:id>", methods=["POST"])
def rent(id) :
repository = RentRepository()
use_case = RentUseCases(repository)
user = session.get("user", None)
movie = MovieRepository.query.filter_by(id=id).first()
movie_title = movie.title
movie_id = movie.id
if not user :
session["flash"] = "Entre no sistema para acessar seu perfil."
return redirect("/entrar")
if not movie :
session["flash"] = "Filme não encontrado."
return redirect("/")
rent_data = {
"user_id": user["id"],
"movie_id": movie_id,
}
try :
use_case.create(rent_data)
session["flash"] = f"{movie_title} adicionado a sua lista de alugados"
return redirect("/perfil")
except Exception as e :
session["flash"] = e.args[0]
return redirect(f"/alugar/{movie_id}") | MatheusLuizSoares/locadora | server/src/routes/rent_routes.py | rent_routes.py | py | 1,147 | python | en | code | 1 | github-code | 90 |
23419379088 | from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from .views import RequestCreateList, RequestRetrieveUpdate, GetActiveRequests, \
AcceptRequests, CancelRequests, GetMyRequests, GetMyAcceptedRequests
# URL routes for the food-request API; <str:id>/<str:pk> path parameters
# identify a single request document.
urlpatterns = [
    path('requests/active/', GetActiveRequests.as_view(), name="active-requests"),
    path('requests/my/', GetMyRequests.as_view(), name="my-requests"),
    path('requests/my/accepted/', GetMyAcceptedRequests.as_view(), name="my-accepted-requests"),
    path('request/accept/<str:id>/', AcceptRequests.as_view(), name="accept-request"),
    path('request/cancel/<str:id>/', CancelRequests.as_view(), name="cancel-request"),
    path('request/', RequestCreateList.as_view(), name="create-request"),
    path('request/<str:pk>/', RequestRetrieveUpdate.as_view(), name="get-update-request"),
]
# Allow clients to request explicit response formats (e.g. ".json" suffix).
urlpatterns = format_suffix_patterns(urlpatterns)
| SineRaja/FoodWasteManagement | FoodRequest/urls.py | urls.py | py | 907 | python | en | code | 0 | github-code | 90 |
24841224312 | #_*_ coding:UTF-8 _*_
# @author: jacky
# 百度云语音识别Demo,实现对本地语音文件的识别。
# 需安装好python-SDK,录音文件不不超过60s,文件类型为wav格式。
# 音频参数需设置为 单通道 采样频率为16K PCM格式 可以先采用官方音频进行测试
# 导入AipSpeech AipSpeech是语音识别的Python SDK客户端
from aip import AipSpeech
import os
import importlib,sys
importlib.reload(sys)
#sys.setdefaultencoding('utf8')
''' 你的APPID AK SK 参数在申请的百度云语音服务的控制台查看'''
# Your APPID/AK/SK come from the Baidu Cloud speech-service console.
# SECURITY NOTE(review): real-looking credentials are hard-coded and
# committed here; they should be rotated and loaded from the environment.
APP_ID = '23476240'
API_KEY = 'XXH3nkO18oDqrerZTuvlMvc3'
SECRET_KEY = 'PMf0mbamURxetU1aoxH5uXttvzi36uj7'
# Create the AipSpeech recognition client.
client = AipSpeech(APP_ID, API_KEY, SECRET_KEY)
# 读取文件
def get_file_content(filePath):
    """Return the raw bytes of the file at *filePath*."""
    with open(filePath, 'rb') as audio_file:
        return audio_file.read()
def stt(filename, lan="中文"): # speech recognition
    """Recognize speech in *filename* (16 kHz mono PCM wav) via Baidu ASR.

    Returns the recognized text (with a trailing full-width comma
    stripped) and mirrors it into demo.txt, or None on failure.
    """
    # Map the language name to Baidu's dev_pid code
    # (1537 = Mandarin, 1737 = English).
    piddict = {"中文":1537, "英文":1737, "英语":1737}
    print('语言码是', piddict[lan])
    result = client.asr(get_file_content(filename),
                        'wav',
                        16000,
                        {'dev_pid': piddict[lan],}
                        )
    # Parse the response; persist and return the recognized text.
    if result['err_msg'] == 'success.':
        # Bug fix: the original encoded the text to utf-8 bytes, compared
        # a 3-byte slice against the str ',' (always False in Python 3)
        # and wrote bytes into a text-mode file (TypeError). Work in str
        # throughout and strip the trailing full-width comma directly.
        word = result['result'][0]
        if word != '':
            if word.endswith(','):
                word = word[:-1]
            with open('demo.txt', 'w') as f:
                f.write(word)
            return word
        else:
            print("音频文件不存在或格式错误")
    else:
        print("错误")
    return None
# main函数 识别本地录音文件yahboom.wav
if __name__ == '__main__':
stt('test.wav')
| buliugucloud/Che-Bao-A-smart-car-voice-control-assistant-based-on-Raspberry-Pi | Source code/speech_recognition.py | speech_recognition.py | py | 2,252 | python | zh | code | 2 | github-code | 90 |
24758046975 | nama = 'Muh Hamzah Tsalis N'
program = 'Gerak Lurus'
print(f'Program {program} oleh {nama}')
def hitung_kecepatan(jarak, waktu):
kecepatan = jarak / waktu
print(f'jarak ={jarak / 1000} ditempuh dalam waktu = {waktu / 60}menit')
print(f'Sehingga kecepatan = {kecepatan} m/s')
return jarak / waktu
# jarak = 1000
# waktu 5 * 60
kecepatan = hitung_kecepatan(1000, 5 * 60)
kecepatan = hitung_kecepatan(3000, 15 * 60)
def hitung_berat(massa, gravitasi):
    """Print and return the weight (N) of *massa* kg under *gravitasi*
    m/s^2 of gravity."""
    gaya_berat = massa * gravitasi
    print(f'massa = {massa}kg dengan gravitasi = {gravitasi}')
    print(f'sehingga beratnya = {gaya_berat} N')
    return gaya_berat
# massa = 20
# gravitasi = 9.8
berat = hitung_berat(20, 9.8)
berat = hitung_berat(99, 9.8)
| KaNirullah/uin_modularization | main.py | main.py | py | 729 | python | id | code | 0 | github-code | 90 |
27090669638 | from spack import *
class Glfmultiples(MakefilePackage):
"""glfMultiples is a GLF-based variant caller for next-generation
sequencing data. It takes a set of GLF format genotype likelihood
files as input and generates a VCF-format set of variant calls
as output. """
homepage = "https://genome.sph.umich.edu/wiki/GlfMultiples"
url = "http://www.sph.umich.edu/csg/abecasis/downloads/generic-glfMultiples-2010-06-16.tar.gz"
version('2010-06-16', '64bf6bb7c76543f4c8fabce015a3cb11')
depends_on('zlib')
def edit(self, spec, prefix):
makefile = FileFilter('Makefile')
makefile.filter('CXX=.*', 'CXX = ' + env['CXX'])
makefile.filter('CFLAGS=.*',
'CFLAGS=-O2 -I./libsrc -I./pdf ' +
'-D_FILE_OFFSET_BITS=64 -D__USE_LONG_INT')
def install(self, spec, prefix):
make('INSTALLDIR=%s' % prefix, 'install')
| matzke1/spack | var/spack/repos/builtin/packages/glfmultiples/package.py | package.py | py | 935 | python | en | code | 2 | github-code | 90 |
18004433449 | import sys
input = sys.stdin.readline
def read():
N = int(input().strip())
A = list(map(int, input().strip().split()))
return N, A
def solve(N, A):
B = [A[0]]
prev = A[0]
for a in A:
if prev != a:
B.append(a)
prev = a
count = 1
up = False
down = False
for i in range(1, len(B)):
if not up and not down:
if B[i-1] < B[i]:
up = True
else:
down = True
elif up and B[i-1] > B[i]:
up = False
count += 1
elif down and B[i-1] < B[i]:
down = False
count += 1
return count
if __name__ == '__main__':
inputs = read()
print("%s" % solve(*inputs))
| Aasthaengg/IBMdataset | Python_codes/p03745/s048864173.py | s048864173.py | py | 754 | python | en | code | 0 | github-code | 90 |
40178307729 | from flask_jwt_extended import jwt_required, get_jwt_identity
from app.models.user_model import UserModel
from flask import current_app, session
from app.exc import UserNotFound
@jwt_required()
def delete_user_controller():
    """Delete the authenticated user's account.

    Returns 204 on success, or 404 when the token's email matches no user.
    """
    # Renamed from `session` to avoid shadowing flask.session, which is
    # imported at module level and used by other controllers.
    db_session = current_app.db.session
    current_user = get_jwt_identity()
    try:
        user = UserModel.query.filter_by(email=current_user["email"]).first()
        if not user:
            raise UserNotFound
        db_session.delete(user)
        db_session.commit()
        # NOTE(review): a 204 response should carry no body; the message
        # is kept for backward compatibility with existing clients.
        return {"msg": f"user {user.name} has been deleted."},204
    except UserNotFound as err:
        return err.message, 404
| Kenzie-Academy-Brasil-Developers/q3-sprint6-autenticacao-e-autorizacao-brunotetzner | app/controllers/delete_user_controller.py | delete_user_controller.py | py | 631 | python | en | code | 0 | github-code | 90 |
18000220909 | import sys
readline = sys.stdin.readline
MOD = 10 ** 9 + 7
INF = float('INF')
sys.setrecursionlimit(10 ** 5)
def main():
    """Read A, B, C and print YES if some non-negative multiple of A
    leaves remainder C when divided by B, NO otherwise."""
    A, B, C = map(int, readline().split())
    def judge():
        # A*i mod B cycles with period at most B, so this bound is more
        # than enough to find a witness if one exists.
        for i in range(0, 100000):
            cur = A * i
            if cur % B == C:
                return True
        return False
    if judge():
        print("YES")
    else:
        print("NO")
if __name__ == '__main__':
main()
| Aasthaengg/IBMdataset | Python_codes/p03730/s804966524.py | s804966524.py | py | 430 | python | en | code | 0 | github-code | 90 |
10531799062 | import logging
from openerp.osv import fields, osv
_logger = logging.getLogger(__name__)
class ir_actions_report_xml(osv.osv):
    """Extend ir.actions.report.xml with additional report_type choices."""
    _inherit = 'ir.actions.report.xml'
    _columns = {
        # Overrides the selection to expose legacy/deprecated renderers
        # (RML, sxw, webkit) alongside the QWeb engines.
        'report_type': fields.selection([('qweb-pdf', 'PDF'),
                                         ('qweb-html', 'HTML'),
                                         ('controller', 'Controller'),
                                         ('pdf', 'RML pdf (deprecated)'),
                                         ('sxw', 'RML sxw (deprecated)'),
                                         ('webkit', 'Webkit (deprecated)'),
                                         ('html2html', 'html2html'),
                                         ('mako2html', 'Mako2'),
                                         ], 'Report Type', required=True, help="HTML will open the report directly in your browser, PDF will use wkhtmltopdf to render the HTML into a PDF file and let you download it, Controller allows you to define the url of a custom controller outputting any kind of report."),
    }
2797917246 |
from jsondb import JsonDB
# db = JsonDB()
# db.load()
# print(db)
# print(db['contacts'][0])
# print(db['contacts'][0]['first_name'])
# db['contacts'][0]['blood_type'] = 'A-'
# db['contacts'].append({
# 'first_name': 'Eleanor',
# 'last_name': 'Johnston',
# 'phone': '814-398-4326',
# 'email': 'EleanorSJohnston@rhyta.com',
# 'favorite_color': 'orange',
# 'blood_type': 'AB+'
# })
# db.save()
from flask import Flask, render_template, request, redirect
app = Flask(__name__)
@app.route('/')
def index():
    """Render the contact list from the JSON-backed store."""
    db = JsonDB()
    db.load()
    # print(db['contacts'])
    return render_template('index.html', contacts=db['contacts'])
@app.route('/create/', methods=['GET', 'POST'])
def create():
    """Show the new-contact form (GET) or append the posted contact (POST)."""
    # print(request.method)
    # print(request.form)
    db = JsonDB()
    db.load()
    if request.method == 'POST':
        # NOTE(review): request.form[...] raises KeyError (-> HTTP 400)
        # when a field is missing from the submitted form.
        db['contacts'].append({
            'first_name': request.form['first_name'],
            'last_name': request.form['last_name'],
            'phone': request.form['phone'],
            'email': request.form['email'],
            'blood_type': request.form['blood_type'],
            'favorite_color': request.form['favorite_color']
        })
        db.save()
        return redirect('/')
    return render_template('create.html', blood_types=db['blood_types'])
@app.route('/delete/<int:index>/')
def delete(index):
    """Remove the contact at position *index* and return to the list.

    NOTE(review): a destructive action reachable by GET, and an
    out-of-range index raises IndexError; confirm intended behavior.
    """
    db = JsonDB()
    db.load()
    db['contacts'].pop(index)
    db.save()
    return redirect('/')
app.run(debug=True)
| PdxCodeGuild/class_salmon | 2 Flask + HTML + CSS/solutions/contact_list/app.py | app.py | py | 1,493 | python | en | code | 5 | github-code | 90 |
29741298799 | """
输入一个字符串,按字典序打印出该字符串中字符的所有排列。例如输入字符串abc,则打印出由字符a,b,c所能排列出来的所有字符串abc,acb,bac,bca,cab和cba。
思路:循环数组中字母,取第一个,后续组成一部分数据,第一个数据 + 后续数据进行递归的结果
"""
def full_prmutate(alpth_list):
    """Return all permutations of the characters in *alpth_list*, in
    lexicographic (dictionary) order, as the module docstring requires.

    The original only produced dictionary order when the input was
    already sorted (e.g. 'abc' but not 'tkc'); sorting the recursive
    result fixes that while keeping the same recursive structure.
    For a single element the element itself is returned (unchanged
    base-case behavior).
    """
    if len(alpth_list) == 1:
        return alpth_list[0]
    else:
        result = []
        # Fix each element in the first position and permute the rest.
        for i in range(len(alpth_list)):
            full_list = full_prmutate(alpth_list[:i] + alpth_list[i+1:])
            for j in full_list:
                result.append(alpth_list[i] + j)
        return sorted(result)
if __name__ == '__main__':
    print(full_prmutate('tkc'))
40572016286 | from typing import List
#
# @lc app=leetcode id=78 lang=python3
#
# [78] Subsets
#
# @lc code=start
class Solution:
    """LeetCode 78: enumerate every subset (the power set) of *nums*."""
    def subsets(self, nums: List[int]) -> List[List[int]]:
        # Each bitmask encodes one include/exclude decision per index.
        # Bit order is chosen so the output matches the include-first
        # DFS ordering of the original (element kept when its bit is 0).
        n = len(nums)
        collected = []
        for mask in range(1 << n):
            subset = [nums[i] for i in range(n) if not (mask >> (n - 1 - i)) & 1]
            collected.append(subset)
        return collected
# @lc code=end
| VenkatSBitra/leetcode | 78.subsets.py | 78.subsets.py | py | 513 | python | en | code | 0 | github-code | 90 |
44020028542 | import unittest
"""
Fermet Little Theorem:
If n is prime, then for all integers 'a' such that 2 <= a <= n-1, a**(n-1) % n = 1.
Idea is to chose a random 'a' from the above mentioned range, k times, and return True if
remainder is 1 for each time.
This is a probabilistic method: It returns true for all primes, it may return true for composites (non-primes).
Higher k => greater accuracy.
"""
from random import randint
# Computes (a ** n) % p in O(log(n)) time complexity.
# If n == 0: return 1
# If n % 2 == 0:
# return a^(n/2) * a^(n/2)
# else return a * a^(n/2) * a^(n/2)
def modular_exponent(a, n, p):
    """Return (a ** n) % p via binary exponentiation in O(log n).

    Bug fix: the original used `n /= 2`, which is FLOAT division in
    Python 3; the exponent became a float, the odd/even test broke and
    results were wrong. Floor division keeps the exponent integral.
    """
    result = 1
    a = a % p
    while n > 0:
        # If the current low bit of the exponent is set, fold `a` in.
        if n % 2 != 0:
            result = (result * a) % p
        # Exponent must be even now
        n //= 2
        a = (a * a) % p
    return result
def is_prime_fermet(number, num_iterations):
    """Fermat probabilistic primality test.

    Returns True for every prime; may (rarely) return True for a
    composite with Fermat liars. Uses the built-in three-argument
    pow(), which performs correct O(log n) modular exponentiation,
    instead of the hand-rolled helper.
    """
    if number <= 1 or number == 4:
        return False
    if number <= 3:
        return True
    while num_iterations > 0:
        a = randint(2, number - 2)
        # Fermat: a**(n-1) % n == 1 must hold whenever n is prime.
        if pow(a, number - 1, number) != 1:
            return False
        num_iterations -= 1
    return True
class TestPrimality(unittest.TestCase):
    """Sanity checks for the Fermat primality test."""
    def test_is_prime_fermet(self):
        # 11 is prime; 15 is composite (drawing only its Fermat liars
        # three times in a row is very unlikely, so this is near-stable).
        self.assertTrue(is_prime_fermet(11, 3))
        self.assertFalse(is_prime_fermet(15, 3))
| prathamtandon/g4gproblems | Math/is_prime_fermet_method.py | is_prime_fermet_method.py | py | 1,378 | python | en | code | 3 | github-code | 90 |
38163990005 | import os.path as osp
from tempfile import NamedTemporaryFile
import mmcv
import numpy as np
import pytest
import torch
import mmdeploy.backend.ncnn as ncnn_apis
import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Backend, Codebase
from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker
try:
import_codebase(Codebase.MMDET)
except ImportError:
pytest.skip(f'{Codebase.MMDET} is not installed.', allow_module_level=True)
from mmdeploy.codebase.mmdet.deploy.object_detection_model import End2EndModel
def assert_det_results(results, module_name: str = 'model'):
    """Validate detector output: a [dets, labels] pair agreeing in their
    first two dimensions (batch and per-image detection counts)."""
    assert results is not None, f'failed to get output using {module_name}'
    assert isinstance(results, list)
    assert len(results) == 2
    dets, labels = results
    assert dets.shape[0] == labels.shape[0]
    assert dets.shape[1] == labels.shape[1]
def assert_forward_results(results, module_name: str = 'model'):
    """Validate single-image forward output: one per-class container of
    length 80 (COCO classes), optionally paired with mask results."""
    assert results is not None, f'failed to get output using {module_name}'
    assert isinstance(results, list)
    assert len(results) == 1
    head = results[0]
    # Mask models return (bbox_results, mask_results) tuples.
    per_class = head[0] if isinstance(head, tuple) else head
    assert len(per_class) == 80
@backend_checker(Backend.ONNXRUNTIME)
class TestEnd2EndModel:
    """Smoke tests for End2EndModel over a mocked ONNX Runtime backend."""
    @classmethod
    def setup_class(cls):
        # force add backend wrapper regardless of plugins
        # make sure ONNXRuntimeDetector can use ORTWrapper inside itself
        from mmdeploy.backend.onnxruntime import ORTWrapper
        ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
        # simplify backend inference
        cls.wrapper = SwitchBackendWrapper(ORTWrapper)
        # Canned backend outputs: 10 detections with 5-value boxes.
        cls.outputs = {
            'dets': torch.rand(1, 10, 5),
            'labels': torch.rand(1, 10)
        }
        cls.wrapper.set(outputs=cls.outputs)
        deploy_cfg = mmcv.Config(
            {'onnx_config': {
                'output_names': ['dets', 'labels']
            }})
        from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
            End2EndModel
        cls.end2end_model = End2EndModel(Backend.ONNXRUNTIME, [''], 'cpu',
                                         ['' for i in range(80)], deploy_cfg)
    @classmethod
    def teardown_class(cls):
        # Restore the real backend wrapper.
        cls.wrapper.recover()
    def test_forward(self):
        imgs = [torch.rand(1, 3, 64, 64)]
        img_metas = [[{
            'ori_shape': [64, 64, 3],
            'img_shape': [64, 64, 3],
            'scale_factor': [1, 1, 1, 1],
            'border': [0, 0, 0]
        }]]
        results = self.end2end_model.forward(imgs, img_metas)
        assert_forward_results(results, 'End2EndModel')
    def test_show_result(self):
        input_img = np.zeros([64, 64, 3])
        img_path = NamedTemporaryFile(suffix='.jpg').name
        result = (torch.rand(1, 10, 5), torch.rand(1, 10))
        self.end2end_model.show_result(
            input_img, result, '', show=False, out_file=img_path)
        assert osp.exists(img_path)
@backend_checker(Backend.ONNXRUNTIME)
class TestMaskEnd2EndModel:
    """End2EndModel tests for a mask model (dets + labels + masks)."""
    @classmethod
    def setup_class(cls):
        # force add backend wrapper regardless of plugins
        # make sure ONNXRuntimeDetector can use ORTWrapper inside itself
        from mmdeploy.backend.onnxruntime import ORTWrapper
        ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
        # simplify backend inference
        num_classes = 80
        num_dets = 10
        cls.wrapper = SwitchBackendWrapper(ORTWrapper)
        # Canned backend outputs, including 28x28 instance masks.
        cls.outputs = {
            'dets': torch.rand(1, num_dets, 5),
            'labels': torch.randint(num_classes, (1, num_dets)),
            'masks': torch.rand(1, num_dets, 28, 28)
        }
        cls.wrapper.set(outputs=cls.outputs)
        deploy_cfg = mmcv.Config({
            'onnx_config': {
                'output_names': ['dets', 'labels', 'masks']
            },
            'codebase_config': {
                'post_processing': {
                    'export_postprocess_mask': False
                }
            }
        })
        from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
            End2EndModel
        cls.end2end_model = End2EndModel(Backend.ONNXRUNTIME, [''], 'cpu',
                                         ['' for i in range(80)], deploy_cfg)
    @classmethod
    def teardown_class(cls):
        cls.wrapper.recover()
    def test_forward(self):
        imgs = [torch.rand(1, 3, 64, 64)]
        img_metas = [[{
            'ori_shape': [64, 64, 3],
            'img_shape': [64, 64, 3],
            'scale_factor': [1, 1, 1, 1],
        }]]
        results = self.end2end_model.forward(imgs, img_metas)
        assert_forward_results(results, 'mask End2EndModel')
def get_test_cfg_and_post_processing():
    """Build matching mmdet test_cfg and deploy post_processing dicts."""
    # Shared thresholds so the two configs stay consistent.
    iou_thr = 0.5
    score_thr = 0.05
    keep = 10
    test_cfg = {
        'nms_pre': 100,
        'min_bbox_size': 0,
        'score_thr': score_thr,
        'nms': {
            'type': 'nms',
            'iou_threshold': iou_thr
        },
        'max_per_img': keep
    }
    post_processing = {
        'score_threshold': score_thr,
        'iou_threshold': iou_thr,
        'max_output_boxes_per_class': 20,
        'pre_top_k': -1,
        'keep_top_k': keep,
        'background_label_id': -1
    }
    return test_cfg, post_processing
@backend_checker(Backend.ONNXRUNTIME)
class TestPartitionSingleStageModel:
    """Tests for PartitionSingleStageModel with mocked raw scores/boxes."""
    @classmethod
    def setup_class(cls):
        # force add backend wrapper regardless of plugins
        # make sure ONNXRuntimeDetector can use ORTWrapper inside itself
        from mmdeploy.backend.onnxruntime import ORTWrapper
        ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
        # simplify backend inference
        cls.wrapper = SwitchBackendWrapper(ORTWrapper)
        # Raw (pre-NMS) outputs: per-class scores and box coordinates.
        cls.outputs = {
            'scores': torch.rand(1, 10, 80),
            'boxes': torch.rand(1, 10, 4)
        }
        cls.wrapper.set(outputs=cls.outputs)
        test_cfg, post_processing = get_test_cfg_and_post_processing()
        model_cfg = mmcv.Config(dict(model=dict(test_cfg=test_cfg)))
        deploy_cfg = mmcv.Config(
            dict(codebase_config=dict(post_processing=post_processing)))
        from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
            PartitionSingleStageModel
        cls.model = PartitionSingleStageModel(
            Backend.ONNXRUNTIME, [''],
            'cpu', ['' for i in range(80)],
            model_cfg=model_cfg,
            deploy_cfg=deploy_cfg)
    @classmethod
    def teardown_class(cls):
        cls.wrapper.recover()
    def test_forward_test(self):
        imgs = [torch.rand(1, 3, 64, 64)]
        img_metas = [[{
            'ori_shape': [64, 64, 3],
            'img_shape': [64, 64, 3],
            'scale_factor': [1, 1, 1, 1],
        }]]
        results = self.model.forward_test(imgs, img_metas)
        assert_det_results(results, 'PartitionSingleStageModel')
    def test_postprocess(self):
        # partition0 turns raw scores/boxes into (dets, labels).
        scores = torch.rand(1, 120, 80)
        bboxes = torch.rand(1, 120, 4)
        results = self.model.partition0_postprocess(
            scores=scores, bboxes=bboxes)
        assert_det_results(
            results, '.partition0_postprocess of'
            'PartitionSingleStageModel')
def prepare_model_deploy_cfgs():
    """Assemble model/deploy configs for a minimal two-stage detector.

    Returns:
        tuple[mmcv.Config, mmcv.Config]: a model config with a tiny
        Shared2FCBBoxHead ROI head and a deploy config carrying the
        matching post-processing settings.
    """
    test_cfg, post_processing = get_test_cfg_and_post_processing()
    bbox_roi_extractor = dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
        out_channels=8,
        featmap_strides=[4])
    bbox_head = dict(
        type='Shared2FCBBoxHead',
        in_channels=8,
        fc_out_channels=1024,
        roi_feat_size=7,
        num_classes=80,
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[0.0, 0.0, 0.0, 0.0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        reg_class_agnostic=False,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0))
    roi_head = dict(
        bbox_roi_extractor=bbox_roi_extractor, bbox_head=bbox_head)
    # The same NMS settings are reused for both the RPN and RCNN stages.
    model_cfg = mmcv.Config(
        dict(
            model=dict(
                neck=dict(num_outs=0),
                test_cfg=dict(rpn=test_cfg, rcnn=test_cfg),
                roi_head=roi_head)))
    deploy_cfg = mmcv.Config(
        dict(codebase_config=dict(post_processing=post_processing)))
    return model_cfg, deploy_cfg
class DummyWrapper(torch.nn.Module):
    """Minimal stand-in for a backend wrapper used in partition tests.

    Calling the instance always returns ``0``; ``output_to_list`` returns
    the fixed outputs supplied at construction time.
    """

    def __init__(self, outputs):
        # nn.Module requires its own __init__ to run before attributes are
        # assigned on the instance (it sets up _parameters/_modules/_buffers
        # used by Module.__setattr__); the original skipped this call.
        super().__init__()
        self.outputs = outputs

    def __call__(self, *arg, **kwargs):
        # Bypasses Module.__call__ hooks on purpose: the wrapped "backend"
        # result is irrelevant, only output_to_list is consumed.
        return 0

    def output_to_list(self, *arg, **kwargs):
        return self.outputs
@backend_checker(Backend.ONNXRUNTIME)
class TestPartitionTwoStageModel:
    """Tests for ``PartitionTwoStageModel`` with both partitions mocked."""
    @classmethod
    def setup_class(cls):
        # force add backend wrapper regardless of plugins
        # make sure ONNXRuntimeDetector can use ORTWrapper inside itself
        from mmdeploy.backend.onnxruntime import ORTWrapper
        ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
        # simplify backend inference
        cls.wrapper = SwitchBackendWrapper(ORTWrapper)
        # Two (scores, boxes) pairs — one per partition wrapper.
        outputs = [
            np.random.rand(1, 12, 80).astype(np.float32),
            np.random.rand(1, 12, 4).astype(np.float32),
        ] * 2
        model_cfg, deploy_cfg = prepare_model_deploy_cfgs()
        cls.wrapper.set(
            outputs=outputs, model_cfg=model_cfg, deploy_cfg=deploy_cfg)
        # replace original function in PartitionTwoStageModel
        from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
            PartitionTwoStageModel
        cls.model = PartitionTwoStageModel(
            Backend.ONNXRUNTIME, ['', ''],
            'cpu', ['' for i in range(80)],
            model_cfg=model_cfg,
            deploy_cfg=deploy_cfg)
        # Replace both partition wrappers with dummies that return fixed
        # FPN features / proposals (first) and head outputs (second).
        feats = [torch.randn(1, 8, 14, 14) for i in range(5)]
        scores = torch.rand(1, 10, 1)
        bboxes = torch.rand(1, 10, 4)
        # Force x2 > x1, y2 > y1 so every proposal is a valid box.
        bboxes[..., 2:4] = 2 * bboxes[..., :2]
        cls_score = torch.rand(10, 81)
        bbox_pred = torch.rand(10, 320)
        cls.model.device = 'cpu'
        cls.model.CLASSES = ['' for i in range(80)]
        cls.model.first_wrapper = DummyWrapper([*feats, scores, bboxes])
        cls.model.second_wrapper = DummyWrapper([cls_score, bbox_pred])
    @classmethod
    def teardown_class(cls):
        # restore the real ORTWrapper patched in setup_class
        cls.wrapper.recover()
    def test_postprocess(self):
        """Both partition post-process stages return a 2-tuple of results."""
        feats = [torch.randn(1, 8, 14, 14) for i in range(5)]
        scores = torch.rand(1, 50, 1)
        bboxes = torch.rand(1, 50, 4)
        bboxes[..., 2:4] = 2 * bboxes[..., :2]
        results = self.model.partition0_postprocess(
            x=feats, scores=scores, bboxes=bboxes)
        assert results is not None, 'failed to get output using '\
            'partition0_postprocess of PartitionTwoStageDetector'
        assert isinstance(results, tuple)
        assert len(results) == 2
        # Second stage: ROIs plus raw head predictions -> dets/labels.
        rois = torch.rand(1, 10, 5)
        cls_score = torch.rand(10, 81)
        bbox_pred = torch.rand(10, 320)
        img_metas = [[{
            'ori_shape': [32, 32, 3],
            'img_shape': [32, 32, 3],
            'scale_factor': [1, 1, 1, 1],
        }]]
        results = self.model.partition1_postprocess(
            rois=rois,
            cls_score=cls_score,
            bbox_pred=bbox_pred,
            img_metas=img_metas)
        assert results is not None, 'failed to get output using '\
            'partition1_postprocess of PartitionTwoStageDetector'
        assert isinstance(results, tuple)
        assert len(results) == 2
    def test_forward(self):
        """forward should stitch the (stubbed) partition results together."""
        class DummyPTSDetector(torch.nn.Module):
            """A dummy wrapper for unit tests."""
            def __init__(self, *args, **kwargs):
                self.output_names = ['dets', 'labels']
            def partition0_postprocess(self, *args, **kwargs):
                return self.outputs0
            def partition1_postprocess(self, *args, **kwargs):
                return self.outputs1
        import types
        # Bind the dummy post-process methods onto the real model instance
        # so forward() exercises only the orchestration logic.
        self.model.partition0_postprocess = types.MethodType(
            DummyPTSDetector.partition0_postprocess, self.model)
        self.model.partition1_postprocess = types.MethodType(
            DummyPTSDetector.partition1_postprocess, self.model)
        self.model.outputs0 = [torch.rand(2, 3)] * 2
        self.model.outputs1 = [torch.rand(1, 9, 5), torch.rand(1, 9)]
        imgs = [torch.rand(1, 3, 32, 32)]
        img_metas = [[{
            'ori_shape': [32, 32, 3],
            'img_shape': [32, 32, 3],
            'scale_factor': [1, 1, 1, 1],
        }]]
        results = self.model.forward(imgs, img_metas)
        assert_forward_results(results, 'PartitionTwoStageModel')
class TestGetClassesFromCfg:
    """Tests for ``get_classes_from_config`` lookup priority.

    The helper is expected to prefer ``data.test`` over ``data.val`` over
    ``data.train`` and to raise when none of those keys is present.
    """
    # Configs naming a dataset type; the highest-priority split is always
    # CocoDataset so the expected CLASSES are Coco's.
    data_cfg1 = mmcv.Config(
        dict(
            data=dict(
                test=dict(type='CocoDataset'),
                val=dict(type='CityscapesDataset'),
                train=dict(type='CityscapesDataset'))))
    data_cfg2 = mmcv.Config(
        dict(
            data=dict(
                val=dict(type='CocoDataset'),
                train=dict(type='CityscapesDataset'))))
    data_cfg3 = mmcv.Config(dict(data=dict(train=dict(type='CocoDataset'))))
    # No test/val/train key at all -> should raise.
    data_cfg4 = mmcv.Config(dict(data=dict(error=dict(type='CocoDataset'))))
    # NOTE(review): ('a') is the *string* 'a', not a 1-tuple (no trailing
    # comma). The asserts below expect ['a'], so get_classes_from_config
    # presumably normalizes a bare string into a list — confirm if edited.
    data_cfg_classes_1 = mmcv.Config(
        dict(
            data=dict(
                test=dict(classes=('a')),
                val=dict(classes=('b')),
                train=dict(classes=('b')))))
    data_cfg_classes_2 = mmcv.Config(
        dict(data=dict(val=dict(classes=('a')), train=dict(classes=('b')))))
    data_cfg_classes_3 = mmcv.Config(
        dict(data=dict(train=dict(classes=('a')))))
    data_cfg_classes_4 = mmcv.Config(dict(classes=('a')))
    @pytest.mark.parametrize('cfg',
                             [data_cfg1, data_cfg2, data_cfg3, data_cfg4])
    def test_get_classes_from_cfg(self, cfg):
        """Dataset-type configs resolve to that dataset's CLASSES."""
        from mmdet.datasets import DATASETS
        from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
            get_classes_from_config
        if 'error' in cfg.data:
            with pytest.raises(RuntimeError):
                get_classes_from_config(cfg)
        else:
            assert get_classes_from_config(
                cfg) == DATASETS.module_dict['CocoDataset'].CLASSES
    @pytest.mark.parametrize('cfg', [
        data_cfg_classes_1, data_cfg_classes_2, data_cfg_classes_3,
        data_cfg_classes_4
    ])
    def test_get_classes_from_custom_cfg(self, cfg):
        """Explicit ``classes`` entries win, highest-priority split first."""
        from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
            get_classes_from_config
        assert get_classes_from_config(cfg) == ['a']
@backend_checker(Backend.ONNXRUNTIME)
@pytest.mark.parametrize('partition_type', [None, 'end2end'])
def test_build_object_detection_model(partition_type):
    """build_object_detection_model returns an End2EndModel for both the
    unpartitioned and the 'end2end'-partitioned deploy configs."""
    _, post_processing = get_test_cfg_and_post_processing()
    model_cfg = mmcv.Config(dict(data=dict(test={'type': 'CocoDataset'})))
    deploy_cfg = mmcv.Config(
        dict(
            backend_config=dict(type='onnxruntime'),
            onnx_config=dict(output_names=['dets', 'labels']),
            codebase_config=dict(
                type='mmdet', post_processing=post_processing)))
    if partition_type:
        deploy_cfg.partition_config = dict(
            apply_marks=True, type=partition_type)
    from mmdeploy.backend.onnxruntime import ORTWrapper
    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
    # simplify backend inference: the context manager patches ORTWrapper
    # and restores it on exit.
    with SwitchBackendWrapper(ORTWrapper) as wrapper:
        wrapper.set(model_cfg=model_cfg, deploy_cfg=deploy_cfg)
        from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
            build_object_detection_model
        detector = build_object_detection_model([''], model_cfg, deploy_cfg,
                                                'cpu')
        assert isinstance(detector, End2EndModel)
@backend_checker(Backend.NCNN)
class TestNCNNEnd2EndModel:
    """Tests for ``NCNNEnd2EndModel`` against a mocked ncnn backend."""

    @classmethod
    def setup_class(cls):
        # force add backend wrapper regardless of plugins
        from mmdeploy.backend.ncnn import NCNNWrapper
        ncnn_apis.__dict__.update({'NCNNWrapper': NCNNWrapper})
        # simplify backend inference: the patched wrapper returns
        # cls.outputs instead of running real ncnn inference.
        cls.wrapper = SwitchBackendWrapper(NCNNWrapper)
        cls.outputs = {
            'output': torch.rand(1, 10, 6),
        }
        cls.wrapper.set(outputs=cls.outputs)
        deploy_cfg = mmcv.Config({'onnx_config': {'output_names': ['output']}})
        model_cfg = mmcv.Config({})
        from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
            NCNNEnd2EndModel
        cls.ncnn_end2end_model = NCNNEnd2EndModel(Backend.NCNN, ['', ''],
                                                  'cpu',
                                                  ['' for i in range(80)],
                                                  model_cfg, deploy_cfg)

    @classmethod
    def teardown_class(cls):
        # restore the real NCNNWrapper patched in setup_class
        cls.wrapper.recover()

    @pytest.mark.parametrize('num_det', [10, 0])
    def test_forward_test(self, num_det):
        """forward_test must handle both populated and empty detections.

        The parametrized outputs are pushed through the mocked wrapper;
        previously they were only assigned to ``self.outputs``, which never
        reached the backend stub, so the ``num_det=0`` case silently reused
        the 10-detection tensor from setup_class.
        """
        self.outputs = {
            'output': torch.rand(1, num_det, 6),
        }
        self.wrapper.set(outputs=self.outputs)
        imgs = torch.rand(1, 3, 64, 64)
        results = self.ncnn_end2end_model.forward_test(imgs)
        assert_det_results(results, 'NCNNEnd2EndModel')
@backend_checker(Backend.RKNN)
class TestRKNNModel:
    """Tests for ``RKNNModel`` with a YOLOv3 head and a mocked RKNN backend."""
    @classmethod
    def setup_class(cls):
        # force add backend wrapper regardless of plugins
        import mmdeploy.backend.rknn as rknn_apis
        from mmdeploy.backend.rknn import RKNNWrapper
        rknn_apis.__dict__.update({'RKNNWrapper': RKNNWrapper})
        # simplify backend inference: the stub returns three YOLO feature
        # maps (255 = 3 anchors * (80 classes + 5)) at strides 32/16/8.
        cls.wrapper = SwitchBackendWrapper(RKNNWrapper)
        cls.outputs = [
            torch.rand(1, 255, 5, 5),
            torch.rand(1, 255, 10, 10),
            torch.rand(1, 255, 20, 20)
        ]
        cls.wrapper.set(outputs=cls.outputs)
        deploy_cfg = mmcv.Config({
            'onnx_config': {
                'output_names': ['output']
            },
            'backend_config': {
                'common_config': {}
            }
        })
        # Full YOLOV3Head config: RKNNModel runs the head's post-processing
        # on the raw feature maps returned by the wrapper.
        model_cfg = mmcv.Config(
            dict(
                model=dict(
                    bbox_head=dict(
                        type='YOLOV3Head',
                        num_classes=80,
                        in_channels=[512, 256, 128],
                        out_channels=[1024, 512, 256],
                        anchor_generator=dict(
                            type='YOLOAnchorGenerator',
                            base_sizes=[[(116, 90), (156, 198), (
                                373, 326)], [(30, 61), (62, 45), (
                                    59, 119)], [(10, 13), (16, 30), (33, 23)]],
                            strides=[32, 16, 8]),
                        bbox_coder=dict(type='YOLOBBoxCoder'),
                        featmap_strides=[32, 16, 8],
                        loss_cls=dict(
                            type='CrossEntropyLoss',
                            use_sigmoid=True,
                            loss_weight=1.0,
                            reduction='sum'),
                        loss_conf=dict(
                            type='CrossEntropyLoss',
                            use_sigmoid=True,
                            loss_weight=1.0,
                            reduction='sum'),
                        loss_xy=dict(
                            type='CrossEntropyLoss',
                            use_sigmoid=True,
                            loss_weight=2.0,
                            reduction='sum'),
                        loss_wh=dict(
                            type='MSELoss', loss_weight=2.0, reduction='sum')),
                    test_cfg=dict(
                        nms_pre=1000,
                        min_bbox_size=0,
                        score_thr=0.05,
                        conf_thr=0.005,
                        nms=dict(type='nms', iou_threshold=0.45),
                        max_per_img=100))))
        from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
            RKNNModel
        cls.rknn_model = RKNNModel(Backend.RKNN, ['', ''], 'cpu',
                                   ['' for i in range(80)], model_cfg,
                                   deploy_cfg)
    @classmethod
    def teardown_class(cls):
        # restore the real RKNNWrapper patched in setup_class
        cls.wrapper.recover()
    def test_forward_test(self):
        """forward_test decodes the stubbed feature maps into detections."""
        imgs = torch.rand(1, 3, 64, 64)
        results = self.rknn_model.forward_test(imgs)
        assert_det_results(results, 'RKNNWrapper')
| fengbingchun/PyTorch_Test | src/mmdeploy/tests/test_codebase/test_mmdet/test_object_detection_model.py | test_object_detection_model.py | py | 20,797 | python | en | code | 14 | github-code | 90 |
71248188457 | from collections import deque
from sys import maxsize
def citire(nume_fisier="graf.in"):
    """Read an undirected graph from *nume_fisier*.

    Expected format: the first line holds ``n m`` (vertex and edge counts);
    each of the next ``m`` lines holds one edge ``i j`` (1-indexed).

    Returns:
        tuple: ``(n, m, la)`` where ``la`` is an adjacency list of length
        ``n + 2`` — slots 0 and n+1 stay free for the flow network's
        source/sink that the rest of the script adds later.
    """
    with open(nume_fisier) as f:
        n, m = (int(x) for x in f.readline().split())
        la = [[] for _ in range(n+2)]
        # The original shadowed the loop counter with the edge endpoint
        # ``i``; use an anonymous counter so the two roles stay distinct.
        for _ in range(m):
            i, j = (int(x) for x in f.readline().split())
            la[i].append(j)
            la[j].append(i)
    return n, m, la
n, m, la = citire()
# Per-vertex state for the odd-cycle DFS below (re-initialized before each
# start vertex).
viz = [0]*(n+1)  # 1 once a vertex has been entered by DFS
tata = [None]*(n+1)  # DFS-tree parent
d = [None]*(n+1)  # depth in the DFS tree (used for cycle-parity check)
ciclu = False  # becomes True when an odd cycle is found
def DFS(s):
    """Depth-first search that detects an odd cycle through ``s``.

    Records parents and depths; a back edge (s, y) to an already-active
    vertex with an even depth difference ``d[s]-d[y]`` closes a cycle of
    odd length ``d[s]-d[y]+1``, proving the graph is not bipartite. The
    first such pair of endpoints is stored in the globals ``nod``/``fin``.
    """
    viz[s]=1
    global nod, fin, ciclu
    for y in la[s]:
        if viz[y]==0:
            tata[y]=s
            d[y]=d[s]+1
            DFS(y)
        # NOTE(review): ``y != tata[s]`` skips the tree edge back to the
        # parent; with parallel (duplicate) edges in the input this would
        # also skip a genuine 2-cycle — assumes a simple graph.
        elif viz[y]==1 and y != tata[s]:
            if ciclu is False and (d[s]-d[y])%2 == 0:
                ciclu = True
                nod = s  # deeper endpoint of the first odd cycle found
                fin = y  # shallower endpoint (an ancestor of nod)
nod = -1
fin = -1
# Run the odd-cycle DFS from every vertex so disconnected components are
# covered too; stop at the first odd cycle found.
for i in range(1, n+1):
    viz = [0] * (n + 1)
    tata = [None] * (n + 1)
    d = [None] * (n+1)
    d[i] = 0
    DFS(i)
    if ciclu is True:
        # "Graful nu este bipartit" = "The graph is not bipartite".
        print("Graful nu este bipartit")
        # Reconstruct the odd cycle by walking tree parents from the
        # deeper endpoint up to its ancestor ``fin``; the cycle is printed
        # closed (``fin`` appears at both ends).
        c = []
        if d[nod] < d[fin]:
            nod,fin=fin,nod
        c.append(fin)
        while nod != fin:
            c.append(nod)
            nod = tata[nod]
        c.append(fin)
        print(c)
        break
if ciclu is not True:
    # No odd cycle: the graph is bipartite. 2-color it, then compute the
    # maximum matching as a maximum flow between the two color classes.
    colorare = [-1]*(n+1)
    colorare[1] = 1
    q = deque()
    q.append(1)
    # NOTE(review): q.pop() takes from the right, so this is a DFS-order
    # traversal; for 2-coloring the order does not matter. It also only
    # starts from vertex 1 — vertices in other components keep color -1
    # and fall into Y below; presumably the input is connected — confirm.
    while q:
        u = q.pop()
        for v in la[u]:
            if colorare[v] == -1:
                colorare[v] = 1 - colorare[u]
                q.append(v)
    X = []  # color-1 side of the bipartition
    Y = []  # color-0 side
    for i in range(1,n+1):
        if colorare[i] == 1:
            X.append(i)
        else: Y.append(i)
    # Flow-network bookkeeping: s=0 is the source, t=n+1 the sink.
    tata = [0]*(n+2)  # BFS/DFS parent (sign encodes arc direction)
    viz = [0]*(n+2)
    s = 0
    t = n+1
    le = [[] for i in range(n + 2)]  # outgoing arcs per vertex
    li = [[] for i in range(n + 2)]  # incoming arcs per vertex
    # Treat the graph as a directed flow network: arcs leave the source s
    # into side X, cross from X to Y in one direction only, and all
    # Y-vertices feed into the sink t. (Translated from Romanian.)
    # mat[i][j] = [flow, capacity] for arc i -> j; every capacity is 1.
    mat = [[None for j in range(n + 2)] for i in range(n + 2)]
    for i in X:
        for v in la[i]:
            le[i].append(v)
            li[v].append(i)
            mat[i][v] = [0, 1]
    for i in X:
        le[s].append(i)
        li[i].append(s)
        mat[s][i] = [0, 1]
    for i in Y:
        le[i].append(t)
        li[t].append(i)
        mat[i][t] = [0, 1]
    def build_unsat_BF():
        """Search for an augmenting (unsaturated) s->t path.

        Fills the module-level ``tata`` array: a positive entry means the
        vertex was reached over a forward arc (parent -> vertex), a negative
        entry means a backward arc (flow pushed back). Returns True iff the
        sink ``t`` was reached.

        NOTE(review): despite the "BF" name this uses q.pop() (right end of
        the deque), i.e. DFS order — any augmenting-path order is correct
        for Ford-Fulkerson, just not breadth-first.
        """
        for i in range(0, n+2):
            tata[i] = viz[i] = 0
        global s, t
        q.clear()
        q.append(s)
        viz[s] = 1
        while q:
            i = q.pop()
            for j in le[i]:  # forward arc: residual capacity left
                if viz[j] == 0 and mat[i][j][1]-mat[i][j][0] > 0:
                    q.append(j)
                    viz[j] = 1
                    tata[j] = i
                    if j == t:
                        return True
            for j in li[i]:  # backward arc: existing flow can be undone
                if viz[j]==0 and mat[j][i][0] > 0:
                    q.append(j)
                    viz[j] = 1
                    tata[j] = -i
                    if j == t:
                        return True
        return False
    q = deque()
    P = []
    a=0
    # Ford-Fulkerson: repeatedly find an augmenting path, push the minimum
    # residual capacity along it, until no s->t path remains.
    while build_unsat_BF() is True:
        # Rebuild the path t -> s from the signed parent array; a negative
        # entry marks a backward arc traversed against the flow.
        P = []
        vf = t
        while vf != s:
            P.append(vf)
            if tata[vf] >= 0:
                vf = tata[vf]
            else:
                vf = - tata[vf]
        P.append(s)
        P.reverse()
        pond = []
        for i in range(len(P)-1):  # residual capacity of each arc on P
            if tata[P[i+1]] >= 0:
                pond.append(mat[P[i]][P[i+1]][1]-mat[P[i]][P[i+1]][0])
            else:
                pond.append(mat[P[i+1]][P[i]][0])
        val = min(pond)  # bottleneck of the augmenting path
        for i in range(len(P) - 1):
            if tata[P[i+1]] >= 0:
                mat[P[i]][P[i + 1]][0] = mat[P[i]][P[i+1]][0] + val  # flow increases
            else:
                mat[P[i+1]][P[i]][0] = mat[P[i+1]][P[i]][0] - val
        P.clear()
    # The max-flow value (= maximum matching size) is the total flow
    # leaving the source.
    flux = 0
    for v in le[s]:
        flux += mat[s][v][0]
    # "Cuplajul maximal este" = "The maximum matching is".
    print("Cuplajul maximal este "+str(flux))
    # Saturated X->Y arcs (excluding arcs into the sink) are the matched
    # edges.
    for i in range(1, n+1):
        for v in le[i]:
            if mat[i][v][0]>0 and v!=t:
                print(i, v)
else:
    # Pairs with the ``if ciclu is not True`` above; the odd-cycle loop
    # already printed this message once, so it appears twice on purpose(?).
    # NOTE(review): consider dropping one of the duplicates.
    print("Graful nu este bipartit")
| DanNimara/FundamentalAlgorithms-Graphs | Lab5/2.py | 2.py | py | 4,193 | python | en | code | 0 | github-code | 90 |
36014623587 |
import random
import wave
import pyaudio
import sys
import socket
import asyncio
from sio import sio, run_server
from processing import process_audio
from config import FORMAT, CHANNELS, RATE, CHUNK, RECORD_SECONDS, TOTAL_CHUNKS
from threading import Thread
@sio.on('message')
async def print_message(sid, message):
    """Debug handler: log an incoming 'message' event, then broadcast it
    back to every connected client (no ``to=`` given, so the sender too)."""
    print("Socket ID: ", sid)
    print(message)
    await sio.emit('message', message)
# Called on receiving a new chunk of audio frames.
# Accumulates chunks in the per-client session; once TOTAL_CHUNKS chunks
# have arrived they form one complete recording and are handed off for
# processing.
@sio.on('audio')
async def process_data(sid, data):
    async with sio.session(sid) as session:
        session['frames'].append(data['data'])
        # print(data['id'], len(session['frames']), " / ", TOTAL_CHUNKS)
        if len(session['frames']) == TOTAL_CHUNKS:
            # Spawn new thread so audio processing does not block the
            # socket.io event loop; asyncio.run gives the process_audio
            # coroutine its own event loop inside that thread.
            t = Thread(target=asyncio.run, args=(process_audio(sid, session['n'], session['frames']), ))
            t.start()
            session['n'] += 1  # recording counter for this client
            # Reassigning (not clearing) leaves the thread's list intact
            # while a fresh buffer collects the next recording.
            session['frames'] = []
# Called on first connection.
# Initializes session variables
@ sio.event
async def connect(sid, environ):
    print('connect ', sid)
    async with sio.session(sid) as session:
        # Set initial session state: recording index ``n``, a running
        # total (written here but not read elsewhere in this file), and
        # the buffer of audio chunks for the in-progress recording.
        session['n'] = 1
        session['total'] = 0
        session['frames'] = []
@ sio.event
async def disconnect(sid):
    """Log client disconnection (session cleanup is handled by socketio)."""
    print('disconnect ', sid)
if __name__ == '__main__':
    # Start the socket.io server (blocking) when run as a script.
    run_server()
| David-Happel/realtime_deepfake_audio_detection | server/server.py | server.py | py | 1,429 | python | en | code | 0 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.