index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
19,000 | f5db9d917142e6cb9abb6f04ad6701e1129b2d19 |
# Public API of this models package: the model-builder callables re-exported here.
__all__ = ['deepID_model', 'lenet_model', 'resnet80_model',
           'sg_model', 'tinynet_model', 'vggm_model']
19,001 | 64f79ff1cc5d5cd5cc3975c6a2bb0f3bd7bb3744 | import numpy as np
import matplotlib.pyplot as plt
def test_k():
    """Plot polynomial-smoothed test-accuracy curves for several values of k.

    Loads one .npy series per k, fits a degree-100 polynomial over the 200
    recorded episodes, and plots the first 100 episodes of each fit.
    """
    sample_x = list(range(200))
    degree = 100
    plot_x = list(range(100))
    for k in (1, 3, 5, 10, 20, 50, 100):
        accuracy = np.load('test_hinge_k=%d_lr=1.npy' % k)
        smoothed = np.poly1d(np.polyfit(sample_x, accuracy, degree))
        plt.plot(plot_x, smoothed(plot_x), label='k=%d' % k)
    plt.xlabel("episodes")
    plt.ylabel("test accuracy")
    plt.legend()
    plt.show()
def train_k():
    """Plot polynomial-smoothed train-accuracy curves for several values of k.

    Consistency improvement: delegates loading + fitting to the module's
    shared getPolyfit helper (same sample range and polynomial degree), so
    the smoothing parameters cannot drift from the other plotting routines.
    """
    plot_x = list(range(100))
    for k in (1, 3, 5, 10, 20, 50, 100):
        smoothed = getPolyfit('train_hinge_k=%d_lr=1.npy' % k)
        plt.plot(plot_x, smoothed(plot_x), label='k=%d' % k)
    plt.xlabel("episodes")
    plt.ylabel("train accuracy")
    plt.legend()
    plt.show()
def getPolyfit(input, rank=100):
    """Load a 1-D series from a .npy file and return a smoothing polynomial.

    Generalized: the sample axis is derived from the data length instead of a
    hard-coded range(200), so series of any length fit cleanly; the polynomial
    degree is now a parameter (default 100, the value every caller used).

    Args:
        input: path of the .npy file to load (name kept for compatibility).
        rank: degree of the fitted polynomial.

    Returns:
        np.poly1d fitted over indices 0..len(data)-1.
    """
    data = np.load(input)
    x = list(range(len(data)))
    return np.poly1d(np.polyfit(x, data, rank))
def train_method():
    """Compare smoothed train accuracy across loss functions (k=20, lr=1)."""
    episodes = list(range(100))
    for method in ('hinge', 'logistic', 'linear'):
        curve = getPolyfit('train_%s_k=20_lr=1.npy' % method)
        plt.plot(episodes, curve(episodes), label=method)
    plt.xlabel("episodes")
    plt.ylabel("train accuracy")
    plt.legend()
    plt.show()
def test_method():
    """Compare smoothed test accuracy across loss functions (k=20, lr=1)."""
    episodes = list(range(100))
    for method in ('hinge', 'logistic', 'linear'):
        curve = getPolyfit('test_%s_k=20_lr=1.npy' % method)
        plt.plot(episodes, curve(episodes), label=method)
    plt.xlabel("episodes")
    plt.ylabel("test accuracy")
    plt.legend()
    plt.show()
def train_hinge_loss():
    """Plot the smoothed per-label hinge-loss curves (k=20, lr=1).

    The loss file holds one column per label (6 labels); each column is
    fitted and plotted separately.
    """
    sample_x = list(range(200))
    degree = 100
    plot_x = list(range(100))
    loss = np.load('loss_hinge_k=20_lr=1.npy')
    for col in range(6):
        fit = np.poly1d(np.polyfit(sample_x, loss[:, col], degree))
        plt.plot(plot_x, fit(plot_x), label='label %d' % (col + 1))
    plt.xlabel("episodes")
    plt.ylabel("loss")
    plt.legend()
    plt.show()
def train_lr():
    """Compare smoothed train accuracy across learning rates (hinge, k=20)."""
    episodes = list(range(100))
    for suffix, shown in (('1', '1'), ('05', '0.5'), ('01', '0.1'), ('005', '0.05')):
        curve = getPolyfit('train_hinge_k=20_lr=%s.npy' % suffix)
        plt.plot(episodes, curve(episodes), label='lr = %s' % shown)
    plt.xlabel("episodes")
    plt.ylabel("train accuracy")
    plt.legend()
    plt.show()
def test_lr():
    """Compare smoothed test accuracy across learning rates (hinge, k=20)."""
    episodes = list(range(100))
    for suffix, shown in (('1', '1'), ('05', '0.5'), ('01', '0.1'), ('005', '0.05')):
        curve = getPolyfit('test_hinge_k=20_lr=%s.npy' % suffix)
        plt.plot(episodes, curve(episodes), label='lr = %s' % shown)
    plt.xlabel("episodes")
    plt.ylabel("test accuracy")
    plt.legend()
    plt.show()
def train_loss():
    """Compare smoothed train accuracy across label-missing rates (hinge, k=20, lr=1)."""
    episodes = list(range(100))
    runs = [
        ('train_hinge_k=20_lr=1.npy', '0'),
        ('train_hinge_k=20_lr=1_lost=005.npy', '0.05'),
        ('train_hinge_k=20_lr=1_lost=01.npy', '0.1'),
        ('train_hinge_k=20_lr=1_lost=02.npy', '0.2'),
        ('train_hinge_k=20_lr=1_lost=03.npy', '0.3'),
        ('train_hinge_k=20_lr=1_lost=05.npy', '0.5'),
    ]
    for fname, rate in runs:
        curve = getPolyfit(fname)
        plt.plot(episodes, curve(episodes), label='missing rate = ' + rate)
    plt.xlabel("episodes")
    plt.ylabel("train accuracy")
    plt.legend()
    plt.show()
def test_loss():
    """Compare smoothed test accuracy across label-missing rates (hinge, k=20, lr=1)."""
    episodes = list(range(100))
    runs = [
        ('test_hinge_k=20_lr=1.npy', '0'),
        ('test_hinge_k=20_lr=1_lost=005.npy', '0.05'),
        ('test_hinge_k=20_lr=1_lost=01.npy', '0.1'),
        ('test_hinge_k=20_lr=1_lost=02.npy', '0.2'),
        ('test_hinge_k=20_lr=1_lost=03.npy', '0.3'),
        ('test_hinge_k=20_lr=1_lost=05.npy', '0.5'),
    ]
    for fname, rate in runs:
        curve = getPolyfit(fname)
        plt.plot(episodes, curve(episodes), label='missing rate = ' + rate)
    plt.xlabel("episodes")
    plt.ylabel("test accuracy")
    plt.legend()
    plt.show()
19,002 | a0d320665cb943ab4df796f74f62fb1b2b2cf871 | import re
import hashlib
import os
# from flask import current_app as app
from flask import session
from .models import User
# Password-hashing salt from the environment; raises KeyError at import time
# if unset, which fails fast rather than hashing with an empty salt.
SALT = os.environ["SALT"]
def hasher(s: str) -> str:
    """Return the hex SHA-256 digest of *s* concatenated with the module SALT."""
    salted = (s + SALT).encode()
    return hashlib.sha256(salted).hexdigest()
def signIn(email: str, password: str) -> "bool | dict":
    """Authenticate a user and populate the flask session.

    Returns the session "user" dict on success and False on failure (the
    original `-> bool` annotation was wrong for the success path).
    """
    user = User.objects(email=email)
    if user.count() == 0:
        # no account with this email
        return False
    user = user[0]
    if hasher(password) == user["password"]:
        # sign in success
        # NOTE(review): "name" is filled with the email, not a display
        # name — confirm this is intended.
        session["user"] = {
            "name": user["email"],
            "email": user["email"],
            "role": user["role"],
        }
        return session["user"]
    else:
        # sign in fail
        return False
def getUser():
    """Return the User document for the signed-in session, or False if nobody is signed in."""
    if "user" not in session:
        return False
    return User.objects.get(email=session["user"]["email"])
|
19,003 | 75df9bfacc9276e788731ffe359636b538774203 | """
Reverse Words in a String II
# https://aaronice.gitbook.io/lintcode/string/reverse-words-in-a-string-ii
# https://www.programcreek.com/2014/05/leetcode-reverse-words-in-a-string-ii-java/
Given an input string, reverse the string word by word.
Example:
Input:
["t","h","e"," ","s","k","y"," ","i","s"," ","b","l","u","e"]
Output:
["b","l","u","e"," ","i","s"," ","s","k","y"," ","t","h","e"]
Note:
A word is defined as a sequence of non-space characters.
The input string does not contain leading or trailing spaces.
The words are always separated by a single space.
Follow up: Could you do itin-placewithout allocating extra space?
Topics: Two Pointers, String
"""
# V0
# IDEA : REVERSE WHOLE STRING -> REVERSE EACH WORD
class Solution(object):
    def reverseWords(self, s):
        """Reverse word order: flip the whole string, then flip each word back."""
        flipped_words = s[::-1].split(" ")
        restored = [word[::-1] for word in flipped_words]
        return " ".join(restored)
# V0'
class Solution(object):
    def reverseWords(self, s):
        """Reverse the word order of a char-list string.

        Bug fix: the original joined an undefined name ``x``; the argument
        is ``s`` (a list of single-character strings).

        Returns a new list of characters with the word order reversed.
        """
        res = "".join(s).split(" ")[::-1]
        return list(" ".join(res))
# V1
# http://www.voidcn.com/article/p-eggrnnob-zo.html
class Solution(object):
    def reverseWords(self, s):
        """
        :type s: a list of 1 length strings (List[str])
        :rtype: nothing (reverses word order in place)
        """
        # Reverse the whole list, then re-reverse each word in place.
        s.reverse()
        i = 0
        while i < len(s):
            # advance j to the end of the current word
            j = i
            while j < len(s) and s[j] != " ":
                j += 1
            # Bug fix: (j - i) / 2 is a float on Python 3 and range()
            # rejects it — use integer division.
            for k in range(i, i + (j - i) // 2):
                s[k], s[i + j - 1 - k] = s[i + j - 1 - k], s[k]
            i = j + 1
# V2
# Time: O(n)
# Space:O(1)
class Solution(object):
    def reverseWords(self, s):
        """
        :type s: a list of 1 length strings (List[str])
        :rtype: nothing (reverses word order in place)

        Time: O(n), Space: O(1).
        """
        def reverse(s, begin, end):
            # Reverse s[begin:end] in place.
            # Bug fix: use // — float division breaks range() on Python 3.
            for i in range((end - begin) // 2):
                s[begin + i], s[end - 1 - i] = s[end - 1 - i], s[begin + i]
        reverse(s, 0, len(s))
        i = 0
        for j in range(len(s) + 1):
            if j == len(s) or s[j] == ' ':
                # s[i:j] is one reversed word — flip it back.
                reverse(s, i, j)
                i = j + 1
19,004 | bb0ca0288a8bfc17a93121792b9ac3b3ded09b56 | from restaurant import Restaurant
# Demo script: exercise the Restaurant class from the local module.
nicos_italian = Restaurant("Nico's Italian","italian")
nicos_italian.describe_restaurant()
nicos_italian.restaurant_open()
# NOTE(review): printing the return values assumes these methods return
# something meaningful — confirm they don't just print and return None.
print(nicos_italian.customers_served(4))
print(nicos_italian.increment_customers(1))
|
def resta__base(lista1, lista2, n):
    """Subtract two base-n numbers given as little-endian digit lists.

    Bug fix: the original called ``type(lista1, list)``, which raises
    TypeError (three-argument ``type`` creates a class); ``isinstance``
    is the correct type check.

    Returns the digit list of lista1 - lista2, or "error" if either
    argument is not a list.
    """
    if isinstance(lista1, list) and isinstance(lista2, list):
        return resta_aux(lista1, 0, lista2, n)
    return "error"
def resta_aux(lista1, prestado, lista2, base):
    """Recursively subtract little-endian digit lists with a borrow.

    Args:
        lista1: minuend digits, least-significant first.
        prestado: borrow carried into the current digit (0 or 1).
        lista2: subtrahend digits, same length as lista1.
        base: numeric base of the digits.

    Returns:
        Digit list of the difference, least-significant first.

    Bug fixes versus the original:
    - the ">" branch built ``[a] - b`` (list minus int — TypeError)
      instead of ``[a - b]``;
    - the "==" branch referenced the undefined name ``lista``;
    - the borrow passed to the next digit must reset to 0 when the
      current digit needs no borrow (the original propagated the old
      borrow unchanged).
    """
    if lista1 == []:
        return []
    digit = lista1[0] - prestado
    if digit >= lista2[0]:
        # no borrow needed for this digit
        return [digit - lista2[0]] + resta_aux(lista1[1:], 0, lista2[1:], base)
    # borrow `base` from the next digit
    return [digit + base - lista2[0]] + resta_aux(lista1[1:], 1, lista2[1:], base)
|
19,006 | 1728e67e43451ffc4d1ac605990132b4c2ae4e8f | from bs4 import BeautifulSoup as bs
import requests
from pprint import pprint
import time
import re
from pymongo import MongoClient
import random
# Desktop browser User-Agent so the job sites serve the full HTML markup.
headers = {
    'User-agent': 'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36'}
def hh(main_link, search_str, n_str):
    """Scrape n_str result pages of hh.ru vacancies matching search_str.

    Returns a list of dicts with keys: name, salary_min, salary_max,
    link, site. Also pretty-prints the collected list.

    NOTE(review): no error handling — a missing results container or a
    missing "next page" button raises AttributeError/TypeError on the
    last page; confirm n_str never exceeds the available pages.
    """
    # First results page.
    html = requests.get(
        main_link + '/search/vacancy?clusters=true&enable_snippets=true&text=' + search_str + '&showClusters=true',
        headers=headers).text
    parsed_html = bs(html, 'lxml')
    jobs = []
    for i in range(n_str):
        jobs_block = parsed_html.find('div', {'class': 'vacancy-serp'})
        # direct children of the results container = one entry per vacancy
        jobs_list = jobs_block.findChildren(recursive=False)
        for job in jobs_list:
            job_data = {}
            req = job.find('span', {'class': 'g-user-content'})
            if req != None:
                # first child carries the vacancy title and URL
                main_info = req.findChild()
                job_name = main_info.getText()
                job_link = main_info['href']
                salary = job.find('div', {'class': 'vacancy-serp-item__compensation'})
                if not salary:
                    salary_min = None
                    salary_max = None
                else:
                    # strip non-breaking spaces, then split "min-max" ranges
                    salary = salary.getText().replace(u'\xa0', u'')
                    salaries = salary.split('-')
                    salaries[0] = re.sub(r'[^0-9]', '', salaries[0])
                    salary_min = int(salaries[0])
                    if len(salaries) > 1:
                        salaries[1] = re.sub(r'[^0-9]', '', salaries[1])
                        salary_max = int(salaries[1])
                    else:
                        # single figure published: treat it as the minimum
                        salary_max = None
                job_data['name'] = job_name
                job_data['salary_min'] = salary_min
                job_data['salary_max'] = salary_max
                job_data['link'] = job_link
                job_data['site'] = main_link
                jobs.append(job_data)
        # polite random delay between page fetches
        time.sleep(random.randint(1, 10))
        # follow the "next page" pager link
        next_btn_block = parsed_html.find('a', {'class': 'bloko-button HH-Pager-Controls-Next HH-Pager-Control'})
        next_btn_link = next_btn_block['href']
        html = requests.get(main_link + next_btn_link, headers=headers).text
        parsed_html = bs(html, 'lxml')
    pprint(jobs)
    return jobs
# Example run: scrape two result pages of teacher vacancies from hh.ru.
search_str = 'учитель'
n_str = 2
hh('https://hh.ru', search_str, n_str)
|
19,007 | dc5401fad2ed09f44540c4a6fb1bd08d34027c53 | from PyQt5 import uic, QtWidgets
import mysql.connector
import platform
#from reportlab.pdfgen import canvas
"""
Author: Gerssivaldo Oliveira dos Santos
Contacts:
https://github.com/andows159
https://www.linkedin.com/in/gerssivaldo-santos-a75921130
Note:
The function of generating pdf of the table is still under development.
The software is in constant updating.
"""
class EnterpriseManager():
    """Small PyQt5 + MySQL product manager for an animal-feed shop.

    The constructor bootstraps the database/table, builds the Qt
    application, opens the product-registration screen and starts the
    event loop — so instantiating the class runs the whole app.
    """
    def __init__(self):
        # verify OS — used below to pick Windows vs Linux .ui file paths
        self.os = platform.system()
        print(self.os)
        # login mysql
        # NOTE(review): credentials are hard-coded; move to config/env vars.
        self.database = mysql.connector.connect(
            host="localhost",
            user="root",
            passwd="andows",
        )
        # create database if not exists
        cursor = self.database.cursor()
        create_database_command = "CREATE DATABASE IF NOT EXISTS enterprise_manager;"
        cursor.execute(create_database_command)
        self.database.commit()
        cursor.execute("use enterprise_manager;")
        self.database.commit()
        create_table_command = "CREATE TABLE IF NOT EXISTS produtos(id INT NOT NULL AUTO_INCREMENT, produto VARCHAR(50), descricao VARCHAR(70), preco_compra DOUBLE, preco_venda DOUBLE, radio_button VARCHAR(50), check_box VARCHAR(150), liquido DOUBLE, PRIMARY KEY(id));"
        cursor.execute(create_table_command)
        self.database.commit()
        # connect tp database (reconnect with the schema selected)
        self.database = mysql.connector.connect(
            host="localhost",
            user="root",
            passwd="andows",
            database="enterprise_manager"
        )
        # QT instances — the app starts directly on the registration screen
        app = QtWidgets.QApplication([])
        self.register_screen()
        app.exec()

    def login_screen(self):
        """Load and show the login screen.

        NOTE(review): the assignment below replaces this bound method with
        the loaded widget of the same name on the instance — a second call
        through the attribute would fail. The same shadowing pattern exists
        in every *_screen method of this class.
        """
        if self.os == "Windows":
            self.login_screen = uic.loadUi(r".\screens\login.ui")
        elif self.os == "Linux":
            self.login_screen = uic.loadUi("./screens/login.ui")
        self.login_screen.login_button.clicked.connect(self.menu_screen)
        self.login_screen.create_button.clicked.connect(self.create_screen)
        self.login_screen.show()

    def create_screen(self):
        # Account-creation screen (display only; no handlers wired yet).
        if self.os == "Windows":
            self.create_screen = uic.loadUi(r".\screens\create.ui")
        elif self.os == "Linux":
            self.create_screen = uic.loadUi("./screens/create.ui")
        self.create_screen.show()

    def menu_screen(self):
        # Main menu: navigate to registration or stock-control screens.
        if self.os == "Windows":
            self.menu_screen = uic.loadUi(r".\screens\menu.ui")
        if self.os == "Linux":
            self.menu_screen = uic.loadUi("./screens/menu.ui")
        self.menu_screen.register_screen.clicked.connect(self.register_screen)
        self.menu_screen.control_screen.clicked.connect(self.control_screen)
        self.menu_screen.show()

    def register_screen(self):
        # Product-registration form.
        if self.os == "Windows":
            self.register_screen = uic.loadUi(r".\screens\register_products.ui")
        elif self.os == "Linux":
            self.register_screen = uic.loadUi("./screens/register_products.ui")
        self.register_screen.show()
        self.register_screen.register_button.clicked.connect(self.register_button)
        self.register_screen.control_screen.clicked.connect(self.control_screen)

    def control_screen(self):
        # Stock-control table: shows every row of `produtos`.
        if self.os == "Windows":
            self.control_screen = uic.loadUi(r".\screens\control_screen.ui")
        elif self.os == "Linux":
            self.control_screen = uic.loadUi("./screens/control_screen.ui")
        self.control_screen.rm_button.clicked.connect(self.rm_buttom)
        self.control_screen.refresh_button.clicked.connect(self.refresh_button)
        self.control_screen.show()
        cursor = self.database.cursor()
        select = "SELECT * FROM produtos"
        cursor.execute(select)
        self.view_data = cursor.fetchall()
        self.control_screen.tableWidget.setRowCount(len(self.view_data))
        self.control_screen.tableWidget.setColumnCount(8)
        # ler a matriz: copy every DB cell into the table widget
        for linha in range(0,len(self.view_data)): #para percorrendo as linhas
            for item in range(0,8): #dentro das linhas pegando os itens ao cruzar com as colunas
                self.control_screen.tableWidget.setItem(linha,item,QtWidgets.QTableWidgetItem(str(self.view_data[linha][item])))

    def rm_buttom(self):
        """Clear all products.

        NOTE(review): TRUNCATE wipes the entire table, not the selected
        row — confirm that is the intended behaviour of this button.
        """
        cursor = self.database.cursor()
        truncate = "TRUNCATE TABLE produtos;"
        cursor.execute(truncate)
        select = "SELECT * FROM produtos"
        cursor.execute(select)
        self.view_data = cursor.fetchall()
        self.control_screen.tableWidget.setRowCount(len(self.view_data))
        self.control_screen.tableWidget.setColumnCount(8)
        self.control_screen.show()

    def refresh_button(self):
        # Re-query the table and redraw the grid.
        cursor = self.database.cursor()
        select = "SELECT * FROM produtos"
        cursor.execute(select)
        self.view_data = cursor.fetchall()
        self.control_screen.tableWidget.setRowCount(len(self.view_data))
        self.control_screen.tableWidget.setColumnCount(8)
        for linha in range(0,len(self.view_data)): #traversing the lines
            for item in range(0,8): #inside the lines picking up the items when crossing the columns
                self.control_screen.tableWidget.setItem(linha,item,QtWidgets.QTableWidgetItem(str(self.view_data[linha][item])))
        self.control_screen.show()

    def login_button(self):
        # Login handler: currently just opens the registration screen.
        self.register_screen()

    def register_button(self):
        """Read the form fields and INSERT a new product row."""
        input_description = self.register_screen.input_description.text()
        input_product = self.register_screen.input_product.text()
        input_sell_price = float(self.register_screen.input_sell_price.text())
        input_buy_price = float(self.register_screen.input_buy_price.text())
        liquido = float(input_sell_price - input_buy_price)  # net margin
        # Product category (single-choice radio buttons).
        if self.register_screen.radioButton.isChecked():
            radio_option = "Ração pura"
        elif self.register_screen.radioButton_2.isChecked():
            radio_option = "Formulação própria"
        elif self.register_screen.radioButton_4.isChecked():
            radio_option = "Medicamentos"
        elif self.register_screen.radioButton_5.isChecked():
            radio_option = "Formulação indústrial"
        elif self.register_screen.radioButton_6.isChecked():
            radio_option = "Outros produtos"
        else:
            radio_option = ""
        # Target animals (multi-choice), stored as one '/'-joined string.
        check_option = []
        if self.register_screen.checkBox.isChecked():
            check_option.append("Aves")
        if self.register_screen.checkBox_2.isChecked():
            check_option.append("Bovinos")
        if self.register_screen.checkBox_3.isChecked():
            check_option.append("Ovinos")
        if self.register_screen.checkBox_4.isChecked():
            check_option.append("Caprinos")
        if self.register_screen.checkBox_5.isChecked():
            check_option.append("Suínos")
        if self.register_screen.checkBox_7.isChecked():
            check_option.append("Uso geral")
        check_option = "/".join(check_option)
        #input_amount = self.register_screen.input_amount.text()
        cursor = self.database.cursor()
        # NOTE(review): SQL built from a Python tuple repr via f-string —
        # fragile quoting and open to SQL injection; prefer cursor.execute
        # with %s placeholders and a parameter tuple.
        insert_into = f'INSERT INTO produtos (produto, descricao, preco_compra, preco_venda, radio_button, check_box, liquido) VALUES {input_product,input_description,input_buy_price,input_sell_price,radio_option,check_option, liquido}'
        print(insert_into)
        cursor.execute(insert_into)
        self.database.commit()
        # checkBox_6 acts as a "keep form values" toggle.
        if self.register_screen.checkBox_6.isChecked() == False:
            self.register_screen.input_product.setText("")
            self.register_screen.input_description.setText("")
            self.register_screen.input_buy_price.setValue(0)
            self.register_screen.input_sell_price.setValue(0)
# Launch the application (the constructor starts the Qt event loop).
instance = EnterpriseManager()
|
19,008 | ce474b3fca35f7e4e6c2a4b43aa0d69c4f60cae6 | """
WSGI config for src project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

from src.base import DEBUG

# Pick the settings module from the DEBUG flag defined in src.base;
# setdefault keeps an explicitly exported DJANGO_SETTINGS_MODULE intact.
settings = 'src.development' if DEBUG else 'src.production'
os.environ.setdefault('DJANGO_SETTINGS_MODULE', settings)

application = get_wsgi_application()
|
19,009 | fcd20b22a3224f4b2e66585b35633649917f9048 | #!/usr/bin/python3
# coding: utf-8
"""Лото
Правила игры в лото.
Игра ведется с помощью специальных карточек, на которых отмечены числа,
и фишек (бочонков) с цифрами.
Количество бочонков — 90 штук (с цифрами от 1 до 90).
Каждая карточка содержит 3 строки по 9 клеток. В каждой строке по 5 случайных цифр,
расположенных по возрастанию. Все цифры в карточке уникальны. Пример карточки:
--------------------------
9 43 62 74 90
2 27 75 78 82
41 56 63 76 86
--------------------------
В игре 2 игрока: пользователь и компьютер. Каждому в начале выдается
случайная карточка.
Каждый ход выбирается один случайный бочонок и выводится на экран.
Также выводятся карточка игрока и карточка компьютера.
Пользователю предлагается зачеркнуть цифру на карточке или продолжить.
Если игрок выбрал "зачеркнуть":
Если цифра есть на карточке - она зачеркивается и игра продолжается.
Если цифры на карточке нет - игрок проигрывает и игра завершается.
Если игрок выбрал "продолжить":
Если цифра есть на карточке - игрок проигрывает и игра завершается.
Если цифры на карточке нет - игра продолжается.
Побеждает тот, кто первый закроет все числа на своей карточке.
Пример одного хода:
Новый бочонок: 70 (осталось 76)
------ Ваша карточка -----
6 7 49 57 58
14 26 - 78 85
23 33 38 48 71
--------------------------
-- Карточка компьютера ---
7 87 - 14 11
16 49 55 88 77
15 20 - 76 -
--------------------------
Зачеркнуть цифру? (y/n)
Подсказка: каждый следующий случайный бочонок из мешка удобно получать
с помощью функции-генератора.
Подсказка: для работы с псевдослучайными числами удобно использовать
модуль random: http://docs.python.org/3/library/random.html
"""
import random
# Player and computer cards: 3 rows; each row gets 5 numbers plus 4 blanks.
user_card = [[], [], []]
comp_card = [[], [], []]
j = [[], [], []]  # NOTE(review): never read — the comprehensions below shadow j
# (translated) Author: "I started with a class, then decided it wasn't worth
# it — the code differs slightly for just two cards. Or should it be a class?"
# (translated) Reviewer 2016.04.13: "a class would still be cleaner,
# especially if the game grows more players — the code would not differ much."
for i in user_card:
    while len(i)<5:  # (translated reviewer note: mind PEP-8 spacing)
        number = random.randint(1,90)
        if not any(number in j for j in user_card): # keep every number unique within the card
            i.append(number)
    i.sort() # sort the row ascending
    while len(i)<9:
        i.insert(random.randint(0,len(i)), ' ') # pad blanks at random positions
def print_ucard():
    """Print the player's card between header and footer rules."""
    lines = ['----- Your card ------']
    lines.extend(' '.join(map(str, row)) for row in user_card)
    lines.append('----------------------')
    print('\n'.join(lines))
# Fill the computer's card the same way as the player's.
for i in comp_card:
    while len(i)<5:
        number = random.randint(1,90)
        # uniqueness is enforced across BOTH cards here
        if (not any(number in j for j in comp_card) and (not any(number in k for k in user_card))):
            # (translated reviewer note 2016.04.13): numbers only need to be
            # unique within one card — different players' cards may repeat
            # numbers, so the cross-card check above is stricter than the rules.
            i.append(number)
    i.sort()
    while len(i)<9:
        i.insert(random.randint(0,len(i)), ' ')
def print_ccard():
    """Print the computer's card between header and footer rules."""
    rows = [' '.join(map(str, row)) for row in comp_card]
    banner = "-- Computer's card ---"
    print('\n'.join([banner] + rows + ['----------------------']))
def new_num(n):
    """Yield (barrel_number, draw_index) pairs for a shuffled bag of n barrels."""
    shuffled_bag = random.sample(range(1, n + 1), n)
    for draw_index, barrel in enumerate(shuffled_bag, start=1):
        yield barrel, draw_index
def wrong_ans():
    """Abort the game because the player made a wrong call.

    Bug fix: the original followed the raise with ``sys.exit(1)`` — dead
    code, and ``sys`` was never imported.
    """
    raise RuntimeError('You lost!')
num_q = 5  # number of barrels in the bag
for i, j in new_num(num_q):
    print("New number is %s (%d left)" % (i, (num_q-j)))
    print_ucard()
    print_ccard()
    # The computer crosses out its numbers automatically.
    for n in range(3):
        comp_card[n] = ['-' if x == i else x for x in comp_card[n]]
    # Ask until the player gives a valid y/n answer.
    # Bug fixes versus the original:
    # - the 'y' comprehension called wrong_ans() for EVERY cell that did not
    #   equal the drawn number, so almost any 'y' lost instantly; the player
    #   should only lose when the number is absent from the card;
    # - break/continue were aimed at an answer-retry loop that did not exist,
    #   so the first valid answer ended the whole game after one barrel.
    while True:
        u_ans = input('Want to cross out the number? y/n \n')
        if u_ans == 'y':
            if any(i in row for row in user_card):
                for n in range(3):
                    user_card[n] = ['-' if x == i else x for x in user_card[n]]
            else:
                wrong_ans()
            break
        elif u_ans == 'n':
            # NOTE(review): by the stated rules, skipping a number that IS on
            # the card should also lose — not implemented here (nor in the
            # original); confirm intended behaviour.
            break
        else:
            print("Please answer y/n")
|
19,010 | b4663dcc2a71b1724a353b5a501bc3659fea49d1 | import numpy as np
# Grid geometry (row, col): 4 rows x 12 columns.
INIT_POS = (3, 0)    # start cell
GOAL_POS = (3, 11)   # goal cell
GRID_SHAPE = (4, 12)
# Rewards.
R_STEP = -1          # cost of any step
R_CLIFF = -100       # penalty for stepping onto the cliff
# ZQSD (AZERTY-layout) movement keys mapped to (drow, dcol).
KEY_ACTION_DICT = {
    'z': (-1, 0),
    'q': (0, -1),
    'd': (0, 1),
    's': (1, 0),
}
# Characters used when rendering the grid.
POS_CHAR_DICT = {
    GOAL_POS: 'G',
    INIT_POS: 'S',
}
AGENT_KEY = 'A'   # agent marker
CLIFF_KEY = 'C'   # cliff marker
class Position:
    """An (x, y) cell of the cliff-walking grid; hashable and tuple-comparable."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def in_bounds(self, index, axis):
        """Clamp *index* into the valid coordinate range of *axis*."""
        upper = GRID_SHAPE[axis] - 1
        return max(0, min(index, upper))

    def in_cliff(self):
        """True when this cell lies on the cliff strip of the bottom row."""
        on_bottom_row = self.x == (GRID_SHAPE[0] - 1)
        return on_bottom_row and 0 < self.y < GRID_SHAPE[1] - 1

    def next_state(self, action):
        """Apply an (drow, dcol) action; return (new_position, reward)."""
        moved = Position(
            self.in_bounds(self.x + action[0], 0),
            self.in_bounds(self.y + action[1], 1),
        )
        if moved.in_cliff():
            # Falling off the cliff teleports back to the start.
            return Position(*INIT_POS), R_CLIFF
        return moved, R_STEP

    def __eq__(self, other_pos):
        if isinstance(other_pos, tuple):
            return self.x == other_pos[0] and self.y == other_pos[1]
        return self.x == other_pos.x and self.y == other_pos.y

    def __hash__(self):
        return hash((self.x, self.y))

    def __str__(self):
        return f"({self.x}, {self.y})"
class TheCliff:
    """Cliff-walking environment with a gym-like step/reset/seed interface."""

    def __init__(self):
        self.get_states()
        self.get_moves()
        self.get_moves_dict()
        self.get_keys()

    def get_moves(self):
        # The four unit moves, in the same order the original comprehension
        # produced them: up, left, right, down.
        self.moves = [(-1, 0), (0, -1), (0, 1), (1, 0)]

    def get_states(self):
        self.states = [
            Position(row, col)
            for row in range(GRID_SHAPE[0])
            for col in range(GRID_SHAPE[1])
        ]

    def get_moves_dict(self):
        # state -> actions whose successor stays on the grid
        self.moves_d = {}
        for state in self.states:
            self.moves_d[state] = [
                a for a in self.moves if state.next_state(a)[0] in self.states
            ]

    def step(self, action):
        self.state, reward = self.state.next_state(action)
        done = self.state == Position(*GOAL_POS)
        return self.state, reward, done, {}

    def get_keys(self):
        self.keys = KEY_ACTION_DICT.keys()

    def step_via_key(self, key):
        return self.step(KEY_ACTION_DICT[key])

    def reset(self):
        self.state = Position(*INIT_POS)
        return self.state

    def seed(self, seed):
        # Deterministic environment; kept for API compatibility.
        pass

    def __str__(self):
        agent_cell = (self.state.x, self.state.y)
        rendered_rows = ['']
        for row in range(GRID_SHAPE[0]):
            chars = []
            for col in range(GRID_SHAPE[1]):
                if (row, col) == agent_cell:
                    chars.append(AGENT_KEY)
                elif (row, col) in POS_CHAR_DICT.keys():
                    chars.append(POS_CHAR_DICT[(row, col)])
                elif Position(row, col).in_cliff():
                    chars.append(CLIFF_KEY)
                else:
                    chars.append('.')
            rendered_rows.append(''.join(chars))
        return '\n'.join(rendered_rows) + '\n'
|
19,011 | 08dfd6113c3c7a26f073fa0f75b0052476db922d | import itertools
import pickle
import random
import matplotlib
import networkx as nx
matplotlib.use('agg')
import matplotlib.pyplot as plt
from collections import defaultdict
from config import *
def plot(nodes, edges, group, suffix):
    """Draw the co-author graph and save it as <output_directory><suffix>.png.

    NOTE(review): reads the module-level globals `graph` and
    `output_directory`, which are only defined under __main__ — confirm
    plot() is never imported and called from elsewhere.
    """
    # group id -> node colour ('w' also used for ungrouped nodes)
    colors = [(0, 'w'), (1, 'r'), (2, 'g'), (3, 'b'), (4, 'y')]
    G = nx.Graph()
    G.add_nodes_from(nodes)
    G.add_edges_from(edges)
    pos = nx.spring_layout(G)
    # small white nodes for authors without a group assignment
    nx.draw_networkx_nodes(G, pos, nodelist=[node for node in nodes if node not in group], node_color='w', node_size=120)
    for g_id, color in colors:
        nx.draw_networkx_nodes(G, pos, nodelist=[node for node in nodes if node in group and group[node] == g_id],
                               node_color=color, node_size=600)
    # thick edges between two labelled authors, thin edges otherwise
    nx.draw_networkx_edges(G, pos,
                           edgelist=[edge for edge in edges if edge[0] in graph.author_labels and edge[1] in graph.author_labels], width=4.0)
    nx.draw_networkx_edges(G, pos,
                           edgelist=[edge for edge in edges if edge[0] not in graph.author_labels or edge[1] not in graph.author_labels], width=1.0)
    # labelled authors get larger font
    nx.draw_networkx_labels(G, pos, labels={node: node for node in nodes if node in graph.author_labels}, font_size=20)
    nx.draw_networkx_labels(G, pos, labels={node: node for node in nodes if node not in graph.author_labels}, font_size=12)
    plt.tight_layout()
    axis = plt.gca()
    axis.axes.get_xaxis().set_visible(False)
    axis.axes.get_yaxis().set_visible(False)
    plt.savefig(output_directory + suffix + '.png', dpi=200)
    plt.close()
def read_graph(suffix):
    """Load the pickled (nodes, edges) pair for the given graph suffix from cwd."""
    nodes_path = cwd + 'nodes_' + suffix + '.pkl'
    edges_path = cwd + 'edges_' + suffix + '.pkl'
    with open(nodes_path, 'rb') as handle:
        nodes = pickle.load(handle)
    with open(edges_path, 'rb') as handle:
        edges = pickle.load(handle)
    return nodes, edges
def read_test():
    """Read the labelled test-authors file into {author_name: int_label}.

    NOTE(review): initialised as defaultdict(set) but every value is
    overwritten with an int, so the set default is never used — confirm a
    plain dict was intended. `args` presumably comes from `from config
    import *` — verify.
    """
    test_authors = defaultdict(set)
    with open(args.test_file) as f:
        for line in f:
            splits = line.rstrip().split('\t')
            # underscores in author names become spaces
            test_authors[splits[0].replace('_', ' ')] = int(splits[1])
    return test_authors
class Network(object):
    """Baseline and RL co-author graphs plus test labels, with helpers that
    build node/edge subsets for the comparison plots."""

    def __init__(self):
        nodes_baseline, edges_baseline = read_graph('baseline')
        nodes_rl, edges_rl = read_graph('rl')
        self.author_labels = read_test()
        # adjacency sets for both graph variants
        self.neighbors_baseline = {node: set() for node in nodes_baseline}
        self.neighbors_rl = {node: set() for node in nodes_rl}
        for edge in edges_baseline:
            self.neighbors_baseline[edge[0]].add(edge[1])
            self.neighbors_baseline[edge[1]].add(edge[0])
        for edge in edges_rl:
            self.neighbors_rl[edge[0]].add(edge[1])
            self.neighbors_rl[edge[1]].add(edge[0])
        # nodes isolated in the baseline graph...
        self.dangling = set([node for node in self.neighbors_baseline if not bool(self.neighbors_baseline[node])])
        # ...plus the RL nodes adjacent to them
        self.connected = set([node for node in self.neighbors_rl if bool(self.neighbors_rl[node] & self.dangling)]) | self.dangling

    def edges(self, nodes):
        """Collect RL edges with both endpoints inside *nodes* (deduplicated)."""
        edges = set()
        for node, neighbors in self.neighbors_rl.items():
            if node in nodes:
                intersect = neighbors & nodes
                if bool(intersect):
                    for neighbor in intersect:
                        self.add_edge(edges, node, neighbor)
        return edges

    def colored(self):
        # Nodes that exist in the baseline graph ("colored" in the plots).
        return set(self.neighbors_baseline.keys())

    def is_connected(self, n1, n2, order=0):
        """True when n2 is reachable from n1 within *order* hops (baseline graph).

        NOTE(review): `reduce` is not imported in this file (Python 3 moved
        it to functools); unless `from config import *` re-exports it, this
        raises NameError at the first recursive call — `any(...)` would be
        the idiomatic replacement.
        """
        if order == 0:
            return n1 == n2
        if len(self.neighbors_baseline[n1]) == 0:
            return False
        return reduce(lambda x, y: x or y, [self.is_connected(n, n2, order - 1) for n in self.neighbors_baseline[n1]])

    def baseline(self):
        """Select interesting nodes of the baseline comparison, plus their RL edges."""
        # manually excluded authors
        # black_list = set()
        black_list = set(['Yizhou Sun', 'Jing Gao', 'Dong Xin', 'Wei Wang', 'Wei Lu', 'Manish Gupta', 'Hong Cheng', 'Wenmin Li', 'Chen Chen'])
        nodes = set()
        colored = self.colored()
        for node, nbs in self.neighbors_rl.items():
            if node in black_list:
                continue
            neighbors = nbs & colored
            if node not in self.neighbors_baseline:
                # Uncolored node: keep same-label neighbour pairs that the
                # baseline graph does not already connect within 4 hops.
                for pair in itertools.combinations(neighbors, 2):
                    if pair[0] in black_list or pair[1] in black_list:
                        continue
                    if self.author_labels[pair[0]] == self.author_labels[pair[1]]:
                        if not self.is_connected(pair[0], pair[1], 4):
                            nodes.add(pair[0])
                            nodes.add(pair[1])
            else:
                # Colored node: keep neighbours whose label disagrees.
                for neighbor in neighbors:
                    if neighbor in black_list:
                        continue
                    if self.author_labels[node] != self.author_labels[neighbor]:
                        nodes.add(node)
                        nodes.add(neighbor)
        edges = self.edges(nodes)
        return list(nodes), list(edges)

    def add_edge(self, edges, n1, n2):
        # Add the undirected edge only once, regardless of orientation.
        if (n1, n2) not in edges and (n2, n1) not in edges:
            edges.add((n1, n2))

    def rl1(self, baseline_nodes, baseline_edges):
        """Same-label author pairs newly bridged by shared uncolored RL neighbours."""
        colored = self.colored()
        nodes = set()
        edges = set()
        for (n1, n2) in itertools.combinations(baseline_nodes, 2):
            if self.author_labels[n1] == self.author_labels[n2] and n2 not in self.neighbors_baseline[n1]:
                intersect = self.neighbors_rl[n1] & self.neighbors_rl[n2]
                if bool(intersect):
                    nodes.add(n1)
                    nodes.add(n2)
                    for nb in intersect:
                        if nb not in colored:
                            nodes.add(nb)
                            self.add_edge(edges, n1, nb)
                            self.add_edge(edges, n2, nb)
        # keep the baseline edges whose endpoints share a label
        for edge in baseline_edges:
            if self.author_labels[edge[0]] == self.author_labels[edge[1]]:
                edges.add(edge)
        return list(nodes), list(edges)

    def rl2(self, baseline_nodes, baseline_edges):
        """Different-label baseline edges plus a random sample (~4) of each
        endpoint's uncolored RL neighbours."""
        colored = self.colored()
        nodes = set()
        edges = set()
        for edge in baseline_edges:
            if self.author_labels[edge[0]] != self.author_labels[edge[1]]:
                nodes.add(edge[0])
                nodes.add(edge[1])
                edges.add(edge)
                nbs1 = self.neighbors_rl[edge[0]] - colored
                nbs2 = self.neighbors_rl[edge[1]] - colored
                for nb1 in nbs1:
                    # expected sample size of 4 neighbours per endpoint
                    ratio = 4.0 / len(nbs1)
                    if random.uniform(0, 1) < ratio:
                        nodes.add(nb1)
                        self.add_edge(edges, edge[0], nb1)
                for nb2 in nbs2:
                    ratio = 4.0 / len(nbs2)
                    if random.uniform(0, 1) < ratio:
                        nodes.add(nb2)
                        self.add_edge(edges, edge[1], nb2)
        return list(nodes), list(edges)

    def rl3(self, baseline_nodes, baseline_edges):
        # Placeholder: not implemented.
        return None, None
if __name__ == '__main__':
    # Globals read by plot()/read_graph() above.
    cwd = 'data/'
    output_directory = 'plot/'
    graph = Network()
    baseline_authors, baseline_links = graph.baseline()
    # plot(baseline_authors, baseline_links, graph.author_labels, args.plot_path)
    authors1, links1 = graph.rl1(baseline_authors, baseline_links)
    authors2, links2 = graph.rl2(baseline_authors, baseline_links)
    # Render 50 spring layouts of the rl1 subgraph (layout differs per call).
    for i in range(50):
        plot(authors1, links1, graph.author_labels, 'rl1.' + str(i))
        # plot(authors2, links2, graph.author_labels, 'rl2.' + str(i))
19,012 | 35eed76aa85c3bb98482f60041a2cc497372be43 | import logging
import random
import gym
import numpy as np
import tensorflow as tf
from agent import Agent
from utils import save_args, make_logger
# Module-level logger writing to ./results/logs.txt at INFO level.
LOGGER = make_logger('./results/logs.txt', 'info')
def experiment(config):
    """
    A function that runs an experiment.

    args
        config (dict) hyperparameters and experiment setup

    returns
        config (dict) the mutated config with env/sess objects attached
    """
    with tf.Session() as sess:
        seed = config.pop('seed')
        # NOTE(review): a seed of 0 is falsy and would skip seeding — confirm intended
        if seed:
            seed = int(seed)
            random.seed(seed)
            tf.set_random_seed(seed)
            np.random.seed(seed)

        env_id = config.pop('env_id')
        LOGGER.info('using {} env'.format(env_id))
        env = gym.make(env_id)

        global_rewards = []
        global_step, episode = 0, 0

        config['env'] = env
        config['env_repr'] = repr(env)
        config['sess'] = sess

        render = int(config.pop('render'))
        agent = Agent(**config)

        rl_writer = tf.summary.FileWriter('./results/rl')
        save_args(config, 'results/args.txt')

        while global_step < config['total_steps']:
            episode += 1
            done = False
            rewards, actions = [], []
            observation = env.reset()

            while not done:
                global_step += 1
                # FIX: env.render() previously ran unconditionally, ignoring
                # the 'render' config flag that was popped above.
                if render:
                    env.render()
                action = agent.act(observation)
                next_observation, reward, done, info = env.step(action)
                agent.remember(observation, action, reward,
                               next_observation, done)
                agent.learn()
                rewards.append(reward)
                actions.append(action)
                observation = next_observation

            ep_rew = sum(rewards)
            global_rewards.append(ep_rew)
            # average over at most the last 100 episodes
            avg_reward = sum(global_rewards[-100:]) / len(global_rewards[-100:])

            if episode % 10 == 0:
                log_str = ' step {:.0f} ep {:.0f} reward {:.1f} avg {:.1f}'
                # FIX: use the module LOGGER (consistent with the rest of the
                # file) instead of the root logging module.
                LOGGER.info(log_str.format(global_step,
                                           episode,
                                           ep_rew,
                                           avg_reward))

            # write per-episode reward and rolling average to TensorBoard
            summary = tf.Summary(value=[tf.Summary.Value(tag='episode_reward',
                                                         simple_value=ep_rew)])
            rl_writer.add_summary(summary, episode)
            avg_sum = tf.Summary(value=[tf.Summary.Value(tag='avg_last_100_ep',
                                                         simple_value=avg_reward)])
            rl_writer.add_summary(avg_sum, episode)
            rl_writer.flush()

        return config
if __name__ == '__main__':
    # Candidate environments; the index below selects which one to run.
    envs = ['Pendulum-v0', 'CartPole-v0', 'MountainCar-v0']

    config_dict = {
        'env_id': envs[2],
        'discount': 0.97,                 # reward discount factor
        'tau': 0.001,                     # target-network soft-update rate
        'total_steps': 500000,
        'batch_size': 32,
        'layers': (50, 50),               # hidden layer sizes
        'learning_rate': 0.0001,
        'epsilon_decay_fraction': 0.3,    # fraction of steps over which epsilon decays
        'memory_fraction': 0.4,           # replay memory size as fraction of total_steps
        'process_observation': False,
        'process_target': False,
        'seed': 42,
        'render': 1                       # 1 = render each step, 0 = headless
    }

    output = experiment(config_dict)
|
19,013 | effa4cfb4bcf73b5f3c647b2a6ff0679846c30a6 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-02 18:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the required `status` pipeline-stage
    # choice field to the Project model (default 'Lead' applied once, then
    # dropped via preserve_default=False).

    dependencies = [
        ('roofy', '0009_auto_20171002_1124'),
    ]

    operations = [
        migrations.AddField(
            model_name='project',
            name='status',
            field=models.CharField(choices=[('Lead', 'Lead'), ('Prospect', 'Prospect'), ('Inspection/Demo', 'Inspection/Demo'), ('Estimate/Proposal', 'Estimate/Proposal'), ('Contract Signed', 'Contract Signed'), ('Pre-Production', 'Pre-Production'), ('Production', 'Production'), ('Post-Production', 'Post-Production'), ('Accounts Receivable', 'Accounts Receivable'), ('Closed Pending', 'Closed Pending'), ('Closed - Complete', 'Closed - Complete'), ('Cancelled', 'Cancelled')], default='Lead', max_length=200),
            preserve_default=False,
        ),
    ]
|
19,014 | 2ac062983235217d912e6234d127cecc2ad7a4b6 | from Usuari import Usuari
from Subasta import Subasta
# Creacion clase Test para probar el programa.
# Test class used to exercise the program; NOTE all of this code runs at
# class-definition time (import side effect), not via instances.
class Test:
    # Create the users (name, starting credit)
    Toni = Usuari("Toni", 100)
    Pep = Usuari("Pep", 150)
    Enric = Usuari("Enric", 300)

    # First auction: owned by Toni
    s1 = Subasta("Telefon Movil", Toni)
    print(str(s1.licitar(Pep, 100)))
    print(str(s1.getLicitacioMajor()))
    print(str(s1.licitar(Enric, 50)))
    print(str(s1.getLicitacioMajor()))
    print(str(s1.executar()))
    # bidding after execution — presumably expected to be rejected; confirm
    print(str(s1.licitar(Enric, 200)))

    # Second auction: owned by Pep
    s2 = Subasta("Impresora 3D", Pep)
    print(str(s2.licitarUsuari(Enric)))
    print(str(s2.executar()))

    # Walk the users and show each one's remaining credit
    usuaris = [Toni, Pep, Enric]
    for usuari in usuaris:
        print("Usuari: " + str(usuari))
        print("Crèdit actual de l'usuari " + str(usuari.getCredit()))

    # Walk the auctions (their owners are included in their string form)
    licitacions = [s1, s2]
    for licitacio in licitacions:
        print(licitacio)
|
19,015 | 6cadc2c365e91baba690a92519f676cd5ddc5a66 | """
Author: Xiaocong Du
Description:
Title: Single-Net Continual Learning with Progressive Segmented Training (PST)
"""
import logging
import os
import pickle
import sys
import scipy.io as scio
import continualNN
from load_cifar import *
from utils_tool import count_parameters_in_MB
import matplotlib.pyplot as plt
import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from args import parser
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"]= args.gpu
log_path = 'log_main_edge.txt'.format()
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M%p')
fh = logging.FileHandler(os.path.join('../../baseline_library/',log_path))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
logging.info("*************************************************************************************************")
logging.info(" PST_main.py ")
logging.info("*************************************************************************************************")
logging.info("args = %s", args)
logging.info("*************************************************************************************************")
logging.info("args = %s", args)

# Build the continual-learning model and its optimiser/schedule.
method = continualNN.ContinualNN()
method.initial_single_network(init_weights = True)
method.initialization(args.lr, args.lr_step_size, args.weight_decay)

# -----------------------------------------
# Prepare dataset
# -----------------------------------------
task_list, _ = method.create_task()
logging.info('Task list %s: ', task_list)

# args.task_division is e.g. "5,1,1,1,1": classes per task, first entry is the
# "cloud" (base) task.
task_division = []
for item in args.task_division.split(","):
    task_division.append(int(item))
total_task = len(task_division)

if args.dataset == 'cifar10':
    num_classes = 10
    assert sum(task_division) <= 10
elif args.dataset == 'cifar100':
    num_classes = 100
    assert sum(task_division) <= 100

cloud_class = task_division[0]
task_id = 1
cloud_list = task_list[0 : task_division[0]]
total = 0
for i in range(task_id+1):
    total += task_division[i]
current_edge_list = task_list[task_division[0]: total]
all_list = task_list[0 : total]
all_data_list = []
all_data_list.append(cloud_list)

train_cloud, test_cloud = get_dataset_cifar(cloud_list, 0)
# Log the re-mapped labels of the first batch only.
for batch_idx, (data, target) in enumerate(train_cloud):
    logging.info('CLOUD re-assigned label: %s\n', np.unique(target))
    break

# #
# Epoch budgets for the per-task training phases (fractions of epoch_edge).
num_epoch0 = args.epoch_edge
num_epoch1 = int(args.epoch_edge * 0.2)
num_epoch2 = int(args.epoch_edge * 0.5)
num_epoch3 = int(args.epoch_edge * 1)
# num_epoch0 = 2
# num_epoch1 = 2
# num_epoch2 = 2
# num_epoch3 = 2

# Accuracy histories, appended once per epoch.
train_acc = []
test_acc_0 = []
test_acc_current = []
test_acc_mix = []
test_task_accu = [] # At the end of each task, best overall test accuracy. Length = number of tasks
test_acc_0_end = [] # At the end of each task, the accuracy of task 0. Length = number of tasks

logging.info("================================== Train task 0 ==========================================")
"""Test data from the first task"""
best_acc_0 = 0.0
for epoch in range(args.epoch):
    train_acc.append(method.train(epoch, train_cloud))
    test_acc_0.append(method.test(test_cloud))
    test_acc_current.append(np.zeros(1))
    test_acc_mix.append(method.test(test_cloud))
    if test_acc_0[-1] > best_acc_0:
        best_acc_0 = test_acc_0[-1]
    logging.info('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Single-head This training on T0 testing accu is : {:.4f}'.format(method.test(test_cloud)))
    logging.info('train_acc {0:.4f}\n\n\n'.format(train_acc[-1]))
test_task_accu.append(best_acc_0)
torch.save(method.net.state_dict(), '../../baseline_library/model_afterT{0}_Accu{1:.4f}.pt'.format(0, best_acc_0))
# Sequentially fine-tune on each remaining task (no forgetting prevention —
# this is the naive baseline).
for task_id in range(1, total_task):
    logging.info("================================== 1. Current Task is {} : Prepare dataset ==========================================".format(task_id))
    # -----------------------------------------
    # Prepare dataset
    # -----------------------------------------
    total = 0
    for i in range(task_id+1):
        total += task_division[i]
    current_edge_list = task_list[(total-task_division[task_id]) : total] # 0 1 2 3 4, 5, 6, 7 taskid = 3 task_division=[5,1,1,[1],1,1]
    all_list = task_list[0 : total]
    all_data_list.append(current_edge_list)

    memory_each_task = int(args.total_memory_size / task_id) # The previous tasks shares the memory
    alltask_list = []
    alltask_memory = []
    alltask_single_list = []
    for i in range(task_id+1):
        alltask_list.append(task_list[i])
        alltask_memory.append(memory_each_task)
        alltask_single_list += task_list[i]
    # NOTE(review): alltask_memory is rebuilt here, so the equal-share values
    # appended above are dead code — confirm which policy is intended.
    alltask_memory = []
    for i in range(len(task_division)):
        alltask_memory.append(int(task_division[i] * args.total_memory_size / num_classes))
    logging.info('alltask_memory = %s', alltask_memory)

    train_edge, test_edge = get_dataset_cifar(current_edge_list, task_division[0]+ (task_id-1)*task_division[task_id] )
    # Log only the first batch's re-mapped labels.
    for batch_idx, (data, target) in enumerate(train_edge):
        logging.info('EDGE re-assigned label: %s\n', np.unique(target))
        break

    train_all, test_all = get_dataset_cifar(all_list, 0)
    for batch_idx, (data, target) in enumerate(train_all):
        logging.info('ALL re-assigned label: %s\n', np.unique(target))
        break

    """Test data from all the tasks"""
    _, test_mix_full = get_dataset_cifar(alltask_single_list, 0)
    for batch_idx, (data, target) in enumerate(test_mix_full):
        logging.info('test_mix_full (all test data till now) re-assigned label: %s\n', np.unique(target))
        break

    logging.info("Current Task is {} : Train task without any technique to prevent forgetting====================".format(task_id))
    # Lower LR for fine-tuning on the new task.
    method.initialization(args.lr*0.1, args.lr_step_size, args.weight_decay)
    best_acc_mix = 0.0
    for epoch in range(args.epoch_edge):
        train_acc.append(method.train(epoch, train_edge))
        test_acc_0.append(method.test(test_cloud))
        test_acc_current.append(method.test(test_edge))
        test_acc_mix.append(method.test(test_mix_full))
        if test_acc_mix[-1] > best_acc_mix:
            best_acc_mix = test_acc_mix[-1]
        logging.info('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Single-head T0 testing accu is : {:.4f}'.format( test_acc_0[-1]))
        logging.info('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Single-head current testing accu is : {:.4f}'.format( test_acc_current[-1]))
        logging.info('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Single-head mixed all tasks testing accu is : {:.4f}'.format( test_acc_mix[-1]))
        logging.info('train_acc {0:.4f} \n\n\n'.format(train_acc[-1]))
    test_task_accu.append(best_acc_mix)
## RESULTS DOCUMENTATION
logging.info("====================== Document results ======================")
title_font = { 'size':'8', 'color':'black', 'weight':'normal'} # Bottom vertical alignment for more space
axis_font = { 'size':'10'}

# Incremental-accuracy curve: best mixed accuracy after each task.
plt.figure()
x = np.linspace(task_division[0], num_classes, num = len(test_task_accu))
plt.xlim(0, num_classes)
plt.xlabel('Task ID')
plt.ylabel('Accuracy')
plt.plot(x, test_task_accu , 'g-o', alpha=1.0, label = 'our method')
plt.yticks(np.arange(0, 1.0, step=0.1))
plt.xticks(np.arange(0, num_classes+1, step= 10))
plt.legend(loc='best')
plt.title('Incrementally learning {} classes at a time'.format(args.classes_per_task))
plt.savefig('../../baseline_library/incremental_curve_T{}_{:.4f}.png'.format(task_id, best_acc_mix))
# plt.title('Task: {} Model: {} \n Batch: {} Memory: {}\n Epoch_edge: {} ModelSize: {}'.format(task_division, args.model, args.batch_size, alltask_memory, args.epoch_edge, args.NA_C0), **title_font)
# plt.show()

# Mixed-accuracy curve with a small +/-1% jitter copy for comparison.
text_acc_mix_noise = []
for idx in range(len(test_acc_mix)):
    text_acc_mix_noise.append(test_acc_mix[idx] + random.randint(-1, 1) * 0.01)

x = np.linspace(0, len(test_acc_mix), len(test_acc_mix))
plt.figure(figsize=(20,10))
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.plot(x, train_acc, 'k', alpha=0.5, label = 'Training accuracy')
plt.plot(x, test_acc_mix, 'b', alpha=0.5, label = 'Testing accuracy - mix')
plt.plot(x, text_acc_mix_noise, 'g', alpha=0.5, label = 'Testing accuracy - noise')
plt.yticks(np.arange(0, 1.0, step=0.1))
plt.xticks(np.arange(0, len(test_acc_mix), step=10))
plt.grid(color='b', linestyle='-', linewidth=0.1)
plt.legend(loc='best')
plt.title('Learning curve')
plt.savefig('../../baseline_library/PSTmain_learning_curve_model_{}_acc{:.4f}.png'.format(args.model, best_acc_mix))

# Dump every history/hyperparameter into one .mat file for later analysis.
param = count_parameters_in_MB(method.net)
logging.info('Param:%s',param)

scio.savemat('../../baseline_library/PSTmain_model{}_acc{:.4f}.mat'.format(args.model, best_acc_mix),
    {'train_acc':train_acc, 'test_acc_0':test_acc_0,'test_acc_current':test_acc_current, 'test_acc_mix':test_acc_mix,
    'best_acc_mix':best_acc_mix, 'best_acc_0': best_acc_0,'model':args.model,
    'NA_C0':args.NA_C0, 'epoch': args.epoch, 'epoch_edge': args.epoch_edge, 'param':param,
    'lr': args.lr, 'lr_step_size':args.lr_step_size,
    'classes_per_task': args.classes_per_task, 'test_acc_0_end':test_acc_0_end, 'test_task_accu':test_task_accu,
    'weight_decay': args.weight_decay, 'score': args.score,
    'dataset':args.dataset, 'task_list': task_list, 'seed':args.seed, 'shuffle':args.shuffle,
    'num_epoch0':num_epoch0, 'num_epoch1':num_epoch1, 'num_epoch2':num_epoch2, 'num_epoch3':num_epoch3,
    'threshold_task_ratio':0, 'text_acc_mix_noise':text_acc_mix_noise})
|
19,016 | d2c68568d014094b2eb1b2975601a601ab51ae3a | #!/usr/bin/python
import sys
import RPi.GPIO as GPIO
import time
from datetime import datetime
import smtplib
from email.mime.text import MIMEText
# Address/Groups to send to (local parts intentionally blank in this copy)
carrie_text='@vtext.com'
randy_text='@messaging.sprintpcs.com'
carrie_email='@cox.net'
randy_email='@cox.net'
dummy_email='@cox.net'
# Set to 1 by mailsend() when the previous send failed, so the next message
# can mention the earlier failure.
exception=0
# Alarm events go to everyone; arm/disarm events only to the short list.
send_to_alarm=[randy_email,randy_text,carrie_text,carrie_email]
# send_to_arm=[randy_email,carrie_email]
send_to_arm=[randy_email]
# send_to_arm=[dummy_email]
# This creates an email and sends it to an address of your choosing directly from python
# This creates an email and sends it to an address of your choosing directly
# from python (Python 2 source). On failure the global `exception` flag is
# set so the NEXT successful message reports the earlier failure.
def mailsend (recipients,sender,subject):
    try:
        print "Sending alert messages to: ",recipients,"Subject:",subject,
        s=smtplib.SMTP('smtp.cox.net')
#        s.set_debuglevel(1)
        # If the previous mail caused an exception, let the recipient know
        global exception
        if exception == 1:
            subject += 'A previous message caused an exception and was unable to send an e-mail.'
        msg = MIMEText(subject)
        msg['From'] = sender
        msg['To'] = ", ".join(recipients)
        msg['Subject'] = "Home Alarm Alert Message"
        s.sendmail(sender, recipients, msg.as_string())
        s.quit()
        s.close()
        exception=0
    except Exception as e:
        print "Caught exception in the e-mail function: %s" % (e)
        exception=1
########################################################################################
# NOTE(review): GPIO.setmode is called twice (harmless duplicate).
GPIO.setmode(GPIO.BOARD)
# Set up the GPIO channels: pin 29 = arm sensor, pin 33 = alarm sensor.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(29, GPIO.IN)
GPIO.setup(33, GPIO.IN)
# Read the current sensor state as our starting point
# The sensors read reversed logic so:
# 0 = Armed and Alarming
# 1 = Disarmed and Quiet
armstate = GPIO.input(29)
alarmstate = 1 # Assume not in alarm, and report if it is in alarm
# Seconds the arm sensor has disagreed with armstate (debounce counter).
armtimer=0
# Poll both sensors every 5 seconds forever, e-mailing on state changes.
while True:
    # Input from the sensors
    armsensor = GPIO.input(29)
    alarmsensor = GPIO.input(33)

    # Display the GPIO state
#    print armsensor, alarmsensor
    tm = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    sys.stdout.write(tm)
    if armsensor == 0:
        sys.stdout.write (" - Alarm is Armed [0] & ")
    else:
        sys.stdout.write (" - Alarm is Disarmed [1] & ")
    if alarmsensor == 0:
        sys.stdout.write ("Alarm is Alarming [0]\n")
    else:
        sys.stdout.write ("Alarm is Clear [1]\n")

    #####################################################################################
    # This is the Alarm sensor check
    if alarmsensor != alarmstate:
        # The alarm state has changed
        alarmstate = alarmsensor
        if alarmstate == 0:
            print "The home alarm is alarming! (0)"
            mailsend (send_to_alarm,'randyscott@cox.net','The Home Alarm Is Alarming at: ' + tm + '\n')
        else:
            print "The home alarm has cleared. (1)"
            mailsend (send_to_alarm,'randyscott@cox.net','The Home Alarm Has Cleared at: ' + tm + '\n')

    #####################################################################################
    # As long as we're in an alarm state, we ignore the Arm sensor which is flashing and unreliable. Go back to the top of the loop until alarm clears.
    if alarmstate == 0:
        print "Ignoring the ARM sensor while in alarm."
        sys.stdout.flush()
        time.sleep(5)
        continue

    #####################################################################################
    # This is the ARM sensor check (debounced: sensor must disagree for >40s)
    if armsensor != armstate:
        # We need the sensor to be different than state for >40 seconds before we flip the flag
        if armtimer > 40:
            # Here means that the flag and sensor have been different for > 40 seconds
            armstate = armsensor
            if armstate == 0:
                print "The home alarm is armed. (0)"
                mailsend (send_to_arm,'randyscott@cox.net','The Home Alarm Is Armed at: ' + tm + '\n')
            else:
                print "The home alarm is disarmed (1)"
                mailsend (send_to_arm,'randyscott@cox.net','The Home Alarm Is Disarmed at: ' + tm + '\n')
        else:
            # Here means that the flag and sensor have been different for less than 40 seconds. Keep waiting.
            armtimer = armtimer + 5
            if armtimer == 5:
                print "The ARM sensor has changed. Waiting 45 seconds for it to stabilize."
    else:
        # Here means that the flag and sensor are the same. Reset the timer and keep going
        armtimer=0

    #####################################################################################
    sys.stdout.flush()
    time.sleep(5)
|
19,017 | eb687ba68f7f7030be167470a98265b162e52806 | #################################################################################################
# CSE 597 - Assignment 1
# Description - This python program loads the word embeddings from the 4 GLoVE word
# embedding files and computes the cosine similarity for the 5 lists each
# of the randomly generated verbs and nouns and their synonyms.
# Created by - Namita Kalady Prathap
# Last Modified on - Feb 1 2019
#################################################################################################
#import files
from numpy import asarray
from sklearn.metrics.pairwise import cosine_similarity
# function to load the embeddings from a file
def glove_load( fname ):
    """Load GloVe embeddings from *fname*.

    Each line is "<word> <v1> <v2> ...". Returns a dict mapping word to a
    float32 numpy vector, and prints the number of vectors loaded.
    """
    word_embeddings = {}
    # FIX: use a context manager so the file is closed even if a line
    # fails to parse (the old code leaked the handle on exceptions).
    with open(fname) as fin_file:
        for i in fin_file:
            values = i.split()
            temp = values[0]
            temp2 = asarray(values[1:], dtype='float32')
            # assigning the vector to the word
            word_embeddings[temp] = temp2
    print(' %s vectors were loaded.' % len(word_embeddings))
    # returning the list of vectors
    return word_embeddings
# function to compute the cosine similarity for the noun/verb lists
# (Python 2 source; relies on the module-global `word_vec` embedding dict)
def cosine_func( nv_list):
    """Print the pairwise cosine similarity of every pair of words inside
    each of the five 5-word synonym lists in *nv_list*."""
    cos_vec = []
    temp = []
    # Collect the embedding vector of each word, list by list.
    for i in range (0,5):
        temp = []
        for j in range (0,5):
            word = nv_list[i][j]
            temp.append(word_vec[word])
        #making the list of vectors into 2D format for cosine similarity computation
        cos_vec.append(temp)
    tmp2 = []
    # All unordered pairs (i, j), i < j, within each list k.
    for k in range (0,5):
        for i in range (0,5):
            for j in range(i+1,5):
                #computing the cosine similarity and printing the result.
                tmp2 = cosine_similarity([cos_vec[k][i]],[cos_vec[k][j]])
                print nv_list[k][i], ',' ,nv_list[k][j], '-', tmp2
# Five 5-word synonym lists each for verbs and nouns (Python 2 source).
noun = []
verb = []
#list of synonyms for the verbs
verb.append(['describe', 'narrate', 'recount', 'relate', 'chronicle'])
verb.append(['unlock' , 'unbolt', 'unlatch', 'unpick', 'unfasten'])
verb.append(['flash', 'shine', 'flare', 'blaze', 'glare'])
verb.append(['screw','fasten', 'secure', 'fix', 'attach'])
verb.append(['deliver','bring', 'take', 'convey', 'transport'])
#list of synonyms for the nouns
noun.append(['expert','specialist', 'authority', 'pundit', 'oracle'])
noun.append(['trade', 'craft', 'occupation', 'job', 'profession'])
noun.append(['number','numeral', 'integer', 'figure', 'digit'])
noun.append(['request', 'appeal', 'entreaty', 'plea', 'petition'])
noun.append(['color','hue', 'shade', 'tint', 'tone'])
#files of the GLoVE embeddings (50/100/200/300-dimensional variants)
filename = ['glove.6B.50d.txt','glove.6B.100d.txt','glove.6B.200d.txt','glove.6B.300d.txt']
#Looping through each file of GLoVE embeddings to compute the cosine similarity for each list
for index in range(len(filename)):
    word_vec = []
    print 'Loading the word embeddings from ', filename[index]
    word_vec = glove_load(filename[index])
    print('Calculating the cosine similarities for the synonym list of verbs')
    cosine_func(verb)
    print('Calculating cosine similarities for the synonym list of nouns')
    cosine_func(noun)
print('Successfully calculated the cosine similarity for the synonym lists of nouns and verbs.')
19,018 | 0112502dcbcd9b5bea397f767183e37cc6b2cab5 | import numpy as np
import random
import scipy.io
def load_data(mode='train'):
    """
    Function to load the 14-bus image dataset from .mat files.
    NOTE(review): docstring said "MNIST" but the files are *_14bus.mat.
    :param mode: train or test
    :return: images and the corresponding labels
             (images are reshaped to (N, 14, 14, 1); the one-hot labels
             produced by reformat() are discarded — raw labels returned)
    """
    if mode == 'train':
        x_train = scipy.io.loadmat('train_images_14bus.mat')['x_train']
        y_train = scipy.io.loadmat('train_labels_14bus.mat')['y_train']
        x_valid = scipy.io.loadmat('valid_images_14bus.mat')['x_valid']
        y_valid = scipy.io.loadmat('valid_labels_14bus.mat')['y_valid']
        print(x_valid.shape, y_valid.shape, type(x_valid), type(y_valid))
        # reshape images only; keep the original (non one-hot) labels
        x_train, _ = reformat(x_train, y_train)
        x_valid, _ = reformat(x_valid, y_valid)
        print(x_valid.shape, y_valid.shape, type(x_valid), type(y_valid))
        return x_train, y_train, x_valid, y_valid
    elif mode == 'test':
        x_test = scipy.io.loadmat('test_images_14bus.mat')['x_test']
        y_test = scipy.io.loadmat('test_labels_14bus.mat')['y_test']
        x_test, _ = reformat(x_test, y_test)
        return x_test, y_test
def randomize(x, y):
    """Shuffle samples and labels together using one random permutation."""
    order = np.random.permutation(y.shape[0])
    return x[order, :, :, :], y[order]
def reformat(x, y):
    """
    Reformats the data to the format acceptable for convolutional layers.

    :param x: input array of flattened 14x14 images, shape (N, 196)
    :param y: corresponding integer labels, shape (N,)
    :return: images reshaped to (N, 14, 14, 1) float32, and one-hot labels
             over the 16 classes as float32
    """
    # Fixed geometry for this dataset: 14x14 single-channel, 16 classes.
    img_size, num_ch, num_class = 14, 1, 16
    images = x.reshape((-1, img_size, img_size, num_ch)).astype(np.float32)
    one_hot = (np.arange(num_class) == y[:, None]).astype(np.float32)
    return images, one_hot
def get_next_batch(x, y, start, end):
    """Return the [start:end) slice of both the inputs and the labels."""
    return x[start:end], y[start:end]
|
19,019 | fcab8a8eed05f164d5f34a60a33f75729b6a355a | from tanxees.utils.Comparer import ComparerMixin
class Direction(ComparerMixin):
    """A cardinal direction represented as an angle in degrees (0/90/180/270)."""

    # FIX: ('angle') is just the string 'angle' — iterating it yields single
    # characters, not attribute names. The mixin presumably iterates this
    # collection, so it must be a one-element tuple.
    COMPARE_ATTRS = ('angle',)

    RIGHT = 0
    DOWN = 90
    LEFT = 180
    UP = 270

    def __init__(self, angle):
        # Only the four cardinal angles are valid.
        if angle not in (self.RIGHT, self.DOWN, self.LEFT, self.UP):
            raise ValueError('Unexpected angle value: %s' % angle)
        self.__angle = angle

    @property
    def angle(self):
        return self.__angle
|
19,020 | 5846933095c38adaeac9da2f4ef1db81cfa41306 | # Generated by Django 3.2.5 on 2021-07-13 16:56
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration (Django 3.2): makes the implicit primary keys
    # of the four mainapp models explicit AutoFields.

    dependencies = [
        ('mainapp', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='customer',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='order',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='product',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='tag',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
|
19,021 | c2a57d4058e339d6c5097377971224d32f5dd5b1 | from rest_framework import permissions
class BlacklistPermission(permissions.BasePermission):
    """
    Global permission check for blacklisted IPs.
    """

    def has_permission(self, request, view):
        # NOTE(review): `Blacklist` is not imported or defined in this file —
        # this raises NameError at runtime unless it is provided elsewhere.
        ip_addr = request.META['REMOTE_ADDR']
        blacklisted = Blacklist.objects.filter(ip_addr=ip_addr).exists()
        # Deny access when the requester's IP appears in the blacklist.
        return not blacklisted
class IsOwnerOrReadOnly(permissions.BasePermission):
    """
    Object-level permission that grants write access only to an object's
    owner. Assumes the model instance has an `owner` attribute; safe
    (read-only) methods are always allowed.
    """

    def has_object_permission(self, request, view, obj):
        # Reads (GET, HEAD, OPTIONS) are open to everyone; any write
        # requires that the requesting user owns the object.
        is_read_only = request.method in permissions.SAFE_METHODS
        return is_read_only or obj.owner == request.user
# ########################################################################################
# Note that the generic views will check the appropriate object level permissions,
# but if you're writing your own custom views,
# you'll need to make sure you check the object level permission checks yourself.
# You can do so by calling self.check_object_permissions(request, obj) from the view
# once you have the object instance. This call will raise an appropriate APIException
# if any object-level permission checks fail, and will otherwise simply return.
###########################################################################################
####################
# The IsAdminUser permission class will deny permission to any user, unless user.is_staff is True in which case permission will be allowed.
#
# This permission is suitable if you want your API to only be accessible to a subset of trusted administrators.
####################
|
19,022 | 2b8815e348bdba1735fac5f581ed6b2061ff1ab3 | from flask import Flask, request, redirect, url_for, flash, jsonify
from flask_cors import CORS
import numpy as np
import json
import pandas as pd
import numpy as np
#import blog as bg
#from flask_mysqldb import MySQL
import pymysql
import base64
# NOTE(review): hard-coded connection with root user and empty password —
# move credentials to configuration before deploying.
db = pymysql.connect("localhost","root","","womensecurity")
cursor = db.cursor()

app = Flask(__name__)
CORS(app)
@app.route('/register',methods=['POST'])
def register():
    """Create a user from the JSON body {aadhar_id, name, password}."""
    aadhar = request.json.get('aadhar_id')
    name = request.json.get('name')
    password = request.json.get('password')
    print(aadhar)
    print(name)
    # Parameterized query — values are escaped by the driver.
    # NOTE(review): the password is stored in plaintext; hash it (confirm).
    sql = "INSERT INTO users (aadhar_id,name,password) VALUES (%s, %s, %s)"
    val = (aadhar,name,password)
    cursor.execute(sql,val)
    db.commit()
    return jsonify({"status":"Successful"})
# app.config['CORS_HEADERS'] = 'Content-Type'
# CORS(app, resources={r"/api": {"origins": "*"}})
@app.route('/info', methods=['PUT'])
# @crossdomain(origin="*", headers=["Content-Type"])
def blogs():
    """Look up one person's name/latitude/longitude by name."""
    data = request.json.get('name')
    cursor.execute("SELECT Name,latitude,longitude from info where name=%s ",data)
    results = cursor.fetchall()
    # NOTE(review): raises IndexError when the name is unknown (empty result).
    return jsonify(results[0])
@app.route('/hits', methods=['GET'])
def hits():
    """Return all police-station statistics ordered by hit count, as JSON."""
    # Cleanup: removed the unused `dt` list and the unused return value of
    # cursor.execute (pymysql returns the affected row count, not rows).
    cursor.execute("SELECT * from policestatistics order by hits desc")
    results = cursor.fetchall()
    print(results)
    return json.dumps(results)
@app.route('/reporters', methods=['GET'])
def reporters():
    """Return the names of everyone who has filed a report, as JSON."""
    # Cleanup: removed the unused `dt` list and the unused execute() result.
    cursor.execute("SELECT name from info")
    results = cursor.fetchall()
    print(results)
    return json.dumps(results)
if __name__ == '__main__':
    # Development server only; run behind a WSGI server in production.
    app.run(debug=True)
|
19,023 | 9009a650df223d7d1ebda8e5454d995c556ce10c | import json
import scrapy
from kingfisher_scrapy.base_spider import IndexSpider
from kingfisher_scrapy.util import components, handle_http_error, join, parameters
class Uganda(IndexSpider):
    """
    Domain
      Government Procurement Portal (GPP) of Public Procurement and Disposal of Public Assets Authority (PPDA)
    API documentation
      https://docs.google.com/spreadsheets/d/10tVioy-VOQa1FwWoRl5e1pMbGpiymA0iycNcoDFkvks/edit#gid=365266172
    """
    name = 'uganda_releases'
    data_type = 'release_package'
    # JSON pointer IndexSpider follows to discover the number of list pages.
    total_pages_pointer = '/data/last_page'
    yield_list_results = False
    formatter = staticmethod(parameters('page'))
    base_url = 'https://gpp.ppda.go.ug/adminapi/public/api/pdes'
    # throttle: the API is rate-limited
    download_delay = 0.9

    def start_requests(self):
        # First request discovers the paginated PDE list; each page is then
        # handed to parse_data via IndexSpider's parse_list.
        yield scrapy.Request(
            'https://gpp.ppda.go.ug/adminapi/public/api/pdes',
            meta={'file_name': 'page-1.json'},
            callback=self.parse_list,
            cb_kwargs={'callback': self.parse_data}
        )

    @handle_http_error
    def parse_data(self, response):
        # For every procurement plan of every PDE on this page, request the
        # releases of all four OCDS stages.
        pattern = 'https://gpp.ppda.go.ug/adminapi/public/api/open-data/v1/releases/{}?fy={}&pde={}'
        data = json.loads(response.text)
        for pdes in data['data']['data']:
            for plans in pdes['procurement_plans']:
                for tag in ('planning', 'tender', 'award', 'contract'):
                    yield self.build_request(
                        pattern.format(tag, plans['financial_year'], plans['pde_id']),
                        formatter=join(components(-1), parameters('fy', 'pde'))
                    )
|
19,024 | eff4f027d3256511afdd8c2f8176f5a78983b7c9 | age = int(input('Сколько вам лет?: '))
grade = int(input('В каком классе вы учитесь?: '))
if age >= 12 and grade >= 7:
print('Доступ разрешен.')
else:
print('Доступ запрещен.') |
19,025 | 2ac68992f8bdf6e230aa5723c519ec427280e563 | #! /usr/bin/python
import sys
import copy
from matrix import Matrix
from math import sqrt
def gramschmidt(basis):
    """Run Gram-Schmidt on *basis* (a list of column Matrix objects) and
    return a LaTeX align* derivation of the orthogonal basis, followed by a
    template for the normalized (orthonormal) basis."""
    ret = "\\begin{align*}\n"
    orthogonalbasis = []
    for i,x in enumerate(basis):
        ret += "  v_" + str(i+1) + " &= x_" + str(i+1)
        v = copy.copy(x)
        # subtract the projection of x onto every previously computed v_j
        for j in range(len(orthogonalbasis)):
            v1 = "v_"+str(j+1)
            ret += " - \\frac{x_"+str(i+1)+"\cdot "+v1+"}{"+v1+"\cdot "+v1+"}"+v1
            v2 = orthogonalbasis[j]
            proj = x.cdot(v2) / v2.cdot(v2)
            v -= v2 * proj
        orthogonalbasis.append(v)
        ret += " = " + v.toString({'nobar':True,'compact':True}) + " \\\\\n"
    ret += "\\end{align*}\n"
    ret += "$\left\{" + ",".join(x.toString({'nobar':True,'compact':True}) for x in orthogonalbasis) + "\\right\}$ is therefore an orthogonal basis for $W$"
    ret += ", so \n\\begin{align*}\n"
    ret += "\\left\{" + ",".join("u_{"+str(i+1)+"}" for i in range(len(orthogonalbasis))) + "\\right\} \n"
    ret += "&= \\left\{" + ",".join("\\frac{1}{\|v_{"+str(i+1)+"}\|}v_{"+str(i+1)+"}" for i in range(len(orthogonalbasis))) + "\\right\} \\\\\n"
    ret += "&= \\left\{" + ",".join("\\frac{1}{\\sqrt{"+ str(v.cdot(v)) +"}}" + v.toString({'nobar':True,'compact':True}) for v in orthogonalbasis) + "\\right\} \\\\\n"
    # final line intentionally left empty for the reader to fill in
    ret += "&= \\left\{\\right\}\n"
    ret += "\\end{align*} is an orthonormal basis."
    return ret
if __name__ == "__main__":
    # Python 2 entry point: first line is the number of vectors, then one
    # whitespace-separated vector per line.
    r = input()
    basis = []
    for i in range(r):
        basis.append(Matrix([ [x] for x in raw_input().split()]))
    print gramschmidt(basis)
|
19,026 | d3ea720a8fe678c1521c44a437d32391458e2640 | import pickle
from advanced.dir import getFile
from datetime import time
def readPickle(zdir, pick):
    """Load and return the object stored in <pick>.pickle inside *zdir*.

    Retries up to 10 times (1s apart) on transient open/load failures;
    returns None if the file is empty (EOFError) or never becomes readable.
    """
    # FIX: the module does `from datetime import time`, which shadows the
    # stdlib time module, so the old `time.sleep(1)` raised AttributeError
    # (silently swallowed by a bare `except:`). Import the real module here.
    import time
    data = None
    for _ in range(10):
        try:
            # context manager closes the handle even when pickle.load fails
            with open(getFile(zdir, pick + ".pickle"), 'rb') as inz:
                data = pickle.load(inz)
            break
        except EOFError:
            # empty/truncated pickle: treat as "no data", do not retry
            break
        except Exception:
            print("Retry open pickle" + pick)
            time.sleep(1)
    return data
def writePickle(zdir, pick, msg):
    """Serialize *msg* into <pick>.pickle inside *zdir*; always returns True."""
    with open(getFile(zdir, pick + ".pickle"), 'wb') as output:
        pickle.dump(msg, output)
    return True
19,027 | 5f09069dc68ae113c7127d01d588dba010c8ecaa | #!/usr/bin/python
# Shyam Govardhan
# 26 December 2018
# Coursera: Interfacing with the Raspberry Pi
# Week 3 Assignment
from twython import Twython
from twython import TwythonStreamer
# Python 2: tweet_init.py is expected to define the Twitter credentials
# c_k, c_s, a_t, a_s used below.
execfile("tweet_init.py")
# Running count of matching tweets seen so far.
tweetCount = 0
class MyStreamer(TwythonStreamer):
    """Streamer that counts matching tweets and reports once three arrive."""
    def on_success(self, data):
        # Called by Twython for each streamed payload; only count payloads
        # that carry a 'text' field (i.e. actual tweets).
        global tweetCount
        if 'text' in data:
            tweetCount += 1
            print("Found it: tweetCount(%d)" % tweetCount)
            if (tweetCount >= 3):
                print("Ian G. Harris is popular!")
# Open the filtered status stream; this call blocks, invoking on_success
# for every tweet matching the track phrase.
stream = MyStreamer(c_k, c_s, a_t, a_s)
stream.statuses.filter(track="Ian G. Harris")
|
19,028 | f9c74fdedb99dcdc5420618698403b6b2700d389 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import string
from openpyxl import load_workbook
"""
openpyxl 不支持old .xls file format
how to convert .xls file to .xlsx?
"""
excel_file = 'wacai_2019-05-2019-05.xlsx'
sheet_names = ['支出', '收入', '转账', '借入借出', '收款还款']
class DealExcel:
    """Read an .xlsx workbook cell by cell via openpyxl."""

    def __init__(self, excel_path):
        self.excel_path = excel_path
        self.data = load_workbook(self.excel_path)
        self.sheetnames = self.data.sheetnames

    def convert_alphabet_to_int(self, alphabet):
        """Map a column letter to its 1-based index ('A' -> 1)."""
        return string.ascii_uppercase.index(alphabet) + 1

    def convert_int_to_alphabet(self, num):
        """Map a 1-based column index to its letter (1 -> 'A').
        Only single letters are supported, i.e. at most 26 columns."""
        return string.ascii_uppercase[num - 1]

    def read_by_sheetname(self, sheetname):
        """Return the sheet's cells as a list of row lists (None if the
        sheet name is unknown — the initial [] is returned unfilled)."""
        sheetname_list = []
        if sheetname in self.sheetnames:
            wb = self.data[sheetname]
            # openpyxl rows/columns are 1-based
            for j in range(1, wb.max_row + 1):
                line_list = []
                for i in range(1, wb.max_column + 1):
                    letter = self.convert_int_to_alphabet(i)
                    cell_value = wb[letter+str(j)].value
                    line_list.append(cell_value)
                sheetname_list.append(line_list)
        return sheetname_list

    def read_all_sheetnames(self):
        """Return {sheet name: rows} for every sheet in the workbook."""
        data_dict = {}
        for sheetname in self.sheetnames:
            sheet_data = self.read_by_sheetname(sheetname)
            data_dict[sheetname] = sheet_data
        return data_dict
if __name__ == "__main__":
    # Smoke test: dump every sheet of the sample workbook to stdout.
    deal_excel = DealExcel(excel_file)
    #deal_excel.read_by_sheetname('收入')
    content = deal_excel.read_all_sheetnames()
    print(content)
|
19,029 | bbbfbf38056a59b6fc27ce2898a7e6b0b3cbcd03 | keyword='A'
stuffed_serp='B'
match_pattern='C'
extracted_pos='D'
|
19,030 | cc1f3b543e7c1b341c5fbbf1463b7ceb250a2c80 | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 24 13:17:43 2020
@author: LilyHeAsamiko
"""
import numpy as np
def Rz(theta):
    """Rotation about the Z axis: diag(e^{-i t/2}, e^{+i t/2}).

    Bug fix: the original used exp(-1j*theta/2) for BOTH diagonal entries,
    which is a global phase, not a Z rotation.
    """
    RZ = np.array([[np.exp(-1j*theta/2), 0], [0, np.exp(1j*theta/2)]])
    return RZ
def Rx(theta):
    """Rotation about the X axis.

    Matrix: [[cos(t/2), -i sin(t/2)], [-i sin(t/2), cos(t/2)]].
    Bug fix: the original repeated cos(t/2) down the first column and had no
    imaginary off-diagonal terms, so it was not unitary and not Rx.
    """
    c, s = np.cos(theta/2), np.sin(theta/2)
    RX = np.array([[c, -1j*s], [-1j*s, c]])
    return RX
def Ry(theta):
    """Rotation about the Y axis.

    Matrix: [[cos(t/2), -sin(t/2)], [sin(t/2), cos(t/2)]] (real-valued).
    Bug fix: the original repeated cos(t/2) down the first column and put
    spurious 1j factors on the sines, so it was not unitary and not Ry.
    """
    c, s = np.cos(theta/2), np.sin(theta/2)
    RY = np.array([[c, -s], [s, c]])
    return RY
def H(theta):
    """Hadamard gate; theta is accepted only for a uniform gate interface."""
    return np.array([[1, 1], [1, -1]]) / np.sqrt(2)
def I(theta):
    """Identity: 2x2 for a scalar theta, (2n)x(2n) for a length-n sequence."""
    dim = 2 * len(theta) if np.size(theta) > 1 else 2
    return np.eye(dim)
def X(theta):
    """Pauli-X (NOT) gate; theta is ignored, kept for interface symmetry."""
    return np.array([[0, 1], [1, 0]])
def Y(theta):
    """Pauli-Y gate; theta is ignored, kept for interface symmetry."""
    return np.array([[0, -1j], [1j, 0]])
def Z(theta):
    """Pauli-Z gate; theta is ignored, kept for interface symmetry."""
    return np.array([[1, 0], [0, -1]])
def CX(theta):
    """Controlled-NOT gate on two qubits (control = qubit 0).

    Bug fix: np.array was called with four separate list arguments, which
    raises a TypeError; the entries also described the 4x4 identity rather
    than CNOT (which swaps the last two basis states |10> and |11>).
    """
    CX = np.array([[1, 0, 0, 0],
                   [0, 1, 0, 0],
                   [0, 0, 0, 1],
                   [0, 0, 1, 0]])
    return CX
def CZ(theta):
    """Controlled-Z gate on two qubits: diag(1, 1, 1, -1).

    Bug fix: np.array was called with four separate list arguments
    (TypeError); the rows must be wrapped in a single outer list.
    """
    CZ = np.array([[1, 0, 0, 0],
                   [0, 1, 0, 0],
                   [0, 0, 1, 0],
                   [0, 0, 0, -1]])
    return CZ
#test
# NOTE(review): np.array(..., int) on a complex-valued matrix raises a
# TypeError in modern NumPy - confirm these ad-hoc smoke lines still run.
I(np.pi/2) + np.array(Rz(2*np.pi),int)
X(np.pi/2) +Rx(np.pi)
#X2 = [[0, 0, 0, 1],[0 ,0, 1, 0],[0, 1, 0, 0],[1, 0, 0, 0]]
|
19,031 | dc812b844a6816e9096aae51ae4e4a196e210326 | from math import factorial
def curio(n):
    """Return True when n divides the sum of the factorials of its digits."""
    digit_factorial_sum = sum(factorial(int(d)) for d in str(n))
    return digit_factorial_sum % n == 0
N = int(input())
# Sum every "curious" number in [10, N): numbers that divide the sum of
# the factorials of their own digits.
sum = 0  # NOTE(review): shadows the builtin `sum`
for i in range(10, N):
    if curio(i):
        sum += i
print(sum)
|
19,032 | d532c8b0829ce3dfebcd98535f6e20f106b3ff1a | #!/usr/bin/python
'''
# crontab
*/5 7-20 * * 1-5 export DISPLAY=:0; /usr/bin/python /home/john/git/john/minas/scripts/chat_logs.py >> /var/log/john/chat_logger.log 2>&1
'''
import pyautogui
import pyperclip
import difflib
import os
from os.path import expanduser
from datetime import datetime
from time import sleep
import pygtk
pygtk.require('2.0')
import gtk
import wnck
import re
import sys
import time
import subprocess
from functools import partial
home = expanduser("~")
chat_home_dir = "{}/git/john/minas/chat_logs".format(home)
chatlog_cmd = partial(subprocess.check_output, cwd=chat_home_dir, shell=True)
MIN_IDLE = 20 * 1000 # (10 seconds)
MAX_TRIES = 3
MAX_COMMIT_MIN = 60
rooms = [
'thinkScript Lounge',
'Trader Lounge'
]
# commit chat logs
def commit_chat_logs():
    """Commit and push any changed chat logs, at most once per MAX_COMMIT_MIN.

    Uses chatlog_cmd (shell commands run inside chat_home_dir).  Does nothing
    when `git status` reports a clean tree or the last commit is too recent.
    """
    NO_CHANGES = 'nothing to commit, working tree clean'
    status = chatlog_cmd('git status')
    print(status)
    if NO_CHANGES in status:
        print("No changes to commit.")
    else:
        # Minutes elapsed since the last commit (unix timestamp of HEAD).
        cmd = "(git log --pretty=format:'%at' -1)"
        last_log = chatlog_cmd(cmd)
        last_commit_min = ( int(time.time()) - int(last_log) ) / 60
        if last_commit_min > MAX_COMMIT_MIN:
            # Stage, commit, then push to every configured remote.
            cmds = [
                'git add .',
                'git commit -m "autocommit chat logs"',
                'for i in `git remote `; do git push $i master; done'
            ]
            for c in cmds:
                res = chatlog_cmd(c)
                print(res)
        else:
            print("Not committing yet. {} "
                  "minutes since last commit." \
                  .format(last_commit_min))
def get_chat_logs():
    """Copy the text of each tracked chat-room window into a dated log file.

    For every room in `rooms`, the matching desktop window is focused, its
    contents are select-all/copied via simulated keystrokes, and any lines
    not already present in today's log are appended to
    <chat_home_dir>/<room>/<Room>-<YYYYMMDD>.txt.  The originally active
    window is re-focused at the end.  Python 2 / GTK2-only (wnck, pygtk).
    """
    active_window = None
    room_windows = {}
    screen = wnck.screen_get_default()
    # Flush pending GTK events so wnck sees an up-to-date window list.
    while gtk.events_pending():
        gtk.main_iteration()
    windows = screen.get_windows()
    active_window = screen.get_active_window()
    # Match each configured room name against the open window titles.
    for w in windows:
        for room in rooms:
            titlePattern = re.compile('.*{}.*'.format(room))
            if titlePattern.match(w.get_name()):
                room_windows[room] = w
    for room, w in room_windows.iteritems():
        print(w.get_name())
        print(w.get_pid())
        w.activate(0)
        x, y, width, height = w.get_client_window_geometry()
        # Click inside the window, then select-all and copy its text to the
        # clipboard; the sleeps give the UI time to react.
        pyautogui.click(x=x+20, y=y+height/2)
        sleep(.2)
        pyautogui.hotkey('ctrl', 'a')
        sleep(.5)
        pyautogui.hotkey('ctrl', 'c')
        sleep(.5)
        data = pyperclip.paste()
        # print("data: {}".format(data))
        filename = "{r}-{d}.txt".format(
            r=room.replace(' ', ''),
            d=datetime.now().strftime("%Y%m%d")
        )
        folder = '{c}/{r}'.format(c=chat_home_dir, r=room)
        if not os.path.exists(folder):
            os.mkdir(folder)
        old_data = ''
        try:
            chat_log = '{dir}/{f}'.format(
                dir=folder,
                f=filename)
            with open(chat_log, 'r') as f:
                old_data = f.read()
        except IOError:
            # First run of the day: no existing log file yet.
            pass
        if old_data:
            # Keep only lines that differ from today's existing log.
            diff = difflib.ndiff(old_data.split('\n'), data.split('\n'))
            changes = [l[2:] for l in diff if l.startswith('+ ') or l.startswith('- ')]
            changes = [x for x in changes if x != '']
            print("{} new lines".format(len(changes)))
            if len(changes) > 0:
                data = '\n'.join(changes)
            else:
                continue
        else:
            print("Writing new file.")
        mode = 'a' if os.path.exists(chat_log) else 'w'
        with open(chat_log, mode) as f:
            f.write(data)
        # deduplicate to make sure difflib didnt mess up
        cmd = "awk '!seen[$0]++ == 1' '{f}' > '{f}.tmp' && mv '{f}.tmp' '{f}' ".format(f=chat_log)
        print(cmd)
        out = subprocess.check_output(
            [cmd],
            cwd=chat_home_dir,
            shell=True
        )
        print(out)
        sleep(1)
    active_window.activate(0)
def get_chat_logs_on_idle():
    """Run get_chat_logs() once the machine has been idle long enough.

    Polls `xprintidle` up to MAX_TRIES times, three seconds apart.
    Returns 1 when the scrape ran, 0 when the machine never went idle.
    """
    # Bug fix: the original tested `tries >= MAX_TRIES` inside
    # `for tries in xrange(0, MAX_TRIES)`, a condition that can never hold,
    # so the "Too busy" sys.exit(1) path was unreachable dead code.
    for tries in xrange(0, MAX_TRIES):
        idle = int(subprocess.check_output(['xprintidle']))
        if idle > MIN_IDLE:
            get_chat_logs()
            return 1
        print("Idle at {}. Waiting for greater than {}".format(
            idle, MIN_IDLE))
        sleep(3)
    print("Computer is not idle. Not running for now.")
    return 0
def main():
    """Scrape the chat windows and, if anything was captured, commit it."""
    if get_chat_logs_on_idle() == 1:
        commit_chat_logs()
if __name__ == '__main__':
main() |
19,033 | 0d5afd40efb6d35517d01b6bcb73655ec7ac8e29 | r"""
Empty Files (:mod:`skbio.io.format.emptyfile`)
==============================================
.. currentmodule:: skbio.io.format.emptyfile
This format exists to make debugging simpler, often an empty file is a mistake
which can take an embarrasing amount of time to notice. This format has only
a sniffer and no readers or writers, so error messages will indicate as such
if an empty file is accidentally used as input.
Format Support
--------------
**Has Sniffer: Yes**
Format Specification
--------------------
An empty file consists of only whitespace characters.
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from skbio.io import create_format
emptyfile = create_format('<emptyfile>')
@emptyfile.sniffer()
def _empty_file_sniffer(fh):
    """Sniff positive iff the file holds only whitespace (or nothing)."""
    if any(line.strip() for line in fh):
        return False, {}
    return True, {}
|
19,034 | 87ae0e96d9e269f08cea98a4bd90b5803a954976 | from includes.interfaces.Processor import Processor
from includes.configs.Defaults import Default
class InputProcessor(Processor):
    """Splits raw user input into a command token plus parameter tokens."""

    @staticmethod
    def process(processor, user_input):
        command, params = InputProcessor.parse_input(user_input)
        # Delegate to the supplied processor (e.g. ParkingManager or
        # AdsManager): look up the handler for `command` and invoke it with
        # the remaining tokens (e.g. ["KA-BL-93-4444", "White"]).
        print(processor.process(command)(params))

    @staticmethod
    def parse_input(user_input):
        tokens = user_input.split()
        return tokens[Default.COMMAND_INDEX], tokens[Default.PARAM_INDEX:]
19,035 | 9ba5fdecee5c46c8b1b163871b5c4a1cf22aeb61 | import copy
class BaseEntity(object):
    """Base class for persisted entities wrapping a raw document dict."""

    def __init__(self, document=None):
        """Wrap `document` (a dict); a fresh dict is used when omitted.

        Bug fix: the original used a mutable default (`document={}`), so
        every instance created without an argument shared one dict.
        """
        super(BaseEntity, self).__init__()
        self._document = {} if document is None else document

    @property
    def id(self):
        """The backing document's "_id" value, or None when unset."""
        return self._document.get("_id")

    @id.setter
    def id(self, new_id):
        if self.id:
            # Bug fix: raise a meaningful, catchable error instead of a bare
            # BaseException (which even `except Exception` would miss).
            raise ValueError("id is already set and cannot be reassigned")
        self._document["_id"] = new_id

    @property
    def document(self):
        """A deep copy of the document; mutating it leaves the entity intact."""
        return copy.deepcopy(self._document)
19,036 | 6217c18889913942ca8d2d3e28ae81c59d564cbb | #!/usr/bin/python
import rospy
# Minimal ROS node (Python 2): prints a heartbeat message at 2 Hz until
# the ROS master shuts the node down.
rospy.init_node('first_node')
rate = rospy.Rate(2)  # loop at 2 Hz
while not rospy.is_shutdown():
    print "this is my first node"
    rate.sleep()
|
19,037 | 798e39210a35a31e082bdb8b4e13ac50e9218d37 | import pathlib
import json
# Resolve the credentials file relative to this script's own directory so
# it works regardless of the current working directory.
current_dir: pathlib.PurePath = pathlib.Path(__file__).parent
google_credentials_file = "./google_browser_login_credentials.json"
google_credentials_file = current_dir.joinpath(google_credentials_file)
print(google_credentials_file)
def load_google_credentials():
    """Return {"ID": ..., "PW": ...} loaded from the credentials JSON file."""
    with open(str(google_credentials_file)) as f:
        data = json.load(f)
    return {"ID": data["ID"], "PW": data["PW"]}
def test():
    # Smoke test: print the loaded credentials.
    c = load_google_credentials()
    print(c)
if __name__ == '__main__':
    test()
|
19,038 | 83c2175b913d70a83d805eb558f07fe91bd6bf41 | import FWCore.ParameterSet.Config as cms
daughterID = "( daughter(0).userInt('isHighPt')==1 || \
daughter(1).userInt('isHighPt')==1 )"
daughterKin = "((daughter(0).userFloat('pTtuneP') > 50 & abs(daughter(0).eta) < 2.1) || \
(daughter(1).userFloat('pTtuneP') > 50 & abs(daughter(1).eta) < 2.1))"
daughterCharge = "((daughter(0).charge == -daughter(1).charge) || \
(daughter(0).pdgId == -daughter(1).pdgId))"
boostedIso = "? deltaR(daughter(0).eta,daughter(0).phi,daughter(1).eta,daughter(1).phi) < 0.3 & \
(daughter(0).userFloat('trackIso')-daughter(1).userFloat('innerPt'))/daughter(0).pt < 0.1 & \
(daughter(1).userFloat('trackIso')-daughter(0).userFloat('innerPt'))/daughter(1).pt < 0.1 ? 1 : 0"
regularIso = "? deltaR(daughter(0).eta,daughter(0).phi,daughter(1).eta,daughter(1).phi) > 0.3 & \
daughter(0).userFloat('trackIso')/daughter(0).pt < 0.1 & \
daughter(1).userFloat('trackIso')/daughter(1).pt < 0.1 ? 1 : 0"
looseIso = "daughter(0).userFloat('pfIso04R') < 0.25 & \
daughter(1).userFloat('pfIso04R') < 0.25 "
Ztoee = cms.EDProducer( "CandViewCombiner",
decay = cms.string("idElectrons idElectrons"),
cut = cms.string("(daughter(0).pt > 115 || daughter(1).pt > 115)"),
checkCharge = cms.bool(False) )
Ztomumu = cms.EDProducer( "CandViewCombiner",
decay = cms.string("idMuons idMuons"),
cut = cms.string( daughterID +" & "+ daughterKin ),
checkCharge = cms.bool(False) )
leptonicV = cms.EDProducer( "CandViewMerger",
src = cms.VInputTag( "Ztoee", "Ztomumu"),
cut = cms.string("") )
leptonicVFilter = cms.EDFilter( "CandViewCountFilter",
src = cms.InputTag("leptonicV"),
minNumber = cms.uint32(1),
filter = cms.bool(True) )
ZdaughterCharge = cms.EDFilter( "CandViewSelector",
src = cms.InputTag("leptonicV"),
cut = cms.string( daughterCharge ),
filter = cms.bool(True) )
ZdaughterIso = cms.EDFilter( "CandViewSelector",
src = cms.InputTag("ZdaughterCharge"),
cut = cms.string( boostedIso +" || "+ regularIso ),
filter = cms.bool(True) )
leptonicVSelector = cms.EDFilter( "CandViewSelector",
src = cms.InputTag("ZdaughterCharge"),
cut = cms.string( "pt > 170. & 70. < mass < 110."),
filter = cms.bool(True) )
leptonicVSequence = cms.Sequence( Ztoee +
Ztomumu +
leptonicV +
leptonicVFilter +
ZdaughterCharge +
leptonicVSelector )
|
19,039 | 21dc13742d1657dac1b1227fbdb2d052e59c98b3 | import setuptools
# Use the README as the PyPI long description.
with open("README.md", "r") as fh:
    long_description = fh.read()
# Distribution metadata for the pyband package (BandChain client library).
setuptools.setup(
    name="pyband",
    packages=["pyband"],
    version="0.1.0",
    license="MIT",
    description="Python library for BandChain",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Band Protocol",
    author_email="dev@bandprotocol.com",
    url="https://github.com/bandprotocol/bandchain",
    keywords=["BAND", "BLOCKCHAIN", "ORACLE"],
    install_requires=["requests", "dacite", "bech32", "bip32", "ecdsa", "mnemonic"],
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
)
|
19,040 | c19672f9f1221ea217f08bff99c33bfae3b7d120 | from enum import Enum
from unittest.mock import MagicMock
from django.test import TestCase
from learn.infrastructure.configuration import LearnConfiguration
from learn.models import Configuration
class ConfigurationIntTests(TestCase):
def setUp(self):
self.configuration_objects = MagicMock()
self.configuration_model = MagicMock()
self.configuration_model.objects = self.configuration_objects
self.conf = LearnConfiguration()
def test_shouldReturnConfigurationAtSpecifiedKey(self):
# Given
enum = Enum('enum', 'A_KEY')
self.configuration_objects.filter.return_value = [Configuration(key='A_KEY', value='AValue'), ]
# When
result = self.conf.get_configuration(enum.A_KEY, configuration_model=self.configuration_model)
# Then
self.assertEqual(result, 'AValue')
def test_shouldReturnNoneWhenNoConfigurationExists(self):
# Given
enum = Enum('enum', 'NONE')
self.configuration_objects.filter.return_value = []
# When
try:
self.conf.get_configuration(enum.NONE, configuration_model=self.configuration_model)
# Then
except AttributeError as e:
self.assertEqual("'Settings' object has no attribute 'NONE'", str(e))
def test_shouldReturnDefaultMaxWordsLearningFromSettingsWhenNoEntryExists(self):
enum = Enum('enum', 'LEARN_RYTHM_MULTIPLIER LEARN_BASE_RYTHM LEARN_MAX_WORDS')
for given_key, expected_value in {
'LEARN_RYTHM_MULTIPLIER': '2',
'LEARN_BASE_RYTHM': '2',
'LEARN_MAX_WORDS': '5',
}.items():
# Given
self.configuration_objects.filter.return_value = []
# When
result = self.conf.get_configuration(enum.__getattr__(given_key),
configuration_model=self.configuration_model)
# Then
self.assertEqual(result, expected_value)
def test_shouldRecordGivenConfiguration(self):
# Given
self.configuration_objects.create = MagicMock()
# When
self.conf.set_configuration('SOME_KEY', 'SOME_VALUE', configuration_model=self.configuration_model)
# Then
kwargs = self.configuration_objects.create.call_args_list[0][1]
self.assertEqual(kwargs['key'], 'SOME_KEY')
self.assertEqual(kwargs['value'], 'SOME_VALUE')
|
19,041 | f6336ea7e163b3ebfa98e00e14b0aaf3552e0f74 | # -*- codeing = utf-8 -*-
# @Time : 2021/1/21 0021 14:42
# @Author: 罗路
# @File: demo1.py
# @Software: PyCharm
#增加: append
'''
namelist = ['小张','小王','小李']
print('---增加前,名单列表的数据---')
for name in namelist:
print(name)
nametemp = input("请输入添加学生的姓名:")
namelist.append(nametemp) #在末尾追加元素 类似Push
print('---增加后,名单列表的数据---')
for name in namelist:
print(name)
'''
'''
a = [1,2]
b = [3,4]
# a.append(b)
# print(a)
a.extend(b) #将b 逐个追加到a列表种
print(a)
'''
'''
#增
a = [0,1,2]
a.insert(1,3) # 指定下标 增加指定元素
print(a)
'''
'''
#删
moviename = ['骇客帝国','金钱帝国','速度与激情']
print(moviename)
del moviename[1]
moviename.remove('骇客帝国') # 删除找到的第一个 骇客帝国
# moviename.pop() 删除最后一个
print(moviename)
'''
'''
#改
moviename = ['骇客帝国','金钱帝国','速度与激情']
moviename[1] = '我是杀人犯'
print(moviename)
'''
'''
#查询 in , not in
moviename = ['骇客帝国','金钱帝国','速度与激情']
findName = input("请输入你要查找的电影名称:")
if findName in moviename:
print("在列表中找到了")
else:
print('没找到')
'''
'''
# index方法 查找元素,从哪到哪
mylist = ['a','b','c','d','e']
print(mylist.index('c',0,4))
print(mylist.count('d')) #统计d出现了几次
'''
'''
a = [1,2,3,4]
a.reverse()
print(a)
a.sort()
print(a)
'''
# Tutorial demo: nested lists (three empty "offices") and school-name groups.
offices = [[],[],[]]
schoolName = [['北京大学','清华大学'],['南开大学','天津大学'],['山东大学']]
print(schoolName[0][0:])
import random
names = ['A','B','C','D','E']
# Randomly assign each person to one of the three offices.
for name in names:
    index = random.randint(0,2)
    offices[index].append(name)
print(offices)
19,042 | 511e967653ec3362972b679edafdfb80185b8000 | import os
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
scores = [3.0, 1.0, 0.2]
"""Softmax."""
def softmax(x):
    """Map a sequence of scores to a probability distribution (softmax)."""
    exponentials = np.exp(x)
    return exponentials / sum(exponentials)
print(softmax(scores))
|
19,043 | b2f65827ff7e8bba45fec24b5f68034b387090f5 | """
Synchronization operations inside the IR.
"""
from collections import defaultdict
from devito.data import FULL
from devito.tools import Pickable, filter_ordered
from .utils import IMask
__all__ = ['WaitLock', 'ReleaseLock', 'WithLock', 'FetchUpdate', 'PrefetchUpdate',
'normalize_syncs']
class SyncOp(Pickable):
__rargs__ = ('handle', 'target')
__rkwargs__ = ('tindex', 'function', 'findex', 'dim', 'size', 'origin')
def __init__(self, handle, target, tindex=None, function=None, findex=None,
dim=None, size=1, origin=None):
self.handle = handle
self.target = target
self.tindex = tindex
self.function = function
self.findex = findex
self.dim = dim
self.size = size
self.origin = origin
def __eq__(self, other):
return (type(self) is type(other) and
self.handle == other.handle and
self.target is other.target and
self.tindex == other.tindex and
self.function is other.function and
self.findex == other.findex and
self.dim is other.dim and
self.size == other.size and
self.origin == other.origin)
def __hash__(self):
return hash((self.__class__, self.handle, self.target, self.tindex,
self.function, self.findex, self.dim, self.size, self.origin))
def __repr__(self):
return "%s<%s>" % (self.__class__.__name__, self.handle)
__str__ = __repr__
@property
def lock(self):
return self.handle.function
# Pickling support
__reduce_ex__ = Pickable.__reduce_ex__
class SyncCopyOut(SyncOp):
def __repr__(self):
return "%s<%s->%s>" % (self.__class__.__name__, self.target, self.function)
__str__ = __repr__
@property
def imask(self):
ret = [self.handle.indices[d] if d.root in self.lock.locked_dimensions else FULL
for d in self.target.dimensions]
return IMask(*ret, getters=self.target.dimensions, function=self.function,
findex=self.findex)
class SyncCopyIn(SyncOp):
def __repr__(self):
return "%s<%s->%s>" % (self.__class__.__name__, self.function, self.target)
__str__ = __repr__
@property
def imask(self):
ret = [(self.tindex, self.size) if d.root is self.dim.root else FULL
for d in self.target.dimensions]
return IMask(*ret, getters=self.target.dimensions, function=self.function,
findex=self.findex)
class WaitLock(SyncCopyOut):
pass
class WithLock(SyncCopyOut):
pass
class ReleaseLock(SyncCopyOut):
pass
class FetchUpdate(SyncCopyIn):
pass
class PrefetchUpdate(SyncCopyIn):
pass
def normalize_syncs(*args):
    """Merge several {key: [SyncOp]} mappings into one.

    Duplicates are dropped while preserving first-occurrence order.
    Returns None for no args and the sole mapping untouched for one arg.
    Raises ValueError when, for any key, the merged list mixes WaitLock
    and WithLock operations.
    """
    if not args:
        return
    if len(args) == 1:
        return args[0]
    syncs = defaultdict(list)
    for _dict in args:
        for k, v in _dict.items():
            syncs[k].extend(v)
    # Deduplicate while keeping the original insertion order.
    syncs = {k: filter_ordered(v) for k, v in syncs.items()}
    for v in syncs.values():
        waitlocks = [s for s in v if isinstance(s, WaitLock)]
        withlocks = [s for s in v if isinstance(s, WithLock)]
        if waitlocks and withlocks:
            # We do not allow mixing up WaitLock and WithLock ops
            raise ValueError("Incompatible SyncOps")
    return syncs
|
19,044 | f44b9d0d9a989691fc6b4377868fa5cf2a67d77c | import datetime
import keras
import gym
from tools.plot_tool import plot_with_avg_std
from cartpole_agents.drqn_cartpole import DRQN_Cartpole_Agent, LinearSchedule
if __name__ == "__main__":
EPISODES = 10
# In case of CartPole-v0, maximum length of episode is 200
env = gym.make('CartPole-v1')
# get size of state and action from trading_environment
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
model = keras.models.load_model('/Users/mmw/Documents/GitHub/rl_for_optimal_exec/drqn_cartpole/drqn_cartpole_v0_10000_eps.h5')
agent = DRQN_Cartpole_Agent(state_size,
action_size,
lookback=5,
initial_exploration_eps=0,
exploration=LinearSchedule(1, 0, initial_p=0),
model=model)
scores, episodes = [], []
avg_step = 1
for eps in range(EPISODES):
eps_rew = agent.sample_transition_pairs(env, render=(eps % avg_step == 0), max_step=500)
scores.append(eps_rew)
if eps % avg_step == 0:
avg = sum(scores[-avg_step-1:-1]) / avg_step
print('{} episode: {}/{}, average reward: {}'.
format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), eps, EPISODES, avg))
env.reset()
plot_with_avg_std(scores, 1, xlabel=f'Number of Episodes in {1}') |
19,045 | 13ecec9d8667877b80951e7ad696d038a09ad541 | def toposort(G):
ans = list()
indeg = [ 0 for _ in len(G)]
for u in range(len(G)):
for v in G[u]:
indeg[v] += 1
pending = list()
for u in range(len(G)):
if indeg[u]==0:
pending.append(u)
while len(pending) ! = 0:
u=pending.pop()
ans.append(u)
for v in G[u]:
indeg[v] -= 1
if indeg[v] == 0:
pending.append(v)
return ans
# Question: how could we detect whether more than one topological order
# exists?  Answer: if the `pending` collection ever holds more than one
# element at a time, then multiple valid orders exist.
|
19,046 | 18187d0484e10cf2ab50913477eba28edfa1c548 | # Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Profiler Trace Handler."""
from __future__ import annotations
import abc
import pathlib
from typing import TYPE_CHECKING, Dict, List, Tuple, Union
from composer.core.callback import Callback
if TYPE_CHECKING:
from composer.core import Timestamp
__all__ = ['TraceHandler']
class TraceHandler(Callback, abc.ABC):
"""Base class for Composer Profiler trace handlers.
Subclasses should implement :meth:`process_duration_event`, :meth:`process_instant_event`,
:meth:`process_counter_event`, and :meth:`process_chrome_json_trace_file` to record trace events.
Since :class:`TraceHandler` subclasses :class:`.Callback`, a trace handler can run on any
:class:`.Event` (such as on :attr:`.Event.INIT` to open files or on :attr:`.Event.BATCH_END` to periodically dump
data to files) and use :meth:`.Callback.close` to perform any cleanup.
"""
def process_duration_event(
self,
name: str,
categories: Union[List[str], Tuple[str, ...]],
is_start: bool,
timestamp: Timestamp,
wall_clock_time_ns: int,
) -> None:
"""Invoked whenever there is a duration event to record.
This method is called twice for each duration event -- once with ``is_start = True``,
and then again with ``is_start = False``. Interleaving events are not permitted.
Specifically, for each event (identified by the ``name``), a call with ``is_start = True`` will be followed
by a call with ``is_start = False`` before another call with ``is_start = True``.
Args:
name (str): The name of the event.
categories (Union[List[str], Tuple[str, ...]]): The categories for the event.
is_start (bool): Whether the event is a start event or end event.
timestamp (Timestamp): Snapshot of the training time.
wall_clock_time_ns (int): The :py:func:`time.time_ns` corresponding to the event.
"""
del name, categories, is_start, timestamp, wall_clock_time_ns # unused
def process_instant_event(
self,
name: str,
categories: Union[List[str], Tuple[str, ...]],
timestamp: Timestamp,
wall_clock_time_ns: int,
) -> None:
"""Invoked whenever there is an instant event to record.
Args:
name (str): The name of the event.
categories (List[str] | Tuple[str, ...]): The categories for the event.
timestamp (Timestamp): Snapshot of current training time.
wall_clock_time_ns (int): The :py:func:`time.time_ns` corresponding to the event.
"""
del name, categories, timestamp, wall_clock_time_ns # unused
def process_counter_event(
self,
name: str,
categories: Union[List[str], Tuple[str, ...]],
timestamp: Timestamp,
wall_clock_time_ns: int,
values: Dict[str, Union[int, float]],
) -> None:
"""Invoked whenever there is an counter event to record.
Args:
name (str): The name of the event.
categories (List[str] | Tuple[str, ...]): The categories for the event.
timestamp (Timestamp): The timestamp.
wall_clock_time_ns (int): The :py:func:`time.time_ns` corresponding to the event.
values (Dict[str, int | float]): The values corresponding to this counter event.
"""
del name, categories, timestamp, wall_clock_time_ns, values # unused
def process_chrome_json_trace_file(self, filepath: pathlib.Path) -> None:
"""Invoked when there are events in Chrome JSON format to record.
See `this document <https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview>`_
for more information.
Args:
filepath (pathlib.Path): The filepath to a Chrome JSON trace file.
"""
del filepath # unused
|
19,047 | 996bc5104ca0697524cc9ebdd9cbce6d46a73518 | #-*- encoding: utf-8 -*-
def llegir_placa(p):
    """Return the 7-character licence plate stored at slot p, or "XXXXXXX"
    when the slot is empty.  (Original Catalan docstring translated.)
    """
    # Each record in places.dat is exactly 7 bytes, so slot p starts at p*7.
    s = ""
    f=open('places.dat','r+')
    posicio = p*7
    f.seek(posicio)
    s+=f.read(7)
    f.close()
    return s
def ocupar_placa(p,m):
    """Write plate m (must be exactly 7 characters) into slot p (< 1000).

    Returns True when the write succeeded, False for invalid arguments.
    (Original Catalan docstring translated.)
    NOTE(review): the original docstring says the slot should be checked for
    being free first, but the code overwrites it unconditionally - confirm
    which behaviour is intended.
    """
    if len(m) == 7 and p < 1000:
        f=open('places.dat','r+')
        posicio = p*7
        f.seek(posicio)
        f.write(m)
        f.close()
        return True
    else:
        return False
def is_in(m):
    """Return the slot index where plate m is parked, or -1 when absent.

    Bug fix: the byte offset was divided with `/`, which yields a float on
    Python 3; integer division keeps the slot index an int on 2 and 3.
    """
    with open('places.dat', 'r') as f:
        contents = f.read()
    if str(m) in contents:
        return contents.find(m) // 7
    return -1
def empty_number():
    """Return the list of empty slot indices (slots storing "XXXXXXX").

    Bug fixes versus the original:
    - slot 0 was skipped because the guard was `if i > 0` instead of `>= 0`;
    - after find() returned -1 the loop kept rescanning from position 6,
      wasting a full pass over the file.
    """
    with open('places.dat', 'r') as f:
        contents = f.read()
    empty_slots = []
    pos = contents.find("XXXXXXX")
    while pos >= 0:
        empty_slots.append(pos // 7)
        # Jump one full 7-byte record ahead before searching again.
        pos = contents.find("XXXXXXX", pos + 7)
    return empty_slots
def buidar():
    """Reset the car park: overwrite the whole file with the 'X' marker.

    (Original Catalan docstring translated.)
    """
    with open('places.dat', 'r+') as f:
        f.seek(0)
        f.write('X' * 7007)
|
19,048 | 5abe89801e4ec2b3cb174774d0776af7b8d70bfe | def calculate_score(games):
abigal_score = 0
ben_score = 0
for game in games:
if game[0] == "R" and game[1] == "P":
# ben wins
ben_score += 1
elif game[0] == "P" and game[1] == "R":
# abigal wins
abigal_score += 1
elif game[0] == "S" and game[1] == "R":
# ben wins
ben_score += 1
elif game[0] == "R" and game[1] == "S":
# abigal wins
abigal_score += 1
elif game[0] == "S" and game[1] == "P":
# abigal wins
abigal_score += 1
elif game[0] == "P" and game[1] == "S":
# ben wins
ben_score += 1
elif game[0] == "R" and game[1] == "P":
# ben wins
ben_score += 1
else:
# tie
pass
if abigal_score > ben_score:
return "Abigail"
elif abigal_score == ben_score:
return "Tie"
else:
return "Benson"
|
19,049 | 64eb190c5b97c411a7fe15a099f29ec6717b22e0 | import io
import os
import subprocess
import tarfile
import tempfile
import pytest
from exodus_bundler.bundling import logger
from exodus_bundler.cli import configure_logging
from exodus_bundler.cli import parse_args
parent_directory = os.path.dirname(os.path.realpath(__file__))
chroot = os.path.join(parent_directory, 'data', 'binaries', 'chroot')
ldd_path = os.path.join(chroot, 'bin', 'ldd')
fizz_buzz_path = os.path.join(chroot, 'bin', 'fizz-buzz')
def run_exodus(args, **options):
    """Invoke the `exodus` CLI; return (returncode, stdout, stderr)."""
    options.setdefault('universal_newlines', True)
    proc = subprocess.Popen(
        ['exodus'] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        **options)
    out, err = proc.communicate()
    return proc.returncode, out, err
def test_logging_outputs(capsys):
# There should be no output before configuring the logger.
logger.error('error')
out, err = capsys.readouterr()
print(out, err)
assert len(out) == len(err) == 0
# The different levels should be routed separately to stdout/stderr.
configure_logging(verbose=True, quiet=False)
logger.debug('debug')
logger.warn('warn')
logger.info('info')
logger.error('error')
out, err = capsys.readouterr()
assert all(output in out for output in ('info'))
assert all(output not in out for output in ('debug', 'warn', 'error'))
assert all(output in err for output in ('warn', 'error'))
assert all(output not in err for output in ('info', 'debug'))
def test_missing_binary(capsys):
# Without the --verbose flag.
command = 'this-is-almost-definitely-not-going-to-be-a-command-anywhere'
returncode, stdout, stderr = run_exodus([command])
assert returncode != 0, 'Running exodus should have failed.'
assert 'Traceback' not in stderr, 'Traceback should not be included without the --verbose flag.'
# With the --verbose flag.
returncode, stdout, stderr = run_exodus(['--verbose', command])
assert returncode != 0, 'Running exodus should have failed.'
assert 'Traceback' in stderr, 'Traceback should be included with the --verbose flag.'
def test_required_argument():
with pytest.raises(SystemExit):
parse_args([])
parse_args(['/bin/bash'])
def test_return_type_is_dict():
assert type(parse_args(['/bin/bash'])) == dict
def test_quiet_and_verbose_flags():
result = parse_args(['--quiet', '/bin/bash'])
assert result['quiet'] and not result['verbose']
result = parse_args(['--verbose', '/bin/bash'])
assert result['verbose'] and not result['quiet']
def test_writing_bundle_to_disk():
f, filename = tempfile.mkstemp(suffix='.sh')
os.close(f)
args = ['--ldd', ldd_path, '--output', filename, fizz_buzz_path]
try:
returncode, stdout, stderr = run_exodus(args)
assert returncode == 0, 'Exodus should have exited with a success status code, but didn\'t.'
with open(filename, 'rb') as f_in:
first_line = f_in.readline().strip()
assert first_line == b'#! /bin/bash', stderr
finally:
if os.path.exists(filename):
os.unlink(filename)
def test_writing_bundle_to_stdout():
args = ['--ldd', ldd_path, '--output', '-', fizz_buzz_path]
returncode, stdout, stderr = run_exodus(args)
assert returncode == 0, 'Exodus should have exited with a success status code, but didn\'t.'
assert stdout.startswith('#! /bin/sh'), stderr
def test_writing_tarball_to_disk():
f, filename = tempfile.mkstemp(suffix='.tgz')
os.close(f)
args = ['--ldd', ldd_path, '--output', filename, '--tarball', fizz_buzz_path]
try:
returncode, stdout, stderr = run_exodus(args)
assert returncode == 0, 'Exodus should have exited with a success status code, but didn\'t.'
assert tarfile.is_tarfile(filename), stderr
with tarfile.open(filename, mode='r:gz') as f_in:
assert 'exodus/bin/fizz-buzz' in f_in.getnames()
finally:
if os.path.exists(filename):
os.unlink(filename)
def test_writing_tarball_to_stdout():
args = ['--ldd', ldd_path, '--output', '-', '--tarball', fizz_buzz_path]
returncode, stdout, stderr = run_exodus(args, universal_newlines=False)
assert returncode == 0, 'Exodus should have exited with a success status code, but didn\'t.'
stream = io.BytesIO(stdout)
with tarfile.open(fileobj=stream, mode='r:gz') as f:
assert 'exodus/bin/fizz-buzz' in f.getnames(), stderr
|
19,050 | faa03b63942a550a62b4c6d3f07d39371b810fd4 | """This exercise will help us increase the abstraction of our code.
Instead of just defininig a function that draws a square, and then a function
that draws a triangle, let's see how they are similar and what parts we can
abstract to make it more useful.
Let's write a function that will creat a regular polygon of N sides.
We also have to have a parameter for the Sidelength(s)
We also have to take into account the angle will change when the number of sides
change."""
# You will need to import turtle and create a turtle
"""
1. If the total angles must add up to 360 degrees, then how can we use N to
determine what angle it should be?
*Hint: For 3 sides the angle is 120, for 4 sides the angle is 90.
3 * 120 = 360 ; 4 * 90 = 360
Therefore N * (theta) = 360. What's (theta)?
"""
"""
2. Create a function that will take into account 3 parameters: N, anyTurtle, s
and create a regular polygon of sidelength(s) and number of sides N.
"""
def regularPoly(N, anyTurtle, s):
    """Draw a regular polygon with N sides of length s using anyTurtle.

    The exterior angles of any polygon sum to 360 degrees, so the turtle
    turns 360/N after each side (the answer to exercise question 1).
    This implements the stub the exercise's own docstring describes.
    """
    angle = 360.0 / N
    for _ in range(N):
        anyTurtle.forward(s)
        anyTurtle.left(angle)
|
19,051 | 615ed81d6c4528f1de097d4e1039a6a1e89aba5c | from django.contrib.auth.decorators import user_passes_test
from crequest.middleware import CrequestMiddleware
def group_required(group_names):
    """Decorator factory: require membership in at least one given group.

    Bug fix: the original evaluated the membership test ONCE, at decoration
    time, and passed the resulting *bool* to user_passes_test, which expects
    a callable; `test` was also left unbound (NameError) when the user was
    anonymous.  The check is now a function user_passes_test calls per
    request with request.user.
    """
    def _in_groups(user):
        try:
            if user.is_authenticated():
                return user.groups.filter(name=group_names).exists()
        except AttributeError:
            pass
        return False
    return user_passes_test(_in_groups)
19,052 | 4fab1495794a8d1a8dbcedcd931d4c4857de55d7 | x=[num for num in range(0,51) if num%3==0]
print(x) |
19,053 | 7001bc246e550b9fc0cfbe3e9760583a6019fbb6 | import socket
import sys
import subprocess
# Clear the terminal so the scan banner starts on a fresh screen.
subprocess.call('clear', shell=True)
remoteServer = input("enter the host to scan")
remoteServerIP = socket.gethostbyname(remoteServer)
print("_"*60)
print("please wait, scanning undergoing.....", remoteServerIP)
print("_"*60)
try:
    # Probe the well-known port range; connect_ex returns 0 when the
    # port accepts a TCP connection.
    for port in range(1, 1025):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        result = sock.connect_ex((remoteServerIP, port))
        if result == 0:
            # Fix: the original called the integer `port` as a function
            # (`port("port {}:".format(port))`), raising TypeError on the
            # first open port instead of printing it.
            print("port {}:".format(port))
        sock.close()
except socket.error:
    print("error in reaching host")
    sys.exit()
|
19,054 | bb7eabab28c95ec2a36d51ecd09ad397dafacd62 | from .get_links import get_all_links
from queue import PriorityQueue
class Graph():
    """Link graph used to search for a path between two wiki-style pages.

    Each known page title maps to its outgoing links and the title it was
    first reached from ("previous"), which lets `make_path` walk backwards
    from the destination to rebuild the full path. A priority queue orders
    unexplored pages by how many links they share with the destination.
    """

    def __init__(self, source, destination):
        self.graph = {}              # title -> {'links': ..., 'previous': title}
        self.start = source
        self.end = destination
        self.pq = PriorityQueue()    # (priority, title); lower = more promising
        self.found = False
        self.path = []               # start..end titles once a path is found
        self.seen = set()            # titles already expanded

    def __str__(self):
        """Debug summary: current path, per-page link counts, queue size."""
        st = 'PATH: {0} \n'.format(str(self.path))
        for title in self.graph.keys():
            st += 'Title: ' + title + '\nprev: ' + self.graph[title]["previous"] + '\n' + str(len(self.links(title))) + '\n'
        st += "PQ approx length " + str(self.pq.qsize()) + "\n"
        st += "Checked this many pages: " + str(len(self.seen)) + "\n"
        return st

    def add(self, title, links, previous):
        """Register a page with its outgoing links and its predecessor."""
        self.graph[title] = {
            'links' : links,
            'previous' : previous,
        }

    def set_previous(self, title, previous):
        """Repoint a page's predecessor (used when the destination is hit)."""
        self.graph[title]["previous"] = previous
        print(self.graph[title])

    def prioritize(self, title):
        """Expand `title`: fetch each child's links and queue the children.

        Priority is the negative overlap with the destination's links, so
        pages sharing more links with the target are popped first. Returns
        the exception on failure (best-effort crawl), else None.
        """
        try:
            for link in self.links(title):
                print(title)
                if link in self.seen:
                    continue
                ls = get_all_links(link)
                self.add(link, ls, title)
                if (self.end in ls):
                    self.set_previous(self.end, link)
                    self.make_path(self.end)
                    print("FOUND!!!")
                    print(self.found)
                    print(self.end, link)
                    print(self.path)
                    return
                # mul by -1 to give link with most common items "highest" priority
                priority = (-1 * len(ls.intersection(self.links(self.end))))
                print("getting links for: " + link + " priority " + str(priority))
                self.seen.add(link)
                self.pq.put((priority, link))
        except Exception as e:
            print(e)
            print(self)
            return e

    def links(self, title):
        """Outgoing links recorded for `title`."""
        return self.graph[title]['links']

    def make_path(self, title):
        """Rebuild the path ending at `title` by walking `previous` pointers.

        A `previous` of '0' marks the start page. Sets `self.found` when
        `title` is the destination and stores the path in `self.path`.
        """
        try:
            t = title
            previous = self.graph[t]["previous"]
            path = [t]
            while(previous != '0'):
                path.append(previous)
                print(path)
                t = previous
                previous = self.graph[t]["previous"]
            if (title == self.end):
                self.found = True
            print("reversed " , path.reverse())
            self.path = path
            print(self.path)
        except Exception as e:
            print(e)
            print(path)
            return e

    def get_path(self, title):
        """Recompute and return the path ending at `title`."""
        self.make_path(title)
        return self.path

    def dest(self):
        """The destination title this search is aiming for."""
        return self.end

    def done(self, duration):
        """Dump the graph, report elapsed time, and return the found path.

        Fix: this previously returned `self.graph.path`, but `self.graph`
        is a plain dict with no `path` attribute (AttributeError); the
        computed path lives in `self.path`.
        """
        print(self.graph)
        print("Duration: ", duration)
        return self.path
|
19,055 | af52b1df348c519def25d1880a4b9a204387d41a | import tweepy, time, sys
from numpy import *
from random import randint
# Corpus of quotes; gen() below samples whole sentences from it.
# NOTE(review): the handle is never closed — tolerable for a one-shot script.
file=open('chirac.txt', 'r')
data = file.read()
def gen():
    """Pick a random full sentence from `data` and append the suffix.

    Chooses a random non-terminator character, expands left and right to
    the surrounding sentence terminators, and retries until the result
    (with suffix) fits in a 280-character tweet.
    """
    l = 290 * ' '  # oversized seed so the while loop runs at least once
    while len(l) > 280:
        k = randint(0, len(data) - 1)
        # Land on a character that is not itself a sentence terminator.
        while data[k] in '.?!':
            k = randint(0, len(data) - 1)
        k0 = k
        l = ""
        # Fix: both expansion loops originally tested `data[k] != '.'`
        # three times each, so sentences ending in '?' or '!' bled into
        # their neighbours; test all three terminators as the loop above does.
        while data[k] not in '.?!':
            l = data[k] + l
            k -= 1
            # NOTE(review): if no terminator precedes k0, k goes negative and
            # wraps to the end of `data` (same behaviour as the original).
        # Expand rightwards, keeping the closing terminator.
        k = k0 + 1
        while data[k] not in '.?!':
            l = l + data[k]
            k += 1
        l = l + data[k]
        l = l + ' ACAB cependant.'
    return l
INTERVAL = 60 * 60 * 24  # post once per day (seconds)
# NOTE(review): c1-c4 are not defined anywhere in this file — the Twitter
# API credentials must be supplied before this runs, otherwise the next
# line raises NameError.
auth = tweepy.OAuthHandler(c1, c2)
auth.set_access_token(c3, c4)
api = tweepy.API(auth)
# Tweet a freshly generated sentence, then sleep until the next day.
while True:
    print("about to get ad...")
    ad = gen()
    api.update_status(ad)
    time.sleep(INTERVAL)
|
19,056 | 4e4a3caba99e07939a537ee8bf09fccf7b9eb16d | import os
from flask import Flask, request, redirect, url_for
from config.jconfig import render
from werkzeug import secure_filename
app = Flask(__name__)
# Destination directory for uploaded files (relative to the working dir).
UPLOAD_FOLDER = 'static/uploads/'
ALLOWED_EXTENSIONS = ['txt'] #for testing, only allow txt files
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
    """Return True if `filename` has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def upload():
    """
    GET: show the upload file page
    * POST: upload the file to be drawn, then show them the draw page
    * uploading files gets pretty scary, this will really need to be locked
    down before deploying
    """
    if request.method == 'GET':
        return render('upload.html')
    elif request.method == 'POST':
        file = request.files['upload']
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            return render('draw.html', {
                'filename': file.filename
            })
        # Fix: a POST with a missing or disallowed file previously fell
        # through with no return value, so Flask raised a 500 ("view
        # returned None"). Re-show the upload form instead.
        return render('upload.html')
@app.route('/gallery', methods=['GET'])
def gallery():
    """Serve the gallery page on a GET request."""
    page = render('base.html')
    return page
# Run the Flask development server when executed directly.
# NOTE(review): debug=True must not ship to production.
if __name__ == '__main__':
    app.debug = True
    app.run()
|
19,057 | edb02371007d741b06bbfbe7faecf76da88ce8ab | from ProductAttributes import ProductAttributes
from DynamicDict import DynamicDict
class VariableAttributes(ProductAttributes):
    """Extracts the variation-defining attributes from a serialized attribute array."""

    def input_serialization_array(self, string_array: str) -> list:
        # Parse via the parent class, then collect names of attributes
        # flagged as variations (is_variation == '1') into a DynamicDict
        # under the 'variable' key.
        #
        # NOTE(review): the same `variable_attributes` object is appended
        # once per matching attribute, so `data` holds multiple references
        # to one dict — confirm whether a single append after the loop was
        # intended.
        attributes = super().input_serialization_array(string_array)
        data = []
        variable_attributes = DynamicDict()
        for key, item in attributes.items():
            if str(item['is_variation']) == '1':
                variable_attributes['variable'][item['name']] = item['name']
                data.append(variable_attributes)
        return data
|
class Solution(object):
    """LeetCode "Jump Game III": from index `start` you may jump to
    i + arr[i] or i - arr[i]; decide whether any index holding 0 is
    reachable."""

    def canReach(self, arr, start):
        """
        :type arr: List[int]
        :type start: int
        :rtype: bool
        """
        # Without any zero in the array the goal cannot exist.
        if 0 not in arr:
            return False
        return self.dfs(arr, start, len(arr), {})

    def dfs(self, arr, i, n, memo):
        """DFS from index `i`; `memo` caches results and breaks cycles."""
        if not 0 <= i < n:
            return False
        if arr[i] == 0:
            return True
        if i in memo:
            return memo[i]
        # Pre-mark as False so a cycle back to this index resolves
        # without infinite recursion.
        memo[i] = False
        jump = arr[i]
        memo[i] = self.dfs(arr, i + jump, n, memo) or self.dfs(arr, i - jump, n, memo)
        return memo[i]
|
19,059 | 5f1a4bbe4a2ab652e43a3a633f0a42c13414cecf | """General-purpose test script for image-to-image translation.
Once you have trained your model with train.py, you can use this script to test the model.
It will load a saved model from '--checkpoints_dir' and save the results to '--results_dir'.
It first creates model and dataset given the option. It will hard-code some parameters.
It then runs inference for '--num_test' images and save results to an HTML file.
Example (You need to train models first or download pre-trained models from our website):
Test a CycleGAN model (both sides):
python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
Test a CycleGAN model (one side only):
python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout
The option '--model test' is used for generating CycleGAN results only for one side.
This option will automatically set '--dataset_mode single', which only loads the images from one set.
On the contrary, using '--model cycle_gan' requires loading and generating results in both directions,
which is sometimes unnecessary. The results will be saved at ./results/.
Use '--results_dir <directory_path_to_save_result>' to specify the results directory.
Test a pix2pix model:
python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
See options/base_options.py and options/test_options.py for more test options.
See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
"""
import os
from options.test_options import TestOptions
from data import create_dataset
from models import create_model
import cv2
import torch
import numpy as np
if __name__ == '__main__':
    opt = TestOptions().parse()  # get test options
    # hard-code some parameters for test
    opt.num_threads = 0   # test code only supports num_threads = 0
    opt.batch_size = 1    # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True    # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1   # no visdom display; the test code saves the results to a HTML file.
    model = create_model(opt)      # create a model given opt.model and other options
    model.setup(opt)               # regular setup: load and print networks; create schedulers
    if opt.eval:
        model.eval()
    # Fix: `util.tensor2im` is called below but `util` was never imported.
    # (`from util import util` matches the pytorch-CycleGAN-and-pix2pix
    # repository layout this script is taken from — confirm against the
    # project tree.)
    from util import util
    # start video/webcam setup
    webcam = cv2.VideoCapture(0)
    # Check if the webcam is opened correctly
    if not webcam.isOpened():
        raise IOError("Cannot open webcam")
    # cycling-through-filters set up
    style_models = ['style_monet_pretrained', 'style_vangogh_pretrained', 'style_ukiyoe_pretrained', 'style_cezanne_pretrained']
    style_model_index = 0
    # overlay-text set up
    font = cv2.FONT_HERSHEY_SIMPLEX
    org = (0, 25)              # text origin
    fontScale = 1
    color = (255, 255, 255)    # white in BGR
    thickness = 2              # line thickness of 2 px
    # the CycleGAN takes data as a dictionary; easier to work within that
    # constraint than to rewrite
    data = {"A": None, "A_paths": None}
    # keep reading frames from the webcam until Esc is pressed
    while True:
        # ret is the bool returned by read() -> whether a frame was captured
        ret, frame = webcam.read()
        # resize frame to the network input size
        frame = cv2.resize(frame, (256, 256), interpolation=cv2.INTER_AREA)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # model wants batchsize * channels * h * w: add the batch dimension
        frame = np.array([frame])
        frame = frame.transpose([0, 3, 1, 2])
        # model.set_input expects FloatTensors
        data['A'] = torch.FloatTensor(frame)
        model.set_input(data)  # unpack data from data loader
        model.test()
        # keep only the generated image ("fake" key)
        result_image = model.get_current_visuals()['fake']
        # use tensor2im provided by the util module
        result_image = util.tensor2im(result_image)
        result_image = cv2.cvtColor(np.array(result_image), cv2.COLOR_BGR2RGB)
        result_image = cv2.resize(result_image, (512, 512))
        result_image = cv2.putText(result_image, str(opt.name)[6:-11], org, font,
                                   fontScale, color, thickness, cv2.LINE_AA)
        cv2.imshow('style', result_image)
        c = cv2.waitKey(1)
        if c == 27:  # ASCII value of Esc is 27
            break
        if c == 99:  # 'c' cycles to the next pretrained style
            if style_model_index == len(style_models):
                style_model_index = 0
            opt.name = style_models[style_model_index]
            style_model_index += 1
            model = create_model(opt)  # rebuild the model for the new style
            model.setup(opt)
    # Fix: was `cap.release()` — no variable `cap` exists in this script;
    # release the opened webcam handle instead.
    webcam.release()
    cv2.destroyAllWindows()
|
19,060 | 983d5d6e2e5ee76d7b557b8cc6c3f859dea2a317 | import logging
import deepcut
import numpy as np
from .base import OOVStrategy
logger = logging.getLogger(__name__)
class NoActionOOVStrategy(OOVStrategy):
    """OOV strategy that leaves out-of-vocabulary words untouched."""

    def handle_oov(self, embeddings, X, words):
        # Deliberately a no-op: unknown words simply stay without vectors.
        pass
class DeepcutOOVStrategy(OOVStrategy):
    """Give OOV words vectors by averaging vectors of their sub-tokens.

    Words absent from `words` are re-tokenized with deepcut; when at least
    one sub-token already has an embedding, the OOV word receives the mean
    of those sub-token vectors (embeddings is mutated in place).
    """

    def handle_oov(self, embeddings, X, words):
        # `X`: iterable of queries, each an iterable of words.
        # `words`: the known vocabulary; `embeddings`: word -> vector.
        oov_vecs_created = 0
        info_created_words = {}   # OOV word -> sub-tokens used for its vector
        info_oov_words = {}       # OOV word -> sub-tokens (none had vectors)
        # creating a set of OOV words
        oov_words = set()
        for query in X:
            for query_word in query:
                if query_word not in words:
                    oov_words.add(query_word)
        # iterating through OOV words to get AVG vectors for them
        for ds_word in oov_words:
            tokens = deepcut.tokenize(ds_word)
            in_voc_tokens = [token for token in tokens if token in embeddings]
            ## if we found word-parts in the emb - use their vectors (avg) to represent the OOV word
            if in_voc_tokens:
                token_vecs = [embeddings.get(t) for t in in_voc_tokens]
                embeddings[ds_word] = np.mean(token_vecs, axis=0)
                oov_vecs_created += 1
                info_created_words[ds_word] = in_voc_tokens
            else:
                info_oov_words[ds_word] = tokens
        logger.debug('All OOV words after deepcut:')
        logger.debug(info_oov_words)
        logger.debug('All "created"/replaced words by deepcut:')
        logger.debug(info_created_words)
class LettersCutOOVStrategy(OOVStrategy):
    """Approximate OOV words via vectors of entries sharing a prefix.

    Each OOV word is shortened one character at a time until some entries
    in `embeddings` share the prefix; the word then receives the mean of
    those entries' vectors (embeddings is mutated in place).
    """

    def handle_oov(self, embeddings, X, words):
        oov_vecs_created = 0
        info_created_words = {}   # OOV word -> prefix that produced matches
        info_oov_words = {}       # collected but never logged in this method
        oov_words = set()
        # collecting oov words
        for query in X:
            for query_word in query:
                if query_word not in words:
                    oov_words.add(query_word)
        # iterating through each oov-word
        for oov_word in oov_words:
            cut_word = oov_word
            words_with_same_prefix = set()
            # cutting letter by letter until we find some words with the same prefix
            while len(cut_word) and cut_word not in words:
                cut_word = cut_word[:-1]
                # collectings words with the same prefix
                for vocabulary_word in embeddings:
                    # NOTE(review): `vocabulary_word[0]` is the first element of
                    # whatever iterating `embeddings` yields. If iteration yields
                    # plain word strings, this compares and collects single
                    # characters — probably `vocabulary_word` itself was
                    # intended; confirm the container's iteration type.
                    if vocabulary_word[0].startswith(cut_word):
                        words_with_same_prefix.add(vocabulary_word[0])
                # if found at least one word, then stop cutting and let's compute the avg vector
                if len(words_with_same_prefix):
                    break
            logger.debug(f'FOR WORD {oov_word} FOUND WORDS WITH THE SAME PREFIX: {str(words_with_same_prefix)}')
            if words_with_same_prefix:
                token_vecs = [embeddings.get(t) for t in words_with_same_prefix]
                embeddings[oov_word] = np.mean(token_vecs, axis=0)
                oov_vecs_created += 1
                info_created_words[oov_word] = cut_word
|
19,061 | 5fff2dd7e5a622492df161ba68188d14653c01df | import numpy as np
import pandas as pd
from datetime import datetime
#pour création jeu de test/entrainement
from sklearn.model_selection import train_test_split
cie_aerienne_dict = {
'FL':'AirTran airways',
'AS':'Alaska airlines',
'AA':'American airlines',
'DL':'Delta airways',
'9E':'Endeavor air',
'MQ':'Envoy air',
'EV':'ExpressJet airlines',
'F9':'Frontier airlines',
'HA':'Hawaiian airlines',
'B6':'JetBlue airways',
'YV':'Mesa airlines',
'OO':'SkyWest airlines',
'WN':'Southwest airlines',
'NK':'Spirit airlines',
'UA':'United airlines',
'US':'US airways',
'VX':'Virgin America'
}
jour_semaine_dict = { 1:'Lundi',2:'Mardi',3:'Mercredi',4:'Jeudi',5:'Vendredi',6:'Samedi',7:'Dimanche' }
# Build the list of distinct values appearing in a dataframe column.
def liste_distincte_col(df, nom_col, sep=""):
    """Return the distinct values of a dataframe column, in first-seen order.

    Args:
        df (pandas.DataFrame): source dataframe.
        nom_col (str): name of the column to scan.
        sep (str): optional separator; cells containing it are split and
            each part is collected individually.

    Returns:
        list: distinct values (missing/NaN entries are skipped).
    """
    distinct = []
    for cell in df[nom_col].values:
        # Missing values arrive as floats (NaN) — ignore them.
        if isinstance(cell, float):
            continue
        if sep != "" and sep in cell:
            parts = cell.split(sep)
        else:
            parts = [cell]
        for part in parts:
            if part not in distinct:
                distinct.append(part)
    return distinct
# List the columns whose titles carry a given prefix or suffix.
def col_rech_titre(df, fin = True, suffix =""):
    """Return the dataframe columns whose name starts or ends with `suffix`.

    Args:
        df (pandas.DataFrame): dataframe whose column names are inspected.
        fin (bool): True to match the end of the name, False the start.
        suffix (str): string searched for; an empty string matches nothing.

    Returns:
        list: matching column names (empty when `suffix` is empty).
    """
    if suffix == "":
        return []
    if fin:
        return [col for col in df.columns if col.endswith(suffix)]
    return [col for col in df.columns if col.startswith(suffix)]
def MaxFlightsCode(df, airport_code):
    """Count the flights departing from a given airport.

    Args:
        df (pandas.DataFrame): flight records with an ORIGIN_AIRPORT_ID column.
        airport_code (int): airport id whose departures are counted.

    Returns:
        int: number of rows whose ORIGIN_AIRPORT_ID equals `airport_code`.
    """
    departures = df.loc[df.ORIGIN_AIRPORT_ID == airport_code]
    return len(departures)
def AirportCode (df_fly, df_airport, city):
    """Return the AIRPORT_ID of a city's busiest airport, or 0 if none match.

    Args:
        df_fly (pandas.DataFrame): flight records (ORIGIN_AIRPORT_ID column).
        df_airport (pandas.DataFrame): airports with 'Code' and 'Description'.
        city (str): city (or airport) name, matched case-insensitively.

    Returns:
        The 'Code' of the matching airport with the most departures, 0 if
        no airport description contains `city`.
    """
    # Case-insensitive matching column (note: added to df_airport as a
    # side effect, exactly as before).
    df_airport['airport_minus'] = df_airport.Description.str.lower()
    candidates = df_airport.loc[df_airport.airport_minus.str.contains(city.lower())].copy()
    if candidates.shape[0] == 0:
        return 0
    # Rank the candidate airports by total departures in the flight data.
    candidates['NumFlights'] = candidates.apply(
        lambda row: MaxFlightsCode(df_fly, row['Code']), axis=1)
    busiest = candidates.loc[candidates.NumFlights == max(candidates.NumFlights)]['Code'].values
    return busiest[0]
def prediction2(param, estimators, origin = '', destination = '',
                carrier = '', month = 0, weekday = 0):
    '''
    This function allows you to input all of your flight information (no leaks!) and
    the function will return how late your flight will arrive based on the output from the
    Random Forest Regressor.
    Inputs:
    param: parameter dict for the RandomForestRegressor (as from get_params()).
    estimators: fitted trees (estimators_) from a previously trained regressor.
    Origin (enter this as a city, state combo, or include the airport name (such as Bush
    or Hobby). This will automatically calculate which airport you meant.
    Destination (same as Origin, entered as a string)
    Carrier (which Airline, use a string to represent the name (such as 'American' or 'United')
    Month (the month the flight is scheduled for)
    Weekday (Enter number between 1-7) such as 1:'Lundi',2:'Mardi',3:'Mercredi',4:'Jeudi',5:'Vendredi',6:'Samedi',7:'Dimanche'
    Available Carriers:
    'FL':'AirTran airways', 'AS':'Alaska airlines', 'AA':'American airlines',
    'DL':'Delta airways', '9E':'Endeavor air', 'MQ':'Envoy air',
    'EV':'ExpressJet airlines', 'F9':'Frontier airlines', 'HA':'Hawaiian airlines',
    'B6':'JetBlue airways', 'YV':'Mesa airlines', 'OO':'SkyWest airlines',
    'WN':'Southwest airlines', 'NK':'Spirit airlines', 'UA':'United airlines',
    'US':'US airways', 'VX':'Virgin America'
    Outputs:
    int: Estimated delay for the arrival (in minutes, can be negative if the flight is expected to arrive early)
    text: Status of the estimation
    '''
    from sklearn.ensemble import RandomForestRegressor
    col_utiles = ['CARRIER_DELAY','WEATHER_DELAY', 'NAS_DELAY', 'SECURITY_DELAY', 'LATE_AIRCRAFT_DELAY',
                  'MONTH','DAY_OF_MONTH', 'DAY_OF_WEEK','UNIQUE_CARRIER',
                  'ORIGIN_AIRPORT_ID', 'DEST_AIRPORT_ID','CRS_DEP_TIME',
                  'DISTANCE', 'AIR_TIME','CRS_ELAPSED_TIME','ACTUAL_ELAPSED_TIME', 'ARR_DELAY' ]
    # Read the flight sample and the airport reference table.
    fly = pd.read_csv('./Dataset_Projet_4/2016_sample_file_09.csv', sep=",", encoding='utf_8', low_memory=False,
                      error_bad_lines=False, usecols = col_utiles)
    airport = pd.read_csv('./Dataset_Projet_4/L_AIRPORT_ID.csv', sep=",", encoding='utf_8', low_memory=False, error_bad_lines=False)
    # Label-encode the airline codes.
    Cie = liste_distincte_col(fly,'UNIQUE_CARRIER','|')
    Cie.sort()
    from sklearn import preprocessing
    le = preprocessing.LabelEncoder()
    le.fit(Cie)
    fly['CIE'] = le.transform(fly['UNIQUE_CARRIER'])
    # Keep one mapping row per UNIQUE_CARRIER <-> CIE pair.
    CIEdf = fly[['UNIQUE_CARRIER', 'CIE']].drop_duplicates()
    fly.drop(['UNIQUE_CARRIER'], axis=1, inplace= True)
    # Drop stray "Unnamed" columns left over from the CSV export.
    fly = fly.drop(col_rech_titre(fly, False, "Unnamed"), axis=1)
    # Resolve city names to airport ids (0 = not supplied / not found).
    id_origin = 0
    id_destination = 0
    if origin != '':
        id_origin = AirportCode(fly, airport, origin)
    if destination != '':
        id_destination = AirportCode(fly, airport, destination)
    # Fix: the original monkey-patched pd.DataFrame.mask, silently replacing
    # pandas' built-in DataFrame.mask process-wide. Use a local helper.
    def _filtre(df, key, value):
        # Keep only the rows where column `key` equals `value`.
        return df[df[key] == value]
    if id_origin != 0:
        fly = _filtre(fly, 'ORIGIN_AIRPORT_ID', id_origin)
    if id_destination != 0:
        fly = _filtre(fly, 'DEST_AIRPORT_ID', id_destination)
    if month != 0:
        fly = _filtre(fly, 'MONTH', month)
    if weekday != 0:
        fly = _filtre(fly, 'DAY_OF_WEEK', weekday)
    if carrier != '':
        # Look up the encoded carrier id for the given UNIQUE_CARRIER code.
        carrier_num = CIEdf[CIEdf.UNIQUE_CARRIER == carrier]
        carrier_num = carrier_num['CIE'].values[0]
        fly = _filtre(fly, 'CIE', carrier_num)
    # Unreported delay components mean "no delay of that kind".
    fly['CARRIER_DELAY'].fillna(0, inplace=True)
    fly['WEATHER_DELAY'].fillna(0, inplace=True)
    fly['NAS_DELAY'].fillna(0, inplace=True)
    fly['SECURITY_DELAY'].fillna(0, inplace=True)
    fly['LATE_AIRCRAFT_DELAY'].fillna(0, inplace=True)
    if fly.shape[0] == 0 :
        return 0 , "Insuffisament de données pour prédire ! Soyez moins précis !"
    # Per-column means over the filtered flights.
    fly_mean= fly.mean()
    # Single-observation frame carrying the features the regressor expects.
    ar = np.array([[fly_mean['CARRIER_DELAY'],fly_mean['WEATHER_DELAY'],fly_mean['NAS_DELAY'],
                    fly_mean['SECURITY_DELAY'],fly_mean['LATE_AIRCRAFT_DELAY'],fly_mean['DISTANCE'],
                    fly_mean['AIR_TIME'],fly_mean['CRS_ELAPSED_TIME'],fly_mean['ACTUAL_ELAPSED_TIME'] ]])
    df = pd.DataFrame(ar, index = [1], columns = ['CARRIER_DELAY','WEATHER_DELAY', 'NAS_DELAY',
                      'SECURITY_DELAY', 'LATE_AIRCRAFT_DELAY','DISTANCE', 'AIR_TIME','CRS_ELAPSED_TIME',
                      'ACTUAL_ELAPSED_TIME'])
    New_rfr = RandomForestRegressor()
    # Fix: `New_rfr.set_params = param` overwrote the bound set_params method
    # with the parameter dict instead of applying the parameters.
    New_rfr.set_params(**param)
    # NOTE(review): grafting fitted trees onto a fresh regressor relies on
    # sklearn internals; persisting the whole fitted model is safer.
    New_rfr.estimators_= estimators
    print(f"Prédiction d'avance/retard: {int(New_rfr.predict(df)[0])} minutes " )
    return int(New_rfr.predict(df)[0]) , "prédiction sans erreur"
|
19,062 | 978192d017046674434ae3c3d46c5b880758a1e4 | import FWCore.ParameterSet.Config as cms
# CMSSW analysis job: run PAT generator-level muon matching (muonMatch)
# followed by the MuonMatch EDAnalyzer, writing output to a ROOT file.
process = cms.Process("Demo")
process.load("FWCore.MessageService.MessageLogger_cfi")
# Propagators and muon detector geometry used by the muon reconstruction tools.
process.load("TrackPropagation.SteppingHelixPropagator.SteppingHelixPropagatorAny_cfi")
process.load("TrackPropagation.SteppingHelixPropagator.SteppingHelixPropagatorAlong_cfi")
process.load("TrackPropagation.SteppingHelixPropagator.SteppingHelixPropagatorOpposite_cfi")
process.load("RecoMuon.DetLayers.muonDetLayerGeometry_cfi")
# configure modules via Global Tag
# https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideFrontierConditions
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.GlobalTag.globaltag = 'START53_V19::All'
# Route log output to the "Log" destination.
process.MessageLogger = cms.Service("MessageLogger",
    destinations = cms.untracked.vstring("Log")
)
# -1 = process all events in the input files.
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.options = cms.untracked.PSet(SkipEvent = cms.untracked.vstring('ProductNotFound'))
fileList = cms.untracked.vstring()
#F2F1C38E-79DA-E111-B6E8-003048FFCB6A.root
fileList.extend(['file:/user/scheuch/CMSSW/crab/CMSSW_5_3_12_patch2/src/MuonMatch/MuonMatch/F2F1C38E-79DA-E111-B6E8-003048FFCB6A.root'])
#root://xrootd.unl.edu//store/mc/Summer12_DR53X/ZZTo4mu_8TeV_mll8_mZZ95-160-powheg15-pythia6/AODSIM/PU_S10_START53_V19-v1/10000/34675A33-99F9-E211-A1F4-0025907FD2BA.root
process.source = cms.Source("PoolSource",
    # replace 'myfile.root' with the source file you want to use
    fileNames = fileList
)
# L1 trigger mask / seed configuration for L1_DoubleMu3.
process.load("L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskAlgoTrigConfig_cff")
process.es_prefer_l1GtTriggerMaskAlgoTrig = cms.ESPrefer("L1GtTriggerMaskAlgoTrigTrivialProducer", "l1GtTriggerMaskAlgoTrig")
process.load('HLTrigger.HLTfilters.hltLevel1GTSeed_cfi')
process.hltLevel1GTSeed.L1TechTriggerSeeding = cms.bool(False)
process.hltLevel1GTSeed.L1SeedsLogicalExpression = cms.string('L1_DoubleMu3')
# MC-truth matching of reco "muons" to generator muons (pdgId 13) with
# loose settings: no charge check, ambiguities kept, wide deltaR/dPt windows.
process.load('PhysicsTools.PatAlgos.mcMatchLayer0.muonMatch_cfi')
process.muonMatch.src = cms.InputTag("muons")
process.muonMatch.mcPdgId = cms.vint32(13)
process.muonMatch.checkCharge = cms.bool(False)
process.muonMatch.resolveAmbiguities = cms.bool(False)
process.muonMatch.maxDeltaR = cms.double(10.)
process.muonMatch.maxDPtRel = cms.double(100.)
process.muonMatch.resolveByMatchQuality = cms.bool(True)
process.load("L1TriggerConfig.L1GtConfigProducers.l1GtTriggerMenuXml_cfi")
process.l1GtTriggerMenuXml._errorstr = 0;
process.load('L1Trigger.Skimmer.l1Filter_cfi')
process.l1Filter.algorithms = cms.vstring('L1_DoubleMu3')
#process.selectedMuonsGenParticlesMatchNew = cms.EDProducer("MCTruthDeltaRMatcherNew",
# src = cms.InputTag("muons"),
# matched = cms.InputTag("genParticles"),
# distMin = cms.double(0.15),
# matchPDGId = cms.vint32(13)
#)
process.demo = cms.EDAnalyzer('MuonMatch'
)
#process.demo2 = cms.EDAnalyzer('MuonMatchPre'
#)
# Output ROOT file for the analyzer.
process.TFileService = cms.Service("TFileService",
    fileName = cms.string('AnalysisResult.root')
)
process.p = cms.Path(process.muonMatch*process.demo) #process.l1GtTriggerMenuXml, process.l1Filter* *process.hltLevel1GTSeed
|
19,063 | 004776b9ae4db686b27fcd5a4447c91526996cb9 | from src.utils.global_settings import GlobalSettings
from src.affinity_structure.bacterial import BacterialAffinityStructure
# Configure global tree/segment parameters before building any structures.
GlobalSettings.init(tree_depth=9, segment_min_size=int(2.5e3))
affinity_structure = BacterialAffinityStructure('grouptest')
# Process two genomes back-to-back.
for _ in range(2):
    affinity_structure.handle_next_genome()
|
19,064 | e790bf7c987e3c81106a698c42d0d54d3650b182 | """Settings for production."""
from etc.base import * # noqa
|
19,065 | 0444b170776ced1a4352e9e482d1988c699a8495 | from time import time
from datetime import timedelta
import numpy as np
import torch
from torch.optim import AdamW
from torch.utils.data import Dataset, DataLoader
from transformers import AutoTokenizer, AutoModelForMultipleChoice
import sec1_preprossing
from context_Train_dataset import TrainingDataset
import logging
from tqdm import trange
from argparse import ArgumentParser, Namespace
from pathlib import Path
# Timestamped INFO-level logging for training progress messages.
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(message)s",
    level=logging.INFO,
    datefmt="%Y-%m-%d %H:%M:%S",
)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# NOTE(review): used only to build checkpoint file names in main(); the
# model actually trained comes from args.model_name, which defaults to a
# different checkpoint ('hfl/chinese-roberta-wwm-ext').
model_name = 'bert-base-chinese'
def evaluation(outputs, labels):
    """Return the number of predictions (argmax over dim 1) matching labels."""
    predictions = outputs.argmax(dim=1)
    return (predictions == labels).sum().item()
# Dataloader collate_fn
def collate_fn(batch):
    """Pad and stack a batch of (input_ids, attention_mask, token_type_ids, label).

    pad_sequence pads the variable first dimension of each tensor, then
    transpose(1, 2) re-transposes to put that dimension last (assumes each
    tensor arrives as (seq_len, num_choices) from the dataset — confirm
    against TrainingDataset).
    """
    # Fix: pad_sequence was used here but never imported anywhere in this
    # file, so the first batch raised NameError.
    from torch.nn.utils.rnn import pad_sequence
    input_ids, attention_mask, token_type_ids, labels = zip(*batch)
    input_ids = pad_sequence(input_ids, batch_first=True).transpose(1,2).contiguous() # re-transpose
    attention_mask = pad_sequence(attention_mask, batch_first=True).transpose(1,2).contiguous()
    token_type_ids = pad_sequence(token_type_ids, batch_first=True).transpose(1,2).contiguous()
    labels = torch.stack(labels)
    return input_ids, attention_mask, token_type_ids, labels
def main(args):
    """Fine-tune a multiple-choice (context selection) model.

    Loads and preprocesses the training data, builds train/dev dataloaders,
    then trains for args.num_epoch epochs, saving a checkpoint after every
    epoch and reporting dev loss/accuracy at the end of each epoch.
    """
    # load data
    train_data , context = sec1_preprossing.read_train_data(args)
    # processing data
    train_instances , dev_instances = sec1_preprossing.preprocess_data(args , train_data , context)
    # load dataloader
    logging.info("generate dataloader....")
    train_dataset = TrainingDataset(train_instances)
    dev_dataset = TrainingDataset(dev_instances)
    train_dataloader = DataLoader(train_dataset, collate_fn = collate_fn, shuffle=True, \
        batch_size = args.batch_size) # num_workers = 2
    dev_dataloader = DataLoader(dev_dataset, collate_fn = collate_fn, shuffle=True, \
        batch_size = args.batch_size) # num_workers = 2
    # on windows , dataloader can't add num_workers may cause some problems !
    logging.info("dataloader OK!")
    # model
    model = AutoModelForMultipleChoice.from_pretrained(args.model_name)
    print(model)
    model.to(device)
    # model parameters
    total = sum(p.numel() for p in model.parameters())
    print('\nstart training, parameter total:{}\n'.format(total))
    # optimizer
    optimizer = AdamW(model.parameters(), lr = args.lr)
    optimizer.zero_grad()
    # patience, best_dev_loss = 0, 1e10
    # best_state_dict = model.state_dict()
    start_time = time()
    t_batch = len(train_dataloader)
    v_batch = len(dev_dataloader)
    for epoch in range(1, args.num_epoch + 1):
        # NOTE(review): best_acc is initialised here but never updated or used.
        total_loss, total_acc, best_acc = 0, 0, 0
        model.train()
        # train step
        for i, batch in enumerate(train_dataloader, start=1):
            batch = (tensor.to(device) for tensor in batch)
            input_ids, attention_mask, token_type_ids, labels = batch
            optimizer.zero_grad()
            # Backpropogation
            outputs = model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels)
            loss = outputs[0]
            loss.backward()
            optimizer.step()
            correct = evaluation(outputs[1], labels)
            total_acc += (correct / input_ids.shape[0])
            total_loss += loss.item()
            # Progress bar with timer ;-)
            elapsed_time = time() - start_time
            elapsed_time = timedelta(seconds=int(elapsed_time))
            print("Epoch: %d/%d | Batch: %d/%d | loss=%.5f | %s \r" \
                % (epoch, args.num_epoch, i, len(train_dataloader), loss, elapsed_time), end='')
        print('\nTrain | Loss:{:.5f} Acc: {:.3f}'.format(total_loss/t_batch, total_acc/t_batch*100))
        # Save parameters of each epoch
        # NOTE(review): the file name uses the module-level model_name
        # ('bert-base-chinese') even when args.model_name differs.
        filename = "%s_epoch_%d" % (model_name, epoch)
        model.save_pretrained(args.ckpt_dir / filename)
        # Get avg. loss on development set
        print("Epoch: %d/%d | Validating... \r" % (epoch, args.num_epoch), end='')
        dev_total_loss = 0
        dev_total_acc = 0
        model.eval()
        for batch in dev_dataloader:
            batch = (tensor.to(device) for tensor in batch)
            input_ids, attention_mask, token_type_ids, labels = batch
            with torch.no_grad():
                outputs = model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels)
                loss = outputs[0]
                correct = evaluation(outputs[1], labels)
                dev_total_acc += (correct / input_ids.shape[0])
                dev_total_loss += loss
        dev_avg_loss = dev_total_loss / v_batch
        elapsed_time = time() - start_time
        elapsed_time = timedelta(seconds=int(elapsed_time))
        print("Epoch: %d/%d | dev_loss=%.5f | dev_acc=%.3f |%s " \
            % (epoch, args.num_epoch , dev_avg_loss ,dev_total_acc/v_batch*100, elapsed_time))
def parse_args() -> Namespace:
    """Build and parse the command-line arguments for training."""
    parser = ArgumentParser()
    parser.add_argument(
        "--train_file",
        type= str,
        help="Directory to the dataset.",
        default="./dataset/train.json",
    )
    parser.add_argument(
        "--context_file",
        type= str,
        help="Directory to the dataset.",
        default="./dataset/context.json",
    )
    parser.add_argument(
        "--cache_dir",
        type = Path,
        help="Directory to the preprocessed caches.",
        default="./cache/choose_context/",
    )
    parser.add_argument(
        "--ckpt_dir",
        type = Path,
        help="Directory to save the model file.",
        default="./ckpt/choose_context/",
    )
    parser.add_argument(
        "--model_name",
        type = str,
        help = "BERT model_name",
        default = 'hfl/chinese-roberta-wwm-ext',
    )
    parser.add_argument(
        "--tokenizer_name",
        type=str,
        default= 'hfl/chinese-roberta-wwm-ext',
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--split_ratio",
        type = float,
        help = "split ratio for train_dataset",
        default = 0.95,
    )
    parser.add_argument(
        "--input_length",
        type= int,
        help= "BERT token maximum input length",
        default = 512,
    )
    # optimizer
    parser.add_argument("--lr", type=float, default=1e-5)
    # data loader
    parser.add_argument("--batch_size", type=int, default = 2)
    # training
    # NOTE(review): --device is parsed but main() uses the module-level
    # `device` (auto cuda/cpu) instead of args.device.
    parser.add_argument(
        "--device", type=torch.device, help="cpu, cuda, cuda:0, cuda:1", default = "cuda:0"
    )
    parser.add_argument("--num_epoch", type=int, default = 2)
    args = parser.parse_args()
    # args = parser.parse_known_args()[0] # for colab
    return args
# Script entry point: parse CLI args and ensure the checkpoint directory
# exists before training starts.
if __name__ == "__main__":
    args = parse_args()
    args.ckpt_dir.mkdir(parents=True, exist_ok=True)
main(args) |
19,066 | 243afe7c16ec992d39b393175a0c363ddecafe14 | import libs.sly as sly
from libs.sly import Lexer
import re
class LanguageLexer(Lexer):
    """ This Class is in charge of the lexical analysis
    for the VECING-Lang, it is made using the Lexer class
    from sly. It contains all the possible tokens in the language
    and for some of them, a special function that modifies its value
    before returning it.

    attributes
    ----------
    lineno: counter of newlines seen in the program

    constants
    ----------
    tokens: a list of all the possible tokens
    ignore: regular expression of the elements to ignore
    ignore_newline: regular expression of a new line
    SEM_COL: regular expression of the token called SEM_COL
    COMMA: regular expression of the token called COMMA
    LEFT_PARENTHESIS: regular expression of the token called LEFT_PARENTHESIS
    RIGHT_PARENTHESIS: regular expression of the token called RIGHT_PARENTHESIS
    LEFT_BRAKET: regular expression of the token called LEFT_BRAKET
    RIGHT_BRAKET: regular expression of the token called RIGHT_BRAKET
    OP_COMP: regular expression of the token called OP_COMP
    OP_MATH: regular expression of the token called OP_MATH
    DEFINE: regular expression of the token called DEFINE
    CONST: regular expression of the token called CONST
    LAMBDA: regular expression of the token called LAMBDA
    RENDER: regular expression of the token called RENDER
    END: regular expression of the token called END
    LANGUAGE_FUNC: regular expression of the token called LANGUAGE_FUNC
    ID: regular expression of the token called ID

    methods
    -------
    COMMENT(t)
    CONST_FLOAT(t)
    CONST_INT(t)
    NULL(t)
    CONST_BOOL(t)
    CONST_LIST(t)
    ignore_newline(t)
    error(t)
    """

    # NOTE(review): in sly, function-defined tokens are matched in the order
    # they are defined and string tokens by decreasing regex length, so the
    # ordering below is load-bearing — do not reorder casually.
    tokens = {'COMMENT', 'SEM_COL', 'COMMA', 'LEFT_PARENTHESIS', 'RIGHT_PARENTHESIS',
              'LEFT_BRAKET', 'RIGHT_BRAKET', 'OP_COMP', 'OP_MATH', 'DEFINE', 'CONST', 'LAMBDA', 'RENDER', 'END',  # 'NULL',
              'LANGUAGE_FUNC', 'ID', 'CONST_INT', 'CONST_FLOAT', 'CONST_BOOL', 'CONST_LIST'}

    # characters to ignore
    ignore = ' \t'
    ignore_newline = r'\n+'

    # Tokens Regexes
    # COMMENT = r'\/\/.*;'
    @_(r'\/\/.*;')
    def COMMENT(self, t):
        # Comments are consumed and discarded: no token value is returned.
        pass

    SEM_COL = r'\;'
    COMMA = r'\,'
    LEFT_PARENTHESIS = r'\('
    RIGHT_PARENTHESIS = r'\)'
    LEFT_BRAKET = r'\['
    RIGHT_BRAKET = r'\]'
    # Two-character comparators must come before their one-character prefixes.
    OP_COMP = r'\<\=|\>\=|\<|\>|\!\=|\='
    OP_MATH = r'add|sub|mult|power|div|sqrt|abs'
    # NULL = r'null|NULL|\(\)'
    DEFINE = r'DEFINE|define|DEF|def'
    CONST = r'CONST|const'
    LAMBDA = r'lambda'
    RENDER = r'RENDER|render'
    END = r'END|end'
    LANGUAGE_FUNC = r'cond'

    # CONST_FLOAT = r'(\-)?[0-9]+\.[0-9]+'
    # do not pay attention to IDE not recognizing the decorators
    @_(r'(\-)?[0-9]+\.[0-9]+')
    def CONST_FLOAT(self, t):
        """ Function that receives as argument
        a parameter that matches the regex of the token CONST_FLOAT

        parameters
        ----------
        t: the token received

        returns
        -------
        t: token after being converted to float
        """
        t.value = float(t.value)
        return t

    # CONST_INT = r'\-?[0-9]+'
    @_(r'\-?[0-9]+')
    def CONST_INT(self, t):
        """ Function that receives as argument
        a parameter that matches the regex of the token CONST_INT

        parameters
        ----------
        t: the token received

        returns
        -------
        t: token after being converted to int
        """
        t.value = int(t.value)
        return t

    # NULL
    # NOTE(review): 'NULL' is commented out of the `tokens` set above but this
    # rule is still defined; depending on the sly version this either raises a
    # build error or the parser never receives a declared NULL token — confirm
    # which behaviour is intended.
    @_(r'null|NULL|\(\)')
    def NULL(self, t):
        """ Function that receives as argument
        a parameter that matches the regex of the token NULL

        parameters
        ----------
        t: the token received

        returns
        -------
        t: None
        """
        t.value = None
        return t

    # CONST_BOOL = r'#false|#true'
    @_(r'#false|#true')
    def CONST_BOOL(self, t):
        """ Function that receives as argument
        a parameter that matches the regex of the token CONST_BOOL

        parameters
        ----------
        t: the token received

        returns
        -------
        t: a boolean value representing the token
        """
        t.value = False if t.value == '#false' else True
        return t

    # NOTE(review): stray no-op string literal, apparently a leftover example
    # of the CONST_LIST syntax; it has no effect at runtime.
    "[1 2.3 #true]"
    # CONST_LIST = r'\"\(\s*(((\-)?[0-9]+\.[0-9]+)|((\-)?[0-9]+)|#false|#true)(\s+(((\-)?[0-9]+\.[0-9]+)|((\-)?[0-9]+)|#false|#true))*\s*\)\"|\"\[\s*(((\-)?[0-9]+\.[0-9]+)|((\-)?[0-9]+)|#false|#true)(\s+(((\-)?[0-9]+\.[0-9]+)|((\-)?[0-9]+)|#false|#true))*\s*\]\"'
    @_(r'\"\(\s*(((\-)?[0-9]+\.[0-9]+)|((\-)?[0-9]+)|#false|#true)(\s+(((\-)?[0-9]+\.[0-9]+)|((\-)?[0-9]+)|#false|#true))*\s*\)\"|\"\[\s*(((\-)?[0-9]+\.[0-9]+)|((\-)?[0-9]+)|#false|#true)(\s+(((\-)?[0-9]+\.[0-9]+)|((\-)?[0-9]+)|#false|#true))*\s*\]\"')
    def CONST_LIST(self, t):
        """ Function that receives as argument
        a parameter that matches the regex of the token CONST_LIST

        parameters
        ----------
        t: the token received

        returns
        -------
        t: token converted into a python list
        """
        # Strip the quote+bracket pair at each end, then split on whitespace.
        temp = list(re.split(r'\s+', t.value[2:-2]))
        li = []
        for elem in temp:
            # Order matters: floats must be tested before ints because the
            # int regex would also match the integer part of a float.
            if re.search(r'(\-)?[0-9]+\.[0-9]+', elem):
                li.append(float(elem))
            elif re.search(r'\-?[0-9]+', elem):
                li.append(int(elem))
            elif re.search(r'#false|#true', elem):
                boolean = False if elem == '#false' else True
                li.append(boolean)
        t.value = li
        return t

    # Identifiers come last so keywords above win ties.
    ID = r'[a-zA-Z]\w*'

    def ignore_newline(self, t):
        """ Function that receives as argument
        a parameter that matches the regex of the token ignore_newline
        and increments the counter of new lines

        parameters
        ----------
        t: the token received
        """
        self.lineno += t.value.count('\n')

    def error(self, t):
        """ Function that receives as argument
        a parameter that does not match any previous token.
        It raises an exception.

        parameters
        ----------
        t: the value without a token assigned to it
        """
        print("Illegal character '%s' in line %i" % (t.value[0], self.lineno))
        raise Exception("Illegal character '%s' in line %i" %
                        (t.value[0], self.lineno))
|
19,067 | 7c1da42541cc218ea57ae215f875ee2737e43b14 | """
time complexity: O(Log mxn)
space complexity: O(1)
"""
class Solution(object):
    def searchMatrix(self, matrix, target):
        """Search a sorted matrix (each row ascending, each row's first
        element greater than the previous row's last) for `target`.

        Treats the matrix as one flat sorted array and binary-searches it.

        :type matrix: List[List[int]]
        :type target: int
        :rtype: bool

        Time: O(log(m*n)); space: O(1).
        """
        # Guard both an empty matrix and a matrix of empty rows.  The
        # original returned -1 (truthy!) for the empty case and divided by
        # zero for [[]]; a boolean method must return False here.
        if not matrix or not matrix[0]:
            return False
        cols = len(matrix[0])
        lo, hi = 0, len(matrix) * cols - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            num = matrix[mid // cols][mid % cols]
            if num == target:
                return True
            if num < target:
                lo = mid + 1   # target can only be in the upper half
            else:
                hi = mid - 1   # target can only be in the lower half
        return False
# Ad-hoc manual checks.  NOTE: each matrix/target pair below overwrites the
# previous one, so only the last pair is actually exercised.
matrix = [
    [1, 3, 5, 7],
    [10, 11, 16, 20],
    [23, 30, 34, 50],
]
target = 3
###############
matrix = []
target = 0  # was `taret = 0` — the typo silently left the previous target set
#############
matrix = [[1, 1]]
target = 2
s = Solution()
print(s.searchMatrix(matrix, target))
19,068 | d6b3921a2cc8722d3ac7a913dc703d2320a01ed3 | import os
import io
import json
import boto3
import base64
import logging
import numpy as np
from chalice import Chalice
from chalice import BadRequestError
# Chalice application exposing embedding and LLM SageMaker endpoints.
app = Chalice(app_name="genai-rag-workshop")
app.debug = True
# Shared SageMaker runtime client used by every route below.
smr_client = boto3.client("runtime.sagemaker")
logger = logging.getLogger("genai-rag-workshop")
logger.setLevel(logging.DEBUG)
@app.route("/")
def index():
    """Health-check route: return a static greeting payload."""
    return dict(hello='world')
@app.route("/emb/{variant_name}", methods=["POST"], content_types=["application/json"])
def invoke_emb(variant_name):
    """Proxy an embedding request to the SageMaker endpoint selected by
    `variant_name` ('gptj_6b' or 'kosimcse')."""
    # Map each supported model to the env var holding its endpoint name.
    endpoint_env = {
        "gptj_6b": "ENDPOINT_EMB_GPTJ_6B",
        "kosimcse": "ENDPOINT_EMB_KOSIMCSE",
    }
    if variant_name not in endpoint_env:
        raise BadRequestError("[ERROR] Invalid model!")
    logger.info(f"embedding model: {variant_name}")
    endpoint_name = os.environ[endpoint_env[variant_name]]
    payload = app.current_request.json_body
    try:
        response = smr_client.invoke_endpoint(
            EndpointName=endpoint_name,
            ContentType='application/json',
            Body=json.dumps(payload).encode("utf-8"),
        )
        return json.loads(response['Body'].read().decode("utf-8"))
    except Exception as e:
        # Best-effort logging; the route then implicitly returns None.
        print(e)
        print(payload)
@app.route("/llm/{variant_name}", methods=["POST"], content_types=["application/json"])
def invoke_llm(variant_name):
    """Proxy a text-generation request to the SageMaker LLM endpoint
    selected by `variant_name`.

    Llama2 endpoints additionally require the EULA-acceptance custom
    attribute; the other models are invoked plainly.
    """
    # Map each supported model to the env var holding its endpoint name.
    # (The original if/elif chain had the `kkulm_12_8b` branch duplicated.)
    endpoint_env = {
        "llama2_7b": "ENDPOINT_LLM_LLAMA2_7B",
        "llama2_13b": "ENDPOINT_LLM_LLAMA2_13B",
        "kkulm_12_8b": "ENDPOINT_LLM_KKULM_12_8B",
        "falcon_40b": "ENDPOINT_LLM_FALCON_40B",
    }
    if variant_name not in endpoint_env:
        raise BadRequestError("[ERROR] Invalid model!")
    logger.info(f"txt2txt model: {variant_name}")
    endpoint_name = os.environ[endpoint_env[variant_name]]
    payload = app.current_request.json_body
    try:
        kwargs = dict(
            EndpointName=endpoint_name,
            ContentType='application/json',
            Body=json.dumps(payload).encode("utf-8"),
        )
        if "llama2" in variant_name:
            # Llama2 models require explicit EULA acceptance per request.
            kwargs["CustomAttributes"] = "accept_eula=true"
        response = smr_client.invoke_endpoint(**kwargs)
        return json.loads(response['Body'].read().decode("utf-8"))
    except Exception as e:
        # Best-effort logging; the route then implicitly returns None.
        print(e)
        print(payload)
19,069 | b50dc347d35f603661469cb0b01cda9eff8d45fb | import socket
# Interactive client: read a line from the user, send it to the server, and
# print the reply.
client = socket.socket()
client.connect(('localhost', 6969))
while True:
    msg = input('>>:').strip()
    if len(msg) == 0:
        continue  # don't send empty messages
    client.send(msg.encode())
    date = client.recv(1024)
    if not date:
        # b'' from recv() means the server closed the connection; without
        # this check the original looped forever (and client.close() below
        # was unreachable).
        break
    print('来自服务器:', date.decode())
client.close()
19,070 | 56519ba29dbc45e69749fab701513875d6b263ee | # Даны списки:
# Given lists a and b, produce the list of elements common to both.
a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
b = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
# Pairwise comparison; note duplicates from `a` are kept (1 appears twice).
c = [i for i in a for j in b if i == j]
print(c)
# Deduplicate via a set round-trip.
c = list(set(c))
print(c)
# Same result straight from set intersection.
d = list(set(a) & set(b))
print(d)
19,071 | c72a11f9058f89f0bc9ebd8c4c8e2cc9dffc518b | """
Author: Robert J. Ward
Changelog:
-- Version: 1.0 Robert J. Ward
--- Initial Release
"""
import Eby_AssignmentStatus
import Eby_CountFlag
import Eby_PickAreaCopyPaste
import Eby_PickQty
import Eby_RouteStatus
import Eby_VerifyTrailersData
import time
import python_config
import mysql.connector
# Read connection settings once, up front.
config = python_config.read_db_config()
host = config.get('host')
user = config.get('user')
wcsdatabase = config.get('wcsdatabase')
database = config.get('database')
password = config.get('password')

doors = [1, 2]

# Poll loop: refresh all derived WCS tables roughly once per second.
while True:
    connection = None  # so the finally block is safe if connect() raises
    try:
        connection = mysql.connector.connect(
            host=host,
            user=user,
            database=database,
            password=password
        )
        cursor = connection.cursor()
        print(Eby_AssignmentStatus.update_assignment_status())
        print(Eby_CountFlag.count_flag())
        print(Eby_PickAreaCopyPaste.find_and_replace())
        print(Eby_PickQty.update_route_pick_qty())
        print(Eby_PickQty.update_verify_trailers_pick_qty())
        for door in doors:
            print(Eby_PickQty.update_early_late(door))
        print(Eby_RouteStatus.route_status())
        for door in doors:
            print(Eby_VerifyTrailersData.add_routes(door))
        print(Eby_VerifyTrailersData.freezer_cooler_picks())
        print(Eby_VerifyTrailersData.priority_update())
    except Exception as e:
        # Best-effort: log and retry on the next tick.
        print(e)
    finally:
        # The original called connection.close() unconditionally, which
        # raised NameError whenever connect() itself had failed.
        if connection is not None:
            connection.close()
    time.sleep(1)
|
19,072 | 55f28e0ef7aba7019f7d85998d349868a3ce4fc7 | import arcade
from game.director import Director
from menu.check_over_action import CheckOverAction
from menu.show_menu_action import ShowMenuAction
from menu.show_help_action import ShowHelpAction
from menu.start_game_action import StartGameAction
from menu.background import Background
from menu.button import Button
from menu.draw_actors_action import DrawActorsAction
from menu.label import Label
class Factory:
    """Builds the cast (actors) and script (event actions) for each scene."""

    def create_cast(self, scene):
        """Return the actors for `scene`, keyed by role name.

        Unknown scene names yield an empty cast.
        """
        if scene == "menu_scene":
            return {
                "background": [Background(arcade.color.WHITE)],
                "start_button": [Button("Start Game", 400, 350, 200, 50)],
                "help_button": [Button("Instructions", 400, 250, 200, 50)],
                "title_label": [Label("Menu Scene", 400, 550)],
            }
        if scene == "help_scene":
            return {
                "background": [Background(arcade.color.LIGHT_BLUE)],
                "back_button": [Button("Back to Menu", 400, 100, 200, 50)],
                "title_label": [Label("Help Scene", 400, 550)],
                "instructions": [Label("this is a game", 400, 300)],
            }
        if scene == "game_scene":
            return {
                "background": [Background(arcade.color.LIGHT_GRAY)],
                "title_label": [Label("Game Scene", 400, 550)],
                "instructions_label": [Label("Wait 5 seconds!", 400, 350)],
                "time_label": [Label("", 400, 300)],
            }
        return {}

    def create_script(self, scene):
        """Return the actions for `scene`, keyed by director event.

        Every known scene draws its actors; input/update handlers vary.
        """
        script = {}
        if scene == "menu_scene":
            script[Director.ON_MOUSE_RELEASE] = [ShowHelpAction(self), StartGameAction(self)]
        elif scene == "help_scene":
            script[Director.ON_MOUSE_RELEASE] = [ShowMenuAction(self)]
        elif scene == "game_scene":
            script[Director.ON_UPDATE] = [CheckOverAction(self)]
        if scene in ("menu_scene", "help_scene", "game_scene"):
            script[Director.ON_DRAW] = [DrawActorsAction()]
        return script
19,073 | c7f3d136f826e9d1c64d93f8a84c4284a64c7093 |
class video_effect:
    """A single MIDI-controlled video effect forwarded over OSC.

    Tracks a raw 0-127 MIDI value and remembers whether it changed since
    the last update(), so an OSC message is only produced on change.
    """

    def __init__(self, _name, _midiChannel, _oscAddress, mapFunc=None):
        self.name = _name
        self.midiChannel = _midiChannel
        self.oscAddress = _oscAddress
        self.currentValue = 63          # mid-range default
        self.isModified = False
        # Identity mapping unless a custom value-mapping function is given.
        self.mapFunc = mapFunc if mapFunc else (lambda x: x)

    def printResult(self):
        """Debug helper: print the effect name and its raw value."""
        print(self.name + " : " + str(self.currentValue))

    def setValue(self, newVal):
        """Accept `newVal` if it is a valid MIDI value (0..127 inclusive)."""
        if 0 <= newVal <= 127:
            self.currentValue = newVal
            self.isModified = True

    def getMappedValue(self):
        """Return the current value passed through the mapping function."""
        return self.mapFunc(self.currentValue)

    def update(self):
        """Return the mapped value once per change, else None.

        (Could later smooth the value sent over OSC with an easing method.)
        """
        if not self.isModified:
            return None
        self.isModified = False
        return self.mapFunc(self.currentValue)
|
19,074 | 9e3d2f0ff8c93c81271b636fe3752d86a6c7c830 | '''
Created on Nov 24, 2016
@author: zhangxing
'''
import configparser
import os
import mysql.connector as mysql
# Load MySQL connection settings from ../Config/mysql.cnf (path is resolved
# relative to the current working directory, not this file).
config = configparser.ConfigParser()
cnf_path = os.path.join(os.path.join(os.path.dirname(os.getcwd()), 'Config'), 'mysql.cnf')
config.read(cnf_path)
host = config.get('mysql', 'host')
user = config.get('mysql', 'user')
psd = config.get('mysql', 'password')
db = config.get('mysql', 'database')
port = config.getint('mysql', 'port')
charset = config.get('mysql', 'charset')  # read but not used below
def get_connnection():
    """Open and return a new MySQL connection using the module settings.

    On failure the error is printed and then re-raised.  (The original
    swallowed the error and fell through to `return conn`, which crashed
    with an unrelated NameError because `conn` was never bound.)
    """
    try:
        conn = mysql.connect(host=host, port=port, user=user,
                             password=psd, database=db)
    except Exception as e:
        print(e)
        raise  # propagate the real connection error to the caller
    return conn
def execute(sql):
    """Run one SQL statement on a fresh connection and commit it.

    Errors are printed and swallowed (matching the original best-effort
    behaviour); the cursor and connection are always closed.
    """
    conn = get_connnection()
    cur = None  # so cleanup is safe if conn.cursor() itself fails
    try:
        cur = conn.cursor()
        cur.execute(sql)
        conn.commit()
    except Exception as e:
        print(e)
    finally:
        # The original referenced `cur` unconditionally here, raising
        # NameError whenever conn.cursor() had failed before `cur` was bound.
        if cur is not None:
            cur.close()
        conn.close()
if __name__ == '__main__':
    # Earlier manual experiment kept for reference:
    # conn = get_connnection()
    # sql = 'insert into test values(1565,\'first haha\');'
    # cur = conn.cursor()
    # cur.execute(sql)
    # cur.execute("select * from test")
    # for i in cur:
    #     print(i)
    #
    # conn.commit()
    # cur.close()
    # conn.close()
    # Smoke test: insert one row through the execute() helper.
    sql = 'insert into test values(65,\'second\');'
    execute(sql)
|
19,075 | 976270fbe3a7612325c0f05d395303b3ed4bc7b3 | #!/usr/bin/python
"""
x = float(input("1st Number: "))
y = float(input("2nd Number: "))
z = float(input("3rd Number: "))
x = int(input("1st Number: "))
y = int(input("2nd Number: "))
z = int(input("3rd Number: "))
if x > y:
if x > z:
maximum = x
else:
maximum = z
else:
if y > z:
maximum = y
else:
maximum = z
print("The maximam value is: ", maximum)
"""
#!/usr/bin/python
# Read three numbers and report the smallest one.
x = float(input("1st Number: "))
y = float(input("2nd Number: "))
z = float(input("3rd Number: "))
# Same decision tree as the nested ifs, collapsed to conditional expressions.
if x < y:
    minimum = x if x < z else z
else:
    minimum = y if y < z else z
print("The minimum value is: ", minimum)
|
19,076 | 2e33c23fdb2c89b4994b43958dc249e41de8b119 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-28 07:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11: adds Wrong.times (default 1) and
    # re-declares verbose names / FK on_delete for existing fields.
    # Generated migrations are normally left untouched.

    dependencies = [
        ('english', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='wrong',
            name='times',
            field=models.IntegerField(default=1, verbose_name='次数'),
        ),
        migrations.AlterField(
            model_name='history',
            name='catagory',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='english.Catagory', verbose_name='分类'),
        ),
        migrations.AlterField(
            model_name='history',
            name='grade',
            field=models.CharField(max_length=200, verbose_name='分数'),
        ),
        migrations.AlterField(
            model_name='wrong',
            name='word',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='english.Word', verbose_name='单词'),
        ),
    ]
|
19,077 | 1b4d9b887c6aec80f1df486159b84736f57dc20a | import types
class ListTree:
    # Mixin: __str__ renders the instance's attributes plus the whole class
    # tree (recursing over __bases__), indented 4 per inheritance level.
    # `__visited` is name-mangled to _ListTree__visited, so it cannot clash
    # with client-class attributes; it guards against printing the same
    # class twice in diamond hierarchies.

    def __str__(self):
        self.__visited = {}  # reset the per-call "already printed" map
        return '<Instance of {0},address {1}:\n{2}{3}>'.format(
            self.__class__.__name__,
            id(self),
            self.__attrnames(self, 0),
            self.__listclass(self.__class__, 4))

    def __listclass(self, aClass, indent):
        # Recursively render aClass and all its bases at increasing indent.
        dots = '.' * indent
        if aClass in self.__visited:
            # Already printed somewhere above (shared/diamond base).
            return '\n{0}<Class {1}:, address {2}: (see Above)>\n'.format(
                dots,
                aClass.__name__,
                id(aClass))
        else:
            self.__visited[aClass] = True
            # Generator over the rendered superclasses, consumed by join().
            genabove = (self.__listclass(c, indent + 4) for c in aClass.__bases__)
            # for i in genabove: print "===> ", i
            # print type(genabove)
            return '\n{0}<Class {1}, address {2}: \n{3}{4}{5}>\n'.format(
                dots,
                aClass.__name__,
                id(aClass),
                self.__attrnames(aClass, indent),
                ''.join(genabove),
                dots)

    def __attrnames(self, obj, indent):
        # Render obj.__dict__ one attribute per line; dunder values elided.
        spaces = ' ' * (indent + 4)
        result = ''
        for attr in sorted(obj.__dict__):
            if attr.startswith('__') and attr.endswith('__'):
                result += spaces + '{0}=<>\n'.format(attr)
            else:
                result += spaces + '{0}={1}\n'.format(attr, getattr(obj, attr))
        return result
#=========================================================================
class ListInherited:
    # Mixin: __repr__ lists every attribute visible through dir(), i.e.
    # including inherited class attributes.  Dunder names are shown with
    # their values elided, and methods are skipped entirely.

    def __repr__(self):
        return '<Instance of %s, address %s:\n%s>' % (
            self.__class__.__name__,
            id(self),
            self.__attrnames())

    def __attrnames(self):
        lines = []
        for attr in dir(self):
            if attr.startswith('__') and attr.endswith('__'):
                # Hide dunder values; just show the name.
                lines.append('\t name %s=< >\n' % attr)
            elif not isinstance(getattr(self, attr), types.MethodType):
                lines.append('\t name %s=%s\n' % (attr, getattr(self, attr)))
        return ''.join(lines)
#=========================================================================
class ListInstance:
    # Mixin: __str__ lists only the instance's own attributes (its
    # __dict__), sorted by name — inherited class attributes are omitted.

    def __str__(self):
        return '<Instance of %s, address %s:\n%s>' % (
            self.__class__.__name__,
            id(self),
            self.__attrnames())

    def __attrnames(self):
        pairs = ('\tname %s = %s\n' % (key, self.__dict__[key])
                 for key in sorted(self.__dict__))
        return ''.join(pairs)
#=========================================================================
if __name__ == '__main__':
    # Demo: build a small hierarchy and print it through one of the lister
    # mixins (swap the base of Sub to compare the three variants).
    class Super:
        superman = "LHM"

        def __init__(self):
            self.data1 = "Katiana"
            self.data2 = "Lorena"

    # class Sub(Super, ListInstance):
    # class Sub(Super, ListInherited):
    class Sub(Super, ListTree):
        def __init__(self):
            Super.__init__(self)
            self.data3 = "Salinas"
            self.data4 = "Angeles"

        def spam(self): pass

    x = Sub()
    x.name = 'Luis'
    x.country = 'Peru'
    # print(x) works in both Python 2 and 3; the original bare
    # `print x` statement was Python-2-only syntax.
    print(x)
    # print("-" * 30)
    # print(dir(x))
|
19,078 | ce144cae3c6267f155664c5ebd33bffeacd77cef | #!/usr/bin/env python3
import requests
import os
import datetime
import time
import json
import plotly
import plotly.graph_objs as go
import pandas as pd
url = "https://www.st-andrews.ac.uk/sport/"
filename = "./data.json"
def main():
    """Poll the sports-centre page every 5 minutes and log occupancy;
    regenerate the plot roughly once an hour (every 12th sample).

    Scraping and plotting are skipped during "night mode" (22:30-06:30).
    """
    initialise(filename)
    count = 0
    while True:
        # Current wall-clock time as an HHMM integer (e.g. 1435).
        hour = int(str(datetime.datetime.now()).split(" ")[1][:6].replace(":", ""))
        night = not (630 < hour < 2230)
        try:
            read_web_and_add(night)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still terminate the loop.
            print("reading from web failed. website might be down")
        if count % 12 == 0:
            update_plot(night)
            count = 0
        time.sleep(300)
        count += 1
def read_web_and_add(night):
    """Scrape the sports-centre page and append the current occupancy
    percentage (with a human-readable timestamp) to the data file.

    Does nothing when `night` is True.
    """
    if night:
        return
    hr_timestamp = str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    r = requests.get(url)
    for line in r.text.split("\n"):
        if "Occupancy" in line:
            # Line looks like "...Occupancy: 42%<..." — take the number and
            # drop the trailing '%' with [:-1].
            percentage = int(line.split("Occupancy: ")[1].split("<")[0][:-1])
            entry = {'hr_t': hr_timestamp, 'p': percentage}
            add_new_data(filename, entry)
def initialise(filename):
    """Create `filename` as an empty UTF-8 file unless it already exists."""
    if not os.path.isfile(filename):
        with open(filename, mode='w', encoding='utf-8') as f:
            f.write("")
def add_new_data(filename, entry):
    """Append `entry` to `filename` as one JSON line (keys sorted)."""
    with open(filename, mode='a+', encoding='utf-8') as f:
        f.write(json.dumps(entry, sort_keys=True) + "\n")
def update_plot(night):
    """Rebuild index.html with the full occupancy history as an offline
    Plotly time-series with range-selector buttons and a range slider.

    Does nothing when `night` is True.
    """
    if night:
        return
    # Data file is one JSON object per line ({'hr_t': ..., 'p': ...}).
    df = pd.read_json(filename, lines=True)
    trace = go.Scatter(x=list(df.hr_t),
                       y=list(df.p))
    data = [trace]
    layout = dict(
        title='StA Gym occupancy (updated every hour)',
        xaxis=dict(
            # Quick-zoom buttons: 4h / 1d / 1w / 1m / 6m / 1y / all.
            rangeselector=dict(
                buttons=list([
                    dict(count=4,
                         label='4h',
                         step='hour',
                         stepmode='backward'),
                    dict(count=1,
                         label='1d',
                         step='day',
                         stepmode='backward'),
                    dict(count=7,
                         label='1w',
                         step='day',
                         stepmode='backward'),
                    dict(count=1,
                         label='1m',
                         step='month',
                         stepmode='backward'),
                    dict(count=6,
                         label='6m',
                         step='month',
                         stepmode='backward'),
                    dict(count=1,
                         label='1y',
                         step='year',
                         stepmode='backward'),
                    dict(step='all')
                ])
            ),
            rangeslider=dict(
                visible=True
            ),
            type='date'
        )
    )
    fig = dict(data=data, layout=layout)
    # Writes index.html next to the script without opening a browser.
    plotly.offline.plot(fig, filename='index.html', auto_open=False)
if __name__ == "__main__":
    # Run the polling loop forever when invoked as a script.
    main()
|
19,079 | 4d69e775b5473b97d7a36137b5e203934ac74264 | #!usr/bin/python
import os
# Ask playerctl for Spotify's playback state and print the matching
# status-bar glyph for each of playing / paused / stopped.
cmd = os.popen('playerctl -p spotify status').read().split('\n')[0]
if cmd == "Playing":
    print('')
elif cmd == 'Paused':
    print('')
else:
    print('')
|
19,080 | bb0149f5abc60a85898161ca53a15c20b6f76f77 | i=0
# Read five numbers from the user into a list and print it.
# (The original shadowed the `dict` builtin, reused the loop counter as the
# input value — so a large entry ended the loop early — joined a list of
# ints with ' '.join (TypeError) and called eval() on a list (TypeError).
# Rewritten to do what was evidently intended.)
values = list()
while i < 5:
    values.append(int(input("Enter elements of the dictionary: ")))
    i += 1
print(values)
|
19,081 | d97fa6ce8f18f01e4cce040c49655d65c119e315 | import psycopg2
# Load "key: value" pairs (one per line) from the settings file.
db_settings = {}
try:
    with open("resources/db_settings", "r") as settings:
        for line in settings:
            (key, val) = line.split()
            db_settings[key] = str(val)
except ValueError:
    # A malformed line (not exactly two whitespace-separated fields).
    print("Please provide values in resources/db_settings")
    exit(100)

# NOTE: the keys keep their trailing colon ("host:", etc.) exactly as
# written in the settings file.
conn = psycopg2.connect(host=db_settings['host:'], database=db_settings['database:'], user=db_settings['user:'],
                        password=db_settings['password:'])
cur = conn.cursor()
def store_password(username, email, password, website):
    """Insert a credential row, PGP-encrypting the password with the
    public key from resources/pubkey.asc."""
    # `with` closes the key file; the original leaked the handle.
    with open("resources/pubkey.asc", "r") as key_file:
        pubkey = key_file.read()
    data = (username, email, password, pubkey, website)
    SQL = "INSERT INTO password (username,email,pw,website) VALUES (%s,%s,pgp_pub_encrypt(%s,dearmor(%s)),%s)"
    cur.execute(SQL, data)
    conn.commit()
def delete_password(username, email, website):
    """Delete the credential row matching username, email and website."""
    SQL = "DELETE FROM password where username = %s AND email = %s and website = %s"
    cur.execute(SQL, (username, email, website))
    conn.commit()
def find_by_mail(pw: str, email: str):
    """Print every credential stored for `email`, decrypting passwords
    with the private key unlocked by passphrase `pw`."""
    # `with` closes the key file; the original leaked the handle.
    with open("resources/privkey.asc", 'r') as key_file:
        private_key = key_file.read()
    data = (private_key, pw, email)
    # Renamed from `format`, which shadowed the builtin.
    labels = ('Username: ', 'Email: ', 'Password: ', 'App/Site name: ')
    SQL = "SELECT username,email,pgp_pub_decrypt(pw :: bytea,dearmor(%s),%s),website FROM password WHERE email = %s"
    cur.execute(SQL, data)
    result = cur.fetchall()
    if not result:
        print("No entry found")
    else:
        for row in result:
            print()
            for label, value in zip(labels, row):
                print(label + value)
def find_by_website(pw: str, website: str):
    """Print every credential stored for `website`, decrypting passwords
    with the private key unlocked by passphrase `pw`."""
    # `with` closes the key file; the original leaked the handle.
    with open("resources/privkey.asc", 'r') as key_file:
        private_key = key_file.read()
    data = (private_key, pw, website)
    # Renamed from `format`, which shadowed the builtin.
    labels = ('Username: ', 'Email: ', 'Password: ', 'App/Site name: ')
    SQL = "SELECT username,email,pgp_pub_decrypt(pw :: bytea,dearmor(%s),%s),website FROM password WHERE website = %s"
    cur.execute(SQL, data)
    result = cur.fetchall()
    if not result:
        print("No entry found")
    else:
        for row in result:
            print()
            for label, value in zip(labels, row):
                print(label + value)
def find_by_website_and_email(pw: str, website: str, email: str):
    """Print the single credential for (website, email), or a not-found
    message.  Decrypts with the private key unlocked by passphrase `pw`."""
    # `with` closes the key file; the original leaked the handle.
    with open("resources/privkey.asc", 'r') as key_file:
        private_key = key_file.read()
    data = (private_key, pw, website, email)
    SQL = "SELECT username,email,pgp_pub_decrypt(pw :: bytea,dearmor(%s),%s),website FROM password WHERE website = %s AND email = %s"
    cur.execute(SQL, data)
    result = cur.fetchone()
    if result is None:
        print("No entry found")
    else:
        print("Username: " + result[0])
        print("Password: " + result[2])
|
19,082 | ffd3315cbdf66f8495fed2dc2003873eefca093a | """kadchan.ka URL Configuration"""
from django.conf.urls import url
from . import views
urlpatterns = [
    # NOTE: an unanchored r'^' matches *every* path, so with the original
    # ordering no later pattern (e.g. /info) was ever reachable; the home
    # route is anchored with r'^$' so it only matches the root URL.
    url(r'^$', views.Info.as_view(), name="home"),  # view information of ka (Kiss Anime Video Downloader)
    url(r'^info', views.Info.as_view(), name="info"),  # view information of ka (Kiss Anime Video Downloader)
    # url(r'^downloads', None, name="downloads"), # view current downloads and statuses
    # url(r'^updates', None, name="updates"), # view updates of future downloads and new episodes and series
    # url(r'^search', None, name="search"), # search without advertisements
    # url(r'^cron', None, name="cron"), # run tasks via a cron task or task scheduling application
    # url(r'^api', None, name="api"), # view API document
]
19,083 | 8801b96df375fada4d565c910611bc223dc03162 | # Now that you have completed your initial analysis, design a Flask api based on the queries that you have just developed.
# Use FLASK to create your routes.
# Hints
# You will need to join the station and measurement tables for some of the analysis queries.
# Use Flask jsonify to convert your api data into a valid json response object.
# Dependencies
from flask import Flask, jsonify
from sqlalchemy import create_engine, func
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
import datetime as dt
import pandas as pd
from dateutil import parser
import matplotlib.pyplot as plt
# Load engine created in database_engineering
engine = create_engine("sqlite:///hawaii.sqlite")
# Reflect the existing database tables onto automapped ORM classes.
Base = automap_base()
Base.prepare(engine, reflect=True)
# Save references to each mapped table.
prcp = Base.classes.prcp
tobs = Base.classes.tobs
stations = Base.classes.stations
# Session shared by every route below.
session = Session(engine)
# Create app
app = Flask(__name__)
# Lower bound used by the "last year" queries (exclusive).
first_date = '2016-08-23'
###########################################################################################
# Routes
###########################################################################################
#Home route
@app.route("/")
def home():
    """List all available API routes."""
    print("Server received request for 'Home' page...")
    routes = [
        "Welcome to my 'Home' page!",
        "Available Routes:",
        "/api/v1.0/precipitation",
        "/api/v1.0/stations",
        "/api/v1.0/tobs",
        "/api/v1.0/start-date",
        "/api/v1.0/start-date/end-date",
    ]
    # Same markup as the original concatenated f-strings.
    return "<br/>".join(routes) + "<br/>"
# /api/v1.0/precipitation
# Query for the dates and precipitation observations from the last year.
# Convert the query results to a Dictionary using date as the key and prcp as the value.
# Return the json representation of your dictionary.
@app.route("/api/v1.0/precipitation")
def precip():
    """Return {date: prcp} for every observation after `first_date`."""
    print("Server received request for 'precipitation' page...")
    # Select only the date and prcp values.
    prcp_vals = session.query(prcp.date, prcp.prcp).\
        filter(prcp.date > first_date).\
        order_by(prcp.date).all()
    # The query already yields (date, prcp) pairs, so build the dict
    # directly.  The original round-tripped through a DataFrame, which
    # crashed on an empty result (no .date attribute).
    return jsonify(dict(prcp_vals))
# /api/v1.0/stations
# Return a json list of stations from the dataset.
@app.route("/api/v1.0/stations")
def station():
    """Return all stations and their metadata as a JSON list."""
    print("Server received request for 'stations' page...")
    # Query station info and convert to a list of record dicts.
    station_vals = session.query(stations.station,
                                 stations.name,
                                 stations.latitude,
                                 stations.longitude,
                                 stations.elevation).all()
    records = pd.DataFrame(station_vals).to_dict('records')
    return jsonify(records)
# /api/v1.0/tobs
# Return a json list of Temperature Observations (tobs) for the previous year
@app.route("/api/v1.0/tobs")
def temps():
    """Return all temperature observations from the last year as JSON."""
    print("Server received request for 'tobs' page...")
    # Query temperature observations and convert to record dicts.
    rows = session.query(tobs.station,
                         tobs.date,
                         tobs.tobs).\
        filter(tobs.date > first_date).\
        all()
    return jsonify(pd.DataFrame(rows).to_dict('records'))
# /api/v1.0/<start> and /api/v1.0/<start>/<end>
# Return a json list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.
# When given the start only, calculate TMIN, TAVG, and TMAX for all dates greater than and equal to the start date.
# When given the start and the end date, calculate the TMIN, TAVG, and TMAX for dates between the start and end date inclusive.
#min_avg_max1 route
@app.route("/api/v1.0/<start>")
def min_avg_max1(start):
    """Return min/avg/max temperature for all dates >= `start` as JSON.

    `start` must be a yyyy-mm-dd string inside the dataset's range;
    anything else yields a 404 with an explanatory message.

    (The original placed this docstring after the print call, making it a
    no-op string, and shadowed the min/avg/max builtins.)
    """
    print("Server received request for weather since 'start' page...")
    # Plain string comparison works because dates are zero-padded ISO format.
    if '2016-08-24' < start < '2017-08-18':
        temp_vals = session.query(func.min(tobs.tobs), func.avg(tobs.tobs), func.max(tobs.tobs)).\
            filter(tobs.date >= start).all()
        tmin, tavg, tmax = temp_vals[0]
        return jsonify({'min': tmin, 'avg': tavg, 'max': tmax})
    return "error: start date must be between 2016-08-24 and 2017-08-18, in the format yyyy-mm-dd", 404
#min_avg_max2 route
@app.route("/api/v1.0/<start>/<end>")
def calc_temps2(start, end):
    """Return min/avg/max temperature for dates in [start, end] as JSON.

    Both dates must be yyyy-mm-dd strings inside the dataset's range with
    start < end; anything else yields a 404 with an explanatory message.

    (The original placed this docstring after the print call, making it a
    no-op string, and shadowed the min/avg/max builtins.)
    """
    print("Server received request for weather between 'start and end' page...")
    # Plain string comparison works because dates are zero-padded ISO format.
    if start > '2016-08-24' and end < '2017-08-18' and start < end:
        temp_vals = session.query(func.min(tobs.tobs), func.avg(tobs.tobs), func.max(tobs.tobs)).\
            filter(tobs.date >= start).filter(tobs.date <= end).all()
        tmin, tavg, tmax = temp_vals[0]
        return jsonify({'min': tmin, 'avg': tavg, 'max': tmax})
    return "error: start and end dates must be between 2016-08-24 and 2017-08-18, in the format yyyy-mm-dd", 404
# Allows you to run the app through python
if __name__ == "__main__":
    # debug=True enables the reloader/debugger; disable in production.
    app.run(debug=True)
19,084 | 9d2386578509c2167545a589ebc5c77eed84e155 | from django.test import TestCase
from blog.models import Post
class PostModelTests(TestCase):
    """Unit tests for the blog `Post` model."""

    def test_is_empty(self):
        """A fresh test database contains no Post records."""
        # Grab the current Post queryset and confirm the count is 0.
        saved_posts = Post.objects.all()
        self.assertEqual(saved_posts.count(), 0)

    def test_is_count_one(self):
        """Saving one record leaves exactly one record in the table."""
        post = Post(title='test_title', text='test_text')
        post.save()
        saved_posts = Post.objects.all()
        self.assertEqual(saved_posts.count(), 1)

    def test_saving_and_retrieving_post(self):
        """A saved Post can be read back with the same field values."""
        post = Post()
        title = 'test_title_to_retrieve'
        text = 'test_text_to_retrieve'
        post.title = title
        post.text = text
        post.save()
        saved_posts = Post.objects.all()
        actual_post = saved_posts[0]
        self.assertEqual(actual_post.title, title)
        self.assertEqual(actual_post.text, text)
19,085 | bf666ffe9cc1c17df649ffe1913c4735e26de5d1 | import numpy as np
import csv
import cv2 as cv
# Dataset descriptor for UBFC-rPPG (dataset 2).
name = "UBFC2"
signalGT = 'BVP'  # ground-truth signal type
numlevel = 2  # depth of the filesystem collecting video and BVP files
numSubjects = 26  # number of subjects
video_EXT = 'avi'  # extension of the video files
frameRate = 30  # video frame rate
VIDEO_SUBSTRING = ''  # substring contained in the filename
SIG_EXT = 'txt'  # extension of the ground truth file
SIG_SUBSTRING = ''  # substring contained in the filename
SIG_SampleRate = 30  # sample rate of the BVP files
skinThresh = [40, 60]  # thresholds for skin detection
def readSigfile(filename):
    """Load a UBFC ground-truth file.

    The file holds three space-separated rows:
    row 0 = BVP trace, row 1 = heart rate, row 2 = timestamps.

    Returns (data, hr): the BVP trace and heart-rate values as float64
    numpy arrays.  Timestamps are parsed but not returned, matching the
    original behaviour.
    """
    with open(filename, 'r') as f:
        lines = f.readlines()

    def _row(line):
        # Split on single spaces, drop the empty strings produced by runs
        # of spaces, and convert to float64 (the original repeated this
        # parsing block three times).
        return np.array([tok for tok in line.split(' ') if tok != '']).astype(np.float64)

    data = _row(lines[0])   # BVP trace
    hr = _row(lines[1])     # ground-truth heart rate
    time = _row(lines[2])   # timestamps (parsed for parity; unused)
    return data, hr
MIN_FRAME = 50
def read_video(filename, data):
    """Overlay a scrolling BVP graph under each frame of `filename` and
    write the combined frames to 'BPV_show.avi' plus per-frame JPEGs.

    `data` must provide at least one BVP sample per video frame — the loop
    indexes data[count] for every frame read.  # assumption from the code
    """
    cap = cv.VideoCapture(filename)
    success = 1
    graph_height = 200
    graph_width = 0
    count = 0
    value = []  # sliding window of the most recent BVP samples
    fourcc = cv.VideoWriter_fourcc('X', 'V', 'I', 'D')
    # NOTE(review): output size (640, 680) appears to assume 640x480 input
    # frames plus the 200px graph strip — confirm against the source video.
    out = cv.VideoWriter('BPV_show.avi', fourcc, 30.0, (640, 680))
    while success:
        success, image = cap.read()
        if success is False:
            break
        view = np.array(image)
        print(view.shape)  # debug output
        value.append(data[count])
        graph_width = int(view.shape[1])
        # Keep only the most recent samples so the graph scrolls.
        if len(value) > MAX_VALUES_TO_GRAPH:
            value.pop(0)
        graph = draw_graph(value, graph_width, graph_height)
        # Stack the graph strip below the video frame.
        new_image = np.vstack((view, graph))
        cv.imwrite('./frame/UBFC_{}.jpg'.format(count), new_image)
        print(new_image.shape)  # debug output
        out.write(new_image)
        count = count + 1
    cap.release()
    out.release()
MAX_VALUES_TO_GRAPH = 50
def draw_graph(signal_values, graph_width, graph_height):
    """Render *signal_values* as a green polyline on a black canvas.

    The x axis is scaled so MAX_VALUES_TO_GRAPH samples span the full
    width; the y axis is scaled so the sample with the largest absolute
    value fills half the canvas height (centred on the middle row).

    :return: uint8 BGR image of shape (graph_height, graph_width, 3)
    """
    canvas = np.zeros((graph_height, graph_width, 3), np.uint8)
    x_scale = float(graph_width) / MAX_VALUES_TO_GRAPH
    # Automatically rescale vertically based on the largest absolute value.
    y_scale = (float(graph_height) / 2.0) / get_max_abs(signal_values)
    y_mid = graph_height / 2
    points = [
        (int(idx * x_scale), int(y_mid + val * y_scale))
        for idx, val in enumerate(signal_values)
    ]
    for start, end in zip(points, points[1:]):
        cv.line(canvas, start, end, color=(0, 255, 0), thickness=2)
    return canvas
def get_max_abs(lst):
    """Return the largest absolute value found in *lst* (non-empty)."""
    return max(abs(value) for value in lst)
BVP_filename = "/home/zq/video_process/dataset/UBFC/dataset_2/subject1/ground_truth.txt"
data,hr = readSigfile(BVP_filename)
print(data,hr)
Video_filename = '/home/zq/video_process/dataset/UBFC/dataset_2/subject1/vid.avi'
def channel_augment(file):
    """Boost the green channel of a video by 1.4x and write 'BPV_show.avi'.

    :param file: path to the input video file

    The blue and red channels are left untouched (the original multiplied
    them by 1, a no-op).
    """
    cap = cv.VideoCapture(file)
    fourcc = cv.VideoWriter_fourcc('X', 'V', 'I', 'D')
    # NOTE(review): frame size is hard-coded to 640x480; frames of another
    # size are silently dropped by VideoWriter -- confirm the input format.
    out = cv.VideoWriter('BPV_show.avi', fourcc, 30.0, (640, 480))
    while True:
        success, image = cap.read()
        if not success:
            break
        image = np.array(image)
        # Scale the green channel; clip before the implicit cast back to
        # uint8 so values above 255 saturate instead of wrapping around
        # (the original assignment wrapped, corrupting bright pixels).
        image[:, :, 1] = np.clip(image[:, :, 1] * 1.4, 0, 255)
        out.write(image)
    cap.release()
    out.release()
#channel_augment(Video_filename)
|
19,086 | e4db7f343a14d044aa469c5a3167ae4bb64eb813 | from logic.v1.models import Document, db
class Sample(Document):
    """
    Models are defined using Flask-Mongoengine. For all options, see
    flask-mongoengine.readthedocs.org or mongoengine.readthedocs.org
    *Note*: If parameters here have required=True and args are auto-generated
    using model.fields_to_args(), the API will require that parameter in the
    API call.
    To ameliorate the API issue, you have one of two options:
    - override options:
        model.fields_to_args(override={'required': False})
    - override the argument:
        model.fields_to_args(required=Arg(str, required=False))
    """
    # Closed set of accepted values for ``option``.
    options = ['a', 'b', 'c', 'd']
    # Optional string restricted to ``options``.
    option = db.StringField(choices=options)
    # Integer the auto-generated API will require (see class note above).
    required = db.IntField(default=10, required=True)
19,087 | 88a2341c0da3901c41f55994b3b2ddb872580d2f | #Autor: Guilherme Cassoli de Souza
# Simple age-gate demo: reads an age from stdin and prints whether the
# person is of legal age ("maior") or a minor ("menor"), in Portuguese.
# NOTE: int() raises ValueError on non-numeric input -- no validation here.
age = int(input("How old are you?"))
if age >= 18:
    print ("maior")
else:
    print ("menor")
19,088 | c364350fe282465cbbd040f85d86b82c5bac03bb | '''
Created on Nov 3, 2015
@author: darkbk
'''
import logging
from beecell.logger.helper import LoggerHelper
from signal import SIGHUP, SIGABRT, SIGILL, SIGINT, SIGSEGV, SIGTERM, SIGQUIT
from signal import signal
from datetime import timedelta
from socket import gethostname
from celery.utils.term import colored
from celery.utils.log import ColorFormatter
from celery.app.log import TaskFormatter
from celery import Celery
from celery.utils.log import get_task_logger
from celery._state import get_current_task
import celery.signals
from beehive.common.apimanager import ApiManager
class ExtTaskFormatter(ColorFormatter):
    """ColorFormatter that injects ``task_id``/``task_name`` into records.

    When no Celery task is active both fields fall back to ``u'xxx'`` so
    log format strings referencing them never fail.
    """
    COLORS = colored().names
    # Per-level terminal colors used by the parent ColorFormatter.
    colors = {'DEBUG': COLORS['blue'], 'WARNING': COLORS['yellow'],
              'ERROR': COLORS['red'], 'CRITICAL': COLORS['magenta']}
    def format(self, record):
        # Resolve the task currently executing in this worker, if any.
        task = get_current_task()
        if task and task.request:
            # Use only the last component of the dotted task name.
            name = task.name.split(u'.')[-1]
            record.__dict__.update(task_id=task.request.id,
                                   task_name=name)
        else:
            record.__dict__.update(task_id=u'xxx',
                                   task_name=u'xxx')
        #record.__dict__.setdefault('task_name', '???')
        #record.__dict__.setdefault('task_id', '???')
        return ColorFormatter.format(self, record)
logger = get_task_logger(__name__)
logger_level = logging.DEBUG
# Celery application used to execute asynchronous tasks.
task_manager = Celery('tasks')
# Celery application used only to schedule (beat) periodic entries.
task_scheduler = Celery('scheduler')
# setup logging
# Connecting any handler to setup_logging disables Celery's default logging
# configuration; logging is configured manually by the start_* functions
# below.  (Python 2 print statement -- this module is Python 2 only.)
@celery.signals.setup_logging.connect
def on_celery_setup_logging(**args):
    print args
#@celery.signals.after_setup_logger.connect
#def on_celery_after_setup_logger(**args):
#    print args
def configure_task_manager(broker_url, result_backend, tasks=[],
                           expire=60*60*24, logger_file=None):
    """Configure the module-level ``task_manager`` Celery application.

    :param broker_url: url of the broker
    :param result_backend: url of the result backend
    :param tasks: list of tasks module. Ex.
        ['beehive.module.scheduler.tasks',
         'beehive.module.service.plugins.filesharing',]
    :param expire: redis result expiry in seconds (default: 24 hours)
    :param logger_file: accepted for interface compatibility; not used here
    :return: the configured ``task_manager``

    NOTE(review): ``tasks=[]`` is a mutable default argument; it is never
    mutated here so this is harmless, but prefer ``tasks=None``.
    """
    task_manager.conf.update(
        BROKER_URL=broker_url,
        CELERY_RESULT_BACKEND=result_backend,
        CELERY_REDIS_RESULT_KEY_PREFIX='celery-task-meta2-',
        CELERY_REDIS_RESULT_EXPIRES=expire,
        CELERY_TASK_RESULT_EXPIRES=600,
        CELERY_TASK_SERIALIZER='json',
        CELERY_ACCEPT_CONTENT=['json'],  # Ignore other content
        CELERY_RESULT_SERIALIZER='json',
        CELERY_TIMEZONE='Europe/Rome',
        CELERY_ENABLE_UTC=True,
        CELERY_IMPORTS=tasks,
        CELERY_DISABLE_RATE_LIMITS = True,
        CELERY_TRACK_STARTED=True,
        CELERY_CHORD_PROPAGATES=True,
        CELERYD_TASK_TIME_LIMIT=7200,
        CELERYD_TASK_SOFT_TIME_LIMIT=7200,
        #CELERY_SEND_TASK_SENT_EVENT=True,
        #CELERY_SEND_EVENTS=True,
        #CELERY_EVENT_SERIALIZER='json',
        #CELERYD_LOG_FORMAT=u'[%(asctime)s: %(levelname)s/%(processName)s] %(name)s:%(funcName)s:%(lineno)d - %(message)s',
        CELERYD_TASK_LOG_FORMAT=u'[%(asctime)s: %(levelname)s/%(processName)s] [%(task_name)s:%(task_id)s] %(name)s:%(funcName)s:%(lineno)d - %(message)s'
        #worker_task_log_format=u'[%(asctime)s: %(levelname)s/%(processName)s] [%(task_name)s:%(task_id)s] %(name)s:%(funcName)s:%(lineno)d - %(message)s'
    )
    return task_manager
def configure_task_scheduler(broker_url, schedule_backend, tasks=[]):
    """Configure the module-level ``task_scheduler`` Celery application.

    :param broker_url: url of the broker
    :param schedule_backend: url of the schedule backend where schedule entries
        are stored
    :param tasks: list of tasks module. Ex.
        ['beehive.module.scheduler.tasks',
         'beehive.module.service.plugins.filesharing',]
        NOTE(review): currently unused (CELERY_IMPORTS is commented out).
    :return: the configured ``task_scheduler``
    """
    task_scheduler.conf.update(
        BROKER_URL=broker_url,
        CELERY_SCHEDULE_BACKEND=schedule_backend,
        CELERY_REDIS_SCHEDULER_KEY_PREFIX='celery-schedule',
        CELERY_TASK_SERIALIZER='json',
        CELERY_ACCEPT_CONTENT=['json'],  # Ignore other content
        CELERY_RESULT_SERIALIZER='json',
        CELERY_TIMEZONE='Europe/Rome',
        CELERY_ENABLE_UTC=True,
        #CELERY_IMPORTS=tasks,
        # Static beat schedule: run 'tasks.test' every 10 minutes.
        CELERYBEAT_SCHEDULE = {
            'test-every-600-seconds': {
                'task': 'tasks.test',
                'schedule': timedelta(seconds=600),
                'args': ()
            },
        }
    )
    return task_scheduler
def start_task_manager(params):
    """Start celery task manager (blocking: runs the worker main loop).

    :param params: configuration dict; keys read here: 'api_id',
        'api_package', 'api_env', 'broker_url', 'result_backend',
        'task_module', 'expire'.
    """
    logname = "%s.task" % params['api_id']
    # NOTE(review): the first assignment of ``frmt`` is dead code -- it is
    # immediately overwritten by the task-aware format below.
    frmt = u'[%(asctime)s: %(levelname)s/%(processName)s] ' \
           u'%(name)s:%(funcName)s:%(lineno)d - %(message)s'
    frmt = u'[%(asctime)s: %(levelname)s/%(task_name)s:%(task_id)s] '\
           u'%(name)s:%(funcName)s:%(lineno)d - %(message)s'
    log_path = u'/var/log/%s/%s' % (params[u'api_package'],
                                    params[u'api_env'])
    run_path = u'/var/run/%s/%s' % (params[u'api_package'],
                                    params[u'api_env'])
    #loggers = [logging.getLogger('beehive.common.event')]
    #LoggerHelper.rotatingfile_handler(loggers, logger_level,
    #                                  '%s/%s.event.log' % (log_path, logname),
    #                                  frmt=frmt)
    # base logging
    loggers = [
        logging.getLogger(u'beehive'),
        logging.getLogger(u'beehive.db'),
        logging.getLogger(u'beecell'),
        logging.getLogger(u'beedrones'),
        logging.getLogger(u'celery'),
        logging.getLogger(u'proxmoxer'),
        logging.getLogger(u'requests')]
    LoggerHelper.rotatingfile_handler(loggers, logger_level,
                                      u'%s/%s.log' % (log_path, logname),
                                      frmt=frmt, formatter=ExtTaskFormatter)
    # transaction and db logging
    loggers = [
        logging.getLogger('beehive.util.data'),
        logging.getLogger('sqlalchemy.engine'),
        logging.getLogger('sqlalchemy.pool')]
    LoggerHelper.rotatingfile_handler(loggers, logger_level,
                                      '%s/%s.db.log' % (log_path, logname))
    # performance logging
    loggers = [
        logging.getLogger('beecell.perf')]
    LoggerHelper.rotatingfile_handler(loggers, logger_level,
                                      '%s/%s.watch' % (log_path, params[u'api_id']),
                                      frmt='%(asctime)s - %(message)s')
    # Build the api manager and attach it to the celery app so tasks can use it.
    api_manager = ApiManager(params, hostname=gethostname())
    api_manager.configure()
    api_manager.register_modules()
    #worker = ProcessEventConsumerRedis(api_manager)
    #from beehive.module.tasks import task_manager
    task_manager.api_manager = api_manager
    logger_file = '%s/%s.log' % (log_path, logname)
    configure_task_manager(params['broker_url'], params['result_backend'],
                           tasks=params['task_module'], expire=params['expire'],
                           logger_file=logger_file)
    # Worker command line: gevent pool, purge pending messages at startup.
    argv = [u'',
            u'--loglevel=%s' % logging.getLevelName(logger_level),
            #u'--pool=prefork',
            u'--pool=gevent',
            u'--purge',
            #'--time-limit=600',
            #'--soft-time-limit=300',
            u'--concurrency=100',
            u'--maxtasksperchild=100',
            #u'--autoscale=100,10',
            u'--logfile=%s' % logger_file,
            u'--pidfile=%s/%s.task.pid' % (run_path, logname)]
    # NOTE(review): ``terminate`` is defined but never registered -- the
    # signal() loop below is commented out, so it is currently dead code.
    def terminate(*args):
        #run_command(['celery', 'multi', 'stopwait', 'worker1',
        #             '--pidfile="run/celery-%n.pid"'])
        task_manager.stop()
    #for sig in (SIGHUP, SIGABRT, SIGILL, SIGINT, SIGSEGV, SIGTERM, SIGQUIT):
    #    signal(sig, terminate)
    task_manager.worker_main(argv)
def start_scheduler(params):
    """Start celery beat scheduler (blocking: runs the beat loop).

    :param params: configuration dict; keys read here: 'api_id',
        'api_package', 'api_env', 'broker_url', 'result_backend'.
    """
    log_path = u'/var/log/%s/%s' % (params[u'api_package'],
                                    params[u'api_env'])
    run_path = u'/var/run/%s/%s' % (params[u'api_package'],
                                    params[u'api_env'])
    logger_file = u'%s/%s.scheduler.log' % (log_path, params[u'api_id'])
    loggers = [
        logging.getLogger(u'beehive'),
        logging.getLogger(u'beecell'),
        logging.getLogger(u'beedrones'),
        logging.getLogger(u'celery'),
    ]
    LoggerHelper.rotatingfile_handler(loggers, logger_level,
                                      logger_file,
                                      formatter=ExtTaskFormatter)
    api_manager = ApiManager(params)
    api_manager.configure()
    api_manager.register_modules()
    #worker = ProcessEventConsumerRedis(api_manager)
    #from beehive.module.tasks import task_manager
    task_scheduler.api_manager = api_manager
    configure_task_scheduler(params['broker_url'], params['result_backend'])
    #from beehive.module.scheduler.scheduler import RedisScheduler
    from beehive.module.scheduler.redis_scheduler import RedisScheduler
    beat = task_scheduler.Beat(loglevel=logging.getLevelName(logger_level),
                               logfile='%s/%s.scheduler.log' % (log_path,
                                                                params['api_id']),
                               pidfile='%s/%s.scheduler.pid' % (run_path,
                                                                params['api_id']),
                               scheduler_cls=RedisScheduler)
    # NOTE(review): ``terminate`` is a no-op (the stop call is commented
    # out), so the registered signal handlers currently do nothing.
    def terminate(*args):
        #run_command(['celery', 'multi', 'stopwait', 'worker1',
        #             '--pidfile="run/celery-%n.pid"'])
        #beat.Service.stop()
        pass
    for sig in (SIGHUP, SIGABRT, SIGILL, SIGINT, SIGSEGV, SIGTERM, SIGQUIT):
        signal(sig, terminate)
    beat.run()
19,089 | 8269abe0f884c411f0610547698cef46a34bf2d2 | # -*- coding: utf-8 -*-
# import os
# print [d for d in os.listdir('.')]
# Python 2 scratch demo of comprehensions and generator expressions.
d = {'x':'A','y':'B','z':'C'}
# Build "key=value" strings from the dict (dict.iteritems is Python 2 only).
print [k+'='+v for k,v in d.iteritems()]
# List comprehensions quickly derive one list from another with very
# concise code.
g = (x*x for x in xrange(10))
# print g.next()
# for n in g:
#     print n
def fab(max):
    """Print the first *max* Fibonacci numbers (Python 2 print statement).

    NOTE(review): the parameter name shadows the builtin max().
    """
    n,a,b=0,0,1
    while n<max:
        print b
        a,b=b,a+b
        n+=1
fab(9)
19,090 | 88a15259b1d1f596b53ffc1e3dfc96bab18ecc40 |
import uuid
class IdGenerator(object):
    """Utility for generating short unique request identifiers."""

    # The original class body contained a stray ``None`` statement and
    # defined getRequestId without ``self``, which only worked when called
    # through the class.  @staticmethod keeps that call style working and
    # additionally allows calls on instances.
    @staticmethod
    def getRequestId():
        """Return a 20-character lowercase hex request id.

        Built from a UUID1 (time/MAC based) with the dashes stripped,
        truncated to the first 20 characters.
        """
        return str(uuid.uuid1()).replace('-', '')[0:20]
19,091 | 8023c2100284bb457e8a3893ab6e486d25264c25 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import tp
import workoutBuilder
import datetime
from pathlib import Path
TP_DOMAIN = 'https://tpapi.trainingpeaks.com/'
WORKOUT_URL = TP_DOMAIN + 'fitness/v5/athletes/{}/workouts'
RECALC_TSS = TP_DOMAIN + 'fitness/v1/athletes/{}/commands/workouts/{}/recalctss'
DIRECTORY_WORKOUT = 'Build.Me.Up/'
def get_next_monday(date):
    """Return the Monday of the week following *date*'s week.

    date : date
    """
    # weekday() is 0 for Monday, so (7 - weekday) days lands on next Monday.
    days_until_next_monday = 7 - date.weekday()
    return date + datetime.timedelta(days=days_until_next_monday)
def upload_workout_from_directory(file_path, date):
    """
    Upload a zwift workout for a given date
    file_path : str
    date : date

    NOTE(review): relies on the module-global ``tpconnect`` created in the
    __main__ block below; calling this after a plain import raises NameError.
    """
    my_workout_builder = workoutBuilder.WorkoutBuilder(file_path)
    my_workout_builder.parse_xml_file()
    my_workout_builder.define_workout_name_from_filename()
    tpconnect.upload_workout('Zwift - Build Me Up -' + my_workout_builder.get_workout_name(),
                             my_workout_builder.get_workout_structure(),
                             my_workout_builder.get_workout_time(), date.strftime("%Y-%m-%dT%H:%M:%S"))
def upload_all_workout_from_directory(directory_path):
    """
    Upload all workout in directory (deep directory OK)
    directory_path : str

    Scheduling: the first file is placed on next Monday relative to last
    week's Monday, and each subsequent file advances one week.
    """
    # Start from last week's Monday so the first get_next_monday() call
    # yields the upcoming Monday.
    day = datetime.date.today() - datetime.timedelta(days=datetime.date.today().weekday(), weeks=1)
    for root, dirs, files in os.walk(directory_path):
        for f in files:
            print(f)
            upload_workout_from_directory(os.path.relpath(os.path.join(root, f), "."), get_next_monday(day))
            day = get_next_monday(day)
if __name__ == '__main__':
    # Credentials file format: a single "username:password" line.
    username, password = open("trainingpeaks.key").read().rstrip().split(':')
    tpconnect = tp.TPconnect(username, password)
    print("Connected to TrainingPeaks")
    upload_all_workout_from_directory(DIRECTORY_WORKOUT)
    print("All workouts uploaded")
19,092 | 986d1cab81815ecda21d119eb9eeb153e9a95bf7 | import torch
import torch.nn.functional as F
from xautodl.xlayers import super_core
from xautodl.xlayers import trunc_normal_
from xautodl.xmodels.xcore import get_model
class MetaModelV1(super_core.SuperModule):
    """Learning to Generate Models One Step Ahead (Meta Model Design).

    Holds a learnable embedding per known meta-timestamp plus one per model
    layer; an attention module turns a query timestamp into a time
    embedding, and an MLP generator maps (layer embedding, time embedding)
    pairs to flat weight vectors that are reshaped via ``shape_container``.
    """
    def __init__(
        self,
        shape_container,
        layer_dim,
        time_dim,
        meta_timestamps,
        dropout: float = 0.1,
        seq_length: int = None,
        interval: float = None,
        thresh: float = None,
    ):
        # shape_container: per-layer parameter shapes (indexable, .numel(),
        # .translate()).  meta_timestamps: 1-D sequence of known timestamps.
        # interval is required; thresh defaults to interval * seq_length.
        super(MetaModelV1, self).__init__()
        self._shape_container = shape_container
        self._num_layers = len(shape_container)
        self._numel_per_layer = []
        for ilayer in range(self._num_layers):
            self._numel_per_layer.append(shape_container[ilayer].numel())
        self._raw_meta_timestamps = meta_timestamps
        assert interval is not None
        self._interval = interval
        self._thresh = interval * seq_length if thresh is None else thresh
        # Learnable per-layer and per-timestamp embeddings.
        self.register_parameter(
            "_super_layer_embed",
            torch.nn.Parameter(torch.Tensor(self._num_layers, layer_dim)),
        )
        self.register_parameter(
            "_super_meta_embed",
            torch.nn.Parameter(torch.Tensor(len(meta_timestamps), time_dim)),
        )
        self.register_buffer("_meta_timestamps", torch.Tensor(meta_timestamps))
        self._time_embed_dim = time_dim
        # Extra (timestamp, embedding) entries appended after construction;
        # "fixed" entries are frozen, "learnt" holds the one being adapted.
        self._append_meta_embed = dict(fixed=None, learnt=None)
        self._append_meta_timestamps = dict(fixed=None, learnt=None)
        self._tscalar_embed = super_core.SuperDynamicPositionE(
            time_dim, scale=1 / interval
        )
        # build transformer
        self._trans_att = super_core.SuperQKVAttentionV2(
            qk_att_dim=time_dim,
            in_v_dim=time_dim,
            hidden_dim=time_dim,
            num_heads=4,
            proj_dim=time_dim,
            qkv_bias=True,
            attn_drop=None,
            proj_drop=dropout,
        )
        model_kwargs = dict(
            config=dict(model_type="dual_norm_mlp"),
            input_dim=layer_dim + time_dim,
            output_dim=max(self._numel_per_layer),
            hidden_dims=[(layer_dim + time_dim) * 2] * 3,
            act_cls="gelu",
            norm_cls="layer_norm_1d",
            dropout=dropout,
        )
        self._generator = get_model(**model_kwargs)
        # initialization
        trunc_normal_(
            [self._super_layer_embed, self._super_meta_embed],
            std=0.02,
        )
    def get_parameters(self, time_embed, attention, generator):
        """Return the selected parameter groups as a flat list."""
        parameters = []
        if time_embed:
            parameters.append(self._super_meta_embed)
        if attention:
            parameters.extend(list(self._trans_att.parameters()))
        if generator:
            parameters.append(self._super_layer_embed)
            parameters.extend(list(self._generator.parameters()))
        return parameters
    @property
    def meta_timestamps(self):
        """All known timestamps: base buffer plus appended fixed/learnt."""
        with torch.no_grad():
            meta_timestamps = [self._meta_timestamps]
            for key in ("fixed", "learnt"):
                if self._append_meta_timestamps[key] is not None:
                    meta_timestamps.append(self._append_meta_timestamps[key])
        return torch.cat(meta_timestamps)
    @property
    def super_meta_embed(self):
        """All time embeddings, aligned with ``meta_timestamps``."""
        meta_embed = [self._super_meta_embed]
        for key in ("fixed", "learnt"):
            if self._append_meta_embed[key] is not None:
                meta_embed.append(self._append_meta_embed[key])
        return torch.cat(meta_embed)
    def create_meta_embed(self):
        """Create a fresh trainable (1, time_dim) embedding parameter."""
        param = torch.Tensor(1, self._time_embed_dim)
        trunc_normal_(param, std=0.02)
        param = param.to(self._super_meta_embed.device)
        param = torch.nn.Parameter(param, True)
        return param
    def get_closest_meta_distance(self, timestamp):
        """Distance from *timestamp* to the nearest known timestamp."""
        with torch.no_grad():
            distances = torch.abs(self.meta_timestamps - timestamp)
            return torch.min(distances).item()
    def replace_append_learnt(self, timestamp, meta_embed):
        """Replace (or clear, with None) the single 'learnt' entry."""
        self._append_meta_timestamps["learnt"] = timestamp
        self._append_meta_embed["learnt"] = meta_embed
    @property
    def meta_length(self):
        # Total number of known timestamps (base + appended).
        return self.meta_timestamps.numel()
    def clear_fixed(self):
        """Drop all appended 'fixed' entries."""
        self._append_meta_timestamps["fixed"] = None
        self._append_meta_embed["fixed"] = None
    def clear_learnt(self):
        """Drop the appended 'learnt' entry."""
        self.replace_append_learnt(None, None)
    def append_fixed(self, timestamp, meta_embed):
        """Append a frozen (timestamp, embedding) pair (detached copies)."""
        with torch.no_grad():
            device = self._super_meta_embed.device
            timestamp = timestamp.detach().clone().to(device)
            meta_embed = meta_embed.detach().clone().to(device)
            if self._append_meta_timestamps["fixed"] is None:
                self._append_meta_timestamps["fixed"] = timestamp
            else:
                self._append_meta_timestamps["fixed"] = torch.cat(
                    (self._append_meta_timestamps["fixed"], timestamp), dim=0
                )
            if self._append_meta_embed["fixed"] is None:
                self._append_meta_embed["fixed"] = meta_embed
            else:
                self._append_meta_embed["fixed"] = torch.cat(
                    (self._append_meta_embed["fixed"], meta_embed), dim=0
                )
    def gen_time_embed(self, timestamps):
        """Attend over past meta embeddings to embed a batch of timestamps.

        Entries at or after the query time, or farther than ``_thresh``,
        are masked out so only a recent causal window is used.
        """
        # timestamps is a batch of timestamps
        [B] = timestamps.shape
        # batch, seq = timestamps.shape
        timestamps = timestamps.view(-1, 1)
        meta_timestamps, meta_embeds = self.meta_timestamps, self.super_meta_embed
        timestamp_v_embed = meta_embeds.unsqueeze(dim=0)
        timestamp_qk_att_embed = self._tscalar_embed(
            torch.unsqueeze(timestamps, dim=-1) - meta_timestamps
        )
        # create the mask
        mask = (
            torch.unsqueeze(timestamps, dim=-1) <= meta_timestamps.view(1, 1, -1)
        ) | (
            torch.abs(
                torch.unsqueeze(timestamps, dim=-1) - meta_timestamps.view(1, 1, -1)
            )
            > self._thresh
        )
        timestamp_embeds = self._trans_att(
            timestamp_qk_att_embed,
            timestamp_v_embed,
            mask,
        )
        return timestamp_embeds[:, -1, :]
    def gen_model(self, time_embeds):
        """Generate one weight container per time embedding in the batch."""
        B, _ = time_embeds.shape
        # create joint embed
        num_layer, _ = self._super_layer_embed.shape
        # The shape of `joint_embed` is batch * num-layers * input-dim
        joint_embeds = torch.cat(
            (
                time_embeds.view(B, 1, -1).expand(-1, num_layer, -1),
                self._super_layer_embed.view(1, num_layer, -1).expand(B, -1, -1),
            ),
            dim=-1,
        )
        batch_weights = self._generator(joint_embeds)
        batch_containers = []
        for weights in torch.split(batch_weights, 1):
            batch_containers.append(
                self._shape_container.translate(torch.split(weights.squeeze(0), 1))
            )
        return batch_containers
    def forward_raw(self, timestamps, time_embeds, tembed_only=False):
        # Not used: callers go through gen_time_embed / gen_model directly.
        raise NotImplementedError
    def forward_candidate(self, input):
        raise NotImplementedError
    def easy_adapt(self, timestamp, time_embed):
        """Freeze *time_embed* as the fixed entry for *timestamp*."""
        with torch.no_grad():
            timestamp = torch.Tensor([timestamp]).to(self._meta_timestamps.device)
            self.replace_append_learnt(None, None)
            self.append_fixed(timestamp, time_embed)
    def adapt(self, base_model, criterion, timestamp, x, y, lr, epochs, init_info):
        """Optimize a new time embedding for *timestamp* on data (x, y).

        Returns (False, None) if the timestamp is already (nearly) covered;
        otherwise trains a fresh embedding against ``criterion`` plus an L1
        match to the attention-predicted embedding, freezes the best one via
        easy_adapt, and returns (True, best_meta_loss).
        """
        distance = self.get_closest_meta_distance(timestamp)
        # Skip adaptation when an existing timestamp is within ~one interval.
        if distance + self._interval * 1e-2 <= self._interval:
            return False, None
        x, y = x.to(self._meta_timestamps.device), y.to(self._meta_timestamps.device)
        with torch.set_grad_enabled(True):
            new_param = self.create_meta_embed()
            optimizer = torch.optim.Adam(
                [new_param], lr=lr, weight_decay=1e-5, amsgrad=True
            )
            timestamp = torch.Tensor([timestamp]).to(new_param.device)
            self.replace_append_learnt(timestamp, new_param)
            self.train()
            base_model.train()
            if init_info is not None:
                best_loss = init_info["loss"]
                new_param.data.copy_(init_info["param"].data)
            else:
                best_loss = 1e9
            with torch.no_grad():
                best_new_param = new_param.detach().clone()
            for iepoch in range(epochs):
                optimizer.zero_grad()
                time_embed = self.gen_time_embed(timestamp.view(1))
                match_loss = F.l1_loss(new_param, time_embed)
                [container] = self.gen_model(new_param.view(1, -1))
                y_hat = base_model.forward_with_container(x, container)
                meta_loss = criterion(y_hat, y)
                loss = meta_loss + match_loss
                loss.backward()
                optimizer.step()
                # Track the embedding with the best task loss (not total loss).
                if meta_loss.item() < best_loss:
                    with torch.no_grad():
                        best_loss = meta_loss.item()
                        best_new_param = new_param.detach().clone()
        self.easy_adapt(timestamp, best_new_param)
        return True, best_loss
    def extra_repr(self) -> str:
        return "(_super_layer_embed): {:}, (_super_meta_embed): {:}, (_meta_timestamps): {:}".format(
            list(self._super_layer_embed.shape),
            list(self._super_meta_embed.shape),
            list(self._meta_timestamps.shape),
        )
19,093 | c7abaa88d4da55f94774b0d71fa9bd65a93f76b8 | import uuid
from taranis import publish
from taranis.abstract import DomainEvent
from mobi_logic.aggregations.organization.domain.entities.response import Response
class AnswerText():
    """Free-text answer aggregate; currently only declares its events."""
    class Created(DomainEvent):
        # Event emitted when a text answer is created; ``type`` is the
        # discriminator string used by the event infrastructure.
        type = "AnswerText.Created"
19,094 | e7d5001a0e92d61207c17fac41de566e04a7985d | import os.path
import json
from flask import request, Response, url_for, send_from_directory
from werkzeug.utils import secure_filename
from jsonschema import validate, ValidationError
from . import models
from . import decorators
from tuneful import app
from .database import session
from .utils import upload_path
# NOTE(review): this is not a valid JSON-Schema document -- "file" is not a
# JSON-Schema keyword, so jsonschema ignores it and validate() will accept
# almost any payload.  A real schema would use "type"/"properties"/"required".
song_schema ={
    "file": {
        "id": 7
    }
}
@app.route("/api/songs", methods=["GET"])
@decorators.accept("application/json")
def posts_get():
    """ Get a list of all songs as a JSON array """
    # Fetch every song from the database.
    songs = session.query(models.Song).all()
    # Serialize the songs to JSON and return a 200 response.
    data = json.dumps([song.as_dictionary() for song in songs])
    return Response(data, 200, mimetype="application/json")
@app.route("/api/songs", methods=["POST"])
@decorators.accept("application/json")
@decorators.require("application/json")
def post_song():
    """ Add a new song """
    data = request.json

    # Check that the JSON supplied is valid;
    # if not return a 422 Unprocessable Entity.
    try:
        validate(data, song_schema)
    except ValidationError as error:
        error_body = {"message": error.message}
        return Response(json.dumps(error_body), 422, mimetype="application/json")

    # Add the song to the database.
    song = models.Song(song_file_id=data["file"]["id"])
    session.add(song)
    session.commit()

    # Return a 201 Created (was 200, contradicting the comment) containing
    # the song as JSON, with the Location header set to the new resource.
    # The original double-encoded the body (json.dumps of an already-dumped
    # string), producing a quoted JSON string instead of an object.
    # NOTE(review): "get_songs" matches no endpoint defined in this module
    # (the list endpoint is "posts_get") -- confirm the intended target.
    body = json.dumps(song.as_dictionary())
    headers = {"Location": url_for("get_songs", id=song.id)}
    return Response(body, 201, headers=headers, mimetype="application/json")
@app.route("/api/songs/<int:id>", methods=["PUT"])
@decorators.accept("application/json")
@decorators.require("application/json")
def edit_song(id):
    """ Edit a single song """
    # The original signature took no arguments although the route declares
    # <int:id>, so every request raised TypeError; ``id`` is now accepted.
    # Check if the song exists; if not return a 404 with a helpful message.
    song = session.query(models.Song).get(id)
    if not song:
        message = "Could not find song with id {}".format(id)
        data = json.dumps({"message": message})
        return Response(data, 404, mimetype="application/json")

    data = request.json

    # Check that the JSON supplied is valid;
    # if not, return a 422 Unprocessable Entity.
    try:
        validate(data, song_schema)
    except ValidationError as error:
        data = {"message": error.message}
        return Response(json.dumps(data), 422, mimetype="application/json")

    # Update the song's file reference.  The original did ``song = data[""]``,
    # which replaced the model instance with a (missing) dict entry instead
    # of updating the record.
    song.song_file_id = data["file"]["id"]
    session.commit()

    # Return 200 OK containing the updated song as JSON, with the Location
    # header set to the location of the song.
    # NOTE(review): "post_get" matches no endpoint in this module -- confirm.
    body = json.dumps(song.as_dictionary())
    headers = {"Location": url_for("post_get", id=song.id)}
    return Response(
        body, 200, headers=headers,
        mimetype="application/json"
    )
@app.route("/api/songs/<int:id>", methods=["DELETE"])
@decorators.accept("application/json")
def delete_song(id):
    """ Delete a single song """
    # Check if the song exists; if not return a 404 with a helpful message.
    song = session.query(models.Song).get(id)
    if not song:
        message = "Could not find song with id {}".format(id)
        data = json.dumps({"message": message})
        return Response(data, 404, mimetype="application/json")

    session.delete(song)
    # Was ``session.commit`` (missing parentheses): the bound method was
    # never invoked, so the delete was silently discarded.
    session.commit()

    message = "deleted song {}".format(id)
    data = json.dumps({"message": message})
    # 200 OK (was 404, wrongly reporting the successful delete as an error).
    return Response(data, 200, mimetype="application/json")
|
19,095 | ac20733233901cf1bd7571166b88048f19b56861 | """Kartejlar (Tuole)"""
numbers = (1,2,3,4)  # parentheses + commas create a tuple literal
print(type(numbers))  # -> <class 'tuple'>
19,096 | 1c737cdc69ed524a6eb87748f80605d0cdbf83c7 | import os
import sys
import pprint
from PIL import Image
from av import open
# Dump the first few video frames of the file given on the command line as
# JPEGs under sandbox/.  NOTE: ``open`` here is av.open (it shadows the
# builtin open via the import above).
video = open(sys.argv[1])
stream = next(s for s in video.streams if s.type == b'video')

frame_count = 0  # was referenced without ever being defined (NameError)
done = False
for packet in video.demux(stream):
    for frame in packet.decode():
        frame.to_image().save('sandbox/%04d.jpg' % frame.index)
        frame_count += 1
        if frame_count > 5:
            # The original ``break`` only left the inner loop, so decoding
            # would have continued with the next packet; stop both loops.
            done = True
            break
    if done:
        break
19,097 | 9cd53e62aa757c22fd16a611bc2ba8fedf9bb6c2 | """Models for tests."""
from sqlalchemy.ext.declarative import declarative_base
from bag.sqlalchemy.tricks import MinimalBase
from pluserable.data.sqlalchemy.models import (
ActivationMixin, GroupMixin, UsernameMixin, UserGroupMixin)
Base = declarative_base(cls=MinimalBase) # type: MinimalBase
# Inherit from NoUsernameMixin instead if you do not want a username field.
class User(UsernameMixin, Base): # noqa
    """Test user model; the mixin contributes a username field."""
    pass
class Group(GroupMixin, Base): # noqa
    """Test group model."""
    pass
class UserGroup(UserGroupMixin, Base): # noqa
    """Association model linking users to groups."""
    pass
class Activation(ActivationMixin, Base): # noqa
    """Account-activation token model."""
    pass
19,098 | d1f695b86f8051c53f3823690afe8e0c41a01c2e | from __future__ import division, print_function
import pprint
import pandas as pd
import numpy as np
import itertools
import requests
import random
import math
from sklearn import preprocessing
import scipy.stats as stats
import matplotlib.mlab as mlab
import evaluation
import getcontext
from scipy.stats import multivariate_normal
# TODO:
# Point estimate for reward r (i.e. average price)
credentials = {'teamid' : 'Coffeeblack',
'teampw' : '23e88f60c155804c79cfd1af83e47fc8'}
pp = pprint.PrettyPrinter(indent=4)
def logit_link(x):
    """Sigmoid link function used by the SGD policy.

    Maps any real *x* into (0, 1) with a gentle slope of 0.05, so
    logit_link(0) == 0.5.  (A steeper 0.01 variant was tried previously.)
    """
    slope = 0.05
    return 1.0 / (1.0 + math.exp(-slope * x))
class MultiArmedBanditPolicy(object):
    """Epsilon-greedy multi-armed bandit with a decaying epsilon."""
    def __init__(self, arms, epsilon=1):
        self.arms = arms # Initialize arms
        self.pulls = [0] * len(arms) # Number of trials of each arm
        self.values = [0.0] * len(arms) # Expected reward of each arm
        self.epsilon = epsilon  # exploration probability
        self.t = 0 # Time steps
        # Beta-distribution bookkeeping (used by Bayesian variants).
        self.alphas = np.ones(len(self.arms))
        self.betas = np.ones(len(self.arms))
    def update(self, arm, reward):
        """
        Update the value of a chosen arm
        """
        # Increase pulls by one
        self.pulls[arm] += 1
        # New number of pulls
        n = self.pulls[arm]
        # Old value
        old_val = self.values[arm]
        # New value (online weighted average)
        new_val = ((n - 1) / n) * old_val + (1 / n) * reward
        # Update value
        self.values[arm] = new_val
        # Update epsilon (decays with time so exploration shrinks)
        self.t += 1
        self.epsilon = self.calc_dynamic_epsilon()
    def select_arm(self):
        """
        Return index of the arm we want to choose
        """
        # Exploitation
        if random.uniform(0, 1) > self.epsilon:
            return np.argmax(self.values)
        # Exploration
        else:
            return random.randrange(len(self.values))
    # Create dummies for context
    def create_dummy_context(self, path):
        """Load the context CSV and one-hot encode the categorical columns."""
        # Read in df
        context_df = pd.read_csv(path)
        # Create dummy variables
        df = pd.get_dummies(context_df, columns=['Agent',
                                                 'Language',
                                                 'Referer'
                                                 ])
        return df
    def create_dummy_arms(self, df):
        """One-hot encode arm properties, keeping price numeric."""
        # Create dummy variables
        df = pd.get_dummies(df, columns=['adtype',
                                         'color',
                                         'header',
                                         #'price',
                                         'productid'])
        return df
    def create_dummy_arms_bayesian(self, df):
        """One-hot encode arm properties for the Bayesian variant."""
        # Create dummy variables
        df = pd.get_dummies(df, columns=['adtype',
                                         'color',
                                         'header',
                                         'productid'])
        return df
    def calc_dynamic_epsilon(self, epsilon_0=1, power_t=0.3):
        # Polynomially decaying exploration rate: eps_0 / t^power_t.
        # Only valid for t > 0 (callers increment t first).
        return epsilon_0 / (self.t ** power_t)
class LinearPolicy(MultiArmedBanditPolicy):
    """Contextual bandit with a Bayesian linear model (Thompson sampling).

    Combines one-hot encoded context rows with one-hot encoded arm
    properties (plus price interaction terms) and samples model weights
    from a multivariate normal posterior.
    """
    def __init__(self, arms, context_path):
        self.mu = 25 # for normal distribution
        # Load context
        self.context = self.create_dummy_context(context_path)
        self.context['Age'] = self.context['Age'] / 50.
        # DEBUG introduce agent again !!!
        self.context.drop(['ID'], axis=1, inplace=True)
        # Length of context
        self.d_c = len(self.context.ix[0, :])
        # All arm properties
        self.df_arms = pd.DataFrame([getattr(arm, "properties") for arm in arms])
        # NOTE(review): typo -- ``proporties`` (the code below uses
        # ``self.properties``, set in select_arm).
        self.proporties = None
        self.df_arms_original = self.df_arms.copy()
        self.df_arms['price'] = self.df_arms.price / 50.
        self.df_arms['price_2'] = self.df_arms.price ** 2
        self.df_arms['price_3'] = self.df_arms.price ** 3
        ## Init Bayesian arms
        #self.n_arms = 1000
        #self.df_arms = self.init_bayesian_arms()
        # Dummy encoded arm properties
        self.df_arm_dummies = self.create_dummy_arms(self.df_arms)
        # self.df_arm_dummies = self.create_dummy_arms_bayesian(self.df_arms)
        # Length of actions
        self.d_a = len(self.df_arm_dummies.ix[0, :])
        # Coefficients of the model
        self.d = self.d_c + self.d_a
        # Initialize arms
        self.arms = arms
        # Number of trials of each arm
        self.pulls = [0] * len(arms)
        # Expected reward of each arm
        self.values = [0.0] * len(arms)
        # Time steps
        self.t = 0
        ## From here on Bayes:
        self.d += 10
        self.mu_hat = np.zeros(self.d).reshape(-1, 1)
        self.delta = 0.3 # WHAT IS THIS?!
        self.epsilon = 0.086 # See Agrawal, Remark 2
        self.f = np.zeros(self.d).reshape(-1, 1)
        self.B = np.matrix(np.identity(self.d))
        self.R = 0.086
        self.v = self.R * math.sqrt(
            24 / self.epsilon * self.d * math.log(1 / self.delta))
    def init_bayesian_arms(self):
        """Sample a set of random arms (Bayesian variant).

        NOTE(review): ``create_bayesian_arms`` is not defined in this
        module -- presumably imported elsewhere; verify before enabling.
        """
        p_headers = [1/3.] * 3
        p_adtypes = [1/3.] * 3
        p_colors = [1/5.] * 5
        p_productids = [1/16.] * 16
        n = self.n_arms
        arms = create_bayesian_arms(p_headers,
                                    p_adtypes,
                                    p_colors,
                                    p_productids,
                                    self.mu,
                                    n)
        return arms
    def combine_all_context_action(self, t):
        """Build the design matrix for time step *t*: one row per arm,
        context columns + arm dummies + price/productid interactions."""
        repeated_context = pd.DataFrame([self.context.iloc[t, :]], index=range(len(self.df_arms)))
        combined = pd.concat([repeated_context, self.df_arm_dummies], axis=1)
        # Add price
        #price_dict = {}
        #productid_dict = {}
        for var in self.context.columns:
            combined[var + '_price'] = self.context[var] * self.df_arm_dummies.ix[:, 'price']
            for i in range(10, 26):
                combined[var + '_productid_' + str(i)] = self.context[var] * \
                                                         self.df_arm_dummies.ix[:, 'productid_' + str(i)]
        #combined['Age_price'] = combined.price * combined.Age
        #combined['Agent_Linux_price'] = combined.price * combined.Agent_Linux
        #combined['Agent_OSX_price'] = combined.price * combined.Agent_OSX
        #combined['Agent_Windows_price'] = combined.price * combined.Agent_Windows
        #combined['Agent_mobile_price'] = combined.price * combined.Agent_mobile
        #combined['Language_EN_price'] = combined.price * combined.Language_EN
        #combined['Language_GE_price'] = combined.price * combined.Language_GE
        #combined['Language_NL_price'] = combined.price * combined.Language_NL
        #combined['Referer_Bing_price'] = combined.price * combined.Referer_Bing
        #combined['Referer_Google_price'] = combined.price * combined.Referer_Google
        return combined
    def combine_context_bayesian_arms(self, t, n_arms):
        """Combine the context at *t* with freshly sampled Bayesian arms.

        NOTE(review): also depends on the undefined ``create_bayesian_arms``.
        """
        p_headers = [1/3.] * 3
        p_adtypes = [1/3.] * 3
        p_colors = [1/5.] * 5
        p_productids = [1/16.] * 16
        arms = create_bayesian_arms(p_headers,
                                    p_adtypes,
                                    p_colors,
                                    p_productids,
                                    self.mu,
                                    n_arms)
        self.df_arms = arms
        self.df_arm_dummies = self.create_dummy_arms_bayesian(self.df_arms)
        repeated_context = pd.DataFrame([self.context.iloc[t, :]],
                                        index=range(n_arms))
        combined = pd.concat([repeated_context, self.df_arm_dummies], axis=1)
        return combined
    def update(self, arm, reward, alpha=0.05, l=0.05):
        """
        Update the value of a chosen arm
        """
        # Get context
        context = self.context.iloc[self.t, :]
        # Add price
        price_dict = {}
        productid_dict = {}
        for var in context.keys():
            price_dict[var + '_price'] = context[var] * self.df_arm_dummies.ix[arm, 'price']
            for i in range(10, 26):
                productid_dict[var + '_productid_' + str(i)] = context[var] * \
                                                               self.df_arm_dummies.ix[arm, 'productid_' + str(i)]
        print("Price dict is")
        print(price_dict)
        print(productid_dict)
        #Age_price = context.Age * self.df_arm_dummies.ix[arm, 'price']
        #Agent_Linux_price = self.df_arm_dummies.ix[arm, 'price'] * context.Agent_Linux
        #Agent_OSX_price = self.df_arm_dummies.ix[arm, 'price'] * context.Agent_OSX
        #Agent_Windows_price = self.df_arm_dummies.ix[arm, 'price'] * context.Agent_Windows
        #Agent_mobile_price = self.df_arm_dummies.ix[arm, 'price'] * context.Agent_mobile
        #
        #
        #Language_EN_price = self.df_arm_dummies.ix[arm, 'price'] * context.Language_EN
        #Language_GE_price = self.df_arm_dummies.ix[arm, 'price'] * context.Language_GE
        #Language_NL_price = self.df_arm_dummies.ix[arm, 'price'] * context.Language_NL
        #Referer_Bing_price = self.df_arm_dummies.ix[arm, 'price'] * context.Referer_Bing
        #Referer_Google_price = self.df_arm_dummies.ix[arm, 'price'] * context.Referer_Google
        #
        combined = np.append(context, self.df_arm_dummies.iloc[arm, :])#.reshape(-1, 1)
        # NOTE(review): BUG -- 'prict_dict' is undefined (typo for
        # price_dict); this line raises NameError at runtime.
        prices = prict_dict.items()
        # Combine with arm
        # NOTE(review): BUG -- the names below (Age_price, ...) were replaced
        # by the price_dict/productid_dict loops above and are no longer
        # defined; this block raises NameError.  The intended fix must append
        # the interaction terms in the SAME column order produced by
        # combine_all_context_action -- confirm before rewriting.
        combined = np.append(combined,
                             [Age_price,
                              Agent_Linux_price,
                              Agent_OSX_price,
                              Agent_Windows_price,
                              Agent_mobile_price,
                              Language_EN_price,
                              Language_GE_price,
                              Language_NL_price,
                              Referer_Bing_price,
                              Referer_Google_price
                              ]).reshape(-1, 1)
        if reward > 0:
            reward = 1
        else:
            reward = -1
        # Bayes
        # NOTE(review): updates B with np.dot(context, context) (a scalar);
        # Bayesian linear regression would normally use the outer product of
        # the combined feature vector -- confirm the intended update.
        self.B = self.B + np.dot(context, context)
        self.f = self.f + combined * reward
        self.mu_hat = np.dot(np.linalg.inv(self.B), self.f)
        self.mu = min(5, self.mu + 0.1 * (-0.5 + int(bool(reward))))
        # Update time step
        self.t += 1
    def draw(self, runid, i):
        """ Draw the random sample arm """
        # NOTE(review): ``dict(a.items() + b.items())`` is Python 2 only;
        # the rest of the file uses Python 3 print() -- confirm target version.
        ids = {'runid': runid, 'i': i }
        payload = dict(self.properties.items() + ids.items())
        payload.update(credentials) # Add credentials
        print("Price is", payload['price'])
        # Propose page and get JSON answer
        r = requests.get("http://krabspin.uci.ru.nl/proposePage.json/",
                         params=payload)
        r_json = r.json()['effect']
        if r_json['Error'] is not None:
            print("Error in id:", i)
        return r_json['Success'] * self.properties['price']
    def select_arm(self):
        """
        Return index of the arm we want to choose
        """
        # Chose sample from betas (Thompson sampling from the posterior)
        mu_tilde = multivariate_normal(np.squeeze(np.asarray(self.mu_hat)),
                                       self.v ** 2 * np.linalg.inv(self.B)).rvs()
        # combined_context_bayesian_arms = self.combine_context_bayesian_arms(self.t, self.n_arms)
        combined_context_arms = self.combine_all_context_action(self.t)
        cca_numpy = combined_context_arms.values
        linear_predictor = np.dot(cca_numpy, mu_tilde)
        pp.pprint(zip(mu_tilde, combined_context_arms.columns))
        logit_link_vec = np.vectorize(logit_link)
        hypotheses = logit_link_vec(linear_predictor)
        print(hypotheses)
        # Expected revenue = purchase probability * (original, unscaled) price.
        hypotheses = np.multiply(hypotheses, self.df_arms_original.price.values)
        print(hypotheses)
        # hypo_with_price = np.multiply(hypotheses, stats.norm.pdf(self.df_arms.price.values, 25, 100))
        bestarm = np.argmax(hypotheses)
        # print("Max is", np.max(hypotheses))
        self.properties = self.df_arms_original.iloc[bestarm, :].to_dict()
        # print(self.properties)
        # bestarm = np.argmax(linear_predictor)
        #print(np.max(linear_predictor))
        # print("Best arm", bestarm)
        return bestarm
class BootstrapSampler(MultiArmedBanditPolicy):
    """Contextual bandit that resamples candidate arms from a Bayesian prior
    every step and picks the best one under a Thompson-sampled linear model."""

    def __init__(self, arms, context_path):
        # Prior mean of the price distribution used when sampling arms.
        self.mu = 25
        # Load and dummy-encode the visitor context.
        self.context = self.create_dummy_context(context_path)
        # DEBUG introduce agent again !!!
        self.context.drop(['ID'], axis=1, inplace=True)
        # Number of context features (.iloc replaces the removed .ix).
        self.d_c = len(self.context.iloc[0, :])
        # Properties of the most recently selected arm (was misspelled
        # 'proporties', leaving `properties` unset until select_arm()).
        self.properties = None
        ## Init Bayesian arms
        self.n_arms = 1000
        self.df_arms = self.init_bayesian_arms()
        # Dummy encoded arm properties
        self.df_arm_dummies = self.create_dummy_arms_bayesian(self.df_arms)
        # Number of action features.
        self.d_a = len(self.df_arm_dummies.iloc[0, :])
        # Total dimension of the linear model.
        self.d = self.d_c + self.d_a
        # Initalize arms
        self.arms = arms
        # Number of trials of each arm
        self.pulls = [0] * len(arms)
        # Expected reward of each arm
        self.values = [0.0] * len(arms)
        # Time steps
        self.t = 0
        ## Thompson-sampling posterior state.
        self.mu_hat = np.zeros(self.d).reshape(-1, 1)
        self.delta = 0.2   # failure probability in the posterior-scaling bound
        self.epsilon = 0.4
        self.f = np.zeros(self.d).reshape(-1, 1)
        self.B = np.matrix(np.identity(self.d))
        self.R = 0.2
        # Posterior scaling factor; was never set, so select_arm() raised
        # AttributeError.  This is the standard Agrawal & Goyal scaling
        # v = R * sqrt(24/eps * d * ln(1/delta)) — confirm it matches the
        # sibling sampler's definition (outside this view).
        self.v = self.R * np.sqrt(24.0 / self.epsilon * self.d * np.log(1.0 / self.delta))

    def combine_all_context_action(self, t):
        """Cross the context of time step `t` with every candidate arm."""
        repeated_context = pd.DataFrame([self.context.iloc[t, :]], index=range(len(self.df_arms)))
        combined = pd.concat([repeated_context, self.df_arm_dummies], axis=1)
        return combined

    def combine_context_bayesian_arms(self, t, n_arms):
        """Resample `n_arms` arms from uniform categorical priors (price from
        the adaptive normal prior around self.mu) and cross them with the
        context of time step `t`."""
        p_headers = [1 / 3.] * 3
        p_adtypes = [1 / 3.] * 3
        p_colors = [1 / 5.] * 5
        p_productids = [1 / 16.] * 16
        arms = create_bayesian_arms(p_headers,
                                    p_adtypes,
                                    p_colors,
                                    p_productids,
                                    self.mu,
                                    n_arms)
        self.df_arms = arms
        self.df_arm_dummies = self.create_dummy_arms_bayesian(self.df_arms)
        repeated_context = pd.DataFrame([self.context.iloc[t, :]],
                                        index=range(n_arms))
        combined = pd.concat([repeated_context, self.df_arm_dummies], axis=1)
        return combined

    def update(self, arm, reward, alpha=0.05, l=0.05):
        """
        Update the posterior after pulling `arm` and observing `reward`.
        `alpha` and `l` are unused (interface compatibility).
        """
        # Context of the current time step.
        context = self.context.iloc[self.t, :]
        # Feature vector: context followed by the chosen arm's dummies.
        combined = np.append(context, self.df_arm_dummies.iloc[arm, :]).reshape(-1, 1)
        # Bayesian update of precision B, reward sum f and posterior mean.
        # NOTE(review): B is updated from the raw context only, not from
        # `combined` — confirm the intended dimensions.
        self.B = self.B + np.dot(context, context)
        self.f = self.f + combined * int(bool(reward))
        self.mu_hat = np.dot(np.linalg.inv(self.B), self.f)
        # Drift the price prior toward converting arms, capped at 5.
        self.mu = min(5, self.mu + 0.1 * (-0.5 + int(bool(reward))))
        # Update time step
        self.t += 1

    def draw(self, runid, i):
        """Propose the selected arm via the live API and return the realized
        revenue (success indicator times price)."""
        ids = {'runid': runid, 'i': i}
        # Py2/py3-compatible merge, consistent with BanditArm.draw.
        payload = dict(self.properties, **ids)
        payload.update(credentials)  # Add credentials
        # Propose page and get JSON answer
        r = requests.get("http://krabspin.uci.ru.nl/proposePage.json/",
                         params=payload)
        r_json = r.json()['effect']
        if r_json['Error'] is not None:
            print("Error in id:", i)
        return r_json['Success'] * self.properties['price']

    def select_arm(self):
        """
        Return the index of the arm to play: sample coefficients from the
        posterior, resample candidate arms, and maximize the price-prior
        weighted conversion probability.
        """
        # One posterior draw of the coefficients.
        mu_tilde = multivariate_normal(np.squeeze(np.asarray(self.mu_hat)),
                                       self.v ** 2 * np.linalg.inv(self.B)).rvs()
        combined_context_bayesian_arms = self.combine_context_bayesian_arms(self.t, self.n_arms)
        print(mu_tilde)
        cca_numpy = combined_context_bayesian_arms.values
        linear_predictor = np.dot(cca_numpy, mu_tilde)
        logit_link_vec = np.vectorize(logit_link)
        hypotheses = logit_link_vec(linear_predictor)
        # Weight conversion probabilities by a wide normal price prior.
        hypo_with_price = np.multiply(hypotheses, stats.norm.pdf(self.df_arms.price.values, 25, 100))
        bestarm = np.argmax(hypo_with_price)
        # Was self.df_arms_original, which this class never sets
        # (AttributeError); the sampled arms live in self.df_arms.
        self.properties = self.df_arms.iloc[bestarm, :].to_dict()
        return bestarm
class BanditArm(object):
    """A single ad configuration that can be proposed to the live server."""

    def __init__(self, properties):
        # Mapping of ad attributes (header, adtype, color, price, productid).
        self.properties = properties

    def draw(self, runid, i):
        """Propose this arm for visitor `i` of run `runid` and return the
        realized revenue (success indicator times this arm's price)."""
        request_params = dict(self.properties, runid=runid, i=i)
        request_params.update(credentials)  # Add API credentials
        # Propose page and parse the JSON answer.
        response = requests.get("http://krabspin.uci.ru.nl/proposePage.json/",
                                params=request_params)
        effect = response.json()['effect']
        if effect['Error'] is not None:
            print("Error in id:", i)
        return effect['Success'] * self.properties['price']
def arm_product(dicts):
    """
    Create all arm combinations.

    Yields one dict per element of the Cartesian product of the value lists
    in `dicts`, keyed by the corresponding property names.
    """
    # zip/values() instead of izip/itervalues(): identical behavior on
    # Python 2, and also runs under Python 3.
    return (dict(zip(dicts, combo)) for combo in itertools.product(*dicts.values()))
def create_all_arm_properties():
    """Enumerate every ad configuration on the full property grid."""
    grid = {
        'header': [5, 15, 35],
        'adtype': ['skyscraper', 'square', 'banner'],
        'color': ['green', 'blue', 'red', 'black', 'white'],
        # Prices 1.99 .. 49.99 in 1 Euro steps, rounded to cents.
        'price': [float(str(np.around(p, 2))) for p in np.arange(1.99, 50.01, 1.00)],
        'productid': range(10, 26),
    }
    return list(arm_product(grid))
def create_bayesian_arms(p_headers, p_adtypes, p_colors, p_productids, mu, n):
arms = []
for i in range(n):
header_msk = np.random.multinomial(1, p_headers)
headers = np.array([5, 15, 35])
header = headers[np.where(header_msk)][0]
adtype_msk = np.random.multinomial(1, p_adtypes)
adtypes = np.array(['skyscraper', 'square', 'banner'])
adtype = adtypes[np.where(adtype_msk)][0]
color_msk = np.random.multinomial(1, p_colors)
colors = np.array(['green', 'blue', 'red', 'black', 'white'])
color = colors[np.where(color_msk)][0]
productid_msk = np.random.multinomial(1, p_productids)
productids = np.array(range(10, 26))
productid = productids[np.where(productid_msk)][0]
price = float(
str(
np.around(
np.min([50,
np.max(
[0, np.random.normal(mu, 10)]
)]),
2)))
combined = {
'header': header,
'adtype': adtype,
'color': color,
'productid': productid,
'price': price,
}
arms.append(combined)
arms_df = pd.DataFrame(arms)
return arms_df
def main():
    """Entry point: build every arm and pull one fixed arm repeatedly
    against the live API, printing each realized payoff."""
    arm_properties = create_all_arm_properties()
    bandit_arms = [BanditArm(props) for props in arm_properties]
    # Repeatedly draw the same arm to probe its payoff distribution.
    for i in range(10000):
        print(bandit_arms[1903].draw(57, i))


if __name__ == "__main__":
    main()
"""
Tests for test case queryset-filtering by ID and with optional ID prefix.
"""
from sets import Set
from tests import case
from moztrap.view.lists.cases import PrefixIDFilter
class PrefixIDFilterTest(case.DBTestCase):
    """Tests for PrefixIDFilter: queryset filtering by numeric case ID, by
    idprefix, or by a combined "prefix-id" token."""

    def create_testdata(self):
        """Create four case versions: one 'pre'-prefixed, one without a
        prefix, and two sharing the 'moz' prefix."""
        testdata = {}
        testdata["cv1"] = self.F.CaseVersionFactory.create(name="CV 1",
            case=self.F.CaseFactory.create(idprefix="pre"))
        testdata["cv2"] = self.F.CaseVersionFactory.create(name="CV 2")
        testdata["cv3"] = self.F.CaseVersionFactory.create(name="CV 3",
            case=self.F.CaseFactory.create(idprefix="moz"))
        testdata["cv4"] = self.F.CaseVersionFactory.create(name="CV 4",
            case=self.F.CaseFactory.create(idprefix="moz"))
        return testdata

    def filter(self, criteria):
        """Apply a PrefixIDFilter on "id" with `criteria` to all versions."""
        f = PrefixIDFilter("id")
        res = f.filter(
            self.model.CaseVersion.objects.all(),
            criteria,
            )
        return res

    def test_prefix_and_id(self):
        """prefix and ID"""
        td = self.create_testdata()
        res = self.filter([u"pre-{0}".format(td["cv1"].case.id)])
        self.assertEqual(res.get().name, "CV 1")

    def test_prefix_only(self):
        """prefix only"""
        self.create_testdata()
        res = self.filter([u"pre"])
        self.assertEqual(res.get().name, "CV 1")

    def test_id_only(self):
        """ID only"""
        td = self.create_testdata()
        res = self.filter([unicode(td["cv1"].case.id)])
        self.assertEqual(res.get().name, "CV 1")

    def test_id_only_int(self):
        """ID as an int"""
        td = self.create_testdata()
        res = self.filter([int(td["cv1"].case.id)])
        self.assertEqual(res.get().name, "CV 1")

    def test_id_and_prefix_from_different_cases_gets_both(self):
        """ID from one case and prefix from a different case gets both"""
        td = self.create_testdata()
        res = self.filter([u"pre", unicode(td["cv2"].case.id)])
        # Builtin set replaces deprecated sets.Set (removed in Python 3).
        self.assertEqual(
            set([x.name for x in res.all()]),
            set(["CV 1", "CV 2"]),
            )

    def test_id_case_without_prefix(self):
        """id when case has no prefix"""
        td = self.create_testdata()
        res = self.filter([unicode(td["cv2"].case.id)])
        self.assertEqual(res.get().name, "CV 2")

    def test_cases_different_prefix_return_both(self):
        """
        3 cases have 2 different prefixes returns cases from both prefixes.
        """
        self.create_testdata()
        res = self.filter([u"pre", u"moz"])
        self.assertEqual(
            set([x.name for x in res.all()]),
            set(["CV 1", "CV 3", "CV 4"]),
            )

    def test_cases_same_prefix_return_both(self):
        """2 cases with the same prefix are both returned (IDs OR'ed)"""
        self.create_testdata()
        res = self.filter([u"moz"])
        self.assertEqual(
            set([x.name for x in res.all()]),
            set(["CV 3", "CV 4"]),
            )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.