blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4299063172df7741d2dbaba1b2dab3cadd2f1bee | 1cc7398a3327e82ce3f3bcbfec1aa9509c7fea5e | /features/admin/rm_admin.py | 652d50165daf69ccd8be279204b0c70776bf8e2b | [] | no_license | nicholaslopiccolo/dragonscale_bot | b5169991d8ea5f58d6911d542d9136b888fa8e02 | 1a2e3911b715edfbbe051f68f02970a35fdb300f | refs/heads/main | 2023-03-29T05:44:13.291869 | 2021-04-01T18:45:20 | 2021-04-01T18:45:20 | 333,565,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | from utils.bad_comand import bad_command
from telegram import (
ReplyKeyboardMarkup,
ReplyKeyboardRemove,
Update
)
from telegram.ext import (
ConversationHandler,
CallbackContext,
)
# Module-level handles populated once by init().
App = None
white_list = None


def init(app):
    """Remember the application object and cache its player whitelist."""
    global App, white_list
    App = app
    white_list = app.get_white_list()
def start(update: Update, context: CallbackContext) -> int:
    """Handle the remove-player admin command: ``<cmd> <uid>``.

    Replies with the removed player's details (flagging self-removal),
    or delegates to bad_command() on a malformed command.  Always ends
    the conversation.
    """
    params = update.message.text.split()
    if len(params) != 2:
        return bad_command(update, context)
    # Narrow try: only the uid parse legitimately raises ValueError here.
    # The original wrapped the whole body, so an unrelated ValueError from
    # the whitelist/App calls was silently reported as a bad command.
    try:
        uid = int(params[1])
    except ValueError:
        return bad_command(update, context)
    player = white_list.get_player(uid)
    if player == -1:  # whitelist sentinel for "unknown uid"
        update.message.reply_text("player not found")
        return ConversationHandler.END
    r_name = white_list.get_rank_name(player.get_rank())
    uid = player.get_uid()
    msg = f" Role: {r_name.capitalize()}\n Name: {player.get_name()}\n UID: {uid}"
    is_you = ""
    if uid == update.message.from_user.id:
        is_you = "\n... YOURSELF"
    App.rm_player(player)
    update.message.reply_text(f"Removing...\n\n{msg}\n{is_you}")
    # Bug fix: the success path previously fell through returning None,
    # unlike every other exit of this handler.
    return ConversationHandler.END
| [
"giokkaaaa@gmail.com"
] | giokkaaaa@gmail.com |
238bb0a7fd380ebf85063bdce6f147f739a7c910 | 9f4bf225210f986ea6cdb6d3ef027a934b9f639e | /core/migrations/0011_auto_20191002_0946.py | eb0080eba48b91315f7000844ed829e6d99f4964 | [] | no_license | khc196/yamigu_backend | fc5ba098a2796772956b0026d9add28f2829ee8f | 842da98e1097a976b2e481b58e489a87ddb3a9ca | refs/heads/master | 2022-12-16T05:15:00.237169 | 2020-02-17T11:56:21 | 2020-02-17T11:56:21 | 199,975,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | # Generated by Django 2.2.3 on 2019-10-02 00:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Move the Rating<->Meeting link onto Rating.

    Drops the old ``Meeting.rating`` field and adds a nullable
    one-to-one ``Rating.meeting`` pointing back at core.Meeting.
    """

    dependencies = [
        ('core', '0010_rating_description'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='meeting',
            name='rating',
        ),
        migrations.AddField(
            model_name='rating',
            name='meeting',
            # Nullable so existing Rating rows survive the schema change.
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Meeting'),
        ),
    ]
| [
"khc146@gmail.com"
] | khc146@gmail.com |
dfde5e808fe1ce0783dadb61a34757fa970b9104 | 6c48c604f71100c635e743a4b002425e55e1f0fa | /test_demo/test_case/WorkorderManagement/WorkorderQuery/receive_order.py | db8b0d2f78e6618bbcc311665b820afdc55d7469 | [] | no_license | Petrichorll/learn | a74456aed390be2ee9481f95feea4bc57ee62075 | 252fcfe2f496621d377c5477c054ce55c471a07d | refs/heads/main | 2023-08-25T15:32:51.367406 | 2021-10-25T07:47:16 | 2021-10-25T07:47:16 | 332,707,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,407 | py | # -*- coding: utf-8 -*-
# 工单查询
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
import unittest, time, re, random
import sys
sys.path.append(r"C:\\Users\\19144\\PycharmProjects\\学习\\test_demo\\public")
sys.path.append(r"C:\\Users\\19144\\PycharmProjects\\学习\\test_demo\\test_case\\AuditContent\\UploadFiles")
sys.path.append(r"C:\\Users\\19144\\PycharmProjects\\学习\\test_demo\\test_case")
sys.path.append(r"C:\\Users\\19144\\PycharmProjects\\学习\\test_demo\\test_case\\AuditContent\\FindingsAudit")
sys.path.append(r"C:\\Users\\19144\\PycharmProjects\\学习\\test_demo\\test_case\\WorkorderManagement\\MyOrder")
import login, user_add, workxls, upload_files, workorder, machine_audit_results, workxlsx, workorder_query, website_tips,myorder_query
from selenium.webdriver.common.action_chains import ActionChains
# 工单管理
class Receive_Work_Order(unittest.TestCase):
    """Selenium tests for receiving work orders from the work-order query page.

    XPath templates below target fixed positions in the page DOM; "{}" is the
    1-based table row index filled in via str.format().
    """
    rorder_button_xpath = "//*[@id='app']/div/div[2]/section/section/div/div/div/div/div[3]/div[4]/div[2]/table/tbody/tr[{}]/td[11]/div/span/span/a"  # per-row "receive order" button
    tipes_xpath = "/html/body/div[2]"  # tooltip / notice text
    checkbox_xpath = "//*[@id='app']/div/div[2]/section/section/div/div/div/div/div[3]/div[3]/table/tbody/tr[{}]/td[1]/div/label/span"  # first-column checkbox of each row
    robox_button_xpath = "//*[@id='app']/div/div[2]/section/section/div/div/div/div/div[2]/div[2]/a/span"  # "confirm receive" button shown after ticking rows
    robox_cancel_button_xpath = "//*[@id='app']/div/div[2]/section/section/div/div/div/div/div[2]/a/span"  # "cancel selection" button shown after ticking rows
    ro_confirm_button_xpath = "//*[@id='app']/div/div[2]/section/section/div/div/div/div/div[5]/div/div/div[3]/span/button[1]/span"  # OK button of the confirmation dialog
    ro_cancel_button_xpath = "//*[@id='app']/div/div[2]/section/section/div/div/div/div/div[5]/div/div/div[3]/span/button[2]/span"  # Cancel button of the confirmation dialog
    confirm_imf_xpath = "//*[@id='app']/div/div[2]/section/section/div/div/div/div/div[5]/div/div/div[2]/div/div/p[1]"  # prompt text of the confirmation dialog

    def setUp(self):
        # Log in through CAS and configure a generous implicit wait.
        self.driver = login.Login_CAS.login()
        self.driver.implicitly_wait(30)
        self.verificationErrors = []
        self.accept_next_alert = True

    # Test case: receive a single work order via its "receive" button.
    def test_Receive_Work_Order(self):
        driver = self.driver
        # Open the work-order query page.
        driver = workorder_query.Work_Order_Query.OpenOrderQuery(driver)
        # TODO: seed a "二级审核完成" (second-level audit complete) order on
        # page 1, ideally as the third row.
        pass
        # Section 0: verify that completed orders cannot be received.
        order_list = Receive_Work_Order.TraverseList2Page(driver)  # read two pages; collect [order id, audit status] rows
        driver = workorder_query.Work_Order_Query.OpenOrderQuery(driver)  # reopen the query page
        i = 11  # search the first 10 entries for a completed order
        for i in range(1, 12):
            if (order_list[i - 1][1] == "二级审核完成"):
                break
        if (i == 11):
            raise AssertionError("\n没有”二级审核完成“的工单,无法进行不能领取的用例")
        driver = Receive_Work_Order.CheckButtonAndBox(driver, i, 1)  # row i must have its controls disabled
        # Section 1: receive a single order by clicking its button.
        j = random.randint(4, 10)  # pick a random row to receive
        if (j == i): j = j + 1
        driver = Receive_Work_Order.CheckButtonAndBox(driver, j)  # row j must be clickable
        driver.find_element_by_xpath(Receive_Work_Order.rorder_button_xpath.format(j)).click()  # click "receive order"
        driver = Receive_Work_Order.CheckComfirmbox(driver)  # verify the confirmation dialog
        driver.find_element_by_xpath(Receive_Work_Order.ro_cancel_button_xpath).click()  # cancel: nothing should happen
        time.sleep(0.5)
        driver.find_element_by_xpath(Receive_Work_Order.rorder_button_xpath.format(j)).click()  # click "receive order" again
        driver = Receive_Work_Order.CheckComfirmbox(driver)  # verify the confirmation dialog
        driver.find_element_by_xpath(Receive_Work_Order.ro_confirm_button_xpath).click()  # confirm: the receive succeeds
        time.sleep(0.5)
        tipsstr = website_tips.get_websitetips(driver)  # toast message in the top-right corner
        if (tipsstr != "领取成功"):
            raise AssertionError("\n右上角领取成功提示文案不正确!")
        del order_list[j - 1]  # the received order should vanish from the list
        new_order_list = Receive_Work_Order.TraverseList2Page(driver)
        del new_order_list[-1]
        if (order_list != new_order_list):
            raise AssertionError("\n领取工单后,查询的工单和期望的工单不正确")
        print(order_list)
        print(new_order_list)
        print("==============")
        time.sleep(0.2)
        driver.close()

    # Section 2: receive a single order via its checkbox.
    def test_Receive_Work_Order2(self):
        driver = self.driver
        # Open the work-order query page.
        driver = workorder_query.Work_Order_Query.OpenOrderQuery(driver)
        # TODO: seed a "二级审核完成" order on page 1, ideally as row 3.
        i = 3
        pass
        order_list = Receive_Work_Order.TraverseList2Page(driver)  # read two pages; collect [order id, audit status] rows
        driver = workorder_query.Work_Order_Query.OpenOrderQuery(driver)  # reopen the query page
        j = random.randint(4, 9)  # pick a random row to receive
        if (j == i): j = j + 1
        driver = Receive_Work_Order.CheckButtonAndBox(driver, j)  # row j must be clickable
        driver.find_element_by_xpath(Receive_Work_Order.checkbox_xpath.format(j)).click()  # tick the leftmost checkbox of row j
        time.sleep(1)  # pause to observe
        driver.find_element_by_xpath(Receive_Work_Order.robox_cancel_button_xpath).click()  # clear the selection
        time.sleep(1)  # pause to observe
        driver.find_element_by_xpath(Receive_Work_Order.checkbox_xpath.format(j)).click()  # tick row j again
        time.sleep(1)  # pause to observe
        driver.find_element_by_xpath(Receive_Work_Order.robox_button_xpath).click()  # click "confirm receive"
        driver = Receive_Work_Order.CheckComfirmbox(driver)  # verify the confirmation dialog
        driver.find_element_by_xpath(Receive_Work_Order.ro_cancel_button_xpath).click()  # cancel: nothing should happen
        time.sleep(1)  # pause to observe
        driver.find_element_by_xpath(Receive_Work_Order.robox_cancel_button_xpath).click()  # clear the selection
        time.sleep(1)  # pause to observe
        driver.find_element_by_xpath(Receive_Work_Order.checkbox_xpath.format(j)).click()  # tick row j once more
        time.sleep(1)  # pause to observe
        driver.find_element_by_xpath(Receive_Work_Order.robox_button_xpath).click()  # click "confirm receive"
        time.sleep(0.2)  # pause to observe
        driver.find_element_by_xpath(Receive_Work_Order.ro_confirm_button_xpath).click()  # confirm: the receive succeeds
        time.sleep(0.5)
        tipsstr = website_tips.get_websitetips(driver)  # toast message in the top-right corner
        time.sleep(1)  # pause to observe
        if (tipsstr != "领取成功"):
            raise AssertionError("\n右上角领取成功提示文案不正确!")
        del order_list[j - 1]  # the received order should vanish from the list
        new_order_list = Receive_Work_Order.TraverseList2Page(driver)
        new_order_list = new_order_list[:-1]
        if (order_list != new_order_list):
            raise AssertionError("\n领取工单后,查询的工单和期望的工单不正确")
        print(order_list)
        print(new_order_list)
        print("==============")
        time.sleep(0.2)
        driver.close()

    # Section 3: receive several orders at once via their checkboxes.
    def test_Receive_Work_Order3(self):
        driver = self.driver
        # Open the work-order query page.
        driver = workorder_query.Work_Order_Query.OpenOrderQuery(driver)
        # TODO: seed a "二级审核完成" order on page 1, ideally as row 3.
        pass
        order_list = Receive_Work_Order.TraverseList2Page(driver)  # read two pages; collect [order id, audit status] rows
        driver = workorder_query.Work_Order_Query.OpenOrderQuery(driver)  # reopen the query page
        sitelist = [2, 5, 7, 8]  # fixed set of rows to receive
        for j in sitelist:
            driver = Receive_Work_Order.CheckButtonAndBox(driver, j)  # each row must be clickable
            driver.find_element_by_xpath(Receive_Work_Order.checkbox_xpath.format(j)).click()  # tick its checkbox
        time.sleep(3)  # pause to observe
        driver.find_element_by_xpath(Receive_Work_Order.robox_cancel_button_xpath).click()  # clear all selections
        time.sleep(1)  # pause to observe
        for j in sitelist:
            driver.find_element_by_xpath(Receive_Work_Order.checkbox_xpath.format(j)).click()  # tick the rows again
        time.sleep(1)  # pause to observe
        driver.find_element_by_xpath(Receive_Work_Order.robox_button_xpath).click()  # click "confirm receive"
        driver = Receive_Work_Order.CheckComfirmbox(driver)  # verify the confirmation dialog
        driver.find_element_by_xpath(Receive_Work_Order.ro_cancel_button_xpath).click()  # cancel: nothing should happen
        time.sleep(1)  # pause to observe
        for j in sitelist:
            driver.find_element_by_xpath(Receive_Work_Order.checkbox_xpath.format(j)).click()  # click again: untick
        time.sleep(1)  # pause to observe
        for j in sitelist:
            driver.find_element_by_xpath(Receive_Work_Order.checkbox_xpath.format(j)).click()  # once more: re-tick
        time.sleep(1)  # pause to observe
        driver.find_element_by_xpath(Receive_Work_Order.robox_button_xpath).click()  # click "confirm receive"
        time.sleep(0.2)  # pause to observe
        driver.find_element_by_xpath(Receive_Work_Order.ro_confirm_button_xpath).click()  # confirm: the receive succeeds
        time.sleep(0.5)
        tipsstr = website_tips.get_websitetips(driver)  # toast message in the top-right corner
        time.sleep(1)  # pause to observe
        if (tipsstr != "领取成功"):
            raise AssertionError("\n右上角领取成功提示文案不正确!")
        i = 1
        for j in sitelist:
            del order_list[j - i]  # shifted index: earlier deletions moved later rows up
            i = i + 1
        new_order_list = Receive_Work_Order.TraverseList2Page(driver)
        new_order_list = new_order_list[:-4]
        if (order_list != new_order_list):
            raise AssertionError("\n领取工单后,查询的工单和期望的工单不正确")
        time.sleep(0.2)
        driver.close()

    # Section 4: receive a whole page of orders via the checkboxes.
    # NOTE(review): currently duplicates Section 3 (same fixed rows); it does
    # not yet tick an entire page.
    def test_Receive_Work_Order4(self):
        driver = self.driver
        # Open the work-order query page.
        driver = workorder_query.Work_Order_Query.OpenOrderQuery(driver)
        # TODO: seed a "二级审核完成" order on page 1, ideally as row 3.
        pass
        order_list = Receive_Work_Order.TraverseList2Page(driver)  # read two pages; collect [order id, audit status] rows
        print(order_list)
        print("==============")
        driver = workorder_query.Work_Order_Query.OpenOrderQuery(driver)  # reopen the query page
        sitelist = [2, 5, 7, 8]  # fixed set of rows to receive
        for j in sitelist:
            driver = Receive_Work_Order.CheckButtonAndBox(driver, j)  # each row must be clickable
            driver.find_element_by_xpath(Receive_Work_Order.checkbox_xpath.format(j)).click()  # tick its checkbox
        time.sleep(3)  # pause to observe
        driver.find_element_by_xpath(Receive_Work_Order.robox_cancel_button_xpath).click()  # clear all selections
        time.sleep(1)  # pause to observe
        for j in sitelist:
            driver.find_element_by_xpath(Receive_Work_Order.checkbox_xpath.format(j)).click()  # tick the rows again
        time.sleep(1)  # pause to observe
        driver.find_element_by_xpath(Receive_Work_Order.robox_button_xpath).click()  # click "confirm receive"
        driver = Receive_Work_Order.CheckComfirmbox(driver)  # verify the confirmation dialog
        driver.find_element_by_xpath(Receive_Work_Order.ro_cancel_button_xpath).click()  # cancel: nothing should happen
        time.sleep(1)  # pause to observe
        for j in sitelist:
            driver.find_element_by_xpath(
                Receive_Work_Order.checkbox_xpath.format(j)).click()  # click again: untick
        time.sleep(1)  # pause to observe
        for j in sitelist:
            driver.find_element_by_xpath(
                Receive_Work_Order.checkbox_xpath.format(j)).click()  # once more: re-tick
        time.sleep(1)  # pause to observe
        driver.find_element_by_xpath(Receive_Work_Order.robox_button_xpath).click()  # click "confirm receive"
        time.sleep(0.2)  # pause to observe
        driver.find_element_by_xpath(Receive_Work_Order.ro_confirm_button_xpath).click()  # confirm: the receive succeeds
        time.sleep(0.5)
        tipsstr = website_tips.get_websitetips(driver)  # toast message in the top-right corner
        time.sleep(1)  # pause to observe
        if (tipsstr != "领取成功"):
            raise AssertionError("\n右上角领取成功提示文案不正确!")
        i = 1
        for j in sitelist:
            del order_list[j - i]  # shifted index: earlier deletions moved later rows up
            i = i + 1
        new_order_list = Receive_Work_Order.TraverseList2Page(driver)
        new_order_list = new_order_list[:-4]
        print(order_list)
        print(new_order_list)
        print("==============")
        if (order_list != new_order_list):
            raise AssertionError("\n领取工单后,查询的工单和期望的工单不正确")
        print(order_list)
        print(new_order_list)
        print("==============")
        time.sleep(0.2)
        driver.close()

    @staticmethod
    def CheckComfirmbox(driver):
        """Assert the confirmation dialog shows the expected prompt text."""
        time.sleep(0.5)
        cistr = driver.find_element_by_xpath(Receive_Work_Order.confirm_imf_xpath).text
        if (cistr != "确定领取工单?"):
            raise AssertionError("\n确认框提示文案不正确!")
        return driver

    @staticmethod
    def CheckButtonAndBox(driver, i, disabled=0):
        """Check the receive button and checkbox state of row *i*.

        disabled=1 expects both controls to be disabled (completed order);
        disabled=0 expects both to be clickable.
        """
        # Check the "receive order" button.
        # above = driver.find_element_by_xpath(Receive_Work_Order.rorder_button_xpath.format(i))  # hover over the button
        # ActionChains(driver).move_to_element(above).perform()
        # time.sleep(3)  # pause to observe the effect
        # The hover code above cannot scroll the dropdown into view and would
        # raise, so the disabled state is read from the element HTML instead.
        htmlstr = driver.find_element_by_xpath(Receive_Work_Order.rorder_button_xpath.format(i)).get_attribute(
            'outerHTML')
        if (disabled):
            # tipstr = driver.find_element_by_xpath(Receive_Work_Order.tipes_xpath).text
            tipstr = "72小时后工单自动完成"  # cannot capture the tooltip text yet, so it is hard-coded
            if (tipstr != "72小时后工单自动完成"):
                raise AssertionError("\n提示信息不正确!")
            if (re.search("disabled", htmlstr)):
                pass
            else:
                raise AssertionError("\n按钮格式不正确,仍然可以点击!")
        else:
            if (re.search("disabled", htmlstr)):
                raise AssertionError("\n按钮格式不正确,无法点击!")
        # Check the checkbox.
        htmlstr = driver.find_element_by_xpath(Receive_Work_Order.checkbox_xpath.format(i)).get_attribute('outerHTML')
        if (disabled):
            if (re.search("disabled", htmlstr)):
                pass
            else:
                raise AssertionError("\n勾选框格式不正确,仍然可以点击!")
        else:
            if (re.search("disabled", htmlstr)):
                raise AssertionError("\n勾选框格式不正确,无法点击!")
        return driver

    @staticmethod
    def TraverseList2Page(driver):
        """Walk up to two pages of the query list.

        Returns a 2-D list of [order id, audit status] rows.
        """
        driver = workorder_query.Work_Order_Query.OpenOrderQuery(driver)  # open the query page
        time.sleep(1)
        ret_list = []
        i = 1  # row index within the current page (1..10)
        j = 1  # total rows read across both pages
        while (i):
            one_row_list = []
            try:
                # Short implicit wait so a missing row ends the loop quickly.
                driver.implicitly_wait(1)
                one_row_list.append(
                    driver.find_element_by_xpath(workorder_query.Work_Order_Query.dataxpath.format(i, 2)).text)
                one_row_list.append(
                    driver.find_element_by_xpath(workorder_query.Work_Order_Query.dataxpath.format(i, 7)).text)
                driver.implicitly_wait(30)
            except:
                driver.implicitly_wait(30)
                break
            ret_list.append(one_row_list)
            if (i == 10):
                # End of page: stop when "next page" is disabled, else advance.
                hstr = driver.find_element_by_xpath(workorder_query.Work_Order_Query.nextpage_xpath).get_attribute(
                    'outerHTML')
                if (re.findall("disabled", hstr)):
                    break
                driver.find_element_by_xpath(workorder_query.Work_Order_Query.nextpage_xpath).click()
                time.sleep(1)
                i = 0
            i = i + 1
            j = j + 1
            if (j == 21):
                break
        return ret_list

    @staticmethod
    def ReceiveOrders(driver, conut):
        """Receive the first *conut* orders and record the changes in the workbook."""
        # Record the current operator (could come from a helper in the login module).
        name = "组织A角色1"
        pass
        driver = workorder_query.Work_Order_Query.OpenOrderQuery(driver)  # open the query page
        time.sleep(1)
        i = 0
        imf = []
        while (i < conut):
            i = i + 1
            oneimf = []
            # Record into imf: current owner, order state, document source.
            oneimf.append(name)
            oneimf.append(
                driver.find_element_by_xpath(myorder_query.MyOrder_Query.dataxpath.format(i, 7)).text)
            oneimf.append(
                driver.find_element_by_xpath(myorder_query.MyOrder_Query.dataxpath.format(i, 5)).text)
            # Tick the row's checkbox.
            driver.find_element_by_xpath(Receive_Work_Order.checkbox_xpath.format(i)).click()
            time.sleep(0.2)
            imf.append(oneimf)
        driver.find_element_by_xpath(Receive_Work_Order.robox_button_xpath).click()  # click "confirm receive"
        time.sleep(0.2)  # pause to observe
        driver.find_element_by_xpath(Receive_Work_Order.ro_confirm_button_xpath).click()  # confirm: the receive succeeds
        # Record the operation time.
        timestr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        # Write imf and timestr back into the Excel workbook.
        i = 0
        for oneimf in imf:
            i = i + 1
            wod = workxls.getallimf('workorderdata.xls', i)
            wod.workorder_operationtime = timestr
            wod.workorder_inchargeperson = oneimf[0]
            if (oneimf[1] == "机器审核完成"):
                wod.workorder_orderstate = "一级审核中"
            elif (oneimf[1] == "一级审核完成"):
                wod.workorder_orderstate = "二级审核中"
            else:
                pass
            wod.workorder_documentssource = oneimf[2]
            workxls.changeimf('workorderdata.xls', i, wod)
        return driver

    def tearDown(self):
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
    unittest.main()  # allow running this test module directly
| [
"690267573@qq.com"
] | 690267573@qq.com |
20dd1dac0aeb8c51e07802451498abaf083c5dee | d0e892a2f6fe96148de74f3b4c54b550eada8e9d | /accounts/models.py | 63ccfd6309a43c5af03c902d24b3c7d53ac9116f | [] | no_license | InshaManowar/iste-summer-backend | 5db9eec42c329aecc824865fc20b04522cfbf332 | 5958f92fa40cf6a753d873d07a83414f0cb877d3 | refs/heads/master | 2023-08-02T19:42:22.960116 | 2021-09-20T18:17:17 | 2021-09-20T18:17:17 | 361,243,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,707 | py | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.contrib.auth.models import PermissionsMixin
from django.utils import timezone
class MyAccountManager(BaseUserManager):
    """Manager providing email-keyed creation helpers for Account."""

    def create_user(self, email, password=None, **kwargs):
        """Create and persist a regular account; email is mandatory."""
        if not email:
            raise ValueError('Users must have an email address')
        account = self.model(email=self.normalize_email(email), **kwargs)
        account.set_password(password)
        account.save(using=self._db)
        return account

    def create_superuser(self, email, password, **kwargs):
        """Create an account via create_user, then grant staff/superuser flags."""
        account = self.create_user(
            email=self.normalize_email(email),
            password=password,
            **kwargs
        )
        account.is_active = True
        account.is_staff = True
        account.is_superuser = True
        account.save(using=self._db)
        return account
class Account(AbstractBaseUser, PermissionsMixin):
    """Custom user model keyed on email instead of a username."""

    email = models.EmailField(verbose_name="email", max_length=160, unique=True)
    registration_number = models.CharField(max_length=9, unique=True)
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    date_joined = models.DateTimeField(default=timezone.now)
    last_login = models.DateTimeField(default=timezone.now)
    # Permission/state flags.
    is_admin = models.BooleanField(default=False)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    # Bug fix: is_superuser previously defaulted to True, silently granting
    # every newly created account full admin rights.  create_superuser()
    # sets it explicitly, so the default must be False.  NOTE: this field
    # change requires a new schema migration.
    is_superuser = models.BooleanField(default=False)

    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['first_name', 'last_name', 'registration_number']

    objects = MyAccountManager()

    def __str__(self):
        return self.email
| [
"inshamanowar22@gmail.com"
] | inshamanowar22@gmail.com |
2aa45bc5a565f3f723b23720ec89900a1c5bd6b0 | a83c3fa830a73bc07544783193fba615f6e76e51 | /ann_models/feedForwardNetwork/newfypfeedforward.py | f38786d3dfe51bec4787a7e966532df7476f265f | [] | no_license | RamzanShahidkhan/FYP-Bitcoin-forecasting | 984df983bcaa4c673c422071282727c12e346442 | 0bba3221573068ffb42581a4eba30eb06364aa5f | refs/heads/master | 2020-03-14T05:59:38.683177 | 2018-04-29T08:03:58 | 2018-04-29T08:03:58 | 131,475,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,059 | py | import numpy
import pandas as pd
import matplotlib.pyplot as plt
# Load the Kraken BTC/USD 1-minute dataset (columns 1..6) and drop NaN rows.
data_original = pd.read_csv('./krakenUSD_1-min_data_2014-01-07_to_2017-05-31.csv', usecols=[1,2,3,4,5,6],engine='python')
data_original = data_original.dropna()
data_original = data_original.values  # work on the raw numpy ndarray
#data_original = data_original.astype('float32')
print("ddd ",data_original[3])
print(data_original.shape)
# Chronological 70/30 train/test split (no shuffling: this is a time series).
train_size = int(len(data_original)* 0.70)
test_size = len(data_original) - train_size
print(train_size, test_size)
train = data_original[0:train_size,:]
test = data_original[train_size:len(data_original),:]
#test = data_original[train_size:len(data_original),4]
#,percentage =0.67
def create_Xt_Yt(X, y):
    """Split aligned feature/target sequences chronologically, 70/30.

    Returns (X_train, X_test, Y_train, Y_test).
    """
    # Bug fix: the original computed p = (int(len(X)) * .70), which is a
    # float; slicing a sequence or ndarray with a float index raises
    # TypeError.  int() must wrap the whole product.
    p = int(len(X) * 0.70)
    X_train = X[0:p]
    Y_train = y[0:p]
    X_test = X[p:]
    Y_test = y[p:]
    return X_train, X_test, Y_train, Y_test
#trx,tsx,try1, tsy1 = create_Xt_Yt(data_original,data_original[3])
#print("jajja", len(trx), len(tsx))
# Module-level accumulators; create_dataset() below shadows these with
# locals, so they stay empty and the len() prints further down report 0.
dataX, dataY = [], []
def create_dataset(dataset, look_back=1):
    """Build supervised pairs from column 0 of a 2-D array.

    Each X row is a window of `look_back` consecutive values; the matching
    Y entry is the value immediately after the window.
    """
    windows, targets = [], []
    last_start = len(dataset) - look_back - 1
    for start in range(last_start):
        windows.append(dataset[start:(start + look_back), 0])
        targets.append(dataset[start + look_back, 0])
    return numpy.array(windows), numpy.array(targets)
look_back = 1  # window size: one previous value predicts the next
# Build supervised (X, Y) pairs for both splits.
trainX, trainY= create_dataset(train, look_back)
testX, testY = create_dataset(test,look_back)
train = pd.DataFrame(train)
test = pd.DataFrame(test)
trainX = pd.DataFrame(trainX)
trainY = pd.DataFrame(trainY)
# Diagnostics.  Note: dataX/dataY are the untouched module-level lists,
# so their lengths always print as 0.
print("len data : ",len(data_original))
print("datax ",len(dataX))
print("dataY : ",len(dataY))
print("l train: ",len(train))
print("l test: ",len(test))
print("l xtrain: ",len(trainX))
print("l testX: ",len(testX))
print("l yrain: ",len(trainY))
print("l testY: ",len(testY))
print("tarin: ", train)
print("test : ", test)
print("trainX")
print(trainX)
print("trainY")
print(trainY)
# Plotting snippet kept disabled as a string literal.
'''
plt.plot(data_original, label="original")
plt.plot(train, label="train")
plt.plot(test, label ="test")
plt.legend()
plt.show()
'''
"noreply@github.com"
] | RamzanShahidkhan.noreply@github.com |
795307e94bb23223bab69e5d2c60681bf9d40c23 | 8990841b20e6ca2a249402cf6f6d148baf797034 | /src/utils/__init__.py | 6d9632449b4701aba91fa72280147e53dc6f8047 | [] | no_license | elumixor/PSIA | 2a0a74b2c60ad52e374b96d13587f90069d2359d | 7242f7c23bdd168e581f7cb732b73a9d0fec580e | refs/heads/master | 2023-04-11T04:07:34.772454 | 2021-04-03T15:30:15 | 2021-04-03T15:30:15 | 341,991,359 | 0 | 0 | null | 2021-04-03T15:30:15 | 2021-02-24T18:10:02 | Python | UTF-8 | Python | false | false | 855 | py | import yaml
def read_yaml(path: str):
    """Load and return the parsed contents of the YAML file at *path*."""
    with open(path, "r") as handle:
        return yaml.safe_load(handle)
class log:
    """print-style helper with ANSI-colored level methods.

    Instantiating the class (``log("msg")``) prints plainly, like print().
    """

    _GREEN = '\033[92m'
    _RED = '\033[91m'
    _DARK_GREY = '\033[90m'
    _END = '\033[0m'
    _BOLD = '\033[01m'

    def __init__(self, *args, **kwargs):
        # Plain, uncolored output.
        print(*args, **kwargs)

    @staticmethod
    def error(*args, **kwargs):
        print(log._RED + log.get_message(*args) + log._END, **kwargs)

    @staticmethod
    def info(*args, **kwargs):
        print(log._DARK_GREY + log.get_message(*args) + log._END, **kwargs)

    @staticmethod
    def success(*args, **kwargs):
        print(log._GREEN + log.get_message(*args) + log._END, **kwargs)

    @staticmethod
    def get_message(*args):
        """Join the positional arguments with spaces, like print would."""
        return ' '.join(str(arg) for arg in args)
| [
"yazykov.v@nakukop.com"
] | yazykov.v@nakukop.com |
effe99957b725bfa9a85dca521246b732bfc9c5e | 8e128b9ac36baac326181f0d8cee4e2ea9d65284 | /webapp/urls.py | b07bcc42ed00602b5075885c4adf8ac936ff829e | [] | no_license | appcubator/book-face | 4217967096d1c1b40d7cd9c4ce0ed2c04eb7f426 | 63249306ad770620d3a74987e3719fc873002cd7 | refs/heads/master | 2021-01-16T17:47:05.843222 | 2013-09-08T21:40:22 | 2013-09-08T21:40:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,764 | py |
from django.conf.urls import include, patterns, url
from django.contrib import admin
from django.contrib.auth.views import logout
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.core.urlresolvers import reverse
from django.views.generic import RedirectView
# Page views (webapp.pages module).
urlpatterns = patterns('webapp.pages',
    url(r'^$', 'homepage'),
    url(r'^profile/(\d+)/$', 'user_profile'),
    url(r'^Edit_profile/$', 'edit_profile'),
    url(r'^All_users/$', 'all_users'),
    url(r'^Wall_post_Page/(\d+)/$', 'wall_post_page'),
    url(r'^Newsfeed/$', 'newsfeed'),
    url(r'^Friendship_Page/(\d+)/$', 'friendship_page'),
    url(r'^My_Friends/$', 'my_friends'),
)

# Form-submission endpoints (webapp.form_receivers module).
urlpatterns += patterns('webapp.form_receivers',
    url('^__form_receiver/loginform/$', 'login'),
    url('^__form_receiver/shortsignupform/$', 'sign_up'),
    url('^__form_receiver/create_wall_post/(\\d+)/$',
        'create_wall_post'),
    url('^__form_receiver/create_friendship/(\\d+)/$',
        'create_friendship'),
    url('^__form_receiver/edit_user/$', 'edit_user'),
)

admin.autodiscover()  # register ModelAdmin classes before wiring admin URLs

# Auth, admin and third-party app routes.
urlpatterns += patterns('',
    url(r'', include("social_auth.urls")),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^grappelli/', include('grappelli.urls')),
    url(r'^__logout/$', logout, kwargs={'next_page': '/'}),
)
| [
"www-data@ip-10-154-156-62.ec2.internal"
] | www-data@ip-10-154-156-62.ec2.internal |
27c31c06cb860a5ec60f0423085505bb64db4361 | 9dd06dd024e1897855db33762f3d987dff60a541 | /starting/read_file.py | d86267d5ce08bb11428143def52ec5794f17c7de | [] | no_license | adiyosef/PycharmProjects2 | 02efc5be51e992de846ae7a34ea386f86e348e87 | c5c2103a76c69add900e13d92601c2d1e5284d1c | refs/heads/master | 2020-11-25T02:32:56.568027 | 2019-12-23T17:02:28 | 2019-12-23T17:02:28 | 228,452,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py |
# Overwrite employees.txt with a single seed record.  A context manager
# guarantees the handle is closed even if the write raises (the original
# relied on an explicit close() call).
with open("employees.txt", "w") as employee_file:
    employee_file.write("Toby - Human Resourcess \n")
"ayosef83@gmail.com"
] | ayosef83@gmail.com |
874cf9a190e00e1d8cffb3911b83199a529ef7d7 | 42853d0eb68b6be68a2761ae1ed6589d8b8b1669 | /C++/edo_connect4/scripts/player_detector_server.py | 30c18a53727a35184409b77e08b1346860caa919 | [] | no_license | EvaRamaj/projects | fd0c5ecf5ce1c252ca982d636127aa3d6d8c90d8 | 4cc6e520881ddcd27923d65016e2c9c7c909225f | refs/heads/master | 2023-01-29T04:22:59.781702 | 2020-03-16T16:17:09 | 2020-03-16T16:17:09 | 247,737,397 | 0 | 0 | null | 2023-01-13T23:44:00 | 2020-03-16T15:03:29 | JavaScript | UTF-8 | Python | false | false | 409 | py |
from edo_connect4.srv import *
import rospy
def handle_player_detector(req=None):
    """Service callback: report the detected player's color (0 for now).

    *req* is the incoming service request; it is unused but required,
    since rospy invokes handlers with the request object.  The default
    keeps the function callable with no arguments, as before.
    """
    print("Returning the player's color")
    player = 0
    return PlayerDetectorResponse(player)


def player_detector_server():
    """Register the player_detector service and spin until shutdown."""
    # Bug fix: the original call was rospy.Service(name, player_detector)
    # with no handler, so incoming requests were never answered.
    # rospy.Service requires (name, service_class, handler).
    s = rospy.Service('/edo_connect4_services/player_detector',
                      player_detector, handle_player_detector)
    print("player detector service is ready.")
    rospy.spin()


if __name__ == "__main__":
    player_detector_server()
| [
"eua.ramaj@gmail.com"
] | eua.ramaj@gmail.com |
9cdfc43db870a09854c65404a963963d2cb4b43d | bbf744bfbfd9a935bd98c7cf54152a5d41194161 | /chapter_15/die_visual.py | d9629d134497d4af77867b78e009e95a6471a52b | [] | no_license | terranigmark/python-crash-course-projects | 65a7863be2d26fe8b91ac452b12203386eb0259a | 79ed9ed8e6a1bf015990a9556689379274231d13 | refs/heads/master | 2022-12-05T21:59:00.352140 | 2020-08-21T04:59:50 | 2020-08-21T04:59:50 | 266,263,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | from plotly.graph_objs import Bar, Layout
from plotly import offline
from die import Die
# Create the dice: a standard D6 and a D10.
die_1 = Die()
die_2 = Die(10)

# Make some rolls and store the summed results in a list.
results = []
for roll_num in range(50_000):
    result = die_1.roll() + die_2.roll()
    results.append(result)

# Analyze the results: count how often each possible sum (2..16) occurred.
frequencies = []
max_result = die_1.num_sides + die_2.num_sides
# Bug fix: the loop previously stopped at max_result exclusive, so the
# largest possible sum was never counted and `frequencies` ended up one
# element shorter than `x_values` below.
for value in range(2, max_result + 1):
    frequency = results.count(value)
    frequencies.append(frequency)

# Visualize the results.
x_values = list(range(2, max_result + 1))
data = [Bar(x = x_values, y = frequencies)]

x_axis_config = {'title': 'Result', 'dtick': 1}
y_axis_config = {'title': 'Frequency of Result'}
my_layout = Layout(title = 'Results of rolling two D6 and D10 50,000 times', xaxis = x_axis_config, yaxis = y_axis_config)
offline.plot({'data': data, 'layout': my_layout}, filename = 'd6_d10.html')
"linnk99@gmail.com"
] | linnk99@gmail.com |
b50602d83772f7c0f08c6134dafb99cfe78bfab9 | b96ee55778683e937101893930761f425b51f8c5 | /src/cfn_models.py | 2998553de5aca1ebefcaf2307e29a34cd3ec4142 | [
"MIT"
def intersect(a, b):
    """Return the distinct common elements of *a* and *b* as a list.

    Element order in the result is unspecified (set semantics).
    """
    return list(set(a).intersection(b))
def list_in(a, b):
    """Return True when every distinct element of *a* also appears in *b*.

    Note: the comparison is len(unique common elements) == len(a), so a
    duplicate value in *a* makes this False even when *b* contains it.
    """
    return len(intersect(a, b)) == len(a)
class Player():
    """Thin wrapper over one CFN player search-result record."""

    def __init__(self, json_d):
        self.name = json_d['fightersid']
        self.cfn_id = int(json_d['publicid'])
        self.region = json_d['region']
        self.platform = json_d['accountsource']

    @classmethod
    def create(cls, json_d):
        """Build a Player from *json_d*, or None when required keys are missing."""
        required = ('fightersid', 'publicid', 'region', 'accountsource')
        if json_d and all(key in json_d for key in required):
            return cls(json_d)
        return None
class PlayerSearch():
    """Collection of Player objects parsed from a CFN search response."""

    def __init__(self, json_d):
        # Keep only the records Player.create() could parse.
        candidates = (Player.create(record) for record in json_d)
        self.found_players = [player for player in candidates if player]

    @classmethod
    def create(cls, json_d):
        """Build a PlayerSearch from the raw API payload, or None if malformed."""
        if json_d and 'response' in json_d and 'searchresult' in json_d['response'][0]:
            return cls(json_d['response'][0]['searchresult'])
        return None
| [
"poupi.12.pi+github@gmail.com"
] | poupi.12.pi+github@gmail.com |
2f3b32eb3471d006c1efaa354bce90041d9bbcbe | 6193c2d2e1734e97a1b75dcd08db2966e4812335 | /neuralnetwork/neuralnetwork.py | 0f09caea117f9772e1ca8a27e208c7a50be46410 | [] | no_license | YHE54/machinelearningproject | c887e72ffce51622840270f3220d79337c475e96 | e64dcac4f19c1d3819ee884547e4351ceb779646 | refs/heads/master | 2021-01-13T12:33:27.355632 | 2016-12-07T03:13:21 | 2016-12-07T03:13:21 | 72,567,759 | 0 | 0 | null | 2016-11-01T19:09:12 | 2016-11-01T19:09:12 | null | UTF-8 | Python | false | false | 2,133 | py | import tensorflow as tf
import numpy as np
import operator
import scipy.io as sio
class NeuralNetwork(object):
def __init__(self, num_feats, num_nodes=4,learn_rate=0.001,keep_prob=1):
# create session
self.keep_prop = keep_prob
self.sess = tf.Session()
# create placeholders for inputs
self.x = self.placeholder('x',tf.float32,[None, num_feats])
self.y = self.placeholder('y',tf.float32,[None, 1])
# weight and bias variables for neural network
self.w1 = self.weight_variable('w1',tf.float32,[num_feats, num_nodes])
self.w2 = self.weight_variable('w2',tf.float32,[num_nodes, num_nodes])
self.w3 = self.weight_variable('w3',tf.float32,[num_nodes, 1])
# create model
self.yhat = self.model(self.x)
# loss
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(self.yhat, self.y))
self.update = tf.train.AdamOptimizer(learning_rate=learn_rate).minimize(self.loss)
# initialize all variables
self.sess.run([tf.initialize_all_variables()])
self.saver = tf.train.Saver()
def model(self,x):
layer1 = tf.nn.relu(tf.matmul(self.x, self.w1))
# layer1drop = tf.nn.dropout(layer1,self.keep_prop)
layer2 = tf.nn.relu(tf.matmul(layer1, self.w2))
# layer2drop = tf.nn.dropout(layer2, self.keep_prop)
layer2drop = tf.nn.dropout(layer2, self.keep_prop)
return tf.matmul(layer2,self.w3)
def train(self,x,y):
self.sess.run([self.update], feed_dict={
self.x: x,
self.y: y
})
def predict(self,testx):
return self.sess.run(self.yhat, feed_dict={
self.x: testx
})
def savemodel(self,path):
self.saver.save(self.sess,path)
def close(self):
self.sess.close()
def placeholder(self,name,type,shape):
return tf.placeholder(name=name,dtype=type,shape=shape)
def weight_variable(self,name,type,shape):
return tf.Variable(tf.truncated_normal(shape, stddev=0.01), name='w1')
if __name__ == "__main__":
pass | [
"rafihaque90@gmail.com"
] | rafihaque90@gmail.com |
e17ee545bad59da2e915e94d3552880ccaee6824 | 422dd5d3c48a608b093cbfa92085e95a105a5752 | /students/MikeShand/Lesson 07/personjobdept_model.py | 99df5a818bc4032721dd8e7a08aef3fc2e2ae648 | [] | no_license | UWPCE-PythonCert-ClassRepos/SP_Online_Course2_2018 | a2052fdecd187d7dd6dbe6f1387b4f7341623e93 | b1fea0309b3495b3e1dc167d7029bc9e4b6f00f1 | refs/heads/master | 2021-06-07T09:06:21.100330 | 2019-11-08T23:42:42 | 2019-11-08T23:42:42 | 130,731,872 | 4 | 70 | null | 2021-06-01T22:29:19 | 2018-04-23T17:24:22 | Python | UTF-8 | Python | false | false | 3,881 | py | """
Simple database example with Peewee ORM, sqlite and Python
Here we define the schema
Use logging for messages so they can be turned off
"""
import logging
from peewee import *
from pprint import *
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.info('Here we define our data (the schema)')
logger.info('First name and connect to a database (sqlite here)')
logger.info('The next 3 lines of code are the only database specific code')
database = SqliteDatabase('personjobdept.db')
database.connect()
database.execute_sql('PRAGMA foreign_keys = ON;')
# needed for sqlite only
# if you wanted to use heroku postgres:
#
# psycopg2
#
# parse.uses_netloc.append("postgres")
# url = parse.urlparse(os.environ["DATABASE_URL"])
#
# conn = psycopg2.connect(
# database=url.path[1:],
# user=url.username,
# password=url.password,
# host=url.hostname,
# port=url.port
# )
# database = conn.cursor()
#
# Also consider elephantsql.com (be sure to use configparser for PWß)
logger.info('This means we can easily switch to a different database')
logger.info('Enable the Peewee magic! This base class does it all')
class BaseModel(Model):
class Meta:
database = database
logger.info('By inheritance only we keep our model (almost) technology neutral')
class Person(BaseModel):
"""
This class defines Person, which maintains details of someone
for whom we want to research career to date.
"""
logger.info('Note how we defined the class')
logger.info('Specify the fields in our model, their lengths and if mandatory')
logger.info('Must be a unique identifier for each person')
person_name = CharField(primary_key = True, max_length = 30)
lives_in_town = CharField(max_length = 40)
nickname = CharField(max_length = 20, null = True)
class Department(BaseModel):
"""
This class defines a department, the place where a person held a job
"""
logger.info('The department name')
dept_name = CharField(primary_key=True, max_length=30)
logger.info('The name of the manager')
dept_manager = CharField(max_length=30)
logger.info('The number of the department')
dept_number = CharField(max_length=4, constraints=[Check(
'upper (substr (dept_number, 1, 1) BETWEEN "A" AND "Z" )')])
class Job(BaseModel):
"""
This class defines Job, which maintains details of past Jobs
held by a Person.
"""
logger.info('Now the Job class with a similar approach')
job_name = CharField(primary_key = True, max_length = 30)
logger.info('Dates')
start_date = DateField(formats = 'YYYY-MM-DD')
end_date = DateField(formats = 'YYYY-MM-DD')
job_length = IntegerField()
logger.info('Number')
salary = DecimalField(max_digits = 7, decimal_places = 2)
logger.info('Which person had the Job')
person_employed = ForeignKeyField(Person, db_column='person_employed', related_name='was_filled_by', null = False)
job_dept = ForeignKeyField(Department, db_column='job_dept')
class PersonNumKey(BaseModel):
"""
This class defines Person, which maintains details of someone
for whom we want to research career to date.
"""
logger.info('An alternate Person class')
logger.info("Note: no primary key so we're give one 'for free'")
person_name = CharField(max_length = 30)
lives_in_town = CharField(max_length = 40)
nickname = CharField(max_length = 20, null = True)
try:
logger.info('Creating the database: {}'.format(database))
database.create_tables([
Person,
Department,
Job,
PersonNumKey
])
except Exception as ex:
logger.error('Unable to create database. Error: {}'.format(ex))
database.close()
raise Exception('Error: {}'.format(ex))
finally:
database.close()
| [
"noreply@github.com"
] | UWPCE-PythonCert-ClassRepos.noreply@github.com |
b856a354384fe2071b0485fdf1113cf230b29899 | cb048f6f1a9729badfaaf1c54e5532ef5e4c6c4b | /app.py | 976a3b2a3cc7a68b8d08302ad0da18948e9619d6 | [
"Apache-2.0"
] | permissive | wululu9312/sys-final-hw1 | d93207cd6da4dbf82dbb1b467d4ed839e87e57fd | 6ced8f7732053dd558a63284541863980fc91d00 | refs/heads/main | 2023-02-07T04:02:59.971638 | 2020-12-28T13:00:51 | 2020-12-28T13:00:51 | 323,875,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | from flask import Flask
from redis import Redis
app = Flask(__name__)
redis = Redis(host="redis")
@app.route("/")
def hello():
visits = redis.incr('counter')
html ="<h3>Hello World!</h3>" \
"<b>Visits:</b> {visits}"\
"<br/>"
return html.format(visits=visits)
if __name__ == "__main__":
app.run(host="0.0.0.0",port=80)
| [
"iva10312@gmail.com"
] | iva10312@gmail.com |
0bfb79d36568cfccf8cf24f2b79c2e9d28ae5de3 | 17b375ec4902614db37d8d4cd41bd33df313e8b4 | /main.py | 258b4a5d43651c78442403025417f432c7f67353 | [] | no_license | buaaqt/NodeReorder | e7994db9fe7721f2d2a847d803e86a0cdc24c6b3 | 3d86593159b205952210db6a63134bf1bc2b9d0b | refs/heads/master | 2022-12-09T19:27:43.961927 | 2020-09-13T11:08:08 | 2020-09-13T11:08:08 | 294,055,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,686 | py | import argparse
import time
import torch
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
from utils import load_cora, accuracy
from layers import GraphSAGELayer
from models import GCN, GraphSAGE
def train(model_type, epoch):
t = time.time()
model.train()
optimizer.zero_grad()
if model_type is 'gcn':
output = model(features, neigh_tab)
loss_train = F.nll_loss(output[idx_train], labels[idx_train])
acc_train = accuracy(output[idx_train], labels[idx_train])
loss_train.backward()
optimizer.step()
# Evaluate validation set performance separately,
# deactivates dropout during validation run.
model.eval()
output = model(features, neigh_tab)
loss_val = F.nll_loss(output[idx_val], labels[idx_val])
acc_val = accuracy(output[idx_val], labels[idx_val])
else:
_range = idx_train.tolist()
loss_train, acc_train = model.loss(features, neigh_tab, _range, labels)
loss_train.backward()
optimizer.step()
_range = idx_val.tolist()
loss_val, acc_val = model.loss(features, neigh_tab, _range, labels)
print('Epoch: {:04d}'.format(epoch + 1),
'loss_train: {:.4f}'.format(loss_train.item()),
'acc_train: {:.4f}'.format(acc_train.item()),
'loss_val: {:.4f}'.format(loss_val.item()),
'acc_val: {:.4f}'.format(acc_val.item()),
'time: {:.4f}s'.format(time.time() - t))
def test(model_type):
model.eval()
if model_type is 'gcn':
output = model(features, neigh_tab)
loss_test = F.nll_loss(output[idx_test], labels[idx_test])
acc_test = accuracy(output[idx_test], labels[idx_test])
else:
_range = idx_test.tolist()
loss_test, acc_test = model.loss(features, neigh_tab, _range, labels)
print("Test set results:",
"loss= {:.4f}".format(loss_test.item()),
"accuracy= {:.4f}".format(acc_test.item()))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='sage',
help='Select a graph neural network model.')
parser.add_argument('--dataset', type=str, default='cora',
help='Select a graph dataset.')
parser.add_argument('--epochs', type=int, default=100,
help='Number of epochs to train.')
parser.add_argument('--hidden', type=int, default=128,
help='Number of hidden units.')
parser.add_argument('--no-cuda', action='store_true', default=True,
help='Disables CUDA training.')
parser.add_argument('--lr', type=float, default=0.05,
help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4,
help='Weight decay (L2 loss on parameters).')
parser.add_argument('--dropout', type=float, default=0.5,
help='Dropout rate (1 - keep probability).')
parser.add_argument('--seed', type=int, default=42,
help='Random seed.')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
print('Using Cuda with', torch.cuda.get_device_name(0))
torch.cuda.manual_seed(args.seed)
# Load data
neigh_tab, features, labels, idx_train, idx_val, idx_test = load_cora()
if args.model is 'gcn':
model = GCN(n_feat=features.shape[1],
n_hid=args.hidden,
n_class=labels.max().item() + 1,
dropout=args.dropout)
elif args.model is 'sage':
sage = GraphSAGELayer(in_features=features.shape[1],
out_features=args.hidden)
model = GraphSAGE(n_class=labels.max().item() + 1,
batch_size=128,
sage=sage,
dropout=args.dropout)
optimizer = optim.Adam(model.parameters(),
lr=args.lr,
weight_decay=args.weight_decay)
if args.cuda:
model.cuda()
features = features.cuda()
labels = labels.cuda()
idx_train = idx_train.cuda()
idx_val = idx_val.cuda()
idx_test = idx_test.cuda()
# Train model
t_total = time.time()
for epoch in range(args.epochs):
train(args.model, epoch)
print("Optimization Finished!")
print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
test(args.model)
| [
"36197504+buaaqt@users.noreply.github.com"
] | 36197504+buaaqt@users.noreply.github.com |
89395092d95bda68413dbd80746d436fccf9d6be | 184a9b54b8f07df7b51a7199db1287e8ff36c45f | /homevisit/urls.py | a29b2a8e47ede53b80724bc1c8d6325f443637aa | [
"Apache-2.0"
] | permissive | curtis628/homevisit | 6292fffbd1e5d1ad8b49fb1a7a2984e7f5a28dbd | 1b4972888f1d21aed58377049790d91ab367abe8 | refs/heads/master | 2022-12-22T19:44:50.839016 | 2020-06-05T20:15:49 | 2020-06-05T20:15:49 | 159,893,479 | 0 | 0 | Apache-2.0 | 2022-12-08T01:28:49 | 2018-12-01T00:32:41 | Python | UTF-8 | Python | false | false | 556 | py | from django.urls import path
from . import views
urlpatterns = [
path("", views.HouseholdCreateView.as_view(), name="index"),
path("success", views.SuccessView.as_view(), name="success"),
path("about", views.AboutView.as_view(), name="about"),
path("contact", views.ContactUsCreateView.as_view(), name="contact"),
path("contact/success", views.ContactUsSuccessView.as_view(), name="contact_success"),
path("faqs", views.FaqListView.as_view(), name="faqs"),
path("ajax/load-times", views.load_times, name="ajax_load_times"),
]
| [
"tcurtis@vmware.com"
] | tcurtis@vmware.com |
37cb577af44fb8d2bd97a533aeb8b5320c0c9566 | 8e5e2fbb499ed178785c59f6f8a125d9e0a2501a | /lib/python3.5/site-packages/GPSReader/GPSError.py | 903ebb52c7ad64919dc5109cde0d879662e04e6f | [
"MIT"
] | permissive | hwroitzsch/BikersLifeSaver | 3bbbb544c0c24669c9dbce4ae53ae0185bd020c0 | 469c738fdd6352c44a3f20689b17fa8ac04ad8a2 | refs/heads/master | 2021-01-10T10:14:13.249642 | 2016-01-10T12:32:11 | 2016-01-10T12:32:11 | 44,967,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | # $Id: GPSError.py 31 2010-04-05 16:38:20Z cfluegel $
class GPSError(Exception):
""" Generelle GPS Exception """
def __str__(self):
return "ERROR: General GPS error"
#Todo: Delete maybe?
class GPSTelegramMalformed(GPSError):
def __str__(self):
return "ERROR: NMEA sentence is malformed!"
#Todo: Delete maybe?
class GPSCommError(GPSError):
""" Will be raised if something is wrong with the communication between the
the software and the connected GPS receiver or if no communciation is possible"""
def __init__(self,msg=""):
if msg <> "":
self._msg = msg
def __str__(self):
return self._msg
### new
class NMEATypeError(Exception):
def __str__(self):
return "ERROR: Type Fehler!"
class NMEAParseError(Exception):
def __str__(self):
return "ERROR: NMEA sentence couldn't be parsed correctly!"
class NMEANoValidFix(Exception):
def __str__(self):
return "ERROR: No valid position fix!"
if __name__ == "__main__":
print dir()
| [
"ghisallo@ghisallo.com"
] | ghisallo@ghisallo.com |
2e4ace5fd3813f53e76152447a7c0a5083bf930b | 41af719a9fe075db6ec883037d1d87d78da5aff9 | /producer/producer.py | 3fe24454a88d592f35e6f5fbd3e815de91c25c89 | [] | no_license | hammadasad/Kafka-Test | b6ec3aa6dddf7bb75e4884e0a43e961fce999a2d | 70a3c0041329c1cda043df1b3198dc41ed41926a | refs/heads/master | 2020-03-09T03:44:30.492019 | 2018-04-08T20:17:27 | 2018-04-08T20:17:27 | 128,571,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | # Imports
from kafka import KafkaProducer as kafka
import json
print("producer running")
# A producer sends messages aka records!
# Create producer
# We need a list of brokers (bootstrap_servers) & key/value serializers
# Shows the producer how to serialize its outoing messages (string serializer default)
# choosing json here
producer = kafka(bootstrap_servers = 'localhost:9092',
value_serializer = lambda value: json.dumps(value).encode('utf-8'))
# Send the messages to the consumer
for aMessage in range(6):
producer.send('kafka-test-topic', { 'values' : aMessage })
| [
"hammad_asad@msn.com"
] | hammad_asad@msn.com |
4fc82996213f3cd2cfb65f7f15a7d88bce6ffd6a | cd0001009720f8b167ba4e62432ee895893743a6 | /ve/bin/python-config | bcbf870f10aa43c40e5d22aa62df5f3da3755fdd | [] | no_license | prashantbhensdadia/crud_django | 7125402cef608c6aaf667b4afe65a78c0d874201 | 40bf4303cb3ac6991e103ce068f98085eb38cd8e | refs/heads/master | 2021-03-17T17:04:04.810248 | 2020-03-13T06:45:51 | 2020-03-13T06:45:51 | 247,002,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,354 | #!/home/agile/Desktop/prashant/test/ve/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
| [
"pbhensdadia@gmail.com"
] | pbhensdadia@gmail.com | |
cc0f68e8359cb95579b7d20bc6c3581cdc712cbd | 5e5e99e8493fbef64847494caf059c910c03c823 | /arrays/palindromic-substrings.py | b398b82336fa0371d58df3ab24c16dec63daf978 | [] | no_license | jcockbain/leetcode-python | f4e487b13ae4cacef9cbedfd4358f8ee0006e2b8 | d7f83ea5a11e4c8340c48698d29aa3bc0b942121 | refs/heads/master | 2020-07-09T19:58:42.933881 | 2019-10-28T23:34:34 | 2019-10-28T23:34:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | def countSubstrings(self, s):
"""
:type s: str
:rtype: int
"""
N = len(s)
ans = 0
for center in range(2*N - 1):
left = center / 2
right = left + center % 2
while left >= 0 and right < N \
and s[left] == s[right]:
ans += 1
left -= 1
right += 1
return ans
| [
"james.cockbain@ibm.com"
] | james.cockbain@ibm.com |
3308ea0512ff5b4243f5b124131c6d7352dbc14f | fa7d8f0558312c16ac2256aac0ab499edc38d16b | /Dogs_Cnn/run.py | 177b5b6a190f8b110424f7753c4c8daf249a8b97 | [] | no_license | ShiinaClariS/CNN | 3cd43d133d7335bbb5280e9743910acf0d73a6f5 | 4e97242f2122bc8d1cac48e3ab3b83cac0bfbc38 | refs/heads/main | 2023-07-17T17:18:21.617323 | 2021-08-24T06:49:36 | 2021-08-24T06:49:36 | 399,363,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | # -*- coding: utf-8 -*-
"""
author: ShiinaClariS
time: 2021年8月24日13:39:10
"""
from Dogs_Cnn.read_image import ReadImage
from Dogs_Cnn.create_cnn import CreateCnn
class Run:
def __init__(self, kind):
r = ReadImage(kind)
x_train, y_train, x_test, y_test = r.x_train, r.y_train, r.x_test, r.y_test
c = CreateCnn(kind)
cnn = c.cnn
self.history = cnn.fit(x=x_train,
y=y_train,
epochs=50,
batch_size=256,
validation_data=(x_test, y_test))
def get(self):
print(self.history.history)
print(self.history.epoch)
return self.history.history, self.history.epoch
if __name__ == '__main__':
run = Run(10)
run.get()
| [
"noreply@github.com"
] | ShiinaClariS.noreply@github.com |
a821ca1d55ea583b8a006afaf18094fad5155d80 | a73ddda82b9dc66122a5d93c7543fed2295baab9 | /Assignment-1/Code/que-12.py | 55cb8f31620ebadd9390c0875eca6802c627c05a | [] | no_license | patelmeet/Machine-Preception-Assignments | 9181d8951a9e6b51930001a5054f310a141f91fd | ef5b3af57e1009528096b1db97ada28511225622 | refs/heads/master | 2021-01-25T04:49:44.598442 | 2017-06-06T07:27:21 | 2017-06-06T07:27:21 | 93,488,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,771 | py | import cv2
import numpy as np
#Loop to check for all files together
string = "portrait" #filename prefix='portrait'
start = 1 #filename start index
end = 5 #filename end index "portrait1.jpg" to "portrait16.jpg"
for i in range(start,end+1):
color_image = cv2.imread(string + str(i) + ".jpg",cv2.IMREAD_COLOR) #read color image
#resize image
resized_image = np.copy(color_image)
h,w,d = resized_image.shape
while(h>700 or w>700):
resized_image = cv2.resize(resized_image,(0,0), fx=0.5, fy=0.5)
h,w,d = resized_image.shape
gray_image = cv2.cvtColor(resized_image,cv2.COLOR_BGR2GRAY) #convert color image to gray image
#define various haarcascade classifiers to detect face and eye
haarcascade_eye = cv2.CascadeClassifier('haarcascade_eye.xml')
haarcascade_eye_tree_eyeglasses = cv2.CascadeClassifier('haarcascade_eye_tree_eyeglasses.xml')
haarcascade_frontalface_alt = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
haarcascade_frontalface_alt2 = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')
haarcascade_frontalface_alt_tree = cv2.CascadeClassifier('haarcascade_frontalface_alt_tree.xml')
haarcascade_frontalface_default = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
haarcascade_lefteye_2splits = cv2.CascadeClassifier('haarcascade_lefteye_2splits.xml')
haarcascade_profileface = cv2.CascadeClassifier('haarcascade_profileface.xml')
haarcascade_righteye_2splits = cv2.CascadeClassifier('haarcascade_righteye_2splits.xml')
#apply all haarcascade classifiers to gray image using various parameters
#first parameter is image, second parameter is scale ratio after each stage of classifier
#third parameter is minimum size of detected object
result1 = haarcascade_eye.detectMultiScale(gray_image,1.2,4)
result2 = haarcascade_eye_tree_eyeglasses.detectMultiScale(gray_image)
result3 = haarcascade_frontalface_alt.detectMultiScale(gray_image,1.3,4)
result4 = haarcascade_frontalface_alt2.detectMultiScale(gray_image,1.3,4)
result5 = haarcascade_frontalface_alt_tree.detectMultiScale(gray_image,1.3,4)
result6 = haarcascade_frontalface_default.detectMultiScale(gray_image,1.3,4)
result7 = haarcascade_lefteye_2splits.detectMultiScale(gray_image)
result8 = haarcascade_profileface.detectMultiScale(gray_image,1.3,4)
result9 = haarcascade_righteye_2splits.detectMultiScale(gray_image,1.2,4)
#draw rectangle over detected region--for debug purpose
"""
for (x, y, w, h) in result1:
cv2.rectangle(resized_image, (x, y), (x + w, y + h), (255, 0, 0), 1)
for (x, y, w, h) in result2:
cv2.rectangle(resized_image, (x, y), (x + w, y + h), (0, 255, 0), 2)
for (x, y, w, h) in result3:
cv2.rectangle(resized_image, (x, y), (x + w, y + h), (0, 0, 255), 2)
for (x, y, w, h) in result4:
cv2.rectangle(resized_image, (x, y), (x + w, y + h), (255, 255, 0), 2)
for (x, y, w, h) in result5:
cv2.rectangle(resized_image, (x, y), (x + w, y + h), (255, 0, 255), 2)
for (x, y, w, h) in result6:
cv2.rectangle(resized_image, (x, y), (x + w, y + h), (0, 255, 255), 2)
for (x, y, w, h) in result7:
cv2.rectangle(resized_image, (x, y), (x + w, y + h), (0, 0, 0), 2)
for (x, y, w, h) in result8:
cv2.rectangle(resized_image, (x, y), (x + w, y + h), (255, 255, 255), 2)
for (x, y, w, h) in result9:
cv2.rectangle(resized_image, (x, y), (x + w, y + h), (0, 125, 250), 2)
"""
#if any classifier doesn't detect anything then it is not portrait
if(len(result1)==0 and len(result2)==0 and len(result3)==0 and len(result4)==0 and len(result5)==0 and len(result6)==0 and len(result7)==0 and len(result8)==0 and len(result9)==0):
hsv = cv2.cvtColor(resized_image, cv2.COLOR_BGR2HSV) #convert resized color image to HSV image
value = hsv[:,:,2] #extract Value channel from HSV image
total_pixel = h * w; #total number of pixel in resized image
#apply binary thresholding to get dark pixels, dark pixels have 0 value in thresholded_image,all other are 255
_, thresholded_image = cv2.threshold(value, 60, 255, cv2.THRESH_BINARY)
dark_pixels = np.where(thresholded_image == 0) #get cordinates of dark pixels
dark_pixel_count = len(dark_pixels[0]) #count number of dark pixels
if (dark_pixel_count > total_pixel / 2): #if more than half image is covered by dark pixels then it is night picture
print string + str(i) +' - night'
else:
print string + str(i) +' - landscape' #otherwise landscape
else :
print string + str(i) +' - portrait'
| [
"patelmeet953@gmail.com"
] | patelmeet953@gmail.com |
9d3c6ad0da27b0d12a6f5370c28f5a3aba341f95 | 9d9cecdc22dddc9605c0dc12f3ee65af42d1c5e6 | /01-conversation.py | 03cdba699e80da8eab98d522ae3578ddec338066 | [] | no_license | Jinhee81/python-and-djanggo | f8e7841dab3483261a2e611ef28905464a7afaad | 72a4f42863c14a90eee7e7b90a4d359743b1411e | refs/heads/master | 2020-03-25T19:58:26.707270 | 2018-08-14T05:46:47 | 2018-08-14T05:46:47 | 144,109,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | print(1+2)
# 입력값 받기
name = input ("당신의 이름은 무엇입니까?")
print("안녕하세요 " + name + "님")
| [
"noreply@github.com"
] | Jinhee81.noreply@github.com |
3b62c92fe6d71910367176af56ef6e899b0db55c | 9ad4804d7b6da11549ff698cebd390ac24ed7dde | /src/xsd_frontend/management/commands/build_version_cache.py | 2fa050924e95911e81d0adfc232f2cbb135b994e | [
"MIT"
] | permissive | Mystik01/wsas | eae36ded2ad42360a61dc3a0509e7e36513d54ed | c96c5dbe26da53e62bf75e7a3c17dfbab008ac1e | refs/heads/master | 2023-03-16T15:36:42.458461 | 2020-05-03T17:42:13 | 2020-05-03T17:42:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | from django.core.management.base import BaseCommand
from django.template.loader import get_template
from xsd_frontend.activity import XSDAction
class Command(BaseCommand):
"""A simple management command which clears the site-wide cache."""
# Taken from https://github.com/rdegges/django-clear-cache/blob/master/clear_cache/management/commands/clear_cache.py
help = 'Builds the cache of version diffs.'
def handle(self, *args, **kwargs):
self.stdout.write(self.style.MIGRATE_HEADING('Building version cache...'))
diff_template = get_template('versioning/diff.html')
i = 1
for action in XSDAction.objects.all():
if len(action.versions) == 0:
continue
for version in action.versions:
if i % 10 == 0:
self.stdout.write("{} ".format(version.pk), ending="\n")
else:
self.stdout.write("{} ".format(version.pk), ending="")
i += 1
diff_template.render({
'version': version,
})
self.stdout.write("Done")
| [
"will@wjdp.uk"
] | will@wjdp.uk |
0ba3ed98a522196a66863cdd0ce816654065b1b2 | 96a34a048c783a75736bf0ec775df22142f9ee53 | /services/web/server/src/simcore_service_webserver/db_listener/plugin.py | a4fda5b69bdff33c6386eee2e702f5c74e8bbb01 | [
"MIT"
] | permissive | ITISFoundation/osparc-simcore | 77e5b9f7eb549c907f6ba2abb14862154cc7bb66 | f4c57ffc7b494ac06a2692cb5539d3acfd3d1d63 | refs/heads/master | 2023-08-31T17:39:48.466163 | 2023-08-31T15:03:56 | 2023-08-31T15:03:56 | 118,596,920 | 39 | 29 | MIT | 2023-09-14T20:23:09 | 2018-01-23T10:48:05 | Python | UTF-8 | Python | false | false | 870 | py | """
computation module is the main entry-point for computational backend
"""
import logging
from aiohttp import web
from servicelib.aiohttp.application_setup import ModuleCategory, app_module_setup
from ..db.plugin import setup_db
from ..projects.db import setup_projects_db
from ..rabbitmq import setup_rabbitmq
from ..socketio.plugin import setup_socketio
from ._db_comp_tasks_listening_task import create_comp_tasks_listening_task
_logger = logging.getLogger(__name__)
@app_module_setup(
__name__,
ModuleCategory.ADDON,
settings_name="WEBSERVER_DB_LISTENER",
logger=_logger,
)
def setup_db_listener(app: web.Application):
setup_rabbitmq(app)
setup_socketio(app)
setup_projects_db(app)
# Creates a task to listen to comp_task pg-db's table events
setup_db(app)
app.cleanup_ctx.append(create_comp_tasks_listening_task)
| [
"noreply@github.com"
] | ITISFoundation.noreply@github.com |
6bcfc01476eee3630773e7cf5e73cd58795a967d | e744534cb38567665c2033589f52f5bebc978294 | /hexlet-code/brain-games/brain_calc.py | d399be9372c8e6bdaa85ff41054d2f5286734522 | [] | no_license | Norik123/python-project-lvl1 | 534cef6cb23b138cdf52466d79275cc26c661386 | 3e61b6c23f90073e5ddf6b31c98c887f80f11aeb | refs/heads/main | 2023-08-19T13:41:11.565095 | 2021-10-22T10:09:29 | 2021-10-22T10:09:29 | 418,884,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | import common_part
def run():
common_part.run("brain-calc","What is the result of the expression?"," 22 + 35 ",57," 6 * 8 ",48,\
" 33 - 4 ",29) | [
"83397313+Norik123@users.noreply.github.com"
] | 83397313+Norik123@users.noreply.github.com |
91b5cd1793212f9b413cb0d83a31695e0c689826 | 307100fbeebaf197e0d4e2a223ec8e93f912bcad | /DjangoWeb/DjangoWeb/wsgi.py | 343b105d3d31417b8a6739bfea555ef0849a007f | [] | no_license | nicontrerasi/proyecto-web | 0a6535968e77764b610bc82127421c82717f9cb4 | b773893d44e05195e8a4413ff94c8299bda59d05 | refs/heads/master | 2023-06-05T02:54:39.687405 | 2021-06-27T03:13:47 | 2021-06-27T03:13:47 | 358,968,378 | 0 | 0 | null | 2021-05-06T23:33:54 | 2021-04-17T19:45:58 | HTML | UTF-8 | Python | false | false | 395 | py | """
WSGI config for DjangoWeb project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DjangoWeb.settings')
application = get_wsgi_application()
| [
"ll.alberto@hotmail.com"
] | ll.alberto@hotmail.com |
50f1d7d67751358ce27272f293578950d39f2677 | 04af09bb0aad3deeacaaa2053a17cd307a30ed00 | /backend/neuralnetworkwebapp/views.py | 239fecbce6010d4642cf139a0c5b2f7efbaafd34 | [
"MIT"
] | permissive | Haydn-Robinson/Neural-Network-Web-App | 9738036695baea49d2171173132ed6bf8810f7c7 | 7948fb4f62d450a5a6bde2744c38469a726e1d7e | refs/heads/master | 2023-08-16T11:15:24.477837 | 2021-09-30T15:00:24 | 2021-09-30T15:00:24 | 392,456,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,882 | py | """
Routes and views for the flask application.
"""
from flask import Blueprint, render_template, request, redirect, url_for, session, Response, current_app, send_file, jsonify
import numpy as np
from pathlib import Path
from time import sleep
import json
import os
import redis
from rq import Queue, Connection
from rq.job import Job
from .hrpymlapi import do_network_training
import io
api = Blueprint('api', __name__)
public_routes = Blueprint('public', __name__)
@public_routes.route("/")
def index():
return current_app.send_static_file('index.html')
@public_routes.app_errorhandler(404)
def page_not_found(error):
return current_app.send_static_file('index.html')
# Endpoint to serve dataset metadata
@api.route('/api/datasetmetadata', methods=['GET'])
def get_dataset_metadata():
path = Path(current_app.root_path) / './static/' / 'datasetMetadata.json'
with open(path) as json_file:
return jsonify(json.load(json_file))
# Endpoint to serve dataset info
@api.route('/api/datasetinfo/<datasetid>', methods=['GET'])
def get_dataset_info(datasetid):
path = Path(current_app.root_path) / './static/' / 'datasetInfo.json'
with open(path) as json_file:
all_dataset_info = json.load(json_file)
return all_dataset_info[datasetid]
# Endpoint to add network training task to queue
@api.route('/api/trainnetwork', methods=['POST'])
def register_task():
if request.method == 'POST':
params = request.get_json()
redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
path = Path(current_app.root_path) / './static/' / 'datasetInfo.json'
with open(path) as json_file:
all_dataset_info = json.load(json_file)
dataset_info = all_dataset_info[params['datasetID']]
with Connection(redis.from_url(redis_url)):
q = Queue()
job = q.enqueue(do_network_training, params, dataset_info)
session['job_id'] = job.id
return {'id': job.id}
# Endpoint to get task progress
@api.route('/api/progress', methods=['GET', 'POST'])
def stream():
redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
with Connection(redis.from_url(redis_url)):
job = Job.fetch(session['job_id'])
def progress_stream(job):
complete = False
progress = 0
while not complete:
job.refresh()
if 'progress' in job.meta:
progress = job.meta['progress']
else:
progress = 0
if job.is_finished:
complete = True
sleep(0.1)
if not complete:
event = 'progress'
else:
event = 'redirect'
yield f"event:{event}\ndata:{progress}\n\n"
return Response(progress_stream(job), mimetype='text/event-stream', headers={'Cache-Control': 'no-transform'})
@api.route('/api/trainingsummary')
def results():
"""Renders the network training results page."""
redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
#data_buffer = io.StringIO()
with Connection(redis.from_url(redis_url)):
job = Job.fetch(session['job_id'])
#data_buffer.write(job.meta['print_output'])
return Response(
job.meta['print_output'],
headers = {
'Content-Type': 'text/plain; charset=utf-8',
'Content-Disposition': "attachment; filename='training_summary.txt'"
}
)
@api.route('/api/results')
def get_results():
redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
with Connection(redis.from_url(redis_url)):
job = Job.fetch(session['job_id'])
response = {
'auroc': job.meta['auroc'],
'roc_curve': job.meta['roc_curve'],
'training_failed': job.meta['training_failed']
}
return jsonify(response) | [
"haydn.robinson96@gmail.com"
] | haydn.robinson96@gmail.com |
2671fbfa345590729a83bef8261428be9a1bf018 | f8d5c4eb0244c4a227a615bc11c4c797760c3bec | /utils/rldraw.py | 2e944f936c3ad5ea6d074a6f0f9d74759cdd0c70 | [] | no_license | SamPlvs/reinforcement_learning_pytorch | e9b84659f870d938814177f1288fa4a2eb152599 | ffb9e53eeff011c4d3d5933a60c2b65fdbb18e2a | refs/heads/master | 2020-03-23T04:08:51.778325 | 2018-01-16T22:36:48 | 2018-01-16T22:36:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | import matplotlib.pyplot as plt
import numpy as np
def reward_episode(rewards, image_path, env_name='', method_name='', comment=''):
    """Plot per-iteration rewards and save the figure to ``image_path``.

    Parameters
    ----------
    rewards : sequence of float
        Reward obtained at each iteration, in order.
    image_path : str
        Destination file for the saved plot.
    env_name, method_name, comment : str, optional
        Labels embedded in the figure title.
    """
    total_num = len(rewards)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(list(range(total_num)), rewards)
    ax.set_xlabel('iteration')
    ax.set_ylabel('rewards')
    fig.suptitle("rewards_episodes_{}_{}_{}".format(env_name, method_name, comment))
    fig.savefig(image_path)
    # Close the figure: pyplot keeps every created figure alive until it is
    # closed, so repeated calls would otherwise leak memory.
    plt.close(fig)
"kebai0624@gmail.com"
] | kebai0624@gmail.com |
a7c38b7101a08b420124bd5cf5ef16cd28810fc1 | cfd8c95ec5c43941018e0640a8254d17a488833d | /lib/pymongo/max_key.py | b84945fbaf6afd3f805c805f1aeb002ac3f136de | [] | no_license | anarchivist/cablegate | 6d6ff5c823a811e9af3edd7ca13f9c8b4dcd2fd7 | fbfcee217eb8d96803b564eba0a8528fa9841c95 | refs/heads/master | 2021-01-18T04:51:46.548421 | 2010-12-01T04:01:21 | 2010-12-01T04:06:56 | 1,127,394 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | # Copyright 2009-2010 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bson.max_key import *
| [
"andrew@typeslashcode.com"
] | andrew@typeslashcode.com |
d2a3a52c10aa2c2c23543cdf7a0be7015a48572a | 26aa1149ca082385c603840827ce1eb3f73655bb | /Sliding.py | bef8bfcd45d99ca73ed38d5deca7d658169fc884 | [
"MIT"
] | permissive | cc-3/lab10 | da37640be161869bd12d3de293be8ba9b6c26c8c | 12c028737d0a31173e1bbb630b1fb207ed3fae0e | refs/heads/master | 2021-01-19T23:12:47.547541 | 2017-04-21T06:14:07 | 2017-04-21T06:14:07 | 88,947,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,140 | py | """
___ _ _ _ _
/ __| (_)__| (_)_ _ __ _
\__ \ | / _` | | ' \/ _` |
|___/_|_\__,_|_|_||_\__, |
|___/
"""
import string
def solution(W, H):
    """Return the solved board for a W x H sliding puzzle.

    The solved state is the first W*H - 1 uppercase letters in order,
    followed by the blank tile "-".
    """
    tile_count = W * H
    letters = string.ascii_uppercase[:tile_count - 1]
    return tuple(letters + "-")
def swap(board, i, j):
    """Return a new board tuple with the tiles at positions i and j exchanged."""
    cells = list(board)
    cells[j], cells[i] = cells[i], cells[j]
    return tuple(cells)
def children(W, H, board):
    """Return every board reachable from ``board`` by sliding one tile.

    A move exchanges the blank tile "-" with a horizontal or vertical
    neighbour; moves that would leave the W x H grid are skipped.  The
    successors are produced in the order left, right, up, down.
    """
    blank = board.index("-")
    col = blank % W

    def slide(j):
        # Exchange the blank with the tile at index j (inlined swap).
        cells = list(board)
        cells[blank], cells[j] = cells[j], cells[blank]
        return tuple(cells)

    moves = []
    if col > 0:            # blank not on the left edge
        moves.append(slide(blank - 1))
    if col < W - 1:        # blank not on the right edge
        moves.append(slide(blank + 1))
    if blank - W >= 0:     # blank not on the top row
        moves.append(slide(blank - W))
    if blank + W < W * H:  # blank not on the bottom row
        moves.append(slide(blank + W))
    return moves
| [
"andres.cv@galileo.edu"
] | andres.cv@galileo.edu |
c644f8047baa90d4d8fa571c27a0168540c120f6 | aceb0786c59ae07ea13681d108eed0a00dedabbd | /bin/symilar | 4936d4b63b2c9130769754e5e462bef5cfa5febd | [] | no_license | Abbas766/simple_is_better_than_complex | 92061dc2b74ac653008d3f905c9c93dc34d6f177 | 754ec8085e7714f102faf213eddcf58552c43671 | refs/heads/master | 2020-03-15T14:20:25.794352 | 2018-05-17T19:35:15 | 2018-05-17T19:35:15 | 132,188,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | #!/home/monarch/Desktop/sibtc/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_symilar
if __name__ == '__main__':
    # Strip the setuptools wrapper suffixes (-script.py / .exe) from argv[0]
    # so pylint's symilar tool sees a clean program name, then delegate.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run_symilar())
| [
"blade766@yahoo.com"
] | blade766@yahoo.com | |
6443f9b0c5ac63e497fafe16a636f45c11acfa52 | 193aea128eed89098d0b26ca1b9505056148556e | /train_ns.py | c55cf89f8fd1b085870c792f3844a967cdf737e5 | [] | no_license | jralha/libra_facies | 0928ea2813ac9f782438288e3a6c534a397f2ffd | d2ac24625e43b14aa97ee2d5d663afd68ca4f03f | refs/heads/master | 2020-09-06T02:19:10.941984 | 2020-08-04T17:23:26 | 2020-08-04T17:23:26 | 220,284,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,692 | py | #%% Imports
import os
import sys
import tensorflow as tf
import glob
import random
import numpy as np
import datetime
import pandas as pd
from utils import make_gen
from utils import define_model
from utils import postutils
import argparse
import xgboost as xgb
from sklearn.metrics import log_loss, precision_score, recall_score
#%%Parsing args
#Parser when run on command line
if 'ipykernel' not in sys.argv[0]:
parser = argparse.ArgumentParser()
parser.add_argument('--gpu_ids', type=str, default='-1')
parser.add_argument('--continue_training', type=bool, default=False)
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints')
parser.add_argument('--logs_dir', type=str, default='./logs')
parser.add_argument('--format', type=str, default='las')
parser.add_argument('--batch_size', type=int, default=10)
parser.add_argument('--steps', type=int, default=None)
parser.add_argument('--epoch_count', type=int, default=1500)
parser.add_argument('--init_epoch', type=int, default=0)
parser.add_argument('--model_file', type=str, default=None)
parser.add_argument('--optimizer', type=str, default='adam')
parser.add_argument('--window_size', type=int, default=1)
parser.add_argument('--run_name', type=str, required=True)
parser.add_argument('--model', type=str, default='cnn1d')
parser.add_argument('--datafile', type=str, required=True)
parser.add_argument('--labels', type=str, required=True)
args = parser.parse_args()
else:
# Set default options when using Jupyter
    class Args():
        """Stand-in for the argparse namespace when running interactively
        (e.g. in Jupyter), exposing the same attribute names as the
        command-line flags parsed in the branch above, with defaults."""
        def __init__(self):
            self.gpu_ids = '-1'
            self.continue_training = False
            self.checkpoints_dir = './checkpoints'
            self.logs_dir = './logs'
            self.format = 'las'
            self.batch_size=10
            self.steps=100
            self.epoch_count=1500
            self.init_epoch=0
            self.model_file=None
            self.optimizer='adam'
            self.window_size=1
            self.run_name='test0'
            self.model='resnet1d'
            self.datafile='./data/north_sea/train.csv'
            self.labels='LITHOLOGY_GEOLINK'
    args = Args()
#%% Setting path to dataset and dataset properties.
##########################################################
checks = args.checkpoints_dir+"\\"
log_dir = args.logs_dir+"\\"
data_file_path = os.path.join(args.datafile)
labels = args.labels
data_file = pd.read_csv(data_file_path)
sample_count = len(data_file)
class_names = np.unique(data_file[labels])
features = data_file.columns[3:]
#%% Training parameters.
########################################
RUN_NAME = args.run_name
CONTINUE = args.continue_training
BATCH_SIZE = args.batch_size
if args.steps == None:
STEPS_PER_EPOCH = np.ceil((sample_count/BATCH_SIZE)*0.01)
else:
STEPS_PER_EPOCH = args.steps
epochs = args.epoch_count
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
window_size = args.window_size
#%%Data generator and model
#########################################
seed = random.randint(1,999)
gens = make_gen.from_dataframe(data_file,BATCH_SIZE=BATCH_SIZE,length=window_size)
train_data_gen = gens[0]
val_data_gen = gens[1]
data_file = None
if CONTINUE == False:
FIRST_EPOCH = 1
model = define_model.build_model(
args.model,
len(class_names),
len(features),
window_size)
elif CONTINUE == True:
modelfile = args.model_file
FIRST_EPOCH = int(modelfile.split('.')[0].split('-')[-1])
model = tf.keras.models.load_model(checks+modelfile)
else:
print('Either start or continue training')
sys.exit()
#%% Compile Keras model, set callbacks and start/continue training
#########################################
if args.model != 'xgb':
model.compile(
optimizer=args.optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=['accuracy']
)
#Callbacks
#Save model if there is an increase in performance
filepath_best=checks+RUN_NAME+"-{epoch}"+".hdf5"
ckp_best=tf.keras.callbacks.ModelCheckpoint(filepath_best,
monitor='val_accuracy',
verbose=1,
save_best_only=True,
mode='max',
save_weights_only=False,
save_freq='epoch'
)
#Log model history in csv
logfile=RUN_NAME+'.csv'
csv_log=tf.keras.callbacks.CSVLogger(filename=log_dir+logfile)
#Early stopping, not using right now
earlystopping=tf.keras.callbacks.EarlyStopping(
monitor='val_loss', min_delta=0,patience=10
)
# Metrics logging, still unstable, takes too long on small batch sizes
# Currently after each epoch it calculates the metrics for each batch
# So on small batch sizes this takes a lot of time
# metrics = postutils.Metrics(val_data=val_data_gen,batch_size=BATCH_SIZE)
callbacks_list = [ckp_best,csv_log]
#Train or resume training
model.fit(
x=train_data_gen,
steps_per_epoch=STEPS_PER_EPOCH,
epochs=epochs,
callbacks=callbacks_list,
validation_data=val_data_gen,
validation_steps=STEPS_PER_EPOCH,
initial_epoch=FIRST_EPOCH
)
# %% XGB model, still needs a way to save results
if args.model == 'xgb':
print('Training XGB Model)')
model.fit(train_data_gen.data,train_data_gen.targets,verbose=1)
pred = model.predict(val_data_gen.data)
pred_proba = model.predict_proba(val_data_gen.data)
logloss = log_loss(val_data_gen.targets,pred_proba)
prec = precision_score(val_data_gen.targets,pred,average='macro')
rec = recall_score(val_data_gen.targets,pred,average='macro')
print(logloss,prec,rec)
# %%
| [
"42118093+jralha@users.noreply.github.com"
] | 42118093+jralha@users.noreply.github.com |
c011ed7f2cb3e67b62e09c3bacca76a3d4f716da | eee1560febb6f9d0020c70af9c0bf44b52d85040 | /train2View.py | 320f98e56b0a27c39911fc517cb2fd3ac4ce240b | [] | no_license | rohit256/Lipper-Multi-View_Lipreading | d0ce51655b01df78c956adcf20d49777a0c2b8a0 | 282019597b846f3b845bef8f03dbcb052b3d8b65 | refs/heads/master | 2020-04-29T05:21:46.969775 | 2019-03-15T20:01:27 | 2019-03-15T20:01:27 | 175,880,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,311 | py | from keras.utils import *
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers import Convolution2D, MaxPooling2D, Convolution3D, MaxPooling3D, LSTM
from keras.layers import *
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
from keras.layers import merge, Input
from keras.layers.advanced_activations import ELU
from keras.optimizers import Adam
from keras.layers.wrappers import *
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import *
import numpy as np
import os
from keras.regularizers import *
from keras.layers import LSTM, Input
from keras.applications.resnet50 import ResNet50,preprocess_input
from keras.layers import Dense, Activation, Flatten,Concatenate,concatenate
from keras.models import Model
from sklearn.utils import shuffle
import keras
import keras.backend as BK
import sys
from os.path import isfile, join
import shutil
import h5py
import os.path
import glob
import audio_tools as aud
import moduletest
FRAME_ROWS = 128
FRAME_COLS = 128
SR = moduletest.SamplingRate
LPC_ORDER = moduletest.LPCOrder
NFRAMES = moduletest.NumberOfFrames
MARGIN = int(NFRAMES/2)
COLORS = 1
CHANNELS = NFRAMES
TRAIN_PER = (len(moduletest.SpeakerTrain)*1.0)/(len(moduletest.SpeakerTrain)+len(moduletest.SpeakerTest))
LR = 0.005
nb_pool = 2
BATCH_SIZE = 26
DROPOUT = 0
DROPOUT2 = 0.0
EPOCHS = 30
FINETUNE_EPOCHS = 5
activation_func2 = 'tanh'
net_out = 50
reg = 0.0005
respath = '../results/speaker_independent/2_View/'
weight_path = join(respath,'weights/')
datapath = '../../lipsync/dataset/numpy_datasets/'
def savedata(Ytr, Ytr_pred, Yte, Yte_pred, v1, v2):
    """Save ground-truth and predicted audio features for one view pair.

    Creates a ``View=<v1>,<v2>`` sub-directory under the module-level
    results path and writes four ``.npy`` files whose names encode the
    speaker split and run settings.
    """
    view_dir = join(respath, 'View=' + str(v1) + ',' + str(v2) + '/')
    if not os.path.exists(view_dir):
        os.makedirs(view_dir)
    # Encode train/test speaker ids and run settings into the file name.
    speakerlist = '(' + str(moduletest.SpeakerTrain)[1:-1] + '||||' + str(moduletest.SpeakerTest)[1:-1] + ')'
    stem = 'STCNN_' + speakerlist + '_' + str(v1) + ',' + str(v2) + '_' + str(SR) + '_' + str(NFRAMES)
    for prefix, data in (('Ytr_', Ytr), ('Ytr_pred_', Ytr_pred),
                         ('Yte_', Yte), ('Yte_pred_', Yte_pred)):
        np.save(join(view_dir, prefix + stem + '.npy'), data)
def standardize_data(Xtr, Ytr, Xte, Yte):
    """Scale video frames and z-score audio targets using training statistics.

    Frames are cast to float32, mapped into [0, 1] and centred on the mean
    of the training frames; targets are normalised per-feature with the
    training mean and standard deviation.  Returns the transformed arrays
    together with the target means/stds so predictions can later be mapped
    back to the original scale.
    """
    Xtr = Xtr.astype('float32') / 255
    Xte = Xte.astype('float32') / 255
    frame_mean = np.mean(Xtr)
    Xtr = Xtr - frame_mean
    Xte = Xte - frame_mean  # test set centred with the *training* mean
    Y_means = np.mean(Ytr, axis=0)
    Y_stds = np.std(Ytr, axis=0)
    Ytr_norm = (Ytr - Y_means) / Y_stds
    Yte_norm = (Yte - Y_means) / Y_stds
    return Xtr, Ytr_norm, Xte, Yte_norm, Y_means, Y_stds
def load_data(datapath, view):
    """Load preprocessed video/audio arrays for every speaker for one view.

    For each speaker in the train + test lists this reads
    ``viddata_<SR>_<NFRAMES>_<speaker>_<view>.npy`` and
    ``auddata_<SR>_<NFRAMES>_<speaker>.npy`` from ``datapath`` and stacks
    them along axis 0.  Returns ``(None, None)`` as soon as any speaker's
    files are missing.
    """
    vid_chunks = []
    aud_chunks = []
    for speaker in np.concatenate((moduletest.SpeakerTrain, moduletest.SpeakerTest)):
        viddata_path = join(datapath, 'viddata_' + str(SR) + '_' + str(NFRAMES) + '_' + str(speaker) + '_' + str(view) + '.npy')
        auddata_path = join(datapath, 'auddata_' + str(SR) + '_' + str(NFRAMES) + '_' + str(speaker) + '.npy')
        if isfile(viddata_path) and isfile(auddata_path):
            print ('Loading data...')
            vid_chunks.append(np.load(viddata_path))
            aud_chunks.append(np.load(auddata_path))
        else:
            print ('Preprocessed data not found.')
            return None, None
    print ('Done.')
    if not vid_chunks:
        # No speakers configured: preserve the original empty-list result.
        return [], []
    # Concatenate once at the end instead of growing the arrays inside the
    # loop, which copied all previously loaded data on every iteration.
    return np.concatenate(vid_chunks, axis=0), np.concatenate(aud_chunks, axis=0)
def split_data(viddata, auddata):
    """Split paired video/audio arrays into train and test partitions.

    The first TRAIN_PER fraction of samples becomes the training set and
    the remainder the test set; video and audio are cut at the same index
    so the pairs stay aligned.
    """
    cut = int(len(auddata) * TRAIN_PER)
    Xtr, Xte = viddata[:cut, :, :, :], viddata[cut:, :, :, :]
    Ytr, Yte = auddata[:cut, :], auddata[cut:, :]
    return (Xtr, Ytr), (Xte, Yte)
def _feature_branch(frames):
    """Shared per-view CNN: five conv blocks followed by a flatten.

    Each block is Conv2D -> BatchNorm -> LeakyReLU; the second block also
    downsamples with average pooling.  Returns the flattened feature
    tensor for one camera view.  (Replaces two previously duplicated,
    byte-identical branch definitions.)
    """
    x = Convolution2D(32, 3, border_mode='same', data_format='channels_first',
                      init='he_normal', kernel_regularizer=l2(reg))(frames)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Convolution2D(64, 3, padding='same', data_format='channels_first',
                      init='he_normal', kernel_regularizer=l2(reg))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = AveragePooling2D(nb_pool, data_format='channels_first')(x)
    for n_filters in (64, 128, 128):
        x = Convolution2D(n_filters, 3, padding='same', data_format='channels_first',
                          init='he_normal', kernel_regularizer=l2(reg))(x)
        x = BatchNormalization()(x)
        x = LeakyReLU()(x)
    return Flatten()(x)


def build_model(net_out):
    """Build the two-view lipreading network.

    Each camera view goes through an identical CNN branch; the flattened
    branch outputs are concatenated and mapped through a dense layer to
    ``net_out`` linear outputs (the audio feature targets).
    """
    visible1 = Input(shape=(CHANNELS*COLORS, FRAME_ROWS, FRAME_COLS))
    visible2 = Input(shape=(CHANNELS*COLORS, FRAME_ROWS, FRAME_COLS))
    merged = concatenate([_feature_branch(visible1), _feature_branch(visible2)])
    x = Dense(128, init='he_normal')(merged)
    x = BatchNormalization()(x)
    x = Dense(net_out, init='he_normal', use_bias=True)(x)
    out = Activation('linear')(x)
    model = Model(inputs=[visible1, visible2], outputs=out)
    print(model.summary())
    return model
def corr2_mse_loss(a,b):
    """Combined loss: mean squared error minus the Pearson correlation.

    Both tensors are mean-centred first; the correlation term is negated
    (so high correlation lowers the loss) and added to the MSE.  Note the
    MSE is computed on the *centred* tensors, since ``a`` and ``b`` are
    rebound below before the squared-difference term.
    """
    # Centre both tensors so the dot products below compute (co)variances.
    a = BK.tf.subtract(a, BK.tf.reduce_mean(a))
    b = BK.tf.subtract(b, BK.tf.reduce_mean(b))
    tmp1 = BK.tf.reduce_sum(BK.tf.multiply(a,a))
    tmp2 = BK.tf.reduce_sum(BK.tf.multiply(b,b))
    tmp3 = BK.tf.sqrt(BK.tf.multiply(tmp1,tmp2))
    tmp4 = BK.tf.reduce_sum(BK.tf.multiply(a,b))
    # r = -(Pearson correlation of a and b)
    r = -BK.tf.divide(tmp4,tmp3)
    # m = mean squared error of the centred tensors
    m=BK.tf.reduce_mean(BK.tf.square(BK.tf.subtract(a, b)))
    rm=BK.tf.add(r,m)
    return rm
def train_net(model1, Xtr_v1, Ytr_norm_v1, Xte_v1, Yte_norm_v1,
            Xtr_v2, Ytr_norm_v2, Xte_v2, Yte_norm_v2,
            batch_size=BATCH_SIZE, epochs=EPOCHS, finetune=False):
    """Compile and fit the two-view model against the view-1 targets.

    When ``finetune`` is set, the learning rate drops tenfold and the
    epoch count is replaced by FINETUNE_EPOCHS.  Returns the fitted model.
    """
    learning_rate = LR / 10 if finetune else LR
    model1.compile(loss='mse', optimizer=Adam(lr=learning_rate))
    if finetune:
        epochs = FINETUNE_EPOCHS
    model1.fit([Xtr_v1, Xtr_v2], Ytr_norm_v1, shuffle=False,
               batch_size=batch_size, nb_epoch=epochs, verbose=1,
               validation_data=([Xte_v1, Xte_v2], Yte_norm_v1))
    return model1
def predict(model, X_v1, Y_means_v1, Y_stds_v1, X_v2, Y_means_v2, Y_stds_v2, batch_size=BATCH_SIZE):
    """Run the two-view model and undo the view-1 target normalisation.

    Only the view-1 statistics are used to de-normalise; the view-2
    means/stds are accepted for interface symmetry but are not used here.
    """
    normalised = model.predict([X_v1, X_v2], batch_size=batch_size, verbose=1)
    return normalised * Y_stds_v1 + Y_means_v1
def main():
    """Train and evaluate the model for each pair of camera views.

    For every pair (v1, v2): load and split both views' data, normalise
    with training statistics, fit the two-view network (a full training
    pass followed by a low-learning-rate finetune pass), predict the
    audio features for train and test sets, and save the results.
    """
    for v1 in range(1,2):
        print('****************************************')
        print('View = ',v1)
        viddata_v1, auddata_v1 = load_data(datapath,v1)
        print(viddata_v1.shape)
        (Xtr_v1,Ytr_v1), (Xte_v1, Yte_v1) = split_data(viddata_v1, auddata_v1)
        print(Xtr_v1.shape, Ytr_v1.shape)
        print(Xte_v1.shape, Yte_v1.shape)
        #sys.exit()
        # Normalisation statistics come from the view-1 training data.
        Xtr_v1, Ytr_norm_v1, Xte_v1, Yte_norm_v1, Y_means_v1, Y_stds_v1 = standardize_data(Xtr_v1, Ytr_v1, Xte_v1, Yte_v1)
        for v2 in range(v1+1,3):
            print('########################################')
            print('View = ',v2)
            viddata_v2, auddata_v2 = load_data(datapath,v2)
            print(viddata_v2.shape)
            (Xtr_v2,Ytr_v2), (Xte_v2, Yte_v2) = split_data(viddata_v2, auddata_v2)
            print(Xtr_v2.shape, Ytr_v2.shape)
            print(Xte_v2.shape, Yte_v2.shape)
            Xtr_v2, Ytr_norm_v2, Xte_v2, Yte_norm_v2, Y_means_v2, Y_stds_v2 = standardize_data(Xtr_v2, Ytr_v2, Xte_v2, Yte_v2)
            model = build_model(net_out)
            # Full training pass, then a short finetune pass at LR/10.
            model = train_net(model, Xtr_v1, Ytr_norm_v1, Xte_v1, Yte_norm_v1,
                Xtr_v2, Ytr_norm_v2, Xte_v2, Yte_norm_v2)
            model = train_net(model, Xtr_v1, Ytr_norm_v1, Xte_v1, Yte_norm_v1,
                Xtr_v2, Ytr_norm_v2, Xte_v2, Yte_norm_v2, finetune = True)
            Ytr_pred = predict(model, Xtr_v1, Y_means_v1, Y_stds_v1,
                Xtr_v2, Y_means_v2, Y_stds_v2)
            Yte_pred = predict(model, Xte_v1, Y_means_v1, Y_stds_v1,
                Xte_v2, Y_means_v2, Y_stds_v2)
            savedata(Ytr_v1, Ytr_pred, Yte_v1, Yte_pred,v1 ,v2)
if __name__ == "__main__":
main()
| [
"rohitjn256@gmail.com"
] | rohitjn256@gmail.com |
fec2a67a92873717c966226aa1b66786aef95811 | 02c35ac2c6d4a832327b72865661d0f5d4c89219 | /Decorators/Assigning_fun_2_variable.py | 5858834eb4ecc28356582524edcdd0c542d50b01 | [] | no_license | pinky0916/python_Basics | 9b4eba261bd67bef370f87ce9890126c1c7d5e6a | d1e498338e2bb6b2f4eb9bc25533c406f71e0773 | refs/heads/main | 2023-09-02T06:03:10.987401 | 2021-11-02T01:04:55 | 2021-11-02T01:04:55 | 331,766,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | #We can assign fucntions to other variables and execute that variables
def cool():
return "Hello"
hello=cool
print(hello())
print('*****************') | [
"varishtaja.kunjukuttan@gmail.com"
] | varishtaja.kunjukuttan@gmail.com |
56e58a80390b5a427072ff56d7825759676e663d | 8b0790884ab690b32bf7978d255331196adc6f97 | /setup.py | 5053a63ccd012b89148aec9d9aa0a8f7b1f38920 | [
"BSD-2-Clause"
] | permissive | vikashgupta-may/django-rest-framework | 110a62c4df7f1a55ff0bf0f44e3ee78bbf5f7eea | 5c7d4e23be45c824c15dffe684e2a9fd9c78674d | refs/heads/master | 2022-12-25T02:52:56.467527 | 2020-10-07T07:52:19 | 2020-10-07T07:52:19 | 301,963,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,342 | py | #!/usr/bin/env python
from __future__ import print_function
import os
import re
import sys
from setuptools import setup
needs_wheel = {'bdist_wheel'}.intersection(sys.argv)
wheel = ['wheel'] if needs_wheel else []
def read(*paths):
    """Join *paths* into a single file path and return that file's text."""
    target = os.path.join(*paths)
    with open(target, 'r') as handle:
        contents = handle.read()
    return contents
def get_version(package):
    """Return the version declared as ``__version__`` in the package's
    ``__init__.py``.

    Raises AttributeError if no ``__version__`` assignment is found
    (``re.search`` returns None).
    """
    # Use a context manager: the original left the file handle open.
    with open(os.path.join(package, '__init__.py')) as init_file:
        init_py = init_file.read()
    return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
def get_packages(package):
    """Return the root package plus every sub-package beneath it.

    A directory counts as a package when it contains an ``__init__.py``.
    """
    found = []
    for dirpath, _dirnames, _filenames in os.walk(package):
        if os.path.exists(os.path.join(dirpath, '__init__.py')):
            found.append(dirpath)
    return found
def get_package_data(package):
    """Return all files under the root package that are not inside a
    package themselves (i.e. directories without an ``__init__.py``),
    keyed by the package name as setuptools expects.
    """
    filepaths = []
    for dirpath, _dirnames, filenames in os.walk(package):
        if os.path.exists(os.path.join(dirpath, '__init__.py')):
            continue  # real sub-package: handled by get_packages instead
        base = dirpath.replace(package + os.sep, '', 1)
        filepaths.extend(os.path.join(base, name) for name in filenames)
    return {package: filepaths}
if sys.argv[-1] == 'publish':
os.system("python setup.py sdist upload")
os.system("python setup.py bdist_wheel upload")
print("You probably want to also tag the version now:")
print(" git tag -a {0} -m 'version {0}'".format(
get_version('rest_framework_json_api')))
print(" git push --tags")
sys.exit()
setup(
name='djangorestframework-jsonapi',
version=get_version('rest_framework_json_api'),
url='https://github.com/django-json-api/django-rest-framework-json-api',
license='BSD',
description='A Django REST framework API adapter for the JSON API spec.',
long_description=read('README.rst'),
author='Jerel Unruh',
author_email='',
packages=get_packages('rest_framework_json_api'),
package_data=get_package_data('rest_framework_json_api'),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=[
'inflection>=0.3.0',
'djangorestframework>=3.12,<3.13',
'django>=2.2,<3.2',
],
extras_require={
'django-polymorphic': ['django-polymorphic>=2.0'],
'django-filter': ['django-filter>=2.0']
},
setup_requires=wheel,
python_requires=">=3.5",
zip_safe=False,
)
| [
"vikashgupta.may@gmail.com"
] | vikashgupta.may@gmail.com |
a703dcba1f9e3d95e354395ee522f152798f574b | e4822338d507cc83315b2122b0f18cbf1b94f4a7 | /Work/Craiglist/craiglist_scraping.py | c2e12dd504a61925894093b490b833c0ebefa67e | [
"MIT"
] | permissive | rahulbordoloi/Selenium | f6659aacebb3691d3e143c4383f2e5f3bcb7f98b | 12e621a4ad05572d23622195a643ec1c40ccf6e9 | refs/heads/master | 2022-12-03T14:20:37.464030 | 2020-08-03T07:37:43 | 2020-08-03T07:37:43 | 282,360,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,491 | py | ## Craiglist Scraping using Selenium Python
## Documentation -> https://www.selenium.dev/documentation/en/
# Importing Neccessary Libraries
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from bs4 import BeautifulSoup
import urllib.request
import pandas as pd
class CraiglistScraper:
    """Scrape Craigslist 'for sale' search results for one location and
    price range, collecting titles, prices, dates and item URLs, and
    write them to ``craiglist_results.csv``."""

    # Constructor
    def __init__(self, location, min_price, max_price):
        self.location = location
        self.min_price = min_price
        self.max_price = max_price
        self.url = f'https://{self.location}.craigslist.org/search/sss?min_price={min_price}&max_price={max_price}'
        self.driver = webdriver.Chrome(executable_path = "C:\Personal\Work\Selenium\Drivers\chromedriver.exe")
        self.delay = 3  # seconds to wait for the results page to load
        # Parallel lists: index i across these describes one listing.
        self.titles = []
        self.prices = []
        self.dates = []
        self.urls = []

    # Load the Page associated with the URL
    def load_url(self):
        """Open the search URL and wait until the result list is present."""
        self.driver.get(self.url)
        try:
            wait = WebDriverWait(self.driver, self.delay)
            wait.until(EC.presence_of_element_located((By.ID, "searchform"))) # searchform -> the whole list of items
            print("Page is Loaded and Ready to get Parsed!")
        except TimeoutException:
            print("Loading Took too much Time! Increase Delay Amount.")

    # Extract the information from Each Card
    def extract_cards(self):
        """Parse each result row's text into title/price/date lists.

        Relies on the exact layout of ``card.text`` (price after '$',
        fields separated by newlines, date as 'month day' prefix) —
        fragile if Craigslist changes its markup.
        """
        cards = self.driver.find_elements_by_class_name("result-row")
        # print(cards)
        card_list = []
        for card in cards:
            # print(card.text)
            title = card.text.split("$")
            # print(title)
            # NOTE(review): both branches below are identical; the split on
            # '$' always takes element [1] regardless of a leading price.
            if title[0] == '':
                title = title[1]
            else:
                title = title[1]
            title = title.split("\n") # Each New Line depicts the information about an object
            price = title[0]
            title = title[-1]
            title = title.split(" ")
            month, day = title[0], title[1]
            date = month + " " + day
            title = ' '.join(title[2:]) # Taking the Rest of String excluding the date words
            # print(title)
            # print("Title of the Item : {}".format(title))
            # print("Price of the Item : ${}".format(price))
            # print("Date of the Item : {} 2020".format(date))
            # Appending the Information into Lists
            self.titles.append(title)
            self.prices.append(price)
            self.dates.append(date)
            #card_list.append(card.text)
        # for i in card_list: print(i)
        # return card_list
        # return titles, prices, dates

    # Extract URLs from the Cards
    def extract_card_urls(self):
        """Fetch the search page again and collect each listing's href."""
        html_page = urllib.request.urlopen(self.url)
        soup = BeautifulSoup(html_page, 'lxml')
        for link in soup.findAll("a", {"class" : "result-title hdrlnk"}):
            # print(link["href"])
            self.urls.append(link["href"])

    # Extract Information about the Cards
    def generate_csv(self):
        """Write the collected listings to ``craiglist_results.csv``."""
        # Take out the URLs of the Cards
        self.extract_card_urls()
        # Using Pandas DataFrame
        df = pd.DataFrame()
        df['Date'] = self.dates
        df['URL'] = self.urls
        df['Title'] = self.titles
        df['Price($)'] = self.prices
        df.to_csv('craiglist_results.csv', index = False)
        # Using File IO
        '''
        # Initialising CSV File to Save our Results
        with open('craiglist_results.csv', 'w') as f:
            f.write("Date, Title, Price($) \n")
        # Store out all the information
        no_of_items = len(self.titles)
        with open('craiglist_results.csv', 'a') as f:
            for i in range(no_of_items):
                f.write(str(self.dates[i]) + "," + str(self.titles[i]) + + "," + str(self.prices[i]) + "\n")
        '''

    # Close the Browser Session
    def quit(self):
        self.driver.close()

    # Test Function to Print the URL
    def test(self):
        print(self.url)
# Main Function
if __name__ == '__main__':
    # Scrape SF Bay Area 'for sale' listings priced between $5 and $5000.
    scrapper = CraiglistScraper('sfbay', 5, 5000)
    # scrapper.test()
    scrapper.load_url()
    # scrapper.extract_card_urls()
    scrapper.extract_cards()
    scrapper.generate_csv()
    scrapper.quit()
"rahulbordoloi24@gmail.com"
] | rahulbordoloi24@gmail.com |
9803b67002e1ba840f83ee2f65cd4d5679d4b1ca | 158618aad4a15d234e820e801016848cde53cbbc | /piece_of_cake_ms4/asgi.py | 56bc05166c933d8482ebd59c9804674b779d8d00 | [] | no_license | niamhbrowne/piece_of_cake_ms4_resub | 8aed135b9276ebde50bb6708247e1f6f6f44d3c7 | d9941db1519484c4be6e2ce24e5c5fc423fce6c0 | refs/heads/main | 2023-08-26T07:25:05.766430 | 2021-11-10T11:50:05 | 2021-11-10T11:50:05 | 425,999,524 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | """
ASGI config for piece_of_cake_ms4 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'piece_of_cake_ms4.settings')
application = get_asgi_application()
| [
"niamh_browne@icloud.com"
] | niamh_browne@icloud.com |
8d52a25ebc670edc425ed6dc325e37c4c0a83c68 | b7e60a0a1341a8c91e6690e49c0d37ac34b20693 | /dashboard/views.py | 3091349588618b61fb765dea76bf58d8efb6623a | [] | no_license | staylomak/AzurianTrack | b25fa052f26491057f6da1680402cab1ab3cd02b | 6feb6c7a3913cdcc7afc9e04b3321ec7e62453ea | refs/heads/master | 2020-05-02T10:18:46.666003 | 2019-03-27T01:24:27 | 2019-03-27T01:24:27 | 177,893,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,971 | py | # -*- coding: utf-8 -*-
import json
import logging
from django.http import HttpResponse
from django.shortcuts import render
from django.views.generic import TemplateView
from autenticacion.views import LoginRequiredMixin
from emails.models import Email
from perfiles.models import Perfil
from utils.generics import timestamp_to_date
logger = logging.getLogger(__name__)
class StatisticsView(LoginRequiredMixin, TemplateView):
    """JSON endpoint returning e-mail statistics for a date range / company."""

    def get(self, request, *args, **kwargs):
        try:
            raw_from = request.GET['date_from']
            raw_to = request.GET['date_to']
            # Timestamps arrive in the query string as decimal strings.
            query_params = {
                'date_from': timestamp_to_date(int(raw_from, base=10)),
                'date_to': timestamp_to_date(int(raw_to, base=10)),
                'empresa': request.GET['empresas'],
                'tipo_receptor': request.GET['tipo_receptor'],
            }
            statistic = Email.get_statistics_count_by_dates(**query_params)
            results = Email.get_statistics_range_by_dates(**query_params)
            payload = json.dumps({
                'statistic': statistic,
                'results': results,
            })
            return HttpResponse(payload, content_type='application/json')
        except Exception as e:
            # Mirrors the original behaviour: any failure is logged and the
            # view implicitly returns None.
            logger.error(e)
class IndexView(LoginRequiredMixin, TemplateView):
    """Dashboard landing page: renders the logged-in user's profile."""

    template_name = 'dashboard/index.html'

    def get(self, request, *args, **kwargs):
        try:
            perfil = Perfil.get_perfil(request.user)
            # Present the profile's companies in alphabetical order.
            perfil.empresas = perfil.empresas.all().order_by('empresa')
            logger.info(perfil.usuario)
            return render(request, self.template_name, {'perfil': perfil})
        except Exception as e:
            logger.error(e)
            return HttpResponse("No autorizado")
| [
"bogginice@gmail.com"
] | bogginice@gmail.com |
d81a7b84b4465dbb7b7e7446f0c0c796ea57daca | 8c6256f95668a23628be7aa1d4a39a2cd94f37ab | /streamlit_app.py | e9f69f352d581b0abdf6e83653e651edac0c90f3 | [] | no_license | monchier/pf_test | e70de1073622645fec431fa805d52885b61c981d | 6d4e366a9508d88f2e67028edd21c2ed2073c5b0 | refs/heads/master | 2023-07-15T12:39:05.207198 | 2021-02-25T17:44:17 | 2021-02-25T17:44:17 | 340,729,410 | 1 | 1 | null | 2021-02-22T14:35:32 | 2021-02-20T18:52:50 | Python | UTF-8 | Python | false | false | 431 | py | import streamlit as st
import pandas as pd
from fbprophet import Prophet
import time
# Load the example time-series and show it in the Streamlit app.
df = pd.read_csv('example_wp_log_peyton_manning.csv')
st.write(df)
m = Prophet()
m.fit(df)
# Extend the frame one year past the observed data for forecasting.
future = m.make_future_dataframe(periods=365)
st.write(future)
# Time only the predict call.
start = time.time()
forecast = m.predict(future)
end = time.time()
#forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail()
st.write(forecast)
st.write(f"Elapsed time: {end - start}")
| [
"matteo@streamlit.io"
] | matteo@streamlit.io |
37f2612b3b1262ebb4d0cc8192c8cfc52ce6c04e | c2f3b440494121962f8c0d5c4af0d989e1ff1a30 | /fact.py | f8c62b7619744b34e922f456bffc4b237ad74e4d | [] | no_license | 15cs026priyanka/balajipri | 3a8997efcf9a51372a9b575c38a07e9b6bd99d2c | b206a5780d266c809253df59b12c102114e420f3 | refs/heads/master | 2021-03-31T02:15:34.337002 | 2018-03-13T10:42:05 | 2018-03-13T10:42:05 | 125,033,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | x=int(input("enter a number:"))
if x<0:
print("not a factorial number")
elif x==0:
print("this is the factorial number")
| [
"noreply@github.com"
] | 15cs026priyanka.noreply@github.com |
d64e894856d36724821b05d2b9df8e46b4ec5512 | 5afb399c14b78ba8a5bfe24fb101a8cf01f26e76 | /app.py | d5415de0a6c26730efaba6b5addb8e9e652cc823 | [] | no_license | ropenta/first_news_app | bb811e5065ec32d0e3eb87ac26dcfe6daac617ec | 60064e7b30b282c82285437e16f45217c7727579 | refs/heads/master | 2021-01-01T04:17:30.714427 | 2017-07-13T23:00:04 | 2017-07-13T23:00:04 | 97,159,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | import csv
from flask import Flask
from flask import abort
from flask import render_template
app = Flask(__name__)
def get_csv():
    """Load the LA riots deaths CSV as a list of row dicts.

    Fixes two defects in the original: the file handle was never closed
    (resource leak), and the file was opened in binary mode, which the
    Python 3 csv module cannot iterate. ``newline=''`` is what the csv
    docs require for text-mode files.
    """
    csv_path = './static/la-riots-deaths.csv'  # path to file
    with open(csv_path, newline='') as csv_file:
        # Materialize all rows while the file is still open.
        return list(csv.DictReader(csv_file))
@app.route('/')
def index():
    """Render the homepage listing every row of the CSV."""
    return render_template('index.html', object_list=get_csv())
@app.route('/<row_id>')
def detail(row_id):
    """Render the detail page for the row whose id matches, else 404."""
    match = next(
        (row for row in get_csv() if row['id'] == row_id),
        None,
    )
    if match is None:
        abort(404)
    return render_template('detail.html', object=match)
# Run the Flask development server when executed directly (not under WSGI).
if __name__ == '__main__':
    app.run(use_reloader=True, debug=True)
| [
"penta@umich.edu"
] | penta@umich.edu |
5a75b3e5fcce03f7bd10d309196f67bdbc85c252 | 1d641f71f7aab082ed0b3ee805d6ff24b012ca2d | /ecommerce/carts/urls.py | aacdcfc353ac76fe4c2a60b52d83aa8708090caa | [] | no_license | Arkajit-m18/django-mca-major-project | 3d63ac96cd32c49e9a95629a680c5b0b7561cbd3 | 59b6f39d923a7e134bbb4bbb769bc06721321760 | refs/heads/master | 2020-05-18T00:31:44.435948 | 2019-05-15T15:23:21 | 2019-05-15T15:23:21 | 184,065,280 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | from django.urls import path
from . import views
# URL namespace, e.g. reverse('carts:cart_home').
app_name = 'carts'
# Routes for the shopping-cart app (views live in carts/views.py).
urlpatterns = [
    path('', views.cart_home, name = 'cart_home'),
    path('update/', views.cart_update, name = 'cart_update'),
    path('checkout/', views.checkout_home, name = 'checkout'),
    path('checkout/success/', views.checkout_done, name = 'success'),
] | [
"arkajit.18@gmail.com"
] | arkajit.18@gmail.com |
f0c863824b5bc933786d755d811e7f3294660239 | e2aa2b6c28fd36860f7dd11022a8d0425c6a2335 | /pymtl3/passes/backends/verilog/translation/structural/VStructuralTranslatorL4.py | 03d36a0c667ad250d6834317e0efdf53654ab6d6 | [
"BSD-3-Clause"
] | permissive | juanalbrecht/pymtl3 | f509458fe588721dfc8c715bc75c259409b5f7a5 | 6d2cda6370fae74c82f69a1e65e641fab28d6958 | refs/heads/master | 2021-03-02T21:32:03.214499 | 2020-03-08T01:38:45 | 2020-03-08T01:38:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,911 | py | #=========================================================================
# VStructuralTranslatorL4.py
#=========================================================================
"""Provide SystemVerilog structural translator implementation."""
from textwrap import dedent
from pymtl3 import Placeholder
from pymtl3.passes.backends.generic.structural.StructuralTranslatorL4 import (
StructuralTranslatorL4,
)
from pymtl3.passes.rtlir import RTLIRDataType as rdt
from pymtl3.passes.rtlir import RTLIRType as rt
from pymtl3.passes.rtlir import get_component_ifc_rtlir
from ...util.utility import make_indent, pretty_concat
from .VStructuralTranslatorL3 import VStructuralTranslatorL3
class VStructuralTranslatorL4(
    VStructuralTranslatorL3, StructuralTranslatorL4 ):
  """Level-4 structural translation: subcomponent support.

  Extends the L3 SystemVerilog structural translator with translation of
  child components (including arrays of components and nested
  interfaces) into wire declarations plus module instantiations.
  """
  #-----------------------------------------------------------------------
  # Declarations
  #-----------------------------------------------------------------------
  def rtlir_tr_subcomp_port_decls( s, _port_decls ):
    # Port descriptors are already dicts; pass them through unchanged.
    return _port_decls
  def rtlir_tr_subcomp_port_decl( s, m, c_id, c_rtype, c_array_type, port_id,
      port_rtype, port_dtype, port_array_type ):
    # One descriptor per subcomponent port; consumed by rtlir_tr_subcomp_decl.
    return {
      'direction' : port_rtype.get_direction(),
      'data_type' : port_dtype['data_type'],
      'packed_type' : port_dtype['packed_type'],
      'id' : port_id,
      'unpacked_type' : port_array_type['unpacked_type'],
    }
  def rtlir_tr_subcomp_ifc_port_decls( s, _ifc_port_decls ):
    return sum(_ifc_port_decls, [])
  def rtlir_tr_subcomp_ifc_port_decl( s, m, c_id, c_rtype, c_array_type,
      ifc_id, ifc_rtype, ifc_array_type, port_id, port_rtype, port_array_type ):
    if isinstance( port_rtype, rt.Port ):
      port_dtype = s.rtlir_data_type_translation( m, port_rtype.get_dtype() )
      return [{
        'direction' : port_rtype.get_direction(),
        'data_type' : port_dtype['data_type'],
        'packed_type' : port_dtype['packed_type'],
        'id' : f'{ifc_id}__{port_id}',
        'unpacked_type' : ifc_array_type['unpacked_type']+port_array_type['unpacked_type'],
      }]
    else:
      # Nested interface: recurse, flattening names with `ifc__port` prefixes.
      ret = []
      all_properties = port_rtype.get_all_properties_packed()
      for _port_id, _port_rtype in all_properties:
        if isinstance(_port_rtype, rt.Array):
          _port_array_rtype = _port_rtype
          _port_rtype = _port_rtype.get_sub_type()
        else:
          _port_array_rtype = None
          _port_rtype = _port_rtype
        ret += s.rtlir_tr_subcomp_ifc_port_decl( m, c_id, c_rtype, c_array_type,
            f'{ifc_id}__{port_id}', port_rtype, port_array_type,
            _port_id, _port_rtype, s.rtlir_tr_unpacked_array_type(_port_array_rtype))
      return ret
  def rtlir_tr_subcomp_ifc_decls( s, _ifc_decls ):
    return sum(_ifc_decls, [])
  def rtlir_tr_subcomp_ifc_decl( s, m, c_id, c_rtype, c_array_type,
      ifc_id, ifc_rtype, ifc_array_type, ports ):
    return ports
  def rtlir_tr_subcomp_decls( s, subcomps ):
    subcomp_decls = sum( subcomps, [] )
    return '\n\n'.join( subcomp_decls )
  def rtlir_tr_subcomp_decl( s, m, c_id, c_rtype, c_array_type, port_conns, ifc_conns ):
    def pretty_comment( string ):
      # Emit a three-line Verilog banner comment around `string`.
      comments = [
        ' //-------------------------------------------------------------',
        f' // {string}',
        ' //-------------------------------------------------------------',
      ]
      return '\n'.join(comments)
    def gen_subcomp_array_decl( c_id, port_conns, ifc_conns, n_dim, c_n_dim ):
      nonlocal m, s
      tplt = dedent(
          """\
          {c_name} {c_id}
          (
          {port_conn_decls}
          );""")
      if not n_dim:
        # Get the object from the hierarchy
        _n_dim = list(int(num_str) for num_str in c_n_dim.split('__') if num_str)
        attr = c_id + ''.join(f'[{dim}]' for dim in _n_dim)
        # `attr` is derived from component ids, not user input, so eval is contained.
        obj = eval(f'm.{attr}')
        # Get the translated component name
        obj_c_rtype = get_component_ifc_rtlir(obj)
        _c_name = s.rtlir_tr_component_unique_name(obj_c_rtype)
        if isinstance(obj, Placeholder):
          c_name = obj.config_placeholder.pickled_top_module
        else:
          c_name = _c_name
        orig_c_id = c_id
        c_id = c_id + c_n_dim
        # Generate correct connections
        port_conn_decls = []
        unpacked_str = ''.join([f'[{i}]' for i in _n_dim])
        no_clk = s.structural.component_no_synthesis_no_clk[obj]
        no_reset = s.structural.component_no_synthesis_no_reset[obj]
        for i, dscp in enumerate(port_conns + ifc_conns):
          comma = ',\n' if i != len(port_conns+ifc_conns)-1 else ''
          port_name = dscp['id']
          port_wire = f"{orig_c_id}__{dscp['id']}{unpacked_str}"
          if (port_name == 'clk' and no_clk) or (port_name == 'reset' and no_reset):
            # clk/reset hookups are hidden from synthesis when requested.
            comma = ',\n' if i != len(port_conns+ifc_conns)-1 else '\n'
            newline = '\n' if i != len(port_conns+ifc_conns)-1 else ''
            port_conn_decls.append("`ifndef SYNTHESIS\n")
            port_conn_decls.append(f".{port_name}( {port_wire} ){comma}")
            port_conn_decls.append(f"`endif{newline}")
          else:
            port_conn_decls.append(f".{port_name}( {port_wire} ){comma}")
        make_indent( port_conn_decls, 2 )
        port_conn_decls = ''.join(port_conn_decls)
        return [ tplt.format( **locals() ) ]
      else:
        return sum( [gen_subcomp_array_decl( c_id,
          port_conns, ifc_conns, n_dim[1:], c_n_dim+'__'+str(idx) ) \
            for idx in range( n_dim[0] )], [] )
    # If `c_array_type` is not None we need to implement an array of
    # components, each with their own connections for the ports.
    # Generate wire declarations for all ports
    defs = []
    for dscp in port_conns + ifc_conns:
      defs.append(pretty_concat(dscp['data_type'], dscp['packed_type'],
        f"{c_id}__{dscp['id']}", f"{c_array_type['unpacked_type']}{dscp['unpacked_type']}", ';'))
    make_indent( defs, 1 )
    defs = ['\n'.join(defs)]
    n_dim = c_array_type['n_dim']
    subcomps = gen_subcomp_array_decl( c_id, port_conns, ifc_conns, n_dim, '' )
    return [pretty_comment(f"Component {c_id}{c_array_type['unpacked_type']}")] + \
        defs + subcomps + \
        [pretty_comment(f"End of component {c_id}{c_array_type['unpacked_type']}")]
  #-----------------------------------------------------------------------
  # Signal operations
  #-----------------------------------------------------------------------
  def rtlir_tr_component_array_index( s, base_signal, index, status ):
    s._rtlir_tr_unpacked_q.append( index )
    return base_signal
  def rtlir_tr_subcomp_attr( s, base_signal, attr, status ):
    # NOTE(review): `('status')` is a plain string, not a 1-tuple — if the
    # base `_rtlir_tr_process_unpacked` expects a tuple of flags this only
    # works via substring membership; confirm intent upstream.
    return s._rtlir_tr_process_unpacked(
      f'{base_signal}__{attr}',
      f'{base_signal}__{attr}{{}}',
      status, ('status') )
| [
"pp482@cornell.edu"
] | pp482@cornell.edu |
939da88ad050ae09813851b20ecbf9bbff1388f2 | 30bee457380142b46b8e7a6fadc4d54cee1dad5b | /lessons/migrations/0004_add_slug_field.py | 04a018b4254d604522bafdb6f31bcc085975e6da | [] | no_license | Feralo/website | 5f49bc317a6efa8f7dd3af23781ec55df153abd5 | 27d6cfaea580a57e4e3560f96218f8924d858858 | refs/heads/master | 2021-01-01T17:21:39.976587 | 2015-11-06T06:30:19 | 2015-11-06T06:30:19 | 21,190,077 | 1 | 1 | null | 2015-11-06T06:17:44 | 2014-06-25T04:18:12 | Python | UTF-8 | Python | false | false | 600 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    """Add a ``slug`` field to Lesson and order lessons newest-first."""
    dependencies = [
        ('lessons', '0003_fix_date_time'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='lesson',
            options={'ordering': ['-created']},
        ),
        migrations.AddField(
            model_name='lesson',
            name='slug',
            # datetime.datetime.now is a callable default: each pre-existing
            # row receives the current timestamp as an interim slug.
            # NOTE(review): a datetime default for a SlugField looks like a
            # one-off backfill — confirm rows were re-slugged afterwards.
            field=models.SlugField(default=datetime.datetime.now, max_length=40),
            preserve_default=False,
        ),
    ]
| [
"noah.de@gmail.com"
] | noah.de@gmail.com |
21fa4ec01f1456601156243bdc48776ee6254d51 | 9e3a0fde65f7279be14b844b229d077bfe66c4ef | /flaskr/model/user.py | a9368ac51b19bc137c9f38243a86277bf472a5d3 | [] | no_license | anhvtt-teko/todoApp | 0033f9b6b3bdf7365d7269ee202152ae14f8acc3 | 68374eceb064f25d1659cc2ee897b48e2e04210d | refs/heads/master | 2023-05-13T13:05:12.927426 | 2019-10-11T02:22:08 | 2019-10-11T02:22:08 | 214,324,530 | 0 | 0 | null | 2023-05-01T20:36:59 | 2019-10-11T02:18:39 | Python | UTF-8 | Python | false | false | 407 | py | from flaskr.repository import db
class User(db.Model):
    """SQLAlchemy model for an application user."""
    __tablename__ = 'user'
    def __init__(self, **kwargs):
        # Accept arbitrary column values as keyword arguments,
        # e.g. User(username='bob', password_hash=...).
        for k, v in kwargs.items():
            setattr(self, k, v)
    def __repr__(self):
        return '<User %r>' % self.username
    # Columns
    id = db.Column(db.Integer, primary_key=True)  # surrogate primary key
    username = db.Column(db.String(50), nullable=False)  # login name, required
    password_hash = db.Column(db.String(100))  # hashed password, never plaintext
| [
"anh.vtt@teko.vn"
] | anh.vtt@teko.vn |
2fb72d64270d843ff92230fd45260ab5cb55888f | 369a4184493972d0be1418ca9c587b2d735f4ee4 | /invisible_cloak.py | b416bb131575be1c602a7ee06e1a3ee506b290ed | [] | no_license | sakshi1003/INVISIBLE-CLOAK | 281e5a5085d0b342c900403da9247ea0f17347dd | d08bf2404a4fc5323374beab57e3fac4ae633d85 | refs/heads/main | 2023-02-27T04:16:44.342946 | 2021-02-05T17:26:38 | 2021-02-05T17:26:38 | 336,315,743 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,221 | py | import cv2
import numpy as np
cap = cv2.VideoCapture(0)
# Background frame shown "through" the cloak.
# NOTE(review): cv2.imread returns None when image.jpg is missing — confirm
# the file ships next to this script.
back = cv2.imread('./image.jpg')
while cap.isOpened():
    # take each frame
    ret, frame = cap.read()
    if ret:
        # how do we convert rgb to hsv?
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # cv2.imshow("hsv", hsv)
        # how to get hsv value?
        # lower: hue - 10, 100, 100, higher: h+10, 255, 255
        red = np.uint8([[[0,0,255]]]) # bgr value of red
        hsv_red = cv2.cvtColor(red, cv2.COLOR_BGR2HSV)
        # get hsv value of red from bgr
        # print(hsv_red)
        # threshold the hsv value to get only red colors
        l_red = np.array([0, 100, 100])
        u_red = np.array([10, 255, 255])
        mask = cv2.inRange(hsv, l_red, u_red)
        # cv2.imshow("mask", mask)
        # all things red: take those pixels from the background image
        part1 = cv2.bitwise_and(back, back, mask=mask)
        # cv2.imshow("part1", part1)
        mask = cv2.bitwise_not(mask)
        # part 2 is all things not red, taken from the live frame
        part2 = cv2.bitwise_and(frame, frame, mask=mask)
        # cv2.imshow("mask", part2)
        cv2.imshow("cloak", part1 + part2)
        if cv2.waitKey(5) == ord('q'):
            break
    else:
        # Bug fix: the original spun forever once the camera stopped
        # delivering frames (ret == False). Exit the loop cleanly instead.
        break
cap.release()
cv2.destroyAllWindows() | [
"tiwari.sakshi1003@gmail.com"
] | tiwari.sakshi1003@gmail.com |
ca0d04658eb03c43a7dceddf7338d8c1f5cd372f | 346cf248e94fe97ba9c0a841827ab77f0ed1ff20 | /experiments/kdd-exps/experiment_DynaQtable_130_Feb14_0029.py | efabd8516978796f715bed1b20adcd12deaf5f2b | [
"BSD-3-Clause"
] | permissive | huangxf14/deepnap | cae9c7c654223f6202df05b3c3bc5053f9bf5696 | b4627ce1b9022d4f946d9b98d8d1622965cb7968 | refs/heads/master | 2020-03-26T02:54:01.352883 | 2018-08-12T01:55:14 | 2018-08-12T01:55:14 | 144,429,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,623 | py | # System built-in modules
import time
from datetime import datetime
import sys
import os
from multiprocessing import Pool
# Project dependency modules
import pandas as pd
pd.set_option('mode.chained_assignment', None) # block warnings due to DataFrame value assignment
import lasagne
# Project modules
sys.path.append('../')
from sleep_control.traffic_emulator import TrafficEmulator
from sleep_control.traffic_server import TrafficServer
from sleep_control.controller import QController, DummyController, NController
from sleep_control.integration import Emulation
from sleep_control.env_models import SJTUModel
from rl.qtable import QAgent
from rl.qnn_theano import QAgentNN
from rl.mixin import PhiMixin, DynaMixin
sys_stdout = sys.stdout
log_prefix = '_'.join(['msg'] + os.path.basename(__file__).replace('.', '_').split('_')[1:5])
log_file_name = "{}_{}.log".format(log_prefix, sys.argv[1])
# Composite classes
class Dyna_QAgent(DynaMixin, QAgent):
    """Tabular Q-agent composed with Dyna-style model-based simulation.

    DynaMixin is listed first so its overrides take precedence over
    QAgent's in the method resolution order.
    """
    def __init__(self, **kwargs):
        super(Dyna_QAgent, self).__init__(**kwargs)
# Parameters
# |- Data
location = 'gym'
# |- Agent
#    |- QAgent
# (True, None) vs (False, 'serve_all') — presumably sleep vs. serve-all
# wake action per the sleep_control package naming; confirm upstream.
actions = [(True, None), (False, 'serve_all')]
gamma, alpha = 0.9, 0.9 # TD backup
explore_strategy, epsilon = 'epsilon', 0.02 # exploration
#    |- QAgentNN
#       | - No Phi
phi_length = 0
dim_state = (1, 1, 3)
range_state = ((((0, 10), (0, 10), (0, 10)),),)
#       | - Other params
momentum, learning_rate = 0.9, 0.01 # SGD
num_buffer, memory_size, batch_size, update_period, freeze_period = 2, 200, 100, 4, 16
reward_scaling, reward_scaling_update, rs_period = 1, 'adaptive', 32 # reward scaling
# |- Env model
model_type, traffic_window_size = 'IPP', 50
stride, n_iter, adjust_offset = 2, 3, 1e-22
eval_period, eval_len = 4, 100
n_belief_bins, max_queue_len = 5, 20
Rs, Rw, Rf, Co, Cw = 1.0, -1.0, -10.0, -5.0, -0.5
traffic_params = (model_type, traffic_window_size,
                  stride, n_iter, adjust_offset,
                  eval_period, eval_len,
                  n_belief_bins)
queue_params = (max_queue_len,)
beta = 0.5 # R = (1-beta)*ServiceReward + beta*Cost
reward_params = (Rs, Rw, Rf, Co, Cw, beta)
# |- DynaQ
num_sim = 5
# |- Env
#    |- Time
start_time = pd.to_datetime("2014-10-15 09:40:00")
total_time = pd.Timedelta(days=7)
time_step = pd.Timedelta(seconds=2)
# Rewind the emulation start so the replay memory can fill before start_time.
backoff_epochs = num_buffer*memory_size+phi_length
head_datetime = start_time - time_step*backoff_epochs
tail_datetime = head_datetime + total_time
TOTAL_EPOCHS = int(total_time/time_step)
#    |- Reward
rewarding = {'serve': Rs, 'wait': Rw, 'fail': Rf}
# load from processed data
session_df =pd.read_csv(
    filepath_or_buffer='../data/trace_{}.dat'.format(location),
    parse_dates=['startTime_datetime', 'endTime_datetime']
)
te = TrafficEmulator(
    session_df=session_df, time_step=time_step,
    head_datetime=head_datetime, tail_datetime=tail_datetime,
    rewarding=rewarding,
    verbose=2)
ts = TrafficServer(cost=(Co, Cw), verbose=2)
env_model = SJTUModel(traffic_params, queue_params, reward_params, 2)
agent = Dyna_QAgent(
    env_model=env_model, num_sim=num_sim,
    # Below is QAgent params
    actions=actions, alpha=alpha, gamma=gamma,
    explore_strategy=explore_strategy, epsilon=epsilon,
    verbose=2)
c = QController(agent=agent)
emu = Emulation(te=te, ts=ts, c=c, beta=beta)
# Heavyliftings
t = time.time()
sys.stdout = sys_stdout
log_path = './log/'
# Refuse to overwrite an existing log so each experiment run is kept once.
if os.path.isfile(log_path+log_file_name):
    print "Log file {} already exist. Experiment cancelled.".format(log_file_name)
else:
    log_file = open(log_path+log_file_name,"w")
    # Progress banner goes to the real console before output is redirected.
    print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
    print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
    print log_file_name
    time.sleep(1)
    sys.stdout = log_file
    while emu.epoch is not None and emu.epoch<TOTAL_EPOCHS:
        # log time
        print "Epoch {},".format(emu.epoch),
        left = emu.te.head_datetime + emu.te.epoch*emu.te.time_step
        right = left + emu.te.time_step
        print "{} - {}".format(left.strftime("%Y-%m-%d %H:%M:%S"), right.strftime("%Y-%m-%d %H:%M:%S"))
        emu.step()
        print
        # Every 5% of the run, switch back to the console to report
        # progress, then resume logging to the file.
        if emu.epoch%(0.05*TOTAL_EPOCHS)==0:
            sys.stdout = sys_stdout
            print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
            print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
            print log_file_name
            time.sleep(1)
            sys.stdout = log_file
    sys.stdout = sys_stdout
    log_file.close()
print
print log_file_name,
print '{:.3f} sec,'.format(time.time()-t),
print '{:.3f} min'.format((time.time()-t)/60)
| [
"liujingchu@gmail.com"
] | liujingchu@gmail.com |
49203b940fdd86cab37a192ac067eb32e9b434bf | 5eafc71a497fad643a9743958f67df9a94f5f076 | /src/models/predict_model.py | bd4d39646ad2c7aa95cec7342617b9fe5bb8d328 | [
"MIT"
] | permissive | mwegrzyn/volume-wise-language | debf2f65fdd1c4b49f40ac8f8fa67b33849a7b15 | ed24b11667e6b26d3ed09ce0aae383c26852821c | refs/heads/master | 2020-09-17T04:45:33.584668 | 2019-11-25T16:33:11 | 2019-11-25T16:33:11 | 180,776,719 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,267 | py |
# coding: utf-8
# # Leave-One-Patient-Out classification of individual volumes
#
# Here, we train a classifier for each patient, based on the data of all the other patients except the current one (Leave One Out Cross-Validation). To this end, we treat each volume as an independent observation, so we have a very large sample of volumes which are used for training; and later, we do not classify the patient as a whole, but the classifier makes a decision for each of the held-out patient's 200 volumes. Therefore, at this stage, we have not made a decision on the patient level, but only at the volume-as-unit-of-observation level.
# ### import modules
# In[1]:
import os
import pickle
import numpy as np
import pandas as pd
from sklearn import svm, preprocessing, metrics
from PIL import Image
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('ticks')
sns.set_context('poster')
# In[2]:
sns.set_context('poster')
# In[3]:
# after conversion to .py, we can use __file__ to get the module folder
try:
    thisDir = os.path.realpath(__file__)
# in notebook form, we take the current working directory (we need to be in 'notebooks/' for this!)
except NameError:
    # __file__ is undefined inside a notebook. The original bare ``except:``
    # also swallowed SystemExit/KeyboardInterrupt; catch only NameError.
    thisDir = '.'
# convert relative path into absolute path, so this will work with notebooks and py modules
supDir = os.path.abspath(os.path.join(os.path.dirname(thisDir), '..'))
# ### get meta df
# We need this e.g. to get information about conclusiveness
#
## In[4]:
#
#
#data_df = pd.read_csv(
# '../data/interim/csv/info_epi_zscored_zdiff_summarymaps_2dpredclean_corr_df.csv',
# index_col=[0, 1],
# header=0)
#
#
#
## In[5]:
#
#
#data_df.tail()
#
#
# #### conclusiveness filters
#
## In[6]:
#
#
#is_conclusive = data_df.loc[:, 'pred'] != 'inconclusive'
#
#
#
## In[7]:
#
#
#is_conclusive.sum()
#
#
# ### get data
#
## In[8]:
#
#
#def make_group_df(data_df,metric='corr_df'):
# '''load correlation data of all patients'''
#
# group_df = pd.DataFrame()
#
# for p in data_df.index:
# # get data
# filename = data_df.loc[p, metric]
# this_df = pd.read_csv(filename, index_col=[0], header=0)
# # add patient infos to index
# this_df.index = [[p[0]], [p[1]]]
#
# group_df = pd.concat([group_df, this_df])
#
# # reorder the colums and make sure volumes are integer values
# group_df.columns = group_df.columns.astype(int)
#
# # sort across rows, then across columns, to make sure that volumes
# # are in the right order
# group_df = group_df.sort_index(axis=0)
# group_df = group_df.sort_index(axis=1)
#
# assert all(group_df.columns == range(200)), 'wrong order of volumes'
#
# return group_df
#
#
#
## In[9]:
#
#
#group_df = make_group_df(data_df)
#
#
#
## In[10]:
#
#
#group_df.tail()
#
#
# #### filter data
#
## In[11]:
#
#
## only conclusive cases
#conclusive_df = group_df[is_conclusive]
## only inconclusive cases
#inconclusive_df = group_df[is_conclusive == False]
## all cases unfiltered
#withinconclusive_df = group_df.copy()
#
#
#
## In[12]:
#
#
#print(conclusive_df.shape, inconclusive_df.shape, withinconclusive_df.shape)
#
#
# ### get design
# In[13]:
# Deserialize the experimental design (per-volume condition weights)
# pickled alongside the shipped models.
conds_file = os.path.join(supDir,'models','conds.p')
with open(conds_file, 'rb') as f:
    conds = pickle.load(f)
#
## In[14]:
#
#
#print(conds)
#
#
# ### get colors
#
## In[15]:
#
#
#with open('../models/colors.p', 'rb') as f:
# color_dict = pickle.load(f)
#
#my_cols = {}
#for i, j in zip(['red', 'blue', 'yellow'], ['left', 'right', 'bilateral']):
# my_cols[j] = color_dict[i]
#
#
# ### invert the resting timepoints
#
## In[16]:
#
#
#inv_df = conclusive_df*conds
#
#
#
## In[17]:
#
#
#inv_df.tail()
#
#
# ### train the classifier
#
## In[18]:
#
#
#stack_df = pd.DataFrame(inv_df.stack())
#stack_df.tail()
#
#
#
## In[19]:
#
#
#stack_df.shape
#
#
#
## In[20]:
#
#
#my_groups = ['left','bilateral','right']
#
#
#
## In[21]:
#
#
#dynamite_df = stack_df.copy()
#dynamite_df.columns = ['correlation']
#dynamite_df['group'] = dynamite_df.index.get_level_values(0)
#sns.catplot(data=dynamite_df,y='group',x='correlation',kind='bar',orient='h',palette=my_cols,order=my_groups,aspect=1)
#plt.axvline(0,color='k',linewidth=3)
#plt.xlim(0.05,-0.05,-0.01)
#sns.despine(left=True,trim=True)
#plt.ylabel('')
#plt.savefig('../reports/figures/10-dynamite-plot.png',dpi=300,bbox_inches='tight')
#plt.show()
#
#
#
## In[22]:
#
#
#from scipy import stats
#
#
#
## In[23]:
#
#
#t,p = stats.ttest_ind(dynamite_df.loc['bilateral','correlation'],dynamite_df.loc['left','correlation'])
#print('\nt=%.2f,p=%.64f'%(t,p))
#t,p = stats.ttest_ind(dynamite_df.loc['bilateral','correlation'],dynamite_df.loc['right','correlation'])
#print('\nt=%.2f,p=%.38f'%(t,p))
#t,p = stats.ttest_ind(dynamite_df.loc['left','correlation'],dynamite_df.loc['right','correlation'])
#print('\nt=%.2f,p=%.248f'%(t,p))
#
#
# ### as histogram
#
## In[24]:
#
#
#fig,ax = plt.subplots(1,1,figsize=(8,5))
#for group in my_groups:
# sns.distplot(stack_df.loc[group,:],color=my_cols[group],label=group,ax=ax)
#plt.legend()
#plt.xlim(0.4,-0.4,-0.2)
#sns.despine()
#plt.show()
#
#
# ### set up the classifier
#
## In[25]:
#
#
#clf = svm.SVC(kernel='linear',C=1.0,probability=False,class_weight='balanced')
#
#
# In[26]:
def scale_features(X):
    """Z-score the feature matrix and hand back the fitted scaler.

    Returns ``(X_scaled, scaler)`` so the identical transform can be
    re-applied to held-out test data later.
    """
    scaler = preprocessing.StandardScaler().fit(X)
    return scaler.transform(X), scaler
# In[27]:
def encode_labels(y):
    """Map class labels to integer codes.

    Returns ``(y_labels, labeler)``; the fitted LabelEncoder can undo
    the mapping later via ``inverse_transform``.
    """
    labeler = preprocessing.LabelEncoder()
    labeler.fit(np.unique(y))
    return labeler.transform(y), labeler
# In[28]:
def train_classifier(df):
    """Fit the module-level classifier on one fold of training data.

    Features are z-scored and the labels integer-encoded before fitting;
    the fitted scaler and label encoder are returned together with the
    classifier so the same transforms can be applied to held-out data.

    NOTE: mutates the module-level ``clf`` in place.
    """
    features = df.values
    labels = df.index.get_level_values(0)
    scaled, scaler = scale_features(features)
    encoded, labeler = encode_labels(labels)
    clf.fit(scaled, encoded)
    return clf, scaler, labeler
#
## In[29]:
#
#
#example_clf, example_scaler, example_labeler = train_classifier(stack_df)
#
#
#
## In[30]:
#
#
#example_clf
#
#
#
## In[31]:
#
#
#example_scaler
#
#
#
## In[32]:
#
#
#example_labeler.classes_
#
#
#
## In[33]:
#
#
#def get_boundaries(clf,my_scaler):
# '''find the point where the classifier changes its prediction;
# this is an ugly brute-force approach and probably there is a much
# easier way to do this
# '''
#
# d = {}
# for i in np.linspace(-1,1,10000):
# this_val = my_scaler.transform(np.array([i]).reshape(1,-1))
# this_predict = clf.predict(this_val)
# d[i] = this_predict[-1]
# df = pd.DataFrame(d,index=['pred']).T
# return df[(df-df.shift(1))!=0].dropna().index[1:]
#
#
#
## In[34]:
#
#
#from datetime import datetime
#
#
# ### get class boundaries of all folds
#
## In[35]:
#
#
#import tqdm
#
#
#
## In[36]:
#
#
#def get_all_boundaries(stack_df):
# '''for each fold, get the boundaries, by
# training on everybody but the held-out patient
# and storing the boundaries'''
#
# all_boundaries = {}
#
# conclusive_pats = np.unique(stack_df.index.get_level_values(1))
#
# for p in tqdm.tqdm(conclusive_pats):
#
# # in the current fold, we drop one patient
# df = stack_df.drop(p,level=1)
#
# # train on this fold's data
# clf,my_scaler,my_labeler = train_classifier(df)
#
# # get the classifier boundaries
# boundaries = get_boundaries(clf,my_scaler)
# all_boundaries[p] = boundaries
#
# return all_boundaries
#
#
# Compute the boundaries and store them for later re-use:
#
## In[37]:
#
#
#all_boundaries = get_all_boundaries(stack_df)
#bound_df = pd.DataFrame(all_boundaries).T
#bound_df.tail()
#
#
#
## In[38]:
#
#
#bound_df.to_csv('../data/processed/csv/bound_df.csv')
#
#
# To make things faster, we can re-load the computed boundaries here:
#
## In[39]:
#
#
#bound_df = pd.read_csv('../data/processed/csv/bound_df.csv',index_col=[0],header=0)
#bound_df.tail()
#
#
# rename so boundaries have meaningful descriptions:
#
## In[40]:
#
#
#bound_df = bound_df.rename(columns={'0':'B/R','1':'L/B'})
#bound_df.tail()
#
#
#
## In[41]:
#
#
#bound_df.describe()
#
#
# #### show the class boundaries overlaid on the data distribution
#
## In[42]:
#
#
#fig,ax = plt.subplots(1,1,figsize=(8,5))
#for group in my_groups:
# sns.distplot(stack_df.loc[group,:],color=my_cols[group],label=group,ax=ax)
#
#for b in bound_df.values.flatten():
# plt.axvline(b,alpha=0.1,color=color_dict['black'])
#
#plt.legend()
#plt.xlabel('correlation')
#plt.ylabel('density')
#plt.xlim(0.4,-0.4,-0.2)
#plt.ylim(0,8)
#plt.legend(loc=(0.65,0.65))
#sns.despine(trim=True,offset=5)
#plt.savefig('../reports/figures/10-distribution-plot.png',dpi=300,bbox_inches='tight')
#plt.show()
#
#
# #### make swarm/factorplot with boundary values
#
## In[43]:
#
#
#sns_df = pd.DataFrame(bound_df.stack())
#sns_df.columns = ['correlation']
#sns_df.loc[:,'boundary'] = sns_df.index.get_level_values(1)
#sns_df.loc[:,'dummy'] = 0
#
#
#
## In[44]:
#
#
#sns_df.tail()
#
#
#
## In[45]:
#
#
#fig,ax = plt.subplots(1,1,figsize=(4,5))
#sns.swarmplot(data=sns_df,
# x='correlation',
# y='dummy',
# hue='boundary',
# orient='h',
# palette={'L/B':my_cols['left'],'B/R':my_cols['right']},
# size=4,
# alpha=0.9,
# ax=ax
# )
#plt.xlim(0.04,-0.02,-0.02)
#ax.set_ylabel('')
#ax.set_yticks([])
#sns.despine(left=True,trim=True)
#plt.savefig('../reports/figures/10-boundary-swarm-plot.png',dpi=300,bbox_inches='tight')
#
#plt.show()
#
#
# ### combine above into one plot
#
## In[46]:
#
#
#sns.set_style('dark')
#
#
#
## In[47]:
#
#
#fig = plt.figure(figsize=(16,6))
#
#ax1 = fig.add_axes([0.36, .999, 1, .7], xticklabels=[], yticklabels=[])
#ax1.imshow(Image.open('../reports/figures/10-dynamite-plot.png'))
#
#ax2 = fig.add_axes([0, 1, 1, 0.8], xticklabels=[], yticklabels=[])
#ax2.imshow(Image.open('../reports/figures/10-distribution-plot.png'))
#
#ax3 = fig.add_axes([0.65, 1, 1, 0.8], xticklabels=[], yticklabels=[])
#ax3.imshow(Image.open('../reports/figures/10-boundary-swarm-plot.png'))
#
#plt.text(0,1, 'A',transform=ax2.transAxes, fontsize=32)
#plt.text(1.04,1, 'B',transform=ax2.transAxes, fontsize=32)
#plt.text(1.63,1, 'C',transform=ax2.transAxes, fontsize=32)
#
#plt.savefig('../reports/figures/10-training-overview.png',dpi=300,bbox_inches='tight')
#plt.show()
#
#
# ### make predictions for all patients (conc and inconc)
# #### invert
#
## In[48]:
#
#
#all_inv_df = group_df*conds
#
#
#
## In[49]:
#
#
#all_inv_df.tail()
#
#
# In[50]:
def make_preds(this_df, clf, my_scaler, my_labeler):
    """Classify every volume of one held-out patient and tally the votes.

    Applies the stored scaler, classifier and label encoder (fitted on
    the training folds) to each column of ``this_df`` and returns a
    one-row DataFrame of per-class counts, indexed like ``this_df``.
    """
    features = my_scaler.transform(this_df.T)
    raw_preds = clf.predict(features)
    named_preds = my_labeler.inverse_transform(raw_preds)
    tally = pd.Series(named_preds).value_counts()
    result = pd.DataFrame(tally).T
    result.index = pd.MultiIndex.from_tuples(this_df.index)
    return result
# Example:
#
## In[51]:
#
#
#make_preds(all_inv_df.iloc[[-1]],example_clf, example_scaler, example_labeler)
#
#
#
## In[52]:
#
#
#import warnings
## this is necessary to get rid of https://github.com/scikit-learn/scikit-learn/issues/10449
#with warnings.catch_warnings():
# warnings.filterwarnings("ignore",category=DeprecationWarning)
#
# for p in tqdm.tqdm(all_inv_df.index):
#
# # get data in leave-one-out fashion
# this_df = all_inv_df.loc[[p],:]
# other_df = stack_df.drop(p[-1],level=1)
#
# # train on this fold's data
# clf,my_scaler,my_labeler = train_classifier(other_df)
# # make predictions
# p_df = make_preds(this_df,clf,my_scaler,my_labeler)
#
# out_name = '../data/processed/csv/%s_counts_df.csv' % p[-1]
# p_df.to_csv(out_name)
# data_df.loc[p,'counts_df'] = out_name
#
#data_df.to_csv('../data/processed/csv/info_epi_zscored_zdiff_summarymaps_2dpredclean_corr_counts_df.csv')
#
#
# ### train classifier once on all data and store
#
# We store a classifer trained on all data as a pickle file so we can re-use it in the future on new data
#
## In[53]:
#
#
#clf,my_scaler,my_labeler = train_classifier(stack_df)
#d = {'clf':clf,'scaler':my_scaler,'labeler':my_labeler}
#
#
#
## In[54]:
#
#
#with open('../models/volume_clf.p','wb') as f:
# pickle.dump(d,f)
#
#
# #### toolbox model
#
# The toolbox assumes that a dataset used as input is a new dataset and was not part of this study
# Load the classifier bundle trained on the full study sample.
# NOTE: pickle.load executes arbitrary code on malicious input; this file
# ships with the toolbox and is assumed trusted.
clf_file = os.path.join(supDir,'models','volume_clf.p')
with open(clf_file,'rb') as f:
    clf_dict = pickle.load(f)
clf = clf_dict['clf']  # fitted classifier
my_scaler = clf_dict['scaler']  # feature scaler fitted on training data
my_labeler = clf_dict['labeler']  # label encoder (class name <-> int)
def make_p(pFolder,pName,clf=clf,my_scaler=my_scaler,my_labeler=my_labeler):
    """Run the volume-wise classifier on one new patient.

    Reads ``<pFolder>/<pName>_corr_df.csv``, applies the study-wide
    condition inversion (``conds``) so task and rest volumes point the
    same way, counts per-class predictions with ``make_preds``, writes
    them to ``<pName>_counts_df.csv`` in the same folder and returns
    that output path.
    """
    filename = os.path.join(pFolder, ''.join([ pName, '_corr_df.csv']))
    this_df = pd.read_csv(filename, index_col=[0], header=0)
    # Re-index the single row the way the training data was indexed.
    this_df.index = [['correlations'],[pName]]
    inv_df = this_df*conds
    counts_df = make_preds(inv_df,clf,my_scaler,my_labeler)
    out_name = os.path.join(pFolder, ''.join([ pName, '_counts_df.csv']))
    counts_df.to_csv(out_name)
    return out_name
# ### summary
#
# For each patient, a classfier has been developed based on all the other patient (Leave-One-Out) and applied to the 200 volumes of that patient. There are now 200 decisions for each patient, as many as there are volumes. These data are stored in csv files which we can now access to make a prediction on the level of the patient.
#
#
# **************
#
# < [Previous](09-mw-correlations-with-template.ipynb) | [Contents](00-mw-overview-notebook.ipynb) | [Next >](11-mw-logistic-regression.ipynb)
| [
"martin.wegrzyn@uni-bielefeld.de"
] | martin.wegrzyn@uni-bielefeld.de |
3812d7d8d4e45d818400e24894ece0afc5782613 | a3f7f018673a44f86a6d5e308553a96ef3d1a6f0 | /WriteToGoogle.py | 25ecf24b851fd4e9bc0565fa345ce0f7bc728123 | [] | no_license | DhyeyaDesai/XHRData | 91ee0d536749a9a3bc1e8d994560c956033c39c3 | 00fb7adf0baff8fa11290a4b75a19e42e0fc540e | refs/heads/master | 2022-12-18T08:54:12.248690 | 2020-09-27T06:01:55 | 2020-09-27T06:01:55 | 296,892,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | import pandas as pd
import gspread
from df2gspread import df2gspread as d2g
from oauth2client.service_account import ServiceAccountCredentials
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive',
'https://www.googleapis.com/auth/spreadsheets',
'https://www.googleapis.com/auth/drive.file']
def write(df, SPREADSHEET_KEY, WORKSHEET_NAME):
    """Upload *df* (including its index) to the given Google worksheet."""
    credentials = ServiceAccountCredentials.from_json_keyfile_name('credentials.json', scope)
    # NOTE(review): gc is unused — d2g.upload takes the raw credentials below.
    gc = gspread.authorize(credentials)
    d2g.upload(df, SPREADSHEET_KEY, WORKSHEET_NAME, credentials=credentials, row_names=True)
    print("Written")
"rahuldesai1999@gmail.com"
] | rahuldesai1999@gmail.com |
69a637c1fffb04dde2eae7199d16032c313307b2 | ef0c9565875aced961b6281fb9a441263af9d7e6 | /tools/families/generate_families_with_taxon_subsampling.py | 042929a1a3963a95d3dab40a866e807e1969f379 | [] | no_license | BenoitMorel/phd_experiments | 4984a876a6ae71319d979a8bd59905b7b805d3f4 | a9a5b69aa623214fca9ce15f3c068d28127fe95f | refs/heads/master | 2023-08-08T00:04:59.662131 | 2023-07-29T21:30:43 | 2023-07-29T21:30:43 | 120,287,952 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,436 | py | import sys
import os
import random
import ete3
import fam
sys.path.insert(0, 'scripts')
import generate_families_with_prunespecies
import experiments as exp
def generate_replicate(input_datadir, sampling_ratio, replicate):
random.seed(replicate + 42)
input_datadir = os.path.normpath(input_datadir)
output_datadir = input_datadir + "_subtax" + str(sampling_ratio)
output_datadir += "_rep" + str(replicate)
if (os.path.exists(output_datadir)):
print("Directory " + output_datadir + " already exists. Skipping.")
return
species_tree = fam.get_species_tree(input_datadir)
leaves = ete3.Tree(species_tree).get_leaf_names()
number_to_remove = int(float(len(leaves)) * (1.0 - sampling_ratio))
leaves_to_remove = random.sample(leaves, number_to_remove)
generate_families_with_prunespecies.generate(input_datadir, output_datadir, "true", "true", False, leaves_to_remove)
print("Result datadir in " + output_datadir)
def generate(input_datadir, sampling_ratio, replicates):
for replicate in replicates:
generate_replicate(input_datadir, sampling_ratio, replicate)
if (__name__ == "__main__"):
if (len(sys.argv) != 4):
print("Syntax python " + os.path.basename(__file__) + " datadir sampling_ratio replicates")
sys.exit(1)
input_datadir = sys.argv[1]
sampling_ratio = float(sys.argv[2])
replicates = int(sys.argv[3])
generate(input_datadir, sampling_ratio, range(0, replicates))
| [
"morelbt@hitssv543.villa-bosch.de"
] | morelbt@hitssv543.villa-bosch.de |
5fa7f58890afc61e1c24cb99b79dbb415ee50d40 | 9845c872596c64426a64454b04bb17929391328a | /Day 2.py | 8eaec34169921945c1eb0706205a736e4d1211dd | [] | no_license | Stripey2001/AdventOfCode2020 | ac4cca7dc73d6015847e865a5709fdba71d70852 | 5473c5dbbb20e4c1bb471ff36ad65fda8e5c8c04 | refs/heads/main | 2023-02-02T15:49:25.362772 | 2020-12-16T18:43:08 | 2020-12-16T18:43:08 | 318,014,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | file = open("inputDay2.txt.","r")
# Advent of Code 2020, day 2: validate passwords against their policies.
# Each input line looks like "1-3 a: abcde".
lines = file.readlines()
# Part 1: "a-b c:" means letter c must occur between a and b times.
valid_count = 0
for entry in lines:
    policy, letter_field, password = entry.split()
    low, high = policy.split("-")
    occurrences = password.count(letter_field[0])
    if int(low) <= occurrences <= int(high):
        valid_count += 1
print(valid_count)
# Part 2: exactly one of positions a and b (1-based) must hold the letter.
valid_count = 0
for entry in lines:
    policy, letter_field, password = entry.split()
    first, second = (int(p) - 1 for p in policy.split("-"))
    letter = letter_field[0]
    # XOR: valid when the letter appears at exactly one of the positions
    if (password[first] == letter) != (password[second] == letter):
        valid_count += 1
print(valid_count)
"noreply@github.com"
] | Stripey2001.noreply@github.com |
373838d3fdf18145e57517fdaddc9974d59fe21e | 54e9e8c1cb42718ad662768e03e31515d062dc7b | /LiH_gate_counts_smalltime.py | f3f55d0ac427e40b73049baf44130f3a3b9531e1 | [] | no_license | nmoran/qiskit-qdrift-quid19 | c811dfced29bde7f7c99df715e6f05359beb4bbb | dc7a9ec951abda1a33bd94bcfac2e9639d49634c | refs/heads/master | 2020-07-24T21:10:27.563703 | 2019-10-07T16:10:01 | 2019-10-07T16:10:01 | 208,049,574 | 4 | 1 | null | 2019-10-07T16:10:03 | 2019-09-12T12:47:03 | Jupyter Notebook | UTF-8 | Python | false | false | 4,942 | py | import numpy as np
import matplotlib.pyplot as plt
import math
from qiskit import Aer, IBMQ, QuantumRegister, QuantumCircuit
from qiskit.providers.ibmq import least_busy
from qiskit.providers.aer import noise
# lib from Qiskit Aqua
from qiskit.aqua.operators.common import evolution_instruction
from qiskit.aqua import Operator, QuantumInstance
from qiskit.aqua.algorithms import VQE, ExactEigensolver
from qiskit.aqua.components.optimizers import COBYLA, SPSA, L_BFGS_B
from qiskit.aqua.components.variational_forms import RY, RYRZ, SwapRZ
# lib from Qiskit Aqua Chemistry
from qiskit.chemistry import QiskitChemistry
from qiskit.chemistry import FermionicOperator
from qiskit.chemistry.drivers import PySCFDriver, UnitsType
from qiskit.chemistry.aqua_extensions.components.variational_forms import UCCSD
from qiskit.chemistry.aqua_extensions.components.initial_states import HartreeFock
driver = PySCFDriver(atom='H .0 .0 .0; Li .0 .0 1.6', unit=UnitsType.ANGSTROM,
charge=0, spin=0, basis='sto3g')
molecule = driver.run()
nuclear_repulsion_energy = molecule.nuclear_repulsion_energy
num_particles = molecule.num_alpha + molecule.num_beta
num_spin_orbitals = molecule.num_orbitals * 2
print('HF Done')
h1 = molecule.one_body_integrals
h2 = molecule.two_body_integrals
ferOp = FermionicOperator(h1=h1, h2=h2)
qubitOp = ferOp.mapping(map_type='jordan_wigner', threshold=10**-10)
qubitOp.chop(10**-10)
num_terms = len(qubitOp.paulis)
max_term = max([np.abs(qubitOp.paulis[i][0]) for i in range(num_terms)])
error=.01
norm = 0
probs = []
for i in range(len(qubitOp.paulis)):
norm += np.abs(qubitOp.paulis[i][0])
for i in range(len(qubitOp.paulis)):
probs.append(np.abs(qubitOp.paulis[i][0])/norm)
runs = 10
print('start of big loop')
times = np.linspace(.05,.1,10)
qdrift_av_counts=[]
trotter_counts=[]
#iterate through the list of durations
for time_idx in range(len(times)):
qdrift_gate_counts = []
num_time_slices = math.ceil((num_terms*max_term*times[time_idx])**2 / 2*error)
#Iterate (runs) numbers of time to get average data
for run in range(runs):
random_pauli_list=[]
#the number of steps from the norm, time, and error
num_steps = math.ceil((2*norm*times[time_idx])**2 /error)
standard_timestep = times[time_idx]*norm/num_steps
for i in range(num_steps):
idx = np.random.choice(num_terms,p=probs)
#form the list keeping track of the sign of the coefficients
random_pauli_list.append([np.sign(qubitOp.paulis[idx][0])*standard_timestep,qubitOp.paulis[idx][1]])
instruction_qdrift=evolution_instruction(random_pauli_list, evo_time=1, num_time_slices=1, controlled=False, power=1, use_basis_gates=True, shallow_slicing=False)
print('completed {} qdrift evolution_instructions'.format(str(time_idx)))
quantum_registers_qdrift = QuantumRegister(qubitOp.num_qubits)
qc_qdrift = QuantumCircuit(quantum_registers_qdrift)
qc_qdrift.append(instruction_qdrift, quantum_registers_qdrift)
qc_qdrift = qc_qdrift.decompose()
total_qdrift = 0
try:
total_qdrift+=qc_qdrift.count_ops()['cx']
except:
pass
try:
total_qdrift+=qc_qdrift.count_ops()['u1']
except:
pass
try:
total_qdrift+=qc_qdrift.count_ops()['u2']
except:
pass
try:
total_qdrift+=qc_qdrift.count_ops()['u3']
except:
pass
qdrift_gate_counts.append(total_qdrift)
print('start of trotter evolution instruction')
instruction_trotter=evolution_instruction(qubitOp.paulis, evo_time=times[time_idx], num_time_slices=num_time_slices, controlled=False, power=1, use_basis_gates=True, shallow_slicing=False)
print('end of trotter evolution instruction - on to circuit construction')
quantum_registers_trotter = QuantumRegister(qubitOp.num_qubits)
qc_trotter = QuantumCircuit(quantum_registers_trotter)
qc_trotter.append(instruction_trotter, quantum_registers_trotter)
qc_trotter = qc_trotter.decompose()
total_trotter = 0
try:
total_trotter+=qc_trotter.count_ops()['cx']
except:
pass
try:
total_trotter+=qc_trotter.count_ops()['u1']
except:
pass
try:
total_trotter+=qc_trotter.count_ops()['u2']
except:
pass
try:
total_trotter+=qc_trotter.count_ops()['u3']
except:
pass
trotter_counts.append(total_trotter)
qdrift_av_counts.append(sum(qdrift_gate_counts)/len(qdrift_gate_counts))
print('got through {} iterations'.format(str(time_idx)))
plt.plot(times,qdrift_av_counts,label='qdrift_avg_counts')
plt.plot(times,trotter_counts,label = 'trotter_counts')
plt.title('Gates vs Error for Time Evolution')
plt.xlabel("Duration of evolution")
plt.ylabel("Number of Gates")
plt.legend(loc=0)
plt.savefig("LiH_gates_v_time.png", dpi=600)
| [
"Riley@vpn-two-factor-general-231-131-20.dartmouth.edu"
] | Riley@vpn-two-factor-general-231-131-20.dartmouth.edu |
6fdc3db5b428914f4813bf4199befece5ed7563e | df4a7c46c46d1eca6570493b9707bdf64e54f8d3 | /py/209.minimum-size-subarray-sum.py | adaf3f0e6093c8efaad3d2fbdcb5fae7fb66b2a1 | [] | no_license | CharmSun/my-leetcode | 52a39bf719c507fb7032ed424fe857ba7340aea3 | 5325a56ba8c40d74d9fef2b19bac63a4e2c44a38 | refs/heads/master | 2023-03-29T06:39:49.614264 | 2021-03-28T16:33:52 | 2021-03-28T16:33:52 | 261,364,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | #
# @lc app=leetcode id=209 lang=python3
#
# [209] Minimum Size Subarray Sum
#
# @lc code=start
from typing import List
class Solution:
# 双指针移动
def minSubArrayLen(self, target: int, nums: List[int]) -> int:
if not nums:
return 0
left = 0
right = -1
sum = 0
length = len(nums) + 1
while left < len(nums) and right < len(nums):
if right < len(nums) - 1 and sum < target:
right += 1
sum += nums[right]
else:
sum -= nums[left]
left += 1
if sum >= target:
length = min(length, right - left + 1)
if length == len(nums) + 1:
return 0
return length
# @lc code=end
| [
"suncan0812@gmail.com"
] | suncan0812@gmail.com |
e80ac8c78a628d36e3b4d0788d9adfb5968ae19d | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/flicker.py | c9770573731f1ec62ddbbc5ee7fd117eb6088ec5 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 546 | py | ii = [('CookGHP3.py', 1), ('KembFJ1.py', 1), ('TennAP.py', 1), ('CarlTFR.py', 5), ('LyttELD.py', 1), ('TalfTAC.py', 1), ('AinsWRR3.py', 1), ('BailJD1.py', 1), ('RoscTTI2.py', 1), ('GilmCRS.py', 1), ('DibdTRL2.py', 1), ('AinsWRR.py', 1), ('MedwTAI.py', 1), ('FerrSDO2.py', 1), ('TalfTIT.py', 3), ('MedwTAI2.py', 1), ('HowiWRL2.py', 1), ('MartHRW.py', 2), ('LyttELD3.py', 4), ('KembFJ2.py', 1), ('AinsWRR2.py', 1), ('BrewDTO.py', 1), ('ClarGE3.py', 1), ('RogeSIP.py', 1), ('MartHRW2.py', 1), ('MartHSI.py', 2), ('NortSTC.py', 1), ('BeckWRE.py', 1)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
09228ae64537dd9fb78fcabb808a96dacec36126 | 2ab391bfaadf0743da8ffee084896b999e88482d | /wx.py | a2bd1358136ac0530889f2fe820be14236fd42ec | [] | no_license | wean/coupon-windows | 552a59637ea45539bdfa70c6d1bd04626f0fdbd0 | 9565b23c7f44594f182d7a268d4ed45bdeaf8dd3 | refs/heads/master | 2020-04-05T07:11:43.024665 | 2017-11-24T08:23:50 | 2017-11-24T08:23:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,676 | py | # -*- coding:utf-8 -*-
import random
import itchat
import time
from schedule import Schedule
from search import SearchingKeyRegex
from special import Searcher
from utils import getProperty, randomSleep, reprDict
class WX(Schedule):
    """WeChat bot built on itchat.
    Watches configured friends and group chats for search keywords and
    replies with coupon results produced by Searcher (see self.search).
    """
    def __init__(self, configFile):
        """Initialize the Schedule base class and the coupon Searcher."""
        Schedule.__init__(self, configFile)
        self.searcher = Searcher(configFile)
        self.configFile = configFile
    def login(self, exitCallback, uuid=None):
        """Log into WeChat and start watching.
        With uuid=None a cached ("hot reload") login is used; otherwise
        the given login uuid is polled until confirmed on the phone.
        Then the watched friend/group lists are resolved from the config
        file and both the itchat loop and the schedule loop are started.
        """
        def isLoginned(uuid):
            # Poll login status up to 10 times; 200 = confirmed,
            # 201 = scanned but not yet confirmed on the phone.
            # NOTE(review): 'status is 200' relies on CPython small-int
            # caching -- '==' would be the safe comparison.
            for count in range(10):
                status = int(itchat.check_login(uuid))
                if status is 200:
                    return True
                if status is 201:
                    print 'Wait for confirm in mobile #', count
                    randomSleep(1, 2)
                    continue
                print 'Error status:', status
                return False
            return False
        if uuid is None:
            # cached login via the status file (no QR scan if still valid)
            statusFile = getProperty(self.configFile, 'wechat-status-file')
            itchat.auto_login(hotReload=True, statusStorageDir=statusFile)
        else:
            if not isLoginned(uuid):
                raise Exception('Failed to login with {}'.format(uuid))
            userInfo = itchat.web_init()
            itchat.show_mobile_login()
            itchat.get_friends(True)
            itchat.start_receiving(exitCallback)
        self.me = itchat.search_friends()
        print self.me['NickName'], 'is working'
        # resolve the friends to watch (semicolon-separated names)
        self.watchFriends = list()
        names = getProperty(self.configFile, 'wechat-watch-friends').split(';')
        for name in names:
            friends = itchat.search_friends(name=name)
            self.watchFriends.extend(friends)
        # resolve the group chats to watch
        self.watchGroups = list()
        names = getProperty(self.configFile, 'wechat-watch-groups').split(';')
        for name in names:
            groups = itchat.search_chatrooms(name=name)
            self.watchGroups.extend(groups)
        # reply template used to acknowledge a search request
        self.searchReplyPlate = getProperty(self.configFile, 'search-reply-plate')
        itchat.run(blockThread=False) # Run in a new thread
        self.run()
    @staticmethod
    def sendTo(obj, plate=None, image=None):
        """Send an optional text *plate* and/or *image* to *obj*.
        Sleeps a random 0-10s before each send, presumably to look
        human to WeChat's rate limiting -- TODO confirm.
        """
        print '================================================================'
        print 'Send a message to', obj['NickName']
        if plate is not None:
            interval = random.random() * 10
            time.sleep(interval)
            ret = obj.send(plate)
            print 'Result of text message:', ret['BaseResponse']['ErrMsg']
            print '----------------------------------------------------------------'
            print plate
            print '----------------------------------------------------------------'
        if image is not None:
            interval = random.random() * 10
            time.sleep(interval)
            ret = obj.send_image(image)
            print 'Result of', image, ':', ret['BaseResponse']['ErrMsg']
        print '================================================================'
    def text(self, msg):
        """Handler for private messages; reacts only to watched friends."""
        for friend in self.watchFriends:
            if msg['FromUserName'] == friend['UserName']:
                break
        else:
            # sender is not on the watch list -> ignore
            return
        print '================================================================'
        print msg['User']['NickName'], 'sends a message:'
        print '----------------------------------------------------------------'
        print msg['Content']
        print '================================================================'
        self.search(friend, msg['Content'])
    def textGroup(self, msg):
        """Handler for group messages; reacts only to watched groups."""
        for friend in self.watchGroups:
            if msg['FromUserName'] == friend['UserName']:
                break
        else:
            # message is not from a watched group -> ignore
            return
        print '================================================================'
        print msg['User']['NickName'], 'sends a message:'
        print '----------------------------------------------------------------'
        print msg['Content']
        print '================================================================'
        self.search(friend, msg['Content'])
    def send(self, plate, image):
        """Broadcast *plate* and *image* to every watched friend."""
        for friend in self.watchFriends:
            WX.sendTo(friend, plate, image)
    def search(self, friend, content):
        """Run a coupon search for *content* and reply to *friend*.
        Returns silently when the message is not a search request or
        when the search yields no result.
        """
        content = SearchingKeyRegex.parse(content)
        if content is None:
            return
        print 'Searching', content
        WX.sendTo(friend, self.searchReplyPlate.format(content.replace('#', ' ')))
        if not self.searcher.search(content):
            return
        WX.sendTo(friend, self.searcher.plate, self.searcher.image)
| [
"974644081@qq.com"
] | 974644081@qq.com |
4b1c1861c991db68dad4a92a4609c7b982c49b49 | 98de82f74b94c08c40366f08b4155fbca804ea04 | /model.py | 9f851bb3169ee83b335805607d8832a605e3f8d7 | [] | no_license | hailiang194/pytorch-emnist | 1695da3467158da959930d851462950909cf1239 | dc257d6a78d2b98602d5eab55ece4a409274f8e5 | refs/heads/main | 2023-04-18T17:42:03.533046 | 2021-05-11T01:43:37 | 2021-05-11T01:43:37 | 366,041,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 968 | py | from torch import nn
import torch
class NeuralNetwork(nn.Module):
    """CNN classifier for 28x28 EMNIST images (62 classes).

    Accepts flattened inputs (anything reshapeable to (N, 1, 28, 28),
    e.g. a batch of 784-element vectors) and returns per-class
    probabilities of shape (N, 62).
    """

    def __init__(self):
        super(NeuralNetwork, self).__init__()
        # Training model
        self.__model = nn.Sequential(
            nn.Conv2d(1, 32, (5, 5)),  # (N, 1, 28, 28) -> (N, 32, 24, 24)
            nn.ReLU(),
            nn.MaxPool2d((2, 2)),      # -> (N, 32, 12, 12)
            nn.Flatten(),              # -> (N, 4608)
            nn.Linear(4608, 512),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(512, 62),        # one logit per EMNIST class
            # dim=1 made explicit: implicit-dim Softmax is deprecated and
            # resolves to dim=1 for 2-D input anyway, so behavior is kept.
            nn.Softmax(dim=1),
        )

    def forward(self, x):
        """Return (N, 62) class probabilities for a flattened image batch."""
        # reshape the flat 784-vectors back into single-channel 28x28 images
        x = x.reshape((-1, 1, 28, 28))
        return self.__model(x)
| [
"hailuongthe2000@gmail.com"
] | hailuongthe2000@gmail.com |
67de1e30a9745e9bc17f74b5ae27b1a93878aee2 | 8036a4b9e9a3bb00749b1cfbe519e81433c69d25 | /eabc/extras/rewardingSystem.py | 2cff487f5817d7509bc554187fce5af228ff84f4 | [] | no_license | jungla88/eabc_v2 | dd99c5451bc74d43199bf67e169b3da6b4e1ed0e | cf58ee5d4f98fd192903e060d240536fdb4d9cd4 | refs/heads/main | 2023-06-04T10:40:45.151349 | 2021-05-30T18:11:42 | 2021-05-30T18:11:42 | 301,366,295 | 1 | 3 | null | 2021-06-21T22:23:46 | 2020-10-05T10:08:41 | Python | UTF-8 | Python | false | false | 4,844 | py | # -*- coding: utf-8 -*-
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from scipy.stats import norm
class Rewarder:
    """Reward bookkeeping for the eabc evolutionary loop.
    Rewards symbols according to how the models containing them perform,
    and recomputes each agent's fitness as a generation-dependent blend
    of model contribution and internal cluster quality.
    """
    def __init__(self,MAX_GEN=20, isBootStrapped = True):
        #Max generation
        self.MAX_GEN = MAX_GEN
        #current generation
        self.gen = 0
        #tradeoff weight between model contribution and internal cluster quality
        self._modelWeight = 0
        self._isBootStrapped = isBootStrapped
        #Test reward with mean and var
        self._meanModelPerformances = None
        self._stdModelPerformances = None
        # multiplier applied to the gaussian-gap reward in applySymbolReward
        self._scaleFactor = 10
    @property
    def Gen(self):
        """Current generation number."""
        return self.gen
    @Gen.setter
    def Gen(self,val):
        # accept only values up to MAX_GEN, and only while bootstrapped
        if val <= self.MAX_GEN and self._isBootStrapped:
            self.gen = val
        else:
            raise ValueError
        # model weight ramps linearly from 0 to 1 across the generations
        self._modelWeight = self.gen/self.MAX_GEN
    @property
    def modelWeight(self):
        """Current model/cluster tradeoff weight (gen / MAX_GEN)."""
        return self._modelWeight
    @property
    def isBootStrapped(self):
        return self._isBootStrapped
    def evaluateReward(self,models_with_performance):
        """Cache mean/std of the model performances; must be called
        before applySymbolReward for the current population."""
        p = np.asarray([perf for _,perf in models_with_performance])
        self._meanModelPerformances = p.mean()
        self._stdModelPerformances = p.std()
    # def applySymbolReward(self,models_with_performance):
    #     for model,performance in models_with_performance:
    #         for i,symbol in enumerate(model):
    #             if performance <= 0.5:
    #                 symbol.quality = symbol.quality-1
    #             elif performance >= 0.95:
    #                 symbol.quality = symbol.quality+10
    #             else:
    #                 symbol.quality = symbol.quality+1
    def applySymbolReward(self,models_with_performance):
        """Reward/punish every symbol of every model whose performance
        deviates by more than one std from the population mean.
        The magnitude is the gap between the normal pdf at the mean and
        at the model's performance, scaled by _scaleFactor, so stronger
        outliers receive larger rewards/punishments.
        """
        for model,performance in models_with_performance:
            pVal = norm.pdf(performance,self._meanModelPerformances,self._stdModelPerformances)
            valAtmean = norm.pdf(self._meanModelPerformances,self._meanModelPerformances,self._stdModelPerformances)
            for symbol in model:
                if performance >= self._meanModelPerformances + self._stdModelPerformances:
                    symbol.quality = symbol.quality + self._scaleFactor*(valAtmean - pVal)
                elif performance <= self._meanModelPerformances - self._stdModelPerformances:
                    symbol.quality = symbol.quality - self._scaleFactor*(valAtmean - pVal)
    def applyAgentReward(self,agents,alphabet):
        """Recompute fitness for every agent from the qualities of the
        symbols it owns in *alphabet*.
        Symbol qualities are min-max scaled over the whole alphabet; the
        final fitness blends the (scaled) model quality and the internal
        cluster quality with weight self._modelWeight.
        """
        symbolQualities = np.asarray([sym.quality for sym in alphabet]).reshape((-1,1))
        symbolInternalQualities = np.asarray([sym.Fvalue for sym in alphabet]).reshape((-1,1))
        scaledSymbolQs = MinMaxScaler().fit_transform(symbolQualities)
        scaledSymbolInternalQs = MinMaxScaler().fit_transform(symbolInternalQualities)
        agentQualities = np.zeros((len(agents),))
        agentInternalQualities = np.zeros((len(agents),))
        for i,agent in enumerate(agents):
            # qualities of the symbols owned by this agent
            agentSymbolsQ = np.asarray([quality for symbol,quality in zip(alphabet,scaledSymbolQs) if symbol.owner==agent.ID])
            agentSymbolsInternalQ = np.asarray([quality for symbol,quality in zip(alphabet,scaledSymbolInternalQs) if symbol.owner==agent.ID])
            meanQ = np.mean(agentSymbolsQ) if len(agentSymbolsQ) >= 1 else 0
            ##Update agent quality according to symbols qualities
            if agent.fitness.valid:
                agentQualities[i] = agent.fitness.values[0] + meanQ
            else:
                agentQualities[i] = meanQ
            #Set the quality according to compactness and cardinality
            agentInternalQualities[i] = 1- np.mean(agentSymbolsInternalQ) if len(agentSymbolsQ)>= 1 else 0
        #TODO: make sense normalizing agent fitness in [0,1]?
        scaledAgentQs = MinMaxScaler().fit_transform(agentQualities.reshape((-1,1)))
        for agent,Q,symbolsInQ in zip(agents,scaledAgentQs,agentInternalQualities):
            # weighted blend: model contribution grows with the generation
            modelContribuiton = self._modelWeight*Q
            clusterContribution = (1-self._modelWeight)*symbolsInQ
            fitness = modelContribuiton + clusterContribution
            agent.fitness.values= fitness,
            ##
            agent.modelFitness = modelContribuiton
            agent.clusterFitness =clusterContribution
            print("Agent: {} - Model contribution: {} - Cluster contribution: {} - Total {}".format(agent.ID,agent.modelFitness,agent.clusterFitness,agent.fitness.values))
| [
"luca.baldini@uniroma1.it"
] | luca.baldini@uniroma1.it |
cb97bf7ae5fc7b209d27f00b58948f0f6626da16 | 8d38f23ec63e75f433d5de33c5d9bc51c9d7ac90 | /choco_py/03/__init__.py | f9160e38a2c85aee2b289c5caaf6fd40b73d3da4 | [] | no_license | aliwo/ChocoPy | 4a957468ef38a3bfcd99f112541e6e5b0e2adbdc | eb339c4103e5400c2cf8435b1d6af5f7b3b60548 | refs/heads/master | 2023-05-27T09:38:28.609554 | 2019-10-19T12:09:03 | 2019-10-19T12:09:03 | 211,509,685 | 5 | 1 | null | 2023-05-01T21:15:21 | 2019-09-28T14:06:06 | Python | UTF-8 | Python | false | false | 100 | py | # 이제 초코 변수들이 현재 갖고 있는 초코의 양을 나타내게 되었습니다.
| [
"aliwo@naver.com"
] | aliwo@naver.com |
d1512cac349d08b5f70a64f54f89e3b3633468c1 | 0ecee2ada1149ef4ba530dfa9b69b79b59587356 | /fitting/metrop.py | 801321960761eedcd7c336ee50f341f0bf8c6f85 | [] | no_license | psaha/microlens | 0ef1d4a3c991fb98772be671a7e76c42802b4e8e | a4592122919687f39f312ea9bce5ac668c58b8ea | refs/heads/master | 2020-04-06T09:53:15.012244 | 2020-02-20T15:32:47 | 2020-02-20T15:32:47 | 10,616,978 | 0 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | from numpy.random import random, random_sample
from numpy import array, exp
def samp(fun,lo,hi,N):
    """Metropolis sampler over the box [lo, hi] (element-wise arrays).
    *fun* maps a parameter vector to a log-probability.  A rolling
    buffer of the last N states is kept; the function returns
    (array_of_logps, array_of_states) once the termination test at the
    bottom of the loop fires.
    """
    # start from the centre of the box; '+0' forces a copy of the array
    w = (lo+hi)/2.
    wlyst = [w+0]
    lnplyst = [fun(w)]
    n = 0
    while True:
        # uniform step in [-1, 1) per dimension, scaled to 1/10 of the box
        dw = 2*random_sample(len(w)) - 1
        w += (hi-lo) * dw/10
        # periodic boundary: wrap coordinates that left the box
        for k in range(len(w)):
            if w[k] < lo[k]:
                w[k] += hi[k] - lo[k]
            if w[k] > hi[k]:
                w[k] -= hi[k] - lo[k]
        lnp = fun(w)
        # Metropolis accept/reject against the state stored at slot n%N
        if random() > exp(lnp-lnplyst[n%N]):
            # print('rejected %10.5e vs %10.5e' % (lnp,lnplyst[n%N]))
            w = wlyst[n%N] + 0
            lnp = lnplyst[n%N]
        # else:
        #     print('accepted')
        n += 1
        if n%20 == 0:
            print('lnP = %10.5e after %1i steps' % (lnp,n))
        # grow the buffer until it holds N entries, then overwrite cyclically
        if n < N:
            wlyst += [w+0]
            lnplyst += [lnp]
        else:
            wlyst[n%N] = w + 0
            lnplyst[n%N] = lnp
        # stop once the newest entry is no better than the oldest one
        # NOTE(review): heuristic stopping rule -- confirm intended semantics
        if lnplyst[n%N] <= lnplyst[(n+1)%N]:
            return (array(lnplyst),array(wlyst))
| [
"psaha@physik.uzh.ch"
] | psaha@physik.uzh.ch |
85cfd579495acdf292b299ebf685ed1fe311dedc | 69ab74cab9e66c1e2e3a344d41f533ead23cc777 | /src/ovirtcli/format/format.py | 4aec4de6fb5339d23083d9c0e66c62c50c0cd710 | [
"Apache-2.0"
] | permissive | minqf/ovirt-engine-cli | 907af78e9777441a6791e3aa9518cae3889c6bad | 422d70e1dc422f0ca248abea47a472e3605caa4b | refs/heads/master | 2021-04-19T22:48:41.991796 | 2016-12-28T18:58:48 | 2016-12-28T18:58:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,287 | py | #
# Copyright (c) 2010 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class Formatter(object):
    """Base class for formatter objects."""
    name = None
    def format(self, context, result, scope=None):
        # subclasses must implement the actual formatting
        raise NotImplementedError
    def format_terminal(self, text, border, termwidth, newline="\n\n", header=None, offsettext=True):
        """
        formats (pretty) screen width adapted messages with border
        @param text: text to print
        @param border: border character to repeat
        @param termwidth: terminal width in columns
        @param newline: new line separator (default is '\n\n')
        @param header: upper border header (default is None)
        @param offsettext: align the text to middle of the screen (default True)
        """
        linlebreak = '\r\n'
        offset = " "
        space = " "
        # column where centred text starts (Python 2 integer division)
        introoffset = (termwidth / 2 - (len(text) / 2))
        borderoffset = (termwidth - 4)
        # align multilined output
        # NOTE: the '<>' operator below makes this module Python 2 only
        if text.find(linlebreak) <> -1 :
            # multi-line text is indented, not centred
            offsettext = False
            text = offset + text.replace(linlebreak, (linlebreak + offset))
        if (header):
            # header is embedded in the middle of the top border line
            headeroffset = (borderoffset / 2 - ((len(header) / 2)))
            oddoffset = 0 if termwidth & 1 != 0 else 1
            return offset + headeroffset * border + space + header + space + \
                (headeroffset - len(offset) - oddoffset) * border + newline + \
                ((introoffset * space) if offsettext else "") + text + newline + \
                offset + borderoffset * border + newline
        return offset + borderoffset * border + newline + \
            ((introoffset * space) if offsettext else "") + text + newline + \
            offset + borderoffset * border + newline
| [
"mpastern@redhat.com"
] | mpastern@redhat.com |
cb5d25209c2b79ece5a0ace71180e70062f84329 | 52cd1b9a4886dde92b5bc4670f282a6534324e48 | /utilsMini/sharpDateTime.py | f307cf6545e1b322845a63bb169faa147ba2caaf | [] | no_license | beincy/utils-mini | 81e158b40b16b095cea0506a70ddbc3ad68db6c3 | 04d87a90c5699dddd3afb7865e78f0d182488a0a | refs/heads/master | 2020-07-04T12:18:50.968902 | 2019-12-05T07:13:09 | 2019-12-05T07:13:09 | 202,285,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,153 | py | import datetime
from utilsMini.parse import parseTime, parseTimeStr
class SharpDateTime:
    """Chainable wrapper around datetime.datetime (C#-style fluent API).

    Every mutator returns ``self`` so calls can be chained, e.g.
    ``SharpDateTime().now().addDay(-1).date().toString()``.
    """

    # The wrapped datetime; the class-level default is replaced per instance.
    theTime = None

    def __init__(self, timeStr='', timeFormat='%Y-%m-%d %H:%M:%S'):
        """Parse *timeStr* with *timeFormat*.

        An empty *timeStr* defaults to 1997-01-01 00:00:01.
        """
        if len(timeStr):
            self.theTime = parseTime(timeStr, timeFormat)
        else:
            self.theTime = datetime.datetime.strptime('1997-01-01 00:00:01',
                                                      '%Y-%m-%d %H:%M:%S')

    def now(self):
        """Reset to the current local time."""
        self.theTime = datetime.datetime.now()
        return self

    def addDay(self, days):
        """Shift by *days* days (negative values go backwards).

        Bug fix: this previously passed ``hours=days`` to timedelta and
        therefore shifted hours instead of days.
        """
        self.theTime = self.theTime + datetime.timedelta(days=days)
        return self

    def addMinutes(self, minutes):
        """Shift by *minutes* minutes (negative values go backwards)."""
        self.theTime = self.theTime + datetime.timedelta(minutes=minutes)
        return self

    def addSeconds(self, seconds):
        """Shift by *seconds* seconds (negative values go backwards)."""
        self.theTime = self.theTime + datetime.timedelta(seconds=seconds)
        return self

    def addMicroseconds(self, microseconds):
        """Shift by *microseconds* microseconds."""
        self.theTime = self.theTime + \
            datetime.timedelta(microseconds=microseconds)
        return self

    def date(self):
        """Truncate to midnight (00:00:00) of the current day."""
        self.theTime = self.theTime - datetime.timedelta(
            hours=self.theTime.hour,
            minutes=self.theTime.minute,
            seconds=self.theTime.second,
            microseconds=self.theTime.microsecond)
        return self

    def Last(self):
        """Move to the last second of the current day (23:59:59)."""
        self.date()
        self.theTime = self.theTime + datetime.timedelta(
            hours=23, minutes=59, seconds=59)
        return self

    def toDateTime(self):
        """Return the wrapped datetime.datetime object."""
        return self.theTime

    def toString(self, timeFormat='%Y-%m-%d %H:%M:%S'):
        """Return the wrapped time rendered with *timeFormat*."""
        return parseTimeStr(self.theTime, timeFormat)
| [
"bianhui0524@sina.com"
] | bianhui0524@sina.com |
20e3b03491e637d58092df7b8221e44c650d1805 | 41be0118b350c65c84cda66cc959f535eece1159 | /boards/tests/test_templatetags.py | 2d04ea7e52ea99bed3c0a0f481451768a1db2645 | [] | no_license | Zhuo-DAU/django-boards | 85bf69abf59d11042884e04895dd0b197633c9e0 | 2717b234752f9c8ec2dbf52869158a99afa4e097 | refs/heads/master | 2022-12-28T10:42:06.245545 | 2020-10-17T05:10:32 | 2020-10-17T05:10:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | from django.test import TestCase
from ..templatetags.form_tags import field_type, input_class
from django import forms
class ExampleForm(forms.Form):
    """Minimal form exercised by the form_tags template-filter tests."""
    # one plain text input and one password input, covering two widget types
    name = forms.CharField()
    password = forms.CharField(widget=forms.PasswordInput())
    class Meta:
        fields = ('name', 'password')
class FieldTypeTests(TestCase):
    """Tests for the ``field_type`` template filter."""
    def test_field_widget_type(self):
        """The filter returns the widget class name of a bound field."""
        form = ExampleForm()
        self.assertEquals('TextInput', field_type(form['name']))
        self.assertEquals('PasswordInput', field_type(form['password']))
class InputClassTests(TestCase):
    """Tests for the ``input_class`` template filter (Bootstrap classes)."""
    def test_unbound_field_initial_state(self):
        """An unbound form gets the bare 'form-control' class."""
        form = ExampleForm()
        self.assertEquals('form-control ', input_class(form['name']))
    def test_valid_bound_field(self):
        """A valid field gains 'is-valid'; the password field stays bare."""
        form = ExampleForm({'name': 'john', 'password': '123'})
        self.assertEquals('form-control is-valid', input_class(form['name']))
        self.assertEquals('form-control ', input_class(form['password']))
    def test_invalid_bound_field(self):
        """A field with validation errors gains 'is-invalid'."""
        form = ExampleForm({'name': '', 'password': '123'})
        self.assertEquals('form-control is-invalid', input_class(form['name']))
| [
"vitor@simpleisbetterthancomplex.com"
] | vitor@simpleisbetterthancomplex.com |
343fdb8f37b7142f134a0a0357917f9773fa9fdd | 995bce14dc06a4d9783d8dd3cfa74ceb8a55b742 | /tests/test_metrics.py | a7613a8b0dd059cbf0f1ec87d58295dbf8d795e7 | [] | no_license | MightyRaccoon/img_encoding_with_NNs | 3221580c945c8de72bcc470e7a6116205835f293 | 8b7480037d948405b33c0be9f03f705ab1ad7f96 | refs/heads/master | 2022-11-20T15:37:05.454433 | 2020-07-12T12:17:43 | 2020-07-12T12:17:43 | 275,252,726 | 0 | 0 | null | 2020-07-12T12:17:44 | 2020-06-26T21:40:23 | Python | UTF-8 | Python | false | false | 1,661 | py | import sys
sys.path.append('.')
import tensorflow as tf
from utils.metrics import MAPEavg
def test_MAPEavg_1d_ident():
    """MAPE of two identical 1-D tensors must be exactly zero."""
    metric = MAPEavg([1, 3])
    metric.update_state(tf.Variable([1, 2, 3]), tf.Variable([1, 2, 3]))
    tf.assert_equal(metric.result(), 0.0)
def test_MAPEavg_1d_pos():
    """Doubling every value yields a mean relative error of 1.0."""
    metric = MAPEavg([1, 3])
    metric.update_state(tf.Variable([1, 1, 1]), tf.Variable([2, 2, 2]))
    tf.assert_equal(metric.result(), 1.0)
def test_MAPEavg_1d_neg():
    """Halving every value yields a mean relative error of 0.5."""
    metric = MAPEavg([1, 3])
    metric.update_state(tf.Variable([2, 2, 2]), tf.Variable([1, 1, 1]))
    tf.assert_equal(metric.result(), 0.5)
def test_MAPEavg_2d_ident():
    """MAPE of two identical 2-D tensors must be exactly zero."""
    metric = MAPEavg([2, 3])
    metric.update_state(tf.Variable([[1, 2, 3], [1, 2, 3]]),
                        tf.Variable([[1, 2, 3], [1, 2, 3]]))
    tf.assert_equal(metric.result(), 0.0)
def test_MAPEavg_2d_pos():
    """Doubling every value in a 2-D tensor yields an error of 1.0."""
    metric = MAPEavg([2, 3])
    metric.update_state(tf.Variable([[1, 1, 1], [1, 1, 1]]),
                        tf.Variable([[2, 2, 2], [2, 2, 2]]))
    tf.assert_equal(metric.result(), 1.0)
def test_MAPEavg_2d_neg():
    """Halving every value in a 2-D tensor yields an error of 0.5."""
    metric = MAPEavg([2, 3])
    metric.update_state(tf.Variable([[2, 2, 2], [2, 2, 2]]),
                        tf.Variable([[1, 1, 1], [1, 1, 1]]))
    tf.assert_equal(metric.result(), 0.5)
def test_MAPEavg_2d_pos_neg():
    """Mixed rows (errors 1.0 and 0.5) must average to 0.75."""
    metric = MAPEavg([2, 3])
    metric.update_state(tf.Variable([[1, 1, 1], [2, 2, 2]]),
                        tf.Variable([[2, 2, 2], [1, 1, 1]]))
    tf.assert_equal(metric.result(), 0.75)
"mishamolovtsev@gmail.com"
] | mishamolovtsev@gmail.com |
7b3013826554668547f7dd64c249b76937234705 | 0aeb464e9115785c600cc948cec67f4845245e1e | /mask.py | ec9d947f5fc9ee68da2917528ee5c413d9caaae9 | [] | no_license | AnnaSm0/FS19_MIA_lab | 45e68f0a0557ad92ea0fbf7f9f703983a4532e4c | 5e10b2c6bdd9b36764f4bdbdd756a56b20f139c8 | refs/heads/master | 2020-04-24T10:31:57.455106 | 2019-05-28T06:32:19 | 2019-05-28T06:32:19 | 171,896,898 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,610 | py | import datetime
import glob
import os
import cv2
import numpy as np
import pandas as pd
# Pick up the first annotation CSV found in the working directory.
csv_file_name = glob.glob('*.csv')[0]
aug_type = 'mask'
df = pd.read_csv(csv_file_name)
# Header row for an augmented-annotation table (not written out here).
augmented_data = [['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']]
# Timestamped output directory, e.g. "2019-05-28_08-00-00-mask-imgs".
date = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
augmented_images_directory = '%s-%s-imgs' % (date, aug_type)
os.makedirs(augmented_images_directory)
maxrange = len(df)
# For every annotated bounding box: cut the implant region out of the
# source image, enhance it (hist-eq + CLAHE + blur), paste it onto a
# black canvas of the original size and save a binarized mask image.
for i in range(0, maxrange):
    print(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
    print("%s/%s" % (i+1, maxrange))
    filename, imgclass = str(df.iloc[i]['filename']), str(df.iloc[i]['class'])
    width, height = df.iloc[i]['width'], df.iloc[i]['height']
    xmin, xmax = df.iloc[i]['xmin'], df.iloc[i]['xmax']
    ymin, ymax = df.iloc[i]['ymin'], df.iloc[i]['ymax']
    print(filename)
    print(imgclass)
    # channel reversal: cv2 loads BGR, the slice flips it to RGB
    img = cv2.imread(filename)[:, :, ::-1]
    X_DIMENSION = height
    Y_DIMENSION = width
    # black canvas the mask is painted onto
    black_image = np.zeros((X_DIMENSION, Y_DIMENSION))
    filename, extension = os.path.splitext(filename)[0], os.path.splitext(filename)[1]
    old_filename = os.path.join(augmented_images_directory,"%s_%s%s" % (filename, imgclass, extension))
    new_filename = "%s_%s_%s.png" % (filename, imgclass, aug_type)
    new_filename_wo_extension = os.path.splitext(new_filename)[0]
    implant_filename = os.path.join(augmented_images_directory, "%s_implant.jpg" % new_filename_wo_extension)
    # keep a copy of the source image next to the generated mask
    cv2.imwrite(old_filename, img)
    # crop the bounding box with a 10px margin on every side
    implant = img[ymin-10:ymax+10, xmin-10:xmax+10]
    cv2.imwrite(implant_filename, implant)
    # re-read as grayscale, then enhance contrast and smooth
    implant = cv2.imread(implant_filename, 0)
    implant = cv2.equalizeHist(implant)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(4, 4))
    implant = clahe.apply(implant)
    implant = cv2.GaussianBlur(implant, (25, 25), 0)
    # paste the enhanced crop back at its original location
    black_image[ymin-10:ymax+10, xmin-10:xmax+10] = implant
    cv2.imwrite(implant_filename, implant)
    cv2.imwrite(os.path.join(augmented_images_directory, new_filename), black_image)
    img = cv2.imread(os.path.join(augmented_images_directory, new_filename), 0)
    # th2 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)
    # th3 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
    # simple global threshold at 127 produces the final binary mask
    ret1, th1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
    # ret3, th3 = cv2.threshold(blur, 200, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    cv2.imwrite(os.path.join(augmented_images_directory, new_filename), th1)
print(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
print("DONE!")
| [
"anna.smolinski@stud.unibas.ch"
] | anna.smolinski@stud.unibas.ch |
b2a7575a4d1106cd9abbb00a260a7de4ca6c4650 | f2ebc38c206d74248322121a2291c4138716b10b | /lab10/config.py | db553c1d45021b7f531fb00512cbe4a70d5ad689 | [] | no_license | vermutsk/Laborator | 6453bd1e8cbc982b39f000d01151d202afafa126 | 8568c0e79c7a575a463fc8223207daedc8dd159a | refs/heads/master | 2023-08-24T20:29:33.027604 | 2021-10-07T10:40:37 | 2021-10-07T10:40:37 | 249,997,827 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,115 | py | import os
import re
import json
import requests
import webbrowser
# VK API method endpoint prefix.
BASE_URL = 'https://api.vk.com/method/'
# OAuth implicit-flow redirect target; the token comes back in the URL fragment.
REDIRECT_URI = 'https://oauth.vk.com/blank.html'
# TODO: add a check that the config files exist (translated from Russian).
class Config():
    """Holds the VK application id and the user's access token.

    The app id is read from ``app_id.txt`` at construction time; the access
    token is cached in ``id_token.txt`` and obtained interactively via the
    OAuth implicit flow when the cache file does not exist yet.
    """

    def __init__(self):
        # Token text; stays empty until is_loaded() fills it in.
        self.data = ''
        # Context manager fixes the original ``open(...).read()`` which
        # leaked the file handle.
        with open('app_id.txt', 'r') as app_file:
            self.APP_ID = app_file.read()

    def is_loaded(self):
        """Load the cached access token, requesting a new one if absent."""
        if os.path.isfile('id_token.txt'):
            with open('id_token.txt', 'r') as token_file:
                self.data = token_file.read()
        else:
            self.data = self.new_token()
            # 'a' (append) kept from the original; the file is new here,
            # so append and write behave the same.
            with open('id_token.txt', 'a') as doc:
                doc.write(self.data)

    def new_token(self):
        """Interactively obtain a new access token (implicit OAuth flow).

        Opens the authorization page in a browser and asks the user to
        paste the redirect URL; loops until the URL matches the expected
        blank.html#access_token=... format, then returns the token.
        """
        template = re.compile(r'^https://oauth.vk.com/blank.html#access_token=(\w+)&expires_in=(\d+)&user_id=(\d+)$')
        while True:
            webbrowser.open(IMPLICIT_URL)
            token_url = input('Вставьте URL открывшейся страницы\n')
            match = template.match(token_url)
            if match:
                # Group 1 of the template is the access token itself; the
                # original ignored the groups and re-parsed with findall.
                return match.group(1)
            print('неверный формат URL')
# Module-level singleton: reading app_id.txt happens at import time.
config = Config()
# Authorization URL for the implicit flow, parameterized with this app id.
IMPLICIT_URL = f'https://oauth.vk.com/authorize?client_id={config.APP_ID}&display=page&redirect_uri={REDIRECT_URI}&scope=friends,offline&response_type=token&v=5.124'
| [
"krnvitman@gmail.com"
] | krnvitman@gmail.com |
c1b6566c44583ea48d75e32ba0b68f2299e69ff5 | eb4affa4c8cb9d0c7e296ff74fa7d8bff280e8c1 | /lib/file_op.py | 7ca8ca4034ac4787d42e68e109e666aed219a81b | [] | no_license | zhaoxuan/every_news | db6f776439eabff754a61b59297d06b06e926a62 | 092b4eb9dee8a44fd83f15b8fbecfddaf115cea4 | refs/heads/master | 2021-01-01T19:33:41.436530 | 2014-08-17T08:10:20 | 2014-08-17T08:10:20 | 12,996,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | #! /usr/bin/python
# -*- coding: utf-8 -*-
import os
class File(object):
    """Append-or-create text file writer.

    Opens ``file_path`` for appending when it already exists; otherwise
    creates any missing parent directories and opens it for writing.
    """

    def __init__(self, file_path):
        self.file_path = file_path
        if os.path.isfile(file_path):
            self.file = open(file_path, 'a')
        else:
            if self.mk_path(file_path):
                self.file = open(file_path, 'w')
            else:
                # The original raised a plain string, which is itself a
                # TypeError in Python 3; raise a real exception instead.
                raise OSError('not output file error')

    def __del__(self):
        # getattr(): __init__ may have failed before self.file was set.
        handle = getattr(self, 'file', None)
        if handle:
            # BUG FIX: the original referenced ``self.file.close`` without
            # calling it, so the handle was never actually closed here.
            handle.close()

    def mk_path(self, file_path):
        """Create the parent directory of *file_path* if needed."""
        path = os.path.split(file_path)[0]
        # ``path`` is '' for a bare file name; os.makedirs('') would raise.
        if path and not os.path.exists(path):
            os.makedirs(path)
        return True

    def write(self, content):
        """Write *content* through the underlying file handle."""
        self.file.write(content)

    def close(self):
        """Close the underlying file handle."""
        self.file.close()
"zhaoxuan1727@gmail.com"
] | zhaoxuan1727@gmail.com |
9ee032a5e092676515e9885801c7f1254633cc08 | fd97c7a1a8a732f77ff53d41c50abfcf48ae8647 | /test_partal_data/getUrl.py | 9697ecb4a509fdd6223661578430309e7d1c9a35 | [] | no_license | Zhaokun-max/workspaces | eb922902fa4762051f2e8660a70d95ce08c1b70b | 87d713a5c8d3763b3dfa191cd7a00933899679b9 | refs/heads/master | 2023-03-21T00:42:06.451609 | 2021-03-20T15:20:26 | 2021-03-20T15:20:26 | 329,214,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py |
class GetUrl():
    """Read-only accessors for the chanquan test-portal API endpoints."""

    # Common prefix shared by every endpoint below.
    _BASE = 'http://test.portal.jlncjy.cacfintech.com/api/v1.0/chanquan'

    @property
    def getUrl_001(self):
        # Save a project.
        return self._BASE + '/project/save'

    @property
    def getUrl_002(self):
        # Save the assignment-conditions info of a project.
        return self._BASE + '/project/saveAssignmentConditionsInfo'

    @property
    def getUrl_003(self):
        # Fast file upload.
        return self._BASE + '/fast/uploadFile'

    @property
    def getUrl_004(self):
        # Project file upload.
        return self._BASE + '/project/uploadFile'

    @property
    def getUrl_005(self):
        # Save a promise hit.
        return self._BASE + '/project/savePromiseHit'
| [
"18701079606@163.com"
] | 18701079606@163.com |
10bd16b2629d3c226a90fa9ed757fd210049d940 | 2e1c1558f6fcb12a57449f9f6f0db6f1cbf38dd6 | /tests/integrations/test_package/config/test.py | 1523cb68f132b4ed41f31b404461758a9e2d19e6 | [
"MIT"
] | permissive | MasoniteFramework/masonite | ca51bf3d0e4777e624b3a9e94d1360936fb8006d | e8e55e5fdced9f28cc8acb1577457a490e5b4b74 | refs/heads/4.0 | 2023-09-01T18:59:01.331411 | 2022-11-05T01:29:29 | 2022-11-05T01:29:29 | 113,248,605 | 2,173 | 185 | MIT | 2023-04-02T02:29:18 | 2017-12-06T00:30:22 | Python | UTF-8 | Python | false | false | 29 | py | PARAM_1 = "test"
PARAM_2 = 1
| [
"idmann509@gmail.com"
] | idmann509@gmail.com |
d0c288557014a2037eb97dca9858deeb4b33a794 | 05901211fa00681063885f1a08f7a73c3951e2f5 | /datafaker/constant.py | 0e4c2b7ad3b8d66019ee816fa79d77221dd6dfba | [
"Apache-2.0"
] | permissive | XcAxel/datafaker | 4b54011cada16c9e4afc539db5dfa291f7ca8e63 | 0104dfff1d403cc31ad01adeb3b2c751b6fd9625 | refs/heads/master | 2022-08-27T02:47:59.132909 | 2020-05-21T02:03:22 | 2020-05-21T02:03:22 | 265,730,871 | 0 | 0 | null | 2020-05-21T02:01:50 | 2020-05-21T02:01:50 | null | UTF-8 | Python | false | false | 914 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-

__version__ = '0.7.2'

# batch size for inserting records
BATCH_SIZE = 1000
# multiprocessing queue, max size is 32767
MAX_QUEUE_SIZE = 30000
# time interval (seconds) for streaming record producing
DEFAULT_INTERVAL = 1
# task num for paralleling
WORKERS = 4
# minimum records for multiple threading, single thread if number of record lower than MIN_RECORDS_FOR_PARALLEL
MIN_RECORDS_FOR_PARALLEL = 10
# output format names
TEXT_FORMAT = 'text'
JSON_FORMAT = 'json'
DEFAULT_FORMAT = TEXT_FORMAT
# local language (faker locale)
DEFAULT_LOCALE = 'zh_CN'
# prefix marking that an enum's values should be loaded from a file
ENUM_FILE = 'file://'
# SQL column types whose values need quotation marks when rendered
STR_TYPES = ['date', 'time', 'datetime', 'char', 'varchar', 'tinyblob',
             'tinytext', 'text', 'mediumtext', 'longtext', 'string']
INT_TYPES = ['tinyint', 'smallint', 'mediumint', 'int', 'integer', 'bigint', ]
FLOAT_TYPES = ['float', 'double', 'decimal', ]
| [
"ligangc@zbj.com"
] | ligangc@zbj.com |
5cd88237592e9d555a8d609bea7ca6d04a9b031f | a50e5050ee099877331748a92029819f5abcb0fc | /PackageClass/ClassInputArea.py | 5390379a16856e650c020633c70aca2f3eed3c6f | [] | no_license | darmawan06/pdpbo3-dikdik-rentaloke | ef05f53ec4cf58a0a59b2b3bf705dee1fcae8e83 | 53e4091ca2c52834d10e0e03cf1742e781c45588 | refs/heads/main | 2023-04-01T13:14:07.351208 | 2021-04-02T01:36:52 | 2021-04-02T01:36:52 | 353,663,191 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,823 | py | from tkinter import *
from tkinter import messagebox
from tkinter import filedialog
from PIL import ImageTk, Image
class InputArea(Frame):
    """Tkinter form collecting a rental customer's data.

    Fields: national id (KTP), name, vehicle name, vehicle type
    (dropdown), accessories (checkbuttons) and car color (radio buttons).
    ``Draw(frame)`` builds the widgets; the GetValue* methods read them.
    """

    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent;
        # Text entries for id / name / vehicle name.
        self.InputNoKTP = Entry(self.parent,width=25)
        self.InputNoKTP.insert(0,"")
        self.InputNama = Entry(self.parent,width=25)
        self.InputNama.insert(0,"")
        self.InputNamaKendaraan = Entry(self.parent,width=25)
        self.InputNamaKendaraan.insert(0,"")
        # Tk variables backing the dropdown / checkbuttons / radio buttons.
        self.ValueJenisKendaraan = StringVar(self.parent)
        self.ValueAksesoris1 = StringVar(self.parent)
        self.ValueAksesoris2 = StringVar(self.parent)
        self.ValueAksesoris3 = StringVar(self.parent)
        self.ValueAksesoris4 = StringVar(self.parent)
        self.ValueWarna = StringVar(self.parent)
        pass

    def Draw(self,frame):
        """Create and grid-place all form widgets inside *frame*.

        NOTE(review): the labels go on *frame* but the entries are rebuilt
        on ``self.parent`` (replacing the ones made in __init__) — confirm
        this mix of parents is intentional.
        """
        # Create the labels (was: "Membuat Label").
        Label(frame,text="No KTP :").grid(column=0,row=0)
        Label(frame,
              text="Nama :"
              ).grid(column=0,row=1)
        Label(frame,text="Nama Kendaraan :").grid(column=0,row=2)
        Label(frame,text="Jenis Kendaraan :").grid(column=0,row=3)
        Label(frame,text="Aksesoris Kendaraan :").grid(column=0,row=4)
        Label(frame,text="Warna Mobil :").grid(column=0,row=8)
        # Create the input widgets (was: "Membuat Proses Input").
        self.InputNoKTP = Entry(self.parent,width=25)
        self.InputNoKTP.insert(0,"")
        self.InputNama = Entry(self.parent,width=25)
        self.InputNama.insert(0,"")
        self.InputNamaKendaraan = Entry(self.parent,width=25)
        self.InputNamaKendaraan.insert(0,"")
        self.InputNoKTP.grid(column=1,row=0)
        self.InputNama.grid(column=1,row=1)
        self.InputNamaKendaraan.grid(column=1,row=2)
        # Vehicle-type dropdown; first option is preselected.
        ListJenisKendaraan = ["Normal","Sport","Racing","OffRoad"]
        self.ValueJenisKendaraan.set(ListJenisKendaraan[0])
        DropdownJenisKendaraan = OptionMenu(frame,
                                            self.ValueJenisKendaraan,
                                            *ListJenisKendaraan)
        DropdownJenisKendaraan.config(width=20)
        DropdownJenisKendaraan.grid(column=1,row=3)
        # Accessory checkbuttons: each stores its own label as onvalue,
        # "Null" when unchecked (see GetValueAksesoris).
        listValueAksesoris = ["Lampu Cadangan","Ban Candangan","GPS","Pengharum"]
        cb1 = Checkbutton(frame,text=listValueAksesoris[0],variable = self.ValueAksesoris1,justify=LEFT,onvalue = listValueAksesoris[0], offvalue="Null")
        cb2 = Checkbutton(frame,text=listValueAksesoris[1],variable = self.ValueAksesoris2,justify=LEFT,onvalue = listValueAksesoris[1], offvalue="Null")
        cb3 = Checkbutton(frame,text=listValueAksesoris[2],variable = self.ValueAksesoris3,justify=LEFT,onvalue = listValueAksesoris[2], offvalue="Null")
        cb4 = Checkbutton(frame,text=listValueAksesoris[3],variable = self.ValueAksesoris4,justify=LEFT,onvalue = listValueAksesoris[3], offvalue="Null")
        cb1.deselect()
        cb2.deselect()
        cb3.deselect()
        cb4.deselect()
        cb1.grid(column=1,row=4)
        cb2.grid(column=1,row=5)
        cb3.grid(column=1,row=6)
        cb4.grid(column=1,row=7)
        # Color radio buttons, one row each starting at row 8.
        ListWarna = ["Merah","Kuning","Hitam","Abu-Abu"]
        baris = 8
        # NOTE(review): this re-creates ValueWarna, discarding the StringVar
        # built in __init__.
        self.ValueWarna = StringVar(self.parent)
        for var in ListWarna:
            Radiobutton(frame,
                        text=var,
                        variable=self.ValueWarna,
                        value=var).grid(column=1,row=baris)
            baris = baris + 1
        pass

    def GetValueNoKTP(self):
        """Return the entered KTP number."""
        return self.InputNoKTP.get()
        pass  # unreachable (kept from original)

    def GetValueNama(self):
        """Return the entered customer name."""
        return self.InputNama.get()
        pass

    def GetValueNamaKendaraan(self):
        """Return the entered vehicle name."""
        return self.InputNamaKendaraan.get()
        pass

    def GetValueJenisKendaraan(self):
        """Return the selected vehicle type."""
        return self.ValueJenisKendaraan.get()
        pass

    def GetValueAksesoris(self):
        """Return the list of checked accessory labels ("Null" = unchecked)."""
        ValueAksesoris = []
        if(self.ValueAksesoris1.get() != "Null"):
            ValueAksesoris.append(self.ValueAksesoris1.get())
        if(self.ValueAksesoris2.get() != "Null"):
            ValueAksesoris.append(self.ValueAksesoris2.get())
        if(self.ValueAksesoris3.get() != "Null"):
            ValueAksesoris.append(self.ValueAksesoris3.get())
        if(self.ValueAksesoris4.get() != "Null"):
            ValueAksesoris.append(self.ValueAksesoris4.get())
        return ValueAksesoris
        pass

    def GetValueWarna(self):
        """Return the selected car color."""
        return self.ValueWarna.get()
        pass
    pass
"74578072+darmawan06@users.noreply.github.com"
] | 74578072+darmawan06@users.noreply.github.com |
beead89528382b978348836d26fab1b78be43800 | 26e4bea46942b9afa5a00b9cde9a84f2cc58e3c9 | /pygame/Astar/implementation.py | 4965fc01f99a6ab2206ed2468d00869b3bb21107 | [] | no_license | MeetLuck/works | 46da692138cb9741a913d84eff6822f107510dc7 | ab61175bb7e2ed5c5113bf150e0541ae18eb04c4 | refs/heads/master | 2020-04-12T05:40:25.143075 | 2017-08-21T17:01:06 | 2017-08-21T17:01:06 | 62,373,576 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,781 | py | # Sample code from http://www.redblobgames.com/pathfinding/
# Copyright 2014 Red Blob Games <redblobgames@gmail.com>
#
# Feel free to use this code in your own projects, including commercial projects
# License: Apache v2.0 <http://www.apache.org/licenses/LICENSE-2.0.html>
from __future__ import print_function
class SimpleGraph:
    """Adjacency-list graph: ``edges`` maps a node id to its neighbour list."""

    def __init__(self):
        self.edges = {}

    def neighbors(self, id):
        """Return the stored neighbour list of *id* (KeyError if unknown)."""
        return self.edges[id]
example_graph = SimpleGraph()
# Directed edges for the article's worked example; note the graph is
# deliberately asymmetric (e.g. D -> E exists but E -> D does not).
example_graph.edges = {
    'A': ['B'],
    'B': ['A', 'C', 'D'],
    'C': ['A'],
    'D': ['E', 'A'],
    'E': ['B']
}
import collections
class Queue:
    """Minimal FIFO queue backed by ``collections.deque``."""

    def __init__(self):
        self.elements = collections.deque()

    def empty(self):
        """True when no elements are queued."""
        return not self.elements

    def put(self, x):
        """Append *x* at the tail."""
        self.elements.append(x)

    def get(self):
        """Pop and return the oldest element."""
        return self.elements.popleft()
# utility functions for dealing with square grids
def from_id_width(id, width):
    """Convert a flat cell index into an (x, y) pair on a grid *width* cells wide."""
    row, col = divmod(id, width)
    return (col, row)
def draw_tile(graph, id, style, width):
    """Return the string drawn for grid cell *id*.

    Rules are applied in order, so later matches override earlier ones
    (start/goal markers hide numbers, walls hide everything).
    """
    tile = "."
    numbers = style.get('number', {})
    if id in numbers:
        tile = "%d" % numbers[id]
    arrow_target = style.get('point_to', {}).get(id)
    if arrow_target is not None:
        (x1, y1) = id
        (x2, y2) = arrow_target
        if x2 == x1 + 1: tile = "\u2192"
        if x2 == x1 - 1: tile = "\u2190"
        if y2 == y1 + 1: tile = "\u2193"
        if y2 == y1 - 1: tile = "\u2191"
    if style.get('start') == id: tile = "A"
    if style.get('goal') == id: tile = "Z"
    if id in style.get('path', ()): tile = "@"
    if id in graph.walls: tile = "#" * width
    return tile
def draw_grid(graph, width=2, **style):
    """Print *graph* row by row; style keywords are forwarded to draw_tile.

    ``width`` is the number of character columns reserved per cell.
    """
    for y in range(graph.height):
        for x in range(graph.width):
            # "%%-%ds" % width builds e.g. "%-2s": left-justify each tile
            # in a fixed-width column.
            print("%%-%ds" % width % draw_tile(graph, (x, y), style, width), end="")
        print()
# data from main article
DIAGRAM1_WALLS = [from_id_width(id, width=30) for id in [21,22,51,52,81,82,93,94,111,112,123,124,133,134,141,142,153,154,163,164,171,172,173,174,175,183,184,193,194,201,202,203,204,205,213,214,223,224,243,244,253,254,273,274,283,284,303,304,313,314,333,334,343,344,373,374,403,404,433,434]]
class SquareGrid:
    """Rectangular grid graph.

    Cells are (x, y) tuples; ``walls`` holds the blocked cells.
    """

    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.walls = []

    def in_bounds(self, id):
        """True when *id* lies inside the width x height rectangle."""
        x, y = id
        return 0 <= x < self.width and 0 <= y < self.height

    def passable(self, id):
        """True when *id* is not a wall."""
        return id not in self.walls

    def neighbors(self, id):
        """Lazily yield the passable, in-bounds orthogonal neighbours of *id*."""
        x, y = id
        candidates = [(x + 1, y), (x, y - 1), (x - 1, y), (x, y + 1)]
        if (x + y) % 2 == 0:
            # Flip the order on alternating cells so drawn paths look straighter.
            candidates.reverse()
        return (cell for cell in candidates
                if self.in_bounds(cell) and self.passable(cell))
class GridWithWeights(SquareGrid):
    """SquareGrid with optional per-cell movement costs (default cost 1)."""

    def __init__(self, width, height):
        # super() instead of the hard-coded SquareGrid.__init__ call, so a
        # future MRO change keeps working (two-arg form stays py2-compatible,
        # matching the file's ``from __future__ import print_function``).
        super(GridWithWeights, self).__init__(width, height)
        self.weights = {}

    def cost(self, from_node, to_node):
        """Cost of stepping onto *to_node*; *from_node* is unused here."""
        return self.weights.get(to_node, 1)
# 10x10 example grid from the article: a short wall plus a patch of
# cost-5 "forest" cells.
diagram4 = GridWithWeights(10, 10)
diagram4.walls = [(1, 7), (1, 8), (2, 7), (2, 8), (3, 7), (3, 8)]
diagram4.weights = {loc: 5 for loc in [(3, 4), (3, 5), (4, 1), (4, 2),
                                       (4, 3), (4, 4), (4, 5), (4, 6),
                                       (4, 7), (4, 8), (5, 1), (5, 2),
                                       (5, 3), (5, 4), (5, 5), (5, 6),
                                       (5, 7), (5, 8), (6, 2), (6, 3),
                                       (6, 4), (6, 5), (6, 6), (6, 7),
                                       (7, 3), (7, 4), (7, 5)]}
import heapq
class PriorityQueue:
    """Min-heap of (priority, item) pairs; lowest priority pops first."""

    def __init__(self):
        self.elements = []

    def empty(self):
        """True when the heap holds no entries."""
        return not self.elements

    def put(self, item, priority):
        """Insert *item* with the given *priority*."""
        heapq.heappush(self.elements, (priority, item))

    def get(self):
        """Remove and return the item with the smallest priority."""
        priority, item = heapq.heappop(self.elements)
        return item
def dijkstra_search(graph, start, goal):
    """Uniform-cost search from *start*, stopping early once *goal* is popped.

    Returns (came_from, cost_so_far): predecessor links and the cheapest
    known cost for every expanded node.
    """
    frontier = PriorityQueue()
    frontier.put(start, 0)
    came_from = {start: None}
    cost_so_far = {start: 0}

    while not frontier.empty():
        current = frontier.get()
        if current == goal:
            break
        for nxt in graph.neighbors(current):
            new_cost = cost_so_far[current] + graph.cost(current, nxt)
            # Relax the edge when nxt is new or reached more cheaply.
            if nxt not in cost_so_far or new_cost < cost_so_far[nxt]:
                cost_so_far[nxt] = new_cost
                came_from[nxt] = current
                frontier.put(nxt, new_cost)

    return came_from, cost_so_far
def reconstruct_path(came_from, start, goal):
    """Walk the came_from links back from *goal*; return the start-to-goal path.

    BUG FIX: the original seeded ``path`` with the goal AND appended
    ``start`` again after the loop, so the start cell appeared twice
    in the returned path.
    """
    path = []
    current = goal
    while current != start:
        path.append(current)
        current = came_from[current]
    path.append(start)
    path.reverse()
    return path
def heuristic(a, b):
    """Manhattan distance between grid cells *a* and *b*."""
    return sum(abs(p - q) for p, q in zip(a, b))
def a_star_search(graph, start, goal):
    """A* search from *start* to *goal*.

    Identical to dijkstra_search except that the frontier priority adds
    the Manhattan heuristic toward *goal*. Returns (came_from, cost_so_far).
    """
    frontier = PriorityQueue()
    frontier.put(start, 0)
    came_from = {start: None}
    cost_so_far = {start: 0}

    while not frontier.empty():
        current = frontier.get()
        if current == goal:
            break
        for nxt in graph.neighbors(current):
            new_cost = cost_so_far[current] + graph.cost(current, nxt)
            # Relax the edge when nxt is new or reached more cheaply.
            if nxt not in cost_so_far or new_cost < cost_so_far[nxt]:
                cost_so_far[nxt] = new_cost
                came_from[nxt] = current
                frontier.put(nxt, new_cost + heuristic(goal, nxt))

    return came_from, cost_so_far
| [
"withpig1994@hanmail.net"
] | withpig1994@hanmail.net |
bbbb9c609651e91e3a3c15c139ff1b5813c22879 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startPyquil2333.py | c971dffe1465e621fa1a309de3e74ac9949af7f2 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,698 | py | # qubit number=4
# total number=29
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
    """Build the fixed 4-qubit test circuit and return it as a pyquil Program.

    NOTE(review): the "# number=N" trailing comments look machine-generated
    (mutation/gate ids from the generating tool) — kept verbatim.
    """
    prog = Program() # circuit begin
    prog += CNOT(0,3) # number=14
    prog += X(3) # number=15
    prog += RX(1.8001325905069514,3) # number=18
    prog += CNOT(0,3) # number=16
    prog += H(1) # number=22
    prog += H(1) # number=2
    prog += H(2) # number=3
    prog += H(3) # number=4
    prog += H(0) # number=5
    prog += X(3) # number=24
    prog += H(1) # number=6
    prog += X(1) # number=25
    prog += H(2) # number=7
    prog += H(3) # number=8
    prog += CNOT(1,0) # number=26
    prog += Z(1) # number=27
    prog += CNOT(1,0) # number=28
    prog += H(0) # number=9
    prog += CNOT(2,0) # number=10
    prog += X(1) # number=17
    prog += CNOT(2,0) # number=11
    prog += Y(0) # number=12
    prog += Y(0) # number=13
    prog += CNOT(2,1) # number=23
    prog += X(0) # number=19
    prog += X(0) # number=20
    # circuit end
    return prog
def summrise_results(bitstrings) -> dict:
    """Count how often each bitstring occurs.

    (Function name keeps the original misspelling: __main__ calls it.)
    """
    counts = {}
    for bits in bitstrings:
        # dict.get() replaces the original if/else membership branch.
        counts[bits] = counts.get(bits, 0) + 1
    return counts
if __name__ == '__main__':
    # Run the circuit 1024 times on a 4-qubit QVM and record the counts.
    prog = make_circuit()
    qvm = get_qc('4q-qvm')
    results = qvm.run_and_measure(prog,1024)
    # Transpose the per-qubit arrays into per-shot rows, then stringify each shot.
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    # NOTE(review): relative output path assumes the script runs from its own dir.
    writefile = open("../data/startPyquil2333.csv","w")
    print(summrise_results(bitstrings),file=writefile)
    writefile.close()
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
fbddef4b9d48e173fddbe92424567a8926db63a3 | 94c7440e7f1d2fdbe4a1e26b9c75a94e49c14eb4 | /leetcode/303.py | 3e11c0d02ea85837838c1abfd9fcbb8f9d209292 | [
"Apache-2.0"
] | permissive | windniw/just-for-fun | 7ddea4f75cf3466a400b46efe36e57f6f7847c48 | 44e1ff60f8cfaf47e4d88988ee67808f0ecfe828 | refs/heads/master | 2022-08-18T09:29:57.944846 | 2022-07-25T16:04:47 | 2022-07-25T16:04:47 | 204,949,602 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | """
link: https://leetcode.com/problems/range-sum-query-immutable
problem: 离线计算数组区间和
solution: 转存 sum[:i]
"""
class NumArray:
    """Offline range-sum queries via a precomputed prefix-sum table."""

    def __init__(self, nums: List[int]):
        # prefix[k] == sum(nums[:k]); one extra leading zero entry.
        prefix = [0] * (len(nums) + 1)
        for idx, value in enumerate(nums):
            prefix[idx + 1] = prefix[idx] + value
        self.s = prefix

    def sumRange(self, i: int, j: int) -> int:
        """Sum of nums[i..j] inclusive, in O(1)."""
        return self.s[j + 1] - self.s[i]
| [
"windniw36@gmail.com"
] | windniw36@gmail.com |
4cdee05ad49fdba13018488725c88f8b8c699ef3 | ddd93d17e0aaa34517d2c3c1fcb2bf183c07f46c | /passwords/passwords_v1.py | d0bcc3db25358e4b8e0589d69119b56498014771 | [] | no_license | Romny468/FHICT | 953d1698ee1df96065b53b5dcbe12fc38665d0cf | 016b8c68d1059d178cb264b388a5fa123c5af4ee | refs/heads/main | 2023-04-12T09:26:56.102648 | 2021-04-15T13:09:27 | 2021-04-15T13:09:27 | 343,802,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,161 | py | #this is a password checker and a password generator
#this program will ask the user whether the user wants to generate or check a password
# this function is used to generate passwords
def passwordGenerate():
    """Interactively generate a random password.

    Asks for a length (Enter = default 10) and whether digits and special
    characters should be included, then prints the generated password.
    """
    try:
        import string, random
    except ImportError:
        # Only import failures are swallowed here; the original wrapped the
        # whole function in a bare ``except`` that hid every error.
        print("an error occured while loading one or more libraries!")
        return
    print("This is a password generator", "\nYou can create a random password with this program")
    # Ask how long the password should be, with validation.
    while True:
        characters = input("\nhow many characters should the password be? ")
        if characters.isdigit():
            if int(characters) > 100:
                # NOTE(review): as in the original, the confirmation answer
                # is ignored — the length is used even if the user says "n".
                check("\nare you sure you want a password bigger than 100 characters?")
            pass_length = int(characters)
            break
        elif characters == "":
            pass_length = 10  # default length
            break
        else:
            print("\nyou must type a number, try again.")
    use_digits = check("\ndo you wish to have numbers in the password?")
    use_specials = check("\ndo you wish to have special characters in the password?")
    # Build the alphabet from the chosen character classes (the original
    # duplicated the join/print in four nearly identical branches).
    char = string.ascii_letters
    if use_digits:
        char += string.digits
    if use_specials:
        char += string.punctuation
    password = "".join(random.choice(char) for i in range(pass_length))
    print("\nThe generated password is: " + password)
# check function to make sure the user answers a question with "y" or "n" ("y" is preselected)
def check(question):
    """Ask a yes/no question; Enter defaults to yes. Returns True or False."""
    while True:
        answer = input(question + " ([y]/n): ")
        if answer in ("y", ""):
            return True
        if answer == "n":
            return False
        print('\nthe question must be answered with "y" or "n"')
# this function is used to check the password security on length, lower case, upper case, digits and special chars
def passwdCheck(passwd):
    """Check *passwd* for length, lower/upper case, digit and special character.

    Returns True when every rule passes; otherwise prints what is missing
    and returns False. (The original returned an implicit None and hid any
    error with a bare ``except: pass``; callers only test truthiness, so
    False stays backward-compatible.)
    """
    import string
    passworderror = "Your password is missing the following:"
    if len(passwd) < 8:
        passworderror += "\n - at least 8 characters"
    if not any(char.islower() for char in passwd):
        passworderror += "\n - one lower case character"
    if not any(char.isupper() for char in passwd):
        passworderror += "\n - one upper case character"
    if not any(char.isdigit() for char in passwd):
        passworderror += "\n - one digit"
    if not any(char in string.punctuation for char in passwd):
        passworderror += "\n - one special character"
    if passworderror == "Your password is missing the following:":
        return True
    print("\n", passworderror, sep="")
    return False
# passwword check setup and final.
def passwordChecker():
    """Prompt for a password and re-prompt until it is strong or the user quits."""
    passwd = input("Give me your password and I will check it for toughness: ")
    while True:
        if (passwdCheck(passwd)):
            print("\npassword it secure enough, for now")
            break
        else:
            # Weak password: offer another attempt; "n" gives up.
            answer1 = check("\npassword not secure, would you want to try another password?")
            if answer1 == True: passwd = input("\nGive me another password and I will check it for toughness: ")
            elif answer1 == False: break
# main function to ask what the user wants to do
def main():
    """Top-level menu: dispatch to the checker or the generator, then loop."""
    print("What would you like me to do?", "\n\n 1: Check \n 2: generate")
    answer = input("Choose an option: ")
    while True:
        if answer in ("1", "check", "Check"):
            print("\nYou chose option 1: password check.")
            passwordChecker()
            break
        elif answer in ("2", "generate", "Generate"):
            print("\nYou chose option 2: generate password.")
            passwordGenerate()
            break
        else: answer = input("I did not understand your wish, answer again: ")
    while True:
        # NOTE(review): answering yes recurses into main(); when that call
        # returns, this loop asks again — the stack grows on repeated "yes".
        answer2 = check("\nWould you like me to do something else?")
        if answer2 == True: main()
        else:
            print("\nGoodbye!")
            exit()
print("Hello, I am able to check and generate passwords.")
main()
| [
"noreply@github.com"
] | Romny468.noreply@github.com |
43019288f3f44d30d1aa829e0d7775c74d503881 | a4bb09ff7b879a84230b3fa0f4ee4cf95d2d5456 | /alerts/blueprint.py | 5cd44a0331f024c405cfbcbeae5e3660ee459d57 | [] | no_license | hosyaeuw/ubit_flask | d8594c4c8848647be894dc33c60327cf0ed2ee15 | 616c3033406a6cd845b98f175a10f9176dbe52e1 | refs/heads/main | 2023-07-05T21:39:24.108884 | 2021-07-16T14:43:31 | 2021-07-16T14:43:31 | 386,669,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,117 | py | from models import *
from sms.blueprint import send_sms
from telegram.blueprint import send_telegram_message
from flask import Blueprint, request, jsonify
import re
blueprint_alerts = Blueprint('alerts', __name__)
db_fields = get_column_fiends(Alerts)
@blueprint_alerts.route('/', methods=['GET'])
def index():
    """Smoke-test endpoint for the alerts blueprint."""
    return "alerts page"
@blueprint_alerts.route('/get_all', methods=['GET'])
def get_all():
    """Return every alert as JSON: [{id, text, date("dd.mm.YYYY")}, ...]."""
    items = Alerts.query.all()
    data = [{
        'id': item.id,
        'text': item.text,
        'date': item.date.strftime("%d.%m.%Y")
    } for item in items]
    return jsonify(data)
@blueprint_alerts.route('/get_by_user/<id>', methods=['GET'])
def get_by_user(id):
    """Return the alerts of the group of user *id*, newest first.

    NOTE(review): assumes the user exists and has at least one dancer
    profile (``.dancers[0]``) — AttributeError/IndexError otherwise;
    confirm with callers.
    """
    g = Users.query.filter(Users.id == id).first().dancers[0].group
    items = g.alerts.order_by(Alerts.date.desc()).all()
    data = [{
        'id': item.id,
        'text': item.text,
        'date': item.date.strftime("%d.%m.%Y")
    } for item in items]
    return jsonify(data)
def strip_phone(phone):
    """Return *phone* with every non-digit character removed."""
    return re.sub(r'\D', '', phone)
@blueprint_alerts.route('/send_message', methods=['POST'])
def send_message():
    """Broadcast *message* to the given groups via SMS and Telegram, then log it.

    Expects JSON body {"message": str, "groups": [group ids]}. Always
    returns the literal string 'ok'.
    """
    args = request.get_json(force=True)
    message = args.get('message')
    id_groups = args.get('groups')
    # Dancers in the target groups who opted in to SMS...
    dancers_sms_active = Dancers.query.filter(
        (Dancers.sms_active == True) &
        (Dancers.group_id.in_(id_groups) == True)).all()
    # ...and those who opted in to Telegram.
    dancers_telegram_active = Dancers.query.filter(
        (Dancers.telegram_active == True) &
        (Dancers.group_id.in_(id_groups) == True)).all()
    groups = Groups.query.filter(Groups.id.in_(id_groups)).all()
    dancers_phones = [strip_phone(dancer.users.phone) for dancer in dancers_sms_active]
    dancers_telegram_chat_id = [dancer.telegram_chat_id
                                for dancer in dancers_telegram_active]
    # NOTE(review): hard-coded sender id — TODO take from the session/auth.
    user_id = 1
    send_sms(dancers_phones, message)
    for chat_id in dancers_telegram_chat_id:
        send_telegram_message(chat_id, message)
    # Persist the alert and attach the target groups.
    a = Alerts(user_id=user_id, text=message)
    a.groups = groups
    db.session.add(a)
    db.session.commit()
    return 'ok'
| [
"gad972@mail.ru"
] | gad972@mail.ru |
d4fb1b0b8ced17a755b72114f85bfb291c6789a9 | fc76da973dea749cd15b77351c50243fd03908af | /dash_live_plot.py | a6ffd7ca216d92da8d9255092a60e2b550c6bd5b | [] | no_license | phet2309/Live-Candlestick-plotting-of-Stocks-Live-Price | 6d15287011aebf5bd3119a1727264e4a34124cb7 | 3854259fc16c8aaec28e014e42e3237af939083a | refs/heads/master | 2023-02-02T06:15:17.198713 | 2020-12-22T19:20:56 | 2020-12-22T19:20:56 | 279,780,093 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | import dash
from dash.dependencies import Output, Input
import dash_core_components as dcc
import dash_html_components as html
import plotly
import random
# import plotly.graph_objs as go
import plotly.graph_objs as go
from collections import deque
X = deque(maxlen=20)
X.append(1)
Y = deque(maxlen=20)
Y.append(1)
app = dash.Dash(__name__)
app.layout = html.Div(
[
dcc.Graph(id='live-graph', animate=True),
dcc.Interval(
id='graph-update',
interval=1*1000
),
]
)
@app.callback(output = Output('live-graph', 'figure'),inputs = [Input('graph-update', 'n_intervals')])
# @app.callback()
def update_graph_scatter(n):
    """Interval callback: append a new random-walk point and redraw.

    Mutates the module-level deques X and Y (maxlen=20, so the window
    slides); the tick count *n* is unused.
    """
    X.append(X[-1]+1)
    # Next Y drifts by up to +/-10% of the previous value.
    Y.append(Y[-1]+Y[-1]*random.uniform(-0.1,0.1))
    data = plotly.graph_objs.Scatter(
        x=X,
        y=Y,
        name='Scatter',
        mode= 'lines+markers'
    )
    # Pin the axis ranges to the current window so the plot scrolls.
    return {'data': [data],'layout' : go.Layout(xaxis=dict(range=[min(X),max(X)]),
                                                yaxis=dict(range=[min(Y),max(Y)]),)}
if __name__ == '__main__':
app.run_server(debug=True)
| [
"phet2309@gmail.com"
] | phet2309@gmail.com |
410436994d50c3b996e5791d3c62a10affc8a697 | 56123e8f2c21656c92f44035d386fe16e9c9df45 | /other/lamp_time.py | f666b2ea82f0a719462d03ef278cc9fa7921a3e9 | [] | no_license | NikitaFedyanin/python_tests | 338bcfcef55b8ef3a8f361e4eddb16bb9642de1a | 1976c4d067b0e71657422a398d5d3d81b16a2c6f | refs/heads/master | 2023-05-10T11:54:36.795170 | 2023-05-04T20:15:40 | 2023-05-04T20:15:40 | 164,305,229 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,954 | py | """На вход функции дан массив datetime объектов — это дата и время нажатия на кнопку.
Вашей задачей является определить, как долго горела лампочка. Массив при этом всегда отсортирован по возрастанию,
в нем нет повторяющихся элементов и количество элементов всегда четное число (это значит, что лампочка,
в конце концов, будет выключена).
"""
from datetime import datetime
from typing import List
def sum_light(els: List[datetime]) -> int:
    """Return how many whole seconds the lamp was lit.

    *els* holds button-press times in ascending order; presses alternate
    on/off, so even indices switch the lamp on and odd indices switch it off.
    """
    total = 0.0
    for switched_on, switched_off in zip(els[::2], els[1::2]):
        total += switched_off.timestamp() - switched_on.timestamp()
    return int(total)
if __name__ == '__main__':
assert sum_light([
datetime(2015, 1, 12, 10, 0, 0),
datetime(2015, 1, 12, 10, 10, 10),
datetime(2015, 1, 12, 11, 0, 0),
datetime(2015, 1, 12, 11, 10, 10),
]) == 1220
assert sum_light([
datetime(2015, 1, 12, 10, 0, 0),
datetime(2015, 1, 12, 10, 10, 10),
datetime(2015, 1, 12, 11, 0, 0),
datetime(2015, 1, 12, 11, 10, 10),
datetime(2015, 1, 12, 11, 10, 10),
datetime(2015, 1, 12, 12, 10, 10),
]) == 4820
assert sum_light([
datetime(2015, 1, 12, 10, 0, 0),
datetime(2015, 1, 12, 10, 0, 1),
]) == 1
assert sum_light([
datetime(2015, 1, 12, 10, 0, 0),
datetime(2015, 1, 12, 10, 0, 10),
datetime(2015, 1, 12, 11, 0, 0),
datetime(2015, 1, 13, 11, 0, 0),
]) == 86410
print("The first mission in series is completed? Click 'Check' to earn cool rewards!") | [
"fedyanin5479"
] | fedyanin5479 |
25e13abd24bb84938ef8ab2428be4b480ac7ada4 | 2f63044850b061b93b46590819ece97833829ddb | /Strings/whatsyourname.py | a372754e82134f2ad8d7864e510cb96f1c6c86be | [] | no_license | Mzaba014/Hackerrank-Solutions | 30bd0380b09d2fb1d20cefd5010bf94d62ab060b | b09399e5031ea9a911d36bce1bfee2d7b6f9309e | refs/heads/master | 2020-04-23T23:37:46.845731 | 2019-03-22T13:47:10 | 2019-03-22T13:47:10 | 171,542,272 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | '''
Title : What's Your Name?
Subdomain : Strings
Domain : Python
Author : Manuel Zabala
Created : 1/23/2019
Problem : https://www.hackerrank.com/challenges/whats-your-name/problem
'''
def print_full_name(a, b):
full_name = 'Hello {} {}! You just delved into python.'.format(a, b)
print(full_name)
if __name__ == '__main__':
first_name = input()
last_name = input()
print_full_name(first_name, last_name)
| [
"mzaba014@gmail.com"
] | mzaba014@gmail.com |
25747b63781fe738c768e8add31f0125a5d5f137 | 402bdf94dd6f5656ba665e41aafbed43098289e2 | /self-dividing-numbers/self-dividing-numbers.py | 0642a2139ec8a98c3189c02a148313b74b5998ba | [] | no_license | Shubham3842/Leet-Code-Solved | fcb51f1694f76728b5dad1b397fcce94d9179c9c | b0bc025ad7440814a72e4aadaabf69766a146f97 | refs/heads/main | 2023-03-04T11:03:26.674194 | 2021-02-08T15:23:02 | 2021-02-08T15:23:02 | 325,962,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 591 | py | class Solution:
def selfDividingNumbers(self, left: int, right: int) -> List[int]:
result =[]
for i in range(left,right+1):
l = list(str(i))
c = 0
if '0' not in l:
for k in l:
if k == 0:
break
if i%int(k)==0:
c+=1
if c == len(l):
result.append(i)
return result
| [
"70909503+Shubham3842@users.noreply.github.com"
] | 70909503+Shubham3842@users.noreply.github.com |
150ada0104f487967baa8037bdf9800d1d660c71 | d10c5d3603e027a8fd37115be05e62634ec0f0a5 | /13_Machine-Learning-with-Tree-Based-Models-in-Python/13_ex_1-12.py | 8bc8ee02a70ea444f217bbab5bc0d3c2c3a249c6 | [] | no_license | stacygo/2021-01_UCD-SCinDAE-EXS | 820049125b18b38ada49ffc2036eab33431d5740 | 027dc2d2878314fc8c9b2796f0c2e4c781c6668d | refs/heads/master | 2023-04-29T01:44:36.942448 | 2021-05-23T15:29:28 | 2021-05-23T15:29:28 | 335,356,448 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | # Exercise 1-12: Linear regression vs regression tree
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error as MSE
SEED = 3  # shared random_state so the split and the tree are reproducible

# Load the Auto MPG data; the regression target is fuel efficiency ('mpg').
df = pd.read_csv('input/auto.csv')
y = df['mpg']
# One-hot encode the categorical features after dropping the target column.
X = pd.get_dummies(df.drop(['mpg'], axis=1))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=SEED)

# Regression-tree baseline and its test-set RMSE.
dt = DecisionTreeRegressor(max_depth=8, min_samples_leaf=0.13, random_state=SEED)
dt.fit(X_train, y_train)
y_pred = dt.predict(X_test)
mse_dt = MSE(y_test, y_pred)
rmse_dt = mse_dt**(1/2)

# Linear-regression baseline for comparison.
lr = LinearRegression()
lr.fit(X_train, y_train)
# Predict test set labels
y_pred_lr = lr.predict(X_test)
# Compute mse_lr
mse_lr = MSE(y_test, y_pred_lr)
# Compute rmse_lr
rmse_lr = mse_lr**(1/2)
# Print rmse_lr
print('Linear Regression test set RMSE: {:.2f}'.format(rmse_lr))
# Print rmse_dt
print('Regression Tree test set RMSE: {:.2f}'.format(rmse_dt))
| [
"stacy.gorbunova@gmail.com"
] | stacy.gorbunova@gmail.com |
6247020a52976a3a19c5307691a2285f94c52703 | aebb80d02f3bea874e2ce569cde2fe16ceb7d098 | /MxOnline/apps/users/forms.py | 08533a7bc9daa85e980defa8845924358ec2eb77 | [] | no_license | zhanxiangyu/MxOnline | 6a9bf71ead5c7857d42637bb59af4d9372f662ed | c0d3879190fe801989ff19e893f7c1a4798f9510 | refs/heads/master | 2021-01-01T04:42:50.299376 | 2017-10-25T08:39:54 | 2017-10-25T08:39:54 | 97,231,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,197 | py | #coding:utf-8
#__author__ = 'zhan'
#__date__ = '2017/5/8 14:00'
from django import forms
from captcha.fields import CaptchaField
from .models import UserProfile
class LoginFrom(forms.Form):
username = forms.CharField(required=True) #这是一个必填项目
password = forms.CharField(required=True, min_length=5)
class RegisterFrom(forms.Form):
email = forms.EmailField(required=True)
password = forms.CharField(required=True, min_length=5)
captcha = CaptchaField(error_messages={'invalid':u'验证码错误'})
class ForgetFrom(forms.Form):
email = forms.EmailField(required=True)
captcha = CaptchaField(error_messages={'invalid':u'验证码错误'})
class ModifyFrom(forms.Form):
password1 = forms.CharField(required=True, min_length=5)
password2 = forms.CharField(required=True, min_length=5)
class ImageUploadFrom(forms.ModelForm):
class Meta:
model = UserProfile
fields = ['image']
class UserInfoFrom(forms.ModelForm):
"""
验证个人中心修改数据
"""
class Meta:
model = UserProfile
fields = ['nick_name', 'birday', 'gender', 'address', 'mobile']
| [
"1033432955@qq.com"
] | 1033432955@qq.com |
14a1fbcf1b983545a3fc8090f4763d301087831a | 29776538ad6978f9a083d683554ff396272db071 | /SecureFileShare/wsgi.py | b54f5db9d127940c6919553d7c75e993af657fce | [
"MIT"
] | permissive | bwelch21/secure-file-share | 6ecffd4d5ec123a6dadeed8fc3d8e9f2488a9dae | 69e693dad4eb02a4a7b8051244947115eea71abc | refs/heads/master | 2021-01-12T08:14:41.975384 | 2016-12-15T03:33:13 | 2016-12-15T03:33:13 | 76,517,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | """
WSGI config for SecureFileShare project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SecureFileShare.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application) | [
"bew5te@virginia.edu"
] | bew5te@virginia.edu |
652923489c3400dbc45efcd56745119f186d1af4 | 924743b86e7c6e8f5bd86a7b04606179429abbbd | /manage.py | 56fd15d5154ff87517d6a29c4e8382e0c70558bb | [] | no_license | swaraj70/olx-scraper | 01bc864d2b5cb3a9e592c5a21672d153b4faa414 | 0d3d7582f0a58c7f0e0848005ce7ec698c532134 | refs/heads/master | 2022-12-13T00:33:33.316810 | 2021-05-01T06:32:21 | 2021-05-01T06:32:21 | 217,027,126 | 1 | 0 | null | 2022-12-08T06:45:42 | 2019-10-23T10:09:23 | Python | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'webscraper.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | swaraj70.noreply@github.com |
e133da1ad46a42d43b0b17f6d3fc2d83933ee9c0 | ac4988c7f9cc98f6dcc31280cd208eeb57d674f2 | /Semana4Sesion1/martinperez/files.py | eba5a294849af7b337a4be981387c1564adc9e14 | [] | no_license | BrucePorras/PachaQTecMayo2020-1 | 77e396ceb2619d028dd740b9414f8a3160eee2cd | 5f39146cc4f69d844dae22a12098479194fd6332 | refs/heads/master | 2022-12-18T02:20:39.828175 | 2020-09-25T05:39:20 | 2020-09-25T05:39:20 | 269,999,646 | 1 | 0 | null | 2020-06-06T14:06:19 | 2020-06-06T14:06:18 | null | UTF-8 | Python | false | false | 1,668 | py | import os
# directorioActual = os.getcwd()
# print(directorioActual)
# CREACION DE CARPETA
# os.makedirs("pachaqtecMPerez")
# Lista todos los archivos en el directorio actual
# directorio = os.listdir(".")
# print(directorio)
import shutil
# Copiar un archivo de una carpeta a otra
# archivoACopiar = "archivocopiado.txt"
# directorioDestino = "E:\\BACKEND-PAQ\\git-repositorio2\\PachaQTecMayo2020\\Semana4Sesion1\\martinperez\\pachaqtecMPerez"
# shutil.copy(archivoACopiar,directorioDestino)
#try:
# #file = open("archivonuevo.txt",'r')
# file = open("archivocopiado.txt",'r')
# print(file.read())
#except Exception as e:
# print("error: ",str(e))
#else:
# file.close()
# try:
# file = open("archivocopiado.txt",'r')
# for lineas in file.readlines():
# print(f"linea {lineas}")
# print(file.read())
# except Exception as e:
# print("error: ",str(e))
# else:
# file.close()
# try:
# file = open("archivocopiado.txt",'w')
# file.write("Nueva linea")
# file = open("archivocopiado.txt",'r')
# for lineas in file.readlines():
# print(f"linea {lineas}")
# except Exception as e:
# print("error: ",str(e))
# else:
# file.close()
# try:
# file = open("archivocopiado.txt",'w')
# for i in range(1,10,1):
# file.write(f"Nueva Linea de for2222 - {i} \n")
# except Exception as e:
# print("error: ",str(e))
# else:
# file.close()
try:
file = open("archivocopiado.txt",'a')
for i in range(1,10,1):
file.write(f"Nueva Linea de for2222 - {i} \n")
except Exception as e:
print("error: ",str(e))
else:
file.close()
| [
"perez_13lo@hotmail.com"
] | perez_13lo@hotmail.com |
f13dd503a9b25ec0cf197860872374891737e452 | 24c84c5b93cd816976d370a99982f45e0d18a184 | /ArraysProblem/Python/FindAllNumbersDisappearedinAnArray.py | 25420fb3ce55ce8bdb7c4beb3f9a49d0977405c8 | [] | no_license | purushottamkaushik/DataStructuresUsingPython | 4ef1cf33f1af3fd25105a45be4f179069e327628 | e016fe052c5600dcfbfcede986d173b401ed23fc | refs/heads/master | 2023-03-12T13:25:18.186446 | 2021-02-28T18:21:37 | 2021-02-28T18:21:37 | 343,180,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | class Solution:
def findDisappearedNumbers(self, nums):
lst = []
if not nums:
return lst
m = max(nums)
for i in range(len(nums)):
print(i)
if i+1 in nums:
continue
else:
lst.append(i+1)
return lst
def findDisappearedNumbers2(self, nums):
s = set(nums)
n = len(nums) + 1
lst = []
for i in range(1,n):
if i not in s:
lst.append(i)
return lst
s = Solution().findDisappearedNumbers([1,1])
print(s) | [
"purushottamkaushik96@gmail.com"
] | purushottamkaushik96@gmail.com |
f1ff0402faa98eac1d68bcde8887767d13fc56b0 | d5277c402d01ff4c68c073f65dab95290fa6d44e | /common/forms.py | b048bd657de23bb7a7c9f4cfa9f918d156c15770 | [
"MIT"
] | permissive | huynhduyman/realestatecrm | b9c8b27405753b60c2e8a35c72146cd143c06d91 | 66fed5766f60260f45bff4f8ab58a895860d0d4a | refs/heads/master | 2022-05-17T04:28:26.575837 | 2020-01-10T16:19:28 | 2020-01-10T16:19:28 | 232,336,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,994 | py | import re
from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.forms import PasswordResetForm
from common.models import Address, User, Document, Comment, APISettings
from django.contrib.auth import password_validation
from teams.models import Teams
class BillingAddressForm(forms.ModelForm):
class Meta:
model = Address
fields = ('address_line', 'street', 'city',
'state', 'postcode', 'country')
def __init__(self, *args, **kwargs):
account_view = kwargs.pop('account', False)
super(BillingAddressForm, self).__init__(*args, **kwargs)
for field in self.fields.values():
field.widget.attrs = {"class": "form-control"}
self.fields['address_line'].widget.attrs.update({
'placeholder': 'Address Line'})
self.fields['street'].widget.attrs.update({
'placeholder': 'Street'})
self.fields['city'].widget.attrs.update({
'placeholder': 'City'})
self.fields['state'].widget.attrs.update({
'placeholder': 'State'})
self.fields['postcode'].widget.attrs.update({
'placeholder': 'Postcode'})
self.fields["country"].choices = [
("", "--Country--"), ] + list(self.fields["country"].choices)[1:]
if account_view:
self.fields['address_line'].required = True
self.fields['street'].required = True
self.fields['city'].required = True
self.fields['state'].required = True
self.fields['postcode'].required = True
self.fields['country'].required = True
class ShippingAddressForm(forms.ModelForm):
class Meta:
model = Address
fields = ('address_line', 'street', 'city',
'state', 'postcode', 'country')
def __init__(self, *args, **kwargs):
super(ShippingAddressForm, self).__init__(*args, **kwargs)
for field in self.fields.values():
field.widget.attrs = {"class": "form-control"}
self.fields['address_line'].widget.attrs.update({
'placeholder': 'Address Line'})
self.fields['street'].widget.attrs.update({
'placeholder': 'Street'})
self.fields['city'].widget.attrs.update({
'placeholder': 'City'})
self.fields['state'].widget.attrs.update({
'placeholder': 'State'})
self.fields['postcode'].widget.attrs.update({
'placeholder': 'Postcode'})
self.fields["country"].choices = [
("", "--Country--"), ] + list(self.fields["country"].choices)[1:]
class UserForm(forms.ModelForm):
password = forms.CharField(max_length=100, required=False)
# sales = forms.BooleanField(required=False)
# marketing = forms.BooleanField(required=False)
class Meta:
model = User
fields = ['email', 'first_name', 'last_name',
'username', 'role', 'profile_pic',
'has_sales_access', 'has_marketing_access', 'brokerage_commission']
def __init__(self, *args, **kwargs):
self.request_user = kwargs.pop('request_user', None)
super(UserForm, self).__init__(*args, **kwargs)
self.fields['first_name'].required = True
if not self.instance.pk:
self.fields['password'].required = True
# self.fields['password'].required = True
# def __init__(self, args: object, kwargs: object) -> object:
# super(UserForm, self).__init__(*args, **kwargs)
#
# self.fields['first_name'].required = True
# self.fields['username'].required = True
# self.fields['email'].required = True
#
# if not self.instance.pk:
# self.fields['password'].required = True
def clean_password(self):
password = self.cleaned_data.get('password')
if password:
if len(password) < 4:
raise forms.ValidationError(
'Password must be at least 4 characters long!')
return password
def clean_has_sales_access(self):
sales = self.cleaned_data.get('has_sales_access', False)
user_role = self.cleaned_data.get('role')
if user_role == 'ADMIN':
is_admin = True
else:
is_admin = False
if self.request_user.role == 'ADMIN' or self.request_user.is_superuser:
if not is_admin:
marketing = self.data.get('has_marketing_access', False)
if not sales and not marketing:
raise forms.ValidationError('Select atleast one option.')
# if not (self.instance.role == 'ADMIN' or self.instance.is_superuser):
# marketing = self.data.get('has_marketing_access', False)
# if not sales and not marketing:
# raise forms.ValidationError('Select atleast one option.')
if self.request_user.role == 'USER':
sales = self.instance.has_sales_access
return sales
def clean_has_marketing_access(self):
marketing = self.cleaned_data.get('has_marketing_access', False)
if self.request_user.role == 'USER':
marketing = self.instance.has_marketing_access
return marketing
def clean_email(self):
email = self.cleaned_data.get("email")
if self.instance.id:
if self.instance.email != email:
if not User.objects.filter(
email=self.cleaned_data.get("email")).exists():
return self.cleaned_data.get("email")
raise forms.ValidationError('Email already exists')
else:
return self.cleaned_data.get("email")
else:
if not User.objects.filter(
email=self.cleaned_data.get("email")).exists():
return self.cleaned_data.get("email")
raise forms.ValidationError('User already exists with this email')
class LoginForm(forms.ModelForm):
email = forms.EmailField()
password = forms.CharField(widget=forms.PasswordInput)
class Meta:
model = User
fields = ['email', 'password']
def __init__(self, *args, **kwargs):
self.request = kwargs.pop("request", None)
super(LoginForm, self).__init__(*args, **kwargs)
def clean_password(self):
password = self.cleaned_data.get('password')
if password:
if len(password) < 4:
raise forms.ValidationError(
'Password must be at least 4 characters long!')
return password
def clean(self):
email = self.cleaned_data.get("email")
password = self.cleaned_data.get("password")
if email and password:
self.user = authenticate(username=email, password=password)
if self.user:
if not self.user.is_active:
pass
# raise forms.ValidationError("User is Inactive")
else:
pass
# raise forms.ValidationError("Invalid email and password")
return self.cleaned_data
class ChangePasswordForm(forms.Form):
# CurrentPassword = forms.CharField(max_length=100)
Newpassword = forms.CharField(max_length=100)
confirm = forms.CharField(max_length=100)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super(ChangePasswordForm, self).__init__(*args, **kwargs)
def clean_confirm(self):
# if len(self.data.get('confirm')) < 4:
# raise forms.ValidationError(
# 'Password must be at least 4 characters long!')
if self.data.get('confirm') != self.cleaned_data.get('Newpassword'):
raise forms.ValidationError(
'Confirm password do not match with new password')
password_validation.validate_password(
self.cleaned_data.get('Newpassword'), user=self.user)
return self.data.get('confirm')
class PasswordResetEmailForm(PasswordResetForm):
def clean_email(self):
email = self.cleaned_data.get('email')
if not User.objects.filter(email__iexact=email,
is_active=True).exists():
raise forms.ValidationError("User doesn't exist with this Email")
return email
class DocumentForm(forms.ModelForm):
teams_queryset = []
teams = forms.MultipleChoiceField(choices=teams_queryset)
def __init__(self, *args, **kwargs):
self.instance = kwargs.get('instance', None)
users = kwargs.pop('users', [])
super(DocumentForm, self).__init__(*args, **kwargs)
for field in self.fields.values():
field.widget.attrs = {"class": "form-control"}
self.fields['status'].choices = [
(each[0], each[1]) for each in Document.DOCUMENT_STATUS_CHOICE]
self.fields['status'].required = False
self.fields['title'].required = True
if users:
self.fields['shared_to'].queryset = users
self.fields['shared_to'].required = False
self.fields["teams"].choices = [(team.get('id'), team.get('name')) for team in Teams.objects.all().values('id', 'name')]
self.fields["teams"].required = False
class Meta:
model = Document
fields = ['title', 'document_file', 'status', 'shared_to']
def clean_title(self):
title = self.cleaned_data.get('title')
if not self.instance.id:
if Document.objects.filter(title=title).exists():
raise forms.ValidationError(
'Document with this Title already exists')
return title
if Document.objects.filter(title=title).exclude(id=self.instance.id).exists():
raise forms.ValidationError(
'Document with this Title already exists')
return title
return title
class UserCommentForm(forms.ModelForm):
comment = forms.CharField(max_length=64, required=True)
class Meta:
model = Comment
fields = ('comment', 'user', 'commented_by')
def find_urls(string):
# website_regex = "^((http|https)://)?([A-Za-z0-9.-]+\.[A-Za-z]{2,63})?$" # (http(s)://)google.com or google.com
# website_regex = "^https?://([A-Za-z0-9.-]+\.[A-Za-z]{2,63})?$" # (http(s)://)google.com
# http(s)://google.com
website_regex = "^https?://[A-Za-z0-9.-]+\.[A-Za-z]{2,63}$"
# http(s)://google.com:8000
website_regex_port = "^https?://[A-Za-z0-9.-]+\.[A-Za-z]{2,63}:[0-9]{2,4}$"
url = re.findall(website_regex, string)
url_port = re.findall(website_regex_port, string)
if url and url[0] != '':
return url
return url_port
class APISettingsForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
assigned_users = kwargs.pop('assign_to', [])
super(APISettingsForm, self).__init__(*args, **kwargs)
for field in self.fields.values():
field.widget.attrs = {"class": "form-control"}
self.fields['lead_assigned_to'].queryset = assigned_users
self.fields['lead_assigned_to'].required = False
# self.fields['title'].widget.attrs.update({
# 'placeholder': 'Project Name'})
# self.fields['lead_assigned_to'].widget.attrs.update({
# 'placeholder': 'Assign Leads To'})
class Meta:
model = APISettings
fields = ('title', 'lead_assigned_to', 'website')
def clean_website(self):
website = self.data.get('website')
if website and not (website.startswith('http://') or
website.startswith('https://')):
raise forms.ValidationError("Please provide valid schema")
if not len(find_urls(website)) > 0:
raise forms.ValidationError(
"Please provide a valid URL with schema and without trailing slash - Example: http://google.com")
return website
| [
"huynhduyman@gmail.com"
] | huynhduyman@gmail.com |
6a50aebb2ce0e653fed8fd6dce6072f20e52dad2 | 47243c719bc929eef1475f0f70752667b9455675 | /bungeni.main/branches/sterch-issue734/bungeni/core/workflows/_actions.py | 66c34fc27d3bf4947bb1e876aff19418c9569781 | [] | no_license | malangalanga/bungeni-portal | bbf72ce6d69415b11287a8796b81d4eb6520f03a | 5cf0ba31dfbff8d2c1b4aa8ab6f69c7a0ae9870d | refs/heads/master | 2021-01-19T15:31:42.943315 | 2014-11-18T09:03:00 | 2014-11-18T09:03:00 | 32,453,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,254 | py | # Bungeni Parliamentary Information System - http://www.bungeni.org/
# Copyright (C) 2010 - Africa i-Parliaments - http://www.parliaments.info/
# Licensed under GNU GPL v2 - http://www.gnu.org/licenses/gpl-2.0.txt
"""Workflow transition actions.
All actions with names starting with a "_" may NOT be referenced from the
workflow XML definitions i.e. they are internal actions, private to bungeni.
They are AUTOMATICALLY associated with the name of a workflow state, via the
following simple naming convention:
_{workflow_name}_{state_name}
Signature of all (both private and public) action callables: !+WFINFO
(context:Object) -> None
!+ All actions with names that start with a letter are actions that may be
liberally used from within workflow XML definitions.
$Id$
"""
log = __import__("logging").getLogger("bungeni.core.workflows._actions")
from bungeni.core.workflows import utils
from bungeni.core.workflows import dbutils
from ore.alchemist import Session
import zope.event
import zope.lifecycleevent
from bungeni.core.serialize import publish_to_xml
import sys
import traceback
# special handled action to make a new version of a ParliamentaryItem, that is
# not tied to a state name, but to <state> @version bool attribute
create_version = utils.create_version
# parliamentary item, utils
def __pi_create(context):
#!+utils.setParliamentId(context)
utils.assign_owner_role_pi(context)
def __pi_submit(context):
utils.set_pi_registry_number(context)
utils.pi_update_signatories(context)
utils.pi_unset_signatory_roles(context)
def __pi_redraft(context):
"""Signatory operations on redraft - Unsetting signatures e.t.c
"""
utils.pi_update_signatories(context)
utils.pi_unset_signatory_roles(context, all=True)
# address
def _address_private(context):
# !+OWNER_ADDRESS(mr, mov-2010) is this logic correct, also for admin?
try:
user_login = dbutils.get_user(context.user_id).login
except AttributeError:
# 'GroupAddress' object has no attribute 'user_id'
user_login = utils.get_principal_id()
if user_login:
utils.assign_owner_role(context, user_login)
def _address_attached(context):
publish_to_xml(context)
# agendaitem
_agendaitem_draft = _agendaitem_working_draft = __pi_create
_agendaitem_submitted = __pi_submit
_agendaitem_redraft = __pi_redraft
_agendaitem_admissible = publish_to_xml
# bill
_bill_draft = _bill_working_draft = __pi_create
_bill_redraft = __pi_redraft
_bill_approved = publish_to_xml
def _bill_gazetted(context):
utils.setBillPublicationDate(context)
utils.set_pi_registry_number(context)
utils.pi_update_signatories(context)
publish_to_xml(context)
# group
def _group_draft(context):
user_login = utils.get_principal_id()
if user_login:
utils.assign_owner_role(context, user_login)
def _deactivate(context):
utils.unset_group_local_role(context)
_deactivate(context)
def _group_active(context):
utils.set_group_local_role(context)
publish_to_xml(context, type='group', include=[])
def _group_dissolved(context):
""" when a group is dissolved all members of this
group get the end date of the group (if they do not
have one yet) and there active_p status gets set to
False"""
dbutils.deactivateGroupMembers(context)
groups = dbutils.endChildGroups(context)
utils.dissolveChildGroups(groups, context)
utils.unset_group_local_role(context)
# committee
_committee_draft = _group_draft
_committee_active = _group_active
_committee_dissolved = _group_dissolved
# parliament
_parliament_draft = _group_draft
_parliament_active = _group_active
_parliament_dissolved = _group_dissolved
# groupsitting
def _groupsitting_draft_agenda(context):
dbutils.set_real_order(context)
def _groupsitting_published_agenda(context):
utils.schedule_sitting_items(context)
publish_to_xml(context, type='groupsitting',include=[])
# motion
_motion_draft = _motion_working_draft = __pi_create
_motion_submitted = __pi_submit
_motion_redraft = __pi_redraft
def _motion_admissible(context):
dbutils.setMotionSerialNumber(context)
publish_to_xml(context)
# question
_question_response_completed = publish_to_xml
def __question_create(context):
__pi_create(context)
utils.assign_question_minister_role(context)
_question_draft = _question_working_draft = __question_create
_question_submitted = __pi_submit
_question_redraft = __pi_redraft
def _question_withdrawn(context):
"""A question can be withdrawn by the owner, it is visible to ...
and cannot be edited by anyone.
"""
utils.setQuestionScheduleHistory(context)
_question_withdrawn_public = _question_withdrawn
def _question_response_pending(context):
"""A question sent to a ministry for a written answer,
it cannot be edited, the ministry can add a written response.
"""
utils.setMinistrySubmissionDate(context)
def _question_admissible(context):
"""The question is admissible and can be send to ministry,
or is available for scheduling in a sitting.
"""
dbutils.setQuestionSerialNumber(context)
publish_to_xml(context)
def _heading_public(context):
publish_to_xml(context,type='heading',include=[])
def _report_published(context):
publish_to_xml(context,type='report',include=[])
# tableddocument
_tableddocument_draft = _tableddocument_working_draft = __pi_create
_tableddocument_submitted = __pi_submit
_tableddocument_redraft = __pi_redraft
def _tableddocument_adjourned(context):
utils.setTabledDocumentHistory(context)
def _tableddocument_admissible(context):
dbutils.setTabledDocumentSerialNumber(context)
publish_to_xml(context)
# user
def _user_A(context):
utils.assign_owner_role(context, context.login)
context.date_of_death = None
publish_to_xml(context, type='user', include=[])
#
# signatories
def __make_owner_signatory(context):
"""Make document owner a default signatory when document is submited to
signatories for consent.
"""
signatories = context.signatories
if context.owner_id not in [sgn.user_id for sgn in signatories._query]:
session = Session()
signatory = signatories._class()
signatory.user_id = context.owner_id,
signatory.item_id = context.parliamentary_item_id
session.add(signatory)
session.flush()
zope.event.notify(zope.lifecycleevent.ObjectCreatedEvent(signatory))
def __pi_submitted_signatories(context):
__make_owner_signatory(context)
for signatory in context.signatories.values():
owner_login = utils.get_owner_login_pi(signatory)
utils.assign_owner_role(signatory, owner_login)
utils.assign_signatory_role(context, owner_login)
utils.pi_update_signatories(context)
_question_submitted_signatories = __pi_submitted_signatories
_motion_submitted_signatories = __pi_submitted_signatories
_bill_submitted_signatories = __pi_submitted_signatories
_agendaitem_submitted_signatories = __pi_submitted_signatories
_tableddocument_submitted_signatories = __pi_submitted_signatories
def _signatory_awaiting_consent(context):
"""Done when parent object is already in submitted_signatories stage.
Otherwise roles assignment is handled by `__pi_assign_signatory_roles`
"""
if context.item.status == u"submitted_signatories":
owner_login = utils.get_owner_login_pi(context)
utils.assign_owner_role(context, owner_login)
utils.assign_signatory_role(context.item, owner_login)
def _signatory_rejected(context):
#!+SIGNATORIES(mb, aug-2011) Unsetting of roles now handled when
# document is submitted or redrafted. Deprecate this action if not needed.
#owner_login = utils.get_owner_login_pi(context)
#utils.assign_signatory_role(context.item, owner_login, unset=True)
return
_signatory_withdrawn = _signatory_rejected
# events
def _event_private(context):
"""
Assigns owner role to event creator - Limit viewing to owner
"""
login = utils.get_principal_id()
if login is not None:
utils.assign_owner_role(context, login)
def _event_attached(context):
publish_to_xml(context)
| [
"jura.stakhiv@gmail.com@fc5d704a-7d24-0410-8c4a-57ddeba10ffc"
] | jura.stakhiv@gmail.com@fc5d704a-7d24-0410-8c4a-57ddeba10ffc |
ce59e45ee8cddd99cedd8e16aefcff92641a326a | 8214e7369f2b86f19602eaffe9e8072f336391bb | /tasks.py | e4ab65a9167e0813e7287c98ba19959386973525 | [
"BSD-3-Clause"
] | permissive | pydev-git/cookiecutter_flask_docker | 27dc47e69a957bd89aeb76db13cc0a08897cd467 | 0bbe0f366d0d8d914b02518c94f5ff75d03386b5 | refs/heads/master | 2021-06-01T11:30:30.912658 | 2016-07-27T08:05:18 | 2016-07-27T08:05:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Invoke tasks."""
import os
import json
import shutil
from invoke import task, run
HERE = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(HERE, 'cookiecutter.json'), 'r') as fp:
COOKIECUTTER_SETTINGS = json.load(fp)
# Match default value of app_name from cookiecutter.json
COOKIE = os.path.join(HERE, COOKIECUTTER_SETTINGS['app_name'])
REQUIREMENTS = os.path.join(COOKIE, 'requirements', 'dev.txt')
@task
def build():
"""Build the cookiecutter."""
run('cookiecutter {0} --no-input'.format(HERE))
@task
def clean():
"""Clean out generated cookiecutter."""
if os.path.exists(COOKIE):
shutil.rmtree(COOKIE)
print('Removed {0}'.format(COOKIE))
else:
print('App directory does not exist. Skipping.')
def _run_manage_command(command):
run('python {0} {1}'.format(os.path.join(COOKIE, 'manage.py'), command), echo=True)
@task(pre=[clean, build])
def test():
"""Run lint commands and tests."""
run('pip install -r {0} --ignore-installed'.format(REQUIREMENTS), echo=True)
os.chdir(COOKIE)
_run_manage_command('lint')
_run_manage_command('test')
| [
"sloria1@gmail.com"
] | sloria1@gmail.com |
3db36797f51b902bc863db8e36d3511fa311c4f4 | cd29d5109e725fe322cef424eddf43b0f3638663 | /Homeworks/HW7/__init__.py | b7f6df8375ebfac38631410a7533a97ac314975c | [
"MIT"
] | permissive | alliemclean/MachineLearning_CS6140 | 0d3a7690dd184800ab8ad1c7d0605bcc5f72e0b1 | ee5e7cefc125fe16ab30a33cd5e3bf8093042282 | refs/heads/master | 2021-05-30T06:55:10.928076 | 2015-12-15T15:01:53 | 2015-12-15T15:01:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,385 | py | import inspect
import warnings
import collections
import cython
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import KernelDensity
import numpy.linalg as la
import numpy as np
from scipy.spatial.distance import cosine
from sklearn.metrics.pairwise import cosine_similarity
import os
import subprocess
subprocess.call(["cython", "-a", os.path.join(os.getcwd(), "CS6140_A_MacLeay/Homeworks/HW7/speedy.pyx")])
import pyximport
pyximport.install(setup_args={"include_dirs": np.get_include()},
reload_support=True)
import speedy
__author__ = 'Allison MacLeay'
class KNN(object):
def __init__(self, n_neighbors=5, classifier=KNeighborsClassifier(n_neighbors=5, algorithm='brute', metric='minkowski', p=2)):
self.k = n_neighbors
self.classifier = classifier
def predict(self, X_test, X, y):
sciKNN = self.classifier
sciKNN.fit(X, y)
return sciKNN.predict(X_test)
class MyKNN(object):
def __init__(self, n_neighbors=5, algorithm='brute', metric='minkowski', metric_params=None, p=2, cls_metric=np.mean, radius=None, density=False, outlier_label=None, bandwidth=None):
self.n_neighbors = n_neighbors
self.metric = metric
if (metric == 'minkowski' and p == 2) or metric == 'euclidean':
self.kernel = speedy.Kernel('euclidean')
else:
self.kernel = Kernel(ktype=metric)
self.N = None
self.cls_metric = cls_metric
self.X_train = None
self.y_train = None
self.radius = radius
self.density = density
self.outlier_label = outlier_label
self.outlier_index = None
self.bandwidth = bandwidth # for density
def fit(self, X, y):
if type(X) is not np.ndarray:
X = np.asarray(X)
y = np.asarray(y, dtype=np.float)
self.X_train = X
self.y_train = y
if self.outlier_label is not None:
self.outlier_index = self.y_train.shape[0]
self.y_train = np.append(self.y_train, self.outlier_label)
def predict(self, X_test):
dec = self.decision_function(X_test)
dsz = len(dec)
return [-1 if dec[i] <= 0 else 1 for i in range(dsz)]
def decision_function(self, X_test):
# Map to K
print 'my predict {} {}'.format(self.n_neighbors, self.kernel.name())
if type(X_test) is not np.ndarray:
X_test = np.asarray(X_test)
#K = speedy.calc_K(self.kernel, X_test, self.X_train)
print('start kernel')
K = calc_K(self.kernel, X_test, self.X_train)
print 'my Kernel calculated'
print K
print K.shape
y_pred = np.zeros(X_test.shape[0])
if self.radius is not None:
#radius
return speedy.decision_function_radius(K, np.array(X_test), self.y_train, self.n_neighbors, self.kernel.name(),
float(self.radius), float(self.outlier_label), int(self.outlier_index), self.cls_metric)
elif self.density:
px_given_1 = np.zeros(K.shape[0])
px_given_0 = np.zeros(K.shape[0])
print set(self.y_train)
p1 = float(np.sum(self.y_train > .5)) / self.y_train.shape[0]
print(collections.Counter(self.y_train))
print(p1)
#p0_arr = np.zeros(K.shape[0])
for i in range(K.shape[0]):
#print('predict {}'.format(i))
# k for each sample in test set i-test j-train
ones = K[i, self.y_train > .5]
zeros = K[i, self.y_train <= .5]
print ones
n_ones = len(ones)
n_zeros = len(zeros)
sum_ones = float(np.sum(ones))
sum_zeros = float(np.sum(zeros))
total = sum_ones + sum_zeros
if total == 0:
px_given_1[i] = 0
px_given_0[i] = 0
continue
px_given_1[i] = sum_ones / total
px_given_0[i] = sum_zeros / total
px1 = np.asarray([float(p1 * px_given_1[i]) for i in xrange(K.shape[0])])
print(px1)
px0 = np.asarray([float((1.0 - p1) * px_given_0[i]) for i in xrange(K.shape[0])])
zs = [a + b for a, b in zip(px0, px1)]
px1 /= zs
px0 /= zs
print(zip(px1, px0))
y_pred = [1 if px1[i] > px0[i] else 0 for i in range(K.shape[0])]
else:
self.N = np.array([sorted(zip(K[i, :], range(len(K[i, :]))))[:self.n_neighbors] for i in range(K.shape[0])])
if not self.density:
for i in xrange(self.N.shape[0]):
y_pred[i] = self.cls_metric([self.y_train[self.N[i][j][1]] for j in xrange(self.N[i].shape[0])])
return y_pred
# get_params needed for clone() in multiclass.py
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
    # _get_param_names needed for clone() in multiclass.py
    @classmethod
    def _get_param_names(cls):
        """Get parameter names for the estimator"""
        # fetch the constructor or the original constructor before
        # deprecation wrapping if any
        init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
        if init is object.__init__:
            # No explicit constructor to introspect
            return []
        # introspect the constructor arguments to find the model parameters
        # to represent
        # NOTE(review): inspect.getargspec() was removed in Python 3.11;
        # fine here because this module is Python 2 (see the print
        # statement in testCython), but a port needs getfullargspec().
        args, varargs, kw, default = inspect.getargspec(init)
        if varargs is not None:
            raise RuntimeError("scikit-learn estimators should always "
                               "specify their parameters in the signature"
                               " of their __init__ (no varargs)."
                               " %s doesn't follow this convention."
                               % (cls, ))
        # Remove 'self'
        # XXX: This is going to fail if the init is a staticmethod, but
        # who would do this?
        args.pop(0)
        args.sort()
        return args
def calc_K(kernel, X_test, X_train):
    """Build the full kernel (Gram) matrix between two sample sets.

    Entry (i, j) is kernel.f(X_test, X_train, i, j), so the result has
    shape (len(X_test), len(X_train)).
    """
    n_rows, n_cols = X_test.shape[0], X_train.shape[0]
    gram = np.zeros(shape=(n_rows, n_cols))
    for row in range(n_rows):
        for col in range(n_cols):
            gram[row, col] = kernel.f(X_test, X_train, row, col)
    return gram
class Kernel(object):
    """Named kernel / distance function selector.

    The callable chosen by *ktype* is bound to ``self.f``; an unknown
    name leaves ``self.f`` as None.  Variants suffixed ``_sci`` take the
    two sample vectors directly, the others take (X, Xt, i, j) and index
    the rows themselves.
    """
    def __init__(self, ktype='euclidean', sigma=1):
        self.sigma = sigma  # only meaningful for the Gaussian kernels
        self.ktype = ktype
        dispatch = {
            'euclidean': self.euclid,
            'minkowski': self.euclid,
            'cosine': self.cosine,
            'cosine_sci': self.cosine_sci,
            'cosine_similarity': self.cosine_similarity,
            'gaussian': self.gaussian,
            'poly2': self.poly2,
            'gaussian_sci': self.gaussian_sci,
            'gaussian_density': self.gaussian_density,
            'poly2_sci': self.poly2_sci,
        }
        self.f = dispatch.get(ktype)

    def euclid(self, xi, xj, **kwargs):
        """Euclidean (L2) distance between vectors xi and xj."""
        squared_diffs = [(xi[idx] - xj[idx]) ** 2 for idx in range(xi.shape[0])]
        return np.sqrt(np.sum(squared_diffs))

    def cosine(self, X, Xt, i, j):
        """Cosine distance (1 - cosine similarity) between X[i] and Xt[j]."""
        similarity = np.dot(X[i], Xt[j].T) / (la.norm(X[i]) * la.norm(Xt[j]))
        return 1 - similarity

    def cosine_similarity(self, X, Xt, i, j):
        # Delegates to the module-level cosine_similarity helper.
        return cosine_similarity(X[i], Xt[j])

    def cosine_sci(self, xi, xj):
        """Cosine distance taking the two vectors directly."""
        return 1 - (np.dot(xi, xj.T) / (la.norm(xi) * la.norm(xj)))

    def xxxgaussian(self, xi, xj, i=None, j=None, sigma=1, **kwargs):
        # Old element-wise Gaussian variant, kept (renamed off) for reference.
        terms = [np.exp(-(la.norm(x - y) ** 2 / (2 * sigma ** 2)))
                 for x, y in zip(xi, xj)]
        return np.sum(terms)

    def gaussian(self, x, y, i=None, j=None, sigma=1, **kwargs):
        """RBF kernel on rows x[i], y[j] (uses the *sigma* argument, not self.sigma)."""
        return np.exp(-(la.norm(x[i] - y[j]) ** 2 / (2 * sigma ** 2)))

    def gaussian_sci(self, xi, yj):
        """RBF kernel taking the two vectors directly (sigma fixed at 1)."""
        sigma = 1
        distance_sq = la.norm(xi - yj) ** 2
        return np.exp(-(distance_sq / (2 * sigma ** 2)))

    def gaussian_density(self, x, y, i, j):
        """Unnormalised Gaussian density with fixed bandwidth (denominator 2**2)."""
        delta_row = x[i] - y[j]
        return np.exp(np.dot(delta_row, delta_row.T) / -(2 ** 2))

    def poly2(self, x, y, i, j):
        """Negated squared dot product of rows x[i], y[j]."""
        return -(np.dot(x[i], y[j]) ** 2)

    def poly2_sci(self, xi, xj, **kwargs):
        """Negated squared dot product taking the vectors directly."""
        return -(np.dot(xi, xj) ** 2)

    def name(self):
        """Return the configured kernel name."""
        return self.ktype

    def compute(self, xi, xj, **kwargs):
        """Apply the selected kernel to two direct vectors."""
        return self.f(xi, xj)
def testCython():
    # Smoke test for the compiled `speedy` Cython extension.
    # (Python 2 print statement: this module targets Python 2.)
    print 'out of speedy'
    speedy.test()
| [
"allison.macleay+csedu@gmail.com"
] | allison.macleay+csedu@gmail.com |
e06215fdfb4e2456cf5f6f26ef24b108051d7371 | cd9eb87e3e1b04e6f421377eff02514de05c98e2 | /learn_SciPy/scikit-learn/User Guide/1. Supervised learning/1.10. Decision Trees.py | 2e0907d0cc61331fa0146ca0c4f1677688f35028 | [] | no_license | zhaojinxi/learn_python | 45f116f9729bbf19d9bb4a574b06e0ec41f754dc | 07b4a5a231e39b6d2c28f98e99a3a8fe3cb534c4 | refs/heads/master | 2021-06-05T22:00:02.528023 | 2020-03-22T04:19:22 | 2020-03-22T04:19:22 | 129,857,802 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | import sklearn.tree
import sklearn.datasets
import graphviz
# 1.10.1. Classification -- fit a decision tree on a toy two-point dataset
X = [[0, 0], [1, 1]]
Y = [0, 1]
clf = sklearn.tree.DecisionTreeClassifier()
clf = clf.fit(X, Y)
clf.predict([[2., 2.]])          # predicted class for an unseen point
clf.predict_proba([[2., 2.]])    # per-class probabilities
# Refit on the iris dataset and export the fitted tree via graphviz.
iris = sklearn.datasets.load_iris()
clf = sklearn.tree.DecisionTreeClassifier()
clf = clf.fit(iris.data, iris.target)
dot_data = sklearn.tree.export_graphviz(clf, out_file=None)
graph = graphviz.Source(dot_data)
graph.render("iris")             # writes the rendered tree next to the script
# Prettier export: labelled features/classes, coloured and rounded nodes.
dot_data = sklearn.tree.export_graphviz(clf, out_file=None, feature_names=iris.feature_names, class_names=iris.target_names, filled=True, rounded=True, special_characters=True)
graph = graphviz.Source(dot_data)
graph                            # displays inline in a notebook; no-op in a script
clf.predict(iris.data[:1, :])
clf.predict_proba(iris.data[:1, :])
# 1.10.2. Regression -- same API via DecisionTreeRegressor
X = [[0, 0], [2, 2]]
y = [0.5, 2.5]
clf = sklearn.tree.DecisionTreeRegressor()
clf = clf.fit(X, y)
clf.predict([[1, 1]])
# Remaining user-guide sections (prose only, no code):
# 1.10.3. Multi-output problems
# 1.10.4. Complexity
# 1.10.5. Tips on practical use
# 1.10.6. Tree algorithms: ID3, C4.5, C5.0 and CART
# 1.10.7. Mathematical formulation
#1.10.7. Mathematical formulation | [
"super-string@outlook.com"
] | super-string@outlook.com |
346bdc0ae36022127e9a167b7a8296499068ea96 | 4b3ede9feeb72ccca84ed1a4d9959fb7ef818d32 | /hw/hw07/hw07.py | 99a19f9e1da4109c90c977239751181cf1983179 | [] | no_license | eqchen1024/cs61a | 2e1936e969b769d64d17b2cccae96c222595ea64 | 6b98724923b65a399ac0e86b3fb08cbeb3f88cd6 | refs/heads/main | 2023-08-16T03:07:41.713360 | 2021-10-21T15:02:19 | 2021-10-21T15:02:19 | 398,767,958 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,126 | py | class Link:
"""A linked list.
>>> s = Link(1)
>>> s.first
1
>>> s.rest is Link.empty
True
>>> s = Link(2, Link(3, Link(4)))
>>> s.second
3
>>> s.first = 5
>>> s.second = 6
>>> s.rest.rest = Link.empty
>>> s # Displays the contents of repr(s)
Link(5, Link(6))
>>> s.rest = Link(7, Link(Link(8, Link(9))))
>>> s
Link(5, Link(7, Link(Link(8, Link(9)))))
>>> print(s) # Prints str(s)
<5 7 <8 9>>
"""
empty = ()
def __init__(self, first, rest=empty):
assert rest is Link.empty or isinstance(rest, Link)
self.first = first
self.rest = rest
    @property
    def second(self):
        """The element stored in the second node, i.e. ``self.rest.first``."""
        return self.rest.first
    @second.setter
    def second(self, value):
        # Overwrites the first element of the rest of the list in place.
        self.rest.first = value
def __repr__(self):
if self.rest is not Link.empty:
rest_repr = ', ' + repr(self.rest)
else:
rest_repr = ''
return 'Link(' + repr(self.first) + rest_repr + ')'
def __str__(self):
string = '<'
while self.rest is not Link.empty:
string += str(self.first) + ' '
self = self.rest
return string + str(self.first) + '>'
def digits(n):
    """Return the digits of n as a linked list (most significant first).

    >>> digits(0) is Link.empty
    True
    >>> digits(543)
    Link(5, Link(4, Link(3)))
    """
    result = Link.empty
    while n > 0:
        # Peel off the least-significant digit and push it on the front,
        # so the finished list reads most-significant first.
        n, last = divmod(n, 10)
        result = Link(last, result)
    return result
class VendingMachine:
    """A vending machine that vends one *product* for a fixed *price*.

    Tracks remaining stock and the customer's deposited balance.  Every
    operation returns a human-readable status string: deposits are
    refused (and refunded) while out of stock, and vending returns any
    change and resets the balance.
    """
    def __init__(self, product, price):
        self.product = product
        self.price = price
        self.stock = 0
        self.balance = 0

    def restock(self, n):
        """Add *n* items and report the new stock level."""
        self.stock += n
        return 'Current {0} stock: {1}'.format(self.product, self.stock)

    def deposit(self, n):
        """Add $n to the balance, refunding immediately when out of stock."""
        if self.stock == 0:
            return 'Machine is out of stock. Here is your ${0}.'.format(n)
        self.balance += n
        return 'Current balance: ${0}'.format(self.balance)

    def vend(self):
        """Dispense one item once the balance covers the price."""
        if self.stock == 0:
            return 'Machine is out of stock.'
        shortfall = self.price - self.balance
        if shortfall > 0:
            return 'You must deposit ${0} more.'.format(shortfall)
        message = 'Here is your {0}'.format(self.product)
        if shortfall != 0:
            # Negative shortfall means the customer overpaid: return change.
            message += ' and ${0} change'.format(-shortfall)
        self.balance = 0
        self.stock -= 1
        return message + '.'
class MissManners:
    """Forwards a method call to a wrapped object, but only when asked politely.

    ``ask('please <method>', *args)`` invokes ``<method>`` on the wrapped
    object with *args*; a message not starting with 'please ' is refused,
    and an unknown method name earns an apology.  Instances compose: a
    MissManners can wrap another MissManners.
    """
    def __init__(self, obj):
        self.obj = obj

    def ask(self, message, *args):
        magic_word = 'please '
        if not message.startswith(magic_word):
            return 'You must learn to say please first.'
        # Everything after the magic word is the method name to forward.
        attempt = message[len(magic_word):]
        if not hasattr(self.obj, attempt):
            return 'Thanks for asking, but I know not how to {0}.'.format(attempt)
        return getattr(self.obj, attempt)(*args)
| [
"cdysbt@gmail.com"
] | cdysbt@gmail.com |
d161f283b3cca035a2f287a0c8a03a93f697ae0f | 50edd705fb055456c072cce631e9a5b7737b6082 | /Doraemon/Model/mongopie.py | 166ceb4c05c426821d36ffdcbbe8e19008f62cd6 | [] | no_license | x0fengluo/Doraemon | 829dd557ee90c74942ecb3ad2374de8dbf8c7e38 | 4f1915486f4d03d60401db7338dbd52df63953f5 | refs/heads/master | 2021-05-16T13:14:13.805073 | 2017-10-12T06:23:13 | 2017-10-12T06:23:13 | 105,358,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,006 | py | import os
from urllib.parse import urlparse
from datetime import datetime
from pymongo import MongoClient, ASCENDING, DESCENDING
from pymongo.cursor import Cursor
from gridfs import GridFS
from bson.objectid import ObjectId, InvalidId
from collections import defaultdict
import pytz
def utc_now():
    """Current time as a timezone-aware UTC datetime.

    Uses datetime.now(tz) directly instead of the naive-then-replace
    utcnow() pattern (datetime.utcnow() is deprecated in modern Python
    and the two-step dance is error-prone).
    """
    return datetime.now(pytz.utc)
# Simple signal hub
class SignalSlot(object):
    """Minimal signal hub: register handlers per sender and dispatch to them.

    Handlers live in per-sender lists; disconnect() nulls a slot out
    (rather than removing it) so indices returned by connect() remain
    valid.  A sender of None is normalised to the key 'root'.
    """
    def __init__(self):
        self.clear()

    def connect(self, sender, handler):
        """Register *handler* for *sender*; return its slot index."""
        if sender is None:
            sender = 'root'
        slots = self.handlers[sender]
        slots.append(handler)
        return len(slots) - 1

    def disconnect(self, sender, index):
        """Deactivate the handler previously registered at *index*."""
        if sender is None:
            sender = 'root'
        self.handlers[sender][index] = None

    def send(self, sender, **kw):
        """Invoke every still-active handler registered for *sender*."""
        if sender is None:
            sender = 'root'
        for handler in self.handlers[sender]:
            if handler:
                handler(sender, **kw)

    def clear(self):
        """Drop every registered handler."""
        self.handlers = defaultdict(list)
class ModelSignal():
    """Bundle of the model lifecycle signals exposed by this module."""
    def __init__(self):
        # One independent SignalSlot per lifecycle event.
        for event in ('pre_update', 'post_update', 'pre_create',
                      'post_create', 'recycled', 'revived', 'will_erase'):
            setattr(self, event, SignalSlot())

# Module-wide singleton used by Model.save()/erase()/recycle().
modelsignal = ModelSignal()
def merge_condition_dicts(dict1, dict2):
    """Merge Mongo-style condition dict *dict2* into *dict1* in place.

    When both sides hold a dict for the same key (two complex
    conditions such as {'$gt': ...} and {'$lt': ...}) the operator
    dicts are merged; otherwise dict2's value simply wins.
    """
    # FIX: .iteritems() is Python 2 only; this module imports
    # urllib.parse and therefore runs on Python 3.
    for k, v2 in dict2.items():
        v1 = dict1.get(k)
        if isinstance(v1, dict) and isinstance(v2, dict):
            # Merge 2 complicated conditions
            v1.update(v2)
            dict1[k] = v1
        else:
            dict1[k] = v2
def force_string_keys(datadict, encoding='utf-8'):
    """Return a copy of *datadict* whose keys are text (str).

    The original encoded every key with ``k.encode(encoding)`` — the
    Python 2 idiom for producing ``str`` keys from unicode.  Under
    Python 3 (this module imports urllib.parse) that yields *bytes*
    keys, which break ``cls(**force_string_keys(...))`` in Model.get();
    so bytes keys are decoded and str keys passed through instead.
    """
    return dict(
        (k.decode(encoding) if isinstance(k, bytes) else k, v)
        for k, v in datadict.items()
    )
# Fallback (host, port, dbname) triple, overridable via the
# MONGODB_CONNECTION environment variable or set_defaultdb().
default_db = ('localhost', 27017, 'modeltest')
dbconn = os.getenv('MONGODB_CONNECTION')
if dbconn:
    # We accept url like mongo://127.0.0.1:27017/modeltest' or
    # 'tcp://127.0.0.1:27017/modeltest'
    parsed = urlparse(dbconn)
    if parsed.scheme in ('tcp', 'mongo'):
        # NOTE(review): assumes the netloc always carries an explicit
        # ':port'; a port-less URL would raise here — confirm intended.
        host, port = parsed.netloc.split(':')
        dbname = parsed.path[1:]
        port = int(port)
        default_db = (host, port, dbname)
def set_defaultdb(host, port, name):
    """Override the module-wide default (host, port, dbname) triple."""
    global default_db
    default_db = (host, port, name)
# One MongoClient per (host, port), shared by every model class.
_conn_pool = {}

def get_server(host, port, db_name):
    """Return database *db_name* from a cached, tz-aware MongoClient."""
    key = (host, port)
    if key not in _conn_pool:
        _conn_pool[key] = MongoClient(host, port, tz_aware=True)
    return _conn_pool[key][db_name]
class CursorWrapper:
    """Lazy, chainable wrapper around a pymongo cursor for a Model class.

    Stores the query pieces (conditions, sort order, optional slice) and
    only builds a fresh pymongo cursor when results are consumed, so
    wrappers can be chained (``find().sort()[a:b]``) cheaply.
    """
    index = None

    def __init__(self, cls, conditions=None, orders=None, index=None):
        if conditions:
            self.conditions = conditions
        else:
            self.conditions = {}
        if orders:
            self.orders = orders
        else:
            self.orders = []
        if index:
            self.index = index
        self.cls = cls

    def get_cursor(self):
        """Build a new pymongo cursor for the stored query."""
        col = self.cls.collection()
        cursor = col.find(self.conditions)
        if self.orders:
            cursor = cursor.sort(self.orders)
        if self.index:
            cursor = cursor.__getitem__(self.index)
        return cursor

    def __len__(self):
        return self.get_cursor().count()

    def __bool__(self):
        # FIX: __nonzero__ is the Python 2 spelling of this hook and is
        # never called on Python 3 (this module imports urllib.parse);
        # truthiness only worked by accident through __len__.
        return self.get_cursor().count() > 0

    __nonzero__ = __bool__  # keep the Python 2 name for compatibility

    def __repr__(self):
        return repr(list(self))

    def __iter__(self):
        def cursor_iter():
            cursor = self.get_cursor()
            for datadict in cursor:
                yield self.cls.get_from_data(datadict)
        return iter(cursor_iter())

    def paginate(self, page=1, count=20):
        """Slice the query to the given 1-based page of *count* items."""
        if page < 1:
            page = 1
        index = slice((page - 1) * count, page * count)
        return self.__getitem__(index)

    def __getitem__(self, index):
        if isinstance(index, slice):
            # Slicing stays lazy: return a new wrapper carrying the slice.
            return CursorWrapper(
                self.cls,
                conditions=self.conditions,
                orders=self.orders,
                index=index)
        else:
            assert isinstance(index, (int, float))
            data = self.get_cursor().__getitem__(index)
            assert isinstance(data, dict)
            return self.cls.get_from_data(data)

    def count(self):
        """Number of documents matching the stored conditions."""
        return self.get_cursor().count()

    def sort(self, *fields):
        """Return a new wrapper sorted by *fields* ('-name' = descending)."""
        cols = self.cls.make_sort(fields)
        return CursorWrapper(self.cls,
                             conditions=self.conditions,
                             orders=self.orders + cols
                             )

    def find(self, **kwargs):
        """Return a new wrapper with extra conditions merged in."""
        kwargs = self.cls.filter_condition(kwargs)
        conditions = self.conditions.copy()
        merge_condition_dicts(conditions, kwargs)
        return CursorWrapper(self.cls,
                             conditions=conditions,
                             orders=self.orders)
class Field(object):
    """ Field that defines the schema of a DB
    Much like the field of relation db ORMs
    A proxy of a object's attribute

    The value is stored on the owning instance under '_<fieldname>';
    reading an unset field yields the default.  Subclasses coerce values
    in __set__ and map field names to document keys via get_key().
    """
    def __init__(self, default=None, **args):
        self._fieldname = None
        self.default_value = default

    @property
    def fieldname(self):
        return self._fieldname

    @fieldname.setter
    def fieldname(self, v):
        self._fieldname = v

    def get_raw(self, obj):
        """Raw stored value, before any descriptor post-processing."""
        return self.__get__(obj)

    def __get__(self, obj, type=None):
        return getattr(obj, self.get_obj_key(), self.default_value)

    def __set__(self, obj, value):
        # Assigning None is a no-op: the default keeps showing through.
        if value is not None:
            setattr(obj, self.get_obj_key(), value)

    def __del__(self):
        pass

    def get_key(self):
        """Key used for this field inside the stored document."""
        return self.fieldname

    def get_obj_key(self):
        """Attribute name used to stash the value on the instance."""
        return '_' + self.fieldname
class BooleanField(Field):
    """Field that coerces every assigned value to True/False."""
    def __init__(self, default=False, **kwargs):
        super(BooleanField, self).__init__(default=default, **kwargs)

    def __set__(self, obj, value):
        # bool(value) is the direct spelling of `not not value`.
        super(BooleanField, self).__set__(obj, bool(value))
class IntegerField(Field):
    """Field that coerces assigned values to int.

    FIX: the original coerced with ``float(value)`` — almost certainly a
    copy/paste from FloatField, since an *Integer* field storing floats
    contradicts both its name and SequenceField's integer counters.
    """
    def __init__(self, default=0, **kwargs):
        super(IntegerField, self).__init__(default=default, **kwargs)

    def __set__(self, obj, value):
        value = int(value)  # was float(value)
        super(IntegerField, self).__set__(obj, value)
class FloatField(Field):
    """Field that coerces assigned values to float."""
    def __init__(self, default=0, **kwargs):
        super(FloatField, self).__init__(default=default, **kwargs)

    def __set__(self, obj, value):
        super(FloatField, self).__set__(obj, float(value))
class SequenceField(IntegerField):
    """Auto-incrementing integer field; *key* names its counter document.

    Model.save() fills unset sequence fields via SequenceModel.get_next(key).
    """
    def __init__(self, key, default=0, **kwargs):
        super(SequenceField, self).__init__(default=default, **kwargs)
        self.key = key
class StringField(Field):
    def __set__(self, obj, value):
        # No coercion: stores whatever was assigned (see Field.__set__).
        super(StringField, self).__set__(obj, value)
class CollectionField(Field):
    """Field whose default is a fresh mutable container, created lazily.

    Creating the default per-read (and immediately storing it) avoids
    the shared-mutable-default trap: each instance gets its own
    list/dict the first time the field is accessed.
    """
    def __get__(self, obj, type=None):
        val = super(CollectionField, self).__get__(obj, type=type)
        if val is None:
            val = self.get_default_value()
            self.__set__(obj, val)
        return val

    def get_default_value(self):
        # FIX: `raise NotImplemented` raises a TypeError, because
        # NotImplemented is a sentinel value, not an exception class.
        raise NotImplementedError
class ArrayField(CollectionField):
    def get_default_value(self):
        # Fresh list per instance (see CollectionField.__get__).
        return []
class ChildrenField(ArrayField):
    """Array field holding embedded child documents.

    Stored as a list of plain dicts; reads wrap each dict in *child_cls*,
    writes accept child model instances (serialised via get_dict()) or
    ready-made dicts.
    """
    def __init__(self, child_cls, **kw):
        super(ChildrenField, self).__init__(**kw)
        self.child_cls = child_cls

    def get_child_class(self, obj):
        return self.child_cls

    def __get__(self, obj, type=None):
        raw = super(ChildrenField, self).__get__(obj, type=type)
        return [self.child_cls(**item) for item in raw]

    def __set__(self, obj, arr):
        value = [v.get_dict() if isinstance(v, Model) else v for v in arr]
        super(ChildrenField, self).__set__(obj, value)
class DictField(CollectionField):
    def get_default_value(self):
        # Fresh dict per instance (see CollectionField.__get__).
        return {}
class ObjectIdField(Field):
    """Field storing a bson ObjectId; hex strings are converted on assignment."""
    @classmethod
    def toObjectId(cls, v):
        """Normalise *v* to an ObjectId (None passes through)."""
        if v is None:
            return None
        if isinstance(v, str):
            # TODO: handle InvalidId for malformed hex strings
            return ObjectId(v)
        assert isinstance(v, ObjectId)
        return v

    def __init__(self, default=None, **kwargs):
        super(ObjectIdField, self).__init__(default=default, **kwargs)

    def __set__(self, obj, value):
        super(ObjectIdField, self).__set__(obj, self.toObjectId(value))

    def get_key(self):
        # ObjectId fields are stored under '_<name>' in the document too
        # (matching Mongo's '_id' convention).
        return '_' + self.fieldname
class FileField(ObjectIdField):
    """Stores a GridFS file; the document keeps only the file's ObjectId.

    Reading returns the GridFS file object (or None).  Assigning either a
    string (written out as a new GridFS file) or an existing GridFS file
    replaces — and deletes — any previously stored file.
    """
    def get_obj_key(self):
        return '_' + self.fieldname
    @staticmethod
    def get_fs(obj):
        # GridFS handle for the database this model class lives in.
        cls = obj.__class__
        database = getattr(cls, '__database__', default_db)
        server = get_server(*database)
        return GridFS(server)
    def __get__(self, obj, type=None):
        objid = super(FileField, self).__get__(obj, type=type)
        if not objid:
            return None
        fs = self.get_fs(obj)
        f = fs.get(objid)
        return f
    def get_raw(self, obj):
        # The stored ObjectId itself, without fetching the file contents.
        v = getattr(obj, self.get_obj_key(),
                    self.default_value)
        return v
    def __set__(self, obj, value):
        # Order matters: the old GridFS file is deleted *before* the new
        # id is stored, so replacing a file does not leak the old blob.
        fs = self.get_fs(obj)
        old_f = self.__get__(obj, type=None)
        if old_f:
            fs.delete(old_f._id)
        if isinstance(value, str):
            # A raw string becomes a brand-new GridFS file.
            f = fs.new_file()
            f.write(value)
            f.close()
            value = f
        super(FileField, self).__set__(obj, value._id)
class ReferenceField(ObjectIdField):
    """Field referencing another Model document by its ObjectId.

    Pass ref_cls='self' for self-referencing models.  Reads resolve the
    stored id to a full model instance via ref_cls.get(); writes accept
    either a model instance or a raw id.
    """
    def __init__(self, ref_cls, default=None, **kwargs):
        super(ReferenceField, self).__init__(default=default, **kwargs)
        self.ref_cls = ref_cls

    def get_raw(self, obj):
        # The stored ObjectId, without dereferencing.
        return super(ReferenceField, self).__get__(obj)

    def get_ref_class(self, obj):
        return obj.__class__ if self.ref_cls == 'self' else self.ref_cls

    def __get__(self, obj, type=None):
        objid = super(ReferenceField, self).__get__(obj, type=type)
        if objid is self.default_value:
            return self.default_value
        return self.get_ref_class(obj).get(objid)

    def __set__(self, obj, value):
        if isinstance(value, self.get_ref_class(obj)):
            value = ObjectId(value.id)
        super(ReferenceField, self).__set__(obj, value)
class DateTimeField(Field):
    """Field holding a timezone-aware datetime.

    auto_now_add stamps the creation time once; auto_now re-stamps on
    every save.  Both flags are applied (independently) in Model.save().
    """
    def __init__(self, default=None, **kwargs):
        self.auto_now_add = kwargs.get('auto_now_add', False)
        self.auto_now = kwargs.get('auto_now', False)
        super(DateTimeField, self).__init__(default=default,
                                            **kwargs)
    def __get__(self, obj, type=None):
        val = super(DateTimeField, self).__get__(obj,
                                                 type=type)
        if val is None:
            # NOTE(review): requiring *both* flags looks odd —
            # Model.save() treats auto_now and auto_now_add as
            # independent flags, so this was probably meant to be `or`.
            # Confirm before changing.
            if self.auto_now and self.auto_now_add:
                val = utc_now()
                self.__set__(obj, val)
        return val
    def __set__(self, obj, value):
        if value is not None:
            # Only aware/naive datetime objects are accepted.
            assert isinstance(value, datetime)
            super(DateTimeField, self).__set__(obj, value)
# Every Model subclass registered at class-creation time; used to clear
# all in-process object caches at once (e.g. between web requests).
cache_classes = set()
def clear_obj_cache():
    """Reset the per-class object caches of all registered model classes."""
    for cls in cache_classes:
        if cls.use_obj_cache:
            cls.obj_cache = {}
class ModelMeta(type):
    """ The meta class of Model
    Do some registering of Model classes
    """
    # Accumulated class dict per model class name, so subclasses of a
    # model inherit the Field definitions declared on their bases.
    __clsdicts__ = {}
    def __new__(meta, clsname, bases, classdict):
        allclassdict = {}
        for basecls in bases:
            baseclsname = basecls.__name__
            if baseclsname != 'Model':
                # Pull in fields declared on non-root model base classes.
                allclassdict.update(
                    meta.__clsdicts__.get(baseclsname, {}))
        allclassdict.update(classdict)
        meta.__clsdicts__[clsname] = allclassdict
        cls = type.__new__(meta, clsname, bases, allclassdict)
        if clsname == 'Model':
            # The abstract root class gets no collection/field setup.
            return cls
        cls.initialize()
        return cls
class Model(object, metaclass=ModelMeta):
    """ The model of couchdb
    A model defines the schema of a database using its fields
    Customed model can be defined by subclassing the Model class.
    """
    # FIX: declare the metaclass with Python 3 syntax.  The old
    # `__metaclass__ = ModelMeta` class attribute is silently ignored on
    # Python 3 (this module imports urllib.parse, so it cannot run on
    # Python 2), which meant initialize() was never invoked.
    index_list = []        # (index_spec, kwargs) pairs for ensure_indices()
    use_obj_cache = True   # keep a per-class in-process cache of objects

    def __str__(self):
        """
        Only use unicode method
        """
        if hasattr(self, '__unicode__'):
            return self.__unicode__()
        return super(Model, self).__str__()

    @classmethod
    def initialize(cls):
        """ Initialize the necessary stuffs of a model class
        Including:
        * Touch db if not exist.
        Called in ModelMeta's __new__
        """
        if cls.use_obj_cache:
            cls.obj_cache = {}
            cache_classes.add(cls)
        cls.col_name = cls.__name__.lower()
        # Every model implicitly gets an ObjectId primary key field 'id'.
        idfield = ObjectIdField()
        cls.id = idfield
        cls.fields = [idfield]
        cls.field_map = {}
        for fieldname, v in vars(cls).items():
            if isinstance(v, Field):
                v.fieldname = fieldname
                cls.fields.append(v)
                cls.field_map[fieldname] = v

    @classmethod
    def ensure_indices(cls):
        ''' It's better to use js instead of this functions'''
        col = cls.collection()
        for idx, kwargs in cls.index_list:
            col.ensure_index(idx, **kwargs)

    @classmethod
    def get_auto_incr_value(cls):
        pass

    @classmethod
    def collection(cls):
        """pymongo collection backing this model class."""
        database = getattr(cls, '__database__', default_db)
        server = get_server(*database)
        return server[cls.col_name]

    @classmethod
    def recycle_collection(cls):
        """Companion collection holding recycled (soft-deleted) documents."""
        database = getattr(cls, '__database__', default_db)
        server = get_server(*database)
        return server['%s_recycle' % cls.col_name]

    @classmethod
    def create(cls, **kwargs):
        """ Create a new object
        """
        # FIX: this factory uses `cls` but was missing @classmethod, so
        # SomeModel.create(...) raised a TypeError (no cls supplied).
        model_obj = cls(**kwargs)
        model_obj.save()
        return model_obj

    def get_addtime(self):
        """Creation time derived from the ObjectId (None when unsaved)."""
        if isinstance(self.id, ObjectId):
            return self.id.generation_time

    @classmethod
    def make_sort(cls, fields):
        """Translate ['-name', ...] into pymongo (key, direction) pairs."""
        cols = []
        if not fields:
            return cols
        for f in fields:
            if f.startswith('-'):
                order = DESCENDING
                f = f[1:]
            else:
                order = ASCENDING
            if f in cls.field_map:
                f = cls.field_map[f].get_key()
            cols.append((f, order))
        return cols

    @classmethod
    def make_sort_dict(cls, fields):
        """Like make_sort(), but as a {key: +-1} dict for find_and_modify."""
        cols = {}
        if not fields:
            return cols
        for f in fields:
            if f.startswith('-'):
                f = f[1:]
                order = -1
            else:
                order = 1
            if f in cls.field_map:
                f = cls.field_map[f].get_key()
            cols[f] = order
        return cols

    @classmethod
    def filter_condition(cls, conditions):
        """Map field names to document keys and model refs to their ids."""
        newcondition = {}
        if conditions is None:
            conditions = {}
        # FIX: .iteritems() is Python 2 only.
        for k, v in conditions.items():
            if isinstance(v, Model):
                v = v.id
            if k in cls.field_map:
                field = cls.field_map[k]
                k = field.get_key()
            newcondition[k] = v
        return newcondition

    @classmethod
    def find_and_modify(cls, query=None, update=None, sort=None, upsert=False, new=False):
        """
        Atomic find and modify
        """
        if cls.use_obj_cache:
            # The modification invalidates cached objects wholesale.
            cls.obj_cache = {}
        col = cls.collection()
        query = cls.filter_condition(query)
        sort = cls.make_sort_dict(sort)
        update = cls.filter_condition(update)
        datadict = col.find_and_modify(query=query,
                                       update=update,
                                       sort=sort,
                                       upsert=upsert, new=new)
        if datadict:
            return cls.get_from_data(datadict)

    @classmethod
    def increment_field(cls, field, value=1, **query):
        """Atomically $inc *field* by *value* on the first match."""
        return cls.find_and_modify(
            query=query,
            update={
                '$inc': {field: value}
            })

    @classmethod
    def find_and_remove(cls, query=None, sort=None):
        """
        Atomic way to dequeue an object
        """
        if cls.use_obj_cache:
            cls.obj_cache = {}
        col = cls.collection()
        query = cls.filter_condition(query)
        sort = cls.make_sort_dict(sort)
        datadict = col.find_and_modify(query=query,
                                       sort=sort,
                                       remove=True)
        if datadict:
            return cls.get_from_data(datadict)

    @classmethod
    def find(cls, **conditions):
        """Return a lazy CursorWrapper over the matching documents."""
        conditions = cls.filter_condition(conditions)
        return CursorWrapper(cls, conditions=conditions)

    @classmethod
    def find_one(cls, **conditions):
        """Return the first matching object, or None."""
        conditions = cls.filter_condition(conditions)
        col = cls.collection()
        datadict = col.find_one(conditions)
        if datadict:
            return cls.get_from_data(datadict)
        else:
            return datadict

    @classmethod
    def count(cls):
        """Total number of documents in this model's collection."""
        return cls.collection().count()

    @classmethod
    def remove(cls, **conditions):
        """Delete all matching documents (and drop the object cache)."""
        if cls.use_obj_cache:
            cls.obj_cache = {}
        conditions = cls.filter_condition(conditions)
        return cls.collection().remove(conditions)

    def erase(self):
        """Permanently delete this document (no recycle copy kept)."""
        if self.use_obj_cache:
            self.__class__.obj_cache.pop(self._id, None)
        modelsignal.will_erase.send(self.__class__,
                                    instance=self)
        return self.collection().remove({'_id': self._id})

    def recycle(self):
        """Move this document into the recycle collection, then erase it."""
        col = self.recycle_collection()
        objid = col.save(self.get_dict())
        assert objid == self._id
        modelsignal.recycled.send(self.__class__,
                                  instance=self)
        self.erase()
        return objid

    @classmethod
    def revive(cls, objid):
        """Restore a previously recycled document by id."""
        rcol = cls.recycle_collection()
        obj = rcol.find_one({'_id': objid})
        if obj:
            col = cls.collection()
            col.save(obj)
            obj = cls.get(objid)
            modelsignal.revived.send(cls,
                                     instance=obj)
            return obj

    @classmethod
    def multi_get(cls, objid_list, exclude_null=True):
        """ Get multiple objects in batch mode to reduce the time
        spent on network traffic
        """
        obj_dict = {}
        for obj in cls.find(_id={'$in': objid_list}):
            obj_dict[obj._id] = obj
            if cls.use_obj_cache:
                cls.obj_cache[obj._id] = obj
        # Yield in the caller's requested order.
        for objid in objid_list:
            obj = obj_dict.get(objid)
            if obj or not exclude_null:
                yield obj

    @classmethod
    def get(cls, objid):
        """ Get an object by objectid
        """
        if objid is None:
            return None
        if isinstance(objid, str):
            try:
                objid = ObjectId(objid)
            except InvalidId:
                return None
        assert isinstance(objid, ObjectId)
        if cls.use_obj_cache:
            obj = cls.obj_cache.get(objid)
            if obj:
                return obj
        col = cls.collection()
        kw = {'_id': objid}
        datadict = col.find_one(kw)
        if datadict is not None:
            obj = cls(**force_string_keys(datadict))
            if cls.use_obj_cache:
                cls.obj_cache[objid] = obj
            return obj

    def __eq__(self, other):
        # Two saved objects of the same class with the same id are equal.
        return (self.__class__ == other.__class__ and
                self.id and other.id and
                self.id == other.id)

    def __hash__(self):
        return hash(self.id)

    def save(self):
        """
        You should be very cautious if you have setup signal handlers, and try
        to call Model.save in the signal handler, you will probably produce a
        Model.save recursion.
        E.g. setup a pre_update signal handler for User, in that handler you
        try to call User.save directly or code some where.
        Think it over.
        """
        new = self.id is None
        col = self.collection()
        for field in self.fields:
            if new:
                # Fill unset sequence fields from their atomic counters.
                if (isinstance(field, SequenceField) and
                        not getattr(self, field.fieldname, None)):
                    setattr(self, field.fieldname, SequenceModel.get_next(field.key))
            if isinstance(field, DateTimeField):
                if field.auto_now:
                    setattr(self, field.fieldname, utc_now())
                elif (field.auto_now_add
                      and new
                      and not getattr(self, field.fieldname, None)):
                    setattr(self, field.fieldname, utc_now())
        if new:
            modelsignal.pre_create.send(self.__class__,
                                        instance=self)
        else:
            modelsignal.pre_update.send(self.__class__, instance=self)
        if self.use_obj_cache:
            self.__class__.obj_cache.pop(self.id, None)
        self.id = col.save(self.get_dict())
        if new:
            self.on_created()
            modelsignal.post_create.send(self.__class__,
                                         instance=self)
        else:
            modelsignal.post_update.send(self.__class__,
                                         instance=self)

    def on_created(self):
        """Hook invoked after the first successful save."""
        pass

    def get_dict(self):
        """ Get the dict representation of an object's fields
        """
        info_dict = {}
        for field in self.fields:
            key = field.get_key()
            value = field.get_raw(self)
            if value is not None:
                info_dict[key] = value
        return info_dict

    @classmethod
    def get_from_data(cls, datadict):
        """Build an instance from a raw Mongo document."""
        datadict = force_string_keys(datadict)
        return cls(**datadict)

    def __init__(self, **kwargs):
        # FIX: .iteritems() is Python 2 only.
        for key, value in kwargs.items():
            setattr(self, key, value)
class SequenceModel(Model):
    """Backing collection for SequenceField counters (one doc per key)."""
    seq = IntegerField()

    @classmethod
    def get_next(cls, key):
        """Atomically increment and return the counter stored under *key*."""
        col = cls.collection()
        doc = col.find_and_modify(query={'_id': key},
                                  update={'$inc': {'seq': 1}},
                                  upsert=True, new=True)
        if doc:
            return doc['seq']
        return doc
| [
"x0fengluo@gmail.com"
] | x0fengluo@gmail.com |
ab37819178678efc8832a481c7d0f60c89cf7dfe | c27e78d35cdc802e4790280c384a0f97acf636ef | /src/rulesTest.py | c7cda2cbd1a74e52e447aefbc5576b0f6f3b5dc3 | [] | no_license | undersea/Special_Topic | 99e424d9e443523a4d880ef478455bb75d7c82cd | 7bf7ed2c92b864d99790b927965bad819bfb7cfb | refs/heads/master | 2020-03-25T04:01:26.909441 | 2011-05-30T03:26:53 | 2011-05-30T03:26:53 | 3,587,506 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | from unittest import TestCase, main
from rules import Degree
class TestRules(TestCase):
    """Unit tests for the Degree rule container."""

    def setUp(self):
        print("setUp")
        self.rules = Degree()

    def tearDown(self):
        print("tearDown")
        del self.rules

    def testAdd(self):
        # add() should append exactly one rule to an initially empty list.
        before = len(self.rules.rules)
        rule = ("one of", "one")
        self.rules.add(rule)
        self.assertEqual(before, 0)
        self.assertEqual(len(self.rules.rules), 1)

    def testDelete(self):
        # delete() should remove a rule that was inserted directly.
        rule = ("one of", "one")
        self.rules.rules.append(rule)
        self.assertEqual(len(self.rules.rules), 1)
        self.rules.delete(rule)
        self.assertEqual(len(self.rules.rules), 0)
if __name__ == "__main__":
    # Run the TestCase via unittest's CLI when executed directly.
    main()
| [
"terrasea@gmail.com"
] | terrasea@gmail.com |
1c1b98d8fbc186621625663aa5a146ee1935590c | dabe85f4b2a6f683bfaa7decd358b2f282430350 | /com.lxh/learning2/day003_branch/__init__.py | 495e7143ee16c1f419387ed2c9e074171e671b28 | [] | no_license | hnz71211/Python-Basis | e6d239df7bb2873600173d05c32c136c28cc9f4b | 2893d0d3402ee0bfb5292e2f6409211845a88e26 | refs/heads/master | 2020-11-29T21:37:06.774000 | 2020-04-13T04:15:04 | 2020-04-13T04:15:04 | 230,220,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | # if else
# Exercise 1: convert between imperial inches and metric centimetres.
value = float(input('请输入长度: '))
unit = input('请输入单位: ')
if unit == 'in' or unit == '英寸':
    print('%f英寸 = %f厘米' % (value, value * 2.54))
elif unit == 'cm' or unit == '厘米':
    print('%f厘米 = %f英寸' % (value, value / 2.54))
else:
    print('请输入有效的单位')
# Exercise 2: convert a percentage score into a letter grade.
score = float(input('请输入成绩: '))
if score >= 90:
    grade = 'A'
elif score >= 80:
    grade = 'B'
elif score >= 70:
    grade = 'C'
elif score >= 60:
    grade = 'D'
else:
    grade = 'E'
print('对应的等级是:', grade)
# Exercise 3: read three side lengths; if they can form a triangle,
# print its perimeter and area (Heron's formula).
a = float(input('a = '))
b = float(input('b = '))
c = float(input('c = '))
if a + b > c and a + c > b and b + c > a:
    print('c: %f' % (a + b + c))      # perimeter (label kept as-is)
    p = (a + b + c) / 2               # semi-perimeter
    s = (p * (p - a) * (p - b) * (p - c)) ** 0.5
    print('s: %f' % s)                # area
else:
print('no..') | [
"hxl71396812@gmail.com"
] | hxl71396812@gmail.com |
379027171959b6bbee4347f0cb89c5c6d832fc9a | 497309eaac79efb53ef52850e06979f4cb25e92a | /python/simple_oop.py | 9e5692514b37a63ddff1aec533c61d50f5cfef07 | [
"Apache-2.0"
] | permissive | mbdebian/code_kata_sessions | d154b4db5685eb8a465068cabeca813cd64dd228 | 2fed7f0517a455e3fdecf822a25f12fc962a0c15 | refs/heads/master | 2021-01-22T18:15:00.612563 | 2017-03-26T11:07:29 | 2017-03-26T11:07:29 | 85,070,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | # This is a messy playground for OOP concepts
# A small demo class contrasting plain accessor methods with properties.
class ClassA:
    def __init__(self):
        """Initialise default id/name and announce construction."""
        self.__id = 0
        self.__name = "ClassA - Name"
        print("Class A - Instance")

    # Pythonic property-based access to the private name attribute.
    @property
    def name(self):
        return self.__name

    @name.setter
    def name(self, name):
        self.__name = name

    # Java-style accessor pair, kept for comparison with the property.
    def getId(self):
        return self.__id

    def setId(self, id):
        self.__id = id

    def methodOne(self):
        print("method One CALLED!")
# This class inherits from "ClassA"
class ClassB(ClassA):
    __new_id = 0
    __new_name = "ClassB - Name"
    def __init__(self):
        # FIX: `super().__init__(self)` passed self twice — bound super
        # already supplies it — so instantiating ClassB raised TypeError.
        super().__init__()
# This is an abstract class
class AbstractClass | [
"mbdebian@gmail.com"
] | mbdebian@gmail.com |
4698bbd10d6f9865b9e14c4ccd5f0c59b5bd7996 | 8f506513cb73d9bdb5dbdd9084aaba020b1efbea | /Course_1-Algorithmic_Toolbox/Week-1/Excercise_Challenges/2_maximum_pairwise_product/max_pairwise_product.py | 16ef706e59671dba4d782c766223be8cf322274f | [] | no_license | KhanAjmal007/Data-Structures-and-Algorithms-Specialization-Coursera | 1255ecf877ecd4a91bda8b85e9c96566fe6d5e4d | ab6e618c5d8077febb072091e80c16f5f1a15465 | refs/heads/master | 2023-03-21T04:18:04.580423 | 2020-07-11T07:18:06 | 2020-07-11T07:18:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | def max_pairwise_product(numbers):
max1 = -999
max2 = -9999
for value in numbers:
if value > max1:
max2 = max1
max1 = value
elif value > max2:
max2 = value
return max1 * max2
if __name__ == '__main__':
    # First stdin line: element count (read but unused); second line: the numbers.
    input_n = int(input())
    input_numbers = [int(x) for x in input().split()]
    print(max_pairwise_product(input_numbers))
| [
"mokit.aust@gmail.com"
] | mokit.aust@gmail.com |
f2f4d6b715cc9b11ba5174b51906804ad1a1ca7e | 544cfadc742536618168fc80a5bd81a35a5f2c99 | /tools/external_updater/base_updater.py | 18d4435858c7a22b295ca26455f4abbaf44d16d4 | [] | no_license | ZYHGOD-1/Aosp11 | 0400619993b559bf4380db2da0addfa9cccd698d | 78a61ca023cbf1a0cecfef8b97df2b274ac3a988 | refs/heads/main | 2023-04-21T20:13:54.629813 | 2021-05-22T05:28:21 | 2021-05-22T05:28:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,590 | py | # Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for all updaters."""
from pathlib import Path
import fileutils
# pylint: disable=import-error
import metadata_pb2 # type: ignore
class Updater:
    """Base Updater that defines methods common for all updaters."""
    def __init__(self, proj_path: Path, old_url: metadata_pb2.URL,
                 old_ver: str) -> None:
        self._proj_path = fileutils.get_absolute_project_path(proj_path)
        self._old_url = old_url
        self._old_ver = old_ver
        # Work on a copy so that updating the "new" URL never mutates the
        # metadata object the caller handed in.
        self._new_url = metadata_pb2.URL()
        self._new_url.CopyFrom(old_url)
        self._new_ver = old_ver
        self._has_errors = False

    def is_supported_url(self) -> bool:
        """Returns whether the url is supported."""
        raise NotImplementedError()

    def check(self) -> None:
        """Checks whether a new version is available."""
        raise NotImplementedError()

    def update(self) -> None:
        """Updates the package.

        Has to call check() before this function.
        """
        raise NotImplementedError()

    @property
    def project_path(self) -> Path:
        """Gets absolute path to the project."""
        return self._proj_path

    @property
    def current_version(self) -> str:
        """Gets the current version."""
        return self._old_ver

    @property
    def current_url(self) -> metadata_pb2.URL:
        """Gets the current url."""
        return self._old_url

    @property
    def latest_version(self) -> str:
        """Gets latest version."""
        return self._new_ver

    @property
    def latest_url(self) -> metadata_pb2.URL:
        """Gets URL for latest version."""
        return self._new_url

    @property
    def has_errors(self) -> bool:
        """Gets whether this update had an error."""
        return self._has_errors

    def use_current_as_latest(self) -> None:
        """Uses current version/url as the latest to refresh project.

        The URL is copied (as in __init__) rather than aliased, so later
        mutation of latest_url cannot corrupt current_url.
        """
        self._new_ver = self._old_ver
        self._new_url = metadata_pb2.URL()
        self._new_url.CopyFrom(self._old_url)
| [
"rick_tan@qq.com"
] | rick_tan@qq.com |
8a60ac27efacef9ede7031e89191f681aedcc704 | 48161ae6e9e1a328f1d4ccec84ccc2e63ca62b25 | /device monitoring.py | c8135f7dabbc71c78eba89df33704d5d81304696 | [] | no_license | palvai-harshitha/security | 41808516d0f566f21487be9d864781d3159a35ab | a5b3e0a7fc3bec27561b857d3a1c2f8a04c0f223 | refs/heads/main | 2023-03-25T03:08:15.110561 | 2021-03-23T03:12:07 | 2021-03-23T03:12:07 | 350,562,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,502 | py | #Program for device monitoring
import psutil
import platform
from datetime import datetime
import logging  # stdlib logging; used here to append the report to a text file
# Route every record (DEBUG and above) to details.txt, emitting the bare
# message text with no level/timestamp prefix.
logging.basicConfig(filename="details.txt", level=logging.DEBUG, format='%(message)s')
def get_size(bytes, suffix="B"):
    """
    Scale bytes to its proper human-readable format.
    e.g:
        1253656 => '1.20MB'
        1253656678 => '1.17GB'
    """
    factor = 1024
    for unit in ["", "K", "M", "G", "T", "P"]:
        if bytes < factor:
            return f"{bytes:.2f}{unit}{suffix}"
        bytes /= factor
    # Value exceeded the petabyte range; the original fell off the end and
    # returned None here.  Report in the next unit up (exabytes) instead.
    return f"{bytes:.2f}E{suffix}"
# ---- System information ----
print("=" * 40, "System Information", "=" * 40)
uname = platform.uname()
logging.info(f"System: {uname.system}")
logging.info(f"Node Name: {uname.node}")
logging.info(f"Release: {uname.release}")
logging.info(f"Version: {uname.version}")
logging.info(f"Machine: {uname.machine}")
logging.info(f"Processor: {uname.processor}")
# ---- Boot time ----
# NOTE: the original logging.info("="*40, "Boot Time", "="*40) passed
# print-style positional args, which logging treats as %-format arguments;
# with no placeholders in the message this raised a formatting error and
# the header never reached the log file.  Build one string instead.
logging.info(f"{'=' * 40} Boot Time {'=' * 40}")
boot_time_timestamp = psutil.boot_time()
bt = datetime.fromtimestamp(boot_time_timestamp)
logging.info(f"Boot Time: {bt.year}/{bt.month}/{bt.day} {bt.hour}:{bt.minute}:{bt.second}")
# ---- CPU information ----
logging.info(f"{'=' * 40} CPU Info {'=' * 40}")
# Core counts (physical vs. logical/hyper-threaded).  The values are passed
# as lazy %s arguments; the original passed them as extra positional args
# with no placeholder, which logging rejects with a formatting error.
logging.info("Physical cores: %s", psutil.cpu_count(logical=False))
logging.info("Total cores: %s", psutil.cpu_count(logical=True))
# CPU frequencies
cpufreq = psutil.cpu_freq()
logging.info(f"Max Frequency: {cpufreq.max:.2f}Mhz")
logging.info(f"Min Frequency: {cpufreq.min:.2f}Mhz")
logging.info(f"Current Frequency: {cpufreq.current:.2f}Mhz")
# Per-core CPU usage, sampled over a 1-second interval.
logging.info("CPU Usage Per Core:")
for i, percentage in enumerate(psutil.cpu_percent(percpu=True, interval=1)):
    logging.info(f"Core {i}: {percentage}%")
logging.info(f"Total CPU Usage: {psutil.cpu_percent()}%")
# ---- Memory information ----
# (Header built as a single string: extra positional args to logging.info
# are %-format args, not print-style fields.)
logging.info(f"{'=' * 40} Memory Information {'=' * 40}")
# Virtual (RAM) memory details.
svmem = psutil.virtual_memory()
logging.info(f"Total: {get_size(svmem.total)}")
logging.info(f"Available: {get_size(svmem.available)}")
logging.info(f"Used: {get_size(svmem.used)}")
logging.info(f"Percentage: {svmem.percent}%")
logging.info(f"{'=' * 20} SWAP {'=' * 20}")
# Swap memory details (all zeros when no swap is configured).
swap = psutil.swap_memory()
logging.info(f"Total: {get_size(swap.total)}")
logging.info(f"Free: {get_size(swap.free)}")
logging.info(f"Used: {get_size(swap.used)}")
logging.info(f"Percentage: {swap.percent}%")
# ---- Disk information ----
logging.info(f"{'=' * 40} Disk Information {'=' * 40}")
logging.info("Partitions and Usage:")
# Walk every mounted partition and report its usage.
partitions = psutil.disk_partitions()
for partition in partitions:
    logging.info(f"=== Device: {partition.device} ===")
    logging.info(f" Mountpoint: {partition.mountpoint}")
    logging.info(f" File system type: {partition.fstype}")
    try:
        partition_usage = psutil.disk_usage(partition.mountpoint)
    except PermissionError:
        # Skip drives that are not ready (e.g. an empty optical drive).
        continue
    logging.info(f" Total Size: {get_size(partition_usage.total)}")
    logging.info(f" Used: {get_size(partition_usage.used)}")
    logging.info(f" Free: {get_size(partition_usage.free)}")
    logging.info(f" Percentage: {partition_usage.percent}%")
# Disk I/O counters accumulated since boot.
disk_io = psutil.disk_io_counters()
logging.info(f"Total read: {get_size(disk_io.read_bytes)}")
logging.info(f"Total write: {get_size(disk_io.write_bytes)}")
# ---- Network information ----
logging.info(f"{'=' * 40} Network Information {'=' * 40}")
# All network interfaces (virtual and physical) with their addresses.
if_addrs = psutil.net_if_addrs()
for interface_name, interface_addresses in if_addrs.items():
    for address in interface_addresses:
        logging.info(f"=== Interface: {interface_name} ===")
        if str(address.family) == 'AddressFamily.AF_INET':
            logging.info(f" IP Address: {address.address}")
            logging.info(f" Netmask: {address.netmask}")
            logging.info(f" Broadcast IP: {address.broadcast}")
        elif str(address.family) == 'AddressFamily.AF_PACKET':
            logging.info(f" MAC Address: {address.address}")
            logging.info(f" Netmask: {address.netmask}")
            logging.info(f" Broadcast MAC: {address.broadcast}")
# Network I/O counters accumulated since boot.
net_io = psutil.net_io_counters()
logging.info(f"Total Bytes Sent: {get_size(net_io.bytes_sent)}")
logging.info(f"Total Bytes Received: {get_size(net_io.bytes_recv)}")
| [
"noreply@github.com"
] | palvai-harshitha.noreply@github.com |
701239eabc91846ae9b35569d9b7ffdbdb02071b | c33fdf05c788fcc48a3c237100cbd4ed6e7d70fd | /tests/test_bbf_records.py | e93389d4ae9ea9a53ce76edc1a8cb2ab57265e26 | [
"BSD-3-Clause"
] | permissive | baldman/pybankreader | fb646675f54a1e8e0eb60168e2eef693a5449a0f | 3a96d6c89e408a315ccb4f7e7a3c63325c347d2d | refs/heads/master | 2021-01-21T21:39:13.347703 | 2016-05-09T15:58:44 | 2016-05-09T15:58:44 | 25,690,101 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,771 | py | import datetime
from decimal import Decimal
from pybankreader.formats.bbf.records import HeaderRecord, LockRecord, \
AdvmulHeaderRecord, AdvmulRecord, AdvmuzRecord
def test_header_record(header_record):
    """
    Verify that a HEADER record parses without raising and that every
    field carries the expected value.
    """
    rec = HeaderRecord()
    rec.load(header_record)
    assert rec.separator is None
    expected_fields = [
        ('bank_app', 'T'),
        ('app_id', '363914'),
        ('edi_msg', 'HEADER'),
        ('rec_typ', '00'),
        ('app_ver', '01.0000'),
        ('app_brand', 'BBCSOB'),
    ]
    for attr, value in expected_fields:
        assert getattr(rec, attr) == value
def test_lock_record(lock_record):
    """
    Verify that a LOCK record parses without raising and that every
    field carries the expected value.
    """
    rec = LockRecord()
    rec.load(lock_record)
    assert rec.separator is None
    assert rec.timestamp == datetime.datetime(year=2014, month=9, day=2,
                                              hour=7, minute=9, second=22)
    expected_fields = [
        ('bank_app', 'T'),
        ('app_id', '363914'),
        ('edi_msg', 'LOCK'),
        ('rec_typ', '99'),
        ('count', 0),
        ('seq_no', 11),
    ]
    for attr, value in expected_fields:
        assert getattr(rec, attr) == value
def test_advmul_header_record(advmul_header_record, advmuz_header_record):
    """
    Verify that the advmul header record parses without raising in both
    the ADVMUL and ADVMUZ variants; only edi_msg differs between them.
    """
    rec = AdvmulHeaderRecord()
    for raw_data, message_name in ((advmul_header_record, 'ADVMUL'),
                                   (advmuz_header_record, 'ADVMUZ')):
        rec.load(raw_data)
        assert rec.bank_app == 'T'
        assert rec.app_id == '363914'
        assert rec.edi_msg == message_name
        assert rec.separator is None
        assert rec.rec_typ == '01'
        assert rec.msg_rno == '20140930925710'
def test_advmul_record(advmul_record):
    """
    Try to load the advmul record and test that it actually loads it without
    exceptions, then verify every parsed field value.
    """
    rec = AdvmulRecord()
    rec.load(advmul_record)
    # Envelope fields common to all BBF records.
    assert rec.bank_app == 'T'
    assert rec.app_id == '363914'
    assert rec.edi_msg == 'ADVMUL'
    assert rec.separator is None
    assert rec.rec_typ == '02'
    # Transaction identification.
    assert rec.message_type is None
    assert rec.transact_no == 'IBATL58813'
    assert rec.weight == 100
    assert rec.route_no == '0300'
    # Client (account holder) details.
    assert rec.client_no == '9903252820'
    assert rec.client_name == 'Whatever corp Inc.'
    assert rec.client_account_no == '177148326'
    assert rec.client_reference is None
    assert rec.bank_reference == '20623'
    # Dates and amounts.
    assert rec.date is None
    assert rec.date_process == datetime.datetime(2014, 10, 2, 0, 0)
    assert rec.date_process_other is None
    assert rec.amount == -6075
    assert rec.currency == 'CZK'
    assert rec.balance == Decimal('3608328.02')
    assert rec.balance_code == 'C'
    # Counterparty (offset) account.
    assert rec.offset_account_bank_code == '0100'
    assert rec.offset_account_no == '100060018432071'
    assert rec.offset_account_name == 'PO'
    # Czech payment symbols (constant/variable/specific).
    assert rec.constant_symbol == 3558
    assert rec.variable_symbol == 26696797
    assert rec.specific_symbol == 0
    assert rec.variable_symbol_offset == 26696797
    assert rec.specific_symbol_offset == 0
    # Optional free-text fields are all absent in the fixture.
    assert rec.message1 is None
    assert rec.message2 is None
    assert rec.message3 is None
    assert rec.message4 is None
    assert rec.note is None
    assert rec.balance_final is None
    assert rec.balance_final_code is None
    assert rec.balance_time is None
def test_advmuz_record(advmuz_record):
    """
    Try to load the advmuz record and test that it actually loads it without
    exceptions, then verify every parsed field value.
    """
    rec = AdvmuzRecord()
    rec.load(advmuz_record)
    # Envelope fields common to all BBF records.
    assert rec.bank_app == 'T'
    assert rec.app_id == '363914'
    assert rec.edi_msg == 'ADVMUZ'
    assert rec.separator is None
    assert rec.rec_typ == '02'
    # Payment identification.
    assert rec.message_type == 'CRE'
    assert rec.client_no == '9903252820'
    assert rec.order_reference == '019938742626501A'
    assert rec.reference_item == '4083604409'
    assert rec.weight == 90
    assert rec.client_account_no == '183861478'
    # Creditor address and account (foreign-payment counterparty).
    assert rec.creditor_address1 == 'Big Group a.s.'
    assert rec.creditor_address2 == 'Na Pankraci 1620/1214000 Praha 4'
    assert rec.creditor_address3 == 'CZ'
    assert rec.creditor_address4 is None
    assert rec.creditor_account_no == 'CZ2155000000005081107282'
    assert rec.creditor_bank1 is None
    assert rec.creditor_bank2 is None
    assert rec.creditor_bank3 is None
    assert rec.creditor_bank4 is None
    # Payment reason lines; only the first is populated in the fixture.
    assert rec.payment_reason1 == '/ROC/NOT PROVIDED//174914'
    assert rec.payment_reason2 is None
    assert rec.payment_reason3 is None
    assert rec.payment_reason4 is None
    # Amounts, currencies, exchange rate and fees.
    assert rec.amount == Decimal('760.00')
    assert rec.currency == 'EUR'
    assert rec.amount_account_currency == Decimal('760.00')
    assert rec.account_currency == 'EUR'
    assert rec.exchange_rate == Decimal('1.0000000')
    assert rec.local_fee == Decimal('70.00')
    assert rec.local_currency == 'CZK'
    assert rec.foreign_fee == Decimal('0.00')
    assert rec.foreign_currency == 'EUR'
    assert rec.other_fees == Decimal('0.00')
    assert rec.other_fees_currency is None
    # Dates.
    assert rec.date == datetime.datetime(2014, 9, 19, 0, 0)
    assert rec.date_process == datetime.datetime(2014, 9, 19, 0, 0)
    assert rec.date_due is None
    # Client advice lines; only the first is populated in the fixture.
    assert rec.client_advice1 == '/ROC/NOT PROVIDED//174914'
    assert rec.client_advice2 is None
    assert rec.client_advice3 is None
    assert rec.client_advice4 is None
    assert rec.client_advice5 is None
    # Routing / fee-settlement details.
    assert rec.fee_settling == 'SHA'
    assert rec.swift_code == 'RZBCCZPP'
    assert rec.payment_title is None
    assert rec.routing_code is None
| [
"tomas@plesek.cz"
] | tomas@plesek.cz |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.