text stringlengths 38 1.54M |
|---|
from TestHelperSuperClass import testHelperSuperClass
import passwordmanpro_cli
from unittest.mock import patch
import samplePayloadsAndEnvs
class test_AppObj(testHelperSuperClass):
    @patch('passwordmanpro_cli.AppObjClass._callGet')
    def test_getSinglePassword(self, mock_call_get):
        """getSinglePassword chains resource, account and password lookups."""
        # Queue one canned 200 response per expected _callGet invocation,
        # in the order the client performs them.
        mock_call_get.side_effect = [
            {'responseCode': 200, 'response': samplePayloadsAndEnvs.resourseResponseRAW},
            {'responseCode': 200, 'response': samplePayloadsAndEnvs.accountsResponseRAW},
            {'responseCode': 200, 'response': samplePayloadsAndEnvs.passwordResponseRAW},
        ]
        result = passwordmanpro_cli.getSinglePassword(
            resourseName="soadevteamserver-konga",
            accountName="kongaadmin",
            skipSSLChecks=False,
            env=samplePayloadsAndEnvs.env,
        )
        self.assertEqual(result, 'dummyPasswordForTest', msg='Incorrect password output')
|
import pytest
from hickory.every_launchd import *
def test_seconds_interval_to_seconds():
    """Every accepted spelling of a 10-second interval parses to 10."""
    for spelling in ("10", "10s", "10sec", "10secs", "10seconds"):
        assert interval_to_seconds(spelling) == 10
def test_minutes_interval_to_seconds():
    """Every accepted spelling of a 30-minute interval parses to 1800 s."""
    for spelling in ("30m", "30min", "30mins", "30minutes"):
        assert interval_to_seconds(spelling) == 1800
def test_hours_interval_to_seconds():
    """Every accepted spelling of a 2-hour interval parses to 7200 s."""
    for spelling in ("2h", "2hr", "2hrs", "2hour", "2hours"):
        assert interval_to_seconds(spelling) == 7200
def test_fail_interval_to_seconds():
    # An unrecognised unit suffix ("secondz") must raise HickoryError.
    with pytest.raises(HickoryError):
        interval_to_seconds("5secondz")
def test_start_interval():
    """start_interval wraps the parsed second count in a StartInterval dict."""
    expected = {"10seconds": 10, "10minutes": 600, "10hours": 36000}
    for interval, seconds in expected.items():
        assert start_interval(interval) == {"StartInterval": seconds}
def test_day_to_weekday_dict():
    """Abbreviated and full day names map to launchd weekday numbers 1-7."""
    days = ["m", "tue", "weds", "thursday", "f", "sat", "sunday"]
    for weekday, day in enumerate(days, start=1):
        assert day_to_weekday_dict(day) == {"Weekday": weekday}
def test_day_to_calendar_day_dict():
    """Plain and ordinal day-of-month strings map to the same Day dict."""
    cases = [
        ("1", 1), ("1st", 1),
        ("2", 2), ("2nd", 2),
        ("3", 3), ("3rd", 3),
        ("4", 4), ("4th", 4),
        ("31", 31), ("31st", 31),
    ]
    for text, day in cases:
        assert day_to_calendar_day_dict(text) == {"Day": day}
def test_fail_day_to_calendar_day_dict():
    # 32 is never a valid day of the month, so parsing must fail.
    with pytest.raises(HickoryError):
        day_to_calendar_day_dict("32")
def test_weekday_list_dict():
    """weekday_list_dict expands to Monday (1) through Friday (5)."""
    assert weekday_list_dict() == [{"Weekday": n} for n in range(1, 6)]
def test_eom_list_dict():
    """eom_list_dict yields the last (non-leap) day of each month."""
    month_lengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    expected = [
        {"Day": day, "Month": month}
        for month, day in enumerate(month_lengths, start=1)
    ]
    assert eom_list_dict() == expected
def test_day_to_list_dict():
    # "day" means every day (a single empty dict), ordinals become Day
    # entries, weekday names become Weekday entries -- each wrapped in a list.
    days = ["day", "1st", "monday"]
    output = [day_to_list_dict(day) for day in days]
    assert output == [[{}], [{"Day": 1}], [{"Weekday": 1}]]
def test_timestamp_to_dict():
    """12-hour and 24-hour timestamps parse into Hour/Minute dicts."""
    cases = [
        ("0:01", 0, 1),
        ("1am", 1, 0),
        ("1:01am", 1, 1),
        ("12:00pm", 12, 0),
        ("12pm", 12, 0),
        ("12:01pm", 12, 1),
        ("1pm", 13, 0),
        ("11pm", 23, 0),
        ("11:59pm", 23, 59),
    ]
    for timestamp, hour, minute in cases:
        assert timestamp_to_dict(timestamp) == {"Hour": hour, "Minute": minute}
def test_fail_timestamp_to_tuple():
    # Hours outside the 0-23 range must be rejected.
    with pytest.raises(HickoryError):
        timestamp_to_tuple("30:30")
def test_start_calendar_interval():
    """Schedule strings expand into launchd StartCalendarInterval values.

    Covers: single dicts, comma-separated day lists, weekday x time cross
    products, and end-of-month ("eom") expansion to one entry per month.
    """
    intervals = [
        "day@10",
        "@10:10",
        "monday@10:10am",
        "10th@10:10am",
        "10,20@10am",
        "monday,w,fri@9:30am,4:30pm",
        "eom@10:10am",
        "10,eom@10,10pm",
        "weekday@9:30,10pm",
    ]
    output = [start_calendar_interval(i) for i in intervals]
    assert output == [
        {"StartCalendarInterval": {"Hour": 10, "Minute": 0}},
        {"StartCalendarInterval": {"Hour": 10, "Minute": 10}},
        {"StartCalendarInterval": {"Weekday": 1, "Hour": 10, "Minute": 10}},
        {"StartCalendarInterval": {"Day": 10, "Hour": 10, "Minute": 10}},
        # "10,20@10am": one entry per listed day.
        {
            "StartCalendarInterval": [
                {"Day": 10, "Hour": 10, "Minute": 0},
                {"Day": 20, "Hour": 10, "Minute": 0},
            ]
        },
        # "monday,w,fri@9:30am,4:30pm": cross product of days and times.
        {
            "StartCalendarInterval": [
                {"Weekday": 1, "Hour": 9, "Minute": 30},
                {"Weekday": 1, "Hour": 16, "Minute": 30},
                {"Weekday": 3, "Hour": 9, "Minute": 30},
                {"Weekday": 3, "Hour": 16, "Minute": 30},
                {"Weekday": 5, "Hour": 9, "Minute": 30},
                {"Weekday": 5, "Hour": 16, "Minute": 30},
            ]
        },
        # "eom@10:10am": last day of every month.
        {
            "StartCalendarInterval": [
                {"Day": 31, "Month": 1, "Hour": 10, "Minute": 10},
                {"Day": 28, "Month": 2, "Hour": 10, "Minute": 10},
                {"Day": 31, "Month": 3, "Hour": 10, "Minute": 10},
                {"Day": 30, "Month": 4, "Hour": 10, "Minute": 10},
                {"Day": 31, "Month": 5, "Hour": 10, "Minute": 10},
                {"Day": 30, "Month": 6, "Hour": 10, "Minute": 10},
                {"Day": 31, "Month": 7, "Hour": 10, "Minute": 10},
                {"Day": 31, "Month": 8, "Hour": 10, "Minute": 10},
                {"Day": 30, "Month": 9, "Hour": 10, "Minute": 10},
                {"Day": 31, "Month": 10, "Hour": 10, "Minute": 10},
                {"Day": 30, "Month": 11, "Hour": 10, "Minute": 10},
                {"Day": 31, "Month": 12, "Hour": 10, "Minute": 10},
            ]
        },
        # "10,eom@10,10pm": plain day entries first, then eom expansion per time.
        {
            "StartCalendarInterval": [
                {"Day": 10, "Hour": 10, "Minute": 0},
                {"Day": 10, "Hour": 22, "Minute": 0},
                {"Day": 31, "Month": 1, "Hour": 10, "Minute": 0},
                {"Day": 28, "Month": 2, "Hour": 10, "Minute": 0},
                {"Day": 31, "Month": 3, "Hour": 10, "Minute": 0},
                {"Day": 30, "Month": 4, "Hour": 10, "Minute": 0},
                {"Day": 31, "Month": 5, "Hour": 10, "Minute": 0},
                {"Day": 30, "Month": 6, "Hour": 10, "Minute": 0},
                {"Day": 31, "Month": 7, "Hour": 10, "Minute": 0},
                {"Day": 31, "Month": 8, "Hour": 10, "Minute": 0},
                {"Day": 30, "Month": 9, "Hour": 10, "Minute": 0},
                {"Day": 31, "Month": 10, "Hour": 10, "Minute": 0},
                {"Day": 30, "Month": 11, "Hour": 10, "Minute": 0},
                {"Day": 31, "Month": 12, "Hour": 10, "Minute": 0},
                {"Day": 31, "Month": 1, "Hour": 22, "Minute": 0},
                {"Day": 28, "Month": 2, "Hour": 22, "Minute": 0},
                {"Day": 31, "Month": 3, "Hour": 22, "Minute": 0},
                {"Day": 30, "Month": 4, "Hour": 22, "Minute": 0},
                {"Day": 31, "Month": 5, "Hour": 22, "Minute": 0},
                {"Day": 30, "Month": 6, "Hour": 22, "Minute": 0},
                {"Day": 31, "Month": 7, "Hour": 22, "Minute": 0},
                {"Day": 31, "Month": 8, "Hour": 22, "Minute": 0},
                {"Day": 30, "Month": 9, "Hour": 22, "Minute": 0},
                {"Day": 31, "Month": 10, "Hour": 22, "Minute": 0},
                {"Day": 30, "Month": 11, "Hour": 22, "Minute": 0},
                {"Day": 31, "Month": 12, "Hour": 22, "Minute": 0},
            ]
        },
        # "weekday@9:30,10pm": Mon-Fri at each listed time.
        {
            "StartCalendarInterval": [
                {"Weekday": 1, "Hour": 9, "Minute": 30},
                {"Weekday": 2, "Hour": 9, "Minute": 30},
                {"Weekday": 3, "Hour": 9, "Minute": 30},
                {"Weekday": 4, "Hour": 9, "Minute": 30},
                {"Weekday": 5, "Hour": 9, "Minute": 30},
                {"Weekday": 1, "Hour": 22, "Minute": 0},
                {"Weekday": 2, "Hour": 22, "Minute": 0},
                {"Weekday": 3, "Hour": 22, "Minute": 0},
                {"Weekday": 4, "Hour": 22, "Minute": 0},
                {"Weekday": 5, "Hour": 22, "Minute": 0},
            ]
        },
    ]
def test_every():
    """every() dispatches to interval or calendar-interval parsing."""
    cases = {
        "10": {"StartInterval": 10},
        "10mins": {"StartInterval": 600},
        "@10": {"StartCalendarInterval": {"Hour": 10, "Minute": 0}},
        "monday@10:00pm": {
            "StartCalendarInterval": {"Weekday": 1, "Hour": 22, "Minute": 0}
        },
    }
    for interval, expected in cases.items():
        assert every(interval) == expected
def test_hickory_errors():
    """Malformed schedule strings all raise HickoryError."""
    bad_inputs = ["", "z", "1z", "z@z", "@", "100@100", "@10:10:10", "@10@10"]
    for bad in bad_inputs:
        with pytest.raises(HickoryError):
            every(bad)
|
#-*- coding:utf-8 -*-
import xlrd
import json
import codecs
from datetime import datetime
from xlrd import xldate_as_tuple
def get_9th_daibiao():
    """Load the 9th congress delegate roster from an Excel sheet.

    Returns:
        list[dict]: one dict per delegate row (header row skipped).
    """
    file_path = '../data/9th_daibiao.xls'
    sheetno = 0
    # Column indices in the source spreadsheet.
    suggest_unit_col = 0
    name_col = 3
    sex_col = 4
    nation_col = 5
    birthDay_col = 6
    political_outlook_col = 7  # political affiliation
    hometown_col = 9
    xueli_col = 12  # education level
    graduate_school_col = 13
    major_col = 14
    speciality_col = 16  # area of expertise
    maior_job_col = 17  # professional/technical title
    is_zhongkeyuan_col = 35  # Chinese Academy of Sciences flag
    is_gongchengyuan_col = 36  # Chinese Academy of Engineering flag
    department_and_job_col = 20  # work unit and position
    all_people = []
    excel_data = xlrd.open_workbook(file_path)
    table = excel_data.sheets()[sheetno]
    for row in range(1, table.nrows):  # row 0 is the header
        values = table.row_values(row)  # fetch the row once instead of per field
        people = {}
        people['suggest_unit'] = values[suggest_unit_col]
        people['name'] = values[name_col]
        people['sex'] = values[sex_col]
        people['nation'] = values[nation_col]
        people['birthDay'] = values[birthDay_col]
        # birthDay is a 'YYYY-...' string; 2018 is the hard-coded reference year.
        people['age'] = 2018 - int(values[birthDay_col].split('-')[0])
        people['political_outlook'] = values[political_outlook_col]
        people['hometown'] = values[hometown_col]
        people['xueli'] = values[xueli_col]
        people['graduate_school'] = values[graduate_school_col]
        people['major'] = values[major_col]
        people['speciality'] = values[speciality_col]
        people['maior_job'] = values[maior_job_col]
        people['is_zhongkeyuan'] = values[is_zhongkeyuan_col]
        people['is_gongchengyuan'] = values[is_gongchengyuan_col]
        people['department_and_job'] = values[department_and_job_col]
        all_people.append(people)
    return all_people
def get_15th_qingke_candidate():
    """Load the 15th "qingke" candidate list from an Excel sheet.

    Returns:
        list[dict]: one dict per candidate row (header row skipped).
    """
    file_path = '../data/15th_qingke.xls'
    sheetno = 0
    # Column indices in the source spreadsheet.
    speciality_col = 3  # area of expertise
    name_col = 4
    sex_col = 5
    nation_col = 7
    birthDay_col = 6
    political_outlook_col = 8  # political affiliation
    recommend_unit_col = 9  # recommending unit
    unit_xingzhi_col = 11  # type of work unit
    maior_job_col = 13  # professional/technical title
    location_col = 12  # unit location
    department_and_job_col = 14  # work unit and position
    all_people = []
    excel_data = xlrd.open_workbook(file_path)
    table = excel_data.sheets()[sheetno]
    for row in range(1, table.nrows):  # row 0 is the header
        values = table.row_values(row)  # fetch the row once instead of per field
        people = {}
        people['name'] = values[name_col]
        people['sex'] = values[sex_col]
        people['nation'] = values[nation_col]
        # The birthday cell is an Excel serial date; convert via xlrd.
        date = datetime(*xldate_as_tuple(values[birthDay_col], 0))
        people['birthDay'] = str(date.year) + '年' + str(date.month) + '月' + str(date.day) + '日'
        people['age'] = 2018 - int(date.year)  # 2018 is the hard-coded reference year
        people['political_outlook'] = values[political_outlook_col]
        people['speciality'] = values[speciality_col]
        people['recommend_unit'] = values[recommend_unit_col]
        people['unit_xingzhi'] = values[unit_xingzhi_col]
        people['maior_job'] = values[maior_job_col]
        people['location'] = values[location_col]
        people['department_and_job'] = values[department_and_job_col]
        all_people.append(people)
    return all_people
def all_qingke():
    """Load the full "qingke" roster from an Excel sheet.

    Rows without a usable birthday (empty cell or the placeholder 1.0)
    get empty-string birthDay/age values.

    Returns:
        list[dict]: one dict per person row (header row skipped).
    """
    file_path = '../data/all_qingke.xlsx'
    sheetno = 0
    # Column indices in the source spreadsheet.
    name_col = 1
    sex_col = 3
    birthDay_col = 4
    nation_col = 5
    political_outlook_col = 6  # political affiliation
    speciality_col = 11  # area of expertise
    work_department_col = 2
    job_col = 7
    session_col = 14
    all_people = []
    excel_data = xlrd.open_workbook(file_path)
    table = excel_data.sheets()[sheetno]
    for row in range(1, table.nrows):  # row 0 is the header
        values = table.row_values(row)  # fetch the row once instead of per field
        people = {}
        people['name'] = values[name_col]
        people['sex'] = values[sex_col]
        people['nation'] = values[nation_col]
        birthday_cell = values[birthDay_col]
        if birthday_cell and birthday_cell != 1.0:
            # Excel serial date -> human-readable Chinese date string.
            date = datetime(*xldate_as_tuple(birthday_cell, 0))
            people['birthDay'] = str(date.year) + '年' + str(date.month) + '月' + str(date.day) + '日'
            people['age'] = 2018 - int(date.year)  # hard-coded reference year
        else:
            people['birthDay'] = ''
            people['age'] = ''
        people['political_outlook'] = values[political_outlook_col]
        people['speciality'] = values[speciality_col]
        people['work_department'] = values[work_department_col]
        people['job'] = values[job_col]
        people['session'] = values[session_col]
        all_people.append(people)
    return all_people
def get_2017_yuanshi_candidate():
    """Load the 2017 academician candidate list from an Excel sheet.

    Returns:
        list[dict]: one dict per candidate row (header row skipped).
    """
    file_path = '../data/2017_recommend_liangyuan.xls'
    sheetno = 0
    # Column indices in the source spreadsheet.
    recommend_unit_col = 2
    name_col = 3
    sex_col = 4
    nation_col = 6
    age_col = 5
    xueli_col = 7  # education level
    unit_xingzhi_col = 10  # type of work unit
    recommend_type_col = 12
    recommend_department_col = 13
    speciality_col = 14  # area of expertise
    department_and_job_col = 9  # work unit and position
    all_people = []
    excel_data = xlrd.open_workbook(file_path)
    table = excel_data.sheets()[sheetno]
    for row in range(1, table.nrows):  # row 0 is the header
        values = table.row_values(row)  # fetch the row once instead of per field
        people = {}
        people['name'] = values[name_col]
        people['sex'] = values[sex_col]
        people['nation'] = values[nation_col]
        people['age'] = values[age_col]
        people['recommend_unit'] = values[recommend_unit_col]
        people['speciality'] = values[speciality_col]
        people['xueli'] = values[xueli_col]
        people['unit_xingzhi'] = values[unit_xingzhi_col]
        people['recommend'] = values[recommend_type_col]
        people['recommend_department'] = values[recommend_department_col]
        people['department_and_job'] = values[department_and_job_col]
        all_people.append(people)
    return all_people
def get_all_yuanshi():
    """Load the roster of all academicians from an Excel sheet.

    Birthday cells that cannot be parsed as Excel dates are reported on
    stdout and the row keeps its raw birthDay value without an 'age' key
    (matching the original best-effort behaviour).

    Returns:
        list[dict]: one dict per person row (header row skipped).
    """
    file_path = '../data/all_liangyuan.xlsx'
    sheetno = 0
    # Column indices in the source spreadsheet.
    name_col = 2
    sex_col = 3
    nation_col = 4
    birthDay_col = 6
    yuanshi_type_col = 8
    xueke_col = 9  # discipline
    major_col = 10
    yuanshi_department_col = 11
    department_and_job_col = 7
    city_col = 12
    all_people = []
    excel_data = xlrd.open_workbook(file_path)
    table = excel_data.sheets()[sheetno]
    for row in range(1, table.nrows):  # row 0 is the header
        values = table.row_values(row)  # fetch the row once instead of per field
        people = {}
        people['name'] = values[name_col]
        people['sex'] = values[sex_col]
        people['nation'] = values[nation_col]
        people['birthDay'] = values[birthDay_col]
        if values[birthDay_col]:
            try:
                date = datetime(*xldate_as_tuple(values[birthDay_col], 0))
                people['age'] = 2018 - int(date.year)  # hard-coded reference year
            except Exception:  # narrowed from bare except; keep best-effort logging
                print(values)
        else:
            people['age'] = None
        people['yuanshi_type'] = values[yuanshi_type_col]
        people['xueke'] = values[xueke_col]
        people['major'] = values[major_col]
        people['yuanshi_department'] = values[yuanshi_department_col]
        people['department_and_job'] = values[department_and_job_col]
        people['city'] = values[city_col]
        all_people.append(people)
    return all_people
def get_1st_chuangxin_candidate():
    """Load the first innovation-award candidate list from an Excel sheet.

    Returns:
        list[dict]: one dict per candidate row (header row skipped).
    """
    file_path = '../data/2017_chuangxin_candidate.xlsx'
    sheetno = 0
    # Column indices in the source spreadsheet.
    name_col = 1
    sex_col = 2
    nation_col = 3
    birthDay_col = 4
    political_outlook_col = 5  # political affiliation
    xueli_col = 6  # education level
    job_col = 8  # position
    unit_xingzhi_col = 9  # type of work unit
    province_col = 10
    xueke_col = 12  # discipline
    xingzheng_degree_col = 15  # administrative rank
    department_and_job_col = 7
    all_people = []
    excel_data = xlrd.open_workbook(file_path)
    table = excel_data.sheets()[sheetno]
    for row in range(1, table.nrows):  # row 0 is the header
        values = table.row_values(row)  # fetch the row once instead of per field
        people = {}
        people['name'] = values[name_col]
        people['sex'] = values[sex_col]
        people['nation'] = values[nation_col]
        people['birthDay'] = str(values[birthDay_col])
        # birthDay is a 'YYYY-...' string; 2018 is the hard-coded reference year.
        people['age'] = 2018 - int(str(values[birthDay_col]).split('-')[0])
        people['political_outlook'] = values[political_outlook_col]
        people['xueke'] = values[xueke_col]
        people['xueli'] = values[xueli_col]
        people['job'] = values[job_col]
        people['unit_xingzhi'] = values[unit_xingzhi_col]
        # Strip the city/province suffix characters to normalise the name.
        province = values[province_col]
        province = province.replace('市', '')
        province = province.replace('省', '')
        people['province'] = province
        people['xingzheng_degree'] = values[xingzheng_degree_col]
        people['department_and_job'] = values[department_and_job_col]
        all_people.append(people)
    return all_people
def load_all_female_scientists():
    """Load the roster of female scientists from an Excel sheet.

    Returns:
        list[dict]: one dict per person row (header row skipped); every
        row gets sex='女' since the sheet only lists women.
    """
    file_path = '../data/all_female_scientists.xlsx'
    sheetno = 0
    # Column indices in the source spreadsheet.
    name_col = 1
    birthDay_col = 2
    major_col = 3
    province_col = 5
    session_col = 6  # which session/term
    unit_xingzhi_col = 7  # type of work unit
    department_and_job_col = 4  # work unit and position
    all_people = []
    excel_data = xlrd.open_workbook(file_path)
    table = excel_data.sheets()[sheetno]
    for row in range(1, table.nrows):  # row 0 is the header
        values = table.row_values(row)  # fetch the row once instead of per field
        people = {}
        people['name'] = values[name_col]
        people['sex'] = '女'
        people['birthDay'] = str(values[birthDay_col])
        # birthDay looks like 'YYYY.M'; 2018 is the hard-coded reference year.
        people['age'] = 2018 - int(str(values[birthDay_col]).split('.')[0])
        people['major'] = values[major_col]
        people['unit_xingzhi'] = values[unit_xingzhi_col]
        # Strip administrative/org suffixes to normalise the province name.
        province = values[province_col]
        province = province.replace('市', '')
        province = province.replace('省', '')
        province = province.replace('自治区', '')
        province = province.replace('大学', '')
        people['province'] = province
        people['session'] = values[session_col]
        people['department_and_job'] = values[department_and_job_col]
        all_people.append(people)
    return all_people
def load_all_qianren():
    """Load the "qianren" (thousand talents) roster from an Excel sheet.

    Returns:
        list[dict]: one dict per person row (header row skipped).
    """
    file_path = '../data/all_qianren.xls'
    sheetno = 0
    # Column indices in the source spreadsheet.
    name_col = 1
    department_col = 2
    job_col = 3
    major_col = 4
    sub_major_col = 5
    all_people = []
    excel_data = xlrd.open_workbook(file_path)
    table = excel_data.sheets()[sheetno]
    for row in range(1, table.nrows):  # row 0 is the header
        values = table.row_values(row)  # fetch the row once instead of per field
        people = {}
        people['name'] = values[name_col]
        people['department'] = values[department_col]
        people['job'] = values[job_col]
        people['major'] = values[major_col]
        people['sub_major'] = values[sub_major_col]
        all_people.append(people)
    return all_people
def load_5_wei():
    """Load per-province indicator data from the 5-dimension analysis sheet.

    Returns:
        list[dict]: one dict per province/year row (header row skipped).
    """
    file_path = '../data/5_wei_analysis.xlsx'
    sheetno = 0
    # Column indices in the source spreadsheet.
    name_col = 0
    year_col = 1
    gdp_col = 2
    science_col = 3
    rd_col = 4
    scholar_col = 5
    edu_col = 6
    paper_col = 7
    patent_col = 8
    quality_col = 9  # public scientific literacy
    all_province = []
    excel_data = xlrd.open_workbook(file_path)
    table = excel_data.sheets()[sheetno]
    for row in range(1, table.nrows):  # row 0 is the header
        values = table.row_values(row)  # fetch the row once instead of per field
        province = {}
        province['name'] = values[name_col]
        province['year'] = values[year_col]
        province['gdp'] = values[gdp_col]
        province['science'] = values[science_col]
        province['rd'] = values[rd_col]
        province['scholar'] = values[scholar_col]
        province['edu'] = values[edu_col]
        province['paper'] = values[paper_col]
        province['patent'] = values[patent_col]
        province['quality'] = values[quality_col]
        all_province.append(province)
    return all_province
if __name__ == '__main__':
    # Each line dumps one loader's output to JSON; uncomment as needed.
    # (Removed stray '|' residue that trailed the last line.)
    json.dump(get_9th_daibiao(), codecs.open('../data/9th_daibiao.json', 'w', encoding='utf-8'), ensure_ascii=False)
    # json.dump(get_15th_qingke_candidate(), codecs.open('../data/15th_qingke_candidate.json', 'w', encoding='utf-8'), ensure_ascii=False)
    # json.dump(all_qingke(), codecs.open('../data/all_qingke.json', 'w', encoding='utf-8'), ensure_ascii=False)
    # json.dump(get_2017_yuanshi_candidate(), codecs.open('../data/2017_yuanshi_candidate.json', 'w', encoding='utf-8'), ensure_ascii=False)
    # json.dump(get_all_yuanshi(), codecs.open('../data/all_yuanshi.json', 'w', encoding='utf-8'), ensure_ascii=False)
    # json.dump(get_1st_chuangxin_candidate(), codecs.open('../data/1st_chuangxin_candidate.json', 'w', encoding='utf-8'), ensure_ascii=False)
    # json.dump(load_all_female_scientists(), codecs.open('../data/all_female_scientists.json', 'w', encoding='utf-8'), ensure_ascii=False)
    # json.dump(load_all_qianren(), codecs.open('../data/all_qianren.json', 'w', encoding='utf-8'), ensure_ascii=False)
    # json.dump(load_5_wei(), codecs.open('../data/5_wei_analysis.json', 'w', encoding='utf-8'), ensure_ascii=False)
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
    """Return the placeholder landing page for this app.

    Fix: removed the stray '|' concatenation residue that trailed the
    return statement and broke the syntax.
    """
    return HttpResponse("<h2>Porumbeii sunt agenti lui Mussolini</h2>")
from typing import *
import hydra
import torch
import random
import numpy as np
from pytorch_lightning import Trainer
from loguru import logger
from model import Classifier
@hydra.main(config_path="config.yaml")
def train(config):
    """Seed RNGs, build the Classifier, and run pytorch-lightning training.

    NOTE(review): Trainer arguments such as show_progress_bar, use_amp,
    log_save_interval and distributed_backend belong to an old
    pytorch-lightning API -- presumably the project pins that version;
    confirm before upgrading the dependency.
    """
    logger.info(config)
    # Fix seeds for numpy/random; note torch.manual_seed is NOT called here.
    np.random.seed(42)
    random.seed(42)
    if torch.cuda.is_available():
        # NOTE(review): the usual determinism flags live under
        # torch.backends.cudnn.*; these attributes may be no-ops -- verify.
        torch.backends.cuda.deterministic = True
        torch.backends.cuda.benchmark = False
    model = Classifier(config)
    trainer = Trainer(
        gradient_clip_val = 0,  # 0 disables gradient clipping
        num_nodes=1,
        # Use every visible GPU, or run on CPU when CUDA is unavailable.
        gpus = None if not torch.cuda.is_available() else [i for i in range(torch.cuda.device_count())],
        log_gpu_memory=True,
        show_progress_bar=True,
        accumulate_grad_batches=config["accumulate_grad_batches"],
        max_epochs=config["max_epochs"],
        min_epochs=1,
        val_check_interval=0.1,  # run validation 10 times per training epoch
        log_save_interval=100,
        row_log_interval=10,
        distributed_backend = "ddp",
        use_amp=config["use_amp"],
        weights_summary= 'top',
        amp_level='O2',
        num_sanity_val_steps=5,
        resume_from_checkpoint=None,
    )
    trainer.fit(model)
    pass
# Script entry point; hydra injects the config from config.yaml.
if __name__ == "__main__":
    train()
|
from django.db import models
import datetime
from multiselectfield import MultiSelectField
from django.contrib.auth.models import User
# Create your models here.
from SanteLib import config
class Address(models.Model):
    """Postal address referenced by Person records."""
    street = models.CharField(max_length=200)
    city = models.CharField(max_length=30)
    state = models.CharField(max_length=30, blank=True, null=True)  # optional
    # NOTE(review): an integer zip code drops leading zeros -- confirm intent.
    zip_code = models.PositiveIntegerField()
class Person(models.Model):
    """Base model for people; Nurse and Patient inherit from it."""
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    date_of_birth = models.DateField('Date of birth')
    # Keep the person if their address row is deleted.
    address = models.ForeignKey(Address, null=True, on_delete=models.SET_NULL)
    sex = models.CharField(choices=config.SEX_CHOICES,default='F',max_length=1)
    email = models.EmailField()
    def __str__(self):
        return self.first_name.capitalize() + ' ' + self.last_name.capitalize()
    def age(self):
        """Return the person's age in whole years as of today.

        Fix: the previous version constructed
        datetime.date(today.year, dob.month, dob.day), which raises
        ValueError in every non-leap year for people born on Feb 29.
        Comparing (month, day) tuples avoids building that date while
        giving identical results for all other birthdays.
        """
        today = datetime.date.today()
        age = today.year - self.date_of_birth.year
        # Subtract one if this year's birthday has not happened yet.
        if (today.month, today.day) < (self.date_of_birth.month, self.date_of_birth.day):
            age -= 1
        return age
class Nurse(Person):
    """A nurse account; extends Person and links to a Django auth User."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    date_of_graduation = models.DateField('Date of graduation')
    # Up to five spoken languages from the project-wide choice list.
    spoken_languages = MultiSelectField(max_choices=5, choices=config.LANGUAGE_CHOICES, default='FR')
    rating = models.PositiveIntegerField(choices=config.RATING_CHOICES, default=0)
    photo = models.ImageField(upload_to="photos/",null=True)
    diploma = models.FileField(upload_to="diplomas/",null=True)
    @property
    def photo_thumbnail(self):
        # Thumbnails live under photos/small/ with the same filename.
        return self.photo.name.replace('photos','photos/small')
    def __str__(self):
        return self.user.username
class Patient(Person):
    """A patient; extends Person with language preferences.

    Fix: removed the stray '|' concatenation residue that trailed the
    __str__ return statement and broke the syntax.
    """
    preferred_languages = MultiSelectField(choices=config.LANGUAGE_CHOICES, max_length=3, default='FR')
    def __str__(self):
        return self.first_name.capitalize() + ' ' + self.last_name.capitalize()
import os, time
from bs4 import BeautifulSoup
import urllib
from selenium import webdriver
# Base URL components for the carbon.now.sh image-rendering service.
SCHEME = "https"
DOMAIN = "carbon.now.sh"
def carbon(code, lang="auto"):
    """Render `code` as an image via carbon.now.sh and download the PNG.

    Side effects: launches headless Firefox and saves the exported PNG
    into the current working directory.

    Args:
        code: source text to render.
        lang: carbon language id; "auto" lets carbon detect it.
    """
    # Fix: `import urllib` alone does not guarantee the urllib.parse
    # submodule is loaded; import it explicitly.
    from urllib.parse import quote_plus
    config = {
        "pv": "10px",
        "ph": "10px",
        "t": "vscode",
        "wa": "false",
        "l": lang,
        "code": code
    }
    parameters = '&'.join(f"{key}={quote_plus(value)}" for key, value in config.items())
    url = f"{SCHEME}://{DOMAIN}/?{parameters}"
    # Configure Firefox to auto-save PNG downloads into the cwd.
    profile = webdriver.FirefoxProfile()
    profile.set_preference('browser.download.folderList', 2)  # custom location
    profile.set_preference('browser.download.manager.showWhenStarting', False)
    profile.set_preference('browser.download.dir', os.getcwd())
    profile.set_preference('browser.helperApps.neverAsk.saveToDisk', 'image/png')
    # profile.set_preference('extensions.logging.enabled', True)
    options = webdriver.FirefoxOptions()
    options.add_argument("--safe-mode")
    os.environ['MOZ_HEADLESS'] = '1'
    browser = webdriver.Firefox(profile, options=options)
    try:
        browser.get(url)
        browser.find_element_by_xpath("//*[contains(text(), 'Export')]").click()
        browser.find_element_by_id("export-png").click()
        time.sleep(1)  # crude wait for the download -- TODO: poll for the file instead
    finally:
        browser.quit()  # fix: the browser process was previously leaked
# Demo: render a small Go program when the module is executed.
code = """package main
import "fmt"
func main() {
fmt.Println("hello world")
}
"""
carbon(code)
|
import networkx as nx
import csv
import helper as hp
import sys
from lib import structural_holes2 as sx
from lib import structural_holes as sx1
import sys,getopt
import time
def main(argv):
    """Compute individual bridging measures over the whole network.

    Reads a partition CSV and the FF/AT/RT edgelists for a project, then,
    for every node present in all three networks, rebuilds subgraphs that
    exclude the node's own community and writes one CSV row of centrality,
    volume, reciprocity and retweet measures per node.

    Usage: individual_bridging_2.py -p <project_name> -s <partitionfile>
    """
    #Standardvalues
    partitionfile = "data/partitions/final_partitions_p100_200_0.2.csv"
    project = "584"
    to_pajek = False
    try:
        opts, args = getopt.getopt(argv,"p:s:o")
    except getopt.GetoptError:
        print 'individual_bridging_2.py -p <project_name> -s <partitionfile> '
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-p"):
            project = arg
        elif opt in ("-s"):
            partitionfile = arg
        else:
            print 'individual_bridging_2.py -p <project_name> -s <partitionfile> '
    print "##################### INDIVIDUAL BRIDGING 2 (Working on whole network) ########################"
    print "Project %s " % project
    print "Partition %s" % partitionfile
    # One output row per node; header written once up front.
    csv_bridging_writer = csv.writer(open('results/spss/individual bridging/%s_individual_bridging_3.csv' % project, 'wb'))
    csv_bridging_writer.writerow(["Project", "Community", "Person_ID",
        "Competing_lists",
        "FF_bin_degree", "FF_bin_in_degree", "FF_bin_out_degree",
        "FF_vol_in", "FF_vol_out",
        "FF_groups_in", "FF_groups_out",
        "FF_rec",
        "FF_bin_betweeness", #"FF_bin_closeness", "FF_bin_pagerank",
        #"FF_c_size", "FF_c_density", "FF_c_hierarchy", "FF_c_index",
        "AT_bin_degree", "AT_bin_in_degree", "AT_bin_out_degree",
        "AT_vol_in", "AT_vol_out",
        "AT_groups_in", "AT_groups_out",
        "AT_rec",
        "AT_bin_betweeness",#, "AT_bin_closeness", "AT_bin_pagerank",
        # FF_c_size, FF_c_density, FF_c_hierarchy, FF_c_index,
        "AT_avg_tie_strength","AT_strength_centrality_in",
        "RT_bin_in_degree", "RT_bin_out_degree",
        "RT_vol_in", "RT_vol_out"])
    #Read in the list-listings for individuals
    listings = {}
    indiv_reader = csv.reader(open(partitionfile))
    for row in indiv_reader:
        listings[row[0]] = {"group":row[1],"place":int(row[2]), "competing_lists": int(row[3])}
    # Read in the centralities of nodes in their corresponding community
    centralities = {}
    centrality_reader = csv.reader(open('results/spss/individual bonding/%s_individual_bonding.csv' % project))
    for row in centrality_reader:
        centralities[row[2]] = {"ff_in_degree":row[5]}
    # Read in the partition
    tmp = hp.get_partition(partitionfile)
    partitions = tmp[0]
    groups = tmp[1]
    # Read in the networks
    FF_all = nx.read_edgelist('data/networks/%s_FF.edgelist' % project, nodetype=str, data=(('weight',float),),create_using=nx.DiGraph())
    AT_all = nx.read_edgelist('data/networks/%s_solr_AT.edgelist' % project, nodetype=str, data=(('weight',float),),create_using=nx.DiGraph())
    RT_all = nx.read_edgelist('data/networks/%s_solr_RT.edgelist' % project, nodetype=str, data=(('weight',float),),create_using=nx.DiGraph())
    print "Done reading in Networks"
    #Determine the Maximum subset of nodes present in all Networks
    maximum_subset = []
    for node in FF_all.nodes():
        if AT_all.has_node(node) and RT_all.has_node(node):
            maximum_subset.append(node)
    # Tag every node with its community's group label in all three networks.
    i = 0
    for partition in partitions:
        for node in partition:
            FF_all.add_node(node, group = groups[i]) # Add nodes
            AT_all.add_node(node, group = groups[i])
            RT_all.add_node(node, group = groups[i])
        i += 1
    i = 0
    #These measures are computed only once on the graph (we are making an error since the internal group structure is considered to load up those values)
    # Betweenness is sampled (k pivots) to stay tractable on large graphs.
    if len(maximum_subset) < 1000:
        scaling_k = len(maximum_subset)
    else:
        scaling_k = len(maximum_subset)/100
    dFF_bin_betweeness = nx.betweenness_centrality(FF_all,k=scaling_k)
    dAT_bin_betweeness = nx.betweenness_centrality(AT_all,k=scaling_k)
    #dFF_struc = sx.structural_holes(FF_all)
    for partition in partitions:
        project_name = groups[i]
        #Determine the groups that are not in the partition
        all_other_groups = groups[:]
        group = groups[i]
        all_other_groups.remove(group)
        # Get all the partitions without the current partition
        partitions_without_partition = partitions[:]
        partitions_without_partition.remove(partition)
        #Remove the nodes that are in this partition
        remaining_nodes = [item for sublist in partitions for item in sublist] #flatlist of all nodes
        for nodes_to_be_deleted in partition:
            remaining_nodes.remove(nodes_to_be_deleted)
        #Create Subgraphs that contain all nodes except the ones that are in the partition
        S_FF = FF_all.subgraph(remaining_nodes)
        S_AT = AT_all.subgraph(remaining_nodes)
        S_RT = RT_all.subgraph(remaining_nodes)
        i += 1
        for node in partition:
            if node in maximum_subset:
                t0 = time.time()
                #Add FF nodes and edges
                S_FF.add_node(node, group = group)
                S_FF.add_edges_from(FF_all.in_edges(node,data=True)) # in edges
                S_FF.add_edges_from(FF_all.out_edges(node,data=True)) #out edges
                # Delete the nodes that we again accidentally added by importing all of the node's edges
                for tmp_node in partition:
                    if tmp_node != node and tmp_node in S_FF:
                        S_FF.remove_node(tmp_node)
                # Add AT nodes and edges
                S_AT.add_node(node, group = group)
                S_AT.add_edges_from(AT_all.in_edges(node,data=True)) # in edges
                S_AT.add_edges_from(AT_all.out_edges(node,data=True)) #out edges
                # Delete the nodes that we again accidentally added by importing all of the node's edges
                for tmp_node in partition:
                    if tmp_node != node and tmp_node in S_AT:
                        S_AT.remove_node(tmp_node)
                S_RT.add_node(node, group = group)
                S_RT.add_edges_from(RT_all.in_edges(node,data=True)) # in edges
                S_RT.add_edges_from(RT_all.out_edges(node,data=True)) #out edges
                # Delete the nodes that we again accidentally added by importing all of the node's edges
                for tmp_node in partition:
                    if tmp_node != node and tmp_node in S_RT:
                        S_RT.remove_node(tmp_node)
                print "Done creating Subgraphs"
                ## FF measures
                dFF_bin = nx.degree_centrality(S_FF)
                dFF_bin_in = nx.in_degree_centrality(S_FF)
                dFF_bin_out = nx.out_degree_centrality(S_FF)
                #nx.load_centrality(S_FF,v=node, weight="weight")
                #dFF_bin_closeness = nx.closeness_centrality(S_FF,v=node)
                #dFF_bin_pagerank = nx.pagerank(S_FF, weight="weight")
                dFF_total_in_groups = hp.filtered_group_volume(hp.incoming_group_volume(S_FF,node,all_other_groups),0)
                dFF_total_out_groups = hp.filtered_group_volume(hp.outgoing_group_volume(S_FF,node,all_other_groups),0)
                dFF_rec = hp.individual_reciprocity(S_FF,node) #number of reciprocated ties
                ## AT Measures
                dAT_bin = nx.degree_centrality(S_AT)
                dAT_bin_in = nx.in_degree_centrality(S_AT)
                dAT_bin_out = nx.out_degree_centrality(S_AT)
                #dAT_bin_betweeness = nx.betweenness_centrality(S_AT, k=100) #nx.load_centrality(S_AT,v=node,weight="weight")
                #dAT_bin_closeness = nx.closeness_centrality(S_AT,v=node)
                #dAT_bin_pagerank = nx.pagerank(S_AT,weight="weight")
                dAT_total_in_groups = hp.filtered_group_volume(hp.incoming_group_volume(S_AT,node,all_other_groups),0)
                dAT_total_out_groups = hp.filtered_group_volume(hp.outgoing_group_volume(S_AT,node,all_other_groups),0)
                dAT_rec = hp.individual_reciprocity(S_AT,node) #number of @reciprocated ties
                dAT_avg_tie = hp.individual_average_tie_strength(S_AT,node)
                #Compute a combined measure which multiplies the strength of incoming ties times the centrality of that person
                dAT_strength_centrality = 0
                for edge in S_AT.in_edges(node,data=True):
                    if edge[0] in maximum_subset:
                        dAT_strength_centrality += edge[2]["weight"]*float(centralities[edge[0]]["ff_in_degree"]) #get the centrality of the node that the tie is incoming from
                ############### DEPENDENT VARIABLES ###########
                dRT_in = nx.in_degree_centrality(S_RT) # At least once a retweets that a person has received
                dRT_out = nx.out_degree_centrality(S_RT) # At least one retweets that a person has made
                print "Done computing Measures"
                # dFF_struc is commented out above, so this NameErrors and
                # the except path fills the structural-hole columns with NaN.
                try:
                    c_size = dFF_struc[node]['C-Size']
                    c_dens = dFF_struc[node]['C-Density']
                    c_hierarch = dFF_struc[node]['C-Hierarchy']
                    c_index = dFF_struc[node]['C-Index']
                except:
                    c_size = "NaN"
                    c_dens = "NaN"
                    c_hierarch = "NaN"
                    c_index = "NaN"
                csv_bridging_writer.writerow([project, project_name, node,
                    listings[node]["competing_lists"],
                    dFF_bin[node], dFF_bin_in[node], dFF_bin_out[node],
                    S_FF.in_degree(node,weight="weight"), S_FF.out_degree(node,weight="weight"),
                    dFF_total_in_groups, dFF_total_out_groups,
                    dFF_rec[node],
                    dFF_bin_betweeness[node],#dFF_bin_closeness[node],dFF_bin_pagerank[node],
                    #c_size,c_dens,c_hierarch,c_index,
                    dAT_bin[node], dAT_bin_in[node], dAT_bin_out[node],
                    S_AT.in_degree(node,weight="weight"), S_AT.out_degree(node, weight="weight"),
                    dAT_total_in_groups, dAT_total_out_groups,
                    dAT_rec[node],
                    dAT_bin_betweeness[node],#dAT_bin_closeness[node], dAT_bin_pagerank[node],
                    #dAT_struc[node]['C-Size'],dAT_struc[node]['C-Density'],dAT_struc[node]['C-Hierarchy'],dAT_struc[node]['C-Index'],
                    dAT_avg_tie[node],dAT_strength_centrality,
                    dRT_in[node],dRT_out[node],
                    S_RT.in_degree(node,weight="weight"), S_RT.out_degree(node,weight="weight")
                    ])
                t_delta = (time.time() - t0)
                print "Count: %s Node: %s Time: %s" % (i,node,t_delta)
                #Remove the nodes again
                S_FF.remove_node(node)
                S_AT.remove_node(node)
                S_RT.remove_node(node)
if __name__ == "__main__":
    main(sys.argv[1:])  # forward command-line args (minus the script name) to main()
# Lecture 5 if Statements
# import this
# a = [1, 2, 3]
# b = [1, 2, 3]
# print(id([1, 2, 3]))
# print(id(a))
# print(id(b))
# print(a == b)
# print (a is b)
#x = None
#print(id(x))
#print(id(None))
#print(x == None)
#print(x is None)
#y = []
#print(y == None)
#print(y is None)
#print(not '0')
#print( 0 or -1 )
#if 2 > 1:
# print('2 > 1')
# if 3 > 1:
# print('3 > 1')
#
#if 2 <= 1:
# print('2 <= 1')
#print('not in the if block')
#if 2 <= 1:
# print('2 <= 1')
#else:
# print('2 > 1')
#if 2 <= 2:
# print('2 <= 2')
#else:
# print('2 > 2')
# Only the first branch whose condition is true runs: 2 <= 2 matches here,
# so this prints '2 <= 2'.
if 2 <= 1:
    print('2 <= 1')
elif 2 <= 2:
    print('2 <= 2')
else:
    print('2 > 1')
# Truthiness demo: None and the empty dict {} are falsy, but the non-empty
# string '0' is truthy, so this prints 3.
if None:
    print(1)
elif {}:
    print(2)
elif '0':
    print(3)
else:
    print(4)
"""
This file contains some constants that represent directions like up or left.
The constants themselves are numbers.
"""
LEFT = 0
RIGHT = 1
UP = 2
DOWN = 3
|
## @file
## @brief Assembler lexer.
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../../external/ply/'))
import ply.lex as lex
import Util
## @brief Reserved word types.
## @details Holds every accepted instruction and the corresponding type.
## @TODO Optimize this for easier localization.
reserved = {
    'noop': 'NOOP',
    'li': 'IMMEDIATE',      # load immediate
    # One-operand register instructions (logic / shifts / rotates).
    'not': 'SINGLE_REG',
    'sl': 'SINGLE_REG',
    'asr': 'SINGLE_REG',
    'sr': 'SINGLE_REG',
    'rl': 'SINGLE_REG',
    'rr': 'SINGLE_REG',
    # Two-operand register instructions (move and ALU operations).
    'move': 'DOUBLE_REG',
    'add': 'DOUBLE_REG',
    'sub': 'DOUBLE_REG',
    'and': 'DOUBLE_REG',
    'or': 'DOUBLE_REG',
    'xor': 'DOUBLE_REG',
    'xnor': 'DOUBLE_REG',
    # Control flow.
    'j': 'JUMP',
    'je': 'BRANCH',
    'jne': 'BRANCH',
    'jl': 'BRANCH',
    'jg': 'BRANCH',
    'call': 'CALL',
    # Memory access.
    'load': 'MEMORY',
    'store': 'MEMORY'
}
## @brief Token list.
## @brief The common tokens and the unique values for the instruction types.
tokens = (
    'COMMA',
    'DOLLAR_SIGN',
    'COLON',
    'IDENTIFIER',
    'NUMBER'
) + tuple(set(reserved.values()))
# Single-character literal tokens (PLY `t_<NAME>` string rules).
t_COMMA = r'\,'
t_DOLLAR_SIGN = r'\$'
t_COLON = r'\:'
# Spaces and tabs are skipped between tokens.
t_ignore = ' \t'
def t_COMMENT(t):
    r';.*'
    # Comments run from ';' to end of line; returning nothing discards them.
    pass
def t_IDENTIFIER(t):
    r'[a-zA-Z_][a-zA-Z0-9_]*'
    # Bug fix: the first character class was '[a-zA-z_]'; the 'A-z' range in a
    # regex also matches '[', '\', ']', '^' and '`', so identifiers could
    # start with those punctuation characters.
    # Reserved words match this same pattern, so reclassify them here.
    t.type = reserved.get(t.value, 'IDENTIFIER')
    return t
def t_NUMBER(t):
    r'[0-9][0-9]*'
    # Convert the matched digit string to an int token value.
    t.value = int(t.value)
    return t
def t_newline(t):
    r'\n+'
    # Track line numbers for error reporting; newlines emit no token.
    t.lexer.lineno += len(t.value)
def t_error(t):
    # Record the offending position on the lexer's error log instead of
    # printing, so callers can inspect all lexing errors afterwards.
    t.lexer.error_log.append({
        'lineno': t.lineno,
        'column': Util.find_column(t)
    })
    # Discard the char that triggered the error to continue parsing.
    t.lexer.skip(1)
## @brief Builds the lexer.
## @return The constructed lexer.
def build_lexer():
    # lex.lex() reflects over this module's `tokens` tuple and t_* rules.
    lexer = lex.lex()
    lexer.error_log = []  # filled by t_error with {lineno, column} entries
    return lexer
|
# -*- coding: utf-8 -*-
# Author: Konstantinos
from collections import OrderedDict
import numpy as np
import scipy as sp
import itertools as it
import scipy.sparse as sps
import matplotlib.pyplot as plt
import multiprocessing
import abc
import time
class Node:
    """A finite-element node: position, kinematic state and DoF bookkeeping."""

    # Maps a degree-of-freedom name to its index in the 6-slot DoF arrays.
    dictionary = {'x': 0, 'y': 1, 'z': 2, 'rx': 3, 'ry': 4, 'rz': 5}

    def __init__(self, coordinates):
        self.label = np.nan              # numeric label, assigned by Model
        self.links = []                  # labels of elements attached to this node
        self.coords = np.array(coordinates)
        self.dsp = np.zeros((6, 1))      # displacements
        self.vlc = np.zeros((6, 1))      # velocities
        self.acl = np.zeros((6, 1))      # accelerations
        self.strain = np.zeros(3)
        self.adof = np.ones(6)*False     # active DoF flags (float 0/1 array)
        self.cdof = np.ones(6)*False     # constrained DoF flags
        self.ndof = np.ones(6)*np.nan    # global DoF numbers, assigned by Model

    def __str__(self):
        string = 'model.Node({})'
        return string.format(self.coords)

    def __repr__(self):
        string = 'Node {} - <{}.{} object at {}>\n'+\
                 '   Coordinates: {}\n'+\
                 '   Active DoF: {}\n'+\
                 '   Constrained DoF: {}\n'+\
                 '   Numeration: {}\n'+\
                 '   Links: {}\n'
        return string.format(self.label,
                             self.__module__,
                             type(self).__name__,
                             hex(id(self)),
                             self.coords,
                             self.adof.T,
                             self.cdof.T,
                             self.ndof.T,
                             self.links)

    def addLink(self, elementLabel):
        """Record that the element with the given label uses this node."""
        self.links.append(elementLabel)

    def setRestraint(self, dofs):
        """Mark the named DoFs (e.g. ['x', 'rz']) as constrained.

        Bug fix: the original iterated over an undefined name ``dofs``,
        used an unqualified ``dictionary`` and wrote to the nonexistent
        attribute ``self.rdof`` -- it raised NameError on every call.
        Constrained flags live in ``cdof`` (see Constraint.addFixation).
        """
        for dof in dofs:
            self.cdof[self.dictionary[dof]] = True

    def SetValue(self, Name, String, Value=6*[True]):
        """Set entries of array attribute `Name` for DoF names in `String`
        ('A' means all six DoFs)."""
        if String == 'A':
            String = self.dictionary.keys()
        for i, j in enumerate(String):
            self.__dict__[Name][self.dictionary[j]] = Value[i]

    def AddValue(self, Name, String, Value):
        """Increment entries of array attribute `Name` for DoF names in
        `String` ('A' means all six DoFs)."""
        if String == 'A':
            String = self.dictionary.keys()
        for i, j in enumerate(String):
            self.__dict__[Name][self.dictionary[j]] += Value[i]
class Element:
    """A 2D finite element defined over a list of Node objects."""

    def __init__(self, nodes, element, material, thickness, irule):
        self.label = None          # assigned by Model during numbering
        self.nodes = nodes         # Node objects in connectivity order
        self.type = element        # formulation object providing getStiffness/getMass
        self.material = material   # list of material objects
        self.thickness = thickness
        self.irule = irule         # integration rule; first two columns are point coords

    def getNodeCoordinates(self):
        """Return the in-plane (x, y) coordinates of the element's nodes."""
        ncoords = np.array([node.coords[:2] for node in self.nodes])
        return ncoords

    def getNodeLabels(self):
        """Return the labels of the element's nodes."""
        labels = [node.label for node in self.nodes]
        return labels

    def getNodeDegreesOfFreedom(self):
        """Return the global numbers of the nodes' first two DoFs as ints."""
        degreesOfFreedom = np.hstack([node.ndof[:2] for node in self.nodes])
        degreesOfFreedom = degreesOfFreedom.astype(int)
        return degreesOfFreedom

    def getIntegrationPoints(self):
        """Return the integration-point coordinates (first two irule columns)."""
        ipoints = self.irule[:, :2]
        return ipoints

    def getType(self):
        """Return the element formulation object."""
        return self.type

    def getStiffness(self):
        """Assemble and return the element stiffness matrix via the formulation."""
        ncoords = np.array([node.coords[:2] for node in self.nodes])
        cmatrix = np.array([material.C for material in self.material])
        thickness, irule = self.thickness, self.irule
        stiffness = self.type.getStiffness(ncoords, cmatrix, thickness, irule)
        return stiffness

    def getMass(self):
        """Assemble and return the element mass matrix via the formulation."""
        ncoords = np.array([node.coords[:2] for node in self.nodes])
        densities = np.array([material.rho for material in self.material])
        thickness, irule = self.thickness, self.irule
        mass = self.type.getMass(ncoords, densities, thickness, irule)
        return mass

    def assemble(self, glob, data, row, col, loc):
        """Scatter the element matrix `glob` into the COO buffers, starting
        at offset `loc`, and return the updated buffers and offset.

        Bug fix: the original advanced ``loc`` *before* writing, which left
        the first element-sized slot of every buffer unused and overran the
        intended capacity by one element's worth of entries.
        """
        globalIndex = np.array([node.ndof[[0, 1]] for node in self.nodes])
        globalIndex = globalIndex.reshape(globalIndex.size)
        dimension = len(globalIndex)
        length = dimension**2
        data[loc: loc+length] = glob.reshape(length)
        row[loc: loc+length] = np.repeat(globalIndex, dimension)
        col[loc: loc+length] = np.tile(globalIndex, dimension)
        return data, row, col, loc + length

    def deformed(self, scale=1, color='r', lnwidth=0.5):
        """Plot the element outline (closed polygon) displaced by dsp*scale."""
        enodes = self.nodes+[self.nodes[0]]
        x = [node.coords[0]+node.dsp[0]*scale for node in enodes]
        y = [node.coords[1]+node.dsp[1]*scale for node in enodes]
        plt.plot(x, y, color, linewidth=lnwidth)
class Load:
    """Applies nodal forces and prescribed displacements to a model."""

    def __init__(self, model):
        self.model = model

    def addForce(self, labels, dofs, functions):
        """Attach load functions to node DoFs and rebuild the load map Sp.

        Restrained DoFs are skipped: they cannot carry an applied force.
        """
        model = self.model
        dic = Node.dictionary
        labels = [labels] if not isinstance(labels, list) else labels
        dofs = [dofs] if not isinstance(dofs, list) else dofs
        for label in labels:
            node = model.nodes[label]
            for dof, function in zip(dofs, functions):
                if node.ndof[dic[dof]] in model.rdof.values():
                    continue
                else:
                    model.loads.append(function)
                    model.ldof[(label, dic[dof])] = int(node.ndof[dic[dof]])
        # Selection matrix mapping each load to its (free) global DoF row.
        model.Sp = np.zeros((len(model.ndof), len(model.ldof)))
        model.Sp[list(model.ldof.values()), range(len(model.ldof))] = 1
        model.Sp = model.Sp[list(model.fdof.values())]

    def addDisplacement(self, labels, dofs, value):
        """Register prescribed-displacement DoFs in the model's load map.

        Bug fixes: the original referenced ``self.mesh``, which is never set
        (``__init__`` stores ``self.model``), so every call raised
        AttributeError; and it used ``break`` where the sibling methods use
        ``continue``, which would stop processing a node's remaining DoFs
        after the first restrained one.
        NOTE(review): ``value`` is accepted but never stored -- confirm
        whether prescribed magnitudes should be recorded somewhere.
        """
        model = self.model
        dic = Node.dictionary
        if not isinstance(labels, list): labels = [labels]
        if not isinstance(dofs, list): dofs = [dofs]
        for label in labels:
            node = model.nodes[label]
            for dof in dofs:
                if node.ndof[dic[dof]] in model.rdof.values():
                    continue
                else:
                    model.ldof[(label, dic[dof])] = int(node.ndof[dic[dof]])
class Constraint:
    """Applies boundary conditions (fixations, springs, point masses) to a model."""
    def __init__(self, model):
        self.model = model
    def addFixation(self, labels, dofs):
        """Fix the named DoFs of the given nodes.

        Each newly fixed DoF is moved from the model's free set (fdof) to the
        restrained set (rdof), and the load map Sp is re-sliced to the
        remaining free DoFs.
        """
        model = self.model
        dic = Node.dictionary
        labels = [labels] if not isinstance(labels, list) else labels
        dofs = [dofs] if not isinstance(dofs, list) else dofs
        for label in labels:
            node = model.nodes[label]
            for dof in dofs:
                if node.ndof[dic[dof]] in model.rdof.values():
                    continue  # already restrained
                else:
                    node.cdof[dic[dof]] = True
                    model.rdof[(label, dic[dof])] = int(node.ndof[dic[dof]])
                    model.fdof.pop((label, dic[dof]))
        model.Sp = model.Sp[list(model.fdof.values())]
    def addSpring(self, labels, dofs, values):
        """Attach grounded springs: appends (node label, dof index, global dof
        number, stiffness value) column-wise to model.springs."""
        labels = [labels] if not isinstance(labels, list) else labels
        dofs = [dofs] if not isinstance(dofs, list) else dofs
        dic = Node.dictionary
        for label in labels:
            node = self.model.nodes[label]
            for dof, value in zip(dofs, values):
                self.model.springs[0].append(label)
                self.model.springs[1].append(dic[dof])
                self.model.springs[2].append(int(node.ndof[dic[dof]]))
                self.model.springs[3].append(value)
    def addMass(self, labels, dofs, value):
        """Attach point masses: appends (node label, dof index, global dof
        number, mass value) column-wise to model.masses.

        NOTE(review): unlike addSpring, the same `value` is applied to every
        listed DoF -- confirm this asymmetry is intended.
        """
        labels = [labels] if not isinstance(labels, list) else labels
        dofs = [dofs] if not isinstance(dofs, list) else dofs
        dic = Node.dictionary
        for label in labels:
            node = self.model.nodes[label]
            for dof in dofs:
                self.model.masses[0].append(label)
                self.model.masses[1].append(dic[dof])
                self.model.masses[2].append(int(node.ndof[dic[dof]]))
                self.model.masses[3].append(value)
class Model:
    """Container for nodes and elements; numbers labels and global DoFs."""

    def __init__(self, nodes=None, elements=None):
        # Bug fix: the original used mutable default arguments ([], []), so
        # every Model() created without arguments shared the same node and
        # element lists across instances.
        self.nodes = [] if nodes is None else nodes
        self.elements = [] if elements is None else elements
        self.ndof = OrderedDict()  # (node label, dof index) -> global dof number
        self.rdof = OrderedDict()  # restrained DoFs
        self.fdof = OrderedDict()  # free DoFs
        self.ldof = OrderedDict()  # loaded DoFs
        self.loads = []
        # Column-wise records: [node labels, dof indices, global dofs, values].
        self.springs = [[], [], [], []]
        self.masses = [[], [], [], []]
        elementCounter = it.count(0)
        nodeCounter = it.count(0)
        dofCounter = it.count(0)
        # Label elements and back-link them onto their nodes.
        for element in self.elements:
            element.label = next(elementCounter)
            for node in element.nodes:
                node.addLink(element.label)
        # Label nodes and number every active DoF sequentially.
        for node in self.nodes:
            node.label = next(nodeCounter)
            for dof in np.flatnonzero(node.adof):
                num = next(dofCounter)
                node.ndof[dof] = num
                self.ndof[(node.label, dof)] = num
                self.fdof[(node.label, dof)] = num
        self.Sp = np.zeros((len(self.fdof), len(self.ldof)))
        self.constraints = Constraint(self)

    def setDampingCoefficients(self, alpha, beta):
        """Specify the proportional damping coefficients."""
        self.alpha = alpha
        self.beta = beta
class Plot(object):
    """Plotting helpers for a mesh: undeformed/deformed views and labels.

    NOTE(review): several methods use Axes3D, which is not imported in this
    module, and call capitalised element/node methods (Undeformed, Deformed,
    PlotId, Mark) that the Element class defined here does not provide --
    these paths look unfinished or tied to a different Element
    implementation; confirm before relying on them.
    """
    def __init__(self, mesh):
        self.mesh = mesh
    def undeformed2(self, split=False, elements=[]):
        # NOTE(review): the `elements` argument is immediately overwritten.
        elements = self.mesh.elements
        # TODO: gather the coordinates into one array and plot in a single call.
        plt.figure()
        for element in elements:
            element.undeformed()
        plt.axis('equal')
        plt.show()
    def undeformed(self, split=False, elements=[]):
        # Plot the undeformed mesh; `split` reuses the current axes.
        if not elements:
            elements = self.mesh.elements
        if not split:
            fig = plt.figure('Undeformed View')
            axis = Axes3D(fig)  # NOTE(review): Axes3D is not imported here
        else:
            axis = plt.gca()
        #axis.set_aspect('equal')
        #axis.auto_scale_xyz()
        # Do not iterate over elements but gather the data to be plotted in
        # a single vector and plot them all together
        plt.ion()
        for element in elements:
            element.Undeformed(axis)
    def modes(self, modes):
        # Placeholder for mode-shape plotting.
        pass
    def deformed(self, scale=1, overwrite=False):
        # Plot the deformed mesh with displacements amplified by `scale`.
        if not overwrite:
            fig = plt.figure('Deformed View')
            axis = Axes3D(fig)
        else:
            axis = plt.gca()
        for element in self.mesh.elements:
            element.Deformed(axis, scale)
    def animated(self, overwrite=False):
        if not(overwrite):
            fig = plt.figure('Animated View')
            axis = Axes3D(fig)
        else:
            axis = plt.gca()
        # To be extended
    def contours(self,overwrite=False):
        if not(overwrite):
            fig=plt.figure('Contour View')
            axis=Axes3D(fig)
        else:
            axis=plt.gca()
        # To be extended
    def elementLabels(self):
        # Draw each element's label into the current axes.
        axis = plt.gca()
        for element in self.mesh.elements:
            element.PlotId(axis)
    def nodeLabels(self):
        # Draw each node's label into the current axes.
        axis=plt.gca()
        for node in self.mesh.nodes:
            node.PlotId(axis)
    def nodeMarks(self):
        # Mark node positions in the 'Contour View' figure.
        fig=plt.figure('Contour View')
        axis=Axes3D(fig)
        #axis=plt.gca()
        for node in self.mesh.nodes:
            node.Mark(axis)
class Matrix(abc.ABC):
    """Abstract global sparse matrix assembled from element matrices.

    Subclasses set ``self.method`` to the name of the Element accessor
    (e.g. 'getStiffness') before delegating to this initializer.
    """
    @abc.abstractmethod
    def __init__(self, model):
        self.model = model
        m = len(self.model.ndof)
        self.full = sps.csr_matrix((int(m), int(m)), dtype=float)
        # Assemble in chunks of `rng` elements to bound the temporary
        # COO buffer size.
        rng = int(5e3)
        # Buffer capacity per chunk; assumes an element contributes at most
        # 25**2 entries -- TODO confirm this bound for all element types.
        length = (25**2)*(rng*3)
        end = len(self.model.elements)
        num = max(end//rng, 2)
        ind = np.linspace(0, end, num, dtype=int)
        for i in range(len(ind)-1):
            data = np.zeros(length, dtype=float)
            row = np.zeros(length, dtype=float)
            col = np.zeros(length, dtype=float)
            loc = 0
            for elm in self.model.elements[ind[i]:ind[i+1]]:
                glob = getattr(elm, self.method)()
                data, row, col, loc = elm.assemble(glob, data, row, col, loc)
            # Duplicate (row, col) pairs are summed by the COO->CSR conversion.
            self.full += sps.csr_matrix((data, (row, col)), shape=(m, m))
        # Lumped springs/masses are added on the diagonal of the full matrix.
        if isinstance(self, Stiffness):
            j, k = self.model.springs[2], self.model.springs[3]
            self.full += sps.csr_matrix((k, (j, j)), shape=(m, m))
        elif isinstance(self, Mass):
            j, k = self.model.masses[2], self.model.masses[3]
            self.full += sps.csr_matrix((k, (j, j)), shape=(m, m))
    def getPartitionFF(self):
        """Return the free-free partition of the full matrix."""
        fdof = list(self.model.fdof.values())
        ff = self.full.tocsc()[:, fdof].tocsr()[fdof, :].tocsc()
        return ff
    def getPartitionFR(self):
        """Return the free-restrained partition of the full matrix."""
        fdof = list(self.model.fdof.values())
        rdof = list(self.model.rdof.values())
        fr = self.full.tocsc()[:, rdof].tocsr()[fdof, :].tocsc()
        return fr
    def getPartitionRF(self):
        """Return the restrained-free partition of the full matrix."""
        fdof = list(self.model.fdof.values())
        rdof = list(self.model.rdof.values())
        rf = self.full.tocsc()[:, fdof].tocsr()[rdof, :].tocsc()
        return rf
    def getPartitionRR(self):
        """Return the restrained-restrained partition of the full matrix."""
        rdof = list(self.model.rdof.values())
        rr = self.full.tocsc()[:, rdof].tocsr()[rdof, :].tocsc()
        return rr
class Stiffness(Matrix):
    """Global stiffness matrix (assembles Element.getStiffness results)."""
    def __init__(self, model):
        self.method = 'getStiffness'
        super().__init__(model)
class Damping(Matrix):
    """Global damping matrix (assembles Element.getDamping results).

    NOTE(review): Element here defines no getDamping -- confirm the
    formulation objects provide it before constructing this matrix.
    """
    def __init__(self, model):
        self.method = 'getDamping'
        super().__init__(model)
class Mass(Matrix):
    """Global mass matrix (assembles Element.getMass results)."""
    def __init__(self, model):
        self.method = 'getMass'
        super().__init__(model)
|
class Card(object):
    """A game card whose payload (weapon, spell or minion) is `contents`."""

    def __init__(self, contents=None):
        self.contents = contents
        self.hand = None
        if contents is not None:
            self.name = contents.name

    def can_play(self):
        """Return True when it is this hand's turn and the card is affordable."""
        board = self.hand.board
        mana_available = board.manaCurrent[board.playerTurn]
        is_our_turn = self.hand.side == board.get_side()
        return is_our_turn and self.contents.manaCost <= mana_available

    def set_contents(self, contents):
        """Set the contents of a card (weapon, spell, minion)."""
        self.contents = contents
        self.name = contents.name
from flask import Blueprint, jsonify, request
import sqlalchemy
from db import db
from helpers import status_response, row_dictify
from models.user import User
blueprint = Blueprint("user", __name__)
prefix = "/user"
@blueprint.route("/")
def get_all_user():
user_list = User.query.all()
return jsonify([row_dictify(user) for user in user_list])
@blueprint.route("/<user_id>")
def get_user(user_id):
user = User.query.filter_by(id=user_id).first()
if not user:
return status_response(404, "Not Found")
return jsonify(row_dictify(user))
@blueprint.route("/<user_id>/posts")
def get_user_posts(user_id):
user = User.query.filter_by(id=user_id).first()
if not user:
return status_response(404, "Not Found")
return jsonify([row_dictify(post) for post in user.posts])
@blueprint.route("/<user_id>/likes")
def get_user_likes(user_id):
user = User.query.filter_by(id=user_id).first()
if not user:
return status_response(404, "Not Found")
return jsonify([row_dictify(like) for like in user.likes])
@blueprint.route("/", methods={"PUT"})
def put_user():
name = request.form.get("name")
if not name:
return status_response(403, "Missing 'name'")
user = User(name=name)
db.session.add(user)
try:
db.session.commit()
except sqlalchemy.exc.SQLAlchemyError as e:
print(e)
return status_response(500, "Cannot Write")
return jsonify(row_dictify(user))
|
from django.db import models
from home.models import clasemodelo
from inv.models import Producto
# Create your models here.
class ComprasEnc(clasemodelo):
    """Purchase header: invoice data and derived totals."""
    fecha_compra=models.DateField(null=True, blank=True)
    observacion=models.TextField(blank=True,null=True)
    no_factura=models.CharField(max_length=100)
    fecha_factura=models.DateField()
    sub_total=models.FloatField(default=0)
    descuento=models.FloatField(default=0)
    total=models.FloatField(default=0)

    def __str__(self):
        return '{}'.format(self.observacion)

    def save(self, *args, **kwargs):
        """Recompute the total before persisting.

        Bug fix: the original ``def save(self)`` signature raised TypeError
        whenever Django passed its standard save options (``force_insert``,
        ``using``, ``update_fields``, ...); accept and forward them.
        """
        self.total = self.sub_total - self.descuento
        super().save(*args, **kwargs)

    class Meta:
        verbose_name_plural="Encabezado compras"
        verbose_name="encabezado compra"
class ComprasDet(clasemodelo):
    """Purchase detail line: quantity and pricing for one product of a purchase."""
    compra=models.ForeignKey(ComprasEnc,on_delete=models.CASCADE)
    producto=models.ForeignKey(Producto,on_delete=models.CASCADE)
    cantidad=models.BigIntegerField(default=0)
    precio_prv=models.FloatField(default=0)
    sub_total=models.FloatField(default=0)
    descuento=models.FloatField(default=0)
    total=models.FloatField(default=0)
    costo=models.FloatField(default=0)

    def __str__(self):
        return '{}'.format(self.producto)

    def save(self, *args, **kwargs):
        """Recompute the line subtotal and total before persisting.

        Bug fix: the original ``def save(self)`` signature raised TypeError
        whenever Django passed its standard save options (``force_insert``,
        ``using``, ``update_fields``, ...); accept and forward them.
        """
        self.sub_total = float(self.cantidad*self.precio_prv)
        self.total = self.sub_total - float(self.descuento)
        super().save(*args, **kwargs)

    class Meta:
        verbose_name_plural="detalles compras"
        verbose_name="detalle compra"
'''Question3 ) Write a program for array rotation?'''
# Read the element count, then collect that many values interactively.
n = int(input("Enter the number of elements required: "))
array = []
for i in range(0, n):
    element = input("Enter the element: ")
    array.append(element)
# NOTE(review): elements are stored as strings (input() is not converted).
print("Before rotation :" , "\narray = ", array)
steps = int(input("Enter a positive number for left rotation and a negative number for right rotation: "))
size = len(array)
def rotate_array(array, steps, size):
    """Rotate `array` left by `steps` positions in place; negative `steps`
    rotates right. Returns the same (mutated) list.

    Bug fix / generalization: the original produced wrong results whenever
    |steps| exceeded `size` (the slice arithmetic only worked for
    -size < steps < size) and would fail on an empty array. Normalizing with
    ``steps % size`` handles every integer `steps`, both signs included.
    """
    if size:
        k = steps % size  # Python's % maps negatives into [0, size)
        array[:] = array[k:] + array[:k]
    return array
print("After rotation :" , "\narray = ", rotate_array(array, steps, size))
|
import turtle as t
# Draw a circle-like shape: 1440 steps of 1px, turning 0.25 degrees each
# step (1440 * 0.25 = 360 degrees total).
t.pensize(5)
for i in range(0, 1440):
    t.forward(1)
    t.left(0.25)
t.done()  # keep the drawing window open until closed by the user
|
"""api URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from launch import urls as launch_urls
from telecom import urls as telecom_urls
from guide import urls as guide_urls
from django.conf.urls import include
urlpatterns = [
    url(r'^admin/', admin.site.urls),  # Django admin site
    # App URLconfs, each mounted under its own prefix and URL namespace.
    url(r'^launch/', include(launch_urls, namespace='launch')),
    url(r'^telecom/', include(telecom_urls, namespace='telecom')),
    url(r'^guide/', include(guide_urls, namespace='guide')),
]
|
import torch
import torch.nn as nn
from models.multi_headed_attn import MultiHeadedAttention
from models.psition_ffn import PositionwiseFeedForward
from models.embedding import Embeds
class DecoderLayer(nn.Module):
    """One pre-norm transformer decoder layer: masked self-attention,
    encoder-decoder attention, then a position-wise feed-forward block,
    each wrapped with dropout and a residual connection."""

    def __init__(self, config):
        super().__init__()
        self.self_attn = MultiHeadedAttention(config)
        self.enc_dec_attn = MultiHeadedAttention(config)
        self.feed_forward = PositionwiseFeedForward(config)
        self.layer_norm1 = nn.LayerNorm(config.model_size, eps=1e-6)
        self.layer_norm2 = nn.LayerNorm(config.model_size, eps=1e-6)
        self.drop = nn.Dropout(config.dropout)

    def forward(self, y, encoder_outs, src_pad_mask, tgt_pad_mask):
        """
        :param y: (batch, tgt_len, model_size) decoder-side inputs
        :param encoder_outs: (batch, src_len, model_size)
        :param src_pad_mask: (batch, tgt_len, src_len) source padding mask
        :param tgt_pad_mask: (batch, tgt_len, tgt_len) target padding mask
        :return: (batch, tgt_len, model_size)
        """
        tgt_len = tgt_pad_mask.size(-1)
        # Causal mask: ones strictly above the diagonal forbid attending to
        # future positions.
        # Bug fix: the original allocated the mask on CPU and moved it to
        # CUDA whenever a GPU was merely *available*, which crashes when the
        # input tensors actually live on the CPU. Allocating directly on
        # y's device keeps the mask and the inputs co-located.
        future_mask = torch.triu(
            torch.ones((tgt_len, tgt_len), dtype=torch.uint8, device=y.device),
            diagonal=1
        )
        future_mask = future_mask.unsqueeze(0).repeat(y.size(0), 1, 1)
        dec_mask = (tgt_pad_mask + future_mask).gt(0)  # padding OR future
        inputs = self.layer_norm1(y)
        attn = self.self_attn(inputs, inputs, inputs, mask=dec_mask)
        attn = self.drop(attn) + y
        attn_norm = self.layer_norm2(attn)
        decoder_outs = self.enc_dec_attn(encoder_outs, encoder_outs, attn_norm, mask=src_pad_mask)
        output = self.feed_forward(self.drop(decoder_outs) + attn)
        return output
class Decoder(nn.Module):
    """Transformer decoder: target embedding, a stack of DecoderLayer
    modules, and a final layer norm."""
    def __init__(self, config):
        super().__init__()
        self.pad = config.pad  # padding token id used to build masks
        self.embedding = Embeds(config.model_size, config.tgt_vocab_size, config)
        self.decoder = nn.ModuleList(
            [DecoderLayer(config)
             for i in range(config.n_layer)])
        # NOTE(review): 'layer_morm' is a typo for 'layer_norm', but renaming
        # the attribute would change checkpoint state_dict keys, so it stays.
        self.layer_morm = nn.LayerNorm(config.model_size, eps=1e-6)
    def forward(self, x, y, encoder_outs):
        """
        :param x: (batch, src_len) source token ids, used only for the padding mask
        :param y: (batch, tgt_len) target token ids
        :param encoder_outs: (batch, src_len, model_size)
        :return: (batch, tgt_len, model_size)
        """
        output = self.embedding(y)
        # Each padding-row mask is repeated across the tgt_len query positions:
        # shapes are (batch, tgt_len, src_len) and (batch, tgt_len, tgt_len).
        src_pad_mask = x.eq(self.pad).unsqueeze(1).repeat(1, y.size(1), 1)
        tgt_pad_mask = y.eq(self.pad).unsqueeze(1).repeat(1, y.size(1), 1)
        for layer in self.decoder:
            output = layer(output, encoder_outs, src_pad_mask, tgt_pad_mask)
        output = self.layer_morm(output)
        return output
from django.shortcuts import render
from django.http import JsonResponse
def index(request):
    """Return a fixed JSON payload: {"key1": "data1"}."""
    payload = {'key1': 'data1'}
    return JsonResponse(payload)
|
from django.contrib import admin
from .models import Post,Userdetails
admin.site.register(Post)         # make Post records manageable in the admin
admin.site.register(Userdetails)  # make Userdetails records manageable in the admin
from compile.util.python_compiler import python_runner
from compile.util.java_compiler import java_runner
# Executable name per (language, major version).
commands = {
    'python': {
        2: 'python',
        3: 'python3'
    },
    'java': {
        8: 'javac'
    }
}
# Source-file extension per language.
extensions = {
    'python': '.py',
    'java': '.java'
}
def get_command(language, version):
    """Resolve (language, version) to the executable used to run/compile it.

    Returns a (success, payload) pair: (True, command) when both the language
    and the version are known, otherwise (False, error message).
    """
    try:
        return True, commands[language][version]
    except KeyError:
        reason = 'Invalid language.' if language not in commands else 'Invalid version.'
        return False, reason
def get_extension(language):
    """Return the source-file extension for `language`.

    NOTE(review): unlike get_command, this does no validation and raises
    KeyError for unknown languages -- presumably callers validate via
    get_command first; confirm.
    """
    return extensions[language]
def get_target_method(language):
    """Map a language name to its runner callable.

    Returns None implicitly for any language other than python/java.
    """
    if language == 'python':
        return python_runner
    elif language == 'java':
        return java_runner
import json
import os
import requests
from nose.tools import *
import sys
import unittest
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
path_dir = os.path.join(ROOT_DIR, "utils")
sys.path.append(path_dir)
from main_functions import *
class TestDashboard(unittest.TestCase):
    """Integration test for GET /dashboard/<company_id>.

    Refactor: the original repeated the same three-assert pattern
    (key present, type matches, value matches) ~60 times; the checks are
    unchanged but factored into small helpers.
    """

    def _assert_field(self, container, key, expected_type, expected_value):
        """Assert `key` exists in `container` with the given type and value."""
        assert_in(key, container)
        assert_equal(type(container[key]), expected_type)
        assert_equal(container[key], expected_value)

    def _assert_journey(self, journey, journey_id, sort_order):
        """Assert a journey entry has the expected id/order and is closed."""
        self._assert_field(journey, 'id', int, journey_id)
        self._assert_field(journey, 'sort_order', int, sort_order)
        self._assert_field(journey, 'current', bool, False)
        self._assert_field(journey, 'open', bool, False)

    def _assert_dre(self, dre):
        """Assert an (empty) DRE statement: every monetary figure is a 0 float."""
        for key in ('provider', 'income', 'sales', 'rent', 'taxForSales',
                    'socialCompanyCharges', 'socialEmployeeCharges',
                    'comissions', 'netProfit', 'taxes', 'finalProfit'):
            self._assert_field(dre, key, float, 0)

    def test_get_dashboard(self):
        header = {'Authorization': get_authorization_token(self)}
        company_id = get_company_id(self, header)
        response = requests.get(f'{URL}/dashboard/{company_id}', headers=header)
        assert_equal(response.status_code, 200)
        json_data = json.loads(response.content)

        assert_in('journeys', json_data)
        journeys = json_data['journeys']
        # Indexing (not zip) so a short journeys list still fails the test.
        for index, (journey_id, order) in enumerate([(10576, 1), (10577, 2), (10578, 3)]):
            self._assert_journey(journeys[index], journey_id, order)

        assert_in('goals', json_data)
        goals = json_data['goals']
        for key in ('breakevenPoint', 'salesGoal', 'totalTaxForSale', 'unitBP', 'unitSG'):
            self._assert_field(goals, key, float, 0)

        assert_in('progress', json_data)
        progress = json_data['progress']
        # progressProduction / progressMarketing are ints in the payload,
        # the rest are floats.
        for key, expected_type in (('progressPlaning', float),
                                   ('progressRH', float),
                                   ('progressProduction', int),
                                   ('progressMarketing', int),
                                   ('progressFinancial', float)):
            self._assert_field(progress, key, expected_type, 0)

        assert_in('saleDevolutionInfo', json_data)
        sale = json_data['saleDevolutionInfo']
        self._assert_field(sale, 'retired', int, 0)
        self._assert_field(sale, 'stock', int, 0)

        assert_in('totalProductsSelled', json_data)
        assert_equal(type(json_data['totalProductsSelled']), float)
        assert_equal(json_data['totalProductsSelled'], 0)

        assert_in('percentAveragePresence', json_data)
        assert_equal(type(json_data['percentAveragePresence']), float)
        assert_equal(json_data['percentAveragePresence'], 0)

        assert_in('formulas', json_data)
        formulas = json_data['formulas']
        for key in ('actionsProfitability', 'totalShareCapital', 'productionGoal', 'saleGoal'):
            self._assert_field(formulas, key, float, 0)

        assert_in('dre1', json_data)
        self._assert_dre(json_data['dre1'])
        assert_in('dre2', json_data)
        self._assert_dre(json_data['dre2'])
|
# In an election there are three candidates. This program asks for the total
# number of voters, lets each voter cast a vote, and finally shows the vote
# count of each candidate.  (Header comment translated from Portuguese.)
eleitores = int(input('Numero de eleitores: '))
count_1 = 0  # votes for candidate 1
count_2 = 0  # votes for candidate 2
count_3 = 0  # votes for candidate 3
for x in range(1, eleitores + 1):
    votos = int(input('Aperte 1 e confirme para votar no candidato 1, Aperte 2 e confirme para votar no candidato 2, Aperte 3 e confirme para votar no candidato 3: '))
    if votos == 1:
        count_1 = count_1 + 1
    elif votos == 2:
        count_2 = count_2 + 1
    elif votos == 3:
        count_3 = count_3 +1
    # Any other number is silently discarded (there is no else branch).
print(f'O numero de votos do candidato 1 foi de : {count_1}')
print(f'O numero de votos do candidato 2 foi de : {count_2}')
print(f'O numero de votos do candidato 3 foi de : {count_3}')
import urllib.request, json
def main():
    """Fetch name records from the mock API, print them sorted, then print
    the first four."""
    with urllib.request.urlopen("https://demo1684309.mockable.io/names") as url:
        data = json.loads(url.read().decode())
        print(data)
        names_in_alphabetical_order = arrange(data)
        print('---Names is---')
        print(names_in_alphabetical_order)
        n_first_names = n_names(names_in_alphabetical_order, 4)
        print(n_first_names)
def arrange(names):
    """Return the 'name' values of the given records, sorted alphabetically."""
    return sorted(record['name'] for record in names)
def n_names(names, n):
    """Return the first `n` names (fewer if the list is shorter)."""
    return names[:n]
if __name__ == '__main__':
    main()  # run only when executed as a script, not on import
from django.db import models
from geofr.constants import REGIONS_WITH_CODES, DEPARTMENTS_WITH_CODES
class RegionField(models.CharField):
    """Model field to store a single French region code.

    (The original docstring was malformed: '"..." ""' concatenated an empty
    string literal onto it.)
    """
    def __init__(self, *args, **kwargs):
        kwargs["max_length"] = kwargs.get("max_length", 2)  # region codes fit in 2 chars
        kwargs["choices"] = REGIONS_WITH_CODES
        super().__init__(*args, **kwargs)
class DepartmentField(models.CharField):
    """Model field to store a single French department code.

    (The original docstring was malformed: '"..." ""' concatenated an empty
    string literal onto it.)
    """
    def __init__(self, *args, **kwargs):
        kwargs["max_length"] = kwargs.get("max_length", 3)  # department codes fit in 3 chars
        kwargs["choices"] = DEPARTMENTS_WITH_CODES
        super().__init__(*args, **kwargs)
|
#Uses python3
import sys
def lcs2(s, t):
    """Compute the Levenshtein edit distance between sequences s and t.

    NOTE(review): despite the name, this computes edit distance, not the
    length of a longest common subsequence.
    """
    rows, cols = len(t) + 1, len(s) + 1
    # First row pre-filled with 0..len(s); first column set to the row index.
    table = [list(range(cols)) for _ in range(rows)]
    for r in range(rows):
        table[r][0] = r
    for c in range(1, cols):
        for r in range(1, rows):
            substitution = 0 if s[c - 1] == t[r - 1] else 1
            table[r][c] = min(
                table[r - 1][c] + 1,                 # insertion
                table[r][c - 1] + 1,                 # deletion
                table[r - 1][c - 1] + substitution,  # match / mismatch
            )
    return table[rows - 1][cols - 1]
if __name__ == '__main__':
    # NOTE(review): 'input' here shadows the builtin input().
    input = sys.stdin.read()
    data = list(map(int, input.split()))
    # Input format: n, then n ints of sequence a, then m, then m ints of b.
    n = data[0]
    data = data[1:]
    a = data[:n]
    data = data[n:]
    m = data[0]
    data = data[1:]
    b = data[:m]
    print(lcs2(a, b))
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import os
import subprocess
import sys
import time
gits_filename = "gits.txt"
backup_dir = ""
gits = []
def process_one_git(one_git):
    """Clone one repository URL into the current working directory.

    Security fix: the original interpolated the URL into a shell command via
    os.system, so a URL containing shell metacharacters could inject
    arbitrary commands. subprocess.run with an argument list never invokes
    a shell.
    """
    print(one_git)
    subprocess.run(['git', 'clone', one_git])
def get_date():
    """Return the current local time as a 'YYYYMMDD_HHMMSS' stamp."""
    return time.strftime("%Y%m%d_%H%M%S", time.localtime())
def check_dir():
    """Ensure the backup directory exists and make it the working directory.

    Side effects: may rewrite the module-level `backup_dir`, creates the
    directory on disk if missing, and chdir()s into it so the subsequent
    `git clone` calls land there.
    """
    global backup_dir
    if backup_dir == "":
        # Default destination: a timestamped folder under the current directory.
        backup_dir = os.path.join(os.getcwd(), f'backup_{get_date()}')
    print(f'backup_dir: {backup_dir}')
    if not os.path.exists(backup_dir):
        os.makedirs(backup_dir)
    os.chdir(backup_dir)
    print(f'working directory: {os.getcwd()}')
def read_list_file():
    """Load repository URLs (one per line) from `gits_filename` into `gits`."""
    global gits
    with open(gits_filename) as fp:
        # Strip trailing newlines/whitespace from every line.
        gits.extend(line.rstrip() for line in fp)
    print(gits)
def main():
    """Read the repository list, prepare the backup directory, clone everything."""
    read_list_file()
    check_dir()
    for repo_url in gits:
        process_one_git(repo_url)


if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
"""Stacker module for creating an ECS Service."""
from troposphere import (
And, Equals, If, Join, Not, Output, Ref, autoscaling, ec2, iam, AWSHelperFn, Base64, ecs
)
from troposphere.autoscaling import MetricsCollection
from troposphere.policies import (
UpdatePolicy, AutoScalingRollingUpdate # , AutoScalingReplacingUpdate
)
from stacker.blueprints.base import Blueprint
from stacker.blueprints.variables.types import TroposphereType
import awacs.ec2
import awacs.s3
import awacs.ssm
import awacs.sts
import json
from awacs.aws import Allow, Policy, Principal, Statement
# Linter is incorrectly flagging the automatically generated functions in awacs
from awacs.aws import StringEquals, StringLike # noqa pylint: disable=no-name-in-module
from stacker.blueprints.variables.types import (
CFNCommaDelimitedList, CFNNumber, CFNString, EC2SecurityGroupIdList,
EC2SubnetIdList
)
from stacker.lookups.handlers.file import parameterized_codec
class ECSService(Blueprint):
    """Stacker blueprint that creates an AWS ECS Service resource."""

    VARIABLES = {
        'ServiceName': {
            'type': CFNString,
            'description': 'URL to set as the upstream',
            'default': 'test'
        },
        'TaskDefinitionARN': {
            'type': CFNString
        },
        'ClusterName': {
            'type': CFNString
        },
        'ECSLoadBalancers': {
            'type': TroposphereType(ecs.LoadBalancer, many=True)
        },
        'DesiredCount': {
            'type': CFNNumber
        },
        'SecurityGroups': {
            'type': EC2SecurityGroupIdList
        },
        'Subnets': {
            'type': EC2SubnetIdList
        }
    }

    def add_ecs_service(self):
        """Add the ECS Service resource to the template."""
        template = self.template
        variables = self.get_variables()
        # The resource is registered on the template by add_resource();
        # the previous unused local binding (`Service = ...`) was removed.
        template.add_resource(
            ecs.Service(
                variables['ServiceName'].value,
                Cluster=variables['ClusterName'].ref,
                DesiredCount=variables['DesiredCount'].ref,
                LaunchType='EC2',
                LoadBalancers=variables['ECSLoadBalancers'],
                # Role=Ref(ecsServiceRole),
                NetworkConfiguration=ecs.NetworkConfiguration(
                    AwsvpcConfiguration=ecs.AwsvpcConfiguration(
                        SecurityGroups=variables['SecurityGroups'].ref,
                        Subnets=variables['Subnets'].ref
                    )
                ),
                TaskDefinition=variables['TaskDefinitionARN'].ref
            )
        )

    def create_template(self):
        """Create template (main function called by Stacker)."""
        template = self.template
        template.add_version('2010-09-09')
        template.add_description(
            "Onica - ECS Service - (1.0.0)"
        )
        self.add_ecs_service()
# Helper section to enable easy blueprint -> template generation
# (just run `python <thisfile>` to output the json)
if __name__ == "__main__":
    from stacker.context import Context
    # Instantiate with a throwaway namespace and print the generated template.
    print(ECSService(
        'test', Context({"namespace": "test"})
    ).to_json())
|
from typing import Optional

import requests
def handle_request(request):
    """Raise-check a completed requests.Response and return it on success.

    NOTE(review): on any request error this only prints a message and falls
    through, so the caller receives None — verify callers handle that.
    """
    try:
        request.raise_for_status()
        return request
    except requests.exceptions.HTTPError as e:
        print(f"HTTP error: {e}")
    except requests.exceptions.ConnectionError as e:
        print(f"Connection error: {e}")
    except requests.exceptions.Timeout as e:
        print(f"Timeout Error: {e}")
    except requests.exceptions.RequestException as e:
        print(f"Something, somewhere went terribly wrong: {e}")
def get_from_url(url: str, headers: Optional[dict] = None) -> requests.Response:
    """
    Sends a get request to the provided url adding the passed headers.

    The default was fixed from "" to None: requests expects its header
    argument to be a mapping or None, never a string.
    """
    data_request = requests.get(url, headers=headers)
    return handle_request(data_request)
def post_from_url(url: str, headers: Optional[dict] = None, data=None, files=None,
                  params: Optional[dict] = None) -> requests.Response:
    """
    Sends a post request to the provided url adding the passed headers, data,
    files and params.

    Defaults fixed from "" to None: requests expects mappings (or None) for
    headers/params/files, and None is its own default for an empty body.
    """
    data_request = requests.post(url, headers=headers, data=data, files=files, params=params)
    return handle_request(data_request)
|
import rosbag
import argparse
import os
import glob
import re
import numpy as np
from cv_bridge import CvBridge
from sensor_msgs.point_cloud2 import read_points, create_cloud_xyz32
from sensor_msgs.msg import CameraInfo
from src.segmentation import PointCloudSegmentation
from src.detections import MaskRCNNDetections
from src.utils import Projection
from ldls_ros.msg import Segmentation
# COCO Class names
CLASS_NAMES = [
'BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
def msg_to_detections(result_msg):
    """Convert a Mask-RCNN result ROS message into a MaskRCNNDetections object.

    Parameters
    ----------
    result_msg: Result
        Mask-RCNN result message carrying class_ids and per-instance masks.

    Returns
    -------
    MaskRCNNDetections
        Detections with binarized masks; rois and scores are left as None
        since downstream code does not use them.
    """
    class_ids = np.array(result_msg.class_ids)
    scores = None  # not needed
    rois = None  # not needed
    bridge = CvBridge()
    if len(result_msg.masks) == 0:
        # No detections: build an empty mask stack.
        # NOTE(review): 374x1238 looks like a hard-coded image size — confirm
        # it matches the camera resolution of the recorded bags.
        shape = (374, 1238)
        masks = np.empty((374, 1238, 0))
    else:
        masks_list = [bridge.imgmsg_to_cv2(m, 'mono8') for m in result_msg.masks]
        shape = masks_list[0].shape
        # Stack per-instance masks along a third axis: (H, W, num_instances).
        masks = np.stack(masks_list, axis=2)
        masks[masks == 255] = 1  # binarize mono8 masks (255 -> 1)
    return MaskRCNNDetections(shape, rois, masks, class_ids, scores)
def keyFunc(afilename):
    """Sort key: the integer formed by concatenating all digits in the name.

    E.g. 'cloud_12.txt' -> 12. Raises ValueError if the name has no digits.
    """
    # Raw string fixes the invalid escape sequence '\D' in a plain literal.
    nondigits = re.compile(r"\D")
    return int(nondigits.sub("", afilename))
def write_bag(intrinsics_path, input_path, gt_path, output_path, mrcnn_results_topic, pc_topic):
    """
    Reads an input rosbag and writes an output bag that contains all input
    messages plus LDLS segmentation results on the topics:
        /ldls/segmentation: ldls_ros.Segmentation
        /ldls/foreground: sensor_msgs.PointCloud2 (visualization aid)

    Parameters
    ----------
    intrinsics_path: str
        Bag containing a CameraInfo message (camera matrix K is read from it).
    input_path: str
        Bag with Mask-RCNN result messages and point clouds.
    gt_path: str
        Directory of per-frame ground-truth .txt files; only frames whose
        timestamp matches a file stem here get segmentation output.
    output_path: str
    mrcnn_results_topic: str
    pc_topic: str
    """
    intrinsics_bag = rosbag.Bag(intrinsics_path, 'r')
    data_bag = rosbag.Bag(input_path, 'r')
    outbag = rosbag.Bag(output_path, 'w')
    pc_list = []
    pc_headers = []
    pc_timestamp = []
    mrcnn_directory = 'mrcnn/'
    if not os.path.exists(mrcnn_directory):
        os.mkdir(mrcnn_directory)
    # Intrinsic camera parameters for raw distorted images
    # including the focal lengths (fx, fy) and principal points (ppx, ppy)
    intrinsics = []
    for topic, msg, t in intrinsics_bag.read_messages():
        print(msg)
        # K is the row-major 3x3 camera matrix: fx=[0], fy=[4], cx=[2], cy=[5].
        intrinsics = msg.K
    focal_lengths = [intrinsics[0], intrinsics[4]]
    print(focal_lengths)
    principal_points = [intrinsics[2], intrinsics[5]]
    print(principal_points)
    projection = Projection(focal_lengths, principal_points)
    # fill ground truth list from file
    #gt_list = ["1576232553.643975019", "1576232553.710680723"]
    gt_list = []
    for file in sorted(glob.glob(os.path.join(gt_path, '*.txt')), key=keyFunc):
        print(file)
        # Ground-truth files are named <timestamp>.txt; keep the stem only.
        pcd_file = os.path.basename(file)
        base = os.path.splitext(pcd_file)[0]
        gt_list.append(base)
    print(gt_list)
    # Write all input messages to the output
    print("Reading messages...")
    for topic, msg, t in data_bag.read_messages():
        outbag.write(topic, msg, t)
    # Collect point clouds. NOTE(review): assumes the point-cloud messages are
    # 1:1 and in the same order as the MRCNN result messages consumed below
    # (they are paired via the shared counter i) — confirm for these bags.
    for topic, msg, t in data_bag.read_messages(topics=[pc_topic]):
        point_gen = read_points(msg)
        points = np.array([p for p in point_gen])
        pc_list.append(points[:, 0:3])  # keep xyz, drop any extra fields
        pc_headers.append(msg.header)
        pc_timestamp.append(t)
    print("Running LDLS...")
    point_cloud_seg = PointCloudSegmentation(projection)
    i = 0
    for topic, msg, t in data_bag.read_messages(topics=[mrcnn_results_topic]):
        if i % 50 == 0:
            print("Message %d..." % i)  # progress every 50 messages
        detections = msg_to_detections(msg)
        print("msg header")
        print(msg.header)
        # Get the class IDs, names, header from the MRCNN message
        class_ids = msg.class_ids
        class_names = list(msg.class_names)
        point_cloud = pc_list[i]
        print("i: ", i)
        print(len(pc_headers))
        header = pc_headers[i]
        ldls_res = point_cloud_seg.run(point_cloud, detections, save_all=False)
        print(header)
        # Rebuild a "secs.nanosecs" timestamp string to match the gt file stems.
        stamp = str(msg.header.stamp.secs) + '.' + "{:09d}".format(msg.header.stamp.nsecs)
        # Restrict the cloud to points visible in the camera.
        point_cloud = point_cloud[ldls_res.in_camera_view, :]
        pc_msgs = []
        # Get segmented point cloud for each object instance
        labels = ldls_res.instance_labels()
        class_labels = ldls_res.class_labels()
        # only compute for evaluation candidates
        if stamp in gt_list:
            # One PointCloud2 per detected object instance (ids start at 1).
            for inst in range(1, len(class_names) + 1):
                in_instance = labels == inst
                if np.any(in_instance):
                    inst_points = point_cloud[in_instance, :]
                    pc_msg = create_cloud_xyz32(header, inst_points)
                    pc_msgs.append(pc_msg)
            ldls_msg = Segmentation()
            ldls_msg.header = header
            ldls_msg.class_ids = ldls_res.class_labels().tolist()
            ldls_msg.class_names = np.array([CLASS_NAMES[j] for j in ldls_msg.class_ids])
            ldls_msg.in_camera_view = ldls_res.in_camera_view.tolist()
            ldls_msg.instance_ids = ldls_res.instance_labels()
            ldls_msg.object_points = pc_msgs
            ldls_msg.points = create_cloud_xyz32(header, ldls_res.points)
            outbag.write('/ldls/segmentation', ldls_msg, t)
            # this is mostly just for visualization
            foreground = point_cloud[class_labels != 0, :]  # class 0 == background
            foreground_msg = create_cloud_xyz32(header, foreground)
            outbag.write('/ldls/foreground', foreground_msg, t)
        i += 1
    intrinsics_bag.close()
    data_bag.close()
    outbag.close()
if __name__ == '__main__':
    # 2d segmentation results
    mrcnn_results_topic = '/object_detection/results'
    # pc2 in color frame
    pc_topic = '/object_detection/pc2'
    # parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-intrinsics_bag",
                        help="path to the bagfile to process")
    parser.add_argument("-data_bag",
                        help="path to the bagfile to process")
    parser.add_argument(
        "-gt_path",
        type=str, help="path to gt data"
    )
    args = parser.parse_args()
    intrinsics_path = args.intrinsics_bag
    data_path = args.data_bag
    gt_path = args.gt_path
    # Fail fast on missing inputs before doing any work.
    if not os.path.exists(data_path):
        raise IOError("Bag file '%s' not found" % data_path)
    if not os.path.exists(gt_path):
        raise IOError("Ground truth file '%s' not found" % gt_path)
    # Output bag is named <input>_ldls.bag next to the input.
    out_name = data_path.split('.bag')[0] + '_ldls.bag'
    write_bag(intrinsics_path, data_path, gt_path, out_name, mrcnn_results_topic, pc_topic)
|
# -*- coding: utf8 -*-
from django import forms
from datetime import timedelta, date, datetime
from django.conf import settings
from inventory.models import Item, Box, Request, InventoryItem, Network
from django.contrib.auth.models import User
def load_dates_initial():
    """Default report period: the last 30 days, ending today."""
    today = date.today()
    return today - timedelta(days=30), today
def load_dates_initial_stats():
    """Stats period: from the configured project start date through today."""
    start = datetime.strptime(settings.START_DATE, settings.FORMAT_DATE)
    return start, date.today()
default_value = [('', '-' * 9)]
def convert_date_to_datetime(date):
    """Promote a date to 23:59:59 of that day, so range filters
    include results from the whole final day."""
    end_of_day = (date.year, date.month, date.day, 23, 59, 59)
    return datetime(*end_of_day)
class Choices:
    """Builds the (pk, name) choice lists used by the inventory forms.

    Box type ids passed to create_box_list: 1, 2, 3, 4, 5, 6 — judging by
    the attribute names they map to storage, expense, receipt, correction,
    persons and locations respectively (confirm against Box.box_type).
    """

    def __init__(self, hide_deleted=True):
        self.hide_deleted = hide_deleted
        self.items = self.create_items_list()
        self.persons = self.create_box_list(5)
        self.storage = self.create_box_list(1)
        self.locations = self.create_box_list(6)
        self.correction = self.create_box_list(4)
        self.expense = self.create_box_list(2)
        self.receipt = self.create_box_list(3)
        # Pre-combined lists for the various form selectors.
        self.storage_with_locations = self.storage + self.locations
        self.expense_and_storage_with_locations = (self.expense + self.storage +
                                                   self.locations)  # for views
        self.basic_set = (self.storage + self.persons + self.locations +
                          self.correction)
        self.boxes_from = self.receipt + self.basic_set
        self.boxes_to = self.expense + self.basic_set
        self.boxes = self.receipt + self.expense + self.basic_set
        self.persons_with_no_default_value = default_value + self.persons

    def create_list(self, objects):
        """Turn a queryset into a list of (pk, name) choice tuples."""
        # Loop variable renamed from `object` to avoid shadowing the builtin.
        return [(obj.pk, obj.name) for obj in objects]

    def create_items_list(self):
        """Choices for all (optionally non-deleted) items, ordered by name."""
        items = Item.objects.all()
        if self.hide_deleted:
            items = items.exclude(deleted=True)
        items = items.order_by('name')
        return self.create_list(items)

    def create_box_list(self, id):
        """Choices for boxes of the given box_type id, ordered by name."""
        boxes = Box.objects.filter(box_type=id)
        if self.hide_deleted:
            boxes = boxes.exclude(deleted=True)
        boxes = boxes.order_by('name')
        return self.create_list(boxes)

    def output(self, list):
        """Prepend the empty choice (0, '') used by optional selects."""
        return [(0, '')] + list

    def output_items(self):
        return self.output(self.items)

    def output_persons(self):
        return self.output(self.persons)

    def output_storage_with_locations(self):
        return self.output(self.storage_with_locations)

    def output_boxes_from(self):
        return self.output(self.boxes_from)

    def output_boxes_to(self):
        return self.output(self.boxes_to)

    def output_boxes(self):
        return self.output(self.boxes)
class RequestAddForm(forms.ModelForm):
    """Form for creating a Request; the person selector is populated from
    the current (non-deleted) person boxes."""

    def __init__(self, *args, **kwargs):
        super(RequestAddForm, self).__init__(*args, **kwargs)
        # Choices are rebuilt per instantiation so newly added persons appear.
        self.fields['person'].choices = Choices().persons_with_no_default_value

    person = forms.ChoiceField(label='Лицо')

    class Meta:
        model = Request
        exclude = ('user', 'date', 'processed')
        widgets = {
            'request_type': forms.HiddenInput(),
            'packet': forms.HiddenInput(),
        }

    def clean_person(self):
        # Resolve the submitted pk into the Box instance itself.
        return Box.objects.get(pk=self.cleaned_data['person'])
class ReceiptForm(forms.ModelForm):
    """Form for one received inventory item; unknown item names are
    created in the database on the fly."""

    item = forms.CharField(label='Наименование',
                           widget=forms.TextInput(
                               attrs={'required': '', 'autofocus': ''}))
    comment = forms.CharField(label='Комментарий', required=False)

    def clean_item(self):
        # Add a new item if it's not in the database yet.
        item, _ = Item.objects.get_or_create(
            name=self.cleaned_data['item'])
        return item

    def clean_quantity(self):
        if self.cleaned_data['quantity'] < 1:
            raise forms.ValidationError(
                'Введите количество большее или равное 1')
        return self.cleaned_data['quantity']

    class Meta:
        model = InventoryItem
        exclude = ('box',)
        widgets = {
            # Raw string: '\d' is an invalid escape in a plain literal on
            # modern Python; the attribute value itself is unchanged.
            'quantity': forms.TextInput(attrs={'required': '',
                                               'pattern': r'\d+',
                                               'title': 'число'}),
        }
class LocationForm(forms.ModelForm):
    """Form for creating/editing a location Box; only the name is exposed."""

    name = forms.CharField(label='Адрес', widget=forms.TextInput(
        attrs={'required': '', 'autofocus': ''}))

    def clean_name(self):
        # Trim surrounding whitespace before saving.
        return self.cleaned_data['name'].strip()

    class Meta:
        model = Box
        exclude = ('box_type', 'deleted')
class NetworkForm(forms.ModelForm):
    """Form for creating/renaming a Network."""

    name = forms.CharField(label="Название", widget=forms.TextInput(
        attrs={'required': '', 'autofocus': ''}))

    def clean_name(self):
        # Trim surrounding whitespace before saving.
        return self.cleaned_data['name'].strip()

    class Meta:
        model = Network
        # Django >= 1.8 raises ImproperlyConfigured when a ModelForm declares
        # neither fields nor exclude; '__all__' reproduces the legacy
        # implicit "all model fields" behaviour.
        fields = '__all__'
class InventoryReportForm(forms.Form):
    """Filter form for the inventory report.

    Every selector is optional; the clean_* methods resolve submitted pks
    into model instances, or None when the empty choice (0) was selected.
    """

    # Sentinel values for the tri-state "is permanent" selector.
    IS_PERMANENT_NOT_SELECTED = 0
    IS_PERMANENT_YES = 1
    IS_PERMANENT_NO = 2

    def __init__(self, *args, **kwargs):
        super(InventoryReportForm, self).__init__(*args, **kwargs)
        # Choice lists are rebuilt per instantiation to stay current.
        choices = Choices()
        self.fields['item'].choices = choices.output_items()
        self.fields['item_is_permanent'].choices = (
            (self.IS_PERMANENT_NOT_SELECTED, ''),
            (self.IS_PERMANENT_NO, 'Нет'),
            (self.IS_PERMANENT_YES, 'Да'))
        self.fields['person'].choices = choices.output_persons()
        self.fields['location'].choices = choices.output_storage_with_locations()
        # values_list() with no args yields tuples of all model fields in
        # declaration order — presumably (pk, name); verify on Network.
        self.fields['network'].choices = [(0, '')] + list(Network.objects.all().values_list())

    item = forms.ChoiceField(label='Наименование', required=False)
    item_is_permanent = forms.ChoiceField(label='Наименование является постоянным', required=False)
    person = forms.ChoiceField(label='Лицо', required=False)
    location = forms.ChoiceField(label='Узел', required=False)
    network = forms.ChoiceField(label='Сеть', required=False)

    def clean_item(self):
        # 0 / the empty choice means "no filter on this selector".
        if not int(self.cleaned_data['item']):
            return
        return Item.objects.get(pk=self.cleaned_data['item'])

    def clean_item_is_permanent(self):
        # Map the tri-state selector onto True / False / None.
        value = int(self.cleaned_data['item_is_permanent'])
        if value == self.IS_PERMANENT_YES:
            return True
        if value == self.IS_PERMANENT_NO:
            return False

    def clean_person(self):
        if not int(self.cleaned_data['person']):
            return
        return Box.objects.get(pk=self.cleaned_data['person'])

    def clean_location(self):
        if not int(self.cleaned_data['location']):
            return
        return Box.objects.get(pk=self.cleaned_data['location'])

    def clean_network(self):
        if not int(self.cleaned_data['network']):
            return
        return Network.objects.get(pk=self.cleaned_data['network'])
def create_form_date_fields(dates):
    """Build a (date_from, date_to) pair of DateFields initialised to `dates`.

    Parameters
    ----------
    dates: 2-tuple of dates -- initial values for the "from" and "to" fields.
    """
    def create_form_date_field(label, initial_date):
        # Raw string: '\d' in a plain literal is an invalid escape on modern
        # Python; the DD.MM.YYYY pattern value itself is unchanged.
        widget_attrs = {'required': '', 'pattern': r'^\d{2}\.\d{2}\.\d{4}$'}
        return forms.DateField(label=label,
                               initial=initial_date,
                               widget=forms.DateInput(
                                   attrs=widget_attrs,
                                   format=settings.FORMAT_DATE),
                               input_formats=(settings.FORMAT_DATE,))

    date_from = create_form_date_field('От', dates[0])
    date_to = create_form_date_field('До', dates[1])
    return (date_from, date_to)
class StatsReportForm(forms.Form):
    """Filter form for the per-user statistics report."""

    def __init__(self, *args, **kwargs):
        super(StatsReportForm, self).__init__(*args, **kwargs)
        # Offer every user, labelled by full name, behind the default entry.
        users = User.objects.all()
        users = [(user.pk, user.get_full_name()) for user in users]
        self.fields['user'].choices = default_value + users

    user = forms.ChoiceField(label='Пользователь', required=False)
    # Period length in days for the report grouping.
    period = forms.ChoiceField(label='Период', required=True, choices=[
        (7, 'неделя'),
        (30, 'месяц'),
        (180, 'полгода')]
    )
    # Date range defaults to the entire stats history (START_DATE .. today).
    date_from, date_to = create_form_date_fields(load_dates_initial_stats())

    def clean_user(self):
        # Empty string means "all users".
        if not self.cleaned_data['user']:
            return
        return User.objects.get(pk=self.cleaned_data['user'])

    def clean_period(self):
        # Choice values arrive as strings; callers need the day count as int.
        return int(self.cleaned_data['period'])
class MovementsReportForm(forms.Form):
    """Filter form for the movements report.

    Uses Choices(False) so deleted boxes/items stay selectable — historical
    movements may still reference them.
    """

    def __init__(self, *args, **kwargs):
        super(MovementsReportForm, self).__init__(*args, **kwargs)
        choices = Choices(False)
        self.fields['box'].choices = choices.output_boxes()
        self.fields['box_from'].choices = choices.output_boxes_from()
        self.fields['box_to'].choices = choices.output_boxes_to()
        self.fields['item'].choices = choices.output_items()

    box = forms.ChoiceField(label='Коробка', required=False)
    box_from = forms.ChoiceField(label='Откуда', required=False)
    box_to = forms.ChoiceField(label='Куда', required=False)
    item = forms.ChoiceField(label='Наименование', required=False)
    date_from, date_to = create_form_date_fields(load_dates_initial())
    comment_sort = forms.BooleanField(label='Сортировка по комментарию',
                                      required=False)

    def clean_item(self):
        # 0 / the empty choice means "no filter on this selector".
        if not int(self.cleaned_data['item']):
            return
        return Item.objects.get(pk=self.cleaned_data['item'])

    def clean_box_from(self):
        if not int(self.cleaned_data['box_from']):
            return
        return Box.objects.get(pk=self.cleaned_data['box_from'])

    def clean_box_to(self):
        if not int(self.cleaned_data['box_to']):
            return
        return Box.objects.get(pk=self.cleaned_data['box_to'])

    def clean_box(self):
        if not int(self.cleaned_data['box']):
            return
        return Box.objects.get(pk=self.cleaned_data['box'])

    def clean_date_to(self):
        # Push the end date to 23:59:59 so the final day is fully included.
        date_to = self.cleaned_data['date_to']
        return convert_date_to_datetime(date_to)
class RequestsListProcessedForm(forms.Form):
    """Filter form for the processed-requests list: a person plus date range."""

    def __init__(self, *args, **kwargs):
        super(RequestsListProcessedForm, self).__init__(*args, **kwargs)
        # Person choices are rebuilt per instantiation.
        self.fields['person'].choices = Choices().persons_with_no_default_value

    person = forms.ChoiceField(label='Лицо', widget=forms.Select(
        attrs={'required': ''}))
    date_from, date_to = create_form_date_fields(load_dates_initial())

    def clean_person(self):
        # 0 / the empty choice means "no person selected".
        if not int(self.cleaned_data['person']):
            return
        return Box.objects.get(pk=self.cleaned_data['person'])

    def clean_date_to(self):
        # Push the end date to 23:59:59 so the final day is fully included.
        date_to = self.cleaned_data['date_to']
        return convert_date_to_datetime(date_to)
|
from subprocess import call
import sys
from os import listdir
from os.path import isfile, join
# Schedule multiple optical flow error tests
# Version numbers for each version to run
# vers_to_run = [1]
vers_to_run = [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 36, 37]
# Fixed: Python 2 `print` statements made this a syntax error under Python 3;
# print() with a single argument behaves the same on both versions.
# Iterate the versions directly instead of indexing with range(len(...)).
for ver in vers_to_run:
    print('Version ' + str(ver))
    # Each run's stdout is redirected to its own log file via the shell.
    call('python opt_flow.py ' + str(ver) +
         ' > log_' + str(ver) + '.txt', shell=True)
|
from bdpair_reg import bdpair_reg
from Gau_one import Gau_one
import pandas as pd
import scipy as sp
class tune_table(object):
    """Fits regression parameters for every bead pair in a dataframe of log
    files and derives tuned interaction parameters from the fits."""

    def __init__(self, pandasfiledataframe):
        self.pfdf = pandasfiledataframe
        # One row per bead pair: fitted slope (aij_int) and intercept (bij_int).
        self.FittingAll = pd.DataFrame(columns=['aij_int', 'bij_int'])

    def gettable(self, ifsaveunselected=False):
        """Run the regression for every bead pair row and collect the fits.

        ifsaveunselected also writes out the conformations discarded for
        being above the energy threshold.
        """
        for Row in self.pfdf.iterrows():
            C_series = Row[1].dropna()
            # iterrows() returns (index, series); series.name is the row index.
            # Define the bdpair_reg object for this pair: it averages,
            # plots the regression, and saves the per-pair xlsx file.
            bdp_reg = bdpair_reg(C_series.name, C_series.iloc[3:].values, C_series.iloc[3:].index, C_series.iloc[0], C_series.iloc[0])
            # Build the dataframes (all and below-threshold); the second
            # argument controls output of discarded conformations.
            bdp_reg.mk2df(C_series.iloc[2], ifsaveunselected)
            # Perform the regression and plot the fitted curve.
            bdp_reg.plotavg()
            # Save the fitted parameters plus the file list for this pair.
            bdp_reg.writexlsx(C_series.name + '_avg_reg.xlsx')
            # Collect the fit. DataFrame.append was removed in pandas 2.0;
            # pd.concat is the supported equivalent on old and new pandas.
            fit_row = pd.DataFrame([[bdp_reg.slope, bdp_reg.intercept]],
                                   index=[bdp_reg.nm],
                                   columns=self.FittingAll.columns.values)
            self.FittingAll = pd.concat([self.FittingAll, fit_row])
            # print() with one argument works on Python 2 and 3 alike
            # (the original bare print statement was Python-2-only).
            print(self.FittingAll)

    def tunevalues(self, R_critical, tunefactor):
        """Derive the tuned aij/bij columns from the fitted slope/intercept."""
        self.factor = tunefactor
        self.rc = R_critical
        self.FittingAll['aij_int_astr'] = self.FittingAll['aij_int'] / self.factor / self.rc / self.rc
        self.FittingAll['bij_int_astr'] = self.FittingAll['bij_int'] / self.factor
        self.FittingAll['aij_ex'] = self.FittingAll['aij_int_astr'] + self.FittingAll['bij_int_astr']
        # NOTE(review): +25.0 looks like a fixed baseline repulsion — confirm.
        self.FittingAll['aij'] = self.FittingAll['aij_ex'] + 25.0

    def writefittingallxlsx(self, xlsxfilename):
        """Write the accumulated fitting table to an xlsx file."""
        self.Fitxlsxfn = xlsxfilename
        writer = pd.ExcelWriter(self.Fitxlsxfn)
        self.FittingAll.to_excel(writer)
        # close() persists the file on both old pandas (where it wraps
        # save()) and pandas >= 2.0 (where save() was removed).
        writer.close()
# example to run:
if __name__ == '__main__':
    # One bead pair (OH-OH): binding energies, energy threshold, then the
    # log files keyed by distance (5.5 .. 7.0).
    ft = pd.DataFrame([[-305.113287901, -305.113287901, 100.0, 'OH-OH_550.log', 'OH-OH_600.log', 'OH-OH_650.log', 'OH-OH_700.log']], index=['OH-OH'], columns=['Ebd1_Hartree', 'Ebd2_Hartree', 'Ethreshold_kcal/mol', 5.5, 6., 6.5, 7.])
    # print() form works on Python 2 and 3; the original bare
    # print statements were Python-2-only syntax.
    print(ft)
    tt = tune_table(ft)
    tt.gettable(True)
    tt.tunevalues(7.11, 2.0)
    print(tt.FittingAll)
    tt.writefittingallxlsx('FittingAll.xlsx')
|
from __future__ import absolute_import
# Tests for ThriftField.
# Some parts based on http://www.djangosnippets.org/snippets/1044/
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import object
import os
import sys
sys.path.insert(1, os.path.join(os.path.dirname(__file__), "gen-py"))
from djangothrift_test_gen.ttypes import TestStruct
import unittest
from desktop.lib.django_test_util import configure_django_for_test, create_tables
configure_django_for_test()
from django.db import models
from desktop.lib.djangothrift import ThriftField
from desktop.lib import django_util
class ThriftTestModel(models.Model):
    """Test-only model pairing a plain integer column with a ThriftField."""

    class Meta(object):
        app_label = "TEST_THRIFT_APP"

    my_int = models.IntegerField()
    # Stores a TestStruct thrift object, serialized via ThriftField.
    my_struct = ThriftField(TestStruct)
class TestThriftField(unittest.TestCase):
    """Round-trip test: a thrift struct saved through ThriftField comes back
    equal (in its JSON-encoded form) after a DB save/load cycle."""

    def test_store_and_retrieve(self):
        create_tables(ThriftTestModel)
        struct = TestStruct()
        struct.a = "hello world"
        struct.b = 12345
        x = ThriftTestModel()
        x.my_int = 3
        x.my_struct = struct
        x.save()
        y = ThriftTestModel.objects.all()[0]
        self.assertEqual(x.my_int, y.my_int)
        # The retrieved struct is compared via its JSON encoding.
        self.assertEqual(django_util.encode_json(x.my_struct), y.my_struct)
        y.delete()
# Run the test case directly when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()
|
# coding=utf-8
import os
import pytest
import rospy
def get_output_file(argv):
    """Extract the xml path from a --gtest_output=xml:<path> argument."""
    for candidate in argv:
        if not candidate.startswith('--gtest_output'):
            continue
        return candidate.split('=xml:')[1]
    raise RuntimeError('No output file has been passed')
def get_add_args(argv):
    # don't include the --gtest, as that is the first non-usersupplied arg;
    # also strip the executable path (argv[0])
    end = next(i for i, arg in enumerate(argv) if arg.startswith('--gtest_output'))
    return argv[1:end]
def run_pytest(argv):
    """Bridge a rostest/gtest-style invocation onto pytest.

    Resolves the test module path from the ROS parameter server, converts
    the gtest xml output argument into pytest's --junitxml, forwards the
    remaining user-supplied args, and returns pytest's exit code.
    """
    output_file = get_output_file(argv)
    test_module = rospy.get_param('test_module')
    module_path = os.path.realpath(test_module)
    add_args = get_add_args(argv)
    call_args = [module_path, '--junitxml={}'.format(output_file)]
    call_args.extend(add_args)
    return pytest.main(call_args)
|
from django.contrib.auth.mixins import UserPassesTestMixin
from django.views.generic import TemplateView, CreateView, UpdateView, DeleteView, DetailView
from django.urls import reverse
from matirbank.filters import MatirBankFilter
from matirbank.forms import MatirBankForm
from matirbank.models import MatirBank
from people.models import UserProfile
from qlcms.utils import get_custom_paginator
class MatirBanks(UserPassesTestMixin, TemplateView):
    """Paginated, filterable list of the banks in the current user's branch."""

    template_name = 'matirbank/all.html'

    def get_context_data(self, **kwargs):
        # Scope the queryset to the requesting user's branch.
        branch_id = UserProfile.objects.filter(user=self.request.user)[0].branch.id
        matirbank_list = MatirBank.objects.filter(branch=branch_id).order_by('-pk')
        # Django Filter
        matirbank_filter = MatirBankFilter(self.request.GET, queryset=matirbank_list, user=self.request.user, request=self.request)
        context = super().get_context_data(**kwargs)
        # members with filtering, members.form.as_p will work
        context['banks'] = get_custom_paginator(matirbank_filter.qs, self.request, 10)
        context['filter'] = matirbank_filter
        context['form_type'] = 'new-form'
        return context

    def test_func(self):
        # Gate access on the model's view permission.
        return self.request.user.has_perm('matirbank.view_matirbank')
class MatirBankCreateViews(UserPassesTestMixin, CreateView):
    """Create a new bank, stamped with the current user's branch."""

    model = MatirBank
    template_name = 'matirbank/create.html'
    form_class = MatirBankForm

    def get_context_data(self, **kwargs):
        context = super(MatirBankCreateViews, self).get_context_data(**kwargs)
        context['view_form_title'] = "Add new bank"
        context['form_type'] = 'new-form'
        return context

    def test_func(self):
        # Requires the model's add permission.
        return self.request.user.has_perm('matirbank.add_matirbank')

    def form_valid(self, form):
        # Save with the creating user's branch id and user pk.
        branch_id = UserProfile.objects.filter(user=self.request.user)[0].branch.id
        form.save(branch_id, self.request.user.pk)
        return super(MatirBankCreateViews, self).form_valid(form)

    def get_form_kwargs(self):
        # MatirBankForm expects the extra 'user' and 'formType' kwargs.
        kwargs = super(MatirBankCreateViews, self).get_form_kwargs()
        kwargs.update({'user': self.request.user, 'formType': 'new'})
        return kwargs

    def get_success_url(self, *args, **kwargs):
        # Back to the bank list after a successful create.
        return reverse("banks")
class MatirBankUpdateViews(UserPassesTestMixin, UpdateView):
    """Edit an existing bank; requires the change permission."""
    # class view will automatically create context = member

    model = MatirBank
    template_name = 'matirbank/create.html'
    form_class = MatirBankForm

    def get_context_data(self, **kwargs):
        context = super(MatirBankUpdateViews, self).get_context_data(**kwargs)
        context['view_form_title'] = "Update matir bank"
        context['form_type'] = 'update-form'
        return context

    def test_func(self):
        return self.request.user.has_perm('matirbank.change_matirbank')

    def get_form_kwargs(self):
        # MatirBankForm expects the extra 'user' and 'formType' kwargs.
        kwargs = super(MatirBankUpdateViews, self).get_form_kwargs()
        kwargs.update({'user': self.request.user, 'formType': 'update'})
        return kwargs

    def get_success_url(self, *args, **kwargs):
        # Back to the bank list after a successful update.
        return reverse("banks")
class MatirBankDeleteViews(UserPassesTestMixin, DeleteView):
    """Confirm-and-delete view for a bank; gated on the delete permission."""

    template_name = 'matirbank/delete.html'
    model = MatirBank

    def test_func(self):
        # Only users holding the delete permission may reach this view.
        return self.request.user.has_perm('matirbank.delete_matirbank')

    def get_success_url(self, *args, **kwargs):
        # Return to the bank list after deletion.
        return reverse("banks")
class MatirBankDetailViews(UserPassesTestMixin, DetailView):
    """Bank detail page including its history entries, newest first."""

    model = MatirBank
    template_name = 'matirbank/show.html'

    def get_context_data(self, **kwargs):
        context = super(MatirBankDetailViews, self).get_context_data(**kwargs)
        bank = MatirBank.objects.get(id=self.kwargs.get('pk'))
        # Related history rows via the reverse FK, newest first.
        context['bank_history'] = bank.bankhistory_set.all().order_by('-id')
        return context

    def test_func(self):
        return self.request.user.has_perm('matirbank.view_matirbank')
|
import struct
import numpy as np
import sys
from sklearn.decomposition import PCA
class Constants:
    """File names of the MNIST training data in idx (ubyte) format."""
    path_images = 'train-images.idx3-ubyte'
    path_labels = 'train-labels.idx1-ubyte'
class PreprocessData:
    """Loaders and simple transforms for MNIST idx-format files."""

    @classmethod
    def get_images(cls, path, limit=800):
        """Load up to `limit` images from an idx3-ubyte file.

        Returns a (N, rows, cols) uint8 array. The previously hard-coded
        800-sample cap is now a parameter (default unchanged).
        """
        with open(path, 'rb') as f:
            # idx3 header: magic number and image count, then rows and cols
            # (all big-endian uint32).
            magic_number, num_images = struct.unpack('>II', f.read(8))
            img_size = struct.unpack('>II', f.read(8))
            return np.frombuffer(f.read(), dtype=np.uint8).reshape(num_images, img_size[0], img_size[1])[:limit]

    @classmethod
    def get_labels(cls, path, limit=800):
        """Load up to `limit` labels from an idx1-ubyte file as a uint8 array."""
        with open(path, 'rb') as f:
            # idx1 header: magic number and label count (big-endian uint32).
            magic_number, num_labels = struct.unpack('>II', f.read(8))
            return np.frombuffer(f.read(), dtype=np.uint8)[:limit]

    @classmethod
    def reshape_flatten_images(cls, images):
        """Flatten each 2-D image into a 1-D feature vector."""
        return [x.flatten() for x in images]

    @classmethod
    def train_test_split(cls, images, labels, test_num):
        """Split off the first `test_num` samples as the test set.

        Returns (train_images, train_labels, test_images, test_labels).
        """
        return images[test_num:], labels[test_num:], images[:test_num], labels[:test_num]
class DataTransformation:
    """Wraps a PCA model fitted once and reused for both data splits."""

    def __init__(self, fit_data, dimensions):
        # Fit PCA on the given data; 'full' solver computes the exact SVD.
        self.model = PCA(n_components=dimensions, svd_solver='full').fit(fit_data)

    def apply_pca_transformation(self, train_images, test_images):
        """Project both splits into the fitted PCA space; returns the pair."""
        return self.model.transform(train_images), self.model.transform(test_images)
class KNNClassifier:
    """k-nearest-neighbour classifier using squared Euclidean distance."""

    def __init__(self, train_images, train_labels):
        self.train_images = train_images
        self.train_labels = train_labels

    def calc_distance(self, point):
        """Map each training-sample index to its squared distance from `point`."""
        dist_dict = {}
        for i, sample in enumerate(self.train_images):
            dist_dict[i] = sum((point[x] - sample[x]) ** 2 for x in range(len(point)))
        return dist_dict

    def calc_vote(self, point):
        """Return (train_index, 1/distance) pairs sorted by weight, descending.

        NOTE(review): an exact match (distance 0) raises ZeroDivisionError.
        """
        dist_map = {k: v for k, v in sorted(self.calc_distance(point).items(), key=lambda item: item[1])}
        return sorted([(index, 1 / dist) for index, dist in dist_map.items()], reverse=True, key=lambda x: x[1])

    def make_predictions(self, test_data, k=None):
        """Predict a label for each test point by plurality vote of the k
        nearest neighbours.

        k defaults to the module-level K (preserving the original script's
        behaviour); pass it explicitly to use the class standalone.
        """
        if k is None:
            k = K  # module-level constant set in __main__
        predictions = []
        for point in test_data:
            vote_map = self.calc_vote(point)[:k]
            # Bug fix: look labels up on the instance; the original read the
            # module-level `train_labels` global instead of self.train_labels.
            vote_map = [(self.train_labels[x[0]], x[1]) for x in vote_map]
            knn_labels = [x[0] for x in vote_map]
            ans = max(set(knn_labels), key=knn_labels.count)
            predictions.append(ans)
        return predictions

    def write_results(self, predictions, test_labels):
        """Write one 'prediction label' line per test sample to results.txt."""
        results = [' '.join([str(pred), str(label)]) for pred, label in zip(predictions, test_labels)]
        with open('results.txt', 'w') as result_file:
            result_file.write('\n'.join(results))
if __name__ == '__main__':
    # read arguments: K = neighbour count, D = PCA dimensions,
    # N = number of test samples, then the data directory path.
    K, D, N = map(int, sys.argv[1:4])
    PATH_TO_DATA_DIR = sys.argv[4]
    # task_1 : preprocess data
    images = PreprocessData.reshape_flatten_images(PreprocessData.get_images('/'.join([PATH_TO_DATA_DIR, Constants.path_images])))
    labels = PreprocessData.get_labels('/'.join([PATH_TO_DATA_DIR, Constants.path_labels]))
    train_images, train_labels, test_images, test_labels = PreprocessData.train_test_split(images, labels, N)
    avg_first_image = np.average(images[0])
    # task_2: PCA
    transformed_train_images, transformed_test_images = DataTransformation(train_images, D).apply_pca_transformation(train_images, test_images)
    # task_3: KNN
    classifier = KNNClassifier(transformed_train_images, train_labels)
    predictions = classifier.make_predictions(transformed_test_images)
    classifier.write_results(predictions, test_labels)
    # report accuracy: correct count and fraction over the N test samples
    correct = 0
    for x, y in zip(predictions, test_labels):
        if x == y:
            correct += 1
    print(" ".join(map(str, [correct, correct / N])))
    print('Done')
|
from enum import Enum
from pydantic import BaseModel
class ImmutableModel(BaseModel):
    """Base pydantic model whose instances are read-only after creation."""

    class Config:
        # Reject attribute assignment after __init__ (pydantic v1 setting).
        allow_mutation = False
class DrivingMode(str, Enum):
    """Available driving modes; str-valued for easy serialization/comparison."""
    ECO = "ECO"
    COMFORT = "COMFORT"
    SPORT = "SPORT"
class AggressiveMode(Enum):
    """Aggressiveness levels; values look like scale factors
    (presumably multipliers — TODO confirm against the consuming code)."""
    Mode1 = 1
    Mode2 = 1.2
    Mode3 = 1.3
class EngineRPMS(ImmutableModel):
    """Engine speed in revolutions per minute (immutable value object)."""

    value: float  # RPM; must be non-negative

    def __init__(self, **data):
        # BUG FIX: the original defined `__attrs_post_init__`, which is an
        # attrs-library hook that pydantic never calls — the non-negativity
        # check was dead code. Run it after pydantic's own __init__ instead.
        super().__init__(**data)
        if self.value < 0:
            raise ValueError()

    def __mul__(self, factor: float) -> "EngineRPMS":
        """Scale the RPM value, returning a new instance."""
        return EngineRPMS(value=self.value * factor)

    def is_above(self, range: "RPMSRange") -> bool:
        """True when this RPM lies above the whole range."""
        return range.end_lower_than(self)

    def is_below(self, range: "RPMSRange") -> bool:
        """True when this RPM lies below the whole range."""
        return range.start_greater_than(self)
class RPMSRange(ImmutableModel):
    """Closed RPM interval delimited by `left` and `right`."""

    left: EngineRPMS
    right: EngineRPMS

    def start_greater_than(self, rpms: EngineRPMS) -> bool:
        """True when the range starts strictly above `rpms`."""
        return rpms.value < self.left.value

    def end_lower_than(self, rpms: EngineRPMS) -> bool:
        """True when the range ends strictly below `rpms`."""
        return rpms.value > self.right.value
class GasPressure(ImmutableModel):
    """Accelerator-pedal pressure as a percentage (0-100)."""

    pressure: int

    # kickdown thresholds (percent of pedal travel)
    KICKDOWN_TH = 50
    AGGR_KICKDOWN_TH = 70

    def __init__(self, **data):
        # BUG FIX: the original used `__attrs_post_init__`, an attrs hook
        # that pydantic never invokes — the range check was dead code.
        super().__init__(**data)
        if self.pressure < 0 or self.pressure > 100:
            raise ValueError()

    def is_kickdown(self):
        """Pedal pressed past the normal kickdown threshold."""
        return self.pressure > self.KICKDOWN_TH

    def is_aggressive_kickdown(self):
        """Pedal pressed past the aggressive kickdown threshold."""
        return self.pressure > self.AGGR_KICKDOWN_TH
|
# Strip the grouping separators out of a human-formatted number string and
# parse the remaining digits as an int.
number = "123,456,789,0000"
# keep only digit characters (drops commas or any other separator)
cleanedNumber = "".join(ch for ch in number if ch in "0123456789")
newNumber = int(cleanedNumber)
print("The number is {}".format(newNumber))
from sklearn import tree
#Imperative programming in python
a = [10,20]
len(a)   # result discarded — illustrative only
type(a)  # result discarded — illustrative only
print(type(a))
#OOP programming in python: instantiating a class from scikit-learn
dt = tree.DecisionTreeClassifier()
#Traditional functional programming
# Deliberately naive index-tracking loop, kept as the baseline that the more
# Pythonic versions below improve on.
age = [1,2,3,4,]
i = 0
for e in age:
    age[i] = e + 10
    i = i + 1
print(age)
#Convert above traditional functional programming to better in python
# enumerate() yields (index, element) pairs, removing the manual counter.
age = [1,2,3,4,]
i = 0
for i,e in enumerate(age):
    age[i] = e + 10
print(age)
#Convert above traditional functional programming to MOST EFFICIENT in python by using
#map object is more effective and parallel process operator instead of for loop
#map object more scalable. Use map object instead of for loop
age = [1,2,3,4,]
i = 0
def incr(e):
    """Return e increased by 10."""
    return e+10
age = map(incr, age)
print(age) #This returns an object.
# BUG FIX: the original called `list(map(incr, age))` here, applying incr a
# SECOND time to the already-mapped iterator (printing [21, 22, 23, 24]).
# Materialising the existing map object is all that is needed for display.
age = list(age)
print(age)
#Let us write even shorter code
#Using lambda. Lambda is anonymous function/in-line funtion
age = [1,2,3,4,]
i = 0
age = list(map(lambda e:e+10, age)) #lambda replaces the incr function.
print(age)
#Note that lambda is used only when you don't want to re-use the function and just at one place.
#!/usr/bin/python
# -*- coding: utf-8 -*-
from PIL import Image
import os
import sys
import glob
# Source and destination directories for the batch resize job.
path = '/Users/tusharnema/Desktop/opencv/google-images-deep-learning/images/taylor'
dirs = os.listdir(path)  # NOTE(review): computed but never used
save = '/Users/tusharnema/Desktop/opencv/google-images-deep-learning/images/new-taylor'
import glob  # NOTE(review): duplicate of the import at the top of the file
imagePaths = [f for f in glob.glob(os.path.join(path,'*.jpg'))] # or .png, .tif, etc
def resize():
    """Resize every image in `imagePaths` to 200x200 and save it as JPEG
    (quality 90) into the `save` directory under its original basename."""
    for item in imagePaths:
        im = Image.open(item)
        im = im.convert('RGB')  # JPEG cannot store alpha/palette modes
        f = os.path.basename(item)
        # Image.LANCZOS is the same filter as the old Image.ANTIALIAS
        # alias, which was deprecated and removed in Pillow 10.
        imResize = im.resize((200, 200), Image.LANCZOS)
        imResize.save(os.path.join(save, f), 'JPEG', quality=90)
resize()
|
import sys
from component import *
def daily():
    """Entry point for tasks that must run once per day (currently none)."""
    pass
def hourly():
    """Entry point for hourly tasks."""
    # WatchToPlaylist comes from `component` (star import above);
    # its init() adds the watch items into the playlist.
    WatchToPlaylist().init()
"""check if the function that needs to be called has been passed """
if len(sys.argv) >= 2:
locals()[sys.argv[1]]()
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from datetime import datetime
def mask_data(df, n, _seed, ignore=None):
    """Randomly mask `n` feature values in every row of `df`.

    Parameters:
        df      -- input DataFrame (not modified)
        n       -- number of features to mask per row
        _seed   -- RNG seed
        ignore  -- optional list of column names that must never be masked

    Returns:
        (masked_df, filling) where filling[row_index][feature] holds the
        original value of each masked cell.
    """
    if ignore is None:  # avoid the mutable-default-argument pitfall
        ignore = []
    df = df.copy()
    f_names = list(df.columns)
    f_without_ignore = [x for x in f_names if x not in ignore]
    # Column name -> positional index. BUG FIX: the original indexed numpy
    # rows with column NAMES (`row[feats]`), which raises for ordinary
    # DataFrames — rows must be indexed by position.
    col_idx = {name: i for i, name in enumerate(f_names)}
    # Object dtype so None can be assigned into numeric columns.
    vals = df.values.astype(object)
    masked_data_filling = {}
    masked_vals = []
    for i, row in enumerate(vals):
        # NOTE(review): reseeding inside the loop (preserved from the
        # original) makes every row mask the SAME feature set — confirm.
        np.random.seed(_seed)
        feats = list(np.random.choice(f_without_ignore, size=n, replace=False))
        # remember the original values before blanking them out
        masked_data_filling[i] = {f: row[col_idx[f]] for f in feats}
        for f in feats:
            row[col_idx[f]] = None
        masked_vals.append(row)
    return pd.DataFrame(masked_vals, columns=f_names), masked_data_filling
def _current_time_string():
now = datetime.now() # current date and time
date_time = now.strftime("%m-%d-%Y %H-%M-%S")
return date_time
def save_results(acc_lst, method, parameters, _seed, title):
    """Plot accuracy-per-acquired-feature and save both the figure (.png)
    and a text summary (.txt) under results/."""
    title += ' - accuracy per feature acquisition'
    fig, ax = plt.subplots(figsize=(10, 6))
    plt.plot(range(len(acc_lst)), acc_lst, label=title)
    plt.xlabel('Used Features')
    plt.ylabel('Accuracy')
    # Major ticks every 5 features (labelled '%d'); minor ticks every 1
    # (unlabelled — default NullFormatter).
    ax.xaxis.set_major_locator(MultipleLocator(5))
    ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))
    ax.xaxis.set_minor_locator(MultipleLocator(1))
    plt.title(title)
    now_str = _current_time_string()
    base = 'results//' + title + ' ' + now_str
    fig.savefig(base + '.png')
    plt.close(fig)
    # Append the run metadata next to the figure; the context manager
    # guarantees the file is closed even if a write fails.
    with open(base + '.txt', 'a') as f:
        f.write('Method: ' + method + '\n')
        f.write('Parameters: ' + str(parameters) + '\n')
        f.write('Seed: ' + str(_seed) + '\n')
        f.write('Accuracy after each feature acquisition: ' + str(acc_lst) + '\n')
|
import os
import sys
def link_duplicates():
    """Read 'hashes.txt' (lines of '<path> - <hash>') and replace every
    duplicate file with a hard link to the first file seen with that hash."""
    with open("hashes.txt", 'r', encoding='utf_8', errors='replace') as hashfile:
        hashmap = []
        for f in hashfile:
            # split on the LAST ' - ' so paths containing ' - ' survive;
            # the trailing newline stays on the hash, consistently for all lines
            filename = "".join(f.rsplit(" - ", 1)[0:-1])
            hash_value = f.rsplit(" - ", 1)[-1]
            hashmap.append((filename, hash_value))
    # Group by hash once (O(n)) instead of comparing every pair (O(n^2)).
    first_with_hash = {}
    for filename, hash_value in hashmap:
        original = first_with_hash.setdefault(hash_value, filename)
        if original == filename:
            continue  # first occurrence of this hash
        try:
            # os.stat inside the try: the original crashed if a listed
            # file had been deleted since the hash file was written
            if os.stat(original).st_ino == os.stat(filename).st_ino:
                continue  # already hard-linked together
            print("Duplicate found! ", original, "=", filename)
            print("Deleting duplicate...", end="")
            os.remove(filename)
            print("Linking...", end="")
            os.link(original, filename)
            print("OK.")
        except FileNotFoundError:
            print("File not found!")
        except PermissionError:
            print("No permission!")
        except OSError:
            print("OS Error!")
link_duplicates()
|
# ! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "Miller"
# Datetime: 2019/10/29 14:31
import socket
# Minimal interactive TCP client: forwards each line typed at the prompt to
# 127.0.0.1:8000 and prints the server's reply (stops only on Ctrl-C/EOF).
sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sk.connect(("127.0.0.1", 8000))
while True:
    # sendall guarantees the whole payload is written; plain send() may
    # transmit only part of the buffer.
    sk.sendall(input(">>>").encode())
    data = sk.recv(1024)
    print(data.decode())
|
#!/usr/bin/env python
# --------------------------------------------------------
# cuts for the telescope planes
# created on March 30th 2022 by M. Reichmann (remichae@phys.ethz.ch)
# --------------------------------------------------------
from numpy import all
from mod.dut_cuts import DUTCut
class TelCut(DUTCut):
    """Cuts for the telescope planes; meta files live under 'tel_cuts'."""
    def __init__(self, ana):
        super().__init__(ana, meta_sub_dir='tel_cuts')
    def make(self, redo=False):
        # Telescope planes define no cuts of their own; overrides the base hook.
        pass
    def make_all_cluster(self):
        """Element-wise AND of the per-plane cluster cuts.

        `all` here is numpy.all (imported at module top), applied along
        axis 0 over the stacked per-plane cut arrays.
        """
        return all([self.make_cluster(pl) for pl in self.Ana.planes], axis=0)
|
import imtools
from PIL import Image, ImageDraw
from numpy import *
from numpy.ma import floor
from pylab import *
import os
import pickle
def example_pca(save=False):
    """Run PCA over the font images and display the mean image plus the
    first 7 principal modes; optionally pickle (immean, V) for reuse."""
    imlist = imtools.get_imlist('fonts')
    im = array(Image.open(imlist[0])) # open one image to get size
    m,n = im.shape[0:2] # get the size of the images
    # create matrix to store all flattened images (one greyscale row each)
    immatrix = array([array(Image.open(im).convert('L')).flatten() for im in imlist],'f')
    # perform PCA
    V,S,immean = imtools.pca(immatrix)
    if save:
        # pickle order matters: mean first, then modes (loader must match)
        with open('font_pca_modes.pkl', 'wb') as f:
            pickle.dump(immean, f)
            pickle.dump(V, f)
    # show some images (mean and 7 first modes)
    figure()
    gray()
    subplot(2,4,1)
    imshow(immean.reshape(m,n))
    for i in range(7):
        subplot(2,4,i+2)
        imshow(V[i].reshape(m,n))
    show()
def example_project_on_first_2_pc():
    """ visualize projecting on the first 2 principal components.
    projecting on 2 pc results in a rank 2 vector. """
    # get list of images
    imlist = imtools.get_imlist('fonts/')
    imnbr = len(imlist)
    # load model file (same dump order as example_pca: mean, then modes)
    with open('font_pca_modes.pkl','rb') as f:
        immean = pickle.load(f)
        V = pickle.load(f)
    # create matrix to store all flattened images
    immatrix = array([array(Image.open(im)).flatten() for im in imlist],'f')
    # project on the first and second components
    # BUG FIX: the original used V[[0, 2]], i.e. the first and THIRD
    # components, contradicting the docstring and the axis labels.
    projected = array([dot(V[[0, 1]], immatrix[i] - immean) for i in range(imnbr)])
    # height and width of the scatter canvas
    h, w = 1200, 1200
    # create a new image with a white background
    img = Image.new('RGB', (w, h), (255, 255, 255))
    draw = ImageDraw.Draw(img)
    # draw axis
    draw.line((0, h / 2, w, h / 2), fill=(255, 0, 0))
    draw.line((w / 2, 0, w / 2, h), fill=(255, 0, 0))
    # scale coordinates to fit, leaving a 20px margin
    scale = abs(projected).max(0)
    scaled = floor(array([(p / scale) * (w / 2 - 20, h / 2 - 20) +
                          (w / 2, h / 2) for p in projected]))
    # paste thumbnail of each image at its projected coordinates
    for i in range(imnbr):
        nodeim = Image.open(imlist[i])
        nodeim.thumbnail((25, 25))
        ns = nodeim.size
        img.paste(nodeim, (int(scaled[i][0] - ns[0] // 2), int(scaled[i][1] -
            ns[1] // 2), int(scaled[i][0] + ns[0] // 2 + 1), int(scaled[i][1] + ns[1] // 2 + 1)))
    figure()
    imshow(img)
    show()
# example_pca()
if __name__ == "__main__":
    # Guard so importing this module does not trigger the GUI demo.
    example_project_on_first_2_pc()
import pickle
from matplotlib import pyplot as plt
from Utils.general import count_zeros_in_gradient
# Pickled per-epoch gradient dumps for the four model variants.
# NOTE(review): the first variable says 15, its path says 25 and its plot
# label says 0.25 — confirm which is correct.
path_to_files_gs_15 = './Results/gradients/grad_gs_25/gradients_100.pkl'
path_to_files_gs_67 = './Results/gradients/grad_gs_67/gradients_100.pkl'
path_to_files_igr_50 = './Results/gradients/grad_igr_50/gradients_100.pkl'
path_to_files_igr_10 = './Results/gradients/grad_igr_10/gradients_100.pkl'
with open(file=path_to_files_gs_15, mode='rb') as f:
    gs_15 = pickle.load(f)
with open(file=path_to_files_gs_67, mode='rb') as f:
    gs_67 = pickle.load(f)
with open(file=path_to_files_igr_10, mode='rb') as f:
    igr_10 = pickle.load(f)
with open(file=path_to_files_igr_50, mode='rb') as f:
    igr_50 = pickle.load(f)
# Per-epoch percentage of vanished gradients for each variant.
grad_gs_15 = count_zeros_in_gradient(grad_dict=gs_15)
grad_gs_67 = count_zeros_in_gradient(grad_dict=gs_67)
grad_igr_10 = count_zeros_in_gradient(grad_dict=igr_10)
grad_igr_50 = count_zeros_in_gradient(grad_dict=igr_50)
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# ===========================================================================
# Plots
# ===========================================================================
plt.style.use(style='ggplot')
plt.figure(dpi=150)
plt.plot(grad_igr_50, label='IGR(0.50)', color='blue')
plt.plot(grad_gs_67, label='GS(0.67)', color='green')
plt.plot(grad_igr_10, label='IGR(0.10)', color='blue', linestyle='--')
plt.plot(grad_gs_15, label='GS(0.25)', color='green', linestyle='--')
plt.ylabel('% of Vanished Gradients in Batch')
plt.xlabel('Epochs')
plt.legend()
# BUG FIX: tight_layout must run BEFORE savefig — calling it afterwards
# (as the original did) has no effect on the saved file.
plt.tight_layout()
plt.savefig('./Results/Outputs/gradients.png')
plt.show()
# ===========================================================================================================
|
from pathlib import Path
from CommonHelper import GVar
from CommonHelper.Common import GetFileName, GetTrainedModelFolder, LoadFileToDict, GetPredictESNFolder
from CommonHelper.GVar import tblFunctionList, tblModel
from keras.models import load_model
from LogHelper.ReadFile import LogFileToTraceList
from LogHelper.SavePredictESN import SavePredictESNFile
from LogHelper.Trace import IntListToTraceSequence, RawTraceToActPer, ActPerToRawTrace
from MachineLearningHelper.trainningHelper import lstm_get_data_from_trace, one_hot_decode, \
multi_decode_with_probability, encode_trace_sequence
from MySQLHelper import Query
from MySQLHelper.Query import GetCellValue, UpdateFinishFuntionInDB
import platform
import os
def LoadModel(logFile, stepIn):
    """Load the trained 1F_Activity_Performer model for `logFile` together
    with its name<->int encoding dictionaries.

    Returns (model, name_to_int_set, int_to_name_set), or
    (None, None, None) when no matching row exists in the model table.
    """
    folderPath = str(Path(os.getcwd()).parent) + GetTrainedModelFolder(logFile)
    stepOut = "1"
    predictType = "1F_Activity_Performer"
    feature = "1.5"
    # Every tblModel lookup below shares the same key columns and values;
    # hoisting them removes the thrice-repeated literal lists.
    keyCols = ['name', 'stepin', 'stepout', 'predicttype', 'feature']
    keyVals = [logFile, str(stepIn), stepOut, predictType, feature]
    resultdf = GetCellValue('id', tblModel, keyCols, keyVals)
    if len(resultdf) == 0:
        print("Trained model does not exist!")
        return None, None, None
    name_to_int_set = LoadFileToDict(
        folderPath + GetCellValue('name_to_int_set', tblModel, keyCols, keyVals)[0])
    int_to_name_set = LoadFileToDict(
        folderPath + GetCellValue('int_to_name_set', tblModel, keyCols, keyVals)[0])
    modelName = resultdf[0]
    _saveModel = load_model(folderPath + modelName)
    return _saveModel, name_to_int_set, int_to_name_set
def PredictPer(_saveModel, actList, stepIn, name_to_int_set, int_to_name_set, perList):
    """Predict the performer for each activity beyond the first `stepIn` steps.

    Returns a new performer list: the first `stepIn` known performers
    followed by the decoded model predictions.
    NOTE(review): `perList` is first padded with "END" placeholders IN
    PLACE, so a caller holding the original list will observe the padding.
    """
    feature, stepOut, predictType = "1.5", "1", "Performer"
    stepIn, stepOut, feature = int(stepIn), int(stepOut), float(feature)
    # pad the performer list with placeholders for the steps to predict
    [perList.append("END") for _ in range(stepIn, len(actList))]
    combineTrace = GVar.CombineTrace(actList, perList)
    subX, _ = lstm_get_data_from_trace(combineTrace, predictType, feature, name_to_int_set, stepIn, stepOut)
    subyhat = _saveModel.predict(subX, batch_size=1, verbose=1)
    # one-hot output -> int labels -> original performer names
    subyhat_decoded = one_hot_decode(subyhat)
    original_subyhat = IntListToTraceSequence(subyhat_decoded, int_to_name_set)
    # keep the known prefix, append the predictions (skipping the first)
    perList = perList[:stepIn]
    [perList.append(y) for y in original_subyhat[1:]]
    return perList
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 24 10:48:07 2015
@author: Administrator
"""
import pyaudio
import wave
import threading
import queue
import numpy as np
# Audio capture configuration.
CHUNK = 1024                         # frames per buffer
FORMAT = pyaudio.paInt16             # 16-bit integer samples
CHANNELS = 1                         # mono
RATE = 44100                         # sample rate in Hz
RECORD_SECONDS = 5                   # NOTE(review): defined but never used below
WAVE_OUTPUT_FILENAME = "output.wav"
data =[]                             # last raw buffer delivered by the callback
p = pyaudio.PyAudio()
q = queue.Queue()                    # hands captured buffers to the reader thread
# define callback (2)
def callback(in_data, frame_count, time_info, status):
    """PyAudio stream callback: queue the captured buffer and wake the reader.

    NOTE(review): reads the module-level `counter`, which is only assigned
    further down the file, and nothing ever decrements it — so the
    paComplete branch appears unreachable as written.
    """
    global data
    q.put(in_data)
    data=in_data
    global ad_rdy_ev
    ad_rdy_ev.set()  # signal the reader thread that a buffer is ready
    if counter <= 0:
        return (None,pyaudio.paComplete)
    else:
        return (None,pyaudio.paContinue)
# open stream using callback (3)
stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                output=False,
                frames_per_buffer=CHUNK,
                stream_callback=callback)
# Prepare the output WAV file with the same parameters as the capture stream.
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
stream.start_stream()
frames=[]    # buffers collected by the reader thread
counter=250  # read by callback(); NOTE(review): never decremented
def read_audio_thead(q,stream,frames,ad_rdy_ev):
    """Drain queued audio buffers into `frames` while the stream is active."""
    while stream.is_active():
        # wait (max 1 s) for the callback to signal that data is ready
        ad_rdy_ev.wait(timeout=1)
        if not q.empty():
            frames.append(q.get())
        ad_rdy_ev.clear()
ad_rdy_ev=threading.Event()
t=threading.Thread(target=read_audio_thead,args=(q,stream,frames,ad_rdy_ev))
t.daemon=True
t.start()
# NOTE(review): the stream is stopped immediately after the reader thread
# starts — there is no sleep/join for RECORD_SECONDS here, so effectively
# no audio is captured before shutdown.
stream.stop_stream()
stream.close()
p.terminate()
print("* done recording")
wf.writeframes(b''.join(frames))
wf.close()
|
from typing import List, Dict, Tuple
from collections import defaultdict, namedtuple
Edge = namedtuple("Edge", ["start", "end"]) # represents an edge
def form_de_bruijn_graph(kmers: List[str]):
    """Build a de Bruijn adjacency map: each k-mer contributes one edge
    from its (k-1)-prefix to its (k-1)-suffix.

    Returns a defaultdict(list) mapping prefix -> list of suffixes
    (parallel edges preserved).
    """
    # (removed the original's unused `prefix_to_node` dict and the stale
    # "node object" comment — the graph maps strings to string lists)
    graph = defaultdict(list)
    for kmer in kmers:
        graph[kmer[:-1]].append(kmer[1:])
    return graph
def find_roots(graph):
    """Return (roots, indegree_counts, outdegree_counts).

    Roots are the graph's nodes with no incoming edges; the two count maps
    are defaultdicts keyed by node label.
    """
    indegree_counts = defaultdict(lambda: 0)
    outdegree_counts = defaultdict(lambda: 0)
    for node in graph:
        children = graph[node]
        outdegree_counts[node] += len(children)
        for child in children:
            indegree_counts[child] += 1
    roots = [node for node in graph if indegree_counts[node] == 0]
    return roots, indegree_counts, outdegree_counts
def find_contigs_old(root, graph, all_contigs, curr_contig, visited_edges):
    """Deprecated DFS contig extraction (superseded by find_nonbranching_paths).

    Walks unvisited edges from `root`, extending `curr_contig` by one
    character per edge; finished contigs are appended to `all_contigs`
    (both `all_contigs` and `visited_edges` are mutated in place).
    """
    children = graph[root]
    if len(children) == 0:
        all_contigs.append(curr_contig)
    elif len(children) == 1:
        if (root, children[0]) not in visited_edges:
            curr_contig += children[0][-1]
            visited_edges.append((root, children[0]))
            # BUG FIX: the recursive calls referenced the old name
            # `find_contigs`, which no longer exists (NameError when run).
            find_contigs_old(children[0], graph, all_contigs, curr_contig, visited_edges)
        else:
            all_contigs.append(curr_contig)
    elif len(children) > 1:
        all_contigs.append(curr_contig)
        for child in children:
            if (root, child) not in visited_edges:
                visited_edges.append((root, child))
                find_contigs_old(child, graph, all_contigs, root+child[-1], visited_edges)
            else:
                all_contigs.append(curr_contig)
def find_nonbranching_paths(graph, indegree_counts, outdegree_counts):
    """Return all maximal non-branching paths (lists of Edges), including
    isolated cycles composed entirely of 1-in-1-out nodes."""
    paths = []
    one_to_one_nodes = set()
    for node in graph:
        if not (indegree_counts[node] == 1 and outdegree_counts[node] == 1):
            if outdegree_counts[node] > 0:
                # start a path at every outgoing edge of a branching node
                for child in graph[node]:
                    edge = Edge(node, child)
                    nonbranching_paths = [edge]
                    # extend while the next node is 1-in-1-out
                    while indegree_counts[child] == 1 and outdegree_counts[child] == 1:
                        edge = Edge(child, graph[child][0])
                        nonbranching_paths.append(edge)
                        child = graph[child][0]
                    paths.append(nonbranching_paths)
        else:
            one_to_one_nodes.add(node)
    # remaining 1-in-1-out nodes can only belong to isolated cycles
    isolated_cycles = []
    while len(one_to_one_nodes) > 0:
        isolated_cycle = []
        node = one_to_one_nodes.pop()
        while graph[node][0] in one_to_one_nodes:
            edge = Edge(node, graph[node][0])
            isolated_cycle.append(edge)
            node = graph[node][0]
            one_to_one_nodes.discard(node)
        # close the cycle when the walk returned to its starting node
        if len(isolated_cycle) > 0 and graph[node][0] == isolated_cycle[0][0]:
            edge = Edge(node, graph[node][0])
            isolated_cycle.append(edge)
            isolated_cycles.append(isolated_cycle)
    paths.extend(isolated_cycles)
    return paths
def get_all_contigs(all_paths):
    """Convert each edge path into its contig string: the first edge's
    start node followed by the last character of every edge's end node."""
    contigs = []
    for path in all_paths:
        pieces = [path[0][0]]
        pieces.extend(edge[1][-1] for edge in path)
        contigs.append("".join(pieces))
    return contigs
if __name__ == "__main__":
file = "3-10-test.txt"
file = "dataset_317292_5 (4).txt"
# file = "Contigs/inputs/test3.txt"
with open(file, "r") as f:
kmers = f.read().strip("\n").split("\n")
graph = form_de_bruijn_graph(kmers)
roots, indegree_counts, outdegree_counts = find_roots(graph)
print (f"indegree_counts: {indegree_counts}")
print (f"outdegree_counts: {outdegree_counts}")
# print (roots)
# print (graph)
# for root in roots:
# contigs = find_contigs(root, graph, all_contigs, root, [])
all_paths = find_nonbranching_paths(graph, indegree_counts, outdegree_counts)
# print (all_paths)
all_contigs = get_all_contigs(all_paths)
print (" ".join(all_contigs)) |
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import xlrd
# Bitcoin market report from 2018
loan_data = "BTC.xls"
# reading in of .xls data
# BUG FIX: the encoding override was misspelled "uft-8", which makes xlrd
# fail with an unknown-codec error.
loan_book = xlrd.open_workbook(loan_data, encoding_override="utf-8")
loan_sheet = loan_book.sheet_by_index(0)
# columns 3/4 hold the daily low (x) and close (y); row 0 is the header
data = np.asarray([[loan_sheet.row_values(i)[3], loan_sheet.row_values(i)[4]]
                   for i in range(1, loan_sheet.nrows)])
n_samples = loan_sheet.nrows
# input 1 variables (Lowest daily value(x) & adjusted closing value(y))
X = tf.placeholder(tf.float32, name='Low')
Y = tf.placeholder(tf.float32, name='Close')
# creating weights and biases
weight = tf.Variable(0.0, name='weights')
biases = tf.Variable(0.0, name='bias')
# model for predicting y
Y_predict = X * weight + biases
loss = tf.square(Y - Y_predict, name='loss')
# optimizing learning_rate to get the minimum loss, 0.1 * 10^-9 best fit line and least amount of loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.000000001).minimize(loss)
init = tf.initialize_all_variables()
with tf.Session() as sess:
    sess.run(init)
    tf.summary.scalar('Prediction', Y_predict)
    tf.summary.scalar('Loss', loss)
    merged_summary = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter('/Users/marcpepperman/Desktop/DL_Lab_1_Team6_Student24/graphs/linear_reg',
                                           graph=tf.get_default_graph())
    # increase range to insure best possible regression line
    for i in range(3000):
        total_loss = 0
        for xs, ys in data:
            _, l, summary = sess.run([optimizer, loss, merged_summary], feed_dict={X: xs, Y: ys})
            summary_writer.add_summary(summary, i)
            total_loss += l
        print('Epoch {0}: {1}'.format(i, total_loss / n_samples))
    weight, biases = sess.run([weight, biases])
X, Y = data.T[0], data.T[1]
plt.plot(X, Y, 'bo', label='Real Data')
plt.plot(X, X * weight + biases, 'r', label='Predicted Data')
plt.legend()
# BUG FIX: savefig must run BEFORE show() — show() hands the figure to the
# GUI loop, and saving afterwards produces a blank image.
plt.savefig('DL_Lab_Number_One.png')
plt.show()
|
from time import sleep
import os
import csv
import usb.core
import numpy as np
import time
# Helper Functions
def translate(value, leftMin, leftMax, rightMin, rightMax):
    """ This helper function scales a value from one range of values to another. It is used in our python spring implementation.
    Much thanks to Adam from stackoverflow:
    http://stackoverflow.com/questions/1969240/mapping-a-range-of-values-to-another/1969296 """
    # Normalise into [0, 1] relative to the source range, then stretch and
    # shift into the target range in a single expression.
    fraction = float(value - leftMin) / float(leftMax - leftMin)
    return rightMin + fraction * (rightMax - rightMin)
class miniproject2:
""" The high level class for controlling our haptic feedback device. """
def __init__(self):
""" Initialize values."""
self.TOGGLE_LED1 = 0
self.SET_DUTY_MOTOR_FORWARD = 1
self.GET_DUTY_MOTOR_FORWARD = 2
self.SET_DUTY_MOTOR_BACK = 3
self.GET_DUTY_MOTOR_BACK = 4
self.GET_ANGLE = 5
self.GET_CURRENT = 6
self.SET_BEHAVIOR = 7
self.dev = usb.core.find(idVendor = 0x6666, idProduct = 0x0003)
if self.dev is None:
raise ValueError('no USB device found matching idVendor = 0x6666 and idProduct = 0x0003')
self.dev.set_configuration()
self.currents = [32500] * 10
def close(self):
""" This function disconnects from the USB device. """
self.dev = None
def toggle_led1(self):
""" This function makes an LED blink on and off.
It was used for testing and learning how to interface with the PIC. """
try:
self.dev.ctrl_transfer(0x40, self.TOGGLE_LED1)
except usb.core.USBError:
print "Could not send TOGGLE_LED1 vendor request."
def set_duty_motor_forward(self, duty):
""" This function takes in a duty as an input and
sets the motor to run forward with a PWM of that duty cycle."""
try:
self.dev.ctrl_transfer(0x40, self.SET_DUTY_MOTOR_FORWARD, int(duty))
except usb.core.USBError:
print "Could not send SET_DUTY vendor request."
def get_duty_motor_forward(self):
""" This function returns the value of the duty cycle of the motor running forward. """
try:
ret = self.dev.ctrl_transfer(0xC0, self.GET_DUTY_MOTOR_FORWARD, 0, 0, 2)
except usb.core.USBError:
print "Could not send GET_DUTY_ vendor request."
else:
return int(ret[0])+int(ret[1])*256
def set_duty_motor_back(self, duty):
""" This function takes in a duty as an input and
sets the motor to run backward with a PWM of that duty cycle."""
try:
self.dev.ctrl_transfer(0x40, self.SET_DUTY_MOTOR_BACK, int(duty))
except usb.core.USBError:
print "Could not send SET_DUTY_MOTOR_BACK vendor request."
def get_duty_motor_back(self):
""" This function returns the value of the duty cycle of the motor running backward. """
try:
ret = self.dev.ctrl_transfer(0xC0, self.GET_DUTY_MOTOR_BACK, 0, 0, 2)
except usb.core.USBError:
print "Could not send GET_DUTY_MOTOR_BACK vendor request."
else:
return int(ret[0])+int(ret[1])*256
def get_angle(self):
""" This function returns the angular displacement from the value of the encoder."""
try:
ret = self.dev.ctrl_transfer(0xC0, self.GET_ANGLE, 0, 0, 2)
except usb.core.USBError:
print "Could not send GET_ANGLE vendor request."
else:
angle_val = int(ret[0])+int(ret[1])*256
if angle_val <13000:
angle_val += 16398
return angle_val
def get_current(self):
""" This function returns the value of the A0 pin, which is proportional to current and torque. """
try:
ret = self.dev.ctrl_transfer(0xC0, self.GET_CURRENT, 0, 0, 2)
except usb.core.USBError:
print "Go straight to jail, do not pass go, do not collect $200"
else:
current = int(ret[0])+int(ret[1])*256
return current
def set_behavior(self, behavior):
""" Change the behavior setting. Controls which simple mechanical concept is simulated."""
try:
self.dev.ctrl_transfer(0x40, self.SET_BEHAVIOR, int(behavior))
print "setting behavior to ", behavior
behaviors = {1: 'spring', 2: 'wall', 3: 'texture', 4: 'damper'}
print "Behavior = Virtual", behaviors[behavior]
except usb.core.USBError:
print "You goofed it real good with the bevavhavior setting thing."
def get_rolling_current(self):
""" This function returns the rolling average of the last 10 current values. """
self.currents.pop(0)
self.currents.append(self.get_current())
current = np.mean(self.currents)
return int(current)
def angle_calibration(self):
""" This function can be used to create a graph of expected angle vs encoder reading. """
angle_calibration = {}
angle_calibration_data = []
for angle in range(0, 360, 5):
print 'Turn to ' + str(angle) + ' degrees'
raw_input("Press Enter to continue...")
print str(angle) +' degrees: ' + str(self.get_angle())
angle_val = self.get_angle()
angle_calibration_data. append({'angle': angle, 'val': angle_val})
angle = angle + 5
fp = '/home/brenna/courses/elecanisms/miniproject2/angle_calibration.csv'
print angle_calibration_data
keys = angle_calibration_data[0].keys()
with open('angle_calibration.csv', 'wb') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(angle_calibration_data)
def spin_down(self):
""" This function can be used to run a spindown test and create a graph. """
self.set_duty_motor_forward(0xffff)
sleep(5)
self.set_duty_motor_forward(0x0000)
angle_spindown_data = []
angle_val = 0
previous_angle_val = 0
start_time = time.time()
rotation_data = []
last_time = start_time
num_rotations = 0
previous_diff = 0
for i in range(0, 500):
angle_val = self.get_angle()
angle_val_original = angle_val
if angle_val - previous_angle_val > 100:
i = i + 1
num_rotations += 1
print 'num rotations =' + str(num_rotations)
print 'angle val: ' + str(angle_val) + 'previous angle: ' + str(previous_angle_val)
#angle_val = angle_val + (num_rotations* 8200)
diff = angle_val - previous_angle_val
if diff > 8000:
diff = previous_diff
previous_diff = diff
diff = -1 * diff
now = time.time()
time_since_start = now - start_time
angle_spindown_data.append({'angle': angle_val, 'diff': diff, 'time': time_since_start})
#print angl5e_val
sleep(.01)
previous_angle_val = angle_val_original
i = i + 1
keys = angle_spindown_data[0].keys()
with open('angle_spindown.csv', 'wb') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(angle_spindown_data)
def save_rolling_current(self):
""" This function saves the rolling current values to a csv. """
mp.set_duty_motor_forward(0x8000)
current_data = []
for i in range(0,1000):
mp.set_duty_motor_forward(i*65)
current_val=mp.get_rolling_current()
current_data.append({'current': current_val})
print i
keys = current_data[0].keys()
with open('current_data_sweep_rolling.csv', 'wb') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(current_data)
print "done"
mp.set_duty_motor_forward(0x0000)
def virtual_spring(self):
""" This function creates a virtual spring with duty proportional to displacement"""
angle = self.get_angle()
diff = abs(angle - 20000)
if diff < 50:
self.set_duty_motor_back(0x0000)
self.set_duty_motor_forward(0x0000)
elif angle > 20000 and angle < 22000:
#180 degrees
duty_forward = translate(diff, 0, 2000, 20000, 65000)
self.set_duty_motor_back(duty_forward)
self.set_duty_motor_forward(0x0000)
elif angle <20000 and angle >18000:
duty_back = translate(diff, 0, 2000, 20000, 65000)
self.set_duty_motor_forward(duty_back)
self.set_duty_motor_back(0x0000)
else:
self.set_duty_motor_back(0x0000)
self.set_duty_motor_forward(0x0000)
def virtual_spring_torque(self):
""" This function creates a virtual spring with torque proportional to displacement"""
angle = self.get_angle()
target_angle = 20000
target_torque = (angle-target_angle)*6
measured_torque = self.get_rolling_current()
duty = 0
p = 6
#print "angle " + str(angle-target_angle) + ' target_torque ' + str(target_torque) + ' measured_torque ' + str(measured_torque)
if abs(abs(target_torque - measured_torque) - 32000) < 100:
print "stop"
self.set_duty_motor_forward(0x0000)
self.set_duty_motor_back(0x0000)
elif -20000 < target_torque < 0:
#self.set_duty_motor_forward
duty = (abs(32000-target_torque-measured_torque)) or 0x0000
# print "left " + str((duty*p))+ " \t actual " + str(self.get_duty_motor_forward())
print('left')
self.set_duty_motor_forward(duty*p)
self.set_duty_motor_back(0x0000)
elif 0 < target_torque < 20000:
duty = (abs(32000-target_torque-measured_torque)) or 0x0000
# print "right " + str((duty*p)) + "\t actual " + str(self.get_duty_motor_back())
print str(measured_torque)
self.set_duty_motor_forward(0x0000)
self.set_duty_motor_back(duty*p)
else:
print "stop"
self.set_duty_motor_forward(0x0000)
self.set_duty_motor_back(0x0000)
def bumpy(self):
""" This function creates a virtual bumpy texture."""
#old_angle = self.get_angle()
for bump in [18000, 20000, 22000, 24000]:
old_angle = self.get_angle()
sleep(.001)
new_angle = self.get_angle()
if abs(old_angle-new_angle) > 1:
if new_angle < bump < old_angle:
print "BOOO"
self.set_duty_motor_forward(0x8000)
self.set_duty_motor_back(0x0000)
elif new_angle > bump > old_angle:
print "HOOO"
self.set_duty_motor_back(0x8000)
self.set_duty_motor_forward(0x0000)
else:
self.set_duty_motor_forward(0x0000)
self.set_duty_motor_back(0x0000)
def virtual_wall(self):
""" This function creates a virtual wall. """
old_angle = self.get_angle()
sleep(.0001)
angle = self.get_angle()
if angle > 22000 and old_angle < 22000:
self.set_duty_motor_back(0x0000)
self.set_duty_motor_forward(0xffff)
elif angle < 22000 and old_angle > 22000:
self.set_duty_motor_forward(0x0000)
self.set_duty_motor_back(0x0000)
def virtual_damper(self):
""" This function creates a virtual damper. """
kp = 6
old_angle = self.get_angle()
sleep(.001)
new_angle = self.get_angle()
w = new_angle - old_angle
if w <= 0:
self.set_duty_motor_forward(0x0000)
self.set_duty_motor_back(0x0000)
else:
self.set_duty_motor_forward(0x0000)
self.set_duty_motor_back(w*kp)
def plot_angle_v_torque(self, title):
"""This function saves a .csv file of torque and angle over time."""
plot_data = []
print "plotting"
for i in range(0, 1000):
angle = self.get_angle()
torque_val = self.get_rolling_current()
plot_data.append({'angle': angle, 'torque': torque_val})
sleep(.01)
fp = '/home/brenna/courses/elecanisms/miniproject2/' + title + '.csv'
print plot_data
keys = plot_data[0].keys()
with open(title + '.csv', 'wb') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(plot_data)
def plot_omega_v_torque(self, title):
"""This function saves a .csv file of torque and angular velocity over time."""
plot_data = []
previous_angle = 0
print "plotting"
for i in range(0, 100):
angle = self.get_angle()
omega = angle - previous_angle
torque_val = self.get_rolling_current()
plot_data.append({'omega': omega, 'torque': torque_val})
sleep(.1)
previous_angle = angle
fp = '/home/brenna/courses/elecanisms/miniproject2/' + title + '.csv'
print plot_data
keys = plot_data[0].keys()
with open(title + '.csv', 'wb') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(plot_data)
if __name__ == "__main__":
DISKO = miniproject2()
|
from TeachersGUI import *
from tkinter import messagebox
from Query import *
class Teachers:
def __init__(self, master, main_window):
    """Wire up the Teachers screen: build the GUI, open a DB handle, and
    bind the button and listbox callbacks."""
    self.master = master
    self.main_window = main_window
    self.grade_list = []    # grades staged for the teacher being created
    self.subject_list = []  # subjects staged for the teacher being created
    self.teachers = TeachersGUI(self.master)
    self.db = Query()
    self.teachers.back_btn.configure(command=self.back_function)
    self.teachers.addGrade_btn.configure(command=self.add_grade)
    self.teachers.addSubject_btn.configure(command=self.add_subject)
    self.teachers.subject_box.bind("<Button-1>", self.subjectBox_function)
    self.teachers.addTeacher_btn.configure(command=self.add_teacher)
def add_teacher(self):
self.get_firstname = self.teachers.firstName_var.get()
self.get_middlename = self.teachers.middleName_var.get()
self.get_lastname = self.teachers.lastName_var.get()
self.get_minutes = self.teachers.totalHours_var.get()
self.get_minutes = self.get_minutes * 60
self.t = [[self.get_firstname, self.get_middlename, self.get_lastname, self.get_minutes]]
if self.get_firstname == "":
return messagebox.showerror("Error", "First name field cannot be empty")
if self.get_middlename == "":
return messagebox.showerror("Error", "Middle name field cannot be empty")
if self.get_lastname == "":
return messagebox.showerror("Error", "Last name field cannot be empty")
if self.get_minutes == 0:
return messagebox.showerror("Error", "Total hours field cannot be 0")
if self.grade_list == []:
return messagebox.showerror("Error", "Grade is empty")
if self.subject_list == []:
return messagebox.showerror("Error", "Subject is empty")
self.reply = messagebox.askquestion("Add teacher", "Are you sure?")
if self.reply.lower() == "yes":
self.db.insert_teacher(self.t, self.subject_list, self.grade_list)
messagebox.showinfo("", "Success")
self.teachers.firstName_var.set("")
self.teachers.middleName_var.set("")
self.teachers.lastName_var.set("")
self.teachers.totalHours_var.set(0)
self.teachers.grade_var.set("")
self.teachers.subject_var.set("")
self.teachers.grade_treeview.delete(*self.teachers.grade_treeview.get_children())
self.teachers.subject_treeview.delete(*self.teachers.subject_treeview.get_children())
self.grade_list = []
self.subject_list = []
def add_grade(self):
self.get_grade = self.teachers.grade_var.get()
if self.get_grade == "":
return messagebox.showerror("Error", "Grade field cannot be empty")
self.grade_list.append(self.get_grade)
self.teachers.grade_treeview.insert("", "end", values=(self.get_grade))
self.teachers.grade_var.set("")
def subjectBox_function(self, event):
try:
self.subjects = []
for i in range(len(self.grade_list)):
self.fetch_subjects = self.db.search_subjects(self.grade_list[i])
for j in range(len(self.fetch_subjects)):
self.subjects.append(self.fetch_subjects[j][0])
print(self.fetch_subjects)
print(self.subjects)
self.teachers.subject_box.configure(values=list(self.subjects))
except:
messagebox.showerror("Error", "Add grade first")
def add_subject(self):
self.get_subject = self.teachers.subject_var.get()
if self.get_subject == "":
return messagebox.showerror("Error", "Subject field cannot be empty")
self.subject_list.append(self.get_subject)
self.teachers.subject_treeview.insert("", "end", values=(self.get_subject))
self.teachers.subject_var.set("")
def back_function(self):
self.master.destroy()
self.main_window.deiconify() |
#!/usr/bin/python3
"""test"""
import unittest
from models.base import Base
class TestBase(unittest.TestCase):
    """Tests for how Base assigns its ``id`` attribute."""

    def test1(self):
        """An explicit integer id is stored verbatim."""
        self.assertEqual(Base(48).id, 48)

    def test2(self):
        """Omitting the id auto-assigns the first sequential id."""
        self.assertEqual(Base().id, 1)

    def test3(self):
        """A non-integer id is stored unchanged."""
        self.assertEqual(Base("Holberton").id, "Holberton")

    def test4(self):
        """Passing None behaves like omitting the id."""
        self.assertEqual(Base(None).id, 1)

    def test6(self):
        """The Base class carries a non-empty docstring."""
        self.assertTrue(len(Base.__doc__) > 0)
"""
141. Linked List Cycle
src: https://leetcode.com/problems/linked-list-cycle/
Given head, the head of a linked list, determine if the linked list has a cycle in it.
There is a cycle in a linked list if there is some node in the list that can be reached again by continuously following the next pointer. Internally, pos is used to denote the index of the node that tail's next pointer is connected to. Note that pos is not passed as a parameter.
Return true if there is a cycle in the linked list. Otherwise, return false.
Example 1:
Input: head = [3,2,0,-4], pos = 1
Output: true
Explanation: There is a cycle in the linked list, where the tail connects to the 1st node (0-indexed).
Example 2:
Input: head = [1,2], pos = 0
Output: true
Explanation: There is a cycle in the linked list, where the tail connects to the 0th node.
Example 3:
Input: head = [1], pos = -1
Output: false
Explanation: There is no cycle in the linked list.
"""
# Definition for singly-linked list.
from typing import Optional
class ListNode:
    """Singly-linked list node: a value plus a ``next`` pointer."""

    def __init__(self, x):
        self.val = x
        self.next = None


def has_cycle(head: ListNode, pos: Optional[int] = None) -> bool:
    """Detect a cycle using Floyd's tortoise-and-hare algorithm.

    src: https://en.wikipedia.org/wiki/Cycle_detection

    ``pos`` mirrors the LeetCode statement and is not used by the algorithm.
    """
    if not head:
        return False
    tortoise = head
    hare = head
    # The hare advances two steps per iteration, the tortoise one; they can
    # only meet again if the list loops back on itself.
    while hare.next and hare.next.next:
        hare = hare.next.next
        tortoise = tortoise.next
        if tortoise == hare:
            return True
    return False
# Build the list 3 -> 2 -> 0 -> -4 and connect the tail back to the node at
# index 1, matching Example 1 (pos = 1) from the problem statement.
list1 = ListNode(3)
list1.next = ListNode(2)
list1.next.next = ListNode(0)
list1.next.next.next = ListNode(-4)
# Bug fix: the original re-assigned `list1.next.next.next` here, overwriting
# the pointer to the -4 node (dropping it from the list) instead of linking
# the tail's `next` back to the second node.
list1.next.next.next.next = list1.next


def test_has_cycle():
    assert has_cycle(list1, 1) == True


has_cycle(list1, 1)
|
# -*- coding:UTF-8 -*-
def countBirths():
    """Sum the yearly US birth counts from the yobYYYY.txt name files.

    Reads ``names/yobYYYY.txt`` for 1880..2017; each line looks like
    ``Mary,F,7065`` and the third column is the count for that name.

    Returns:
        list[tuple[int, int]]: (year, total_births) pairs in year order.
    """
    ret = []
    for y in range(1880, 2018):
        count = 0
        fileName = 'names/yob%d.txt' % y
        with open(fileName, 'r') as f:
            for d in f:  # iterate lazily instead of readlines()
                d = d.strip()  # drop the trailing newline / stray whitespace
                if not d:
                    # Bug fix: the original indexed d[-1] and would raise
                    # IndexError on an empty line.
                    continue
                count += int(d.split(',')[2])
        ret.append((y, count))
    return ret
result = countBirths()
# print(result)
# Write one "year , count" line per year to birth_by_years.csv, echoing each
# line to stdout as it is written.
with open('birth_by_years.csv', 'w') as w :
    for year , birth in result :
        data = '%s , %s\n' %(year , birth)
        print(data)
        w.write(data)
import pytest
from appium import webdriver
class TestElementLocation():
    """Appium UI test: search for a stock in the Xueqiu app and read its price."""

    def setup(self):
        """Start an Appium session against a local emulator (port 21503)."""
        desired_caps = {}
        desired_caps['platformName'] = 'Android'
        # desired_caps['platformVersion'] = '6.0'
        desired_caps['deviceName'] = '127.0.0.1:21503'
        desired_caps['appPackage'] = 'com.xueqiu.android'
        desired_caps['appActivity'] = '.view.WelcomeActivityAlias'
        # Capability-name fixes: the originals ('noRest',
        # 'skipDeviceInitialzation', 'unicodeKeyBoard', 'resetKeyBoard') were
        # misspelled and therefore silently ignored by the Appium server.
        desired_caps['noReset'] = 'true'
        desired_caps['dontStopAppOnReset'] = 'true'
        desired_caps['skipDeviceInitialization'] = 'true'
        desired_caps['unicodeKeyboard'] = 'true'
        desired_caps['resetKeyboard'] = 'true'
        self.driver = webdriver.Remote("http://127.0.0.1:4723/wd/hub", desired_caps)
        self.driver.implicitly_wait(5)

    def teardown(self):
        """Quit the Appium session after each test."""
        self.driver.quit()

    def test_search(self):
        """Search for the stock, open the first result, print its price."""
        el1 = self.driver.find_element_by_id("com.xueqiu.android:id/tv_search")
        el1.click()
        el2 = self.driver.find_element_by_id("com.xueqiu.android:id/search_input_text")
        el2.send_keys("阿里巴巴")
        el3 = self.driver.find_element_by_xpath("//*[@resource-id='com.xueqiu.android:id/name' and @text='阿里巴巴']")
        el3.click()
        # Climb from the stock-name node to its row, then read the price cell.
        current_price = float(self.driver.find_element_by_xpath("//*[@resource-id='com.xueqiu.android:id/stockName' and @text='阿里巴巴']/../..//*[@resource-id='com.xueqiu.android:id/current_price']").text)
        print(current_price)
if __name__ == '__main__':
    # Let pytest discover and run this module's tests when executed directly.
    pytest.main()
from django.urls import path
from . import views
# URL namespace for {% url 'diary:...' %} lookups.
app_name = 'diary'

urlpatterns = [
    path('', views.IndexView.as_view(), name="index"),
    path('inquiry/', views.InquiryView.as_view(), name='inquiry'),
    path('diary-list/', views.DiaryListView.as_view(), name="diary_list"),
    path('diary-detail/<int:pk>/', views.DiaryDetailView.as_view(), name="diary_detail"),
    # Typo fix: the create route was registered as 'dairy-create/'; internal
    # links use the unchanged URL name 'diary_create', so reverse() still works.
    path('diary-create/', views.DiaryCreateView.as_view(), name="diary_create"),
    path('diary-upgrade/<int:pk>/', views.DiaryUpdateView.as_view(), name="diary_upgrade"),
    path('diary-delete/<int:pk>/', views.DiaryDeleteView.as_view(), name="diary_delete"),
]
|
# 下面介绍两种文本向量化
# 方法一:
def tfidf(corpus):
    """Vectorize *corpus* as a term-frequency matrix weighted by tf-idf.

    Args:
        corpus: iterable of documents (strings).

    Returns:
        tuple: ``(word, weight)`` — the vocabulary list, and a dense array
        where ``weight[i][j]`` is the tf-idf weight of word j in document i.
    """
    # Bug fix: these classes were never imported anywhere in this file, so the
    # original raised NameError when called. sklearn is already a dependency
    # of this module (see the clustering helpers below).
    from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
    # Term-frequency matrix: a[i][j] is the count of word j in document i.
    vectorizer = CountVectorizer()
    # Converts raw counts into tf-idf weights.
    transformer = TfidfTransformer()
    freq_word_matrix = vectorizer.fit_transform(corpus)
    # All words of the bag-of-words model.
    word = vectorizer.get_feature_names()
    tfidf = transformer.fit_transform(freq_word_matrix)
    weight = tfidf.toarray()
    # Bug fix: the original computed `word` and `weight` and discarded them.
    return word, weight
# 方法二:
def doc2vec():
    """Train a Doc2Vec model on the tokenised corpus and save it to disk.

    NOTE(review): relies on a module-level ``token_path`` (path to a
    one-document-per-line tokenised file) that is not defined in this
    file — confirm it is bound before calling.
    """
    # Train and persist the model.
    import gensim
    sentences = gensim.models.doc2vec.TaggedLineDocument(token_path)
    # NOTE(review): `size` is the old gensim (<4.0) keyword; newer releases
    # renamed it to `vector_size` — confirm the pinned gensim version.
    model = gensim.models.Doc2Vec(sentences, size=100, window=2, min_count=3)
    model.train(sentences,total_examples=model.corpus_count, epochs=1000)
    model.save('../model/demoDoc2Vec.pkl')
# 下面介绍两种常用的聚类算法
# 1)kmeans
def kmeans():
    """Cluster the Doc2Vec document vectors with K-means (k=20) and print diagnostics.

    NOTE(review): relies on a module-level ``model`` (a trained Doc2Vec) that
    is not defined in this file — confirm it is bound before calling.
    """
    print('Start K-means:')
    from sklearn.cluster import KMeans
    clf = KMeans(n_clusters=20)
    s = clf.fit(model.docvecs)
    # Bug fix: `print s` / `print i, ...` were Python 2 statements in a file
    # that otherwise uses the print() function; they were SyntaxErrors here.
    print(s)
    # The 20 cluster centres.
    print(clf.cluster_centers_)
    # Cluster label of every sample.
    print(clf.labels_)
    # Print (1-based sample index, label) pairs, as the original while-loop did.
    for i, label in enumerate(clf.labels_, start=1):
        print(i, label)
    # Inertia: lower means tighter clusters; use it to judge whether k fits.
    print(clf.inertia_)
# 2)dbscan
def dbscan():
    """Cluster the tf-idf weight matrix with DBSCAN and print the core samples.

    NOTE(review): relies on a module-level ``weight`` (the tf-idf matrix that
    ``tfidf`` computes) which is never assigned globally in this file —
    confirm how it is wired up before calling.
    """
    from sklearn.cluster import DBSCAN
    # Compute DBSCAN
    db = DBSCAN(eps=0.005, min_samples=10).fit(weight)
    print(db.core_sample_indices_)
    # NOTE(review): this expression's result is discarded — presumably it was
    # meant to be printed or returned.
    db.labels_
|
import webapp2
import jinja2
import os
import urllib
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape=True)
class Handler(webapp2.RequestHandler):
    """Base request handler adding jinja2 template-rendering helpers."""

    def write(self, *a, **kw):
        # Thin pass-through to the webapp2 response writer.
        self.response.write(*a, **kw)

    def render_str(self, template, **params):
        """Render *template* (looked up in the module-level jinja_env) to a string."""
        t = jinja_env.get_template(template)
        return t.render(params)

    def render(self, template, **kw):
        """Render *template* with **kw and write it to the response."""
        self.write(self.render_str(template, **kw))
def govt_open():
    """Return True unless nasa.gov redirects to the US shutdown notice page.

    During a US government shutdown agency sites redirect to notice.usa.gov;
    the final URL after redirects therefore tells us whether NASA is "open".
    (Python 2: urllib.urlopen follows redirects and exposes the final URL.)
    """
    u = urllib.urlopen('http://www.nasa.gov/')
    return not u.url == 'http://notice.usa.gov'
class MainHandler(Handler):
    """Serves the home page with the current government open/closed status."""

    def get(self):
        # is_open feeds the template's open/closed banner.
        self.render('home.html', is_open=govt_open())
# WSGI entry point: route the site root to MainHandler.
app = webapp2.WSGIApplication([
    ('/', MainHandler),
], debug=True)
|
# Solution 1
# O(n) time / O(n) space
def findSuccessor(tree, node):
    """Return the in-order successor of *node* in *tree*, or None if it is last.

    O(n) time / O(n) space: materialise the full in-order traversal, then
    return the element that follows *node*.
    """
    ordering = getInOrderTraversalOrder(tree)
    for position, candidate in enumerate(ordering):
        if candidate != node:
            continue
        if position + 1 < len(ordering):
            return ordering[position + 1]
        return None
def getInOrderTraversalOrder(node, order=None):
    """Collect the subtree rooted at *node* in in-order sequence.

    Bug fix: the original used a mutable default argument (``order=[]``),
    so the accumulator list was shared across top-level calls and grew on
    every invocation. A fresh list is now created per call.

    Args:
        node: Subtree root (or None).
        order: Accumulator used by the recursion; external callers omit it.

    Returns:
        list: The nodes in in-order sequence.
    """
    if order is None:
        order = []
    if node is None:
        return order
    getInOrderTraversalOrder(node.left, order)
    order.append(node)
    getInOrderTraversalOrder(node.right, order)
    return order
# Solution 2
# O(h) time / O(1) space
# h - height of the tree
def findSuccessor(tree, node):
    """Return the in-order successor of *node*, or None if it is the last node.

    O(h) time / O(1) space using parent pointers: the successor is either the
    leftmost node of the right subtree, or the first ancestor reached from a
    left child.
    """
    if node.right is None:
        return getRightmostParent(node)
    return getLeftmostChild(node.right)
def getLeftmostChild(node):
    """Return the leftmost node of the subtree rooted at *node*."""
    current = node
    while current.left is not None:
        current = current.left
    return current
def getRightmostParent(node):
    """Climb while the current node is its parent's right child.

    Returns the first ancestor that is entered from the left, or None when
    *node* is the rightmost node of the whole tree.
    """
    current = node
    parent = current.parent
    while parent is not None and parent.right == current:
        current, parent = parent, parent.parent
    return parent
# This is the Binary Tree input class.
class BinaryTree:
    """Binary tree node with a parent pointer (input class supplied by the problem)."""

    def __init__(self, value, left=None, right=None, parent=None):
        self.value = value    # node payload
        self.left = left      # left child or None
        self.right = right    # right child or None
        self.parent = parent  # parent node, None for the root
|
#coding=utf-8
__author__ = 'hqx'
import time
#登陆操作
# Login flow.
def login(run, phone, code):
    """Log in through the app UI: fill phone/code, accept privacy, submit.

    Args:
        run: Page-object wrapper exposing the UI actions used below.
        phone: Phone number to type.
        code: Verification code to type.

    Any UI failure is caught and printed so a failed login does not abort
    the calling test run (best-effort by design).
    """
    try:
        run.input_phone(phone)
        run.input_code(code)
        run.click_privacy()
        run.click_login()
        run.mes_person()  # presumably verifies the logged-in state — confirm in the page object
        run.saveScreenshot('login')
        time.sleep(1)  # let the UI settle
    except Exception as e:
        print(e)
#登出操作
# Logout flow.
def logout(run):
    """Log out through the app UI: home -> settings -> sign out -> confirm.

    Args:
        run: Page-object wrapper exposing the UI actions used below.
    """
    run.click_personhome4()
    run.click_setting7()
    run.click_signout()
    run.click_surebutton()
    time.sleep(1)  # wait for the sign-out to complete
    run.mes_logout()  # presumably verifies the logged-out state — confirm in the page object
    time.sleep(1)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import os
import math

# Work relative to this script's directory so photo1.jpg is found.
os.chdir(os.path.dirname(os.path.abspath(__file__)))

img = cv2.imread('photo1.jpg')
result3 = img.copy()
# Optional pre-processing kept for reference (used once to locate the corners):
# img = cv2.GaussianBlur(img,(3,3),0)            # Gaussian blur to reduce noise
# gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)    # Canny needs a grayscale image
# edges= cv2.Canny(gray,50,150,apertureSize=3)   # edge map helps pick the corners
# cv2.imwrite('canny.jpg',edges)
'''
注意这里src和dst的输入并不是图像,而是图像对应的顶点坐标。
'''
# The document's four corners in the photo, found with a paint tool/matplotlib.
src = np.float32([[207,151],[517,285],[17,601],[343,731]])
# Output size derived from the corner distances (top edge / left edge).
width = int(math.sqrt((207-517)**2+(151-285)**2))   # 337
height = int(math.sqrt((207-17)**2+(151-601)**2))   # 488
print(f'变换后图像的高度:{height},宽度:{width}')
# Consistency fix: build dst and the warp size from the computed width/height
# instead of repeating the magic literals 337/488.
dst = np.float32([[0,0],[width,0],[0,height],[width,height]])
# Build the perspective matrix and apply the warp.
m = cv2.getPerspectiveTransform(src,dst)
result = cv2.warpPerspective(result3,m,(width,height))  # target size is (width, height)
cv2.imshow('src',img)
cv2.imshow('result',result)
cv2.waitKey(0)
"""urlshortener URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from shortener.views import (
AboutView,
ContactFormView,
PageURLListView,
DashboardView,
ShortenView,
NewURLFormView,
UpdateURLFormView,
StatView,
delete_url,
visiturl,
CampaignListView,
CampaignCreateView,
CampaignUpdateView,
CampaignDetailView,
NewCampaignURLFormView,
BulkCampaignCreateView,
PixelListView, PixelCreateView
)
urlpatterns = [
    url(r'^about/$', AboutView.as_view(), name="about"),
    url(r'^accounts/', include('allauth.urls')),
    url(r'^invitations/', include('invitations.urls', namespace='invitations')),
    url(r'^i18n/', include('django.conf.urls.i18n')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^ajax/', include('shortener.api.urls')),
    url(r'^contact/$', ContactFormView.as_view(), name="contact"),
    url(r'^control/', include('control.urls')),
    url(r'^settings/', include('core.urls')),
    url(r'^tests/', include('splittests.urls')),
    url(r'^campaigns/$', CampaignListView.as_view(), name="campaigns"),
    url(r'^campaigns/new/$', CampaignCreateView.as_view(), name="campaign-create"),
    url(r'^campaigns/bulk/$', BulkCampaignCreateView.as_view(), name="campaign-bulk"),
    url(r'^campaigns/(?P<pk>\d+)/edit/$',
        CampaignUpdateView.as_view(), name="campaign-update"),
    url(r'^campaigns/(?P<pk>\d+)/$',
        CampaignDetailView.as_view(), name="campaign-detail"),
    url(r'^campaigns/(?P<pk>\d+)/newurl$',
        NewCampaignURLFormView.as_view(), name="campaign-url"),
    url(r'^dashboard/$', DashboardView.as_view(), name="dashboard"),
    url(r'^api/', include('userapi.urls')),
    url(r'^list/$', PageURLListView.as_view(), name="urllist"),
    url(r'^pixels/$', PixelListView.as_view(), name="pixels"),
    url(r'^pixels/create/$', PixelCreateView.as_view(), name="pixels_create"),
    url(r'^new/$', NewURLFormView.as_view(), name="newurl"),
    url(r'^$', ShortenView.as_view(), name="home"),
    # The short-code routes must stay last: \w+ would otherwise shadow the
    # fixed prefixes above.
    url(r'^(?P<url_id>\w+)/stat$', StatView.as_view(), name="stat"),
    url(r'^(?P<url_id>\w+)/edit$', UpdateURLFormView.as_view(), name="editurl"),
    url(r'^(?P<url_id>\w+)/delete$', delete_url, name="delete_url"),
    url(r'^(?P<url_id>\w+)/$', visiturl, name="visiturl"),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)

# Bug fix: the original appended static(STATIC_URL, ...) a second time when
# DEBUG was on, registering the same patterns twice; they are already added
# unconditionally above (static() is a no-op outside DEBUG anyway).
urlpatterns = urlpatterns + \
    static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
import requests
from settings import valid_email, valid_password, name, animal_type, age
class PetFriends:
    """Minimal REST client for the PetFriends API."""

    def __init__(self):
        self.base_url = "http://petfriends1.herokuapp.com/"

    @staticmethod
    def _decode(res):
        """Return (status_code, body), decoding JSON when possible, raw text otherwise."""
        try:
            body = res.json()
        except ValueError:  # narrowed from a bare except: non-JSON body
            body = res.text
        return res.status_code, body

    def get_api_key(self, email, password):
        """GET /api/key — exchange the given credentials for an auth key.

        Bug fix: the original ignored the *email*/*password* arguments and
        always sent the module-level valid_email/valid_password, so callers
        could never test other credentials.

        Returns:
            tuple: (HTTP status code, decoded response body).
        """
        headers = {
            'email': email,
            'password': password
        }
        res = requests.get(self.base_url + 'api/key', headers=headers)
        return self._decode(res)

    def get_list_of_pets(self, auth_key, filter):
        """GET /api/pets — list pets, optionally filtered (e.g. 'my_pets').

        Returns:
            tuple: (HTTP status code, decoded response body).
        """
        headers = {'auth_key': auth_key}
        params = {'filter': filter}
        res = requests.get(self.base_url + 'api/pets', headers=headers, params=params)
        return self._decode(res)

    def post_pet_simple(self, auth_key, name, animal_type, age):
        """POST /api/create_pet_simple — create a pet without a photo.

        Returns:
            tuple: (HTTP status code, decoded response body).
        """
        data = {
            'name': name,
            'animal_type': animal_type,
            'age': age
        }
        headers = {
            'auth_key': auth_key,
        }
        res = requests.post(self.base_url + 'api/create_pet_simple', headers=headers, data=data)
        return self._decode(res)
import math
# Read class sizes and print a tiny bar chart: one '*' per 10 students
# (rounded up).
male_count = int(input("Jumlah mahasiswa laki-laki : "))
female_count = int(input("Jumlah mahasiswa perempuan : "))
male_bar = "*" * math.ceil(male_count / 10)
female_bar = "*" * math.ceil(female_count / 10)
print(f"Laki-laki : {male_bar}({male_count})")
print(f"Perempuan : {female_bar}({female_count})")
import math
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
def score_(y, pred):
    """Print RMSE, MAE and R2 (as a percentage) for *pred* against *y*."""
    rmse = math.sqrt(mean_squared_error(y, pred))
    mae = mean_absolute_error(y, pred)
    r2_percent = r2_score(y, pred) * 100
    print('RMSE : ', rmse)
    print('MAE : ', mae)
    print('R2 : ', r2_percent)
class Price:
    """A snapshot of an asset's price.

    Attributes:
        name: Human-readable asset name (e.g. "Bitcoin").
        symbol: Ticker symbol (e.g. "BTC").
        price: Price value; rounded to 2 decimals only in to_dict().
        last_updated: Timestamp string as provided by the data source.
    """

    def __init__(self, name: str, symbol: str, price: float, last_updated: str):
        self.name = name
        self.price = price
        self.symbol = symbol
        self.last_updated = last_updated

    def __repr__(self) -> str:
        # Debug-friendly representation (added; no caller relied on the default).
        return (f'{type(self).__name__}(name={self.name!r}, symbol={self.symbol!r}, '
                f'price={self.price!r}, last_updated={self.last_updated!r})')

    def to_dict(self) -> dict:
        """Return a JSON-serializable dict with the price rounded to 2 decimals."""
        return {
            'name': self.name,
            'symbol': self.symbol,
            'price': round(self.price, 2),
            'last_updated': self.last_updated,
        }
|
from __future__ import annotations
import pkgutil
import tomllib
from collections.abc import Callable, Iterable, Iterator, Mapping, Sequence
from importlib.metadata import entry_points
from importlib.resources import files
from typing import TypeVar, Generic, Final
__all__ = ['PluginT', 'Plugin']
#: The plugin type variable.
PluginT = TypeVar('PluginT')
class Plugin(Generic[PluginT], Iterable[tuple[str, type[PluginT]]]):
    """Plugin system, typically loaded from :mod:`importlib.metadata`
    `entry points
    <https://packaging.python.org/guides/creating-and-discovering-plugins/#using-package-metadata>`_.

    >>> example: Plugin[Example] = Plugin('plugins.example')
    >>> example.add('two', ExampleTwo)
    >>> example.registered
    {'one': <class 'examples.ExampleOne'>,
     'two': <class 'examples.ExampleTwo'>}

    Note:
        Plugins registered from *group* entry points are lazy-loaded. This is
        to avoid cyclic imports.

    Args:
        group: The entry point group to load.
        default: The name of the :attr:`.default` plugin.

    """

    def __init__(self, group: str, *, default: str | None = None) -> None:
        super().__init__()
        self.group: Final = group
        self._default = default
        # Entry-point plugins, populated lazily by _load(); None = not loaded yet.
        self._loaded: dict[str, type[PluginT]] | None = None
        # Plugins registered programmatically via add()/register().
        self._added: dict[str, type[PluginT]] = {}

    def __iter__(self) -> Iterator[tuple[str, type[PluginT]]]:
        return iter(self.registered.items())

    @property
    def registered(self) -> Mapping[str, type[PluginT]]:
        """A mapping of the registered plugins, keyed by name."""
        loaded = self._load()
        # Programmatically added plugins shadow same-named entry-point plugins.
        return {**loaded, **self._added}

    @property
    def default(self) -> type[PluginT]:
        """The default plugin implementation.

        This property may also be assigned a new string value to change the
        name of the default plugin.

        >>> example: Plugin[Example] = Plugin('plugins.example', default='one')
        >>> example.default
        <class 'examples.ExampleOne'>
        >>> example.default = 'two'
        >>> example.default
        <class 'examples.ExampleTwo'>

        Raises:
            KeyError: The default plugin name was not registered.

        """
        if self._default is None:
            raise KeyError(f'{self.group!r} has no default plugin')
        else:
            return self.registered[self._default]

    @default.setter
    def default(self, default: str | None) -> None:
        self._default = default

    def _check_extras(self, extras: Sequence[str]) -> bool:
        # extras.toml (packaged next to this module) maps each extra name to
        # the module names that must be importable for plugins declared under
        # that extra; return False when any of them is missing.
        extras_path = files(__name__).joinpath('extras.toml')
        with extras_path.open('rb') as extras_file:
            extras_data = tomllib.load(extras_file)
        extras_names = extras_data['extras']['check']
        for extra_name in extras:
            for name in extras_names[extra_name]:
                try:
                    pkgutil.resolve_name(name)
                except ImportError:
                    return False
        return True

    def _load(self) -> Mapping[str, type[PluginT]]:
        # Load the entry-point plugins on first access and cache the result;
        # entry points whose extras are unsatisfied are silently skipped.
        loaded = self._loaded
        if loaded is None:
            loaded = {}
            for entry_point in entry_points(group=self.group):
                if not self._check_extras(entry_point.extras):
                    continue
                plugin: type[PluginT] = entry_point.load()
                loaded[entry_point.name] = plugin
            self._loaded = loaded
        return loaded

    def add(self, name: str, plugin: type[PluginT]) -> None:
        """Add a new plugin by name.

        Args:
            name: The identifying name of the plugin.
            plugin: The plugin object.

        """
        self._added[name] = plugin

    def register(self, name: str) -> Callable[[type[PluginT]], type[PluginT]]:
        """Decorates a plugin implementation.

        Args:
            name: The identifying name of the plugin.

        """
        def deco(plugin: type[PluginT]) -> type[PluginT]:
            self.add(name, plugin)
            return plugin
        return deco

    def __repr__(self) -> str:
        return f'Plugin({self.group!r})'
|
ammo= 17
fear= 3
dealoffer= 0
sulfurmines= 0
tradeoffer= 0
vault= 0
vaultprotect= 0
farms= 0
coalmines= 0
metalmines= 0
walls= 0
quarrytot= 0
metalmineproduce= 0
sulfurmineproduce= 0
oildrillproduce= 0
turnrotate= 1
coalmineproduce= 0
coal= 0
oil= 24
metalore= -1
raidedtimes= 0
quarry= 0
sentpeople= 6
day= 0
camplog= 1
wood= 38
meat= 7
oildrills= 0
guns= 8
axe= 4
trademat= 1
meals= 26
pop= 9
sulfur= 57
gunpowder= 0
metal= 10000
kitchen= 0
radio= 1
gen= 0
|
from django.shortcuts import render
# Create your views here.
def index(request):
    """Render the site home page."""
    template_name = 'home/index.html'
    return render(request, template_name)
def contacto(request):
    """Render the contact page with a sample contact, addresses and admin list."""
    context = {
        'contacto1': "Gerardo Medina",
        'direcciones': ['Cerro del Perote #136 Colinas del Cimatario', 'Casa2'],
        'admins': [
            {'nombre': "Luis", 'apellidos': "Salazar Torres", 'email': "makako@gmail.com", 'puesto': True},
            {'nombre': "Leidi", 'apellidos': "Gomez", 'email': "simio@gmail.com", 'encargado': False},
            {'nombre': "Fabioladora", 'apellidos': "Olvera", 'email': "fabz@gmail.com", 'encargado': True},
        ],
    }
    return render(request, 'contacto.html', context)
def agregar(request):
    """Render the "add" page."""
    template_name = 'home/agregar.html'
    return render(request, template_name)
import pandas as pd
import numpy as np
from .data_reader import read, get_data, merge
"""Gets data from data_reader.py and cleans it"""
# Root of the Kaggle astro working directory (user-specific).
directory_path = "/Users/jasonterry/Documents/Scripts/Misc/My_stuff/" \
                 "Kaggle/astro"
# Columns of the per-object metadata table.
# Bug fix: a missing comma between "gal_b" and "ddf" made Python concatenate
# the adjacent string literals into one bogus column name "gal_bddf".
metadata_columns = ["object_id", "ra", "decl", "gal_l", "gal_b", "ddf",
                    "hostgal_specz", "hostgal_photoz", "hostgal_photoz_err",
                    "distmod", "mwebv", "target"]
# Columns of the per-observation light-curve table.
data_columns = ["object_id", "mjd", "passband", "flux", "flux_err",
                "detected"]
def compare_galaxies(test=False):
    """Looks at entries with zero distance modulus or galaxy spectra
    and impute them with values from objects in same galaxy or drops if
    there are no other objects in that galaxy.

    Args:
        test: When True, operate on the test set (output prefixed "test_"),
            otherwise the training set.

    Writes the merged, imputed table to
    ``<directory_path>/data/<prefix>galaxy_merged.csv``.
    """
    data, metadata = read(test)
    galaxy_cols = ["hostgal_specz", "hostgal_photoz_err", "distmod",
                   "gal_l", "gal_b"]
    for index, row in metadata.iterrows():
        # Bug fix: row["distmod"] is a scalar, so .isnull() raised
        # AttributeError; pd.isnull() handles scalars and NaN correctly.
        if row["hostgal_specz"] == 0 or pd.isnull(row["distmod"]):
            for index1, row1 in metadata.iterrows():
                if row["gal_l"] == row1["gal_l"] and \
                        row["gal_b"] == row1["gal_b"]:
                    for feature in galaxy_cols[:3]:
                        # Bug fix: iterrows() yields copies, so assigning into
                        # `row` never updated the DataFrame; write back with .at.
                        metadata.at[index, feature] = row1[feature]
    # Objects still missing values had no galaxy mates: drop them.
    metadata = metadata.dropna()
    prefix = "test_" if test else "train_"  # renamed: `set` shadowed the builtin
    new_data = merge(data, metadata)
    new_data.to_csv(directory_path + "/data/" + prefix + "galaxy_merged.csv",
                    index=False)
|
from flask import Flask, jsonify
app = Flask(__name__)


@app.route("/api")
def index():
    """Health-check style endpoint: return a minimal JSON payload."""
    return jsonify({
        "message": "api"
    })
|
import util
def part1(timestamp, buses):
    """Return (bus id) * (wait in minutes) for the earliest catchable bus.

    Args:
        timestamp: Earliest minute we can depart.
        buses: Bus ids (ints) interleaved with "x" placeholders (ignored).

    Returns:
        The id of the first bus departing at or after *timestamp*,
        multiplied by the minutes waited.
    """
    ids = [bus for bus in buses if bus != "x"]
    # The wait for a bus is the distance to the next multiple of its id:
    # (-timestamp) % bus. This replaces the original accidental
    # O(timestamp / bus) counting loop with O(1) arithmetic (same result,
    # including the wait-0 case when timestamp is an exact multiple).
    deltas = {bus: (-timestamp) % bus for bus in ids}
    best = min(deltas, key=deltas.get)
    return best * deltas[best]
def part2(_, buses):
timestamp = 0
delta = 1
idx = 0
while idx < len(buses):
bus = buses[idx]
if bus == "x":
idx += 1
continue
if (timestamp + idx) % bus == 0:
delta *= bus
idx += 1
continue
timestamp += delta
return timestamp
# Parse the puzzle input: line 1 is the earliest departure time, line 2 the
# comma-separated bus ids (with "x" placeholders kept as strings).
notes = util.read_lines("input/13.txt")
timestamp = int(notes[0])
buses = [int(bus) if bus != "x" else bus for bus in notes[1].split(",")]
util.run(part1, part2, timestamp, buses)
|
from AdditionalFunctions import *
class Unit(object):
    """A node in an organisational tree: employees plus child units."""

    def __init__(self, name, id, parentId, employees, children):
        self.Name = name
        self.Id = id
        self.ParentId = parentId
        self.Children = children
        self.Employees = employees

    def AddChild(self, unit):
        """Attach *unit* as a direct child of this unit."""
        self.Children.append(unit)

    def GetAllDocuments(self):
        """Collect every employee document in this subtree (BFS, de-duplicated)."""
        documents = []
        queue = [self]
        while queue:
            unit = queue.pop(0)
            for employee in unit.Employees:
                documents = MergeListWithoutDuplication(documents, employee.Documents)
            for child in unit.Children:
                queue.append(child)
        return documents
|
'''
This script handling the training process.
'''
import os
import argparse
import math
import time
from tqdm import tqdm
import torch
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
import transformer.Constants as Constants
import dataset
import vocab
from transformer.Models import Transformer
from transformer.Optim import ScheduledOptim
import pdb
def cal_performance(pred, gold, smoothing=False):
    ''' Apply label smoothing if needed '''
    # pred: (batch*seq, n_class) logits; gold: target token ids.
    loss = cal_loss(pred, gold, smoothing)
    predicted_ids = pred.max(1)[1]
    gold = gold.contiguous().view(-1)
    # Count correct predictions, ignoring PAD positions.
    non_pad = gold.ne(Constants.PAD)
    n_correct = predicted_ids.eq(gold).masked_select(non_pad).sum().item()
    return loss, n_correct
def cal_loss(pred, gold, smoothing):
    ''' Calculate cross entropy loss, apply label smoothing if needed. '''
    gold = gold.contiguous().view(-1)
    if not smoothing:
        # Plain cross entropy, summed over tokens, PAD positions ignored.
        return F.cross_entropy(pred, gold, ignore_index=Constants.PAD, reduction='sum')
    eps = 0.1
    n_class = pred.size(1)
    # Softened one-hot targets: 1 - eps on the gold class, eps spread evenly
    # over the remaining n_class - 1 classes.
    one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)
    one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
    log_prb = F.log_softmax(pred, dim=1)
    per_token = -(one_hot * log_prb).sum(dim=1)
    # Drop PAD positions; sum now, average later in the caller.
    non_pad_mask = gold.ne(Constants.PAD)
    loss = per_token.masked_select(non_pad_mask).sum()
    return loss
def train_epoch(model, training_data, optimizer, device, smoothing):
    ''' Epoch operation in training phase

    Runs one full pass over *training_data*: forward, loss (optionally
    label-smoothed), backward, and a combined step + LR update.

    Returns:
        tuple: (summed loss per non-PAD word, word-level accuracy).
    '''
    model.train()
    total_loss = 0
    n_word_total = 0
    n_word_correct = 0
    for batch in tqdm(
            training_data, mininterval=2,
            desc=' - (Training) ', leave=False):
        # prepare data
        src_seq, src_pos, tgt_seq, tgt_pos = map(lambda x: x.to(device), batch)
        # Targets are the decoder input shifted left by one (drop the BOS token).
        gold = tgt_seq[:, 1:]
        # pdb.set_trace()
        # smoothing = True
        # (Pdb) print(type(src_seq), src_seq.size(), src_seq)
        # <class 'torch.Tensor'> torch.Size([64, 26]) tensor([[ 2, 2434, 1736, ..., 0, 0, 0],
        # [ 2, 2434, 71, ..., 0, 0, 0],
        # [ 2, 1557, 1071, ..., 0, 0, 0],
        # ...,
        # [ 2, 2434, 729, ..., 0, 0, 0],
        # [ 2, 1557, 2010, ..., 0, 0, 0],
        # [ 2, 1252, 1, ..., 0, 0, 0]], device='cuda:0')
        # (Pdb) print(type(src_pos), src_pos.size(), src_pos)
        # <class 'torch.Tensor'> torch.Size([64, 26]) tensor([[1, 2, 3, ..., 0, 0, 0],
        # [1, 2, 3, ..., 0, 0, 0],
        # [1, 2, 3, ..., 0, 0, 0],
        # ...,
        # [1, 2, 3, ..., 0, 0, 0],
        # [1, 2, 3, ..., 0, 0, 0],
        # [1, 2, 3, ..., 0, 0, 0]], device='cuda:0')
        # (Pdb) print(type(tgt_seq), tgt_seq.size(), tgt_seq)
        # <class 'torch.Tensor'> torch.Size([64, 25]) tensor([[ 2, 2136, 645, ..., 0, 0, 0],
        # [ 2, 2136, 2296, ..., 0, 0, 0],
        # [ 2, 251, 1146, ..., 0, 0, 0],
        # ...,
        # [ 2, 2136, 1914, ..., 0, 0, 0],
        # [ 2, 251, 1484, ..., 0, 0, 0],
        # [ 2, 2136, 164, ..., 0, 0, 0]], device='cuda:0')
        # (Pdb) print(type(tgt_pos), tgt_pos.size(), tgt_pos)
        # <class 'torch.Tensor'> torch.Size([64, 25]) tensor([[1, 2, 3, ..., 0, 0, 0],
        # [1, 2, 3, ..., 0, 0, 0],
        # [1, 2, 3, ..., 0, 0, 0],
        # ...,
        # [1, 2, 3, ..., 0, 0, 0],
        # [1, 2, 3, ..., 0, 0, 0],
        # [1, 2, 3, ..., 0, 0, 0]], device='cuda:0')
        # forward
        optimizer.zero_grad()
        pred = model(src_seq, src_pos, tgt_seq, tgt_pos)
        # (Pdb) print(type(pred), pred.size(), pred)
        # <class 'torch.Tensor'> torch.Size([1536, 3149]) tensor([[-0.6007, 2.4810, -1.2152, ..., -1.2766, -1.5151, -0.7077],
        # [-0.1354, 8.6667, -3.4856, ..., -1.4034, -0.7370, -0.5578],
        # [-0.6275, 4.8268, -0.1633, ..., -2.6749, -1.8535, -0.6473],
        # ...,
        # [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000],
        # [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000],
        # [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000]],
        # device='cuda:0', grad_fn=<ViewBackward>)
        # backward
        loss, n_correct = cal_performance(pred, gold, smoothing=smoothing)
        # (Pdb) print(type(loss), loss.size(), loss)
        # <class 'torch.Tensor'> torch.Size([]) tensor(1821.7072, device='cuda:0', grad_fn=<SumBackward0>)
        # (Pdb) print(type(n_correct), n_correct)
        # <class 'int'> 667
        loss.backward()
        # update parameters (ScheduledOptim also advances the warmup LR schedule)
        optimizer.step_and_update_lr()
        # note keeping: normalise by non-PAD word counts, not batch counts
        total_loss += loss.item()
        non_pad_mask = gold.ne(Constants.PAD)
        n_word = non_pad_mask.sum().item()
        n_word_total += n_word
        n_word_correct += n_correct
    loss_per_word = total_loss/n_word_total
    accuracy = n_word_correct/n_word_total
    return loss_per_word, accuracy
def eval_epoch(model, validation_data, device):
    ''' Epoch operation in evaluation phase

    Same bookkeeping as train_epoch but with eval mode, no gradient
    tracking, and label smoothing disabled.

    Returns:
        tuple: (summed loss per non-PAD word, word-level accuracy).
    '''
    model.eval()
    total_loss = 0
    n_word_total = 0
    n_word_correct = 0
    with torch.no_grad():
        for batch in tqdm(
                validation_data, mininterval=2,
                desc=' - (Validation) ', leave=False):
            # prepare data
            src_seq, src_pos, tgt_seq, tgt_pos = map(lambda x: x.to(device), batch)
            # Targets are the decoder input shifted left by one (drop BOS).
            gold = tgt_seq[:, 1:]
            # forward
            pred = model(src_seq, src_pos, tgt_seq, tgt_pos)
            loss, n_correct = cal_performance(pred, gold, smoothing=False)
            # note keeping: normalise by non-PAD word counts
            total_loss += loss.item()
            non_pad_mask = gold.ne(Constants.PAD)
            n_word = non_pad_mask.sum().item()
            n_word_total += n_word
            n_word_correct += n_correct
    loss_per_word = total_loss/n_word_total
    accuracy = n_word_correct/n_word_total
    return loss_per_word, accuracy
def train(model, training_data, validation_data, optimizer, device, opt):
    """Full training driver: alternate training and validation epochs,
    checkpoint the model and optionally append per-epoch CSV logs.

    Args:
        model: the Transformer being trained (updated in place).
        training_data / validation_data: DataLoaders yielding
            (src_seq, src_pos, tgt_seq, tgt_pos) batches.
        optimizer: ScheduledOptim wrapper (handles LR warm-up).
        device: torch.device the batches are moved to.
        opt: argparse.Namespace with at least epoch, log, save_model,
            save_mode and label_smoothing fields.

    Side effects: writes '<opt.log>.train.log' / '<opt.log>.valid.log'
    when opt.log is set, and '<opt.save_model>*.chkpt' when opt.save_model
    is set.  (The hundreds of lines of pasted pdb output that previously
    lived in this function have been removed; they documented one concrete
    run, not the code.)
    """
    log_train_file = None
    log_valid_file = None

    if opt.log:
        log_train_file = opt.log + '.train.log'
        log_valid_file = opt.log + '.valid.log'

        print('[Info] Training performance will be written to file: {} and {}'.format(
            log_train_file, log_valid_file))

        # Truncate both logs and write the CSV header once up front.
        with open(log_train_file, 'w') as log_tf, open(log_valid_file, 'w') as log_vf:
            log_tf.write('epoch,loss,ppl,accuracy\n')
            log_vf.write('epoch,loss,ppl,accuracy\n')

    valid_accus = []  # full history of validation accuracies ('best' mode compares against it)
    for epoch_i in range(opt.epoch):
        print('[ Epoch', epoch_i, ']')

        start = time.time()
        train_loss, train_accu = train_epoch(
            model, training_data, optimizer, device, smoothing=opt.label_smoothing)
        # Perplexity is exp(loss); the loss is clamped at 100 so a divergent
        # epoch cannot overflow math.exp.
        print(' - (Training) ppl: {ppl: 8.5f}, accuracy: {accu:3.3f} %, '\
            'elapse: {elapse:3.3f} min'.format(
                ppl=math.exp(min(train_loss, 100)), accu=100*train_accu,
                elapse=(time.time()-start)/60))

        start = time.time()
        valid_loss, valid_accu = eval_epoch(model, validation_data, device)
        print(' - (Validation) ppl: {ppl: 8.5f}, accuracy: {accu:3.3f} %, '\
            'elapse: {elapse:3.3f} min'.format(
                ppl=math.exp(min(valid_loss, 100)), accu=100*valid_accu,
                elapse=(time.time()-start)/60))

        valid_accus += [valid_accu]

        model_state_dict = model.state_dict()
        checkpoint = {
            'model': model_state_dict,
            'settings': opt,
            'epoch': epoch_i}

        if opt.save_model:
            if opt.save_mode == 'all':
                # Keep every epoch, tagged with its validation accuracy.
                model_name = opt.save_model + '_accu_{accu:3.3f}.chkpt'.format(accu=100*valid_accu)
                torch.save(checkpoint, model_name)
            elif opt.save_mode == 'best':
                # Overwrite the single checkpoint whenever this epoch ties or
                # beats the best validation accuracy seen so far.
                model_name = opt.save_model + '.chkpt'
                if valid_accu >= max(valid_accus):
                    torch.save(checkpoint, model_name)
                    print(' - [Info] The checkpoint file has been updated.')

        if log_train_file and log_valid_file:
            with open(log_train_file, 'a') as log_tf, open(log_valid_file, 'a') as log_vf:
                log_tf.write('{epoch},{loss: 8.5f},{ppl: 8.5f},{accu:3.3f}\n'.format(
                    epoch=epoch_i, loss=train_loss,
                    ppl=math.exp(min(train_loss, 100)), accu=100*train_accu))
                log_vf.write('{epoch},{loss: 8.5f},{ppl: 8.5f},{accu:3.3f}\n'.format(
                    epoch=epoch_i, loss=valid_loss,
                    ppl=math.exp(min(valid_loss, 100)), accu=100*valid_accu))
def main():
    ''' Main function '''
    # --- Command line ------------------------------------------------------
    # Model-geometry defaults match the base Transformer configuration.
    parser = argparse.ArgumentParser()
    # parser.add_argument('-data', required=True)
    parser.add_argument('-train_atok', required=True)
    parser.add_argument('-valid_atok', required=True)
    parser.add_argument('-epoch', type=int, default=200)
    parser.add_argument('-batch_size', type=int, default=8)
    parser.add_argument('-d_word_vec', type=int, default=512)
    parser.add_argument('-d_model', type=int, default=512)
    parser.add_argument('-d_inner_hid', type=int, default=2048)
    parser.add_argument('-d_k', type=int, default=64)
    parser.add_argument('-d_v', type=int, default=64)
    parser.add_argument('-n_head', type=int, default=8)
    parser.add_argument('-n_layers', type=int, default=6)
    parser.add_argument('-n_warmup_steps', type=int, default=4000)
    parser.add_argument('-dropout', type=float, default=0.1)
    parser.add_argument('-embs_share_weight', action='store_true')
    parser.add_argument('-proj_share_weight', action='store_true')
    parser.add_argument('-log', default=None)
    parser.add_argument('-save_model', default=None)
    parser.add_argument('-save_mode', type=str, choices=['all', 'best'], default='best')
    parser.add_argument('-no_cuda', action='store_true')
    parser.add_argument('-label_smoothing', action='store_true')
    opt = parser.parse_args()
    opt.cuda = not opt.no_cuda
    # Embedding width is forced equal to the model width (overrides -d_word_vec).
    opt.d_word_vec = opt.d_model
    #========= Loading Dataset =========#
    # The *.atok files are torch-saved objects with a 'settings' entry that
    # carries `vocab` and `max_seq_len` -- presumably produced by a
    # preprocessing script not shown here; TODO confirm the exact schema.
    train_atok = torch.load(opt.train_atok)
    valid_atok = torch.load(opt.valid_atok)
    train_vocab = vocab.Vocab(train_atok['settings'].vocab)
    training_data = dataset.translation_dataloader(train_atok, opt.batch_size, shuffle=True)
    validation_data = dataset.translation_dataloader(valid_atok, opt.batch_size, shuffle=False)
    # data = torch.load(opt.data)
    opt.max_token_seq_len = train_atok['settings'].max_seq_len
    # training_data, validation_data = prepare_dataloaders(data, opt)
    # NOTE(review): both sizes come from the *training* vocabulary, i.e. a
    # single shared source/target vocabulary is assumed.
    opt.src_vocab_size = train_vocab.size()
    opt.tgt_vocab_size = train_vocab.size()
    #========= Preparing Model =========#
    # if opt.embs_share_weight:
    # assert training_data.dataset.src_word2idx == training_data.dataset.tgt_word2idx, \
    # 'The src/tgt word2idx table are different but asked to share word embedding.'
    print(opt)
    device = torch.device('cuda' if opt.cuda else 'cpu')
    transformer = Transformer(
        opt.src_vocab_size,
        opt.tgt_vocab_size,
        opt.max_token_seq_len,
        tgt_emb_prj_weight_sharing=opt.proj_share_weight,
        emb_src_tgt_weight_sharing=opt.embs_share_weight,
        d_k=opt.d_k,
        d_v=opt.d_v,
        d_model=opt.d_model,
        d_word_vec=opt.d_word_vec,
        d_inner=opt.d_inner_hid,
        n_layers=opt.n_layers,
        n_head=opt.n_head,
        dropout=opt.dropout).to(device)
    # Resume from a previous 'trained.chkpt' in the working directory, if any.
    if os.path.exists("trained.chkpt"):
        x = torch.load("trained.chkpt")
        # print(type(x["model"]))
        transformer.load_state_dict(x["model"])
    # Adam wrapped in the warm-up learning-rate schedule.
    optimizer = ScheduledOptim(
        optim.Adam(
            filter(lambda x: x.requires_grad, transformer.parameters()),
            betas=(0.9, 0.98), eps=1e-09),
        opt.d_model, opt.n_warmup_steps)
    train(transformer, training_data, validation_data, optimizer, device, opt)
if __name__ == '__main__':
    main()
|
#! /usr/bin/env python
"""Hyperinterval mapping
Repeated hyperinterval finding.
(c) 2011 Jouke Witteveen
"""
import hint

# Let the hint module consume its command-line options before producing hints.
hint.cli_args()
# Output file: kept boxes first, a blank-record separator, then discarded
# boxes (a gnuplot-friendly layout).
fh = open( '2dboxes', 'w' )
# judgement[0] collects DISCARDED records, judgement[1] the KEPT ones
# (indexed directly by the boolean `keep`).
judgement = [ [], [] ]
try:
  # Each hint is (hyperinterval, complexity, keep).  A hyperinterval is
  # presumably a pair of corner points ((x0, y0), (x1, y1)), judging by the
  # indexing below -- TODO confirm against the hint module.
  for hinterval, complexity, keep in hint.hints():
    print( "Found:", hinterval, "KEPT" if keep else "DISCARDED" )
    # Half-extents of the box: map pairs the two corners element-wise,
    # so `a` is the low-corner coordinate and `b` the high-corner one.
    xdelta, ydelta = map( lambda a, b: ( b - a ) / 2, *hinterval )
    # Box center = low corner + half-extent.
    x = hinterval[0][0] + xdelta
    y = hinterval[0][1] + ydelta
    judgement[keep].append( "{}\t{}\t{}\t{}\n".format( x, y, xdelta, ydelta ) )
except KeyboardInterrupt:
  # The hint stream may be open-ended; allow a manual Ctrl-C cut-off.
  print( "Interrupted" )
for line in judgement[1]: fh.write( line )
fh.write( "\n\n" )
for line in judgement[0]: fh.write( line )
|
from math import radians, sin, cos, atan, atan2, sqrt
from simplemapplot import make_us_state_map
from time import sleep
#######################
# Name:readStateCenterFile(stateCenterDict)
# Input:
# Output:
# Purpose: reads in the file of state centers as described in the handout
#
def readStateCenterFile(stateCenterDict):
    """Populate stateCenterDict from "stateCenters.txt" (in the cwd).

    Each line of the file is "ABBREV,lat,long"; the dict maps the state
    abbreviation to a (lat, long) tuple of floats.  Mutates the dict in
    place and returns None.
    """
    # `with` guarantees the file is closed even if a line fails to parse
    # (the original leaked the handle on any exception).
    with open("stateCenters.txt") as stateFile:
        for line in stateFile:
            value = line.split(',')
            stateCenterDict[value[0]] = (float(value[1]), float(value[2]))
########################
########################
# Name:readTweetFile(tweetList)
# Input:
# Output:
# Purpose: reads the tweets from the file into a list. The file is specified in
# the "open" command. Change the "open" call when you want the big file.
#
def readTweetFile(tweetList):
    """Append ((lat, long), tweet_text) tuples parsed from "allTweets.txt".

    Each line is tab-separated; field 0 is "(lat,long)" (with parentheses)
    and field 3 is the tweet text.  Malformed lines are skipped.  Mutates
    tweetList in place and returns None.
    """
    with open("allTweets.txt", encoding="utf-8") as tweetFile:
        for line in tweetFile:
            try:
                value = line.split("\t")
                lat, long = value[0].split(",")
                lat = float(lat[1:])      # strip the leading '('
                long = float(long[:-1])   # strip the trailing ')'
                tweetList.append(((lat, long), str(value[3])))
            except (ValueError, IndexError):
                # Only the expected parse failures are skipped; the original
                # bare `except:` silently swallowed every exception.
                continue
#########################
###########################
# Name:distance (lat1, lon1, lat2, lon2)
# Input:
# Output:
# Purpose:takes a latitude and longitude for two given points and returns
# the great circle distance between them in miles
#
def distance (lat1, lon1, lat2, lon2):
    """Great-circle distance in miles between two (lat, lon) points.

    Implements the haversine formula on a spherical Earth of radius
    3963.2 miles; inputs are in degrees and may be numbers or strings.
    """
    earth_radius = 3963.2 # miles
    phi1, phi2, lam1, lam2 = (
        radians(float(v)) for v in (lat1, lat2, lon1, lon2))
    dphi = phi2 - phi1
    dlam = lam2 - lam1
    # Haversine term; atan2 keeps the result stable near antipodal points.
    h = sin(dphi / 2) ** 2 + sin(dlam / 2) ** 2 * cos(phi1) * cos(phi2)
    return earth_radius * 2 * atan2(sqrt(h), sqrt(1 - h))
#
############################
############
# Function to assign frequency colors to map
def assignStateColor(stateCountDict, stateColor):
    """Bucket each state's count into a color index 0-6.

    Index 0 is reserved for an exact zero count; indices 1-6 correspond to
    increasing fractions of the maximum count (10%, 30%, 50%, 70%, 80%,
    100%).  Mutates stateColor in place and also returns it.  Raises
    ValueError if stateCountDict is empty (max of an empty sequence).
    """
    # Thresholds are fractions of the largest observed count.  (An unused
    # `dictMin` computation was removed.)
    dictMax = max(stateCountDict.values())
    lightest = dictMax*.1
    lighter = dictMax*.3
    light = dictMax*.5
    dark = dictMax*.7
    darker = dictMax*.8
    darkest = dictMax
    for i in stateCountDict:
        if stateCountDict[i] == 0:
            stateColor[i] = 0
        elif 0 <= stateCountDict[i] <= lightest:
            stateColor[i] = 1
        elif lightest <= stateCountDict[i] <= lighter:
            stateColor[i] = 2
        elif lighter <= stateCountDict[i] <= light:
            stateColor[i] = 3
        elif light <= stateCountDict[i] <= dark:
            stateColor[i] = 4
        elif dark <= stateCountDict[i]<= darker:
            stateColor[i] = 5
        elif darker <= stateCountDict[i]<= darkest:
            stateColor[i] = 6
    return(stateColor)
############
############################
#MAIN
############################
def main():
    """Count tweets containing a user-supplied word per US state and render
    the counts as a colored US map via simplemapplot."""
    stateCenterDict = {} #Key: state abbrev Value: 2-tuple of (lat,long) of state center
    tweetList = [] #list of two items, first is 2-tuple (lat,long), second is tweet
    stateCountDict = {} #Key: state abbrev Value: count for word
    stateColor = {} #Key, state abbrev Value: frequency count
    readStateCenterFile(stateCenterDict)
    readTweetFile(tweetList)
    # Start every state at zero so untouched states still render.
    for key in stateCenterDict:
        stateCountDict[key] = 0
        stateColor[key] = 0
    wordOfInterest = input("what word are you looking for? ")
    wordOfInterest = wordOfInterest.lower()
    # find location of tweet and assign word count
    for item in tweetList:
        state= None
        # 400 miles acts as a cutoff: tweets farther than that from every
        # state center stay unassigned (state is None).
        minDist = 400
        pos = item[0]
        tweet = item[1]
        lat = pos[0]
        long = pos[1]
        # Latitude heuristic: far north -> Alaska, far south -> Hawaii,
        # skipping the nearest-center search for those.
        if lat > 55:
            state = 'AK'
        elif lat < 26:
            state = 'HI'
        else:
            # Nearest-state-center search over the lower 48.
            for s in stateCenterDict.keys():
                sLat = stateCenterDict[s][0]
                sLong = stateCenterDict[s][1]
                d = distance(sLat, sLong, lat, long)
                if d < minDist:
                    state = s
                    minDist = d
        # assign value and sentiment score to each state
        # (case-insensitive substring match against the tweet text)
        if wordOfInterest in tweet.lower():
            if state in stateCenterDict.keys():
                stateCountDict[state]+=1
    assignStateColor(stateCountDict, stateColor)
    print(stateCountDict)
    print(stateColor)
    # White through progressively darker reds, indexed by color bucket 0-6.
    COLORS = ["#FFFFFF", "#EBAD99", "#E08566", "#D65C33", "#CC3300", "#A32900", "#7A1F00"]
    make_us_state_map(stateColor, COLORS)
#
#############################
main()
|
import socket
import numpy as np
from select import select
import threading
import time
def clamp(x,min,max):
    """Constrain `x` to the closed interval [`min`, `max`].

    Note: the bound parameters intentionally keep their original names
    (which shadow the builtins) so existing keyword callers still work.
    """
    if x < min:
        return min
    return max if x > max else x
def check_server(ip='191.30.80.131', port=23, timeout=3):
    """
    # `check_server(ip, port, timeout)`

    Check if the server at `ip` on port `port` is responding.
    It waits for `timeout` seconds before returning `False`

    ## Arguments

    * `ip`: IP address of the server
    * `port`: Port number the connection should be attempted
    * `timeout`: Time in seconds that the function should wait before giving up

    ## Return

    * `True` if the server responded
    * `False` otherwise
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # BUG FIX: the timeout was hard-coded to 5, silently ignoring the
    # `timeout` parameter.
    s.settimeout(timeout)
    try:
        s.connect((ip,port))
        return True
    except OSError:
        # OSError covers timeouts and refused/unreachable connections;
        # the original bare `except:` also hid programming errors.
        return False
    finally:
        # Always release the socket (it previously leaked on every call).
        s.close()
class Packet(object):
    """
    Handles EU with time DSA-3217 packets.

    `packet_info` is a dict (see Scanivalve.packet_info) with the frame
    length and the byte slices of the pressure, temperature, packet-time
    and time-unit fields inside one binary frame.
    """
    def __init__(self, packet_info):
        self.model = packet_info['model']      # scanivalve model string
        self.packlen = packet_info['packlen']  # bytes per frame
        self.press = packet_info['press']      # byte slice of 16 float32 pressures
        self.temp = packet_info['temp']        # byte slice of the temperature block
        self.t = packet_info['t']              # True when frames carry a timestamp
        self.time = packet_info['time']        # byte slice of the packet time field
        self.tunit = packet_info['tunit']      # byte slice of the time-unit flag
        self.acquiring = False
        self.samplesread = 0
        self.fps = 1
        self.buf = None
        self.allocbuffer(1)
        self.dataread = False
        self.time1 = None   # wall clock just before the first frame
        self.time2 = None   # wall clock just after the first frame
        self.timeN = None   # wall clock after the latest frame
        self.stop_reading = False

    def allocbuffer(self, fps):
        """
        Allocates a buffer with `fps` elements (one raw frame per row).
        """
        self.buf = np.zeros((fps, self.packlen), np.uint8)
        self.fps = fps

    def scan(self, s, dt):
        """
        Execute the SCAN command on socket `s` and read frames into the
        buffer.  `dt` is the expected seconds per frame (used to size the
        socket timeout).  Blocks until self.fps frames are read or
        `stop_reading` is set.
        """
        fps = self.fps
        self.dt = dt
        s.settimeout(max(0.5, 3 * dt))
        s.send(b"SCAN\n")
        self.acquiring = True
        # The first frame is timed separately: it includes command latency.
        self.time1 = time.monotonic()
        s.recv_into(self.buf[0], self.packlen)
        self.time2 = time.monotonic()
        self.timeN = self.time2
        self.dataread = True # There is data read
        self.samplesread = 1
        for i in range(1,fps):
            if self.stop_reading:
                print("STOP_READING")
                break
            s.recv_into(self.buf[i], self.packlen)
            self.timeN = time.monotonic()
            self.samplesread = i+1
        self.acquiring = False

    def get_pressure(self):
        """
        Return the pressures decoded from the buffered frames as an
        (nsamples, 16) float64 array.  Raises RuntimeError if no frames
        have been read yet.
        """
        if not self.dataread:
            raise RuntimeError("No pressure to read from scanivalve!")
        nsamp = self.samplesread
        P = np.zeros((nsamp, 16), np.float64)
        for i in range(nsamp):
            # Reinterpret the frame's pressure bytes as 16 float32 values.
            np.copyto(P[i], self.buf[i,self.press].view(np.float32))
        return P

    def get_time(self, meas=True):
        """
        Return the sampling period in seconds per frame.

        With `meas=True` and at least one frame read, the period comes from
        wall-clock timestamps taken during scan().  Otherwise it is decoded
        from the packet time fields; returns -1000.0 (a sentinel) when the
        frames carry no timing information.
        """
        nsamp = self.samplesread
        if meas:
            if nsamp > 4:
                # Skip the first frame: its timing includes command latency.
                return (self.timeN - self.time2) / (nsamp-1)
            elif nsamp > 0:
                return (self.timeN - self.time1) / nsamp
        if not self.t:
            return -1000.0
        # Time unit flag: 1 -> microseconds, anything else -> milliseconds.
        ttype = self.buf[0,self.tunit].view(np.int32)[0]
        tmult = 1e6 if ttype==1 else 1e3
        t1 = self.buf[0,self.time].view(np.int32)[0]
        # Use the configured time slice (was a hard-coded 104:108).
        t2 = self.buf[self.samplesread-1,self.time].view(np.int32)[0]
        ns = max(1, self.samplesread-1)
        dt = (t2 - t1) / (tmult * ns)
        # BUG FIX: the computed dt was discarded and `self.dt` returned,
        # which raised AttributeError whenever scan() had not run yet.
        return dt

    def clear(self):
        """Reset all acquisition state; raises if a scan is still running."""
        if self.acquiring:
            raise RuntimeError("Still acquiring data from scanivalve!")
        self.acquiring = False
        self.samplesread = 0
        self.dataread = False
        self.time1 = None
        self.time2 = None
        self.timeN = None
        self.stop_reading = False

    def isacquiring(self):
        "Is the scanivalve acquiring data?"
        return self.acquiring

    def read(self, meas=True):
        """Read the data from the buffers and return a pair with pressure
        and sampling rate (Hz).  Note: if get_time returns the -1000.0
        sentinel the rate will be a meaningless small negative number."""
        if self.samplesread > 0:
            p = self.get_pressure()
            dt = self.get_time(meas)
            return p, 1.0/dt
        else:
            raise RuntimeError("Nothing to read from scanivalve!")

    def stop(self):
        """Ask the scan loop to stop after the frame currently in flight."""
        self.stop_reading = True
        return None
class ScanivalveThread(threading.Thread):
    """
    Background worker that runs one scan acquisition to completion.

    Wraps a Packet so the blocking socket reads happen off the calling
    thread; poll `isacquiring()` to see whether frames are still arriving.
    """

    def __init__(self, s, dt, pack):
        super().__init__()
        self.pack = pack   # Packet that owns the frame buffer
        self.s = s         # connected socket to the device
        self.dt = dt       # expected seconds per frame

    def run(self):
        # Reset any previous acquisition state, then block until every
        # frame has been read (or the packet is told to stop).
        self.pack.clear()
        self.pack.scan(self.s, self.dt)

    def isacquiring(self):
        """Delegate the acquiring flag to the wrapped packet."""
        return self.pack.isacquiring()
# Configuration keywords understood by Scanivalve.config().
# NOTE(review): this constant is not referenced anywhere in the visible
# code of this module -- possibly intended for input validation.
valid_lists = ['FPS', 'AVG', 'PERIOD', 'XSCANTRIG']
class Scanivalve(object):
"""
# Data Aquisition from DSA3217
Handles data acquisition from Scanivalve DSA-3217
To initialize, the IP address of the scanivalve device should be used.
```python
import scanivalve
s = scanivalve.Scanivalve(ip)
```
"""
def __init__(self, ip='191.30.80.131', tinfo=False):
# Create the socket
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.ip = ip
self.port = 23
self.acquiring = False
self.s.settimeout(5)
# Connect to socket
try:
self.s.connect((self.ip,self.port))
except:
self.s = None
raise RuntimeError("Unable to connect to scanivalve on IP:{}!".format(ip))
# Clear errors and configure the scanivalve
self.clear()
self.numchans = 16
self.FPS = 1
self.PERIOD=500
self.AVG=16
self.XSCANTRIG = 0
self.time = 2 if tinfo else 0
self.set_var("BIN", 1)
self.set_var("EU", 1)
self.set_var("UNITSCAN", "PA")
self.set_var("XSCANTRIG", 0)
self.set_var("QPKTS", 0)
self.set_var("TIME", self.time)
self.set_var("SIM", 0)
self.set_var("AVG", self.AVG)
self.set_var("PERIOD", self.PERIOD)
self.set_var("FPS", self.FPS)
self.dt = self.PERIOD*1e-6*16 * self.AVG
self.packet_info = self.packet_info()
self.model = self.packet_info['model']
self.pack = Packet(self.packet_info)
self.pack.allocbuffer(self.FPS)
self.thread = None
def packet_info(self, tinfo=True):
model = self.get_model().strip()
if model=='3017':
tinfo = False
packlen = 104
tt = None
tunit = None
elif model=='3217':
press = slice(8, 72)
temp = slice(72,104)
if tinfo:
packlen = 112
tt = slice(104, 108)
tunit = slice(108, 112)
else:
packlen = 104
tt = None
tunit = None
else:
raise RuntimeError("Model {} not recognized!".format(model))
return dict(model=model, packlen=packlen, press=press, temp=temp, t=tinfo, time=tt, tunit=tunit)
def is_pending(self, timeout=0.5):
"Check whether the scanivalve sent some information"
r, w, x = select([self.s], [], [], timeout)
if r == []:
return None
else:
return True
def list_any(self, command, timeout=0.2):
"""
Most query commands of the DSA-3X17 consists of
something like LIST S\n
This method simplys sends the LIST command to the scanivalve and returns
the data.
"""
if self.acquiring:
raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
cmd = ("LIST %s\n" % (command)).encode()
self.s.send(cmd)
buffer = b''
while self.is_pending(timeout):
buffer = buffer + self.s.recv(1492)
return [b.split(' ') for b in buffer.decode().strip().split('\r\n')]
def list_any_map(self, command, timeout=0.5):
"""
Takes data obtained from `list_any` method and builds a dictionary with the
different parameters
"""
if self.acquiring:
raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
buffer = self.list_any(command, timeout)
list = {}
for i in range(len(buffer)):
list[buffer[i][1]] = buffer[i][2]
return list
def hard_zero(self):
"Command to zero the DSA-3X17"
if self.acquiring:
raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
self.s.send(b"CALZ\n")
def set_var(self, var, val):
"""
Set the value of a parameter in the scanivalve by using the command
SET var val
"""
if self.acquiring:
raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
cmd = ( "SET %s %s\n" % (var, val) ).encode()
self.s.send(cmd)
def get_model(self):
"""
Returns the model of the scanivalve
"""
if self.acquiring:
raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
return self.list_any_map("I")["MODEL"]
def stop(self):
"""
Stop the scanivalve
"""
self.pack.stop_reading = True
self.pack.acquiring = False
self.s.send(b"STOP\n")
self.acquiring = False
self.thread = None
time.sleep(0.2)
buffer = b''
while self.is_pending(0.5):
buffer = buffer + self.s.recv(1492)
return None
def clear(self):
"""
Clear the error buffer in the scanivalve
"""
if self.acquiring:
raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
self.s.send(b"CLEAR\n")
def error(self):
"""
Returns a list of errors detected by the scanivalve.
"""
if self.acquiring:
raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
self.s.send(b"ERROR\n")
buffer = b''
while self.is_pending(1):
buffer = buffer + self.s.recv(1492)
return buffer
return buffer.strip().split('\r\n')
def config1(self, FPS=1, PERIOD=500, AVG=16, xtrig=False):
"""
Configures data aquisition
"""
if self.acquiring:
raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
XSCANTRIG = int(xtrig)
if self.model=='3017':
self.PERIOD = clamp(PERIOD, 500, 62500) # 325 if raw packets: not implemented!
self.FPS = clamp(FPS, 1, 2**31) # Could be 0. Not implemented for now!
self.AVG = clamp(AVG, 1, 32767)
else:
self.PERIOD = clamp(PERIOD, 125, 65000)
self.FPS = clamp(FPS, 1, 2**30) # Could be 0. Not implemented for now!
self.AVG = clamp(AVG, 1, 240)
self.dt = self.PERIOD*1e-6*16 * self.AVG
self.set_var("FPS", self.FPS)
self.pack.allocbuffer(self.FPS)
self.set_var("AVG", self.AVG)
self.set_var("PERIOD", self.PERIOD)
self.set_var("XSCANTRIG", XSCANTRIG)
def config(self, **kw):
    """
    Configures data aquisition

    Accepts keyword parameters XSCANTRIG, PERIOD, AVG and FPS (case
    insensitive), clamps them to model-dependent limits, stores them on
    ``self`` and pushes them to the device via ``set_var``.
    Raises RuntimeError for an unknown key or while acquiring.
    """
    if self.acquiring:
        raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
    isold = self.model == '3017'
    for k in kw.keys():
        K = k.upper()
        if K == 'XSCANTRIG':
            val = int(kw[k])
            self.XSCANTRIG = val
        elif K == 'PERIOD':
            x = int(kw[k])
            # NOTE(review): these new-model bounds (160..650000) differ from
            # config1's (125..65000) — confirm which limits are correct.
            val = clamp(x, 500, 62500) if isold else clamp(x, 160, 650000)
            self.PERIOD = val
        elif K == 'AVG':
            x = int(kw[k])
            val = clamp(x, 1, 32767) if isold else clamp(x, 1, 240)
            self.AVG = val
        elif K == 'FPS':
            x = int(kw[k])
            val = clamp(x, 1, 2**31) if isold else clamp(x, 1, 2**30)
            self.FPS = val
            self.pack.allocbuffer(self.FPS)
        else:
            # Bug fix: the RuntimeError was previously constructed but never
            # raised, so unknown keys fell through and set_var() ran with a
            # stale (or, on the first iteration, undefined) ``val``.
            raise RuntimeError("Illegal configuration. SET {} {} not implemented!".format(K, kw[k]))
        self.set_var(K, val)
    # 16 channels are multiplexed within one PERIOD tick, AVG times per frame.
    self.dt = self.PERIOD*1e-6*16 * self.AVG
def acquire(self):
    """Run a single blocking acquisition and return ``(pressures, freq)``.

    Raises RuntimeError if a background acquisition is already running.
    """
    if self.acquiring:
        raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
    self.pack.scan(self.s, self.dt)
    pressures, freq = self.pack.read()
    self.pack.clear()
    return pressures, freq
def start(self):
    """Launch a background acquisition thread (non-blocking).

    Raises RuntimeError if an acquisition is already in progress.
    """
    if self.acquiring:
        raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
    # Create first, then start, then flag — same order as before.
    self.thread = ScanivalveThread(self.s, self.dt, self.pack)
    self.thread.start()
    self.acquiring = True
def read(self):
    """Wait for the background acquisition to finish and return ``(p, freq)``.

    Joins the worker thread (if any), then hands back the collected samples
    and clears the packet buffer.  When no samples were read it prints an
    error and implicitly returns None.
    NOTE(review): the commented-out raise suggests the silent failure path
    should probably be an exception — confirm with the callers.
    """
    if self.thread is not None:
        self.thread.join()
    if self.pack.samplesread > 0:
        p, freq = self.pack.read()
        self.pack.clear()
        self.thread = None
        self.acquiring = False
        return p, freq
    else:
        #raise RuntimeError("Nothing to read")
        print("ERRO EM READ")
def samplesread(self):
    """Number of samples collected so far by the background acquisition.

    Raises RuntimeError when no acquisition thread is active.
    """
    # Guard clause instead of if/else nesting.
    if self.thread is None:
        raise RuntimeError("Scanivalve not reading")
    return self.pack.samplesread
def samplerate(self, meas=True):
    """Return the current sample rate in Hz.

    :param meas: forwarded to ``pack.get_time``.  Bug fix: this parameter
        was previously ignored and ``get_time(True)`` was always called.
    Raises RuntimeError when no acquisition thread is active.
    """
    if self.thread is None:
        raise RuntimeError("Scanivalve not reading")
    dt = self.pack.get_time(meas)
    if dt < -1.0:
        # A negative sentinel means no measured period yet; fall back to
        # the configured acquisition period.
        dt = self.dt
    return 1.0 / dt
def isacquiring(self):
    """Whether the background acquisition is still producing samples.

    Raises RuntimeError when no acquisition thread is active.
    """
    if self.thread is None:
        raise RuntimeError("Scanivalve not reading")
    # NOTE(review): 'isacquring' (sic) matches the pack object's method name.
    return self.pack.isacquring()
def close(self):
    """Stop any running acquisition and release the TCP socket."""
    if self.acquiring:
        self.stop()
    self.thread = None
    self.s.close()
    # Drop the reference so any further use fails fast.
    self.s = None
def nchans(self):
    """Number of pressure channels on this scanivalve (fixed at 16)."""
    return 16
def channames(self):
    """Channel names as zero-padded two-digit strings ('01' .. '16')."""
    return ["{:02d}".format(chan) for chan in range(1, self.nchans() + 1)]
def list_config(self):
    """Return a dict describing the device and its current settings.

    Raises RuntimeError if called while an acquisition is running.
    """
    if self.acquiring:
        raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
    return dict(devtype='pressure',
                manufacturer='scanivalve',
                model=self.model,
                parameters=self.list_any_map('S'))
|
#!/home/moozg/venvs/kasatest/bin/python
#coding: utf-8
#!/home/moozg/venvs/kasa33/bin/python
from __future__ import division, absolute_import, print_function, unicode_literals
import unittest, os
# misc
from kasaya.conf import set_value, settings
from kasaya.core.lib import comm
from gevent import socket
import gevent, random
class AddrDecoderTest(unittest.TestCase):
    """Tests for comm.decode_addr address-string parsing."""

    def test_decode_addr(self):
        # NOTE(review): assertItemsEqual is Python 2 only (renamed
        # assertCountEqual in Python 3); it also compares elements
        # unordered, which is looser than these exact tuples suggest.
        res = comm.decode_addr("tcp://127.0.0.1:123")
        self.assertItemsEqual( res, ('tcp',('127.0.0.1',123), socket.AF_INET, socket.SOCK_STREAM) )
        res = comm.decode_addr("ipc:///tmp/my_socket.sock")
        self.assertItemsEqual( res, ('ipc',"/tmp/my_socket.sock", socket.AF_UNIX, socket.SOCK_STREAM) )
class MaliciousSender(comm.Sender):
    """
    Special sender which is able to send broken messages

    Bypasses the normal serialization path so tests can push arbitrary
    (possibly malformed) bytes at the message loop.
    """

    def send_raw(self, rawdata):
        # Write the raw payload straight to the underlying socket.
        self.SOCK.sendall(rawdata)
def _setup_connecion():
    """Start a MessageLoop on a fixed local port and attach a MaliciousSender.

    Populates the module globals MLOOP, SENDER and grlt (listener greenlet).
    """
    global MLOOP, SENDER, grlt
    addr = "tcp://127.0.0.1:56780"
    MLOOP = comm.MessageLoop(addr)   # message loop
    grlt = gevent.spawn(MLOOP.loop)  # spawn listener
    SENDER = MaliciousSender(addr)   # sender
def _cleanup_connection():
    """Tear down the message loop and close the sender socket."""
    global MLOOP, SENDER
    # Kill the loop before closing so no greenlet touches a dead socket.
    MLOOP.kill()
    MLOOP.close()
    SENDER.close()
class SocketServerTest(unittest.TestCase):
    """End-to-end tests: a MessageLoop listener fed by a (malicious) Sender."""

    @classmethod
    def setUpClass(cls):
        # Idiom fix: a classmethod's first parameter is the class itself and
        # is conventionally named ``cls``, not ``self``.
        _setup_connecion()

    @classmethod
    def tearDownClass(cls):
        _cleanup_connection()

    def random_key(self, length=8):
        """Return a random lowercase-alphanumeric string of *length* chars."""
        n = ""
        for i in range(length):
            n += random.choice("0123456789abcdefgihjklmnopqrstuvwxyz")
        return n

    def random_val(self):
        """Return a random float, int or 80-character string."""
        t = random.choice("fis")
        if t == "f":
            return random.random() * 1000
        elif t == "i":
            return random.randint(-10000, 10000)
        elif t == "s":
            return self.random_key(80)

    def test_simple_transmission(self):
        """Send ten random messages and check each arrives intact."""
        global MLOOP, SENDER
        self.test_size = 0
        self.pattern = None

        def response(msg):
            # there will be error raised if data don't arrive
            self.assertItemsEqual(self.pattern, msg)
            self.test_size -= 1

        # spawn listener
        MLOOP.register_message("test", response)
        # send bunch of messages
        for _ in range(10):  # renamed: the loop variable used to shadow ``msg``
            self.test_size += 1
            msg = {"message": "test"}
            for n in range(8):
                msg[self.random_key()] = self.random_val()
            # send it
            self.pattern = msg
            SENDER.send(msg)
            # wait until receiver get incoming message
            while self.test_size > 0:
                gevent.sleep(0.01)
        # cleanup
        del self.test_size
        del self.pattern

    def test_broken_and_proper_transmission(self):
        """Send one well-formed message and verify it is delivered."""
        global MLOOP, SENDER
        self.test_size = 0
        self.pattern = None

        def response(msg):
            # there will be error raised if data don't arrive
            self.assertItemsEqual(self.pattern, msg)
            self.test_size -= 1

        # spawn listener
        MLOOP.register_message("test", response)
        self.test_size += 1
        msg = {"message": "test"}
        # send it
        self.pattern = msg
        SENDER.send(msg)
        # wait until receiver get incoming message
        while self.test_size > 0:
            gevent.sleep(0.01)
        # cleanup
        del self.test_size
        del self.pattern
# Script entry point: run all unittest cases defined in this module.
if __name__ == '__main__':
    unittest.main()
|
import os
from data_generator.common import *
# Paths to the pre-split English-Wikipedia text corpora.
# NOTE(review): ``data_path`` is assumed to come from the star import above — confirm.
corpus_dir = os.path.join(data_path, "enwiki")
train_path = os.path.join(corpus_dir, "enwiki_train.txt")
eval_path = os.path.join(corpus_dir, "enwiki_eval.txt")
from data_generator.mask_lm.chunk_lm import DataLoader
class EnwikiLoader(DataLoader):
    """DataLoader that streams masked-LM cases from the enwiki text splits."""

    def __init__(self, seq_length, shared_setting):
        # Bug fix: ``super(EnwikiLoader).__init__(...)`` omitted ``self``,
        # producing an unbound super object so DataLoader.__init__ never ran.
        super(EnwikiLoader, self).__init__(seq_length, shared_setting)

    def get_train_generator(self):
        """Yield training cases read from the train split."""
        # NOTE(review): ``tf`` is assumed to be provided by a star import — confirm.
        reader = tf.gfile.Open(train_path, "r")
        return self.case_generator(reader)

    def get_test_generator(self):
        """Yield evaluation cases read from the eval split."""
        reader = tf.gfile.Open(eval_path, "r")
        return self.case_generator(reader)
|
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import scipy.optimize as opt
# Global log file shared by mnk(), mnm() and build_graphic().
# NOTE(review): opened at import time and never explicitly closed — consider a
# context manager at the call site; ``file`` also shadows the Python 2 builtin.
file = open("../result/result.txt", "w")
def standard(x):
    """Reference (noise-free) regression line y = 2 + 2*x."""
    return 2 * x + 2
def standard_with_error(x):
    """Reference-line values with additive standard-normal noise per point."""
    return [stats.norm.rvs(0, 1) + value for value in standard(x)]
def mnk_parameter(x, y):
    """Ordinary least-squares estimates ``(beta_0, beta_1)`` for y ~ b0 + b1*x."""
    mean_x, mean_y = np.mean(x), np.mean(y)
    beta_1 = (np.mean(x * y) - mean_x * mean_y) / (np.mean(x * x) - mean_x ** 2)
    beta_0 = mean_y - beta_1 * mean_x
    return beta_0, beta_1
def mnk(x, y):
    """Fit OLS, log the coefficients to the global file, return fitted values."""
    beta_0, beta_1 = mnk_parameter(x, y)
    file.write("MNK: beta_0 = " + str(beta_0) + ", beta_1 = " + str(beta_1) + "\n")
    return [beta_0 + beta_1 * element for element in x]
def mnm_min(beta, x, y):
    """Sum of absolute residuals |y - (b0 + b1*x)| — the LAD objective."""
    beta_0, beta_1 = beta
    return sum(abs(yi - beta_0 - beta_1 * xi) for xi, yi in zip(x, y))
def mnm_parameter(x, y):
    """LAD estimates: minimize the absolute-residual sum from the OLS start."""
    start = mnk_parameter(x, y)
    result = opt.minimize(mnm_min, list(start), args=(x, y), method='SLSQP')
    alpha_0, alpha_1 = result.x
    return alpha_0, alpha_1
def mnm(x, y):
    """Fit LAD, log the coefficients to the global file, return fitted values."""
    beta_0, beta_1 = mnm_parameter(x, y)
    file.write("MNM: beta_0 = " + str(beta_0) + ", beta_1 = " + str(beta_1) + "\n")
    return [beta_0 + beta_1 * element for element in x]
def build_graphic(x, y, name):
    """Plot the reference line, the OLS and LAD fits, and the noisy sample.

    Also logs the squared (OLS) / absolute (LAD) distances of each fit from
    the reference line to the global result file.
    """
    y_mnk = mnk(x, y)
    y_mnm = mnm(x, y)
    # Perf fix: the original recomputed standard(x) on every loop iteration
    # inside both comprehensions; compute it once.
    y_ref = standard(x)
    dist_mnk = sum((ref - fit) ** 2 for ref, fit in zip(y_ref, y_mnk))
    dist_mnm = sum(abs(ref - fit) for ref, fit in zip(y_ref, y_mnm))
    file.write("mnk distance = " + str(dist_mnk) + ", mnm distance = " + str(dist_mnm) + "\n")
    plt.plot(x, y_ref, color="red", label="Эталон")
    plt.plot(x, y_mnk, color="green", label="МНК")
    plt.plot(x, y_mnm, color="orange", label="МНМ")
    plt.scatter(x, y, c="blue", label="Выборка")
    plt.xlim([-2, 2.2])
    plt.grid()
    plt.legend()
    plt.title(name)
    plt.show()
|
from django.urls import path
from . import views
# URL routes for the blog app: home list, per-user list, about page, and
# post detail/create/update views (all class-based except ``about``).
urlpatterns = [
    path('', views.post_list_view.as_view(),name="blog-home"),
    path('user/<str:username>', views.user_post_list_view.as_view(),name="user-posts"),
    path("about/",views.about,name="blog-about"),
    path('post/<int:pk>',views.post_detail_view.as_view(),name='post-detail'),
    path("post/new/",views.post_create_view.as_view(),name="post-create"),
    path('post/update/<int:pk>',views.post_update_view.as_view(),name='post-update'),
]
|
from Extract_Character import *
from Character_Recognizer import *
from digit_recognizer_ import *
import Check_Stolen_Plates
class Get_Plate_Characters:
    """Runs OCR over a saved plate image and checks it against stolen plates."""

    def __init__(self):
        # One recognizer per glyph family, plus the character segmenter.
        self.cr = Character_Recognizer()
        self.nr = Number_Recognizer()
        self.Ec = Extract_Characters()

    def GetPlateChars(self, PlateNumber):
        """OCR the plate image named *PlateNumber* and report if it is stolen."""
        image = cv2.imread("Plates From Model/" + str(PlateNumber) + ".png")
        numbers, characters = self.Ec.extract(image)
        word = []
        for digit_img in numbers:
            word.append(self.nr.ocr(digit_img))
        for char_img in characters:
            word.append(self.cr.ocr(char_img))
        # The segmenter yields glyphs right-to-left; restore reading order.
        word.reverse()
        Found = Check_Stolen_Plates.Compare_Plates(word)
        if Found:
            print("Is Stolen" + str(word))
|
from PythonFiles.Screens.screen_inventory import InventoryScreen
from PythonFiles.Screens.screen_game_over import GameOverScreen
from kivy.uix.screenmanager import ScreenManager, NoTransition
from PythonFiles.Screens.screen_crafting import CraftingScreen
from PythonFiles.Screens.screen_shelter import ShelterScreen
from PythonFiles.Screens.screen_hunting import HuntingScreen
from PythonFiles.Screens.screen_travel import TravelScreen
from PythonFiles.Screens.screen_pause import PauseScreen
from PythonFiles.Screens.screen_game import GameWindow
from PythonFiles.Screens.screen_start import StartMenu
from PythonFiles.Screens.screen_fire import FireScreen
from kivy.lang import Builder
# Build Kivy files
# Each .kv layout must be loaded before the screen class that uses it is
# instantiated below.
Builder.load_file('KivyFiles/kivy_random_widgets.kv')
Builder.load_file('KivyFiles/kivy_crafting.kv')
Builder.load_file('KivyFiles/kivy_fire.kv')
Builder.load_file('KivyFiles/kivy_game.kv')
Builder.load_file('KivyFiles/kivy_game_over.kv')
Builder.load_file('KivyFiles/kivy_hunting.kv')
Builder.load_file('KivyFiles/kivy_inventory.kv')
Builder.load_file('KivyFiles/kivy_pause.kv')
Builder.load_file('KivyFiles/kivy_shelter.kv')
Builder.load_file('KivyFiles/kivy_start.kv')
Builder.load_file('KivyFiles/kivy_travel.kv')
# Initialize Window Manager (no animation between screens)
sm = ScreenManager(transition=NoTransition())
# Game screens/windows/menus — the string names are the keys used to switch
# screens elsewhere via ``sm.current``.
screens = [StartMenu(name="start"), GameWindow(name="game"), GameOverScreen(name="over"), PauseScreen(name="pause"),
           ShelterScreen(name="shelter"), FireScreen(name="fire"), CraftingScreen(name="crafting"),
           InventoryScreen(name="inventory"), HuntingScreen(name="hunting"), TravelScreen(name="travel")]
# Add the screens to the screen manager
for screen in screens:
    sm.add_widget(screen)
# Set the first screen as the start menu
sm.current = "start"
|
from openpyxl import Workbook
from openpyxl.styles import PatternFill, Alignment, Border, Side, Font
def excel单元格填入(sheet, value, color="FF8C69"):
    """Write *value* into the given cell and style it.

    Applies a solid fill of *color*, centers the content both horizontally
    and vertically, and draws a thin black border on every side.
    (Color codes: http://www.114la.com/other/rgb.htm)
    """
    sheet.value = value
    sheet.fill = PatternFill("solid", fgColor=color)
    # Center the content horizontally and vertically.
    sheet.alignment = Alignment(horizontal='center', vertical='center')  # , wrap_text=True)
    # One shared thin-black Side for every edge of the border.
    thin_black = Side(style='thin', color='FF000000')
    sheet.border = Border(left=thin_black, right=thin_black,
                          top=thin_black, bottom=thin_black,
                          diagonal=thin_black, diagonal_direction=0,
                          outline=thin_black, vertical=thin_black,
                          horizontal=thin_black)
wb = Workbook()
ws = wb.active
ws.title = '机器学习sheet'
# Work on a freshly created sheet, not the default active one.
ws = wb.create_sheet('新建了一个新的sheet')
# Title cell, merged across A1:B1.
excel单元格填入(ws['A1'], '机器学习测试结论')
ws.merge_cells('A1:B1')
# Metric name -> value rows written below the title.
data = {
    '样本数':207684,
    '分类正确':207386,
    '准确率':99.8,
    '错误率':0.14,
    '正例':1300,
    '反例':200000,
    '精确率':99.123,
    '召回率':90.456
}
i = 2
for k, v in data.items():
    # White background for the data rows (the title keeps the default fill).
    excel单元格填入(ws['A' + str(i)], k, 'FFFFFF')
    excel单元格填入(ws['B' + str(i)], v, 'FFFFFF')
    i += 1
wb.save('机器学习测试.xlsx')
from setuptools import setup, find_packages
# read the contents of your README file
from os import path
# Absolute path of this directory so the build works from any CWD.
THISDIRECTORY = path.abspath(path.dirname(__file__))
# Use the README as the PyPI long description.
with open(path.join(THISDIRECTORY, "README.md")) as f:
    LONGDESC = f.read()
setup(
    name="tehran-stocks",
    version="0.8.2.2",
    description="Data Downloader for Tehran stock market",
    url="http://github.com/ghodsizdeh/tehran-stocks",
    author="Mehdi Ghodsizadeh",
    author_email="mehdi.ghodsizadeh@gmail.com",
    license="MIT",
    long_description=LONGDESC,
    long_description_content_type="text/markdown",
    # Packages live under src/ (src-layout).
    package_dir={"": "src"},
    packages=[
        "tehran_stocks",
        "tehran_stocks.download",
        "tehran_stocks.models",
        "tehran_stocks.config",
    ],
    install_requires=["wheel", "pandas", "sqlalchemy", "requests", "jdatetime"],
    zip_safe=False,
    python_requires=">=3.6",
    # Both a POSIX script and its Windows .bat counterpart.
    scripts=["bin/ts-get", "bin/ts-get.bat"],
    include_package_data=True,
)
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import ListView, DetailView, CreateView
from django.views.generic.edit import DeleteView, UpdateView
from.models import models
from .models import Vehicle
from django.urls import reverse_lazy
class VehicleListView(LoginRequiredMixin,ListView):
    """Authenticated list of all vehicles.

    NOTE(review): unlike the detail/create views this one sets no
    ``login_url``, so it falls back to settings.LOGIN_URL — confirm intended.
    """
    model = Vehicle
    template_name = 'vehicle_list.html'
class VehicleDetailView(LoginRequiredMixin,DetailView):
    """Authenticated detail page for a single vehicle."""
    model = Vehicle
    template_name = 'vehicle_detail.html'
    login_url = 'login'
class VehicleUpdateView(LoginRequiredMixin,UpdateView):
    """Authenticated edit form for an existing vehicle.

    NOTE(review): no ``login_url`` here while sibling views set one — confirm.
    """
    model = Vehicle
    fields = ('year', 'make', 'car_model', 'vin_number', 'price')
    template_name = 'vehicle_edit.html'
class VehicleDeleteView(LoginRequiredMixin,DeleteView):
    """Authenticated delete confirmation; redirects to the list on success."""
    model = Vehicle
    template_name = 'vehicle_delete.html'
    success_url = reverse_lazy('vehicle_list')
class VehicleCreateView(LoginRequiredMixin,CreateView):
    """Authenticated create form; stamps the current user as the author."""
    model = Vehicle
    template_name = 'vehicle_new.html'
    fields = ('year', 'make', 'car_model', 'vin_number', 'price')
    login_url = 'login'

    def form_valid(self, form):
        # Attach the logged-in user before the instance is saved.
        form.instance.author = self.request.user
        return super().form_valid(form)
|
# Python 2 script (uses the ``print`` statement below): builds parameter grids
# for several recommender predictors and evaluates them.
from we7dm.util.commonDM import systemInit
# Must run before the rest of the we7dm imports.
systemInit()
import we7dm.config
import we7dm.recsys.configs
from we7dm.databases.offline import DmDatabase
from we7dm.recsys import ui
from we7dm.recsys.ui import filtering
from we7dm.recsys.predictors.pop import PopPredictor
from we7dm.recsys import similarity
from we7dm import optimizers
from we7dm.recsys.evaluation import evaluators
# Grid over item2item confidence/lift settings.  Scalar entries are fixed
# values; list entries are swept by generate_params_from_grids.
confidence_params_grid = {
    'predictor.item2item.similarityMeasureType': ['pairwise lift'],
    'predictor.item2item.minUserSupport': [0, 1, 2, 5],
    'predictor.item2item.minItemSupport': 2,
    'predictor.item2item.minItemConsumption': 2, # 1 best for lift, 4 for conf
    'predictor.item2item.useUserNormalization': True, # just better,
    'predictor.item2item.useItemNormalization': False, # conf = lift is True,
    'predictor.item2item.commonCntMin': [0, 2, 5, 10, 20, 40, 60] # does it plateau after 80 for lift?,
}
# Popularity baseline (no hyper-parameters to sweep).
pop_params_grid = {'predictor.class': PopPredictor}
# One run per available similarity measure, other settings fixed.
similarity_params_grid = {
    'predictor.item2item.similarityMeasureType': similarity.func_by_name.keys(),
    'predictor.item2item.minUserSupport': 0,
    'predictor.item2item.minItemSupport': 10,
    'predictor.item2item.minItemConsumption': 5,
    'predictor.item2item.useUserNormalization': True,
    'predictor.item2item.useItemNormalization': False,
    'predictor.item2item.commonCntMin': 50,
}
params_to_evaluate = optimizers.generate_params_from_grids(
    search_grids=[pop_params_grid, confidence_params_grid, similarity_params_grid],
    default_params=we7dm.recsys.configs.config_dict)
print "%i params to evaluate" % (len(params_to_evaluate),)
test_topn = we7dm.config.get_test_recall_at()
# NOTE(review): ui_train / ui_test are not defined anywhere in this file —
# presumably injected by an execution harness; as written this line raises
# NameError.  Confirm how the script is launched.
predictor_evaluator = evaluators.PredictorEvaluator.from_uis(
    test_topn, ui_train, ui_test,
    users_with_history_only=we7dm.config.get_test_users_with_history_only(),
    allow_history_predictions=we7dm.config.get_test_allow_history_predictions(),
    metric_names=['recall', 'pop_discounted_recall', 'precision', 'AUC', 'GlobalItemCounter'])
|
def div(a, b):
    """Teaching example: an unhandled ZeroDivisionError aborts the function,
    so "after" never prints when b == 0."""
    print("before")
    print(a/b)
    print("after")

# Deliberately divide by zero to produce the traceback reproduced below.
div(1, 0)
# before
# Traceback (most recent call last):
#   File "examples/exceptions/divide_by_zero.py", line 8, in <module>
#     div(1, 0)
#   File "examples/exceptions/divide_by_zero.py", line 5, in div
#     print(a/b)
# ZeroDivisionError: integer division or modulo by zero
|
fpath = 'input.txt'
# Bug fix: the file handle was opened and never closed; read it inside a
# context manager instead.  The former module-level ``global mdsum`` was a
# no-op (``global`` only has meaning inside a function) and is dropped.
with open(fpath, 'r') as fh:
    contents = list(map(int, fh.read().split(' ')))
# Running metadata total, accumulated by dig() below.
mdsum = 0
def dig(chlist):
    """Recursively consume one tree node from *chlist* (AoC 2018 day 8 layout).

    A node is: child-count, metadata-count, children..., metadata entries.
    Children are processed recursively, mutating ``chlist`` in place, and
    every metadata entry is added to the module-global ``mdsum``.
    Returns the remaining, unconsumed tail of the list.
    """
    global mdsum
    while chlist[0] > 0:
        # Splice the recursively-processed remainder back after this node's
        # two header entries, then mark one child as consumed.
        chlist[:] = chlist[0:2] + dig(chlist[2:])
        chlist[0] -= 1
    while chlist[1] > 0:
        # All children consumed: metadata entries now start at index 2.
        mdsum += chlist.pop(2)
        chlist[1] -= 1
    # Drop this node's header and hand back the unconsumed tail.
    chlist[:] = chlist[2:]
    return chlist
# Walk the whole tree, then print the accumulated metadata total.
dig(contents)
print(mdsum)
def postopek(vsebina_datoteke, vnos):
    """Run the intcode program in *vsebina_datoteke* with input value *vnos*.

    Implements opcodes 1-9 (add, mul, input, output, jumps, comparisons,
    relative-base adjust) with position (0), immediate (1) and relative (2)
    parameter modes, and returns the last value emitted by opcode 4.

    NOTE(review): an input instruction (opcode 3) with a non-zero parameter
    mode is not handled by any branch and would loop forever — confirm the
    puzzle input never encodes one.
    """
    seznam = list(map(int, vsebina_datoteke.split(",")))
    # Intcode programs may address far beyond their own length; pad the
    # memory with zeros.  (Perf: a single concatenation instead of the
    # original 40-million-iteration append loop.)
    seznam += [0] * 40000000
    output = 0
    i = 0      # instruction pointer
    baza = 0   # relative base for mode-2 parameters
    while seznam[i] != 99:
        # Fast path: instruction words 1..9 mean all parameter modes are 0.
        if seznam[i] == 1:
            a = seznam[seznam[i+1]]
            b = seznam[seznam[i+2]]
            seznam[seznam[i+3]] = a + b
            i += 4
        elif seznam[i] == 2:
            a = seznam[seznam[i+1]]
            b = seznam[seznam[i+2]]
            seznam[seznam[i+3]] = a * b
            i += 4
        elif seznam[i] == 3:
            seznam[seznam[i+1]] = vnos
            i += 2
        elif seznam[i] == 4:
            a = seznam[seznam[i+1]]
            output = a
            i += 2
        elif seznam[i] == 5:
            if seznam[seznam[i+1]] != 0:
                i = seznam[seznam[i+2]]
            else:
                i += 3
        elif seznam[i] == 6:
            if seznam[seznam[i+1]] == 0:
                i = seznam[seznam[i+2]]
            else:
                i += 3
        elif seznam[i] == 7:
            if seznam[seznam[i+1]] < seznam[seznam[i+2]]:
                seznam[seznam[i+3]] = 1
            else:
                seznam[seznam[i+3]] = 0
            i += 4
        elif seznam[i] == 8:
            if seznam[seznam[i+1]] == seznam[seznam[i+2]]:
                seznam[seznam[i+3]] = 1
            else:
                seznam[seznam[i+3]] = 0
            i += 4
        elif seznam[i] == 9:
            # Bug fix: the relative-base offset is the *parameter*
            # (seznam[i+1]); the original read seznam[seznam[i]], i.e. the
            # contents of address 9, which is unrelated to this instruction.
            baza += seznam[seznam[i+1]]
            i += 2
        else:
            # General path: decode opcode and the three parameter modes.
            opt = seznam[i] % 100
            m1 = (seznam[i] // 100) % 10
            m2 = (seznam[i] // 1000) % 10
            m3 = (seznam[i] // 10000) % 10
            if opt in [1, 2, 7, 8]:
                if m1 == 0:
                    a = seznam[seznam[i+1]]
                elif m1 == 1:
                    a = seznam[i+1]
                else:
                    a = seznam[seznam[i+1] + baza]
                if m2 == 0:
                    b = seznam[seznam[i+2]]
                elif m2 == 1:
                    b = seznam[i+2]
                else:
                    b = seznam[seznam[i+2] + baza]
                if opt == 1:
                    if m3 == 0:
                        seznam[seznam[i+3]] = a + b
                    elif m3 == 1:
                        seznam[i+3] = a + b
                    else:
                        seznam[seznam[i+3] + baza] = a + b
                elif opt == 2:
                    if m3 == 0:
                        seznam[seznam[i+3]] = a * b
                    elif m3 == 1:
                        seznam[i+3] = a * b
                    else:
                        seznam[seznam[i+3] + baza] = a * b
                elif opt == 7:
                    if m3 == 0:
                        if a < b:
                            seznam[seznam[i+3]] = 1
                        else:
                            seznam[seznam[i+3]] = 0
                    elif m3 == 1:
                        if a < b:
                            seznam[i+3] = 1
                        else:
                            seznam[i+3] = 0
                    else:
                        if a < b:
                            seznam[seznam[i+3] + baza] = 1
                        else:
                            seznam[seznam[i+3] + baza] = 0
                elif opt == 8:
                    if m3 == 0:
                        if a == b:
                            seznam[seznam[i+3]] = 1
                        else:
                            seznam[seznam[i+3]] = 0
                    elif m3 == 1:
                        if a == b:
                            seznam[i+3] = 1
                        else:
                            seznam[i+3] = 0
                    else:
                        if a == b:
                            seznam[seznam[i+3] + baza] = 1
                        else:
                            seznam[seznam[i+3] + baza] = 0
                i += 4
            elif opt in [4, 9]:
                if m1 == 0:
                    a = seznam[seznam[i+1]]
                elif m1 == 1:
                    a = seznam[i+1]
                else:
                    a = seznam[seznam[i+1] + baza]
                if opt == 4:
                    output = a
                else:
                    baza += a
                i += 2
            elif opt in [5, 6]:
                if m1 == 0:
                    a = seznam[seznam[i+1]]
                elif m1 == 1:
                    a = seznam[i+1]
                else:
                    a = seznam[seznam[i+1] + baza]
                if m2 == 0:
                    b = seznam[seznam[i+2]]
                elif m2 == 1:
                    b = seznam[i+2]
                else:
                    b = seznam[seznam[i+2] + baza]
                if opt == 5:
                    if a != 0:
                        i = b
                    else:
                        i += 3
                else:
                    if a == 0:
                        i = b
                    else:
                        i += 3
    return output
def naloga1(vsebina_datoteke):
    """Run the intcode program with input 1 and return its output as a string."""
    result = postopek(vsebina_datoteke, 1)
    return str(result)
# Script entry point: read the puzzle input, solve part 1, write the answer.
if __name__ == '__main__':
    with open('day_9.in', encoding='utf-8') as f:
        vsebina_datoteke = f.read()
    odgovor1 = naloga1(vsebina_datoteke)
    with open('day_9_1.out', 'w', encoding='utf-8') as f:
        f.write(odgovor1)
    # odgovor2 = naloga1(vsebina_datoteke, 5)
    # with open('day_5_2.out', 'w', encoding='utf-8') as f:
    #     f.write(odgovor2)
### Taking a string and changing a letter in the string
def mutate_string(string, position, character):
    """Print *string* twice with the char at *position* replaced by *character*.

    Demonstrates two equivalent techniques: list mutation + join, and slice
    reassembly.  Both outputs are identical.
    """
    # Technique 1: convert to a list, mutate in place, join back.
    chars = list(string)
    chars[position] = character
    string = ''.join(chars)
    print(string)
    # Technique 2: slice around the position and splice in the character.
    string = string[:position] + character + string[position+1:]
    print(string)
# Demo: replace the character at index 14 with 's'.
mutate_string('Frank Sinatra and the Rat Pack', 14, 's')
import re
class Item:
    """Scrapes one product-listing card (BeautifulSoup element) into a dict."""

    def scrap(self,soup_item,cat):
        """Extract code, title, prices, maker, rating and stars from *soup_item*.

        :param soup_item: BeautifulSoup element for one product card
        :param cat: category label copied verbatim into the result dict
        :returns: flat dict of the listing's fields
        NOTE(review): assumes the image/maker/star elements always exist; a
        missing one raises AttributeError — confirm upstream filtering.
        """
        img_div = soup_item.find('div', {'class': 'listagem-img'})
        img_a = img_div.find('a')
        img_el = img_div.find('img')
        # Manufacturer logo carries the maker name in its alt text.
        fab = soup_item.find('li',{'class': 'imagem-fabricante'})
        fab = fab.find('img')
        cash_price = self.price_handle(soup_item,'div','listagem-precoavista')
        price = self.price_handle(soup_item,'div','listagem-preco')
        mkt_place_low_price = self.price_handle(soup_item,'b','mktplace_preco_menor')
        # Star count is encoded in the element's second CSS class (e.g. 'e4').
        stars = soup_item.find('div',{'class': 'H-estrelas'})
        stars = stars.attrs['class'][1].replace('e','')
        stars = int(stars) if stars != '' else 0
        rating = soup_item.find('li',{'class': 'notas'})
        rating = rating.text.strip()
        link = soup_item.find('h2',{'class':'H-titulo'}).find('a')
        evaluation = 0
        if rating != '':
            # Pull the first run of digits out of the free-text rating.
            rgx = re.search("[0-9]+", rating)
            evaluation = int(rating[rgx.start():rgx.end()])
        el = {
            'code': int(img_a.attrs['data-id']), # Item code
            'title': img_el.attrs['alt'], # Item Title,
            'category': cat ,
            'img_src': img_el.attrs['src'], # Item img src
            'link': link.attrs['href'], # Item link
            'maker': fab.attrs['alt'].replace('Logo ',''), # Item maker
            'evaluation': evaluation, # Item Evaluation
            'star': stars, # Item stars
            'price': price, # Cash price
            'cash_price': cash_price, # Cash price
            'mkt_place_low_price': mkt_place_low_price, # Cash price
        }
        return el

    def price_handle(self, soup_item, tag, class_name):
        """Parse a Brazilian-format price ("R$ 1.234,56") into a float.

        Returns 0.0 when the element is absent.
        """
        price_el = soup_item.find(tag,{'class': class_name})
        if price_el is not None:
            price = price_el.text
            # Strip currency symbol and separators, then re-insert the
            # decimal point before the last two digits.
            price = re.sub("(R\$)|(\.)|(\,)",'',price).strip()
            price = float(price[:-2] + '.' + price[-2:])
        else:
            price = 0.0
        return price
|
from mingus.containers import Note
class Tuning:
    """An ordered collection of open-string notes describing a tuning."""

    def __init__(self, notes=None):
        # Robustness fix: iterating a Tuning() created without notes used to
        # raise TypeError (iterating None); default to an empty tuple instead.
        self.notes = notes if notes is not None else ()

    def __iter__(self):
        """Yield the notes from highest string to lowest."""
        for note in self.notes:
            yield note
# Standard guitar tuning, listed from the high E (1st string) down to low E.
Standard = Tuning((Note('E', 4), Note('B', 3), Note('G', 3), Note('D', 3), Note('A', 2), Note('E', 2)))
|
# Doc Handler.py
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.httpclient
import tornado.gen
import tornado.options
from tornado.escape import json_decode,json_encode
import json, hashlib, pickle, sys, os
from assignment2.IndexerHandler import IndexerHandler
from assignment2.inventory import *
class DocHandler(tornado.web.RequestHandler):
    """Serves document (question-pair) records for this server's partition."""

    DOCSERVER_ID = 0  # class-level counter, bumped once per spawned app below

    def initialize(self, port):
        """Load the pickled document partition that belongs to *port*."""
        self.port = port
        self.pageList = []  # store pages partition
        idx = INDEX_PORTS.index(self.port)
        with open(os.path.join(JOB_PATHS['docs'], "%d.out" % idx), 'rb') as doc_fd:
            self.doc_dict = pickle.load(doc_fd)

    @staticmethod
    def createSnippet(q, text):
        """Return ~200 chars of *text* around the first (case-insensitive)
        occurrence of *q*, with the matched substring wrapped in <strong>.

        NOTE(review): when *q* is absent ``find`` returns -1 and the snippet
        degenerates to the first 100 characters — confirm acceptable.
        """
        index_q = text.casefold().find(q.casefold())
        len_q = len(q)
        start = max(0, index_q - 100)
        # Bug fix: slice ends are exclusive, so the former ``len(text) - 1``
        # bound silently dropped the final character; clamp to len(text).
        end = min(len(text), index_q + 100)
        ret = text[start:end]
        q_in_text = text[index_q:index_q + len_q]
        ret = ret.replace(q_in_text, "<strong>" + q_in_text + "</strong>")
        return ret

    def get(self):
        """Write the JSON record for document ``id`` if it belongs here."""
        id = self.get_argument("id")
        q = self.get_argument("q")
        score = self.get_argument("score")
        partition_idx = PARTITIONER(id, NUM_DOC_PART)
        # Only answer for ids partitioned onto this server's port.
        if INDEX_PORTS[partition_idx] == self.port:
            result = {}
            result["id"] = id
            result["question1"] = self.doc_dict[id][0]  # question1
            result["question2"] = self.doc_dict[id][1]  # question2
            result["isDup"] = self.doc_dict[id][2]
            result["score"] = score
            resp = json.dumps({"results": result}, sort_keys=True)
            self.write(resp)
# Script entry point: start one DocHandler application per configured doc
# port, all sharing a single IOLoop.
if __name__ == "__main__":
    apps = {}
    for doc_port in DOC_PORTS:
        url = BASE_ADDR % doc_port
        # print("Doc Handler%d url: %s" %(DocHandler.DOCSERVER_ID, url))
        DocHandler.DOCSERVER_ID+=1
        apps[doc_port] = tornado.web.Application(handlers=[(r"/doc", DocHandler, dict(port = doc_port))],debug = True)
        apps[doc_port].listen(doc_port)
    tornado.ioloop.IOLoop.instance().start()
|
import random
import numpy as np
from game_state import GameState
__author__ = 'Anthony Rouneau'
class AlphaBeta(object):
    """
    Simple implementation of a cutoff alpha-beta
    Assert that the player using this Alpha Beta is the "MAX" player.
    """

    def __init__(self, eval_fct, _max_depth=6):
        """
        :param eval_fct: objective function that computes a score given a state for one player
        :type eval_fct: function
        :param _max_depth: the maximum depth of the tree the algorithm can explore
        :type _max_depth: int
        """
        self.eval = eval_fct
        self.max_depth = _max_depth
        self.random = random.Random()
        # Maps a terminal-confirmed state to its (best value, best action)
        # pair, so repeated searches can skip the subtree.
        self.actions = {}

    def alphaBetaSearching(self, state):
        """
        :param state: The current state of the game (including the current player)
        :type state: GameState
        :return: the best action among the possible ones
        """
        if self.actions.get(state) is not None:
            value, action = self.actions[state]
        else:
            value, action, _ = self.maxValue(state, -float('inf'), float('inf'), 0)
        return action

    def maxValue(self, state, alpha, beta, depth):
        """
        Computes the best step possible for the "MAX" player.

        :param state: the state of the current node
        :type state: GameState
        :param alpha: the alpha bound
        :param beta: the beta bound
        :param depth: the current depth in the tree
        :return: (best value, best action, reached_end) — reached_end is True
                 when the value comes from a terminal state.
        """
        # Check if we reached the end of the tree
        if depth > self.max_depth:
            return self.eval(state, other_player=False), None, False
        # Check if the game state is final
        elif np.array(state.terminalTest()).any():
            return self.eval(state, other_player=False), None, True
        # Initializing the best values
        best_value = -float('inf')
        best_actions = []
        best_reached_end = False
        # If we already made the computations, no need to do more.
        if self.actions.get(state) is not None:
            # Bug fix: this used to ``return self.actions.get(state), True``
            # — a 2-tuple — while every caller unpacks three values, raising
            # ValueError on any cache hit.  Return a proper triple instead.
            value, action = self.actions[state]
            return value, action, True
        # Explore every possible action from this point
        for action in state.possibleActions():
            value, _, reached_end = self.minValue(state.simulateAction(action), alpha, beta, depth + 1)
            if value > best_value:
                best_value = value
                best_actions = [action]
                best_reached_end = reached_end
                if best_value >= beta:
                    # Beta cutoff: MIN above will never allow this branch.
                    best_action = best_actions[self.random.randint(0, len(best_actions)-1)]
                    if best_reached_end:  # final state reached: safe to cache
                        self.actions[state] = best_value, best_action
                    return best_value, best_action, best_reached_end
            elif value == best_value:
                best_actions.append(action)
            alpha = max(alpha, value)
        best_action = best_actions[self.random.randint(0, len(best_actions) - 1)]
        if best_reached_end:  # final state reached: safe to cache
            self.actions[state] = best_value, best_action
        return best_value, best_action, best_reached_end

    def minValue(self, state, alpha, beta, depth):
        """
        Computes the best step possible for the "MIN" player.

        :param state: the state of the current node
        :type state: GameState
        :param alpha: the alpha bound
        :param beta: the beta bound
        :param depth: the current depth in the tree
        :return: (best value, best action, reached_end) — reached_end is True
                 when the value comes from a terminal state.
        """
        # Check if we reached the end of the tree
        if depth > self.max_depth:
            return self.eval(state, other_player=True), None, False
        # Check if the game state is final
        if np.array(state.terminalTest()).any():
            return self.eval(state, other_player=True), None, True
        # Initializing the best values
        best_value = float('inf')
        best_actions = []
        best_reached_end = False
        # If we already made the computations, no need to do more.
        if self.actions.get(state) is not None:
            # Bug fix: same 2-tuple-vs-3-tuple mismatch as in maxValue.
            value, action = self.actions[state]
            return value, action, True
        # Explore every possible action from this point
        for action in state.possibleActions():
            value, _, reached_end = self.maxValue(state.simulateAction(action), alpha, beta, depth + 1)
            if value < best_value:
                best_value = value
                best_actions = [action]
                best_reached_end = reached_end
                if best_value <= alpha:
                    # Alpha cutoff: MAX above will never allow this branch.
                    best_action = best_actions[self.random.randint(0, len(best_actions) - 1)]
                    if best_reached_end:  # final state reached: safe to cache
                        self.actions[state] = best_value, best_action
                    return best_value, best_action, best_reached_end
            elif value == best_value:
                best_actions.append(action)
            beta = min(beta, value)
        best_action = best_actions[self.random.randint(0, len(best_actions) - 1)]
        if best_reached_end:  # final state reached: safe to cache
            self.actions[state] = best_value, best_action
        return best_value, best_action, best_reached_end
|
#
# these files are created by
# Aryaman Godara
#
#
# Any changes for better is accepted, cause these are made in the initial phase of my coding
# It is a bot for sending messages to your whatsapp contacts..
# con:-
# it is not able to find a contact if you haven't recently talked to him
from selenium import webdriver
driver = webdriver.Chrome()
driver.get("https://web.whatsapp.com/")
print("Welcome to Godara automated Whatsapp Messenger..\n")
name = input("Enter name of person or the group name: ") # enter the exact value from your mobile contact list
msg = input("Enter the message: ")
count = int(input("Enter the no. of times you want to send the message! : "))
# Block until the user has scanned the WhatsApp Web QR code.
input("Enter anything after scanning of qr code! ")
# Locate the chat by its title attribute and open it.
# NOTE(review): find_element_by_xpath was removed in Selenium 4 — confirm the
# pinned selenium version, or migrate to driver.find_element(By.XPATH, ...).
user = driver.find_element_by_xpath("//span[@title='{}']".format(name))
user.click()
# The message input box (XPath is brittle: tied to WhatsApp Web's DOM layout).
msgbox = driver.find_element_by_xpath("//*[@id='main']/footer/div[1]/div[2]/div/div[2]")
msgbox.click()
for i in range(count):
    msgbox.send_keys(msg)
    sendButton = driver.find_element_by_xpath("//*[@id='main']/footer/div[1]/div[3]/button/span")
    sendButton.click()
print("Bang! Success")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.