index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
18,400 | f4d4a6b960fc4071a2fe24c66c3dc62b9de1bfe5 | from PyQt5 import QtCore, QtGui, QtWidgets
import sys, os
import ui_sched
import home2
import handler
class Main(QtWidgets.QMainWindow, ui_sched.Ui_MainWindow):
    """Scheduling window: lets the user pick a date and a time slot and
    save it as either a primary or a backup schedule entry."""

    def __init__(self):
        super(Main, self).__init__()
        self.setupUi(self)
        self.timeList()
        self.bsave.clicked.connect(self.Save)
        self.bcancel.clicked.connect(self.Cancel)

    def Save(self):
        """Collect date, time slot and primary/backup flag and persist them."""
        date = self.dateEdit.date()
        dates = date.toPyDate()
        index = self.comboBox.currentIndex()
        # Index 0 is the "---Select---" placeholder, so slot ids start at 1.
        time = index + 1
        # NOTE(review): QWidget already defines pos() as a method; this assumes
        # self.pos has been overwritten with a schedule position elsewhere --
        # TODO confirm.
        pos = self.pos
        # BUG FIX: 'back' was unbound when neither radio button was checked,
        # which raised UnboundLocalError; default to primary (0).
        if self.rbprimary.isChecked():
            back = 0
        elif self.rbbackup.isChecked():
            back = 1
        else:
            back = 0
        handler.AddSched(pos, time, dates, back)

    def Cancel(self):
        """Return to the home screen and close this window."""
        self.dialog = home2.Main()
        self.dialog.show()
        self.close()

    def timeList(self):
        """Populate the combo box with the available time slots."""
        self.comboBox.clear()
        # BUG FIX: the original called dh.TimeList() but 'dh' is undefined in
        # this module; 'handler' is the imported data-access module used for
        # AddSched -- TODO confirm TimeList lives there.
        results = handler.TimeList()
        lists = [row[0] for row in results]
        lists.insert(0, "---Select---")
        self.comboBox.addItems(lists)
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the main window,
    # then hand control to the event loop (sys.exit on the following line).
    app = QtWidgets.QApplication(sys.argv)
    main_window = Main()
    main_window.show()
sys.exit(app.exec_()) |
18,401 | 6cf890883392b155a3a960b0641b3a636dd78e66 | import partIIch1.chk_utils as chk_utils
import partIIch1.network as network
# %% Gradient check for the one-dimensional linear case.
x, theta = 2, 4
difference = chk_utils.gradient_check(x, theta)
# %% Dimension-check test: build He-initialized parameters for a small net.
layers_dims = [4, 5, 3, 1]
parameters = network.initialize_parameters_he(layers_dims)
grads = chk_utils.backward_propagation()
# Could not find a four-dimensional dataset that large.
|
18,402 | 1c4e52c9fa40a22ef7a6ef1a5cdb73d2ab9d422d | #coding=utf-8
'''
音频特征提取类, mfcc量化特征 和 指纹特征
'''
import os
import sys

import librosa
import numpy as np
import pandas as pd
import scipy
# scipy submodules are not importable as attributes of the bare 'scipy'
# package; they must be imported explicitly for scipy.signal.spectrogram
# and scipy.ndimage.filters.maximum_filter below to resolve.
import scipy.ndimage
import scipy.signal
class FeatureType:
    """Enumeration of the audio feature kinds this module can extract."""
    FEATURE_MFCC = 0 # MFCC quantized feature
    FEATURE_FINGERS = 1 # audio fingerprint feature
class AudioFeature():
    """Audio feature extraction: MFCC quantized features and
    Shazam-style spectrogram-peak fingerprints."""

    def __init__(self, n_fft=400, hop_length=200):
        # FFT window length (samples) and hop between frames (samples).
        self.n_fft = n_fft
        self.hop_length = hop_length

    def frame_to_second(self, frame, sr=16000):
        """Convert a frame index to seconds (window-center convention)."""
        return (frame * self.hop_length + self.n_fft / 2) / sr

    def second_to_frame(self, second, sr=16000):
        """Convert seconds to a (possibly fractional) frame index."""
        return (second * sr - (self.n_fft/2)) / self.hop_length if second > 0 else 0

    def get_audio_feature(self, audio_data, audio_sr, feature_type):
        """Dispatch on feature_type; returns None for unknown types."""
        if feature_type == FeatureType.FEATURE_MFCC:
            return self.get_mfcc_quantify(audio_data, audio_sr)
        elif feature_type == FeatureType.FEATURE_FINGERS:
            return self.get_fingerprints(audio_data, audio_sr)

    def get_fingerprints(self, audio_data, audio_sr=16000):
        '''Audio fingerprint feature: list of [key, anchor_t, paired_t] triples.
        '''
        Sxx, f, t = self._get_spectrogram(audio_data, audio_sr)
        # Median spacing of adjacent frequency bins / time frames.
        f_step = np.median(f[1:-1] - f[:-2]) # np.median() -> the median value
        t_step = np.median(t[1:-1] - t[:-2])
        peak_locations, max_filter, max_filter_size = self._find_spectrogram_peaks(Sxx, t_step, audio_sr)
        if peak_locations.size == 0:
            return []
        fingerprints = self._get_fingerprints_from_peaks(len(f) - 1, f_step, peak_locations, len(t) - 1, t_step)
        return fingerprints

    def _get_spectrogram(self, audio_data, audio_sr):
        """Magnitude spectrogram; returns (Sxx, freqs, times)."""
        f, t, Sxx = scipy.signal.spectrogram(audio_data, fs=audio_sr,
                                             scaling='spectrum',
                                             mode='magnitude',
                                             window='hann',
                                             nperseg=self.n_fft,
                                             noverlap=self.hop_length)
        return Sxx, f, t

    def _find_spectrogram_peaks(self, Sxx, t_step, audio_sr, f_size_hz=500, t_size_sec=2):
        """Local maxima of Sxx inside a (f_size_hz x t_size_sec) neighbourhood."""
        max_f = audio_sr // 2
        f_bins = Sxx.shape[0]
        f_per_bin = max_f / f_bins
        # Neighbourhood size converted from Hz / seconds to bins / frames.
        f_size = int(np.round(f_size_hz / f_per_bin))
        t_size = int(np.round(t_size_sec / t_step))
        max_filter = scipy.ndimage.filters.maximum_filter(Sxx, size=(f_size, t_size), mode='constant')
        peak = (Sxx == max_filter) & (Sxx != 0)
        peak_locations = np.argwhere((Sxx == max_filter) & (Sxx != 0))
        # NOTE(review): the size tuple is returned as (t_size, f_size) while the
        # filter above used (f_size, t_size) -- looks swapped, but callers
        # currently ignore this value; confirm before relying on it.
        return peak_locations, max_filter, (t_size, f_size)

    def _get_fingerprints_from_peaks(self, f_max, f_step, peak_locations, t_max, t_step):
        """Pair each anchor peak with later peaks in its target zone and
        hash each pair into a fingerprint key."""
        n_peaks = len(peak_locations) #the number of peak points
        # 1400hz tall zone box
        zone_f_size = 1400 // f_step
        # 6 second wide zone box
        zone_t_size = 6 // t_step
        # start one spectrogram time segment after the current one
        zone_t_offset = 1
        df_peak_locations = pd.DataFrame(peak_locations, columns=['f', 't'])
        # sort by time
        df_peak_locations.sort_values(by='t', ascending=True, inplace=True)
        peak_locations_t_sort = df_peak_locations['t']
        # sort by frequency
        peak_locations_f_sort = df_peak_locations['f'].sort_values(ascending=True)
        fingerprints = []
        avg_n_pairs_per_peak = 0
        # NOTE(review): save_num is assigned but never used.
        save_num = 0
        for i, anchor in df_peak_locations.iterrows():
            anchor_t, anchor_f = anchor['t'], anchor['f'] # anchor point coordinates
            zone_freq_start, zone_freq_end, zone_time_start, zone_time_end = self._get_target_zone_bounds(anchor_f,
                                                                                                          anchor_t,
                                                                                                          f_max, t_max,
                                                                                                          zone_f_size,
                                                                                                          zone_t_offset,
                                                                                                          zone_t_size)
            paired_df_peak_locations, n_pairs = self._query_dataframe_for_peaks_in_target_zone_binary_search(
                df_peak_locations, peak_locations_t_sort, peak_locations_f_sort,
                zone_freq_end, zone_freq_start, zone_time_end, zone_time_start)
            avg_n_pairs_per_peak += n_pairs
            for j, second_peak in paired_df_peak_locations.iterrows():
                second_peak_f = second_peak['f']
                second_peak_t_ = second_peak['t']
                time_delta = second_peak_t_ - anchor_t
                combined_key = self._combine_parts_into_key(anchor_f, second_peak_f, time_delta)
                fingerprint = [int(combined_key), int(anchor_t), int(second_peak_t_)]
                fingerprints.append(fingerprint)
        avg_n_pairs_per_peak /= n_peaks
        return fingerprints

    def _get_target_zone_bounds(self, anchor_f, anchor_t, f_max, t_max, zone_f_size, zone_t_offset, zone_t_size):
        """
        anchor_f: anchor frequency bin,
        anchor_t: anchor time frame,
        f_max, t_max = number of frequency bins / number of time frames
        Returns the clamped (freq_start, freq_end, time_start, time_end) box.
        """
        zone_time_start = anchor_t + zone_t_offset # start: anchor time + 1
        zone_time_end = min(t_max, zone_time_start + zone_t_size)
        zone_freq_start = max(0, anchor_f - (zone_f_size // 2))
        zone_freq_end = min(f_max, zone_freq_start + zone_f_size)
        if zone_freq_end == f_max:
            zone_freq_start = zone_freq_end - zone_f_size
        return int(zone_freq_start), int(zone_freq_end), int(zone_time_start), int(zone_time_end)

    def _query_dataframe_for_peaks_in_target_zone_binary_search(self, df_peak_locations, peak_locations_t,
                                                                peak_locations_f,
                                                                zone_freq_end, zone_freq_start,
                                                                zone_time_end, zone_time_start):
        """Binary-search both sorted axes and intersect the index sets to get
        the peaks falling inside the target zone."""
        start = peak_locations_t.searchsorted(zone_time_start, side='left')
        end = peak_locations_t.searchsorted(zone_time_end, side='right')
        # Older pandas returns arrays from searchsorted; unwrap the scalar.
        if isinstance(start, np.ndarray):
            start = start[0]
        if isinstance(end, np.ndarray):
            end = end[0]
        t_index = peak_locations_t.index[start:end]
        f_start = peak_locations_f.searchsorted(zone_freq_start, side='left')
        f_end = peak_locations_f.searchsorted(zone_freq_end, side='right')
        if isinstance(f_start, np.ndarray):
            f_start = f_start[0]
        if isinstance(f_end, np.ndarray):
            f_end = f_end[0]
        f_index = peak_locations_f.index[f_start:f_end]
        # Index intersection: rows inside both the time and frequency windows.
        paired_df_peak_locations = df_peak_locations.loc[t_index & f_index]
        n_pairs = len(paired_df_peak_locations)
        return paired_df_peak_locations, n_pairs

    def _combine_parts_into_key(self, peak_f, second_peak_f, time_delta):
        """Pack (anchor_f, paired_f, dt) into one 32-bit-ish integer key:
        anchor_f << 20 | paired_f << 10 | dt."""
        peak_f = np.uint32(peak_f)
        second_peak_f = np.uint32(second_peak_f)
        time_delta = np.uint32(time_delta)
        first_part = np.left_shift(peak_f, np.uint32(20))
        second_part = np.left_shift(second_peak_f, np.uint32(10))
        combined_key = first_part + second_part + time_delta
        return combined_key

    @staticmethod
    def get_mfcc_quantify(audio_data, audio_sr=16000, n_mfcc=12, n_fft=1024, hop_length=128):
        '''
        MFCC quantized feature.
        return shape=(duration, audio_sr//hop_length + 1)
        '''
        if len(audio_data.shape) > 1:
            audio_data = np.mean(audio_data, axis=0) # average multi-channel audio down to mono
        duration = audio_data.shape[0]//audio_sr
        quan_level = 6
        value = 64/quan_level # quan_level may be at most 6; above 6 the word value can overflow int64
        words_list = []
        for i in range(duration):
            # Extract features for each one-second chunk.
            one_data = audio_data[i*audio_sr:(i+1)*audio_sr] # 1s of samples
            one_mfcc_feat = librosa.feature.mfcc(y=one_data, sr=audio_sr, n_mfcc=n_mfcc, n_fft=n_fft, hop_length=hop_length) # extract MFCC features
            cur_feat = one_mfcc_feat.T
            r, c = cur_feat.shape #(126, n_mfcc)
            feat_list = []
            pre_feat = [0]*c
            # NOTE(review): this inner loop shadows the outer loop variable i;
            # harmless here (the outer for rebinds i each iteration and one_data
            # was sliced before the shadowing), but confusing -- worth renaming.
            for i in range(r):
                l = []
                for j in range(c):
                    if i == 0 or i == r-1:
                        v = cur_feat[i][j]
                    else:
                        v = (cur_feat[i-1][j] + cur_feat[i][j] + cur_feat[i+1][j])/3 # 3-frame smoothing
                    l.append(v)
                l += pre_feat
                pre_feat = l[:c]
                # Quantization: fold each coefficient into a base-quan_level word.
                zero_num = 0
                word = 0
                for v in l:
                    if v >= -1 and v <= 1:
                        zero_num += 1
                    plus = int((v + 32)/value)
                    plus = min(quan_level, max(0, plus))
                    word = word * quan_level + plus
                # All-near-zero frames quantize to 0 (silence).
                if zero_num == len(l):
                    word = 0
                feat_list.append(word)
            words_list.append(feat_list)
        feature = np.array(words_list)
        return feature
class Audio:
    """Audio wrapper: loads a file and extracts its fingerprint feature.
    """
    def __init__(self, audio_path:str, start_time:int=0, end_time:int=None):
        self.audio_obj = AudioFeature()
        self.audio_path = audio_path
        # Base file name without its extension.
        self.audio_name = os.path.basename(audio_path).split(".")[0]
        # NOTE(review): start_time/end_time are stored but never applied to the
        # loaded samples -- confirm whether trimming was intended.
        self.start_time = start_time
        self.end_time = end_time
        self.get_audio_params(self.audio_path)
    def get_audio_params(self, audio_path:str):
        """Load the file at its native sample rate and compute its feature."""
        # self.y, self.sr = read_audio(audio_path, 0, None)
        self.y, self.sr = librosa.load(audio_path, sr=None, mono=True)
        # Magic 1 == FeatureType.FEATURE_FINGERS (fingerprint feature).
        self.audio_feature = self.audio_obj.get_audio_feature(self.y, self.sr, 1)
print("path:", self.audio_path, " sr:", self.sr, " duration:", len(self.y)/self.sr, " feature.shape:", np.array(self.audio_feature).shape) |
18,403 | 770629991763374524f9b34a171551997e0b97d9 | from myclass import Shape
class MyBox:
    """A simple list-backed container supporting add/remove, len(),
    membership tests and iteration."""

    def __init__(self):
        # Internal storage for the contained items.
        self._theItems = list()

    def __len__(self):
        # BUG FIX: the original called self._theItems() -- a list is not
        # callable (TypeError); measure the list itself.
        return len(self._theItems)

    def add(self, item):
        """Append *item* to the box (duplicates allowed)."""
        self._theItems.append(item)

    def remove(self, item):
        """Remove and return the first occurrence of *item*.

        Asserts that the item is present.
        """
        assert item in self._theItems
        idx = self._theItems.index(item)
        return self._theItems.pop(idx)

    def __contains__(self, item):
        return item in self._theItems

    def __iter__(self):
        # BUG FIX: __iter__ takes no extra argument, and the original returned
        # an undefined MyBoxIterator; a plain list iterator yields the same
        # element sequence.
        return iter(self._theItems)
# Exercise the Shape API: draw a 3x3 shape, then resize it to 9x9 and redraw.
myshape=Shape(3,3)
myshape.define()
myshape.setvalues(9,9)
myshape.define()
|
18,404 | 53e3b41fa71a73c7d94b3d986d0ba54c3244e837 | # -*- coding:utf-8 -*-
import os
import sys
from robot.api import logger
from robot.libraries import BuiltIn
from titanrun.common.Core import rh_replace_arg_dic, quit_driver, get_driver, split_input_arg, get_csv_by_no
class Model(object):
    """Drives a data-driven web test run.

    data_resouce is a list of "api_name:case_number" strings; each entry is
    resolved to CSV test-data files under config/web/<module>/data and then
    dispatched to the matching method of a script class discovered under
    business/script.
    """
    def __init__(self,data_resouce, model="ride",version='',browser="chrome"):
        self.data_resouce = data_resouce
        self.model = model  # "ride" -> resolve paths from cwd, anything else -> from the parent dir
        self.version = version
        self.browser = browser
    def web_model(self):
        """Execute every configured test step against a single web driver."""
        # Path resolution differs between running from RIDE and from a script.
        if self.model == "ride":
            path = os.getcwd()
        else:
            path = os.path.abspath('..')
        # NOTE(review): BuiltIn is the robot.libraries.BuiltIn *module*, so this
        # relies on its private _Verify class -- confirm against the installed
        # Robot Framework version.
        Verify = BuiltIn._Verify()
        driver = get_driver()
        class_dict = dict()
        arg_dic = {}
        for i in range(len(self.data_resouce)):
            # Each entry looks like "<api>:<case number>".
            api, num = split_input_arg(self.data_resouce[i])
            api_name = api.split("_")[0]
            val_path = os.path.join(path,"config","web",api_name,"data","val","%s.csv"%api)
            elem_path = os.path.join(path,"config", "web", api_name, "data", "elem", "%s.csv" % api)
            val_data = get_csv_by_no(val_path, num)
            elm_data = get_csv_by_no(elem_path, 1)
            rh_replace_arg_dic(arg_dic, val_data)
            if i == 0:
                # On the first step, import every module under business/script
                # once and index all of their classes by name.
                script_path = os.path.join(path,"business","script")
                sys.path.append(script_path)
                lists = os.listdir(script_path)
                for l in lists:
                    mod = __import__(l[:-3])
                    for c in dir(mod):
                        obj_class = getattr(mod, c)
                        class_dict[c] = obj_class
            for cla, obj in class_dict.items():
                obj_method = getattr(obj, api, None)
                if obj_method:
                    # Re-fetch the attribute from a live instance so the method
                    # is bound before it is invoked.
                    obj_method = getattr(obj(driver,self.browser,self.version), api)
                    result = obj_method(val_data,elm_data)
                    if not result.flag:
                        quit_driver(driver)
                        Verify.fail("%s failed,%s" %(obj_method.__name__,result.msg))
                    rh_replace_arg_dic(result.arg, arg_dic)
                    logger.info("%s success!" %obj_method.__name__)
if __name__ == '__main__':
# data_resouce = [
# "gcm_login:1",
# "gcm_report:1",
# "gcm_deal:1",
# "gcm_enter:1",
# "gcm_review:1",
# "gcm_logout:1",
# "gcm_login:3",
# "gcm_approve:1",
# "gcm_logout:1",
# "gcm_login:1",
# "gcm_notice:1",
# "gcm_atp_enter:1",
# "gcm_atp_check:1"
# ]
# data_resouce = [
# "gps_login:1",
# "gps_apply:3",
# "gps_enter:1",
# "gps_add_insure:1",
# # "gps_change_insurance:1",
# "gps_check:1",
# "gps_logout:1",
# "gnc_login:1",
# "gnc_gps_manu_uw:1",
# "gnc_logout:1",
# "gps_login:1",
# "gps_pay_pool:1",
# "gps_pay_modify:1",
# "gps_logout:1",
# "gps_login:3",
# "gps_atp_enter:1",
# "gps_atp_check:1",
# "gps_logout:1",
# "gps_login:1",
# "gps_confirm:1",
# "gps_effective:1"
# ]
# data_resouce = [
# "gnc_login:1",
# "gnc_task_fill_query:1",
# "gnc_apply_allocation:2",
# "gnc_invoice_info_input:1",
# "gnc_cont_grp_insured_input:2",
# "gnc_apply_allocation_finish:1",
# "gnc_logout:1",
# "gnc_login:3",
# "gnc_atp_receive_and_pay:1",
# "gnc_atp_receive_and_paycheck:1",
# "gnc_logout:1",
# "gnc_login:1",
# "gnc_group_pol_approve:1",#fail
# "gnc_group_uw_auto:1",
# "gnc_manu_uw_all:1",
# "gnc_group_pol_sign:1",
# "gnc_get_url:2",
# "gnc_scanmock_vsc:2",
# "gnc_get_url:1",
# "gnc_policy_receipt_verify:1",
#
# ]
# data_resouce = [
# "gnc_login:1",
# "gnc_task_fill_query:1",
# "gnc_apply_allocation:3",
# "gnc_invoice_info_input:1",
# "gnc_cont_grp_insured_input:1",
# "gnc_apply_allocation_finish:1",
# "gnc_logout:1",
# "gnc_login:3",
# "gnc_atp_receive_and_pay:1",
# "gnc_atp_receive_and_paycheck:1",
# "gnc_logout:1",
# "gnc_login:1",
# "gnc_group_pol_approve:1",#fail
# "gnc_group_uw_auto:1",
# "gnc_manu_uw_all:1",
# "gnc_group_pol_sign:1",
# "gnc_get_url:2",
# "gnc_scanmock_vsc:2",
# "gnc_get_url:1",
# "gnc_policy_receipt_verify:1",
#
# ]
# data_resouce = [
# "uw_login:1",
# "uw_incept_entry:10",
# "uw_get_url:2",
# "uw_scanmock_vsc:2",
# "uw_get_url:1",
# "uw_batch_ending:1",
# "uw_form_task_assign:1",
# "uw_check_policy_info_ins_client_info:1",
# "uw_check_policy_info_app_client_info:1",
# "uw_check_policy_info_account:1",
# "uw_check_policy_info_beneficiary_info:1,",
# "uw_check_policy_info_report:1",
# "uw_check_policy_info_main_product:10",
# "uw_check_policy_info:2",
# "uw_assign_check_task:1",
# "uw_logout:1",
# "uw_login:2",
# "uw_check_list:1",
# "uw_dispatch_undwrt_task_page:1",
# "uw_logout:1",
# "uw_login:1",
# "uw_undwrt_task_list_closeproblem:1",
# "uw_undwrt_task_list_evaluateform:1",
# "uw_undwrt_task_list:1",
# "uw_logout:1",
# "uw_login:3",
# "uw_fin_receive:1",
# "uw_fin_receive_check:1",
# "uw_logout:1",
# "uw_login:1",
# "uw_issue_indivival_main:1",
# "uw_sms_send_check:1",
# "uw_print_task_check:1"
# ]
data_resouce = [
"uw_login:1",
"uw_incept_entry:1",# 8-5005
# "uw_logout:1",
# "uw_login:2",
# "uw_get_url:2",
# "uw_scanmock_vsc:1",# 1-1001/5005;2-2002
# "uw_get_url:1",
# "uw_batch_ending:1",
# "uw_form_task_assign:1",
# "uw_check_policy_info_ins_client_info:18",
# "uw_check_policy_info_app_client_info:20",
# # "uw_check_policy_info_beneficiary_info:1,", # list,必须加上","
# "uw_check_policy_info_account:1",
# "uw_check_policy_info_report:1",
# "uw_check_policy_info_main_product:20",# 8-5005
# # "uw_check_policy_info_add_product:1,", # 没有就不要
# "uw_check_policy_info:3",# 3-5005
# "uw_assign_check_task:1",
# "uw_logout:1",
# "uw_login:2", # zyb login
# "uw_check_list:1",
# "uw_dispatch_undwrt_task_page:1",
# "uw_logout:1",
# "uw_login:1", # szx login
# # "uw_undwrt_task_list_note:1",
# # "uw_undwrt_task_list_vsc:1",
# # "uw_get_url:2",
# # "uw_scanmock_vsc:3",
# # "uw_get_url:1",
# # "uw_undwrt_task_list_writeoff:1", #容易出错
# "uw_undwrt_task_list_closeproblem:1",
# "uw_undwrt_task_list_evaluateform:1", # 非标为2
# "uw_undwrt_task_list:1",
# # 下发核保决定(非标体通过)后需要增加如下5个操作
# # "uw_undwrt_task_list_vsc:2",
# # "uw_get_url:2",
# # "uw_scanmock_vsc:4",
# # "uw_get_url:1",
# # "uw_undwrt_task_list_writeoff:2",
# "uw_logout:1",
# "uw_login:3", # wyf login
# "uw_fin_receive:1",
# "uw_fin_receive_check:1",
# "uw_logout:1",
# "uw_login:1",
# "uw_issue_indivival_main:1",
# "uw_sms_send_check:1",
# "uw_print_task_check:1",
]
# data_resouce = [
# "pos_login:1",
# "pos_entry_client_locate:1",
# "pos_entry_client_select:1",
# "pos_entry_apply_info_input:2", #退保2,犹豫期退保1
# "pos_entry_accept_item_confirm:1",
# "pos_entry_binding_order_adjust:1",
# "pos_entry_accept_detail_input:2", #退保2,犹豫期退保1
# "pos_entry_accept_result:1",
# "pos_entry_accept_finish:1",
# "pos_integrate_query:1",
# "pos_logout:1",
# "pos_login:2",
# "pos_fin_payment:1",
# "pos_fin_payment_check:1"
# ]
# data_resouce = [
# "claim_login:1",
# # "claim_report:3",
# # "claim_deal:1",
# # "claim_get_url:2",
# # "claim_scanmock_vsc:1",
# # "claim_get_url:1",
# # "claim_deal2:1",
# # "claim_case_input_applicant:1",
# # "claim_case_input_payee:1",
# # "claim_case_input_accident_disease:3",
# "claim_examination_main:1",
# "claim_logout:1",
# "claim_login:2", # zyb login
# "claim_review:1",
# "claim_notice:1",
# "claim_logout:1",
# "claim_login:3",
# "claim_fin_payment:1",
# "claim_fin_payment_check:1",
# ]
# data_resouce=[
# # "bep_login:1",
# # "bep_bill_new_page_base3:1",
# # "bep_bill_new_page_expense3:4,",
# # "bep_bill_new_page_submit3:1",
# # "bep_personal_documents:1",
# # "bep_get_url:2",
# # "bep_scanmock_vsc:1",
#
# # "bep_login:1",
# # "bep_bill_new_page_base:1",
# # "bep_bill_new_page_expense:1,",
# # "bep_bill_new_page_submit:1",
# # "bep_personal_documents:1",
# # "bep_get_url:2",
# # "bep_scanmock_vsc:1",
#
#
# # "bep_login:2",
# # "bep_bill_new_page_base:4",
# # "bep_bill_new_page_expense:7,",
# # "bep_bill_new_page_submit:1",
# # "bep_personal_documents:1",
# # "bep_get_url:2",
# # "bep_scanmock_vsc:1",
#
# "bep_login:7",
# "bep_group_work:1",
# "bep_center_task_trial:1",
# "bep_group_work:2",
# "bep_center_task_trial:1",
# ]
# data_resouce=["atp_login:1",
# # "atp_transfer_draw:1",
# "atp_agreement_batch:1"
# ]
# path = "D:/worksvn/rh_auto_test_9.30"
# file = "D:/worksvn/rh_auto_test_9.30/config/web/claim/cfg/claim_configuration.ini"
web_model(data_resouce, "python") |
18,405 | 8e8d5ae257186624a230b59a909bfa561286773e | import gym
import tensorflow as tf
from tensorflow.keras.models import load_model
import numpy as np
import matplotlib.pyplot as plt
import concurrent.futures
import math
try:
    # Enumerate the physical GPUs.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        for gpu in gpus:
            # Dynamic memory growth cannot be combined with virtual GPUs,
            # so it stays commented out.
            #tf.config.experimental.set_memory_growth(gpu, True)
            # Create a virtual GPU capped at 800 MB.
            tf.config.experimental.set_virtual_device_configuration(
                gpu, [tf.config.experimental.VirtualDeviceConfiguration(memory_limit = 800)])
            # Inspect the virtual GPU information (debugging aid).
            #logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            #for logical_gpu in logical_gpus:
            #    tf.config.experimental.set_memory_growth(logical_gpu, True)
            #print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except Exception as e:
    print(e)
env = gym.make('CartPole-v0')
a = state = env.reset()
# Number of available discrete actions.
num_actions = env.action_space.n
# Size of the environment's observation vector.
num_states = env.observation_space.shape[0]
print(tf.keras.backend.floatx())
tf.keras.backend.set_floatx('float64')
print(tf.keras.backend.floatx())
class Deep_Q_Learning:
    """DQN agent: epsilon-greedy policy, replay memory, and an online
    (train) network synced periodically into a target network."""
    def __init__(self, num_states, num_actions, gamma,
                 memory_capacity, min_memory, batch_size, learning_rate):
        # Number of values contained in each state vector.
        self.num_states = num_states
        # Number of selectable actions.
        self.num_actions = num_actions
        # Discount factor for next_q_value: the larger it is, the more
        # weight the bootstrapped future value receives.
        self.gamma = gamma
        # Replay memory of past transitions; each record holds
        # state, action, reward, next_state and done.
        self.memory = {'state': [], 'action': [], 'reward': [], 'next_state': [], 'done': []}
        # Maximum number of records to keep.
        self.memory_capacity = memory_capacity
        # Minimum number of records before training starts.
        self.min_memory = min_memory
        # Number of samples drawn per training step.
        self.batch_size = batch_size
        # Mainstream DQN uses two identical models: only train_model is
        # trained; target_model passively receives copied weights.
        # Online (training) model.
        self.train_model = creat_model()
        # Target model.
        self.target_model = creat_model()
        # Optimizer.
        self.optimizer = tf.keras.optimizers.Adam(learning_rate = learning_rate)
        # Loss function.
        self.loss_function = tf.keras.losses.MeanSquaredError()
        self.loss_value = None
    def get_action(self, state, random_action_rate):
        """Epsilon-greedy action selection from the online network."""
        if np.random.random() < random_action_rate:
            action = np.random.randint(0, self.num_actions)
        else:
            #action_rate = self.train_model(np.reshape(state, [1, self.num_states]))
            action_rate = self.train_model.predict(np.reshape(state, [1, self.num_states]))
            action = np.argmax(action_rate)
        return action
    def save_memory(self, new_memory):
        """Append one transition, evicting the oldest once capacity is hit."""
        # If the memory is full, drop the oldest record.
        if len(self.memory['state']) >= self.memory_capacity:
            for key in self.memory.keys():
                self.memory[key].pop(0)
        # Append the new record.
        for key, value in new_memory.items():
            self.memory[key].append(value)
    def get_memory_size(self):
        return len(self.memory['state'])
    def get_loss_value(self):
        return self.loss_value
    # @tf.function compiles the eager TensorFlow computation for efficiency,
    # at the cost of the function no longer being debuggable step-by-step.
    @tf.function
    def calculate_gradient(self, train_state, train_action, nextq_value):
        """Take one gradient step of the online network towards nextq_value."""
        # Compute loss_value inside GradientTape so gradients can be taken.
        with tf.GradientTape() as tape:
            train_model_output = self.train_model(train_state)
            q_value = tf.math.reduce_sum(train_model_output
                * tf.one_hot(train_action, self.num_actions, dtype = 'float64'), axis=1)
            loss_value = self.loss_function(nextq_value, q_value)
        # Compute the gradients.
        weight = self.train_model.trainable_variables
        gradients = tape.gradient(loss_value, weight)
        # Update the weights according to the gradients.
        self.optimizer.apply_gradients(zip(gradients, weight))
        #self.loss_value = loss_value.numpy()
    def training_model(self):
        """Sample one batch from memory and train the online network."""
        if len(self.memory['state']) > self.min_memory:
            # Draw one batch of training data.
            sample_index = np.random.choice(len(self.memory['state']), self.batch_size)
            train_state = np.asarray([self.memory['state'][index] for index in sample_index])
            train_action = np.asarray([self.memory['action'][index] for index in sample_index])
            train_reward = np.asarray([self.memory['reward'][index] for index in sample_index])
            train_next_state = np.asarray([self.memory['next_state'][index] for index in sample_index])
            train_done = np.asarray([self.memory['done'][index] for index in sample_index])
            # Target model's best predicted value for each next state.
            #taeget_predict = np.max(self.target_model(train_next_state), axis = 1)
            taeget_predict = np.max(self.target_model.predict(train_next_state), axis = 1)
            # Compute the next_q value (Bellman target). When the chosen action
            # caused done, output the bare reward without bootstrapping from
            # next_state.
            #nextq_value = train_reward + (self.gamma * taeget_predict)
            nextq_value = np.where(train_done, train_reward, train_reward + (self.gamma * taeget_predict))
            self.calculate_gradient(train_state, train_action, nextq_value)
    def copy_weight(self):
        # Copy the Train Model weights into the Target Model.
        self.target_model.set_weights(self.train_model.get_weights())
    def save_model(self):
        """Persist the online network (optimizer state excluded)."""
        self.train_model.save('E:/python program/增強式學習結果/Model/DQL_Model_second_train',
            include_optimizer = False)
def creat_model(model_path='E:/python program/增強式學習結果/Model/DQL_Model'):
    """Load and return the pre-trained Keras model.

    The checkpoint location is now a parameter (the default preserves the
    original hard-coded path) so the function can be reused for other
    checkpoints without editing the source.
    """
    return load_model(model_path)
def training_loop(epochs, num_states, num_actions, gamma, random_action_rate, target_replace_count, memory_capacity,
                  min_memory, batch_size, learning_rate):
    """Train the DQN agent on the module-level CartPole `env`.

    epochs               -- number of episodes to play
    random_action_rate   -- epsilon for epsilon-greedy exploration
    target_replace_count -- steps between target-network weight syncs
    Remaining parameters are forwarded to Deep_Q_Learning.
    Returns (step_list, step_mean_list, learning_rate).
    """
    DQL_model = Deep_Q_Learning(num_states, num_actions, gamma,
                                memory_capacity, min_memory, batch_size, learning_rate)
    step_list = []       # steps survived in each episode
    reward_list = []     # total reward of each episode
    step_mean_list = []  # mean steps over each 50-episode window
    target_step = 0      # steps since the target network was last synced

    # ---- Warm-up: fill the replay memory up to the minimum size ----
    state = env.reset()
    step_times = 0  # actions taken in the current episode
    while DQL_model.get_memory_size() < (min_memory - 1):
        action = DQL_model.get_action(state, random_action_rate)
        # env.step returns: next_state (new environment state), reward,
        # done (episode-end flag) and info.
        next_state, reward, done, info = env.step(action)
        # BUG FIX: the original had "step_times =+ 1", which assigns +1 every
        # iteration instead of incrementing, so the 200-step success branch
        # below could never fire during warm-up.
        step_times += 1
        if done:
            reward = 0
        if step_times == 200:
            # CartPole-v0 caps episodes at 200 steps, so reaching 200 means
            # the episode was solved: reward it, checkpoint the model, and
            # store done=False so the bootstrap term is kept.
            reward = 1
            DQL_model.save_model()
            new_memory = {'state': state, 'action': action, 'reward': reward, 'next_state': next_state, 'done': False}
        else:
            new_memory = {'state': state, 'action': action, 'reward': reward, 'next_state': next_state, 'done': done}
        DQL_model.save_memory(new_memory)
        # Advance (or reset) the environment.
        if done:
            step_times = 0
            state = env.reset()
        else:
            state = next_state

    # ---- Training episodes ----
    for epoch in range(epochs):
        state = env.reset()
        rewards = 0      # accumulated reward for this episode
        step_times = 0
        while True:
            action = DQL_model.get_action(state, random_action_rate)
            next_state, reward, done, info = env.step(action)
            step_times += 1
            target_step += 1
            if done:
                reward = 0
            if step_times == 200:
                reward = 1
                DQL_model.save_model()
                new_memory = {'state': state, 'action': action, 'reward': reward, 'next_state': next_state, 'done': False}
            else:
                new_memory = {'state': state, 'action': action, 'reward': reward, 'next_state': next_state, 'done': done}
            rewards += reward
            # Store the transition, then train (no-op until the memory holds
            # more than min_memory samples).
            DQL_model.save_memory(new_memory)
            DQL_model.training_model()
            # Periodically copy the online weights into the target network.
            if (target_step % target_replace_count) == 0:
                DQL_model.copy_weight()
                target_step = 0
            if done:
                step_list.append(step_times)
                reward_list.append(rewards)
                print('Episode: {} Steps: {} Rewards: {}'.format(epoch, step_times, rewards))
                if (epoch % 50 == 0) and (epoch > 49):
                    step_mean_list.append(np.mean(step_list[-50:]))
                break
            state = next_state
    return step_list, step_mean_list, learning_rate
if __name__ == '__main__':
    learning_rate = 0.001
    # Number of samples drawn per training step.
    batch_size = 32
    # Probability of choosing a random action, so the model has a chance to
    # learn the feedback of actions it would not otherwise pick.
    random_action_rate = 0.1
    # Discount factor applied to the next Q value.
    gamma = 0.9
    # Interval (in steps) between target-network weight updates.
    target_replace_count = 50
    # Q-learning keeps a replay memory of past results to train the target
    # model against; this makes convergence easier. memory_capacity decides
    # how many input/output records are retained.
    memory_capacity = 2500
    min_memory = 50
    # Number of episodes to play.
    epochs = 200
    step_list, hidden, learning_rate = training_loop(epochs, num_states, num_actions, gamma, random_action_rate,
                                                     target_replace_count, memory_capacity, min_memory, batch_size, learning_rate)
##處理程序數量
#number_of_process = 5
##註冊多process管理器
#executor = concurrent.futures.ProcessPoolExecutor(max_workers = number_of_process)
#if True:
##for hidden_index in range(len(hidden_list)):
# #註冊的process列表
# processlist = []
# for i in range(5):
# processlist.append(executor.submit(training_loop, epochs, num_states, num_actions, hidden, gamma, random_action_rate,
# reduce_rate, min_rate, target_replace_count, target_replace_threshold,increase_rate,
# max_replace_threshold, memory_capacity, min_memory, batch_size, learning_rate))
# plot_count = 0
# for process in concurrent.futures.as_completed(processlist):
# step_list, step_mean_list, learning_rate = process.result()
# #for i in range(5):
# # step_list, step_mean_list, learning_rate = training_loop(epochs, num_states, num_actions, hidden, gamma, random_action_rate,
# # reduce_rate, min_rate, target_replace_count, target_replace_threshold,increase_rate,
# # max_replace_threshold, memory_capacity, min_memory, batch_size, learning_rate)
# plt.figure(figsize=(16,9))
# plt.subplot(2,1,1)
# title = 'Model_with_[7, 5]_hidden_layer ' + str(plot_count)
# plt.title(title)
# plt.xlabel('Epoch')
# plt.ylabel('Steps in each Epoch')
# plt.plot(range(len(step_list)), step_list,
# label='Step')
# plt.legend()
# plt.subplot(2,1,2)
# title = 'Model_with_[7, 5]_hidden_layer ' + str(plot_count)
# plt.title(title)
# plt.xlabel('Epoch')
# plt.ylabel('Mean Steps per 50 Epoch')
# plt.plot(range(len(step_mean_list)), step_mean_list,
# label='Mean Step')
# plt.legend()
# plt.savefig('E:/python program/增強式學習結果/增強式學習結果' + title + '.png')
# plt.close()
# plot_count += 1
#input('pause Enter') |
18,406 | 39143be146e57af781577425690b5b55472b98b5 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Adds the Cooperation model and links donations to it.

    Creates Cooperation (name/what/when/quantity plus FKs to Project and
    Direction), adds Donation.cooperation (nullable FK) and
    Donation.type_donation (choice between Cooperation and Thing), and
    relaxes Donation.thing to be nullable.
    """
    dependencies = [
        ('project', '0019_auto_20141218_0024'),
    ]
    operations = [
        migrations.CreateModel(
            name='Cooperation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
                ('what', models.TextField()),
                ('when', models.DateTimeField()),
                ('quantity', models.IntegerField(default=1)),
                ('project', models.ForeignKey(related_name=b'cooperations', to='project.Project')),
                ('where', models.ForeignKey(related_name=b'cooperations', to='project.Direction')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='donation',
            name='cooperation',
            field=models.ForeignKey(related_name=b'cooperations', to='project.Cooperation', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='donation',
            name='type_donation',
            field=models.CharField(default=b'T', max_length=1, choices=[(b'C', b'Cooperation'), (b'T', b'Thing')]),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='donation',
            name='thing',
            field=models.ForeignKey(related_name=b'donations', to='project.Thing', null=True),
        ),
    ]
|
18,407 | f15d4d4a1bc9e8245f75e128dad9ac112ac18743 | # subclass
from GeometricObject import GeometricObject
from math import pi
class Circle(GeometricObject):
    """Circle shape; extends GeometricObject with a radius."""

    def __init__(self, radius = 10):
        # Initialise the GeometricObject part of the instance first.
        super().__init__()
        self.__radius = radius

    def getRadius(self):
        """Return the current radius."""
        return self.__radius

    def setRadius(self, radius):
        """Replace the radius."""
        self.__radius = radius

    def getArea(self):
        """Return the area, pi * r**2."""
        return pi * self.__radius**2

    def getDiameter(self):
        """Return the diameter, twice the radius."""
        return 2 * self.__radius

    def getPerimeter(self):
        """Return the circumference, 2 * pi * r."""
        return 2 * pi * self.__radius

    def __str__(self):
        """Combine the superclass description with this circle's radius."""
        return "{} radius: {}".format(super().__str__(), self.__radius)
|
18,408 | 1871ab02c2bda11c59f536e7d47e906885affdc5 | # This includes all buildings that produce military units
from buildings.bldng_class import Building
from resources import Resources, Wood
from units.barracks_units import Pikeman, Swordsman
from units.archers import Archer
from research_classes import BronzeShields, BronzeSwords
class Barracks(Building):
    """Infantry building: pikemen always; swordsmen once bronze tech is done."""
    cost = Resources({Wood: 150})
    size = (3, 3)
    letter_abbreviation = 'B'
    kind = 'barracks'
    time_to_build = 50

    def units_which_can_be_built(self):
        """Returns a list of unit kind strings."""
        available = [Pikeman.kind]
        owner = self.player
        if owner.age in ('bronze age', 'iron age'):
            required = (BronzeShields, BronzeSwords)
            if all(tech.name in owner.things_researched for tech in required):
                available.append(Swordsman.kind)
        return available

    def build_unit(self, unit_type):
        """Instantiate the requested unit kind at this barracks."""
        owner = self.player
        if unit_type not in self.units_which_can_be_built():
            # Should be unreachable: units_which_can_be_built is checked in
            # command_handling/insert_commands.py (insert_build_unit_command).
            print("Error! unit_type is not in self.units_which_can_be_built()")
            return
        if unit_type == Pikeman.kind:
            Pikeman(self, owner)
        else:
            Swordsman(self, owner)
class ArcheryRange(Building):
    """Ranged-unit building; only archers can be trained here."""
    cost = Resources({Wood: 150})
    size = (5, 3)
    letter_abbreviation = 'A'
    kind = 'archeryrange'
    time_to_build = 50

    def units_which_can_be_built(self):
        """Archers are the only unit kind this building produces."""
        return ['archers']

    def build_unit(self, unit_type):
        """Create an Archer at this range; reject any other unit kind."""
        owner = self.player
        if unit_type == 'archers':
            Archer(self.build_position, owner)
            return
        # Should be unreachable: callers validate via units_which_can_be_built().
        print("Error! An ArcheryRange is trying to build a non-archer.")
class Stable(Building):
    """Cavalry production building; no unit types are implemented yet."""
    cost = Resources({Wood: 150})
    size = (3, 3)
    letter_abbreviation = 'S'
    kind = 'stable'
    time_to_build = 50
    def units_which_can_be_built(self):
        """No stable units exist yet, so nothing can be built."""
        return []
class SiegeWorks(Building):
    """Siege-engine production building; no unit types are implemented yet."""
    cost = Resources({Wood: 150})
    size = (3, 3)
    letter_abbreviation = 'W'
    kind = 'siegeworks'
    time_to_build = 50
    def units_which_can_be_built(self):
        """No siege units exist yet, so nothing can be built."""
        return []
|
18,409 | bb12ffbb2da46d5d355c52953a0264fe7b59856c | ######################################################################
# #
# MATT CAM #
# Version : 1.1 #
# #
# Description : #
# Raspberry PI Photobooth Software #
# Author : #
# Matt Inglis #
# #
######################################################################
#IMPORTS
import RPi.GPIO as gpio
import picamera
import pygame
import time
import os
import PIL.Image
import ImageDraw
import cups
from threading import Thread
from pygame.locals import *
from time import sleep
from PIL import Image
#initialise global variables
closeme = True #Loop Control Variable
timepulse = 999 #Pulse Rate of LED
LEDon = False #LED Flashing Control
gpio.setmode(gpio.BCM) #Set GPIO to BCM Layout
Numeral = "" #Numeral is the number display
Message = "" #Message is a fullscreen message
SmallMessage = "" #SmallMessage is a lower banner message
TotalImageCount = 1 #Counter for Display and to monitor paper usage
PhotosPerCart = 16 #Selphy takes 16 sheets per tray
#initialise pygame
pygame.mixer.pre_init(44100, -16, 1, 1024*3) #PreInit Music, plays faster
pygame.init() #Initialise pygame
screen = pygame.display.set_mode((800,480),pygame.FULLSCREEN) #Full screen 640x480
background = pygame.Surface(screen.get_size()) #Create the background object
background = background.convert() #Convert it to a background
#UpdateDisplay - Thread to update the display, neat generic procedure
def UpdateDisplay():
    """Redraw the photobooth UI.

    Draws either the full-screen Message (if set) or the countdown Numeral
    (if set), then blits the background and the red border frame to the
    screen. Operates entirely on the module-level pygame globals.
    """
    # init global variables from main thread
    global Numeral
    global Message
    global SmallMessage
    global TotalImageCount
    global screen
    global background
    global pygame
    # NOTE: the original paper-counter banner code (SmallMessage /
    # TotalImageCount rendering) is disabled; see version history.
    # A text message takes priority over the countdown numeral.
    to_render = None
    if Message != "":
        to_render = (Message, 180)
    elif Numeral != "":
        to_render = (Numeral, 800)
    if to_render is not None:
        label, font_size = to_render
        font = pygame.font.Font(None, font_size)
        rendered = font.render(label, 1, (255, 0, 0))
        rect = rendered.get_rect()
        rect.centerx = background.get_rect().centerx
        rect.centery = background.get_rect().centery
        background.blit(rendered, rect)
    screen.blit(background, (0, 0))
    # Red outer frame, 2px wide.
    pygame.draw.rect(screen, pygame.Color("red"), (10, 10, 770, 430), 2)
    pygame.display.flip()
    return
#Pulse Thread - Used to pulse the LED without slowing down the rest
def pulse(threadName, *args):
    """LED driver thread: blinks the LED on GPIO 17 every `timepulse` seconds.

    A timepulse of 999 is the idle sentinel: the LED is held off (and LEDon
    primed True so the next countdown starts with the LED lit) until the main
    thread assigns a real pulse rate.
    """
    global gpio
    global LEDon
    gpio.setup(17, gpio.OUT)
    while closeme:
        if timepulse == 999:
            # Idle: LED off, flag primed for the next pulse cycle.
            gpio.output(17, False)
            LEDon = True
            # BUG FIX: this branch previously looped without sleeping,
            # busy-waiting and pegging a CPU core whenever the LED was idle.
            time.sleep(0.05)
        else:
            # Alternate the LED each half-period of the pulse.
            gpio.output(17, LEDon)
            time.sleep(timepulse)
            LEDon = not LEDon
#Main Thread
def main(threadName, *args):
    """Main photobooth loop.

    Initialises the Pi camera preview, then polls the shutter button
    (GPIO 22) and reprint button (GPIO 24). A shutter press runs a 5..1
    countdown (speeding up the LED flash via the `timepulse` global read by
    the pulse thread) and captures one photo.

    NOTE(review): `imagecounter` and `imagefolder` are only assigned inside
    the commented-out USB-mount section below, so the capture branch raises
    NameError as written -- re-enable that section (or define them) before
    using the shutter.
    """
    # Setup: configure the two button inputs.
    gpio.setup(24, gpio.IN)  # Button on pin 24 reprints the last image
    gpio.setup(22, gpio.IN)  # Button on pin 22 is the shutter
    global closeme
    global timepulse
    global TotalImageCount
    global Numeral
    global SmallMessage
    global Message
    Message = "Loading..."
    UpdateDisplay()
    time.sleep(5)  # 5 second delay to allow the USB drive to mount
    # Initialise the camera object.
    camera = picamera.PiCamera()
    # Preview transparency lets the pygame UI show through.
    camera.preview_alpha = 120
    camera.vflip = False
    camera.hflip = True
    camera.rotation = 90
    camera.brightness = 45
    camera.exposure_compensation = 6
    camera.contrast = 8
    camera.resolution = (1280,720)
    # Start the live preview.
    camera.start_preview()
    Message = "USB Check..."
    UpdateDisplay()
    # USB-mount check (disabled). This is also where imagefolder and
    # imagecounter used to be initialised -- see the NOTE in the docstring.
    # usbcheck = False
    # rootdir = '/media/'
    # while not usbcheck:
    #     dirs = os.listdir(rootdir)
    #     for file in dirs:
    #         folder = os.path.join(rootdir,file)
    #         if not file == 'SETTINGS' and os.path.isdir(folder):
    #             usbcheck = True
    #             imagedrive = os.path.join(rootdir,file)
    # imagefolder = os.path.join(imagedrive,'PhotoBooth')
    # #If a photobooth folder on the usb doesn't exist create it
    # if not os.path.isdir(imagefolder):
    #     os.makedirs(imagefolder)
    # Message = "Initialise"
    # UpdateDisplay()
    # #Procedure checks if a numerical folder exists, if it does pick the next number
    # #each start gets a new folder i.e. /photobooth/1/ etc
    # notfound = True
    # folderno = 1
    # while notfound:
    #     tmppath = os.path.join(imagefolder,`folderno`)
    #     if not os.path.isdir(tmppath):
    #         os.makedirs(tmppath)
    #         imagefolder = tmppath
    #         notfound = False
    #     else:
    #         folderno = folderno + 1
    # imagecounter = 0
    Message = ""
    UpdateDisplay()
    # Main loop: poll pygame events and the hardware buttons until closeme
    # is cleared (window close, ESC key, or Ctrl-C).
    while closeme:
        try:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    closeme = False
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        closeme = False
        except KeyboardInterrupt:
            closeme = False
        # input_value is the shutter button state (pressed == False).
        input_value = gpio.input(22)
        # input_value2 is the reprint button state.
        input_value2 = gpio.input(24)
        UpdateDisplay()
        # Beep sound loading (disabled).
        #pygame.mixer.music.load('/home/pi/Desktop/Beep.mp3')
        # Reprint branch (disabled): resend the buffered print file to CUPS.
        # if input_value2==False:
        #     #If the temp image exists send it to the printer
        #     if os.path.isfile('/home/pi/Desktop/tempprint.jpg'):
        #         #Open a connection to cups
        #         conn = cups.Connection()
        #         #get a list of printers
        #         printers = conn.getPrinters()
        #         #select printer 0
        #         printer_name = printers.keys()[0]
        #         Message = "Re-Print..."
        #         UpdateDisplay()
        #         #print the buffer file
        #         printqueuelength = len(conn.getJobs())
        #         if printqueuelength > 1:
        #             Message = "PRINT ERROR"
        #             conn.enablePrinter(printer_name)
        #             UpdateDisplay()
        #         elif printqueuelength == 1:
        #             SmallMessage = "Print Queue Full!"
        #             UpdateDisplay()
        #             conn.enablePrinter(printer_name)
        #         conn.printFile(printer_name,'/home/pi/Desktop/tempprint.jpg',"PhotoBooth",{})
        #         time.sleep(20)
        #         Message = ""
        #         UpdateDisplay()
        # input_value is the shutter release.
        if input_value == False:
            subimagecounter = 0
            # Increment the image number.
            # NOTE(review): imagecounter is never initialised (its setup is
            # commented out above), so this raises NameError when reached.
            imagecounter = imagecounter + 1
            # Beep playback (disabled).
            #pygame.mixer.music.play(0)
            # Countdown: show the numeral and speed up the LED flash each
            # second (timepulse is consumed by the pulse thread).
            Numeral = "5"
            UpdateDisplay()
            timepulse = 0.5
            time.sleep(1)
            #pygame.mixer.music.play(0)
            Numeral = "4"
            UpdateDisplay()
            timepulse = 0.4
            time.sleep(1)
            #pygame.mixer.music.play(0)
            Numeral = "3"
            UpdateDisplay()
            timepulse = 0.3
            time.sleep(1)
            #pygame.mixer.music.play(0)
            Numeral = "2"
            UpdateDisplay()
            timepulse = 0.2
            time.sleep(1)
            #pygame.mixer.music.play(0)
            Numeral = "1"
            UpdateDisplay()
            timepulse = 0.1
            time.sleep(1)
            # Camera shutter sound (disabled).
            #pygame.mixer.music.load('/home/pi/Desktop/camera.mp3')
            #pygame.mixer.music.play(0)
            Numeral = ""
            Message = "Smile!"
            UpdateDisplay()
            # Increment the subimage counter.
            subimagecounter = subimagecounter + 1
            # Build the filename (Python 2 backtick-repr syntax).
            filename = 'image'
            filename += `imagecounter`
            filename += '_'
            filename += `subimagecounter`
            filename += '.jpg'
            # Capture the image.
            # NOTE(review): imagefolder is only set in the disabled USB block.
            camera.capture(os.path.join(imagefolder,filename))
            # Re-open the capture mirrored -- presumably to undo the hflipped
            # preview; confirm the intended orientation.
            im = PIL.Image.open(os.path.join(imagefolder,filename)).transpose(Image.FLIP_LEFT_RIGHT)
            # Thumbnail the image for the print layout.
            im.thumbnail((560,400))
            # Print-layout + CUPS printing section (disabled).
            # im2.thumbnail((560,400))
            # im3.thumbnail((560,400))
            # im4.thumbnail((560,400))
            # #paste the thumbnails to the background images
            # bgimage.paste(im,(15,20))
            # bgimage.paste(im2,(15,410))
            # bgimage.paste(im3,(15,820))
            # bgimage.paste(im4,(15,1230))
            # #two columns of 4
            # bgimage.paste(im,(620,20))
            # bgimage.paste(im2,(620,410))
            # bgimage.paste(im3,(620,820))
            # bgimage.paste(im4,(620,1230))
            # Final_Image_Name = os.path.join(imagefolder,"Final_"+`imagecounter`+".jpg")
            # #Save it to the usb drive
            # bgimage.save(os.path.join(imagefolder,"Final_"+`imagecounter`+".jpg"))
            # #Save a temp file, its faster to print from the pi than usb
            # bgimage.save('/home/pi/Desktop/tempprint.jpg')
            # #Connect to cups and select printer 0
            # conn = cups.Connection()
            # printers = conn.getPrinters()
            # printer_name = printers.keys()[0]
            # #Increment the large image counter
            # TotalImageCount = TotalImageCount + 1
            # Message = "Print..."
            # UpdateDisplay()
            # #print the file
            # printqueuelength = len(conn.getJobs())
            # #If multiple prints in the queue error
            # if printqueuelength > 1:
            #     Message = "PRINT ERROR"
            #     conn.enablePrinter(printer_name)
            #     UpdateDisplay()
            # elif printqueuelength == 1:
            #     SmallMessage = "Print Queue Full!"
            #     conn.enablePrinter(printer_name)
            #     UpdateDisplay()
            # conn.printFile(printer_name,'/home/pi/Desktop/tempprint.jpg',"PhotoBooth",{})
            # time.sleep(20)
            Message = ""
            UpdateDisplay()
            timepulse = 999
            # Wait for the shutter switch to be released before re-arming.
            while input_value == False:
                input_value = gpio.input(22)
    # We are exiting, so stop the preview.
    camera.stop_preview()
# Launch the main photobooth thread (camera, buttons, display loop).
Thread(target=main, args=('Main',1)).start()
# Launch the LED pulse thread (flashes during the countdown).
Thread(target=pulse, args=('Pulse',1)).start()
# Keep the parent thread alive briefly while the workers start up.
time.sleep(5)
|
18,410 | d071be897662371136a6587669fc924580c6ada7 | from rest_framework import serializers
from food.models import Weight, Schedule
class WeightSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes Weight records (id, time, weight) for the REST API."""
    class Meta:
        model = Weight
        fields = ['id', 'time', 'weight']
class ScheduleSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes Schedule records (id, time, amount, hasFeeded) for the REST API."""
    class Meta:
        model = Schedule
        fields = ['id', 'time', 'amount', 'hasFeeded']
|
18,411 | 850643f0e7b25de40b35c73fe6216c138158db98 | """
Script that listens tweets from the Twitter Streaming API (according to searching query) and stores them into the given sqlite db
"""
production = 1
# https://marcobo# https://www.dataquest.io/blog/streaming-data-python/
# https://marcobonzanini.com/2015/03/02/mining-twitter-data-with-python-part-1/
# http://adilmoujahid.com/posts/2014/07/twitter-analytics/
# http://sebastianraschka.com/Articles/2014_sqlite_in_python_tutorial.html
import tweepy
import sqlite3
from datetime import datetime
from pprint import pprint
# SETUP DB
if production == 0:
sqlite_file = 'hyper_live.db'
else:
sqlite_file = '/home/ebonada/python/hyper/hyper_live.db'
connection = sqlite3.connect(sqlite_file)
db = connection.cursor()
# track number of tweets (developing purposes)
max_tweets_to_store = -1 # maximum number of tweets to store before shutting down the streaming (-1 for non stop)
# Class that manages the events received from streaming API
class TweetsListener(tweepy.StreamListener):
    """Tweepy stream listener that persists each incoming tweet to SQLite."""

    def __init__(self):
        """Initialize the listener with a tweet counter."""
        self.count = 0
        super(TweetsListener, self).__init__()

    def on_status(self, status):
        """Handle a 'status' event: log the tweet and insert it into TweetsRaw.

        Returns True to keep streaming (False once max_tweets_to_store is
        reached, which shuts the stream down).
        """
        self.count = self.count + 1
        tweet_info = status._json
        print("Tweet #{} => {}".format(self.count, tweet_info['text']))

        # Open a short-lived connection per event -- presumably because the
        # stream callback may run on its own thread and sqlite3 connections
        # are not shared across threads by default.
        connection = sqlite3.connect(sqlite_file)
        db = connection.cursor()

        # SECURITY FIX: bind values with '?' placeholders instead of
        # interpolating them via str.format. The previous version injected
        # tweet text (untrusted input) into the SQL string and relied on
        # manual quote-doubling, which is fragile and injectable.
        try:
            db.execute(
                """INSERT OR IGNORE INTO TweetsRaw
                       (tweetId, createdAt, storedAt, tweetText, favsCount, rtsCount,
                        language, userId, userFriendsCount, userFollowersCount,
                        userStatusesCount, userFavsCount, userLocation)
                   VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""",
                (
                    tweet_info['id_str'],
                    tweet_info['created_at'],
                    datetime.now().strftime("%a %b %d %H:%M:%S +0200 %Y"),
                    tweet_info['text'],  # no manual escaping needed with placeholders
                    tweet_info['favorite_count'],
                    tweet_info['retweet_count'],
                    tweet_info['lang'],
                    tweet_info['user']['id_str'],
                    tweet_info['user']['friends_count'],
                    tweet_info['user']['followers_count'],
                    tweet_info['user']['statuses_count'],
                    tweet_info['user']['favourites_count'],
                    '',  # userLocation deliberately blank (location storage was disabled)
                ),
            )
        except sqlite3.Error as e:
            print("####################\nError: {}\n####################\n".format(e))

        # Commit and close the per-event connection.
        connection.commit()
        connection.close()

        # Shut down streaming once the cap is reached (-1 means unlimited).
        if max_tweets_to_store != -1:
            if self.count == max_tweets_to_store:
                return False
        return True

    def on_error(self, status):
        """Handle an 'error' event; returning True keeps the stream alive."""
        print("Error TweetsListener:on_error => %s" % str(status))
        return True
# Setup twitter API access.
# SECURITY(review): API credentials are hard-coded in source; they should be
# rotated and loaded from the environment or a config file instead.
consumer_key = 'Ib3yDL5HYSLxAqENZ6QCHRFex'
consumer_secret = 'TuTQKld9os111vx7oMSM3PTfoNz9dZDcnACxIvHGL9euIvLE8I'
access_token = '74265344-UOJgWD9vzB9wJvgnet3f63bkQdJ0rLGz9gg67fqDP'
access_secret = '4AFqod7kCScnSDf9OcgmVeIdnxwa9ZKn9pwwFMBbpLi7u'
# Authenticate with OAuth and build the API client.
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
# Create tweepy instance.
api = tweepy.API(auth)
# Attach the listener and start the filtered live stream (blocks here).
twitter_stream = tweepy.Stream(auth, TweetsListener())
print("Streaming running...");
twitter_stream.filter(track=['#VidaFestival2017'])
|
18,412 | 51c092b34a9d71b1618a3f99f57898af8c8b194c | import os
import pandas as pd
from docx import Document
import pandas as pd
import matplotlib.pyplot as plt
from docx.shared import Inches
import seaborn as sns
from utils.constants import Armoire_GROUP,Armoire_PICK
from utils.constants import PL_GROUP,PL_PICK
from utils.constants import Int_GROUP,Int_PICK
from utils.constants import VILLE_NAME
class Cluster(object):
    """Helper for merging per-city Excel exports and encoding variables
    ahead of clustering."""

    def __init__(self, datasavedir=""):
        """Resolve the data-save directory.

        datasavedir: explicit path, or "" to default to ../data_save
            relative to this file.
        """
        if datasavedir == "":
            self.basedir = os.path.abspath(os.path.dirname(__file__))
            self.datasavedir = os.path.abspath(
                os.path.join(self.basedir, '../data_save'))
        else:
            self.datasavedir = datasavedir

    def merge_file(self, foldername='Armoire', villelst=None, add_region=True, Var_lst=None):
        """Concatenate the per-city Excel files for `foldername` into one frame.

        foldername: sub-folder under excel/ (also the file-name prefix).
        villelst: list of city names; None (FIX: was the module constant
            VILLE_NAME itself, evaluated eagerly at def time) means use
            VILLE_NAME, now resolved lazily at call time.
        add_region: keep a 'region' column identifying each row's source city.
        Var_lst: optional subset of columns to keep (plus 'region').
        """
        if villelst is None:
            villelst = VILLE_NAME
        # Use the first city's file only to discover the column layout.
        data_forcol = pd.read_excel(os.path.abspath(
            os.path.join(self.datasavedir, 'excel/{}/{}_{}.xlsx'.format(foldername, foldername, villelst[0]))))
        merge_data = pd.DataFrame(columns=data_forcol.columns)
        for ville in villelst:
            data_tp = pd.read_excel(os.path.abspath(
                os.path.join(self.datasavedir, 'excel/{}/{}_{}.xlsx'.format(foldername, foldername, ville))))
            data_tp['region'] = ville
            merge_data = pd.concat([merge_data, data_tp])
        if Var_lst is not None:
            merge_data = merge_data[Var_lst + ['region']]
        if not add_region:
            merge_data.drop(['region'], axis=1, inplace=True)
        merge_data.reset_index(inplace=True, drop=True)
        merge_data.drop_duplicates(inplace=True)
        return merge_data

    def cat_encode(self, data, var, regroup_dict=None):
        """Integer-encode categorical column `var` into `var`_encode.

        regroup_dict: optional dict(new class -> [old classes]) used to merge
            categories before encoding; None encodes each observed class.
        Returns (encoding dict, encoded copy of the data).
        """
        # Missing values become their own 'NA' category.
        new_data = data.fillna(value={var: 'NA'})
        grouped = data.groupby(new_data[var])
        classes = list(grouped.groups.keys())

        def rep_vsdict(key, mapping):
            # Translate one category label through the encoding table.
            # (Renamed parameter: it previously shadowed the builtin `dict`.)
            return mapping[key]

        if regroup_dict is None:
            dict_encode = dict(zip(classes, range(len(classes))))
            new_data[var + '_encode'] = new_data[var].apply(lambda x: rep_vsdict(x, mapping=dict_encode))
        else:
            dict_encode = dict(zip(regroup_dict.keys(), range(len(regroup_dict))))
            dict_encode_ = dict.fromkeys(classes, None)
            for key, values in regroup_dict.items():
                for value in values:
                    assert value in classes, "The value: {} of group_dict does not correspond to the actual classes".format(value)
                    dict_encode_[value] = dict_encode[key]
            new_data[var + '_encode'] = new_data[var].apply(lambda x: rep_vsdict(x, mapping=dict_encode_))
        return dict_encode, new_data

    def num_encode(self, data, var, proper_range=None):
        """Prepare numeric column `var` for clustering.

        Adds `var`_NA (1 where the value was missing, else 0), fills missing
        values with 0, and -- when proper_range=(lo, hi) is given -- adds a
        clipped copy in `var`_clip.
        """
        new_data = data.copy()
        new_data[var + '_NA'] = new_data[var].isnull().apply(lambda x: x * 1)
        new_data = new_data.fillna(value={var: 0})
        if proper_range is not None:
            new_data[var + '_clip'] = new_data[var].clip(proper_range[0], proper_range[1])
        return new_data

    def add_Int(self, data_row, data_Int):
        """Attach intervention data for one equipment row.

        NOTE(review): this method looks unfinished in the original source --
        it contained the bare statement `new_data[]` (a syntax error, removed
        here as the FIX) and only prints diagnostics without returning
        anything. Confirm the intended assignment before relying on it.
        """
        new_data = data_row.copy()
        data_pick = data_Int.loc[(data_Int['pan_CodeEqt'] == new_data['eq_Code']) & (data_Int['region'] == new_data['region'])]
        if data_pick.empty:
            print('data_Pick======================')
            print(type(data_pick))
            print(data_pick)
18,413 | 6b0f55a642de68fd69e2e0125ced946e6cb66edb | #!/usr/bin/env python3
import bs4 as bs
import urllib
from urllib import request
#abre o arquivo
xml = open('index.xml')
#instancia
soup = bs.BeautifulSoup(xml, 'xml')
URL_PLAY = "http://play.google.com/store/apps/details?id="
error = open("error.log", 'w')
print("app | sdkver | targetsdk | source | numDownloads")
#loop para percorrer todas as app
for app in soup.findAll('application'):
#pega o id da app. sempre sera a primeira linha id e sdkver
id = app.get('id')
url_app = URL_PLAY + id
try:
r = request.urlopen(url_app)
soup_html = bs.BeautifulSoup(r.read(), 'html.parser')
num_downloads = soup_html.find_all("div", attrs={"itemprop":"numDownloads"})
# some app doesn't have numDownloads
if num_downloads:
source = app.source.string
#loop para pegar todos os packages
for pkg in app.findAll('package'):
targetSdk = pkg.targetSdkVersion.string if pkg.targetSdkVersion!=None else ""
print(id, "|", pkg.sdkver.string, "|", targetSdk, "|", source, "|", num_downloads[0].string)
except urllib.error.HTTPError as e:
error.write(url_app)
error.write((str(e)))
error.close()
|
18,414 | 4cc726d294e6110ec396623d6a88b1f84fa35972 | import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from config.settings import secret
# Anchor the SQLite database next to this file, independent of the CWD.
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.config['SECRET_KEY'] = secret
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'wishlist.db')
# NOTE(review): DEBUG is hard-coded on -- confirm this is disabled in production.
app.config['DEBUG'] = True
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.session_protection = "strong"
login_manager.login_view = "login"
login_manager.init_app(app)
# Imported last, for side effects -- presumably so models/views can import
# `app`/`db` from this module without a circular-import failure; confirm.
import models
import views
18,415 | 502d9d59282d39e19bf428c0b75d32ceafdba2a8 | """
License:
Copyright (C) 2020
All rights reserved.
Arahant Ashok Kumar (aak700@nyu.edu)
Module: Validation
Objectives:
1. TQC Braiding Nanowire Algorithm - Validation phase
Functions:
1. validate_nanowire_state
2. validate_empty_branches
3. validate_multi_modal_crossing
4. validate_path_particle
5. validate_path_gates
6. verify_cutoff_pair
7. get_voltage_gate_values
8. check_unibranch_validity
9. validate_particle_positions
10. validate_branch_config
"""
from . import exception
from .utility import Utility
def validate_nanowire_state(nw, positions, utility, positions_single, voltages, nanowire, type, msg):
    """
    Nanowire Validation Algorithm which returns a score.

    `type` selects how many fully-free branches each intersection must have
    (type 2 -> 1 branch, type 1 -> 2 branches, anything else -> 0); when the
    score is positive, multi-modal crossings are also validated.
    """
    try:
        if type == 2:
            min_free_branch = 1
        elif type == 1:
            min_free_branch = 2
        else:
            min_free_branch = 0
        score = validate_empty_branches(nw, min_free_branch, msg)
        if score > 0:
            validate_multi_modal_crossing(positions, positions_single, voltages, utility, nanowire, msg)
        return score
    except exception.InvalidNanowireStateException:
        raise
def validate_empty_branches(nanowire, min_free_branch, msg):
    """
    Score the nanowire by counting intersections once at least
    `min_free_branch` fully-empty branches have been seen (a branch counts
    as empty when its run of zero-valued dict entries reaches the branch
    length).

    NOTE(review): `valid` is never reset between intersections, so after one
    intersection qualifies, every later intersection also adds to the score
    -- confirm whether that stickiness is intended.
    """
    score = 0
    valid = False
    for intersection in nanowire:
        empty_branches = 0
        for branch in intersection:
            needed = len(branch)
            run = 0
            for slot in branch:
                if not isinstance(slot, dict):
                    continue
                run = run + 1 if list(slot.values())[0] == 0 else 0
            if run >= needed:
                empty_branches += 1
            if empty_branches >= min_free_branch:
                valid = True
        if valid:
            score += 1
    # A zero score used to raise NoEmptyBranchException; the raise remains
    # disabled here, matching the original.
    return score
def validate_multi_modal_crossing(positions, positions_single, voltages, utility, nanowire, msg):
    """
    Check that the resulting nanowire state does not violate Rule 3
    (particle-zero mode isolation): raise MultiModalCrossingException when a
    pair that is NOT a zero-mode pair is cut off together by a shut gate.
    """
    perm = Utility.get_permutations(positions_single, 2)
    for pair in perm:
        is_zmode = utility.check_particle_pair_zmode(pair, positions, positions_single, None)
        # verify_cutoff_pair returns a gate index (0..3) or -1, never a bool.
        cut_adj = verify_cutoff_pair(nanowire.cutoff_pairs_adj, pair, voltages)
        cut_opp = verify_cutoff_pair(nanowire.cutoff_pairs_opp, pair, voltages)
        # BUG FIX: the original tested `flag2 is True or flag3 is True`, but
        # verify_cutoff_pair returns an int, so `is True` could never hold and
        # this check silently never raised. Compare against -1 instead.
        if is_zmode is False and (cut_adj >= 0 or cut_opp >= 0):
            raise exception.MultiModalCrossingException(msg)
def validate_path_particle(path, positions, vertices, par):
    """
    Collect the occupied positions along `path`; raise PathBlockedException
    when more than one other particle blocks the route.

    Returns the list of blocking positions (last matched position excluded).
    """
    occupied = [vertices[step] for step in path if vertices[step] in positions]
    # Drop the final matched position -- presumably the moving particle's own
    # slot; note this raises IndexError if the path hits no positions at all.
    occupied.pop()
    if len(occupied) > 1:
        route = [vertices[step] for step in path]
        msg = "The Particle ({}) with Path [{}] is blocked in [{}]"\
            .format(par, ', '.join(route), ', '.join(occupied))
        raise exception.PathBlockedException(msg)
    return occupied
def validate_path_gates(par, path, vertices, voltages, cutoff_pairs, cutoff_pairs_opp):
    """
    Raise PathBlockedException when a shut voltage gate cuts off the two
    endpoints of `path`; return True otherwise.
    """
    endpoints = [vertices[path[0]], vertices[path[-1]]]
    blocking_gates = []
    flag_adj = verify_cutoff_pair(cutoff_pairs, endpoints, voltages)
    flag_opp = -1
    gate = get_voltage_gate_values(flag_adj)
    if gate is not None:
        blocking_gates.append(gate)
    else:
        # Only consult the opposite-branch cutoff list when the adjacent
        # list did not already identify a blocking gate.
        flag_opp = verify_cutoff_pair(cutoff_pairs_opp, endpoints, voltages)
        gate = get_voltage_gate_values(flag_opp)
        if gate is not None:
            blocking_gates.append(gate)
    if flag_adj >= 0 or flag_opp >= 0:
        route = [vertices[step] for step in path]
        msg = "The Particle ({}) with Path [{}] is blocked by Voltage Gate {}"\
            .format(par, ', '.join(route), blocking_gates)
        raise exception.PathBlockedException(msg)
    return True
def verify_cutoff_pair(cutoff, pair, voltages):
    """
    Return the index (0-3) of the first gate whose cutoff list contains
    `pair` (in either order) if that gate's voltage is shut ('S');
    -1 otherwise. Scanning stops at the first list containing the pair.
    """
    for i, pairs in enumerate(cutoff):
        if pair in pairs or list(reversed(pair)) in pairs:
            # BUG FIX: the original used `voltages[i] is 'S'` -- an identity
            # comparison against a string literal, which is implementation-
            # dependent. Use equality.
            return i if voltages[i] == 'S' else -1
    return -1
def get_voltage_gate_values(flag):
    """
    Map a cutoff-pair index (0-3) to its gate label ('x11'..'x22') for
    output formatting; return None for any other value.
    """
    # BUG FIX: the original used `flag is 0` etc.; identity comparison on
    # ints relies on CPython's small-int caching. Use a lookup table with
    # equality semantics instead.
    gate_labels = {0: 'x11', 1: 'x12', 2: 'x21', 3: 'x22'}
    return gate_labels.get(flag)
def check_unibranch_validity(pair, positions, intersection):
    """
    Return True when both particles of `pair` sit on the same branch of the
    given intersection (positions are looked up as positions[particle-1]).
    """
    assert(intersection is not None)
    found_branches = []
    for particle in pair:
        pos = positions[particle - 1]
        branch_no = 0
        for branch in intersection:
            branch_no += 1
            for slot in branch:
                # Only dict slots carry positions; record the 1-based branch
                # number whenever this particle's position appears in one.
                if isinstance(slot, dict) and pos in slot:
                    found_branches.append(branch_no)
    return found_branches[0] == found_branches[1]
def validate_particle_positions(nanowire_obj, nanowire_b, positions, branch_cfg, group):
    """Checks if the given particle positions conform to the given valid gate
    branch config.

    group is a comma-separated list of group sizes (e.g. "2,2") slicing
    `positions` into particle groups. Returns a 4-tuple:
    (check, intersections_final, branches_final, particles) where `check`
    is False when some group violates the config, and the other entries
    describe the offending group.
    """
    n_branches = -1
    check = True
    intersections = []
    intersections_final = []
    branches = []
    branches_final = []
    particles = []
    # 1. extract the intersection index of each particle position
    for pos in positions:
        inter = Utility.get_intersection(nanowire_obj.nanowire, pos)
        intersections.append(nanowire_obj.nanowire.index(inter))
        if n_branches == -1:
            # All intersections are assumed to have the same branch count.
            n_branches = len(inter)
    # 2. extract the branch index of each particle position
    for pos in positions:
        for branch in nanowire_b:
            if str(pos) in branch:
                branches.append(nanowire_b.index(branch))
    # 3. check that each group's particles share one intersection
    i = 0
    for idx in group.split(','):
        j = int(idx)
        inter = intersections[i:i+j]
        pars = [e+1 for e in range(i,i+j)]
        if len(set(inter)) > 1 and not "single" in branch_cfg:
            check = False
            particles.extend(pars)
            intersections_final = inter
            branches_final = branches[i:i+j]
            break
        # NOTE(review): looks like this should be `i += j` to advance past
        # the group just consumed; `i = j` only works for equal-size groups
        # like "2,2" -- confirm.
        i = j
    # 4. check each group against the branch config
    if check:
        i = 0
        particles = []
        for idx in group.split(','):
            j = int(idx)
            branch = branches[i:i+j]
            pars = [e+1 for e in range(i,i+j)]
            if len(branch) != 2*len(set(branch)):
                # Each zero-mode pair must contribute exactly two particles
                # per distinct branch.
                msg = "The particles {} are not a valid zero-mode pair".format(pars)
                raise exception.InvalidNanowireStateException(msg)
            elif not validate_branch_config(branch_cfg, branch, n_branches):
                check = False
                particles.extend(pars)
                branches_final = branch
                intersections_final = intersections[i:i+j]
            # NOTE(review): same `i = j` vs `i += j` concern as above.
            i = j
    return check, intersections_final, branches_final, particles
def validate_branch_config(branch_cfg, branch, n):
    """Check whether the particles within an intersection match `branch_cfg`.

    branch_cfg: textual config, e.g. "double adjacent clockwise",
        "double adjacent counter clockwise", "double opposite", "single ...".
    branch: branch indices of the particles (pairs are adjacent entries).
    n: number of branches per intersection (used for modular adjacency).
    """
    if len(branch) == 2:
        return True
    if len(branch) != 4:
        return False
    if "single" in branch_cfg:
        return True
    if "double" not in branch_cfg:
        return False
    # Each zero-mode pair must sit wholly on one branch.
    if branch[0] != branch[1] or branch[2] != branch[3]:
        return False
    diff = branch[1] % n - branch[2] % n
    # BUG FIX: the original tested `"adjacent" and "clockwise" in branch_cfg`,
    # which ignores the "adjacent" part entirely, and matched "clockwise"
    # before "counter clockwise" (a substring), so counter-clockwise configs
    # could be validated against clockwise geometry. Test the full phrases,
    # most specific first.
    if "opposite" in branch_cfg:
        return diff in (2, -2)
    if "adjacent" in branch_cfg and "counter clockwise" in branch_cfg:
        return diff in (3, -1)
    if "adjacent" in branch_cfg and "clockwise" in branch_cfg:
        return diff in (-3, 1)
    return False
|
18,416 | d3ae98fe9fa0cb677312b6d1fe2212f473c0d80c | from flask_script import Manager
from flask import current_app
from metabrainz import create_app
from metabrainz.model.access_log import AccessLog
from metabrainz.model.utils import init_postgres, create_tables as db_create_tables
# Flask-Script entry point wrapping the application factory.
manager = Manager(create_app)
@manager.command
def create_db():
    """Create and configure the database."""
    init_postgres(current_app.config['SQLALCHEMY_DATABASE_URI'])
@manager.command
def create_tables():
    """Create all tables in the configured database."""
    db_create_tables(current_app.config['SQLALCHEMY_DATABASE_URI'])
@manager.command
def cleanup_logs():
    """Remove old IP-address records from the access log."""
    with create_app().app_context():
        AccessLog.remove_old_ip_addr_records()
if __name__ == '__main__':
    manager.run()
|
18,417 | 5ee66700febef2bc55099f82501586ef4ba8847f | #!/usr/bin/env python
import sys
import numpy as np
from numpy.linalg import inv
# Integer direction vectors to be expressed in the Cartesian basis
# spanned by the rows of `a`.
z = [-1, 1, 1]
x = [1, -1, 1]
a = [[4.1026463509, 0.0000000000, 0.0000000000],
     [2.0513231754, 3.5529959626, 0.0000000000],
     [2.0513231754, 1.1843319875, 3.3497967182]]
# Row-vector times matrix: Cartesian coordinates of each direction.
z1 = np.asarray(z) @ np.asarray(a)
x1 = np.asarray(x) @ np.asarray(a)
print('z1', z1)
print('x1', x1)
|
18,418 | 856aa3dede0aeba25cb62df3c68588e5ca6417ee | scaling_constant = 1.001
unscaled_ephemeris_settings = body_settings.get( 'Jupiter' ).ephemeris_settings
body_settings.get( 'Jupiter' ).ephemeris_settings = environment_setup.ephemeris.scaled(
unscaled_ephemeris_settings, scaling_constant )
|
18,419 | 8b019f6c6552310f9e5eb1fa4c1a56ef9028fe82 | #!/usr/bin/env python3
import argparse
import copy
import functools as ft
import json
import select
import socket
from typing import Dict, List
#DEBUG = True
DEBUG = False
# Parse Command Line for ASN, and networks (used in Router creation).
# NOTE: this runs at import time; importing the module without the expected
# argv exits with an argparse usage error.
parser = argparse.ArgumentParser(description='route packets')
parser.add_argument('asn', type=int, help="AS Number")
parser.add_argument('networks', metavar='networks', type=str, nargs='+', help="networks")
args = parser.parse_args()
##########################################################################################
# Message Fields
TYPE = "type"
SRCE = "src"
DEST = "dst"
MESG = "msg"
TABL = "table"
# Message Types
DATA = "data"
DUMP = "dump"
UPDT = "update"
RVKE = "revoke"
NRTE = "no route"
# Update Message Fields
NTWK = "network"
NMSK = "netmask"
ORIG = "origin"
LPRF = "localpref"
APTH = "ASPath"
SORG = "selfOrigin"
# internal route info
CUST = "cust"
PEER = "peer"
PROV = "prov"
##########################################################################################
class Router:
    """Main Router class, which handles the bulk of the BGP Router
    functionality.
    """
    # Class-level placeholders; each is replaced with a fresh container
    # in __init__.
    routes = None
    updates = None
    relations = None
    sockets = None
    forwarding_table = None
    revoked = None
def __init__(self, asn: str, networks: List[Dict[str, str]]):
"""Initializes a new Router object, given an Autonomous System Number
and a list of network objects in the form of a Python dictionary from
string to string.
"""
# map from port to list of IP addresses it can reach
self.routes = {}
# cached copy of update announcement (whole packets)
self.updates = []
# map from ip address to type of relationship
self.relations = {}
# map from ip address to connection object
self.sockets = {}
# array of map<string, string> from description to IP address
self.forwarding_table = []
# this router's Autonomous System Number
self.asn = asn
# accumulating list of revoked routes
self.revoked = []
for relationship in networks:
network, relation = relationship.split("-")
if DEBUG:
print("Starting socket for", network, relation)
# start a new UNIX domain socket
self.sockets[network] = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
self.sockets[network].setblocking(0)
# connect UNIX domain socket to specified network
self.sockets[network].connect(network)
# add entry to relations map, mapping given network IP to relation in Router
self.relations[network] = relation
return
def lookup_routes(self, daddr):
"""Lookup all valid routes for a given address."""
outroutes = []
for route in self.forwarding_table:
anded_address = self.and_addresses(daddr, route[NMSK])
if anded_address == route[NTWK]:
outroutes.append(route)
return outroutes
def and_addresses(self, daddr: str, other_addr: str) -> str:
"""Bitwise AND operation for two given IP Addresses."""
# split both given IPs into arrays of their int values
# (separate on periods)
daddr_sept = daddr.split(".")
other_addr_sept = other_addr.split(".")
# accumulate anded addresses
all_anded = []
# iterate parallelly through daddr and other_addr
# bitwise AND'ing them together
for i in range(len(daddr_sept)):
all_anded.append(str(int(daddr_sept[i]) & int(other_addr_sept[i])))
# reformat results into valid IP address format
return ft.reduce(lambda x, y: x + "." + y, all_anded)
def get_shortest_as_path(self, routes) -> List[str]:
"""Select the route with the shortest AS Path."""
if len(routes) <= 0:
return []
# start shortest path as the first route's path
shortest_path = [routes[0]]
# start the length of the shortest path as that
# of the first route's path
min_path = len(routes[0][APTH])
# iterate through all routes in given list and
# find the shortest AS Path
for route in routes:
r_len = len(route[APTH])
if r_len < min_path:
min_path = r_len
shortest_path = [route]
elif r_len == min_path:
shortest_path.append(route)
return shortest_path
def get_highest_preference(self, routes):
"""Select the route with the highest localPref."""
# start highest lpref route as the first route's path
highest_lprf_route = [routes[0]]
# start the highest lpref as that
# of the first route's path
highest_lprf = int(routes[0][LPRF])
# iterate through all routes in given list and
# find the one with the highest local pref
for route in routes:
r_lprf = int(route[LPRF])
if r_lprf > highest_lprf:
highest_lprf = r_lprf
highest_lprf_route = [route]
elif r_lprf == highest_lprf:
highest_lprf_route.append(route)
return highest_lprf_route
def get_self_origin(self, routes):
"""Select all self originating routes."""
outroutes = []
all_non_self = True
for route in routes:
if route[SORG]:
outroutes.append(route)
all_non_self = False
if all_non_self:
return routes
return outroutes
def get_origin_routes(self, routes):
"""Select origin routes with the following
ranking system: IGP > EGP > UNK.
"""
outroutes = []
current_best = "UNK"
# iterate through routes in given list updating the current best if a better
# option is discovered
for route in routes:
if route[ORIG] == current_best:
outroutes.append(route)
elif (route[ORIG] == "EGP" and current_best != "IGP") or route[ORIG] == "IGP":
# if the current best is worse than EGP and the current is EGP,
# update best and start a new list
# if the current best is worse than IGP and the current is IGP,
# update best and start a new list
current_best = route[ORIG]
outroutes = [route]
return outroutes
def filter_relationships(self, srcif, routes):
"""Don't allow Peer->Peer, Peer->Prov, or Prov->Peer forwards."""
outroutes = []
rel = self.relations[srcif]
for route in routes:
opp_rel = self.relations[route[PEER]]
if (rel == CUST or opp_rel == CUST) or (rel == PROV and opp_rel == PROV):
outroutes.append(route)
return outroutes
def get_lowest_ip(self, routes):
"""Select route with the lowest ip address."""
outroutes = []
min_ip = "255.255.255.255"
for route in routes:
r_ip = route[PEER]
if self.compare_ip(min_ip, r_ip) == 1:
min_ip = r_ip
outroutes = [route]
elif self.compare_ip(min_ip, r_ip) == 0:
outroutes.append(route)
return outroutes
def compare_ip(self, curr_ip: str, peer: str):
    """Three-way compare two dotted-quad IPv4 addresses.

    Returns 1 if curr_ip > peer, -1 if curr_ip < peer, 0 if equal.
    """
    # Tuples of octets compare lexicographically, which is exactly the
    # field-by-field comparison of the dotted quad.
    lhs = tuple(int(part) for part in curr_ip.split("."))
    rhs = tuple(int(part) for part in peer.split("."))
    return (lhs > rhs) - (lhs < rhs)
def get_route(self, srcif, daddr):
    """Pick the best route for daddr using the BGP decision ranking."""
    candidates = self.lookup_routes(daddr)
    if candidates:
        # Narrowing filters, applied strictly in ranking order:
        # longest prefix, local pref, self-origin, AS-path length, origin, IP.
        for narrow in (self.longest_prefix_match,
                       self.get_highest_preference,
                       self.get_self_origin,
                       self.get_shortest_as_path,
                       self.get_origin_routes,
                       self.get_lowest_ip):
            candidates = narrow(candidates)
        # Finally enforce the peering-relationship policy.
        candidates = self.filter_relationships(srcif, candidates)
    return candidates[0] if candidates else None
def forward(self, srcif, packet) -> bool:
    """Forward a data packet along the best route; False if none exists."""
    best = self.get_route(srcif, packet[DEST])
    if best is None:
        return False
    encoded = json.dumps(packet).encode()
    self.sockets[best[PEER]].sendall(encoded)
    return True
def coalesce(self, packet) -> bool:
    """Try to merge the update carried by `packet` with adjacent table entries.

    Stamps the update's message block with the packet's source/destination so
    it can be compared attribute-for-attribute against table entries, then
    replaces every numerically-adjacent, attribute-identical entry with an
    aggregated route. Returns True when at least one aggregation happened.

    Bug fix: the original iterated over self.forwarding_table while both
    appending to and removing from it, which can silently skip entries during
    iteration. We now iterate over a snapshot of the table.
    """
    merged = False
    packet[MESG][SRCE] = packet[SRCE]
    packet[MESG][DEST] = packet[DEST]
    for route in list(self.forwarding_table):  # snapshot: the loop mutates the table
        if self.adj_numerically(route, packet[MESG]) and self.same_attributes(route, packet[MESG]):
            aggregated = self.aggregate_routes(route, packet[MESG])
            self.forwarding_table.append(aggregated)
            # The merged entry supersedes the old, narrower one.
            self.forwarding_table.remove(route)
            merged = True
    return merged
def aggregate_routes(self, route1, route2):
    """Combine two adjacent routes into a single aggregated route.

    Returns a deep copy of route1 whose network is the lower of the two
    networks and whose netmask covers both (prefix shortened by one bit
    when the routes are distinct).
    """
    # If the routes differ in netmask or network, widen the prefix by one
    # bit so the aggregate covers both; otherwise keep the prefix length.
    # NOTE(review): differing netmasks shouldn't normally reach here
    # (adj_numerically rejects them) — confirm this branch is intentional.
    if route1[NMSK] != route2[NMSK] or route1[NTWK] != route2[NTWK]:
        p_len = self.get_prefix(route1) - 1
    else:
        p_len = self.get_prefix(route1)
    # Convert the new prefix length back into dotted-quad netmask form:
    # p_len ones padded with zeros out to 32 bits.
    bin_nmsk = ("1" * p_len).ljust(32, "0")
    nmsk_ip = self.binary_to_ipv4(bin_nmsk)
    # The aggregate's network is the lower of the two network addresses.
    lower_ip = None
    if self.compare_ip(route1[NTWK], route2[NTWK]) == 1:
        lower_ip = route2[NTWK]
    else:
        lower_ip = route1[NTWK]
    # Clone route1 (keeping SRCE/DEST/attributes) and overwrite the
    # network, netmask, and cached CIDR length.
    copy_of_route = copy.deepcopy(route1)
    copy_of_route[NTWK] = lower_ip
    copy_of_route[NMSK] = nmsk_ip
    copy_of_route["CIDR"] = p_len
    return copy_of_route
def binary_to_ipv4(self, binary):
    """Convert a 32-character binary string to dotted-quad IPv4 notation."""
    # Slice the bit string into four octets and render each as decimal.
    octets = [str(int(binary[i:i + 8], 2)) for i in range(0, 32, 8)]
    return ".".join(octets)
def same_attributes(self, route, packet) -> bool:
    """True when route and packet agree on every BGP attribute and next hop."""
    if any(route[attr] != packet[attr] for attr in (LPRF, SORG, APTH, ORIG)):
        return False
    # Next hop must also match: the route's peer is the packet's source.
    return route[PEER] == packet[SRCE]
def adj_numerically(self, route, packet) -> bool:
    """True when route and packet describe numerically adjacent networks
    (same netmask, same prefix length, networks equal up to the last bit).
    """
    if route[NMSK] != packet[NMSK]:
        return False
    plen = self.get_prefix(route)
    if plen != self.get_prefix(packet):
        return False
    # Dotted binary keeps the original comparison semantics (the dots are
    # part of the sliced string, exactly as before).
    def dotted_bits(addr):
        return '.'.join(format(int(octet), '08b') for octet in addr.split('.'))
    return dotted_bits(route[NTWK])[:plen - 1] == dotted_bits(packet[NTWK])[:plen - 1]
def get_prefix(self, route):
    """Return the CIDR prefix length of the route's netmask."""
    # Dotted binary form (e.g. '11111111.11111111.00000000.00000000')
    # is what len_pref expects.
    dotted_bits = '.'.join(format(int(octet), '08b')
                           for octet in route[NMSK].split('.'))
    return self.len_pref(dotted_bits)
def len_pref(self, nmsk):
    """Count the prefix bits of a binary netmask string.

    Returns -1 for a non-contiguous mask (a '0' followed by a '1').
    """
    return -1 if "01" in nmsk else nmsk.count("1")
def longest_prefix_match(self, routes):
    """Keep the route(s) whose netmask has the longest prefix."""
    best_len = 0
    matches = []
    for route in routes:
        plen = self.get_prefix(route)
        if plen > best_len:
            # Strictly longer prefix: restart the candidate list.
            best_len = plen
            matches = [route]
        elif plen == best_len:
            matches.append(route)
    return matches
def update(self, srcif, packet) -> bool:
    """Handle an update packet: record the route, then propagate it."""
    packet["srcif"] = srcif  # remember the receiving interface
    self.update_table(packet)
    # Forward a copy with our ASN appended to the AS path, leaving the
    # cached original untouched.
    outgoing = copy.deepcopy(packet)
    outgoing[MESG][APTH].append(int(self.asn))
    self.forward_to_neighbors(outgoing, srcif)
    return True
def update_table(self, packet):
    """Record an update packet and insert its route into the forwarding table,
    unless it could be aggregated with an existing entry.
    """
    # Cache the raw update for later table rebuilds.
    self.updates.append(packet)
    msg = packet[MESG]
    if self.coalesce(packet):
        return  # the route was merged into an existing aggregate
    # New standalone entry: network, netmask, peer, BGP attributes, and
    # the cached CIDR prefix length.
    self.forwarding_table.append({
        SRCE: packet[SRCE],
        DEST: packet[DEST],
        NTWK: msg[NTWK],
        NMSK: msg[NMSK],
        PEER: packet[SRCE],
        LPRF: msg[LPRF],
        SORG: msg[SORG],
        APTH: msg[APTH],
        ORIG: msg[ORIG],
        "CIDR": self.get_prefix(msg),
    })
# def rebuild_table(self, packet):
# """Rebuilds our forwarding_table when an update/revocation message
# is received. Reconstructs table using aggregation/disaggregation.
# """
# # create copy of updates thus far
# curr_updates = copy.deepcopy(self.updates)
# # wipe updates and forwarding_table
# self.updates = []
# self.forwarding_table = []
# for update in curr_updates:
# for dead_entry in packet[MESG]:
# if update[TYPE] == UPDT:
# sameSource = route[SRCE] == packet[SRCE]
# sameDest = route[DEST] == packet[DEST]
# part_of_dead_entry = sameSource and sameDest and dead_entry[NTWK] == route[NTWK] and dead_entry[NMSK] == route[NMSK]
# if not part_of_dead_entry:
# self.update_table(update)
# # we see a RVKE message that is not related to the current_best
# # route we are trying to revoke, move on to revoking the newly found one
# if update[TYPE] == RVKE:
# # get rid of any dead entries for a found revocation in new forwarding table
# self.rebuild_table(update)
# # keep all RVKE statements live in updates table
# self.updates.append(update)
def revoke(self, packet) -> bool:
    """Handle a revoke packet: drop the dead routes and notify neighbors."""
    self.remove_dead_entries(packet)
    # Keep the revocation in the update cache as well.
    self.updates.append(packet)
    # Propagate a copy so later mutation of `packet` cannot leak out.
    outgoing = copy.deepcopy(packet)
    self.forward_to_neighbors(outgoing, packet[SRCE])
    return True
def remove_dead_entries(self, packet):
    """Remove from the forwarding table every entry named in the revoke
    packet's message block, archiving each removed route in self.revoked.

    Bug fix: the original removed entries from self.forwarding_table while
    iterating over it, which skips the element following each removal.
    Iterating over a snapshot makes every entry get examined.
    """
    for route in list(self.forwarding_table):  # snapshot: we mutate the table
        for dead_entry in packet[MESG]:
            same_source = route[SRCE] == packet[SRCE]
            same_dest = route[DEST] == packet[DEST]
            if same_source and same_dest \
                    and dead_entry[NTWK] == route[NTWK] \
                    and dead_entry[NMSK] == route[NMSK]:
                self.forwarding_table.remove(route)
                self.revoked.append(route)
                break  # this route is gone; move on to the next one
def forward_to_neighbors(self, packet, srcif):
    """Send `packet` to every neighbor permitted by our peering policy.

    Updates learned from a customer go to all other neighbors; updates
    learned from a peer or provider go only to customers.
    """
    from_customer = self.relations[srcif] == CUST
    for neighbor in self.sockets:
        if neighbor == srcif:
            continue  # never echo back to the sender
        if not from_customer and self.relations[neighbor] in (PROV, PEER):
            continue  # peer/provider updates are only forwarded to customers
        # Rewrite source to our address on that neighbor's network (x.y.z.1).
        packet[SRCE] = '.'.join(neighbor.split('.', 3)[:3]) + '.1'
        packet[DEST] = neighbor
        self.sockets[neighbor].sendall(json.dumps(packet).encode())
def dump(self, packet) -> bool:
    """Answer a dump request with a 'table' message describing our routes."""
    # Aggregate adjacent entries before reporting.
    self.forwarding_table = self.compress()
    reply = copy.deepcopy(packet)
    # Swap the endpoints: the reply travels back to the requester.
    reply[SRCE], reply[DEST] = packet[DEST], packet[SRCE]
    reply[TYPE] = TABL
    # Report only network/netmask/peer for each table entry.
    reply[MESG] = [{NTWK: r[NTWK], NMSK: r[NMSK], PEER: r[PEER]}
                   for r in self.forwarding_table]
    self.sockets[reply[DEST]].sendall(json.dumps(reply).encode())
    return True
def compress(self):
    """Compress the forwarding table by aggregating neighboring entries
    that are numerically adjacent and share all attributes.

    Returns the compressed (sorted) table; does not modify the table
    in place — the caller assigns the result back.
    """
    # Sort by prefix length, longest first, so candidate aggregates end
    # up next to each other.
    sorted_table = sorted(self.forwarding_table, reverse=True, key=lambda x: x["CIDR"])
    i = 0
    # Single in-place merge pass over adjacent pairs.
    # NOTE(review): `i` advances even after a merge, and a merged entry's
    # shorter CIDR is not re-sorted — chained aggregations may be missed
    # in one call. Confirm a single pass is sufficient for the spec.
    while i + 1 < len(sorted_table):
        at_i = sorted_table[i]
        at_i_plus = sorted_table[i+1]
        if self.adj_numerically(at_i, at_i_plus) and self.same_attributes(at_i, at_i_plus):
            copy_of_route = self.aggregate_routes(at_i, at_i_plus)
            # Replace the pair with their aggregate.
            sorted_table[i] = copy_of_route
            sorted_table.pop(i+1)
        i += 1
    return sorted_table
def handle_packet(self, srcif, packet) -> bool:
    """Dispatch a received packet to the matching handler.

    Returns False for unknown packet types.
    """
    kind = packet["type"]
    if kind == DATA:
        return self.forward(srcif, packet)
    if kind == DUMP:
        return self.dump(packet)
    if kind == UPDT:
        return self.update(srcif, packet)
    if kind == RVKE:
        return self.revoke(packet)
    return False
def send_error(self, conn, msg, srcif):
    """Send a no_route error back to the source of an unroutable packet."""
    # Our address on the receiving network is x.y.z.1.
    gateway = ('.').join(srcif.split('.', 3)[:3]) + '.1'
    reply = {SRCE: gateway, DEST: msg[SRCE], TYPE: NRTE, MESG: {}}
    conn.sendall(json.dumps(reply).encode())
    return True
def run(self):
    """The main method that runs the BGP Router program.

    Polls all neighbor sockets, decodes each incoming JSON message, and
    dispatches it; replies with a no_route error when handling fails.
    """
    while True:
        # Poll every neighbor socket with a short timeout.
        socks = select.select(self.sockets.values(), [], [], 0.1)[0]
        for conn in socks:
            try:
                k = conn.recv(65535)
            except:
                # either died on a connection reset, or was SIGTERM's by parent
                return
            if k:
                # Recover which interface this socket belongs to.
                for sock in self.sockets:
                    if self.sockets[sock] == conn:
                        srcif = sock
                msg = json.loads(k)
                # A handler returning False means the packet was unroutable
                # or of an unknown type — report it to the sender.
                if not self.handle_packet(srcif, msg):
                    self.send_error(conn, msg, srcif)
            else:
                # Empty read: the peer closed the connection; shut down.
                return
if __name__ == "__main__":
    # `args` is expected to be produced by an argument parser defined
    # earlier in this file (asn and neighbor network list).
    router = Router(args.asn, args.networks)
    router.run()
|
18,420 | 3eaf7fc2a7055ac6d2c6c3d8fe3e7a4cbccd801d | from excercises.PracticePython_CharacterInput import centenary_calculator
# Prompt the user for their details.
user_name = input('Enter your name: ')
user_age = int(input('Enter your age: '))
current_year = int(input('What is the year? '))
# Year in which the user will turn 100, computed by the imported helper.
centenary_year = centenary_calculator(user_age, current_year)
print('Hello', user_name, 'you will turn 100 in the year', centenary_year, '.')
|
18,421 | 04b2a514aa44cc979a1f5caee62e7a78bd6b5f34 | from viewClass import view
import core_logic
import canvasapi
from canvasapi import Canvas
import taskClass
import tkinter
class grade_peer_review_assignment(view):
    """Interactive view that lets the user pick a Canvas assignment and
    upload a dictionary of peer-review grades to it.
    """

    def __init__(self, canvas: Canvas, user: canvasapi.user, course, grade_dict : dict, root : tkinter.Tk) :
        """Store the Canvas session, target course, grades to upload, and
        the Tk root, and fetch the course's assignments.
        """
        super().__init__(canvas, user)
        self.root = root
        self.course = course
        self.canvas = canvas
        self.grade_dict = grade_dict
        # All assignments of the course; indexed 1-based in the menu.
        self.assignments = self.course.get_assignments()
        self.assignment = 0
        self.user_in = 0

    def run(self):
        """Show the assignment menu, then upload grades or navigate away.

        Returns a backTask after uploading or on 'b', a quitTask on 'q'.
        """
        core_logic.print_assignments(self.assignments)
        print('b ) back\nq ) quit')
        self.user_in = input('Please Choose an assignment to upload the grades to: ').strip()
        if self.user_in == 'b':
            print("\n")
            return taskClass.backTask()
        elif self.user_in == 'q':
            return taskClass.quitTask()
        else:
            # Menu is 1-based; convert to a 0-based index.
            self.assignment = self.assignments[int(self.user_in) - 1]
            core_logic.upload_grades(self.assignment,self.grade_dict)
            print("\n")
            return taskClass.backTask()
|
18,422 | a0996c9608ccab930b9db3b809ea88b82397793c | class Solution:
def diagonalSum(self, mat: List[List[int]]) -> int:
    """Sum both diagonals of a square matrix, counting a shared center once."""
    n = len(mat)
    # Each row contributes its primary- and secondary-diagonal elements.
    total = sum(row[i] + row[n - 1 - i] for i, row in enumerate(mat))
    if n % 2:
        # Odd dimension: the center cell was counted twice.
        total -= mat[n // 2][n // 2]
    return total
18,423 | 7c4f7aaf7536a871c1af20f686dbb29c6ba09a51 | email = "email@email.com"
# Netatmo account credentials (placeholders — fill in real values).
password = "password"
# https://dev.netatmo.com/apps/
# OAuth app registration details from the Netatmo developer portal.
app_name = ""
client_id = "abcdefghijklmnopqrstuvwx"
client_secret = "abcdefghijklmnopqrstuvwxyz01234"
|
18,424 | 95763606e8b7157a574466ac0cfff4933e679d19 |
# For each of n input lines, add the first and last characters as digits.
n = int(input())
for _ in range(n):
    digits = input()
    total = int(digits[0]) + int(digits[-1])
    print("Sum = {}".format(total))
|
18,425 | 2e8f0bc9c9ed27eedda50ff5cac671fd7ee3c78e | from suds.client import Client
from suds import WebFault
class SoapHelper:
    """Thin wrapper over the Mantis SOAP API (via suds) used in tests."""

    def __init__(self, app):
        # `app` provides the fixture configuration, including the SOAP URL.
        self.app = app

    def can_login(self, username, password):
        """Return True when the credentials are accepted by mc_login."""
        client = Client(self.app.config['soap']['url'])
        try:
            client.service.mc_login(username, password)
            return True
        except WebFault:
            # Invalid credentials raise a SOAP fault.
            return False

    def get_user_projects(self, username, password):
        """Return the list of projects accessible to the user,
        or False when the SOAP call fails.
        """
        client = Client(self.app.config['soap']['url'])
        try:
            response = list(client.service.mc_projects_get_user_accessible(username, password))
            return response
        except WebFault:
            return False
18,426 | ea21f9e5ce204b3fbacc9a66cc5af8d073996fb3 | test_case = int(input())
# For each test case, read a line of integers and print the third largest.
for _ in range(test_case):
    num_list = list(map(int, input().split()))
    # Sort descending so index 2 is the third-largest value.
    num_list.sort(reverse = True)
    print(num_list[2])
18,427 | fe51a83bcdfe1b3c292ce8c56f200efd7b389420 | # coding=utf-8
# Copyright 2018 Sascha Schirra
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" A ND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from struct import pack as p
from .enum import Enum
from .binary import *
from binascii import hexlify
############# MachO General ######################
class MH(Enum):
    """Mach-O file types (mach_header.filetype values)."""
    OBJECT = 0x1
    EXECUTE = 0x2
    FVMLIB = 0x3
    CORE = 0x4
    PRELOAD = 0x5
    DYLIB = 0x6
    DYLINKER = 0x7
    BUNDLE = 0x8
    DYLIB_STUB = 0x9
    DSYM = 0xa
    KEXT_BUNDLE = 0xb
class VM_PROT(Enum):
    """VM protection bits for Mach-O segments."""
    READ = 0x1
    WRITE = 0x2
    EXEC = 0x4

    def shortString(self, perm):
        """Render `perm` as a 3-char 'RWE' string, blanks for unset bits."""
        flags = ((self.READ, 'R'), (self.WRITE, 'W'), (self.EXEC, 'E'))
        return ''.join(ch if perm & int(flag) > 0 else ' ' for flag, ch in flags)
class TypeFlags(Enum):
    """CPU-type capability flags; ABI64 marks 64-bit variants."""
    MASK = 0xff000000
    ABI64 = 0x01000000
class CpuType(Enum):
    """Mach-O CPU types; 64-bit types OR in TypeFlags.ABI64."""
    ANY = -1
    I386 = 7
    X86_64 = I386 | TypeFlags.ABI64
    MIPS = 8
    ARM = 12
    ARM64 = ARM | TypeFlags.ABI64
    SPARC = 14
    POWERPC = 18
    POWERPC64 = POWERPC | TypeFlags.ABI64
# Module-level shortcuts (duplicates of LC / S_ATTR values defined below).
LC_SEGMENT = 1
LC_SEMGENT_64 = 0x19  # NOTE(review): name misspelled ('SEMGENT'); kept for compatibility
S_ATTR_SOME_INSTRUCTIONS = 0x400
S_ATTR_PURE_INSTRUCTIONS = 0x80000000
class CpuSubTypeARM(Enum):
    """CPU subtypes for 32-bit ARM."""
    ALL = 0
    V4T = 5
    V6 = 6
    V5 = 7
    XSCALE = 8
    V7 = 9
    V7S = 11
    V7K = 12
    V6M = 14
    V7M = 15
    V7EM = 16
class CpuSubTypeARM64(Enum):
    """CPU subtypes for ARM64."""
    ALL = 0
    V8 = 1
    E = 2
class SubTypeFlags(Enum):
    """CPU-subtype capability flags; LIB64 marks 64-bit-library subtypes."""
    MASK = 0xff000000
    LIB64 = 0x80000000
class CPU_SUBTYPE_X86(Enum):
    """CPU subtypes for the x86 family."""
    X86 = 3
    X86_64 = X86 | SubTypeFlags.LIB64
    X86_64_H = 8
    I486 = 4
    I486SX = 0x84
    I586 = 5
    PENTPRO = 0x16
    PENTII_M3 = 0x36
    PENTII_M5 = 0x56
    CELERON = 0x67
    CELERON_MOBILE = 0x77
    PENTIUM_3_M = 0x18
    PENTIUM_3_XEON = 0x28
    PENTIUM_M = 0x09
    PENTIUM_4 = 0x0a
    PENTIUM_4_M = 0x1a
    ITANIUM = 0x0b
    ITANIUM_2 = 0x1b
    XEON = 0x0c
    XEON_MP = 0x1c
class LC(Enum):
    """Mach-O load command types; the 0x80000000 bit is LC_REQ_DYLD."""
    SEGMENT = 0x00000001
    SYMTAB = 0x00000002
    SYMSEG = 0x00000003
    THREAD = 0x00000004
    UNIXTHREAD = 0x00000005
    LOADFVMLIB = 0x00000006
    IDFVMLIB = 0x00000007
    IDENT = 0x00000008
    FVMFILE = 0x00000009
    PREPAGE = 0x0000000A
    DYSYMTAB = 0x0000000B
    LOAD_DYLIB = 0x0000000C
    ID_DYLIB = 0x0000000D
    LOAD_DYLINKER = 0x0000000E
    ID_DYLINKER = 0x0000000F
    PREBOUND_DYLIB = 0x00000010
    ROUTINES = 0x00000011
    SUB_FRAMEWORK = 0x00000012
    SUB_UMBRELLA = 0x00000013
    SUB_CLIENT = 0x00000014
    SUB_LIBRARY = 0x00000015
    TWOLEVEL_HINTS = 0x00000016
    PREBIND_CKSUM = 0x00000017
    LOAD_WEAK_DYLIB = 0x80000018
    SEGMENT_64 = 0x00000019
    ROUTINES_64 = 0x0000001A
    UUID = 0x0000001B
    RPATH = 0x8000001C
    CODE_SIGNATURE = 0x0000001D
    SEGMENT_SPLIT_INFO = 0x0000001E
    REEXPORT_DYLIB = 0x8000001F
    LAZY_LOAD_DYLIB = 0x00000020
    ENCRYPTION_INFO = 0x00000021
    DYLD_INFO = 0x00000022
    DYLD_INFO_ONLY = 0x80000022
    LOAD_UPWARD_DYLIB = 0x80000023
    VERSION_MIN_MACOSX = 0x00000024
    VERSION_MIN_IPHONEOS = 0x00000025
    FUNCTION_STARTS = 0x00000026
    DYLD_ENVIRONMENT = 0x00000027
    MAIN = 0x80000028
    DATA_IN_CODE = 0x00000029
    SOURCE_VERSION = 0x0000002A
    DYLIB_CODE_SIGN_DRS = 0x0000002B
    ENCRYPTION_INFO_64 = 0x0000002C
    LINKER_OPTIONS = 0x0000002D
    LINKER_OPTIMIZATION_HINT = 0x0000002E
class S_ATTR(Enum):
    """Section attribute flags (instruction-bearing sections)."""
    SOME_INSTRUCTIONS = 0x00000400
    PURE_INSTRUCTIONS = 0x80000000
class LcStr(Union):
    """Load-command string: an offset into the command's byte range."""
    _pack_ = 4
    _fields_ = [('offset', c_uint)]
class LoadCommand(LittleEndianStructure):
    """Common header of every Mach-O load command."""
    _pack_ = 4
    _fields_ = [('cmd', c_uint),
                ('cmdsize', c_uint)]
class UuidCommand(LittleEndianStructure):
    """LC_UUID command carrying a 128-bit image UUID."""
    _pack_ = 4
    _fields_ = [('cmd', c_uint),
                ('cmdsize', c_uint),
                ('uuid', c_ubyte * 16)]
class TwoLevelHintsCommand(LittleEndianStructure):
    """LC_TWOLEVEL_HINTS command: file offset and count of the hint table."""
    _pack_ = 4
    _fields_ = [('cmd', c_uint),
                ('cmdsize', c_uint),
                ('offset', c_uint),
                ('nhints', c_uint)]
class TwoLevelHint(LittleEndianStructure):
    """One entry of the two-level namespace hint table."""
    _pack_ = 4
    _fields_ = [('isub_image', c_uint),
                ('itoc', c_uint)]
class Dylib(LittleEndianStructure):
    """Payload of a dylib load command: name offset and version info."""
    _pack_ = 4
    _fields_ = [('name', LcStr),
                ('timestamp', c_uint),
                ('current_version', c_uint),
                ('compatibility_version', c_uint),
                ]
class DylibCommand(LittleEndianStructure):
    """LC_LOAD_DYLIB / LC_ID_DYLIB command."""
    _pack_ = 4
    _fields_ = [('cmd', c_uint),
                ('cmdsize', c_uint),
                ('dylib', Dylib),
                ]
class DylinkerCommand(LittleEndianStructure):
    """LC_LOAD_DYLINKER / LC_ID_DYLINKER command (path of the dynamic linker)."""
    _pack_ = 4
    _fields_ = [('cmd', c_uint),
                ('cmdsize', c_uint),
                ('name', LcStr)
                ]
########################### 32 BIT Structures ###########################
class LSB_32_MachHeader(LittleEndianStructure):
    """32-bit little-endian mach_header."""
    _pack_ = 4
    _fields_ = [('magic', c_uint),
                ('cputype', c_uint),
                ('cpusubtype', c_uint),
                ('filetype', c_uint),
                ('ncmds', c_uint),
                ('sizeofcmds', c_uint),
                ('flags', c_uint)
                ]
class LSB_32_SegmentCommand(LittleEndianStructure):
    """32-bit LC_SEGMENT command (segment_command)."""
    _pack_ = 4
    _fields_ = [('cmd', c_uint),
                ('cmdsize', c_uint),
                ('segname', c_char * 16),
                ('vmaddr', c_uint),
                ('vmsize', c_uint),
                ('fileoff', c_uint),
                ('filesize', c_uint),
                ('maxprot', c_uint),
                ('initprot', c_uint),
                ('nsects', c_uint),
                ('flags', c_uint)]
class LSB_32_Section(LittleEndianStructure):
    """32-bit section header (struct section)."""
    _pack_ = 4
    _fields_ = [('sectname', c_char * 16),
                ('segname', c_char * 16),
                ('addr', c_uint),
                ('size', c_uint),
                ('offset', c_uint),
                ('align', c_uint),
                ('reloff', c_uint),
                ('nreloc', c_uint),
                ('flags', c_uint),
                ('reserved1', c_uint),
                ('reserved2', c_uint)
                ]
class LSB_32(object):
    """Bundle of the 32-bit little-endian structure classes."""
    Section = LSB_32_Section
    SegmentCommand = LSB_32_SegmentCommand
    MachHeader = LSB_32_MachHeader
########################### 64 BIT Structures ###########################
class LSB_64_MachHeader(LittleEndianStructure):
    """64-bit little-endian mach_header_64 (adds a reserved field)."""
    _pack_ = 8
    _fields_ = [('magic', c_uint),
                ('cputype', c_uint),
                ('cpusubtype', c_uint),
                ('filetype', c_uint),
                ('ncmds', c_uint),
                ('sizeofcmds', c_uint),
                ('flags', c_uint),
                ('reserved', c_uint),
                ]
class LSB_64_SegmentCommand(LittleEndianStructure):
    """64-bit LC_SEGMENT_64 command (segment_command_64)."""
    _pack_ = 8
    _fields_ = [('cmd', c_uint),
                ('cmdsize', c_uint),
                ('segname', c_char * 16),
                ('vmaddr', c_ulonglong),
                ('vmsize', c_ulonglong),
                ('fileoff', c_ulonglong),
                ('filesize', c_ulonglong),
                ('maxprot', c_uint),
                ('initprot', c_uint),
                ('nsects', c_uint),
                ('flags', c_uint)]
class LSB_64_Section(LittleEndianStructure):
    """64-bit section header (struct section_64)."""
    _pack_ = 8
    _fields_ = [('sectname', c_char * 16),
                ('segname', c_char * 16),
                ('addr', c_ulonglong),
                ('size', c_ulonglong),
                ('offset', c_uint),
                ('align', c_uint),
                ('reloff', c_uint),
                ('nreloc', c_uint),
                ('flags', c_uint),
                ('reserved1', c_uint),
                ('reserved2', c_uint)
                ]
class LSB_64(object):
    """Bundle of the 64-bit little-endian structure classes."""
    Section = LSB_64_Section
    SegmentCommand = LSB_64_SegmentCommand
    MachHeader = LSB_64_MachHeader
############################# Fat/Universal ###########################
class FatHeader(BigEndianStructure):
    """Big-endian fat/universal binary header."""
    _pack_ = 4
    _fields_ = [('magic', c_uint),
                ('nfat_arch', c_uint)
                ]
class FatArch(BigEndianStructure):
    """Big-endian per-architecture descriptor inside a fat binary."""
    _pack_ = 4
    _fields_ = [('cputype', c_uint),
                ('cpusubtype', c_uint),
                ('offset', c_uint),
                ('size', c_uint),
                ('align', c_uint)
                ]
############################### Container #############################
class MachHeaderData(Container):
"""
header = MachHeader
"""
class LoadCommandData(Container):
"""
header = LoaderCommand
bytes = bytes of the command bytearray
raw = bytes of the command c_ubyte_array
SegmentCommand
sections = list of SectionData
UuidCommand
uuid = uuid (str)
TwoLevelHintsCommand
twoLevelHints = list of TwoLevelHintData
DylibCommand
name = name of dylib (str)
DylinkerCommand
name = name of dynamic linker
"""
class SectionData(Container):
"""
header = Section
"""
class TwoLevelHintData(Container):
"""
header = TwoLevelHint
"""
class MachO(Binary):
    """Parser for Mach-O (and fat/universal) binaries.

    Fixes applied:
    - LC_ID_DYLINKER / LC_LOAD_DYLINKER commands are now parsed with
      __parseDylinkerCommand (it existed but was never called; the dylinker
      branch wrongly reused __parseDylibCommand and the DylibCommand layout).
    - __parseTwoLevelHints iterated `for i in nhints` over a plain int
      (TypeError) and never advanced its read offset; both fixed.
    """

    def __init__(self, fileName, fileContent=None):
        super(MachO, self).__init__(fileName, fileContent)
        # A fat binary is a container of thin MachO slices; parse and stop.
        self.__fatArches = self._tryParseFat(self._bytes)
        if self.__fatArches:
            return
        self.__classes = self._getSuitableClasses(self._bytes)
        if not self.__classes:
            raise BinaryError('Bad architecture')
        self.__machHeader = self._parseMachHeader(self._bytes)
        self.__loadCommands = self._parseLoadCommands(self._bytes, self.machHeader)

    @property
    def _classes(self):
        # 32- or 64-bit structure bundle, chosen from the magic number.
        return self.__classes

    @property
    def machHeader(self):
        assert not self.__fatArches
        return self.__machHeader

    @property
    def isFat(self):
        return self.__fatArches is not None

    @property
    def fatArches(self):
        assert self.__fatArches
        return self.__fatArches

    @property
    def loadCommands(self):
        assert not self.__fatArches
        return self.__loadCommands

    @property
    def entryPoint(self):
        return 0x0

    @property
    def imageBase(self):
        """Derive the load address from the first instruction-bearing section."""
        for loadCommand in self.loadCommands:
            if loadCommand.header.cmd == LC.SEGMENT or loadCommand.header.cmd == LC.SEGMENT_64:
                for section in loadCommand.sections:
                    if section.header.flags & S_ATTR.SOME_INSTRUCTIONS or section.header.flags & S_ATTR.PURE_INSTRUCTIONS:
                        return section.header.addr - section.header.offset
        return 0x0

    @property
    def type(self):
        return 'MachO'

    def _getSuitableClasses(self, data):
        """Pick 32- vs 64-bit structures from the high byte of the magic."""
        classes = None
        if data[7] == 0:
            classes = LSB_32
        elif data[7] == 1:
            classes = LSB_64
        return classes

    def _tryParseFat(self, data):
        """Return a list of thin MachO slices if `data` is a fat binary, else None."""
        header = FatHeader.from_buffer(data)
        if header.magic != 0xcafebabe:
            return None
        offset = sizeof(FatHeader)
        arches = []
        for i in range(header.nfat_arch):
            arch = FatArch.from_buffer(bytearray(data[offset:]))
            cputype = CpuType[arch.cputype]
            thin_data = bytearray(data[arch.offset: arch.offset + arch.size])
            thin = MachO('{}.{}'.format(self.fileName, cputype), thin_data)
            arches.append(thin)
            offset += sizeof(FatArch)
        return arches

    def _parseMachHeader(self, data):
        """Parse and validate the mach header; raise on a bad magic."""
        header = self._classes.MachHeader.from_buffer(data)
        if header.magic not in (0xfeedface, 0xfeedfacf, 0xcefaedfe, 0xcffaedfe):
            raise BinaryError('No valid MachO file')
        return MachHeaderData(header=header)

    def _parseLoadCommands(self, data, machHeader):
        """Parse all ncmds load commands, specializing the known kinds."""
        offset = sizeof(self._classes.MachHeader)
        load_commands = []
        for i in range(machHeader.header.ncmds):
            command = LoadCommand.from_buffer(data, offset)
            raw = (c_ubyte * command.cmdsize).from_buffer(data, offset)
            if command.cmd == LC.SEGMENT or command.cmd == LC.SEGMENT_64:
                command = self.__parseSegmentCommand(data, offset, raw)
            elif command.cmd == LC.UUID:
                command = self.__parseUuidCommand(data, offset, raw)
            elif command.cmd == LC.TWOLEVEL_HINTS:
                command = self.__parseTwoLevelHintCommand(data, offset, raw)
            elif command.cmd in (LC.ID_DYLIB, LC.LOAD_DYLIB, LC.LOAD_WEAK_DYLIB):
                command = self.__parseDylibCommand(data, offset, raw)
            elif command.cmd in (LC.ID_DYLINKER, LC.LOAD_DYLINKER):
                # Bug fix: dylinker commands have their own layout; the
                # original routed them through __parseDylibCommand.
                command = self.__parseDylinkerCommand(data, offset, raw)
            else:
                command = LoadCommandData(header=command)
            load_commands.append(command)
            offset += command.header.cmdsize
        return load_commands

    def __parseSegmentCommand(self, data, offset, raw):
        sc = self._classes.SegmentCommand.from_buffer(data, offset)
        sections = self.__parseSections(data, sc, offset + sizeof(self._classes.SegmentCommand))
        return LoadCommandData(header=sc, name=sc.segname.decode('ASCII'), sections=sections, bytes=bytearray(raw), raw=raw)

    def __parseUuidCommand(self, data, offset, raw):
        uc = UuidCommand.from_buffer(data, offset)
        return LoadCommandData(header=uc, uuid=hexlify(uc.uuid), bytes=bytearray(raw), raw=raw)

    def __parseTwoLevelHintCommand(self, data, offset, raw):
        tlhc = TwoLevelHintsCommand.from_buffer(data, offset)
        hints = self.__parseTwoLevelHints(data, tlhc)
        return LoadCommandData(header=tlhc, twoLevelHints=hints, bytes=bytearray(raw), raw=raw)

    def __parseTwoLevelHints(self, data, twoLevelHintCommand):
        """Read the nhints-entry hint table starting at the command's offset."""
        offset = twoLevelHintCommand.offset
        hints = []
        # Bug fix: iterate range(nhints) (the original tried to iterate an
        # int) and advance the offset per entry (it previously re-read the
        # first hint every iteration).
        for i in range(twoLevelHintCommand.nhints):
            tlh = TwoLevelHint.from_buffer(data, offset)
            hints.append(TwoLevelHintData(header=tlh))
            offset += sizeof(TwoLevelHint)
        return hints

    def __parseDylibCommand(self, data, offset, raw):
        dc = DylibCommand.from_buffer(data, offset)
        name = get_str(raw, dc.dylib.name.offset)
        return LoadCommandData(header=dc, bytes=bytearray(raw), raw=raw, name=name)

    def __parseDylinkerCommand(self, data, offset, raw):
        dc = DylinkerCommand.from_buffer(data, offset)
        name = get_str(raw, dc.name.offset)
        return LoadCommandData(header=dc, bytes=bytearray(raw), raw=raw, name=name)

    def __parseSections(self, data, segment, offset):
        """Parse the nsects section headers that follow a segment command."""
        sections = []
        for i in range(segment.nsects):
            sec = self._classes.Section.from_buffer(data, offset)
            if self._classes.Section == LSB_64_Section:
                offset += 80  # fixed on-disk size of a 64-bit section header
            else:
                offset += sizeof(self._classes.Section)
            # Section contents are only mapped for non-dSYM files (or the
            # __DWARF segment of a dSYM, whose data is actually present).
            if self.machHeader.header.filetype != MH.DSYM or segment.segname == b"__DWARF":
                sec_raw = (c_ubyte * sec.size).from_buffer(data, sec.offset)
                sec_bytes = bytearray(sec_raw)
            else:
                sec_raw = None
                sec_bytes = None
            sections.append(SectionData(header=sec, name=sec.sectname.decode('ASCII'), bytes=sec_bytes, raw=sec_raw))
        return sections

    @classmethod
    def isSupportedContent(cls, fileContent):
        """Returns if the files are valid for this filetype"""
        magic = bytearray(fileContent)[:4]
        magics = (
            p('>I', 0xfeedface),
            p('>I', 0xfeedfacf),
            p('>I', 0xcafebabe),
            p('<I', 0xfeedface),
            p('<I', 0xfeedfacf),
            p('<I', 0xcafebabe),
        )
        return magic in magics
|
18,428 | 70dd1177495e940afabd1bd48d7936727445a33f | from plark_game.classes.agent import Agent
from plark_game.classes.observation import Observation
import numpy as np
import torch
import datetime
import os
import json
import csv
#One should never instantiate an NNAgent - one should always instantiate one of its
#subclasses
class NNAgent(Agent):
def __init__(self, num_inputs, num_outputs,
             num_hidden_layers=0, neurons_per_hidden_layer=0,
             file_dir_name=None, agent_type=None, game=None,
             stochastic_actions=False, driving_agent=True,
             in_tournament=False):
    """Build an NN-backed agent, either from constructor arguments or by
    loading architecture, observation settings, and weights from disk
    (when file_dir_name is given).
    """
    self.agent_type = agent_type
    self.stochastic_actions = stochastic_actions
    self.driving_agent = driving_agent
    self.in_tournament = in_tournament
    #For reading and writing models
    self.base_path = '/data/agents/evo_models/'
    #Number of outputs is always given via the subclass call to this constructor
    self.num_outputs = num_outputs
    #If file directory name is given, read from file
    if file_dir_name is not None:
        # Saved metadata overrides the constructor's architecture arguments.
        metadata, genotype = self._read_agent_from_file(file_dir_name)
        self.num_inputs = metadata['num_inputs']
        self.num_hidden_layers = metadata['num_hidden_layers']
        self.neurons_per_hidden_layer = metadata['neurons_per_hidden_layer']
        self.stochastic_actions = metadata['stochastic_actions']
        # Observation settings used to reconstruct the same input encoding
        # the agent was trained with.
        obs_kwargs = {}
        obs_kwargs['driving_agent'] = self.agent_type
        obs_kwargs['normalise'] = metadata['normalise']
        obs_kwargs['domain_params_in_obs'] = metadata['domain_params_in_obs']
        self.obs_kwargs = obs_kwargs
        if not self.in_tournament:
            assert game is not None, "Need to hand NewGame object to NNAgent " \
                "constructor in order to build the Observation class"
            self.observation = Observation(game, **obs_kwargs)
        #Build neural net
        self._build_nn()
        #Set read genotype as weights
        self.set_weights(genotype)
    else:
        #Check that num_inputs is not None
        if num_inputs is None:
            print('One needs to give either a number of inputs or a file directory'
                  ' name to build an NNAgent')
            exit()
        self.num_inputs = num_inputs
        self.num_hidden_layers = num_hidden_layers
        self.neurons_per_hidden_layer = neurons_per_hidden_layer
        self.observation = None
        #Build neural net
        self._build_nn()
def _build_nn(self):
    """Construct the policy network as a Sequential of Linear layers with
    ReLU activations on hidden layers and a softmax over the outputs.

    Bug fix: the original appended `torch.ReLU()` for the second and later
    hidden layers; that attribute does not exist (it is `torch.nn.ReLU`),
    so any network with num_hidden_layers >= 2 crashed with AttributeError.
    """
    layers = []
    if self.num_hidden_layers == 0:
        # Direct input -> output mapping.
        layers.append(torch.nn.Linear(self.num_inputs, self.num_outputs))
    else:
        layers.append(torch.nn.Linear(self.num_inputs, self.neurons_per_hidden_layer))
        # Hidden layers have ReLU activation.
        layers.append(torch.nn.ReLU())
        for _ in range(self.num_hidden_layers - 1):
            layers.append(torch.nn.Linear(self.neurons_per_hidden_layer,
                                          self.neurons_per_hidden_layer))
            layers.append(torch.nn.ReLU())  # was torch.ReLU() — AttributeError
        layers.append(torch.nn.Linear(self.neurons_per_hidden_layer, self.num_outputs))
    # Final layer goes through a softmax to produce action probabilities.
    layers.append(torch.nn.Softmax(dim=0))
    self.nn = torch.nn.Sequential(*layers).double()
#Run a list of inputs through the network and return the output as a list
def _forward_pass(self, x):
    inputs = torch.tensor(x, dtype=torch.float64)
    return self.nn.forward(inputs).tolist()
#Draw an action index according to the network's output probabilities
def _sample_action(self, net_out):
    candidates = np.arange(len(net_out))
    return np.random.choice(candidates, p=net_out)
#Get the most probable action from the network output probability distribution
def _get_most_probable_action(self, net_out):
    # Greedy action: index of the highest output probability.
    return np.argmax(net_out)
def getAction(self, state):
    """Map a game state to an action via the policy network."""
    # Non-driving agents receive the raw state dict; encode it first.
    if not self.driving_agent:
        state = self.observation.get_observation(state)
    assert len(state) == self.num_inputs, "State length: {}, num inputs: {}" \
        .format(len(state), self.num_inputs)
    probabilities = self._forward_pass(state)
    # Either sample from, or take the argmax of, the output distribution.
    if self.stochastic_actions:
        action = self._sample_action(probabilities)
    else:
        action = self._get_most_probable_action(probabilities)
    # Translate the index to the game's action representation when needed.
    if self.in_tournament or not self.driving_agent:
        action = self.action_lookup(action)
    return action
# Reassemble the observation that the tournament framework supplies in pieces.
def getTournamentAction(self, obs, obs_normalised, domain_parameters,
                        domain_parameters_normalised, state):
    """Stitch the (optionally normalised) observation and domain parameters
    together as configured in ``obs_kwargs`` and delegate to getAction."""
    normalise = self.obs_kwargs['normalise']
    stitched_obs = obs_normalised if normalise else obs
    if self.obs_kwargs['domain_params_in_obs']:
        extra = domain_parameters_normalised if normalise else domain_parameters
        stitched_obs = np.concatenate((stitched_obs, extra))
    return self.getAction(stitched_obs)
# Count the trainable parameters of the network.
def get_num_weights(self):
    """Return the total number of weights (including biases) in the network."""
    return sum(params.numel()
               for layer in self.nn
               for params in layer.parameters())
def print_weights(self):
    """Print every parameter tensor of the network, layer by layer."""
    all_params = (p for layer in self.nn for p in layer.parameters())
    for p in all_params:
        print(p)
def _set_weights_err_msg(self, weights_len, num_weights_required):
return "Trying to set {} weights to an NN that requires {} weights" \
.format(weights_len, num_weights_required)
# Load a flat sequence of weights into the network.
def set_weights(self, new_weights):
    """Overwrite the network parameters with *new_weights*.

    The sequence length must equal get_num_weights(); values are consumed
    in layer/parameter order and reshaped to each parameter's size.
    """
    num_weights_required = self.get_num_weights()
    assert num_weights_required == len(new_weights), \
        self._set_weights_err_msg(len(new_weights), num_weights_required)
    cursor = 0
    for layer in self.nn:
        for params in layer.parameters():
            count = params.numel()
            # Slice out this parameter's chunk of the flat vector.
            chunk = new_weights[cursor:cursor + count]
            cursor += count
            # Resize and install the new values.
            params.data = torch.tensor(np.reshape(chunk, params.size()),
                                       dtype=torch.float64)
# Flatten all network parameters into one Python list.
def get_weights(self):
    """Return every network parameter as a single flat list of floats."""
    flat = []
    for layer in self.nn:
        for params in layer.parameters():
            flat.extend(params.flatten().tolist())
    return flat
def _save_metadata(self, dir_path, player_type, obs_normalise, domain_params_in_obs):
metadata = {}
metadata['playertype'] = player_type
metadata['normalise'] = obs_normalise
metadata['domain_params_in_obs'] = domain_params_in_obs
metadata['stochastic_actions'] = self.stochastic_actions
metadata['num_inputs'] = self.num_inputs
metadata['num_hidden_layers'] = self.num_hidden_layers
metadata['neurons_per_hidden_layer'] = self.neurons_per_hidden_layer
file_path = dir_path + '/metadata.json'
with open(file_path, 'w') as outfile:
json.dump(metadata, outfile)
def _save_genotype(self, dir_path):
#Save genotype as a csv - it is just a list
file_path = dir_path + '/genotype.csv'
with open(file_path, 'w') as outfile:
csv_writer = csv.writer(outfile)
csv_writer.writerow(self.get_weights())
def _save_agent_to_file(self, player_type, obs_normalise, domain_params_in_obs,
                        file_name_suffix=''):
    """Persist this agent (metadata + genotype) under a new time-stamped directory.

    The directory is created as ``<base_path><player_type>_<timestamp><suffix>``,
    then metadata.json and genotype.csv are written into it.
    """
    # Construct full directory path from the player type and current time.
    date_time = str(datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
    dir_name = player_type + '_' + date_time + file_name_suffix
    dir_path = self.base_path + dir_name
    # Create directory for model; world-writable, presumably so external
    # tooling can read/clean it up — TODO confirm this is intended.
    os.makedirs(dir_path, exist_ok=True)
    os.chmod(dir_path, 0o777)
    # Save metadata
    self._save_metadata(dir_path, player_type, obs_normalise, domain_params_in_obs)
    # Save genotype
    self._save_genotype(dir_path)
def _read_metadata(self, metadata_filepath):
with open(metadata_filepath, 'r') as metadata_file:
metadata = json.load(metadata_file)
return metadata
def _read_genotype(self, genotype_filepath):
with open(genotype_filepath, 'r') as genotype_file:
reader = csv.reader(genotype_file)
genotype = list(map(float, list(reader)[0]))
return genotype
def _read_agent_from_file(self, dir_name):
    """Load a previously saved agent's metadata and genotype.

    Returns:
        Tuple of (metadata dict, genotype list) read from
        ``<base_path>/<dir_name>/``.
    """
    dir_path = self.base_path + dir_name + '/'
    if self.in_tournament:
        # NOTE(review): presumably the tournament container mounts the data
        # under this fixed root — confirm against the deployment setup.
        dir_path = "/plark_ai_public" + dir_path
    # Read metadata
    metadata = self._read_metadata(dir_path + 'metadata.json')
    # Read genotype
    genotype = self._read_genotype(dir_path + 'genotype.csv')
    return metadata, genotype
|
18,429 | fd814e1baa710c2be2f6be70112733de0a719c25 | import os
import torch
from parse import parse
__all__ = ['Saver']

# Checkpoint file name pattern: zero-padded epoch number, e.g. "0007.ckpt".
CKPT_FMT = '{:04d}.ckpt'
# Keys used inside the saved checkpoint dictionary.
STATE = 'state'
OPTIM = 'optim'
STEP = 'step'
EPOCH = 'epoch'
class Saver(object):
    """Manages model/optimizer checkpoints: creating, saving, loading and
    pruning old checkpoint files under ``<ckpt_path>/param``.

    The *config* object must provide: ckpt_path, max_to_keep, device, model,
    model_param, optim, optim_param (and optionally transfer_param).
    """

    def __init__(self, config):
        self.config = config
        self.param_dir = os.path.join(self.config.ckpt_path, 'param')
        if not os.path.exists(self.param_dir):
            os.makedirs(self.param_dir)
        # Epochs of checkpoints currently on disk, sorted ascending.
        self.params = self.get_params_on_path(self.config.max_to_keep)

    def get_params_on_path(self, max_to_keep):
        """Return the epochs of the newest *max_to_keep* checkpoints on disk,
        sorted ascending (oldest first)."""
        params = []
        if os.path.exists(self.param_dir):
            file_list = os.listdir(self.param_dir)
            for param in file_list:
                # Only files matching the CKPT_FMT pattern are checkpoints.
                parsed_list = parse(CKPT_FMT, param)
                if parsed_list:
                    epoch = parsed_list[0]
                    params.append(epoch)
        params = sorted(params)
        params = params[-max_to_keep:]
        return params

    def load(self):
        """Restore the newest checkpoint, or start fresh if none exist.

        Returns:
            (model, optim, step, epoch) — epoch is -1 when starting fresh.
        """
        model, optim, step, epoch = self.create()
        if self.params:
            ckpt = torch.load(os.path.join(self.param_dir, CKPT_FMT.format(self.params[-1])),
                              map_location=self.config.device)
            model.load_state_dict(ckpt[STATE])
            model.to(self.config.device)
            optim.load_state_dict(ckpt[OPTIM])  # may create new optimizer
            step = ckpt[STEP]
            epoch = ckpt[EPOCH]
            print("Loaded the model at {} epoch and {} step".format(epoch, step))
        else:
            print("Start from scratch ... ")
        return model, optim, step, epoch

    def load_target(self, target_epoch):
        """Restore the checkpoint saved at exactly *target_epoch*.

        Raises:
            Exception: if no checkpoint for that epoch exists.
        """
        model, optim, step, epoch = self.create()
        for dirpath, dirnames, filenames in os.walk(self.param_dir):
            for filename in filenames:
                parsed_list = parse(CKPT_FMT, filename)
                if parsed_list is not None:
                    epoch = parsed_list[-1]
                    if epoch == target_epoch:
                        # NOTE(review): unlike load(), no map_location is
                        # given here, so a GPU checkpoint may fail to load on
                        # a CPU-only host — confirm whether that is intended.
                        ckpt = torch.load(
                            os.path.join(self.param_dir, filename))
                        model.load_state_dict(ckpt[STATE])
                        optim.load_state_dict(ckpt[OPTIM])
                        step = ckpt[STEP]
                        epoch = ckpt[EPOCH]
                        return model, optim, step, epoch
        raise Exception('Cannot find specific model: epoch %d' % target_epoch)

    def save(self, model, optim, step, epoch):
        """Write a checkpoint for *epoch*, deleting the oldest checkpoint
        once more than ``config.max_to_keep`` are on disk."""
        filename_to_save = os.path.join(self.param_dir, CKPT_FMT.format(epoch))
        torch.save(
            {
                STATE: model.state_dict(),
                OPTIM: optim.state_dict(),
                STEP: step,
                EPOCH: epoch,
            },
            filename_to_save
        )
        self.params.append(epoch)
        # Prune: keep only the newest max_to_keep checkpoints.
        if len(self.params) > self.config.max_to_keep:
            param_to_del = self.params.pop(0)
            filepath_to_del = os.path.join(self.param_dir,
                                           CKPT_FMT.format(param_to_del))
            os.remove(filepath_to_del)

    def create(self):
        """Instantiate a fresh model and optimizer from the config.

        Returns:
            (model, optim, step=0, epoch=-1).
        """
        model = self.config.model(**self.config.model_param).to(self.config.device)
        if hasattr(model, 'init_weights'):
            model.init_weights()
        # Optionally copy weights from another model when both the model
        # supports it and the config requests it.
        if hasattr(model, 'transfer_weights') and hasattr(self.config, 'transfer_param'):
            model.transfer_weights(**self.config.transfer_param)
        optim = self.config.optim(model.parameters(), **self.config.optim_param)
        step = 0
        epoch = -1
        return model, optim, step, epoch
|
18,430 | 5c2bff6b551c9eca0b41348fa617accdb3085128 | # -*- coding: utf-8 -*-
from flask import Blueprint, abort, render_template
from flask import current_app as app
from jinja2 import TemplateNotFound
from flask.ext.login import current_user
# Blueprint serving simple static-template pages; each slug maps to
# templates/simplepages/<slug>.html.
bp = Blueprint(
    'simplepages',
    __name__,
    template_folder='templates',
    static_folder='static'
)
@bp.route('/', defaults={'slug': 'default'})
@bp.route('/<slug>')
def index(slug):
    """Render the simple page for *slug*, enforcing login where configured.

    Slugs listed in the SIMPLEPAGES_LOGIN_REQUIRED config are served only to
    authenticated users; a missing template yields a 404.
    """
    page_login_required = slug in app.config['SIMPLEPAGES_LOGIN_REQUIRED']
    auth = not current_user.is_authenticated()
    if auth and page_login_required:
        return app.login_manager.unauthorized()
    try:
        return render_template('simplepages/{0}.html'.format(slug))
    except TemplateNotFound, e:  # NOTE: Python 2 syntax; use "as e" if porting to Python 3
        app.logger.error(e)
        abort(404)
|
18,431 | 183b84f1a1eff976e71bd842e15a7aa87c409980 | import pytest
from unittest.mock import Mock
from src.Validations.Validator import Validator
def test_check_failure():
    """check_for_failures should resolve each validation via its getInstance factory."""
    fake_validation = Mock()
    validations = [fake_validation]
    report = Mock()
    student = Mock()
    course_section = Mock()
    validator = Validator(student, course_section)
    validator.check_for_failures(validations, report)
    # BUG FIX: the original line lacked parentheses, so it merely accessed a
    # Mock attribute and asserted nothing at all.
    fake_validation.getInstance.assert_called_once()
|
18,432 | 221cd250356089c398cff86981189c4f568c6533 | /usr/share/pyshared/numpy/random/setupscons.py |
18,433 | f1a3656695c6d59d7cffa3028ff6aab2bca6dd85 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-09 09:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the Deck and Ship tables (auto-generated by Django 1.9)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Deck',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=25)),
                ('sort_order', models.IntegerField()),
                ('ship', models.CharField(max_length=25)),
                ('image_link', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='Ship',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=25)),
                # NOTE: 'wieght' is misspelled, but fixing it requires a new
                # RenameField migration — do not edit this applied migration.
                ('wieght', models.FloatField()),
                ('capacity', models.IntegerField()),
                ('year', models.IntegerField()),
                ('description', models.CharField(default='', max_length=250)),
            ],
        ),
    ]
|
18,434 | bb0aa3ca6482351cc24225c65c64fbc1f5bbbd20 | import json
import numpy as np
from scipy.optimize import least_squares
from scipy.interpolate import interp1d
from scipy.integrate import trapz
# import matplotlib.pyplot as plt
from tqdm import tqdm
from skspatial.objects import Plane
from skspatial.objects import Points
from skspatial.plotting import plot_3d
from skspatial.plotting import plt
from util import Image, perpendicular_linecuts, load_magnetic_data, normalised_gaussian, hist_and_fit_gauss
RESAMPLE_FACTOR = 20
def magnetic_edge(x, x0, Ms, theta, phi, d_x, d_z, t):
    """Field profile of a straight magnetic edge located at *x0*.

    Args:
        x: position(s) to evaluate at (scalar or ndarray).
        x0: edge position.
        Ms: saturation magnetisation.
        theta, phi: magnetisation polar/azimuthal angles in radians.
        d_x, d_z: stand-off distances used by the in-plane / out-of-plane terms.
        t: magnetic layer thickness.

    Returns:
        The edge field, same shape as *x*.
    """
    u = x - x0
    # (Removed an unused 'u2 = u**2' temporary left over in the original.)
    return 2 * Ms * t * (np.sin(theta) * np.cos(phi) * d_x / (u**2 + d_x**2)
                         - np.cos(theta) * u / (u**2 + d_z**2))
def evaluate_cuts(params, lcx, lcy, t):
    """Evaluate the sharp edge model along the x and y linecuts for *params*.

    The y-cut uses the azimuth rotated by +/-90 degrees depending on *rot*;
    each cut gets its own constant offset (c_x / c_y).
    """
    x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y = extract_params(params)
    phi_y = phi + ((-1)**int(rot)) * np.pi / 2
    cut_x = magnetic_edge(lcx, x0_x, Ms, theta, phi, d_x, d_z, t) + c_x
    cut_y = magnetic_edge(lcy, x0_y, Ms, theta, phi_y, d_x, d_z, t) + c_y
    return cut_x, cut_y
def evaluate_gaussian_cuts(params, lcx, lcy, fwhm, t):
    """Evaluate the edge model on both linecuts, blurred by a Gaussian of
    width *fwhm*, then decimated back to the original sample spacing."""
    # NOTE(review): this local constant shadows the module-level
    # RESAMPLE_FACTOR (= 20) — confirm the two are meant to differ.
    RESAMPLE_FACTOR = 10 # to interpolate and then decimate by
    flcx, flcy = evaluate_cuts(params, lcx, lcy, t)
    fx = interp1d(lcx, flcx)
    fy = interp1d(lcy, flcy)
    # Resample onto a finer grid so the convolution kernel is well resolved.
    x_smooth = np.linspace(lcx[0], lcx[-1], lcx.shape[0] * RESAMPLE_FACTOR)
    y_smooth = np.linspace(lcy[0], lcy[-1], lcy.shape[0] * RESAMPLE_FACTOR)
    dx = x_smooth[1] - x_smooth[0]
    dy = y_smooth[1] - y_smooth[0]
    # Gaussian kernels centred on the middle of each cut.
    kernel_x = normalised_gaussian(x_smooth-(lcx[0]+lcx[-1])/2, fwhm)
    kernel_y = normalised_gaussian(y_smooth-(lcy[0]+lcy[-1])/2, fwhm)
    # Multiplying by the sample spacing makes the discrete convolution
    # approximate the continuous integral.
    cflcx_smooth = np.convolve(fx(x_smooth), kernel_x, mode='same') * dx
    cflcy_smooth = np.convolve(fy(y_smooth), kernel_y, mode='same') * dy
    # from scipy.integrate import trapezoid
    # print(trapezoid(x_smooth, kernel_x))
    # fig, axes = plt.subplots(2, 3)
    # axes[0][0].plot(fx(x_smooth), label="edge")
    # axes[0][1].plot(cflcx_smooth, label="edge*G")
    # axes[0][2].plot(x_smooth, kernel_x, label="G")
    # axes[1][0].plot(fy(y_smooth), label="edge")
    # axes[1][1].plot(cflcy_smooth, label="edge*G")
    # axes[1][2].plot(y_smooth, kernel_y, label="G")
    # [[ax.legend() for ax in row] for row in axes]
    # plt.show()
    # quit()
    # now decimate back to the caller's sample positions
    cflcx = cflcx_smooth[::RESAMPLE_FACTOR]
    cflcy = cflcy_smooth[::RESAMPLE_FACTOR]
    return cflcx, cflcy
def evaluate_gaussian_layer_cuts(params, lcx, lcy, fwhm, t, NVt):
    """Average the Gaussian-blurred edge model over a sensing layer of
    thickness *NVt*, by evaluating at 10 depths and integrating."""
    x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y = extract_params(params)
    depths_x = np.linspace(d_x, d_x + NVt, 10)
    depths_y = np.linspace(d_z, d_z + NVt, 10)
    profiles_x = np.zeros((depths_x.size, lcx.size))
    profiles_y = np.zeros((depths_y.size, lcy.size))
    for i, (depth_x, depth_y) in enumerate(zip(depths_x, depths_y)):
        layer_params = compact_params(x0_x, x0_y, Ms, theta, phi, rot,
                                      depth_x, depth_y, c_x, c_y)
        profiles_x[i], profiles_y[i] = evaluate_gaussian_cuts(layer_params, lcx, lcy, fwhm, t)
    # Depth-average via trapezoidal integration over the layer thickness.
    return trapz(profiles_x, depths_x, axis=0) / NVt, trapz(profiles_y, depths_y, axis=0) / NVt
def extract_params(params):
    """Unpack the 10-element fit parameter vector into its named values."""
    x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y = params[:10]
    return x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y
def compact_params(x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y):
    """Pack the named fit values back into the 10-element parameter list."""
    packed = [x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y]
    return packed
def two_cut_residual(params, lcx, lcxv, lcy, lcyv, t):
    """Residuals of the sharp edge model against both linecuts, concatenated."""
    model_x, model_y = evaluate_cuts(params, lcx, lcy, t)
    return np.concatenate([lcxv - model_x, lcyv - model_y])
def two_cut_gaussian_residual(params, lcx, lcxv, lcy, lcyv, fwhm, t):
    """Residuals of the Gaussian-blurred edge model against both linecuts."""
    model_x, model_y = evaluate_gaussian_cuts(params, lcx, lcy, fwhm, t)
    return np.concatenate([lcxv - model_x, lcyv - model_y])
def two_cut_gaussian_layer_residual(params, lcx, lcxv, lcy, lcyv, fwhm, t, NVt):
    """Residuals of the layer-averaged, Gaussian-blurred model on both linecuts."""
    model_x, model_y = evaluate_gaussian_layer_cuts(params, lcx, lcy, fwhm, t, NVt)
    return np.concatenate([lcxv - model_x, lcyv - model_y])
def get_bounds(lcx, lcy):
    """Lower/upper least-squares bounds for the fit parameters.

    Returns:
        A (2, 10) float array: row 0 lower bounds, row 1 upper bounds.
        The edge positions are constrained to lie inside each linecut.
    """
    lower = compact_params(
        x0_x=lcx[0],
        x0_y=lcy[0],
        Ms=-1,
        theta=0,
        phi=-180 * np.pi / 180,
        rot=-np.inf,
        d_x=0,
        d_z=0,
        c_x=-np.inf,
        c_y=-np.inf,
    )
    upper = compact_params(
        x0_x=lcx[-1],
        x0_y=lcy[-1],
        Ms=1,
        theta=90 * np.pi / 180,
        phi=180 * np.pi / 180,
        rot=np.inf,
        d_x=np.inf,
        d_z=np.inf,
        c_x=np.inf,
        c_y=np.inf,
    )
    return np.array([lower, upper])
def get_x_guess(lcx, lcy):
    """Initial parameter guess: edge at each linecut's midpoint, mild defaults."""
    mid_x = lcx[len(lcx) // 2]
    mid_y = lcy[len(lcy) // 2]
    return compact_params(
        x0_x=mid_x,
        x0_y=mid_y,
        Ms=1e-2,
        theta=30 * np.pi / 180,
        phi=0,
        rot=0,
        d_x=3e-6,
        d_z=3e-6,
        c_x=0,
        c_y=0,
    )
def fit_magnetic_edge(lcx, lcxv, lcy, lcyv, t):
    """Least-squares fit of the sharp edge model to both linecuts."""
    guess = get_x_guess(lcx, lcy)
    limits = get_bounds(lcx, lcy)
    return least_squares(two_cut_residual, args=(lcx, lcxv, lcy, lcyv, t),
                         x0=guess, bounds=limits)
def fit_magnetic_edge_with_gaussian(lcx, lcxv, lcy, lcyv, fwhm, t):
    """Least-squares fit of the Gaussian-blurred edge model to both linecuts."""
    guess = get_x_guess(lcx, lcy)
    limits = get_bounds(lcx, lcy)
    return least_squares(two_cut_gaussian_residual,
                         args=(lcx, lcxv, lcy, lcyv, fwhm, t),
                         x0=guess, bounds=limits)
def fit_magnetic_edge_with_gaussian_layer(lcx, lcxv, lcy, lcyv, fwhm, t, NVt):
    """Least-squares fit of the layer-averaged, Gaussian-blurred edge model."""
    guess = get_x_guess(lcx, lcy)
    limits = get_bounds(lcx, lcy)
    return least_squares(two_cut_gaussian_layer_residual,
                         args=(lcx, lcxv, lcy, lcyv, fwhm, t, NVt),
                         x0=guess, bounds=limits)
######################################################################################
def main():
    """Fit the magnetic-edge model along many parallel linecuts of the image
    and visualise the fitted stand-off distances as a best-fit plane in 3D."""
    magnetic_layer_thickness = 1e-9
    NV_layer_thickness = 1e-6
    N_linecuts = 10
    linecut_width = 20e-6
    img, optical_fwhm, p1, p2, p3, p4 = load_magnetic_data("magnetic_20x.json")
    optical_fwhm_px = optical_fwhm / img.px_to_m(1) * RESAMPLE_FACTOR
    # The Gaussian kernel must span enough resampled pixels to be resolved.
    assert optical_fwhm_px > 10
    d_x_vals = []
    d_y_vals = []
    Ms_vals = []
    theta_vals = []
    plot_every = N_linecuts**2 // 5
    # Per-cut centre points (two linecut families) and fitted distances.
    d_pts = np.zeros((2, N_linecuts, 2))
    d_map = np.zeros((2, N_linecuts))
    pbar = tqdm(total=N_linecuts)
    iterx = perpendicular_linecuts(img, p1, p2, linecut_width, N_linecuts, return_center=True)
    itery = perpendicular_linecuts(img, p3, p4, linecut_width, N_linecuts, return_center=True)
    for ((lcx, lcxv), cx), ((lcy, lcyv), cz) in zip(iterx, itery):
        pbar.update()
        result = fit_magnetic_edge_with_gaussian(lcx, lcxv, lcy, lcyv, optical_fwhm, magnetic_layer_thickness)
        x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y = extract_params(result.x)
        # Keep only plausible fits (stand-off below 40 um); rejected cuts
        # stay zero and are filtered out below.
        if 0 < d_x < 40e-6 and 0 < d_z < 40e-6:
            d_pts[0,pbar.n-1] = cx
            d_pts[1,pbar.n-1] = cz
            d_map[0,pbar.n-1] = d_x
            d_map[1,pbar.n-1] = d_z
    d_pts = d_pts.reshape(2*N_linecuts, 2)
    # plt.scatter(pts[:, 0], pts[:, 1], c=d_map.ravel())
    # fig = plt.figure()
    # ax = fig.add_subplot(projection='3d')
    # ax.scatter(pts[:, 0], pts[:, 1], d_map.ravel())
    # plt.show()
    # Assemble (x, y, distance) points and drop the unfitted (zero) entries.
    pts = np.zeros((2*N_linecuts, 3))
    pts[:,0] = d_pts[:,0]
    pts[:,1] = d_pts[:,1]
    pts[:,2] = d_map.ravel()
    pts = pts[np.where(pts[:,2] != 0)]
    pts = Points(pts)
    print(pts)
    plane = Plane.best_fit(pts)
    xlim, ylim = img.get_size_m()
    print(xlim, ylim)
    plot_3d(
        pts.plotter(c='k', s=50, depthshade=False),
        # plane.plotter(alpha=0.2, lims_x=(-xlim/2, xlim/2), lims_y=(-ylim/2, ylim/2)),
    )
    plt.xlim(0, xlim)
    plt.ylim(0, ylim)
    ax = plt.gca()
    ax.set_zlim(0, 5e-6)
    X, Y, Z = plane.to_mesh(lims_x=(-ylim/2, ylim/2), lims_y=(-ylim/2, ylim/2))
    x = np.linspace(0, xlim, img.data.shape[0])
    y = np.linspace(0, ylim, img.data.shape[1])
    xx, yy = np.meshgrid(x, y)
    # Bilinear surface spanning the plane's corner heights over the image.
    zz = np.linspace(start=np.linspace(Z[0][0],Z[0][1], img.data.shape[0]), stop=np.linspace(Z[1][0],Z[1][1], img.data.shape[0]), num=img.data.shape[1])
    # zz = # np.ones_like(xx) * np.max(d_map)
    # ax.contourf(xx, yy, img.data, 0, zdir='z', vmin=-20e-6, vmax=20e-6, cmap="BrBG")
    # Map the magnetic image onto [0, 1] for use as surface face colours.
    data = np.clip((img.data+20e-6)/40e-6, 0, 1)
    ax.plot_surface(xx, yy, zz, rstride=16, cstride=16, facecolors=plt.cm.bwr(data), shade=False, alpha=0.2)
    xx, yy = np.meshgrid([x[0],x[-1]], [y[0],y[-1]])
    zz = np.zeros_like(xx)
    ax.plot_surface(xx, yy, zz)
    # plt.figure()
    # plt.imshow(data)
    # plt.colorbar()
    print(f"angle = {np.arccos(np.dot([0, 0, 1], plane.normal))}")
    plt.show()
    # for lcx, lcxv, cx in perpendicular_linecuts(img, p1, p2, linecut_width, N_linecuts, return_center=True):
    #     for lcy, lcyv, cz in perpendicular_linecuts(img, p3, p4, linecut_width, N_linecuts, return_center=True):
    #         pbar.update()
    #         # result = fit_magnetic_edge(lcx, lcxv, lcy, lcyv, magnetic_layer_thickness)
    #         # flcx, flcy = evaluate_cuts(result.x, lcx, lcy)
    #         result = fit_magnetic_edge_with_gaussian(lcx, lcxv, lcy, lcyv, optical_fwhm, magnetic_layer_thickness)
    #         # result = fit_magnetic_edge_with_gaussian_layer(lcx, lcxv, lcy, lcyv, optical_fwhm, magnetic_layer_thickness, NV_layer_thickness)
    #         x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y = extract_params(result.x)
    #         if d_x < 10e-6 and d_z < 10e-6:
    #             Ms_vals.append(abs(Ms))
    #             d_x_vals.append(d_x)
    #             d_y_vals.append(d_z)
    #             theta_vals.append(theta)
    #         if pbar.n % plot_every == 0:
    #             flcx, flcy = evaluate_gaussian_cuts(result.x, lcx, lcy, optical_fwhm, magnetic_layer_thickness)
    #             # flcx, flcy = evaluate_gaussian_layer_cuts(result.x, lcx, lcy, optical_fwhm, magnetic_layer_thickness, NV_layer_thickness)
    #             fig, axes = plt.subplots(1, 2)
    #             axes[0].plot(lcx*1e6, lcxv, 'x')
    #             axes[1].plot(lcy*1e6, lcyv, 'x')
    #             axes[0].set_xlabel('x (um)')
    #             axes[1].set_xlabel('y (um)')
    #             axes[0].plot(lcx*1e6, flcx)
    #             axes[1].plot(lcy*1e6, flcy)
    #             # print()
    #             # print(d)
    #             # plt.show()
    #             # quit()
    # print()
    # print(f"mean d_x = {np.mean(d_x_vals)*1e6:.2f}um")
    # print(f"std d_x = {np.std(d_x_vals)*1e6:.2f}um")
    # print(f"mean d_z = {np.mean(d_y_vals)*1e6:.2f}um")
    # print(f"std d_z = {np.std(d_y_vals)*1e6:.2f}um")
    # print(f"mean Ms = {np.mean(Ms_vals) * 1e7 / 1e6:.2e} MA/m")
    # print(f"std Ms = {np.std(Ms_vals) * 1e7 / 1e6:.2e} MA/m")
    # print(f"mean theta = {np.mean(theta_vals)*180/np.pi:.2f} deg")
    # print(f"std theta = {np.std(theta_vals)*180/np.pi:.2f} deg")
    # print()
    # fit_d_x, std_d_x = hist_and_fit_gauss(np.array(d_x_vals), plot=True, title="d_x")
    # fit_d_y, std_d_y = hist_and_fit_gauss(np.array(d_y_vals), plot=True, title="d_z")
    # fit_theta, std_theta = hist_and_fit_gauss(np.array(theta_vals), plot=True, title="theta")
    # fit_Ms, std_Ms = hist_and_fit_gauss(np.array(Ms_vals), plot=True, logplot=True, title="Ms")
    # print(f"fit d_x = {fit_d_x*1e6:.2f} +/- {std_d_x*1e6:.2f} um")
    # print(f"fit d_z = {fit_d_y*1e6:.2f} +/- {std_d_y*1e6:.2f} um")
    # print(f"fit theta = {fit_theta*180/np.pi:.2f} +/- {std_theta*180/np.pi:.2f} um")
    # print(f"fit Ms = {fit_Ms*1e7/1e6:.2f} +/- {std_Ms*1e7/1e6:.2f} MA/m")
    # plt.show()
# Script entry point.
if __name__ == "__main__":
    main()
18,435 | 7048d6c8236262b4939a9907e7dc966c3852f19f | #!/bin/python3
import math
import os
import random
import re
import sys
import json
import pandas as pd
# Example association rules over the census data, written in
# "{antecedent}=>{consequent}" form; used when the script is run directly.
RULES = ['{native-country=United-States,capital-gain=None}=>{capital-loss=None}',
         '{capital-gain=None,capital-loss=None}=>{native-country=United-States}',
         '{native-country=United-States,capital-loss=None}=>{capital-gain=None}'
         ]
#
# Complete the 'arrangingRules' function below.
#
# The function is expected to return a STRING_ARRAY.
# The function accepts STRING_ARRAY rules as parameter.
#
def normalize_csv(csv):
    """Load *csv* and strip the "name=" prefixes.

    Column headers of the form "attr=value" are reduced to "attr", and every
    cell "attr=value" is reduced to "value".
    """
    table = pd.read_csv(csv)
    table.columns = [column.split('=')[0] for column in table.columns.values.tolist()]
    return table.applymap(lambda cell: cell.split('=')[1])
def normalize_rules(rules):
    """Map each rule string to a list of [attribute, value] pairs.

    '{a=1,b=2}=>{c=3}' becomes [['a', '1'], ['b', '2'], ['c', '3']]; by
    construction the consequent is always the final pair.
    """
    parsed = {}
    for rule in rules:
        flat = re.sub('=>', ',', re.sub('[\{\}]', '', rule))
        parsed[rule] = [part.split('=') for part in flat.split(',')]
    return parsed
def measure_rule(rules, table):
    """Confidence of one parsed rule: support(X and Y) / support(X).

    *rules* is a list of [attribute, value] pairs whose final entry is the
    consequent Y; all earlier entries form the antecedent X.
    """
    antecedent_rows = table
    for attribute, value in rules[:-1]:
        antecedent_rows = antecedent_rows[antecedent_rows[attribute] == value]
    consequent_attr, consequent_value = rules[-1]
    both_rows = antecedent_rows[antecedent_rows[consequent_attr] == consequent_value]
    support_x = antecedent_rows.shape[0] / table.shape[0]
    support_xy = both_rows.shape[0] / table.shape[0]
    return support_xy / support_x
def arrangingRules(rules):
    """Order association *rules* by confidence against the census data.

    Returns:
        The rule strings sorted from highest to lowest confidence
        (ties broken by the rule string, descending).
    """
    table = normalize_csv('Census/census.csv')
    rules_norm = normalize_rules(rules)
    for rule in rules_norm:
        rules_norm[rule] = measure_rule(rules_norm[rule], table)
    sorted_rules = sorted(rules_norm.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
    return_rules = [rule[0] for rule in sorted_rules]
    # BUG FIX: the original ended with a bare `return`, discarding
    # return_rules even though the function is documented to return the
    # ordered STRING_ARRAY.
    return return_rules
if __name__ == '__main__':
    # Run against the bundled example rules; result printing is left disabled.
    arrangingRules(RULES)
    # print(result)
18,436 | c5ae4f6204d606fbd987bc68a7c432c25a6c12b4 | import torch
import torch.nn as nn
class RadarNet(nn.Module):
    """Radar embedding network: feature extractor followed by global pooling.

    Produces one flat descriptor per batch element, optionally L2-normalised.
    """

    def __init__(self, fe_net: nn.Module, pooling: nn.Module, normalize: bool = False):
        # fe_net: feature extraction network
        # pooling: pooling module (e.g. GeM) reducing spatial dims to 1x1
        super().__init__()
        self.fe_net = fe_net
        self.pooling = pooling
        self.normalize = normalize

    def forward(self, x: torch.Tensor):
        """Embed the 'radar_batch' entry of *x* into a (batch_size, n) tensor."""
        radar = x['radar_batch']
        features = self.fe_net(radar)
        pooled = self.pooling(features)   # (batch_size, n, 1, 1)
        descriptors = pooled.flatten(1)   # reshape to (batch_size, n)
        if self.normalize:
            descriptors = nn.functional.normalize(descriptors, p=2, dim=1)
        return descriptors
|
18,437 | c73af609952ec6322c868756c2a864c2ac6b643c | import sklearn #library for ML algorithm
from sklearn.datasets import load_breast_cancer #fetches database related to breast cancer
from sklearn.model_selection import train_test_split #library to divide data into train and test part
from sklearn.naive_bayes import GaussianNB #library for naive bayes
from sklearn.metrics import accuracy_score # library for accuracy between 0 to 1
# Load the bundled Wisconsin breast-cancer dataset into local variables.
data=load_breast_cancer()
label_names=data['target_names']  # class names
labels=data['target']  # class index per sample
feature_names =data['feature_names']
features=data['data']
# Peek at the first sample to sanity-check the data.
print(label_names)
print(labels[0])
print(feature_names[0])
print(features[0])
# Hold out 40% of the samples for evaluation; fixed seed for reproducibility.
train,test,train_labels,test_labels=train_test_split(features,labels,test_size =0.40,random_state=42)
gnb=GaussianNB()  # Gaussian naive Bayes classifier
model=gnb.fit(train,train_labels)  # fit on the training split
preds=gnb.predict(test)  # predict labels for the held-out split
print("predected data in binary=\n",preds)
print("accuracy is =",accuracy_score(test_labels,preds))  # fraction correct on the test split
18,438 | 5e8e527622a64bcaa709fddb13f69e055d056bc9 | from django.shortcuts import render
from django.http import request
def home(request):
    """Render the site's home page."""
    # Removed an unused in-function "import json, requests" left over from
    # earlier development — neither module was referenced.
    return render(request, 'home.html')
|
18,439 | ea93727965c29d625f35bef78cb175381b392b53 | from enum import Enum
# XML namespace of the AUTOSAR r4.0 schema these bindings were generated from.
__NAMESPACE__ = "http://autosar.org/schema/r4.0"
class DataLimitKindEnumSimple(Enum):
    """Kind of data limit (generated binding for the AUTOSAR r4.0 schema).

    :cvar MAX: Limitation to maximum value
    :cvar MIN: Limitation to minimum value
    :cvar NONE: No limitation applicable
    """
    MAX = "MAX"
    MIN = "MIN"
    NONE = "NONE"
|
18,440 | d505194a83bf29674a0f5f2ab1de0d4e41d0b3d0 | from __future__ import unicode_literals
from django.db import models
class NewsPost(models.Model):
    """A news entry; only 'active' posts are meant to be published."""

    title = models.CharField(max_length=200)
    content = models.TextField()
    # Optional plain-text variant of the content — presumably for previews
    # or search; TODO confirm against the templates that render it.
    text_content = models.TextField(null=True, blank=True)
    thumbnail = models.ImageField(upload_to='news', null=True)
    reference_date = models.DateField()
    publish_date = models.DateField()
    # Optional external link to a fuller blog article.
    blog_page = models.URLField(null=True, blank=True)
    active = models.BooleanField(default=False)

    def __unicode__(self):  # Python 2 string representation
        return self.title
|
18,441 | 77d9e02b83616328868e10154f2fa40ec006d737 | from django.apps import AppConfig
class ManushlagbecoreConfig(AppConfig):
    """Django application configuration for the Manushlagbecore app."""

    name = 'Manushlagbecore'
|
18,442 | 075f7bfadae3e3b2610eca6eceacdfa21a17144e | def loadTable(db, keys, headers, values):
mydb = db
mycursor = mydb.cursor()
mycursor.execute("DROP TABLE IF EXISTS jamf_database.AllComputers;")
mycursor.execute("CREATE TABLE jamf_database.AllComputers (" + headers + ");")
sql = "INSERT INTO jamf_database.AllComputers (" + keys + ") VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
val = values
mycursor.executemany(sql, val)
mydb.commit()
print(mycursor.rowcount, "record inserted.") |
18,443 | 48752df164d9e518362d908e661c69cf2bfe6006 | #!/usr/bin/sudo python3
import logging
import os
#logging.basicConfig(level=logging.DEBUG)
# Module logger (the basicConfig call above is left disabled).
L = logging.getLogger(__name__)

from controller import Controller, setup_main_thread_function_call_queue

# Single shared Controller instance used by all routes below.
c = Controller()

from flask import Flask, Request, Response, jsonify, redirect

app = Flask("Slow Turbo - Nintendo Switch Joycon Robot")
@app.route("/")
def default_root():
    """Redirect the bare root URL to the static single-page UI."""
    return redirect("/static/index.html")
@app.route("/api/status")
def status():
    """Return the controller's current status as JSON."""
    return jsonify(c.get_status())
@app.route("/api/stop")
def stop():
    """Request the controller to stop; always reports success immediately."""
    c.stop()
    return jsonify({
        "success": True,
        "msg": "Stop requested.",
    }), 200
@app.route("/api/start/<string:task>/<string:cond>")
def start(task, cond):
    """Start *task* with stop-condition *cond*; return the controller's result.

    NOTE(review): the boolean ``success`` flag is discarded — presumably
    ``result`` already encodes it; confirm, and consider reflecting failure
    in the HTTP status code.
    """
    success, result = c.start(task, cond)
    return jsonify(result)
# BUG FIX: this function was additionally decorated with @app.route("/api/*"),
# which registered the hook as a view for the *literal* path "/api/*" (Flask
# routes do not glob) — and that view would fail, since no_cache requires an
# argument. after_request alone already applies these headers to every response.
@app.after_request
def no_cache(r: Response):
    """Set cache-suppressing headers on every outgoing response."""
    r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
    r.headers["Pragma"] = "no-cache"
    r.headers["Expires"] = "0"
    # NOTE(review): this overwrites the Cache-Control value set two lines
    # above with "public, max-age=0" — likely unintended; confirm which
    # caching policy is wanted and drop one of the two assignments.
    r.headers['Cache-Control'] = "public, max-age=0"
    return r
def _main(queues):
    """Entry point invoked by the main-thread dispatcher: hand the call queue
    to the controller, then serve the web UI on all interfaces."""
    c._async_exec_queue = queues
    app.run("0.0.0.0")
if __name__ == "__main__":
    # Route controller function calls through the main thread, then run _main.
    setup_main_thread_function_call_queue(_main)
|
18,444 | 1b426401c0362ecab42d3166c0a9e8d7cd21d6c0 | # -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.10.1)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x00\xec\
\x00\
\x00\x04\x36\x78\x9c\x9d\x93\xcd\x0d\xc2\x30\x0c\x85\x8d\xc4\x00\
\x8c\xc0\x91\x7b\xa5\x0e\xc0\x9d\x35\xb2\x93\xd7\xe8\x04\x1d\xc0\
\x53\x20\xf5\x8c\x98\x20\xf8\x39\x71\xff\x48\xda\x42\xa5\x27\xbb\
\x4e\x3e\xbb\x6e\x9c\xfb\xa3\x3d\x93\x3d\xad\xea\xa6\xba\x64\x9d\
\xe8\x9a\x16\xf2\xfa\xfc\x89\x31\x16\xf5\x7c\xbd\xab\x6b\x5b\xcc\
\x30\x0c\x24\x22\x66\x8f\xe6\x70\xae\xeb\x3a\x12\x66\xe2\x10\x4c\
\xf6\xbe\x93\xcb\xea\x65\x06\x16\x42\xdc\x72\x20\xae\x42\x1e\xec\
\x2b\xf1\xce\x45\x71\xc9\x18\x6f\x02\x8f\xaa\xd5\xc7\xbe\x89\x5d\
\xf2\x41\x7d\x88\x38\xc5\x7c\xed\x37\x3e\x52\x03\x5e\x7d\x09\xdf\
\xe7\xe5\x7c\xea\x3d\x2c\x78\x56\x9f\x95\x47\xff\x51\xd9\x2d\xde\
\xec\xec\x1b\x9d\xef\xfb\xde\xb8\x92\xd6\xf5\x93\x3f\xf1\xb0\x35\
\x76\xcd\x97\xfa\x37\x1e\x7d\xfd\xc9\x63\x76\x70\x6e\x76\xbe\x3b\
\xfd\x97\x78\x76\x1f\x56\xb8\xca\xd7\xe6\x47\xb2\xdf\x8c\xe7\xcf\
\xc5\x19\xb2\x7f\xcc\x79\x7e\x65\x9a\x5f\xcc\x8d\xeb\xc8\x3d\x42\
\xbd\xf9\xfd\x39\xc2\x7c\x00\xbb\x93\x5d\x9d\
\x00\x00\x00\x60\
\x00\
\x00\x04\x36\x78\x9c\x73\xf2\x35\x63\x61\x00\x03\x33\x20\xd6\x00\
\x62\x01\x28\x66\x64\x50\x80\x48\x40\xe5\x91\xc1\xff\xff\xff\x47\
\x31\x14\x33\x30\xcc\x24\x0a\xe3\xd6\x7b\x06\x0b\x9e\x89\x44\xe3\
\x36\x03\x22\xfe\x9f\x68\x33\x70\xdb\x4f\x9c\x19\xf8\xed\x27\x6c\
\x06\x6e\xfd\xc4\x98\x81\x5d\xff\xcc\x99\xff\x89\x36\x03\x97\x7e\
\xe2\xcc\xc0\x17\x87\xe4\xc5\xff\x50\xc5\x00\x94\x94\x18\x72\
\x00\x00\x00\x76\
\x00\
\x00\x04\x36\x78\x9c\x73\xf2\x35\x63\x61\x00\x03\x33\x20\xd6\x00\
\x62\x01\x28\x66\x64\x50\x80\x48\x40\xe5\x91\xc1\xff\xff\xff\xb1\
\x62\xe3\x99\x67\x18\x18\xd2\x66\xe2\xc5\xb8\xf4\xc2\xf4\xcf\x04\
\xd2\xb8\xf0\x70\xd2\x7f\x66\x66\x1a\x06\x26\x55\x3f\xa5\xf6\x13\
\xa3\x1f\x24\x86\x8c\xc9\xd1\x7f\x06\x4b\xd8\x91\xa2\x1f\x5b\xb8\
\xe3\xd2\x8f\x0d\x63\x8b\x2b\x6c\xfa\x29\x95\x47\x8f\x4f\x72\xf4\
\x3f\x7d\xfa\x94\xa0\x5e\x42\xf6\x13\xd2\x8b\x2b\xfd\x11\xab\x17\
\x00\xda\x04\x87\x4e\
\x00\x00\x00\x46\
\x00\
\x00\x04\x36\x78\x9c\x73\xf2\x35\x63\x61\x00\x03\x33\x20\xd6\x00\
\x62\x01\x28\x66\x64\x50\x80\x48\x40\xe5\x91\xc1\xff\xff\xff\x07\
\x0d\x26\x06\x0c\x94\x3b\x48\xd5\xdb\xd0\xd0\x00\xe6\x83\x68\x52\
\xcc\xc0\xa6\x76\x54\x3f\x7d\xf4\x63\x8b\x73\x6c\x62\xc3\x09\x03\
\x00\x18\xf9\x66\x27\
\x00\x00\x00\xa1\
\x00\
\x00\x04\x36\x78\x9c\x9d\x52\xc1\x0d\x80\x20\x0c\xac\x89\x03\x38\
\x82\x4f\xff\x26\x0e\xe0\xdf\x35\xd8\x89\x9d\xd8\x09\x29\xda\xd8\
\x54\xaa\xa7\x24\x17\x4a\xef\xae\x69\x81\x75\x5b\x7a\xaa\x6b\x29\
\x98\x0a\x86\x13\x1d\x8d\x07\x71\xf2\x7a\xe5\x9c\x29\x05\xa2\x38\
\x1f\xe0\x98\x73\x2d\xb4\x74\x92\x13\x8d\x57\xc3\xd3\xd9\xbc\x70\
\xd6\xdf\xd2\xa0\x3d\x20\x3d\xea\xd9\xf4\x8c\x5e\xde\xbb\x23\xab\
\xb5\xf0\x7c\xb7\x1a\x31\x5e\xe7\x12\xa3\x5e\xd1\xf3\x1e\x42\xa8\
\x90\xf8\x8b\x37\xa5\x54\x21\x5e\x8e\xdf\x6a\x58\x2f\xeb\x5b\x39\
\xd4\x8b\x70\x08\xff\xa4\x41\xfb\x43\xe6\x43\xee\xd8\xea\xf5\x5b\
\xfd\x79\xdf\xfa\x37\xd4\x5f\xf9\x52\x83\xb1\x03\x70\x0e\x50\xd3\
\
\x00\x00\x00\x3c\
\x00\
\x00\x04\x36\x78\x9c\x73\xf2\x35\x63\x61\x00\x03\x33\x20\xd6\x00\
\x62\x01\x28\x66\x64\x50\x80\x48\x40\xe5\x91\xc1\xff\xff\xff\x47\
\x31\x14\x13\x02\xa4\x9a\x81\x4f\x6c\x28\xe8\x1f\x69\xfe\x27\x04\
\x06\x3a\x7d\xd2\x02\x03\x00\x0b\x7b\x4e\x3c\
\x00\x00\x00\x43\
\x00\
\x00\x04\x36\x78\x9c\x73\xf2\x35\x63\x61\x00\x03\x33\x20\xd6\x00\
\x62\x01\x28\x66\x64\x50\x80\x48\x40\xe5\x91\xc1\xff\xff\xff\x07\
\x05\x26\x15\x0c\xb4\x7b\xc9\x71\xff\x40\xbb\x91\x5a\xe1\x3f\xd0\
\x6e\x25\xc7\xfd\x03\xed\x3e\x6a\x86\x3f\x35\xfd\x03\x00\x64\x1c\
\x63\x27\
\x00\x00\x00\x43\
\x00\
\x00\x04\x36\x78\x9c\x73\xf2\x35\x63\x61\x00\x03\x33\x20\xd6\x00\
\x62\x01\x28\x66\x64\x50\x80\x48\x40\xe5\x91\xc1\xff\xff\xff\x07\
\x05\x26\x15\x0c\xb4\x7b\x29\xf5\xcb\x40\xbb\x91\xd2\x38\x18\x68\
\xb7\x52\xea\x97\x81\x76\x1f\x25\x61\x4f\x4d\xf7\x03\x00\xa9\x01\
\x63\x27\
\x00\x00\x00\x41\
\x00\
\x00\x04\x36\x78\x9c\x73\xf2\x35\x63\x61\x00\x03\x33\x20\xd6\x00\
\x62\x01\x28\x66\x64\x50\x80\x48\x40\xe5\x91\xc1\xff\xff\xff\x47\
\x31\x14\xe3\x02\xe4\x9a\xd3\xd0\xd0\x40\x96\x3b\x40\xfa\x28\x8d\
\x9f\x51\x37\x50\xee\x06\x6a\xa4\x85\xc1\x8e\x01\xec\x24\xa9\x6b\
\
\x00\x00\x00\x60\
\x00\
\x00\x04\x36\x78\x9c\x73\xf2\x35\x63\x61\x00\x03\x33\x20\xd6\x00\
\x62\x01\x28\x66\x64\x50\x80\x48\x40\xe5\x91\xc1\xff\xff\xff\x47\
\x31\x14\x33\xcc\x64\x20\x0a\xe3\xd4\x7b\x06\x0b\x9e\x89\x44\xe3\
\x31\x03\x2c\xfe\x9f\x78\x33\x70\xda\x4f\xa4\x19\x78\xed\x27\xc2\
\x0c\x9c\xfa\x89\x31\x03\x87\xfe\x99\xff\x67\x12\x6d\x06\x2e\xfd\
\x44\x99\x81\x2f\x0e\xc9\x8c\xff\xa1\x8a\x01\xdf\xe2\x18\x72\
\x00\x00\x00\x9e\
\x00\
\x00\x04\x36\x78\x9c\xad\x92\xc1\x0d\x80\x20\x0c\x45\x6b\xe2\x00\
\x8e\xe0\xd1\xbb\x89\x03\x78\x77\x0d\x76\xea\x4e\x5d\x83\x39\xb0\
\x20\x18\x04\xac\xa0\x9a\xbc\x98\xf0\x7d\xbf\x09\x76\xdd\x96\x1e\
\xdc\xb3\x30\x13\x33\x78\x3a\x18\x8f\xc0\xe7\xf1\x63\x8c\xb9\x85\
\x50\x3d\xf2\xe4\xd7\xe6\xa0\x30\xa3\xd6\x9f\x91\x00\xf9\x9d\xd2\
\xe2\xd3\x47\xdf\x7d\xff\xc3\x7c\xfa\x61\x3e\x09\x7e\xe9\xff\x95\
\xe6\x93\xe0\x6b\xad\x2f\xd9\x9b\xf9\xf1\x99\x34\xbf\x66\xe7\x82\
\x5f\xea\x48\xef\xd4\xee\x95\xe4\x93\x02\xc0\x39\x87\x1a\x7c\x77\
\xce\x2b\x1c\x70\x1d\x3e\x2f\xf9\xa1\x23\x10\x77\x58\x37\xce\xa4\
\x7d\xb8\xdc\x33\x77\x9c\x3d\x02\x3b\x16\xf5\x79\x29\
\x00\x00\x00\x58\
\x00\
\x00\x04\x36\x78\x9c\x73\xf2\x35\x63\x61\x00\x03\x33\x20\xd6\x00\
\x62\x01\x28\x66\x64\x50\x80\x48\x40\xe5\x91\xc1\xff\xff\xff\x47\
\x31\x9d\xf0\x99\x99\x69\x0c\x33\x8d\xb1\x87\x39\x48\x1c\x1d\xe3\
\x52\x07\x32\x87\x90\x7e\x7c\xee\xc0\x26\x4f\xac\x5e\x42\xfa\x71\
\xc9\x13\xa3\x1f\x97\x79\xc4\xf8\x1f\x9f\x5d\xc8\x98\x14\xbd\x23\
\x01\x03\x00\x9f\x93\xbc\x67\
\x00\x00\x00\x58\
\x00\
\x00\x04\x36\x78\x9c\x73\xf2\x35\x63\x61\x00\x03\x33\x20\xd6\x00\
\x62\x01\x28\x66\x64\x50\x80\x48\x40\xe5\x91\xc1\xff\xff\xff\x47\
\x31\x95\xf0\x4c\x63\x06\x86\x33\x33\xd3\xc8\xd2\x0b\xd2\x07\xd2\
\x8f\xcb\x5c\x74\x8c\x4d\x0d\x21\xb7\x51\xa2\x1f\xdd\x0c\x72\xec\
\x47\x36\x83\x58\xff\x63\x53\x8f\xcb\x2e\x98\x19\xf8\xdc\x39\x8a\
\x51\x31\x00\xe6\x5a\xbc\x67\
\x00\x00\x00\x5e\
\x00\
\x00\x04\x36\x78\x9c\x73\xf2\x35\x63\x61\x00\x03\x33\x20\xd6\x00\
\x62\x01\x28\x66\x64\x50\x80\x48\x40\xe5\x91\xc1\xff\xff\xff\x47\
\x31\x14\xcf\x04\x86\x07\x31\x18\x97\xde\x33\x58\xf0\x4c\x24\x1a\
\x9f\x19\x60\x31\x12\xcc\xc0\x65\x3f\xb1\x66\xe0\xb3\x9f\x18\x33\
\x70\xe9\x27\xc6\x0c\x9c\xfa\x67\xa6\x11\x6d\x06\x4e\xfd\x44\x98\
\x81\x2f\x0e\xc9\x8d\xff\xa1\x8a\x01\x68\x5a\x17\xd9\
\x00\x00\x00\x3f\
\x00\
\x00\x04\x36\x78\x9c\x73\xf2\x35\x63\x61\x00\x03\x33\x20\xd6\x00\
\x62\x01\x28\x66\x64\x50\x80\x48\x40\xe5\x91\xc1\xff\xff\xff\x07\
\x05\x26\x15\x0c\xb4\x7b\xc9\xf5\xc7\x40\xbb\x8f\x1a\x71\x30\xd0\
\xee\x1c\x8e\xe1\x4f\x2a\xa0\x96\xbd\x00\x7f\xd3\x69\x21\
"
qt_resource_name = b"\
\x00\x05\
\x00\x6f\xa6\x53\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x73\
\x00\x06\
\x07\x03\x7d\xc3\
\x00\x69\
\x00\x6d\x00\x61\x00\x67\x00\x65\x00\x73\
\x00\x07\
\x04\x65\x49\x20\
\x00\x31\
\x00\x33\x00\x32\x00\x2e\x00\x62\x00\x6d\x00\x70\
\x00\x07\
\x0a\x4b\x49\x20\
\x00\x37\
\x00\x31\x00\x38\x00\x2e\x00\x62\x00\x6d\x00\x70\
\x00\x07\
\x05\x35\x49\x20\
\x00\x32\
\x00\x30\x00\x32\x00\x2e\x00\x62\x00\x6d\x00\x70\
\x00\x07\
\x08\x37\x49\x20\
\x00\x35\
\x00\x30\x00\x34\x00\x2e\x00\x62\x00\x6d\x00\x70\
\x00\x07\
\x05\x33\x49\x20\
\x00\x32\
\x00\x30\x00\x30\x00\x2e\x00\x62\x00\x6d\x00\x70\
\x00\x07\
\x08\x33\x49\x20\
\x00\x35\
\x00\x30\x00\x30\x00\x2e\x00\x62\x00\x6d\x00\x70\
\x00\x07\
\x08\x3b\x49\x20\
\x00\x35\
\x00\x30\x00\x38\x00\x2e\x00\x62\x00\x6d\x00\x70\
\x00\x07\
\x08\x45\x49\x20\
\x00\x35\
\x00\x31\x00\x32\x00\x2e\x00\x62\x00\x6d\x00\x70\
\x00\x07\
\x08\x35\x49\x20\
\x00\x35\
\x00\x30\x00\x32\x00\x2e\x00\x62\x00\x6d\x00\x70\
\x00\x07\
\x0a\x57\x49\x20\
\x00\x37\
\x00\x32\x00\x34\x00\x2e\x00\x62\x00\x6d\x00\x70\
\x00\x07\
\x05\x37\x49\x20\
\x00\x32\
\x00\x30\x00\x34\x00\x2e\x00\x62\x00\x6d\x00\x70\
\x00\x07\
\x05\x39\x49\x20\
\x00\x32\
\x00\x30\x00\x36\x00\x2e\x00\x62\x00\x6d\x00\x70\
\x00\x07\
\x05\x3b\x49\x20\
\x00\x32\
\x00\x30\x00\x38\x00\x2e\x00\x62\x00\x6d\x00\x70\
\x00\x07\
\x0a\x5b\x49\x20\
\x00\x37\
\x00\x32\x00\x38\x00\x2e\x00\x62\x00\x6d\x00\x70\
\x00\x07\
\x08\x43\x49\x20\
\x00\x35\
\x00\x31\x00\x30\x00\x2e\x00\x62\x00\x6d\x00\x70\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x10\x00\x02\x00\x00\x00\x0f\x00\x00\x00\x03\
\x00\x00\x00\x22\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x72\x00\x01\x00\x00\x00\x01\x00\x00\x02\x18\
\x00\x00\x00\x4a\x00\x01\x00\x00\x00\x01\x00\x00\x01\x54\
\x00\x00\x00\xea\x00\x01\x00\x00\x00\x01\x00\x00\x04\x34\
\x00\x00\x00\xfe\x00\x01\x00\x00\x00\x01\x00\x00\x04\xd6\
\x00\x00\x01\x12\x00\x01\x00\x00\x00\x01\x00\x00\x05\x32\
\x00\x00\x00\x86\x00\x01\x00\x00\x00\x01\x00\x00\x02\xbd\
\x00\x00\x00\xc2\x00\x01\x00\x00\x00\x01\x00\x00\x03\x8b\
\x00\x00\x00\x5e\x00\x01\x00\x00\x00\x01\x00\x00\x01\xce\
\x00\x00\x00\x9a\x00\x01\x00\x00\x00\x01\x00\x00\x02\xfd\
\x00\x00\x01\x3a\x00\x01\x00\x00\x00\x01\x00\x00\x05\xf0\
\x00\x00\x00\xae\x00\x01\x00\x00\x00\x01\x00\x00\x03\x44\
\x00\x00\x00\x36\x00\x01\x00\x00\x00\x01\x00\x00\x00\xf0\
\x00\x00\x00\xd6\x00\x01\x00\x00\x00\x01\x00\x00\x03\xd0\
\x00\x00\x01\x26\x00\x01\x00\x00\x00\x01\x00\x00\x05\x8e\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x10\x00\x02\x00\x00\x00\x0f\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x22\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x58\xd1\xf5\xbd\x85\
\x00\x00\x00\x72\x00\x01\x00\x00\x00\x01\x00\x00\x02\x18\
\x00\x00\x01\x58\xd1\xf6\x5a\x31\
\x00\x00\x00\x4a\x00\x01\x00\x00\x00\x01\x00\x00\x01\x54\
\x00\x00\x01\x58\xd1\xf6\x68\x44\
\x00\x00\x00\xea\x00\x01\x00\x00\x00\x01\x00\x00\x04\x34\
\x00\x00\x01\x58\xd1\xf6\x78\xa9\
\x00\x00\x00\xfe\x00\x01\x00\x00\x00\x01\x00\x00\x04\xd6\
\x00\x00\x01\x58\xd1\xf6\x87\x97\
\x00\x00\x01\x12\x00\x01\x00\x00\x00\x01\x00\x00\x05\x32\
\x00\x00\x01\x58\xd1\xf6\x98\x97\
\x00\x00\x00\x86\x00\x01\x00\x00\x00\x01\x00\x00\x02\xbd\
\x00\x00\x01\x58\xd1\xfc\x7f\x72\
\x00\x00\x00\xc2\x00\x01\x00\x00\x00\x01\x00\x00\x03\x8b\
\x00\x00\x01\x58\xd1\xfc\x90\x53\
\x00\x00\x00\x5e\x00\x01\x00\x00\x00\x01\x00\x00\x01\xce\
\x00\x00\x01\x58\xd1\xfc\x9c\x64\
\x00\x00\x00\x9a\x00\x01\x00\x00\x00\x01\x00\x00\x02\xfd\
\x00\x00\x01\x58\xd1\xfc\xb5\x7f\
\x00\x00\x01\x3a\x00\x01\x00\x00\x00\x01\x00\x00\x05\xf0\
\x00\x00\x01\x58\xd1\xfc\xc5\xa5\
\x00\x00\x00\xae\x00\x01\x00\x00\x00\x01\x00\x00\x03\x44\
\x00\x00\x01\x58\xd1\xfc\xd4\xe1\
\x00\x00\x00\x36\x00\x01\x00\x00\x00\x01\x00\x00\x00\xf0\
\x00\x00\x01\x58\xd1\xf1\x81\x71\
\x00\x00\x00\xd6\x00\x01\x00\x00\x00\x01\x00\x00\x03\xd0\
\x00\x00\x01\x58\xd1\xf1\xb5\x3d\
\x00\x00\x01\x26\x00\x01\x00\x00\x00\x01\x00\x00\x05\x8e\
\x00\x00\x01\x58\xd1\xf1\xd8\x66\
"
# Select the resource format matching the Qt runtime.  Compare numeric
# version components: the generated string comparison treated "10" < "8"
# as True, wrongly classifying Qt >= 5.10 as older than 5.8.0.
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
    rcc_version = 1
    qt_resource_struct = qt_resource_struct_v1
else:
    rcc_version = 2
    qt_resource_struct = qt_resource_struct_v2
def qInitResources():
    """Register the embedded resource data with Qt's resource system."""
    QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    """Unregister the embedded resource data from Qt's resource system."""
    QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
18,445 | 70daa87e4a11ae90ebe482fa9d58620740a9e11c | from application_business_rules.memo_handle_interactor import MemoHandleInteractor
from flask import request
class FlaskController:
    """Thin HTTP adapter: translates Flask requests into memo use-case calls."""

    def get(self, memo_id: int):
        # Delegate straight to the use-case layer and return its result.
        return MemoHandleInteractor().get(memo_id)

    def save(self, memo_id: int, request: request):
        # NOTE(review): the parameter named `request` shadows the module-level
        # flask.request import; renaming it would change the call interface.
        # memo = request.form["memo"]
        memo = request.json["memo"]  # expects a JSON body with a "memo" field
        return MemoHandleInteractor().save(memo_id, memo)
|
18,446 | 80e8df09ec42ab5bb5236213225d9137b72db656 | # https://github.com/carpedm20/emoji
# http://www.unicode.org/emoji/charts/full-emoji-list.html
class Button:
    """Emoji glyphs used as inline button labels (Unicode escape constants).

    See http://www.unicode.org/emoji/charts/full-emoji-list.html for the
    code points.
    """
    Camera = u'\U0001F4F7'
    FileFolder = u'\U0001F4C1'
    FileFolderOpen = u'\U0001F4C2'
    RightArrowUp = u'\U00002934'
    RightArrowDown = u'\U00002935'
    CrossMarkRed = u'\U0000274C'
    CheckMark = u'\U00002714'
    CheckMarkGreen = u'\U00002705'
    CircleArrow = u'\U0001F504'
    QuestionMark = u'\U00002754'
    MarkWarning = u'\U000026A0'
    NoEntry = u'\U000026D4'
    Like = u'\U0001F44D'
    # Thumbs-down followed by a skin-tone modifier code point.
    Dislike = u'\U0001F44E\U0001F3FF'
    Writing = u'\U0000270D'
    Automobile = u'\U0001F697'
    Hourglass = u'\U000023F3'
    WatchClock = u'\U0000231A'
    AlarmClock = u'\U000023F0'
    Bell = u'\U0001F514'
    Telephone = u'\U0000260E'
    HammerWork = u'\U00002692'
    WrenchWork = u'\U0001F527'
    Worker = u'\U0001F477'
    ManShadow = u'\U0001F464'
    GroupShadow = u'\U0001F465'
    Money = u'\U0001F4B0'
    Calendar = u'\U0001F4C5'
    Clipboard = u'\U0001F4CB'
    WasteBasket = u'\U0001F5D1'
    ChequeredFlag = u'\U0001F3C1'
    Man = u'\U0001F468'
    SpeakingHead = u'\U0001F5E3'
class Country:
    """Country flag emoji built from pairs of regional-indicator code points."""
    UnitedKingdom = u'\U0001F1EC\U0001F1E7'
    Russia = u'\U0001F1F7\U0001F1FA'
    Armenia = u'\U0001F1E6\U0001F1F2'
    Azerbaijan = u'\U0001F1E6\U0001F1FF'
    Belarus = u'\U0001F1E7\U0001F1FE'
    China = u'\U0001F1E8\U0001F1F3'
    Estonia = u'\U0001F1EA\U0001F1EA'
    Georgia = u'\U0001F1EC\U0001F1EA'
    Kyrgyzstan = u'\U0001F1F0\U0001F1EC'
    Kazakhstan = u'\U0001F1F0\U0001F1FF'
    Lithuania = u'\U0001F1F1\U0001F1F9'
    Latvia = u'\U0001F1F1\U0001F1FB'
    Moldova = u'\U0001F1F2\U0001F1E9'
    Poland = u'\U0001F1F5\U0001F1F1'
    Tajikistan = u'\U0001F1F9\U0001F1EF'
    Turkmenistan = u'\U0001F1F9\U0001F1F2'
    Ukraine = u'\U0001F1FA\U0001F1E6'
    Uzbekistan = u'\U0001F1FA\U0001F1FF'
    USA = u'\U0001F1FA\U0001F1F8'
|
18,447 | deeb88b6d7cef7e0266660a1783ed7c370184b87 | from collections import deque
def my_q(n):
    """Read n lines from stdin into a FIFO queue, show it, then drain in order."""
    fifo = deque()
    for _ in range(n):
        fifo.append(input("put stuff in q\n"))
    print(fifo)
    # Pop from the left so items come back out in insertion order.
    while fifo:
        print(fifo.popleft())
my_q(5)
|
18,448 | 4da7f5cd5650eb8a60b8d624d25fa0900b2be33c | """2. Пользователь вводит время в секундах. Переведите время в часы,
минуты и секунды и выведите в формате чч:мм:сс. Используйте форматирование строк. """
# Read a duration in seconds from the user.
user_date = int(input('Пользователь вводит время в секундах: '))
# Reject anything beyond four full days (4 * 86400 = 345600 seconds).
if user_date > 345600:
    print('Пользователь ввёл слишком большое число, за гранью четвёртых суток!!! Программа завершена. ')
    quit()
def time(seconds):
    """Format a duration in whole seconds as an HH:MM:SS string."""
    # NOTE: the name shadows the stdlib `time` module (not imported here).
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    return '{:02d}:{:02d}:{:02d}'.format(hours, minutes, secs)
# Format and display the entered duration as HH:MM:SS.
time_in = time(user_date)
print(time_in)
|
18,449 | f867f9f0f0d263a9dd726095b076402072fc213a | # coding: utf-8
from __future__ import division, print_function
import numpy as np
import tensorflow as tf
slim = tf.contrib.slim
def conv2d(inputs, filters, kernel_size, strides=1):
    """Convolution layer with YOLO/Darknet-style explicit padding.

    For strides == 1 no manual padding is applied and slim.conv2d runs with
    SAME padding, so the spatial size is preserved (e.g. kernel 7 on a
    2x24x24x3 input stays 24x24).  For strides > 1 the input is explicitly
    zero-padded by kernel_size - 1 and the convolution runs with VALID
    padding, reproducing Darknet's downsampling behaviour.

    :param inputs: input feature map (NHWC tensor)
    :param filters: number of output channels
    :param kernel_size: convolution kernel size
    :param strides: convolution stride
    :return: convolved feature map
    """
    def _fixed_padding(inputs, kernel_size):
        # Pad H and W symmetrically, e.g. kernel 7 -> pad_total 6 -> 3 on
        # each side; with an odd total the extra row/col goes on the end.
        pad_total = kernel_size - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
                                        [pad_beg, pad_end], [0, 0]], mode='CONSTANT')
        return padded_inputs
    if strides > 1:
        inputs = _fixed_padding(inputs, kernel_size)
    inputs = slim.conv2d(inputs, filters, kernel_size, stride=strides,
                         padding=('SAME' if strides == 1 else 'VALID'))
    return inputs
def darknet53_body(inputs):
    """Darknet-53 backbone.

    No activation or batch-norm arguments appear here -- they are assumed to
    be configured globally via slim.arg_scope by the caller.

    :param inputs: input image batch, e.g. (-1, 416, 416, 3)
    :return: three feature maps at increasing depth / decreasing resolution,
             e.g. (-1, 52, 52, 256), (-1, 26, 26, 512), (-1, 13, 13, 1024)
    """
    def res_block(inputs, filters):
        """Residual block: 1x1 conv (filters) then 3x3 conv (2*filters) + shortcut.

        Stride is always 1, so the spatial size is unchanged, e.g. an input
        of (-1, 207, 207, 64) comes out as (-1, 207, 207, 64).

        :param inputs: input feature map
        :param filters: base channel count of the block
        """
        shortcut = inputs
        net = conv2d(inputs, filters * 1, 1)
        net = conv2d(net, filters * 2, 3)
        net = net + shortcut
        return net
    # Stem: the first two convolutions of darknet53, no residual connection.
    net = conv2d(inputs, 32, 3, strides=1)
    net = conv2d(net, 64, 3, strides=2)
    # res_block * 1
    net = res_block(net, 32)
    net = conv2d(net, 128, 3, strides=2)  # stride-2 conv halves H and W
    # res_block * 2
    for i in range(2):
        net = res_block(net, 64)
    net = conv2d(net, 256, 3, strides=2)
    # res_block * 8
    for i in range(8):
        net = res_block(net, 128)
    route_1 = net
    net = conv2d(net, 512, 3, strides=2)
    # res_block * 8
    for i in range(8):
        net = res_block(net, 256)
    route_2 = net
    net = conv2d(net, 1024, 3, strides=2)
    # res_block * 4
    for i in range(4):
        net = res_block(net, 512)
    route_3 = net
    return route_1, route_2, route_3
def yolo_block(inputs, filters):
    """YOLOv3 detection-head block: five alternating 1x1/2x-channel 3x3 convs,
    plus one final 3x3 conv after the branch point.

    All convolutions use the default stride of 1, so only the channel depth
    changes, alternating between `filters` and `filters * 2`.

    :param inputs: feature map, e.g. (-1, 13, 13, 1024)
    :param filters: base channel count, e.g. 512
    :return: (route, net) -- e.g. (-1, 13, 13, 512) and (-1, 13, 13, 1024);
             `route` feeds the upsample path, `net` feeds the detection conv.
    """
    net = conv2d(inputs, filters * 1, 1)
    net = conv2d(net, filters * 2, 3)
    net = conv2d(net, filters * 1, 1)
    net = conv2d(net, filters * 2, 3)
    net = conv2d(net, filters * 1, 1)
    route = net
    net = conv2d(net, filters * 2, 3)
    return route, net
def upsample_layer(inputs, out_shape):
    """Upsample `inputs` to the spatial size of `out_shape` via nearest neighbour.

    :param inputs: input feature map (NHWC)
    :param out_shape: target shape; only out_shape[1] (height) and
                      out_shape[2] (width) are used
    :return: the resized tensor
    """
    new_height, new_width = out_shape[1], out_shape[2]
    # NOTE: here height is the first
    inputs = tf.image.resize_nearest_neighbor(inputs, (new_height, new_width), align_corners=True, name='upsampled')
    return inputs
#自己加的测试尺寸代码
# inputs = tf.random_normal(shape=((1,416,416,3)))
#
# route_1, route_2, route_3 = darknet53_body(inputs)
# print(route_1.get_shape(),route_2.get_shape(),route_3.get_shape())
|
18,450 | 8dabb768c29bd97092f30178ac3d30edde0680b3 | import numpy as np
VERBOSE = True
DATA_DIR = "../data/"
MODEL_DIR = "../models/"
MNIST = "mnist"
CIFAR = "cifar"
SVHN = "svhn"
DATASET_NAMES = [MNIST, CIFAR, SVHN]
BIM = "bim"
CW = "cw"
FGSM = "fgsm"
JSMA = "jsma"
PGD = "pgd"
APGD = "apgd"
DF = "deepfool"
NF = "newtonfool"
SA = "squareattack"
SHA = "shadowattack"
ST = "spatialtransformation"
WA = "wasserstein"
ATTACK_NAMES = [APGD, BIM, CW, DF, FGSM, JSMA, NF, PGD, SA, SHA, ST, WA]
# the data is in range(-.5, .5)
def load_data(dataset_name):
    """Load the benign train/test arrays for `dataset_name`.

    Pixel values are in the range (-.5, .5).  Raises AssertionError for an
    unknown dataset name.
    """
    assert dataset_name in DATASET_NAMES
    benign_dir = DATA_DIR + dataset_name + '/benign/'
    x_train = np.load(benign_dir + 'x_train.npy')
    y_train = np.load(benign_dir + 'y_train.npy')
    x_test = np.load(benign_dir + 'x_test.npy')
    y_test = np.load(benign_dir + 'y_test.npy')
    return x_train, y_train, x_test, y_test
def load_adv_data(dataset, model, attack):
    """Load the adversarial test arrays for a dataset/model/attack combination."""
    adv_dir = "{}{}/adv/{}/{}/".format(DATA_DIR, dataset, model, attack)
    x_adv = np.load(adv_dir + "x_test.npy")
    y_adv = np.load(adv_dir + "y_test.npy")
    return x_adv, y_adv
|
18,451 | 8b81c73149171bdb89a00849614e67d5e2a22db1 | #! -*- coding: utf-8 -*-
# Control database acesses and data queries.
# TODO list ==============================================================
# Incrementar query para checar se ja existe determinado ProfileData no
# banco de dados
#=========================================================================
import os
import re
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.dialects.postgresql import array
from geoalchemy import *
from model import *
import matplotlib.pyplot as plt
import numpy as np
# from wx.lib.pubsub import setupkwargs
# from wx.lib.pubsub import pub as Publisher
import platform
class SessionManager(object):
    """Create the SQLAlchemy engine and one shared session for peldDB."""
    def __init__(self):
        # Windows dev boxes use a local password; elsewhere the DSN relies on
        # default socket credentials.  NOTE(review): credentials hard-coded.
        if platform.system() == 'Windows':
            self.engine = create_engine('postgresql://postgres:1234@localhost:5432/peldDB', echo=False)
        else:
            self.engine = create_engine('postgresql://:5432/peldDB', echo=False)
        Session = sessionmaker()
        Session.configure(bind=self.engine)
        self.session = Session()
S = SessionManager()
def dbQuit():
    # Close the module-wide session (S is instantiated at import time above).
    S.session.close()
class InsertCruise(object):
    """Persist one cruise and its profiles (CTD/XBT casts) into the database."""
    def __init__(self, cruise):
        # `cruise` is a nested dict with (at least) 'cruise' and 'instrument' keys.
        self.obj_profiles = []
        self.cruise = cruise
        self.metadata = []  # one metadata row per saved profile (see save_profile)
    def get_or_create(self, S, table, **kwargs):
        """ Generic method to get some data from db, if does not exists yet, creates a new record."""
        # Side effect: sets self.data_exists, which save_profile later reads to
        # decide whether the ProfileData rows still need inserting.
        instance = S.session.query(table).filter_by(**kwargs).first()
        if not instance:
            instance = table(**kwargs)
            S.session.add(instance)
            print "NEW %s instance created!" %(table)
            S.session.commit()
            self.data_exists = False
        else:
            print "Instance of %s EXISTS" %(table)
            self.data_exists = True
        return instance
    def save_cruise(self, S):
        """Create (or fetch) the Country/Institution/Cruise rows for self.cruise.

        Stores the resulting Cruise row on self.cruise_obj for save_profile.
        """
        if self.cruise['cruise']['institution']:
            if self.cruise['cruise']['institution']['country']:
                country = self.cruise['cruise']['institution']['country']
                country_obj = self.get_or_create(S, Country, name=country)
                country_id = country_obj.id
            else:
                country_id = None
            institution = self.cruise['cruise']['institution']['name']
            institution_obj = self.get_or_create(S, Institution, name=institution,
                                                country=country_id)
            institution_id = institution_obj.id
        else:
            institution_id = None
        cruise = self.cruise['cruise']
        self.cruise_obj = self.get_or_create(S, Cruise, cruise_name=cruise['cruise_name'],
                                             platform_name=cruise['platform_name'],
                                             platform_type=cruise['platform_type'],
                                             institution=institution_id,
                                             start_date=cruise['start_date'],
                                             end_date=cruise['end_date'], )
        S.session.commit()
    def save_profile(self, S, profile):
        """Persist one cast: Variable/Instrument/Profile rows plus raw ProfileData.

        Also appends a flattened metadata row (used by save_last_loaded) for
        the GUI's "last cruise" file.  Must be called after save_cruise, which
        sets self.cruise_obj.
        """
        temp_variable = dict(long_name=u'Temperature', short_name=u'temp', unit=u'C')
        salt_variable = dict(long_name=u'Salinity', short_name=u'salt', unit=u' ')
        instrument = self.cruise['instrument']
        # CTD casts carry temperature and salinity; XBT carries temperature only.
        if instrument['itype'] == 'CTD':
            temp_obj = self.get_or_create(S, Variable, long_name=temp_variable['long_name'],
                                          short_name=temp_variable['short_name'],
                                          unit=temp_variable['unit'])
            salt_obj = self.get_or_create(S, Variable, long_name=salt_variable['long_name'],
                                          short_name=salt_variable['short_name'],
                                          unit=salt_variable['unit'])
        elif instrument['itype'] == 'XBT':
            temp_obj = self.get_or_create(S, Variable, long_name=temp_variable['long_name'],
                                          short_name=temp_variable['short_name'],
                                          unit=temp_variable['unit'])
        elif instrument['itype'] == 'ARGO':
            pass
        else:
            pass
        instrument = self.cruise['instrument']
        filename = profile.filename.split('/')[-1]
        inst_obj = self.get_or_create(S, Instrument, itype=instrument['itype'])
        # Create Point object
        profile.lat = interpret_coord(profile.lat)
        profile.lon = interpret_coord(profile.lon)
        point = WKTSpatialElement('POINT(%f %f)' % (profile.lon, profile.lat), 4326)
        # print "Creating profile object..."
        # print profile.filename
        # print '-----------------------------------------'
        profile_obj = self.get_or_create(S, Profile,
                                         coord=point,
                                         date=profile.date,
                                         time=profile.time,
                                         local_depth=profile.local_depth,
                                         instrument=inst_obj.id,
                                         cruise=self.cruise_obj.id, # because cruise comes from another function
                                         origin_header=str(profile.header),
                                         filename=filename)
        date = profile.date.strftime("%d/%m/%Y")
        # eval_cargo holds 21 per-test evaluation flags; default all to "0".
        if not profile_obj.eval_cargo:
            eval_cargo = list("0"*21)
        else:
            eval_cargo = []
            for char in profile_obj.eval_cargo:
                eval_cargo.append(char)
        # Flatten [filename, cruise, <21 flags>, lat, lon, date, platform,
        # itype, profile id] into one metadata row.
        metadata = []
        metalist = [ filename, self.cruise['cruise']['cruise_name'], eval_cargo,
                     "%0.2f" %(profile.lat), "%0.2f" %(profile.lon),
                     date, self.cruise['cruise']['platform_name'],
                     instrument['itype'], profile_obj.id ]
        for value in metalist:
            if type(value) == list:
                metadata.extend(value)
            else:
                metadata.append(value)
        self.metadata.append(metadata)
        # ========================================================================================
        # This is a band-aid. We need to figure out how to test it at get_or_create using arrays
        # We need to try to make a more suitable QUERY that considers arrays comparison
        if not self.data_exists:
            print "NEW %s instance created!" %(ProfileData)
            if instrument['itype'] == 'CTD':
                print " --------------> Inserting TEMPERATURE data!"
                data_obj = ProfileData(values=profile.temp,
                                       depths=profile.depth,
                                       status='raw',
                                       variable=temp_obj.id,
                                       profile_id=profile_obj.id)
                S.session.add(data_obj)
                print " --------------> Inserting SALINITY data!"
                data_obj = ProfileData(values=profile.salt,
                                       depths=profile.depth,
                                       status='raw',
                                       variable=salt_obj.id,
                                       profile_id=profile_obj.id)
                S.session.add(data_obj)
            elif instrument['itype'] == 'XBT':
                print " --------------> Inserting TEMPERATURE data!"
                data_obj = ProfileData(values=profile.temp,
                                       depths=profile.depth,
                                       status='raw',
                                       variable=temp_obj.id,
                                       profile_id=profile_obj.id)
                S.session.add(data_obj)
            elif instrument['itype'] == 'ARGO':
                pass
            else:
                pass
        else:
            print "Instance of %s EXISTS" %(ProfileData)
        # ========================================================================================
        S.session.commit()
    def save_last_loaded(self):
        """Dump the accumulated metadata rows to last_cruise.meta (';'-separated)."""
        if "last_cruise.meta" in os.listdir('.'):
            os.remove("last_cruise.meta")
        f = open("last_cruise.meta", 'w')
        for line in self.metadata:
            for field in line:
                f.write(str(field) + ";")
            f.write("\n")
        f.close()
class QualifyCruise(object):
    """Run quality-control tests over all profiles of the last-loaded cruise.

    The cruise is described by a ';'-separated metafile written by
    InsertCruise.save_last_loaded; profile data is fetched from the DB.
    """
    def __init__(self, cruise_metafile):
        super(QualifyCruise, self).__init__()
        self.cruise = self.load_metafile(cruise_metafile)
        # Per the metafile layout, field -3 is the instrument type (itype).
        self.instrument = self.cruise[0][-3]
        self.query_profiles()
    def load_metafile(self, metafile):
        # Parse the ';'-separated metafile into a list of field lists.
        f = open(metafile)
        cruise = []
        for line in f.readlines():
            cruise.append(line.split(';'))
        return cruise
    def query_profiles(self):
        # Fetch the ProfileData rows for every station listed in the metafile.
        q = QueryCargoDB()
        self.profiles = q.get_last_cruise(self.cruise)
    def test_single_profile(self, single_profile):
        # ProfileTests is expected from the star-import of `model` -- TODO confirm.
        self.tester = ProfileTests(single_profile, self.instrument, S)
        single_profile[0].profile.eval_cargo = self.tester.eval_cargo
    def test_all_profiles(self):
        """Test every profile, writing a text report and a log, then update the metafile."""
        report = open('qualify_reports/%s_qualify_report.txt' %(self.cruise[0][1].replace(' ','')), 'w')
        log = open('qualify_reports/%s.log' %(self.cruise[0][1].replace(' ','')), 'w')
        evals_cargo = []
        for p in range(len(self.profiles)):
            report.writelines("\n" + "="*50)
            report.writelines( "\n TESTING PROFILE %s \n" %self.profiles[p][0].profile.filename )
            report.writelines("="*50 + "\n")
            # test_single_profile returns None; the results live on self.tester.
            tester = self.test_single_profile(self.profiles[p])
            evals_cargo.append(self.tester.eval_cargo)
            for line in self.tester.output:
                report.writelines(line)
                report.write("\n")
            report.writelines("ERROS: \n")
            for error in self.tester.errors.values():
                report.write(error.encode("utf-8", "replace"))
                report.write("\n")
            report.writelines("\nWARNINGS: \n")
            for warning in self.tester.warnings.values():
                report.write(warning.encode("utf-8", "replace"))
                report.write("\n\n\n")
            report.write(" * --- * --- * --- * --- * --- * --- * --- * --- * \n")
            for line in self.tester.log:
                log.write(line)
                log.write("\n")
        report.close()
        log.close()
        S.session.commit()
        self.metafile_writer(evals_cargo)
    def metafile_writer(self, evals_cargo):
        """Rewrite last_cruise.meta, replacing the evaluation-flag columns.

        Columns first_eval..last_eval of each station row are overwritten with
        the fresh flags from evals_cargo; everything else is copied through.
        """
        if "last_cruise.meta" in os.listdir('.'):
            os.remove("last_cruise.meta")
        f = open('last_cruise.meta', 'w')
        first_eval = 2
        last_eval = (first_eval + len(evals_cargo[0])) - 1
        metadata = []
        row = 0
        for station in self.cruise:
            line = []
            col = 0
            for field in station:
                if col >= first_eval and col <= last_eval:
                    line.append(evals_cargo[row][col-first_eval])
                    col += 1
                else:
                    line.append(field)
                    col += 1
            metadata.append(line)
            row += 1
        for line in metadata:
            for field in line:
                # Skip the trailing newline token left over from line.split(';').
                if field == "\n":
                    pass
                else:
                    f.write(field + ";")
            f.write("\n")
class ExportProfiles(QualifyCruise):
    """Export the loaded cruise's profiles to files (currently ASCII only)."""
    def __init__(self, cruise_metafile):
        super(ExportProfiles, self).__init__(cruise_metafile)
    def export_ascii(self, pathname):
        """Write one <filename>.dat per profile with depth/temperature/salinity columns.

        NOTE(review): `temp`/`salt` are only bound when a matching variable is
        present; a CTD-less profile would raise NameError here -- confirm that
        this method is only used for CTD cruises.
        """
        for profile in self.profiles:
            depth = profile[0].depths
            lon = profile[0].profile.coord.coords(S.session)[0]
            lat = profile[0].profile.coord.coords(S.session)[1]
            for profile_data in profile:
                if profile_data.varname.long_name == 'Temperature':
                    temp = profile_data.values
                elif profile_data.varname.long_name == 'Salinity':
                    salt = profile_data.values
            with open( "%s/%s.dat" %(pathname, profile[0].profile.filename.split('.')[0]), 'w') as f:
                f.writelines("Coordenadas --> LON: %s ; LAT: %s\n" %(lon, lat))
                f.writelines("Profundidade Temperatura Salinidade\n")
                for d, t, s in zip(depth, temp, salt):
                    f.writelines("%0.4f %0.4f %0.4f \n" %(d, t, s))
    def export_mat(self, pathname):
        # Not implemented yet (MATLAB .mat export placeholder).
        pass
    def export_netcdf(self, pathname):
        # Not implemented yet (NetCDF export placeholder).
        pass
class ExportReport(QualifyCruise):
    """Copy the loaded cruise's qualify report and log to a user-chosen path."""
    def __init__(self, cruise_metafile):
        super(ExportReport, self).__init__(cruise_metafile)
    def export(self, dirpath, filename):
        """Copy the cruise report/log into `dirpath` as <filename>.txt / .log.

        Uses shutil.copy instead of os.system("cp ...") so the copy is
        portable and safe for paths containing spaces or shell
        metacharacters.  Note: a missing source file now raises IOError
        instead of `cp` silently printing an error.
        """
        import shutil
        origin_dir = "qualify_reports/"
        cruise_name = self.cruise[0][1].replace(' ','')
        shutil.copy("%s%s_qualify_report.txt" %(origin_dir, cruise_name),
                    "%s/%s.txt" %(dirpath, filename))
        shutil.copy("%s%s.log" %(origin_dir, cruise_name),
                    "%s/%s.log" %(dirpath, filename))
class QueryCargoDB(object):
    """Read-only queries against the cargo database via the shared session S."""
    def __init__(self):
        pass
    def get_last_cruise(self, metadata):
        """Return, per metafile row, the list of ProfileData rows for that profile.

        Field -2 of each row is the profile id (see InsertCruise.save_profile).
        """
        # instrument = metadata[-3]
        query = []
        for profile in metadata:
            query.append( S.session.query(ProfileData).filter_by(profile_id=int(profile[-2])).all() )
        return query
    def get_single_profile(self, name):
        """Fetch one profile by filename.

        Returns (depth, temp, salt, lon, lat) as numpy arrays / floats.
        NOTE(review): assumes data[0] is temperature and data[1] is salinity,
        matching the insertion order in InsertCruise -- confirm for XBT casts.
        """
        q = S.session.query(Profile).filter_by(filename=name).first()
        lon = q.coord.coords(S.session)[0]
        lat = q.coord.coords(S.session)[1]
        print q
        depth = q.data[0].depths
        temp = q.data[0].values
        salt = q.data[1].values
        return np.array(depth), np.array(temp), np.array(salt), lon, lat
|
18,452 | 0874b91c1d246279881b21c6698bfcffde5aac25 | # -*- encoding: utf-8 -*-
from django.shortcuts import render
from rest_framework import viewsets
from .models import WebSlide
from .serializers import WebSlideModelSerializer
class WebSlideModelViewSet(viewsets.ModelViewSet):
    """CRUD API endpoints for WebSlide objects."""
    # The `model` attribute was deprecated in DRF 2.4 and removed in 3.0;
    # `queryset` + `serializer_class` fully determine the viewset, so the
    # redundant `model = WebSlide` line is dropped.
    serializer_class = WebSlideModelSerializer
    queryset = WebSlide.objects.all()
|
18,453 | febb11ca8f6c4ba1689efe6007fc2665f66e0b8b | from pyautogui import press, typewrite, hotkey
import keyboard
from keyboard import press
import time
# Brute-force typer: emits 4-digit codes 0000..9999 into the focused window.
# Each press of Esc releases one batch of 100 codes; each code is typed,
# confirmed with Enter, then erased with six Backspaces.
x = 0
#time.sleep(5)
while x < 10000:
    # Block until the user presses Esc before each batch of 100 codes.
    keyboard.wait('esc')
    for i in range(0, 100):
        y = str(x).zfill(4)  # zero-padded 4-digit code
        # print(y)
        # typewrite(y)
        # hotkey('enter')
        # hotkey('backspace')
        # hotkey('backspace')
        # hotkey('backspace')
        # hotkey('backspace')
        keyboard.write(y)
        time.sleep(0.001)
        hotkey('enter')
        time.sleep(0.001)
        # NOTE(review): `press` here is keyboard.press -- the later
        # `from keyboard import press` shadows pyautogui's press; confirm
        # that is intentional.
        press('backspace')
        time.sleep(0.001)
        press('backspace')
        time.sleep(0.001)
        press('backspace')
        time.sleep(0.001)
        press('backspace')
        time.sleep(0.001)
        press('backspace')
        time.sleep(0.001)
        press('backspace')
        x = x + 1
        time.sleep(0.05)
18,454 | ce1faba1e1b82874ff9f57d26d1e08c6813eb5aa | import random
courses = ["410244","410245"]
electives= {"410244":["410244A","410244B","410244C","410244D"],"410245":["410245A","410245B","410245C","410245D"]}
cnt = 1
for i in range(481,721):
for j in courses:
temp = [1,2,3,4]
for k in electives[j]:
t = random.choice(temp)
print("insert into student_pref values({0},\"{1}\",{2},\"{3}\",\"{4}\");".format(cnt,j,t,"stud"+str(i),k))
cnt+=1
temp.remove(t)
courses = ['510105']
electives = {'510105':['510105A','510105B','510105C','510105D','510105E']}
for i in range(721,801):
for j in courses:
temp = [1,2,3,4,5]
for k in electives[j]:
t = random.choice(temp)
print("insert into student_pref values({0},\"{1}\",{2},\"{3}\",\"{4}\");".format(cnt,j,t,"stud"+str(i),k))
cnt+=1
temp.remove(t)
courses = ['610103']
electives = {'610103':['610103A','610103B','610103C','610103D','610103E']}
for i in range(241,321):
for j in courses:
temp = [1,2,3,4,5]
for k in electives[j]:
t = random.choice(temp)
print("insert into student_pref values({0},\"{1}\",{2},\"{3}\",\"{4}\");".format(cnt,j,t,"stud"+str(i),k))
cnt+=1
temp.remove(t)
|
18,455 | ea896c18a30fba334dfb56c22e964527e783d380 | from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import pytest
from time import sleep
import random
import string
def test_create_empty_note(app):
    """An empty note must be rejected with a visible error hint."""
    driver = app.driver
    # open the home page
    app.open_home_page()
    # try to create a note with empty text
    inputtext = ""
    app.create_note(inputtext)
    # an error hint element must appear
    assert app.is_element_present(By.CSS_SELECTOR, "#error_note_is_empty"), "Error hint doesn't exist"
    # and it must contain the expected (Russian) error message
    assert "Ошибка: текст записки пуст" in driver.find_element_by_css_selector("#error_note_is_empty").text, "Error hint isn't correct"
def test_create_and_read_note_with_confirm(app):
    """Create a note with random text and read it back after confirming."""
    driver = app.driver
    app.open_home_page()
    # create a note with random NON-EMPTY text
    # (was randrange(200), which can yield length 0 and trip the
    # empty-note validation, making the test flaky)
    inputtext = ''.join([random.choice(string.ascii_letters + string.digits) for i in range(random.randrange(1, 200))])
    app.create_note(inputtext)
    # fetch the note link and open it
    link = driver.find_element_by_css_selector("#show_link").get_attribute("href")
    driver.get(link)
    # confirm reading by pressing the button
    driver.find_element_by_css_selector("#confirm_button").click()
    # copy the note text and persist it via a file (no other way to read it back)
    app.copy_note_text()
    app.write_text_in_file()
    outputtext = app.read_text_from_file()
    # the note we opened must contain exactly the text we created
    assert inputtext == str(outputtext), "Output text differ from input text"
def test_create_and_read_note_without_confirm(app):
    """A note created with 'destroy without confirmation' opens directly."""
    driver = app.driver
    # open the home page
    app.open_home_page()
    # random NON-EMPTY text (randrange(1, 200) avoids the empty-note flake)
    inputtext = ''.join([random.choice(string.ascii_letters + string.digits) for i in range(random.randrange(1, 200))])
    # expand the advanced options
    driver.find_element_by_css_selector("#advanced_options_show").click()
    # untick the flag so the note is destroyed without confirmation
    driver.find_element_by_css_selector("#destroy_without_confirmation").click()
    # finish creating the note
    app.create_note_with_scrolling(inputtext)
    # fetch the note link and open it
    link = driver.find_element_by_css_selector("#show_link").get_attribute("href")
    driver.get(link)
    # copy the note text and persist it via a file
    app.copy_note_text()
    app.write_text_in_file()
    outputtext = app.read_text_from_file()
    # the note we opened must contain exactly the text we created
    assert inputtext == str(outputtext), "Output text differ from input text"
def test_read_destroyed_note_via_link(app):
    """A confirmed (read) note is destroyed: reopening its link shows an error."""
    driver = app.driver
    app.open_home_page()
    # random NON-EMPTY text (randrange(1, 200) avoids the empty-note flake)
    inputtext = ''.join([random.choice(string.ascii_letters + string.digits) for i in range(random.randrange(1, 200))])
    app.create_note(inputtext)
    # fetch the note link and open it
    link = driver.find_element_by_css_selector("#show_link").get_attribute("href")
    driver.get(link)
    # extract the note id from the message
    # (was get_attribute("strong"): that looks up an attribute NAMED "strong"
    # and always returns None; the id is the element's text)
    id = driver.find_element_by_css_selector("#link_ok p strong").text
    # confirm reading by pressing the button
    driver.find_element_by_css_selector("#confirm_button").click()
    driver.get(link)
    # the note must be gone: an error message is shown
    assert app.is_element_present(By.CSS_SELECTOR, "#note_error"), "Error hint doesn't exist"
    # and the error must reference the id of the destroyed note
    error = driver.find_element_by_css_selector("#note_error p").text
    assert str(id) in str(error), "Incorrect note was destroyed"
def test_create_and_read_note_with_correct_password(app):
    """A password-protected note decrypts with the password it was created with."""
    driver = app.driver
    app.open_home_page()
    # random NON-EMPTY text (randrange(1, 200) avoids the empty-note flake)
    inputtext = ''.join([random.choice(string.ascii_letters + string.digits) for i in range(random.randrange(1, 200))])
    # expand the advanced options
    driver.find_element_by_css_selector("#advanced_options_show").click()
    # fill both password fields with the same random NON-EMPTY value
    # (was randrange(50): a zero-length password broke the flow)
    password = ''.join([random.choice(string.ascii_letters + string.digits) for i in range(random.randrange(1, 50))])
    driver.find_element_by_css_selector("#manual_password").send_keys(password)
    driver.find_element_by_css_selector("#manual_password_confirm").send_keys(password)
    # finish creating the note
    app.create_note_with_scrolling(inputtext)
    link = driver.find_element_by_css_selector("#show_link").get_attribute("href")
    # copy the password hint the site shows and persist it via a file
    driver.find_element_by_css_selector("#show_password").click()
    driver.find_element_by_css_selector("#select_password").click()
    driver.find_element_by_css_selector("#note_password_input").send_keys(Keys.CONTROL, 'c')
    app.write_text_in_file()
    password2 = app.read_text_from_file()
    # the hinted password must match the one we typed
    assert password == password2, "Incorrect password is showed"
    # open the note
    driver.get(link)
    # confirm reading by pressing the button
    driver.find_element_by_css_selector("#confirm_button").click()
    # enter the password
    driver.find_element_by_css_selector("#note_password").send_keys(password)
    driver.find_element_by_css_selector("#decrypt_button").click()
    # copy the note text and persist it via a file
    app.copy_note_text()
    app.write_text_in_file()
    outputtext = app.read_text_from_file()
    # the decrypted note must contain exactly the text we created
    assert inputtext == str(outputtext), "Output text differ from input text"
def test_create_and_read_note_with_incorrect_password(app):
    """Decrypting a protected note with a wrong password shows an error."""
    driver = app.driver
    app.open_home_page()
    # random NON-EMPTY text (randrange(1, 200) avoids the empty-note flake)
    inputtext = ''.join([random.choice(string.ascii_letters + string.digits) for i in range(random.randrange(1, 200))])
    # expand the advanced options
    driver.find_element_by_css_selector("#advanced_options_show").click()
    # fill both password fields with the same random NON-EMPTY value
    # (was randrange(50): with a zero-length password, password*2 below
    # would equal the real password and the note would decrypt successfully)
    password = ''.join([random.choice(string.ascii_letters + string.digits) for i in range(random.randrange(1, 50))])
    driver.find_element_by_css_selector("#manual_password").send_keys(password)
    driver.find_element_by_css_selector("#manual_password_confirm").send_keys(password)
    # finish creating the note
    app.create_note_with_scrolling(inputtext)
    # fetch the note link and open it
    link = driver.find_element_by_css_selector("#show_link").get_attribute("href")
    driver.get(link)
    # confirm reading by pressing the button
    driver.find_element_by_css_selector("#confirm_button").click()
    # enter a wrong password (doubling a non-empty string never matches it)
    driver.find_element_by_css_selector("#note_password").send_keys(password*2)
    driver.find_element_by_css_selector("#decrypt_button").click()
    # an error message about the wrong password must be shown
    assert "Введен неверный пароль" in driver.find_element_by_css_selector("#error_password_incorrect").text, "Error hint isn't correct"
# Previously failing: get_attribute("p") always returned None (there is no
# attribute named "p"); fixed below by reading .text.  xfail kept until the
# fix is verified against the live site -- an unexpected pass is harmless.
@pytest.mark.xfail
def test_read_destroyed_note_via_button(app):
    """Destroying a note via the destroy button makes its link show an error."""
    driver = app.driver
    app.open_home_page()
    # random NON-EMPTY text (randrange(1, 200) avoids the empty-note flake)
    inputtext = ''.join([random.choice(string.ascii_letters + string.digits) for i in range(random.randrange(1, 200))])
    app.create_note(inputtext)
    # fetch the note link, copy it and persist it via a file
    link = driver.find_element_by_css_selector("#show_link").get_attribute("href")
    driver.find_element_by_css_selector("#select_link").click()
    driver.find_element_by_css_selector("#note_link_input").send_keys(Keys.CONTROL, 'c')
    app.write_text_in_file()
    # extract the note id from the copied link
    # NOTE(review): slice [22:29] assumes a fixed URL prefix and 7-char id -- confirm
    outputtext = app.read_text_from_file()
    id = str(outputtext)[22:29]
    # destroy the note via the button
    driver.find_element_by_css_selector("#destroy_link").click()
    driver.find_element_by_css_selector("#confirm_button").click()
    driver.get(link)
    # the note must be gone: an error message is shown
    assert app.is_element_present(By.CSS_SELECTOR, "#note_error"), "Error hint doesn't exist"
    # the id lives in the element's text (get_attribute("p") was always None)
    error = driver.find_element_by_css_selector("#note_error p").text
    # the error must reference the id of the destroyed note
    assert id in str(error), "Incorrect note was destroyed"
# Previously failing: get_attribute("p") always returned None; fixed below by
# reading .text.  xfail kept until verified against the live site.
# NOTE(review): this test sleeps for over an hour in total by design.
@pytest.mark.xfail
def test_self_destruction_an_hour(app):
    """A note set to self-destruct after an hour survives until the deadline."""
    driver = app.driver
    app.open_home_page()
    # random NON-EMPTY text (randrange(1, 200) avoids the empty-note flake)
    inputtext = ''.join([random.choice(string.ascii_letters + string.digits) for i in range(random.randrange(1, 200))])
    # expand the advanced options
    driver.find_element_by_css_selector("#advanced_options_show").click()
    # set self-destruction one hour from now
    driver.find_element_by_css_selector("#duration_hours").click()
    driver.find_element_by_css_selector("[value='1']").click()
    # finish creating the note
    app.create_note_with_scrolling(inputtext)
    # fetch the note link and open it after a random interval of less than an hour
    link = driver.find_element_by_css_selector("#show_link").get_attribute("href")
    time = (random.randrange(1, 55))*60
    sleep(time)
    driver.get(link)
    # the note must still exist; extract its link from the message
    link2 = driver.find_element_by_css_selector("#note_link").get_attribute("href")
    # the link in the message must match the original one
    assert link == link2, "Link in hint is incorrect"
    # extract the id from the link
    # NOTE(review): slice [22:29] assumes a fixed URL prefix and 7-char id -- confirm
    id = str(link2)[22:29]
    # wait out the remaining time (with a small margin) and reopen the link
    time2 = 65*60 - time
    sleep(time2)
    driver.get(link)
    # the note must be gone: an error message is shown
    assert app.is_element_present(By.CSS_SELECTOR, "#note_error"), "Error hint doesn't exist"
    # the id lives in the element's text (get_attribute("p") was always None)
    error = driver.find_element_by_css_selector("#note_error p").text
    assert str(id) in str(error), "Incorrect note was destroyed"
|
18,456 | b9bb4da511d6717469920e0a32a06d71bae79de4 | import os
import sys
import traceback
import logging
from jinja2 import Template, TemplateError
__copyright__ = 'Copyright (C) 2019, Nokia'
LOGGER = logging.getLogger(__name__)
class TemplateOSError(OSError):
    """Raised when a TOC template or one of its render targets cannot be accessed."""
    pass
def create_dir(filename):
    """Create the parent directory of *filename* if it does not exist yet."""
    LOGGER.debug("Creating base directory for %s", filename)
    absolute_path = os.path.abspath(filename)
    path_to_dir = os.path.dirname(absolute_path)
    # exist_ok avoids the check-then-create race, and we reuse the absolute
    # directory path -- the original re-derived it from the raw name, where a
    # bare filename would yield dirname("") and make makedirs("") raise
    os.makedirs(path_to_dir, exist_ok=True)
def add_toc_tree(root_dir, template_toc):
    """Render a per-directory TOC file (<dir>_index.rst) for every
    subdirectory below *root_dir*, using the Jinja template *template_toc*.

    I/O and template errors are logged at debug level and swallowed
    (best-effort generation).
    """
    try:
        # _template_from_file maps OSError to TemplateOSError, so a missing
        # template is caught below (a bare open() raised a plain OSError
        # that escaped this handler entirely)
        template = _template_from_file(template_toc)
        for dirpath, dirnames, _ in os.walk(os.path.abspath(root_dir)):
            for dirname in dirnames:
                name = os.path.join(dirpath, dirname)
                rendered = template.render(e=dirname)
                new_name = "".join([name, "_index.rst"])
                _write_rendered_template(rendered, new_name)
    except (TemplateOSError, TemplateError) as e:
        msg = '{cls}: {msg}\nTraceback:\n{tb}'.format(
            cls=e.__class__.__name__,
            msg=str(e),
            tb=''.join(traceback.format_list(traceback.extract_tb(sys.exc_info()[2]))))
        LOGGER.debug(msg)
def _template_from_file(template_file):
    """Load *template_file* and return a compiled jinja2 Template.

    Raises TemplateOSError -- carrying the original message and cause --
    if the file cannot be read (the bare re-raise used before discarded both).
    """
    try:
        with open(template_file, 'r') as f:
            return Template(f.read())
    except OSError as e:
        raise TemplateOSError(str(e)) from e
def _write_rendered_template(rendered_template, target_file):
    """Write *rendered_template* to *target_file*.

    Raises TemplateOSError -- carrying the original message and cause --
    if the file cannot be written.
    """
    try:
        with open(target_file, 'w') as f:
            f.write(rendered_template)
    except OSError as e:
        raise TemplateOSError(str(e)) from e
|
18,457 | 45e5e6d76462d13a75b9ea3ed21fd15166120ad1 | import shutil
import os
import glob
import random
"""
This script processes the data of the Kaggle ASL-Alphabet dataset
(https://www.kaggle.com/grassknoted/asl-alphabet), which must be present in
the DatasetOrig folder, so that it ends up in a folder structure of training
and test data suitable for image_dataset_from_directory: the images of each
class must live in a folder of their own.
To use this script, simply unpack the dataset, rename the top-level folder
to DatasetOrig and rename the two folders for training and test data to
train and test.
"""
# source folder (original download) and target folder (prepared copy)
orig_directory = "DatasetOrig"
target_directory = "DatasetPrepared"
# names of the training and test sub-folders
train_directory_name = "train"
test_directory_name = "test"
# number of images per class that are moved at random into the test set
rand_num_to_move = 300
# remove the target directory if it already exists
if os.path.exists(target_directory):
    shutil.rmtree(target_directory)
# copy the original tree into the target tree
shutil.copytree(orig_directory, target_directory)
# the test folder ships as flat files named "<class>_<file>"; create one
# folder per class and move each file into it
# NOTE(review): replace("_", "/") rewrites EVERY underscore in the path --
# assumes neither target_directory nor the file names contain extra "_"
for target_dir_name in glob.glob(target_directory + "/" + test_directory_name + "/*"):
    new_path = target_dir_name.replace("_", "/")
    os.makedirs(os.path.dirname(new_path), exist_ok=True)
    shutil.move(target_dir_name, new_path)
# move rand_num_to_move random images per class from train into test
for target_dir_name in glob.glob(target_directory + "/" + train_directory_name + "/*"):
    target_dir_classes = glob.glob(target_dir_name + "/*")
    # pick rand_num_to_move random files of this class
    random_elements = random.sample(target_dir_classes, rand_num_to_move)
    # move them over, creating the class folder on the test side if needed
    for move_element in random_elements:
        new_dir = move_element.replace(train_directory_name, test_directory_name)
        os.makedirs(os.path.dirname(new_dir), exist_ok=True)
        shutil.move(move_element, new_dir)
|
18,458 | cc7561fb06511df8e8424892f4cf099489dd1554 | # Copyright (c) .NET Foundation and Contributors. All Rights Reserved. See LICENSE in the project root for license information.
#
# This file is used to regenerate the optimizer state_dict test files,
# where PyTorch optimizer state is transferred to TorchSharp.
#
# Execute this from the command line in the $/tests/TorchSharpTest directory.
import torch
import shutil
shutil.copy2('../../src/Python/exportsd.py', 'exportsd.py')
import exportsd

lin1 = torch.nn.Linear(10,10)
lin2 = torch.nn.Linear(10,10)
seq = torch.nn.Sequential(lin1, lin2)
input_tensor = torch.rand(4,10)

def _step_and_save(optim, model, file_name, save_fn):
    """Run one forward/backward/step cycle on *model* and dump *optim*'s state.

    Gradients deliberately accumulate across calls (no zero_grad), matching
    the original stanza-per-optimizer script, so the saved states stay
    comparable with the TorchSharp side.
    """
    output = model(input_tensor).sum()
    output.backward()
    optim.step()
    # 'with' guarantees the file is closed even if save_fn raises
    with open(file_name, "wb") as f:
        save_fn(optim, f)

# SGD
optim = torch.optim.SGD(lin1.parameters(), lr=0.001, momentum=0.1)
_step_and_save(optim, lin1, "sgd1.dat", exportsd.save_sgd)

# ASGD
optim = torch.optim.ASGD(lin1.parameters(), lr=0.001, alpha=0.65, t0=1e5, lambd=1e-3)
_step_and_save(optim, lin1, "asgd1.dat", exportsd.save_asgd)

# RMSprop -- two parameter groups with different hyper-parameters
optim = torch.optim.RMSprop(lin1.parameters(), lr=0.001, momentum=0.1)
optim.add_param_group({'params': lin2.parameters(), 'lr': 0.01, 'momentum' : 0, 'centered': True})
_step_and_save(optim, seq, "rmsprop1.dat", exportsd.save_rmsprop)

# Rprop
optim = torch.optim.Rprop(lin1.parameters(), lr=0.001, etas=(0.35, 1.5), step_sizes=(1e-5, 5))
optim.add_param_group({'params': lin2.parameters(), 'lr': 0.01, 'etas': (0.45, 1.5), 'step_sizes': (1e-5, 5), 'maximize': True})
_step_and_save(optim, seq, "rprop1.dat", exportsd.save_rprop)

# Adam
optim = torch.optim.Adam(lin1.parameters(), lr=0.001, betas=(0.8, 0.9))
optim.add_param_group({'params': lin2.parameters(), 'lr': 0.01, 'betas' : (0.7, 0.79), 'amsgrad': True})
_step_and_save(optim, seq, "adam1.dat", exportsd.save_adam)

# AdamW
optim = torch.optim.AdamW(lin1.parameters(), lr=0.001, betas=(0.8, 0.9))
optim.add_param_group({'params': lin2.parameters(), 'lr': 0.01, 'betas' : (0.7, 0.79), 'amsgrad': True})
_step_and_save(optim, seq, "adamw1.dat", exportsd.save_adamw)

# NAdam
optim = torch.optim.NAdam(lin1.parameters(), lr=0.001, betas=(0.8, 0.9))
optim.add_param_group({'params': lin2.parameters(), 'lr': 0.01, 'betas' : (0.7, 0.79), 'weight_decay': 0.3})
_step_and_save(optim, seq, "nadam1.dat", exportsd.save_nadam)

# RAdam
optim = torch.optim.RAdam(lin1.parameters(), lr=0.001, betas=(0.8, 0.9))
optim.add_param_group({'params': lin2.parameters(), 'lr': 0.01, 'betas' : (0.7, 0.79), 'weight_decay': 0.3})
_step_and_save(optim, seq, "radam1.dat", exportsd.save_radam)

# Adamax
optim = torch.optim.Adamax(lin1.parameters(), lr=0.001, betas=(0.8, 0.9))
optim.add_param_group({'params': lin2.parameters(), 'lr': 0.01, 'betas' : (0.7, 0.79), 'weight_decay' : 0.3})
_step_and_save(optim, seq, "adamax1.dat", exportsd.save_adamax)

# Adadelta
optim = torch.optim.Adadelta(lin1.parameters(), lr=0.001, rho=0.85, weight_decay=0.3)
optim.add_param_group({'params': lin2.parameters(), 'lr': 0.01, 'rho' : 0.79, 'maximize': True})
_step_and_save(optim, seq, "adadelta1.dat", exportsd.save_adadelta)

# Adagrad
optim = torch.optim.Adagrad(lin1.parameters(), lr=0.001, lr_decay=0.85, weight_decay=0.3)
_step_and_save(optim, seq, "adagrad1.dat", exportsd.save_adagrad)
18,459 | 2c6b4c1277e6bfcf175b9bfa1530cc575109e60c | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 30 23:48:48 2019
@author: yiwang
"""
"""
the time complexity is nlogMax(L)
n is because calculating the count
"""
class Solution():
    """Binary-search the maximum piece length such that the logs in L can be
    cut into at least k pieces of that length."""

    def woodcut(self, L, k):
        """Return the largest integer piece length yielding >= k pieces, 0 if none.

        Runs in O(n log max(L)): each count() pass is O(n) and the search
        halves the candidate range each step.
        """
        if not L:  # None or empty list: nothing to cut
            return 0
        start = 0
        end = max(L)  # max(L) directly -- the original copied the list first
        # invariant: the answer (if any) lies in [start, end]
        while start + 1 < end:
            mid = start + (end - start) // 2
            if self.count(L, mid) >= k:
                start = mid
            else:
                end = mid
        # prefer the larger of the two remaining candidates
        if self.count(L, end) >= k:
            return end
        if self.count(L, start) >= k:
            return start
        return 0

    def count(self, L, length):
        """Number of pieces of size *length* obtainable from all logs in L."""
        if length == 0:
            return 0
        return sum(x // length for x in L)
# smoke test: expect 114, since 232//114 + 124//114 + 456//114 == 2 + 1 + 4 == 7
t = Solution()
t0 = t.woodcut([232, 124, 456], 7)
print(t0)
t0 = t.woodcut([6,6,5,5,6,5,4,4,5,6,6,6,5,4,5,6,5,6,4,4,4,4,6,4,5,5,4,6,6,6,6,4,6,4,4,5,6,5,5,4,6,4,6,4,4,6,4,5,6,5,6,6,5,4,4,5,6,4,5,5,5,6,6,4,4,5,5,6,5,5,6,4,6,4,5,6,4,4,4,6,4,6,4,5,4,6,5,6,6,5,4,4,6,5,6,5,6,4,4,6,4,6,5,4,4,4,4,6,6,4,6,6,6,5,4,4,6,4,4,4,5,4,6,4,6,6,4,5,6,5,6,5,4,5,5,5,4,6,5,4,5,6,4,4,6,6,5,6,6,4,6,5,6,5,4,6,4,6,4,6,5,6,4,6,6,4,5,4,6,6,5,6,6,5,4,4,4,4,4,5,5,4,6,5,4,6,4,6,5,6,6,5,4,5,6,4,4,6,5,5,6,6,6,4,6,6,5,6,5,4,6,5,4,6,5,6,4,5,4,4,5,4,5,6,6,4,4,4,4,6,6,6,6,6,5,5,4,4,5,5,6,6,5,6,6,6,4,5,5,4,6,4,6,4,4,6,5,6,6,6,5,4,4,4,6,4,6,4,5,6,6,4,5,6,6,6,6,6,4,5,6,4,4,4,4,5,5,5,6,5,5,5,4,4,5,6,6,4,6,6,6,4,5,4,6,5,6,5,5,6,5,4,5,4,6,4,5,5,5,5,5,5,4,5,5,6,4,5,6,5,6,5,6,4,4,5,6,5,6,6,6,5,4,5,4,6,6,6,6,4,5,5,4,5,5,5,4,6,5,5,4,6,5,6,6,6,4,4,4,6,4,6,6,4,5,4,5,6,6,6,5,6,4,6,6,5,6,5,6,4,4,6,4,6,5,6,6,5,4,6,5,6,6,5,6,6,4,6,6,5,6,5,6,4,5,5,5,6,6,4,5,4,4,6,4,6,6,5,6,6,6,5,6,6,4,6,6,5,5,6,5,6,6,6,5,5,6,6,5,4,6,5,6,5,6,5,5,6,5,4,5,6,5,4,6,5,5,4,5,4,5,6,5,6,4,6,5,4,4,4,5,5,4,5,4,6,4,5,5,6,5,6,6,5,4,6,6,5,5,6,5,5,5,5,6,5,5,4,4,6,4,5,4,6,6,5,6,5,6,6,4,5,5,5,5,6,6,4,4,5,6,5,5,6,4,6,6,5,5,6,4,4,4,4,5,4,5,4,5,5,4,5,5,5,4,6,4,4,4,5,4,4,5,4,6,6,5,6,4,4,6,6,4,4,5,6,5,5,4,6,6,4,6,5,4,5,5,6,6,6,4,6,6,5,6,4,5,6,4,6,4,5,5,4,4,6,6,6,4,6,5,5,4,5,5,4,5,6,6,6,4,5,4,5,6,4,6,4,5,4,5,4,6,6,6,5,4,6,4,6,4,5,6,5,6,6,6,5,4,5,6,6,6,6,4,5,6,5,4,4,5,5,4,4,4,5,4,6,6,6,6,4,5,5,4,5,4,5,4,4,6,4,5,5,6,4,6,5,4,6,4,6,5,5,6,5,4,4,5,4,4,5,5,5,5,4,4,5,6,4,4,6,6,5,4,6,5,5,4,4,6,4,6,4,5,6,4,5,4,6,5,4,4,5,6,4,4,5,5,6,6,6,6,6,6,4,6,5,6,6,6,6,4,4,5,6,5,4,5,4,4,5,6,5,5,4,4,4,6,6,6,6,6,4,5,4,4,4,4,5,6,4,4,5,5,6,6,6,6,4,6,6,5,4,6,4,5,4,5,5,4,6,6,4,6,4,5,4,5,5,4,6,5,5,4,4,6,6,5,5,4,6,5,5,6,6,4,6,6,4,6,6,4,5,6,6,5,4,5,4,5,5,5,4,6,5,4,6,5,4,4,5,6,5,5,6,4,4,6,4,4,5,4,5,5,4,5,5,4,4,6,4,5,6,4,4,5,6,6,4,5,4,6,5,5,5,5,6,4,5,6,6,6,4,6,5,4,6,5,4,5,4,5,4,6,4,6,6,4,5,6,4,4,6,5,4,4,5,6,6,4,6,6,4,6,5,6,6,4,4,5,5,6,5,4,4,5,4,5,4,4,6,4,4,4,4,4,4,6,6,6,5,6,5,5,4,5,5,5,5,5,6,5,6,4,6,5,4,5,4,5,4,6,5,5,4,5,6,5,4,5,5,6,4,6,5,6,4,6,6,5,6,
4,4,4,5,4,6,4,4,4,5,4,5,4,6,4,4,5,4,5,6,4,6,6,5,4,5,4,5,6,4,5,6,4,5,4,5,5,4,4,5,4,5,6,5,4,5,5,6,4,5,5,4,6,6,6,5,5,5,6,6,4,6,5,4,6,6,6,5,6,6,4,4,5,6,4,4,5,6,5,5,5,6,6,5,4,6,6,5,4,5,6,4,4,6,4,6,4,4,5,4,5,4,5,4,6,4,5,5,6,5,5,4,6,6,4,6,5,5,5,6,5,4,5,6,6,4,6,6,4,5,6,4,6,4,5,4,4,6,5,5,4,6,5,4,6,6,4,4,5,6,4,4,4,4,4,6,6,6,4,5,5,4,4,5,4,4,5,6,5,4,6,6,6,5,6,5,4,5,4,6,6,5,6,5,4,4,5,4,4,4,6,4,5,5,4,4,5,4,5,5,5,5,5,6,4,5,4,4,4,4,4,4,5,4,6,5,5,4,4,5,5,4,4,5,5,6,4,6,4,5,5,6,5,5,6,4,4,5,5,6,5,4,4,4,4,4,6,5,4,5,6,4,4,6,4,4,4,6,4,4,6,5,6,6,6,5,6,5,4,5,4,5,5,6,5,6,4,6,5,6,4,4,6,6,4,4,5,5,5,5,5,4,6,4,5,4,4,4,4,5,6,4,4,5,4,5,5,6,6,5,6,6,5,6,6,5,5,4,5,4,5,5,5,4,4,6,5,4,6,6,6,4,4,6,5,6,5,6,6,4,5,6,6,6,4,4,4,5,4,4,6,5,5,6,4,6,4,6,4,4,4,5,6,6,6,6,5,6,5,5,4,4,6,4,6,4,4,6,6,5,4,4,6,5,4,5,4,4,6,6,6,5,5,5,6,6,6,4,4,6,5,6,4,6,5,6,5,5,5,5,6,5,6,6,4,6,6,6,6,4,5,6,5,6,4,6,6,5,5,4,4,6,4,4,5,4,4,5,6,6,4,4,6,6,6,4,5,4,5,6,6,5,6,4,5,5,5,4,4,4,5,4,5,5,5,5,4,4,5,5,6,4,6,5,5,4,4,6,6,5,6,4,4,6,4,6,4,6,4,4,4,6,5,6,4,6,5,4,4,6,4,6,6,4,5,6,6,4,4,4,4,6,5,4,4,5,5,6,6,5,4,4,4,6,6,5,5,5,6,4,5,4,5,5,4,4,6,5,4,5,4,6,5,6,5,4,4,6,4,5,6,5,4,5,4,5,4,4,5,6,5,6,5,4,5,4,4,5,6,4,6,4,6,4,5,4,4,6,5,5,5,4,5,6,4,5,4,4,6,5,5,6,5,6,6,4,4,6,6,6,6,4,6,4,4,5,4,4,4,6,6,5,4,6,4,6,6,6,5,4,5,6,5,5,5,5,5,4,4,6,4,5,5,5,5,5,5,5,4,6,4,6,6,4,5,4,4,6,5,6,5,4,4,6,5,6,5,6,5,5,6,6,6,5,4,4,4,5,4,6,6,5,5,4,6,5,6,6,5,4,4,5,4,6,5,4,6,6,5,5,5,5,6,6,6,6,6,4,6,6,5,6,6,4,4,4,4,5,4,5,4,4,6,6,6,6,6,6,5,5,5,5,5,5,4,5,4,6,4,4,5,5,5,6,6,5,6,5,4,4,4,6,6,5,6,4,5,5,6,6,4,5,5,4,5,5,4,5,6,5,6,5,6,6,5,5,4,5,4,5,6,5,5,5,4,4,6,5,5,4,4,6,6,6,6,5,6,6,4,5,5,4,5,4,4,4,4,5,6,6,5,4,4,4,5,6,5,5,4,4,6,5,4,6,4,4,4,4,5,6,4,5,5,4,5,4,5,6,5,5,4,5,6,4,5,4,4,4,6,5,4,5,6,4,5,4,4,4,5,5,5,6,6,4,4,6,6,4,4,5,5,6,4,6,6,6,6,4,5,6,4,6,6,6,5,6,5,6,5,5,5,5,5,5,5,4,6,5,4,4,6,6,6,6,6,4,5,4,6,4,5,5,6,6,5,5,6,6,5,6,5,6,4,5,4,5,4,5,6,5,4,5,5,6,4,4,6,6,6,6,5,5,6,4,6,4,6,6,6,6,4,6,5,4,6,4,4,5,6,6,6,6,4,5,6,4,6,4,4,4,6,6,4,5,6,4,6,6,5,6,6,6,5,5,4,4,5,5,5,6,5,6,6,6,6,6,5,5,5,6,4,5,6,5,4,4,5,4,5,5,
4,4,6,4,4,6,4,5],90000)
print(t0)
t0 = t.woodcut([6,6,5,5,6,5,4,4,5,6,6,6,5,4,5,6,5,6,4,4,4,4,6,4,5,5,4,6,6,6,6,4,6,4,4,5,6,5,5,4,6,4,6,4,4,6,4,5,6,5,6,6,5,4,4,5,6,4,5,5,5,6,6,4,4,5,5,6,5,5,6,4,6,4,5,6,4,4,4,6,4,6,4,5,4,6,5,6,6,5,4,4,6,5,6,5,6,4,4,6,4,6,5,4,4,4,4,6,6,4,6,6,6,5,4,4,6,4,4,4,5,4,6,4,6,6,4,5,6,5,6,5,4,5,5,5,4,6,5,4,5,6,4,4,6,6,5,6,6,4,6,5,6,5,4,6,4,6,4,6,5,6,4,6,6,4,5,4,6,6,5,6,6,5,4,4,4,4,4,5,5,4,6,5,4,6,4,6,5,6,6,5,4,5,6,4,4,6,5,5,6,6,6,4,6,6,5,6,5,4,6,5,4,6,5,6,4,5,4,4,5,4,5,6,6,4,4,4,4,6,6,6,6,6,5,5,4,4,5,5,6,6,5,6,6,6,4,5,5,4,6,4,6,4,4,6,5,6,6,6,5,4,4,4,6,4,6,4,5,6,6,4,5,6,6,6,6,6,4,5,6,4,4,4,4,5,5,5,6,5,5,5,4,4,5,6,6,4,6,6,6,4,5,4,6,5,6,5,5,6,5,4,5,4,6,4,5,5,5,5,5,5,4,5,5,6,4,5,6,5,6,5,6,4,4,5,6,5,6,6,6,5,4,5,4,6,6,6,6,4,5,5,4,5,5,5,4,6,5,5,4,6,5,6,6,6,4,4,4,6,4,6,6,4,5,4,5,6,6,6,5,6,4,6,6,5,6,5,6,4,4,6,4,6,5,6,6,5,4,6,5,6,6,5,6,6,4,6,6,5,6,5,6,4,5,5,5,6,6,4,5,4,4,6,4,6,6,5,6,6,6,5,6,6,4,6,6,5,5,6,5,6,6,6,5,5,6,6,5,4,6,5,6,5,6,5,5,6,5,4,5,6,5,4,6,5,5,4,5,4,5,6,5,6,4,6,5,4,4,4,5,5,4,5,4,6,4,5,5,6,5,6,6,5,4,6,6,5,5,6,5,5,5,5,6,5,5,4,4,6,4,5,4,6,6,5,6,5,6,6,4,5,5,5,5,6,6,4,4,5,6,5,5,6,4,6,6,5,5,6,4,4,4,4,5,4,5,4,5,5,4,5,5,5,4,6,4,4,4,5,4,4,5,4,6,6,5,6,4,4,6,6,4,4,5,6,5,5,4,6,6,4,6,5,4,5,5,6,6,6,4,6,6,5,6,4,5,6,4,6,4,5,5,4,4,6,6,6,4,6,5,5,4,5,5,4,5,6,6,6,4,5,4,5,6,4,6,4,5,4,5,4,6,6,6,5,4,6,4,6,4,5,6,5,6,6,6,5,4,5,6,6,6,6,4,5,6,5,4,4,5,5,4,4,4,5,4,6,6,6,6,4,5,5,4,5,4,5,4,4,6,4,5,5,6,4,6,5,4,6,4,6,5,5,6,5,4,4,5,4,4,5,5,5,5,4,4,5,6,4,4,6,6,5,4,6,5,5,4,4,6,4,6,4,5,6,4,5,4,6,5,4,4,5,6,4,4,5,5,6,6,6,6,6,6,4,6,5,6,6,6,6,4,4,5,6,5,4,5,4,4,5,6,5,5,4,4,4,6,6,6,6,6,4,5,4,4,4,4,5,6,4,4,5,5,6,6,6,6,4,6,6,5,4,6,4,5,4,5,5,4,6,6,4,6,4,5,4,5,5,4,6,5,5,4,4,6,6,5,5,4,6,5,5,6,6,4,6,6,4,6,6,4,5,6,6,5,4,5,4,5,5,5,4,6,5,4,6,5,4,4,5,6,5,5,6,4,4,6,4,4,5,4,5,5,4,5,5,4,4,6,4,5,6,4,4,5,6,6,4,5,4,6,5,5,5,5,6,4,5,6,6,6,4,6,5,4,6,5,4,5,4,5,4,6,4,6,6,4,5,6,4,4,6,5,4,4,5,6,6,4,6,6,4,6,5,6,6,4,4,5,5,6,5,4,4,5,4,5,4,4,6,4,4,4,4,4,4,6,6,6,5,6,5,5,4,5,5,5,5,5,6,5,6,4,6,5,4,5,4,5,4,6,5,5,4,5,6,5,4,5,5,6,4,6,5,6,4,6,6,5,6,
4,4,4,5,4,6,4,4,4,5,4,5,4,6,4,4,5,4,5,6,4,6,6,5,4,5,4,5,6,4,5,6,4,5,4,5,5,4,4,5,4,5,6,5,4,5,5,6,4,5,5,4,6,6,6,5,5,5,6,6,4,6,5,4,6,6,6,5,6,6,4,4,5,6,4,4,5,6,5,5,5,6,6,5,4,6,6,5,4,5,6,4,4,6,4,6,4,4,5,4,5,4,5,4,6,4,5,5,6,5,5,4,6,6,4,6,5,5,5,6,5,4,5,6,6,4,6,6,4,5,6,4,6,4,5,4,4,6,5,5,4,6,5,4,6,6,4,4,5,6,4,4,4,4,4,6,6,6,4,5,5,4,4,5,4,4,5,6,5,4,6,6,6,5,6,5,4,5,4,6,6,5,6,5,4,4,5,4,4,4,6,4,5,5,4,4,5,4,5,5,5,5,5,6,4,5,4,4,4,4,4,4,5,4,6,5,5,4,4,5,5,4,4,5,5,6,4,6,4,5,5,6,5,5,6,4,4,5,5,6,5,4,4,4,4,4,6,5,4,5,6,4,4,6,4,4,4,6,4,4,6,5,6,6,6,5,6,5,4,5,4,5,5,6,5,6,4,6,5,6,4,4,6,6,4,4,5,5,5,5,5,4,6,4,5,4,4,4,4,5,6,4,4,5,4,5,5,6,6,5,6,6,5,6,6,5,5,4,5,4,5,5,5,4,4,6,5,4,6,6,6,4,4,6,5,6,5,6,6,4,5,6,6,6,4,4,4,5,4,4,6,5,5,6,4,6,4,6,4,4,4,5,6,6,6,6,5,6,5,5,4,4,6,4,6,4,4,6,6,5,4,4,6,5,4,5,4,4,6,6,6,5,5,5,6,6,6,4,4,6,5,6,4,6,5,6,5,5,5,5,6,5,6,6,4,6,6,6,6,4,5,6,5,6,4,6,6,5,5,4,4,6,4,4,5,4,4,5,6,6,4,4,6,6,6,4,5,4,5,6,6,5,6,4,5,5,5,4,4,4,5,4,5,5,5,5,4,4,5,5,6,4,6,5,5,4,4,6,6,5,6,4,4,6,4,6,4,6,4,4,4,6,5,6,4,6,5,4,4,6,4,6,6,4,5,6,6,4,4,4,4,6,5,4,4,5,5,6,6,5,4,4,4,6,6,5,5,5,6,4,5,4,5,5,4,4,6,5,4,5,4,6,5,6,5,4,4,6,4,5,6,5,4,5,4,5,4,4,5,6,5,6,5,4,5,4,4,5,6,4,6,4,6,4,5,4,4,6,5,5,5,4,5,6,4,5,4,4,6,5,5,6,5,6,6,4,4,6,6,6,6,4,6,4,4,5,4,4,4,6,6,5,4,6,4,6,6,6,5,4,5,6,5,5,5,5,5,4,4,6,4,5,5,5,5,5,5,5,4,6,4,6,6,4,5,4,4,6,5,6,5,4,4,6,5,6,5,6,5,5,6,6,6,5,4,4,4,5,4,6,6,5,5,4,6,5,6,6,5,4,4,5,4,6,5,4,6,6,5,5,5,5,6,6,6,6,6,4,6,6,5,6,6,4,4,4,4,5,4,5,4,4,6,6,6,6,6,6,5,5,5,5,5,5,4,5,4,6,4,4,5,5,5,6,6,5,6,5,4,4,4,6,6,5,6,4,5,5,6,6,4,5,5,4,5,5,4,5,6,5,6,5,6,6,5,5,4,5,4,5,6,5,5,5,4,4,6,5,5,4,4,6,6,6,6,5,6,6,4,5,5,4,5,4,4,4,4,5,6,6,5,4,4,4,5,6,5,5,4,4,6,5,4,6,4,4,4,4,5,6,4,5,5,4,5,4,5,6,5,5,4,5,6,4,5,4,4,4,6,5,4,5,6,4,5,4,4,4,5,5,5,6,6,4,4,6,6,4,4,5,5,6,4,6,6,6,6,4,5,6,4,6,6,6,5,6,5,6,5,5,5,5,5,5,5,4,6,5,4,4,6,6,6,6,6,4,5,4,6,4,5,5,6,6,5,5,6,6,5,6,5,6,4,5,4,5,4,5,6,5,4,5,5,6,4,4,6,6,6,6,5,5,6,4,6,4,6,6,6,6,4,6,5,4,6,4,4,5,6,6,6,6,4,5,6,4,6,4,4,4,6,6,4,5,6,4,6,6,5,6,6,6,5,5,4,4,5,5,5,6,5,6,6,6,6,6,5,5,5,6,4,5,6,5,4,4,5,4,5,5,
4,4,6,4,4,6,4,5],9000)
print(t0)
|
18,460 | 23bb3986fe4357b826a4c3a4db9baca1a88f9a66 |
class MovieSearchHandel:
    """Serialize a paginated movie search result into a plain dict.

    run() yields {'data': [...], 'total', 'pageSum', 'currenPage'} when the
    page holds items, and an empty list when it holds none (callers rely on
    that empty-list sentinel, so it is preserved).
    """

    def __init__(self, movie):
        # *movie* is a pagination object exposing .items/.total/.page/.pages
        self.movie = movie

    @staticmethod
    def __serialize(page):
        """Turn one result page into the transport dict described above."""
        if not page.items:
            return []
        rows = [
            {
                'id': m.id, 'name': m.name, 'elname': m.elname,
                'img': m.img, 'cat': m.cat, 'score': m.score,
                'catgory': m.catgory, 'address': m.address,
                'releaseTime': m.releaseTime, 'play_cat': m.play_cat,
            }
            for m in page.items
        ]
        # key order matches the original anchor-dict construction
        return {
            'data': rows,
            'total': page.total,
            'pageSum': page.pages,
            'currenPage': page.page,
        }

    def run(self):
        return self.__serialize(self.movie)
18,461 | 919097fbc652bf44a973f9f26ade89dbba859148 | from plotly.subplots import make_subplots
import plotly.graph_objects as go
# 1x2 grid whose two subplots share a single y-axis
fig = make_subplots(rows=1, cols=2, shared_yaxes=True)
fig.add_trace(go.Scatter(x=[1, 2, 3], y=[2, 3, 4]), row=1, col=1)
fig.add_trace(go.Scatter(x=[20, 30, 40], y=[5, 5, 5]), row=1, col=2)
fig.update_layout(height=600, width=600, title_text="Multiple Subplots with Shared Y-Axes")
# opens the figure in the browser / notebook renderer
fig.show()
|
18,462 | b1a6cad25d949f332ad4d684eb1cd5184c911da6 | def my_func(*args):
    # Iterating over the args tuple: every positional argument lands in *args*
    for count, fruit in enumerate(args):
        print('{0}. {1}'.format(count, fruit))
# prints "0. mango", "1. orange", "2. apple"
my_func('mango', 'orange', 'apple')
def my_func(**kwargs):
    """Return the sum of every value passed as a keyword argument."""
    # kwargs is a plain dict of the keyword arguments; sum() starts at 0,
    # matching the original accumulator
    return sum(kwargs.values())
print(my_func(bananas=4, cabbage=6, mangoes=7))
# Unpack a list into separate positional arguments with *
my_list = ["oranges", "mangoes", "tomatoes", "bananas"]
# print unpacked list: each element becomes its own argument to print
print(*my_list)
my_dict = {"oranges": 4, "mangoes": 5, "tomatoes": 7, "bananas": 6}
# * on a dict unpacks its KEYS only
print(*my_dict)
my_dict = {"oranges": 4, "mangoes": 5, "tomatoes": 7, "bananas": 6}
# ** inside a dict literal copies the key/value pairs into a new dict
unpacked_dict = {**my_dict}
# print unpacked dictionary
print(unpacked_dict)
list_1 = [1, 2, 3, 4]
list_2 = [5, 6, 7, 8]
# * in a list literal splices the elements -- equivalent to list_1 + list_2
final_list = [*list_1, *list_2]
print(final_list, list_1 + list_2)
dict_1 = {"mangoes": 4, "apples": 5, "bananas": 6}
dict_2 = {"lemons": 7, "carrots": "None"}
dict_3 = {"bread": 2, "tomatoes": 15}
# when merging with **, later dicts win on duplicate keys
final_dict = {**dict_1, **dict_2, **dict_3}
print(final_dict)
|
18,463 | a9c0938ae8a80d2e51f9fc38e3984398baafa106 | from socket import *
from time import sleep
# Minimal TCP file sender: accept one client, send the file name, then the
# file contents in 1 KB chunks, terminated by a '##' marker.
# NOTE(review): the sleeps and the in-band '##' marker make the framing
# fragile -- a real protocol should send the length up front instead.
s = socket()
s.bind(('0.0.0.0',8889))
s.listen(5)
c,addr = s.accept()
print("Connect from ",addr)
f = open('timg.jpeg','rb')
# tell the peer the file name first
c.send('timg.jpeg'.encode())
sleep(0.1)
while True:
    data = f.read(1024)
    if not data:
        break
    c.send(data)
sleep(0.1)
# end-of-file marker, then wait for the peer's acknowledgement
c.send('##'.encode())
data = c.recv(1024)
print(data.decode())
f.close()
c.close()
s.close()
|
18,464 | 4b29bbbede35738aac41572f094e08e246317b16 | """Python scripts executable from the QML app"""
import pyotherside # pylint: disable=import-error
from helpers import exec_fn
from .build_environment import BuildEnv
from .config import CONTAINER_ID
from .helpers import patch_env
def build(config_file, install=False):
    """
    Runs build for the given clickable.json file
    Keyword arguments:
    config_file -- path to clickable.json
    install -- true to install/launch built app
    """
    return exec_fn(lambda: _build(config_file, install))
def create(dir_name, options):
    """
    Creates a new project
    Keyword arguments:
    dir_name -- directory to create the project in
    options -- options for `clickable create --non-interactive ...`
    """
    return exec_fn(lambda: _create(dir_name, options))
def ensure_container():
    """
    Creates a Libertine container to execute clickable if not exists
    """
    return exec_fn(_init_container)
def update_container():
    """
    Upgrades build tools within a Libertine container
    """
    return exec_fn(_update_container)
def test_container_exists():
    """Returns True if Libertine container exists, False otherwise"""
    return exec_fn(_test_container_exists)
def _init_container():
    """Patch the environment, build a BuildEnv that streams its output to
    QML via pyotherside, and make sure the container exists."""
    patch_env()
    build_env = BuildEnv(container_id=CONTAINER_ID,
                         print_renderer=lambda txt: pyotherside.send('stdout', txt))
    build_env.init_container()
    return build_env
def _build(config_file, install):
    """Build (and optionally install) using a freshly ensured container."""
    build_env = _init_container()
    return build_env.build(config_file, install)
def _create(dir_name, options):
    """Create a new project using a freshly ensured container."""
    build_env = _init_container()
    return build_env.create(dir_name, options)
def _test_container_exists():
    """Check for the Libertine container without initializing it."""
    # NOTE(review): passes pyotherside.send directly, unlike _init_container
    # which wraps it to tag messages as 'stdout' -- confirm this is intended
    build_env = BuildEnv(container_id=CONTAINER_ID, print_renderer=pyotherside.send)
    return build_env.test_container_exists()
def _update_container():
    """Upgrade the build tools using a freshly ensured container."""
    build_env = _init_container()
    return build_env.update_container()
|
18,465 | 1a6162edf10650695396b1c660337c883637d1bf | from phonenumber_field.modelfields import PhoneNumberField
from django.db import models
from django.contrib.auth.models import User
import datetime
# Create your models here.
from products.models import Product
class Order(models.Model):
    """A customer's shopping order; rows are listed newest-first."""
    # Explicit on_delete matches the old implicit default (CASCADE) and is
    # required by Django >= 2.0, so this is behavior-identical but forward-compatible.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    total_cost = models.DecimalField(max_digits=20, decimal_places=2, default=0.00)
    confirmed = models.BooleanField(default=False)   # customer confirmed the order
    dispatched = models.BooleanField(default=False)  # order has been shipped
    created_on = models.DateTimeField(auto_now_add=True)

    class Meta:
        ordering = ['-created_on']

    def __str__(self):
        return 'Order No {}'.format(self.pk)
class OrderDetail(models.Model):
    """A single product line (product + quantity) within an Order."""
    # Explicit on_delete matches the old implicit default (CASCADE) and is
    # required by Django >= 2.0.
    order = models.ForeignKey(Order, on_delete=models.CASCADE)
    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    quantity = models.IntegerField(default=0)
    created_on = models.DateTimeField(auto_now_add=True)

    class Meta:
        ordering = ['-created_on']

    def __str__(self):
        # readable admin/shell representation, consistent with Order.__str__
        return 'OrderDetail No {}'.format(self.pk)
class ConfirmedOrder(models.Model):
    """Shipping details captured when the customer confirms an Order."""
    # Explicit on_delete matches the old implicit default (CASCADE) and is
    # required by Django >= 2.0.
    order = models.ForeignKey(Order, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    address = models.TextField()
    phone_number = PhoneNumberField()
    created_on = models.DateTimeField(auto_now_add=True)

    class Meta:
        ordering = ['-created_on']
|
18,466 | 757360234499b3e1fd69af6307aa8c2dba3c560f | """
This file is intended for individual analyses of the gender_novels project
"""
from gender_novels.corpus import Corpus
import nltk
import math
from operator import itemgetter
nltk.download('stopwords', quiet=True)
# TODO: add prior two lines to setup, necessary to run
import collections
from scipy.stats import chi2
from statistics import mean, median, mode
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
import numpy as np
import matplotlib.pyplot as plt
from more_itertools import windowed
import unittest
import seaborn as sns
palette = "colorblind"
style_name = "white"
style_list = {'axes.edgecolor': '.6', 'grid.color': '.9', 'axes.grid': 'True',
'font.family': 'serif'}
sns.set_color_codes(palette)
sns.set_style(style_name,style_list)
def get_count_words(novel, words):
    """
    Count how often each word in *words* occurs in *novel*.

    Matching is delegated to Novel.get_count_of_word (per the Novel API it is
    not case-sensitive).

    >>> from gender_novels import novel
    >>> summary = "Hester was convicted of adultery. "
    >>> summary += "which made her very sad, and then Arthur was also sad, and everybody was "
    >>> summary += "sad and then Arthur died and it was very sad. Sadness."
    >>> novel_metadata = {'author': 'Hawthorne, Nathaniel', 'title': 'Scarlet Letter',
    ...                   'corpus_name': 'sample_novels', 'date': '1850',
    ...                   'filename': None, 'text': summary}
    >>> scarlett = novel.Novel(novel_metadata)
    >>> get_count_words(scarlett, ["sad", "and"])
    {'sad': 4, 'and': 4}

    :param novel: Novel object exposing get_count_of_word(word)
    :param words: list of words to count
    :return: dict mapping each word to its occurrence count
    """
    return {word: novel.get_count_of_word(word) for word in words}
def get_comparative_word_freq(freqs):
    """
    Normalize the frequencies in *freqs* relative to each other.

    Every value is divided by the sum of all values; when that sum is 0,
    each entry maps to 0 instead of raising.

    >>> get_comparative_word_freq({'he': 1, 'she': 3})
    {'he': 0.25, 'she': 0.75}
    >>> get_comparative_word_freq({'he': 0, 'she': 0})
    {'he': 0, 'she': 0}

    :param freqs: dictionary of word -> raw frequency
    :return: dictionary of word -> relative frequency (sums to 1, or all 0)
    """
    total = sum(freqs.values())
    if not total:
        # Zero total: keep every key, report 0 for each.
        return {word: 0 for word in freqs}
    return {word: value / total for word, value in freqs.items()}
def get_counts_by_pos(freqs):
    """
    Group word counts by part-of-speech tag.

    Stop words (nltk's English list) are filtered out; each remaining word is
    tagged individually with nltk.pos_tag. The result maps each POS tag
    (e.g. 'NN' for nouns) to a Counter of word -> count.

    >>> get_counts_by_pos(collections.Counter({'baked':1,'chair':3,'swimming':4}))
    {'VBN': Counter({'baked': 1}), 'NN': Counter({'chair': 3}), 'VBG': Counter({'swimming': 4})}

    :param freqs: Counter (or dict) of word -> count
    :return: dict of POS tag -> Counter of words carrying that tag
    """
    by_tag = {}
    for word, count in freqs.items():
        # Skip filler words such as "is" or "the".
        if word in stop_words:
            continue
        # Tag each word in isolation (no sentence context available here).
        tag = nltk.pos_tag([word])[0][1]
        if tag in by_tag:
            by_tag[tag].update({word: count})
        else:
            by_tag[tag] = collections.Counter({word: count})
    return by_tag
def display_gender_freq(d, title):
    """
    Render a grouped bar chart of he/she frequencies per author/novel.

    Saves the figure to 'visualizations/he_she_freq<title>.png' AND '.pdf'.

    :param d: dict of {"Author/Novel": [he_freq, she_freq]}
    :param title: suffix appended to the output file names
    :return: None (files are written as a side effect)
    """
    # Unzip the dict into parallel label / he / she sequences.
    he_val = []
    she_val = []
    authors = []
    for entry in d:
        authors.append(entry)
        he_val.append(d[entry][0])
        she_val.append(d[entry][1])
    fig, ax = plt.subplots()
    # Frequencies are relative, so the y axis is fixed to [0, 1].
    plt.ylim(0, 1)
    index = np.arange(len(d.keys()))
    bar_width = 0.35
    opacity = 0.4
    he_val = tuple(he_val)
    she_val = tuple(she_val)
    authors = tuple(authors)
    # Two bars per author: blue = "he", red = "she", offset by bar_width.
    rects1 = ax.bar(index, he_val, bar_width, alpha=opacity, color='b', label='He')
    rects2 = ax.bar(index + bar_width, she_val, bar_width, alpha=opacity, color='r', label='She')
    ax.set_xlabel('Authors')
    ax.set_ylabel('Frequency')
    ax.set_title('Gendered Pronouns by Author')
    # Center the tick between the paired bars.
    ax.set_xticks(index + bar_width / 2)
    plt.xticks(fontsize=8, rotation=90)
    ax.set_xticklabels(authors)
    ax.legend()
    fig.tight_layout()
    # NOTE(review): assumes the 'visualizations/' directory already exists.
    filepng = "visualizations/he_she_freq" + title + ".png"
    filepdf = "visualizations/he_she_freq" + title + ".pdf"
    plt.savefig(filepng, bbox_inches='tight')
    plt.savefig(filepdf, bbox_inches='tight')
def run_gender_freq(corpus):
    """
    Chart he/she pronoun frequencies for every novel in *corpus*, 10 per chart.

    Bug fix vs. the original: the batch slice used `num * 10 + 9` (a 9-item
    window), which silently skipped every 10th novel; it also emitted a
    trailing empty chart when the novel count was a multiple of 10. Stepping
    through the list 10 at a time fixes both.

    :param corpus: Corpus whose novels are analyzed
    :return: None (charts are written by display_gender_freq)
    """
    novels = corpus._load_novels()
    for batch_num, start in enumerate(range(0, len(novels), 10)):
        dictionary = {}
        for novel in novels[start:start + 10]:
            d = {'he': novel.get_word_freq('he'), 'she': novel.get_word_freq('she')}
            d = get_comparative_word_freq(d)
            # Truncated title + author as the bar label.
            book = novel.title[0:20] + "\n" + novel.author
            dictionary[book] = [d["he"], d["she"]]
        display_gender_freq(dictionary, str(batch_num))
def dunn_individual_word(total_words_m_corpus, total_words_f_corpus, wordcount_female,
                         wordcount_male):
    '''
    NOTE(review): dead code. This definition's body is only this docstring
    (it implicitly returns None), and it is immediately shadowed by the
    second `dunn_individual_word` defined directly below, so it can never
    be called. Kept only to preserve history — safe to delete.
    '''
def dunn_individual_word(total_words_corpus_1, total_words_corpus_2, count_of_word_corpus_1,
                         count_of_word_corpus_2):
    '''
    Dunning log-likelihood score comparing one word's usage in two corpora.

    The magnitude grows with how unevenly the word is distributed relative to
    the two corpus sizes; the sign is flipped negative when the word is
    under-represented in corpus 1 (over-represented in corpus 2).

    Fixes vs. the original: the p-value (`1 - chi2.cdf(...)`) was computed and
    then silently discarded, and the docstring/doctest claimed a returned
    p-value and a wrong result (-800); both removed. The redundant
    `count * log(...)` in the sign test is simplified (the count is positive).

    :param total_words_corpus_1: total token count of corpus 1
    :param total_words_corpus_2: total token count of corpus 2
    :param count_of_word_corpus_1: occurrences of the word in corpus 1 (> 0)
    :param count_of_word_corpus_2: occurrences of the word in corpus 2 (> 0)
    :return: signed Dunning log-likelihood (float)
    :raises ValueError: math domain error if either count is 0
    '''
    a = count_of_word_corpus_1
    b = count_of_word_corpus_2
    c = total_words_corpus_1
    d = total_words_corpus_2

    # Expected counts if the word were spread evenly across both corpora.
    e1 = c * (a + b) / (c + d)
    e2 = d * (a + b) / (c + d)

    dunning_log_likelihood = 2 * (a * math.log(a / e1) + b * math.log(b / e2))
    # Flip the sign when corpus 1 uses the word less often than expected.
    if math.log(a / e1) < 0:
        dunning_log_likelihood = -dunning_log_likelihood
    return dunning_log_likelihood
def dunning_total(m_corpus, f_corpus):
    '''
    Score every word shared by two gendered corpora with Dunning log-likelihood.

    Only words occurring in BOTH corpora are scored. The full ranking is
    printed and returned, sorted ascending by (score, male count, female
    count); strongly female-associated words therefore come first.

    :param m_corpus: corpus filtered to male authors (c.filter_by_gender('male'))
    :param f_corpus: corpus filtered to female authors (c.filter_by_gender('female'))
    :return: sorted list of (word, (dunning score, male count, female count))
    '''
    male_counts = m_corpus.get_wordcount_counter()
    female_counts = f_corpus.get_wordcount_counter()

    # Corpus sizes in tokens.
    total_male_words = sum(male_counts.values())
    total_female_words = sum(female_counts.values())

    scores = {}
    for word, male_count in male_counts.items():
        if word not in female_counts:
            continue
        female_count = female_counts[word]
        score = dunn_individual_word(total_male_words, total_female_words,
                                     male_count, female_count)
        scores[word] = (score, male_count, female_count)

    ranked = sorted(scores.items(), key=itemgetter(1))
    print(ranked)
    return ranked
def instance_dist(novel, word):
    """
    Return the gaps between successive occurrences of *word* in *novel*.

    The first occurrence only starts the count; every later occurrence
    appends the number of tokens since the previous occurrence (counting the
    occurrence itself), so n occurrences yield n-1 gaps. Matching is
    case-sensitive.

    >>> from gender_novels import novel
    >>> summary = "Hester was her convicted of adultery. "
    >>> summary += "which made her very sad, and then her Arthur was also sad, and her everybody was "
    >>> summary += "sad and then Arthur her died and it was very sad. her Sadness."
    >>> novel_metadata = {'author': 'Hawthorne, Nathaniel', 'title': 'Scarlet Letter',
    ...                   'corpus_name': 'sample_novels', 'date': '1966',
    ...                   'filename': None, 'text': summary}
    >>> scarlett = novel.Novel(novel_metadata)
    >>> instance_dist(scarlett, "her")
    [6, 5, 6, 7, 7]

    :param novel: Novel exposing get_tokenized_text()
    :param word: token to look for
    :return: list of integer gaps between occurrences
    """
    gaps = []
    steps = 0
    seen_first = False
    for token in novel.get_tokenized_text():
        if seen_first:
            steps += 1
            if token == word:
                gaps.append(steps)
                steps = 0
        elif token == word:
            seen_first = True
    return gaps
def pronoun_instance_dist(novel, words):
    """
    Return gaps between successive occurrences of any pronoun in *words*.

    Tokens are lowercased before matching (so "His" matches "his"). The
    first match only starts the count; each later match appends the number
    of tokens since the previous match, counting itself.

    >>> from gender_novels import novel
    >>> summary = "James was his convicted of adultery. "
    >>> summary += "which made him very sad, and then his Jane was also sad, and himself everybody was "
    >>> summary += "sad and then he died and it was very sad. His Sadness."
    >>> novel_metadata = {'author': 'Hawthorne, Nathaniel', 'title': 'Scarlet Letter',
    ...                   'corpus_name': 'sample_novels', 'date': '1966',
    ...                   'filename': None, 'text': summary}
    >>> scarlett = novel.Novel(novel_metadata)
    >>> pronoun_instance_dist(scarlett, ["his", "him", "he", "himself"])
    [6, 5, 6, 6, 7]

    :param novel: Novel exposing get_tokenized_text()
    :param words: list of (lowercase) pronouns to match
    :return: list of integer gaps between pronoun occurrences
    """
    gaps = []
    steps = 0
    seen_first = False
    for token in novel.get_tokenized_text():
        token = token.lower()
        if seen_first:
            steps += 1
            if token in words:
                gaps.append(steps)
                steps = 0
        elif token in words:
            seen_first = True
    return gaps
def male_instance_dist(novel):
    """
    Return distances between consecutive MALE-pronoun occurrences in *novel*.

    (The original docstring said "female pronoun" — a copy-paste error; this
    wrapper matches his/him/he/himself.)

    >>> from gender_novels import novel
    >>> summary = "James was his convicted of adultery. "
    >>> summary += "which made him very sad, and then he Arthur was also sad, and himself everybody was "
    >>> summary += "sad and then he died and it was very sad. His Sadness."
    >>> novel_metadata = {'author': 'Hawthorne, Nathaniel', 'title': 'Scarlet Letter',
    ...                   'corpus_name': 'sample_novels', 'date': '1966',
    ...                   'filename': None, 'text': summary}
    >>> scarlett = novel.Novel(novel_metadata)
    >>> male_instance_dist(scarlett)
    [6, 5, 6, 6, 7]

    :param novel: Novel to analyze
    :return: list of distances between male-pronoun instances
    """
    return pronoun_instance_dist(novel, ["his", "him", "he", "himself"])
def female_instance_dist(novel):
    """
    Return distances between consecutive female-pronoun occurrences in *novel*.

    Wrapper around pronoun_instance_dist with the her/hers/she/herself set.

    >>> from gender_novels import novel
    >>> summary = "Hester was her convicted of adultery. "
    >>> summary += "which made her very sad, and then she Hester was also sad, and herself everybody was "
    >>> summary += "sad and then she died and it was very sad. Her Sadness."
    >>> novel_metadata = {'author': 'Hawthorne, Nathaniel', 'title': 'Scarlet Letter',
    ...                   'corpus_name': 'sample_novels', 'date': '1966',
    ...                   'filename': None, 'text': summary}
    >>> scarlett = novel.Novel(novel_metadata)
    >>> female_instance_dist(scarlett)
    [6, 5, 6, 6, 7]

    :param novel: Novel to analyze
    :return: list of distances between female-pronoun instances
    """
    return pronoun_instance_dist(novel, ["her", "hers", "she", "herself"])
def find_gender_adj(novel, female):
    """
    Count adjectives appearing within 5 words of gendered pronouns in *novel*.

    Scans an 11-token sliding window centered on each pronoun of the chosen
    gender; windows that also contain a pronoun of the OTHER gender are
    skipped so the adjective attribution is unambiguous.

    >>> from gender_novels import novel
    >>> summary = "James was convicted of adultery. "
    >>> summary += "he was a handsome guy, and everyone thought that he was so handsome, and everybody was "
    >>> summary += "sad and then he died a very handsome death. His Sadness."
    >>> novel_metadata = {'author': 'Hawthorne, Nathaniel', 'title': 'Scarlet Letter',
    ...                   'corpus_name': 'sample_novels', 'date': '1966',
    ...                   'filename': None, 'text': summary}
    >>> scarlett = novel.Novel(novel_metadata)
    >>> find_gender_adj(scarlett, False)
    {'handsome': 3, 'sad': 1}

    :param novel: Novel to analyze
    :param female: True to search around female pronouns, False for male
    :return: dict of adjective -> occurrence count; empty dict if the pronoun
        never appears. NOTE(review): returns the STRING
        "lower window bound less than 5" (not a dict) when pronoun instances
        are too densely packed — callers must handle that case.
    """
    output = {}
    text = novel.get_tokenized_text()
    # Pick the target pronoun set and the opposite set to exclude.
    if female:
        distances = female_instance_dist(novel)
        pronouns1 = ["her", "hers", "she", "herself"]
        pronouns2 = ["his", "him", "he", "himself"]
    else:
        distances = male_instance_dist(novel)
        pronouns1 = ["his", "him", "he", "himself"]
        pronouns2 = ["her", "hers", "she", "herself"]
    if len(distances) == 0:
        return {}
    elif len(distances) <= 3:
        lower_window_bound = 5
    else:
        # Median of the smaller half of the gaps between pronoun instances.
        lower_window_bound = median(sorted(distances)[:int(len(distances) / 2)])
    # NOTE(review): lower_window_bound gates the scan but the window size is
    # fixed at 11 below — presumably a guard against overlapping windows;
    # confirm the intended relationship.
    if not lower_window_bound >= 5:
        return "lower window bound less than 5"
    # Slide an 11-token window; position l6 is the window center.
    for l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11 in windowed(text, 11):
        l6 = l6.lower()
        if not l6 in pronouns1:
            continue
        words = [l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11]
        # Skip windows mentioning the other gender to avoid misattribution.
        if bool(set(words) & set(pronouns2)):
            continue
        for index, word in enumerate(words):
            words[index] = word.lower()
        tags = nltk.pos_tag(words)
        # Tally adjectives: JJ (plain), JJR (comparative), JJS (superlative).
        for tag_index, tag in enumerate(tags):
            if tags[tag_index][1] == "JJ" or tags[tag_index][1] == "JJR" or tags[tag_index][1] == "JJS":
                word = words[tag_index]
                if word in output.keys():
                    output[word] += 1
                else:
                    output[word] = 1
    return output
def find_male_adj(novel):
    """
    Count adjectives within a 5-word window around each male pronoun.

    Thin wrapper over find_gender_adj(novel, False); see that function for
    the window rules and the string sentinel it may return.

    >>> from gender_novels import novel
    >>> summary = "James was convicted of adultery. "
    >>> summary += "he was a handsome guy, and everyone thought that he was so handsome, and everybody was "
    >>> summary += "sad and then he died a very handsome death. His Sadness."
    >>> novel_metadata = {'author': 'Hawthorne, Nathaniel', 'title': 'Scarlet Letter',
    ...                   'corpus_name': 'sample_novels', 'date': '1966',
    ...                   'filename': None, 'text': summary}
    >>> scarlett = novel.Novel(novel_metadata)
    >>> find_male_adj(scarlett)
    {'handsome': 3, 'sad': 1}

    :param novel: Novel to analyze
    :return: dict of adjective -> count near male pronouns
    """
    return find_gender_adj(novel, False)
def find_female_adj(novel):
    """
    Count adjectives within a 5-word window around each female pronoun.

    Thin wrapper over find_gender_adj(novel, True); see that function for
    the window rules and the string sentinel it may return.

    >>> from gender_novels import novel
    >>> summary = "Jane was convicted of adultery. "
    >>> summary += "she was a beautiful gal, and everyone thought that she was very beautiful, and everybody was "
    >>> summary += "sad and then she died. Everyone agreed that she was a beautiful corpse that deserved peace."
    >>> novel_metadata = {'author': 'Hawthorne, Nathaniel', 'title': 'Scarlet Letter',
    ...                   'corpus_name': 'sample_novels', 'date': '1966',
    ...                   'filename': None, 'text': summary}
    >>> scarlett = novel.Novel(novel_metadata)
    >>> find_female_adj(scarlett)
    {'beautiful': 3, 'sad': 1}

    :param novel: Novel to analyze
    :return: dict of adjective -> count near female pronouns
    """
    return find_gender_adj(novel, True)
def process_medians(helst, shelst, authlst):
    """
    Convert parallel he/she median-distance lists into ratio lists.

    For each index where both medians are positive, exactly one output list
    receives the (>= 1) ratio and the other receives 0, and the book label is
    carried along. Indices with a missing (<= 0) median are reported on
    stdout and skipped.

    Bug fix vs. the original: the diagnostics compared the WHOLE LIST to 0
    (`if helst == 0`), which is always False, so missing values were dropped
    silently; the comparison now uses the per-entry value.

    :param helst: list of median distances for "he"
    :param shelst: list of median distances for "she" (same length)
    :param authlst: list of book labels (same length)
    :return: dict {"he": [...], "she": [...], "book": [...]} with ratios
        where helst ratio >= 1 goes under "he" (0 under "she") and vice versa
    """
    d = {"he": [], "she": [], "book": []}
    for i in range(len(helst)):
        he_med = helst[i]
        she_med = shelst[i]
        if he_med > 0 and she_med > 0:
            if he_med >= she_med:
                d["he"].append(he_med / she_med)
                d["she"].append(0)
            else:
                d["he"].append(0)
                d["she"].append(she_med / he_med)
            d["book"].append(authlst[i])
        else:
            # Report whichever side is missing; the entry is skipped entirely.
            if he_med <= 0:
                print("ERR: no MALE values: " + authlst[i])
            if she_med <= 0:
                print("ERR: no FEMALE values: " + authlst[i])
    return d
def bubble_sort_across_lists(dictionary):
    """
    Sort the three parallel lists in *dictionary* together.

    Ordering: ascending 'she' ratio, ties broken by descending 'he' ratio
    (full ties keep their input order), with 'book' labels carried along.
    This replaces the original hand-rolled pair of O(n^2) bubble sorts with a
    single stable `sorted` call that produces the identical ordering: the
    original first bubble-sorted by 'he' descending, then stably re-sorted by
    'she' ascending, which is exactly key=(she, -he) on the input order.

    >>> d = {'he': [0, 2.5, 0, 1.3846153846153846, 0, 1.0, 5.3478260869565215],
    ...      'she': [10.25, 0, 28.75, 0, 14.266666666666667, 0, 0],
    ...      'book': ['a', 'b', 'd', 'f', 'g', 'h', 'i']}
    >>> bubble_sort_across_lists(d)['book']
    ['i', 'b', 'f', 'h', 'a', 'g', 'd']

    :param dictionary: dict with parallel list values under 'he', 'she', 'book'
    :return: new dict with the same keys and the lists reordered together
    """
    rows = sorted(
        zip(dictionary['he'], dictionary['she'], dictionary['book']),
        key=lambda row: (row[1], -row[0]),
    )
    return {
        'he': [he for he, _, _ in rows],
        'she': [she for _, she, _ in rows],
        'book': [book for _, _, book in rows],
    }
def instance_stats(book, medians1, medians2, title):
    """
    Render a bar chart of he/she median-distance ratios per book.

    The two ratio series are drawn at the SAME x positions (one of each pair
    is expected to be 0, per process_medians), on a symlog y axis.

    :param book: sequence of book labels (x-axis categories)
    :param medians1: male-to-female ratios (blue bars)
    :param medians2: female-to-male ratios (red bars)
    :param title: suffix appended to the output file names
    :return: None — writes 'visualizations/<title>.png' and '.pdf'
    """
    fig, ax = plt.subplots()
    plt.ylim(0, 50)
    index = np.arange(len(book))
    bar_width = .7
    opacity = 0.4
    medians_she = tuple(medians2)
    medians_he = tuple(medians1)
    book = tuple(book)
    # Both series share the same x slots; only one of each pair is non-zero.
    rects1 = ax.bar(index, medians_he, bar_width, alpha=opacity, color='b', label='Male to Female')
    rects2 = ax.bar(index, medians_she, bar_width, alpha=opacity, color='r', label='Female to Male')
    ax.set_xlabel('Book')
    ax.set_ylabel('Ratio of Median Values')
    ax.set_title(
        'MtF or FtM Ratio of Median Distance of Gendered Instances by Author')
    ax.set_xticks(index)
    plt.xticks(fontsize=8, rotation=90)
    ax.set_xticklabels(book)
    # Symmetric log scale keeps very large ratios readable next to small ones.
    ax.set_yscale("symlog")
    ax.legend()
    fig.tight_layout()
    # NOTE(review): assumes the 'visualizations/' directory already exists.
    filepng = "visualizations/" + title + ".png"
    filepdf = "visualizations/" + title + ".pdf"
    plt.savefig(filepng, bbox_inches='tight')
    plt.savefig(filepdf, bbox_inches='tight')
def run_dist_inst(corpus):
    """
    Plot instance-distance statistics for every novel in *corpus*, 10 per chart.

    Bug fixes vs. the original: the batch slice used `num * 10 + 9`, silently
    dropping every 10th novel (and a trailing empty batch could be plotted);
    stepping through the list 10 at a time fixes both. The bare `except:`
    around `median` — which swallowed ALL errors — is replaced by an explicit
    empty-input check, the only case the original handling could mask here.

    :param corpus: Corpus whose novels are analyzed
    :return: None (charts are written by instance_stats)
    """
    novels = corpus._load_novels()
    for batch_num, start in enumerate(range(0, len(novels), 10)):
        medians_he = []
        medians_she = []
        books = []
        for novel in novels[start:start + 10]:
            he_gaps = instance_dist(novel, "he")
            she_gaps = instance_dist(novel, "she")
            # median() raises on an empty list; treat "no instances" as 0.
            medians_he.append(median(he_gaps) if he_gaps else 0)
            medians_she.append(median(she_gaps) if she_gaps else 0)
            books.append(novel.title[0:20] + "\n" + novel.author)
        d = process_medians(helst=medians_he, shelst=medians_she, authlst=books)
        d = bubble_sort_across_lists(d)
        instance_stats(d["book"], d["he"], d["she"], "inst_dist" + str(batch_num))
class Test(unittest.TestCase):
    """Ad-hoc harness for eyeballing dunning_total output on the sample corpus."""

    def test_dunning_total(self):
        # NOTE(review): this "test" only prints results — it asserts nothing,
        # so it passes as long as dunning_total does not raise.
        c = Corpus('sample_novels')
        m_corpus = c.filter_by_gender('male')
        f_corpus = c.filter_by_gender('female')
        results = dunning_total(m_corpus, f_corpus)
        print(results[10::])
        #print(reversed(results[-100::]))
if __name__ == '__main__':
    # unittest.main()
    # The triple-quoted block below is disabled exploratory code, kept as-is.
    '''
    print("loading corpus")
    corpus = Corpus('sample_novels')
    print("loading novel")
    novel = corpus._load_novels()[15]
    print(novel.author, novel.title, novel.word_count)
    print("running function")
    result = find_male_adj(novel)
    output = []
    for key in result.keys():
        output.append((result[key], key))
    print(sorted(output, reverse=True))
    '''
    # Generate all charts for the sample corpus.
    c = Corpus('sample_novels')
    run_dist_inst(c)
    run_gender_freq(c)
    print("hello")
|
18,467 | c560d992bbccc12d99c62f5d21c0933c26d0983f | num =5
# Demonstrates if/else: num was set to 5 above, so `num != 5` is False
# and the else branch runs.
if num !=5:
    print("This will not execute")
else:
    print("Num is 5")
python_course = True
# `not True` is False, so this else branch runs as well.
if not python_course:
    print("This will also not execute")
else:
    print("Python course is defined")
18,468 | cb15659268c2776c885477a556a4fb32d5554959 | import random
HISTORY_FILE = 'generated_problems.txt'

# Load the problems already suggested in previous runs.
# Bug fix: the original crashed with FileNotFoundError on the very first run;
# a missing history file now simply means "nothing generated yet".
try:
    with open(HISTORY_FILE) as gen_f:
        generated_problems = [line.strip() for line in gen_f if line.strip()]
except FileNotFoundError:
    generated_problems = []

problems = """
Two Sum
Best Time to Buy and Sell Stock
Contains Duplicate
Product of Array Except Self
Maximum Subarray
Maximum Product Subarray
Find Minimum in Rotated Sorted Array
Search in Rotated Sorted Array
3 Sum
Container With Most Water
Sum of Two Integers
Number of 1 Bits
Counting Bits
Missing Number
Reverse Bits
Climbing Stairs
Coin Change
Longest Increasing Subsequence
Longest Common Subsequence
Word Break Problem
Combination Sum
House Robber
House Robber II
Decode Ways
Unique Paths
Jump Game
Clone Graph
Course Schedule
Pacific Atlantic Water Flow
Number of Islands
Longest Consecutive Sequence
Alien Dictionary (Leetcode Premium)
Graph Valid Tree (Leetcode Premium)
Number of Connected Components in an Undirected Graph (Leetcode Premium)
Insert Interval
Merge Intervals
Non-overlapping Intervals
Meeting Rooms (Leetcode Premium)
Meeting Rooms II (Leetcode Premium)
Reverse a Linked List
Detect Cycle in a Linked List
Merge Two Sorted Lists
Merge K Sorted Lists
Remove Nth Node From End Of List
Reorder List
Set Matrix Zeroes
Spiral Matrix
Rotate Image
Word Search
Longest Substring Without Repeating Characters
Longest Repeating Character Replacement
Minimum Window Substring
Valid Anagram
Group Anagrams
Valid Parentheses
Valid Palindrome
Longest Palindromic Substring
Palindromic Substrings
Encode and Decode Strings (Leetcode Premium)
Maximum Depth of Binary Tree
Same Tree
Invert/Flip Binary Tree
Binary Tree Maximum Path Sum
Binary Tree Level Order Traversal
Serialize and Deserialize Binary Tree
Subtree of Another Tree
Construct Binary Tree from Preorder and Inorder Traversal
Validate Binary Search Tree
Kth Smallest Element in a BST
Lowest Common Ancestor of BST
Implement Trie (Prefix Tree)
Add and Search Word
Word Search II
Merge K Sorted Lists
Top K Frequent Elements
Find Median from Data Stream
""".split('\n')

# Bug fix: the triple-quoted string starts and ends with a newline, so the
# split produced empty entries that random.choice could (and eventually
# would) pick; drop blanks and surrounding whitespace.
problems = [p.strip() for p in problems if p.strip()]

# Bug fix: once every problem had been generated, random.choice raised
# IndexError on an empty sequence; report exhaustion instead.
remaining = list(set(problems) - set(generated_problems))
if remaining:
    problem = random.choice(remaining)
    # Append the pick to the history file (rewritten in full, as before).
    with open(HISTORY_FILE, 'w+') as gen_f:
        for prob in generated_problems + [problem]:
            gen_f.write(prob + "\n")
    print(problem)
else:
    print("All problems have been generated already.")
|
18,469 | 56edf7b04acad6b1af8aac1f10611ced5508db07 |
import sys
from os import listdir
from os.path import isfile, join
from datetime import datetime
# Map declaration this will list all the amino acids with their respective codon
# m: codon -> one-letter amino-acid code; '1' was meant to mark the start
# codon and '0' marks stop codons.
# NOTE(review): 'ATG' appears TWICE in this literal ('1' then 'M'); the later
# entry wins, so ATG always maps to 'M' and the '1' start marker is dead.
m = {'GCT':'A','GCC':'A','GCA':'A','GCG':'A','CGT':'R','CGC':'R','CGA':'R','CGG':'R','AGA':'R','AGG':'R','AAT':'N','AAC':'N','GAT':'D','GAC':'D','TGT':'C','TGC':'C','CAA':'Q','CAG':'Q','GAA':'E','GAG':'E','GGT':'G','GGC':'G','GGA':'G','GGG':'G','CAT':'H','CAC':'H','ATT':'I','ATC':'I','ATA':'I','ATG':'1','TTA':'L','TTG':'L','CTT':'L','CTC':'L','CTA':'L','CTG':'L','AAA':'K','AAG':'K','ATG':'M','TTT':'F','TTC':'F','CCT':'P','CCC':'P','CCA':'P','CCG':'P','TCT':'S','TCC':'S','TCA':'S','TCG':'S','AGT':'S','AGC':'S','ACT':'T','ACC':'T','ACA':'T','ACG':'T','TGG':'W','TAT':'Y','TAC':'Y','GTT':'V','GTC':'V','GTA':'V','GTG':'V','TAA':'0','TGA':'0','TAG':'0'}
# n: per-letter occurrence counters, including '1' (start), '0' (stop) and
# 'X' (unrecognized codon).
n = {'A':0,'R':0,'N':0,'D':0,'C':0,'Q':0,'E':0,'G':0,'H':0,'I':0,'1':0,'L':0,'K':0,'M':0,'F':0,'P':0,'S':0,'T':0,'W':0,'Y':0,'V':0,'0':0,'X':0 }
#This method returns the amino acid that belongs to a codon, if the codon is not identified it will return an X
def getCodon(tri):
    """Return the one-letter amino-acid code for codon *tri* and tally it in n.

    Unknown codons are tallied and returned as 'X'.
    Fix: dict.has_key() was removed in Python 3; the `in` operator is the
    portable (and idiomatic) membership test on both Python 2 and 3.
    """
    if tri in m:
        a = m[tri]
        n[a] += 1
        return a
    else:
        n['X'] += 1
        return 'X'
# Method that will count how many amino acid are
def countNucleotides(folderName):
    """Translate every .fa FASTA file in folderName to amino-acid sequences.

    NOTE(review): despite the name and the original docstring (which spoke of
    nucleotide counts), this maps codon triplets to amino-acid letters via
    getCodon and writes one 'results/<name>.proteins' file per input, while
    accumulating per-letter tallies in the global map n.
    """
    # Only files ending in ".fa" are processed.
    archivosFna = [filefna for filefna in listdir(folderName) if filefna[-3:] == ".fa"]
    if len(archivosFna) == 0:
        print(" The Folder given has not fasta files ")
        return
    # Start time, used for the progress messages below.
    instanteinicial = datetime.now()
    for filefna in archivosFna:
        filePath = folderName+'/'+filefna
        print " --- Analyzing "+filefna+" ------------ "+str((datetime.now()-instanteinicial).seconds)+" sec"
        fasta = open(filePath,'r')
        # NOTE(review): assumes a 'results/' directory already exists.
        rfasta = open('results/'+filefna+".proteins",'w')
        fastaR = ""
        for line in fasta:
            rline = ''
            # Skip FASTA header lines.
            if line[0] == '>': continue
            i = 0
            # NOTE(review): line[:-2] assumes two trailing characters (\r\n?),
            # and line[i+1]/line[i+2] can index past the end if the sequence
            # length is not a multiple of 3 — confirm the input format.
            while i < len(line[:-2]):
                rline += getCodon(line[i]+line[i+1]+line[i+2])
                i+=3
            fastaR += (rline+'\n')
        rfasta.write(fastaR)
        rfasta.close()
        fasta.close()
        print " --- Generated "+filefna+".proteins --- "+str((datetime.now()-instanteinicial).seconds)+" sec"
# This method will print the countingof the amino acids
def printCounting():
    """Print the per-amino-acid tallies accumulated in the global map n."""
    print 'Results ----------------------------------------------------------------------------------------'
    for i in n:
        print "> "+i+": "+str(n[i])
def main():
    """Validate the CLI argument and run the counter over the given folder."""
    if len(sys.argv) != 2:
        raise Exception('Usage python Counting_DNA_Nucleotides.py <FolderName>')
    # NOTE(review): isfile() is also False for NONEXISTENT paths, so a missing
    # path is treated as a folder here and only fails later inside listdir —
    # confirm whether an explicit existence check was intended.
    if not isfile(join(sys.argv[1])):
        countNucleotides(sys.argv[1])
        printCounting()
    else:
        raise Exception('<FolderName> must be a Folder')
if __name__=='__main__':
    try:
        main()
    # NOTE(review): main() raises plain Exception, which `except ValueError`
    # does NOT catch — usage errors therefore surface as full tracebacks,
    # never as this friendly message. Confirm which behavior is wanted.
    except ValueError as e:
        print "*** Error in Args ***"
|
18,470 | 484f223e9c30a2a4bce506b87da34031b4928a1d | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-05-09 14:20:58
# @Author : lczean (lczean@163.com)
# @Link : https://github.com/lczean
# @Version : 1.0
# @File : list.py
'''
# list 显示列表
# isinstance 内置函数 判断数据类型 返回True或False
'''
# Lists are 0-indexed. (The original comment also called lists "LIFO /
# stack-like", which only describes append/pop usage, not lists in general.)
# MOVIES = ["肖申克的救赎", "霸王别姬", "大话西游"]
# MOVIES.insert(1, 1997)
# MOVIES.insert(3, 1993)
# MOVIES.append(1995)
# A deliberately nested list: title, year, director, runtime,
# then [lead actor, [more cast members]].
MOVIES = ["肖申克的救赎", 1994, "弗兰克·德拉邦特", 142,
          ["蒂姆·罗宾斯",
           ["摩根·弗里曼", "鲍勃·冈顿", "威廉姆·塞德勒", "克兰西·布朗 & 吉尔·贝罗斯"]]]
# Walk the list, checking along the way whether an item is itself a list.
# No.1: top level only — nested lists print as whole list objects.
for each_item in MOVIES:
    print(each_item)
print("***************")
# No.2: one level of nesting handled explicitly with isinstance.
for each_item in MOVIES:
    if isinstance(each_item, list):
        for nested_item in each_item:
            print(nested_item)
    else:
        print(each_item)
print("***************")
# No.3: three levels of nesting — shows why copy/paste does not scale
# (the recursive function below replaces all of this).
for each_item in MOVIES:
    if isinstance(each_item, list):
        for nested_item in each_item:
            if isinstance(nested_item, list):
                for deeper_item in nested_item:
                    if isinstance(deeper_item, list):
                        for deepest_item in deeper_item:
                            print(deepest_item)
                    else:
                        print(deeper_item)
            else:
                print(nested_item)
    else:
        print(each_item)
print("***************")
# No.4
# Instead of duplicating loops per nesting level, define one function:
#   def name(parameters):
#       body
def print_lists(the_list):
    """Print every item of *the_list*, recursing into nested lists."""
    for item in the_list:
        if isinstance(item, list):
            # Recurse for nested lists (CPython's default recursion limit
            # is about 1000 frames, so only extremely deep nesting raises).
            print_lists(item)
        else:
            print(item)
# Print the whole nested MOVIES structure using the recursive helper.
print_lists(MOVIES)
|
18,471 | 829518f84f9afe43aac771cfeebe766ffaaad879 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sqlalchemy import create_engine
# NOTE(review): the engine is created but never used in this chunk — confirm
# it is needed (a later section may query it).
engine = create_engine('sqlite:///test.db')
### 1. MultiIndex (hierarchical index)
# create multi index
# from a list of tuples
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
          ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = list(zip(*arrays))
index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
# or from 'products' of iterables
iterables = [['bar', 'baz', 'foo', 'qux'], ['one', 'two']]
foo = pd.MultiIndex.from_product(iterables, names=['first', 'second'])
# NOTE(review): Index.get_values() is deprecated (removed in pandas 1.0);
# modern code uses .to_numpy() or np.asarray(index).
foo.get_values()
# use the index when creating Series/DataFrame
s = pd.Series(np.random.randn(8), index=index)
df = pd.DataFrame(np.random.randn(3, 8), index=['A', 'B', 'C'], columns=index)
# specify multi index when creating series or dataframe
arrays = [np.array(['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux']),
          np.array(['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two'])]
pd.Series(np.random.randn(8), index=arrays)
pd.DataFrame(np.random.randn(8, 4), index=arrays)
# plain tuples can be used directly as index, too, but it's of course not
# recommended
pd.Series(np.random.randn(8), index=tuples)
### 2. using multi index
# show names of the index
df.columns.names
# get an array of tuples from the index
df.columns.values
# or list
df.index.tolist()
# get different levels respectively
# by index
index.get_level_values(0)
# by name
index.get_level_values('second')
# basic indexing
s = pd.Series(np.random.randn(8), index=arrays)
df = pd.DataFrame(np.random.randn(3, 8), index=['A', 'B', 'C'], columns=index)
bi_df = df
bi_df_T = df.T
s['foo']
df['bar']
df['bar', 'two']
df['bar']['two']
bi_df_T.loc['bar', 'one']
bi_df_T.loc['bar'].loc['one']
# alignment and reindex
# works the same as normal single index
s + s[:-2]
s + s[::2]
s.reindex(index[::2])
s.reindex([('foo', 'two'), ('bar', 'one'), ('qux', 'one'), ('baz', 'one')])
### 3. advanced indexing
df = df.T
df.loc['bar']
df.loc['bar', 'two']
# slicing rows
df.loc['baz':'foo']
df.loc[('baz', 'two'):('qux', 'one')]
# select using list of tuples
df.loc[[('bar', 'two'), ('qux', 'one')]]
# the same
# NOTE(review): .ix is deprecated and removed in modern pandas — use .loc.
df.ix[[('bar', 'two'), ('qux', 'one')]]
# using slicers
# XXX: when using slicers, the MultiIndex must be sorted MANUALLY beforehand
def mklbl(prefix, n):
    """Return the n labels prefix0, prefix1, ..., prefix{n-1}."""
    return ["{}{}".format(prefix, i) for i in range(n)]
# 4-level row index (A* x B* x C* x D*) and 2-level column index for the
# slicer examples below.
miindex = pd.MultiIndex.from_product([mklbl('A',4),
                                      mklbl('B',2),
                                      mklbl('C',4),
                                      mklbl('D',2)])
micolumns = pd.MultiIndex.from_tuples([('a','foo'),('a','bar'),
                                       ('b','foo'),('b','bah')],
                                      names=['lvl0', 'lvl1'])
dfmi = pd.DataFrame(
    np.arange(len(miindex)*len(micolumns)).\
    reshape((len(miindex),len(micolumns))),
    index=miindex,
    columns=micolumns
)
# shuffling the rows
dfmi = dfmi.iloc[np.random.permutation(len(dfmi))]
# another method
dfmi = dfmi.reindex(np.random.permutation(dfmi.index))
# shuffling the columns
dfmi = dfmi.reindex(columns=np.random.permutation(dfmi.columns))
# or
dfmi = dfmi.iloc[:,np.random.permutation(len(dfmi.columns))]
# SORT IT !
dfmi = dfmi.sort_index().sort_index(axis=1)
# or
dfmi.sort_index(inplace=True)
dfmi.sort_index(axis=1, inplace=True)
# slice(None) selects all rows of that level
# not that levels cannot NOT be omited when using slice on MultiIndex!
# XXX: the last ':' is mandatory
dfmi.loc[(slice('A1','A3'),slice(None),['C1','C3']),:]
# an alternative (better) syntax
idx = pd.IndexSlice
dfmi.loc[idx[:,:,['C1','C3']],idx[:,'foo']]
# can mixed with boolean indexers
mask = dfmi[('a','foo')]>200
dfmi.loc[idx[mask,:,['C1','C3']],idx[:]]
dfmi.loc[idx[mask,:,['C1','C3']],:]
# operate on a single axis
df2 = dfmi.copy()
# and values can be set directly
df2.loc[idx[:,:,['C1','C3']],:] = df2/1000
df2
df2.loc(axis=0)[:,:,['C1','C3']] = -10
# note that without axis=0, this will NOT work because it's ambiguous
#df2.loc[:,:,['C1','C3']]
# the same problem
#df2.loc[idx[:,:,['C1','C3']]]
18,472 | 3f6853dbaedd9132bfaf00fec4a4f7e7d905c2ee | import sys
class BellmanFordAlgorithm:
    """
    Single-source shortest paths via the Bellman-Ford algorithm.

    Graphs are given as two parallel adjacency matrices: adj_matrix[u][v] is
    truthy when edge u->v exists, and adj_matrix_with_distances[u][v] holds
    that edge's weight. Unreachable nodes keep the sentinel distance
    sys.maxsize (as in the original interface).

    Bug fix vs. the original: edges out of not-yet-reached nodes were relaxed
    with `sys.maxsize + weight`, which leaked bogus finite distances and
    predecessor links for unreachable nodes and could falsely report a
    negative cycle when the unreachable region had negative edges; relaxation
    now skips nodes still at the sentinel distance.
    """

    def __init__(self, adj_matrix_with_distances: list, adj_matrix: list):
        self.number_of_nodes = len(adj_matrix_with_distances)
        self.distance_tab = [0 for _ in range(len(adj_matrix_with_distances))]
        self.previous_node_tab = [0 for _ in range(len(adj_matrix_with_distances))]
        self.adj_matrix_with_distances = adj_matrix_with_distances
        self.adj_matrix = adj_matrix

    def distances_init(self, source_node: int):
        """Reset distances to 'infinity' (sys.maxsize) and predecessors to None."""
        for i in range(self.number_of_nodes):
            self.distance_tab[i] = sys.maxsize
            self.previous_node_tab[i] = None
        self.distance_tab[source_node] = 0

    def relaxation(self, u: int, v: int):
        """Relax edge u->v when it shortens the known distance to v."""
        # Skip relaxation from unreached nodes — see class docstring.
        if self.distance_tab[u] == sys.maxsize:
            return
        candidate = self.distance_tab[u] + self.adj_matrix_with_distances[u][v]
        if self.distance_tab[v] > candidate:
            self.distance_tab[v] = candidate
            self.previous_node_tab[v] = u

    def find_shortest_path(self, source_node: int) -> bool:
        """Run Bellman-Ford from source_node.

        Returns False when a negative cycle reachable from the source is
        detected, True otherwise; results are left in distance_tab and
        previous_node_tab.
        """
        self.distances_init(source_node)
        nodes = range(self.number_of_nodes)
        # |V| - 1 rounds of relaxing every edge.
        for _ in range(self.number_of_nodes - 1):
            for u in nodes:
                for v in nodes:
                    if self.adj_matrix[u][v]:
                        self.relaxation(u, v)
        # One extra pass: any still-improvable edge from a reached node
        # implies a negative cycle.
        for u in nodes:
            for v in nodes:
                if self.adj_matrix[u][v] and self.distance_tab[u] != sys.maxsize:
                    if self.distance_tab[v] > (self.distance_tab[u] + self.adj_matrix_with_distances[u][v]):
                        return False
        return True

    def all_shortest_paths(self, source_node: int):
        """Print the distance and reconstructed path to every node."""
        print("Distances from node: {}".format(source_node))
        if not self.find_shortest_path(source_node):
            print("ERROR negative cycle")
            return
        for target in range(len(self.distance_tab)):
            # Walk the predecessor chain back to the source.
            path_nodes = [target]
            prev = self.previous_node_tab[target]
            while prev is not None:
                path_nodes.append(prev)
                prev = self.previous_node_tab[prev]
            path_nodes.reverse()
            path_string = "d[{}] = {} ==> [".format(target, self.distance_tab[target])
            path_string += " - ".join(str(node) for node in path_nodes)
            path_string += "]"
            print(path_string)

    def get_distances_list(self) -> list:
        """Return a copy of the current distance table."""
        return self.distance_tab.copy()
|
18,473 | e0353362b152bcc4ffe09fbf9faa360c6a33abd2 | import time
import random
import datetime
from sentidos.som import FREQUENCIA, doppler
print(FREQUENCIA)
doppler()
# Everything below this ``exit()`` is dead code kept for reference.
exit()
#import subprocess
# Importing specific members of the module
from subprocess import run, PIPE
#r = subprocess.run(['free', '-h'], stdout=subprocess.PIPE)
# NOTE(review): '-install' is not a valid apt-get option (likely 'install'
# was meant), and the test below is inverted -- ``not r.returncode != 0`` is
# true on SUCCESS, so the failure message is printed when the command worked.
r = run(['apt-get', '-install','-y','sl'], stdout=PIPE, stderr=PIPE)
if not r.returncode != 0:
    print('Deu merda.......')
    print(r.stdout)
exit()
# Or: import time, random, datetime on one line
letras = ['A', 'B', 'C ', 'D']  # NOTE(review): 'C ' has a trailing space -- confirm intended
print(random.randint(100, 999))
time.sleep(5)
print(random.choice(letras))
print(datetime.datetime.now())
hoje = datetime.datetime.now()
print(hoje.strftime('%d/%m/%Y'))
18,474 | 34d25e31a4f6e10c29d8302e9b21f75b64344735 | bitcoin = float(input())
# Currency exercise: convert bitcoins plus Chinese yuan into euros and
# subtract a commission percentage. Rates are hard-coded
# (presumably BGN leva as the intermediate currency -- TODO confirm).
ch_una = float(input())      # amount of Chinese yuan
commission = float(input())  # commission in percent
bit = bitcoin * 1168         # bitcoins -> leva (1 BTC = 1168 lv)
ch = ch_una * 0.15           # yuan -> dollars (rate 0.15) -- TODO confirm
ch_lv = ch * 1.76            # dollars -> leva
total = (bit + ch_lv) / 1.95 # leva -> euros (1 EUR = 1.95 lv)
com = total * commission/100 # commission taken in euros
result = total - com
# NOTE: the format spec ' .2f' deliberately prints a leading space.
print(f'{result: .2f}')
|
18,475 | fc7996c02a5ee30d667188a1662fc969fbf04f9a | # input:test_group_event
# group_users
# user_emb,luser_emb,ruser_emb,item_emb
# output:hit@n, MRR
import pandas as pd
import numpy as np
import pickle as pk
# Evaluation round to score. Renamed from ``round``, which shadowed the
# ``round`` builtin.
ROUND = 8
test_user_file = "./data/dataset/ciao/test/test_user.pkl"
user_emb_file = "./data/dataset/ciao/output/user_r0.1N2_round{}".format(ROUND)
#item_emb_file = "./data/dataset/ciao/output/item_r0.1N2_round{}".format(ROUND)
item_emb_file = "./data/dataset/ciao/output/item_r0.1N2_init"
user_user_map_matrix_file = "./data/dataset/ciao/output/matrix_uu_r0.1N2_round{}.pkl".format(ROUND)
#user_item_map_matrix_file = "./data/dataset/ciao/output/matrix_uv_r0.1N2_round{}.pkl".format(ROUND)
user_item_map_matrix_file = "./data/dataset/ciao/output/matrix_uv_r0.1N2_init.pkl"
# iter_time = 1832176
# user_emb_file = "./data/baseline/ciao/joint_model/user{}".format(iter_time)
# item_emb_file = "./data/baseline/ciao/joint_model/item{}".format(iter_time)
# user_user_map_matrix_file = "./data/baseline/ciao/joint_model/matrix_uu{}.pkl".format(iter_time)
# user_item_map_matrix_file = "./data/baseline/ciao/joint_model/matrix_uv{}.pkl".format(iter_time)
# Embedding dimensionality (declared but not referenced below).
DIM = 50
def get_emb(vertex_emb_file):
    """Load a tab-separated embedding file into a dict.

    Each line is ``<vertex>\t<space-separated floats>``; the result maps
    each vertex id to a float32 numpy vector.
    """
    frame = pd.read_csv(vertex_emb_file, sep="\t", names=["vertex", "emb"], engine="python")
    return {
        vertex: np.array(str(raw).strip().split(" ")).astype(np.float32)
        for vertex, raw in zip(frame["vertex"], frame["emb"])
    }
# Load embeddings and the learned bilinear mapping matrices.
user_emb = get_emb(user_emb_file)
item_emb = get_emb(item_emb_file)
# NOTE(review): the file handles passed to pickle.load are never closed;
# prefer ``with open(...) as f``. Also: pickle is unsafe on untrusted files.
user_map_matrix = pk.load(open(user_user_map_matrix_file,'rb'))
item_map_matrix = pk.load(open(user_item_map_matrix_file,'rb'))
print("user len:%d, item len:%d" % (len(user_emb),len(item_emb)))
print("load file finished")
def cal_user_friend_topk(user, friend, candi_list):
    """Score *friend* against the negative candidates for *user*.

    Returns ``(hit_at_k, rr)`` where hit_at_k maps each k in the global
    ``top_k`` to 1 if the positive friend ranked within the top k, and rr
    is the reciprocal rank (0 if it never hit).
    """
    global user_emb
    anchor_emb = user_emb.get(user)
    # Bilinear score user^T * M * candidate; the positive friend goes first.
    scores = {friend: anchor_emb.dot(user_map_matrix).dot(user_emb.get(friend))}
    for candidate in candi_list:
        if candidate in user_emb:
            scores[candidate] = anchor_emb.dot(user_map_matrix).dot(user_emb.get(candidate))
    ranked = sorted(scores.items(), key=lambda kv: kv[1], reverse=True)
    hit_at_k = dict()
    rr = 0
    for k in top_k:
        hit = 0
        for rank, (candidate, _score) in enumerate(ranked, start=1):
            if candidate == friend:
                if rank <= k:
                    hit = 1
                    rr = 1 / float(rank)
                break
        hit_at_k[k] = hit
    return hit_at_k, rr
def cal_user_item_topk(user,item,candi_list):
    """Score the positive *item* against the negative candidate items for
    *user* and return (hit@k map keyed by the global ``top_k``, reciprocal
    rank -- 0 when the item never ranks within the largest k)."""
    global user_emb,item_emb
    target_user_emb = user_emb.get(user)
    target_item_emb = item_emb.get(item)
    rec_items_dict = dict()
    # Bilinear score user^T * M * item; the positive item is scored first.
    rec_items_dict[item] = target_user_emb.dot(item_map_matrix).dot(target_item_emb)
    for candi in candi_list:
        if candi in item_emb:
            rec_items_dict[candi] = target_user_emb.dot(item_map_matrix).dot(item_emb.get(candi))
    # sort recommendation list (descending score; ties keep insertion order)
    sorted_rec_items_dict = sorted(rec_items_dict.items(), key=lambda d: d[1], reverse=True)
    user_k_map = dict()
    rr = 0
    for k in top_k:
        rank = hit = 0
        for t in sorted_rec_items_dict:
            rank += 1
            if item == t[0]:
                # Found the positive item; it is a hit only within the top k.
                if rank <= k:
                    hit = 1
                    rr = 1 / float(rank)
                break
        user_k_map[k] = hit
    return user_k_map, rr
if __name__ == "__main__":
    # Cut-off ranks for hit@k evaluation.
    top_k = [1,5,10,15,20,40,60,80,100]
    friends_hit_at_k_map = dict()
    items_hit_at_k_map = dict()
    for k in top_k:
        friends_hit_at_k_map[k] = items_hit_at_k_map[k] = 0.0
    # NOTE(review): MRR, friend_avg_hit and item_avg_hit are assigned but
    # never used; friend_MRR/item_MRR are the accumulators actually used.
    MRR = 0.0
    test_user = pk.load(open(test_user_file,'rb'))
    friend_lost = item_lost = 0
    friend_avg_hit = friend_MRR = pos_friend_num = 0.0
    item_avg_hit = item_MRR = pos_item_num = 0.0
    for user, info in test_user.items():
        friends_candi_list = info.get("neg_friend")
        items_candi_list = info.get("neg_item")
        if info.get("pos_friend",0) != 0:
            for pos_friend in info.get("pos_friend").keys():
                # Skip positives with no learned embedding.
                if pos_friend not in user_emb:
                    friend_lost += 1
                    continue
                pos_friend_num += 1
                hit_at_k_map, rr = cal_user_friend_topk(user, pos_friend, friends_candi_list)
                for k in top_k:
                    friends_hit_at_k_map[k] += hit_at_k_map[k]
                friend_MRR += rr
        if info.get("pos_item", 0) != 0:
            for pos_item in info.get("pos_item").keys():
                if pos_item not in item_emb:
                    item_lost += 1
                    continue
                pos_item_num += 1
                hit_at_k_map, rr = cal_user_item_topk(user, pos_item, items_candi_list)
                for k in top_k:
                    items_hit_at_k_map[k] += hit_at_k_map[k]
                item_MRR += rr
    print("friend lost num:%d, item lost num:%d" % (friend_lost,item_lost))
    # NOTE(review): the divisions below raise ZeroDivisionError when the
    # test set yields no usable positives -- confirm that cannot happen.
    print("friend evaluation")
    for k in top_k:
        print("%f" % (friends_hit_at_k_map[k]/(pos_friend_num)))
    print("%f" % (friend_MRR / pos_friend_num))
    print("item evaluation")
    for k in top_k:
        print("%f" % (items_hit_at_k_map[k]/(pos_item_num)))
    print("%f" % (item_MRR / pos_item_num))
|
18,476 | a8f4f45114f311e9ad227a91d12dfdfd6caffb4b | #program liczący za użytkownika
# Ask the user for a range and a step, then count through it for them.
pierwsza_liczba = int(input("Podaj pierwszą liczbę"))  # start of the range
druga_liczba = int(input("Podaj drugą liczbę"))        # end (exclusive)
roznica = int(input("Co ile mam liczyć?"))             # step size
for i in range(pierwsza_liczba, druga_liczba, roznica):
    print(i, end=" ")
# Keep the console window open until the user confirms.
input("\n\nAby zakonczyć program, naciśnij Enter")
|
18,477 | 3613263830fd59f1bf076bd367c59a1bcae9748c | from flask import Flask
# Minimal Flask demo application: a landing page plus three static pages.
app = Flask(__name__)
@app.route('/')
def hello():
    """Landing page."""
    return 'Hello from the simple webapp.'
@app.route('/page1')
def page1():
    """Static page 1."""
    return 'This is page 1.'
@app.route('/page2')
def page2():
    """Static page 2."""
    return 'This is page 2.'
@app.route('/page3')
def page3():
    """Static page 3."""
    return 'This is page 3.'
if __name__ == '__main__':
    # debug=True enables the reloader/debugger -- development use only.
    app.run(debug=True)
|
18,478 | e3f21a8ededab34155455375894fce3737fc9127 | """
Site global settings
"""
def site():
    """Return the global site configuration mapping.

    Currently only the CORS origin whitelist is configured.
    """
    cors_whitelist = (
        '192.168.56.101:5000',
        '192.168.56.101:8000',
        '192.168.56.101',
    )
    return {'cors_origin_whitelist': cors_whitelist}
|
18,479 | 71bcc0584984794ef087e21da2afadc17ac89810 | # fibonacci.py
def fib(n=10):
    """Return the first *n* terms of the sequence 1, 2, 3, 5, 8, ...

    Each term is the sum of the previous two, seeded with 1 and 2.
    The default ``n=10`` reproduces the original hard-coded behaviour.
    """
    if n <= 0:
        return []
    seq = [1, 2][:n]
    while len(seq) < n:
        seq.append(seq[-1] + seq[-2])
    return seq
def main():
    # Print the default 10-term sequence, matching the original script.
    print('OUTPUT', fib())
if __name__ == "__main__":
    main()
|
18,480 | 0264b2fb5d73e538426e657900c6eec63790c032 | from abc import ABC, abstractmethod
from typing import Iterable
class QeFont:
    '''Abstract class for reading data from a defined data source.

    Sources may be (for example, without excluding others):
        - SQL
        - File
        - URL
    Subclasses should implement the abstract functions defined below.

    NOTE(review): the class does not inherit from ``ABC`` (imported above),
    so ``@abstractmethod`` is NOT enforced at instantiation time -- confirm
    whether ``class QeFont(ABC)`` was intended.'''
    @abstractmethod
    def getUnic(self) -> str:
        '''Return a single value as a string.'''
        pass
    @abstractmethod
    def getLlista(self) -> Iterable[str]:
        '''Return an iterable of strings.'''
        pass
    @abstractmethod
    def getTaula(self) -> Iterable[Iterable[str]]:
        '''Return an iterable of iterables of strings (a table).'''
        pass
|
18,481 | 1d7b37bdfebdd92cca2704fa23cce972b7ed495a | import pygame
from pygame.locals import *
import random
from Project.RockPaperScissors.src.Enum.GameState import *
from Project.RockPaperScissors.src.Enum.ResultType import *
from Project.RockPaperScissors.src.Enum.RpsPlan import *
from Project.RockPaperScissors.src.GameLogic.ResourceLoader import *
from Utility.PygameWrapper.Input.KeyInput import *
# Rock-paper-scissors (janken) game built on pygame.
class MyGame:
    #<
    def __init__(self):
        # Initialise pygame
        pygame.init()
        # Game state
        self.isGameEnd = False # set True to leave the main loop
        self.state = GameState.Ready # current phase of the game
        self.result = ResultType.Null # outcome of the last round
        self.playerPlan = RpsPlan.Null # player's hand
        self.enemyPlan = RpsPlan.Null # opponent's hand
        self.resourceLoader = ResourceLoader() # loads and holds textures
        self.inputGetter = KeyInput() # keyboard input helper
        self.screen = pygame.display.set_mode((800, 600)) # drawing surface
        self.clock = pygame.time.Clock() # frame-rate limiter
    #>
    #<
    def __del__(self):
        # Shut pygame down
        pygame.quit()
    #>
    #<
    # Run the game
    def run(self):
        # Main game loop
        while self.isGameEnd == False:
            # Update
            self.update()
            # Draw
            self.draw()
            # Cap the frame rate at 60 FPS
            self.clock.tick(60)
            # Check whether the game should end
            self.gameEndCheck()
    #>
    #<
    # Check whether the game should end
    def gameEndCheck(self):
        # Inspect every pending pygame event
        for event in pygame.event.get():
            # Quit event or ESC key ends the game
            if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
                # Raise the end-of-game flag
                self.isGameEnd = True
    #>
    #<
    # Per-frame update
    def update(self):
        # Refresh keyboard state
        self.inputGetter.updateKey();
        # Waiting for the player's choice
        if self.state == GameState.Ready:
            # Read the player's hand selection
            self.getPlayerInput()
            # Once the player has picked a hand...
            if self.playerPlan != RpsPlan.Null:
                # Snapshot of currently pressed keys
                # NOTE(review): inputKeyPushData is never used -- confirm removable.
                inputKeyPushData = pygame.key.get_pressed()
                # Space confirms the choice
                if self.inputGetter.isTrigger(pygame.K_SPACE):
                    # Pick the opponent's hand
                    self.setEnemyPlan()
                    # Decide the winner
                    self.WinLossJudge()
                    # Switch to the result screen
                    self.state = GameState.Result
        # Showing the result
        elif self.state == GameState.Result:
            # Space resets everything for another round
            if self.inputGetter.isTrigger(pygame.K_SPACE):
                self.state = GameState.Ready
                self.result = ResultType.Null
                self.playerPlan = RpsPlan.Null
                self.enemyPlan = RpsPlan.Null
    #>
    #<
    # Read the player's hand from the keyboard
    def getPlayerInput(self):
        # Map the number keys 1/2/3 to rock/scissors/paper
        if self.inputGetter.isTrigger(pygame.K_1):
            self.playerPlan = RpsPlan.Rock
        elif self.inputGetter.isTrigger(pygame.K_2):
            self.playerPlan = RpsPlan.Scissors
        elif self.inputGetter.isTrigger(pygame.K_3):
            self.playerPlan = RpsPlan.Paper
    #>
    #<
    # Pick the opponent's hand at random
    def setEnemyPlan(self):
        self.enemyPlan = RpsPlan(random.randrange(1, 4))
    #>
    #<
    # Decide the winner
    def WinLossJudge(self):
        # Classic janken modular arithmetic: 0 = draw, 1 = loss, 2 = win
        JudgeNum = (self.playerPlan - self.enemyPlan + 3) % 3
        # Draw
        if JudgeNum == 0:
            self.result = ResultType.Draw
        # Loss
        elif JudgeNum == 1:
            self.result = ResultType.Lose
        # Win
        elif JudgeNum == 2:
            self.result = ResultType.Win
    #>
    #<
    # Draw the current frame
    def draw(self):
        self.screen.fill((0, 128, 255))
        # "Opponent" label
        self.screen.blit(self.resourceLoader.texture_aite, (20, 20))
        # "You" label
        self.screen.blit(self.resourceLoader.texture_anata, (20, 210))
        if self.playerPlan != RpsPlan.Null:
            # Player's chosen hand
            self.screen.blit(self.getRpsPlanTexture(self.playerPlan), (330, 210))
        # Before the showdown
        if self.state == GameState.Ready:
            # Opponent 'shuffling' through hands
            self.screen.blit(self.getRpsPlanTexture(RpsPlan(random.randrange(1, 4))), (330, 20))
            # Controls explanation
            self.screen.blit(self.resourceLoader.texture_setsumei, (100, 330))
        # After the showdown
        elif self.state == GameState.Result:
            # Opponent's hand
            self.screen.blit(self.getRpsPlanTexture(self.enemyPlan), (330, 20))
            # Result banner (win / lose / draw)
            if self.result == ResultType.Win:
                resultTexture = self.resourceLoader.texture_kachi
            elif self.result == ResultType.Lose:
                resultTexture = self.resourceLoader.texture_make
            else:
                resultTexture = self.resourceLoader.texture_aiko
            self.screen.blit(resultTexture, (480, 210))
        pygame.display.update()
    #>
    # Map a hand value to its texture
    def getRpsPlanTexture(self, plan):
        if plan == RpsPlan.Rock: return self.resourceLoader.texture_gu
        if plan == RpsPlan.Scissors: return self.resourceLoader.texture_choki
        if plan == RpsPlan.Paper: return self.resourceLoader.texture_pa
18,482 | 345bc15708cc5f956863a292f6f28c8a7dd8db2f | from storeSignUpDialog import Ui_Dialog
from PyQt5.QtWidgets import QWidget, QMessageBox
import pymysql
class storeDialog(QWidget, Ui_Dialog):
    """Merchant sign-up dialog: validates the form and inserts a row into
    the MySQL ``store`` table."""
    def __init__(self, db, dbcursor):
        super(storeDialog, self).__init__()
        self.setupUi(self)
        self.db = db
        self.dbcursor = dbcursor
        self.pushButton.clicked.connect(self.signUp)
    def _next_store_id(self):
        """Return the next sequential store id, zero-padded to 3 digits
        ('001', '002', ..., then '100' and beyond unpadded)."""
        self.dbcursor.execute('select Sid from store;')
        rows = self.dbcursor.fetchall()
        if len(rows) == 0:
            return '001'
        # Equivalent to the original three-branch padding logic.
        return str(int(rows[-1][0]) + 1).zfill(3)
    def signUp(self):
        """Validate the form fields; on success insert the store and close."""
        if self.nameLineEdit.text() == '':
            QMessageBox.warning(self, 'Warning', '昵称不能为空')
        elif self.keyLineEdit.text() == '':
            QMessageBox.warning(self, 'Warning', '密码不能为空')
        elif self.keyLineEdit.text() != self.confirmLineEdit.text():
            QMessageBox.warning(self, 'Warning', '两次密码不一致')
        elif self.principalLineEdit.text() == '':
            QMessageBox.warning(self, 'Warning', '负责人不能为空')
        elif len(self.IDLineEdit.text()) != 18:
            QMessageBox.warning(self, 'Warning', '请输入18位身份证号')
        elif len(self.TelLineEdit.text()) != 11:
            QMessageBox.warning(self, 'Warning', '请输入正确的手机号码')
        else:
            # SECURITY FIX: the original built the INSERT by concatenating
            # raw user input, allowing SQL injection through any field.
            # Use a parameterized query instead.
            sql = ('insert into store(Sid, Sname, Tel, Scredit, principal, '
                   'ID_card, Bkey, BkeyProtect1, BkeyAns1, BkeyProtect2, '
                   'BkeyAns2) value (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);')
            params = (self._next_store_id(),
                      self.nameLineEdit.text(),
                      self.TelLineEdit.text(),
                      '1',  # initial credit rating
                      self.principalLineEdit.text(),
                      self.IDLineEdit.text(),
                      self.keyLineEdit.text(),
                      self.que1LineEdit.text(),
                      self.ans1LineEdit.text(),
                      self.que2LineEdit.text(),
                      self.ans2LineEdit.text())
            self.dbcursor.execute(sql, params)
            self.db.commit()
            QMessageBox.information(self, 'Information', '注册成功')
            self.close()
18,483 | 7afd16a761904abee9913237ffeaf74a7b1fc198 | """
retrieve tweets, embeddings, and persist in the database
"""
import os

import basilica
import tweepy
from dotenv import load_dotenv
from flask import Blueprint, jsonify, render_template

from web_app.models import db, Tweet, User
# Blueprint holding the Twitter-related routes, registered by the app factory.
twitter_routes = Blueprint("twitter_routes", __name__)
# Pull API credentials from the local .env file into the environment.
load_dotenv()
TWITTER_API_KEY = os.getenv("TWITTER_API_KEY")
TWITTER_API_SECRET = os.getenv("TWITTER_API_SECRET")
TWITTER_ACCESS_TOKEN = os.getenv("TWITTER_ACCESS_TOKEN")
TWITTER_ACCESS_TOKEN_SECRET = os.getenv("TWITTER_ACCESS_TOKEN_SECRET")
BASILICA_API_KEY = os.getenv("BASILICA_API_KEY")
# Module-level API clients, shared by all requests.
TWITTER_AUTH = tweepy.OAuthHandler(TWITTER_API_KEY, TWITTER_API_SECRET)
TWITTER_AUTH.set_access_token(TWITTER_ACCESS_TOKEN,
                              TWITTER_ACCESS_TOKEN_SECRET)
TWITTER = tweepy.API(TWITTER_AUTH)
BASILICA = basilica.Connection(BASILICA_API_KEY)
@twitter_routes.route('/reset')
def reset():
    """Drop and recreate every table. DESTRUCTIVE: wipes all stored data;
    exposed as a plain GET with no authentication -- development use only."""
    db.drop_all()
    db.create_all()
    return render_template('layout.html', title='DB RESET!', users=[])
@twitter_routes.route("/users/<twit_handle>")
def get_user(twit_handle=None):
    """Fetch a Twitter user and recent tweets, persist the user, return JSON.

    Bug fixes vs. the original: the function parameter now matches the
    route converter name (Flask passes it as ``twit_handle``); attributes
    are read from the fetched user object rather than the module-level
    ``TWITTER`` API client; ``handle.__json`` (name-mangled) is replaced by
    tweepy's public ``_json`` payload; and the tweet-text list iterates its
    own loop variable.
    """
    twitter_user = TWITTER.get_user(twit_handle)
    tweets = TWITTER.user_timeline(twit_handle, tweet_mode='extended', count=150)
    db_user = User(id=twitter_user.id)
    db_user.twit_handle = twitter_user.screen_name
    db_user.followers_count = twitter_user.followers_count
    db_user.location = twitter_user.location
    # user_timeline returns newest-first; guard users with no tweets.
    db_user.newest_tweet_id = tweets[0].id if tweets else None
    db.session.add(db_user)
    db.session.commit()
    return jsonify({
        "user": twitter_user._json,
        "tweets": [tweet.full_text for tweet in tweets]
    })
|
18,484 | 182ef3af106f7b87f3c08367f2ffe5f5d36eb69e | if "happy" > 2:
print "Hello world"
|
18,485 | 2ad90b9435cb4d9fd09fb0a35db4aaa0b8f6e46d | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from ..login.models import Users
from datetime import date, datetime, timedelta
# Create your models here.
class TripManager(models.Manager):
    """Custom manager with validation and creation helpers for Trip."""
    def trip_validation(self, post_data):
        """Return a list of error strings for the submitted trip form
        (an empty list means the data is valid)."""
        errors=[]
        today = datetime.today()
        if len(post_data['destination']) < 3 :
            errors.append("Destination needs to be more than 3 characters")
        if len(post_data['plan']) < 3 :
            errors.append("Plan needs to be longer than 3 characters")
        try:
            travelfrom = datetime.strptime(post_data['start_date'], '%Y-%m-%d')
            # timedelta(1) allows a trip that starts today.
            if travelfrom < today - timedelta(1):
                errors.append("Starting travel date is in the past")
            # NOTE(review): this compares the raw strings; it only works
            # because 'YYYY-MM-DD' sorts lexicographically -- confirm the
            # form always posts ISO-formatted dates.
            if post_data['end_date'] < post_data['start_date']:
                errors.append("End Date cannot be before Start Date")
        except ValueError:
            # strptime failed: missing or malformed date fields.
            errors.append('Must fill out dates')
        return errors
    def addtrip(self, cleanData, request):
        """Create and return a Trip planned by the logged-in session user."""
        return self.create(
            destination = cleanData['destination'],
            start_date = cleanData['start_date'],
            end_date = cleanData['end_date'],
            plan = cleanData['plan'],
            planner = Users.objects.get(id=request.session['id'])
        )
    def joiner(self, id, user):
        """Add *user* to the tourists of trip *id*.
        NOTE(review): returns the manager (``self``), not the trip --
        confirm callers expect that."""
        trip = self.get(id = id)
        trip.tourist.add(user)
        return self
class Trip(models.Model):
    # Where the trip goes and what is planned there.
    destination=models.CharField(max_length=255)
    plan=models.TextField(max_length=1000)
    start_date=models.DateField()
    end_date=models.DateField()
    # Users who joined the trip vs. the user who created it.
    tourist=models.ManyToManyField(Users, related_name="tourist")
    planner=models.ForeignKey(Users, related_name="planner")
    # NOTE(review): auto_now=True updates the field on EVERY save; for a
    # creation timestamp ``auto_now_add=True`` is the usual choice -- confirm.
    created_at=models.DateTimeField(auto_now=True)
    updated_at=models.DateTimeField(auto_now=True)
    objects = TripManager()
|
18,486 | eddbfb509d2fb1db3d10b53ee8db9dce4ec2bcd1 | import torch
import argparse
import numpy as np
from data_provider import datasets_factory
from utils import preprocess, metrics
from layers import CausalLSTMStack # noqa
# Hyper-parameters are hard-coded onto an empty argparse namespace.
parser = argparse.ArgumentParser(description='Process some integers.')
args = parser.parse_args()
args.dataset_name = "mnist"
args.train_data_paths = 'data/moving-mnist-example/moving-mnist-train.npz'
args.valid_data_paths = 'data/moving-mnist-example/moving-mnist-valid.npz'
args.save_dir = 'checkpoints/mnist_predrnn_pp'
args.img_width = 64
args.batch_size = 8
args.patch_size = 4 #1
args.seq_length = 19
args.num_hidden = [128, 64, 64, 64, 16]
args.num_layers = len(args.num_hidden)
args.lr = 0.00001
##### load the train data
train_input_handle, test_input_handle = datasets_factory.data_provider(
    args.dataset_name, args.train_data_paths, args.valid_data_paths,
    args.batch_size, args.img_width)
model = CausalLSTMStack(3, 2, args.num_hidden)  # filter_size, num_dims
# 1x1 conv that projects the top hidden state back to the patch channels.
decoder = torch.nn.Conv2d(16, 16, 1, 1)
### training loop
model.cuda()
decoder.cuda()
loss_fn = torch.nn.MSELoss()
optim = torch.optim.Adam(list(model.parameters())+list(decoder.parameters()), lr=args.lr)
for itr in range(10000):
    ims = train_input_handle.get_batch()
    ims = preprocess.reshape_patch(ims, args.patch_size)
    # (batch, time, ...) -> (time, batch, ...), then channels-first patches.
    ims = np.swapaxes(ims, 0, 1)
    h, c, m, z = [None]*4
    ims = np.swapaxes(ims, 2, 4)
    # Unroll the stack over the input sequence.
    for t in range(args.seq_length):
        tmp = torch.Tensor(ims[t])
        tmp = tmp.cuda()
        h, c, m, z = model(tmp, h, c, m, z)
    z = decoder(h[-1].permute(0,-1,1,2))
    y = torch.Tensor(ims[-1])
    y = y.cuda()
    loss = loss_fn(z, y)
    # BUG FIX: gradients must be cleared every iteration; without
    # zero_grad() they accumulate across iterations and corrupt every
    # update after the first.
    optim.zero_grad()
    loss.backward()
    optim.step()
    print(loss.item())
18,487 | 0e2fb9e757b092adf94a3f15ddf928e7f403415c | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 17:35:24 2021
@author: huw
"""
import concurrent.futures
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pytesseract
from itertools import repeat
from PIL import Image
from osgeo import gdal, osr
import cartopy.crs as ccrs
from shapely.geometry.polygon import Polygon
import shapely.vectorized
# Options: 'no mask' or 'yes mask'
use_mask = 'yes mask'  # whether the polygon AOI mask applies downstream
# Options: 'no' or 'yes'
save_table = 'yes'            # write the summary table to Excel at the end
georeference_images = 'yes'   # re-run the (slow) georeferencing step
# Search this directory for the image that will be processed
os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/Images2')
image_names = os.listdir()
# File names with extensions stripped; used for the output .tif names.
image_names_no_extension = [os.path.splitext(i)[0] for i in image_names]
image_names_no_extension.sort()
image_names.sort()
# %%
def map_extent(input_raster):
    """
    Return the corner coordinates of the input raster image.

    Parameters
    ----------
    input_raster : string
        Path to the raster, which should be in tiff format.

    Returns
    -------
    tuple
        (x_min, x_max, y_min, y_max) map coordinates of the raster.
    """
    gdal.UseExceptions()
    dataset = gdal.Open(input_raster)
    # Geotransform: (origin_x, pixel_w, rot, origin_y, rot, pixel_h).
    origin_x, pixel_width, _, origin_y, _, pixel_height = dataset.GetGeoTransform()
    x_min = origin_x
    x_max = origin_x + dataset.RasterXSize * pixel_width
    # pixel_height is negative for north-up rasters, so this is the bottom.
    y_min = origin_y + dataset.RasterYSize * pixel_height
    y_max = origin_y
    return (x_min, x_max, y_min, y_max)
def georeferenced_images(png_image, tif_image):
    """Georeference *png_image* using four fixed ground control points and
    warp it to EPSG:4326, writing ``<tif_image>.tif`` to the
    ``georeferenced`` directory. Works via a temporary tif in ``Images2``."""
    os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/Images2')
    ds = gdal.Translate('temporary.tif', png_image)
    # Set spatial reference:
    sr = osr.SpatialReference()
    sr.ImportFromEPSG(4326)
    # Pixel coordinates in the pre-georeferenced image
    left_x = 427.59
    right_x = 763.168
    bottom_y = 736.604
    top_y = 568.527
    # Enter the ground control points (GCPs)
    # Format: [map x-coordinate(longitude)], [map y-coordinate (latitude)], [elevation],
    # [image column index(x)], [image row index (y)]
    gcps = [gdal.GCP(0, 30, 0, left_x, top_y),
            gdal.GCP(180, 30, 0, right_x, top_y),
            gdal.GCP(180, -60, 0, right_x, bottom_y),
            gdal.GCP(0, -60, 0, left_x, bottom_y)]
    ds.SetProjection(sr.ExportToWkt())
    wkt = ds.GetProjection()
    ds.SetGCPs(gcps, wkt)
    os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/georeferenced')
    gdal.Warp(f"{tif_image}.tif", ds, dstSRS='EPSG:4326', format='gtiff')
    os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/Images2')
    os.remove('temporary.tif')
    # Drop the reference so GDAL flushes and closes the dataset.
    ds= None
# Georeference every source PNG once up front (slow; gated by the flag).
if georeference_images == 'yes':
    for i, w in zip(image_names, image_names_no_extension):
        georeferenced_images(i, w)
# %%
# starts in the top left going clockwise finishing at top left (x, y).
# coordinates in decimal degrees.
irregular_study_area = Polygon([(98, 13.5),
                                (125, 13.5),
                                (145, -3),
                                (145, -18),
                                (122, -18),
                                (98, -3),
                                (98, 13.5)])
# %%
def raster_to_array(input_raster):
    """
    Convert a raster tiff image to a numpy array.

    Parameters
    ----------
    input_raster : string
        Path to the raster, which should be in tiff format.

    Returns
    -------
    numpy array
        Pixel values of the raster's first band.
    """
    dataset = gdal.Open(input_raster)
    first_band = dataset.GetRasterBand(1)
    return first_band.ReadAsArray()
os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/georeferenced')
# NOTE(review): this rebinds the name of the ``georeferenced_images``
# FUNCTION defined above to a list of file names; the function is no
# longer reachable past this point -- consider renaming one of the two.
georeferenced_images = os.listdir()
georeferenced_images.sort()
# Use the first image to derive the shared extent and pixel grid.
test = raster_to_array(georeferenced_images[0])
test_extent = map_extent(georeferenced_images[0])
x0, x1 = test_extent[0], test_extent[1]
y0, y1 = test_extent[2], test_extent[3]
def linear_interpolation_of_x_y(georeferenced_array, extent_minimum,
                                extent_maximum):
    """
    Build an array of longitude or latitude values matching one axis of the
    input georeferenced array, by linearly interpolating between the two
    extent values.

    NOTE(review): the axis is selected by comparing *extent_minimum* against
    the module-level globals ``x0``/``y0``; if neither matches, ``inn`` is
    never assigned and the function raises -- fragile, confirm intended.

    Parameters
    ----------
    georeferenced_array : 2D Array
        The raster pixel grid whose axis length must be matched.
    extent_minimum : float or integer
        The first extent value of the georeferenced array.
    extent_maximum : float or integer
        The second extent value of the georeferenced array.

    Returns
    -------
    interpolated_coordinates : 1D array
        Interpolated latitude or longitude values for the length of the
        georeferenced array.
    """
    # Extracts axis 1 (columns) from the input array.
    # This represents the longitude.
    if extent_minimum == x0:
        inn = georeferenced_array[0, :]
    # Extracts axis 0 (rows) from the input array.
    # This represents the latitude.
    elif extent_minimum == y0:
        inn = georeferenced_array[:, 0]
    # Straight-line interpolation between the two extents, one value per pixel.
    linear_interpolation = [((i-0)*(extent_maximum-extent_minimum)/(
        (len(inn)-1)-0)+extent_minimum) for i, r in enumerate(inn)]
    # Calculates the difference between the value in front and the value
    # behind in the list
    difference = [y - x for x, y in zip(linear_interpolation,
                                        linear_interpolation[1:])]
    # Calculates the size of each array so to compare it to the size of the
    # input array.
    array_length = [np.size(np.arange(
        extent_minimum, extent_maximum, i)) for i in difference]
    # Select values that only match the longitude/latitude length then return
    # the first index in the list of matched values.
    # This list is a list of indexes that correspond to the index in the
    # variable difference.
    index_of_correct_value = [i for i, v in enumerate(
        array_length) if v == len(inn)][0]
    # Regenerate the axis with the step size that produced the right length.
    interpolated_coordinates = np.arange(extent_minimum,
                                         extent_maximum,
                                         difference[index_of_correct_value])
    return interpolated_coordinates
# Interpolated lon/lat axes for the shared grid, then a boolean mask of
# the pixels that fall inside the irregular study area polygon.
x_longitude = linear_interpolation_of_x_y(test, x0, x1)
y_latitude = linear_interpolation_of_x_y(test, y0, y1)
xx_longitude, yy_longitude = np.meshgrid(x_longitude, y_latitude[::-1])
mask = shapely.vectorized.contains(irregular_study_area,
                                   xx_longitude,
                                   yy_longitude)
def mask_and_binarize(polygon_mask, area_of_interest_raster, threshold):
    """Binarize a raster inside a polygon mask.

    Pixels >= *threshold* become 255 (land), the rest 0 (ocean); pixels
    outside *polygon_mask* are masked out. A hollow rectangle is blanked
    to NaN to hide the legend box burnt into the source images.
    Returns a numpy masked array.
    """
    raster_array = raster_to_array(area_of_interest_raster)
    # BUG FIX: the original tested the module-level ``mask`` instead of the
    # ``polygon_mask`` argument, so the parameter was silently ignored.
    masked = np.where(polygon_mask, raster_array, np.nan)
    binerized_array = np.where(masked >= threshold, 255, 0)
    box_top, box_bottom, box_left, box_right = 616, 649, 637, 681
    # Draw hollow rectangle with 2px border width on left and 1px for rest.
    # -9999 is a sentinel value that is easy to detect afterwards.
    binerized_array[box_top:box_bottom, box_left:box_left+2] = -9999
    binerized_array[box_top:box_bottom, box_right-1:box_right] = -9999
    binerized_array[box_top:box_top+1, box_left:box_right] = -9999
    binerized_array[box_bottom-1:box_bottom, box_left:box_right] = -9999
    # Replace the sentinel border with NaN, then restore the polygon mask.
    binerized_array = np.where(binerized_array != -9999, binerized_array, np.nan)
    binerized_array = np.ma.array(binerized_array, mask=np.isnan(masked))
    return binerized_array
sample_mask = mask_and_binarize(mask, georeferenced_images[0], 150)
# %% Recreating the sea_land_ratio method
def sea_land_ratio_calculation(masked_array, box_perimeter_fill_value):
    """Compute (land %, ocean %, land/ocean ratio) of a binarized image.

    masked_array : numpy masked array with land == 255, ocean == 0, and the
        legend-box perimeter encoded as NaN.
    box_perimeter_fill_value : value substituted for those NaNs. A float
        (e.g. ``np.nan``) bases the percentages on non-NaN pixels only; an
        int (e.g. ``0``) counts every unmasked pixel.

    Raises ZeroDivisionError when there are no ocean pixels, and
    UnboundLocalError when the fill value is neither int nor float
    (preserving the original behaviour).
    """
    cleaned_array = np.nan_to_num(masked_array,
                                  copy=True,
                                  nan=box_perimeter_fill_value,
                                  posinf=None,
                                  neginf=None)
    image_pixels = cleaned_array.count()          # unmasked pixel count
    image_nans = np.isnan(cleaned_array).sum()
    non_nan_pixels = image_pixels-image_nans
    land_pixels = np.count_nonzero(cleaned_array == 255)
    ocean_pixels = np.count_nonzero(cleaned_array == 0)
    # ``isinstance`` instead of ``type(x) == T`` -- also accepts numpy
    # scalar subclasses (np.float64), which the original mishandled.
    if isinstance(box_perimeter_fill_value, float):
        land_percentage = round((land_pixels/non_nan_pixels)*100, 4)
        ocean_percentage = round((ocean_pixels/non_nan_pixels)*100, 4)
    elif isinstance(box_perimeter_fill_value, int):
        land_percentage = round((land_pixels/image_pixels)*100, 4)
        ocean_percentage = round((ocean_pixels/image_pixels)*100, 4)
    land_ocean_ratio = round(land_percentage/ocean_percentage, 4)
    return land_percentage, ocean_percentage, land_ocean_ratio
# %% testing the sea_land_ratio definition
# Ad-hoc sanity check: recompute the land/ocean ratio for the sample mask
# inline, then plot it next to the raw source image.
os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/georeferenced')
# apply_box_perimeter_mask = 'yes'
box_perimeter_fill_value = np.nan
cleaned_array = np.nan_to_num(sample_mask,
                              copy=True,
                              nan=box_perimeter_fill_value,
                              posinf=None,
                              neginf=None)
image_pixels = cleaned_array.count()
image_nans = np.isnan(cleaned_array).sum()
non_nan_pixels = image_pixels-image_nans
land_pixels = np.count_nonzero(cleaned_array == 255)
ocean_pixels = np.count_nonzero(cleaned_array == 0)
if type(box_perimeter_fill_value) == float:
    land_percentage = round((land_pixels/non_nan_pixels)*100, 4)
    ocean_percentage = round((ocean_pixels/non_nan_pixels)*100, 4)
elif type(box_perimeter_fill_value) == int:
    land_percentage = round((land_pixels/image_pixels)*100, 4)
    ocean_percentage = round((ocean_pixels/image_pixels)*100, 4)
# land_percentage = round((land_pixels/image_pixels)*100, 4)
# ocean_percentage = round((ocean_pixels/image_pixels)*100, 4)
land_ocean_ratio = round(land_percentage/ocean_percentage, 10)
print(f'{box_perimeter_fill_value}', land_ocean_ratio)
# Two-panel map: raw image on top, binarized/masked version below.
map_projection = ccrs.PlateCarree()
fig, axes = plt.subplots(nrows=2,
                         ncols=1,
                         figsize=(15, 5), subplot_kw={'projection': map_projection})
# NOTE(review): mid-module imports; conventionally these belong at the top
# of the file.
from matplotlib import colors
cmap = colors.ListedColormap(['dodgerblue', 'tan'])
import cartopy.feature as cfeature
georeferenced_images = os.listdir()
georeferenced_images.sort()
img = Image.open(georeferenced_images[1])
ax = axes[0]
ax.imshow(img,
          origin='upper',
          extent=map_extent(georeferenced_images[0]),
          cmap=cmap)
# Overlay modern coastlines for visual comparison.
continents = cfeature.NaturalEarthFeature(category='physical',
                                          name='land',
                                          scale='50m',
                                          edgecolor='face')
ax.add_feature(continents,
               facecolor='none',
               edgecolor='grey',
               lw=1)
ax.set_extent([90, 155, -25, 20], crs=map_projection)
ax = axes[1]
ax.imshow(cleaned_array,
          origin='upper',
          extent=map_extent(georeferenced_images[0]),
          cmap=cmap)
ax.add_feature(continents,
               facecolor='none',
               edgecolor='grey',
               lw=1)
ax.set_extent([90, 155, -25, 20], crs=map_projection)
# ax.set_extent([91, 150.59, -20.35, 20.61], crs=map_projection)
unmodified_image = raster_array = raster_to_array(georeferenced_images[0])
# %%
def multi_process_mask_and_binarize(polygon_mask,
                                    area_of_interest_raster,
                                    threshold):
    """Run ``mask_and_binarize`` over many rasters in a process pool.

    Returns the iterator produced by ``executor.map``; because the ``with``
    block waits for all workers on exit, the results are complete by the
    time the iterator is consumed.
    """
    with concurrent.futures.ProcessPoolExecutor() as executor:
        processed_image = executor.map(mask_and_binarize,
                                       repeat(polygon_mask),
                                       area_of_interest_raster,
                                       repeat(threshold))
    return processed_image
# Binarize every georeferenced raster in parallel, then score each one
# (fill value 0 counts the blanked legend box as part of the image).
processed_image = multi_process_mask_and_binarize(mask,
                                                  georeferenced_images,
                                                  150)
sea_land_ratio = [sea_land_ratio_calculation(i, 0) for i in processed_image]
# sea_land_ratio = [sea_land_ratio_calculation(i, 'yes mask') for i in processed_image]
# %%
os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/Images2')
def CropImage(image, left, top, right, bottom):
    """Return the (left, top, right, bottom) rectangle of *image* as a new
    PIL image. The file on disk is not modified."""
    source = Image.open(image)
    crop_box = (left, top, right, bottom)
    return source.crop(crop_box)
def MultiProcessCrop(names, im_left, im_top, im_right, im_bottom):
    """Crop the same rectangle out of every image in *names* using a
    process pool; returns the ``executor.map`` iterator (complete once the
    ``with`` block has exited)."""
    with concurrent.futures.ProcessPoolExecutor() as executor:
        processed_image = executor.map(CropImage,
                                       names,
                                       repeat(im_left),
                                       repeat(im_top),
                                       repeat(im_right),
                                       repeat(im_bottom))
    return processed_image
# Crop the model-year and eustatic-level text boxes out of every source PNG.
cropped_year = MultiProcessCrop(image_names, 300, 0, 395, 50)
cropped_eustatic = MultiProcessCrop(image_names, 336, 45, 377, 62)
threshold_value = 150  # NOTE(review): declared but apparently unused below
# %%
# Lazily OCR the model year from each cropped image.
model_year = (float(pytesseract.image_to_string(i)) for i in cropped_year)
# %%
def CleanEustaticNumbersInImages(cropped_eustatic_images):
    """OCR the eustatic sea-level value from each cropped image and clean it.

    Returns a generator of floats, forced negative (metres relative to
    present sea level). OCR failures and known misreads are patched from
    hand-transcribed tables below.
    """
    test_list = []
    index_missing_numbers = []
    for i, r in enumerate(cropped_eustatic_images):
        try:
            test_list.append(float(pytesseract.image_to_string(
                r, config='--psm 6')))
        except ValueError:
            # OCR produced non-numeric text: insert a sentinel and remember
            # the position for manual back-fill.
            test_list.append(-9999)
            index_missing_numbers.append(i)
    # images_missing_eustatic = [image_names[i] for i in index_missing_numbers]
    # Hand-transcribed values for the frames OCR failed on.
    # NOTE(review): assumes a fixed input ordering matching the run these
    # numbers were read from -- confirm before reuse.
    replacement_eustatic = [7.176,
                            4.585,
                            13.111,
                            50.435,
                            36.167,
                            6.645,
                            6.253,
                            7.721,
                            7.721,
                            9.185,
                            9.185,
                            7.512,
                            7.512,
                            36.945,
                            36.945,
                            43.045]
    for (i, r) in zip(index_missing_numbers, replacement_eustatic):
        test_list[i] = r
    # Frames where OCR read a number, but the wrong one.
    index_wrong_numbers = [76, 138, 196, 197, 400, 509, 510]
    replacement_numbers = [37.173, 31.124, 31.144, 31.144, 5.291, 71.277,
                           71.277]
    for (i, r) in zip(index_wrong_numbers, replacement_numbers):
        test_list[i] = r
    # Values over 100 presumably lost their decimal point in OCR; rescale.
    test_list_divide = [x/1000 if x > 100 else x for x in test_list]
    # Store depths as negative metres.
    cleaned_eustatic = (-x if x > 0 else x for x in test_list_divide)
    return cleaned_eustatic
eustatic_clean = CleanEustaticNumbersInImages(cropped_eustatic)
def DataToPandasDataFrame(name, year, eustatic, sea_land_and_ratio):
    """Combine per-image metadata with sea/land percentages in a DataFrame.

    Each row of *sea_land_and_ratio* is [land %, ocean %, ratio]; the image
    name, model year and eustatic value are inserted in front of it, giving
    one row per image in the returned table.
    """
    rows = [list(entry) for entry in sea_land_and_ratio]
    # Insert each metadata column at its final position (0, 1, 2 in turn).
    for column_position, values in enumerate((name, year, eustatic)):
        for row_index, value in zip(range(len(name)), values):
            rows[row_index].insert(column_position, value)
    return pd.DataFrame(rows,
                        columns=[
                            'Image',
                            'Years ago [kyr]',
                            'Eustatic [m]',
                            'Land [%]',
                            'Ocean [%]',
                            'Ratio'])
# Assemble the final per-image summary table.
summary_table = DataToPandasDataFrame(image_names_no_extension,
                                      model_year,
                                      eustatic_clean,
                                      sea_land_ratio)
# %%
"""Saving tables and figures"""
# Saving figure and table in a different directory
if save_table == 'yes' and use_mask == 'yes mask':
    os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/Excel')
    out_name = 'RSL_with_polgon_AOI_Ocean.xlsx'
    summary_table.to_excel(out_name, index=False)
    # Report the file actually written (the original printed a different,
    # stale filename: 'RSL_with_mask_small_AOI.xlsx').
    print(f'Table saved: {os.getcwd()}/{out_name}')
elif save_table == 'yes' and use_mask == 'no mask':
    os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/Excel')
    summary_table.to_excel(f'Sea_level_threshold{threshold_value}.xlsx',
                           index=False)
os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/Excel')
|
18,488 | ae0fa7291f1f604e78d9843335f6b71c7e14f70b | from django.contrib.auth.models import User
from django.forms import ModelForm
from django_oauth_twitter.models import TwitterUser
class RegistrationForm(ModelForm):
    """Registration form that links a new Django user to a Twitter account.

    The Twitter OAuth access token and user info are supplied by the view
    as extra keyword arguments and attached to the created ``User`` via a
    ``TwitterUser`` row on save.
    """

    class Meta:
        model = User
        fields = ('username',)

    def __init__(self, *args, **kwargs):
        # Pop our extra kwargs before ModelForm sees them.
        self.access_token = kwargs.pop('access_token', None)
        self.userinfo = kwargs.pop('userinfo', None)
        initial = kwargs.get('initial', None)
        if initial is not None and 'username' in initial:
            if User.objects.filter(username=initial['username']):
                # User already exists in the system, so don't prefill the
                # username.
                del initial['username']
        # __init__ must not return a value; the original returned the
        # (always-None) result of the super call.
        super(RegistrationForm, self).__init__(*args, **kwargs)

    def save(self):
        """Create the user (no local password) and attach the TwitterUser."""
        user = super(RegistrationForm, self).save(commit=False)
        # Authentication happens through Twitter OAuth, not a password.
        user.set_unusable_password()
        user.save()
        # Associate it with a Twitter account.
        TwitterUser.objects.update_or_create(user=user,
                                             access_token=self.access_token,
                                             userinfo=self.userinfo)
        return user
|
18,489 | 78f0c6aae8a583a567299e65a5d8856414c5434d | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# Lint as: python3
# pyformat: disable
r"""Demos a basic NitroML benchmark on the 'Titanic' dataset from OpenML.
To run in open-source:
python examples/titanic_benchmark.py
""" # pylint: disable=line-too-long
# pyformat: enable
# pylint: disable=g-import-not-at-top
import os
import sys
# Required since Python binaries ignore relative paths when importing:
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import nitroml
from nitroml.automl import autodata as ad
from nitroml.automl.autotrainer import subpipeline as at
from nitroml.benchmark.tasks import tfds_task
from examples import config
import tensorflow_datasets as tfds
class TitanicBenchmark(nitroml.Benchmark):
  r"""Demos a NitroML benchmark on the 'Titanic' dataset from OpenML."""

  def benchmark(self,
                data_dir: str = None,
                use_keras: bool = True,
                enable_tuning: bool = True):
    """Builds the AutoData + AutoTrainer pipeline for the Titanic task.

    Args:
      data_dir: Optional directory where TFDS downloads/stores the dataset.
      use_keras: Unused in this body -- NOTE(review): confirm whether any
        caller still passes it before removing.
      enable_tuning: Whether the AutoTrainer runs hyperparameter tuning.
    """
    # Use TFDSTask to define the task for the titanic dataset.
    task = tfds_task.TFDSTask(tfds.builder('titanic', data_dir=data_dir))
    self.add(task.components)
    # Standard preprocessing: produces transformed examples, the transform
    # graph, and the inferred schema consumed by the trainer below.
    autodata = self.add(
        ad.AutoData(
            task.problem_statement,
            examples=task.train_and_eval_examples,
            preprocessor=ad.BasicPreprocessor()))
    # Define a Trainer to train our model on the given task.
    trainer = self.add(
        at.AutoTrainer(
            problem_statement=task.problem_statement,
            transformed_examples=autodata.outputs.transformed_examples,
            transform_graph=autodata.outputs.transform_graph,
            schema=autodata.outputs.schema,
            train_steps=1000,
            eval_steps=500,
            enable_tuning=enable_tuning))
    # Finally, call evaluate() on the workflow DAG outputs. This will
    # automatically append Evaluators to compute metrics from the given
    # SavedModel and 'eval' TF Examples.
    self.evaluate(task=task, model=trainer.outputs.model)
if __name__ == '__main__':
  # Dispatch to a Kubeflow run or a local run based on project config.
  if config.USE_KUBEFLOW:
    # We need the string "KubeflowDagRunner" in this file to appease the
    # validator used in `tfx create pipeline`.
    # Validator: https://github.com/tensorflow/tfx/blob/v0.22.0/tfx/tools/cli/handler/base_handler.py#L105
    nitroml.main(
        pipeline_name=config.PIPELINE_NAME + '_titanic',
        pipeline_root=config.PIPELINE_ROOT,
        data_dir=config.TF_DOWNLOAD_DIR,
        tfx_runner=nitroml.get_default_kubeflow_dag_runner())
  else:
    # Local run with default runner and settings.
    nitroml.main()
|
18,490 | bd54126e21a4aec7788923eb09245d0eebcb481f | from django.apps import AppConfig
class EmenuConfig(AppConfig):
    """Django application configuration for the eMenu app."""
    name = 'eMenu'
|
18,491 | a4cd36432ed18996e4ec20182b72e625ff24633d | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 30 18:34:09 2016
@author: Wesley
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 30 18:14:42 2016
@author: Wesley
"""
import json
import pymysql
import sys
# Grab the database password from the command line.
password = sys.argv[1]

# Open database connection (autocommit so each insert persists immediately).
db = pymysql.connect(host='cs.elon.edu',
                     db='wupham',
                     user='wupham',
                     passwd=password,
                     port=3306,
                     charset='utf8mb4',
                     autocommit=True)
insertQuery = "INSERT INTO yelp_categories (business_id, category) VALUES (%s, %s)"
try:
    cursorSelect = db.cursor()
    # One JSON business record per line.
    with open('yelp_business.json') as myfile:
        for line in myfile:
            data = json.loads(line)
            businessID = data.get("business_id")
            categories = data.get("categories")
            # Some records have null/missing categories -- skip them instead
            # of crashing on `'Restaurants' in None`.
            if categories and 'Restaurants' in categories:
                for category in categories:
                    cursorSelect.execute(insertQuery, (businessID, category))
finally:
    # Always release the connection, even if a line fails to parse/insert.
    db.close()
|
18,492 | 5011a87d8b94bbf1278bb5642890a280c5e83339 | n == input()
if n.isnumeric():
a=n[::-1]
print(a)
else:
print("Invalid")
|
18,493 | 24c22401aee00995f0dd57bf89a078298536f12d | """
https://www.hackerrank.com/challenges/validating-credit-card-number
Language: Python 3
Sample Input
6
4123456789123456
5123-4567-8912-3456
61234-567-8912-3456
4123356789123456
5133-3367-8912-3456
5123 - 3567 - 8912 - 3456
Sample Output
Valid
Valid
Invalid
Valid
Invalid
Invalid
"""
import re
for _ in range(int(input())):
s = input()
is_valid = re.search(r'^[456]\d{3}(\d{12}|(-\d{4}){3})$', s)
is_valid = is_valid and not re.search(r'(\d)(?:\1){3,}', s.replace('-',''))
print('Valid' if is_valid else 'Invalid')
|
18,494 | 0edba8703213e92dc34b07d09407aca8e490c1c7 |
import nfqueue
import cracker
import thread
#from scapy.all import IP, TCP,
import socket
import time
from dpkt import ip, tcp, hexdump, udp
""" Module that will lift messages off the network
for decryption
"""
"""
def crack():
message = "HI"
print message
try:
thread.start_new_thread(cracker.monoAlphabeticCrack, (message,))
except:
print "W"
while 1:
pass
"""
count = 0
def log(dummy, payload):
    # nfqueue callback: runs once for each packet diverted into queue 0.
    global count  # NOTE(review): declared global but never updated -- confirm intent
    print "Got packet"
    data = payload.get_data()
    # Parse the raw bytes as an IP packet with dpkt.
    packet = ip.IP(data)
    tcp = packet.data  # NOTE(review): shadows the imported dpkt `tcp` module
    # Hand the packet back to the kernel unmodified.
    payload.set_verdict(nfqueue.NF_ACCEPT)
if __name__ == "__main__":
q = None
q = nfqueue.queue()
q.open()
q.bind(socket.AF_INET)
q.set_callback(log)
q.create_queue(0)
try:
q.try_run()
except KeyboardInterrupt:
print "Exiting"
finally:
q.unbind(socket.AF_INET)
q.close()
q.unbind(socket.AF_INET)
q.close()
|
18,495 | 95f93ed681a73cf7166665073849a453fb5b7768 | # Version 1.1.0
asd = "000"
st = "000"
two = "2"
lan = input("Choose your language (English, Russian):")
if lan == "English":
print("This program encrypts one letter in English into binary")
print("To end encryption write command end")
print("At the beginning of the cipher is added 000 and at the end")
print("After each encrypted letter is the number 2")
while 2 == 2:
a = input("What is the letter:")
if a == "a":
aa = "1"
st = st + aa + two
elif a == "b":
bb = "10"
st = st + bb + two
elif a == "c":
cc = "11"
st = st + cc + two
elif a == "d":
dd = "100"
st = st + dd + two
elif a == "e":
ee = "101"
st = st + ee + two
elif a == "f":
ff = "110"
st = st + ff + two
elif a == "g":
gg = "111"
st = st + gg + two
elif a == "h":
hh = "1000"
st = st + hh + two
elif a == "i":
ii = "1001"
st = st + ii + two
elif a == "j":
jj = "1010"
st = st + jj + two
elif a == "k":
kk = "1011"
st = st + kk + two
elif a == "l":
ll = "1100"
st = st + ll + two
elif a == "m":
mm = "1101"
st = st + mm + two
elif a == "n":
nn = "1110"
st = st + nn + two
elif a == "o":
oo = "1111"
st = st + oo + two
elif a == "p":
pp = "10000"
st = st + pp + two
elif a == "q":
qq = "10001"
st = st + qq + two
elif a == "r":
rr = "10010"
st = st + rr + two
elif a == "s":
ss = "10011"
st = st + ss + two
elif a == "t":
tt = "10100"
st = st + tt + two
elif a == "u":
uu = "10101"
st = st + uu + two
elif a == "v":
vv = "10110"
st = st + vv + two
elif a == "w":
ww = "10111"
st = st + ww + two
elif a == "x":
xx = "11000"
st = st + xx + two
elif a == "y":
yy = "11001"
st = st + yy + two
elif a == "z":
zz = "11010"
st = st + zz + two
elif a == "end":
break
elif lan == "Russian":
print("Эта программа шифрует по ОДНОЙ букве на РУССКОМ языке в двоичный код")
print("Чтобы закончить шифрование напишите комманду end")
print("В начале шифра добовляется 000 и в конце")
print("После каждой зашефрованой буквы идёт цифра 2")
while 2 == 2:
a = input("Назовите букву:")
if a == "а":
aaa = "1"
st = st + aa + two
elif a == "б":
bbb = "10"
st = st + bbb + two
elif a == "в":
vvv = "11"
st = st + vvv + two
elif a == "г":
ggg = "100"
st = st + ggg + two
elif a == "д":
ddd = "101"
st = st + ddd + two
elif a == "е":
ye = "110"
st = st + ye + two
elif a == "ё":
yo = "111"
st = st + yo + two
elif a == "ж":
zhe = "1000"
st = st + zhe + two
elif a == "з":
ze = "1001"
st = st + ze + two
elif a == "и":
iii = "1010"
st = st + iii + two
elif a == "й":
ikr = "1011"
st = st + ikr + two
elif a == "к":
ka = "1100"
st = st + ka + two
elif a == "л":
lll = "1101"
st = st + lll + two
elif a == "м":
em = "1110"
st = st + em + two
elif a == "н":
en = "1111"
st = st + en + two
elif a == "о":
ooo = "10000"
st = st + ooo + two
elif a == "п":
pe = "10001"
st = st + pe + two
elif a == "р":
re = "10010"
st = st + re + two
elif a == "с":
sss = "10011"
st = st + sss + two
elif a == "т":
ttt = "10100"
st = st + ttt + two
elif a == "у":
uuu = "10101"
st = st + uuu + two
elif a == "ф":
fff = "10110"
st = st + fff + two
elif a == "х":
hhh = "10111"
st = st + hhh + two
elif a == "ц":
zshe = "11000"
st = st + zshe + two
elif a == "ч":
che = "11001"
st = st + che + two
elif a == "ш":
sh = "11010"
st = st + sh + two
elif a == "щ":
sha = "11011"
st = st + sha + two
elif a == "ъ":
tverd = "11100"
st = st + tverd + two
elif a == "ы":
qwerty = "11101"
st = st + qwerty + two
elif a == "ь":
mye = "11110"
st = st + mye + two
elif a == "э":
eee = "11111"
st = st + eee + two
elif a == "ю":
yu = "100000"
st = st + yu + two
elif a == "я":
ya = "100001"
st = st + ya + two
elif a == "end":
break
print(st + asd)
input("exit")
|
18,496 | 41a6bc1d841c09fdc9e66fbe9a574cec4472ee82 | import tensorflow as tf
# Demo of TensorFlow 1.x graph-node naming for three creation APIs:
# tf.placeholder, tf.Variable and tf.get_variable. Unnamed nodes get a
# default name; explicit names collide and receive a '_<n>' suffix.
# Allow GPU grow
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # claim GPU memory on demand, not all at once
sess = tf.Session(config=config)
print("########## Placeholder ##########")
# 1.placeholder
v1 = tf.placeholder(tf.float32, shape=[2, 3, 4])
print(v1.name) # Placeholder:0
print('Placeholder without setting name', v1.name)
v1 = tf.placeholder(tf.float32, shape=[2, 3, 4], name='ph')
print(v1.name) # ph:0
# Same explicit name again: TF de-duplicates by appending a suffix.
v1 = tf.placeholder(tf.float32, shape=[2, 3, 4], name='ph')
print(v1.name) # ph_1:0
print("type of tf.placeholder()", type(v1))
print(v1)
print("########## tf.Variable() ##########")
# 2. tf.Variable() -- duplicate names are also suffixed, like placeholders.
v2 = tf.Variable([1, 2], dtype=tf.float32)
print(v2.name) # Variable:0
print('tf.Variable() without setting name', v2.name)
v2 = tf.Variable([1, 2], dtype=tf.float32, name='V')
print(v2.name) # V:0
v2 = tf.Variable([1, 2], dtype=tf.float32, name='V')
print(v2.name) # V_1:0
print("type of tf.Varialbe()", type(v2))
print(v2)
print("########## tf.get_variable() ##########")
# 3. tf.get_variable(): the name argument is mandatory, and -- unlike the
# APIs above -- re-using a name is an error (hence the commented-out lines).
v3 = tf.get_variable(name='gv', shape=[])
print(v3.name) # gv:0
# v4 = tf.get_variable(name='gv', shape=[2])
# print(v4.name)
print("type of tf.get_variable()", type(v3)) # <class 'tensorflow.python.ops.variables.Variable'>
print(v3)
print("########## conclusion ##########")
# Only tf.Variable/tf.get_variable nodes are trainable; placeholders are not.
vs = tf.trainable_variables()
print(len(vs))
for v in vs:
    print(v, v.name)
|
18,497 | 60d76ed2d660325c34b84711c74cd5cc5d16942b | # Declare and assign a variable.
number = 10  # module-level integer variable
print(number) # This prints out 10 in the console.
|
18,498 | 655f98c7b9eee540ccd3c41fcfe9c5b807d76bae | # gensim modules
from gensim import utils
from gensim.models.doc2vec import TaggedDocument
from gensim.models.doc2vec import LabeledSentence
from gensim.models import Doc2Vec
from collections import namedtuple
import time
import random
from blist import blist
# numpy
import numpy as np
class LabeledLineSentence(object):
    """Streams one TaggedDocument per line of a text file for Doc2Vec.

    Document i is tagged '<prefix>_i'; the corpus is cached after the first
    read so it can be reshuffled between training epochs.
    """

    def __init__(self, source, prefix):
        # source: path of the corpus file (one document per line).
        # prefix: label prefix used to build each document's tag.
        self.source = source
        self.prefix = prefix
        self.sentences = None  # built lazily by to_array()

    def to_array(self):
        """Materialise (once) and return the list of TaggedDocuments."""
        if self.sentences is None:
            self.sentences = blist()
            with utils.smart_open(self.source) as fIn:
                for item_no, line in enumerate(fIn):
                    line = line.replace("\n", "")
                    self.sentences.append(TaggedDocument(utils.to_unicode(line).split(), [self.prefix + '_%s' % item_no]))
        return self.sentences

    def sentences_perm(self):
        """Shuffle the cached documents in place and return them."""
        random.shuffle(self.sentences)
        return self.sentences
if __name__ == '__main__':
    # Train two Doc2Vec variants (DBOW and DM) on the tweet corpus with a
    # manually decayed learning rate, saving each trained model to disk.
    dimension = 100
    sentences = LabeledLineSentence("tweets_clean.txt", "TWEET")
    total_start = time.time()
    dbow = False  # NOTE(review): the DBOW branch is disabled in this run
    if dbow:
        model = Doc2Vec(min_count=1, window=10, size=dimension, sample=1e-3, negative=5, dm=0 ,workers=6, alpha=0.04)
        print "inicio vocab"
        model.build_vocab(sentences.to_array())
        print "fin vocab"
        # Linearly decay alpha from its initial value down to 0.01.
        first_alpha = model.alpha
        last_alpha = 0.01
        next_alpha = first_alpha
        epochs = 30
        for epoch in range(epochs):
            start = time.time()
            print "iniciando epoca DBOW:"
            print model.alpha
            # Documents are reshuffled before every epoch.
            model.train(sentences.sentences_perm())
            end = time.time()
            next_alpha = (((first_alpha - last_alpha) / float(epochs)) * float(epochs - (epoch+1)) + last_alpha)
            model.alpha = next_alpha
            print "tiempo de la epoca " + str(epoch) +": " + str(end - start)
        model.save('./tweet_dbow.d2v')
    dm = True
    if dm:
        #model = Doc2Vec(min_count=1, window=10, size=dimension, sample=1e-3, negative=5, workers=6, dm_mean=1, alpha=0.04)
        model = Doc2Vec(min_count=1, window=10, size=dimension, sample=1e-3, negative=5, workers=6, alpha=0.04)
        #model = Doc2Vec(min_count=1, window=10, size=dimension, sample=1e-3, negative=5, workers=6, alpha=0.04, dm_concat=1)
        #
        print "inicio vocab"
        model.build_vocab(sentences.to_array())
        print "fin vocab"
        # Same linear alpha decay schedule as the DBOW branch.
        first_alpha = model.alpha
        last_alpha = 0.01
        next_alpha = first_alpha
        epochs = 30
        for epoch in range(epochs):
            start = time.time()
            print "iniciando epoca DM:"
            print model.alpha
            model.train(sentences.sentences_perm())
            end = time.time()
            next_alpha = (((first_alpha - last_alpha) / float(epochs)) * float(epochs - (epoch+1)) + last_alpha)
            model.alpha = next_alpha
            print "tiempo de la epoca " + str(epoch) +": " + str(end - start)
        model.save('./tweet_dm.d2v')
    total_end = time.time()
    print "tiempo total:" + str((total_end - total_start)/60.0)
|
18,499 | ca7d67fb8108c5503ccc107ec979df745d95faab | from flask import render_template, flash, redirect, request
from app import app, db
from app.forms import NewOrderForm, EditOrderForm, RegisterForm
from app.models import customer, order, driver, register
@app.route('/')
@app.route('/index')
def index():
    """Render the landing page."""
    return render_template('index.html', title='Main')
@app.route('/register', methods=['GET', 'POST'])
def reg():
    """Render the registration form; create a register row on valid POST."""
    form = RegisterForm()
    if form.validate_on_submit():
        Register = register(username=form.username.data, email=form.email.data, password=form.password.data, password2=form.password2.data)
        db.session.add(Register)
        db.session.commit()
        flash('Thank you for register with us')
        return redirect('/index')
    if request.method == 'POST':
        # Only warn when an actual submission failed validation. The
        # original flashed 'Please try again' on every plain GET of the
        # page, before the user had submitted anything.
        flash('Please try again')
    return render_template('register.html', title='Register', form=form)
@app.route('/orders')
def orders():
    """List every order in the system."""
    Orders = order.query.all()
    return render_template('orders.html', title='Orders', rows=Orders)
@app.route('/neworder', methods=['GET', 'POST'])
def neworder():
    """Create a customer and an associated order from the submitted form."""
    form = NewOrderForm()
    # Populate the driver dropdown from the drivers table on every request.
    form.driver.choices = [(driver.driverID, driver.name) for driver in driver.query.all()]
    if form.validate_on_submit():
        Customer = customer(name=form.custname.data, phone=form.phone.data, address=form.address.data, suburb=form.suburb.data)
        Order = order(orderID=form.ordernum.data, driverID=form.driver.data)
        # Link the order to the new customer before persisting both.
        Customer.orders.append(Order)
        db.session.add(Customer)
        db.session.add(Order)
        db.session.commit()
        flash('Congratulations, you have created a new order!')
        return redirect('/orders')
    return render_template('neworder.html', title='New Order', form=form)
@app.route('/editorder/<orderID>', methods=['GET', 'POST'])
def editorder(orderID):
    """Edit an existing order and its customer's contact details."""
    selectedOrder = order.query.filter_by(orderID=orderID).first()
    # NOTE(review): the whole order object is passed as the `driver` initial;
    # the GET branch below overwrites it anyway -- confirm intent.
    form = EditOrderForm(driver=selectedOrder)
    form.driver.choices = [(driver.driverID, driver.name) for driver in driver.query.all()]
    if form.validate_on_submit():
        # Copy the submitted fields onto the order and its customer.
        selectedOrder.driverID = form.driver.data
        selectedOrder.orderingCust.name = form.name.data
        selectedOrder.orderingCust.phone = form.phone.data
        selectedOrder.orderingCust.address = form.address.data
        selectedOrder.orderingCust.suburb = form.suburb.data
        db.session.commit()
        flash('Congratulations, you have edited an order!')
        return redirect('/orders')
    elif request.method == 'GET':
        # Pre-fill the form from the stored order on first load.
        form.driver.data = selectedOrder.placedWithDriver.driverID
        form.orderID.data = selectedOrder.orderID
        form.name.data = selectedOrder.orderingCust.name
        form.phone.data = selectedOrder.orderingCust.phone
        form.address.data = selectedOrder.orderingCust.address
        form.suburb.data = selectedOrder.orderingCust.suburb
    return render_template('editorder.html', title='Edit Order', form=form)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.