Dataset schema (one record per source file; ⌀ marks a nullable column):

| column | type | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 10 – 805k |
| ext | string | 6 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 4 – 176 |
| max_stars_repo_name | string | length 7 – 114 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | 1 – 10 items |
| max_stars_count ⌀ | int64 | 1 – 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string | length 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string | length 24 |
| max_issues_repo_path | string | length 4 – 176 |
| max_issues_repo_name | string | length 7 – 114 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | 1 – 10 items |
| max_issues_count ⌀ | int64 | 1 – 48.5k |
| max_issues_repo_issues_event_min_datetime ⌀ | string | length 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string | length 24 |
| max_forks_repo_path | string | length 4 – 176 |
| max_forks_repo_name | string | length 7 – 114 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | 1 – 10 items |
| max_forks_count ⌀ | int64 | 1 – 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string | length 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string | length 24 |
| content | string | length 10 – 805k |
| avg_line_length | float64 | 5.53 – 11k |
| max_line_length | int64 | 10 – 129k |
| alphanum_fraction | float64 | 0.13 – 0.93 |
| content_no_comment | string | length 0 – 449k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
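A minimal sketch of reading such a corpus, assuming it is distributed in Hugging Face `datasets` format; the dataset identifier below is a placeholder, not the real name:

```python
# Hedged sketch: iterate records of a corpus with the schema above.
# "org/python-corpus" is a hypothetical identifier; streaming avoids a full download.
from datasets import load_dataset

ds = load_dataset("org/python-corpus", split="train", streaming=True)
for row in ds:
    # "content" holds the raw file text; "content_no_comment" is the comment-stripped variant.
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    break
```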
f71970c5e8e3cf8ece041b681a8768c3ae4e47b1
| 1,079
|
py
|
Python
|
codenerix_pos/admin.py
|
centrologic/django-codenerix-pos
|
449c54971c510aba2326797ab7aaf3a0b5f6c3ab
|
[
"Apache-2.0"
] | 3
|
2017-07-19T15:24:26.000Z
|
2017-12-22T01:35:28.000Z
|
codenerix_pos/admin.py
|
centrologic/django-codenerix-pos
|
449c54971c510aba2326797ab7aaf3a0b5f6c3ab
|
[
"Apache-2.0"
] | null | null | null |
codenerix_pos/admin.py
|
centrologic/django-codenerix-pos
|
449c54971c510aba2326797ab7aaf3a0b5f6c3ab
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# django-codenerix-pos
#
# Codenerix GNU
#
# Project URL : http://www.codenerix.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.contrib import admin
from .models import POS, POSSlot, POSPlant, POSZone, POSProduct, POSHardware, POSOperator, POSLog, POSGroupProduct
admin.site.register(POSPlant)
admin.site.register(POSZone)
admin.site.register(POSHardware)
admin.site.register(POSGroupProduct)
admin.site.register(POS)
admin.site.register(POSSlot)
admin.site.register(POSProduct)
admin.site.register(POSOperator)
admin.site.register(POSLog)
| 31.735294
| 114
| 0.776645
|
from django.contrib import admin
from .models import POS, POSSlot, POSPlant, POSZone, POSProduct, POSHardware, POSOperator, POSLog, POSGroupProduct
admin.site.register(POSPlant)
admin.site.register(POSZone)
admin.site.register(POSHardware)
admin.site.register(POSGroupProduct)
admin.site.register(POS)
admin.site.register(POSSlot)
admin.site.register(POSProduct)
admin.site.register(POSOperator)
admin.site.register(POSLog)
| true
| true
|
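The record above registers each model with the default admin options. A minimal sketch of a customised registration using the standard Django admin API; the extra options shown are illustrative and not taken from the actual models:

```python
# Hedged sketch: same effect as admin.site.register(POSHardware), but with a
# ModelAdmin subclass; "id" and "__str__" are always valid changelist columns.
from django.contrib import admin
from .models import POSHardware

@admin.register(POSHardware)
class POSHardwareAdmin(admin.ModelAdmin):
    list_display = ("id", "__str__")  # columns shown in the changelist
    ordering = ("id",)                # default sort order
```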
f71970cd94b344b2aafb1578c85a08cee5366ca7
| 20,090
|
py
|
Python
|
script.py
|
lawlie8/Mr.Hyde
|
c3d2c04de6343580b4b14cbd2319737ed0b3a73e
|
[
"MIT"
] | 3
|
2020-06-04T10:02:35.000Z
|
2020-06-05T11:44:02.000Z
|
script.py
|
lawlie8/Mr.Hyde
|
c3d2c04de6343580b4b14cbd2319737ed0b3a73e
|
[
"MIT"
] | null | null | null |
script.py
|
lawlie8/Mr.Hyde
|
c3d2c04de6343580b4b14cbd2319737ed0b3a73e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import tkinter as tk
from tkinter import *
from tkinter.ttk import *
from Crypto import Random
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
import os
import os.path
from os import listdir
from os.path import isfile, join
#import time
import ctypes
#import threading
import hashlib
import random
import binascii
import sys  # needed for the sys.executable re-launch in the admin-elevation branch below
#add admin to read write C: Files
try:
def is_admin():
try:
return ctypes.windll.shell32.IsUserAnAdmin()
except:
return False
if is_admin():
def initilise():
global check_file
check_file = open('hyde.law','w+')
check_file.write('setting_window_off')
check_file.close()
os.system('mkdir .hydefiles')
ctypes.windll.shcore.SetProcessDpiAwareness(1)
global label,window
window = tk.Tk() #creates window
window.tk.call('tk', 'scaling', 2.0)
window.geometry("1050x380")
window.resizable(width=False,height=False)
window.title("Mr. Hyde")
try:
window.iconbitmap('hyde.ico')
except:
pass
window.configure(bg='#333338')
label = tk.Label(text = "Mr. Hyde" ,fg="#d6d6c2",bg="#333338")
label.place(relx=.5,rely=.5,anchor="c")
def clear_label():
label.place_forget()
label1 = tk.Label(text = "Encrypt Your Files with 'AES-256'" ,fg="white",bg="#333338")
label1.pack()
open_files_button = tk.Button(text='Select Files',activebackground='black',highlightcolor='black',bd=1,relief='flat',height=0,width=12,fg='white',bg='#338237',command=open_files,master=window)
open_files_button.pack(anchor='nw',pady=50,padx=10,side='left')
password_entry = tk.Entry(width=15,show='*')
password_entry.place(x=600,y=82,height=42)
encrypt_button = tk.Button(text='Encrypt',activebackground='black',highlightcolor='black',bd=1,relief='flat',height=0,width=12,fg='white',bg='#338237',command=lambda:encrypt_section(password_entry,select_files_label),master=window)
encrypt_button.place(x=910,y=82)
decrypt_button = tk.Button(text='Decrypt',activebackground='black',highlightcolor='black',bd=1,relief='flat',height=0,width=12,fg='white',bg='#338237',command=lambda:decrypt_section(password_entry,select_files_label),master=window)
decrypt_button.place(x=770,y=82)
password_label = tk.Label(text='Password',fg='#d6d6c2',bg='#333338')
password_label.place(x=500,y=85)
select_files_label = tk.Label(text='files not selected',fg='#d6d6c2',bg='#333338')
select_files_label.place(x=460,y=220)
setting_button = tk.Button(text='setting',activebackground='black',highlightcolor='black',bd=1,relief='flat',height=0,width=5,fg='white',bg='#338237',command=lambda :setting_window(check_file))#,master=window)
setting_button.place(x=980,y=12,height=30)
def open_files():#selects files to encrypt decrypt
from tkinter import filedialog
global window_filename,enc_file_list,mylist,enc_file_scroll,file_to_encrypt_label
enc_file_list = []
window_filename = filedialog.askopenfilenames(initialdir = "/",title = "Select file",filetypes = (("all files","*.*"),("enc files","*.enc"),("jpeg files","*.jpg")))
enc_file_list.append(window_filename)
file_to_encrypt_label = tk.Label(text='Files to Encrypt',justify='left',fg='#d6d6c2',bg='#333338')
enc_file_scroll = tk.Scrollbar(window,width=16,elementborderwidth=0,highlightcolor='green',bg='green',bd=0,activebackground='green')
mylist = Listbox(window,width='90',height='7',yscrollcommand=enc_file_scroll.set,bg='green',bd=0,fg='#d6d6c2')
if window_filename:
for i in window_filename:
mylist.insert(END,' ' + i)
mylist.place(x='65',y='160')
enc_file_scroll.place(x='975',y='160',height=185)#anchor='w',fill='y',side='right',pady=50,padx=20)
file_to_encrypt_label.place(x=70,y=125)
enc_file_scroll.config(command=mylist.yview)
mainloop()
def setting_window(check_file):#setting window code
global default_password_entry,setting
check_file = open('hyde.law','r+')
check_file_lines = check_file.readlines()
setting_flag = check_file_lines[0]
if setting_flag == 'setting_window_off':
setting = tk.Tk()
setting.tk.call('tk', 'scaling', 2.0)
setting.geometry("570x300")
setting.resizable(width=False,height=False)
setting.title('Setting')
setting.configure(bg='#333338')
try:
setting.iconbitmap('setting.ico')
except:
pass
setting_info_label = tk.Label(bg='#333338',fg='#d6d6c2',text='Mr.Hyde uses AES-256 bit Encryption algorithm \n Users be advised',master=setting)
setting_info_label.pack()
default_password_label = tk.Label(bg='#333338',fg='#d6d6c2',text='Default password',master=setting)
default_password_label.pack(anchor='w',padx='10',pady='30')#place(x=30,y=80)
default_password_entry = tk.Entry(width='15',show='*',master=setting)
default_password_entry.place(x=140,y=80)
default_password_warning = tk.Label(bg='white',fg='red',text='1:The use of default password is not recommended. Remember password instead. \n 2: If you decide to use default password,\n there is no need to set a password in the main window.',master=setting)
default_password_warning.place(x=10,y=150)
set_default_password = tk.Button(text='set password',activebackground='black',highlightcolor='black',bd=1,relief='flat',height=0,width=13,fg='white',bg='#338237',command=set_default_password_section,master=setting)#,master=window)
set_default_password.place(x=280,y=80,height=25)
check_file = open('hyde.law','w+')
check_file.write('setting_window_on')
check_file.close()
def on_closing(): # hack on top of a hack
check_file = open('hyde.law','w+')
check_file.write('setting_window_off')
check_file.close()
setting.destroy()
def close_everything():
window.destroy()
setting.destroy()
setting.protocol('WM_DELETE_WINDOW',on_closing)
window.protocol('WM_DELETE_WINDOW',close_everything)
def get_default_password_section(default_password_file_list):#does what the function name says
global default_key
default_key = ''
for file,sun in zip(default_password_file_list,range(0,8)):
file_extract = open(file,'r+')
file_extract = file_extract.readlines()
bun= sun * 8
default_key = default_key + file_extract[0][bun:bun+8]
return default_key
#print(key)
def set_default_password_section():#sets default password
os.system('mkdir .hydefiles')
global default_password_file_list
default_password_file_list = ['.hydefiles/0okq7sgzt00emuwr.law','.hydefiles/dz5a0l17zehztni8.law','.hydefiles/uv8wbbi1zylip4v6.law','.hydefiles/0pk588qx1m1m5bf2.law','.hydefiles/nzlcnrcv88rrnghh.law','.hydefiles/kcf609aheo3rksm4.law','.hydefiles/q05y5cmdos60n58s.law','.hydefiles/5kcsxvpb5srx24vz.law']
default_password = default_password_entry.get()
if default_password != '':
salt_value = ''
hex_list = ['a','b','c','d','e','f','1','2','3','4','5','6','7','8','9','0']
for salt_char in range(0,8):
salt_value += random.choice(hex_list)
#salt_value += salt_value
#print(salt_value)
salt_file = open('.hydefiles/default_salt.law','w+')
salt_file.write(salt_value)
salt_file.close()
default_password = str(default_password+salt_value)
default_key = hashlib.sha256(default_password.encode('utf-8')).hexdigest()
#print(default_key)
#print(default_password)
for file,sun in zip(default_password_file_list,range(0,8)):
seti = ''
for bill in range(0,65):
seti = random.choice(hex_list) + seti
#print(seti)
seti2 = seti
#for sun in range(0,8):
if sun == 0:
bun = sun * 7
seti = default_key[bun:bun+8] + seti2[9:65]
default_password_file = open(file,'w+')
default_password_file.write(seti)
default_password_file.close()
#print(seti)
else:
bun2 = sun * 8
seti = seti2[0:bun2]+ default_key[bun2:bun2+8] + seti2[bun2+9:65]
#print(seti)
default_password_file = open(file,'w+')
default_password_file.write(seti)
default_password_file.close()
#print('setting default password')
else:
try:
import shutil
shutil.rmtree('.hydefiles')
MessageBox = ctypes.windll.user32.MessageBoxW
MessageBox(None, 'Blank Password Not Allowed','Error', 0)
except:
pass
check_file = open('hyde.law','w+')
check_file.write('setting_window_off')
check_file.close()
setting.destroy()
get_default_password_section(default_password_file_list)
def encrypt_section(password_entry,select_files_label):#Encrypt Section
try:
default_password_file_list = ['.hydefiles/0okq7sgzt00emuwr.law','.hydefiles/dz5a0l17zehztni8.law','.hydefiles/uv8wbbi1zylip4v6.law','.hydefiles/0pk588qx1m1m5bf2.law','.hydefiles/nzlcnrcv88rrnghh.law','.hydefiles/kcf609aheo3rksm4.law','.hydefiles/q05y5cmdos60n58s.law','.hydefiles/5kcsxvpb5srx24vz.law']
#IV = 16 * '\x00'
#mode = AES.MODE_CBC
password_entry_for_encryption = password_entry.get()
def pad(s):
return s + b"\0" * (AES.block_size - len(s) % AES.block_size)
if password_entry_for_encryption == '':
key = get_default_password_section(default_password_file_list)
check_sum_key = key.lower()
key = binascii.unhexlify(key)
#print(key)
else:
hex_list = ['a','b','c','d','e','f','1','2','3','4','5','6','7','8','9','0']
salt_value = ''
for salt_char in range(0,8):
salt_value += random.choice(hex_list)
password_entry_for_encryption = password_entry_for_encryption + salt_value
not_defalt_salt = open('.hydefiles/salt.law','a')
key = hashlib.sha256(password_entry_for_encryption.encode('utf-8')).digest()
check_sum_key = hashlib.sha256(password_entry_for_encryption.encode('utf-8')).hexdigest()
not_defalt_salt.write(check_sum_key[30:36]+'---'+salt_value+'\n')
not_defalt_salt.close()
hex_list = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f']
#print(a)
num = check_sum_key[30:36]
'''
for hex_i in check_sum_key:
for hex_b in hex_list:
if hex_i == hex_b:
num += hex_list.index(hex_b)# * check_sum_key.index(hex_i)
'''
#print(str(num))
enc_counter = 0
progress = Progressbar(window,orient=HORIZONTAL,length=926,mode='determinate')
progress.place(anchor='w',x=65,y=360)
prog = 100 / len(window_filename)
for file_to_encrypt in window_filename:
if file_to_encrypt.endswith('.enc'):
enc_counter+=1
else:
progress['value'] = prog
window.update_idletasks()
prog = prog + prog
fh = open(file_to_encrypt,'rb')
message = fh.read()
fh.close()
message = pad(message)
iv = Random.new().read(AES.block_size)
cipher = AES.new(key,AES.MODE_CBC,iv)
encrypted_text = iv + cipher.encrypt(message)
fh = open(file_to_encrypt + str(num) + '.enc','wb')
fh.write(encrypted_text)
fh.close()
os.remove(file_to_encrypt)
#print(file_to_encrypt)
if enc_counter !=0:
MessageBox = ctypes.windll.user32.MessageBoxW
MessageBox(None, 'Already encrypted', 'Error', 0)
mylist.delete(0,END)
enc_file_scroll.place_forget()
mylist.place_forget()
progress.place_forget()
file_to_encrypt_label.place_forget()
password_entry.delete(0,END)
MessageBox = ctypes.windll.user32.MessageBoxW
MessageBox(None, 'Selected Files Encrypted','Success', 0)
#window_filename = {}
except:
progress.place_forget()
try:
mylist.place_forget()
except:
pass
MessageBox = ctypes.windll.user32.MessageBoxW
MessageBox(None, 'Select Files First', 'Error', 0)
password_entry.delete(0,END)
finally:
pass
def decrypt_section(password_entry,select_files_label):#Decrypt section
try:
def unpad(s):
return s[:-ord(s[len(s)-1:])]
default_password_file_list = ['.hydefiles/0okq7sgzt00emuwr.law','.hydefiles/dz5a0l17zehztni8.law','.hydefiles/uv8wbbi1zylip4v6.law','.hydefiles/0pk588qx1m1m5bf2.law','.hydefiles/nzlcnrcv88rrnghh.law','.hydefiles/kcf609aheo3rksm4.law','.hydefiles/q05y5cmdos60n58s.law','.hydefiles/5kcsxvpb5srx24vz.law']
password_entry_for_encryption = password_entry.get()
if password_entry_for_encryption == '':
key = get_default_password_section(default_password_file_list)
check_sum_key = key.lower()
key = binascii.unhexlify(key)
#print(key)
else:
not_defalt_salt = open('.hydefiles/salt.law','r+')
salt_lines = not_defalt_salt.readlines()
#salt_lines = salt_lines[0].strip(' ')
#print(salt_lines)
for check_salt_value in salt_lines:
for file_to_check in window_filename:
if str(check_salt_value[0:6]) == str(file_to_check[-10:-4]):
#print('here')
salt_lines = check_salt_value[-9:-1]
#print(salt_lines)
#print(check_salt_value)
#ashdkjahsdjh = input("inpput here")
#print(str(file_to_check[-10:-4])+'---'+check_salt_value[0:6])
password_entry_for_encryption2 = password_entry_for_encryption
password_entry_for_encryption = password_entry_for_encryption + salt_lines
key = hashlib.sha256(password_entry_for_encryption.encode('utf-8')).digest()
check_sum_key = hashlib.sha256(password_entry_for_encryption.encode('utf-8')).hexdigest()
hex_list = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f']
#print(a)
num = str(check_sum_key[30:36]).strip(' ')
#print(num+'---'+salt_lines)
#asdasdasrca = input('here2')
'''
for hex_i in check_sum_key:
for hex_b in hex_list:
if hex_i == hex_b:
num += hex_list.index(hex_b)
'''
invalid_counter = 0
progress = Progressbar(window,orient=HORIZONTAL,length=926,mode='determinate')
progress.place(anchor='w',x=65,y=360)
prog = 100 / len(window_filename)
for file_to_decrypt in window_filename:
#print(str(file_to_decrypt[-10:-4]))
if num == str(file_to_decrypt[-10:-4]):
progress['value'] = prog
window.update_idletasks()
prog = prog + prog
fd = open(file_to_decrypt,'rb')
message = fd.read()
fd.close()
iv = message[:AES.block_size]
cipher = AES.new(key,AES.MODE_CBC,iv)
plaintext = cipher.decrypt(message[AES.block_size:])
write_message = plaintext.rstrip(b"\0")
remove_file = file_to_decrypt
file_to_decrypt = file_to_decrypt[0:-10]
fd = open(file_to_decrypt,'wb')
fd.write(write_message)
fd.close()
os.remove(remove_file)
else:
#print('key_invalid')
invalid_counter +=1
#entry1.delete(0,tk.END)
if invalid_counter != 0 :
MessageBox = ctypes.windll.user32.MessageBoxW
MessageBox(None, 'Invalid key used for '+str(invalid_counter)+' files','Error', 0)
progress.place_forget()
password_entry.delete(0,END)
else:
file_to_encrypt_label.place_forget()
enc_file_scroll.place_forget()
mylist.place_forget()
progress.place_forget()
password_entry.delete(0,END)
MessageBox = ctypes.windll.user32.MessageBoxW
MessageBox(None, 'Selected Files Decrypted','Success', 0)
except:
progress.place_forget()
try:
mylist.place_forget()
except:
pass
MessageBox = ctypes.windll.user32.MessageBoxW
MessageBox(None, 'Select Files First', 'Error', 0)
password_entry.delete(0,END)
finally:
pass
#window_filename = {}
#print(window_filename)
initilise() #initialise window
label.after(3000,clear_label) #app_name label intro
window.mainloop()
else:
ctypes.windll.shell32.ShellExecuteW(None, "runas", sys.executable, __file__, None, 1)
is_admin()
except IOError as e:
error_file = open('error.log','a+')
error_file.write(str(e)+'\n')
error_file.close()
| 49.482759
| 318
| 0.541563
|
import tkinter as tk
from tkinter import *
from tkinter.ttk import *
from Crypto import Random
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
import os
import os.path
from os import listdir
from os.path import isfile, join
import ctypes
import hashlib
import random
import binascii
import sys
try:
def is_admin():
try:
return ctypes.windll.shell32.IsUserAnAdmin()
except:
return False
if is_admin():
def initilise():
global check_file
check_file = open('hyde.law','w+')
check_file.write('setting_window_off')
check_file.close()
os.system('mkdir .hydefiles')
ctypes.windll.shcore.SetProcessDpiAwareness(1)
global label,window
window = tk.Tk()
window.tk.call('tk', 'scaling', 2.0)
window.geometry("1050x380")
window.resizable(width=False,height=False)
window.title("Mr. Hyde")
try:
window.iconbitmap('hyde.ico')
except:
pass
window.configure(bg='#333338')
label = tk.Label(text = "Mr. Hyde" ,fg="#d6d6c2",bg="#333338")
label.place(relx=.5,rely=.5,anchor="c")
def clear_label():
label.place_forget()
label1 = tk.Label(text = "Encrypt Your Files with 'AES-256'" ,fg="white",bg="#333338")
label1.pack()
open_files_button = tk.Button(text='Select Files',activebackground='black',highlightcolor='black',bd=1,relief='flat',height=0,width=12,fg='white',bg='#338237',command=open_files,master=window)
open_files_button.pack(anchor='nw',pady=50,padx=10,side='left')
password_entry = tk.Entry(width=15,show='*')
password_entry.place(x=600,y=82,height=42)
encrypt_button = tk.Button(text='Encrypt',activebackground='black',highlightcolor='black',bd=1,relief='flat',height=0,width=12,fg='white',bg='#338237',command=lambda:encrypt_section(password_entry,select_files_label),master=window)
encrypt_button.place(x=910,y=82)
decrypt_button = tk.Button(text='Decrypt',activebackground='black',highlightcolor='black',bd=1,relief='flat',height=0,width=12,fg='white',bg='#338237',command=lambda:decrypt_section(password_entry,select_files_label),master=window)
decrypt_button.place(x=770,y=82)
password_label = tk.Label(text='Password',fg='#d6d6c2',bg='#333338')
password_label.place(x=500,y=85)
select_files_label = tk.Label(text='files not selected',fg='#d6d6c2',bg='#333338')
select_files_label.place(x=460,y=220)
setting_button = tk.Button(text='setting',activebackground='black',highlightcolor='black',bd=1,relief='flat',height=0,width=5,fg='white',bg='#338237',command=lambda :setting_window(check_file))
setting_button.place(x=980,y=12,height=30)
def open_files():
from tkinter import filedialog
global window_filename,enc_file_list,mylist,enc_file_scroll,file_to_encrypt_label
enc_file_list = []
window_filename = filedialog.askopenfilenames(initialdir = "/",title = "Select file",filetypes = (("all files","*.*"),("enc files","*.enc"),("jpeg files","*.jpg")))
enc_file_list.append(window_filename)
file_to_encrypt_label = tk.Label(text='Files to Encrypt',justify='left',fg='#d6d6c2',bg='#333338')
enc_file_scroll = tk.Scrollbar(window,width=16,elementborderwidth=0,highlightcolor='green',bg='green',bd=0,activebackground='green')
mylist = Listbox(window,width='90',height='7',yscrollcommand=enc_file_scroll.set,bg='green',bd=0,fg='#d6d6c2')
if window_filename:
for i in window_filename:
mylist.insert(END,' ' + i)
mylist.place(x='65',y='160')
enc_file_scroll.place(x='975',y='160',height=185)
file_to_encrypt_label.place(x=70,y=125)
enc_file_scroll.config(command=mylist.yview)
mainloop()
def setting_window(check_file):
global default_password_entry,setting
check_file = open('hyde.law','r+')
check_file_lines = check_file.readlines()
setting_flag = check_file_lines[0]
if setting_flag == 'setting_window_off':
setting = tk.Tk()
setting.tk.call('tk', 'scaling', 2.0)
setting.geometry("570x300")
setting.resizable(width=False,height=False)
setting.title('Setting')
setting.configure(bg='#333338')
try:
setting.iconbitmap('setting.ico')
except:
pass
setting_info_label = tk.Label(bg='#333338',fg='#d6d6c2',text='Mr.Hyde uses AES-256 bit Encryption algorithm \n Users be advised',master=setting)
setting_info_label.pack()
default_password_label = tk.Label(bg='#333338',fg='#d6d6c2',text='Default password',master=setting)
default_password_label.pack(anchor='w',padx='10',pady='30')
default_password_entry = tk.Entry(width='15',show='*',master=setting)
default_password_entry.place(x=140,y=80)
default_password_warning = tk.Label(bg='white',fg='red',text='1:The use of default password is not recommended. Remember password instead. \n 2: If you decide to use default password,\n there is no need to set a password in the main window.',master=setting)
default_password_warning.place(x=10,y=150)
set_default_password = tk.Button(text='set password',activebackground='black',highlightcolor='black',bd=1,relief='flat',height=0,width=13,fg='white',bg='#338237',command=set_default_password_section,master=setting)
set_default_password.place(x=280,y=80,height=25)
check_file = open('hyde.law','w+')
check_file.write('setting_window_on')
check_file.close()
def on_closing():
check_file = open('hyde.law','w+')
check_file.write('setting_window_off')
check_file.close()
setting.destroy()
def close_everything():
window.destroy()
setting.destroy()
setting.protocol('WM_DELETE_WINDOW',on_closing)
window.protocol('WM_DELETE_WINDOW',close_everything)
def get_default_password_section(default_password_file_list):
global default_key
default_key = ''
for file,sun in zip(default_password_file_list,range(0,8)):
file_extract = open(file,'r+')
file_extract = file_extract.readlines()
bun= sun * 8
default_key = default_key + file_extract[0][bun:bun+8]
return default_key
def set_default_password_section():
os.system('mkdir .hydefiles')
global default_password_file_list
default_password_file_list = ['.hydefiles/0okq7sgzt00emuwr.law','.hydefiles/dz5a0l17zehztni8.law','.hydefiles/uv8wbbi1zylip4v6.law','.hydefiles/0pk588qx1m1m5bf2.law','.hydefiles/nzlcnrcv88rrnghh.law','.hydefiles/kcf609aheo3rksm4.law','.hydefiles/q05y5cmdos60n58s.law','.hydefiles/5kcsxvpb5srx24vz.law']
default_password = default_password_entry.get()
if default_password != '':
salt_value = ''
hex_list = ['a','b','c','d','e','f','1','2','3','4','5','6','7','8','9','0']
for salt_char in range(0,8):
salt_value += random.choice(hex_list)
salt_file = open('.hydefiles/default_salt.law','w+')
salt_file.write(salt_value)
salt_file.close()
default_password = str(default_password+salt_value)
default_key = hashlib.sha256(default_password.encode('utf-8')).hexdigest()
for file,sun in zip(default_password_file_list,range(0,8)):
seti = ''
for bill in range(0,65):
seti = random.choice(hex_list) + seti
seti2 = seti
if sun == 0:
bun = sun * 7
seti = default_key[bun:bun+8] + seti2[9:65]
default_password_file = open(file,'w+')
default_password_file.write(seti)
default_password_file.close()
else:
bun2 = sun * 8
seti = seti2[0:bun2]+ default_key[bun2:bun2+8] + seti2[bun2+9:65]
default_password_file = open(file,'w+')
default_password_file.write(seti)
default_password_file.close()
else:
try:
import shutil
shutil.rmtree('.hydefiles')
MessageBox = ctypes.windll.user32.MessageBoxW
MessageBox(None, 'Blank Password Not Allowed','Error', 0)
except:
pass
check_file = open('hyde.law','w+')
check_file.write('setting_window_off')
check_file.close()
setting.destroy()
get_default_password_section(default_password_file_list)
def encrypt_section(password_entry,select_files_label):
try:
default_password_file_list = ['.hydefiles/0okq7sgzt00emuwr.law','.hydefiles/dz5a0l17zehztni8.law','.hydefiles/uv8wbbi1zylip4v6.law','.hydefiles/0pk588qx1m1m5bf2.law','.hydefiles/nzlcnrcv88rrnghh.law','.hydefiles/kcf609aheo3rksm4.law','.hydefiles/q05y5cmdos60n58s.law','.hydefiles/5kcsxvpb5srx24vz.law']
password_entry_for_encryption = password_entry.get()
def pad(s):
return s + b"\0" * (AES.block_size - len(s) % AES.block_size)
if password_entry_for_encryption == '':
key = get_default_password_section(default_password_file_list)
check_sum_key = key.lower()
key = binascii.unhexlify(key)
else:
hex_list = ['a','b','c','d','e','f','1','2','3','4','5','6','7','8','9','0']
salt_value = ''
for salt_char in range(0,8):
salt_value += random.choice(hex_list)
password_entry_for_encryption = password_entry_for_encryption + salt_value
not_defalt_salt = open('.hydefiles/salt.law','a')
key = hashlib.sha256(password_entry_for_encryption.encode('utf-8')).digest()
check_sum_key = hashlib.sha256(password_entry_for_encryption.encode('utf-8')).hexdigest()
not_defalt_salt.write(check_sum_key[30:36]+'---'+salt_value+'\n')
not_defalt_salt.close()
hex_list = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f']
num = check_sum_key[30:36]
enc_counter = 0
progress = Progressbar(window,orient=HORIZONTAL,length=926,mode='determinate')
progress.place(anchor='w',x=65,y=360)
prog = 100 / len(window_filename)
for file_to_encrypt in window_filename:
if file_to_encrypt.endswith('.enc'):
enc_counter+=1
else:
progress['value'] = prog
window.update_idletasks()
prog = prog + prog
fh = open(file_to_encrypt,'rb')
message = fh.read()
fh.close()
message = pad(message)
iv = Random.new().read(AES.block_size)
cipher = AES.new(key,AES.MODE_CBC,iv)
encrypted_text = iv + cipher.encrypt(message)
fh = open(file_to_encrypt + str(num) + '.enc','wb')
fh.write(encrypted_text)
fh.close()
os.remove(file_to_encrypt)
if enc_counter !=0:
MessageBox = ctypes.windll.user32.MessageBoxW
MessageBox(None, 'Already encrypted', 'Error', 0)
mylist.delete(0,END)
enc_file_scroll.place_forget()
mylist.place_forget()
progress.place_forget()
file_to_encrypt_label.place_forget()
password_entry.delete(0,END)
MessageBox = ctypes.windll.user32.MessageBoxW
MessageBox(None, 'Selected Files Encrypted','Success', 0)
except:
progress.place_forget()
try:
mylist.place_forget()
except:
pass
MessageBox = ctypes.windll.user32.MessageBoxW
MessageBox(None, 'Select Files First', 'Error', 0)
password_entry.delete(0,END)
finally:
pass
def decrypt_section(password_entry,select_files_label):
try:
def unpad(s):
return s[:-ord(s[len(s)-1:])]
default_password_file_list = ['.hydefiles/0okq7sgzt00emuwr.law','.hydefiles/dz5a0l17zehztni8.law','.hydefiles/uv8wbbi1zylip4v6.law','.hydefiles/0pk588qx1m1m5bf2.law','.hydefiles/nzlcnrcv88rrnghh.law','.hydefiles/kcf609aheo3rksm4.law','.hydefiles/q05y5cmdos60n58s.law','.hydefiles/5kcsxvpb5srx24vz.law']
password_entry_for_encryption = password_entry.get()
if password_entry_for_encryption == '':
key = get_default_password_section(default_password_file_list)
check_sum_key = key.lower()
key = binascii.unhexlify(key)
else:
not_defalt_salt = open('.hydefiles/salt.law','r+')
salt_lines = not_defalt_salt.readlines()
for check_salt_value in salt_lines:
for file_to_check in window_filename:
if str(check_salt_value[0:6]) == str(file_to_check[-10:-4]):
salt_lines = check_salt_value[-9:-1]
password_entry_for_encryption2 = password_entry_for_encryption
password_entry_for_encryption = password_entry_for_encryption + salt_lines
key = hashlib.sha256(password_entry_for_encryption.encode('utf-8')).digest()
check_sum_key = hashlib.sha256(password_entry_for_encryption.encode('utf-8')).hexdigest()
hex_list = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f']
num = str(check_sum_key[30:36]).strip(' ')
invalid_counter = 0
progress = Progressbar(window,orient=HORIZONTAL,length=926,mode='determinate')
progress.place(anchor='w',x=65,y=360)
prog = 100 / len(window_filename)
for file_to_decrypt in window_filename:
if num == str(file_to_decrypt[-10:-4]):
progress['value'] = prog
window.update_idletasks()
prog = prog + prog
fd = open(file_to_decrypt,'rb')
message = fd.read()
fd.close()
iv = message[:AES.block_size]
cipher = AES.new(key,AES.MODE_CBC,iv)
plaintext = cipher.decrypt(message[AES.block_size:])
write_message = plaintext.rstrip(b"\0")
remove_file = file_to_decrypt
file_to_decrypt = file_to_decrypt[0:-10]
fd = open(file_to_decrypt,'wb')
fd.write(write_message)
fd.close()
os.remove(remove_file)
else:
invalid_counter +=1
if invalid_counter != 0 :
MessageBox = ctypes.windll.user32.MessageBoxW
MessageBox(None, 'Invalid key used for '+str(invalid_counter)+' files','Error', 0)
progress.place_forget()
password_entry.delete(0,END)
else:
file_to_encrypt_label.place_forget()
enc_file_scroll.place_forget()
mylist.place_forget()
progress.place_forget()
password_entry.delete(0,END)
MessageBox = ctypes.windll.user32.MessageBoxW
MessageBox(None, 'Selected Files Decrypted','Success', 0)
except:
progress.place_forget()
try:
mylist.place_forget()
except:
pass
MessageBox = ctypes.windll.user32.MessageBoxW
MessageBox(None, 'Select Files First', 'Error', 0)
password_entry.delete(0,END)
finally:
pass
initilise()
label.after(3000,clear_label)
window.mainloop()
else:
ctypes.windll.shell32.ShellExecuteW(None, "runas", sys.executable, __file__, None, 1)
is_admin()
except IOError as e:
error_file = open('error.log','a+')
error_file.write(str(e)+'\n')
error_file.close()
| true
| true
|
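The script in the record above derives a 32-byte key as SHA-256(password + salt), zero-pads the plaintext, and writes a random IV followed by the AES-CBC ciphertext. A minimal sketch of that pattern, assuming PyCryptodome supplies the `Crypto` package the script imports; the helper names are illustrative:

```python
# Hedged sketch of the encrypt/decrypt pattern used by script.py above.
import hashlib
from Crypto import Random
from Crypto.Cipher import AES

def derive_key(password: str, salt: str) -> bytes:
    # SHA-256 of password+salt gives a 32-byte key (AES-256), as in the script.
    return hashlib.sha256((password + salt).encode("utf-8")).digest()

def pad(data: bytes) -> bytes:
    # Zero-padding to a multiple of the block size, mirroring pad() in the script.
    return data + b"\0" * (AES.block_size - len(data) % AES.block_size)

def encrypt_blob(data: bytes, key: bytes) -> bytes:
    iv = Random.new().read(AES.block_size)  # random IV, prepended to the output
    return iv + AES.new(key, AES.MODE_CBC, iv).encrypt(pad(data))

def decrypt_blob(blob: bytes, key: bytes) -> bytes:
    iv, body = blob[:AES.block_size], blob[AES.block_size:]
    return AES.new(key, AES.MODE_CBC, iv).decrypt(body).rstrip(b"\0")

if __name__ == "__main__":
    k = derive_key("hunter2", "a1b2c3d4")
    assert decrypt_blob(encrypt_blob(b"hello", k), k) == b"hello"
```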
f719721f94c03312a66a5dd67b5e3f239bdd431b
| 1,925
|
py
|
Python
|
setup.py
|
unclemedia0/phasiakon
|
fe6cef9b8c3d8f7da0a9ef3b18f9c2ea0ec08dc0
|
[
"MIT"
] | null | null | null |
setup.py
|
unclemedia0/phasiakon
|
fe6cef9b8c3d8f7da0a9ef3b18f9c2ea0ec08dc0
|
[
"MIT"
] | null | null | null |
setup.py
|
unclemedia0/phasiakon
|
fe6cef9b8c3d8f7da0a9ef3b18f9c2ea0ec08dc0
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
try:
with open("README.md","r") as fh:
long_description = fh.read()
except:
long_description = 'Taxation7% by UncleMedia'
setup(
name = 'phasiakon', # How you named your package folder (MyLib)
packages = ['phasiakon'], # Chose the same as "name"
version = '0.1', # Start with a small number and increase it with every change you make
license='MIT', # Chose a license from here: https://help.github.com/articles/licensing-a-repository
description = 'Taxation7% by UncleMedia', # Give a short description about your library
long_description=long_description,
long_description_content_type = "text/markdown",
author = 'UncleMedia', # Type in your name
author_email = 'unclemedia0@gmail.com', # Type in your E-Mail
url = 'https://github.com/unclemedia0/phasiakon', # Provide either the link to your github or to your website
download_url = 'https://github.com/unclemedia0/phasiakon/archive/v_01.tar.gz', # I explain this later on
keywords = ['phasiakon', 'Hmong', 'UncleMedia'], # Keywords that define your package best
classifiers=[
'Development Status :: 3 - Alpha', # Chose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
'Intended Audience :: Developers', # Define that your audience are developers
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License', # Again, pick a license
'Programming Language :: Python :: 3', #Specify which pyhton versions that you want to support
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
)
| 55
| 147
| 0.660779
|
from distutils.core import setup
try:
with open("README.md","r") as fh:
long_description = fh.read()
except:
long_description = 'Taxation7% by UncleMedia'
setup(
name = 'phasiakon',
packages = ['phasiakon'],
version = '0.1',
license='MIT',
description = 'Taxation7% by UncleMedia',
long_description=long_description,
long_description_content_type = "text/markdown",
author = 'UncleMedia',
author_email = 'unclemedia0@gmail.com',
url = 'https://github.com/unclemedia0/phasiakon',
download_url = 'https://github.com/unclemedia0/phasiakon/archive/v_01.tar.gz',
keywords = ['phasiakon', 'Hmong', 'UncleMedia'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
)
| true
| true
|
f719733841763fd63a92a93738e9a161aaffbe6e
| 4,753
|
py
|
Python
|
momus/VHRED/split-examples-by-token.py
|
ourDirection/ourDirection
|
b99ed67a8cc0fe5016e03fe3b5ad083b7f8bbdc0
|
[
"Apache-2.0"
] | null | null | null |
momus/VHRED/split-examples-by-token.py
|
ourDirection/ourDirection
|
b99ed67a8cc0fe5016e03fe3b5ad083b7f8bbdc0
|
[
"Apache-2.0"
] | null | null | null |
momus/VHRED/split-examples-by-token.py
|
ourDirection/ourDirection
|
b99ed67a8cc0fe5016e03fe3b5ad083b7f8bbdc0
|
[
"Apache-2.0"
] | null | null | null |
"""
Takes as input a binarized dialogue corpus, splits the examples by a certain token and shuffles it
Example run:
python split-examples-by-token.py Training.dialogues.pkl 2 Training_SplitByDialogues.dialogues --join_last_two_examples
@author Iulian Vlad Serban
"""
import collections
import numpy
import math
import operator
import os
import sys
import logging
import cPickle
from collections import Counter
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('text2dict')
def safe_pickle(obj, filename):
if os.path.isfile(filename):
logger.info("Overwriting %s." % filename)
else:
logger.info("Saving to %s." % filename)
with open(filename, 'wb') as f:
cPickle.dump(obj, f, protocol=cPickle.HIGHEST_PROTOCOL)
# Thanks to Emile on Stackoverflow:
# http://stackoverflow.com/questions/4322705/split-a-list-into-nested-lists-on-a-value
def _itersplit(l, splitters):
current = []
for item in l:
if item in splitters:
yield current
current = []
else:
current.append(item)
yield current
def magicsplit(l, *splitters):
return [subl for subl in _itersplit(l, splitters) if subl]
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("input", type=str, help="Binarized dialogue corpus (pkl file)")
parser.add_argument("token_id", type=int, help="Token index to split examples by (e.g. to split by end-of-dialogue set this to 2)")
parser.add_argument("consecutive_examples_to_merge", type=int, default='1', help="After splitting these number of examples will be merged.")
parser.add_argument("--join_last_two_examples",
action="store_true", default=False,
help="If on, will join the last two splits generated from each example. This is useful to handle empty or very short last samples")
parser.add_argument("output", type=str, help="Filename of processed binarized dialogue corpus (pkl file)")
args = parser.parse_args()
if not os.path.isfile(args.input):
raise Exception("Input file not found!")
logger.info("Loading dialogue corpus")
data = cPickle.load(open(args.input, 'r'))
data_len = len(data)
logger.info('Corpus loaded... Data len is %d' % data_len)
# Count number of tokens
tokens_count = 0
for i in range(data_len):
tokens_count += len(data[i])
logger.info('Tokens count %d' % tokens_count)
logger.info("Splitting corpus examples by token id... ")
processed_binarized_corpus = []
for i in range(data_len):
logger.info(' Example %d ' % i)
new_examples = magicsplit(data[i], int(args.token_id))
# If option is specified, we append the last new example to the second last one
if args.join_last_two_examples and len(new_examples) > 1:
new_examples[len(new_examples)-2] += new_examples[len(new_examples)-1]
del new_examples[len(new_examples)-1]
# Simpler version of the two for loops, which does not allow merging together samples
#for new_example in new_examples:
# processed_binarized_corpus.append(new_example + [int(args.token_id)])
s = int(math.floor(len(new_examples) / args.consecutive_examples_to_merge))
for j in range(1, s):
start_index = j*args.consecutive_examples_to_merge
merged_example = []
for k in reversed(range(args.consecutive_examples_to_merge)):
merged_example += new_examples[start_index-k-1] + [int(args.token_id)]
processed_binarized_corpus.append(merged_example)
if s > 0:
merged_example = []
for k in range((s-1)*args.consecutive_examples_to_merge, len(new_examples)):
merged_example += new_examples[k] + [int(args.token_id)]
processed_binarized_corpus.append(merged_example)
else:
merged_example = []
for k in range(len(new_examples)):
merged_example += new_examples[k] + [int(args.token_id)]
processed_binarized_corpus.append(merged_example)
logger.info('New data len is %d' % len(processed_binarized_corpus))
# Count number of tokens
processed_tokens_count = 0
for i in range(len(processed_binarized_corpus)):
processed_tokens_count += len(processed_binarized_corpus[i])
logger.info('New tokens count %d' % processed_tokens_count)
# When splitting by end-of-utterance token </s>, there are some instances with multiple </s> at the end of each example. Our splitting method will effectively remove these, but it is not of any concern to us.
# assert(processed_tokens_count == tokens_count)
logger.info("Reshuffling corpus.")
rng = numpy.random.RandomState(13248)
rng.shuffle(processed_binarized_corpus)
logger.info("Saving corpus.")
safe_pickle(processed_binarized_corpus, args.output + ".pkl")
logger.info("Corpus saved. All done!")
| 35.736842
| 208
| 0.722281
|
import collections
import numpy
import math
import operator
import os
import sys
import logging
import cPickle
from collections import Counter
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('text2dict')
def safe_pickle(obj, filename):
if os.path.isfile(filename):
logger.info("Overwriting %s." % filename)
else:
logger.info("Saving to %s." % filename)
with open(filename, 'wb') as f:
cPickle.dump(obj, f, protocol=cPickle.HIGHEST_PROTOCOL)
def _itersplit(l, splitters):
current = []
for item in l:
if item in splitters:
yield current
current = []
else:
current.append(item)
yield current
def magicsplit(l, *splitters):
return [subl for subl in _itersplit(l, splitters) if subl]
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("input", type=str, help="Binarized dialogue corpus (pkl file)")
parser.add_argument("token_id", type=int, help="Token index to split examples by (e.g. to split by end-of-dialogue set this to 2)")
parser.add_argument("consecutive_examples_to_merge", type=int, default='1', help="After splitting these number of examples will be merged.")
parser.add_argument("--join_last_two_examples",
action="store_true", default=False,
help="If on, will join the last two splits generated from each example. This is useful to handle empty or very short last samples")
parser.add_argument("output", type=str, help="Filename of processed binarized dialogue corpus (pkl file)")
args = parser.parse_args()
if not os.path.isfile(args.input):
raise Exception("Input file not found!")
logger.info("Loading dialogue corpus")
data = cPickle.load(open(args.input, 'r'))
data_len = len(data)
logger.info('Corpus loaded... Data len is %d' % data_len)
tokens_count = 0
for i in range(data_len):
tokens_count += len(data[i])
logger.info('Tokens count %d' % tokens_count)
logger.info("Splitting corpus examples by token id... ")
processed_binarized_corpus = []
for i in range(data_len):
logger.info(' Example %d ' % i)
new_examples = magicsplit(data[i], int(args.token_id))
if args.join_last_two_examples and len(new_examples) > 1:
new_examples[len(new_examples)-2] += new_examples[len(new_examples)-1]
del new_examples[len(new_examples)-1]
s = int(math.floor(len(new_examples) / args.consecutive_examples_to_merge))
for j in range(1, s):
start_index = j*args.consecutive_examples_to_merge
merged_example = []
for k in reversed(range(args.consecutive_examples_to_merge)):
merged_example += new_examples[start_index-k-1] + [int(args.token_id)]
processed_binarized_corpus.append(merged_example)
if s > 0:
merged_example = []
for k in range((s-1)*args.consecutive_examples_to_merge, len(new_examples)):
merged_example += new_examples[k] + [int(args.token_id)]
processed_binarized_corpus.append(merged_example)
else:
merged_example = []
for k in range(len(new_examples)):
merged_example += new_examples[k] + [int(args.token_id)]
processed_binarized_corpus.append(merged_example)
logger.info('New data len is %d' % len(processed_binarized_corpus))
processed_tokens_count = 0
for i in range(len(processed_binarized_corpus)):
processed_tokens_count += len(processed_binarized_corpus[i])
logger.info('New tokens count %d' % processed_tokens_count)
logger.info("Reshuffling corpus.")
rng = numpy.random.RandomState(13248)
rng.shuffle(processed_binarized_corpus)
logger.info("Saving corpus.")
safe_pickle(processed_binarized_corpus, args.output + ".pkl")
logger.info("Corpus saved. All done!")
| true
| true
|
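The splitter in the record above cuts each binarized dialogue at a chosen token id, optionally joins the last two pieces, and re-appends the token to every piece before merging and shuffling. A small self-contained illustration of the split step (written for Python 3, while the original targets Python 2):

```python
# Hedged sketch of the magicsplit step from split-examples-by-token.py above.
def _itersplit(seq, splitters):
    current = []
    for item in seq:
        if item in splitters:
            yield current
            current = []
        else:
            current.append(item)
    yield current

def magicsplit(seq, *splitters):
    return [sub for sub in _itersplit(seq, splitters) if sub]

# Splitting a toy dialogue on end-of-dialogue token 2, then re-appending the token:
dialogue = [5, 7, 2, 9, 4, 4, 2, 8]
examples = [chunk + [2] for chunk in magicsplit(dialogue, 2)]
assert examples == [[5, 7, 2], [9, 4, 4, 2], [8, 2]]
```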
f719736b5b137de7082002cec486dbcda1835bae
| 1,497
|
py
|
Python
|
tapis_cli/commands/taccapis/v2/systems/roles_show.py
|
shwetagopaul92/tapis-cli-ng
|
6f424b8352c0d034d4f5547fac21d5c8dd097a7f
|
[
"BSD-3-Clause"
] | null | null | null |
tapis_cli/commands/taccapis/v2/systems/roles_show.py
|
shwetagopaul92/tapis-cli-ng
|
6f424b8352c0d034d4f5547fac21d5c8dd097a7f
|
[
"BSD-3-Clause"
] | null | null | null |
tapis_cli/commands/taccapis/v2/systems/roles_show.py
|
shwetagopaul92/tapis-cli-ng
|
6f424b8352c0d034d4f5547fac21d5c8dd097a7f
|
[
"BSD-3-Clause"
] | null | null | null |
from agavepy.agave import AgaveError
from tapis_cli.display import Verbosity
from tapis_cli.clients.services.mixins import ServiceIdentifier, Username
from . import API_NAME, SERVICE_VERSION
from .models import SystemRole
from .formatters import SystemsFormatOne
__all__ = ['SystemsRolesShow']
class SystemsRolesShow(SystemsFormatOne, ServiceIdentifier, Username):
"""Show role on a System for a User
"""
VERBOSITY = Verbosity.BRIEF
EXTRA_VERBOSITY = Verbosity.RECORD
def get_parser(self, prog_name):
parser = super(SystemsRolesShow, self).get_parser(prog_name)
parser = ServiceIdentifier.extend_parser(self, parser)
parser = Username.extend_parser(self, parser)
return parser
def take_action(self, parsed_args):
parsed_args = self.preprocess_args(parsed_args)
self.requests_client.setup(API_NAME, SERVICE_VERSION)
self.update_payload(parsed_args)
headers = self.render_headers(SystemRole, parsed_args)
try:
rec = self.tapis_client.systems.getRoleForUser(
systemId=parsed_args.identifier, username=parsed_args.username)
except Exception:
rec = {
'username': parsed_args.username,
'role': None,
'_links': []
}
data = []
for key in headers:
val = self.render_value(rec.get(key, None))
data.append(val)
return (tuple(headers), tuple(data))
| 33.266667
| 79
| 0.669339
|
from agavepy.agave import AgaveError
from tapis_cli.display import Verbosity
from tapis_cli.clients.services.mixins import ServiceIdentifier, Username
from . import API_NAME, SERVICE_VERSION
from .models import SystemRole
from .formatters import SystemsFormatOne
__all__ = ['SystemsRolesShow']
class SystemsRolesShow(SystemsFormatOne, ServiceIdentifier, Username):
VERBOSITY = Verbosity.BRIEF
EXTRA_VERBOSITY = Verbosity.RECORD
def get_parser(self, prog_name):
parser = super(SystemsRolesShow, self).get_parser(prog_name)
parser = ServiceIdentifier.extend_parser(self, parser)
parser = Username.extend_parser(self, parser)
return parser
def take_action(self, parsed_args):
parsed_args = self.preprocess_args(parsed_args)
self.requests_client.setup(API_NAME, SERVICE_VERSION)
self.update_payload(parsed_args)
headers = self.render_headers(SystemRole, parsed_args)
try:
rec = self.tapis_client.systems.getRoleForUser(
systemId=parsed_args.identifier, username=parsed_args.username)
except Exception:
rec = {
'username': parsed_args.username,
'role': None,
'_links': []
}
data = []
for key in headers:
val = self.render_value(rec.get(key, None))
data.append(val)
return (tuple(headers), tuple(data))
| true
| true
|
f719761c09d3fa035769e8bee81a2d948a8ad1b9
| 255
|
py
|
Python
|
tests/test_example.py
|
skylifewww/handball
|
853190e44037086b7749cb8f62d9df6577b379fd
|
[
"MIT"
] | null | null | null |
tests/test_example.py
|
skylifewww/handball
|
853190e44037086b7749cb8f62d9df6577b379fd
|
[
"MIT"
] | null | null | null |
tests/test_example.py
|
skylifewww/handball
|
853190e44037086b7749cb8f62d9df6577b379fd
|
[
"MIT"
] | null | null | null |
from handball.core.test import TestCase
from handball.users.factories import UserFactory
class TestExample(TestCase):
def test_example(self):
UserFactory()
resp = self.client.get('/')
self.assertEqual(resp.status_code, 200)
| 23.181818
| 48
| 0.709804
|
from handball.core.test import TestCase
from handball.users.factories import UserFactory
class TestExample(TestCase):
def test_example(self):
UserFactory()
resp = self.client.get('/')
self.assertEqual(resp.status_code, 200)
| true
| true
|
f719771bdcfb47ab5315aba6e6e1b06f312f1af0
| 792
|
py
|
Python
|
samples/client/petstore/python-experimental/test/test_parent.py
|
MalcolmScoffable/openapi-generator
|
73605a0c0e0c825286c95123c63678ba75b44d5c
|
[
"Apache-2.0"
] | 4
|
2020-07-24T07:02:57.000Z
|
2022-01-08T17:37:38.000Z
|
samples/client/petstore/python-experimental/test/test_parent.py
|
MalcolmScoffable/openapi-generator
|
73605a0c0e0c825286c95123c63678ba75b44d5c
|
[
"Apache-2.0"
] | 7
|
2021-05-12T00:00:20.000Z
|
2022-02-27T11:23:35.000Z
|
samples/client/petstore/python-experimental/test/test_parent.py
|
MalcolmScoffable/openapi-generator
|
73605a0c0e0c825286c95123c63678ba75b44d5c
|
[
"Apache-2.0"
] | 2
|
2020-04-24T15:18:41.000Z
|
2021-12-07T09:39:40.000Z
|
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import petstore_api
class TestParent(unittest.TestCase):
"""Parent unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testParent(self):
"""Test Parent"""
# FIXME: construct object with mandatory attributes with example values
# model = petstore_api.Parent() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 20.842105
| 174
| 0.667929
|
from __future__ import absolute_import
import unittest
import petstore_api
class TestParent(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testParent(self):
pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
f71977e9459670b106619a17c0921c378ddd8285
| 166
|
py
|
Python
|
tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_LinearTrend_Seasonal_Hour_SVR.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_LinearTrend_Seasonal_Hour_SVR.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 1
|
2019-11-30T23:39:38.000Z
|
2019-12-01T04:34:35.000Z
|
tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_LinearTrend_Seasonal_Hour_SVR.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['LinearTrend'] , ['Seasonal_Hour'] , ['SVR'] );
| 41.5
| 88
| 0.759036
|
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['LinearTrend'] , ['Seasonal_Hour'] , ['SVR'] );
| true
| true
|
f71979aaf903ff6353153012c1f5c64b155b2d5a
| 6,406
|
py
|
Python
|
libs/python/multicore_sorted/drafts/py_merge/multicore_sorted.py
|
denis-ryzhkov/antiques
|
6a67bf606c1b49cc413df26bfdf00d392b605f88
|
[
"MIT"
] | null | null | null |
libs/python/multicore_sorted/drafts/py_merge/multicore_sorted.py
|
denis-ryzhkov/antiques
|
6a67bf606c1b49cc413df26bfdf00d392b605f88
|
[
"MIT"
] | null | null | null |
libs/python/multicore_sorted/drafts/py_merge/multicore_sorted.py
|
denis-ryzhkov/antiques
|
6a67bf606c1b49cc413df26bfdf00d392b605f88
|
[
"MIT"
] | null | null | null |
"""
>>> DRAFT "py_merge"! <<<
Builtin "sorted()" function, but using all CPU cores available for speedup!
It supports all kwargs of "sorted()": "cmp", "key" and "reverse",
however items of "iterable" and all of these kwargs should be picklable:
https://docs.python.org/2/library/pickle.html#what-can-be-pickled-and-unpickled
Under the hood it uses map-reduce via "multiprocessing.Pool().map()" with builtin "sorted()"
and then merges sorted chunks as in merge-sort.
"processes" kwarg allows to set number of processes different from "cpu_count()".
Usage:
pip install multicore_sorted
cat <<END >test.py
from multicore_sorted import multicore_sorted
in_data = [1, 5, 2, 4, 3]
out_data = [1, 2, 3, 4, 5]
def cmp(a, b):
return b - a
def key(a):
return -a
if __name__ == '__main__':
assert multicore_sorted(in_data) == sorted(in_data) == out_data
# But N times faster, given Big data and N CPU cores!
assert (
multicore_sorted(in_data, cmp=cmp) ==
multicore_sorted(in_data, key=key) ==
multicore_sorted(in_data, reverse=True) ==
list(reversed(out_data))
)
print('OK')
END
python test.py
drafts/py_merge/multicore_sorted version 0.1.0
Copyright (C) 2014 by Denis Ryzhkov <denisr@denisr.com>
MIT License, see http://opensource.org/licenses/MIT
"""
#### export
__all__ = ['multicore_sorted']
#### import
from bn import Bn
from functools import cmp_to_key
from multiprocessing import cpu_count, Pool
#### multicore_sorted
def multicore_sorted(iterable, **kwargs):
bn = Bn()
#### processes
bn('processes')
processes = kwargs.pop('processes', None)
if processes is None:
try:
processes = cpu_count() # Yes, "Pool()" does the same, but we need "processes" before calling "Pool()".
except NotImplementedError:
processes = 1
if processes < 2:
return sorted(iterable, **kwargs)
# No need for multiprocessing if less than 2 processes!
# It is tempting to do the same for small enough "len(iterable)",
# but then the code below would be not efficient for generators having no "__len__".
#### chunks
bn('chunks')
chunks = [[] for _ in xrange(processes)]
# "[[]] * processes" would have created N links to the same list,
# while we need separate lists.
for i, item in enumerate(iterable): # Efficient even if "iterable" is a generator.
chunks[i % processes].append(item) # Round-robin chunking.
chunks = [ # Packing for "picklable_sorted" below.
(chunk, kwargs) # "chunk" here is just a ref to one of big lists created above. So it is efficient.
for chunk in chunks
]
#### map-reduce
bn('pool')
pool = Pool(processes=processes) # No "maxtasksperchild" - the pool will be GC-ed after the sort.
bn('map')
chunks = pool.map(picklable_sorted, chunks)
#bn('pool')
#pool.close() # Test!
#bn('merge_sorted')
result = merge_sorted(chunks, **kwargs) # Alas "heapq.merge()" does not support "key=lambda", etc.
#bn('test_import')
#from itertools import chain
#bn('test_timsort')
#result = sorted(chain(*chunks), **kwargs)
print(bn)
return result
#### picklable_sorted
def picklable_sorted(chunk):
# "Pool().map()" does not support additional kwargs like "key=lambda" for the "func".
# Natural closure inside "multicore_sorted" is not picklable.
# This is a picklable single-argument workaround.
chunk, kwargs = chunk # Unpacking via efficient refs.
#print((chunk, kwargs))
return sorted(chunk, **kwargs)
#### merge_sorted
def merge_sorted(chunks, cmp=None, key=None, reverse=False):
#bn = Bn()
#bn('init')
#### K - combined key.
if cmp:
cmp_key = cmp_to_key(cmp)
K = (lambda a: cmp_key(key(a))) if key else cmp_key
elif key:
K = key
else:
K = lambda a: a
# NOTE: "reverse" is processed below.
#### init
chunks = [iter(chunk) for chunk in chunks] # Prepare to fetch from each chunk.
items = [chunk.next() for chunk in chunks] # Fetch first item from each chunk. Should be no empty chunks here.
skip_me = object() # Unique marker.
result = []
while True:
min_item = min_key = min_index = None
#### Find "min".
#bn('min')
for chunk_index, item in enumerate(items): # Builtin "min()" does not fit, even with its "key" kwarg.
if item is not skip_me and (
min_index is None or # First not "skip_me" chunk becomes "min" chunk.
not reverse and K(item) < min_key or # Default case "reverse=False" should be the first one.
reverse and K(item) > min_key # Attempt to use "not <" would lead to extra computations below on "==".
):
min_item = item
min_key = K(item)
min_index = chunk_index
if min_index is None: # All chunks are "skip_me".
break
#bn('append')
result.append(min_item)
#### Fetch next item instead of "min".
#bn('fetch')
try:
items[min_index] = chunks[min_index].next()
except StopIteration:
items[min_index] = skip_me
#print(bn)
return result
#### tests
def cmp(a, b):
return b - a
def key(a):
return -a
def tests():
from random import randint
in_data = [randint(-100, 100) for _ in xrange(4 * 10**6)]
out_data = sorted(in_data)
reversed_out_data = list(reversed(out_data))
bn = Bn()
bn('sorted')
assert sorted(in_data) == out_data
bn('multicore_sorted')
assert multicore_sorted(in_data) == out_data
print(bn)
#"""
assert multicore_sorted(in_data) == sorted(in_data) == out_data
assert multicore_sorted(in_data, cmp=cmp) == reversed_out_data
assert multicore_sorted(in_data, key=key) == reversed_out_data
assert multicore_sorted(in_data, reverse=True) == reversed_out_data
assert multicore_sorted(in_data, cmp=cmp, key=key) == out_data
assert multicore_sorted(in_data, cmp=cmp, reverse=True) == out_data
assert multicore_sorted(in_data, key=key, reverse=True) == out_data
assert multicore_sorted(in_data, cmp=cmp, key=key, reverse=True) == reversed_out_data
#"""
print('OK')
if __name__ == '__main__':
tests()
| 28.471111
| 118
| 0.635498
|
__all__ = ['multicore_sorted']
from bn import Bn
from functools import cmp_to_key
from multiprocessing import cpu_count, Pool
def multicore_sorted(iterable, **kwargs):
    bn = Bn()
    bn('processes')
    processes = kwargs.pop('processes', None)
    if processes is None:
        try:
            processes = cpu_count()
        except NotImplementedError:
            processes = 1
    if processes < 2:
        return sorted(iterable, **kwargs)
    bn('chunks')
    chunks = [[] for _ in xrange(processes)]
    for i, item in enumerate(iterable):
        chunks[i % processes].append(item)
    chunks = [
        (chunk, kwargs)
        for chunk in chunks
    ]
    bn('pool')
    pool = Pool(processes=processes)
    bn('map')
    chunks = pool.map(picklable_sorted, chunks)
    result = merge_sorted(chunks, **kwargs)
    print(bn)
    return result
def picklable_sorted(chunk):
    chunk, kwargs = chunk
    return sorted(chunk, **kwargs)
def merge_sorted(chunks, cmp=None, key=None, reverse=False):
    if cmp:
        cmp_key = cmp_to_key(cmp)
        K = (lambda a: cmp_key(key(a))) if key else cmp_key
    elif key:
        K = key
    else:
        K = lambda a: a
    chunks = [iter(chunk) for chunk in chunks]
    items = [chunk.next() for chunk in chunks]
    skip_me = object()
    result = []
    while True:
        min_item = min_key = min_index = None
        for chunk_index, item in enumerate(items):
            if item is not skip_me and (
                min_index is None or
                not reverse and K(item) < min_key or
                reverse and K(item) > min_key
            ):
                min_item = item
                min_key = K(item)
                min_index = chunk_index
        if min_index is None:
            break
        result.append(min_item)
        try:
            items[min_index] = chunks[min_index].next()
        except StopIteration:
            items[min_index] = skip_me
    return result
def cmp(a, b):
    return b - a
def key(a):
    return -a
def tests():
    from random import randint
    in_data = [randint(-100, 100) for _ in xrange(4 * 10**6)]
    out_data = sorted(in_data)
    reversed_out_data = list(reversed(out_data))
    bn = Bn()
    bn('sorted')
    assert sorted(in_data) == out_data
    bn('multicore_sorted')
    assert multicore_sorted(in_data) == out_data
    print(bn)
    assert multicore_sorted(in_data) == sorted(in_data) == out_data
    assert multicore_sorted(in_data, cmp=cmp) == reversed_out_data
    assert multicore_sorted(in_data, key=key) == reversed_out_data
    assert multicore_sorted(in_data, reverse=True) == reversed_out_data
    assert multicore_sorted(in_data, cmp=cmp, key=key) == out_data
    assert multicore_sorted(in_data, cmp=cmp, reverse=True) == out_data
    assert multicore_sorted(in_data, key=key, reverse=True) == out_data
    assert multicore_sorted(in_data, cmp=cmp, key=key, reverse=True) == reversed_out_data
    print('OK')
if __name__ == '__main__':
    tests()
| true
| true
|
f7197a79112a1c5cebafd40d2898d9834ee03a99
| 15,162
|
py
|
Python
|
test/simulator_tests/birth_death_simulator_test.py
|
YosefLab/SingleCellLineageTracing
|
d9133fc80c8314e7935fde037dd86111cac47447
|
[
"MIT"
] | 52
|
2019-05-14T02:06:24.000Z
|
2022-03-27T05:22:56.000Z
|
test/simulator_tests/birth_death_simulator_test.py
|
sbradford2/Cassiopeia
|
010072b307f7eadbf10dc4af8b2165e48f1736a7
|
[
"MIT"
] | 88
|
2019-06-07T15:07:45.000Z
|
2022-03-22T14:40:03.000Z
|
test/simulator_tests/birth_death_simulator_test.py
|
sbradford2/Cassiopeia
|
010072b307f7eadbf10dc4af8b2165e48f1736a7
|
[
"MIT"
] | 17
|
2019-05-17T00:46:16.000Z
|
2022-03-25T00:39:18.000Z
|
import unittest
import networkx as nx
import numpy as np
from typing import List, Tuple
from cassiopeia.data.CassiopeiaTree import CassiopeiaTree
from cassiopeia.mixins import TreeSimulatorError
from cassiopeia.simulator.BirthDeathFitnessSimulator import (
BirthDeathFitnessSimulator,
)
import cassiopeia.data.utilities as utilities
def extract_tree_statistics(
tree: CassiopeiaTree,
) -> Tuple[List[float], int, bool]:
"""A helper function for testing simulated trees.
Outputs the total lived time for each extant lineage, the number of extant
lineages, and whether the tree has the expected node degrees (to ensure
unifurcations were collapsed).
Args:
tree: The tree to test
Returns:
The total time lived for each leaf, the number of leaves, and if the
degrees only have degree 0 or 2
"""
times = []
out_degrees = []
for i in tree.nodes:
if tree.is_leaf(i):
times.append(tree.get_time(i))
out_degrees.append(len(tree.children(i)))
out_degrees.pop(0)
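    # The first entry is dropped because it corresponds to the root (assuming
    # tree.nodes yields the root first), which has a single child lineage by
    # construction and would otherwise fail the binary-degree check.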
correct_degrees = all(x == 2 or x == 0 for x in out_degrees)
return times, len(times), correct_degrees
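# Illustrative usage of the helper, using the same simulator API exercised in
# the tests below:
#
#   bd_sim = BirthDeathFitnessSimulator(lambda _: 1, 1, num_extant=4)
#   times, n_leaves, is_binary = extract_tree_statistics(bd_sim.simulate_tree())
#
# "times" holds one total lifetime per extant leaf, "n_leaves" the number of
# leaves, and "is_binary" whether every internal node has exactly two children.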
class BirthDeathSimulatorTest(unittest.TestCase):
def test_bad_waiting_distributions(self):
"""Ensures errors when invalid distributions are given."""
with self.assertRaises(TreeSimulatorError):
bd_sim = BirthDeathFitnessSimulator(
lambda _: -1, 1, experiment_time=1
)
tree = bd_sim.simulate_tree()
with self.assertRaises(TreeSimulatorError):
bd_sim = BirthDeathFitnessSimulator(lambda _: 0, 1, num_extant=4)
tree = bd_sim.simulate_tree()
with self.assertRaises(TreeSimulatorError):
bd_sim = BirthDeathFitnessSimulator(
lambda _: 1, 1, lambda: -1, num_extant=1
)
tree = bd_sim.simulate_tree()
with self.assertRaises(TreeSimulatorError):
bd_sim = BirthDeathFitnessSimulator(
lambda _: 1, 1, lambda: 0, experiment_time=1
)
tree = bd_sim.simulate_tree()
with self.assertRaises(TreeSimulatorError):
bd_sim = BirthDeathFitnessSimulator(
lambda _: 1,
1,
lambda: 0,
mutation_distribution=lambda: -1,
fitness_distribution=lambda: 1,
experiment_time=1,
)
tree = bd_sim.simulate_tree()
def test_bad_stopping_conditions(self):
"""Ensures errors when an invalid stopping conditions are given."""
with self.assertRaises(TreeSimulatorError):
bd_sim = BirthDeathFitnessSimulator(lambda _: 1, 1, lambda: 2)
with self.assertRaises(TreeSimulatorError):
bd_sim = BirthDeathFitnessSimulator(
lambda _: 1, 1, lambda: 2, num_extant=0.5
)
with self.assertRaises(TreeSimulatorError):
bd_sim = BirthDeathFitnessSimulator(
lambda _: 1, 1, lambda: 2, num_extant=-1
)
with self.assertRaises(TreeSimulatorError):
bd_sim = BirthDeathFitnessSimulator(
lambda _: 1, 1, lambda: 2, num_extant=0
)
with self.assertRaises(TreeSimulatorError):
bd_sim = BirthDeathFitnessSimulator(
lambda _: 1, 1, lambda: 2, experiment_time=-1
)
with self.assertRaises(TreeSimulatorError):
bd_sim = BirthDeathFitnessSimulator(
lambda _: 1, 1, lambda: 2, experiment_time=0
)
def test_dead_at_start(self):
"""Ensures errors in base case where all lineages die on first event."""
with self.assertRaises(TreeSimulatorError):
bd_sim = BirthDeathFitnessSimulator(
lambda _: 2, 1, lambda: 1, num_extant=4
)
tree = bd_sim.simulate_tree()
with self.assertRaises(TreeSimulatorError):
bd_sim = BirthDeathFitnessSimulator(
lambda _: 2, 1, lambda: 1, experiment_time=4
)
tree = bd_sim.simulate_tree()
def test_dead_before_end(self):
"""Ensures errors when all lineages die before stopping condition."""
birth_wd = lambda scale: np.random.exponential(scale)
death_wd = lambda: np.random.exponential(0.6)
with self.assertRaises(TreeSimulatorError):
bd_sim = BirthDeathFitnessSimulator(
birth_wd, 0.5, death_wd, num_extant=8, random_seed=5
)
tree = bd_sim.simulate_tree()
with self.assertRaises(TreeSimulatorError):
bd_sim = BirthDeathFitnessSimulator(
birth_wd, 0.5, death_wd, experiment_time=2, random_seed=5
)
tree = bd_sim.simulate_tree()
def test_single_lineage(self):
"""Tests base case that stopping conditions work before divisions."""
bd_sim = BirthDeathFitnessSimulator(lambda _: 1, 1, num_extant=1)
tree = bd_sim.simulate_tree()
results = extract_tree_statistics(tree)
self.assertEqual(results[1], 1)
self.assertEqual(tree.get_branch_length("0", "1"), 1.0)
self.assertEqual(results[0], [1])
bd_sim = BirthDeathFitnessSimulator(lambda _: 1, 1, experiment_time=1)
tree = bd_sim.simulate_tree()
results = extract_tree_statistics(tree)
self.assertEqual(results[1], 1)
self.assertEqual(tree.get_branch_length("0", "1"), 1.0)
self.assertEqual(results[0], [1])
def test_constant_yule(self):
"""Tests small case without death with constant waiting times."""
bd_sim = BirthDeathFitnessSimulator(lambda _: 1, 1, num_extant=32)
tree = bd_sim.simulate_tree()
results = extract_tree_statistics(tree)
for i in results[0]:
self.assertEqual(i, 6)
self.assertEqual(results[1], 32)
self.assertTrue(results[2])
bd_sim = BirthDeathFitnessSimulator(lambda _: 1, 1, experiment_time=6)
tree = bd_sim.simulate_tree()
results = extract_tree_statistics(tree)
for i in results[0]:
self.assertEqual(i, 6)
self.assertEqual(results[1], 32)
self.assertTrue(results[2])
def test_nonconstant_yule(self):
"""Tests case without death with variable waiting times."""
birth_wd = lambda scale: np.random.exponential(scale)
bd_sim = BirthDeathFitnessSimulator(
birth_wd, 1, num_extant=16, random_seed=54
)
tree = bd_sim.simulate_tree()
results = extract_tree_statistics(tree)
self.assertTrue(all(np.isclose(x, results[0][0]) for x in results[0]))
self.assertEqual(results[1], 16)
self.assertTrue(results[2])
self.assertEqual(max([int(i) for i in tree.nodes]), 31)
bd_sim = BirthDeathFitnessSimulator(
birth_wd, 1, experiment_time=2, random_seed=54
)
tree = bd_sim.simulate_tree()
results = extract_tree_statistics(tree)
for i in results[0]:
self.assertEqual(i, 2)
self.assertTrue(results[2])
def test_nonconstant_birth_death(self):
"""Tests case with with variable birth and death waiting times.
Also, tests pruning dead lineages and unifurcation collapsing."""
birth_wd = lambda scale: np.random.exponential(scale)
death_wd = lambda: np.random.exponential(1.5)
bd_sim = BirthDeathFitnessSimulator(
birth_wd, 0.5, death_wd, num_extant=8, random_seed=1234
)
tree = bd_sim.simulate_tree()
results = extract_tree_statistics(tree)
self.assertTrue(all(np.isclose(x, results[0][0]) for x in results[0]))
self.assertEqual(results[1], 8)
self.assertTrue(results[2])
self.assertNotIn("9", tree.nodes)
self.assertNotIn("2", tree.nodes)
bd_sim = BirthDeathFitnessSimulator(
birth_wd, 0.5, death_wd, experiment_time=2, random_seed=1234
)
tree = bd_sim.simulate_tree()
results = extract_tree_statistics(tree)
for i in results[0]:
self.assertTrue(np.isclose(i, 2))
self.assertTrue(results[2])
self.assertNotIn("9", tree.nodes)
self.assertNotIn("2", tree.nodes)
def test_nonconstant_birth_death_no_unifurcation_collapsing(self):
"""Tests case with with variable birth and death waiting times.
Checks that unifurcations are not collapsed."""
birth_wd = lambda scale: np.random.exponential(scale)
death_wd = lambda: np.random.exponential(1.5)
bd_sim = BirthDeathFitnessSimulator(
birth_wd,
0.5,
death_wd,
num_extant=8,
collapse_unifurcations=False,
random_seed=12,
)
tree = bd_sim.simulate_tree()
results = extract_tree_statistics(tree)
self.assertTrue(all(np.isclose(x, results[0][0]) for x in results[0]))
self.assertEqual(results[1], 8)
self.assertFalse(results[2])
self.assertNotIn("3", tree.nodes)
self.assertIn("2", tree.nodes)
self.assertIn("6", tree.nodes)
bd_sim = BirthDeathFitnessSimulator(
birth_wd,
0.5,
death_wd,
experiment_time=1.3,
collapse_unifurcations=False,
random_seed=12,
)
tree = bd_sim.simulate_tree()
results = extract_tree_statistics(tree)
for i in results[0]:
self.assertTrue(np.isclose(i, 1.3))
self.assertFalse(results[2])
self.assertNotIn("3", tree.nodes)
self.assertIn("2", tree.nodes)
self.assertIn("6", tree.nodes)
def test_nonconstant_birth_death_both_stopping_conditions(self):
"""Tests case with with variable birth and death waiting times.
Checks that using both stopping conditions works fine."""
birth_wd = lambda scale: np.random.exponential(scale)
death_wd = lambda: np.random.exponential(1.5)
bd_sim = BirthDeathFitnessSimulator(
birth_wd,
0.5,
death_wd,
num_extant=8,
experiment_time=2,
random_seed=17,
)
tree = bd_sim.simulate_tree()
results = extract_tree_statistics(tree)
self.assertTrue(all(np.isclose(x, results[0][0]) for x in results[0]))
self.assertTrue(all(x > 1 for x in results[0]))
self.assertEqual(results[1], 8)
self.assertTrue(results[2])
bd_sim = BirthDeathFitnessSimulator(
birth_wd,
0.5,
death_wd,
num_extant=8,
experiment_time=1,
random_seed=17,
)
tree = bd_sim.simulate_tree()
results = extract_tree_statistics(tree)
for i in results[0]:
self.assertTrue(np.isclose(i, 1))
self.assertEqual(results[1], 3)
self.assertTrue(results[2])
def test_nonconstant_yule_with_predictable_fitness(self):
"""Tests case with birth and death with constant fitness."""
def check_fitness_values_as_expected(tree: nx.DiGraph):
"""Checks if the fitness value stored at each node is what we
expect given deterministic fitness evolution"""
tree = tree.copy()
for u, v in tree.edges:
tree[u][v]["val"] = 1
tree.nodes["0"]["depth"] = 0
for u, v in nx.dfs_edges(tree, source="0"):
tree.nodes[v]["depth"] = (
tree.nodes[u]["depth"] + tree[u][v]["val"]
)
leaves = [n for n in tree if tree.out_degree(n) == 0]
for i in tree.nodes:
if i in leaves:
self.assertTrue(
np.isclose(
tree.nodes[i]["birth_scale"],
0.5 * 0.98 ** (2 * (tree.nodes[i]["depth"] - 1)),
)
)
else:
self.assertTrue(
np.isclose(
tree.nodes[i]["birth_scale"],
0.5 * 0.98 ** (2 * tree.nodes[i]["depth"]),
)
)
birth_wd = lambda scale: np.random.exponential(scale)
bd_sim = BirthDeathFitnessSimulator(
birth_wd,
0.5,
mutation_distribution=lambda: 2,
fitness_distribution=lambda: 1,
fitness_base=0.98,
num_extant=8,
random_seed=1234,
)
tree = bd_sim.simulate_tree()
results = extract_tree_statistics(tree)
self.assertTrue(all(np.isclose(x, results[0][0]) for x in results[0]))
self.assertEqual(results[1], 8)
self.assertTrue(results[2])
check_fitness_values_as_expected(tree.get_tree_topology())
bd_sim = BirthDeathFitnessSimulator(
birth_wd,
0.5,
mutation_distribution=lambda: 2,
fitness_distribution=lambda: 1,
fitness_base=0.98,
experiment_time=0.6,
random_seed=1234,
)
tree = bd_sim.simulate_tree()
results = extract_tree_statistics(tree)
for i in results[0]:
self.assertTrue(np.isclose(i, 0.6))
self.assertTrue(results[2])
check_fitness_values_as_expected(tree.get_tree_topology())
def test_nonconstant_birth_death_with_variable_fitness(self):
"""Tests a case with variable birth and death waiting times, as well
as variable fitness evolution. Also tests pruning and collapsing."""
birth_wd = lambda scale: np.random.exponential(scale)
death_wd = lambda: np.random.exponential(0.6)
mut_dist = lambda: 1 if np.random.uniform() < 0.2 else 0
fit_dist = lambda: np.random.uniform(-1, 1)
bd_sim = BirthDeathFitnessSimulator(
birth_wd,
0.5,
death_wd,
mut_dist,
fit_dist,
1.5,
num_extant=8,
random_seed=12364,
)
tree = bd_sim.simulate_tree()
results = extract_tree_statistics(tree)
self.assertTrue(all(np.isclose(x, results[0][0]) for x in results[0]))
self.assertEqual(results[1], 8)
self.assertTrue(results[2])
self.assertNotIn(2, tree.nodes)
self.assertNotIn(3, tree.nodes)
bd_sim = BirthDeathFitnessSimulator(
birth_wd,
0.5,
death_wd,
mut_dist,
fit_dist,
1.5,
experiment_time=3,
random_seed=12364,
)
tree = bd_sim.simulate_tree()
results = extract_tree_statistics(tree)
for i in results[0]:
self.assertTrue(np.isclose(i, 3))
self.assertTrue(results[2])
self.assertNotIn(2, tree.nodes)
self.assertNotIn(3, tree.nodes)
if __name__ == "__main__":
unittest.main()
| 36.186158
| 80
| 0.595502
|
| true
| true
|
f7197adb438e0099947d4309aa51de3f15e7c419
| 2,615
|
py
|
Python
|
src/sparkload.py
|
jbalint/spark
|
caccf1cd9122dd4a7dc0f26a57ee4a649056aa6f
|
[
"CNRI-Jython"
] | 1
|
2015-05-21T20:00:12.000Z
|
2015-05-21T20:00:12.000Z
|
src/sparkload.py
|
jbalint/spark
|
caccf1cd9122dd4a7dc0f26a57ee4a649056aa6f
|
[
"CNRI-Jython"
] | null | null | null |
src/sparkload.py
|
jbalint/spark
|
caccf1cd9122dd4a7dc0f26a57ee4a649056aa6f
|
[
"CNRI-Jython"
] | null | null | null |
#!/usr/bin/env jython
#*****************************************************************************#
#* Copyright (c) 2004-2008, SRI International. *#
#* All rights reserved. *#
#* *#
#* Redistribution and use in source and binary forms, with or without *#
#* modification, are permitted provided that the following conditions are *#
#* met: *#
#* * Redistributions of source code must retain the above copyright *#
#* notice, this list of conditions and the following disclaimer. *#
#* * Redistributions in binary form must reproduce the above copyright *#
#* notice, this list of conditions and the following disclaimer in the *#
#* documentation and/or other materials provided with the distribution. *#
#* * Neither the name of SRI International nor the names of its *#
#* contributors may be used to endorse or promote products derived from *#
#* this software without specific prior written permission. *#
#* *#
#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS *#
#* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT *#
#* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR *#
#* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT *#
#* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *#
#* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT *#
#* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, *#
#* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY *#
#* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT *#
#* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE *#
#* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *#
#*****************************************************************************#
#* "$Revision:: 26 $" *#
#* "$HeadURL:: https://svn.ai.sri.com/projects/spark/trunk/spark/src/spar#$" *#
#*****************************************************************************#
import sys
from spark.internal.version import *
from spark.main import main
| 70.675676
| 80
| 0.518164
|
| true
| true
|
f7197b8026a171a1b01fc519f2c5d4c23b3f4e4d
| 7,153
|
py
|
Python
|
tests/test_other_scripts.py
|
vaibhavad/ParlAI
|
8960fab4cb7b7063df6023d8734adc8881dfed6e
|
[
"MIT"
] | 2
|
2017-09-20T21:49:51.000Z
|
2018-08-12T06:58:10.000Z
|
tests/test_other_scripts.py
|
vaibhavad/ParlAI
|
8960fab4cb7b7063df6023d8734adc8881dfed6e
|
[
"MIT"
] | 1
|
2021-01-22T08:11:01.000Z
|
2021-01-22T08:11:01.000Z
|
tests/test_other_scripts.py
|
vaibhavad/ParlAI
|
8960fab4cb7b7063df6023d8734adc8881dfed6e
|
[
"MIT"
] | 1
|
2021-01-07T11:45:03.000Z
|
2021-01-07T11:45:03.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Catch all for a number of "other" scripts.
"""
import os
import unittest
import parlai.utils.testing as testing_utils
class TestConvertToParlaiFormat(unittest.TestCase):
def test_convert(self):
from parlai.scripts.convert_data_to_parlai_format import (
ConvertDataToParlaiFormat,
)
with testing_utils.tempdir() as tmpdir:
fn = os.path.join(tmpdir, 'parlai.txt')
ConvertDataToParlaiFormat.main(
task='integration_tests:nocandidate', outfile=fn
)
with open(fn) as f:
assert (
f.readline() == 'text:4 1 3 2\tlabels:4 1 3 2\tepisode_done:True\n'
)
assert f.readline() == '\n'
assert (
f.readline() == 'text:3 0 4 1\tlabels:3 0 4 1\tepisode_done:True\n'
)
assert f.readline() == '\n'
assert (
f.readline() == 'text:5 1 6 3\tlabels:5 1 6 3\tepisode_done:True\n'
)
assert f.readline() == '\n'
assert (
f.readline() == 'text:4 5 6 2\tlabels:4 5 6 2\tepisode_done:True\n'
)
assert f.readline() == '\n'
assert (
f.readline() == 'text:0 5 3 1\tlabels:0 5 3 1\tepisode_done:True\n'
)
assert f.readline() == '\n'
class TestVerifyData(unittest.TestCase):
def test_verify_data(self):
from parlai.scripts.verify_data import VerifyData
report = VerifyData.main(task='integration_tests')
assert report['did_not_return_message'] == 0
assert report['empty_string_label_candidates'] == 0
assert report['exs'] == 500
assert report['label_candidates_with_missing_label'] == 0
assert report['missing_label_candidates'] == 0
assert report['missing_labels'] == 0
assert report['missing_text'] == 0
class TestVacuum(unittest.TestCase):
def test_vacuum(self):
with testing_utils.tempdir() as tmpdir:
from parlai.scripts.vacuum import Vacuum
model_file = os.path.join(tmpdir, 'model')
valid, test = testing_utils.train_model(
{
'task': 'integration_tests',
'optimizer': 'adam',
'learningrate': 0.01,
'model_file': model_file,
'num_epochs': 0.05,
'skip_generation': True,
'batchsize': 8,
# TODO: switch to test_agents/unigram
'model': 'transformer/generator',
'ffn_size': 32,
'embedding_size': 32,
'n_layers': 1,
}
)
size_before = os.stat(model_file).st_size
Vacuum.main(model_file=model_file)
size_after = os.stat(model_file).st_size
assert size_after < size_before
assert os.path.exists(model_file + '.unvacuumed')
valid2, test2 = testing_utils.eval_model(
{'task': 'integration_tests', 'model_file': model_file, 'batchsize': 8}
)
for key in ['loss', 'exs', 'ppl', 'token_acc']:
assert valid2[key] == valid[key], f"{key} score doesn't match"
assert test2[key] == test[key], f"{key} score doesn't match"
class TestDetectOffensive(unittest.TestCase):
def test_offensive(self):
from parlai.scripts.detect_offensive_language import DetectOffensive
report = DetectOffensive.main(
task='babi:task1k:10', datatype='valid', safety='string_matcher'
)
assert report['string_offenses%'] == 0
assert report['word_offenses'] == 0
assert report['exs'] == 100
class TestParty(unittest.TestCase):
def test_party(self):
from parlai.scripts.party import Party
Party.main(seconds=0.01)
class TestDataStats(unittest.TestCase):
def test_simple(self):
from parlai.scripts.data_stats import DataStats
report = DataStats.main(task='integration_tests')
assert report['both/avg_utterance_length'] == 4
assert report['input/avg_utterance_length'] == 4
assert report['labels/avg_utterance_length'] == 4
assert report['both/tokens'] == 4000
assert report['input/tokens'] == 2000
assert report['labels/tokens'] == 2000
assert report['both/unique_tokens'] == 7
assert report['input/unique_tokens'] == 7
assert report['labels/unique_tokens'] == 7
assert report['both/unique_utterances'] == 500
assert report['input/unique_utterances'] == 500
assert report['labels/unique_utterances'] == 500
assert report['both/utterances'] == 1000
assert report['input/utterances'] == 500
assert report['labels/utterances'] == 500
class TestProfileTrain(unittest.TestCase):
"""
Test profile_train doesn't crash.
"""
def test_cprofile(self):
from parlai.scripts.profile_train import ProfileTrain
with testing_utils.tempdir() as tmpdir:
ProfileTrain.main(
task='integration_tests:overfit',
model='test_agents/unigram',
model_file=os.path.join(tmpdir, 'model'),
skip_generation=True,
)
def test_torch(self):
from parlai.scripts.profile_train import ProfileTrain
with testing_utils.tempdir() as tmpdir:
ProfileTrain.main(
task='integration_tests:overfit',
model='test_agents/unigram',
torch=True,
model_file=os.path.join(tmpdir, 'model'),
skip_generation=True,
)
@testing_utils.skipUnlessGPU
def test_torch_cuda(self):
from parlai.scripts.profile_train import ProfileTrain
with testing_utils.tempdir() as tmpdir:
ProfileTrain.main(
task='integration_tests:overfit',
model='test_agents/unigram',
torch_cuda=True,
model_file=os.path.join(tmpdir, 'model'),
skip_generation=True,
)
class TestTokenStats(unittest.TestCase):
def test_token_stats(self):
from parlai.scripts.token_stats import TokenStats
from parlai.core.metrics import dict_report
results = dict_report(TokenStats.main(task='integration_tests:multiturn'))
assert results == {
'exs': 2000,
'max': 16,
'mean': 7.5,
'min': 1,
'p01': 1,
'p05': 1,
'p10': 1,
'p25': 4,
'p50': 7.5,
'p75': 11.5,
'p90': 16,
'p95': 16,
'p99': 16,
'p@128': 1,
}
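# Optional convenience guard (illustrative; the suite is normally driven by a
# test runner) so the module can also be executed directly, e.g.
# `python tests/test_other_scripts.py`.
if __name__ == '__main__':
    unittest.main()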
| 35.063725
| 87
| 0.562282
|
| true
| true
|
f7197c8fd871714cbb61cb9b004d8a0b6f5dd33a
| 1,297
|
py
|
Python
|
sr700api/utils.py
|
AlexGS74/sr700api
|
22fc79c0e02ef66f4ef92f9c8b4a56c04fe09c4a
|
[
"MIT"
] | 5
|
2017-10-15T21:58:55.000Z
|
2020-09-02T05:12:32.000Z
|
sr700api/utils.py
|
AlexGS74/sr700api
|
22fc79c0e02ef66f4ef92f9c8b4a56c04fe09c4a
|
[
"MIT"
] | null | null | null |
sr700api/utils.py
|
AlexGS74/sr700api
|
22fc79c0e02ef66f4ef92f9c8b4a56c04fe09c4a
|
[
"MIT"
] | 1
|
2018-08-25T23:27:53.000Z
|
2018-08-25T23:27:53.000Z
|
"""
MIT License
Copyright (c) 2017 int3ll3ct.ly@gmail.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
def f_to_c(deg_f):
"utility to convert degrees fahrenheit to celsius"
return (deg_f - 32.0)/1.8
def c_to_f(deg_c):
"utility to convert degrees celsius to fahrenheit"
return deg_c * 1.8 + 32.0
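# Quick sanity checks (illustrative only):
#   f_to_c(32.0)  -> 0.0   (freezing point)
#   c_to_f(100.0) -> 212.0 (boiling point)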
| 40.53125
| 78
| 0.781033
|
| true
| true
|
f7197ce5cfde61155bd0c88a6aa247110b8af814
| 10,671
|
py
|
Python
|
tests/test_camera.py
|
netmanchris/abodepy
|
cd7b5527cc2becd12763d949057fe0184e0395d2
|
[
"MIT"
] | null | null | null |
tests/test_camera.py
|
netmanchris/abodepy
|
cd7b5527cc2becd12763d949057fe0184e0395d2
|
[
"MIT"
] | null | null | null |
tests/test_camera.py
|
netmanchris/abodepy
|
cd7b5527cc2becd12763d949057fe0184e0395d2
|
[
"MIT"
] | null | null | null |
"""Test the Abode camera class."""
import os
import unittest
import requests_mock
import abodepy
import abodepy.helpers.constants as CONST
import tests.mock as MOCK
import tests.mock.devices.ir_camera as IRCAMERA
import tests.mock.login as LOGIN
import tests.mock.oauth_claims as OAUTH_CLAIMS
import tests.mock.logout as LOGOUT
import tests.mock.panel as PANEL
USERNAME = 'foobar'
PASSWORD = 'deadbeef'
class TestCamera(unittest.TestCase):
"""Test the AbodePy camera."""
def setUp(self):
"""Set up Abode module."""
self.abode = abodepy.Abode(username=USERNAME,
password=PASSWORD,
disable_cache=True)
def tearDown(self):
"""Clean up after test."""
self.abode = None
@requests_mock.mock()
def tests_camera_properties(self, m):
"""Tests that camera properties work as expected."""
        # Set up URLs
m.post(CONST.LOGIN_URL, text=LOGIN.post_response_ok())
m.get(CONST.OAUTH_TOKEN_URL, text=OAUTH_CLAIMS.get_response_ok())
m.post(CONST.LOGOUT_URL, text=LOGOUT.post_response_ok())
m.get(CONST.PANEL_URL,
text=PANEL.get_response_ok(mode=CONST.MODE_STANDBY))
m.get(CONST.DEVICES_URL,
text=IRCAMERA.device(devid=IRCAMERA.DEVICE_ID,
status=CONST.STATUS_ONLINE,
low_battery=False,
no_response=False))
# Logout to reset everything
self.abode.logout()
# Get our camera
device = self.abode.get_device(IRCAMERA.DEVICE_ID)
# Test our device
self.assertIsNotNone(device)
self.assertEqual(device.status, CONST.STATUS_ONLINE)
self.assertFalse(device.battery_low)
self.assertFalse(device.no_response)
# Set up our direct device get url
device_url = str.replace(CONST.DEVICE_URL,
'$DEVID$', IRCAMERA.DEVICE_ID)
# Change device properties
m.get(device_url,
text=IRCAMERA.device(devid=IRCAMERA.DEVICE_ID,
status=CONST.STATUS_OFFLINE,
low_battery=True,
no_response=True))
        # Refresh device and test changes
device.refresh()
self.assertEqual(device.status, CONST.STATUS_OFFLINE)
self.assertTrue(device.battery_low)
self.assertTrue(device.no_response)
@requests_mock.mock()
def tests_camera_capture(self, m):
"""Tests that camera devices capture new images."""
        # Set up URLs
m.post(CONST.LOGIN_URL, text=LOGIN.post_response_ok())
m.get(CONST.OAUTH_TOKEN_URL, text=OAUTH_CLAIMS.get_response_ok())
m.post(CONST.LOGOUT_URL, text=LOGOUT.post_response_ok())
m.get(CONST.PANEL_URL,
text=PANEL.get_response_ok(mode=CONST.MODE_STANDBY))
m.get(CONST.DEVICES_URL,
text=IRCAMERA.device(devid=IRCAMERA.DEVICE_ID,
status=CONST.STATUS_ONLINE,
low_battery=False,
no_response=False))
# Logout to reset everything
self.abode.logout()
# Get our camera
device = self.abode.get_device(IRCAMERA.DEVICE_ID)
# Test that we have our device
self.assertIsNotNone(device)
self.assertEqual(device.status, CONST.STATUS_ONLINE)
# Set up capture url response
url = str.replace(CONST.CAMS_ID_CAPTURE_URL,
'$DEVID$', IRCAMERA.DEVICE_ID)
m.put(url, text=MOCK.generic_response_ok())
# Capture the image
self.assertTrue(device.capture())
# Change response
m.put(url, text=IRCAMERA.get_capture_timeout(), status_code=600)
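        # (600 is not a standard HTTP status code; the mock uses it simply to
        # force the failure branch of capture().)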
# Capture the image with failure
self.assertFalse(device.capture())
@requests_mock.mock()
def tests_camera_image_update(self, m):
"""Tests that camera devices update correctly via timeline request."""
        # Set up URLs
m.post(CONST.LOGIN_URL, text=LOGIN.post_response_ok())
m.get(CONST.OAUTH_TOKEN_URL, text=OAUTH_CLAIMS.get_response_ok())
m.post(CONST.LOGOUT_URL, text=LOGOUT.post_response_ok())
m.get(CONST.PANEL_URL,
text=PANEL.get_response_ok(mode=CONST.MODE_STANDBY))
m.get(CONST.DEVICES_URL,
text=IRCAMERA.device(devid=IRCAMERA.DEVICE_ID,
status=CONST.STATUS_ONLINE,
low_battery=False,
no_response=False))
# Logout to reset everything
self.abode.logout()
# Get our camera
device = self.abode.get_device(IRCAMERA.DEVICE_ID)
# Test that we have our device
self.assertIsNotNone(device)
self.assertEqual(device.status, CONST.STATUS_ONLINE)
# Set up timeline response
url = str.replace(CONST.TIMELINE_IMAGES_ID_URL,
'$DEVID$', IRCAMERA.DEVICE_ID)
m.get(url, text='[' +
IRCAMERA.timeline_event(IRCAMERA.DEVICE_ID) + ']')
# Set up our file path response
file_path = CONST.BASE_URL + IRCAMERA.FILE_PATH
m.head(file_path,
status_code=302, headers={'Location': IRCAMERA.LOCATION_HEADER})
# Refresh the image
self.assertTrue(device.refresh_image())
# Verify the image location
self.assertEqual(device.image_url, IRCAMERA.LOCATION_HEADER)
# Test that a bad file_path response header results in an exception
file_path = CONST.BASE_URL + IRCAMERA.FILE_PATH
m.head(file_path,
status_code=302)
with self.assertRaises(abodepy.AbodeException):
device.refresh_image()
# Test that a bad file_path response code results in an exception
file_path = CONST.BASE_URL + IRCAMERA.FILE_PATH
m.head(file_path,
status_code=200, headers={'Location': IRCAMERA.LOCATION_HEADER})
with self.assertRaises(abodepy.AbodeException):
device.refresh_image()
        # Test that an empty timeline event throws an exception
url = str.replace(CONST.TIMELINE_IMAGES_ID_URL,
'$DEVID$', IRCAMERA.DEVICE_ID)
m.get(url, text='[' +
IRCAMERA.timeline_event(IRCAMERA.DEVICE_ID, file_path='') +
']')
with self.assertRaises(abodepy.AbodeException):
device.refresh_image()
# Test that an unexpected timeline event throws exception
url = str.replace(CONST.TIMELINE_IMAGES_ID_URL,
'$DEVID$', IRCAMERA.DEVICE_ID)
m.get(url, text='[' +
IRCAMERA.timeline_event(IRCAMERA.DEVICE_ID, event_code='1234') +
']')
with self.assertRaises(abodepy.AbodeException):
device.refresh_image()
@requests_mock.mock()
def tests_camera_no_image_update(self, m):
"""Tests that camera updates correctly with no timeline events."""
        # Set up URLs
m.post(CONST.LOGIN_URL, text=LOGIN.post_response_ok())
m.get(CONST.OAUTH_TOKEN_URL, text=OAUTH_CLAIMS.get_response_ok())
m.post(CONST.LOGOUT_URL, text=LOGOUT.post_response_ok())
m.get(CONST.PANEL_URL,
text=PANEL.get_response_ok(mode=CONST.MODE_STANDBY))
m.get(CONST.DEVICES_URL,
text=IRCAMERA.device(devid=IRCAMERA.DEVICE_ID,
status=CONST.STATUS_ONLINE,
low_battery=False,
no_response=False))
# Logout to reset everything
self.abode.logout()
# Get our camera
device = self.abode.get_device(IRCAMERA.DEVICE_ID)
# Test that we have our device
self.assertIsNotNone(device)
self.assertEqual(device.status, CONST.STATUS_ONLINE)
# Set up timeline response
url = str.replace(CONST.TIMELINE_IMAGES_ID_URL,
'$DEVID$', IRCAMERA.DEVICE_ID)
m.get(url, text='[]')
# Refresh the image
self.assertFalse(device.refresh_image())
self.assertIsNone(device.image_url)
@requests_mock.mock()
def tests_camera_image_write(self, m):
"""Tests that camera images will write to a file."""
        # Set up URLs
m.post(CONST.LOGIN_URL, text=LOGIN.post_response_ok())
m.get(CONST.OAUTH_TOKEN_URL, text=OAUTH_CLAIMS.get_response_ok())
m.post(CONST.LOGOUT_URL, text=LOGOUT.post_response_ok())
m.get(CONST.PANEL_URL,
text=PANEL.get_response_ok(mode=CONST.MODE_STANDBY))
m.get(CONST.DEVICES_URL,
text=IRCAMERA.device(devid=IRCAMERA.DEVICE_ID,
status=CONST.STATUS_ONLINE,
low_battery=False,
no_response=False))
# Logout to reset everything
self.abode.logout()
# Get our camera
device = self.abode.get_device(IRCAMERA.DEVICE_ID)
# Test that we have our device
self.assertIsNotNone(device)
self.assertEqual(device.status, CONST.STATUS_ONLINE)
# Set up timeline response
url = str.replace(CONST.TIMELINE_IMAGES_ID_URL,
'$DEVID$', IRCAMERA.DEVICE_ID)
m.get(url, text='[' +
IRCAMERA.timeline_event(IRCAMERA.DEVICE_ID) + ']')
# Set up our file path response
file_path = CONST.BASE_URL + IRCAMERA.FILE_PATH
m.head(file_path,
status_code=302, headers={'Location': IRCAMERA.LOCATION_HEADER})
# Set up our image response
image_response = "this is a beautiful jpeg image"
m.get(IRCAMERA.LOCATION_HEADER, text=image_response)
# Refresh the image
path = "test.jpg"
self.assertTrue(device.image_to_file(path, get_image=True))
# Test the file written and cleanup
        with open(path, 'r') as image_file:
            image_data = image_file.read()
        self.assertEqual(image_response, image_data)
os.remove(path)
        # Test that a bad response raises an exception
m.get(IRCAMERA.LOCATION_HEADER, status_code=400)
with self.assertRaises(abodepy.AbodeException):
device.image_to_file(path, get_image=True)
        # Test that image_to_file returns False when the image fails to update
m.get(url, text='[]')
self.assertFalse(device.image_to_file(path, get_image=True))
| 37.181185
| 79
| 0.607722
|
| true
| true
|
f7197da9b6e226e3c5a5e47bd5f775747c208e82
| 13,187
|
py
|
Python
|
kws_streaming/train/train.py
|
ssccutyy/KWS-Transformer
|
7ae6d2e8fce1a293d88eedc0dbfacae726151a08
|
[
"Apache-2.0"
] | 1
|
2022-03-13T07:52:15.000Z
|
2022-03-13T07:52:15.000Z
|
kws_streaming/train/train.py
|
ssccutyy/KWS-Transformer
|
7ae6d2e8fce1a293d88eedc0dbfacae726151a08
|
[
"Apache-2.0"
] | null | null | null |
kws_streaming/train/train.py
|
ssccutyy/KWS-Transformer
|
7ae6d2e8fce1a293d88eedc0dbfacae726151a08
|
[
"Apache-2.0"
] | 1
|
2022-03-11T12:33:27.000Z
|
2022-03-11T12:33:27.000Z
|
# coding=utf-8
# Copyright (c) 2021, Arm Limited and Contributors.
# SPDX-License-Identifier: Apache-2.0
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train utility functions, based on tensorflow/examples/speech_commands.
It consists of several steps:
1. Creates model.
2. Reads data.
3. Trains model.
4. Selects the best model and evaluates it.
"""
import json
from types import SimpleNamespace
import os.path
import pprint
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_addons as tfa
import kws_streaming.data.input_data as input_data
from kws_streaming.models import models
from kws_streaming.models import utils
import math
from transformers import AdamWeightDecay
from kws_streaming.models import model_flags
def train(flags):
"""Model training."""
flags.training = True
# Set the verbosity based on flags (default is INFO, so we see all messages)
logging.set_verbosity(flags.verbosity)
# Start a new TensorFlow session.
tf.reset_default_graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
tf.keras.backend.set_session(sess)
audio_processor = input_data.AudioProcessor(flags)
time_shift_samples = int((flags.time_shift_ms * flags.sample_rate) / 1000)
# Figure out the learning rates for each training phase. Since it's often
# effective to have high learning rates at the start of training, followed by
# lower levels towards the end, the number of steps and learning rates can be
# specified as comma-separated lists to define the rate at each stage. For
# example --how_many_training_steps=10000,3000 --learning_rate=0.001,0.0001
# will run 13,000 training loops in total, with a rate of 0.001 for the first
# 10,000, and 0.0001 for the final 3,000.
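# As an illustrative sketch (using the example flag values above, not the defaults):
#   --how_many_training_steps=10000,3000 -> training_steps_list = [10000, 3000]
#   --learning_rate=0.001,0.0001         -> learning_rates_list = [0.001, 0.0001]
# With the 'linear' schedule below, steps 1-10000 then use 0.001 and steps
# 10001-13000 use 0.0001.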
training_steps_list = list(map(int, flags.how_many_training_steps.split(',')))
learning_rates_list = list(map(float, flags.learning_rate.split(',')))
if len(training_steps_list) != len(learning_rates_list):
raise Exception(
'--how_many_training_steps and --learning_rate must be equal length '
'lists, but are %d and %d long instead' % (len(training_steps_list),
len(learning_rates_list)))
logging.info(flags)
model = models.MODELS[flags.model_name](flags)
if flags.distill_teacher_json:
with open(flags.distill_teacher_json, 'r') as f:
teacher_flags = json.load(f, object_hook=lambda d: SimpleNamespace(
**{ k: v for k, v in flags.__dict__.items() if k not in d },
**d))
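# Note on the object_hook above: keys present in the teacher JSON override the
# corresponding student flags, while any missing keys fall back to the values in
# `flags`, so `teacher_flags` is a full configuration namespace.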
teacher_base = models.MODELS[teacher_flags.model_name](teacher_flags)
hard_labels = tf.keras.layers.Lambda(lambda logits: tf.one_hot(tf.math.argmax(logits, axis=-1), depth=flags.label_count))
teacher = tf.keras.models.Sequential([teacher_base, hard_labels])
teacher_base.trainable = False
teacher.trainable = False
else:
teacher = None
teacher_flags = None
base_model = model
logging.info(model.summary())
# save model summary
utils.save_model_summary(model, flags.train_dir)
# save model and data flags
with open(os.path.join(flags.train_dir, 'flags.txt'), 'wt') as f:
pprint.pprint(flags, stream=f)
loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True, label_smoothing=flags.label_smoothing)
metrics = ['accuracy']
if flags.optimizer == 'adam':
optimizer = tf.keras.optimizers.Adam(epsilon=flags.optimizer_epsilon)
elif flags.optimizer == 'momentum':
optimizer = tf.keras.optimizers.SGD(momentum=0.9)
elif flags.optimizer == 'novograd':
optimizer = tfa.optimizers.NovoGrad(
lr=0.05,
beta_1=flags.novograd_beta_1,
beta_2=flags.novograd_beta_2,
weight_decay=flags.novograd_weight_decay,
grad_averaging=bool(flags.novograd_grad_averaging))
elif flags.optimizer == 'adamw':
# Exclude some layers for weight decay
exclude = ["pos_emb", "class_emb", "layer_normalization", "bias"]
optimizer = AdamWeightDecay(learning_rate=0.05, weight_decay_rate=flags.l2_weight_decay, exclude_from_weight_decay=exclude)
else:
raise ValueError('Unsupported optimizer:%s' % flags.optimizer)
loss_weights = [ 0.5, 0.5, 0.0 ] if teacher else [ 1. ] # equally weight losses from label and teacher, ignore ensemble output
model.compile(optimizer=optimizer, loss=loss, loss_weights=loss_weights, metrics=metrics)
train_writer = tf.summary.FileWriter(flags.summaries_dir + '/train',
sess.graph)
validation_writer = tf.summary.FileWriter(flags.summaries_dir + '/validation')
sess.run(tf.global_variables_initializer())
if flags.start_checkpoint:
model.load_weights(flags.start_checkpoint).expect_partial()
logging.info('Weights loaded from %s', flags.start_checkpoint)
if teacher_flags and teacher_flags.start_checkpoint:
# Load weights into teacher base as this is the actual model that was saved, teacher includes hard label head
teacher_base.load_weights(teacher_flags.start_checkpoint).assert_existing_objects_matched()
logging.info('Distillation teacher weights loaded from %s', teacher_flags.start_checkpoint)
start_step = 0
logging.info('Training from step: %d ', start_step)
# Save graph.pbtxt.
tf.train.write_graph(sess.graph_def, flags.train_dir, 'graph.pbtxt')
# Save list of words.
with tf.io.gfile.GFile(os.path.join(flags.train_dir, 'labels.txt'), 'w') as f:
f.write('\n'.join(audio_processor.words_list))
best_accuracy = 0.0
# prepare parameters for exp learning rate decay
training_steps_max = np.sum(training_steps_list)
lr_init = learning_rates_list[0]
exp_rate = -np.log(learning_rates_list[-1] / lr_init)/training_steps_max
mode = 'training'
if flags.lr_schedule == 'cosine':
# Currently, no restarts are performed, so it is just a cosine decay over the entire
# training process. I think this is how DeiT does it.
lr_init = lr_init * flags.batch_size / 512
num_train = audio_processor.set_size(mode)
warmup_steps = int((num_train / flags.batch_size) * flags.warmup_epochs)
first_decay_steps=training_steps_max
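# With these settings the 'cosine' branch of the loop below effectively computes,
# for step t (a restatement of the formula used there, not a new schedule):
#   lr(t) = lr_init * min(1, t / warmup_steps) * (cos(pi * t / training_steps_max) + 1) / 2
# i.e. a linear warmup followed by a single cosine decay to zero, with lr_init
# already rescaled by batch_size / 512 above (`first_decay_steps` is assigned
# above but not referenced again in this function).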
# Training loop.
for training_step in range(start_step, training_steps_max + 1):
if training_step > 0:
offset = (training_step -
1) * flags.batch_size if flags.pick_deterministically else 0
# Pull the audio samples we'll use for training.
train_fingerprints, train_ground_truth = audio_processor.get_data(
flags.batch_size, offset, flags, flags.background_frequency,
flags.background_volume, time_shift_samples, mode,
flags.resample, flags.volume_resample, sess)
if flags.lr_schedule == 'exp':
learning_rate_value = lr_init * np.exp(-exp_rate * training_step)
elif flags.lr_schedule == 'linear':
# Figure out what the current learning rate is.
training_steps_sum = 0
for i in range(len(training_steps_list)):
training_steps_sum += training_steps_list[i]
if training_step <= training_steps_sum:
learning_rate_value = learning_rates_list[i]
break
elif flags.lr_schedule == 'cosine':
learning_rate_value = lr_init * min(1, float(training_step) / max(1, warmup_steps)) * (math.cos(math.pi * training_step / training_steps_max) + 1) / 2.
else:
raise ValueError('Wrong lr_schedule: %s' % flags.lr_schedule)
tf.keras.backend.set_value(model.optimizer.learning_rate, learning_rate_value)
one_hot_labels = tf.keras.utils.to_categorical(train_ground_truth, num_classes=flags.label_count)
if teacher:
teacher_labels = teacher.predict_on_batch(train_fingerprints)
one_hot_labels = [ one_hot_labels, teacher_labels, one_hot_labels ] # third is for the ensemble output, gradient is unused
result = model.train_on_batch(train_fingerprints, one_hot_labels)
if teacher:
loss_total, loss_label, loss_teacher, loss_average, acc_label, acc_teacher, acc_ensemble = result
differences = (teacher_labels != one_hot_labels).astype(dtype=int).sum()
logging.info(
'Step #%d: rate %f, accuracy %.2f%%, cross entropy %f, teacher acc %.2f%% (%d diff), teacher cross entropy %f, ensemble acc %.2f%%',
*(training_step, learning_rate_value, acc_label * 100, loss_total, acc_teacher * 100, differences, loss_teacher, acc_ensemble * 100))
summary = tf.Summary(value=[
tf.Summary.Value(tag='accuracy', simple_value=acc_label),
tf.Summary.Value(tag='teacher_accuracy', simple_value=acc_teacher),
tf.Summary.Value(tag='ensemble_accuracy', simple_value=acc_ensemble),
])
else:
loss_label, acc_label = result
logging.info(
'Step #%d: rate %f, accuracy %.2f%%, cross entropy %f',
*(training_step, learning_rate_value, acc_label * 100, loss_label))
summary = tf.Summary(value=[
tf.Summary.Value(tag='accuracy', simple_value=acc_label),
])
train_writer.add_summary(summary, training_step)
is_last_step = (training_step == training_steps_max)
if (training_step % flags.eval_step_interval) == 0 or is_last_step:
set_size = audio_processor.set_size('validation')
set_size = int(set_size / flags.batch_size) * flags.batch_size
total_accuracy = 0.0
count = 0.0
for i in range(0, set_size, flags.batch_size):
validation_fingerprints, validation_ground_truth = audio_processor.get_data(
flags.batch_size, i, flags, 0.0,
0.0, 0, 'validation',
0.0, 0.0, sess)
one_hot_labels = tf.keras.utils.to_categorical(validation_ground_truth, num_classes=flags.label_count)
if teacher:
one_hot_labels = [ one_hot_labels, one_hot_labels, one_hot_labels ]
# Run a validation step and capture training summaries for TensorBoard
# with the `merged` op.
result = model.test_on_batch(validation_fingerprints,
one_hot_labels)
if teacher:
loss_total, loss_label, loss_teacher, loss_average, acc_label, acc_teacher, acc_ensemble = result
summary = tf.Summary(value=[
tf.Summary.Value(tag='accuracy', simple_value=acc_ensemble),
tf.Summary.Value(tag='label_head_accuracy', simple_value=acc_label),
tf.Summary.Value(tag='distill_head_accuracy', simple_value=acc_teacher),
])
accuracy = acc_ensemble
else:
loss_label, acc_label = result
summary = tf.Summary(value=[
tf.Summary.Value(tag='accuracy', simple_value=acc_label),])
accuracy = acc_label
validation_writer.add_summary(summary, training_step)
total_accuracy += accuracy
count = count + 1.0
total_accuracy = total_accuracy / count
logging.info('Step %d: Validation accuracy = %.2f%% (N=%d)',
*(training_step, total_accuracy * 100, set_size))
# Save the model checkpoint when validation accuracy improves
if total_accuracy >= best_accuracy:
best_accuracy = total_accuracy
# overwrite the best model weights
model.save_weights(flags.train_dir + 'best_weights')
logging.info('So far the best validation accuracy is %.2f%%',
(best_accuracy * 100))
tf.keras.backend.set_learning_phase(0)
set_size = audio_processor.set_size('testing')
set_size = int(set_size / flags.batch_size) * flags.batch_size
logging.info('set_size=%d', set_size)
total_accuracy = 0.0
count = 0.0
for i in range(0, set_size, flags.batch_size):
test_fingerprints, test_ground_truth = audio_processor.get_data(
flags.batch_size, i, flags, 0.0, 0.0, 0, 'testing', 0.0, 0.0, sess)
one_hot_labels = tf.keras.utils.to_categorical(test_ground_truth, num_classes=flags.label_count)
if teacher:
one_hot_labels = [ one_hot_labels, one_hot_labels, one_hot_labels ]
result = model.test_on_batch(test_fingerprints, one_hot_labels)
total_accuracy += result[-1] if teacher else result[1]
count = count + 1.0
total_accuracy = total_accuracy / count
logging.info('Final test accuracy = %.2f%% (N=%d)',
*(total_accuracy * 100, set_size))
with open(os.path.join(flags.train_dir, 'accuracy_last.txt'), 'wt') as fd:
fd.write(str(total_accuracy * 100))
model.save_weights(flags.train_dir + 'last_weights')
if __name__ == '__main__':
flags = model_flags.update_flags(None)
train(flags)
| 42.401929
| 159
| 0.705543
|
import json
from types import SimpleNamespace
import os.path
import pprint
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_addons as tfa
import kws_streaming.data.input_data as input_data
from kws_streaming.models import models
from kws_streaming.models import utils
import math
from transformers import AdamWeightDecay
from kws_streaming.models import model_flags
def train(flags):
flags.training = True
logging.set_verbosity(flags.verbosity)
tf.reset_default_graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
tf.keras.backend.set_session(sess)
audio_processor = input_data.AudioProcessor(flags)
time_shift_samples = int((flags.time_shift_ms * flags.sample_rate) / 1000)
# effective to have high learning rates at the start of training, followed by
# lower levels towards the end, the number of steps and learning rates can be
# specified as comma-separated lists to define the rate at each stage. For
# example --how_many_training_steps=10000,3000 --learning_rate=0.001,0.0001
# will run 13,000 training loops in total, with a rate of 0.001 for the first
# 10,000, and 0.0001 for the final 3,000.
training_steps_list = list(map(int, flags.how_many_training_steps.split(',')))
learning_rates_list = list(map(float, flags.learning_rate.split(',')))
if len(training_steps_list) != len(learning_rates_list):
raise Exception(
'--how_many_training_steps and --learning_rate must be equal length '
'lists, but are %d and %d long instead' % (len(training_steps_list),
len(learning_rates_list)))
logging.info(flags)
model = models.MODELS[flags.model_name](flags)
if flags.distill_teacher_json:
with open(flags.distill_teacher_json, 'r') as f:
teacher_flags = json.load(f, object_hook=lambda d: SimpleNamespace(
**{ k: v for k, v in flags.__dict__.items() if k not in d },
**d))
teacher_base = models.MODELS[teacher_flags.model_name](teacher_flags)
hard_labels = tf.keras.layers.Lambda(lambda logits: tf.one_hot(tf.math.argmax(logits, axis=-1), depth=flags.label_count))
teacher = tf.keras.models.Sequential([teacher_base, hard_labels])
teacher_base.trainable = False
teacher.trainable = False
else:
teacher = None
teacher_flags = None
base_model = model
logging.info(model.summary())
# save model summary
utils.save_model_summary(model, flags.train_dir)
# save model and data flags
with open(os.path.join(flags.train_dir, 'flags.txt'), 'wt') as f:
pprint.pprint(flags, stream=f)
loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True, label_smoothing=flags.label_smoothing)
metrics = ['accuracy']
if flags.optimizer == 'adam':
optimizer = tf.keras.optimizers.Adam(epsilon=flags.optimizer_epsilon)
elif flags.optimizer == 'momentum':
optimizer = tf.keras.optimizers.SGD(momentum=0.9)
elif flags.optimizer == 'novograd':
optimizer = tfa.optimizers.NovoGrad(
lr=0.05,
beta_1=flags.novograd_beta_1,
beta_2=flags.novograd_beta_2,
weight_decay=flags.novograd_weight_decay,
grad_averaging=bool(flags.novograd_grad_averaging))
elif flags.optimizer == 'adamw':
# Exclude some layers for weight decay
exclude = ["pos_emb", "class_emb", "layer_normalization", "bias"]
optimizer = AdamWeightDecay(learning_rate=0.05, weight_decay_rate=flags.l2_weight_decay, exclude_from_weight_decay=exclude)
else:
raise ValueError('Unsupported optimizer:%s' % flags.optimizer)
loss_weights = [ 0.5, 0.5, 0.0 ] if teacher else [ 1. ] # equally weight losses from label and teacher, ignore ensemble output
model.compile(optimizer=optimizer, loss=loss, loss_weights=loss_weights, metrics=metrics)
train_writer = tf.summary.FileWriter(flags.summaries_dir + '/train',
sess.graph)
validation_writer = tf.summary.FileWriter(flags.summaries_dir + '/validation')
sess.run(tf.global_variables_initializer())
if flags.start_checkpoint:
model.load_weights(flags.start_checkpoint).expect_partial()
logging.info('Weights loaded from %s', flags.start_checkpoint)
if teacher_flags and teacher_flags.start_checkpoint:
# Load weights into teacher base as this is the actual model that was saved, teacher includes hard label head
teacher_base.load_weights(teacher_flags.start_checkpoint).assert_existing_objects_matched()
logging.info('Distillation teacher weights loaded from %s', teacher_flags.start_checkpoint)
start_step = 0
logging.info('Training from step: %d ', start_step)
# Save graph.pbtxt.
tf.train.write_graph(sess.graph_def, flags.train_dir, 'graph.pbtxt')
# Save list of words.
with tf.io.gfile.GFile(os.path.join(flags.train_dir, 'labels.txt'), 'w') as f:
f.write('\n'.join(audio_processor.words_list))
best_accuracy = 0.0
# prepare parameters for exp learning rate decay
training_steps_max = np.sum(training_steps_list)
lr_init = learning_rates_list[0]
exp_rate = -np.log(learning_rates_list[-1] / lr_init)/training_steps_max
mode = 'training'
if flags.lr_schedule == 'cosine':
# Currently, no restarts are performed, so it is just a cosine decay over the entire
# training process. I think this is how DeiT does it.
lr_init = lr_init * flags.batch_size / 512
num_train = audio_processor.set_size(mode)
warmup_steps = int((num_train / flags.batch_size) * flags.warmup_epochs)
first_decay_steps=training_steps_max
# Training loop.
for training_step in range(start_step, training_steps_max + 1):
if training_step > 0:
offset = (training_step -
1) * flags.batch_size if flags.pick_deterministically else 0
# Pull the audio samples we'll use for training.
train_fingerprints, train_ground_truth = audio_processor.get_data(
flags.batch_size, offset, flags, flags.background_frequency,
flags.background_volume, time_shift_samples, mode,
flags.resample, flags.volume_resample, sess)
if flags.lr_schedule == 'exp':
learning_rate_value = lr_init * np.exp(-exp_rate * training_step)
elif flags.lr_schedule == 'linear':
training_steps_sum = 0
for i in range(len(training_steps_list)):
training_steps_sum += training_steps_list[i]
if training_step <= training_steps_sum:
learning_rate_value = learning_rates_list[i]
break
elif flags.lr_schedule == 'cosine':
learning_rate_value = lr_init * min(1, float(training_step) / max(1, warmup_steps)) * (math.cos(math.pi * training_step / training_steps_max) + 1) / 2.
else:
raise ValueError('Wrong lr_schedule: %s' % flags.lr_schedule)
tf.keras.backend.set_value(model.optimizer.learning_rate, learning_rate_value)
one_hot_labels = tf.keras.utils.to_categorical(train_ground_truth, num_classes=flags.label_count)
if teacher:
teacher_labels = teacher.predict_on_batch(train_fingerprints)
one_hot_labels = [ one_hot_labels, teacher_labels, one_hot_labels ]
result = model.train_on_batch(train_fingerprints, one_hot_labels)
if teacher:
loss_total, loss_label, loss_teacher, loss_average, acc_label, acc_teacher, acc_ensemble = result
differences = (teacher_labels != one_hot_labels).astype(dtype=int).sum()
logging.info(
'Step #%d: rate %f, accuracy %.2f%%, cross entropy %f, teacher acc %.2f%% (%d diff), teacher cross entropy %f, ensemble acc %.2f%%',
*(training_step, learning_rate_value, acc_label * 100, loss_total, acc_teacher * 100, differences, loss_teacher, acc_ensemble * 100))
summary = tf.Summary(value=[
tf.Summary.Value(tag='accuracy', simple_value=acc_label),
tf.Summary.Value(tag='teacher_accuracy', simple_value=acc_teacher),
tf.Summary.Value(tag='ensemble_accuracy', simple_value=acc_ensemble),
])
else:
loss_label, acc_label = result
logging.info(
'Step #%d: rate %f, accuracy %.2f%%, cross entropy %f',
*(training_step, learning_rate_value, acc_label * 100, loss_label))
summary = tf.Summary(value=[
tf.Summary.Value(tag='accuracy', simple_value=acc_label),
])
train_writer.add_summary(summary, training_step)
is_last_step = (training_step == training_steps_max)
if (training_step % flags.eval_step_interval) == 0 or is_last_step:
set_size = audio_processor.set_size('validation')
set_size = int(set_size / flags.batch_size) * flags.batch_size
total_accuracy = 0.0
count = 0.0
for i in range(0, set_size, flags.batch_size):
validation_fingerprints, validation_ground_truth = audio_processor.get_data(
flags.batch_size, i, flags, 0.0,
0.0, 0, 'validation',
0.0, 0.0, sess)
one_hot_labels = tf.keras.utils.to_categorical(validation_ground_truth, num_classes=flags.label_count)
if teacher:
one_hot_labels = [ one_hot_labels, one_hot_labels, one_hot_labels ]
result = model.test_on_batch(validation_fingerprints,
one_hot_labels)
if teacher:
loss_total, loss_label, loss_teacher, loss_average, acc_label, acc_teacher, acc_ensemble = result
summary = tf.Summary(value=[
tf.Summary.Value(tag='accuracy', simple_value=acc_ensemble),
tf.Summary.Value(tag='label_head_accuracy', simple_value=acc_label),
tf.Summary.Value(tag='distill_head_accuracy', simple_value=acc_teacher),
])
accuracy = acc_ensemble
else:
loss_label, acc_label = result
summary = tf.Summary(value=[
tf.Summary.Value(tag='accuracy', simple_value=acc_label),])
accuracy = acc_label
validation_writer.add_summary(summary, training_step)
total_accuracy += accuracy
count = count + 1.0
total_accuracy = total_accuracy / count
logging.info('Step %d: Validation accuracy = %.2f%% (N=%d)',
*(training_step, total_accuracy * 100, set_size))
if total_accuracy >= best_accuracy:
best_accuracy = total_accuracy
model.save_weights(flags.train_dir + 'best_weights')
logging.info('So far the best validation accuracy is %.2f%%',
(best_accuracy * 100))
tf.keras.backend.set_learning_phase(0)
set_size = audio_processor.set_size('testing')
set_size = int(set_size / flags.batch_size) * flags.batch_size
logging.info('set_size=%d', set_size)
total_accuracy = 0.0
count = 0.0
for i in range(0, set_size, flags.batch_size):
test_fingerprints, test_ground_truth = audio_processor.get_data(
flags.batch_size, i, flags, 0.0, 0.0, 0, 'testing', 0.0, 0.0, sess)
one_hot_labels = tf.keras.utils.to_categorical(test_ground_truth, num_classes=flags.label_count)
if teacher:
one_hot_labels = [ one_hot_labels, one_hot_labels, one_hot_labels ]
result = model.test_on_batch(test_fingerprints, one_hot_labels)
total_accuracy += result[-1] if teacher else result[1]
count = count + 1.0
total_accuracy = total_accuracy / count
logging.info('Final test accuracy = %.2f%% (N=%d)',
*(total_accuracy * 100, set_size))
with open(os.path.join(flags.train_dir, 'accuracy_last.txt'), 'wt') as fd:
fd.write(str(total_accuracy * 100))
model.save_weights(flags.train_dir + 'last_weights')
if __name__ == '__main__':
flags = model_flags.update_flags(None)
train(flags)
| true
| true
|
f7197ec8accb7480f7e6eca284267bccdb20df57
| 5,965
|
py
|
Python
|
test/functional/rpc_users.py
|
mrmikeo/GAU-Core
|
6f56bb73d0736a4245c22391314d6ba55de0e0d8
|
[
"MIT"
] | 2
|
2020-08-25T18:02:32.000Z
|
2021-08-23T09:40:41.000Z
|
test/functional/rpc_users.py
|
mrmikeo/GAU-Core
|
6f56bb73d0736a4245c22391314d6ba55de0e0d8
|
[
"MIT"
] | null | null | null |
test/functional/rpc_users.py
|
mrmikeo/GAU-Core
|
6f56bb73d0736a4245c22391314d6ba55de0e0d8
|
[
"MIT"
] | 2
|
2020-08-06T20:56:42.000Z
|
2020-11-23T03:11:17.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiple RPC users."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import str_to_b64str, assert_equal
import os
import http.client
import urllib.parse
class HTTPBasicsTest (BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def setup_chain(self):
super().setup_chain()
#Append rpcauth to gauntlet.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
rpcuser = "rpcuser=rpcuser�"
rpcpassword = "rpcpassword=rpcpassword�"
with open(os.path.join(self.options.tmpdir+"/node0", "gauntlet.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
with open(os.path.join(self.options.tmpdir+"/node1", "gauntlet.conf"), 'a', encoding='utf8') as f:
f.write(rpcuser+"\n")
f.write(rpcpassword+"\n")
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urllib.parse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcuser tool
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
#Second authpair with different username
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
authpairnew = "rt:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Use new authpair to confirm both work
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong login name with rt's password
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Wrong password for rt
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Correct for rt2
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong password for rt2
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
###############################################################
# Check correctness of the rpcuser/rpcpassword config options #
###############################################################
url = urllib.parse.urlparse(self.nodes[1].url)
# rpcuser and rpcpassword authpair
rpcuserauthpair = "rpcuser�:rpcpassword�"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong login name with rpcuser's password
rpcuserauthpair = "rpcuserwrong:rpcpassword"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Wrong password for rpcuser
rpcuserauthpair = "rpcuser:rpcpasswordwrong"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
| 38.733766
| 129
| 0.61425
|
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import str_to_b64str, assert_equal
import os
import http.client
import urllib.parse
class HTTPBasicsTest (BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def setup_chain(self):
super().setup_chain()
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
rpcuser = "rpcuser=rpcuser�"
rpcpassword = "rpcpassword=rpcpassword�"
with open(os.path.join(self.options.tmpdir+"/node0", "gauntlet.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
with open(os.path.join(self.options.tmpdir+"/node1", "gauntlet.conf"), 'a', encoding='utf8') as f:
f.write(rpcuser+"\n")
f.write(rpcpassword+"\n")
def run_test(self):
()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
###############################################################
# Check correctness of the rpcuser/rpcpassword config options #
###############################################################
url = urllib.parse.urlparse(self.nodes[1].url)
# rpcuser and rpcpassword authpair
rpcuserauthpair = "rpcuser�:rpcpassword�"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong login name with rpcuser's password
rpcuserauthpair = "rpcuserwrong:rpcpassword"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
rpcuserauthpair = "rpcuser:rpcpasswordwrong"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
| true
| true
|
f719804da78f16f6af3489ac457e49300a75a6b2
| 1,916
|
py
|
Python
|
do_flask_mail.py
|
penglee87/lpython
|
3a53322ccdebf83d6b358386518cf81712433c9e
|
[
"bzip2-1.0.6"
] | null | null | null |
do_flask_mail.py
|
penglee87/lpython
|
3a53322ccdebf83d6b358386518cf81712433c9e
|
[
"bzip2-1.0.6"
] | null | null | null |
do_flask_mail.py
|
penglee87/lpython
|
3a53322ccdebf83d6b358386518cf81712433c9e
|
[
"bzip2-1.0.6"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from flask import Flask
from flask_mail import Mail
from flask_mail import Message
import os
#Tested successfully; the purpose of some parameters is unclear
app = Flask(__name__)
app.config['MAIL_SERVER'] = 'smtp.163.com'
app.config['MAIL_PORT'] = 25
app.config['MAIL_USE_TLS'] = True
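#Descriptive note: Flask-Mail conventionally pairs MAIL_USE_TLS (STARTTLS) with port 587
#and MAIL_USE_SSL with port 465; whether TLS over port 25 is accepted depends on the
#SMTP server, which may explain the "purpose unclear" remark above.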
app.config['MAIL_USERNAME'] = 'penglee87@163.com'
app.config['MAIL_PASSWORD'] = '******'
app.config['FLASKY_MAIL_SUBJECT_PREFIX'] = '[Flasky]' #mail subject prefix
#app.config['FLASKY_MAIL_SENDER'] = 'penglee87@163.com'
#app.config['FLASKY_ADMIN'] = 'penglee87@163.com'
mail = Mail(app)
"""
app.config['MAIL_USERNAME'] = os.environ.get('MAIL_USERNAME')
app.config['MAIL_PASSWORD'] = os.environ.get('MAIL_PASSWORD')
app.config['FLASKY_MAIL_SUBJECT_PREFIX'] = '[Flasky]'
app.config['FLASKY_MAIL_SENDER'] = 'Flasky Admin <flasky@example.com>'
app.config['FLASKY_ADMIN'] = os.environ.get('FLASKY_ADMIN')
"""
@app.route("/")
def index():
#Message(subject, sender, recipients)
msg = Message("Hello",
sender="penglee87@163.com",
recipients=["lipeng@163.com"])
msg.body = "testing"
msg.html = "<b>testing</b>"
mail.send(msg)
return '<h1>Hello World!</h1>'
if __name__ == '__main__':
app.run(debug=True)
"""
msg = Message("Hello",
sender="penglee87@163.com",
recipients=["lipeng@163.com"])
msg.body = "testing"
msg.html = "<b>testing</b>"
mail.send(msg)
if __name__ == '__main__':
mail.send(msg)
pip install --no-deps lamson chardet flask-mail
set MAIL_USERNAME=penglee87@163.com
set MAIL_PASSWORD=******
set FLASKY_ADMIN=penglee87@163.com
>>> from flask.ext.mail import Message
>>> from hello import mail
>>> msg = Message('test subject', sender='penglee1206@gmail.com',recipients=['380517767@qq.com'])
>>> msg.body = 'text body'
>>> msg.html = '<b>HTML</b> body'
>>> with app.app_context():
... mail.send(msg)
"""
| 26.246575
| 97
| 0.647182
|
from flask import Flask
from flask_mail import Mail
from flask_mail import Message
import os
app = Flask(__name__)
app.config['MAIL_SERVER'] = 'smtp.163.com'
app.config['MAIL_PORT'] = 25
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USERNAME'] = 'penglee87@163.com'
app.config['MAIL_PASSWORD'] = '******'
app.config['FLASKY_MAIL_SUBJECT_PREFIX'] = '[Flasky]'
mail = Mail(app)
@app.route("/")
def index():
msg = Message("Hello",
sender="penglee87@163.com",
recipients=["lipeng@163.com"])
msg.body = "testing"
msg.html = "<b>testing</b>"
mail.send(msg)
return '<h1>Hello World!</h1>'
if __name__ == '__main__':
app.run(debug=True)
| true
| true
|
f71982541576d139123ce5e181dca42523d11d05
| 459
|
py
|
Python
|
blog/search_indexes.py
|
GITliyanfeng/blog-django
|
a804702026a2d58664ec83a993116e17b89e9e8e
|
[
"MIT"
] | 2
|
2019-03-14T12:35:36.000Z
|
2019-03-14T12:35:38.000Z
|
blog/search_indexes.py
|
GITliyanfeng/blog-django
|
a804702026a2d58664ec83a993116e17b89e9e8e
|
[
"MIT"
] | null | null | null |
blog/search_indexes.py
|
GITliyanfeng/blog-django
|
a804702026a2d58664ec83a993116e17b89e9e8e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2019/3/19 0019 16:25
# @Author : __Yanfeng
# @Site :
# @File : search_indexes.py
# @Software: PyCharm
from haystack import indexes
from .models import Post
class PostIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
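# With use_template=True, haystack typically builds this field from a search template,
# e.g. templates/search/indexes/blog/post_text.txt (an assumed path based on the app
# and model names, not confirmed by this repository).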
def get_model(self):
return Post
def index_queryset(self, using=None):
return self.get_model().latest_posts()
| 24.157895
| 62
| 0.67756
|
from haystack import indexes
from .models import Post
class PostIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
def get_model(self):
return Post
def index_queryset(self, using=None):
return self.get_model().latest_posts()
| true
| true
|
f7198316dcf1fee5ef6b1b5530246a472718064a
| 109
|
py
|
Python
|
rest_framework_security/deny_repeat_password/__init__.py
|
RubenEu/django-rest-framework-security
|
638cf271c51a5bafd434a6b6a9c25a7c4849b485
|
[
"MIT"
] | 7
|
2020-09-01T09:55:25.000Z
|
2021-11-04T06:59:04.000Z
|
rest_framework_security/deny_repeat_password/__init__.py
|
RubenEu/django-rest-framework-security
|
638cf271c51a5bafd434a6b6a9c25a7c4849b485
|
[
"MIT"
] | 32
|
2020-10-28T17:09:18.000Z
|
2022-03-12T00:55:09.000Z
|
rest_framework_security/deny_repeat_password/__init__.py
|
RubenEu/django-rest-framework-security
|
638cf271c51a5bafd434a6b6a9c25a7c4849b485
|
[
"MIT"
] | 2
|
2020-12-18T01:26:53.000Z
|
2021-11-04T06:59:07.000Z
|
default_app_config = (
"rest_framework_security.deny_repeat_password.apps.DenyRepeatPasswordAppConfig"
)
| 27.25
| 83
| 0.844037
|
default_app_config = (
"rest_framework_security.deny_repeat_password.apps.DenyRepeatPasswordAppConfig"
)
| true
| true
|
f71983d5a0a270119c6b7c7701a902ea4892f18a
| 20,123
|
py
|
Python
|
obstools/scripts/atacr_clean_spectra.py
|
paudetseis/OBStools
|
c6c02d8864c25a14f22d1fae17ff5ad911b9ff00
|
[
"MIT"
] | 1
|
2019-12-05T04:32:38.000Z
|
2019-12-05T04:32:38.000Z
|
obstools/scripts/atacr_clean_spectra.py
|
paudetseis/OBStools
|
c6c02d8864c25a14f22d1fae17ff5ad911b9ff00
|
[
"MIT"
] | 2
|
2019-12-04T02:06:45.000Z
|
2019-12-06T22:20:19.000Z
|
obstools/scripts/atacr_clean_spectra.py
|
paudetseis/OBStools
|
c6c02d8864c25a14f22d1fae17ff5ad911b9ff00
|
[
"MIT"
] | 1
|
2020-02-25T16:51:35.000Z
|
2020-02-25T16:51:35.000Z
|
#!/usr/bin/env python
# Copyright 2019 Pascal Audet & Helen Janiszewski
#
# This file is part of OBStools.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Import modules and functions
import numpy as np
import pickle
import stdb
from obstools.atacr import StaNoise, Power, Cross, Rotation
from obstools.atacr import utils, plotting
from pathlib import Path
from argparse import ArgumentParser
from os.path import exists as exist
from obspy import UTCDateTime
from numpy import nan
def get_cleanspec_arguments(argv=None):
"""
Get options from :class:`~argparse.ArgumentParser` objects.
Calling options for the script `obs_clean_spectra.py` that accompanies this
package.
"""
parser = ArgumentParser(
usage="%(prog)s [options] <indb>",
description="Script used "
"to extract daily spectra calculated from " +
"`obs_daily_spectra.py` and flag days for outlier " +
"PSDs and calculate spectral averages of the " +
"corresponding Fourier transforms over the entire " +
"time period specified. The stations are processed " +
"one by one and the data are stored to disk.")
parser.add_argument(
"indb",
help="Station Database to process from.",
type=str)
# General Settings
parser.add_argument(
"--keys",
action="store",
type=str,
dest="stkeys",
default="",
help="Specify a comma separated list of station " +
"keys for which to perform the analysis. These must " +
"be contained within the station database. Partial " +
"keys will be used to match against those in the " +
"dictionary. For instance, providing IU will match " +
"with all stations in the IU network. " +
"[Default processes all stations in the database]")
parser.add_argument(
"-O", "--overwrite",
action="store_true",
dest="ovr",
default=False,
help="Force the overwriting of pre-existing data. " +
"[Default False]")
# Event Selection Criteria
DaysGroup = parser.add_argument_group(
title="Time Search Settings",
description="Time settings associated with " +
"searching for day-long seismograms")
DaysGroup.add_argument(
"--start",
action="store",
type=str,
dest="startT",
default="",
help="Specify a UTCDateTime compatible string " +
"representing the start day for the data search. " +
"This will override any station start times. " +
"[Default start date of each station in database]")
DaysGroup.add_argument(
"--end",
action="store",
type=str,
dest="endT",
default="",
help="Specify a UTCDateTime compatible string " +
"representing the start time for the data search. " +
"This will override any station end times. " +
"[Default end date of each station in database]")
# Constants Settings
ConstGroup = parser.add_argument_group(
title='Parameter Settings',
description="Miscellaneous default values " +
"and settings")
ConstGroup.add_argument(
"--freq-band",
action="store",
type=str,
dest="pd",
default=None,
help="Specify comma-separated frequency limits " +
"(float, in Hz) over which to calculate spectral " +
"features used in flagging the days/windows. " +
"[Default 0.004,2.0]")
ConstGroup.add_argument(
"--tolerance",
action="store",
type=float,
dest="tol",
default=1.5,
help="Specify parameter for tolerance threshold. " +
"If spectrum > std*tol, window is flagged as bad. " +
"[Default 1.5]")
ConstGroup.add_argument(
"--alpha",
action="store",
type=float,
dest="alpha",
default=0.05,
help="Confidence level for f-test, for iterative " +
"flagging of windows. [Default 0.05, or 95 percent confidence]")
# Constants Settings
FigureGroup = parser.add_argument_group(
title='Figure Settings',
description="Flags for plotting figures")
FigureGroup.add_argument(
"--figQC",
action="store_true",
dest="fig_QC",
default=False,
help="Plot Quality-Control figure. " +
"[Default does not plot figure]")
FigureGroup.add_argument(
"--debug",
action="store_true",
dest="debug",
default=False,
help="Plot intermediate steps for debugging. " +
"[Default does not plot figure]")
FigureGroup.add_argument(
"--figAverage",
action="store_true",
dest="fig_average",
default=False,
help="Plot daily average figure. " +
"[Default does not plot figure]")
FigureGroup.add_argument(
"--figCoh",
action="store_true",
dest="fig_coh_ph",
default=False,
help="Plot Coherence and Phase figure. " +
"[Default does not plot figure]")
FigureGroup.add_argument(
"--figCross",
action="store_true",
dest="fig_av_cross",
default=False,
help="Plot cross-spectra figure. " +
"[Default does not plot figure]")
FigureGroup.add_argument(
"--save-fig",
action="store_true",
dest="saveplot",
default=False,
help="Set this option if you wish to save the figure(s). [Default " +
"does not save figure]")
FigureGroup.add_argument(
"--format",
action="store",
type=str,
dest="form",
default="png",
help="Specify format of figure. Can be any one of the valid" +
"matplotlib formats: 'png', 'jpg', 'eps', 'pdf'. [Default 'png']")
args = parser.parse_args(argv)
# Check inputs
if not exist(args.indb):
parser.error("Input file " + args.indb + " does not exist")
# create station key list
if len(args.stkeys) > 0:
args.stkeys = args.stkeys.split(',')
# construct start time
if len(args.startT) > 0:
try:
args.startT = UTCDateTime(args.startT)
except Exception:
parser.error(
"Error: Cannot construct UTCDateTime from start time: " +
args.startT)
else:
args.startT = None
# construct end time
if len(args.endT) > 0:
try:
args.endT = UTCDateTime(args.endT)
except Exception:
parser.error(
"Error: Cannot construct UTCDateTime from end time: " +
args.endT)
else:
args.endT = None
if args.pd is None:
args.pd = [0.004, 2.0]
else:
args.pd = [float(val) for val in args.pd.split(',')]
args.pd = sorted(args.pd)
if (len(args.pd)) != 2:
raise(Exception(
"Error: --freq-band should contain 2 " +
"comma-separated floats"))
return args
def main(args=None):
if args is None:
# Run Input Parser
args = get_cleanspec_arguments()
# Load Database
# stdb>0.1.3
try:
db, stkeys = stdb.io.load_db(fname=args.indb, keys=args.stkeys)
# stdb=0.1.3
except Exception:
db = stdb.io.load_db(fname=args.indb)
# Construct station key loop
allkeys = db.keys()
sorted(allkeys)
# Extract key subset
if len(args.stkeys) > 0:
stkeys = []
for skey in args.stkeys:
stkeys.extend([s for s in allkeys if skey in s])
else:
stkeys = db.keys()
sorted(stkeys)
# Loop over station keys
for stkey in list(stkeys):
# Extract station information from dictionary
sta = db[stkey]
# Path where spectra are located
specpath = Path('SPECTRA') / stkey
if not specpath.is_dir():
raise(Exception(
"Path to " + str(specpath) +
" doesn`t exist - aborting"))
# Path where average spectra will be saved
avstpath = Path('AVG_STA') / stkey
if not avstpath.is_dir():
print("Path to "+str(avstpath)+" doesn`t exist - creating it")
avstpath.mkdir(parents=True)
# Path where plots will be saved
if args.saveplot:
plotpath = avstpath / 'PLOTS'
if not plotpath.is_dir():
plotpath.mkdir(parents=True)
else:
plotpath = False
# Get catalogue search start time
if args.startT is None:
tstart = sta.startdate
else:
tstart = args.startT
# Get catalogue search end time
if args.endT is None:
tend = sta.enddate
else:
tend = args.endT
if tstart > sta.enddate or tend < sta.startdate:
continue
# Temporary print locations
tlocs = sta.location
if len(tlocs) == 0:
tlocs = ['']
for il in range(0, len(tlocs)):
if len(tlocs[il]) == 0:
tlocs[il] = "--"
sta.location = tlocs
# Update Display
print("\n|===============================================|")
print("|===============================================|")
print("| {0:>8s} |".format(
sta.station))
print("|===============================================|")
print("|===============================================|")
print("| Station: {0:>2s}.{1:5s} |".format(
sta.network, sta.station))
print("| Channel: {0:2s}; Locations: {1:15s} |".format(
sta.channel, ",".join(tlocs)))
print("| Lon: {0:7.2f}; Lat: {1:6.2f} |".format(
sta.longitude, sta.latitude))
print("| Start time: {0:19s} |".format(
sta.startdate.strftime("%Y-%m-%d %H:%M:%S")))
print("| End time: {0:19s} |".format(
sta.enddate.strftime("%Y-%m-%d %H:%M:%S")))
print("|-----------------------------------------------|")
# Filename for output average spectra
dstart = str(tstart.year).zfill(4)+'.'+str(tstart.julday).zfill(3)+'-'
dend = str(tend.year).zfill(4)+'.'+str(tend.julday).zfill(3)+'.'
fileavst = avstpath / (dstart+dend+'avg_sta.pkl')
if fileavst.exists():
if not args.ovr:
print("* -> file "+str(fileavst)+" exists - continuing")
continue
# Containers for power and cross spectra
coh_all = []
ph_all = []
coh_12_all = []
coh_1Z_all = []
coh_1P_all = []
coh_2Z_all = []
coh_2P_all = []
coh_ZP_all = []
ph_12_all = []
ph_1Z_all = []
ph_1P_all = []
ph_2Z_all = []
ph_2P_all = []
ph_ZP_all = []
ad_12_all = []
ad_1Z_all = []
ad_1P_all = []
ad_2Z_all = []
ad_2P_all = []
ad_ZP_all = []
nwins = []
t1 = tstart
# Initialize StaNoise object
stanoise = StaNoise()
# Loop through each day withing time range
while t1 < tend:
year = str(t1.year).zfill(4)
jday = str(t1.julday).zfill(3)
tstamp = year+'.'+jday+'.'
filespec = specpath / (tstamp + 'spectra.pkl')
# Load file if it exists
if filespec.exists():
print("\n"+"*"*60)
print('* Calculating noise spectra for key ' +
stkey+' and day '+year+'.'+jday)
print("* -> file "+str(filespec)+" found - loading")
file = open(filespec, 'rb')
daynoise = pickle.load(file)
file.close()
stanoise += daynoise
else:
t1 += 3600.*24.
continue
coh_all.append(daynoise.rotation.coh)
ph_all.append(daynoise.rotation.ph)
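# The quantities accumulated below follow the usual spectral definitions, assumed
# here to match obstools.atacr.utils (each smoothed over 50 frequency samples):
#   coherence = |Gxy|^2 / (Gxx * Gyy), admittance = |Gxy| / Gxx, phase = arg(Gxy) in degrees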
# Coherence
coh_12_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c12,
daynoise.power.c11,
daynoise.power.c22), 50))
coh_1Z_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c1Z,
daynoise.power.c11,
daynoise.power.cZZ), 50))
coh_1P_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c1P,
daynoise.power.c11,
daynoise.power.cPP), 50))
coh_2Z_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c2Z,
daynoise.power.c22,
daynoise.power.cZZ), 50))
coh_2P_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c2P,
daynoise.power.c22,
daynoise.power.cPP), 50))
coh_ZP_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.cZP,
daynoise.power.cZZ,
daynoise.power.cPP), 50))
# Phase
try:
ph_12_all.append(
180./np.pi*utils.phase(daynoise.cross.c12))
except Exception:
ph_12_all.append(None)
try:
ph_1Z_all.append(
180./np.pi*utils.phase(daynoise.cross.c1Z))
except Exception:
ph_1Z_all.append(None)
try:
ph_1P_all.append(
180./np.pi*utils.phase(daynoise.cross.c1P))
except Exception:
ph_1P_all.append(None)
try:
ph_2Z_all.append(
180./np.pi*utils.phase(daynoise.cross.c2Z))
except Exception:
ph_2Z_all.append(None)
try:
ph_2P_all.append(
180./np.pi*utils.phase(daynoise.cross.c2P))
except Exception:
ph_2P_all.append(None)
try:
ph_ZP_all.append(
180./np.pi*utils.phase(daynoise.cross.cZP))
except Exception:
ph_ZP_all.append(None)
# Admittance
ad_12_all.append(utils.smooth(utils.admittance(
daynoise.cross.c12, daynoise.power.c11), 50))
ad_1Z_all.append(utils.smooth(utils.admittance(
daynoise.cross.c1Z, daynoise.power.c11), 50))
ad_1P_all.append(utils.smooth(utils.admittance(
daynoise.cross.c1P, daynoise.power.c11), 50))
ad_2Z_all.append(utils.smooth(utils.admittance(
daynoise.cross.c2Z, daynoise.power.c22), 50))
ad_2P_all.append(utils.smooth(utils.admittance(
daynoise.cross.c2P, daynoise.power.c22), 50))
ad_ZP_all.append(utils.smooth(utils.admittance(
daynoise.cross.cZP, daynoise.power.cZZ), 50))
t1 += 3600.*24.
# Convert to numpy arrays
coh_all = np.array(coh_all)
ph_all = np.array(ph_all)
coh_12_all = np.array(coh_12_all)
coh_1Z_all = np.array(coh_1Z_all)
coh_1P_all = np.array(coh_1P_all)
coh_2Z_all = np.array(coh_2Z_all)
coh_2P_all = np.array(coh_2P_all)
coh_ZP_all = np.array(coh_ZP_all)
ph_12_all = np.array(ph_12_all)
ph_1Z_all = np.array(ph_1Z_all)
ph_1P_all = np.array(ph_1P_all)
ph_2Z_all = np.array(ph_2Z_all)
ph_2P_all = np.array(ph_2P_all)
ph_ZP_all = np.array(ph_ZP_all)
ad_12_all = np.array(ad_12_all)
ad_1Z_all = np.array(ad_1Z_all)
ad_1P_all = np.array(ad_1P_all)
ad_2Z_all = np.array(ad_2Z_all)
ad_2P_all = np.array(ad_2P_all)
ad_ZP_all = np.array(ad_ZP_all)
# Store transfer functions as objects for plotting
coh = Cross(coh_12_all, coh_1Z_all, coh_1P_all,
coh_2Z_all, coh_2P_all, coh_ZP_all)
ph = Cross(ph_12_all, ph_1Z_all, ph_1P_all,
ph_2Z_all, ph_2P_all, ph_ZP_all)
ad = Cross(ad_12_all, ad_1Z_all, ad_1P_all,
ad_2Z_all, ad_2P_all, ad_ZP_all)
# Quality control to identify outliers
stanoise.QC_sta_spectra(pd=args.pd, tol=args.tol, alpha=args.alpha,
fig_QC=args.fig_QC, debug=args.debug,
save=plotpath, form=args.form)
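# Per the CLI help above, QC_sta_spectra flags a day as an outlier when its spectrum
# exceeds std * tol within the --freq-band limits, iterating with an f-test at
# confidence level alpha; only the remaining good days enter the averages below.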
# Average spectra for good days
stanoise.average_sta_spectra(
fig_average=args.fig_average,
save=plotpath, form=args.form)
if args.fig_av_cross:
fname = stkey + '.' + 'av_coherence'
plot = plotting.fig_av_cross(
stanoise.f, coh, stanoise.gooddays,
'Coherence', stanoise.ncomp, key=stkey, lw=0.5)
# if plotpath.is_dir():
if plotpath:
plot.savefig(
str(plotpath / (fname + '.' + args.form)),
dpi=300, bbox_inches='tight', format=args.form)
else:
plot.show()
fname = stkey + '.' + 'av_admittance'
plot = plotting.fig_av_cross(
stanoise.f, ad, stanoise.gooddays,
'Admittance', stanoise.ncomp, key=stkey, lw=0.5)
if plotpath:
plot.savefig(
str(plotpath / (fname + '.' + args.form)),
dpi=300, bbox_inches='tight', format=args.form)
else:
plot.show()
fname = stkey + '.' + 'av_phase'
plot = plotting.fig_av_cross(
stanoise.f, ph, stanoise.gooddays,
'Phase', stanoise.ncomp, key=stkey, marker=',', lw=0)
if plotpath:
plot.savefig(
str(plotpath / (fname + '.' + args.form)),
dpi=300, bbox_inches='tight', format=args.form)
else:
plot.show()
if args.fig_coh_ph and stanoise.direc is not None:
fname = stkey + '.' + 'coh_ph'
plot = plotting.fig_coh_ph(coh_all, ph_all, stanoise.direc)
if plotpath:
plot.savefig(
str(plotpath / (fname + '.' + args.form)),
dpi=300, bbox_inches='tight', format=args.form)
else:
plot.show()
# Save to file
stanoise.save(fileavst)
if __name__ == "__main__":
# Run main program
main()
| 34.28109
| 79
| 0.533718
|
import numpy as np
import pickle
import stdb
from obstools.atacr import StaNoise, Power, Cross, Rotation
from obstools.atacr import utils, plotting
from pathlib import Path
from argparse import ArgumentParser
from os.path import exists as exist
from obspy import UTCDateTime
from numpy import nan
def get_cleanspec_arguments(argv=None):
parser = ArgumentParser(
usage="%(prog)s [options] <indb>",
description="Script used "
"to extract daily spectra calculated from " +
"`obs_daily_spectra.py` and flag days for outlier " +
"PSDs and calculate spectral averages of the " +
"corresponding Fourier transforms over the entire " +
"time period specified. The stations are processed " +
"one by one and the data are stored to disk.")
parser.add_argument(
"indb",
help="Station Database to process from.",
type=str)
parser.add_argument(
"--keys",
action="store",
type=str,
dest="stkeys",
default="",
help="Specify a comma separated list of station " +
"keys for which to perform the analysis. These must " +
"be contained within the station database. Partial " +
"keys will be used to match against those in the " +
"dictionary. For instance, providing IU will match " +
"with all stations in the IU network. " +
"[Default processes all stations in the database]")
parser.add_argument(
"-O", "--overwrite",
action="store_true",
dest="ovr",
default=False,
help="Force the overwriting of pre-existing data. " +
"[Default False]")
DaysGroup = parser.add_argument_group(
title="Time Search Settings",
description="Time settings associated with " +
"searching for day-long seismograms")
DaysGroup.add_argument(
"--start",
action="store",
type=str,
dest="startT",
default="",
help="Specify a UTCDateTime compatible string " +
"representing the start day for the data search. " +
"This will override any station start times. " +
"[Default start date of each station in database]")
DaysGroup.add_argument(
"--end",
action="store",
type=str,
dest="endT",
default="",
help="Specify a UTCDateTime compatible string " +
"representing the start time for the data search. " +
"This will override any station end times. " +
"[Default end date of each station in database]")
ConstGroup = parser.add_argument_group(
title='Parameter Settings',
description="Miscellaneous default values " +
"and settings")
ConstGroup.add_argument(
"--freq-band",
action="store",
type=str,
dest="pd",
default=None,
help="Specify comma-separated frequency limits " +
"(float, in Hz) over which to calculate spectral " +
"features used in flagging the days/windows. " +
"[Default 0.004,2.0]")
ConstGroup.add_argument(
"--tolerance",
action="store",
type=float,
dest="tol",
default=1.5,
help="Specify parameter for tolerance threshold. " +
"If spectrum > std*tol, window is flagged as bad. " +
"[Default 1.5]")
ConstGroup.add_argument(
"--alpha",
action="store",
type=float,
dest="alpha",
default=0.05,
help="Confidence level for f-test, for iterative " +
"flagging of windows. [Default 0.05, or 95 percent confidence]")
FigureGroup = parser.add_argument_group(
title='Figure Settings',
description="Flags for plotting figures")
FigureGroup.add_argument(
"--figQC",
action="store_true",
dest="fig_QC",
default=False,
help="Plot Quality-Control figure. " +
"[Default does not plot figure]")
FigureGroup.add_argument(
"--debug",
action="store_true",
dest="debug",
default=False,
help="Plot intermediate steps for debugging. " +
"[Default does not plot figure]")
FigureGroup.add_argument(
"--figAverage",
action="store_true",
dest="fig_average",
default=False,
help="Plot daily average figure. " +
"[Default does not plot figure]")
FigureGroup.add_argument(
"--figCoh",
action="store_true",
dest="fig_coh_ph",
default=False,
help="Plot Coherence and Phase figure. " +
"[Default does not plot figure]")
FigureGroup.add_argument(
"--figCross",
action="store_true",
dest="fig_av_cross",
default=False,
help="Plot cross-spectra figure. " +
"[Default does not plot figure]")
FigureGroup.add_argument(
"--save-fig",
action="store_true",
dest="saveplot",
default=False,
help="Set this option if you wish to save the figure(s). [Default " +
"does not save figure]")
FigureGroup.add_argument(
"--format",
action="store",
type=str,
dest="form",
default="png",
help="Specify format of figure. Can be any one of the valid" +
"matplotlib formats: 'png', 'jpg', 'eps', 'pdf'. [Default 'png']")
args = parser.parse_args(argv)
if not exist(args.indb):
parser.error("Input file " + args.indb + " does not exist")
if len(args.stkeys) > 0:
args.stkeys = args.stkeys.split(',')
if len(args.startT) > 0:
try:
args.startT = UTCDateTime(args.startT)
except Exception:
parser.error(
"Error: Cannot construct UTCDateTime from start time: " +
args.startT)
else:
args.startT = None
if len(args.endT) > 0:
try:
args.endT = UTCDateTime(args.endT)
except Exception:
parser.error(
"Error: Cannot construct UTCDateTime from end time: " +
args.endT)
else:
args.endT = None
if args.pd is None:
args.pd = [0.004, 2.0]
else:
args.pd = [float(val) for val in args.pd.split(',')]
args.pd = sorted(args.pd)
if (len(args.pd)) != 2:
raise(Exception(
"Error: --freq-band should contain 2 " +
"comma-separated floats"))
return args
def main(args=None):
if args is None:
args = get_cleanspec_arguments()
try:
db, stkeys = stdb.io.load_db(fname=args.indb, keys=args.stkeys)
except Exception:
db = stdb.io.load_db(fname=args.indb)
allkeys = db.keys()
sorted(allkeys)
if len(args.stkeys) > 0:
stkeys = []
for skey in args.stkeys:
stkeys.extend([s for s in allkeys if skey in s])
else:
stkeys = db.keys()
sorted(stkeys)
for stkey in list(stkeys):
sta = db[stkey]
specpath = Path('SPECTRA') / stkey
if not specpath.is_dir():
raise(Exception(
"Path to " + str(specpath) +
" doesn`t exist - aborting"))
avstpath = Path('AVG_STA') / stkey
if not avstpath.is_dir():
print("Path to "+str(avstpath)+" doesn`t exist - creating it")
avstpath.mkdir(parents=True)
if args.saveplot:
plotpath = avstpath / 'PLOTS'
if not plotpath.is_dir():
plotpath.mkdir(parents=True)
else:
plotpath = False
if args.startT is None:
tstart = sta.startdate
else:
tstart = args.startT
if args.endT is None:
tend = sta.enddate
else:
tend = args.endT
if tstart > sta.enddate or tend < sta.startdate:
continue
tlocs = sta.location
if len(tlocs) == 0:
tlocs = ['']
for il in range(0, len(tlocs)):
if len(tlocs[il]) == 0:
tlocs[il] = "--"
sta.location = tlocs
print("\n|===============================================|")
print("|===============================================|")
print("| {0:>8s} |".format(
sta.station))
print("|===============================================|")
print("|===============================================|")
print("| Station: {0:>2s}.{1:5s} |".format(
sta.network, sta.station))
print("| Channel: {0:2s}; Locations: {1:15s} |".format(
sta.channel, ",".join(tlocs)))
print("| Lon: {0:7.2f}; Lat: {1:6.2f} |".format(
sta.longitude, sta.latitude))
print("| Start time: {0:19s} |".format(
sta.startdate.strftime("%Y-%m-%d %H:%M:%S")))
print("| End time: {0:19s} |".format(
sta.enddate.strftime("%Y-%m-%d %H:%M:%S")))
print("|-----------------------------------------------|")
dstart = str(tstart.year).zfill(4)+'.'+str(tstart.julday).zfill(3)+'-'
dend = str(tend.year).zfill(4)+'.'+str(tend.julday).zfill(3)+'.'
fileavst = avstpath / (dstart+dend+'avg_sta.pkl')
if fileavst.exists():
if not args.ovr:
print("* -> file "+str(fileavst)+" exists - continuing")
continue
coh_all = []
ph_all = []
coh_12_all = []
coh_1Z_all = []
coh_1P_all = []
coh_2Z_all = []
coh_2P_all = []
coh_ZP_all = []
ph_12_all = []
ph_1Z_all = []
ph_1P_all = []
ph_2Z_all = []
ph_2P_all = []
ph_ZP_all = []
ad_12_all = []
ad_1Z_all = []
ad_1P_all = []
ad_2Z_all = []
ad_2P_all = []
ad_ZP_all = []
nwins = []
t1 = tstart
stanoise = StaNoise()
while t1 < tend:
year = str(t1.year).zfill(4)
jday = str(t1.julday).zfill(3)
tstamp = year+'.'+jday+'.'
filespec = specpath / (tstamp + 'spectra.pkl')
if filespec.exists():
print("\n"+"*"*60)
print('* Calculating noise spectra for key ' +
stkey+' and day '+year+'.'+jday)
print("* -> file "+str(filespec)+" found - loading")
file = open(filespec, 'rb')
daynoise = pickle.load(file)
file.close()
stanoise += daynoise
else:
t1 += 3600.*24.
continue
coh_all.append(daynoise.rotation.coh)
ph_all.append(daynoise.rotation.ph)
coh_12_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c12,
daynoise.power.c11,
daynoise.power.c22), 50))
coh_1Z_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c1Z,
daynoise.power.c11,
daynoise.power.cZZ), 50))
coh_1P_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c1P,
daynoise.power.c11,
daynoise.power.cPP), 50))
coh_2Z_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c2Z,
daynoise.power.c22,
daynoise.power.cZZ), 50))
coh_2P_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c2P,
daynoise.power.c22,
daynoise.power.cPP), 50))
coh_ZP_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.cZP,
daynoise.power.cZZ,
daynoise.power.cPP), 50))
try:
ph_12_all.append(
180./np.pi*utils.phase(daynoise.cross.c12))
except Exception:
ph_12_all.append(None)
try:
ph_1Z_all.append(
180./np.pi*utils.phase(daynoise.cross.c1Z))
except Exception:
ph_1Z_all.append(None)
try:
ph_1P_all.append(
180./np.pi*utils.phase(daynoise.cross.c1P))
except Exception:
ph_1P_all.append(None)
try:
ph_2Z_all.append(
180./np.pi*utils.phase(daynoise.cross.c2Z))
except Exception:
ph_2Z_all.append(None)
try:
ph_2P_all.append(
180./np.pi*utils.phase(daynoise.cross.c2P))
except Exception:
ph_2P_all.append(None)
try:
ph_ZP_all.append(
180./np.pi*utils.phase(daynoise.cross.cZP))
except Exception:
ph_ZP_all.append(None)
ad_12_all.append(utils.smooth(utils.admittance(
daynoise.cross.c12, daynoise.power.c11), 50))
ad_1Z_all.append(utils.smooth(utils.admittance(
daynoise.cross.c1Z, daynoise.power.c11), 50))
ad_1P_all.append(utils.smooth(utils.admittance(
daynoise.cross.c1P, daynoise.power.c11), 50))
ad_2Z_all.append(utils.smooth(utils.admittance(
daynoise.cross.c2Z, daynoise.power.c22), 50))
ad_2P_all.append(utils.smooth(utils.admittance(
daynoise.cross.c2P, daynoise.power.c22), 50))
ad_ZP_all.append(utils.smooth(utils.admittance(
daynoise.cross.cZP, daynoise.power.cZZ), 50))
t1 += 3600.*24.
coh_all = np.array(coh_all)
ph_all = np.array(ph_all)
coh_12_all = np.array(coh_12_all)
coh_1Z_all = np.array(coh_1Z_all)
coh_1P_all = np.array(coh_1P_all)
coh_2Z_all = np.array(coh_2Z_all)
coh_2P_all = np.array(coh_2P_all)
coh_ZP_all = np.array(coh_ZP_all)
ph_12_all = np.array(ph_12_all)
ph_1Z_all = np.array(ph_1Z_all)
ph_1P_all = np.array(ph_1P_all)
ph_2Z_all = np.array(ph_2Z_all)
ph_2P_all = np.array(ph_2P_all)
ph_ZP_all = np.array(ph_ZP_all)
ad_12_all = np.array(ad_12_all)
ad_1Z_all = np.array(ad_1Z_all)
ad_1P_all = np.array(ad_1P_all)
ad_2Z_all = np.array(ad_2Z_all)
ad_2P_all = np.array(ad_2P_all)
ad_ZP_all = np.array(ad_ZP_all)
coh = Cross(coh_12_all, coh_1Z_all, coh_1P_all,
coh_2Z_all, coh_2P_all, coh_ZP_all)
ph = Cross(ph_12_all, ph_1Z_all, ph_1P_all,
ph_2Z_all, ph_2P_all, ph_ZP_all)
ad = Cross(ad_12_all, ad_1Z_all, ad_1P_all,
ad_2Z_all, ad_2P_all, ad_ZP_all)
stanoise.QC_sta_spectra(pd=args.pd, tol=args.tol, alpha=args.alpha,
fig_QC=args.fig_QC, debug=args.debug,
save=plotpath, form=args.form)
stanoise.average_sta_spectra(
fig_average=args.fig_average,
save=plotpath, form=args.form)
if args.fig_av_cross:
fname = stkey + '.' + 'av_coherence'
plot = plotting.fig_av_cross(
stanoise.f, coh, stanoise.gooddays,
'Coherence', stanoise.ncomp, key=stkey, lw=0.5)
if plotpath:
plot.savefig(
str(plotpath / (fname + '.' + args.form)),
dpi=300, bbox_inches='tight', format=args.form)
else:
plot.show()
fname = stkey + '.' + 'av_admittance'
plot = plotting.fig_av_cross(
stanoise.f, ad, stanoise.gooddays,
'Admittance', stanoise.ncomp, key=stkey, lw=0.5)
if plotpath:
plot.savefig(
str(plotpath / (fname + '.' + args.form)),
dpi=300, bbox_inches='tight', format=args.form)
else:
plot.show()
fname = stkey + '.' + 'av_phase'
plot = plotting.fig_av_cross(
stanoise.f, ph, stanoise.gooddays,
'Phase', stanoise.ncomp, key=stkey, marker=',', lw=0)
if plotpath:
plot.savefig(
str(plotpath / (fname + '.' + args.form)),
dpi=300, bbox_inches='tight', format=args.form)
else:
plot.show()
if args.fig_coh_ph and stanoise.direc is not None:
fname = stkey + '.' + 'coh_ph'
plot = plotting.fig_coh_ph(coh_all, ph_all, stanoise.direc)
if plotpath:
plot.savefig(
str(plotpath / (fname + '.' + args.form)),
dpi=300, bbox_inches='tight', format=args.form)
else:
plot.show()
stanoise.save(fileavst)
if __name__ == "__main__":
main()
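# Illustrative only: a minimal numpy sketch of the cross-spectral quantities
# averaged above, assuming the utils.coherence / utils.admittance / utils.phase
# helpers follow the standard definitions. The underscored names below are
# local stand-ins, not the library implementations.
def _coherence_sketch(Gxy, Gxx, Gyy):
    # squared-magnitude coherence between the two auto-spectra Gxx and Gyy
    return np.abs(Gxy)**2 / (Gxx * Gyy)
def _admittance_sketch(Gxy, Gxx):
    # admittance (transfer amplitude) of the cross-spectrum relative to Gxx
    return np.abs(Gxy) / Gxx
def _phase_sketch(Gxy):
    # cross-spectral phase in radians (the script converts to degrees)
    return np.angle(Gxy)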
| true
| true
|
f7198466f423c197e1cd92a6791f6a97eeca93b9
| 2,362
|
py
|
Python
|
tests/demos/test_demos.py
|
Nicolinho/RLBench
|
3014e872f518d5439e73e057e2251dee1f9df481
|
[
"BSD-3-Clause"
] | 619
|
2019-09-26T23:15:57.000Z
|
2022-03-15T23:46:48.000Z
|
tests/demos/test_demos.py
|
Nicolinho/RLBench
|
3014e872f518d5439e73e057e2251dee1f9df481
|
[
"BSD-3-Clause"
] | 147
|
2019-09-27T02:22:45.000Z
|
2022-03-30T08:37:43.000Z
|
tests/demos/test_demos.py
|
Nicolinho/RLBench
|
3014e872f518d5439e73e057e2251dee1f9df481
|
[
"BSD-3-Clause"
] | 142
|
2019-09-27T03:43:12.000Z
|
2022-03-13T19:00:18.000Z
|
import unittest
import rlbench.backend.task as task
import os
from rlbench.backend.utils import task_file_to_task_class
from pyrep import PyRep
from pyrep.robots.arms.panda import Panda
from pyrep.robots.end_effectors.panda_gripper import PandaGripper
from rlbench.backend.const import TTT_FILE
from tools.task_validator import task_smoke
from rlbench.observation_config import ObservationConfig
from rlbench.backend.scene import Scene
from rlbench.backend.robot import Robot
TASKS = [t for t in os.listdir(task.TASKS_PATH)
if t != '__init__.py' and t.endswith('.py')]
DIR_PATH = os.path.dirname(os.path.abspath(__file__))
# Task does work, but fails demos often. These should eventually be improved.
FLAKY_TASKS = ['put_all_groceries_in_cupboard']
class TestTasks(unittest.TestCase):
"""Tests all of the tasks via the task_validator tool.
    Given that unit tests shouldn't take forever to run, we limit each
    validation run to only 1 variation. In practice, a newly created task
    should be validated for all variations (a minimal sketch of such a run
    is appended after the test below). Despite this, the test still takes
    a while to run.
"""
def test_run_task_validator(self):
for task_file in TASKS:
test_name = task_file.split('.py')[0]
with self.subTest(task=test_name):
if test_name in FLAKY_TASKS:
self.skipTest('Flaky task.')
sim = PyRep()
ttt_file = os.path.join(
DIR_PATH, '..', '..', 'rlbench', TTT_FILE)
sim.launch(ttt_file, headless=True)
sim.step_ui()
sim.set_simulation_timestep(50.0)
sim.step_ui()
sim.start()
robot = Robot(Panda(), PandaGripper())
obs = ObservationConfig()
obs.set_all(False)
scene = Scene(sim, robot, obs)
sim.start()
task_class = task_file_to_task_class(task_file)
active_task = task_class(sim, robot)
try:
task_smoke(active_task, scene, variation=-1,
max_variations=2, success=0.25)
except Exception as e:
sim.stop()
sim.shutdown()
raise e
sim.stop()
sim.shutdown()
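# Illustrative only: the class docstring above recommends validating a newly
# created task across all of its variations. This is a minimal sketch of such
# a run, reusing task_smoke with a larger, hypothetical variation budget; it
# is not part of the unit-test suite.
def validate_all_variations(active_task, scene, max_variations=50):
    # variation=-1 and success=0.25 mirror the call made in the test above;
    # only max_variations is raised so that more variations are exercised.
    task_smoke(active_task, scene, variation=-1,
               max_variations=max_variations, success=0.25)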
| 38.096774
| 78
| 0.610076
|
import unittest
import rlbench.backend.task as task
import os
from rlbench.backend.utils import task_file_to_task_class
from pyrep import PyRep
from pyrep.robots.arms.panda import Panda
from pyrep.robots.end_effectors.panda_gripper import PandaGripper
from rlbench.backend.const import TTT_FILE
from tools.task_validator import task_smoke
from rlbench.observation_config import ObservationConfig
from rlbench.backend.scene import Scene
from rlbench.backend.robot import Robot
TASKS = [t for t in os.listdir(task.TASKS_PATH)
if t != '__init__.py' and t.endswith('.py')]
DIR_PATH = os.path.dirname(os.path.abspath(__file__))
FLAKY_TASKS = ['put_all_groceries_in_cupboard']
class TestTasks(unittest.TestCase):
def test_run_task_validator(self):
for task_file in TASKS:
test_name = task_file.split('.py')[0]
with self.subTest(task=test_name):
if test_name in FLAKY_TASKS:
self.skipTest('Flaky task.')
sim = PyRep()
ttt_file = os.path.join(
DIR_PATH, '..', '..', 'rlbench', TTT_FILE)
sim.launch(ttt_file, headless=True)
sim.step_ui()
sim.set_simulation_timestep(50.0)
sim.step_ui()
sim.start()
robot = Robot(Panda(), PandaGripper())
obs = ObservationConfig()
obs.set_all(False)
scene = Scene(sim, robot, obs)
sim.start()
task_class = task_file_to_task_class(task_file)
active_task = task_class(sim, robot)
try:
task_smoke(active_task, scene, variation=-1,
max_variations=2, success=0.25)
except Exception as e:
sim.stop()
sim.shutdown()
raise e
sim.stop()
sim.shutdown()
| true
| true
|
f719867e8b00abb554a28d0fafbc160c9ea3d04e
| 3,652
|
py
|
Python
|
nova/openstack/common/excutils.py
|
bopopescu/nova_audit
|
1cd2901802f82d39411adfa04cf2f432ff3bf280
|
[
"Apache-2.0"
] | 1
|
2020-02-21T19:19:11.000Z
|
2020-02-21T19:19:11.000Z
|
nova/openstack/common/excutils.py
|
bopopescu/nova_audit
|
1cd2901802f82d39411adfa04cf2f432ff3bf280
|
[
"Apache-2.0"
] | null | null | null |
nova/openstack/common/excutils.py
|
bopopescu/nova_audit
|
1cd2901802f82d39411adfa04cf2f432ff3bf280
|
[
"Apache-2.0"
] | 1
|
2020-07-24T09:15:58.000Z
|
2020-07-24T09:15:58.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exception related utilities.
"""
import logging
import sys
import time
import traceback
from nova.openstack.common.gettextutils import _
class save_and_reraise_exception(object):
"""Save current exception, run some code and then re-raise.
In some cases the exception context can be cleared, resulting in None
being attempted to be re-raised after an exception handler is run. This
can happen when eventlet switches greenthreads or when running an
exception handler, code raises and catches an exception. In both
cases the exception context will be cleared.
To work around this, we save the exception state, run handler code, and
then re-raise the original exception. If another exception occurs, the
saved exception is logged and the new exception is re-raised.
In some cases the caller may not want to re-raise the exception, and
for those circumstances this context provides a reraise flag that
can be used to suppress the exception. For example:
except Exception:
with save_and_reraise_exception() as ctxt:
decide_if_need_reraise()
if not should_be_reraised:
ctxt.reraise = False
"""
def __init__(self):
self.reraise = True
def __enter__(self):
self.type_, self.value, self.tb, = sys.exc_info()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
logging.error(_('Original exception being dropped: %s'),
traceback.format_exception(self.type_,
self.value,
self.tb))
return False
if self.reraise:
raise self.type_, self.value, self.tb
def forever_retry_uncaught_exceptions(infunc):
def inner_func(*args, **kwargs):
last_log_time = 0
last_exc_message = None
exc_count = 0
while True:
try:
return infunc(*args, **kwargs)
except Exception as exc:
if exc.message == last_exc_message:
exc_count += 1
else:
exc_count = 1
# Do not log any more frequently than once a minute unless
# the exception message changes
cur_time = int(time.time())
if (cur_time - last_log_time > 60 or
exc.message != last_exc_message):
logging.exception(
_('Unexpected exception occurred %d time(s)... '
'retrying.') % exc_count)
last_log_time = cur_time
last_exc_message = exc.message
exc_count = 0
# This should be a very rare event. In case it isn't, do
# a sleep.
time.sleep(1)
return inner_func
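# Illustrative only: a minimal usage sketch of the decorator above, in the
# Python 2 style of this module. poll_forever, get_event and handle_event are
# hypothetical names, not part of nova.
@forever_retry_uncaught_exceptions
def poll_forever(get_event, handle_event):
    # Any uncaught exception is logged (throttled to roughly once a minute
    # for repeated messages) and the callable is simply invoked again.
    while True:
        handle_event(get_event())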
| 36.888889
| 78
| 0.61172
|
"""
Exception related utilities.
"""
import logging
import sys
import time
import traceback
from nova.openstack.common.gettextutils import _
class save_and_reraise_exception(object):
"""Save current exception, run some code and then re-raise.
In some cases the exception context can be cleared, resulting in None
being attempted to be re-raised after an exception handler is run. This
can happen when eventlet switches greenthreads or when running an
exception handler, code raises and catches an exception. In both
cases the exception context will be cleared.
To work around this, we save the exception state, run handler code, and
then re-raise the original exception. If another exception occurs, the
saved exception is logged and the new exception is re-raised.
In some cases the caller may not want to re-raise the exception, and
for those circumstances this context provides a reraise flag that
can be used to suppress the exception. For example:
except Exception:
with save_and_reraise_exception() as ctxt:
decide_if_need_reraise()
if not should_be_reraised:
ctxt.reraise = False
"""
def __init__(self):
self.reraise = True
def __enter__(self):
self.type_, self.value, self.tb, = sys.exc_info()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
logging.error(_('Original exception being dropped: %s'),
traceback.format_exception(self.type_,
self.value,
self.tb))
return False
if self.reraise:
raise self.type_, self.value, self.tb
def forever_retry_uncaught_exceptions(infunc):
def inner_func(*args, **kwargs):
last_log_time = 0
last_exc_message = None
exc_count = 0
while True:
try:
return infunc(*args, **kwargs)
except Exception as exc:
if exc.message == last_exc_message:
exc_count += 1
else:
exc_count = 1
cur_time = int(time.time())
if (cur_time - last_log_time > 60 or
exc.message != last_exc_message):
logging.exception(
_('Unexpected exception occurred %d time(s)... '
'retrying.') % exc_count)
last_log_time = cur_time
last_exc_message = exc.message
exc_count = 0
# a sleep.
time.sleep(1)
return inner_func
| false
| true
|
f71986b928e02b3c1c5322f3668bc41a49a8abc1
| 7,013
|
py
|
Python
|
GHC2018/process.py
|
purrcat259/n-n-hashcode
|
98a1c443e6112903bc29a858bc18476a6635d460
|
[
"MIT"
] | null | null | null |
GHC2018/process.py
|
purrcat259/n-n-hashcode
|
98a1c443e6112903bc29a858bc18476a6635d460
|
[
"MIT"
] | null | null | null |
GHC2018/process.py
|
purrcat259/n-n-hashcode
|
98a1c443e6112903bc29a858bc18476a6635d460
|
[
"MIT"
] | null | null | null |
from GHC2018.input import Input
from GHC2018.models.Car import Car
from GHC2018.models.Route import Route
from GHC2018.models.ride import calculate_distance
from tqdm import tqdm
class Process:
def __init__(self, input_data, debug=True):
self.input_data = input_data
self.debug = debug
self.current_time = 0
# self.get_routes()
def initialise_cars(self):
cars = []
for i in range(0, self.input_data.vehicle_count):
car = Car(i, 0, 0)
cars.append(car)
self.cars = cars
def debug_print(self, message):
if self.debug:
print(message)
def run(self):
self.initialise_cars()
self.rides = self.input_data.rides
sim_range = range(0, self.input_data.sim_steps)
if not self.debug:
sim_range = tqdm(sim_range)
for i in sim_range:
self.debug_print('--- STEP {}/{} ---'.format(i, self.input_data.sim_steps))
self.current_time = i
# if cars are at their destination, end the ride
self.end_rides()
# schedule any cars that are not assigned a ride
self.schedule_rides()
# move any cars
self.move_cars()
self.debug_print('SIMULATION ENDED')
print('{} rides completed. {} rides left unfinished.'.format(
len(self.get_completed_rides()),
len(self.rides) - len(self.get_completed_rides()))
)
self.output_file()
def output_file(self):
output_file_path = self.input_data.file_path.replace('.in', '.out')
car_rides = {}
for ride in self.get_completed_rides():
if ride.assigned_car in car_rides.keys():
car_rides[ride.assigned_car].append(ride.ride_id)
else:
car_rides[ride.assigned_car] = [ride.ride_id]
with open(output_file_path, 'w') as output_file:
for car, rides in car_rides.items():
output_string = str(len(rides))
for ride_id in rides:
output_string += ' {}'.format(ride_id)
output_file.write(output_string + '\n')
def end_rides(self):
self.debug_print('Checking if cars have arrived')
completed_cars = [
car for car in self.get_assigned_cars() if car.is_at_destination()
]
self.debug_print('{} cars completed their ride this turn'.format(len(completed_cars)))
for car in completed_cars:
self.debug_print('Car {} has completed their ride'.format(car.car_id))
car.complete_ride()
if car.assigned_route_completed():
car.complete_route()
self.debug_print('{}/{} rides completed'.format(
len(self.get_completed_rides()),
len(self.rides)
))
def get_completed_rides(self):
return [ride for ride in self.input_data.rides if ride.completed]
def schedule_rides(self):
unassigned_cars = self.get_unassigned_cars()
self.debug_print('Scheduling {} cars'.format(len(unassigned_cars)))
unassigned_rides = self.get_unassigned_rides()
if len(unassigned_rides) == 0:
return
for car in unassigned_cars:
# next_ride = unassigned_rides.pop(0)
unassigned_rides = self.get_unassigned_rides()
next_ride = self.get_next_ride(car, unassigned_rides, self.current_time)
rides_for_route = [next_ride]
route = Route(rides_for_route)
self.debug_print('Assigned route with ride IDs {} to car: {}'.format(
route.get_route_ride_ids(),
car.car_id
))
car.assign_route(route)
def get_closest_ride_to_car(self, car, rides):
closest_ride = rides[0]
closest_distance = calculate_distance(car.row, closest_ride.row_start, car.col, closest_ride.col_start)
for i in range(1, len(rides)):
ride = rides[i]
next_closest_distance = calculate_distance(car.row, ride.row_start, car.col, ride.col_start)
if next_closest_distance < closest_distance:
closest_ride = ride
closest_distance = next_closest_distance
return rides.pop(rides.index(closest_ride))
def move_cars(self):
for car in self.get_assigned_cars():
self.debug_print('Moving car with ID: {}'.format(car.car_id))
car.move_towards_destination()
def get_assigned_cars(self):
return [car for car in self.cars if car.assigned_route is not None]
def get_unassigned_cars(self):
return [car for car in self.cars if car.assigned_route is None]
def get_unassigned_rides(self):
return [ride for ride in self.input_data.rides if ride.assigned_car is None]
def set_next_routes(self, route, routes):
for t_route in routes:
            if t_route is not route:
                wait = t_route.ordered_rides[0].earliest_start - route.ordered_rides[-1].latest_finish
                if wait >= 0:
                    route.next_routes.append({'route': t_route, 'wait_time': wait})
def add_to_route(self, ride, next_ride, routes):
for route in routes:
start_ride = route.ordered_rides[0]
end_ride = route.ordered_rides[-1]
if start_ride is next_ride:
route.ordered_rides.insert(0, ride)
return routes
elif end_ride is ride:
route.ordered_rides.insert(-1, next_ride)
return routes
routes.append(Route([ride, next_ride]))
return routes
def get_next_ride(self, car, rides, actual_start_time):
# unassigned_rides = deepcopy(self.get_unassigned_rides())
best_ride = None
waiting = 0
for unassigned_ride in rides:
distance_to_next_ride = calculate_distance(car.row, unassigned_ride.row_start, car.col, unassigned_ride.col_start)
time_to_new_start = actual_start_time + distance_to_next_ride
if time_to_new_start + unassigned_ride.distance <= unassigned_ride.latest_finish:
temp_waiting = max(unassigned_ride.earliest_start - (time_to_new_start + unassigned_ride.distance), 0)
if best_ride is None or waiting > temp_waiting:
waiting = temp_waiting
best_ride = unassigned_ride
if waiting == 0:
return best_ride
return best_ride
if __name__ == '__main__':
file_names = [
'a_example.in',
'b_should_be_easy.in',
'c_no_hurry.in',
'd_metropolis.in',
'e_high_bonus.in'
]
for file_name in file_names:
print('Running: {}\n'.format(file_name))
input_parser = Input(file_name)
input_parser.read_file()
p = Process(input_data=input_parser, debug=False)
p.run()
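# Illustrative worked example of the feasibility / waiting check inside
# get_next_ride above (all values hypothetical): with actual_start_time = 10
# and distance_to_next_ride = 5, time_to_new_start = 15; a ride of distance 7
# with latest_finish = 30 is feasible because 15 + 7 = 22 <= 30, and with
# earliest_start = 25 its waiting value is max(25 - 22, 0) = 3.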
| 39.178771
| 126
| 0.610866
|
from GHC2018.input import Input
from GHC2018.models.Car import Car
from GHC2018.models.Route import Route
from GHC2018.models.ride import calculate_distance
from tqdm import tqdm
class Process:
def __init__(self, input_data, debug=True):
self.input_data = input_data
self.debug = debug
self.current_time = 0
def initialise_cars(self):
cars = []
for i in range(0, self.input_data.vehicle_count):
car = Car(i, 0, 0)
cars.append(car)
self.cars = cars
def debug_print(self, message):
if self.debug:
print(message)
def run(self):
self.initialise_cars()
self.rides = self.input_data.rides
sim_range = range(0, self.input_data.sim_steps)
if not self.debug:
sim_range = tqdm(sim_range)
for i in sim_range:
self.debug_print('--- STEP {}/{} ---'.format(i, self.input_data.sim_steps))
self.current_time = i
self.end_rides()
self.schedule_rides()
self.move_cars()
self.debug_print('SIMULATION ENDED')
print('{} rides completed. {} rides left unfinished.'.format(
len(self.get_completed_rides()),
len(self.rides) - len(self.get_completed_rides()))
)
self.output_file()
def output_file(self):
output_file_path = self.input_data.file_path.replace('.in', '.out')
car_rides = {}
for ride in self.get_completed_rides():
if ride.assigned_car in car_rides.keys():
car_rides[ride.assigned_car].append(ride.ride_id)
else:
car_rides[ride.assigned_car] = [ride.ride_id]
with open(output_file_path, 'w') as output_file:
for car, rides in car_rides.items():
output_string = str(len(rides))
for ride_id in rides:
output_string += ' {}'.format(ride_id)
output_file.write(output_string + '\n')
def end_rides(self):
self.debug_print('Checking if cars have arrived')
completed_cars = [
car for car in self.get_assigned_cars() if car.is_at_destination()
]
self.debug_print('{} cars completed their ride this turn'.format(len(completed_cars)))
for car in completed_cars:
self.debug_print('Car {} has completed their ride'.format(car.car_id))
car.complete_ride()
if car.assigned_route_completed():
car.complete_route()
self.debug_print('{}/{} rides completed'.format(
len(self.get_completed_rides()),
len(self.rides)
))
def get_completed_rides(self):
return [ride for ride in self.input_data.rides if ride.completed]
def schedule_rides(self):
unassigned_cars = self.get_unassigned_cars()
self.debug_print('Scheduling {} cars'.format(len(unassigned_cars)))
unassigned_rides = self.get_unassigned_rides()
if len(unassigned_rides) == 0:
return
for car in unassigned_cars:
unassigned_rides = self.get_unassigned_rides()
next_ride = self.get_next_ride(car, unassigned_rides, self.current_time)
rides_for_route = [next_ride]
route = Route(rides_for_route)
self.debug_print('Assigned route with ride IDs {} to car: {}'.format(
route.get_route_ride_ids(),
car.car_id
))
car.assign_route(route)
def get_closest_ride_to_car(self, car, rides):
closest_ride = rides[0]
closest_distance = calculate_distance(car.row, closest_ride.row_start, car.col, closest_ride.col_start)
for i in range(1, len(rides)):
ride = rides[i]
next_closest_distance = calculate_distance(car.row, ride.row_start, car.col, ride.col_start)
if next_closest_distance < closest_distance:
closest_ride = ride
closest_distance = next_closest_distance
return rides.pop(rides.index(closest_ride))
def move_cars(self):
for car in self.get_assigned_cars():
self.debug_print('Moving car with ID: {}'.format(car.car_id))
car.move_towards_destination()
def get_assigned_cars(self):
return [car for car in self.cars if car.assigned_route is not None]
def get_unassigned_cars(self):
return [car for car in self.cars if car.assigned_route is None]
def get_unassigned_rides(self):
return [ride for ride in self.input_data.rides if ride.assigned_car is None]
def set_next_routes(self, route, routes):
for t_route in routes:
            if t_route is not route:
                wait = t_route.ordered_rides[0].earliest_start - route.ordered_rides[-1].latest_finish
                if wait >= 0:
                    route.next_routes.append({'route': t_route, 'wait_time': wait})
def add_to_route(self, ride, next_ride, routes):
for route in routes:
start_ride = route.ordered_rides[0]
end_ride = route.ordered_rides[-1]
if start_ride is next_ride:
route.ordered_rides.insert(0, ride)
return routes
elif end_ride is ride:
route.ordered_rides.insert(-1, next_ride)
return routes
routes.append(Route([ride, next_ride]))
return routes
def get_next_ride(self, car, rides, actual_start_time):
best_ride = None
waiting = 0
for unassigned_ride in rides:
distance_to_next_ride = calculate_distance(car.row, unassigned_ride.row_start, car.col, unassigned_ride.col_start)
time_to_new_start = actual_start_time + distance_to_next_ride
if time_to_new_start + unassigned_ride.distance <= unassigned_ride.latest_finish:
temp_waiting = max(unassigned_ride.earliest_start - (time_to_new_start + unassigned_ride.distance), 0)
if best_ride is None or waiting > temp_waiting:
waiting = temp_waiting
best_ride = unassigned_ride
if waiting == 0:
return best_ride
return best_ride
if __name__ == '__main__':
file_names = [
'a_example.in',
'b_should_be_easy.in',
'c_no_hurry.in',
'd_metropolis.in',
'e_high_bonus.in'
]
for file_name in file_names:
print('Running: {}\n'.format(file_name))
input_parser = Input(file_name)
input_parser.read_file()
p = Process(input_data=input_parser, debug=False)
p.run()
| true
| true
|
f719878d7cf2f176cf391bedf04e4b2cfa47cc02
| 1,701
|
py
|
Python
|
app/core/migrations/0001_initial.py
|
SirEric-A/recipe-app-api
|
05a767fcb87f2ca47918698930d10f6e21654576
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
SirEric-A/recipe-app-api
|
05a767fcb87f2ca47918698930d10f6e21654576
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
SirEric-A/recipe-app-api
|
05a767fcb87f2ca47918698930d10f6e21654576
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.7 on 2020-06-18 21:50
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 50.029412
| 266
| 0.637272
|
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| true
| true
|
f71987f0e511820af63a6cf60ad703869664ef65
| 4,832
|
py
|
Python
|
.ycm_extra_conf.py
|
bigt1234/objectpool
|
dab515f71c12f8df22686053043f7e2c4c929354
|
[
"Zlib"
] | 66
|
2016-11-07T01:00:46.000Z
|
2022-03-13T01:25:54.000Z
|
.ycm_extra_conf.py
|
bigt1234/objectpool
|
dab515f71c12f8df22686053043f7e2c4c929354
|
[
"Zlib"
] | 1
|
2020-11-26T12:08:53.000Z
|
2021-09-24T01:06:49.000Z
|
.ycm_extra_conf.py
|
bigt1234/objectpool
|
dab515f71c12f8df22686053043f7e2c4c929354
|
[
"Zlib"
] | 19
|
2016-07-18T07:58:11.000Z
|
2022-03-13T01:24:07.000Z
|
#!/usr/bin/env python
#
# Copyright (C) 2014 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-fexceptions',
'-DNDEBUG',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
'-I', 'src',
'-I', 'thirdparty/nonius',
'-I', 'thirdparty/Catch',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in ['.h', '.hxx', '.hpp', '.hh', '.h++']
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
# This is the entry point; this function is called by ycmd to produce flags for
# a file.
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| 32.648649
| 79
| 0.708609
|
import os
import ycm_core
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-fexceptions',
'-DNDEBUG',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
'-I', 'src',
'-I', 'thirdparty/nonius',
'-I', 'thirdparty/Catch',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in ['.h', '.hxx', '.hpp', '.hh', '.h++']
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
# This is the entry point; this function is called by ycmd to produce flags for
# a file.
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| true
| true
|
f71988e1a677b3eb305af40560a0785370f713df
| 14,327
|
py
|
Python
|
oqupy/backends/tempo_backend.py
|
tempoCollaboration/OQuPy
|
a389a161991a59259e5df47d8e0f405fcac75fe5
|
[
"Apache-2.0"
] | 13
|
2022-02-15T12:33:17.000Z
|
2022-03-31T10:01:57.000Z
|
oqupy/backends/tempo_backend.py
|
tempoCollaboration/OQuPy
|
a389a161991a59259e5df47d8e0f405fcac75fe5
|
[
"Apache-2.0"
] | 11
|
2022-02-16T07:35:46.000Z
|
2022-03-24T18:22:12.000Z
|
oqupy/backends/tempo_backend.py
|
tempoCollaboration/OQuPy
|
a389a161991a59259e5df47d8e0f405fcac75fe5
|
[
"Apache-2.0"
] | 2
|
2022-02-17T01:23:55.000Z
|
2022-02-17T08:51:57.000Z
|
# Copyright 2020 The TEMPO Collaboration
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for tempo and mean-field tempo backend.
"""
from typing import Callable, Dict, Optional, Tuple
from copy import copy
from numpy import ndarray, moveaxis, dot
from oqupy import operators
from oqupy.config import TEMPO_BACKEND_CONFIG
from oqupy.backends import node_array as na
from oqupy.util import create_delta
class BaseTempoBackend:
"""
Backend class for TEMPO.
Parameters
----------
initial_state: ndarray
The initial density matrix (as a vector).
influence: callable(int) -> ndarray
Callable that takes an integer `step` and returns the influence super
operator of that `step`.
unitary_transform: ndarray
Unitary that transforms the coupling operator into a diagonal form.
sum_north: ndarray
The summing vector for the north legs.
sum_west: ndarray
The summing vector for the west legs.
dkmax: int
Number of influences to include. If ``dkmax == None`` then all
influences are included.
epsrel: float
Maximal relative SVD truncation error.
"""
def __init__(
self,
initial_state: ndarray,
influence: Callable[[int], ndarray],
unitary_transform: ndarray,
sum_north: ndarray,
sum_west: ndarray,
dkmax: int,
epsrel: float,
config: Optional[Dict] = None):
"""Create a TempoBackend object. """
self._initial_state = initial_state
self._influence = influence
self._unitary_transform = unitary_transform
self._sum_north = sum_north
self._sum_west = sum_west
self._dkmax = dkmax
self._epsrel = epsrel
self._step = None
self._state = None
self._config = TEMPO_BACKEND_CONFIG if config is None else config
self._mps = None
self._mpo = None
self._super_u = None
self._super_u_dagg = None
self._sum_north_na = None
@property
def step(self) -> int:
"""The current step in the TEMPO computation. """
return self._step
def _initialize_mps_mpo(self) :
"""ToDo"""
self._initial_state = copy(self._initial_state).reshape(-1)
self._super_u = operators.left_right_super(
self._unitary_transform,
self._unitary_transform.conjugate().T)
self._super_u_dagg = operators.left_right_super(
self._unitary_transform.conjugate().T,
self._unitary_transform)
self._sum_north_na = na.NodeArray([self._sum_north],
left=False,
right=False,
name="Sum north")
influences = []
if self._dkmax is None:
dkmax_pre_compute = 1
else:
dkmax_pre_compute = self._dkmax + 1
for i in range(dkmax_pre_compute):
infl = self._influence(i)
infl_four_legs = create_delta(infl, [1, 0, 0, 1])
if i == 0:
tmp = dot(moveaxis(infl_four_legs, 1, -1),
self._super_u_dagg)
tmp = moveaxis(tmp, -1, 1)
tmp = dot(tmp, self._super_u.T)
infl_four_legs = tmp
influences.append(infl_four_legs)
self._mps = na.NodeArray([self._initial_state],
left=False,
right=False,
name="Thee MPS")
self._mpo = na.NodeArray(list(reversed(influences)),
left=True,
right=True,
name="Thee Time Evolving MPO")
def _compute_system_step(self, current_step, prop_1, prop_2) -> ndarray:
"""
Takes a step in the TEMPO tensor network computation.
For example, for at step 4, we start with:
A ... self._mps
B ... self._mpo
w ... self._sum_west
n ... self._sum_north_array
p1 ... prop_1
p2 ... prop_2
n n n n
| | | |
| | | | |
w~~ ~~B~~B~~B~~B~~ ~~p2
| | | |
p1
| | | |
A~~A~~A~~A
return:
step = 4
state = contraction of A,B,w,n,p1
effects:
self._mpo will grow to the left with the next influence functional
self._mps will be contraction of A,B,w,p1,p2
Returns
-------
step: int
The current step count.
state: ndarray
Density matrix at the current step.
"""
prop_1_na = na.NodeArray([prop_1.T],
left=False,
right=False,
name="first half-step")
prop_2_na = na.NodeArray([prop_2.T],
left=True,
right=False,
name="second half-step")
if self._dkmax is None:
mpo = self._mpo.copy()
infl = self._influence(len(mpo))
infl_four_legs = create_delta(infl, [1, 0, 0, 1])
infl_na = na.NodeArray([infl_four_legs],
left=True,
right=True)
self._mpo = na.join(infl_na,
self._mpo,
name="The Time Evolving MPO",
copy=False)
elif current_step <= self._dkmax:
_, mpo = na.split(self._mpo,
int(0 - current_step),
copy=True)
else: # current_step > self._dkmax
mpo = self._mpo.copy()
infl = self._influence(self._dkmax-current_step)
if infl is not None:
infl_four_legs = create_delta(infl, [1, 0, 0, 1])
infl_na = na.NodeArray([infl_four_legs],
left=True,
right=True)
_, mpo = na.split(self._mpo,
index=1,
copy=True)
mpo = na.join(infl_na,
mpo,
name="Thee Time Evolving MPO",
copy=False)
mpo.name = "temporary MPO"
mpo.apply_vector(self._sum_west, left=True)
self._mps.zip_up(prop_1_na,
axes=[(0,0)],
left_index=-1,
right_index=-1,
direction="left",
max_singular_values=None,
max_truncation_err=self._epsrel,
relative=True,
copy=False)
if len(self._mps) != len(mpo):
self._mps.contract(self._sum_north_na,
axes=[(0,0)],
left_index=0,
right_index=0,
direction="right",
copy=True)
self._mps.zip_up(mpo,
axes=[(0, 0)],
left_index=0,
right_index=-1,
direction="right",
max_singular_values=None,
max_truncation_err=self._epsrel,
relative=True,
copy=False)
self._mps.svd_sweep(from_index=-1,
to_index=0,
max_singular_values=None,
max_truncation_err=self._epsrel,
relative=True)
self._mps = na.join(self._mps,
prop_2_na,
copy=False,
name=f"The MPS ({current_step})")
tmp_mps = self._mps.copy()
for _ in range(len(tmp_mps)-1):
tmp_mps.contract(self._sum_north_na,
axes=[(0,0)],
left_index=0,
right_index=0,
direction="right",
copy=True)
assert len(tmp_mps) == 1
assert not tmp_mps.left
assert not tmp_mps.right
assert tmp_mps.rank == 1
state = tmp_mps.nodes[0].get_tensor()
return state
class TempoBackend(BaseTempoBackend):
"""
ToDo
"""
def __init__(
self,
initial_state: ndarray,
influence: Callable[[int], ndarray],
unitary_transform: ndarray,
propagators: Callable[[int], Tuple[ndarray, ndarray]],
sum_north: ndarray,
sum_west: ndarray,
dkmax: int,
epsrel: float,
config: Optional[Dict] = None):
"""Create a TempoBackend object. """
super().__init__(
initial_state,
influence,
unitary_transform,
sum_north,
sum_west,
dkmax,
epsrel,
config)
self._propagators = propagators
def initialize(self)-> Tuple[int, ndarray]:
"""
ToDo
"""
self._step = 0
self._initialize_mps_mpo()
self._state = self._initial_state
return self._step, copy(self._state)
def compute_step(self) -> Tuple[int, ndarray]:
"""
ToDo
"""
self._step += 1
prop_1, prop_2 = self._propagators(self._step-1)
self._state = self._compute_system_step(self._step, prop_1, prop_2)
return self._step, copy(self._state)
class TempoWithFieldBackend(BaseTempoBackend):
"""
backend for tensor network tempo with coherent field evolution.
Note the only difference from TensorNetworkTempoBackend in the
signature is the addition of the initial_field and compute_field
parameters, and the change of the propagator signature.
Parameters
----------
initial_state: ndarray
The initial density matrix (as a vector).
initial_field: complex
The initial field value.
influence: callable(int) -> ndarray
Callable that takes an integer `step` and returns the influence super
operator of that `step`.
unitary_transform: ndarray
Unitary that transforms the coupling operator into a diagonal form.
propagators: callable(int, ndarray, complex) -> ndarray, ndarray
Callable that takes an integer `step`, an ndarray `state` and a complex
`field` and returns the first and second half of the system propagator
of that `step`.
compute_field: callable(int, ndarray, complex, ndarray) -> complex
Callable that takes an integer `step`, a complex `field` (the current
value of the field) and two ndarrays for (respectively) the current and
next density matrix as vectors, and returns the next field value.
sum_north: ndarray
The summing vector for the north legs.
sum_west: ndarray
The summing vector for the west legs.
dkmax: int
Number of influences to include. If ``dkmax == -1`` then all influences
are included.
epsrel: float
Maximal relative SVD truncation error.
"""
def __init__(
self,
initial_state: ndarray,
initial_field: ndarray,
influence: Callable[[int], ndarray],
unitary_transform: ndarray,
propagators: Callable[[int, ndarray, complex],
Tuple[ndarray, ndarray]],
compute_field: Callable[[float, ndarray, complex], complex],
sum_north: ndarray,
sum_west: ndarray,
dkmax: int,
epsrel: float,
config: Dict):
# Field specific variables
self._initial_field = initial_field
self._compute_field = compute_field
self._field = initial_field
self._propagators = propagators
"""Create a TempoWithFieldBackend object. """
super().__init__(initial_state,
influence,
unitary_transform,
sum_north,
sum_west,
dkmax,
epsrel,
config)
def initialize(self) -> Tuple[int, ndarray, complex]:
"""See BaseBackend.initialize() for main docstring."""
self._step = 0
self._initialize_mps_mpo()
self._state = self._initial_state
self._field = self._initial_field
return self._step, copy(self._state), self._field
def compute_step(self) -> Tuple[int, ndarray, complex]:
"""
ToDo
"""
current_step = self._step
next_step = current_step + 1
current_state = copy(self._state)
current_field = self._field
prop_1, prop_2 = self._propagators(current_step, current_state,
current_field)
next_state = self._compute_system_step(next_step, prop_1, prop_2)
next_field = self._compute_field(current_step, current_state,
current_field, next_state)
self._state = next_state
self._field = next_field
self._step = next_step
return self._step, copy(self._state), self._field
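# Illustrative only: a minimal driver sketch using just the initialize() /
# compute_step() interface defined above. `backend` is assumed to be an
# already-constructed TempoWithFieldBackend and `num_steps` a hypothetical
# number of time steps; a TempoBackend can be driven the same way, minus the
# field value.
def _run_with_field_sketch(backend, num_steps):
    step, state, field = backend.initialize()
    states, fields = [state], [field]
    while step < num_steps:
        step, state, field = backend.compute_step()
        states.append(state)
        fields.append(field)
    return states, fields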
| 35.8175
| 79
| 0.520346
|
from typing import Callable, Dict, Optional, Tuple
from copy import copy
from numpy import ndarray, moveaxis, dot
from oqupy import operators
from oqupy.config import TEMPO_BACKEND_CONFIG
from oqupy.backends import node_array as na
from oqupy.util import create_delta
class BaseTempoBackend:
def __init__(
self,
initial_state: ndarray,
influence: Callable[[int], ndarray],
unitary_transform: ndarray,
sum_north: ndarray,
sum_west: ndarray,
dkmax: int,
epsrel: float,
config: Optional[Dict] = None):
self._initial_state = initial_state
self._influence = influence
self._unitary_transform = unitary_transform
self._sum_north = sum_north
self._sum_west = sum_west
self._dkmax = dkmax
self._epsrel = epsrel
self._step = None
self._state = None
self._config = TEMPO_BACKEND_CONFIG if config is None else config
self._mps = None
self._mpo = None
self._super_u = None
self._super_u_dagg = None
self._sum_north_na = None
@property
def step(self) -> int:
return self._step
def _initialize_mps_mpo(self) :
self._initial_state = copy(self._initial_state).reshape(-1)
self._super_u = operators.left_right_super(
self._unitary_transform,
self._unitary_transform.conjugate().T)
self._super_u_dagg = operators.left_right_super(
self._unitary_transform.conjugate().T,
self._unitary_transform)
self._sum_north_na = na.NodeArray([self._sum_north],
left=False,
right=False,
name="Sum north")
influences = []
if self._dkmax is None:
dkmax_pre_compute = 1
else:
dkmax_pre_compute = self._dkmax + 1
for i in range(dkmax_pre_compute):
infl = self._influence(i)
infl_four_legs = create_delta(infl, [1, 0, 0, 1])
if i == 0:
tmp = dot(moveaxis(infl_four_legs, 1, -1),
self._super_u_dagg)
tmp = moveaxis(tmp, -1, 1)
tmp = dot(tmp, self._super_u.T)
infl_four_legs = tmp
influences.append(infl_four_legs)
self._mps = na.NodeArray([self._initial_state],
left=False,
right=False,
name="Thee MPS")
self._mpo = na.NodeArray(list(reversed(influences)),
left=True,
right=True,
name="Thee Time Evolving MPO")
def _compute_system_step(self, current_step, prop_1, prop_2) -> ndarray:
prop_1_na = na.NodeArray([prop_1.T],
left=False,
right=False,
name="first half-step")
prop_2_na = na.NodeArray([prop_2.T],
left=True,
right=False,
name="second half-step")
if self._dkmax is None:
mpo = self._mpo.copy()
infl = self._influence(len(mpo))
infl_four_legs = create_delta(infl, [1, 0, 0, 1])
infl_na = na.NodeArray([infl_four_legs],
left=True,
right=True)
self._mpo = na.join(infl_na,
self._mpo,
name="The Time Evolving MPO",
copy=False)
elif current_step <= self._dkmax:
_, mpo = na.split(self._mpo,
int(0 - current_step),
copy=True)
else:
mpo = self._mpo.copy()
infl = self._influence(self._dkmax-current_step)
if infl is not None:
infl_four_legs = create_delta(infl, [1, 0, 0, 1])
infl_na = na.NodeArray([infl_four_legs],
left=True,
right=True)
_, mpo = na.split(self._mpo,
index=1,
copy=True)
mpo = na.join(infl_na,
mpo,
name="Thee Time Evolving MPO",
copy=False)
mpo.name = "temporary MPO"
mpo.apply_vector(self._sum_west, left=True)
self._mps.zip_up(prop_1_na,
axes=[(0,0)],
left_index=-1,
right_index=-1,
direction="left",
max_singular_values=None,
max_truncation_err=self._epsrel,
relative=True,
copy=False)
if len(self._mps) != len(mpo):
self._mps.contract(self._sum_north_na,
axes=[(0,0)],
left_index=0,
right_index=0,
direction="right",
copy=True)
self._mps.zip_up(mpo,
axes=[(0, 0)],
left_index=0,
right_index=-1,
direction="right",
max_singular_values=None,
max_truncation_err=self._epsrel,
relative=True,
copy=False)
self._mps.svd_sweep(from_index=-1,
to_index=0,
max_singular_values=None,
max_truncation_err=self._epsrel,
relative=True)
self._mps = na.join(self._mps,
prop_2_na,
copy=False,
name=f"The MPS ({current_step})")
tmp_mps = self._mps.copy()
for _ in range(len(tmp_mps)-1):
tmp_mps.contract(self._sum_north_na,
axes=[(0,0)],
left_index=0,
right_index=0,
direction="right",
copy=True)
assert len(tmp_mps) == 1
assert not tmp_mps.left
assert not tmp_mps.right
assert tmp_mps.rank == 1
state = tmp_mps.nodes[0].get_tensor()
return state
class TempoBackend(BaseTempoBackend):
def __init__(
self,
initial_state: ndarray,
influence: Callable[[int], ndarray],
unitary_transform: ndarray,
propagators: Callable[[int], Tuple[ndarray, ndarray]],
sum_north: ndarray,
sum_west: ndarray,
dkmax: int,
epsrel: float,
config: Optional[Dict] = None):
super().__init__(
initial_state,
influence,
unitary_transform,
sum_north,
sum_west,
dkmax,
epsrel,
config)
self._propagators = propagators
def initialize(self)-> Tuple[int, ndarray]:
self._step = 0
self._initialize_mps_mpo()
self._state = self._initial_state
return self._step, copy(self._state)
def compute_step(self) -> Tuple[int, ndarray]:
self._step += 1
prop_1, prop_2 = self._propagators(self._step-1)
self._state = self._compute_system_step(self._step, prop_1, prop_2)
return self._step, copy(self._state)
class TempoWithFieldBackend(BaseTempoBackend):
def __init__(
self,
initial_state: ndarray,
initial_field: ndarray,
influence: Callable[[int], ndarray],
unitary_transform: ndarray,
propagators: Callable[[int, ndarray, complex],
Tuple[ndarray, ndarray]],
compute_field: Callable[[float, ndarray, complex], complex],
sum_north: ndarray,
sum_west: ndarray,
dkmax: int,
epsrel: float,
config: Dict):
self._initial_field = initial_field
self._compute_field = compute_field
self._field = initial_field
self._propagators = propagators
super().__init__(initial_state,
influence,
unitary_transform,
sum_north,
sum_west,
dkmax,
epsrel,
config)
def initialize(self) -> Tuple[int, ndarray, complex]:
self._step = 0
self._initialize_mps_mpo()
self._state = self._initial_state
self._field = self._initial_field
return self._step, copy(self._state), self._field
def compute_step(self) -> Tuple[int, ndarray, complex]:
current_step = self._step
next_step = current_step + 1
current_state = copy(self._state)
current_field = self._field
prop_1, prop_2 = self._propagators(current_step, current_state,
current_field)
next_state = self._compute_system_step(next_step, prop_1, prop_2)
next_field = self._compute_field(current_step, current_state,
current_field, next_state)
self._state = next_state
self._field = next_field
self._step = next_step
return self._step, copy(self._state), self._field
| true
| true
|
f71988f8e6cbe49da433af143788c3ecc8e82b65
| 446
|
py
|
Python
|
setup.py
|
Moomoo-pls/NLP_Game_of_Life
|
afe6bb6ccd4a83b6ffeccc8ac257872251bd39bb
|
[
"MIT"
] | null | null | null |
setup.py
|
Moomoo-pls/NLP_Game_of_Life
|
afe6bb6ccd4a83b6ffeccc8ac257872251bd39bb
|
[
"MIT"
] | null | null | null |
setup.py
|
Moomoo-pls/NLP_Game_of_Life
|
afe6bb6ccd4a83b6ffeccc8ac257872251bd39bb
|
[
"MIT"
] | null | null | null |
import setuptools
setuptools.setup(
name="Moo_NLP_Game_of_Life",
version="1.0.0",
author="Stephen Moo-Young",
author_email="mooyoung12@gmail.com",
description="Game of Life for the take home coding challenge",
url="https://github.com/Moomoo-pls/NLP_Game_of_Life",
packages=setuptools.find_packages(),
entry_points={
'console_scripts':[
'game-of-life=Game_of_Life.main:main',
]
},
)
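# Illustrative only: after installation (for example `pip install .`), the
# console_scripts entry point above exposes a `game-of-life` command that
# dispatches to Game_of_Life.main:main.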
| 27.875
| 66
| 0.663677
|
import setuptools
setuptools.setup(
name="Moo_NLP_Game_of_Life",
version="1.0.0",
author="Stephen Moo-Young",
author_email="mooyoung12@gmail.com",
description="Game of Life for the take home coding challenge",
url="https://github.com/Moomoo-pls/NLP_Game_of_Life",
packages=setuptools.find_packages(),
entry_points={
'console_scripts':[
'game-of-life=Game_of_Life.main:main',
]
},
)
| true
| true
|
f719891884a715f4ed60d4d29e0a80d1b2c17515
| 8,422
|
py
|
Python
|
2_data_collection/CIFAR_10/vgg16_CIFAR10.py
|
j-chan-hkust/deep_testing_of_advanced_learning_systems
|
ec535e2b4dc489d407b664a138d3f5262b71d21e
|
[
"MIT"
] | null | null | null |
2_data_collection/CIFAR_10/vgg16_CIFAR10.py
|
j-chan-hkust/deep_testing_of_advanced_learning_systems
|
ec535e2b4dc489d407b664a138d3f5262b71d21e
|
[
"MIT"
] | null | null | null |
2_data_collection/CIFAR_10/vgg16_CIFAR10.py
|
j-chan-hkust/deep_testing_of_advanced_learning_systems
|
ec535e2b4dc489d407b664a138d3f5262b71d21e
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras import optimizers
import numpy as np
from keras.layers.core import Lambda
from keras import backend as K
from keras import regularizers
class cifar10vgg:
def __init__(self,train=True):
self.num_classes = 10
self.weight_decay = 0.0005
self.x_shape = [32,32,3]
self.model = self.build_model()
if train:
self.model = self.train(self.model)
else:
self.model.load_weights('cifar10vgg.h5')
def build_model(self):
# Build the network of vgg for 10 classes with massive dropout and weight decay as described in the paper.
model = Sequential()
weight_decay = self.weight_decay
model.add(Conv2D(64, (3, 3), padding='same',
input_shape=self.x_shape,kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Conv2D(64, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(512,kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(self.num_classes))
model.add(Activation('softmax'))
return model
def normalize(self,X_train,X_test):
        # this function normalizes inputs to zero mean and unit variance
        # it is used when training a model.
        # Input: training set and test set
        # Output: normalized training set and test set according to the training set statistics.
mean = np.mean(X_train,axis=(0,1,2,3))
std = np.std(X_train, axis=(0, 1, 2, 3))
X_train = (X_train-mean)/(std+1e-7)
X_test = (X_test-mean)/(std+1e-7)
return X_train, X_test
def normalize_production(self,x):
        # this function is used to normalize instances in production according to saved training set statistics
        # Input: X - a training set
        # Output: X - a normalized training set according to the normalization constants.
        # these values were produced during the first training and are general for the standard cifar10 training set normalization
mean = 120.707
std = 64.15
return (x-mean)/(std+1e-7)
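    # Hedged note (added for clarity, not part of the original script): the hard-coded
    # mean/std above are presumably the raw-pixel statistics of the CIFAR-10 training
    # set, i.e. roughly what the following would produce:
    #   (x_train, _), _ = cifar10.load_data()
    #   mean = np.mean(x_train.astype('float32'))   # ~120.707
    #   std = np.std(x_train.astype('float32'))     # ~64.15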
def predict(self,x,normalize=True,batch_size=50):
if normalize:
x = self.normalize_production(x)
return self.model.predict(x,batch_size)
def train(self,model):
model.load_weights("cifar10vgg.h5")
#training parameters
batch_size = 128
maxepoches = 250
learning_rate = 0.01
lr_decay = 1e-6
lr_drop = 20
# The data, shuffled and split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train, x_test = self.normalize(x_train, x_test)
y_train = keras.utils.to_categorical(y_train, self.num_classes)
y_test = keras.utils.to_categorical(y_test, self.num_classes)
def lr_scheduler(epoch):
return learning_rate * (0.5 ** (epoch // lr_drop))
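        # Illustration of this schedule with lr_drop = 20 and learning_rate = 0.01:
        # epochs 0-19 -> 0.01, epochs 20-39 -> 0.005, epochs 40-59 -> 0.0025, and so on.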
reduce_lr = keras.callbacks.LearningRateScheduler(lr_scheduler)
#data augmentation
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=15, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=False) # randomly flip images
        # Compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(x_train)
#optimization details
sgd = optimizers.SGD(lr=learning_rate, decay=lr_decay, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=['accuracy'])
        # training process; the learning rate is halved every 20 epochs (lr_drop).
historytemp = model.fit_generator(datagen.flow(x_train, y_train,
batch_size=batch_size),
steps_per_epoch=x_train.shape[0] // batch_size,
epochs=maxepoches,
validation_data=(x_test, y_test),callbacks=[reduce_lr],verbose=2)
model.save_weights('cifar10vgg.h5')
return model
if __name__ == '__main__':
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
model = cifar10vgg()
predicted_x = model.predict(x_test)
residuals = np.argmax(predicted_x,1)!=np.argmax(y_test,1)
loss = sum(residuals)/len(residuals)
print("the validation 0/1 loss is: ",loss)
| 39.172093
| 120
| 0.65412
|
from __future__ import print_function
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras import optimizers
import numpy as np
from keras.layers.core import Lambda
from keras import backend as K
from keras import regularizers
class cifar10vgg:
def __init__(self,train=True):
self.num_classes = 10
self.weight_decay = 0.0005
self.x_shape = [32,32,3]
self.model = self.build_model()
if train:
self.model = self.train(self.model)
else:
self.model.load_weights('cifar10vgg.h5')
def build_model(self):
model = Sequential()
weight_decay = self.weight_decay
model.add(Conv2D(64, (3, 3), padding='same',
input_shape=self.x_shape,kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Conv2D(64, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(512,kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(self.num_classes))
model.add(Activation('softmax'))
return model
def normalize(self,X_train,X_test):
mean = np.mean(X_train,axis=(0,1,2,3))
std = np.std(X_train, axis=(0, 1, 2, 3))
X_train = (X_train-mean)/(std+1e-7)
X_test = (X_test-mean)/(std+1e-7)
return X_train, X_test
def normalize_production(self,x):
mean = 120.707
std = 64.15
return (x-mean)/(std+1e-7)
def predict(self,x,normalize=True,batch_size=50):
if normalize:
x = self.normalize_production(x)
return self.model.predict(x,batch_size)
def train(self,model):
model.load_weights("cifar10vgg.h5")
batch_size = 128
maxepoches = 250
learning_rate = 0.01
lr_decay = 1e-6
lr_drop = 20
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train, x_test = self.normalize(x_train, x_test)
y_train = keras.utils.to_categorical(y_train, self.num_classes)
y_test = keras.utils.to_categorical(y_test, self.num_classes)
def lr_scheduler(epoch):
return learning_rate * (0.5 ** (epoch // lr_drop))
reduce_lr = keras.callbacks.LearningRateScheduler(lr_scheduler)
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=15,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True,
vertical_flip=False)
datagen.fit(x_train)
sgd = optimizers.SGD(lr=learning_rate, decay=lr_decay, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=['accuracy'])
historytemp = model.fit_generator(datagen.flow(x_train, y_train,
batch_size=batch_size),
steps_per_epoch=x_train.shape[0] // batch_size,
epochs=maxepoches,
validation_data=(x_test, y_test),callbacks=[reduce_lr],verbose=2)
model.save_weights('cifar10vgg.h5')
return model
if __name__ == '__main__':
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
model = cifar10vgg()
predicted_x = model.predict(x_test)
residuals = np.argmax(predicted_x,1)!=np.argmax(y_test,1)
loss = sum(residuals)/len(residuals)
print("the validation 0/1 loss is: ",loss)
| true
| true
|
f719892d08f0cb15a072c2fb5acf64d76d3bd3a3
| 31,288
|
py
|
Python
|
scraps/forcefield_v2.py
|
kul-group/MAZE-sim
|
0f85e74bf93f9242a73bcfaa20a593ae966f38fa
|
[
"MIT"
] | 13
|
2021-03-10T18:40:32.000Z
|
2022-03-21T20:40:57.000Z
|
scraps/forcefield_v2.py
|
kul-group/MAZE-sim
|
0f85e74bf93f9242a73bcfaa20a593ae966f38fa
|
[
"MIT"
] | 27
|
2021-01-28T23:18:44.000Z
|
2021-05-06T19:33:09.000Z
|
scraps/forcefield_v2.py
|
kul-group/MAZE-sim
|
0f85e74bf93f9242a73bcfaa20a593ae966f38fa
|
[
"MIT"
] | 4
|
2021-03-19T20:46:15.000Z
|
2022-03-21T20:40:59.000Z
|
from maze.extra_framework_maker import ExtraFrameworkMaker, ExtraFrameworkAnalyzer
from maze.io_zeolite import read_vasp
from maze.zeolite import PerfectZeolite, Zeolite
from ase.neighborlist import natural_cutoffs, NeighborList
import os
from pathlib import Path
from ase.io import write, read, gromacs, proteindatabank
from ase.visualize import view
import copy
import shutil
from glob import glob
from ase.constraints import FixAtoms
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from sys import stdout
from ase.geometry.analysis import Analysis
import numpy as np
from itertools import permutations
from lxml import etree
from contextlib import closing
from collections import OrderedDict
from scipy.optimize import least_squares, minimize
import matplotlib.pyplot as plt
from statistics import mode
import pickle
import time
from ase.data import atomic_masses, atomic_numbers
def get_EF_atom_indices(atoms):
"""
    for index tracking, to ensure we are comparing the DFT and FF forces on the same EF atoms before and after
    scooping out the smaller cluster.
    Also used for recentering the cluster based on the EF-O atom.
"""
TM_list = ['Pt', 'Cu', 'Co', 'Pd', 'Fe', 'Cr', 'Rh', 'Ru']
index_EF_TM = [a.index for a in atoms if a.symbol in TM_list]
index_Al = [a.index for a in atoms if a.symbol == 'Al']
nl = NeighborList(natural_cutoffs(atoms), bothways=True, self_interaction=False)
nl.update(atoms)
Al_neigh_list = np.concatenate((nl.get_neighbors(index_Al[0])[0], nl.get_neighbors(index_Al[1])[0]))
Al_neigh_list = [x for x in Al_neigh_list if atoms[x].symbol == 'O']
TM_neigh_list = np.concatenate((nl.get_neighbors(index_EF_TM[0])[0], nl.get_neighbors(index_EF_TM[1])[0]))
centering_o = [[x for x in TM_neigh_list if list(TM_neigh_list).count(x) > 1 and x not in Al_neigh_list][0]]
return index_EF_TM + centering_o
def get_capped_cluster(atoms, folder_path, file_name, save_traj, EF_O_index):
""" #TODO: check whether capping is necessary
Inconsistent capping (remove all caps for now, does not need this cluster to be physical)
Possible fix: change mult in neighbor list
Extract smaller cluster containing the extra-framework atoms and cap all the O. Then the capped cluster is moved
to the center of the cell to avoid boundary issue.
Save cluster in both .traj file and .pdb format.
:param atoms:
:param folder_path:
:param file_name:
    :param save_traj: if True, save clusters into .traj as well, for later comparison and troubleshooting
    :param EF_O_index: if not None, use this value; otherwise the index is found using the Extraframework code
    :return: 1. EF-cluster including 13 atoms, index of the EF atoms in the original zeolite, index of the EF atoms in
    the current cluster (the latter two output index lists share the same ordering)
"""
EFMaker = ExtraFrameworkAnalyzer(atoms)
cluster = atoms[[index for index in EFMaker.get_extraframework_cluster(EF_O_index)]]
cluster_EF_index = get_EF_atom_indices(cluster)
centering_pos = cluster.get_positions()[cluster_EF_index[-1]]
recentered_cluster = EFMaker.recentering_atoms(cluster, centering_pos)[0]
# FIXME: recentering doesn't work well for very small unit cells. eg. SOD
# cluster = Zeolite(cluster).cap_atoms()
proteindatabank.write_proteindatabank(folder_path + '/%s.pdb' % file_name, recentered_cluster)
if save_traj is True:
write(folder_path + '/%s.traj' % file_name, recentered_cluster)
return cluster, EFMaker.get_extraframework_cluster(EF_O_index), cluster_EF_index
def label_pdb(folder_path, file_name, del_unlabeled_pdb):
"""
    Relabel the atom names in the proteindatabank file (required step for openMM).
    Atoms of the same type connecting to different neighboring types are treated differently due to differences in
    their chemical environments, and are therefore named separately.
:param folder_path:
:param file_name:
:param del_unlabeled_pdb:
"""
filein = open(folder_path + '/%s.pdb' % file_name, 'r')
fileout = open(folder_path + '/%s_labeled.pdb' % file_name, 'w')
name_list = []
for line in filein.readlines():
if line.startswith('ATOM') or line.startswith('HETATM'):
name = line[12:16].strip()
name_list.append(name)
name = name + str(name_list.count(name))
name = name.rjust(4)
line = line.replace(line[12:16], name, 1)
# only replacing the first occurrence of line[12:16], atomic symbols are maintained
fileout.writelines(line)
filein.close()
fileout.close()
if del_unlabeled_pdb is True:
os.remove(folder_path + '/%s.pdb' % file_name)
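# Illustration of the relabeling above (added for clarity, not from the original file):
# repeated atom names are numbered in order of appearance, e.g. successive "CU" entries
# become "CU1", "CU2", ... and "O" entries become "O1", "O2", ..., giving each atom a
# unique name that the generated force field xml can refer to as its own atom type.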
def get_bonds(cluster, mult=1, excluded_index=None, excluded_pair=None):
"""
    Use ase.geometry.analysis.Analysis to get all bonds, then remove the repeated ones.
    The function also allows removing certain bonding pairs defined by the user (excluded_pair),
    or removing pairs that include certain atomic indices (excluded_index).
:param cluster:
:param mult:
:param excluded_index: list of integers
:param excluded_pair: list of lists
:return: full bonding list, shortened list.
If both excluded_index and excluded_pair are None, bonding list == shortened list
"""
if excluded_index is None:
excluded_index = []
if excluded_pair is None:
excluded_pair = []
nl = NeighborList(natural_cutoffs(cluster, mult=mult), bothways=True, self_interaction=False)
nl.update(cluster)
bond_list, shortened_list = [], []
for count, indices in enumerate(Analysis(cluster, nl=nl).all_bonds[0]):
for index in indices:
if [count, index] not in bond_list and [index, count] not in bond_list:
bond_list.append([count, index])
for bond in bond_list:
if all(single_index not in bond for single_index in excluded_index) and \
all(tuple(bond) not in list(permutations(pair)) for pair in excluded_pair):
shortened_list.append(bond)
return bond_list, shortened_list
def get_angles(cluster, mult=1, excluded_index=None, excluded_pair=None):
"""
#TODO: consider combining get_bonds and get_angles function
    The ase.geometry.analysis.Analysis.unique_angles function does not work, so all angles are returned here.
    Angles describe the three-body interactions.
:param excluded_pair: excluding all [particle1, particle2, particle3] lists involving the excluded pair
"""
if excluded_index is None:
excluded_index = []
if excluded_pair is None:
excluded_pair = []
nl = NeighborList(natural_cutoffs(cluster, mult=mult), bothways=True, self_interaction=False)
nl.update(cluster)
angle_list, shortened_list = [], []
for count, indices in enumerate(Analysis(cluster, nl=nl).all_angles[0]):
for index in indices:
if all(list(val) not in angle_list for val in list(permutations([count, index[0], index[1]]))):
angle_list.append([count, index[0], index[1]])
for angle in angle_list:
if all(single_index not in angle for single_index in excluded_index) and \
all(list(value) not in excluded_pair for value in list(permutations(angle, 2))):
shortened_list.append(angle)
return angle_list, shortened_list
def write_xml(atoms, bonds, save_as):
# on-the-fly generation of force field xml file, matching atoms and bonds with pdb file
root = etree.Element('ForceField')
xml_section = etree.SubElement(root, "AtomTypes")
for atom in atoms:
element_type = ''.join(filter(lambda x: not x.isdigit(), atom.name))
# properties = {'name': atom.name, 'class': atom.name, 'element': element_type, 'mass': str(atomic_mass)}
if element_type == 'Cu' or atom.name == 'O9':
atomic_mass = atomic_masses[atomic_numbers[element_type]]
else:
atomic_mass = 0.0
properties = {'name': atom.name, 'class': atom.name, 'element': element_type, 'mass': str(atomic_mass)}
etree.SubElement(xml_section, 'Type', **properties)
xml_section = etree.SubElement(root, 'Residues')
xml_residue = etree.SubElement(xml_section, 'Residue', name='MOL')
for atom in atoms:
etree.SubElement(xml_residue, 'Atom', name=atom.name, type=atom.name)
for bond in bonds:
etree.SubElement(xml_residue, 'Bond', atomName1=bond[0].name, atomName2=bond[1].name)
tree = etree.ElementTree(root)
xml = etree.tostring(tree, pretty_print=True).decode('utf-8')
with closing(open(save_as, 'w')) as f:
f.write(xml)
def check_atom_types(cluster, index):
""" assign atom types, same element connected to different neighbors are assigned into different classes.
For example, extra-framework O (in Cu-O-Cu) is in a different class from framework O (Si-O-Si). Each class
assignment is unique (each atom belongs to one class and one class only).
O_EF: extra-framework O
O-Cu: framework O, connecting to one T-site(Al) and Cu
O-H: framework O, connecting to one T-site(Al) and H (capping)
"""
nl = NeighborList(natural_cutoffs(cluster), bothways=True, self_interaction=False)
nl.update(cluster)
class_Al = [atom.index for atom in cluster if atom.symbol == 'Al']
class_Cu = [atom.index for atom in cluster if atom.symbol == 'Cu']
class_H = [atom.index for atom in cluster if atom.symbol == 'H']
class_O_EF = [get_EF_atom_indices(cluster)[-1]]
class_O_Cu = [atom.index for atom in cluster if atom.symbol == 'O' and atom.index not in class_O_EF and
all(val not in class_H for val in nl.get_neighbors(atom.index)[0])]
class_O_H = [atom.index for atom in cluster if atom.symbol == 'O' and atom.index not in class_O_EF + class_O_Cu]
if index in class_Al:
return 'Al'
if index in class_Cu:
return 'Cu'
if index in class_H:
return 'H'
if index in class_O_EF:
return 'O-EF'
if index in class_O_Cu:
return 'O-Cu'
if index in class_O_H:
return 'O-H'
else:
return 'None'
def get_property_types(cluster, property_list):
""" assign all bonding pairs or angles into different types based on differences in atom types. For example,
O(extra-framework)-Cu is different from O(framework)-Cu.
:param property_list: bond or angle index list of the cluster of interests
:return type_dict: return a dictionary of all unique bond-pairs or angle types, with "keys" being integers starting
from 0, and "values" being a list of two atom types string for bonds or three atom types string for angles.
eg. {0: [AtomClass1, AtomClass2], 1: [AtomClass1, AtomClass3], ...} for bonds
Note: Bond types such as [AtomClass1, AtomClass2] and [AtomClass2, AtomClass1] are considered the same. Same rules
also apply for angles.
:return whole_type_list: return the entire list of bond or angle types assignment of the input.
len(whole_type_list) = len(my_list)
"""
type_dict, repeated_list, whole_type_list, count = {}, [], [], 0
for items in property_list:
my_list = []
for val in items:
my_list.append(check_atom_types(cluster, val))
whole_type_list.append(my_list)
if all(list(pair) not in repeated_list for pair in list(permutations(my_list))):
repeated_list.append(my_list)
type_dict[count] = my_list
count += 1
return type_dict, whole_type_list
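# Hypothetical example of the two return values of get_property_types, using the atom
# classes defined in check_atom_types (illustrative only; keys and ordering depend on the cluster):
#   type_dict       -> {0: ['O-Cu', 'Cu'], 1: ['O-EF', 'Cu'], 2: ['Al', 'Cu'], ...}
#   whole_type_list -> one entry per item in property_list, e.g. [['O-Cu', 'Cu'], ['O-EF', 'Cu'], ...]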
def _get_index_dict(type_dict, whole_type_list, index_list):
""" assign bond pairs or angles indices into different bond or angle types, all the pairs or angles within the same
types will share the same set of force field parameters.
:param type_dict:
:param whole_type_list:
:param index_list:
:return index_dict: return a dictionary of all bond-pairs or angle indices for each unique bond or angle type,
    using the same keys as type_dict.
"""
index_dict = {}
for key, value in type_dict.items():
temp_list = []
for count, items in enumerate(whole_type_list):
if any(list(pair) == value for pair in list(permutations(items))):
temp_list.append(index_list[count])
index_dict[key] = temp_list
return index_dict
def get_type_index_pair(type_dict, whole_type_list, index_list):
""" write bond_type and bond_index into a single dictionary; can use tuples as dictionary key, not lists
:param type_dict:
:param whole_type_list:
:param index_list:
"""
bond_index_dict = _get_index_dict(type_dict, whole_type_list, index_list)
type_index_dict = {}
for key, value in type_dict.items():
type_index_dict[tuple(value)] = bond_index_dict[key]
return type_index_dict
def pretty_print(my_dict):
""" for better visualization of the bond (or angle) types and bond (or angle) indices that belong to certain types.
"""
for key, value in my_dict.items():
print(key, '-->', value)
def shorten_index_list_by_types(type_index_dict, exclude_atom_type=None, exclude_property_type=None,
include_property_type=None, case=0):
"""
allow excluding certain property types or only including certain types
"""
if exclude_atom_type is not None and exclude_property_type is None:
case = 1
if exclude_property_type is not None and exclude_atom_type is None:
case = 2
if exclude_property_type is not None and exclude_atom_type is not None:
case = 3
if include_property_type is not None:
case = 4
shortened_list = []
for type_list, index_list in type_index_dict.items():
if case == 1 and all(single_type not in type_list for single_type in exclude_atom_type):
shortened_list.extend(index_list)
elif case == 2 and all(list(value) not in exclude_property_type for value in list(permutations(type_list))):
shortened_list.extend(index_list)
elif case == 3 and all(single_type not in type_list for single_type in exclude_atom_type) and \
all(list(value) not in exclude_property_type for value in list(permutations(type_list))):
shortened_list.extend(index_list)
elif case == 4 and any(list(value) in include_property_type for value in list(permutations(type_list))):
shortened_list.extend(index_list)
return shortened_list
def set_up_openMM_system(folder_path, cluster_tag_number, shortened_bond_list):
""" Feed pdb topology file and xml force field file into openMM, generate a system for the MD simulation/force
calculation.
:param folder_path:
:param cluster_tag_number:
:param shortened_bond_list:
:return pdb:
:return system:
"""
pdb = PDBFile(folder_path + '/cluster_%s_labeled.pdb' % cluster_tag_number)
atoms = list(pdb.topology.atoms())
for index in shortened_bond_list:
pdb.topology.addBond(atoms[index[0]], atoms[index[1]])
bonds = list(pdb.topology.bonds())
write_xml(atoms, bonds, folder_path + '/forcefield.xml')
FF = ForceField(folder_path + '/forcefield.xml')
system = FF.createSystem(pdb.topology)
return pdb, system
def custom_openMM_force_object(system, bond_list, bond_type_index_dict, bond_param_dict, angle_list=None,
angle_type_index_dict=None, angle_param_dict=None):
""" #todo: add argument allowing this custom function to be fed in as an input (more flexible used-designed ff)
:param bond_list: list to be included into force field
:param angle_list:
:param bond_type_index_dict: {(type): [index], ...}
:param angle_type_index_dict:
:param bond_param_dict: {(type): [param], ...} Note: parameters here uses the standard units, kJ, nm, ...
:param angle_param_dict:
:return system: openMM system with custom forces added onto it
"""
force = CustomBondForce("D*(1-exp(-alpha*(r-r0)))^2") # Morse bond
force.addPerBondParameter("D")
force.addPerBondParameter("alpha")
force.addPerBondParameter("r0")
force.setUsesPeriodicBoundaryConditions(periodic=True)
for bond in bond_list:
for my_type, my_index in bond_type_index_dict.items():
if any(list(val) in my_index for val in list(permutations(bond))):
try:
force.addBond(int(bond[0]), int(bond[1]), bond_param_dict.get(my_type))
except:
my_type = tuple(reversed(my_type))
force.addBond(int(bond[0]), int(bond[1]), bond_param_dict.get(my_type))
# note: consider updating the info_dict to make it order insensitive
system.addForce(force)
force = HarmonicAngleForce() # Harmonic angle
force.setUsesPeriodicBoundaryConditions(periodic=True) # adding periodic conditions
for angle in angle_list:
for my_type, my_index in angle_type_index_dict.items():
if any(list(val) in my_index for val in list(permutations(angle))):
type_tag = [tuple(val) for val in list(angle_param_dict.keys()) if val in list(permutations(my_type))]
force.addAngle(int(angle[0]), int(angle[1]), int(angle[2]), *angle_param_dict.get(type_tag[0]))
system.addForce(force)
# assert(system.usesPeriodicBoundaryConditions() == True)
return system
def get_openMM_forces(pdb, system, bond_list, bond_type_index_dict, bond_param_dict, angle_list=None,
angle_type_index_dict=None, angle_param_dict=None):
""" forces for a single configuration
use numb to keep track of individual configurations
integrator used for advancing the equations of motion in MD
doesn't matter what we pick here since we only need the forces on the initial structure, but do need to have it
:return: forces values on atoms in units of eV/A
"""
system = custom_openMM_force_object(system, bond_list, bond_type_index_dict, bond_param_dict, angle_list,
angle_type_index_dict, angle_param_dict)
integrator = LangevinMiddleIntegrator(3 * kelvin, 1 / picosecond, 0.4 * picoseconds) # randomly picked
simulation = Simulation(pdb.topology, system, integrator)
simulation.context.setPositions(pdb.positions)
state = simulation.context.getState(getForces=True)
forces = np.array(state.getForces(asNumpy=True)) * 1.0364e-2 * 0.1 # convert forces from kJ/nm mol to eV/A
return forces
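# Sketch of the unit conversion used above (added for clarity): openMM returns forces in
# kJ/(mol nm); 1 kJ/mol is roughly 1.0364e-2 eV and 1 nm = 10 A, so multiplying by
# 1.0364e-2 and then by 0.1 converts kJ/(mol nm) into eV/A.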
# NOTE: section below deals with multiple input structures for force field training
def get_EF_O_index(traj):
"""
get the mode of EF_O, and use that to extract the EF cluster for the force field training
    all EF atoms should have the same indices regardless of where the EF moiety binds on the zeolite, as long as the
    zeolite framework is the same (all EF atoms, i.e. the Cu-O-Cu insertion, follow the same procedure)
:param traj: traj of configurations containing all atoms, including both the zeolite backbone and EF atoms
"""
EF_O_index_list = []
for atoms in traj:
try:
EFAnalyzer = ExtraFrameworkAnalyzer(atoms)
EF_O_index_list.append(EFAnalyzer.get_extraframework_cluster()[-1])
except:
...
return mode(tuple(EF_O_index_list))
def prep_topologies(folder_path, sample_zeolite, traj_name=None, save_traj=False, del_unlabeled_pdb=False,
show_all=False):
"""
:param folder_path:
:param sample_zeolite:
:param traj_name:
:param save_traj:
:param del_unlabeled_pdb:
:param show_all:
"""
if traj_name is not None:
traj = read(folder_path + '/%s.traj' % traj_name, ':')
output_dir = os.path.join(folder_path, traj_name)
else:
traj = read(folder_path + '/%s.traj' % sample_zeolite, ':')
output_dir = os.path.join(folder_path, sample_zeolite)
Path(output_dir).mkdir(parents=True, exist_ok=True)
cluster_traj, EF_O_index, EF_atoms_index, cluster_EF_index = [], get_EF_O_index(traj[0:100]), [], []
for count, atoms in enumerate(traj):
try:
cluster, EF_atoms_index, cluster_EF_index = get_capped_cluster(atoms, output_dir, 'cluster_' + str(count),
save_traj, [EF_O_index])
label_pdb(output_dir, 'cluster_%s' % str(count), del_unlabeled_pdb)
cluster_traj.append(cluster)
print(sample_zeolite, count)
except:
print(sample_zeolite, count, 'failed!')
if show_all is True:
view(cluster_traj)
return EF_atoms_index, cluster_EF_index
def reformat_inputs(bond_param_dict, angle_param_dict):
""" reformat input dict into lists
:return bond_type: List[List[str]] eg. ['Cu', 'O']
:return angle_type: List[List[str]] eg. ['Cu', 'O', 'Cu']
:return param_list: List[float], extend all parameters into a single list, since scipy.optimize.minimize can only
    take a 1D array as the initial guess parameter
"""
bond_type, angle_type, param_list = [], [], []
for types, indices in bond_param_dict.items():
bond_type.append(list(types))
param_list.extend([val for val in np.array(indices)])
for types, indices in angle_param_dict.items():
angle_type.append(list(types))
param_list.extend([val for val in np.array(indices)])
return bond_type, angle_type, param_list
def get_required_objects_for_ff(folder_path, cluster_tag_number, included_bond_type, included_angle_type,
bond_type_index_dict, angle_type_index_dict):
""" To reduce computational cost, objects such as pdb, system, shortened_bond_list, bond_type_index_dict are kept
fixed for each configuration during the optimization (only run once).
"""
shortened_bond_list = shorten_index_list_by_types(bond_type_index_dict, include_property_type=included_bond_type)
shortened_angle_list = shorten_index_list_by_types(angle_type_index_dict, include_property_type=included_angle_type)
pdb, system = set_up_openMM_system(folder_path, cluster_tag_number, shortened_bond_list)
return pdb, system, shortened_bond_list, shortened_angle_list
def get_FF_forces(param, info_dict, ini_bond_param_dict, ini_angle_param_dict, bond_type_index_dict,
angle_type_index_dict, EF_index):
""" openMM forces for multiple configuration based on the same set of parameters
"""
bond_param_dict, angle_param_dict, number_of_bond_param = {}, {}, 0
for count, (types, indices) in enumerate(ini_bond_param_dict.items()):
bond_param_dict[types] = list(param[count * len(indices):(count + 1) * len(indices)])
number_of_bond_param += len(indices)
for count, (types, indices) in enumerate(ini_angle_param_dict.items()):
angle_param_dict[types] = list(
param[count * len(indices) + number_of_bond_param:(count + 1) * len(indices) + number_of_bond_param])
predicted_f = []
my_dict = copy.deepcopy(info_dict)
for config_tag, info_list in my_dict.items():
ff_forces = get_openMM_forces(info_list[0], info_list[1], info_list[2], bond_type_index_dict, bond_param_dict,
info_list[3], angle_type_index_dict, angle_param_dict)[EF_index]
predicted_f.append([force_list for force_list in ff_forces])
return predicted_f
def get_DFT_forces_single(atoms, atom_index):
"""
reference DFT forces on single atoms
"""
f_vec = atoms.calc.results['forces'][atom_index] # self.atoms.get_forces()[atom_index]
f_mag = np.linalg.norm(f_vec)
return f_vec
def get_residue(param, info_dict, DFT_f, weights, ini_bond_param_dict, ini_angle_param_dict,
bond_type_index_dict, angle_type_index_dict, EF_index):
"""
optimize force field parameters by minimizing this loss function (MSE), weighted by DFT electronic energies
k (Boltzmann's constant) = 8.617e-5 eV/K
T = 298 K
"""
predicted_f = get_FF_forces(param, info_dict, ini_bond_param_dict, ini_angle_param_dict, bond_type_index_dict,
angle_type_index_dict, EF_index)
residue = np.reshape(np.array(np.reshape(predicted_f, [-1, 3])) - np.array(np.reshape(DFT_f, [-1, 3])), -1)
weighted_residue = residue * weights # 39 number of atoms
print(np.mean(weighted_residue ** 2))
return np.mean(weighted_residue ** 2)
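# Hedged illustration of the Boltzmann weighting referenced above (the actual weights are
# built later in func()): each configuration i is weighted by
#   w_i = exp(-(E_i - E_ref) / (N_atoms * k_B * T)),  with k_B = 8.617e-5 eV/K and T = 298 K,
# so low-energy configurations contribute more strongly to the weighted mean-squared error.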
def get_fitting_parameters(initial_param, info_dict, DFT_f, weights, ini_bond_param_dict, ini_angle_param_dict,
bond_type_index_dict, angle_type_index_dict, EF_index):
    # todo: more flexible bond reformatting and feeding
bounds = ((-np.Inf, np.Inf), (-np.Inf, np.Inf), (0, np.Inf), (-np.Inf, np.Inf), (-np.Inf, np.Inf),
(0, np.Inf), (-np.Inf, np.Inf), (-np.Inf, np.Inf), (0, np.Inf), (0, np.pi),
(-np.Inf, np.Inf), (0, np.pi), (-np.Inf, np.Inf), (0, np.pi), (-np.Inf, np.Inf))
res = minimize(get_residue, initial_param, method='Powell', bounds=bounds, options={'ftol': 0.01, 'maxiter': 1000},
args=(info_dict, DFT_f, weights, ini_bond_param_dict, ini_angle_param_dict,
bond_type_index_dict, angle_type_index_dict, EF_index))
print(res.success)
return res
def make_parity_plot(ff_forces, dft_forces, atom_name):
""" plot FF forces vs. DFT forces
"""
plt.figure()
fig, ax = plt.subplots()
plt.plot(dft_forces, ff_forces, 'o')
plt.xlabel('DFT_force', fontsize=18)
plt.ylabel('FF_force', fontsize=18)
lims = [np.min([ax.get_xlim(), ax.get_ylim()]), np.max([ax.get_xlim(), ax.get_ylim()])]
ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
ax.set_aspect('equal')
ax.set_xlim(lims)
ax.set_ylim(lims)
plt.title('Force fitting on %s' % atom_name, fontsize=18)
plt.show()
def func():
tic = time.perf_counter()
zeolite = 'SOD'
folder_path, sample_zeolite, traj_name = '/Users/jiaweiguo/Box/openMM_FF', zeolite, zeolite + '_md'
# prep_topologies(folder_path, sample_zeolite, traj_name, del_unlabeled_pdb=True)
"""
ini_bond_param_dict = {('O-Cu', 'Cu'): [1.2, 4, 0.3], ('O-EF', 'Cu'): [1.2, 4, 0.2], ('Al', 'Cu'): [1.2, 4, 0.4]}
ini_angle_param_dict = {('Cu', 'O-EF', 'Cu'): [2.3, 10], ('O-Cu', 'Cu', 'O-EF'): [2.3, 10],
('Al', 'Cu', 'O-EF'): [2.3, 10]}
"""
ini_bond_param_dict = {('O-Cu', 'Cu'): [60.097, 2.267, 0.228], ('O-EF', 'Cu'): [4405.247, 4.163, 0.177],
('Al', 'Cu'): [-2.656, 4.608, 0.413]}
ini_angle_param_dict = {('Cu', 'O-EF', 'Cu'): [2.458, 16.552], ('O-Cu', 'Cu', 'O-EF'): [3.266, 4.136],
('Al', 'Cu', 'O-EF'): [1.925, 1.673]}
included_bond_type, included_angle_type, ini_param = reformat_inputs(ini_bond_param_dict, ini_angle_param_dict)
    # set up type_index_dict using a single set of data #fixme: randomly pick several initial clusters to build the dict
cluster = read(os.path.join(folder_path, traj_name) + '/cluster_0_labeled.pdb', '0')
bond_index_list, shortened_bond_index_list = get_bonds(cluster, mult=2)
bond_type_dict, whole_bond_type_list = get_property_types(cluster, bond_index_list)
angle_index_list, shortened_angle_index_list = get_angles(cluster, mult=2)
angle_type_dict, whole_angle_type_list = get_property_types(cluster, angle_index_list)
bond_type_index_dict = get_type_index_pair(bond_type_dict, whole_bond_type_list, bond_index_list)
angle_type_index_dict = get_type_index_pair(angle_type_dict, whole_angle_type_list, angle_index_list)
numb_skip = 2000
info_dict, output_path = {}, os.path.join(folder_path, traj_name)
files = [files for files in os.listdir(os.path.join(folder_path, traj_name)) if '.pdb' in files]
for cluster_tag_number in np.arange(0, len(files), numb_skip):
cluster_tag_number = int(cluster_tag_number)
pdb, system, shortened_bond_list, shortened_angle_list = \
get_required_objects_for_ff(output_path, cluster_tag_number, included_bond_type, included_angle_type,
bond_type_index_dict, angle_type_index_dict)
info_dict[cluster_tag_number] = [pdb, system, shortened_bond_list, shortened_angle_list]
print(cluster_tag_number)
with open(output_path + '/info_dict_%s.pickle' % numb_skip, 'wb') as f:
pickle.dump(info_dict, f)
with open(folder_path + '/EF_index_dict.pickle', 'rb') as f:
EF_index_dict = pickle.load(f)
traj = read(folder_path + '/%s.traj' % traj_name, '0::%s' % numb_skip)
DFT_f = []
for atoms in traj:
DFT_f.append([get_DFT_forces_single(atoms, atom_index=val) for val in EF_index_dict.get(zeolite)[-3:]])
print(np.array(DFT_f).shape)
ref_E = read(folder_path + '/%s.traj' % traj_name, '-1').calc.results['energy']
DFT_E = []
for atoms in traj:
DFT_E.append(atoms.calc.results['energy'])
with open(os.path.join(folder_path, traj_name) + '/info_dict_%s.pickle' % numb_skip, 'rb') as f:
info_dict = pickle.load(f)
with open(folder_path + '/cluster_EF_index_dict.pickle', 'rb') as f:
cluster_EF_index_dict = pickle.load(f)
my_dict = copy.deepcopy(info_dict) # important, need to keep openMM "systems" fixed
weights = []
for value in np.exp(-(np.array(DFT_E) - ref_E) / len(traj[0]) / (8.617e-5 * 298)):
weights.extend([value, value, value, value, value, value, value, value, value])
res = get_fitting_parameters(ini_param, my_dict, DFT_f, np.array(weights), ini_bond_param_dict, ini_angle_param_dict,
bond_type_index_dict, angle_type_index_dict, cluster_EF_index_dict.get(zeolite))
print([np.around(float(val), decimals=3) for val in res.x])
FF_f = get_FF_forces(res.x, info_dict, ini_bond_param_dict, ini_angle_param_dict, bond_type_index_dict,
angle_type_index_dict, cluster_EF_index_dict.get(zeolite))
make_parity_plot(np.array(np.reshape(FF_f, [-1, 3])), np.array(np.reshape(DFT_f, [-1, 3])), 'Cu-O-Cu')
force_dict = {'FF': np.array(np.reshape(FF_f, [-1, 3])), 'DFT': np.array(np.reshape(DFT_f, [-1, 3]))}
with open(output_path + '/forces_%s.pickle' % numb_skip, 'wb') as f:
pickle.dump(force_dict, f)
toc = time.perf_counter()
print(f"Program terminated in {toc - tic:0.4f} seconds")
if __name__ == '__main__':
# func()
""" weighting factor for the loss function
zeolite = 'SOD'
folder_path, traj_name, numb_skip = '/Users/jiaweiguo/Box/openMM_FF', zeolite + '_md', 2000
traj = read(folder_path + '/%s.traj' % traj_name, '0::%s' % numb_skip)
ref_E = read(folder_path + '/%s.traj' % traj_name, '-1').calc.results['energy']
DFT_E = []
for atoms in traj:
DFT_E.append(atoms.calc.results['energy'])
weight = np.exp(-(np.array(DFT_E) - ref_E) / len(traj[0]) / (8.617e-5 * 298))
plt.plot(DFT_E, weight, 'o')
plt.xlabel('DFT electronic energies (eV)', fontsize=16)
plt.ylabel('Boltzmann weighting', fontsize=16)
plt.show()
"""
| 46.215657
| 121
| 0.685407
|
from maze.extra_framework_maker import ExtraFrameworkMaker, ExtraFrameworkAnalyzer
from maze.io_zeolite import read_vasp
from maze.zeolite import PerfectZeolite, Zeolite
from ase.neighborlist import natural_cutoffs, NeighborList
import os
from pathlib import Path
from ase.io import write, read, gromacs, proteindatabank
from ase.visualize import view
import copy
import shutil
from glob import glob
from ase.constraints import FixAtoms
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from sys import stdout
from ase.geometry.analysis import Analysis
import numpy as np
from itertools import permutations
from lxml import etree
from contextlib import closing
from collections import OrderedDict
from scipy.optimize import least_squares, minimize
import matplotlib.pyplot as plt
from statistics import mode
import pickle
import time
from ase.data import atomic_masses, atomic_numbers
def get_EF_atom_indices(atoms):
TM_list = ['Pt', 'Cu', 'Co', 'Pd', 'Fe', 'Cr', 'Rh', 'Ru']
index_EF_TM = [a.index for a in atoms if a.symbol in TM_list]
index_Al = [a.index for a in atoms if a.symbol == 'Al']
nl = NeighborList(natural_cutoffs(atoms), bothways=True, self_interaction=False)
nl.update(atoms)
Al_neigh_list = np.concatenate((nl.get_neighbors(index_Al[0])[0], nl.get_neighbors(index_Al[1])[0]))
Al_neigh_list = [x for x in Al_neigh_list if atoms[x].symbol == 'O']
TM_neigh_list = np.concatenate((nl.get_neighbors(index_EF_TM[0])[0], nl.get_neighbors(index_EF_TM[1])[0]))
centering_o = [[x for x in TM_neigh_list if list(TM_neigh_list).count(x) > 1 and x not in Al_neigh_list][0]]
return index_EF_TM + centering_o
def get_capped_cluster(atoms, folder_path, file_name, save_traj, EF_O_index):
EFMaker = ExtraFrameworkAnalyzer(atoms)
cluster = atoms[[index for index in EFMaker.get_extraframework_cluster(EF_O_index)]]
cluster_EF_index = get_EF_atom_indices(cluster)
centering_pos = cluster.get_positions()[cluster_EF_index[-1]]
recentered_cluster = EFMaker.recentering_atoms(cluster, centering_pos)[0]
# cluster = Zeolite(cluster).cap_atoms()
proteindatabank.write_proteindatabank(folder_path + '/%s.pdb' % file_name, recentered_cluster)
if save_traj is True:
write(folder_path + '/%s.traj' % file_name, recentered_cluster)
return cluster, EFMaker.get_extraframework_cluster(EF_O_index), cluster_EF_index
def label_pdb(folder_path, file_name, del_unlabeled_pdb):
filein = open(folder_path + '/%s.pdb' % file_name, 'r')
fileout = open(folder_path + '/%s_labeled.pdb' % file_name, 'w')
name_list = []
for line in filein.readlines():
if line.startswith('ATOM') or line.startswith('HETATM'):
name = line[12:16].strip()
name_list.append(name)
name = name + str(name_list.count(name))
name = name.rjust(4)
line = line.replace(line[12:16], name, 1)
# only replacing the first occurrence of line[12:16], atomic symbols are maintained
fileout.writelines(line)
filein.close()
fileout.close()
if del_unlabeled_pdb is True:
os.remove(folder_path + '/%s.pdb' % file_name)
def get_bonds(cluster, mult=1, excluded_index=None, excluded_pair=None):
if excluded_index is None:
excluded_index = []
if excluded_pair is None:
excluded_pair = []
nl = NeighborList(natural_cutoffs(cluster, mult=mult), bothways=True, self_interaction=False)
nl.update(cluster)
bond_list, shortened_list = [], []
for count, indices in enumerate(Analysis(cluster, nl=nl).all_bonds[0]):
for index in indices:
if [count, index] not in bond_list and [index, count] not in bond_list:
bond_list.append([count, index])
for bond in bond_list:
if all(single_index not in bond for single_index in excluded_index) and \
all(tuple(bond) not in list(permutations(pair)) for pair in excluded_pair):
shortened_list.append(bond)
return bond_list, shortened_list
def get_angles(cluster, mult=1, excluded_index=None, excluded_pair=None):
if excluded_index is None:
excluded_index = []
if excluded_pair is None:
excluded_pair = []
nl = NeighborList(natural_cutoffs(cluster, mult=mult), bothways=True, self_interaction=False)
nl.update(cluster)
angle_list, shortened_list = [], []
for count, indices in enumerate(Analysis(cluster, nl=nl).all_angles[0]):
for index in indices:
if all(list(val) not in angle_list for val in list(permutations([count, index[0], index[1]]))):
angle_list.append([count, index[0], index[1]])
for angle in angle_list:
if all(single_index not in angle for single_index in excluded_index) and \
all(list(value) not in excluded_pair for value in list(permutations(angle, 2))):
shortened_list.append(angle)
return angle_list, shortened_list
def write_xml(atoms, bonds, save_as):
# on-the-fly generation of force field xml file, matching atoms and bonds with pdb file
root = etree.Element('ForceField')
xml_section = etree.SubElement(root, "AtomTypes")
for atom in atoms:
element_type = ''.join(filter(lambda x: not x.isdigit(), atom.name))
# properties = {'name': atom.name, 'class': atom.name, 'element': element_type, 'mass': str(atomic_mass)}
if element_type == 'Cu' or atom.name == 'O9':
atomic_mass = atomic_masses[atomic_numbers[element_type]]
else:
atomic_mass = 0.0
properties = {'name': atom.name, 'class': atom.name, 'element': element_type, 'mass': str(atomic_mass)}
etree.SubElement(xml_section, 'Type', **properties)
xml_section = etree.SubElement(root, 'Residues')
xml_residue = etree.SubElement(xml_section, 'Residue', name='MOL')
for atom in atoms:
etree.SubElement(xml_residue, 'Atom', name=atom.name, type=atom.name)
for bond in bonds:
etree.SubElement(xml_residue, 'Bond', atomName1=bond[0].name, atomName2=bond[1].name)
tree = etree.ElementTree(root)
xml = etree.tostring(tree, pretty_print=True).decode('utf-8')
with closing(open(save_as, 'w')) as f:
f.write(xml)
def check_atom_types(cluster, index):
nl = NeighborList(natural_cutoffs(cluster), bothways=True, self_interaction=False)
nl.update(cluster)
class_Al = [atom.index for atom in cluster if atom.symbol == 'Al']
class_Cu = [atom.index for atom in cluster if atom.symbol == 'Cu']
class_H = [atom.index for atom in cluster if atom.symbol == 'H']
class_O_EF = [get_EF_atom_indices(cluster)[-1]]
class_O_Cu = [atom.index for atom in cluster if atom.symbol == 'O' and atom.index not in class_O_EF and
all(val not in class_H for val in nl.get_neighbors(atom.index)[0])]
class_O_H = [atom.index for atom in cluster if atom.symbol == 'O' and atom.index not in class_O_EF + class_O_Cu]
if index in class_Al:
return 'Al'
if index in class_Cu:
return 'Cu'
if index in class_H:
return 'H'
if index in class_O_EF:
return 'O-EF'
if index in class_O_Cu:
return 'O-Cu'
if index in class_O_H:
return 'O-H'
else:
return 'None'
def get_property_types(cluster, property_list):
type_dict, repeated_list, whole_type_list, count = {}, [], [], 0
for items in property_list:
my_list = []
for val in items:
my_list.append(check_atom_types(cluster, val))
whole_type_list.append(my_list)
if all(list(pair) not in repeated_list for pair in list(permutations(my_list))):
repeated_list.append(my_list)
type_dict[count] = my_list
count += 1
return type_dict, whole_type_list
def _get_index_dict(type_dict, whole_type_list, index_list):
index_dict = {}
for key, value in type_dict.items():
temp_list = []
for count, items in enumerate(whole_type_list):
if any(list(pair) == value for pair in list(permutations(items))):
temp_list.append(index_list[count])
index_dict[key] = temp_list
return index_dict
def get_type_index_pair(type_dict, whole_type_list, index_list):
bond_index_dict = _get_index_dict(type_dict, whole_type_list, index_list)
type_index_dict = {}
for key, value in type_dict.items():
type_index_dict[tuple(value)] = bond_index_dict[key]
return type_index_dict
def pretty_print(my_dict):
for key, value in my_dict.items():
print(key, '-->', value)
def shorten_index_list_by_types(type_index_dict, exclude_atom_type=None, exclude_property_type=None,
include_property_type=None, case=0):
if exclude_atom_type is not None and exclude_property_type is None:
case = 1
if exclude_property_type is not None and exclude_atom_type is None:
case = 2
if exclude_property_type is not None and exclude_atom_type is not None:
case = 3
if include_property_type is not None:
case = 4
shortened_list = []
for type_list, index_list in type_index_dict.items():
if case == 1 and all(single_type not in type_list for single_type in exclude_atom_type):
shortened_list.extend(index_list)
elif case == 2 and all(list(value) not in exclude_property_type for value in list(permutations(type_list))):
shortened_list.extend(index_list)
elif case == 3 and all(single_type not in type_list for single_type in exclude_atom_type) and \
all(list(value) not in exclude_property_type for value in list(permutations(type_list))):
shortened_list.extend(index_list)
elif case == 4 and any(list(value) in include_property_type for value in list(permutations(type_list))):
shortened_list.extend(index_list)
return shortened_list
def set_up_openMM_system(folder_path, cluster_tag_number, shortened_bond_list):
pdb = PDBFile(folder_path + '/cluster_%s_labeled.pdb' % cluster_tag_number)
atoms = list(pdb.topology.atoms())
for index in shortened_bond_list:
pdb.topology.addBond(atoms[index[0]], atoms[index[1]])
bonds = list(pdb.topology.bonds())
write_xml(atoms, bonds, folder_path + '/forcefield.xml')
FF = ForceField(folder_path + '/forcefield.xml')
system = FF.createSystem(pdb.topology)
return pdb, system
def custom_openMM_force_object(system, bond_list, bond_type_index_dict, bond_param_dict, angle_list=None,
angle_type_index_dict=None, angle_param_dict=None):
force = CustomBondForce("D*(1-exp(-alpha*(r-r0)))^2") # Morse bond
force.addPerBondParameter("D")
force.addPerBondParameter("alpha")
force.addPerBondParameter("r0")
force.setUsesPeriodicBoundaryConditions(periodic=True)
for bond in bond_list:
for my_type, my_index in bond_type_index_dict.items():
if any(list(val) in my_index for val in list(permutations(bond))):
try:
force.addBond(int(bond[0]), int(bond[1]), bond_param_dict.get(my_type))
except:
my_type = tuple(reversed(my_type))
force.addBond(int(bond[0]), int(bond[1]), bond_param_dict.get(my_type))
# note: consider updating the info_dict to make it order insensitive
system.addForce(force)
force = HarmonicAngleForce() # Harmonic angle
force.setUsesPeriodicBoundaryConditions(periodic=True) # adding periodic conditions
for angle in angle_list:
for my_type, my_index in angle_type_index_dict.items():
if any(list(val) in my_index for val in list(permutations(angle))):
type_tag = [tuple(val) for val in list(angle_param_dict.keys()) if val in list(permutations(my_type))]
force.addAngle(int(angle[0]), int(angle[1]), int(angle[2]), *angle_param_dict.get(type_tag[0]))
system.addForce(force)
# assert(system.usesPeriodicBoundaryConditions() == True)
return system
def get_openMM_forces(pdb, system, bond_list, bond_type_index_dict, bond_param_dict, angle_list=None,
angle_type_index_dict=None, angle_param_dict=None):
system = custom_openMM_force_object(system, bond_list, bond_type_index_dict, bond_param_dict, angle_list,
angle_type_index_dict, angle_param_dict)
integrator = LangevinMiddleIntegrator(3 * kelvin, 1 / picosecond, 0.4 * picoseconds) # randomly picked
simulation = Simulation(pdb.topology, system, integrator)
simulation.context.setPositions(pdb.positions)
state = simulation.context.getState(getForces=True)
forces = np.array(state.getForces(asNumpy=True)) * 1.0364e-2 * 0.1 # convert forces from kJ/nm mol to eV/A
return forces
# NOTE: section below deals with multiple input structures for force field training
def get_EF_O_index(traj):
EF_O_index_list = []
for atoms in traj:
try:
EFAnalyzer = ExtraFrameworkAnalyzer(atoms)
EF_O_index_list.append(EFAnalyzer.get_extraframework_cluster()[-1])
except:
...
return mode(tuple(EF_O_index_list))
def prep_topologies(folder_path, sample_zeolite, traj_name=None, save_traj=False, del_unlabeled_pdb=False,
show_all=False):
if traj_name is not None:
traj = read(folder_path + '/%s.traj' % traj_name, ':')
output_dir = os.path.join(folder_path, traj_name)
else:
traj = read(folder_path + '/%s.traj' % sample_zeolite, ':')
output_dir = os.path.join(folder_path, sample_zeolite)
Path(output_dir).mkdir(parents=True, exist_ok=True)
cluster_traj, EF_O_index, EF_atoms_index, cluster_EF_index = [], get_EF_O_index(traj[0:100]), [], []
for count, atoms in enumerate(traj):
try:
cluster, EF_atoms_index, cluster_EF_index = get_capped_cluster(atoms, output_dir, 'cluster_' + str(count),
save_traj, [EF_O_index])
label_pdb(output_dir, 'cluster_%s' % str(count), del_unlabeled_pdb)
cluster_traj.append(cluster)
print(sample_zeolite, count)
except:
print(sample_zeolite, count, 'failed!')
if show_all is True:
view(cluster_traj)
return EF_atoms_index, cluster_EF_index
def reformat_inputs(bond_param_dict, angle_param_dict):
bond_type, angle_type, param_list = [], [], []
for types, indices in bond_param_dict.items():
bond_type.append(list(types))
param_list.extend([val for val in np.array(indices)])
for types, indices in angle_param_dict.items():
angle_type.append(list(types))
param_list.extend([val for val in np.array(indices)])
return bond_type, angle_type, param_list
def get_required_objects_for_ff(folder_path, cluster_tag_number, included_bond_type, included_angle_type,
bond_type_index_dict, angle_type_index_dict):
shortened_bond_list = shorten_index_list_by_types(bond_type_index_dict, include_property_type=included_bond_type)
shortened_angle_list = shorten_index_list_by_types(angle_type_index_dict, include_property_type=included_angle_type)
pdb, system = set_up_openMM_system(folder_path, cluster_tag_number, shortened_bond_list)
return pdb, system, shortened_bond_list, shortened_angle_list
def get_FF_forces(param, info_dict, ini_bond_param_dict, ini_angle_param_dict, bond_type_index_dict,
angle_type_index_dict, EF_index):
bond_param_dict, angle_param_dict, number_of_bond_param = {}, {}, 0
for count, (types, indices) in enumerate(ini_bond_param_dict.items()):
bond_param_dict[types] = list(param[count * len(indices):(count + 1) * len(indices)])
number_of_bond_param += len(indices)
for count, (types, indices) in enumerate(ini_angle_param_dict.items()):
angle_param_dict[types] = list(
param[count * len(indices) + number_of_bond_param:(count + 1) * len(indices) + number_of_bond_param])
predicted_f = []
my_dict = copy.deepcopy(info_dict)
for config_tag, info_list in my_dict.items():
ff_forces = get_openMM_forces(info_list[0], info_list[1], info_list[2], bond_type_index_dict, bond_param_dict,
info_list[3], angle_type_index_dict, angle_param_dict)[EF_index]
predicted_f.append([force_list for force_list in ff_forces])
return predicted_f
def get_DFT_forces_single(atoms, atom_index):
f_vec = atoms.calc.results['forces'][atom_index] # self.atoms.get_forces()[atom_index]
f_mag = np.linalg.norm(f_vec)
return f_vec
def get_residue(param, info_dict, DFT_f, weights, ini_bond_param_dict, ini_angle_param_dict,
bond_type_index_dict, angle_type_index_dict, EF_index):
predicted_f = get_FF_forces(param, info_dict, ini_bond_param_dict, ini_angle_param_dict, bond_type_index_dict,
angle_type_index_dict, EF_index)
residue = np.reshape(np.array(np.reshape(predicted_f, [-1, 3])) - np.array(np.reshape(DFT_f, [-1, 3])), -1)
weighted_residue = residue * weights # 39 number of atoms
print(np.mean(weighted_residue ** 2))
return np.mean(weighted_residue ** 2)
def get_fitting_parameters(initial_param, info_dict, DFT_f, weights, ini_bond_param_dict, ini_angle_param_dict,
bond_type_index_dict, angle_type_index_dict, EF_index):
    # todo: more flexible bond reformatting and feeding
bounds = ((-np.Inf, np.Inf), (-np.Inf, np.Inf), (0, np.Inf), (-np.Inf, np.Inf), (-np.Inf, np.Inf),
(0, np.Inf), (-np.Inf, np.Inf), (-np.Inf, np.Inf), (0, np.Inf), (0, np.pi),
(-np.Inf, np.Inf), (0, np.pi), (-np.Inf, np.Inf), (0, np.pi), (-np.Inf, np.Inf))
res = minimize(get_residue, initial_param, method='Powell', bounds=bounds, options={'ftol': 0.01, 'maxiter': 1000},
args=(info_dict, DFT_f, weights, ini_bond_param_dict, ini_angle_param_dict,
bond_type_index_dict, angle_type_index_dict, EF_index))
print(res.success)
return res
def make_parity_plot(ff_forces, dft_forces, atom_name):
plt.figure()
fig, ax = plt.subplots()
plt.plot(dft_forces, ff_forces, 'o')
plt.xlabel('DFT_force', fontsize=18)
plt.ylabel('FF_force', fontsize=18)
lims = [np.min([ax.get_xlim(), ax.get_ylim()]), np.max([ax.get_xlim(), ax.get_ylim()])]
ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
ax.set_aspect('equal')
ax.set_xlim(lims)
ax.set_ylim(lims)
plt.title('Force fitting on %s' % atom_name, fontsize=18)
plt.show()
def func():
tic = time.perf_counter()
zeolite = 'SOD'
folder_path, sample_zeolite, traj_name = '/Users/jiaweiguo/Box/openMM_FF', zeolite, zeolite + '_md'
# prep_topologies(folder_path, sample_zeolite, traj_name, del_unlabeled_pdb=True)
ini_bond_param_dict = {('O-Cu', 'Cu'): [60.097, 2.267, 0.228], ('O-EF', 'Cu'): [4405.247, 4.163, 0.177],
('Al', 'Cu'): [-2.656, 4.608, 0.413]}
ini_angle_param_dict = {('Cu', 'O-EF', 'Cu'): [2.458, 16.552], ('O-Cu', 'Cu', 'O-EF'): [3.266, 4.136],
('Al', 'Cu', 'O-EF'): [1.925, 1.673]}
included_bond_type, included_angle_type, ini_param = reformat_inputs(ini_bond_param_dict, ini_angle_param_dict)
    # set up type_index_dict using a single set of data  # FIXME: randomly pick several initial clusters to build the dict
cluster = read(os.path.join(folder_path, traj_name) + '/cluster_0_labeled.pdb', '0')
bond_index_list, shortened_bond_index_list = get_bonds(cluster, mult=2)
bond_type_dict, whole_bond_type_list = get_property_types(cluster, bond_index_list)
angle_index_list, shortened_angle_index_list = get_angles(cluster, mult=2)
angle_type_dict, whole_angle_type_list = get_property_types(cluster, angle_index_list)
bond_type_index_dict = get_type_index_pair(bond_type_dict, whole_bond_type_list, bond_index_list)
angle_type_index_dict = get_type_index_pair(angle_type_dict, whole_angle_type_list, angle_index_list)
numb_skip = 2000
info_dict, output_path = {}, os.path.join(folder_path, traj_name)
files = [files for files in os.listdir(os.path.join(folder_path, traj_name)) if '.pdb' in files]
for cluster_tag_number in np.arange(0, len(files), numb_skip):
cluster_tag_number = int(cluster_tag_number)
pdb, system, shortened_bond_list, shortened_angle_list = \
get_required_objects_for_ff(output_path, cluster_tag_number, included_bond_type, included_angle_type,
bond_type_index_dict, angle_type_index_dict)
info_dict[cluster_tag_number] = [pdb, system, shortened_bond_list, shortened_angle_list]
print(cluster_tag_number)
with open(output_path + '/info_dict_%s.pickle' % numb_skip, 'wb') as f:
pickle.dump(info_dict, f)
with open(folder_path + '/EF_index_dict.pickle', 'rb') as f:
EF_index_dict = pickle.load(f)
traj = read(folder_path + '/%s.traj' % traj_name, '0::%s' % numb_skip)
DFT_f = []
for atoms in traj:
DFT_f.append([get_DFT_forces_single(atoms, atom_index=val) for val in EF_index_dict.get(zeolite)[-3:]])
print(np.array(DFT_f).shape)
ref_E = read(folder_path + '/%s.traj' % traj_name, '-1').calc.results['energy']
DFT_E = []
for atoms in traj:
DFT_E.append(atoms.calc.results['energy'])
with open(os.path.join(folder_path, traj_name) + '/info_dict_%s.pickle' % numb_skip, 'rb') as f:
info_dict = pickle.load(f)
with open(folder_path + '/cluster_EF_index_dict.pickle', 'rb') as f:
cluster_EF_index_dict = pickle.load(f)
my_dict = copy.deepcopy(info_dict) # important, need to keep openMM "systems" fixed
weights = []
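    # Boltzmann weight per frame, exp(-(E - E_ref) / (N * kB * T)) with T = 298 K and kB in eV/K,
    # repeated 9 times so each of the 3 EF atoms x 3 force components carries its frame's weight.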
for value in np.exp(-(np.array(DFT_E) - ref_E) / len(traj[0]) / (8.617e-5 * 298)):
        weights.extend([value] * 9)
res = get_fitting_parameters(ini_param, my_dict, DFT_f, np.array(weights), ini_bond_param_dict, ini_angle_param_dict,
bond_type_index_dict, angle_type_index_dict, cluster_EF_index_dict.get(zeolite))
print([np.around(float(val), decimals=3) for val in res.x])
FF_f = get_FF_forces(res.x, info_dict, ini_bond_param_dict, ini_angle_param_dict, bond_type_index_dict,
angle_type_index_dict, cluster_EF_index_dict.get(zeolite))
make_parity_plot(np.array(np.reshape(FF_f, [-1, 3])), np.array(np.reshape(DFT_f, [-1, 3])), 'Cu-O-Cu')
force_dict = {'FF': np.array(np.reshape(FF_f, [-1, 3])), 'DFT': np.array(np.reshape(DFT_f, [-1, 3]))}
with open(output_path + '/forces_%s.pickle' % numb_skip, 'wb') as f:
pickle.dump(force_dict, f)
toc = time.perf_counter()
print(f"Program terminated in {toc - tic:0.4f} seconds")
if __name__ == '__main__':
    func()
| true
| true
|
f71989a26c51d5d0de8be179c705597a99ff7aea
| 17,373
|
py
|
Python
|
python/ccxt/async_support/bitbay.py
|
Richard-L-Johnson/ccxt1
|
903aa1288694f9192b15d22b945508661bdc8807
|
[
"MIT"
] | 13
|
2019-01-26T14:41:37.000Z
|
2022-03-26T03:33:12.000Z
|
python/ccxt/async_support/bitbay.py
|
Richard-L-Johnson/ccxt1
|
903aa1288694f9192b15d22b945508661bdc8807
|
[
"MIT"
] | 17
|
2018-10-02T04:43:13.000Z
|
2018-11-01T17:07:37.000Z
|
python/ccxt/async_support/bitbay.py
|
Richard-L-Johnson/ccxt1
|
903aa1288694f9192b15d22b945508661bdc8807
|
[
"MIT"
] | 12
|
2018-12-24T02:19:02.000Z
|
2022-03-26T05:04:25.000Z
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
    basestring  # Python 2
except NameError:
    basestring = str  # Python 3
import hashlib
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import InvalidNonce
class bitbay (Exchange):
def describe(self):
return self.deep_extend(super(bitbay, self).describe(), {
'id': 'bitbay',
'name': 'BitBay',
'countries': ['MT', 'EU'], # Malta
'rateLimit': 1000,
'has': {
'CORS': True,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766132-978a7bd8-5ece-11e7-9540-bc96d1e9bbb8.jpg',
'www': 'https://bitbay.net',
'api': {
'public': 'https://bitbay.net/API/Public',
'private': 'https://bitbay.net/API/Trading/tradingApi.php',
},
'doc': [
'https://bitbay.net/public-api',
'https://bitbay.net/account/tab-api',
'https://github.com/BitBayNet/API',
],
'fees': 'https://bitbay.net/en/fees',
},
'api': {
'public': {
'get': [
'{id}/all',
'{id}/market',
'{id}/orderbook',
'{id}/ticker',
'{id}/trades',
],
},
'private': {
'post': [
'info',
'trade',
'cancel',
'orderbook',
'orders',
'transfer',
'withdraw',
'history',
'transactions',
],
},
},
'markets': {
'BTC/USD': {'id': 'BTCUSD', 'symbol': 'BTC/USD', 'base': 'BTC', 'quote': 'USD', 'baseId': 'BTC', 'quoteId': 'USD'},
'BTC/EUR': {'id': 'BTCEUR', 'symbol': 'BTC/EUR', 'base': 'BTC', 'quote': 'EUR', 'baseId': 'BTC', 'quoteId': 'EUR'},
'BTC/PLN': {'id': 'BTCPLN', 'symbol': 'BTC/PLN', 'base': 'BTC', 'quote': 'PLN', 'baseId': 'BTC', 'quoteId': 'PLN'},
'LTC/USD': {'id': 'LTCUSD', 'symbol': 'LTC/USD', 'base': 'LTC', 'quote': 'USD', 'baseId': 'LTC', 'quoteId': 'USD'},
'LTC/EUR': {'id': 'LTCEUR', 'symbol': 'LTC/EUR', 'base': 'LTC', 'quote': 'EUR', 'baseId': 'LTC', 'quoteId': 'EUR'},
'LTC/PLN': {'id': 'LTCPLN', 'symbol': 'LTC/PLN', 'base': 'LTC', 'quote': 'PLN', 'baseId': 'LTC', 'quoteId': 'PLN'},
'LTC/BTC': {'id': 'LTCBTC', 'symbol': 'LTC/BTC', 'base': 'LTC', 'quote': 'BTC', 'baseId': 'LTC', 'quoteId': 'BTC'},
'ETH/USD': {'id': 'ETHUSD', 'symbol': 'ETH/USD', 'base': 'ETH', 'quote': 'USD', 'baseId': 'ETH', 'quoteId': 'USD'},
'ETH/EUR': {'id': 'ETHEUR', 'symbol': 'ETH/EUR', 'base': 'ETH', 'quote': 'EUR', 'baseId': 'ETH', 'quoteId': 'EUR'},
'ETH/PLN': {'id': 'ETHPLN', 'symbol': 'ETH/PLN', 'base': 'ETH', 'quote': 'PLN', 'baseId': 'ETH', 'quoteId': 'PLN'},
'ETH/BTC': {'id': 'ETHBTC', 'symbol': 'ETH/BTC', 'base': 'ETH', 'quote': 'BTC', 'baseId': 'ETH', 'quoteId': 'BTC'},
'LSK/USD': {'id': 'LSKUSD', 'symbol': 'LSK/USD', 'base': 'LSK', 'quote': 'USD', 'baseId': 'LSK', 'quoteId': 'USD'},
'LSK/EUR': {'id': 'LSKEUR', 'symbol': 'LSK/EUR', 'base': 'LSK', 'quote': 'EUR', 'baseId': 'LSK', 'quoteId': 'EUR'},
'LSK/PLN': {'id': 'LSKPLN', 'symbol': 'LSK/PLN', 'base': 'LSK', 'quote': 'PLN', 'baseId': 'LSK', 'quoteId': 'PLN'},
'LSK/BTC': {'id': 'LSKBTC', 'symbol': 'LSK/BTC', 'base': 'LSK', 'quote': 'BTC', 'baseId': 'LSK', 'quoteId': 'BTC'},
'BCH/USD': {'id': 'BCCUSD', 'symbol': 'BCH/USD', 'base': 'BCH', 'quote': 'USD', 'baseId': 'BCC', 'quoteId': 'USD'},
'BCH/EUR': {'id': 'BCCEUR', 'symbol': 'BCH/EUR', 'base': 'BCH', 'quote': 'EUR', 'baseId': 'BCC', 'quoteId': 'EUR'},
'BCH/PLN': {'id': 'BCCPLN', 'symbol': 'BCH/PLN', 'base': 'BCH', 'quote': 'PLN', 'baseId': 'BCC', 'quoteId': 'PLN'},
'BCH/BTC': {'id': 'BCCBTC', 'symbol': 'BCH/BTC', 'base': 'BCH', 'quote': 'BTC', 'baseId': 'BCC', 'quoteId': 'BTC'},
'BTG/USD': {'id': 'BTGUSD', 'symbol': 'BTG/USD', 'base': 'BTG', 'quote': 'USD', 'baseId': 'BTG', 'quoteId': 'USD'},
'BTG/EUR': {'id': 'BTGEUR', 'symbol': 'BTG/EUR', 'base': 'BTG', 'quote': 'EUR', 'baseId': 'BTG', 'quoteId': 'EUR'},
'BTG/PLN': {'id': 'BTGPLN', 'symbol': 'BTG/PLN', 'base': 'BTG', 'quote': 'PLN', 'baseId': 'BTG', 'quoteId': 'PLN'},
'BTG/BTC': {'id': 'BTGBTC', 'symbol': 'BTG/BTC', 'base': 'BTG', 'quote': 'BTC', 'baseId': 'BTG', 'quoteId': 'BTC'},
'DASH/USD': {'id': 'DASHUSD', 'symbol': 'DASH/USD', 'base': 'DASH', 'quote': 'USD', 'baseId': 'DASH', 'quoteId': 'USD'},
'DASH/EUR': {'id': 'DASHEUR', 'symbol': 'DASH/EUR', 'base': 'DASH', 'quote': 'EUR', 'baseId': 'DASH', 'quoteId': 'EUR'},
'DASH/PLN': {'id': 'DASHPLN', 'symbol': 'DASH/PLN', 'base': 'DASH', 'quote': 'PLN', 'baseId': 'DASH', 'quoteId': 'PLN'},
'DASH/BTC': {'id': 'DASHBTC', 'symbol': 'DASH/BTC', 'base': 'DASH', 'quote': 'BTC', 'baseId': 'DASH', 'quoteId': 'BTC'},
'GAME/USD': {'id': 'GAMEUSD', 'symbol': 'GAME/USD', 'base': 'GAME', 'quote': 'USD', 'baseId': 'GAME', 'quoteId': 'USD'},
'GAME/EUR': {'id': 'GAMEEUR', 'symbol': 'GAME/EUR', 'base': 'GAME', 'quote': 'EUR', 'baseId': 'GAME', 'quoteId': 'EUR'},
'GAME/PLN': {'id': 'GAMEPLN', 'symbol': 'GAME/PLN', 'base': 'GAME', 'quote': 'PLN', 'baseId': 'GAME', 'quoteId': 'PLN'},
'GAME/BTC': {'id': 'GAMEBTC', 'symbol': 'GAME/BTC', 'base': 'GAME', 'quote': 'BTC', 'baseId': 'GAME', 'quoteId': 'BTC'},
'XRP/USD': {'id': 'XRPUSD', 'symbol': 'XRP/USD', 'base': 'XRP', 'quote': 'USD', 'baseId': 'XRP', 'quoteId': 'USD'},
'XRP/EUR': {'id': 'XRPEUR', 'symbol': 'XRP/EUR', 'base': 'XRP', 'quote': 'EUR', 'baseId': 'XRP', 'quoteId': 'EUR'},
'XRP/PLN': {'id': 'XRPPLN', 'symbol': 'XRP/PLN', 'base': 'XRP', 'quote': 'PLN', 'baseId': 'XRP', 'quoteId': 'PLN'},
'XRP/BTC': {'id': 'XRPBTC', 'symbol': 'XRP/BTC', 'base': 'XRP', 'quote': 'BTC', 'baseId': 'XRP', 'quoteId': 'BTC'},
# 'XIN/USD': {'id': 'XINUSD', 'symbol': 'XIN/USD', 'base': 'XIN', 'quote': 'USD', 'baseId': 'XIN', 'quoteId': 'USD'},
# 'XIN/EUR': {'id': 'XINEUR', 'symbol': 'XIN/EUR', 'base': 'XIN', 'quote': 'EUR', 'baseId': 'XIN', 'quoteId': 'EUR'},
# 'XIN/PLN': {'id': 'XINPLN', 'symbol': 'XIN/PLN', 'base': 'XIN', 'quote': 'PLN', 'baseId': 'XIN', 'quoteId': 'PLN'},
'XIN/BTC': {'id': 'XINBTC', 'symbol': 'XIN/BTC', 'base': 'XIN', 'quote': 'BTC', 'baseId': 'XIN', 'quoteId': 'BTC'},
},
'fees': {
'trading': {
'maker': 0.3 / 100,
'taker': 0.0043,
},
'funding': {
'withdraw': {
'BTC': 0.0009,
'LTC': 0.005,
'ETH': 0.00126,
'LSK': 0.2,
'BCH': 0.0006,
'GAME': 0.005,
'DASH': 0.001,
'BTG': 0.0008,
'PLN': 4,
'EUR': 1.5,
},
},
},
'exceptions': {
'400': ExchangeError, # At least one parameter wasn't set
'401': InvalidOrder, # Invalid order type
'402': InvalidOrder, # No orders with specified currencies
'403': InvalidOrder, # Invalid payment currency name
'404': InvalidOrder, # Error. Wrong transaction type
'405': InvalidOrder, # Order with self id doesn't exist
'406': InsufficientFunds, # No enough money or crypto
                # code 407 is not specified in their docs
'408': InvalidOrder, # Invalid currency name
'501': AuthenticationError, # Invalid public key
'502': AuthenticationError, # Invalid sign
'503': InvalidNonce, # Invalid moment parameter. Request time doesn't match current server time
'504': ExchangeError, # Invalid method
'505': AuthenticationError, # Key has no permission for self action
'506': AuthenticationError, # Account locked. Please contact with customer service
# codes 507 and 508 are not specified in their docs
'509': ExchangeError, # The BIC/SWIFT is required for self currency
'510': ExchangeError, # Invalid market name
},
})
async def fetch_balance(self, params={}):
response = await self.privatePostInfo()
if 'balances' in response:
balance = response['balances']
result = {'info': balance}
codes = list(self.currencies.keys())
for i in range(0, len(codes)):
code = codes[i]
currency = self.currencies[code]
id = currency['id']
account = self.account()
if id in balance:
account['free'] = float(balance[id]['available'])
account['used'] = float(balance[id]['locked'])
account['total'] = self.sum(account['free'], account['used'])
result[code] = account
return self.parse_balance(result)
raise ExchangeError(self.id + ' empty balance response ' + self.json(response))
async def fetch_order_book(self, symbol, limit=None, params={}):
orderbook = await self.publicGetIdOrderbook(self.extend({
'id': self.market_id(symbol),
}, params))
return self.parse_order_book(orderbook)
async def fetch_ticker(self, symbol, params={}):
ticker = await self.publicGetIdTicker(self.extend({
'id': self.market_id(symbol),
}, params))
timestamp = self.milliseconds()
baseVolume = self.safe_float(ticker, 'volume')
vwap = self.safe_float(ticker, 'vwap')
quoteVolume = baseVolume * vwap
last = self.safe_float(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'max'),
'low': self.safe_float(ticker, 'min'),
'bid': self.safe_float(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'ask'),
'askVolume': None,
'vwap': vwap,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': self.safe_float(ticker, 'average'),
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def parse_trade(self, trade, market):
timestamp = trade['date'] * 1000
return {
'id': trade['tid'],
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'side': trade['type'],
'price': trade['price'],
'amount': trade['amount'],
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
market = self.market(symbol)
response = await self.publicGetIdTrades(self.extend({
'id': market['id'],
}, params))
return self.parse_trades(response, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
if type != 'limit':
raise ExchangeError(self.id + ' allows limit orders only')
market = self.market(symbol)
return self.privatePostTrade(self.extend({
'type': side,
'currency': market['baseId'],
'amount': amount,
'payment_currency': market['quoteId'],
'rate': price,
}, params))
async def cancel_order(self, id, symbol=None, params={}):
return await self.privatePostCancel({'id': id})
def is_fiat(self, currency):
fiatCurrencies = {
'USD': True,
'EUR': True,
'PLN': True,
}
if currency in fiatCurrencies:
return True
return False
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
method = None
currency = self.currency(code)
request = {
'currency': currency['id'],
'quantity': amount,
}
if self.is_fiat(code):
method = 'privatePostWithdraw'
# request['account'] = params['account'] # they demand an account number
# request['express'] = params['express'] # whatever it means, they don't explain
# request['bic'] = ''
else:
method = 'privatePostTransfer'
if tag is not None:
address += '?dt=' + str(tag)
request['address'] = address
response = await getattr(self, method)(self.extend(request, params))
return {
'info': response,
'id': None,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
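        # Public GET endpoints are plain '<id>/<path>.json' requests with a query string;
        # private POST endpoints send an urlencoded body signed with HMAC-SHA512 via the 'API-Hash' header.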
url = self.urls['api'][api]
if api == 'public':
query = self.omit(params, self.extract_params(path))
url += '/' + self.implode_params(path, params) + '.json'
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
body = self.urlencode(self.extend({
'method': path,
'moment': self.nonce(),
}, params))
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'API-Key': self.apiKey,
'API-Hash': self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512),
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body):
if not isinstance(body, basestring):
return # fallback to default error handler
if len(body) < 2:
return
if (body[0] == '{') or (body[0] == '['):
response = json.loads(body)
if 'code' in response:
#
# bitbay returns the integer 'success': 1 key from their private API
# or an integer 'code' value from 0 to 510 and an error message
#
# {'success': 1, ...}
# {'code': 502, 'message': 'Invalid sign'}
# {'code': 0, 'message': 'offer funds not exceeding minimums'}
#
# 400 At least one parameter wasn't set
# 401 Invalid order type
# 402 No orders with specified currencies
# 403 Invalid payment currency name
# 404 Error. Wrong transaction type
# 405 Order with self id doesn't exist
# 406 No enough money or crypto
# 408 Invalid currency name
# 501 Invalid public key
# 502 Invalid sign
# 503 Invalid moment parameter. Request time doesn't match current server time
# 504 Invalid method
# 505 Key has no permission for self action
# 506 Account locked. Please contact with customer service
# 509 The BIC/SWIFT is required for self currency
# 510 Invalid market name
#
                code = str(response['code'])  # returned as an integer, but self.exceptions is keyed by strings
feedback = self.id + ' ' + self.json(response)
exceptions = self.exceptions
if code in self.exceptions:
raise exceptions[code](feedback)
else:
raise ExchangeError(feedback)
| 50.650146
| 136
| 0.476141
|
from ccxt.async_support.base.exchange import Exchange
try:
basestring
except NameError:
basestring = str
import hashlib
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import InvalidNonce
class bitbay (Exchange):
def describe(self):
return self.deep_extend(super(bitbay, self).describe(), {
'id': 'bitbay',
'name': 'BitBay',
'countries': ['MT', 'EU'],
'rateLimit': 1000,
'has': {
'CORS': True,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766132-978a7bd8-5ece-11e7-9540-bc96d1e9bbb8.jpg',
'www': 'https://bitbay.net',
'api': {
'public': 'https://bitbay.net/API/Public',
'private': 'https://bitbay.net/API/Trading/tradingApi.php',
},
'doc': [
'https://bitbay.net/public-api',
'https://bitbay.net/account/tab-api',
'https://github.com/BitBayNet/API',
],
'fees': 'https://bitbay.net/en/fees',
},
'api': {
'public': {
'get': [
'{id}/all',
'{id}/market',
'{id}/orderbook',
'{id}/ticker',
'{id}/trades',
],
},
'private': {
'post': [
'info',
'trade',
'cancel',
'orderbook',
'orders',
'transfer',
'withdraw',
'history',
'transactions',
],
},
},
'markets': {
'BTC/USD': {'id': 'BTCUSD', 'symbol': 'BTC/USD', 'base': 'BTC', 'quote': 'USD', 'baseId': 'BTC', 'quoteId': 'USD'},
'BTC/EUR': {'id': 'BTCEUR', 'symbol': 'BTC/EUR', 'base': 'BTC', 'quote': 'EUR', 'baseId': 'BTC', 'quoteId': 'EUR'},
'BTC/PLN': {'id': 'BTCPLN', 'symbol': 'BTC/PLN', 'base': 'BTC', 'quote': 'PLN', 'baseId': 'BTC', 'quoteId': 'PLN'},
'LTC/USD': {'id': 'LTCUSD', 'symbol': 'LTC/USD', 'base': 'LTC', 'quote': 'USD', 'baseId': 'LTC', 'quoteId': 'USD'},
'LTC/EUR': {'id': 'LTCEUR', 'symbol': 'LTC/EUR', 'base': 'LTC', 'quote': 'EUR', 'baseId': 'LTC', 'quoteId': 'EUR'},
'LTC/PLN': {'id': 'LTCPLN', 'symbol': 'LTC/PLN', 'base': 'LTC', 'quote': 'PLN', 'baseId': 'LTC', 'quoteId': 'PLN'},
'LTC/BTC': {'id': 'LTCBTC', 'symbol': 'LTC/BTC', 'base': 'LTC', 'quote': 'BTC', 'baseId': 'LTC', 'quoteId': 'BTC'},
'ETH/USD': {'id': 'ETHUSD', 'symbol': 'ETH/USD', 'base': 'ETH', 'quote': 'USD', 'baseId': 'ETH', 'quoteId': 'USD'},
'ETH/EUR': {'id': 'ETHEUR', 'symbol': 'ETH/EUR', 'base': 'ETH', 'quote': 'EUR', 'baseId': 'ETH', 'quoteId': 'EUR'},
'ETH/PLN': {'id': 'ETHPLN', 'symbol': 'ETH/PLN', 'base': 'ETH', 'quote': 'PLN', 'baseId': 'ETH', 'quoteId': 'PLN'},
'ETH/BTC': {'id': 'ETHBTC', 'symbol': 'ETH/BTC', 'base': 'ETH', 'quote': 'BTC', 'baseId': 'ETH', 'quoteId': 'BTC'},
'LSK/USD': {'id': 'LSKUSD', 'symbol': 'LSK/USD', 'base': 'LSK', 'quote': 'USD', 'baseId': 'LSK', 'quoteId': 'USD'},
'LSK/EUR': {'id': 'LSKEUR', 'symbol': 'LSK/EUR', 'base': 'LSK', 'quote': 'EUR', 'baseId': 'LSK', 'quoteId': 'EUR'},
'LSK/PLN': {'id': 'LSKPLN', 'symbol': 'LSK/PLN', 'base': 'LSK', 'quote': 'PLN', 'baseId': 'LSK', 'quoteId': 'PLN'},
'LSK/BTC': {'id': 'LSKBTC', 'symbol': 'LSK/BTC', 'base': 'LSK', 'quote': 'BTC', 'baseId': 'LSK', 'quoteId': 'BTC'},
'BCH/USD': {'id': 'BCCUSD', 'symbol': 'BCH/USD', 'base': 'BCH', 'quote': 'USD', 'baseId': 'BCC', 'quoteId': 'USD'},
'BCH/EUR': {'id': 'BCCEUR', 'symbol': 'BCH/EUR', 'base': 'BCH', 'quote': 'EUR', 'baseId': 'BCC', 'quoteId': 'EUR'},
'BCH/PLN': {'id': 'BCCPLN', 'symbol': 'BCH/PLN', 'base': 'BCH', 'quote': 'PLN', 'baseId': 'BCC', 'quoteId': 'PLN'},
'BCH/BTC': {'id': 'BCCBTC', 'symbol': 'BCH/BTC', 'base': 'BCH', 'quote': 'BTC', 'baseId': 'BCC', 'quoteId': 'BTC'},
'BTG/USD': {'id': 'BTGUSD', 'symbol': 'BTG/USD', 'base': 'BTG', 'quote': 'USD', 'baseId': 'BTG', 'quoteId': 'USD'},
'BTG/EUR': {'id': 'BTGEUR', 'symbol': 'BTG/EUR', 'base': 'BTG', 'quote': 'EUR', 'baseId': 'BTG', 'quoteId': 'EUR'},
'BTG/PLN': {'id': 'BTGPLN', 'symbol': 'BTG/PLN', 'base': 'BTG', 'quote': 'PLN', 'baseId': 'BTG', 'quoteId': 'PLN'},
'BTG/BTC': {'id': 'BTGBTC', 'symbol': 'BTG/BTC', 'base': 'BTG', 'quote': 'BTC', 'baseId': 'BTG', 'quoteId': 'BTC'},
'DASH/USD': {'id': 'DASHUSD', 'symbol': 'DASH/USD', 'base': 'DASH', 'quote': 'USD', 'baseId': 'DASH', 'quoteId': 'USD'},
'DASH/EUR': {'id': 'DASHEUR', 'symbol': 'DASH/EUR', 'base': 'DASH', 'quote': 'EUR', 'baseId': 'DASH', 'quoteId': 'EUR'},
'DASH/PLN': {'id': 'DASHPLN', 'symbol': 'DASH/PLN', 'base': 'DASH', 'quote': 'PLN', 'baseId': 'DASH', 'quoteId': 'PLN'},
'DASH/BTC': {'id': 'DASHBTC', 'symbol': 'DASH/BTC', 'base': 'DASH', 'quote': 'BTC', 'baseId': 'DASH', 'quoteId': 'BTC'},
'GAME/USD': {'id': 'GAMEUSD', 'symbol': 'GAME/USD', 'base': 'GAME', 'quote': 'USD', 'baseId': 'GAME', 'quoteId': 'USD'},
'GAME/EUR': {'id': 'GAMEEUR', 'symbol': 'GAME/EUR', 'base': 'GAME', 'quote': 'EUR', 'baseId': 'GAME', 'quoteId': 'EUR'},
'GAME/PLN': {'id': 'GAMEPLN', 'symbol': 'GAME/PLN', 'base': 'GAME', 'quote': 'PLN', 'baseId': 'GAME', 'quoteId': 'PLN'},
'GAME/BTC': {'id': 'GAMEBTC', 'symbol': 'GAME/BTC', 'base': 'GAME', 'quote': 'BTC', 'baseId': 'GAME', 'quoteId': 'BTC'},
'XRP/USD': {'id': 'XRPUSD', 'symbol': 'XRP/USD', 'base': 'XRP', 'quote': 'USD', 'baseId': 'XRP', 'quoteId': 'USD'},
'XRP/EUR': {'id': 'XRPEUR', 'symbol': 'XRP/EUR', 'base': 'XRP', 'quote': 'EUR', 'baseId': 'XRP', 'quoteId': 'EUR'},
'XRP/PLN': {'id': 'XRPPLN', 'symbol': 'XRP/PLN', 'base': 'XRP', 'quote': 'PLN', 'baseId': 'XRP', 'quoteId': 'PLN'},
'XRP/BTC': {'id': 'XRPBTC', 'symbol': 'XRP/BTC', 'base': 'XRP', 'quote': 'BTC', 'baseId': 'XRP', 'quoteId': 'BTC'},
'XIN/BTC': {'id': 'XINBTC', 'symbol': 'XIN/BTC', 'base': 'XIN', 'quote': 'BTC', 'baseId': 'XIN', 'quoteId': 'BTC'},
},
'fees': {
'trading': {
'maker': 0.3 / 100,
'taker': 0.0043,
},
'funding': {
'withdraw': {
'BTC': 0.0009,
'LTC': 0.005,
'ETH': 0.00126,
'LSK': 0.2,
'BCH': 0.0006,
'GAME': 0.005,
'DASH': 0.001,
'BTG': 0.0008,
'PLN': 4,
'EUR': 1.5,
},
},
},
'exceptions': {
'400': ExchangeError,
'401': InvalidOrder, # Invalid order type
'402': InvalidOrder, # No orders with specified currencies
'403': InvalidOrder, # Invalid payment currency name
'404': InvalidOrder, # Error. Wrong transaction type
'405': InvalidOrder, # Order with self id doesn't exist
'406': InsufficientFunds,
'408': InvalidOrder,
'501': AuthenticationError,
'502': AuthenticationError,
'503': InvalidNonce,
'504': ExchangeError, # Invalid method
'505': AuthenticationError, # Key has no permission for self action
'506': AuthenticationError, # Account locked. Please contact with customer service
# codes 507 and 508 are not specified in their docs
'509': ExchangeError, # The BIC/SWIFT is required for self currency
'510': ExchangeError, # Invalid market name
},
})
async def fetch_balance(self, params={}):
response = await self.privatePostInfo()
if 'balances' in response:
balance = response['balances']
result = {'info': balance}
codes = list(self.currencies.keys())
for i in range(0, len(codes)):
code = codes[i]
currency = self.currencies[code]
id = currency['id']
account = self.account()
if id in balance:
account['free'] = float(balance[id]['available'])
account['used'] = float(balance[id]['locked'])
account['total'] = self.sum(account['free'], account['used'])
result[code] = account
return self.parse_balance(result)
raise ExchangeError(self.id + ' empty balance response ' + self.json(response))
async def fetch_order_book(self, symbol, limit=None, params={}):
orderbook = await self.publicGetIdOrderbook(self.extend({
'id': self.market_id(symbol),
}, params))
return self.parse_order_book(orderbook)
async def fetch_ticker(self, symbol, params={}):
ticker = await self.publicGetIdTicker(self.extend({
'id': self.market_id(symbol),
}, params))
timestamp = self.milliseconds()
baseVolume = self.safe_float(ticker, 'volume')
vwap = self.safe_float(ticker, 'vwap')
quoteVolume = baseVolume * vwap
last = self.safe_float(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'max'),
'low': self.safe_float(ticker, 'min'),
'bid': self.safe_float(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'ask'),
'askVolume': None,
'vwap': vwap,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': self.safe_float(ticker, 'average'),
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def parse_trade(self, trade, market):
timestamp = trade['date'] * 1000
return {
'id': trade['tid'],
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'side': trade['type'],
'price': trade['price'],
'amount': trade['amount'],
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
market = self.market(symbol)
response = await self.publicGetIdTrades(self.extend({
'id': market['id'],
}, params))
return self.parse_trades(response, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
if type != 'limit':
raise ExchangeError(self.id + ' allows limit orders only')
market = self.market(symbol)
return self.privatePostTrade(self.extend({
'type': side,
'currency': market['baseId'],
'amount': amount,
'payment_currency': market['quoteId'],
'rate': price,
}, params))
async def cancel_order(self, id, symbol=None, params={}):
return await self.privatePostCancel({'id': id})
def is_fiat(self, currency):
fiatCurrencies = {
'USD': True,
'EUR': True,
'PLN': True,
}
if currency in fiatCurrencies:
return True
return False
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
method = None
currency = self.currency(code)
request = {
'currency': currency['id'],
'quantity': amount,
}
if self.is_fiat(code):
method = 'privatePostWithdraw'
# request['account'] = params['account'] # they demand an account number
# request['express'] = params['express'] # whatever it means, they don't explain
else:
method = 'privatePostTransfer'
if tag is not None:
address += '?dt=' + str(tag)
request['address'] = address
response = await getattr(self, method)(self.extend(request, params))
return {
'info': response,
'id': None,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api]
if api == 'public':
query = self.omit(params, self.extract_params(path))
url += '/' + self.implode_params(path, params) + '.json'
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
body = self.urlencode(self.extend({
'method': path,
'moment': self.nonce(),
}, params))
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'API-Key': self.apiKey,
'API-Hash': self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512),
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body):
if not isinstance(body, basestring):
return
if len(body) < 2:
return
if (body[0] == '{') or (body[0] == '['):
response = json.loads(body)
if 'code' in response:
# 401 Invalid order type
# 402 No orders with specified currencies
# 403 Invalid payment currency name
# 404 Error. Wrong transaction type
# 405 Order with self id doesn't exist
# 504 Invalid method
# 505 Key has no permission for self action
# 506 Account locked. Please contact with customer service
# 509 The BIC/SWIFT is required for self currency
# 510 Invalid market name
#
                code = str(response['code'])  # returned as an integer, but self.exceptions is keyed by strings
feedback = self.id + ' ' + self.json(response)
exceptions = self.exceptions
if code in self.exceptions:
raise exceptions[code](feedback)
else:
raise ExchangeError(feedback)
| true
| true
|
f7198ae184bcaa5b0b938cc560dc8df6ff0d66d1
| 93,728
|
py
|
Python
|
keras/layers/recurrent.py
|
Duncanswilson/keras
|
32aa192548b6b59bf407e583fbd246ba9f5f5676
|
[
"MIT"
] | 1
|
2017-11-01T19:10:35.000Z
|
2017-11-01T19:10:35.000Z
|
keras/layers/recurrent.py
|
dmaniry/keras
|
32aa192548b6b59bf407e583fbd246ba9f5f5676
|
[
"MIT"
] | null | null | null |
keras/layers/recurrent.py
|
dmaniry/keras
|
32aa192548b6b59bf407e583fbd246ba9f5f5676
|
[
"MIT"
] | 1
|
2019-02-22T03:06:41.000Z
|
2019-02-22T03:06:41.000Z
|
# -*- coding: utf-8 -*-
"""Recurrent layers and their base classes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import warnings
from .. import backend as K
from .. import activations
from .. import initializers
from .. import regularizers
from .. import constraints
from ..engine import Layer
from ..engine import InputSpec
from ..utils.generic_utils import has_arg
# Legacy support.
from ..legacy.layers import Recurrent
from ..legacy import interfaces
class StackedRNNCells(Layer):
"""Wrapper allowing a stack of RNN cells to behave as a single cell.
Used to implement efficient stacked RNNs.
# Arguments
cells: List of RNN cell instances.
# Examples
```python
cells = [
keras.layers.LSTMCell(output_dim),
keras.layers.LSTMCell(output_dim),
keras.layers.LSTMCell(output_dim),
]
inputs = keras.Input((timesteps, input_dim))
x = keras.layers.RNN(cells)(inputs)
```
"""
def __init__(self, cells, **kwargs):
for cell in cells:
if not hasattr(cell, 'call'):
raise ValueError('All cells must have a `call` method. '
'received cells:', cells)
if not hasattr(cell, 'state_size'):
raise ValueError('All cells must have a '
'`state_size` attribute. '
'received cells:', cells)
self.cells = cells
super(StackedRNNCells, self).__init__(**kwargs)
@property
def state_size(self):
# States are a flat list
# in reverse order of the cell stack.
# This allows to preserve the requirement
# `stack.state_size[0] == output_dim`.
# e.g. states of a 2-layer LSTM would be
# `[h2, c2, h1, c1]`
# (assuming one LSTM has states [h, c])
state_size = []
for cell in self.cells[::-1]:
if hasattr(cell.state_size, '__len__'):
state_size += list(cell.state_size)
else:
state_size.append(cell.state_size)
return tuple(state_size)
def call(self, inputs, states, **kwargs):
# Recover per-cell states.
nested_states = []
for cell in self.cells[::-1]:
if hasattr(cell.state_size, '__len__'):
nested_states.append(states[:len(cell.state_size)])
states = states[len(cell.state_size):]
else:
nested_states.append([states[0]])
states = states[1:]
nested_states = nested_states[::-1]
# Call the cells in order and store the returned states.
new_nested_states = []
for cell, states in zip(self.cells, nested_states):
inputs, states = cell.call(inputs, states, **kwargs)
new_nested_states.append(states)
# Format the new states as a flat list
# in reverse cell order.
states = []
for cell_states in new_nested_states[::-1]:
states += cell_states
return inputs, states
def build(self, input_shape):
for cell in self.cells:
if isinstance(cell, Layer):
cell.build(input_shape)
if hasattr(cell.state_size, '__len__'):
output_dim = cell.state_size[0]
else:
output_dim = cell.state_size
input_shape = (input_shape[0], input_shape[1], output_dim)
self.built = True
def get_config(self):
cells = []
for cell in self.cells:
cells.append({'class_name': cell.__class__.__name__,
'config': cell.get_config()})
config = {'cells': cells}
base_config = super(StackedRNNCells, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
from . import deserialize as deserialize_layer
cells = []
for cell_config in config.pop('cells'):
cells.append(deserialize_layer(cell_config,
custom_objects=custom_objects))
return cls(cells, **config)
@property
def trainable_weights(self):
if not self.trainable:
return []
weights = []
for cell in self.cells:
if isinstance(cell, Layer):
weights += cell.trainable_weights
return weights
@property
def non_trainable_weights(self):
weights = []
for cell in self.cells:
if isinstance(cell, Layer):
weights += cell.non_trainable_weights
if not self.trainable:
trainable_weights = []
for cell in self.cells:
if isinstance(cell, Layer):
trainable_weights += cell.trainable_weights
return trainable_weights + weights
return weights
def get_weights(self):
"""Retrieves the weights of the model.
# Returns
A flat list of Numpy arrays.
"""
weights = []
for cell in self.cells:
if isinstance(cell, Layer):
weights += cell.weights
return K.batch_get_value(weights)
def set_weights(self, weights):
"""Sets the weights of the model.
# Arguments
weights: A list of Numpy arrays with shapes and types matching
the output of `model.get_weights()`.
"""
tuples = []
for cell in self.cells:
if isinstance(cell, Layer):
num_param = len(cell.weights)
                cell_weights = weights[:num_param]
                for sw, w in zip(cell.weights, cell_weights):
tuples.append((sw, w))
weights = weights[num_param:]
K.batch_set_value(tuples)
@property
def losses(self):
losses = []
for cell in self.cells:
if isinstance(cell, Layer):
cell_losses = cell.losses
losses += cell_losses
return losses
def get_losses_for(self, inputs=None):
losses = []
for cell in self.cells:
if isinstance(cell, Layer):
cell_losses = cell.get_losses_for(inputs)
losses += cell_losses
return losses
class RNN(Layer):
"""Base class for recurrent layers.
# Arguments
cell: A RNN cell instance. A RNN cell is a class that has:
- a `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- a `state_size` attribute. This can be a single integer
(single state) in which case it is
the size of the recurrent state
(which should be the same as the size of the cell output).
This can also be a list/tuple of integers
(one size per state). In this case, the first entry
(`state_size[0]`) should be the same as
the size of the cell output.
It is also possible for `cell` to be a list of RNN cell instances,
        in which case the cells get stacked one after the other in the RNN,
implementing an efficient stacked RNN.
        return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
input_dim: dimensionality of the input (integer).
This argument (or alternatively,
the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
input_length: Length of input sequences, to be specified
when it is constant.
This argument is required if you are going to connect
`Flatten` then `Dense` layers upstream
(without it, the shape of the dense outputs cannot be computed).
Note that if the recurrent layer is not the first layer
in your model, you would need to specify the input length
at the level of the first layer
(e.g. via the `input_shape` argument)
# Input shape
3D tensor with shape `(batch_size, timesteps, input_dim)`.
# Output shape
- if `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each with shape `(batch_size, units)`.
- if `return_sequences`: 3D tensor with shape
`(batch_size, timesteps, units)`.
- else, 2D tensor with shape `(batch_size, units)`.
# Masking
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.
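        For instance (a minimal sketch; the vocabulary size and dimensions
        below are placeholders):
    ```python
        model = keras.models.Sequential()
        model.add(keras.layers.Embedding(1000, 64, mask_zero=True))
        model.add(keras.layers.LSTM(32))
    ```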
# Note on using statefulness in RNNs
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
if sequential model:
`batch_input_shape=(...)` to the first layer in your model.
else for functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
- specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
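        As a minimal sketch (the array names and shapes are illustrative only):
    ```python
        model = keras.models.Sequential()
        model.add(keras.layers.LSTM(32, stateful=True,
                                    batch_input_shape=(32, 10, 100)))
        model.compile(optimizer='rmsprop', loss='mse')
        # x_train: (num_samples, 10, 100), y_train: (num_samples, 32),
        # where num_samples is a multiple of the batch size (32)
        model.fit(x_train, y_train, batch_size=32, shuffle=False)
        model.reset_states()
    ```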
# Note on specifying the initial state of RNNs
You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`. The value of
`initial_state` should be a tensor or list of tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by
calling `reset_states` with the keyword argument `states`. The value of
`states` should be a numpy array or list of numpy arrays representing
the initial state of the RNN layer.
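        A minimal symbolic sketch (the tensor names are illustrative only):
    ```python
        inputs = keras.Input((None, 10))
        initial_h = keras.Input((32,))
        output = keras.layers.SimpleRNN(32)(inputs, initial_state=[initial_h])
    ```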
# Note on passing external constants to RNNs
You can pass "external" constants to the cell using the `constants`
keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This
requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
# Examples
```python
# First, let's define a RNN Cell, as a layer subclass.
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = K.dot(inputs, self.kernel)
output = h + K.dot(prev_output, self.recurrent_kernel)
return output, [output]
# Let's use this cell in a RNN layer:
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = RNN(cell)
y = layer(x)
# Here's how to use the cell to build a stacked RNN:
cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
x = keras.Input((None, 5))
layer = RNN(cells)
y = layer(x)
```
"""
def __init__(self, cell,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if isinstance(cell, (list, tuple)):
cell = StackedRNNCells(cell)
if not hasattr(cell, 'call'):
raise ValueError('`cell` should have a `call` method. '
'The RNN was passed:', cell)
if not hasattr(cell, 'state_size'):
raise ValueError('The RNN cell should have '
'an attribute `state_size` '
'(tuple of integers, '
'one integer per RNN state).')
super(RNN, self).__init__(**kwargs)
self.cell = cell
self.return_sequences = return_sequences
self.return_state = return_state
self.go_backwards = go_backwards
self.stateful = stateful
self.unroll = unroll
self.supports_masking = True
self.input_spec = [InputSpec(ndim=3)]
self.state_spec = None
self._states = None
self.constants_spec = None
self._num_constants = None
@property
def states(self):
if self._states is None:
if isinstance(self.cell.state_size, int):
num_states = 1
else:
num_states = len(self.cell.state_size)
return [None for _ in range(num_states)]
return self._states
@states.setter
def states(self, states):
self._states = states
def compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
if hasattr(self.cell.state_size, '__len__'):
state_size = self.cell.state_size
else:
state_size = [self.cell.state_size]
output_dim = state_size[0]
if self.return_sequences:
output_shape = (input_shape[0], input_shape[1], output_dim)
else:
output_shape = (input_shape[0], output_dim)
if self.return_state:
state_shape = [(input_shape[0], dim) for dim in state_size]
return [output_shape] + state_shape
else:
return output_shape
def compute_mask(self, inputs, mask):
if isinstance(mask, list):
mask = mask[0]
output_mask = mask if self.return_sequences else None
if self.return_state:
state_mask = [None for _ in self.states]
return [output_mask] + state_mask
else:
return output_mask
def build(self, input_shape):
# Note input_shape will be list of shapes of initial states and
# constants if these are passed in __call__.
if self._num_constants is not None:
constants_shape = input_shape[-self._num_constants:]
else:
constants_shape = None
if isinstance(input_shape, list):
input_shape = input_shape[0]
batch_size = input_shape[0] if self.stateful else None
input_dim = input_shape[-1]
self.input_spec[0] = InputSpec(shape=(batch_size, None, input_dim))
# allow cell (if layer) to build before we set or validate state_spec
if isinstance(self.cell, Layer):
step_input_shape = (input_shape[0],) + input_shape[2:]
if constants_shape is not None:
self.cell.build([step_input_shape] + constants_shape)
else:
self.cell.build(step_input_shape)
# set or validate state_spec
if hasattr(self.cell.state_size, '__len__'):
state_size = list(self.cell.state_size)
else:
state_size = [self.cell.state_size]
if self.state_spec is not None:
# initial_state was passed in call, check compatibility
if [spec.shape[-1] for spec in self.state_spec] != state_size:
raise ValueError(
'An `initial_state` was passed that is not compatible with '
'`cell.state_size`. Received `state_spec`={}; '
'however `cell.state_size` is '
'{}'.format(self.state_spec, self.cell.state_size))
else:
self.state_spec = [InputSpec(shape=(None, dim))
for dim in state_size]
if self.stateful:
self.reset_states()
def get_initial_state(self, inputs):
# build an all-zero tensor of shape (samples, output_dim)
initial_state = K.zeros_like(inputs) # (samples, timesteps, input_dim)
initial_state = K.sum(initial_state, axis=(1, 2)) # (samples,)
initial_state = K.expand_dims(initial_state) # (samples, 1)
if hasattr(self.cell.state_size, '__len__'):
return [K.tile(initial_state, [1, dim])
for dim in self.cell.state_size]
else:
return [K.tile(initial_state, [1, self.cell.state_size])]
def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
inputs, initial_state, constants = self._standardize_args(
inputs, initial_state, constants)
if initial_state is None and constants is None:
return super(RNN, self).__call__(inputs, **kwargs)
# If any of `initial_state` or `constants` are specified and are Keras
# tensors, then add them to the inputs and temporarily modify the
# input_spec to include them.
additional_inputs = []
additional_specs = []
if initial_state is not None:
kwargs['initial_state'] = initial_state
additional_inputs += initial_state
self.state_spec = [InputSpec(shape=K.int_shape(state))
for state in initial_state]
additional_specs += self.state_spec
if constants is not None:
kwargs['constants'] = constants
additional_inputs += constants
self.constants_spec = [InputSpec(shape=K.int_shape(constant))
for constant in constants]
self._num_constants = len(constants)
additional_specs += self.constants_spec
# at this point additional_inputs cannot be empty
is_keras_tensor = hasattr(additional_inputs[0], '_keras_history')
for tensor in additional_inputs:
if hasattr(tensor, '_keras_history') != is_keras_tensor:
raise ValueError('The initial state or constants of an RNN'
' layer cannot be specified with a mix of'
' Keras tensors and non-Keras tensors')
if is_keras_tensor:
# Compute the full input spec, including state and constants
full_input = [inputs] + additional_inputs
full_input_spec = self.input_spec + additional_specs
# Perform the call with temporarily replaced input_spec
original_input_spec = self.input_spec
self.input_spec = full_input_spec
output = super(RNN, self).__call__(full_input, **kwargs)
self.input_spec = original_input_spec
return output
else:
return super(RNN, self).__call__(inputs, **kwargs)
def call(self,
inputs,
mask=None,
training=None,
initial_state=None,
constants=None):
# input shape: `(samples, time (padded with zeros), input_dim)`
# note that the .build() method of subclasses MUST define
# self.input_spec and self.state_spec with complete input shapes.
if isinstance(inputs, list):
inputs = inputs[0]
if initial_state is not None:
pass
elif self.stateful:
initial_state = self.states
else:
initial_state = self.get_initial_state(inputs)
if isinstance(mask, list):
mask = mask[0]
if len(initial_state) != len(self.states):
raise ValueError('Layer has ' + str(len(self.states)) +
' states but was passed ' +
str(len(initial_state)) +
' initial states.')
input_shape = K.int_shape(inputs)
timesteps = input_shape[1]
if self.unroll and timesteps in [None, 1]:
raise ValueError('Cannot unroll a RNN if the '
'time dimension is undefined or equal to 1. \n'
'- If using a Sequential model, '
'specify the time dimension by passing '
'an `input_shape` or `batch_input_shape` '
'argument to your first layer. If your '
'first layer is an Embedding, you can '
'also use the `input_length` argument.\n'
'- If using the functional API, specify '
'the time dimension by passing a `shape` '
'or `batch_shape` argument to your Input layer.')
kwargs = {}
if has_arg(self.cell.call, 'training'):
kwargs['training'] = training
if constants:
if not has_arg(self.cell.call, 'constants'):
raise ValueError('RNN cell does not support constants')
def step(inputs, states):
constants = states[-self._num_constants:]
states = states[:-self._num_constants]
return self.cell.call(inputs, states, constants=constants,
**kwargs)
else:
def step(inputs, states):
return self.cell.call(inputs, states, **kwargs)
last_output, outputs, states = K.rnn(step,
inputs,
initial_state,
constants=constants,
go_backwards=self.go_backwards,
mask=mask,
unroll=self.unroll,
input_length=timesteps)
if self.stateful:
updates = []
for i in range(len(states)):
updates.append((self.states[i], states[i]))
self.add_update(updates, inputs)
if self.return_sequences:
output = outputs
else:
output = last_output
# Properly set learning phase
if getattr(last_output, '_uses_learning_phase', False):
output._uses_learning_phase = True
for state in states:
state._uses_learning_phase = True
if self.return_state:
if not isinstance(states, (list, tuple)):
states = [states]
else:
states = list(states)
return [output] + states
else:
return output
def _standardize_args(self, inputs, initial_state, constants):
"""Standardize `__call__` to a single list of tensor inputs.
When running a model loaded from file, the input tensors
`initial_state` and `constants` can be passed to `RNN.__call__` as part
of `inputs` instead of by the dedicated keyword arguments. This method
makes sure the arguments are separated and that `initial_state` and
`constants` are lists of tensors (or None).
# Arguments
inputs: tensor or list/tuple of tensors
initial_state: tensor or list of tensors or None
constants: tensor or list of tensors or None
# Returns
inputs: tensor
initial_state: list of tensors or None
constants: list of tensors or None
"""
if isinstance(inputs, list):
assert initial_state is None and constants is None
if self._num_constants is not None:
constants = inputs[-self._num_constants:]
inputs = inputs[:-self._num_constants]
if len(inputs) > 1:
initial_state = inputs[1:]
inputs = inputs[0]
def to_list_or_none(x):
if x is None or isinstance(x, list):
return x
if isinstance(x, tuple):
return list(x)
return [x]
initial_state = to_list_or_none(initial_state)
constants = to_list_or_none(constants)
return inputs, initial_state, constants
def reset_states(self, states=None):
if not self.stateful:
raise AttributeError('Layer must be stateful.')
batch_size = self.input_spec[0].shape[0]
if not batch_size:
raise ValueError('If a RNN is stateful, it needs to know '
'its batch size. Specify the batch size '
'of your input tensors: \n'
'- If using a Sequential model, '
'specify the batch size by passing '
'a `batch_input_shape` '
'argument to your first layer.\n'
'- If using the functional API, specify '
'the batch size by passing a '
'`batch_shape` argument to your Input layer.')
# initialize state if None
if self.states[0] is None:
if hasattr(self.cell.state_size, '__len__'):
self.states = [K.zeros((batch_size, dim))
for dim in self.cell.state_size]
else:
self.states = [K.zeros((batch_size, self.cell.state_size))]
elif states is None:
if hasattr(self.cell.state_size, '__len__'):
for state, dim in zip(self.states, self.cell.state_size):
K.set_value(state, np.zeros((batch_size, dim)))
else:
K.set_value(self.states[0],
np.zeros((batch_size, self.cell.state_size)))
else:
if not isinstance(states, (list, tuple)):
states = [states]
if len(states) != len(self.states):
raise ValueError('Layer ' + self.name + ' expects ' +
str(len(self.states)) + ' states, '
'but it received ' + str(len(states)) +
' state values. Input received: ' +
str(states))
for index, (value, state) in enumerate(zip(states, self.states)):
if hasattr(self.cell.state_size, '__len__'):
dim = self.cell.state_size[index]
else:
dim = self.cell.state_size
if value.shape != (batch_size, dim):
raise ValueError('State ' + str(index) +
' is incompatible with layer ' +
self.name + ': expected shape=' +
str((batch_size, dim)) +
', found shape=' + str(value.shape))
# TODO: consider batch calls to `set_value`.
K.set_value(state, value)
def get_config(self):
config = {'return_sequences': self.return_sequences,
'return_state': self.return_state,
'go_backwards': self.go_backwards,
'stateful': self.stateful,
'unroll': self.unroll}
if self._num_constants is not None:
config['num_constants'] = self._num_constants
cell_config = self.cell.get_config()
config['cell'] = {'class_name': self.cell.__class__.__name__,
'config': cell_config}
base_config = super(RNN, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
from . import deserialize as deserialize_layer
cell = deserialize_layer(config.pop('cell'),
custom_objects=custom_objects)
num_constants = config.pop('num_constants', None)
layer = cls(cell, **config)
layer._num_constants = num_constants
return layer
@property
def trainable_weights(self):
if not self.trainable:
return []
if isinstance(self.cell, Layer):
return self.cell.trainable_weights
return []
@property
def non_trainable_weights(self):
if isinstance(self.cell, Layer):
if not self.trainable:
return self.cell.weights
return self.cell.non_trainable_weights
return []
@property
def losses(self):
if isinstance(self.cell, Layer):
return self.cell.losses
return []
def get_losses_for(self, inputs=None):
if isinstance(self.cell, Layer):
cell_losses = self.cell.get_losses_for(inputs)
return cell_losses + super(RNN, self).get_losses_for(inputs)
return super(RNN, self).get_losses_for(inputs)
class SimpleRNNCell(Layer):
"""Cell class for SimpleRNN.
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(see [activations](../activations.md)).
If you pass None, no activation is applied
            (i.e. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
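    # Example
        A minimal usage sketch (the cell is wrapped in an `RNN` layer; the
        input dimensions are placeholders):
    ```python
        cell = SimpleRNNCell(32)
        x = keras.Input((None, 5))
        y = keras.layers.RNN(cell)(x)
    ```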
"""
def __init__(self, units,
activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(SimpleRNNCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_size = self.units
self._dropout_mask = None
self._recurrent_dropout_mask = None
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.units,),
name='bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def call(self, inputs, states, training=None):
prev_output = states[0]
if 0 < self.dropout < 1 and self._dropout_mask is None:
self._dropout_mask = _generate_dropout_mask(
_generate_dropout_ones(inputs, K.shape(inputs)[-1]),
self.dropout,
training=training)
if (0 < self.recurrent_dropout < 1 and
self._recurrent_dropout_mask is None):
self._recurrent_dropout_mask = _generate_dropout_mask(
_generate_dropout_ones(inputs, self.units),
self.recurrent_dropout,
training=training)
dp_mask = self._dropout_mask
rec_dp_mask = self._recurrent_dropout_mask
if dp_mask is not None:
h = K.dot(inputs * dp_mask, self.kernel)
else:
h = K.dot(inputs, self.kernel)
if self.bias is not None:
h = K.bias_add(h, self.bias)
if rec_dp_mask is not None:
prev_output *= rec_dp_mask
output = h + K.dot(prev_output, self.recurrent_kernel)
if self.activation is not None:
output = self.activation(output)
# Properly set learning phase on output tensor.
if 0 < self.dropout + self.recurrent_dropout:
if training is None:
output._uses_learning_phase = True
return output, [output]
def get_config(self):
config = {'units': self.units,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout}
base_config = super(SimpleRNNCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
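# --- Hedged usage sketch (added for illustration; not part of the original
# module). It shows how SimpleRNNCell can be wrapped by the generic RNN layer
# defined earlier in this file. The input shape (None timesteps, 8 features)
# and the optimizer/loss choices are assumptions, not fixed by this code.
def _example_simple_rnn_cell():
    from keras.models import Sequential  # lazy import to avoid import cycles
    cell = SimpleRNNCell(32, dropout=0.1, recurrent_dropout=0.1)
    model = Sequential()
    model.add(RNN(cell, input_shape=(None, 8)))  # output shape: (batch, 32)
    model.compile(optimizer='rmsprop', loss='mse')
    return model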
class SimpleRNN(RNN):
"""Fully-connected RNN where the output is to be fed back to input.
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(see [activations](../activations.md)).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
        return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
            Unrolling can speed up an RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
"""
@interfaces.legacy_recurrent_support
def __init__(self, units,
activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if 'implementation' in kwargs:
kwargs.pop('implementation')
warnings.warn('The `implementation` argument '
'in `SimpleRNN` has been deprecated. '
'Please remove it from your layer call.')
if K.backend() == 'theano':
warnings.warn(
'RNN dropout is no longer supported with the Theano backend '
'due to technical limitations. '
'You can either set `dropout` and `recurrent_dropout` to 0, '
'or use the TensorFlow backend.')
dropout = 0.
recurrent_dropout = 0.
cell = SimpleRNNCell(units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout)
super(SimpleRNN, self).__init__(cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
def call(self, inputs, mask=None, training=None, initial_state=None):
return super(SimpleRNN, self).call(inputs,
mask=mask,
training=training,
initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
def get_config(self):
config = {'units': self.units,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout}
base_config = super(SimpleRNN, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config:
config.pop('implementation')
return cls(**config)
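# --- Hedged usage sketch (added for illustration; not part of the original
# module). Two SimpleRNN layers are stacked: the first returns the full
# sequence so the second receives one vector per timestep. Layer sizes and
# the (10, 16) input shape are assumptions.
def _example_simple_rnn_stack():
    from keras.models import Sequential  # lazy import
    model = Sequential()
    model.add(SimpleRNN(64, return_sequences=True, input_shape=(10, 16)))
    model.add(SimpleRNN(32))  # only the last output: shape (batch, 32)
    model.compile(optimizer='adam', loss='mse')
    return model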
class GRUCell(Layer):
"""Cell class for the GRU layer.
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(see [activations](../activations.md)).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step
(see [activations](../activations.md)).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
"""
def __init__(self, units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
**kwargs):
super(GRUCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.implementation = implementation
self.state_size = self.units
self._dropout_mask = None
self._recurrent_dropout_mask = None
def build(self, input_shape):
input_dim = input_shape[-1]
self.kernel = self.add_weight(shape=(input_dim, self.units * 3),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 3),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.units * 3,),
name='bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.kernel_z = self.kernel[:, :self.units]
self.recurrent_kernel_z = self.recurrent_kernel[:, :self.units]
self.kernel_r = self.kernel[:, self.units: self.units * 2]
self.recurrent_kernel_r = self.recurrent_kernel[:,
self.units:
self.units * 2]
self.kernel_h = self.kernel[:, self.units * 2:]
self.recurrent_kernel_h = self.recurrent_kernel[:, self.units * 2:]
if self.use_bias:
self.bias_z = self.bias[:self.units]
self.bias_r = self.bias[self.units: self.units * 2]
self.bias_h = self.bias[self.units * 2:]
else:
self.bias_z = None
self.bias_r = None
self.bias_h = None
self.built = True
def call(self, inputs, states, training=None):
h_tm1 = states[0] # previous memory
if 0 < self.dropout < 1 and self._dropout_mask is None:
self._dropout_mask = _generate_dropout_mask(
_generate_dropout_ones(inputs, K.shape(inputs)[-1]),
self.dropout,
training=training,
count=3)
if (0 < self.recurrent_dropout < 1 and
self._recurrent_dropout_mask is None):
self._recurrent_dropout_mask = _generate_dropout_mask(
_generate_dropout_ones(inputs, self.units),
self.recurrent_dropout,
training=training,
count=3)
# dropout matrices for input units
dp_mask = self._dropout_mask
# dropout matrices for recurrent units
rec_dp_mask = self._recurrent_dropout_mask
if self.implementation == 1:
if 0. < self.dropout < 1.:
inputs_z = inputs * dp_mask[0]
inputs_r = inputs * dp_mask[1]
inputs_h = inputs * dp_mask[2]
else:
inputs_z = inputs
inputs_r = inputs
inputs_h = inputs
x_z = K.dot(inputs_z, self.kernel_z)
x_r = K.dot(inputs_r, self.kernel_r)
x_h = K.dot(inputs_h, self.kernel_h)
if self.use_bias:
x_z = K.bias_add(x_z, self.bias_z)
x_r = K.bias_add(x_r, self.bias_r)
x_h = K.bias_add(x_h, self.bias_h)
if 0. < self.recurrent_dropout < 1.:
h_tm1_z = h_tm1 * rec_dp_mask[0]
h_tm1_r = h_tm1 * rec_dp_mask[1]
h_tm1_h = h_tm1 * rec_dp_mask[2]
else:
h_tm1_z = h_tm1
h_tm1_r = h_tm1
h_tm1_h = h_tm1
z = self.recurrent_activation(x_z + K.dot(h_tm1_z,
self.recurrent_kernel_z))
r = self.recurrent_activation(x_r + K.dot(h_tm1_r,
self.recurrent_kernel_r))
hh = self.activation(x_h + K.dot(r * h_tm1_h,
self.recurrent_kernel_h))
else:
if 0. < self.dropout < 1.:
inputs *= dp_mask[0]
matrix_x = K.dot(inputs, self.kernel)
if self.use_bias:
matrix_x = K.bias_add(matrix_x, self.bias)
if 0. < self.recurrent_dropout < 1.:
h_tm1 *= rec_dp_mask[0]
matrix_inner = K.dot(h_tm1,
self.recurrent_kernel[:, :2 * self.units])
x_z = matrix_x[:, :self.units]
x_r = matrix_x[:, self.units: 2 * self.units]
recurrent_z = matrix_inner[:, :self.units]
recurrent_r = matrix_inner[:, self.units: 2 * self.units]
z = self.recurrent_activation(x_z + recurrent_z)
r = self.recurrent_activation(x_r + recurrent_r)
x_h = matrix_x[:, 2 * self.units:]
recurrent_h = K.dot(r * h_tm1,
self.recurrent_kernel[:, 2 * self.units:])
hh = self.activation(x_h + recurrent_h)
h = z * h_tm1 + (1 - z) * hh
if 0 < self.dropout + self.recurrent_dropout:
if training is None:
h._uses_learning_phase = True
return h, [h]
def get_config(self):
config = {'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout,
'implementation': self.implementation}
base_config = super(GRUCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
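# --- Hedged usage sketch (added for illustration; not part of the original
# module). GRUCell keeps a single state tensor (state_size == units). Both
# `implementation` modes compute the same recurrence; they only differ in how
# the dot products are batched. Shapes and sizes below are assumptions.
def _example_gru_cell(implementation=2):
    from keras.models import Sequential  # lazy import
    cell = GRUCell(16, implementation=implementation)
    model = Sequential()
    model.add(RNN(cell, input_shape=(20, 4)))  # 20 timesteps, 4 features
    model.compile(optimizer='rmsprop', loss='mse')
    return model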
class GRU(RNN):
"""Gated Recurrent Unit - Cho et al. 2014.
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(see [activations](../activations.md)).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step
(see [activations](../activations.md)).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
        return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
            Unrolling can speed up an RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
# References
- [On the Properties of Neural Machine Translation: Encoder-Decoder Approaches](https://arxiv.org/abs/1409.1259)
- [Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling](http://arxiv.org/abs/1412.3555v1)
- [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
"""
@interfaces.legacy_recurrent_support
def __init__(self, units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if implementation == 0:
warnings.warn('`implementation=0` has been deprecated, '
                          'and now defaults to `implementation=1`. '
'Please update your layer call.')
if K.backend() == 'theano':
warnings.warn(
'RNN dropout is no longer supported with the Theano backend '
'due to technical limitations. '
'You can either set `dropout` and `recurrent_dropout` to 0, '
'or use the TensorFlow backend.')
dropout = 0.
recurrent_dropout = 0.
cell = GRUCell(units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation)
super(GRU, self).__init__(cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
def call(self, inputs, mask=None, training=None, initial_state=None):
return super(GRU, self).call(inputs,
mask=mask,
training=training,
initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
@property
def implementation(self):
return self.cell.implementation
def get_config(self):
config = {'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout,
'implementation': self.implementation}
base_config = super(GRU, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config and config['implementation'] == 0:
config['implementation'] = 1
return cls(**config)
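# --- Hedged usage sketch (added for illustration; not part of the original
# module). With `return_state=True` the GRU layer returns the output followed
# by its final hidden state, which is handy for seeding a decoder. The (30, 8)
# input shape and 32 units are assumptions.
def _example_gru_return_state():
    from keras.layers import Input  # lazy imports
    from keras.models import Model
    inputs = Input(shape=(30, 8))
    outputs, state_h = GRU(32, return_state=True)(inputs)
    return Model(inputs, [outputs, state_h])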
class LSTMCell(Layer):
"""Cell class for the LSTM layer.
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(see [activations](../activations.md)).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step
(see [activations](../activations.md)).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
"""
def __init__(self, units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
**kwargs):
super(LSTMCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.implementation = implementation
self.state_size = (self.units, self.units)
self._dropout_mask = None
self._recurrent_dropout_mask = None
def build(self, input_shape):
input_dim = input_shape[-1]
self.kernel = self.add_weight(shape=(input_dim, self.units * 4),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 4),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
if self.unit_forget_bias:
def bias_initializer(_, *args, **kwargs):
return K.concatenate([
self.bias_initializer((self.units,), *args, **kwargs),
initializers.Ones()((self.units,), *args, **kwargs),
self.bias_initializer((self.units * 2,), *args, **kwargs),
])
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(shape=(self.units * 4,),
name='bias',
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.kernel_i = self.kernel[:, :self.units]
self.kernel_f = self.kernel[:, self.units: self.units * 2]
self.kernel_c = self.kernel[:, self.units * 2: self.units * 3]
self.kernel_o = self.kernel[:, self.units * 3:]
self.recurrent_kernel_i = self.recurrent_kernel[:, :self.units]
self.recurrent_kernel_f = self.recurrent_kernel[:, self.units: self.units * 2]
self.recurrent_kernel_c = self.recurrent_kernel[:, self.units * 2: self.units * 3]
self.recurrent_kernel_o = self.recurrent_kernel[:, self.units * 3:]
if self.use_bias:
self.bias_i = self.bias[:self.units]
self.bias_f = self.bias[self.units: self.units * 2]
self.bias_c = self.bias[self.units * 2: self.units * 3]
self.bias_o = self.bias[self.units * 3:]
else:
self.bias_i = None
self.bias_f = None
self.bias_c = None
self.bias_o = None
self.built = True
def call(self, inputs, states, training=None):
if 0 < self.dropout < 1 and self._dropout_mask is None:
self._dropout_mask = _generate_dropout_mask(
_generate_dropout_ones(inputs, K.shape(inputs)[-1]),
self.dropout,
training=training,
count=4)
if (0 < self.recurrent_dropout < 1 and
self._recurrent_dropout_mask is None):
self._recurrent_dropout_mask = _generate_dropout_mask(
_generate_dropout_ones(inputs, self.units),
self.recurrent_dropout,
training=training,
count=4)
# dropout matrices for input units
dp_mask = self._dropout_mask
# dropout matrices for recurrent units
rec_dp_mask = self._recurrent_dropout_mask
h_tm1 = states[0] # previous memory state
c_tm1 = states[1] # previous carry state
if self.implementation == 1:
if 0 < self.dropout < 1.:
inputs_i = inputs * dp_mask[0]
inputs_f = inputs * dp_mask[1]
inputs_c = inputs * dp_mask[2]
inputs_o = inputs * dp_mask[3]
else:
inputs_i = inputs
inputs_f = inputs
inputs_c = inputs
inputs_o = inputs
x_i = K.dot(inputs_i, self.kernel_i)
x_f = K.dot(inputs_f, self.kernel_f)
x_c = K.dot(inputs_c, self.kernel_c)
x_o = K.dot(inputs_o, self.kernel_o)
if self.use_bias:
x_i = K.bias_add(x_i, self.bias_i)
x_f = K.bias_add(x_f, self.bias_f)
x_c = K.bias_add(x_c, self.bias_c)
x_o = K.bias_add(x_o, self.bias_o)
if 0 < self.recurrent_dropout < 1.:
h_tm1_i = h_tm1 * rec_dp_mask[0]
h_tm1_f = h_tm1 * rec_dp_mask[1]
h_tm1_c = h_tm1 * rec_dp_mask[2]
h_tm1_o = h_tm1 * rec_dp_mask[3]
else:
h_tm1_i = h_tm1
h_tm1_f = h_tm1
h_tm1_c = h_tm1
h_tm1_o = h_tm1
i = self.recurrent_activation(x_i + K.dot(h_tm1_i,
self.recurrent_kernel_i))
f = self.recurrent_activation(x_f + K.dot(h_tm1_f,
self.recurrent_kernel_f))
c = f * c_tm1 + i * self.activation(x_c + K.dot(h_tm1_c,
self.recurrent_kernel_c))
o = self.recurrent_activation(x_o + K.dot(h_tm1_o,
self.recurrent_kernel_o))
else:
if 0. < self.dropout < 1.:
inputs *= dp_mask[0]
z = K.dot(inputs, self.kernel)
if 0. < self.recurrent_dropout < 1.:
h_tm1 *= rec_dp_mask[0]
z += K.dot(h_tm1, self.recurrent_kernel)
if self.use_bias:
z = K.bias_add(z, self.bias)
z0 = z[:, :self.units]
z1 = z[:, self.units: 2 * self.units]
z2 = z[:, 2 * self.units: 3 * self.units]
z3 = z[:, 3 * self.units:]
i = self.recurrent_activation(z0)
f = self.recurrent_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.recurrent_activation(z3)
h = o * self.activation(c)
if 0 < self.dropout + self.recurrent_dropout:
if training is None:
h._uses_learning_phase = True
return h, [h, c]
def get_config(self):
config = {'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout,
'implementation': self.implementation}
base_config = super(LSTMCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
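# --- Hedged usage sketch (added for illustration; not part of the original
# module). LSTMCell carries two states per sample (hidden state h and carry
# state c), so `state_size` is the pair (units, units). The snippet runs a
# single step on placeholder tensors; all shapes are assumptions.
def _example_lstm_cell_step():
    cell = LSTMCell(8)
    cell.build((None, 4))                # input feature dimension = 4
    x = K.placeholder(shape=(None, 4))   # one timestep of input
    h = K.placeholder(shape=(None, 8))   # previous hidden state
    c = K.placeholder(shape=(None, 8))   # previous carry state
    output, new_states = cell.call(x, [h, c])
    return output, new_states            # new_states == [h_new, c_new]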
class LSTM(RNN):
"""Long-Short Term Memory layer - Hochreiter 1997.
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(see [activations](../activations.md)).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step
(see [activations](../activations.md)).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
        return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
            Unrolling can speed up an RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
# References
- [Long short-term memory](http://www.bioinf.jku.at/publications/older/2604.pdf) (original 1997 paper)
- [Learning to forget: Continual prediction with LSTM](http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015)
- [Supervised sequence labeling with recurrent neural networks](http://www.cs.toronto.edu/~graves/preprint.pdf)
- [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
"""
@interfaces.legacy_recurrent_support
def __init__(self, units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if implementation == 0:
warnings.warn('`implementation=0` has been deprecated, '
                          'and now defaults to `implementation=1`. '
'Please update your layer call.')
if K.backend() == 'theano':
warnings.warn(
'RNN dropout is no longer supported with the Theano backend '
'due to technical limitations. '
'You can either set `dropout` and `recurrent_dropout` to 0, '
'or use the TensorFlow backend.')
dropout = 0.
recurrent_dropout = 0.
cell = LSTMCell(units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
unit_forget_bias=unit_forget_bias,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation)
super(LSTM, self).__init__(cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
def call(self, inputs, mask=None, training=None, initial_state=None):
return super(LSTM, self).call(inputs,
mask=mask,
training=training,
initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def unit_forget_bias(self):
return self.cell.unit_forget_bias
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
@property
def implementation(self):
return self.cell.implementation
def get_config(self):
config = {'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout,
'implementation': self.implementation}
base_config = super(LSTM, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config and config['implementation'] == 0:
config['implementation'] = 1
return cls(**config)
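# --- Hedged usage sketch (added for illustration; not part of the original
# module). A stateful LSTM needs a fixed batch size so the layer can keep one
# state vector per sample across batches; `reset_states()` clears them between
# independent sequences. Batch size, timesteps and features are assumptions.
def _example_stateful_lstm():
    from keras.models import Sequential  # lazy import
    model = Sequential()
    model.add(LSTM(32, stateful=True, batch_input_shape=(8, 10, 4)))
    model.compile(optimizer='rmsprop', loss='mse')
    # ... after fitting on consecutive chunks of the same sequences:
    model.reset_states()  # clear the carried-over h and c states
    return model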
def _generate_dropout_ones(inputs, dims):
    # Currently, CNTK can't instantiate `ones` with symbolic shapes.
    # Will update workaround once CNTK supports it.
if K.backend() == 'cntk':
ones = K.ones_like(K.reshape(inputs[:, 0], (-1, 1)))
return K.tile(ones, (1, dims))
else:
return K.ones((K.shape(inputs)[0], dims))
def _generate_dropout_mask(ones, rate, training=None, count=1):
def dropped_inputs():
return K.dropout(ones, rate)
if count > 1:
return [K.in_train_phase(
dropped_inputs,
ones,
training=training) for _ in range(count)]
return K.in_train_phase(
dropped_inputs,
ones,
training=training)
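# --- Hedged usage sketch (added for illustration; not part of the original
# module). The two helpers above build dropout masks that are generated once
# and reused at every timestep (the "variational" recurrent dropout of Gal &
# Ghahramani). Requesting count=4 mirrors what LSTMCell does, one mask per
# gate; the feature size of 3 is an assumption.
def _example_dropout_masks():
    step_input = K.placeholder(shape=(None, 3))   # input for one timestep
    ones = _generate_dropout_ones(step_input, 3)  # (batch, 3) tensor of ones
    masks = _generate_dropout_mask(ones, 0.2, training=True, count=4)
    return masks                                  # list of 4 mask tensors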
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import warnings
from .. import backend as K
from .. import activations
from .. import initializers
from .. import regularizers
from .. import constraints
from ..engine import Layer
from ..engine import InputSpec
from ..utils.generic_utils import has_arg
from ..legacy.layers import Recurrent
from ..legacy import interfaces
class StackedRNNCells(Layer):
def __init__(self, cells, **kwargs):
for cell in cells:
if not hasattr(cell, 'call'):
raise ValueError('All cells must have a `call` method. '
                                 'Received cells:', cells)
if not hasattr(cell, 'state_size'):
raise ValueError('All cells must have a '
'`state_size` attribute. '
                                 'Received cells:', cells)
self.cells = cells
super(StackedRNNCells, self).__init__(**kwargs)
@property
def state_size(self):
state_size = []
for cell in self.cells[::-1]:
if hasattr(cell.state_size, '__len__'):
state_size += list(cell.state_size)
else:
state_size.append(cell.state_size)
return tuple(state_size)
def call(self, inputs, states, **kwargs):
nested_states = []
for cell in self.cells[::-1]:
if hasattr(cell.state_size, '__len__'):
nested_states.append(states[:len(cell.state_size)])
states = states[len(cell.state_size):]
else:
nested_states.append([states[0]])
states = states[1:]
nested_states = nested_states[::-1]
new_nested_states = []
for cell, states in zip(self.cells, nested_states):
inputs, states = cell.call(inputs, states, **kwargs)
new_nested_states.append(states)
states = []
for cell_states in new_nested_states[::-1]:
states += cell_states
return inputs, states
def build(self, input_shape):
for cell in self.cells:
if isinstance(cell, Layer):
cell.build(input_shape)
if hasattr(cell.state_size, '__len__'):
output_dim = cell.state_size[0]
else:
output_dim = cell.state_size
input_shape = (input_shape[0], input_shape[1], output_dim)
self.built = True
def get_config(self):
cells = []
for cell in self.cells:
cells.append({'class_name': cell.__class__.__name__,
'config': cell.get_config()})
config = {'cells': cells}
base_config = super(StackedRNNCells, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
from . import deserialize as deserialize_layer
cells = []
for cell_config in config.pop('cells'):
cells.append(deserialize_layer(cell_config,
custom_objects=custom_objects))
return cls(cells, **config)
@property
def trainable_weights(self):
if not self.trainable:
return []
weights = []
for cell in self.cells:
if isinstance(cell, Layer):
weights += cell.trainable_weights
return weights
@property
def non_trainable_weights(self):
weights = []
for cell in self.cells:
if isinstance(cell, Layer):
weights += cell.non_trainable_weights
if not self.trainable:
trainable_weights = []
for cell in self.cells:
if isinstance(cell, Layer):
trainable_weights += cell.trainable_weights
return trainable_weights + weights
return weights
def get_weights(self):
weights = []
for cell in self.cells:
if isinstance(cell, Layer):
weights += cell.weights
return K.batch_get_value(weights)
def set_weights(self, weights):
tuples = []
for cell in self.cells:
if isinstance(cell, Layer):
num_param = len(cell.weights)
weights = weights[:num_param]
for sw, w in zip(cell.weights, weights):
tuples.append((sw, w))
weights = weights[num_param:]
K.batch_set_value(tuples)
@property
def losses(self):
losses = []
for cell in self.cells:
if isinstance(cell, Layer):
cell_losses = cell.losses
losses += cell_losses
return losses
def get_losses_for(self, inputs=None):
losses = []
for cell in self.cells:
if isinstance(cell, Layer):
cell_losses = cell.get_losses_for(inputs)
losses += cell_losses
return losses
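# --- Hedged usage sketch (added for illustration; not part of the original
# module). StackedRNNCells chains several cells into one composite cell that
# the RNN layer consumes like any other; passing a plain list of cells to RNN
# would wrap it in StackedRNNCells automatically. Sizes are assumptions.
def _example_stacked_cells():
    from keras.models import Sequential  # lazy import
    cells = [SimpleRNNCell(32), SimpleRNNCell(16)]
    model = Sequential()
    model.add(RNN(StackedRNNCells(cells), input_shape=(None, 8)))
    model.compile(optimizer='rmsprop', loss='mse')
    return model  # top cell has 16 units, so the output is (batch, 16)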
class RNN(Layer):
def __init__(self, cell,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if isinstance(cell, (list, tuple)):
cell = StackedRNNCells(cell)
if not hasattr(cell, 'call'):
raise ValueError('`cell` should have a `call` method. '
'The RNN was passed:', cell)
if not hasattr(cell, 'state_size'):
raise ValueError('The RNN cell should have '
'an attribute `state_size` '
'(tuple of integers, '
'one integer per RNN state).')
super(RNN, self).__init__(**kwargs)
self.cell = cell
self.return_sequences = return_sequences
self.return_state = return_state
self.go_backwards = go_backwards
self.stateful = stateful
self.unroll = unroll
self.supports_masking = True
self.input_spec = [InputSpec(ndim=3)]
self.state_spec = None
self._states = None
self.constants_spec = None
self._num_constants = None
@property
def states(self):
if self._states is None:
if isinstance(self.cell.state_size, int):
num_states = 1
else:
num_states = len(self.cell.state_size)
return [None for _ in range(num_states)]
return self._states
@states.setter
def states(self, states):
self._states = states
def compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
if hasattr(self.cell.state_size, '__len__'):
state_size = self.cell.state_size
else:
state_size = [self.cell.state_size]
output_dim = state_size[0]
if self.return_sequences:
output_shape = (input_shape[0], input_shape[1], output_dim)
else:
output_shape = (input_shape[0], output_dim)
if self.return_state:
state_shape = [(input_shape[0], dim) for dim in state_size]
return [output_shape] + state_shape
else:
return output_shape
def compute_mask(self, inputs, mask):
if isinstance(mask, list):
mask = mask[0]
output_mask = mask if self.return_sequences else None
if self.return_state:
state_mask = [None for _ in self.states]
return [output_mask] + state_mask
else:
return output_mask
def build(self, input_shape):
if self._num_constants is not None:
constants_shape = input_shape[-self._num_constants:]
else:
constants_shape = None
if isinstance(input_shape, list):
input_shape = input_shape[0]
batch_size = input_shape[0] if self.stateful else None
input_dim = input_shape[-1]
self.input_spec[0] = InputSpec(shape=(batch_size, None, input_dim))
if isinstance(self.cell, Layer):
step_input_shape = (input_shape[0],) + input_shape[2:]
if constants_shape is not None:
self.cell.build([step_input_shape] + constants_shape)
else:
self.cell.build(step_input_shape)
if hasattr(self.cell.state_size, '__len__'):
state_size = list(self.cell.state_size)
else:
state_size = [self.cell.state_size]
if self.state_spec is not None:
if [spec.shape[-1] for spec in self.state_spec] != state_size:
raise ValueError(
'An `initial_state` was passed that is not compatible with '
'`cell.state_size`. Received `state_spec`={}; '
'however `cell.state_size` is '
'{}'.format(self.state_spec, self.cell.state_size))
else:
self.state_spec = [InputSpec(shape=(None, dim))
for dim in state_size]
if self.stateful:
self.reset_states()
def get_initial_state(self, inputs):
initial_state = K.zeros_like(inputs)
initial_state = K.sum(initial_state, axis=(1, 2))
initial_state = K.expand_dims(initial_state)
if hasattr(self.cell.state_size, '__len__'):
return [K.tile(initial_state, [1, dim])
for dim in self.cell.state_size]
else:
return [K.tile(initial_state, [1, self.cell.state_size])]
def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
inputs, initial_state, constants = self._standardize_args(
inputs, initial_state, constants)
if initial_state is None and constants is None:
return super(RNN, self).__call__(inputs, **kwargs)
additional_inputs = []
additional_specs = []
if initial_state is not None:
kwargs['initial_state'] = initial_state
additional_inputs += initial_state
self.state_spec = [InputSpec(shape=K.int_shape(state))
for state in initial_state]
additional_specs += self.state_spec
if constants is not None:
kwargs['constants'] = constants
additional_inputs += constants
self.constants_spec = [InputSpec(shape=K.int_shape(constant))
for constant in constants]
self._num_constants = len(constants)
additional_specs += self.constants_spec
is_keras_tensor = hasattr(additional_inputs[0], '_keras_history')
for tensor in additional_inputs:
if hasattr(tensor, '_keras_history') != is_keras_tensor:
raise ValueError('The initial state or constants of an RNN'
' layer cannot be specified with a mix of'
' Keras tensors and non-Keras tensors')
if is_keras_tensor:
full_input = [inputs] + additional_inputs
full_input_spec = self.input_spec + additional_specs
original_input_spec = self.input_spec
self.input_spec = full_input_spec
output = super(RNN, self).__call__(full_input, **kwargs)
self.input_spec = original_input_spec
return output
else:
return super(RNN, self).__call__(inputs, **kwargs)
def call(self,
inputs,
mask=None,
training=None,
initial_state=None,
constants=None):
if isinstance(inputs, list):
inputs = inputs[0]
if initial_state is not None:
pass
elif self.stateful:
initial_state = self.states
else:
initial_state = self.get_initial_state(inputs)
if isinstance(mask, list):
mask = mask[0]
if len(initial_state) != len(self.states):
raise ValueError('Layer has ' + str(len(self.states)) +
' states but was passed ' +
str(len(initial_state)) +
' initial states.')
input_shape = K.int_shape(inputs)
timesteps = input_shape[1]
if self.unroll and timesteps in [None, 1]:
            raise ValueError('Cannot unroll an RNN if the '
'time dimension is undefined or equal to 1. \n'
'- If using a Sequential model, '
'specify the time dimension by passing '
'an `input_shape` or `batch_input_shape` '
'argument to your first layer. If your '
'first layer is an Embedding, you can '
'also use the `input_length` argument.\n'
'- If using the functional API, specify '
'the time dimension by passing a `shape` '
'or `batch_shape` argument to your Input layer.')
kwargs = {}
if has_arg(self.cell.call, 'training'):
kwargs['training'] = training
if constants:
if not has_arg(self.cell.call, 'constants'):
raise ValueError('RNN cell does not support constants')
def step(inputs, states):
constants = states[-self._num_constants:]
states = states[:-self._num_constants]
return self.cell.call(inputs, states, constants=constants,
**kwargs)
else:
def step(inputs, states):
return self.cell.call(inputs, states, **kwargs)
last_output, outputs, states = K.rnn(step,
inputs,
initial_state,
constants=constants,
go_backwards=self.go_backwards,
mask=mask,
unroll=self.unroll,
input_length=timesteps)
if self.stateful:
updates = []
for i in range(len(states)):
updates.append((self.states[i], states[i]))
self.add_update(updates, inputs)
if self.return_sequences:
output = outputs
else:
output = last_output
if getattr(last_output, '_uses_learning_phase', False):
output._uses_learning_phase = True
for state in states:
state._uses_learning_phase = True
if self.return_state:
if not isinstance(states, (list, tuple)):
states = [states]
else:
states = list(states)
return [output] + states
else:
return output
def _standardize_args(self, inputs, initial_state, constants):
if isinstance(inputs, list):
assert initial_state is None and constants is None
if self._num_constants is not None:
constants = inputs[-self._num_constants:]
inputs = inputs[:-self._num_constants]
if len(inputs) > 1:
initial_state = inputs[1:]
inputs = inputs[0]
def to_list_or_none(x):
if x is None or isinstance(x, list):
return x
if isinstance(x, tuple):
return list(x)
return [x]
initial_state = to_list_or_none(initial_state)
constants = to_list_or_none(constants)
return inputs, initial_state, constants
def reset_states(self, states=None):
if not self.stateful:
raise AttributeError('Layer must be stateful.')
batch_size = self.input_spec[0].shape[0]
if not batch_size:
            raise ValueError('If an RNN is stateful, it needs to know '
'its batch size. Specify the batch size '
'of your input tensors: \n'
'- If using a Sequential model, '
'specify the batch size by passing '
'a `batch_input_shape` '
'argument to your first layer.\n'
'- If using the functional API, specify '
'the batch size by passing a '
'`batch_shape` argument to your Input layer.')
if self.states[0] is None:
if hasattr(self.cell.state_size, '__len__'):
self.states = [K.zeros((batch_size, dim))
for dim in self.cell.state_size]
else:
self.states = [K.zeros((batch_size, self.cell.state_size))]
elif states is None:
if hasattr(self.cell.state_size, '__len__'):
for state, dim in zip(self.states, self.cell.state_size):
K.set_value(state, np.zeros((batch_size, dim)))
else:
K.set_value(self.states[0],
np.zeros((batch_size, self.cell.state_size)))
else:
if not isinstance(states, (list, tuple)):
states = [states]
if len(states) != len(self.states):
raise ValueError('Layer ' + self.name + ' expects ' +
str(len(self.states)) + ' states, '
'but it received ' + str(len(states)) +
' state values. Input received: ' +
str(states))
for index, (value, state) in enumerate(zip(states, self.states)):
if hasattr(self.cell.state_size, '__len__'):
dim = self.cell.state_size[index]
else:
dim = self.cell.state_size
if value.shape != (batch_size, dim):
raise ValueError('State ' + str(index) +
' is incompatible with layer ' +
self.name + ': expected shape=' +
str((batch_size, dim)) +
', found shape=' + str(value.shape))
K.set_value(state, value)
def get_config(self):
config = {'return_sequences': self.return_sequences,
'return_state': self.return_state,
'go_backwards': self.go_backwards,
'stateful': self.stateful,
'unroll': self.unroll}
if self._num_constants is not None:
config['num_constants'] = self._num_constants
cell_config = self.cell.get_config()
config['cell'] = {'class_name': self.cell.__class__.__name__,
'config': cell_config}
base_config = super(RNN, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
from . import deserialize as deserialize_layer
cell = deserialize_layer(config.pop('cell'),
custom_objects=custom_objects)
num_constants = config.pop('num_constants', None)
layer = cls(cell, **config)
layer._num_constants = num_constants
return layer
@property
def trainable_weights(self):
if not self.trainable:
return []
if isinstance(self.cell, Layer):
return self.cell.trainable_weights
return []
@property
def non_trainable_weights(self):
if isinstance(self.cell, Layer):
if not self.trainable:
return self.cell.weights
return self.cell.non_trainable_weights
return []
@property
def losses(self):
if isinstance(self.cell, Layer):
return self.cell.losses
return []
def get_losses_for(self, inputs=None):
if isinstance(self.cell, Layer):
cell_losses = self.cell.get_losses_for(inputs)
return cell_losses + super(RNN, self).get_losses_for(inputs)
return super(RNN, self).get_losses_for(inputs)
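# --- Hedged usage sketch (added for illustration; not part of the original
# module). The generic RNN layer accepts an explicit `initial_state`, e.g. to
# seed a decoder with an encoder's final state. Tensor shapes and the 32-unit
# cell are assumptions.
def _example_rnn_initial_state():
    from keras.layers import Input  # lazy imports
    from keras.models import Model
    sequence = Input(shape=(None, 8))
    init_h = Input(shape=(32,))  # externally provided initial hidden state
    outputs = RNN(SimpleRNNCell(32))(sequence, initial_state=[init_h])
    return Model([sequence, init_h], outputs)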
class SimpleRNNCell(Layer):
def __init__(self, units,
activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(SimpleRNNCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_size = self.units
self._dropout_mask = None
self._recurrent_dropout_mask = None
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.units,),
name='bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def call(self, inputs, states, training=None):
prev_output = states[0]
if 0 < self.dropout < 1 and self._dropout_mask is None:
self._dropout_mask = _generate_dropout_mask(
_generate_dropout_ones(inputs, K.shape(inputs)[-1]),
self.dropout,
training=training)
if (0 < self.recurrent_dropout < 1 and
self._recurrent_dropout_mask is None):
self._recurrent_dropout_mask = _generate_dropout_mask(
_generate_dropout_ones(inputs, self.units),
self.recurrent_dropout,
training=training)
dp_mask = self._dropout_mask
rec_dp_mask = self._recurrent_dropout_mask
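        # Editor's note: the code below implements the basic RNN update
        #   output = activation(inputs . kernel + bias + prev_output . recurrent_kernel)
        # with the optional dropout masks applied to the inputs and to the
        # previous output before the matrix products.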
if dp_mask is not None:
h = K.dot(inputs * dp_mask, self.kernel)
else:
h = K.dot(inputs, self.kernel)
if self.bias is not None:
h = K.bias_add(h, self.bias)
if rec_dp_mask is not None:
prev_output *= rec_dp_mask
output = h + K.dot(prev_output, self.recurrent_kernel)
if self.activation is not None:
output = self.activation(output)
if 0 < self.dropout + self.recurrent_dropout:
if training is None:
output._uses_learning_phase = True
return output, [output]
def get_config(self):
config = {'units': self.units,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout}
base_config = super(SimpleRNNCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class SimpleRNN(RNN):
@interfaces.legacy_recurrent_support
def __init__(self, units,
activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if 'implementation' in kwargs:
kwargs.pop('implementation')
warnings.warn('The `implementation` argument '
'in `SimpleRNN` has been deprecated. '
'Please remove it from your layer call.')
if K.backend() == 'theano':
warnings.warn(
'RNN dropout is no longer supported with the Theano backend '
'due to technical limitations. '
'You can either set `dropout` and `recurrent_dropout` to 0, '
'or use the TensorFlow backend.')
dropout = 0.
recurrent_dropout = 0.
cell = SimpleRNNCell(units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout)
super(SimpleRNN, self).__init__(cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
def call(self, inputs, mask=None, training=None, initial_state=None):
return super(SimpleRNN, self).call(inputs,
mask=mask,
training=training,
initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
def get_config(self):
config = {'units': self.units,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout}
base_config = super(SimpleRNN, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config:
config.pop('implementation')
return cls(**config)
class GRUCell(Layer):
def __init__(self, units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
**kwargs):
super(GRUCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.implementation = implementation
self.state_size = self.units
self._dropout_mask = None
self._recurrent_dropout_mask = None
def build(self, input_shape):
input_dim = input_shape[-1]
self.kernel = self.add_weight(shape=(input_dim, self.units * 3),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 3),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.units * 3,),
name='bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.kernel_z = self.kernel[:, :self.units]
self.recurrent_kernel_z = self.recurrent_kernel[:, :self.units]
self.kernel_r = self.kernel[:, self.units: self.units * 2]
self.recurrent_kernel_r = self.recurrent_kernel[:,
self.units:
self.units * 2]
self.kernel_h = self.kernel[:, self.units * 2:]
self.recurrent_kernel_h = self.recurrent_kernel[:, self.units * 2:]
if self.use_bias:
self.bias_z = self.bias[:self.units]
self.bias_r = self.bias[self.units: self.units * 2]
self.bias_h = self.bias[self.units * 2:]
else:
self.bias_z = None
self.bias_r = None
self.bias_h = None
self.built = True
def call(self, inputs, states, training=None):
h_tm1 = states[0]
if 0 < self.dropout < 1 and self._dropout_mask is None:
self._dropout_mask = _generate_dropout_mask(
_generate_dropout_ones(inputs, K.shape(inputs)[-1]),
self.dropout,
training=training,
count=3)
if (0 < self.recurrent_dropout < 1 and
self._recurrent_dropout_mask is None):
self._recurrent_dropout_mask = _generate_dropout_mask(
_generate_dropout_ones(inputs, self.units),
self.recurrent_dropout,
training=training,
count=3)
dp_mask = self._dropout_mask
rec_dp_mask = self._recurrent_dropout_mask
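        # Editor's note: both branches below compute the standard GRU update
        #   z  = recurrent_activation(x_z + U_z . h_tm1)     (update gate)
        #   r  = recurrent_activation(x_r + U_r . h_tm1)     (reset gate)
        #   hh = activation(x_h + U_h . (r * h_tm1))         (candidate state)
        #   h  = z * h_tm1 + (1 - z) * hh
        # implementation=1 uses separate per-gate matrix products, while
        # implementation=2 batches them into larger fused matrix products.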
if self.implementation == 1:
if 0. < self.dropout < 1.:
inputs_z = inputs * dp_mask[0]
inputs_r = inputs * dp_mask[1]
inputs_h = inputs * dp_mask[2]
else:
inputs_z = inputs
inputs_r = inputs
inputs_h = inputs
x_z = K.dot(inputs_z, self.kernel_z)
x_r = K.dot(inputs_r, self.kernel_r)
x_h = K.dot(inputs_h, self.kernel_h)
if self.use_bias:
x_z = K.bias_add(x_z, self.bias_z)
x_r = K.bias_add(x_r, self.bias_r)
x_h = K.bias_add(x_h, self.bias_h)
if 0. < self.recurrent_dropout < 1.:
h_tm1_z = h_tm1 * rec_dp_mask[0]
h_tm1_r = h_tm1 * rec_dp_mask[1]
h_tm1_h = h_tm1 * rec_dp_mask[2]
else:
h_tm1_z = h_tm1
h_tm1_r = h_tm1
h_tm1_h = h_tm1
z = self.recurrent_activation(x_z + K.dot(h_tm1_z,
self.recurrent_kernel_z))
r = self.recurrent_activation(x_r + K.dot(h_tm1_r,
self.recurrent_kernel_r))
hh = self.activation(x_h + K.dot(r * h_tm1_h,
self.recurrent_kernel_h))
else:
if 0. < self.dropout < 1.:
inputs *= dp_mask[0]
matrix_x = K.dot(inputs, self.kernel)
if self.use_bias:
matrix_x = K.bias_add(matrix_x, self.bias)
if 0. < self.recurrent_dropout < 1.:
h_tm1 *= rec_dp_mask[0]
matrix_inner = K.dot(h_tm1,
self.recurrent_kernel[:, :2 * self.units])
x_z = matrix_x[:, :self.units]
x_r = matrix_x[:, self.units: 2 * self.units]
recurrent_z = matrix_inner[:, :self.units]
recurrent_r = matrix_inner[:, self.units: 2 * self.units]
z = self.recurrent_activation(x_z + recurrent_z)
r = self.recurrent_activation(x_r + recurrent_r)
x_h = matrix_x[:, 2 * self.units:]
recurrent_h = K.dot(r * h_tm1,
self.recurrent_kernel[:, 2 * self.units:])
hh = self.activation(x_h + recurrent_h)
h = z * h_tm1 + (1 - z) * hh
if 0 < self.dropout + self.recurrent_dropout:
if training is None:
h._uses_learning_phase = True
return h, [h]
def get_config(self):
config = {'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout,
'implementation': self.implementation}
base_config = super(GRUCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class GRU(RNN):
@interfaces.legacy_recurrent_support
def __init__(self, units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if implementation == 0:
warnings.warn('`implementation=0` has been deprecated, '
                          'and now defaults to `implementation=1`. '
'Please update your layer call.')
if K.backend() == 'theano':
warnings.warn(
'RNN dropout is no longer supported with the Theano backend '
'due to technical limitations. '
'You can either set `dropout` and `recurrent_dropout` to 0, '
'or use the TensorFlow backend.')
dropout = 0.
recurrent_dropout = 0.
cell = GRUCell(units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation)
super(GRU, self).__init__(cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
def call(self, inputs, mask=None, training=None, initial_state=None):
return super(GRU, self).call(inputs,
mask=mask,
training=training,
initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
@property
def implementation(self):
return self.cell.implementation
def get_config(self):
config = {'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout,
'implementation': self.implementation}
base_config = super(GRU, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config and config['implementation'] == 0:
config['implementation'] = 1
return cls(**config)
class LSTMCell(Layer):
def __init__(self, units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
**kwargs):
super(LSTMCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.implementation = implementation
self.state_size = (self.units, self.units)
self._dropout_mask = None
self._recurrent_dropout_mask = None
def build(self, input_shape):
input_dim = input_shape[-1]
self.kernel = self.add_weight(shape=(input_dim, self.units * 4),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 4),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
if self.unit_forget_bias:
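            # Editor's note: with unit_forget_bias the bias of the forget gate
            # (the second block of the concatenated i/f/c/o bias vector) is
            # initialized to ones, a common trick that eases learning of
            # long-term dependencies; the other gates keep the configured
            # bias_initializer.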
def bias_initializer(_, *args, **kwargs):
return K.concatenate([
self.bias_initializer((self.units,), *args, **kwargs),
initializers.Ones()((self.units,), *args, **kwargs),
self.bias_initializer((self.units * 2,), *args, **kwargs),
])
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(shape=(self.units * 4,),
name='bias',
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.kernel_i = self.kernel[:, :self.units]
self.kernel_f = self.kernel[:, self.units: self.units * 2]
self.kernel_c = self.kernel[:, self.units * 2: self.units * 3]
self.kernel_o = self.kernel[:, self.units * 3:]
self.recurrent_kernel_i = self.recurrent_kernel[:, :self.units]
self.recurrent_kernel_f = self.recurrent_kernel[:, self.units: self.units * 2]
self.recurrent_kernel_c = self.recurrent_kernel[:, self.units * 2: self.units * 3]
self.recurrent_kernel_o = self.recurrent_kernel[:, self.units * 3:]
if self.use_bias:
self.bias_i = self.bias[:self.units]
self.bias_f = self.bias[self.units: self.units * 2]
self.bias_c = self.bias[self.units * 2: self.units * 3]
self.bias_o = self.bias[self.units * 3:]
else:
self.bias_i = None
self.bias_f = None
self.bias_c = None
self.bias_o = None
self.built = True
def call(self, inputs, states, training=None):
if 0 < self.dropout < 1 and self._dropout_mask is None:
self._dropout_mask = _generate_dropout_mask(
_generate_dropout_ones(inputs, K.shape(inputs)[-1]),
self.dropout,
training=training,
count=4)
if (0 < self.recurrent_dropout < 1 and
self._recurrent_dropout_mask is None):
self._recurrent_dropout_mask = _generate_dropout_mask(
_generate_dropout_ones(inputs, self.units),
self.recurrent_dropout,
training=training,
count=4)
dp_mask = self._dropout_mask
rec_dp_mask = self._recurrent_dropout_mask
h_tm1 = states[0]
c_tm1 = states[1]
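        # Editor's note: both branches below compute the standard LSTM update
        #   i = recurrent_activation(x_i + U_i . h_tm1)   (input gate)
        #   f = recurrent_activation(x_f + U_f . h_tm1)   (forget gate)
        #   c = f * c_tm1 + i * activation(x_c + U_c . h_tm1)
        #   o = recurrent_activation(x_o + U_o . h_tm1)   (output gate)
        #   h = o * activation(c)
        # implementation=1 uses per-gate matrix products, implementation=2
        # fuses them into a single product with the concatenated kernels.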
if self.implementation == 1:
if 0 < self.dropout < 1.:
inputs_i = inputs * dp_mask[0]
inputs_f = inputs * dp_mask[1]
inputs_c = inputs * dp_mask[2]
inputs_o = inputs * dp_mask[3]
else:
inputs_i = inputs
inputs_f = inputs
inputs_c = inputs
inputs_o = inputs
x_i = K.dot(inputs_i, self.kernel_i)
x_f = K.dot(inputs_f, self.kernel_f)
x_c = K.dot(inputs_c, self.kernel_c)
x_o = K.dot(inputs_o, self.kernel_o)
if self.use_bias:
x_i = K.bias_add(x_i, self.bias_i)
x_f = K.bias_add(x_f, self.bias_f)
x_c = K.bias_add(x_c, self.bias_c)
x_o = K.bias_add(x_o, self.bias_o)
if 0 < self.recurrent_dropout < 1.:
h_tm1_i = h_tm1 * rec_dp_mask[0]
h_tm1_f = h_tm1 * rec_dp_mask[1]
h_tm1_c = h_tm1 * rec_dp_mask[2]
h_tm1_o = h_tm1 * rec_dp_mask[3]
else:
h_tm1_i = h_tm1
h_tm1_f = h_tm1
h_tm1_c = h_tm1
h_tm1_o = h_tm1
i = self.recurrent_activation(x_i + K.dot(h_tm1_i,
self.recurrent_kernel_i))
f = self.recurrent_activation(x_f + K.dot(h_tm1_f,
self.recurrent_kernel_f))
c = f * c_tm1 + i * self.activation(x_c + K.dot(h_tm1_c,
self.recurrent_kernel_c))
o = self.recurrent_activation(x_o + K.dot(h_tm1_o,
self.recurrent_kernel_o))
else:
if 0. < self.dropout < 1.:
inputs *= dp_mask[0]
z = K.dot(inputs, self.kernel)
if 0. < self.recurrent_dropout < 1.:
h_tm1 *= rec_dp_mask[0]
z += K.dot(h_tm1, self.recurrent_kernel)
if self.use_bias:
z = K.bias_add(z, self.bias)
z0 = z[:, :self.units]
z1 = z[:, self.units: 2 * self.units]
z2 = z[:, 2 * self.units: 3 * self.units]
z3 = z[:, 3 * self.units:]
i = self.recurrent_activation(z0)
f = self.recurrent_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.recurrent_activation(z3)
h = o * self.activation(c)
if 0 < self.dropout + self.recurrent_dropout:
if training is None:
h._uses_learning_phase = True
return h, [h, c]
def get_config(self):
config = {'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout,
'implementation': self.implementation}
base_config = super(LSTMCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class LSTM(RNN):
@interfaces.legacy_recurrent_support
def __init__(self, units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if implementation == 0:
warnings.warn('`implementation=0` has been deprecated, '
                          'and now defaults to `implementation=1`. '
'Please update your layer call.')
if K.backend() == 'theano':
warnings.warn(
'RNN dropout is no longer supported with the Theano backend '
'due to technical limitations. '
'You can either set `dropout` and `recurrent_dropout` to 0, '
'or use the TensorFlow backend.')
dropout = 0.
recurrent_dropout = 0.
cell = LSTMCell(units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
unit_forget_bias=unit_forget_bias,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation)
super(LSTM, self).__init__(cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
def call(self, inputs, mask=None, training=None, initial_state=None):
return super(LSTM, self).call(inputs,
mask=mask,
training=training,
initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def unit_forget_bias(self):
return self.cell.unit_forget_bias
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
@property
def implementation(self):
return self.cell.implementation
def get_config(self):
config = {'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout,
'implementation': self.implementation}
base_config = super(LSTM, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config and config['implementation'] == 0:
config['implementation'] = 1
return cls(**config)
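# Editor's note: helper that builds a ones tensor of shape (batch_size, dims)
# matching the (possibly dynamic) batch dimension of `inputs`; it is used as
# the base tensor from which the dropout masks below are generated.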
def _generate_dropout_ones(inputs, dims):
    # Will update workaround once CNTK supports it.
if K.backend() == 'cntk':
ones = K.ones_like(K.reshape(inputs[:, 0], (-1, 1)))
return K.tile(ones, (1, dims))
else:
return K.ones((K.shape(inputs)[0], dims))
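# Editor's note: turns the ones tensor into one dropout mask (or `count`
# independent masks, one per gate) that is only active during the training
# phase via K.in_train_phase.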
def _generate_dropout_mask(ones, rate, training=None, count=1):
def dropped_inputs():
return K.dropout(ones, rate)
if count > 1:
return [K.in_train_phase(
dropped_inputs,
ones,
training=training) for _ in range(count)]
return K.in_train_phase(
dropped_inputs,
ones,
training=training)
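# Editor's usage sketch (an illustration, not part of the original file):
# these classes are normally consumed through the public layer API rather
# than instantiated cell by cell, e.g.
#   model = Sequential()
#   model.add(LSTM(32, input_shape=(10, 8)))
# which wraps an LSTMCell in the RNN machinery defined above.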
| true
| true
|
f7198b1249cfc281e7acad93f4e91961e055e201
| 13,206
|
py
|
Python
|
mirage/libs/ble_utils/scapy_btlejack_layers.py
|
HomeSen/mirage
|
6beb4df508758bd152f5d929ba3e6353f161ef27
|
[
"MIT"
] | null | null | null |
mirage/libs/ble_utils/scapy_btlejack_layers.py
|
HomeSen/mirage
|
6beb4df508758bd152f5d929ba3e6353f161ef27
|
[
"MIT"
] | null | null | null |
mirage/libs/ble_utils/scapy_btlejack_layers.py
|
HomeSen/mirage
|
6beb4df508758bd152f5d929ba3e6353f161ef27
|
[
"MIT"
] | null | null | null |
from scapy.all import *
'''
This module contains some scapy definitions for communicating with a BTLEJack device.
'''
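# Editor's note: every frame defined below shares the layout implemented by
# BTLEJack_Hdr: a 0xBC magic byte, one byte carrying the packet type (high
# nibble) and opcode/notification type (low nibble), a little-endian 16-bit
# payload length, the payload itself, and a trailing XOR checksum byte.
# A short illustrative sketch of building such a frame is given after the
# bind_layers() calls at the end of this module.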
BTLEJACK_PACKETS_TYPES = {
0x1 : "command",
0x2 : "response",
0x4 : "notification"
}
BTLEJACK_PACKETS_OPCODES = {
0x1 : "version",
0x2 : "reset",
0x3 : "scan_access_address",
0x4 : "recover",
0x5 : "recover_channel_map",
0x6 : "recover_hop_interval",
0x7 : "sniff_connection_requests",
0x8 : "enable_jamming",
0x9 : "enable_hijacking",
0xa : "send_packet",
0xb : "collaborative_channel_map",
0xe : "debug",
0xf : "verbose"
}
BTLEJACK_NOTIFICATION_TYPES = {
0x0 : "access_address",
0x1 : "crc",
0x2 : "channel_map",
0x3 : "hop_interval",
0x4 : "hop_increment",
0x5 : "packet",
0x6 : "connection_request",
0x7 : "packet_nordic",
0x8 : "hijack_status",
0x9 : "connection_lost",
0xa : "advertisement"
}
class BTLEJack_Hdr(Packet):
name = "BTLEJack Packet"
fields_desc = [
XByteField("magic",0xBC),
BitEnumField("packet_type",None, 4, BTLEJACK_PACKETS_TYPES),
ConditionalField(BitEnumField("opcode",None, 4, BTLEJACK_PACKETS_OPCODES), lambda pkt:pkt.packet_type <= 0x3),
ConditionalField(BitEnumField("notification_type",None, 4, BTLEJACK_NOTIFICATION_TYPES), lambda pkt:pkt.packet_type == 0x4),
LEShortField("length",None),
XByteField("crc",None)
]
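    # Editor's note: on the wire the checksum byte is the *last* byte of the
    # frame, while fields_desc declares it right after the length so that all
    # fixed fields live in one header block. pre_dissect() therefore moves the
    # trailing byte next to the header before dissection, and post_build()
    # recomputes the length and the XOR checksum and appends it at the end.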
def pre_dissect(self,data):
return data[0:4] + data[-1:] + data[4:-1]
def post_build(self,p,pay):
if self.crc is None:
self.crc = 0xFF
for byte in p+pay:
self.crc ^= byte
if self.length is None:
self.length = len(pay)
self.crc ^= self.length
return p[0:2]+struct.pack('<H',self.length)+pay+struct.pack('B',self.crc)
# BTLEJack Commands
class BTLEJack_Version_Command(Packet):
name = "BTLEJack Version Command"
class BTLEJack_Reset_Command(Packet):
name = "BTLEJack Reset Command"
class BTLEJack_Scan_Connections_Command(Packet):
name = "BTLEJack Scan Connections Command"
class BTLEJack_Collaborative_Channel_Map_Command(Packet):
name = "BTLEJack Collaborative Channel Map Command"
fields_desc = [
XLEIntField("access_address",None),
LEX3BytesField("crc_init",None),
ByteField("start_channel",0),
ByteField("end_channel",37)
]
class BTLEJack_Recover_Command(Packet):
name = "BTLEJack Recover Command"
fields_desc = [
ByteEnumField("operation_type",None, {
0x00 : "recover_crc_init",
0x01 : "recover_channel_map",
0x02 : "recover_hop"
})
]
class BTLEJack_Recover_Crcinit_Command(Packet):
name = "BTLEJack Recover CRCInit Command"
fields_desc = [
XLEIntField("access_address",None)
]
class BTLEJack_Recover_Channel_Map_Command(Packet):
name = "BTLEJack Recover Channel Map Command"
fields_desc = [
XLEIntField("access_address",None),
LEX3BytesField("crc_init",None),
ByteField("start_channel",0),
ByteField("end_channel",37),
LEIntField("timeout",None)
]
class BTLEJack_Recover_Hopping_Parameters_Command(Packet):
name = "BTLEJack Recover Hopping Parameters Command"
fields_desc = [
XLEIntField("access_address",None),
LEX3BytesField("crc_init",None),
BTLEChanMapField("channel_map",None)
]
class BTLEJack_Recover_Connection_AA_Command(Packet):
name = "BTLEJack Recover Connection AA Command"
fields_desc = [
XLEIntField("access_address",None)
]
class BTLEJack_Recover_Connection_AA_Chm_Command(Packet):
name = "BTLEJack Recover Connection AA Chm Command"
fields_desc = [
XLEIntField("access_address",None),
BTLEChanMapField("channel_map",None)
]
class BTLEJack_Recover_Connection_AA_Chm_HopInterval_Command(Packet):
name = "BTLEJack Recover Connection AA Chm Command"
fields_desc = [
XLEIntField("access_address",None),
BTLEChanMapField("channel_map",None),
XLEShortField("hop_interval",None)
]
class BTLEJack_Sniff_Connection_Request_Command(Packet):
name = "BTLEJack Sniff Connection Request Command"
fields_desc = [
BDAddrField("address",None),
ByteField("channel",37)
]
class BTLEJack_Sniff_Advertisements_Command(Packet):
name = "BTLEJack Sniff Advertisements Command"
fields_desc = [
BDAddrField("address",None),
ByteField("channel",37)
]
class BTLEJack_Jam_Advertisements_Command(Packet):
name = "BTLEJack Jam Advertisements Command"
fields_desc = [
ByteField("channel",37),
ByteField("offset",None),
FieldLenField("pattern_length", None,fmt="B", length_of="pattern"),
StrField("pattern",None)
]
class BTLEJack_Enable_Jamming_Command(Packet):
name = "BTLEJack Enable Jamming Command"
fields_desc = [
ByteEnumField("enabled",None,{0x00 : "no",0x01 : "yes"})
]
class BTLEJack_Enable_Hijacking_Command(Packet):
name = "BTLEJack Enable Hijacking Command"
fields_desc = [
ByteEnumField("enabled",None,{0x00 : "no",0x01 : "yes"})
]
class BTLEJack_Send_Packet_Command(Packet):
name = "BTLEJack Send Packet Command"
fields_desc = [
PacketField("ble_payload",None,BTLE_DATA)
]
# BTLEJack Responses
class BTLEJack_Send_Packet_Response(Packet):
name = "BTLEJack Send Packet Response"
class BTLEJack_Enable_Jamming_Response(Packet):
name = "BTLEJack Enable Jamming Response"
class BTLEJack_Enable_Hijacking_Response(Packet):
name = "BTLEJack Enable Hijacking Response"
class BTLEJack_Recover_Response(Packet):
name = "BTLEJack Recover Response"
class BTLEJack_Scan_Connections_Response(Packet):
name = "BTLEJack Scan Connections Response"
class BTLEJack_Collaborative_Channel_Map_Response(Packet):
name = "BTLEJack Collaborative Channel Map Response"
class BTLEJack_Version_Response(Packet):
name = "BTLEJack Version Response"
fields_desc = [
ByteField("major",None),
ByteField("minor",None)
]
class BTLEJack_Reset_Response(Packet):
name = "BTLEJack Reset Response"
class BTLEJack_Sniff_Connection_Request_Response(Packet):
name = "BTLEJack Sniff Connection Request Response"
class BTLEJack_Sniff_Advertisements_Response(Packet):
name = "BTLEJack Sniff Advertisements Response"
class BTLEJack_Jam_Advertisements_Response(Packet):
name = "BTLEJack Jam Advertisements Response"
class BTLEJack_Verbose_Response(Packet):
name = "BTLEJack Verbose Response"
fields_desc = [StrField("message",None)]
class BTLEJack_Debug_Response(Packet):
name = "BTLEJack Debug Response"
fields_desc = [StrField("message",None)]
class BTLEJack_Recover_Connection_AA_Response(Packet):
name = "BTLEJack Recover Connection AA Response"
fields_desc = [
XLEIntField("access_address",None)
]
class BTLEJack_Recover_Connection_AA_Chm_Response(Packet):
name = "BTLEJack Recover Connection AA Chm Response"
fields_desc = [
XLEIntField("access_address",None)
]
# BTLEJack Notifications
class BTLEJack_Access_Address_Notification(Packet):
name = "BTLEJack Access Address Notification"
fields_desc = [
ByteField("channel",None),
ByteField("rssi", None),
XLEIntField("access_address",None)
]
class BTLEJack_CRCInit_Notification(Packet):
name = "BTLEJack CRCInit Notification"
fields_desc = [
XLEIntField("access_address",None),
LEX3BytesField("crc_init",None),
ByteField("unused",0)
]
class BTLEJack_Channel_Map_Notification(Packet):
name = "BTLEJack Channel Map Notification"
fields_desc = [
XLEIntField("access_address",None),
BTLEChanMapField("channel_map",None)
]
class BTLEJack_Hop_Interval_Notification(Packet):
name = "BTLEJack Hop Interval Notification"
fields_desc = [
XLEIntField("access_address",None),
XLEShortField("hop_interval",None)
]
class BTLEJack_Hop_Increment_Notification(Packet):
name = "BTLEJack Hop Increment Notification"
fields_desc = [
XLEIntField("access_address",None),
ByteField("hop_increment",None)
]
class BTLEJack_Nordic_Tap_Packet_Notification(Packet):
name = "BTLEJack Nordic Tap Packet Notification"
fields_desc = [
ByteField("header_length",None),
ByteField("flags",None),
ByteField("channel",None),
ByteField("rssi",None),
LEShortField("event_counter",None),
LEIntField("delta", None),
PacketField("ble_payload",None, BTLE_DATA)
]
class BTLEJack_Hijack_Status_Notification(Packet):
name = "BTLEJack Hijack Status Notification"
fields_desc = [
ByteEnumField("status",None, {0 : "success", 1 : "failure"})
]
class BTLEJack_Connection_Lost_Notification(Packet):
name = "BTLEJack Connection Lost Notification"
class BTLEJack_Advertisement_Notification(Packet):
name = "BTLEJack Advertisement Notification"
fields_desc = [
PacketField("ble_payload",None,BTLE_ADV)
]
class BTLEJack_Connection_Request_Notification(Packet):
name = "BTLEJack Connection Request Notification"
fields_desc = [
BitEnumField("RxAdd", 0, 1, {0: "public", 1: "random"}),
BitEnumField("TxAdd", 0, 1, {0: "public", 1: "random"}),
BitField("RFU", 0, 2), # Unused
BitEnumField("PDU_type", 0, 4, {0: "ADV_IND", 1: "ADV_DIRECT_IND", 2: "ADV_NONCONN_IND", 3: "SCAN_REQ",
4: "SCAN_RSP", 5: "CONNECT_REQ", 6: "ADV_SCAN_IND"}),
ByteField("payload_length", 0x22),
PacketField("ble_payload",None,BTLE_CONNECT_REQ)
]
# Binding BTLEJack Commands
bind_layers(BTLEJack_Hdr, BTLEJack_Version_Command,packet_type=0x1, opcode=0x1)
bind_layers(BTLEJack_Hdr, BTLEJack_Reset_Command,packet_type=0x1, opcode=0x2)
bind_layers(BTLEJack_Hdr, BTLEJack_Scan_Connections_Command, packet_type=0x1,opcode=0x3)
bind_layers(BTLEJack_Hdr, BTLEJack_Collaborative_Channel_Map_Command,packet_type=0x1,opcode=0xb)
bind_layers(BTLEJack_Hdr, BTLEJack_Recover_Command,packet_type=0x1, opcode=0x4)
bind_layers(BTLEJack_Recover_Command,BTLEJack_Recover_Crcinit_Command,operation_type=0x00)
bind_layers(BTLEJack_Recover_Command,BTLEJack_Recover_Channel_Map_Command,operation_type=0x01)
bind_layers(BTLEJack_Recover_Command,BTLEJack_Recover_Hopping_Parameters_Command,operation_type=0x02)
#bind_layers(BTLEJack_Hdr, BTLEJack_Recover_Connection_AA_Command,packet_type=0x1,opcode=0x4)
#bind_layers(BTLEJack_Hdr, BTLEJack_Recover_Connection_AA_Chm_Command,packet_type=0x1,opcode=0x5)
#bind_layers(BTLEJack_Hdr, BTLEJack_Recover_Connection_AA_Chm_HopInterval_Command,packet_type=0x1,opcode=0x6)
bind_layers(BTLEJack_Hdr, BTLEJack_Jam_Advertisements_Command,packet_type=0x1, opcode=0x5)
bind_layers(BTLEJack_Hdr, BTLEJack_Sniff_Connection_Request_Command,packet_type=0x1,opcode=0x7)
bind_layers(BTLEJack_Hdr, BTLEJack_Sniff_Advertisements_Command,packet_type=0x1,opcode=0xc)
bind_layers(BTLEJack_Hdr, BTLEJack_Enable_Jamming_Command,packet_type=0x1,opcode=0x8)
bind_layers(BTLEJack_Hdr, BTLEJack_Enable_Hijacking_Command,packet_type=0x1,opcode=0x9)
bind_layers(BTLEJack_Hdr, BTLEJack_Send_Packet_Command,packet_type=0x1,opcode=0xa)
# Binding BTLEJack Responses
bind_layers(BTLEJack_Hdr, BTLEJack_Send_Packet_Response,packet_type=0x2,opcode=0xa)
bind_layers(BTLEJack_Hdr, BTLEJack_Enable_Jamming_Response,packet_type=0x2,opcode=0x8)
bind_layers(BTLEJack_Hdr, BTLEJack_Enable_Hijacking_Response,packet_type=0x2,opcode=0x9)
bind_layers(BTLEJack_Hdr, BTLEJack_Sniff_Connection_Request_Response,packet_type=0x2, opcode=0x7)
bind_layers(BTLEJack_Hdr, BTLEJack_Sniff_Advertisements_Response,packet_type=0x1,opcode=0xc)
'''
bind_layers(BTLEJack_Hdr, BTLEJack_Recover_Connection_AA_Response,packet_type=0x2, opcode=0x4)
bind_layers(BTLEJack_Hdr, BTLEJack_Recover_Connection_AA_Chm_Response,packet_type=0x2, opcode=0x5)
'''
bind_layers(BTLEJack_Hdr, BTLEJack_Jam_Advertisements_Command,packet_type=0x1,opcode=0x5)
bind_layers(BTLEJack_Hdr, BTLEJack_Recover_Response,packet_type=0x2, opcode=0x4)
bind_layers(BTLEJack_Hdr, BTLEJack_Version_Response,packet_type=0x2, opcode=0x1)
bind_layers(BTLEJack_Hdr, BTLEJack_Reset_Response,packet_type=0x2, opcode=0x2)
bind_layers(BTLEJack_Hdr, BTLEJack_Scan_Connections_Response,packet_type=0x2, opcode=0x3)
bind_layers(BTLEJack_Hdr, BTLEJack_Collaborative_Channel_Map_Response,packet_type=0x2, opcode=0xb)
bind_layers(BTLEJack_Hdr, BTLEJack_Debug_Response,packet_type=0x2, opcode=0xe)
bind_layers(BTLEJack_Hdr, BTLEJack_Verbose_Response,packet_type=0x2, opcode=0xf)
# Binding BTLEJack Notifications
bind_layers(BTLEJack_Hdr, BTLEJack_Access_Address_Notification, packet_type=0x4, notification_type=0x0)
bind_layers(BTLEJack_Hdr, BTLEJack_CRCInit_Notification, packet_type=0x4, notification_type=0x1)
bind_layers(BTLEJack_Hdr, BTLEJack_Channel_Map_Notification, packet_type=0x4, notification_type=0x2)
bind_layers(BTLEJack_Hdr, BTLEJack_Hop_Interval_Notification, packet_type=0x4, notification_type=0x3)
bind_layers(BTLEJack_Hdr, BTLEJack_Hop_Increment_Notification, packet_type=0x4, notification_type=0x4)
bind_layers(BTLEJack_Hdr, BTLEJack_Nordic_Tap_Packet_Notification, packet_type=0x4, notification_type=0x7)
bind_layers(BTLEJack_Hdr, BTLEJack_Hijack_Status_Notification, packet_type=0x4, notification_type=0x8)
bind_layers(BTLEJack_Hdr, BTLEJack_Connection_Lost_Notification, packet_type=0x4, notification_type=0x9)
bind_layers(BTLEJack_Hdr, BTLEJack_Connection_Request_Notification, packet_type=0x4, notification_type=0x6)
bind_layers(BTLEJack_Hdr, BTLEJack_Advertisement_Notification, packet_type=0x4, notification_type=0xa)
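# Editor's usage sketch (an illustration added for clarity, not part of the
# original module): with the bindings above, a frame can be built with normal
# scapy composition; BTLEJack_Hdr.post_build fills in the length and checksum.
if __name__ == '__main__':
    demo = BTLEJack_Hdr() / BTLEJack_Version_Command()
    print(raw(demo).hex())
    BTLEJack_Hdr(raw(demo)).show()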
| 35.5
| 126
| 0.783659
|
from scapy.all import *
BTLEJACK_PACKETS_TYPES = {
0x1 : "command",
0x2 : "response",
0x4 : "notification"
}
BTLEJACK_PACKETS_OPCODES = {
0x1 : "version",
0x2 : "reset",
0x3 : "scan_access_address",
0x4 : "recover",
0x5 : "recover_channel_map",
0x6 : "recover_hop_interval",
0x7 : "sniff_connection_requests",
0x8 : "enable_jamming",
0x9 : "enable_hijacking",
0xa : "send_packet",
0xb : "collaborative_channel_map",
0xe : "debug",
0xf : "verbose"
}
BTLEJACK_NOTIFICATION_TYPES = {
0x0 : "access_address",
0x1 : "crc",
0x2 : "channel_map",
0x3 : "hop_interval",
0x4 : "hop_increment",
0x5 : "packet",
0x6 : "connection_request",
0x7 : "packet_nordic",
0x8 : "hijack_status",
0x9 : "connection_lost",
0xa : "advertisement"
}
class BTLEJack_Hdr(Packet):
name = "BTLEJack Packet"
fields_desc = [
XByteField("magic",0xBC),
BitEnumField("packet_type",None, 4, BTLEJACK_PACKETS_TYPES),
ConditionalField(BitEnumField("opcode",None, 4, BTLEJACK_PACKETS_OPCODES), lambda pkt:pkt.packet_type <= 0x3),
ConditionalField(BitEnumField("notification_type",None, 4, BTLEJACK_NOTIFICATION_TYPES), lambda pkt:pkt.packet_type == 0x4),
LEShortField("length",None),
XByteField("crc",None)
]
def pre_dissect(self,data):
return data[0:4] + data[-1:] + data[4:-1]
def post_build(self,p,pay):
if self.crc is None:
self.crc = 0xFF
for byte in p+pay:
self.crc ^= byte
if self.length is None:
self.length = len(pay)
self.crc ^= self.length
return p[0:2]+struct.pack('<H',self.length)+pay+struct.pack('B',self.crc)
class BTLEJack_Version_Command(Packet):
name = "BTLEJack Version Command"
class BTLEJack_Reset_Command(Packet):
name = "BTLEJack Reset Command"
class BTLEJack_Scan_Connections_Command(Packet):
name = "BTLEJack Scan Connections Command"
class BTLEJack_Collaborative_Channel_Map_Command(Packet):
name = "BTLEJack Collaborative Channel Map Command"
fields_desc = [
XLEIntField("access_address",None),
LEX3BytesField("crc_init",None),
ByteField("start_channel",0),
ByteField("end_channel",37)
]
class BTLEJack_Recover_Command(Packet):
name = "BTLEJack Recover Command"
fields_desc = [
ByteEnumField("operation_type",None, {
0x00 : "recover_crc_init",
0x01 : "recover_channel_map",
0x02 : "recover_hop"
})
]
class BTLEJack_Recover_Crcinit_Command(Packet):
name = "BTLEJack Recover CRCInit Command"
fields_desc = [
XLEIntField("access_address",None)
]
class BTLEJack_Recover_Channel_Map_Command(Packet):
name = "BTLEJack Recover Channel Map Command"
fields_desc = [
XLEIntField("access_address",None),
LEX3BytesField("crc_init",None),
ByteField("start_channel",0),
ByteField("end_channel",37),
LEIntField("timeout",None)
]
class BTLEJack_Recover_Hopping_Parameters_Command(Packet):
name = "BTLEJack Recover Hopping Parameters Command"
fields_desc = [
XLEIntField("access_address",None),
LEX3BytesField("crc_init",None),
BTLEChanMapField("channel_map",None)
]
class BTLEJack_Recover_Connection_AA_Command(Packet):
name = "BTLEJack Recover Connection AA Command"
fields_desc = [
XLEIntField("access_address",None)
]
class BTLEJack_Recover_Connection_AA_Chm_Command(Packet):
name = "BTLEJack Recover Connection AA Chm Command"
fields_desc = [
XLEIntField("access_address",None),
BTLEChanMapField("channel_map",None)
]
class BTLEJack_Recover_Connection_AA_Chm_HopInterval_Command(Packet):
name = "BTLEJack Recover Connection AA Chm Command"
fields_desc = [
XLEIntField("access_address",None),
BTLEChanMapField("channel_map",None),
XLEShortField("hop_interval",None)
]
class BTLEJack_Sniff_Connection_Request_Command(Packet):
name = "BTLEJack Sniff Connection Request Command"
fields_desc = [
BDAddrField("address",None),
ByteField("channel",37)
]
class BTLEJack_Sniff_Advertisements_Command(Packet):
name = "BTLEJack Sniff Advertisements Command"
fields_desc = [
BDAddrField("address",None),
ByteField("channel",37)
]
class BTLEJack_Jam_Advertisements_Command(Packet):
name = "BTLEJack Jam Advertisements Command"
fields_desc = [
ByteField("channel",37),
ByteField("offset",None),
FieldLenField("pattern_length", None,fmt="B", length_of="pattern"),
StrField("pattern",None)
]
class BTLEJack_Enable_Jamming_Command(Packet):
name = "BTLEJack Enable Jamming Command"
fields_desc = [
ByteEnumField("enabled",None,{0x00 : "no",0x01 : "yes"})
]
class BTLEJack_Enable_Hijacking_Command(Packet):
name = "BTLEJack Enable Hijacking Command"
fields_desc = [
ByteEnumField("enabled",None,{0x00 : "no",0x01 : "yes"})
]
class BTLEJack_Send_Packet_Command(Packet):
name = "BTLEJack Send Packet Command"
fields_desc = [
PacketField("ble_payload",None,BTLE_DATA)
]
class BTLEJack_Send_Packet_Response(Packet):
name = "BTLEJack Send Packet Response"
class BTLEJack_Enable_Jamming_Response(Packet):
name = "BTLEJack Enable Jamming Response"
class BTLEJack_Enable_Hijacking_Response(Packet):
name = "BTLEJack Enable Hijacking Response"
class BTLEJack_Recover_Response(Packet):
name = "BTLEJack Recover Response"
class BTLEJack_Scan_Connections_Response(Packet):
name = "BTLEJack Scan Connections Response"
class BTLEJack_Collaborative_Channel_Map_Response(Packet):
name = "BTLEJack Collaborative Channel Map Response"
class BTLEJack_Version_Response(Packet):
name = "BTLEJack Version Response"
fields_desc = [
ByteField("major",None),
ByteField("minor",None)
]
class BTLEJack_Reset_Response(Packet):
name = "BTLEJack Reset Response"
class BTLEJack_Sniff_Connection_Request_Response(Packet):
name = "BTLEJack Sniff Connection Request Response"
class BTLEJack_Sniff_Advertisements_Response(Packet):
name = "BTLEJack Sniff Advertisements Response"
class BTLEJack_Jam_Advertisements_Response(Packet):
name = "BTLEJack Jam Advertisements Response"
class BTLEJack_Verbose_Response(Packet):
name = "BTLEJack Verbose Response"
fields_desc = [StrField("message",None)]
class BTLEJack_Debug_Response(Packet):
name = "BTLEJack Debug Response"
fields_desc = [StrField("message",None)]
class BTLEJack_Recover_Connection_AA_Response(Packet):
name = "BTLEJack Recover Connection AA Response"
fields_desc = [
XLEIntField("access_address",None)
]
class BTLEJack_Recover_Connection_AA_Chm_Response(Packet):
name = "BTLEJack Recover Connection AA Chm Response"
fields_desc = [
XLEIntField("access_address",None)
]
class BTLEJack_Access_Address_Notification(Packet):
name = "BTLEJack Access Address Notification"
fields_desc = [
ByteField("channel",None),
ByteField("rssi", None),
XLEIntField("access_address",None)
]
class BTLEJack_CRCInit_Notification(Packet):
name = "BTLEJack CRCInit Notification"
fields_desc = [
XLEIntField("access_address",None),
LEX3BytesField("crc_init",None),
ByteField("unused",0)
]
class BTLEJack_Channel_Map_Notification(Packet):
name = "BTLEJack Channel Map Notification"
fields_desc = [
XLEIntField("access_address",None),
BTLEChanMapField("channel_map",None)
]
class BTLEJack_Hop_Interval_Notification(Packet):
name = "BTLEJack Hop Interval Notification"
fields_desc = [
XLEIntField("access_address",None),
XLEShortField("hop_interval",None)
]
class BTLEJack_Hop_Increment_Notification(Packet):
name = "BTLEJack Hop Increment Notification"
fields_desc = [
XLEIntField("access_address",None),
ByteField("hop_increment",None)
]
class BTLEJack_Nordic_Tap_Packet_Notification(Packet):
name = "BTLEJack Nordic Tap Packet Notification"
fields_desc = [
ByteField("header_length",None),
ByteField("flags",None),
ByteField("channel",None),
ByteField("rssi",None),
LEShortField("event_counter",None),
LEIntField("delta", None),
PacketField("ble_payload",None, BTLE_DATA)
]
class BTLEJack_Hijack_Status_Notification(Packet):
name = "BTLEJack Hijack Status Notification"
fields_desc = [
ByteEnumField("status",None, {0 : "success", 1 : "failure"})
]
class BTLEJack_Connection_Lost_Notification(Packet):
name = "BTLEJack Connection Lost Notification"
class BTLEJack_Advertisement_Notification(Packet):
name = "BTLEJack Advertisement Notification"
fields_desc = [
PacketField("ble_payload",None,BTLE_ADV)
]
class BTLEJack_Connection_Request_Notification(Packet):
name = "BTLEJack Connection Request Notification"
fields_desc = [
BitEnumField("RxAdd", 0, 1, {0: "public", 1: "random"}),
BitEnumField("TxAdd", 0, 1, {0: "public", 1: "random"}),
BitField("RFU", 0, 2),
BitEnumField("PDU_type", 0, 4, {0: "ADV_IND", 1: "ADV_DIRECT_IND", 2: "ADV_NONCONN_IND", 3: "SCAN_REQ",
4: "SCAN_RSP", 5: "CONNECT_REQ", 6: "ADV_SCAN_IND"}),
ByteField("payload_length", 0x22),
PacketField("ble_payload",None,BTLE_CONNECT_REQ)
]
bind_layers(BTLEJack_Hdr, BTLEJack_Version_Command,packet_type=0x1, opcode=0x1)
bind_layers(BTLEJack_Hdr, BTLEJack_Reset_Command,packet_type=0x1, opcode=0x2)
bind_layers(BTLEJack_Hdr, BTLEJack_Scan_Connections_Command, packet_type=0x1,opcode=0x3)
bind_layers(BTLEJack_Hdr, BTLEJack_Collaborative_Channel_Map_Command,packet_type=0x1,opcode=0xb)
bind_layers(BTLEJack_Hdr, BTLEJack_Recover_Command,packet_type=0x1, opcode=0x4)
bind_layers(BTLEJack_Recover_Command,BTLEJack_Recover_Crcinit_Command,operation_type=0x00)
bind_layers(BTLEJack_Recover_Command,BTLEJack_Recover_Channel_Map_Command,operation_type=0x01)
bind_layers(BTLEJack_Recover_Command,BTLEJack_Recover_Hopping_Parameters_Command,operation_type=0x02)
bind_layers(BTLEJack_Hdr, BTLEJack_Jam_Advertisements_Command,packet_type=0x1, opcode=0x5)
bind_layers(BTLEJack_Hdr, BTLEJack_Sniff_Connection_Request_Command,packet_type=0x1,opcode=0x7)
bind_layers(BTLEJack_Hdr, BTLEJack_Sniff_Advertisements_Command,packet_type=0x1,opcode=0xc)
bind_layers(BTLEJack_Hdr, BTLEJack_Enable_Jamming_Command,packet_type=0x1,opcode=0x8)
bind_layers(BTLEJack_Hdr, BTLEJack_Enable_Hijacking_Command,packet_type=0x1,opcode=0x9)
bind_layers(BTLEJack_Hdr, BTLEJack_Send_Packet_Command,packet_type=0x1,opcode=0xa)
bind_layers(BTLEJack_Hdr, BTLEJack_Send_Packet_Response,packet_type=0x2,opcode=0xa)
bind_layers(BTLEJack_Hdr, BTLEJack_Enable_Jamming_Response,packet_type=0x2,opcode=0x8)
bind_layers(BTLEJack_Hdr, BTLEJack_Enable_Hijacking_Response,packet_type=0x2,opcode=0x9)
bind_layers(BTLEJack_Hdr, BTLEJack_Sniff_Connection_Request_Response,packet_type=0x2, opcode=0x7)
bind_layers(BTLEJack_Hdr, BTLEJack_Sniff_Advertisements_Response,packet_type=0x1,opcode=0xc)
bind_layers(BTLEJack_Hdr, BTLEJack_Jam_Advertisements_Command,packet_type=0x1,opcode=0x5)
bind_layers(BTLEJack_Hdr, BTLEJack_Recover_Response,packet_type=0x2, opcode=0x4)
bind_layers(BTLEJack_Hdr, BTLEJack_Version_Response,packet_type=0x2, opcode=0x1)
bind_layers(BTLEJack_Hdr, BTLEJack_Reset_Response,packet_type=0x2, opcode=0x2)
bind_layers(BTLEJack_Hdr, BTLEJack_Scan_Connections_Response,packet_type=0x2, opcode=0x3)
bind_layers(BTLEJack_Hdr, BTLEJack_Collaborative_Channel_Map_Response,packet_type=0x2, opcode=0xb)
bind_layers(BTLEJack_Hdr, BTLEJack_Debug_Response,packet_type=0x2, opcode=0xe)
bind_layers(BTLEJack_Hdr, BTLEJack_Verbose_Response,packet_type=0x2, opcode=0xf)
bind_layers(BTLEJack_Hdr, BTLEJack_Access_Address_Notification, packet_type=0x4, notification_type=0x0)
bind_layers(BTLEJack_Hdr, BTLEJack_CRCInit_Notification, packet_type=0x4, notification_type=0x1)
bind_layers(BTLEJack_Hdr, BTLEJack_Channel_Map_Notification, packet_type=0x4, notification_type=0x2)
bind_layers(BTLEJack_Hdr, BTLEJack_Hop_Interval_Notification, packet_type=0x4, notification_type=0x3)
bind_layers(BTLEJack_Hdr, BTLEJack_Hop_Increment_Notification, packet_type=0x4, notification_type=0x4)
bind_layers(BTLEJack_Hdr, BTLEJack_Nordic_Tap_Packet_Notification, packet_type=0x4, notification_type=0x7)
bind_layers(BTLEJack_Hdr, BTLEJack_Hijack_Status_Notification, packet_type=0x4, notification_type=0x8)
bind_layers(BTLEJack_Hdr, BTLEJack_Connection_Lost_Notification, packet_type=0x4, notification_type=0x9)
bind_layers(BTLEJack_Hdr, BTLEJack_Connection_Request_Notification, packet_type=0x4, notification_type=0x6)
bind_layers(BTLEJack_Hdr, BTLEJack_Advertisement_Notification, packet_type=0x4, notification_type=0xa)
| true
| true
|
f7198b76ba36f1f12ec60d6aea9e6f66c8d175da
| 7,421
|
py
|
Python
|
backend/server/models.py
|
thunderlink/thunderfish
|
a600021187a50bb078d9c36306564470cc6e9fd8
|
[
"MIT"
] | 3
|
2019-04-18T04:45:27.000Z
|
2019-11-06T18:17:29.000Z
|
backend/server/models.py
|
thunderlink/thunderfish
|
a600021187a50bb078d9c36306564470cc6e9fd8
|
[
"MIT"
] | 59
|
2019-04-22T07:05:52.000Z
|
2022-03-11T23:48:33.000Z
|
backend/server/models.py
|
thunderlink/thunderfish
|
a600021187a50bb078d9c36306564470cc6e9fd8
|
[
"MIT"
] | 4
|
2019-04-24T05:49:21.000Z
|
2019-11-21T00:26:00.000Z
|
from django.db import models
from django.contrib.auth.models import User
import re
from math import sqrt, pi
# Path to default image
DEFAULT_IMAGE = '../media/app_logo.png'
DEFAULT_PROFILE_IMG = 1
DEFAULT_MEETING_IMG = 2
MEDIA_URL = '/media/'
# Unique email for each user
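# (Editor's note: this patches the built-in User model in place and assumes
#  the email field sits at index 7 of User._meta.local_fields.)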
User._meta.local_fields[7].__dict__['_unique'] = True
class Image(models.Model):
profile = models.ImageField(blank=True, null=False, default=DEFAULT_IMAGE)
title = models.CharField(max_length=100, blank=True)
url = models.CharField(max_length=1000, blank=True, null=True)
def __str__(self):
return str(self.id)
class Profile(models.Model):
GENDER_MALE = 0
GENDER_FEMALE = 1
GENDER_PRIVATE = 2
GENDER_CHOICES = [(GENDER_MALE, 'Male'), (GENDER_FEMALE, 'Female'), (GENDER_PRIVATE, 'Private')]
user = models.OneToOneField(User, on_delete=models.DO_NOTHING)
nickname = models.CharField(max_length=20)
photo = models.ForeignKey(Image, related_name="profile_photo", on_delete=models.CASCADE, default=DEFAULT_PROFILE_IMG)
# email = models.EmailField(max_length=30)
name = models.CharField(max_length=50)
gender = models.IntegerField(choices=GENDER_CHOICES, default=GENDER_PRIVATE)
    region = models.CharField(max_length=100, blank=True) # may not be necessary; could be filled via an external API
    introduce = models.CharField(max_length=200, blank=True)
def __str__(self):
return self.nickname
class Meta:
ordering = ('name', )
class Meeting(models.Model):
STATUS_RECRUITING = 0
STATUS_COMPLETE = 1
STATUS_CANCELED = 2
STATUS_CHOICES = [(STATUS_RECRUITING, 'Recruiting'), (STATUS_COMPLETE, 'Complete'), (STATUS_CANCELED, 'Canceled')]
name = models.CharField(max_length=50)
host = models.ForeignKey(Profile, related_name="meeting_hosted", on_delete=models.DO_NOTHING)
date = models.DateTimeField('meeting date')
posted_date = models.DateTimeField('posted date', auto_now_add=True)
participant = models.ManyToManyField(Profile, through = 'Membership')
    # contributor - people who opened the meeting together with the host
max_participant = models.IntegerField()
deadline = models.DateTimeField('meeting deadline')
region = models.CharField(max_length=100, blank=True)
photo = models.ForeignKey(Image, related_name="meeting_photo", on_delete=models.CASCADE, default=DEFAULT_MEETING_IMG)
content = models.CharField(max_length=500)
tag_set = models.ManyToManyField('Tag', blank=True)
    status = models.IntegerField(choices=STATUS_CHOICES)  # 0 recruiting, 1 complete, 2 canceled (see STATUS_CHOICES)
open_chat = models.URLField(max_length=100, blank=True) # remove default
latitude = models.DecimalField(max_digits=30, decimal_places=15, default=0, blank=True)
longitude = models.DecimalField(max_digits=30, decimal_places=15, default=0, blank=True)
    # Extract tags from the content: fetch the Tag object for each tag, create a
    # Tag instance for new tags, and register them in this meeting's tag_set.
    # Question: does \w match Korean?  (In Python 3, \w is Unicode-aware, so yes.)
    # Exception handling for invalid tags should still be added.
def tag_save(self, tag_string):
tags = re.findall(r'\b(\w+)\b', self.content)
if not tags:
return
for t in tags:
tag, tag_created = Tag.objects.get_or_create(name=t)
self.tag_set.add(tag)
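        # Illustrative sketch (not part of the original code): for content
        # "Han river picnic 한강 피크닉", re.findall(r'\b(\w+)\b', self.content)
        # returns ['Han', 'river', 'picnic', '한강', '피크닉'].  Note that the
        # tag_string argument is currently ignored -- tags are always extracted
        # from self.content.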
def __str__(self):
return self.name
@staticmethod
def distance_search(result, dist, lat, long):
        ## Returns a list of (meeting, distance) tuples for meetings that are
        ## less than `dist` kilometers from (lat, long),
        ## ordered by increasing distance.
ret = []
for meet in result:
delta_phi = abs(float(meet.latitude) - lat) ** 2
delta_theta = abs(float(meet.longitude) - long) ** 2
calculated_distance = float(6371 * sqrt(delta_phi + delta_theta) * 2 * pi / 360)
if calculated_distance <= dist:
ret.append((result.get(pk=meet.id), calculated_distance))
ret.sort(key = lambda item : item[1])
print(ret)
return ret
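        # Worked example of the approximation above (illustrative, not exact geodesy):
        # an offset of 1 degree in both latitude and longitude gives
        #   sqrt(1 + 1) * 6371 * 2 * pi / 360 ~= 1.414 * 111.2 ~= 157 km.
        # Degrees are converted to kilometers using the mean Earth radius (6371 km);
        # longitude is not scaled by cos(latitude), so east-west distances are
        # overestimated away from the equator.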
class Meta:
ordering = ['-id']
class Tag(models.Model):
name = models.CharField(max_length=100, unique=True)
def __str__(self):
return self.name
class Comment(models.Model):
date = models.DateTimeField('commented date', auto_now_add=True)
comment_text = models.CharField(max_length=1000, default="Test Text")
# parent_comment = models.ForeignKey(Comment, on_delete=models.CASCADE)
parent_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE)
writer = models.ForeignKey(Profile, on_delete=models.CASCADE)
def __str__(self):
return self.comment_text
# For notification 1 : New comment for host
def save(self, *args, **kwargs):
notification = Notification(meeting=self.parent_meeting, profile=self.parent_meeting.host, notification = Notification.NOTIFICATION_NEW_COMMENT_FOR_HOST)
notification.save()
super().save(*args, **kwargs)
# we should add url field.
class Notification(models.Model):
NOTIFICATION_NEW_APPLY = 0
NOTIFICATION_NEW_COMMENT_FOR_HOST = 1
NOTIFICATION_APPLY_REJECTED = 2
NOTIFICATION_APPLY_APPROVED = 3
NOTIFICATION_CHOICES = [(NOTIFICATION_NEW_APPLY, 'new apply'), (NOTIFICATION_NEW_COMMENT_FOR_HOST, 'new comment for host'),(NOTIFICATION_APPLY_REJECTED, 'apply is rejected'),(NOTIFICATION_APPLY_APPROVED, 'apply is approved')]
profile = models.ForeignKey(Profile,on_delete=models.CASCADE)
checked = models.BooleanField(default=False)
meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE, null=True)
notification = models.IntegerField(choices=NOTIFICATION_CHOICES)
def __str__(self):
return str(self.profile)
class Meta:
ordering = ['checked', '-id']
class Membership(models.Model):
STATUS_WAITING = 0
STATUS_APPROVED = 1
STATUS_REJECTED = 2
STATUS_CHOICES = [(STATUS_WAITING, 'waiting'), (STATUS_APPROVED, 'approved'), (STATUS_REJECTED, 'rejected')]
profile = models.ForeignKey(Profile, on_delete=models.CASCADE)
meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
status = models.IntegerField(choices=STATUS_CHOICES)
message = models.CharField(max_length = 500, null=True, blank=True)
def __str__(self):
return str(self.meeting.id) + '@' + str(self.profile.id)
class Meta:
unique_together = (
('profile', 'meeting')
)
# For notification 0 : New apply
# For notification 2 : Apply rejected
# For notification 3 : Apply approved
def save(self, *args, **kwargs):
if(self.pk==None):
notification = Notification(meeting=self.meeting, profile=self.meeting.host, notification = Notification.NOTIFICATION_NEW_APPLY)
notification.save()
else:
if(self.status == self.STATUS_CHOICES[1][0]):
notification = Notification(meeting=self.meeting, profile=self.profile, notification = Notification.NOTIFICATION_APPLY_APPROVED)
notification.save()
print("Notify")
elif(self.status == self.STATUS_CHOICES[2][0]):
notification = Notification(meeting = self.meeting, profile = self.profile, notification = Notification.NOTIFICATION_APPLY_REJECTED)
notification.save()
super().save(*args, **kwargs)
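    # Illustrative lifecycle sketch (not part of the original code):
    #   m = Membership.objects.create(profile=p, meeting=mt, status=Membership.STATUS_WAITING)
    #       -> save() sees pk is None and notifies the host (NOTIFICATION_NEW_APPLY)
    #   m.status = Membership.STATUS_APPROVED; m.save()
    #       -> notifies the applicant (NOTIFICATION_APPLY_APPROVED)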
| 41
| 229
| 0.694381
|
from django.db import models
from django.contrib.auth.models import User
import re
from math import sqrt, pi
DEFAULT_IMAGE = '../media/app_logo.png'
DEFAULT_PROFILE_IMG = 1
DEFAULT_MEETING_IMG = 2
MEDIA_URL = '/media/'
User._meta.local_fields[7].__dict__['_unique'] = True
class Image(models.Model):
profile = models.ImageField(blank=True, null=False, default=DEFAULT_IMAGE)
title = models.CharField(max_length=100, blank=True)
url = models.CharField(max_length=1000, blank=True, null=True)
def __str__(self):
return str(self.id)
class Profile(models.Model):
GENDER_MALE = 0
GENDER_FEMALE = 1
GENDER_PRIVATE = 2
GENDER_CHOICES = [(GENDER_MALE, 'Male'), (GENDER_FEMALE, 'Female'), (GENDER_PRIVATE, 'Private')]
user = models.OneToOneField(User, on_delete=models.DO_NOTHING)
nickname = models.CharField(max_length=20)
photo = models.ForeignKey(Image, related_name="profile_photo", on_delete=models.CASCADE, default=DEFAULT_PROFILE_IMG)
name = models.CharField(max_length=50)
gender = models.IntegerField(choices=GENDER_CHOICES, default=GENDER_PRIVATE)
region = models.CharField(max_length=100, blank = True)
introduce = models.CharField(max_length=200, blank = True)
def __str__(self):
return self.nickname
class Meta:
ordering = ('name', )
class Meeting(models.Model):
STATUS_RECRUITING = 0
STATUS_COMPLETE = 1
STATUS_CANCELED = 2
STATUS_CHOICES = [(STATUS_RECRUITING, 'Recruiting'), (STATUS_COMPLETE, 'Complete'), (STATUS_CANCELED, 'Canceled')]
name = models.CharField(max_length=50)
host = models.ForeignKey(Profile, related_name="meeting_hosted", on_delete=models.DO_NOTHING)
date = models.DateTimeField('meeting date')
posted_date = models.DateTimeField('posted date', auto_now_add=True)
participant = models.ManyToManyField(Profile, through = 'Membership')
max_participant = models.IntegerField()
deadline = models.DateTimeField('meeting deadline')
region = models.CharField(max_length=100, blank=True)
photo = models.ForeignKey(Image, related_name="meeting_photo", on_delete=models.CASCADE, default=DEFAULT_MEETING_IMG)
content = models.CharField(max_length=500)
tag_set = models.ManyToManyField('Tag', blank=True)
status = models.IntegerField(choices=STATUS_CHOICES)
open_chat = models.URLField(max_length=100, blank=True)
latitude = models.DecimalField(max_digits=30, decimal_places=15, default=0, blank=True)
longitude = models.DecimalField(max_digits=30, decimal_places=15, default=0, blank=True)
def tag_save(self, tag_string):
tags = re.findall(r'\b(\w+)\b', self.content)
if not tags:
return
for t in tags:
tag, tag_created = Tag.objects.get_or_create(name=t)
self.tag_set.add(tag)
def __str__(self):
return self.name
@staticmethod
def distance_search(result, dist, lat, long):
        ret = []
        for meet in result:
            delta_phi = abs(float(meet.latitude) - lat) ** 2
            delta_theta = abs(float(meet.longitude) - long) ** 2
calculated_distance = float(6371 * sqrt(delta_phi + delta_theta) * 2 * pi / 360)
if calculated_distance <= dist:
ret.append((result.get(pk=meet.id), calculated_distance))
ret.sort(key = lambda item : item[1])
print(ret)
return ret
class Meta:
ordering = ['-id']
class Tag(models.Model):
name = models.CharField(max_length=100, unique=True)
def __str__(self):
return self.name
class Comment(models.Model):
date = models.DateTimeField('commented date', auto_now_add=True)
comment_text = models.CharField(max_length=1000, default="Test Text")
parent_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE)
writer = models.ForeignKey(Profile, on_delete=models.CASCADE)
def __str__(self):
return self.comment_text
def save(self, *args, **kwargs):
notification = Notification(meeting=self.parent_meeting, profile=self.parent_meeting.host, notification = Notification.NOTIFICATION_NEW_COMMENT_FOR_HOST)
notification.save()
super().save(*args, **kwargs)
class Notification(models.Model):
NOTIFICATION_NEW_APPLY = 0
NOTIFICATION_NEW_COMMENT_FOR_HOST = 1
NOTIFICATION_APPLY_REJECTED = 2
NOTIFICATION_APPLY_APPROVED = 3
NOTIFICATION_CHOICES = [(NOTIFICATION_NEW_APPLY, 'new apply'), (NOTIFICATION_NEW_COMMENT_FOR_HOST, 'new comment for host'),(NOTIFICATION_APPLY_REJECTED, 'apply is rejected'),(NOTIFICATION_APPLY_APPROVED, 'apply is approved')]
profile = models.ForeignKey(Profile,on_delete=models.CASCADE)
checked = models.BooleanField(default=False)
meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE, null=True)
notification = models.IntegerField(choices=NOTIFICATION_CHOICES)
def __str__(self):
return str(self.profile)
class Meta:
ordering = ['checked', '-id']
class Membership(models.Model):
STATUS_WAITING = 0
STATUS_APPROVED = 1
STATUS_REJECTED = 2
STATUS_CHOICES = [(STATUS_WAITING, 'waiting'), (STATUS_APPROVED, 'approved'), (STATUS_REJECTED, 'rejected')]
profile = models.ForeignKey(Profile, on_delete=models.CASCADE)
meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
status = models.IntegerField(choices=STATUS_CHOICES)
message = models.CharField(max_length = 500, null=True, blank=True)
def __str__(self):
return str(self.meeting.id) + '@' + str(self.profile.id)
class Meta:
unique_together = (
('profile', 'meeting')
)
def save(self, *args, **kwargs):
if(self.pk==None):
notification = Notification(meeting=self.meeting, profile=self.meeting.host, notification = Notification.NOTIFICATION_NEW_APPLY)
notification.save()
else:
if(self.status == self.STATUS_CHOICES[1][0]):
notification = Notification(meeting=self.meeting, profile=self.profile, notification = Notification.NOTIFICATION_APPLY_APPROVED)
notification.save()
print("Notify")
elif(self.status == self.STATUS_CHOICES[2][0]):
notification = Notification(meeting = self.meeting, profile = self.profile, notification = Notification.NOTIFICATION_APPLY_REJECTED)
notification.save()
super().save(*args, **kwargs)
| true
| true
|
f7198bd1b623cee47276165d5348854e67b0535b
| 45,311
|
py
|
Python
|
pyNastran/dev/bdf_vectorized/cards/dynamic.py
|
Msegade/pyNastran
|
ae36548579c6bb2ee3a4fff207f7211c1986a5ab
|
[
"BSD-3-Clause"
] | null | null | null |
pyNastran/dev/bdf_vectorized/cards/dynamic.py
|
Msegade/pyNastran
|
ae36548579c6bb2ee3a4fff207f7211c1986a5ab
|
[
"BSD-3-Clause"
] | null | null | null |
pyNastran/dev/bdf_vectorized/cards/dynamic.py
|
Msegade/pyNastran
|
ae36548579c6bb2ee3a4fff207f7211c1986a5ab
|
[
"BSD-3-Clause"
] | 1
|
2020-10-04T19:28:07.000Z
|
2020-10-04T19:28:07.000Z
|
# pylint: disable=C0103,R0902,R0904,R0914
"""
All dynamic control cards are defined in this file. This includes:
* FREQ
* FREQ1
 * FREQ2
* FREQ3
* FREQ4
* FREQ5 (not implemented)
* NLPCI
* NLPARM
* TSTEP
* TSTEPNL
All cards are BaseCard objects.
"""
from typing import TYPE_CHECKING
from math import log, exp, ceil
import numpy as np
from numpy import unique, hstack
from pyNastran.utils.numpy_utils import integer_types
from pyNastran.bdf.field_writer_8 import set_blank_if_default
from pyNastran.bdf.cards.base_card import BaseCard
from pyNastran.bdf.bdf_interface.assign_type import (
integer, integer_or_blank, double, double_or_blank,
string_or_blank, blank, fields, components_or_blank
)
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
if TYPE_CHECKING: # pragma: no cover
from pyNastran.bdf.bdf import BDF
class DELAY(BaseCard):
type = 'DELAY'
def __init__(self, sid, nodes, components, delays, comment=''):
"""
+-------+-----+-----------+-----+--------+------+-----+--------+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+=====+===========+=====+========+======+=====+========+=====+
| DELAY | SID | POINT ID1 | C1 | T1 | P2 | C2 | T2 | |
+-------+-----+-----------+-----+--------+------+-----+--------+-----+
"""
if comment:
self.comment = comment
#: Identification number of DELAY entry. (Integer > 0)
self.sid = sid
#: Grid, extra, or scalar point identification number. (Integer > 0)
self.nodes = nodes
#: Component number. (Integers 1 through 6 for grid points; zero or blank for extra
#: or scalar points)
self.components = components
#: Time delay (tau) for designated point Pi and component Ci. (Real)
self.delays = delays
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a DELAY card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
nodes = [integer(card, 2, 'node')]
components = [integer(card, 3, 'components')]
delays = [double_or_blank(card, 4, 'delay')]
assert components[0] in [0, 1, 2, 3, 4, 5, 6], components
if card.field(5):
nodes.append(integer(card, 5, 'node'))
components.append(integer(card, 6, 'components'))
delays.append(double_or_blank(card, 7, 'delay'))
assert components[1] in [0, 1, 2, 3, 4, 5, 6], components
return DELAY(sid, nodes, components, delays, comment=comment)
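    # Illustrative sketch (not part of the original code): the free-field card
    #   DELAY, 5, 100, 3, 0.5
    # parses to DELAY(sid=5, nodes=[100], components=[3], delays=[0.5]), i.e. a
    # time delay of 0.5 (model time units) on component 3 of grid point 100.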
def add(self, delay):
assert self.sid == delay.sid, 'sid=%s delay.sid=%s' % (self.sid, delay.sid)
if delay.comment:
            if hasattr(self, '_comment'):
self._comment += delay.comment
else:
self._comment = delay.comment
self.nodes += delay.nodes
self.components += delay.components
self.delays += delay.delays
def get_delay_at_freq(self, freq):
return self.nodes, self.components, self.delays
#def cross_reference(self, model: BDF) -> None:
#"""
#Cross links the card so referenced cards can be extracted directly
#Parameters
#----------
#model : BDF()
#the BDF object
#"""
#msg = ', which is required by DELAY sid=%s' % self.sid
#self.nodes_ref = model.Node(self.node_ids, msg=msg)
#@property
#def node_id1(self):
#if isinstance(self.nodes[0], integer_types):
#return self.nodes[0]
#return self.nodes_ref[0].nid
#@property
#def node_id2(self):
#if isinstance(self.nodes[1], integer_types):
#return self.nodes[1]
#return self.nodes_ref[1].nid
@property
def node_ids(self):
node_ids = [self.node_id1]
if len(self.components) == 2:
node_ids.append(self.node_id2)
return node_ids
def raw_fields(self):
list_fields = ['DELAY', self.sid]
for nid, comp, delay in zip(self.node_ids, self.components, self.delays):
if isinstance(nid, integer_types):
nidi = nid
else:
nidi = nid.nid
list_fields += [nidi, comp, delay]
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
msg = self.comment
node_ids = self.node_ids
if size == 8:
for nid, comp, delay in zip(node_ids, self.components, self.delays):
msg += print_card_8(['DELAY', self.sid, nid, comp, delay])
else:
for nid, comp, delay in zip(node_ids, self.components, self.delays):
msg += print_card_16(['DELAY', self.sid, nid, comp, delay])
return msg
class DPHASE(BaseCard):
type = 'DPHASE'
def __init__(self, sid, nodes, components, phase_leads, comment=''):
"""
+--------+-----+-----------+-----+------+------+-----+-----+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+========+=====+===========+=====+======+======+=====+=====+=====+
| DPHASE | SID | POINT ID1 | C1 | TH1 | P2 | C2 | TH2 | |
+--------+-----+-----------+-----+------+------+-----+-----+-----+
"""
if comment:
self.comment = comment
self.sid = sid
self.nodes = nodes
self.components = components
self.phase_leads = phase_leads
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a DPHASE card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
nodes = [integer(card, 2, 'node')]
components = [integer(card, 3, 'components')]
phase_leads = [double_or_blank(card, 4, 'phase_lead')]
assert components[0] in [0, 1, 2, 3, 4, 5, 6], components
if card.field(5):
nodes.append(integer(card, 5, 'node'))
components.append(integer(card, 6, 'components'))
phase_leads.append(double_or_blank(card, 7, 'phase_lead'))
assert components[1] in [0, 1, 2, 3, 4, 5, 6], components
return DPHASE(sid, nodes, components, phase_leads, comment=comment)
def add(self, dphase):
assert self.sid == dphase.sid, 'sid=%s dphase.sid=%s' % (self.sid, dphase.sid)
if dphase.comment:
            if hasattr(self, '_comment'):
self._comment += dphase.comment
else:
self._comment = dphase.comment
self.nodes += dphase.nodes
self.components += dphase.components
self.phase_leads += dphase.phase_leads
#def cross_reference(self, model: BDF) -> None:
#"""
#Cross links the card so referenced cards can be extracted directly
#Parameters
#----------
#model : BDF()
#the BDF object
#"""
#msg = ', which is required by DPHASE sid=%s' % self.sid
#self.nodes_ref = model.Nodes(self.node_ids, msg=msg)
#@property
#def node_id1(self):
#if isinstance(self.nodes[0], integer_types):
#return self.nodes[0]
#return self.nodes_ref[0].nid
#@property
#def node_id2(self):
#if isinstance(self.nodes[1], integer_types):
#return self.nodes[1]
#return self.nodes_ref[1].nid
@property
def node_ids(self):
node_ids = [self.node_id1]
if len(self.components) == 2:
node_ids.append(self.node_id2)
return node_ids
def raw_fields(self):
list_fields = ['DPHASE', self.sid]
for nid, comp, delay in zip(self.nodes, self.components, self.phase_leads):
if isinstance(nid, integer_types):
nidi = nid
else:
nidi = nid.nid
list_fields += [nidi, comp, delay]
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
msg = self.comment
node_ids = self.node_ids
if size == 8:
for nid, comp, delay in zip(node_ids, self.components, self.phase_leads):
msg += print_card_8(['DPHASE', self.sid, nid, comp, delay])
else:
for nid, comp, delay in zip(node_ids, self.components, self.phase_leads):
msg += print_card_16(['DPHASE', self.sid, nid, comp, delay])
return msg
class FREQ(BaseCard):
"""
Defines a set of frequencies to be used in the solution of frequency
response problems.
+------+-----+-----+-----+------+-----+-----+-----+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+======+=====+=====+=====+======+=====+=====+=====+=====+
| FREQ | SID | F1 | F2 | etc. | | | | |
+------+-----+-----+-----+------+-----+-----+-----+-----+
"""
type = 'FREQ'
def __init__(self, sid, freqs, comment=''):
if comment:
self.comment = comment
self.sid = sid
self.freqs = np.unique(freqs)
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a FREQ card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
freqs = fields(double, card, 'freq', i=2, j=len(card))
return FREQ(sid, freqs, comment=comment)
def get_freqs(self):
return self.freqs
def add_frequencies(self, freqs):
"""
Combines the frequencies from 1 FREQx object with another.
All FREQi entries with the same frequency set identification numbers
will be used. Duplicate frequencies will be ignored.
Parameters
----------
freqs : ???
the frequencies for a FREQx object
"""
#print("self.freqs = ",self.freqs)
#print("freqs = ",freqs)
self.freqs = unique(hstack([self.freqs, freqs]))
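        # Illustrative sketch: if self.freqs is array([10., 20.]) and freqs is
        # [20., 30.], the result is the sorted union array([10., 20., 30.]).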
def add_frequency_object(self, freq):
"""
:param freq: a FREQx object
.. seealso:: :func:`addFrequencies`
"""
self.add_frequencies(freq.freqs)
def raw_fields(self):
list_fields = ['FREQ', self.sid] + list(self.freqs)
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class FREQ1(FREQ):
"""
Defines a set of frequencies to be used in the solution of frequency
response problems by specification of a starting frequency, frequency
increment, and the number of increments desired.
+-------+-----+-----+-----+-----+-----+-----+-----+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+=====+=====+=====+=====+=====+=====+=====+=====+
| FREQ1 | SID | F1 | DF | NDF | | | | |
+-------+-----+-----+-----+-----+-----+-----+-----+-----+
.. note:: this card rewrites as a FREQ card
"""
type = 'FREQ1'
def __init__(self, sid, f1, df, ndf, comment=''):
if comment:
self.comment = comment
self.sid = sid
self.f1 = f1
self.df = df
self.ndf = ndf
freqs = []
for i in range(ndf):
freqs.append(f1 + i * df)
self.freqs = unique(freqs)
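        # Illustrative sketch: FREQ1(sid=5, f1=10.0, df=2.5, ndf=4) expands, with
        # the loop above, to the discrete frequencies [10.0, 12.5, 15.0, 17.5].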
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a FREQ1 card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
f1 = double_or_blank(card, 2, 'f1', 0.0)
df = double(card, 3, 'df')
ndf = integer_or_blank(card, 4, 'ndf', 1)
assert len(card) <= 5, 'len(FREQ card) = %i\ncard=%s' % (len(card), card)
return FREQ1(sid, f1, df, ndf, comment=comment)
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class FREQ2(FREQ):
"""
Defines a set of frequencies to be used in the solution of frequency
response problems by specification of a starting frequency, final
frequency, and the number of logarithmic increments desired.
+-------+-----+-----+-----+-----+-----+-----+-----+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+=====+=====+=====+=====+=====+=====+=====+=====+
| FREQ2 | SID | F1 | F2 | NDF | | | | |
+-------+-----+-----+-----+-----+-----+-----+-----+-----+
.. note:: this card rewrites as a FREQ card
"""
type = 'FREQ2'
def __init__(self, sid, f1, f2, ndf=1, comment=''):
if comment:
self.comment = comment
self.sid = sid
self.f1 = f1
self.f2 = f2
self.ndf = ndf
d = 1. / ndf * log(f2 / f1)
freqs = []
for i in range(ndf):
freqs.append(f1 * exp(i * d)) # 0 based index
self.freqs = np.unique(freqs)
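        # Illustrative sketch: FREQ2(sid=6, f1=1.0, f2=8.0, ndf=3) gives
        # d = log(8)/3 = log(2), so freqs = [1., 2., 4.]; note that with this
        # loop f2 itself is not generated -- the spacing covers ndf logarithmic
        # steps starting at f1.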
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a FREQ2 card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
f1 = double(card, 2, 'f1') # default=0.0 ?
f2 = double(card, 3, 'f2')
ndf = integer_or_blank(card, 4, 'nf', 1)
assert len(card) <= 5, 'len(FREQ2 card) = %i\ncard=%s' % (len(card), card)
return FREQ2(sid, f1, f2, ndf, comment=comment)
#return FREQ(sid, freqs, comment=comment)
class FREQ3(FREQ):
"""
+-------+-----+------+-------+--------+-----+---------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+=======+=====+======+=======+========+=====+=========+
| FREQ3 | SID | F1 | F2 | TYPE | NEF | CLUSTER |
+-------+-----+------+-------+--------+-----+---------+
| FREQ3 | 6 | 20.0 | 200.0 | LINEAR | 10 | 2.0 |
+-------+-----+------+-------+--------+-----+---------+
"""
type = 'FREQ3'
    def __init__(self, sid, f1, f2=None, Type='LINEAR', nef=10, cluster=1.0, comment=''):
if comment:
self.comment = comment
if f2 is None:
f2 = f1
self.sid = sid
self.f1 = f1
self.f2 = f2
self.Type = Type
self.nef = nef
self.cluster = cluster
@classmethod
def add_card(cls, card, comment=''):
sid = integer(card, 1, 'sid')
        f1 = double(card, 2, 'f1')
        f2 = double_or_blank(card, 3, 'f2', f1)
        Type = string_or_blank(card, 4, 'Type', 'LINEAR')
        nef = integer_or_blank(card, 5, 'nef', 10)
        cluster = double_or_blank(card, 6, 'cluster', 1.0)
        return FREQ3(sid, f1, f2, Type, nef, cluster, comment=comment)
def raw_fields(self):
return ['FREQ3', self.sid, self.f1, self.f2, self.Type, self.nef, self.cluster]
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class FREQ4(FREQ):
"""
Defines a set of frequencies used in the solution of modal frequency
response problems by specifying the amount of 'spread' around each natural
frequency and the number of equally spaced excitation frequencies within
the spread.
+-------+-----+-----+-----+------+-----+-----+-----+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+=====+=====+=====+======+=====+=====+=====+=====+
| FREQ4 | SID | F1 | F2 | FSPD | NFM | | | |
+-------+-----+-----+-----+------+-----+-----+-----+-----+
.. note:: this card rewrites as a FREQ card
.. todo:: not done...
"""
type = 'FREQ4'
def __init__(self, sid, f1, f2, fspread, nfm, comment=''):
if comment:
self.comment = comment
self.sid = sid
self.f1 = f1
self.f2 = f2
self.fspread = fspread
self.nfm = nfm
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a FREQ4 card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
f1 = double_or_blank(card, 2, 'f1', 0.0)
f2 = double_or_blank(card, 3, 'f2', 1.e20)
fspread = double_or_blank(card, 4, 'fspd', 0.1)
nfm = integer_or_blank(card, 5, 'nfm', 3)
assert len(card) <= 6, 'len(FREQ card) = %i\ncard=%s' % (len(card), card)
return FREQ4(sid, f1, f2, fspread, nfm, comment=comment)
def raw_fields(self):
list_fields = ['FREQ4', self.sid, self.f1, self.f2, self.fspread,
self.nfm]
return list_fields
def repr_fields(self):
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
#class FREQ5(FREQ):
#type = 'FREQ5'
#def __init__(self, card=None, data=None, comment=''):
#if comment:
# self.comment = comment
#raise NotImplementedError()
#def write_card(self, size: int=8, is_double: bool=False) -> str:
#card = self.repr_fields()
#if size == 8:
#return self.comment + print_card_8(card)
#return self.comment + print_card_16(card)
class NLPARM(BaseCard):
"""
Defines a set of parameters for nonlinear static analysis iteration
strategy.
+--------+--------+------+------+---------+-------+---------+---------+--------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+========+========+======+======+=========+=======+=========+=========+========+
| NLPARM | ID | NINC | DT | KMETHOD | KSTEP | MAXITER | CONV | INTOUT |
+--------+--------+------+------+---------+-------+---------+---------+--------+
| | ESPU | EPSP | EPSW | MAXDIV | MAXQN | MAXLS | FSTRESS | LSTOL |
+--------+--------+------+------+---------+-------+---------+---------+--------+
| | MAXBIS | | | | MAXR | | RTOLB | CONV |
+--------+--------+------+------+---------+-------+---------+---------+--------+
"""
type = 'NLPARM'
def __init__(self, nlparm_id, ninc=10, dt=0.0, kmethod='AUTO', kstep=5,
max_iter=25, conv='PW', int_out='NO',
eps_u=0.01, eps_p=0.01, eps_w=0.01, max_div=3, max_qn=None, max_ls=4,
fstress=0.2, ls_tol=0.5, max_bisect=5, max_r=20., rtol_b=20., comment=''):
if comment:
self.comment = comment
self.nlparm_id = nlparm_id
self.ninc = ninc
self.dt = dt
self.kmethod = kmethod
self.kstep = kstep
self.max_iter = max_iter
self.conv = conv
self.int_out = int_out
# line 2
self.eps_p = eps_p
self.eps_u = eps_u
self.eps_w = eps_w
self.max_div = max_div
self.max_qn = max_qn
self.max_ls = max_ls
self.fstress = fstress
self.ls_tol = ls_tol
# line 3
self.max_bisect = max_bisect
self.max_r = max_r
self.rtol_b = rtol_b
if self.max_qn is None:
if kmethod == 'PFNT':
self.max_qn = 0
else:
self.max_qn = max_iter
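        # Illustrative sketch: NLPARM(nlparm_id=1, kmethod='PFNT') leaves max_qn=0
        # (no quasi-Newton updates), while NLPARM(nlparm_id=1) with the default
        # kmethod='AUTO' sets max_qn to max_iter (25), mirroring the branch above.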
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a NLPARM card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
nlparm_id = integer(card, 1, 'nlparm_id')
ninc = integer_or_blank(card, 2, 'ninc', 10)
dt = double_or_blank(card, 3, 'dt', 0.0)
kmethod = string_or_blank(card, 4, 'kmethod', 'AUTO')
kstep = integer_or_blank(card, 5, 'kstep', 5)
max_iter = integer_or_blank(card, 6, 'max_iter', 25)
conv = string_or_blank(card, 7, 'conv', 'PW')
int_out = string_or_blank(card, 8, 'intOut', 'NO')
# line 2
eps_u = double_or_blank(card, 9, 'eps_u', 0.01)
eps_p = double_or_blank(card, 10, 'eps_p', 0.01)
eps_w = double_or_blank(card, 11, 'eps_w', 0.01)
max_div = integer_or_blank(card, 12, 'max_div', 3)
if kmethod == 'PFNT':
max_qn = integer_or_blank(card, 13, 'max_qn', 0)
else:
max_qn = integer_or_blank(card, 13, 'max_qn', max_iter)
max_ls = integer_or_blank(card, 14, 'max_ls', 4)
fstress = double_or_blank(card, 15, 'fstress', 0.2)
ls_tol = double_or_blank(card, 16, 'ls_tol', 0.5)
# line 3
max_bisect = integer_or_blank(card, 17, 'max_bisect', 5)
max_r = double_or_blank(card, 21, 'max_r', 20.)
rtol_b = double_or_blank(card, 23, 'rtol_b', 20.)
assert len(card) <= 24, 'len(NLPARM card) = %i\ncard=%s' % (len(card), card)
return NLPARM(nlparm_id, ninc, dt, kmethod, kstep, max_iter, conv,
int_out, eps_u, eps_p, eps_w, max_div,
max_qn, max_ls, fstress,
ls_tol, max_bisect, max_r,
rtol_b, comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
"""
Adds a NLPARM card from the OP2
Parameters
----------
data : List[varies]
a list of fields defined in OP2 format
comment : str; default=''
a comment for the card
"""
(nlparm_id, ninc, dt, kmethod, kstep, max_iter, conv, int_out, eps_u, eps_p,
eps_w, max_div, max_qn, max_ls, fstress, ls_tol, max_bisect, max_r,
rtol_b) = data
if kmethod == 1:
kmethod = 'AUTO'
elif kmethod == 2:
kmethod = 'ITER'
elif kmethod == 4:
kmethod = 'SEMI'
elif kmethod == 3:
kmethod = 'ADAPT'
else:
msg = 'nlparm_id=%s kmethod=%r data=%s' % (nlparm_id, kmethod, data)
raise NotImplementedError(msg)
if conv == 1:
conv = 'W'
elif conv == 2:
conv = 'P'
elif conv == 3:
conv = 'PW'
elif conv == 4:
conv = 'U'
elif conv == 5:
conv = 'UW'
elif conv == 6:
conv = 'UP'
elif conv == 7:
conv = 'UPW'
else:
msg = 'nlparm_id=%s conv=%r data=%s' % (nlparm_id, conv, data)
raise NotImplementedError(msg)
if int_out == 0:
int_out = 'NO'
elif int_out == 1:
int_out = 'YES'
elif int_out == 2:
int_out = 'ALL'
else:
msg = 'nlparm_id=%s int_out=%r data=%s' % (nlparm_id, int_out, data)
raise NotImplementedError(msg)
return NLPARM(nlparm_id, ninc, dt, kmethod, kstep, max_iter, conv,
int_out, eps_u, eps_p, eps_w, max_div,
max_qn, max_ls, fstress,
ls_tol, max_bisect, max_r,
rtol_b, comment=comment)
def raw_fields(self):
list_fields = ['NLPARM', self.nlparm_id, self.ninc, self.dt, self.kmethod,
self.kstep, self.max_iter, self.conv, self.int_out, self.eps_u,
self.eps_p, self.eps_w, self.max_div, self.max_qn, self.max_ls,
self.fstress, self.ls_tol, self.max_bisect, None, None, None,
self.max_r, None, self.rtol_b]
return list_fields
def repr_fields(self):
ninc = set_blank_if_default(self.ninc, 10)
dt = set_blank_if_default(self.dt, 0.0)
kmethod = set_blank_if_default(self.kmethod, 'AUTO')
kstep = set_blank_if_default(self.kstep, 5)
max_iter = set_blank_if_default(self.max_iter, 25)
conv = set_blank_if_default(self.conv, 'PW')
int_out = set_blank_if_default(self.int_out, 'NO')
eps_u = set_blank_if_default(self.eps_u, 0.01)
eps_p = set_blank_if_default(self.eps_p, 0.01)
eps_w = set_blank_if_default(self.eps_w, 0.01)
max_div = set_blank_if_default(self.max_div, 3)
max_qn = set_blank_if_default(self.max_qn, self.max_iter)
max_ls = set_blank_if_default(self.max_ls, 4)
fstress = set_blank_if_default(self.fstress, 0.2)
ls_tol = set_blank_if_default(self.ls_tol, 0.5)
max_bisect = set_blank_if_default(self.max_bisect, 5)
max_r = set_blank_if_default(self.max_r, 20.)
rtol_b = set_blank_if_default(self.rtol_b, 20.)
list_fields = ['NLPARM', self.nlparm_id, ninc, dt, kmethod, kstep, max_iter,
conv, int_out, eps_u, eps_p, eps_w, max_div, max_qn, max_ls,
fstress, ls_tol, max_bisect, None, None, None, max_r, None,
rtol_b]
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card) # having trouble with double precision...
return self.comment + print_card_16(card)
class NLPCI(BaseCard):
type = 'NLPCI'
def __init__(self, nlpci_id, Type='CRIS', minalr=0.25, maxalr=4.,
scale=0., desiter=12, mxinc=20, comment=''):
if comment:
self.comment = comment
self.nlpci_id = nlpci_id
self.Type = Type
self.minalr = minalr
self.maxalr = maxalr
self.scale = scale
self.desiter = desiter
self.mxinc = mxinc
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a NLPCI card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
nlpci_id = integer(card, 1, 'nlpci_id')
Type = string_or_blank(card, 2, 'Type', 'CRIS')
minalr = double_or_blank(card, 3, 'minalr', 0.25)
maxalr = double_or_blank(card, 4, 'maxalr', 4.0)
scale = double_or_blank(card, 5, 'scale', 0.0)
blank(card, 6, 'blank')
desiter = integer_or_blank(card, 7, 'desiter', 12)
mxinc = integer_or_blank(card, 8, 'mxinc', 20)
return NLPCI(nlpci_id, Type=Type, minalr=minalr, maxalr=maxalr,
scale=scale, desiter=desiter, mxinc=mxinc, comment=comment)
def raw_fields(self):
list_fields = ['NLPCI', self.nlpci_id, self.Type, self.minalr,
self.maxalr, self.scale, None, self.desiter, self.mxinc]
return list_fields
def repr_fields(self):
#minalr = set_blank_if_default(self.minalr, 0.25)
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class TF(BaseCard):
"""
Defines a dynamic transfer function of the form:
    (B0 + B1*p + B2*p^2)*ud + sum_i (A0_i + A1_i*p + A2_i*p^2)*ui = 0
+----+-----+-----+------+------+------+--------+----+----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+====+=====+=====+======+======+======+========+====+====+
| TF | SID | GD | CD | B0 | B1 | B2 | | |
+----+-----+-----+------+------+------+--------+----+----+
| | G_1 | C_1 | A0_1 | A1_1 | A2_1 | etc. | | |
+----+-----+-----+------+------+------+--------+----+----+
"""
type = 'TF'
def __init__(self, sid, nid0, c, b0, b1, b2, nids, components, a, comment=''):
if comment:
self.comment = comment
self.sid = sid
self.nid0 = nid0
self.c = c
self.b0 = b0
self.b1 = b1
self.b2 = b2
self.nids = nids
self.components = components
self.a = a
def validate(self):
pass
#assert len(self.grids1) > 0, 'ngrids1=%s\n%s' % (len(self.grids1), str(self))
#def cross_reference(self, model: BDF) -> None:
#pass
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a TF card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
nid0 = integer(card, 2, 'nid0')
# component 0 means an SPOINT/EPOINT
c = components_or_blank(card, 3, 'components_0', 0)
b0 = double_or_blank(card, 4, 'b0', 0.)
b1 = double_or_blank(card, 5, 'b1', 0.)
b2 = double_or_blank(card, 6, 'b2', 0.)
nfields = len(card) - 9
nrows = nfields // 8
if nfields % 8 > 0:
nrows += 1
nids = []
components = []
a = []
for irow in range(nrows):
j = irow * 8 + 9
#ifield = irow + 1
nid = integer(card, j, 'grid_%i' % (irow + 1))
component = components_or_blank(card, j + 1, 'components_%i' % (irow + 1), 0)
a0 = double_or_blank(card, j + 2, 'a0_%i' % (irow + 1), 0.)
a1 = double_or_blank(card, j + 3, 'a1_%i' % (irow + 1), 0.)
a2 = double_or_blank(card, j + 4, 'a2_%i' % (irow + 1), 0.)
nids.append(nid)
components.append(component)
a.append([a0, a1, a2])
return TF(sid, nid0, c, b0, b1, b2, nids, components, a,
comment=comment)
def raw_fields(self):
list_fields = ['TF', self.sid, self.nid0, self.c, self.b0, self.b1, self.b2, None, None]
for grid, c, (a0, a1, a2) in zip(self.nids, self.components, self.a):
list_fields += [grid, c, a0, a1, a2, None, None, None]
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
# double precision?
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class TSTEP(BaseCard):
"""
Transient Time Step
Defines time step intervals at which a solution will be generated and
output in transient analysis.
+-------+------+------+------+------+-----+-----+-----+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+======+======+======+======+=====+=====+=====+=====+
| TSTEP | SID | N1 | DT1 | NO1 | | | | |
+-------+------+------+------+------+-----+-----+-----+-----+
| | | N2 | DT2 | NO2 | | | | |
+-------+------+------+------+------+-----+-----+-----+-----+
| | | etc. | | | | | | |
+-------+------+------+------+------+-----+-----+-----+-----+
+-------+------+------+------+------+-----+-----+-----+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+======+======+======+======+=====+=====+=====+=====+
| TSTEP | 101 | 9000 | .001 | 9000 | | | | |
+-------+------+------+------+------+-----+-----+-----+-----+
| | | 1000 | .001 | 1 | | | | |
+-------+------+------+------+------+-----+-----+-----+-----+
"""
type = 'TSTEP'
def __init__(self, sid, N, DT, NO, comment=''):
"""
Creates a TSTEP card
Parameters
----------
sid : int
the time step id
N : List[int/None]
???
DT : List[float/None]
???
NO : List[int/None]
???
comment : str; default=''
a comment for the card
"""
if comment:
self.comment = comment
self.sid = sid
#: Number of time steps of value DTi. (Integer > 1)
self.N = N
#: Time increment (float)
self.DT = DT
#: Skip factor for output. Every NOi-th step will be saved for output (default=1)
self.NO = NO
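        # Illustrative reading of the example card in the class docstring:
        #   TSTEP(sid=101, N=[9000, 1000], DT=[0.001, 0.001], NO=[9000, 1])
        # i.e. 9000 steps of dt=0.001 with output every 9000th step, followed by
        # 1000 steps of dt=0.001 with output at every step.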
def validate(self):
assert len(self.N) == len(self.DT), 'N=%s DT=%s' % (self.N, self.DT)
assert len(self.N) == len(self.NO), 'N=%s NO=%s' % (self.N, self.NO)
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a TSTEP card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
N = []
DT = []
NO = []
nrows = int(ceil((len(card) - 1.) / 8.))
for i in range(nrows):
n = 8 * i + 1
ni = integer_or_blank(card, n + 1, 'N' + str(i), 1)
dt = double_or_blank(card, n + 2, 'dt' + str(i), 0.)
no = integer_or_blank(card, n + 3, 'NO' + str(i), 1)
N.append(ni)
DT.append(dt)
NO.append(no)
return TSTEP(sid, N, DT, NO, comment=comment)
def raw_fields(self):
list_fields = ['TSTEP', self.sid]
for (N, dt, no) in zip(self.N, self.DT, self.NO):
list_fields += [N, dt, no, None, None, None, None, None]
return list_fields
def repr_fields(self):
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class TSTEPNL(BaseCard):
"""
Defines parametric controls and data for nonlinear transient structural or
heat transfer analysis. TSTEPNL is intended for SOLs 129, 159, and 600.
Parameters for Nonlinear Transient Analysis.
+---------+--------+--------+-------+--------+--------+-------+---------+------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=========+========+========+=======+========+========+=======+=========+======+
| TSTEPNL | ID | NDT | DT | NO | METHOD | KSTEP | MAXITER | CONV |
+---------+--------+--------+-------+--------+--------+-------+---------+------+
| | ESPU | EPSP | EPSW | MAXDIV | MAXQN | MAXLS | FSTRESS | |
+---------+--------+--------+-------+--------+--------+-------+---------+------+
| | MAXBIS | ADJUST | MSTEP | RB | MAXR | UTOL | RTOLB | |
+---------+--------+--------+-------+--------+--------+-------+---------+------+
method = None for NX, but apparently TSTEP as well, which is not in the QRG
"""
type = 'TSTEPNL'
allowed_methods = ['AUTO', 'ITER', 'ADAPT', 'SEMI', 'FNT', 'PFNT', # MSC
'TSTEP'] # NX
def __init__(self, sid, ndt, dt, no, method='ADAPT', kstep=None,
max_iter=10, conv='PW', eps_u=1.e-2, eps_p=1.e-3,
eps_w=1.e-6, max_div=2, max_qn=10, max_ls=2,
fstress=0.2, max_bisect=5, adjust=5, mstep=None,
rb=0.6, max_r=32., utol=0.1, rtol_b=20.,
min_iter=None, comment=''):
"""
Creates a TSTEPNL card
Parameters
----------
sid : int
the time step id
ndt : ???
???
dt : ???
???
no : ???
???
eps_u : float; default=1.e-2
???
eps_p : float; default=1.e-3
???
eps_w : float; default=1.e-6
???
max_div : int; default=2
???
max_qn : int; default=10
???
max_ls : int; default=2
???
fstress : float; default=0.2
???
max_bisect : int; default=5
???
adjust : int; default=5
???
mstep : int; default=None
???
rb : float; default=0.6
???
        max_r : float; default=32.
            ???
        utol : float; default=0.1
            ???
        rtol_b : float; default=20.
            ???
min_iter : int; default=None
not listed in all QRGs
comment : str; default=''
a comment for the card
"""
if comment:
self.comment = comment
# line 1
self.sid = sid
self.ndt = ndt
self.dt = dt
self.no = no
self.method = method
self.kstep = kstep
self.max_iter = max_iter
self.conv = conv
self.eps_u = eps_u
self.eps_p = eps_p
self.eps_w = eps_w
self.max_div = max_div
self.max_qn = max_qn
self.max_ls = max_ls
self.fstress = fstress
# line 3
self.max_bisect = max_bisect
self.adjust = adjust
self.mstep = mstep
self.rb = rb
self.max_r = max_r
self.utol = utol
self.rtol_b = rtol_b
self.min_iter = min_iter
assert self.ndt >= 3
assert self.dt > 0.
def validate(self):
if self.method not in self.allowed_methods:
msg = 'method=%r allowed_methods=[%s]' % (
self.method, ', '.join(self.allowed_methods))
raise ValueError(msg)
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a TSTEPNL card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
ndt = integer(card, 2, 'ndt')
dt = double(card, 3, 'dt')
no = integer_or_blank(card, 4, 'no', 1)
#: .. note:: not listed in all QRGs
method = string_or_blank(card, 5, 'method', 'ADAPT')
if method == 'ADAPT':
kstep = integer_or_blank(card, 6, 'kStep', 2)
elif method == 'ITER':
kstep = integer_or_blank(card, 6, 'kStep', 10)
elif method in ['AUTO', 'TSTEP', 'SEMI']:
kstep = None
#kstep = blank(card, 6, 'kStep') #: .. todo:: not blank
else:
msg = 'invalid TSTEPNL Method. method=%r; allowed_methods=[%s]' % (
method, ', '.join(cls.allowed_methods))
raise RuntimeError(msg)
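        # Illustrative sketch of the branch above: METHOD='ADAPT' (the default)
        # gives KSTEP=2 unless the field is set, METHOD='ITER' gives KSTEP=10,
        # and 'AUTO', 'TSTEP' or 'SEMI' leave KSTEP as None; any other METHOD
        # raises RuntimeError.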
max_iter = integer_or_blank(card, 7, 'maxIter', 10)
conv = string_or_blank(card, 8, 'conv', 'PW')
# line 2
eps_u = double_or_blank(card, 9, 'epsU', 1.E-2)
eps_p = double_or_blank(card, 10, 'epsP', 1.E-3)
eps_w = double_or_blank(card, 11, 'epsW', 1.E-6)
max_div = integer_or_blank(card, 12, 'maxDiv', 2)
max_qn = integer_or_blank(card, 13, 'maxQn', 10)
max_ls = integer_or_blank(card, 14, 'MaxLs', 2)
fstress = double_or_blank(card, 15, 'fStress', 0.2)
# line 3
max_bisect = integer_or_blank(card, 17, 'maxBisect', 5)
adjust = integer_or_blank(card, 18, 'adjust', 5)
mstep = integer_or_blank(card, 19, 'mStep')
rb = double_or_blank(card, 20, 'rb', 0.6)
max_r = double_or_blank(card, 21, 'maxR', 32.)
utol = double_or_blank(card, 22, 'uTol', 0.1)
rtol_b = double_or_blank(card, 23, 'rTolB', 20.)
# not listed in all QRGs
min_iter = integer_or_blank(card, 24, 'minIter')
assert len(card) <= 25, 'len(TSTEPNL card) = %i\ncard=%s' % (len(card), card)
return TSTEPNL(
sid, ndt, dt, no, method, kstep, max_iter, conv,
eps_u, eps_p, eps_w, max_div, max_qn, max_ls, fstress,
max_bisect, adjust, mstep, rb, max_r, utol, rtol_b, min_iter,
comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
"""
Adds a TSTEPNL card from the OP2
Parameters
----------
data : List[varies]
a list of fields defined in OP2 format
comment : str; default=''
a comment for the card
"""
(sid, ndt, dt, no, method, kstep, max_iter, conv, eps_u, eps_p, eps_w,
max_div, max_qn, max_ls, fstress, max_bisect,
adjust, mstep, rb, max_r, utol, rtol_b) = data
if method == 1:
method = 'AUTO'
elif method == 3:
method = 'ADAPT'
else:
raise NotImplementedError('tstepnl=%s method=%r data=%s' % (sid, method, data))
if conv == 3:
conv = 'PW'
elif conv == 4:
conv = 'U'
#elif conv == 3:
#conv = 'ADAPT'
else:
raise NotImplementedError('tstepnl=%s conv=%r data=%s' % (sid, conv, data))
min_iter = None # not listed in DMAP 2005
return TSTEPNL(
sid, ndt, dt, no, method, kstep, max_iter, conv,
eps_u, eps_p, eps_w, max_div, max_qn, max_ls, fstress,
max_bisect, adjust, mstep, rb, max_r, utol, rtol_b, min_iter,
comment=comment)
#self.sid = sid
#self.ndt = ndt
#self.dt = dt
#self.no = no
#self.method = method
#self.kStep = kStep
#self.maxIter = maxIter
#self.conv = conv
## line 2
#self.epsU = epsU
#self.epsP = epsP
#self.epsW = epsW
#self.maxDiv = maxDiv
#self.maxQn = maxQn
#self.MaxLs = maxLs
#self.fStress = fStress
## line 3
#self.maxBisect = maxBisect
#self.adjust = adjust
#self.mStep = mStep
#self.rb = rb
#self.maxR = maxR
#self.uTol = uTol
#self.rTolB = rTolB
def raw_fields(self):
list_fields = ['TSTEPNL', self.sid, self.ndt, self.dt, self.no,
self.method, self.kstep, self.max_iter, self.conv, self.eps_u,
self.eps_p, self.eps_w, self.max_div, self.max_qn, self.max_ls,
self.fstress, None, self.max_bisect, self.adjust, self.mstep,
self.rb, self.max_r, self.utol, self.rtol_b, self.min_iter]
return list_fields
def repr_fields(self):
#no = set_blank_if_default(self.no,1)
no = self.no
method = set_blank_if_default(self.method, 'ADAPT')
kstep = self.kstep
#if self.method == 'ADAPT':
#kStep = set_blank_if_default(self.kStep, 2)
#elif self.method == 'ITER':
#kStep = set_blank_if_default(self.kStep, 10)
#else:
#msg = 'invalid TSTEPNL Method. method=|%s|' %(self.method)
#raise RuntimeError(msg)
#maxIter = set_blank_if_default(self.maxIter, 10)
conv = set_blank_if_default(self.conv, 'PW')
eps_u = set_blank_if_default(self.eps_u, 1e-2)
eps_p = set_blank_if_default(self.eps_p, 1e-3)
eps_w = set_blank_if_default(self.eps_w, 1e-6)
max_div = set_blank_if_default(self.max_div, 2)
max_qn = set_blank_if_default(self.max_qn, 10)
max_ls = set_blank_if_default(self.max_ls, 2)
fstress = set_blank_if_default(self.fstress, 0.2)
max_bisect = set_blank_if_default(self.max_bisect, 5)
adjust = set_blank_if_default(self.adjust, 5)
rb = set_blank_if_default(self.rb, 0.6)
max_r = set_blank_if_default(self.max_r, 32.)
utol = set_blank_if_default(self.utol, 0.1)
rtol_b = set_blank_if_default(self.rtol_b, 20.)
list_fields = ['TSTEPNL', self.sid, self.ndt, self.dt, no, method,
kstep, self.max_iter, conv, eps_u, eps_p, eps_w, max_div, max_qn,
max_ls, fstress, None, max_bisect, adjust, self.mstep, rb,
max_r, utol, rtol_b, self.min_iter]
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
| 35.016229
| 96
| 0.493809
|
from typing import TYPE_CHECKING
from math import log, exp, ceil
import numpy as np
from numpy import unique, hstack
from pyNastran.utils.numpy_utils import integer_types
from pyNastran.bdf.field_writer_8 import set_blank_if_default
from pyNastran.bdf.cards.base_card import BaseCard
from pyNastran.bdf.bdf_interface.assign_type import (
integer, integer_or_blank, double, double_or_blank,
string_or_blank, blank, fields, components_or_blank
)
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
if TYPE_CHECKING:
from pyNastran.bdf.bdf import BDF
class DELAY(BaseCard):
type = 'DELAY'
def __init__(self, sid, nodes, components, delays, comment=''):
if comment:
self.comment = comment
self.sid = sid
self.nodes = nodes
self.components = components
self.delays = delays
@classmethod
def add_card(cls, card, comment=''):
sid = integer(card, 1, 'sid')
nodes = [integer(card, 2, 'node')]
components = [integer(card, 3, 'components')]
delays = [double_or_blank(card, 4, 'delay')]
assert components[0] in [0, 1, 2, 3, 4, 5, 6], components
if card.field(5):
nodes.append(integer(card, 5, 'node'))
components.append(integer(card, 6, 'components'))
delays.append(double_or_blank(card, 7, 'delay'))
assert components[1] in [0, 1, 2, 3, 4, 5, 6], components
return DELAY(sid, nodes, components, delays, comment=comment)
def add(self, delay):
assert self.sid == delay.sid, 'sid=%s delay.sid=%s' % (self.sid, delay.sid)
if delay.comment:
            if hasattr(self, '_comment'):
self._comment += delay.comment
else:
self._comment = delay.comment
self.nodes += delay.nodes
self.components += delay.components
self.delays += delay.delays
def get_delay_at_freq(self, freq):
return self.nodes, self.components, self.delays
@property
def node_ids(self):
node_ids = [self.node_id1]
if len(self.components) == 2:
node_ids.append(self.node_id2)
return node_ids
def raw_fields(self):
list_fields = ['DELAY', self.sid]
for nid, comp, delay in zip(self.node_ids, self.components, self.delays):
if isinstance(nid, integer_types):
nidi = nid
else:
nidi = nid.nid
list_fields += [nidi, comp, delay]
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
msg = self.comment
node_ids = self.node_ids
if size == 8:
for nid, comp, delay in zip(node_ids, self.components, self.delays):
msg += print_card_8(['DELAY', self.sid, nid, comp, delay])
else:
for nid, comp, delay in zip(node_ids, self.components, self.delays):
msg += print_card_16(['DELAY', self.sid, nid, comp, delay])
return msg
class DPHASE(BaseCard):
type = 'DPHASE'
def __init__(self, sid, nodes, components, phase_leads, comment=''):
if comment:
self.comment = comment
self.sid = sid
self.nodes = nodes
self.components = components
self.phase_leads = phase_leads
@classmethod
def add_card(cls, card, comment=''):
sid = integer(card, 1, 'sid')
nodes = [integer(card, 2, 'node')]
components = [integer(card, 3, 'components')]
phase_leads = [double_or_blank(card, 4, 'phase_lead')]
assert components[0] in [0, 1, 2, 3, 4, 5, 6], components
if card.field(5):
nodes.append(integer(card, 5, 'node'))
components.append(integer(card, 6, 'components'))
phase_leads.append(double_or_blank(card, 7, 'phase_lead'))
assert components[1] in [0, 1, 2, 3, 4, 5, 6], components
return DPHASE(sid, nodes, components, phase_leads, comment=comment)
def add(self, dphase):
assert self.sid == dphase.sid, 'sid=%s dphase.sid=%s' % (self.sid, dphase.sid)
if dphase.comment:
            if hasattr(self, '_comment'):
self._comment += dphase.comment
else:
self._comment = dphase.comment
self.nodes += dphase.nodes
self.components += dphase.components
self.phase_leads += dphase.phase_leads
@property
def node_ids(self):
node_ids = [self.node_id1]
if len(self.components) == 2:
node_ids.append(self.node_id2)
return node_ids
def raw_fields(self):
list_fields = ['DPHASE', self.sid]
for nid, comp, delay in zip(self.nodes, self.components, self.phase_leads):
if isinstance(nid, integer_types):
nidi = nid
else:
nidi = nid.nid
list_fields += [nidi, comp, delay]
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
msg = self.comment
node_ids = self.node_ids
if size == 8:
for nid, comp, delay in zip(node_ids, self.components, self.phase_leads):
msg += print_card_8(['DPHASE', self.sid, nid, comp, delay])
else:
for nid, comp, delay in zip(node_ids, self.components, self.phase_leads):
msg += print_card_16(['DPHASE', self.sid, nid, comp, delay])
return msg
class FREQ(BaseCard):
type = 'FREQ'
def __init__(self, sid, freqs, comment=''):
if comment:
self.comment = comment
self.sid = sid
self.freqs = np.unique(freqs)
@classmethod
def add_card(cls, card, comment=''):
sid = integer(card, 1, 'sid')
freqs = fields(double, card, 'freq', i=2, j=len(card))
return FREQ(sid, freqs, comment=comment)
def get_freqs(self):
return self.freqs
def add_frequencies(self, freqs):
self.freqs = unique(hstack([self.freqs, freqs]))
def add_frequency_object(self, freq):
self.add_frequencies(freq.freqs)
def raw_fields(self):
list_fields = ['FREQ', self.sid] + list(self.freqs)
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class FREQ1(FREQ):
type = 'FREQ1'
def __init__(self, sid, f1, df, ndf, comment=''):
if comment:
self.comment = comment
self.sid = sid
self.f1 = f1
self.df = df
self.ndf = ndf
freqs = []
for i in range(ndf):
freqs.append(f1 + i * df)
self.freqs = unique(freqs)
@classmethod
def add_card(cls, card, comment=''):
sid = integer(card, 1, 'sid')
f1 = double_or_blank(card, 2, 'f1', 0.0)
df = double(card, 3, 'df')
ndf = integer_or_blank(card, 4, 'ndf', 1)
assert len(card) <= 5, 'len(FREQ card) = %i\ncard=%s' % (len(card), card)
return FREQ1(sid, f1, df, ndf, comment=comment)
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class FREQ2(FREQ):
type = 'FREQ2'
def __init__(self, sid, f1, f2, ndf=1, comment=''):
if comment:
self.comment = comment
self.sid = sid
self.f1 = f1
self.f2 = f2
self.ndf = ndf
d = 1. / ndf * log(f2 / f1)
freqs = []
for i in range(ndf):
freqs.append(f1 * exp(i * d))
self.freqs = np.unique(freqs)
@classmethod
def add_card(cls, card, comment=''):
sid = integer(card, 1, 'sid')
f1 = double(card, 2, 'f1')
f2 = double(card, 3, 'f2')
ndf = integer_or_blank(card, 4, 'nf', 1)
assert len(card) <= 5, 'len(FREQ2 card) = %i\ncard=%s' % (len(card), card)
return FREQ2(sid, f1, f2, ndf, comment=comment)
class FREQ3(FREQ):
type = 'FREQ3'
    def __init__(self, sid, f1, f2=None, Type='LINEAR', nef=10, cluster=1.0, comment=''):
if comment:
self.comment = comment
if f2 is None:
f2 = f1
self.sid = sid
self.f1 = f1
self.f2 = f2
self.Type = Type
self.nef = nef
self.cluster = cluster
@classmethod
def add_card(cls, card, comment=''):
sid = integer(card, 1, 'sid')
        f1 = double(card, 2, 'f1')
        f2 = double_or_blank(card, 3, 'f2', f1)
        Type = string_or_blank(card, 4, 'Type', 'LINEAR')
        nef = integer_or_blank(card, 5, 'nef', 10)
        cluster = double_or_blank(card, 6, 'cluster', 1.0)
        return FREQ3(sid, f1, f2, Type, nef, cluster, comment=comment)
def raw_fields(self):
return ['FREQ3', self.sid, self.f1, self.f2, self.Type, self.nef, self.cluster]
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class FREQ4(FREQ):
type = 'FREQ4'
def __init__(self, sid, f1, f2, fspread, nfm, comment=''):
if comment:
self.comment = comment
self.sid = sid
self.f1 = f1
self.f2 = f2
self.fspread = fspread
self.nfm = nfm
@classmethod
def add_card(cls, card, comment=''):
sid = integer(card, 1, 'sid')
f1 = double_or_blank(card, 2, 'f1', 0.0)
f2 = double_or_blank(card, 3, 'f2', 1.e20)
fspread = double_or_blank(card, 4, 'fspd', 0.1)
nfm = integer_or_blank(card, 5, 'nfm', 3)
assert len(card) <= 6, 'len(FREQ card) = %i\ncard=%s' % (len(card), card)
return FREQ4(sid, f1, f2, fspread, nfm, comment=comment)
def raw_fields(self):
list_fields = ['FREQ4', self.sid, self.f1, self.f2, self.fspread,
self.nfm]
return list_fields
def repr_fields(self):
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class NLPARM(BaseCard):
type = 'NLPARM'
def __init__(self, nlparm_id, ninc=10, dt=0.0, kmethod='AUTO', kstep=5,
max_iter=25, conv='PW', int_out='NO',
eps_u=0.01, eps_p=0.01, eps_w=0.01, max_div=3, max_qn=None, max_ls=4,
fstress=0.2, ls_tol=0.5, max_bisect=5, max_r=20., rtol_b=20., comment=''):
if comment:
self.comment = comment
self.nlparm_id = nlparm_id
self.ninc = ninc
self.dt = dt
self.kmethod = kmethod
self.kstep = kstep
self.max_iter = max_iter
self.conv = conv
self.int_out = int_out
self.eps_p = eps_p
self.eps_u = eps_u
self.eps_w = eps_w
self.max_div = max_div
self.max_qn = max_qn
self.max_ls = max_ls
self.fstress = fstress
self.ls_tol = ls_tol
self.max_bisect = max_bisect
self.max_r = max_r
self.rtol_b = rtol_b
if self.max_qn is None:
if kmethod == 'PFNT':
self.max_qn = 0
else:
self.max_qn = max_iter
@classmethod
def add_card(cls, card, comment=''):
nlparm_id = integer(card, 1, 'nlparm_id')
ninc = integer_or_blank(card, 2, 'ninc', 10)
dt = double_or_blank(card, 3, 'dt', 0.0)
kmethod = string_or_blank(card, 4, 'kmethod', 'AUTO')
kstep = integer_or_blank(card, 5, 'kstep', 5)
max_iter = integer_or_blank(card, 6, 'max_iter', 25)
conv = string_or_blank(card, 7, 'conv', 'PW')
int_out = string_or_blank(card, 8, 'intOut', 'NO')
eps_u = double_or_blank(card, 9, 'eps_u', 0.01)
eps_p = double_or_blank(card, 10, 'eps_p', 0.01)
eps_w = double_or_blank(card, 11, 'eps_w', 0.01)
max_div = integer_or_blank(card, 12, 'max_div', 3)
if kmethod == 'PFNT':
max_qn = integer_or_blank(card, 13, 'max_qn', 0)
else:
max_qn = integer_or_blank(card, 13, 'max_qn', max_iter)
max_ls = integer_or_blank(card, 14, 'max_ls', 4)
fstress = double_or_blank(card, 15, 'fstress', 0.2)
ls_tol = double_or_blank(card, 16, 'ls_tol', 0.5)
max_bisect = integer_or_blank(card, 17, 'max_bisect', 5)
max_r = double_or_blank(card, 21, 'max_r', 20.)
rtol_b = double_or_blank(card, 23, 'rtol_b', 20.)
assert len(card) <= 24, 'len(NLPARM card) = %i\ncard=%s' % (len(card), card)
return NLPARM(nlparm_id, ninc, dt, kmethod, kstep, max_iter, conv,
int_out, eps_u, eps_p, eps_w, max_div,
max_qn, max_ls, fstress,
ls_tol, max_bisect, max_r,
rtol_b, comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
(nlparm_id, ninc, dt, kmethod, kstep, max_iter, conv, int_out, eps_u, eps_p,
eps_w, max_div, max_qn, max_ls, fstress, ls_tol, max_bisect, max_r,
rtol_b) = data
if kmethod == 1:
kmethod = 'AUTO'
elif kmethod == 2:
kmethod = 'ITER'
elif kmethod == 4:
kmethod = 'SEMI'
elif kmethod == 3:
kmethod = 'ADAPT'
else:
msg = 'nlparm_id=%s kmethod=%r data=%s' % (nlparm_id, kmethod, data)
raise NotImplementedError(msg)
if conv == 1:
conv = 'W'
elif conv == 2:
conv = 'P'
elif conv == 3:
conv = 'PW'
elif conv == 4:
conv = 'U'
elif conv == 5:
conv = 'UW'
elif conv == 6:
conv = 'UP'
elif conv == 7:
conv = 'UPW'
else:
msg = 'nlparm_id=%s conv=%r data=%s' % (nlparm_id, conv, data)
raise NotImplementedError(msg)
if int_out == 0:
int_out = 'NO'
elif int_out == 1:
int_out = 'YES'
elif int_out == 2:
int_out = 'ALL'
else:
msg = 'nlparm_id=%s int_out=%r data=%s' % (nlparm_id, int_out, data)
raise NotImplementedError(msg)
return NLPARM(nlparm_id, ninc, dt, kmethod, kstep, max_iter, conv,
int_out, eps_u, eps_p, eps_w, max_div,
max_qn, max_ls, fstress,
ls_tol, max_bisect, max_r,
rtol_b, comment=comment)
def raw_fields(self):
list_fields = ['NLPARM', self.nlparm_id, self.ninc, self.dt, self.kmethod,
self.kstep, self.max_iter, self.conv, self.int_out, self.eps_u,
self.eps_p, self.eps_w, self.max_div, self.max_qn, self.max_ls,
self.fstress, self.ls_tol, self.max_bisect, None, None, None,
self.max_r, None, self.rtol_b]
return list_fields
def repr_fields(self):
ninc = set_blank_if_default(self.ninc, 10)
dt = set_blank_if_default(self.dt, 0.0)
kmethod = set_blank_if_default(self.kmethod, 'AUTO')
kstep = set_blank_if_default(self.kstep, 5)
max_iter = set_blank_if_default(self.max_iter, 25)
conv = set_blank_if_default(self.conv, 'PW')
int_out = set_blank_if_default(self.int_out, 'NO')
eps_u = set_blank_if_default(self.eps_u, 0.01)
eps_p = set_blank_if_default(self.eps_p, 0.01)
eps_w = set_blank_if_default(self.eps_w, 0.01)
max_div = set_blank_if_default(self.max_div, 3)
max_qn = set_blank_if_default(self.max_qn, self.max_iter)
max_ls = set_blank_if_default(self.max_ls, 4)
fstress = set_blank_if_default(self.fstress, 0.2)
ls_tol = set_blank_if_default(self.ls_tol, 0.5)
max_bisect = set_blank_if_default(self.max_bisect, 5)
max_r = set_blank_if_default(self.max_r, 20.)
rtol_b = set_blank_if_default(self.rtol_b, 20.)
list_fields = ['NLPARM', self.nlparm_id, ninc, dt, kmethod, kstep, max_iter,
conv, int_out, eps_u, eps_p, eps_w, max_div, max_qn, max_ls,
fstress, ls_tol, max_bisect, None, None, None, max_r, None,
rtol_b]
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
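# Hedged usage sketch (not part of the original source): NLPARM derives max_qn
# from max_iter (or 0 when kmethod='PFNT') when it is not given explicitly.
# The id and values here are illustrative only.
def _example_nlparm():
    nlparm = NLPARM(nlparm_id=10, kmethod='PFNT')
    assert nlparm.max_qn == 0  # derived default for the PFNT method
    return nlparm.write_card(size=8)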
class NLPCI(BaseCard):
type = 'NLPCI'
def __init__(self, nlpci_id, Type='CRIS', minalr=0.25, maxalr=4.,
scale=0., desiter=12, mxinc=20, comment=''):
if comment:
self.comment = comment
self.nlpci_id = nlpci_id
self.Type = Type
self.minalr = minalr
self.maxalr = maxalr
self.scale = scale
self.desiter = desiter
self.mxinc = mxinc
@classmethod
def add_card(cls, card, comment=''):
nlpci_id = integer(card, 1, 'nlpci_id')
Type = string_or_blank(card, 2, 'Type', 'CRIS')
minalr = double_or_blank(card, 3, 'minalr', 0.25)
maxalr = double_or_blank(card, 4, 'maxalr', 4.0)
scale = double_or_blank(card, 5, 'scale', 0.0)
blank(card, 6, 'blank')
desiter = integer_or_blank(card, 7, 'desiter', 12)
mxinc = integer_or_blank(card, 8, 'mxinc', 20)
return NLPCI(nlpci_id, Type=Type, minalr=minalr, maxalr=maxalr,
scale=scale, desiter=desiter, mxinc=mxinc, comment=comment)
def raw_fields(self):
list_fields = ['NLPCI', self.nlpci_id, self.Type, self.minalr,
self.maxalr, self.scale, None, self.desiter, self.mxinc]
return list_fields
def repr_fields(self):
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class TF(BaseCard):
type = 'TF'
def __init__(self, sid, nid0, c, b0, b1, b2, nids, components, a, comment=''):
if comment:
self.comment = comment
self.sid = sid
self.nid0 = nid0
self.c = c
self.b0 = b0
self.b1 = b1
self.b2 = b2
self.nids = nids
self.components = components
self.a = a
def validate(self):
pass
@classmethod
def add_card(cls, card, comment=''):
sid = integer(card, 1, 'sid')
nid0 = integer(card, 2, 'nid0')
c = components_or_blank(card, 3, 'components_0', 0)
b0 = double_or_blank(card, 4, 'b0', 0.)
b1 = double_or_blank(card, 5, 'b1', 0.)
b2 = double_or_blank(card, 6, 'b2', 0.)
nfields = len(card) - 9
nrows = nfields // 8
if nfields % 8 > 0:
nrows += 1
nids = []
components = []
a = []
for irow in range(nrows):
j = irow * 8 + 9
nid = integer(card, j, 'grid_%i' % (irow + 1))
component = components_or_blank(card, j + 1, 'components_%i' % (irow + 1), 0)
a0 = double_or_blank(card, j + 2, 'a0_%i' % (irow + 1), 0.)
a1 = double_or_blank(card, j + 3, 'a1_%i' % (irow + 1), 0.)
a2 = double_or_blank(card, j + 4, 'a2_%i' % (irow + 1), 0.)
nids.append(nid)
components.append(component)
a.append([a0, a1, a2])
return TF(sid, nid0, c, b0, b1, b2, nids, components, a,
comment=comment)
def raw_fields(self):
list_fields = ['TF', self.sid, self.nid0, self.c, self.b0, self.b1, self.b2, None, None]
for grid, c, (a0, a1, a2) in zip(self.nids, self.components, self.a):
list_fields += [grid, c, a0, a1, a2, None, None, None]
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
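# Hedged usage sketch (not part of the original source): TF keeps one
# (grid, component, [a0, a1, a2]) triple per dependent grid point, mirroring the
# lists built in add_card above. All numbers are illustrative only.
def _example_tf():
    tf = TF(sid=100, nid0=2, c=0, b0=1.0, b1=0.0, b2=0.0,
            nids=[10, 20], components=[0, 0],
            a=[[1.0, 0.0, 0.0], [-1.0, 0.0, 0.0]])
    return tf.write_card(size=8)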
class TSTEP(BaseCard):
type = 'TSTEP'
def __init__(self, sid, N, DT, NO, comment=''):
if comment:
self.comment = comment
self.sid = sid
self.N = N
self.DT = DT
self.NO = NO
def validate(self):
assert len(self.N) == len(self.DT), 'N=%s DT=%s' % (self.N, self.DT)
assert len(self.N) == len(self.NO), 'N=%s NO=%s' % (self.N, self.NO)
@classmethod
def add_card(cls, card, comment=''):
sid = integer(card, 1, 'sid')
N = []
DT = []
NO = []
nrows = int(ceil((len(card) - 1.) / 8.))
for i in range(nrows):
n = 8 * i + 1
ni = integer_or_blank(card, n + 1, 'N' + str(i), 1)
dt = double_or_blank(card, n + 2, 'dt' + str(i), 0.)
no = integer_or_blank(card, n + 3, 'NO' + str(i), 1)
N.append(ni)
DT.append(dt)
NO.append(no)
return TSTEP(sid, N, DT, NO, comment=comment)
def raw_fields(self):
list_fields = ['TSTEP', self.sid]
for (N, dt, no) in zip(self.N, self.DT, self.NO):
list_fields += [N, dt, no, None, None, None, None, None]
return list_fields
def repr_fields(self):
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
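# Hedged usage sketch (not part of the original source): TSTEP takes parallel
# lists (step count, step size, output skip factor) and validate() checks that
# they stay the same length. The values are illustrative only.
def _example_tstep():
    tstep = TSTEP(sid=1, N=[100, 50], DT=[0.001, 0.002], NO=[1, 5])
    tstep.validate()
    return tstep.write_card(size=8)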
class TSTEPNL(BaseCard):
type = 'TSTEPNL'
allowed_methods = ['AUTO', 'ITER', 'ADAPT', 'SEMI', 'FNT', 'PFNT',
'TSTEP']
def __init__(self, sid, ndt, dt, no, method='ADAPT', kstep=None,
max_iter=10, conv='PW', eps_u=1.e-2, eps_p=1.e-3,
eps_w=1.e-6, max_div=2, max_qn=10, max_ls=2,
fstress=0.2, max_bisect=5, adjust=5, mstep=None,
rb=0.6, max_r=32., utol=0.1, rtol_b=20.,
min_iter=None, comment=''):
if comment:
self.comment = comment
self.sid = sid
self.ndt = ndt
self.dt = dt
self.no = no
self.method = method
self.kstep = kstep
self.max_iter = max_iter
self.conv = conv
self.eps_u = eps_u
self.eps_p = eps_p
self.eps_w = eps_w
self.max_div = max_div
self.max_qn = max_qn
self.max_ls = max_ls
self.fstress = fstress
self.max_bisect = max_bisect
self.adjust = adjust
self.mstep = mstep
self.rb = rb
self.max_r = max_r
self.utol = utol
self.rtol_b = rtol_b
self.min_iter = min_iter
assert self.ndt >= 3
assert self.dt > 0.
def validate(self):
if self.method not in self.allowed_methods:
msg = 'method=%r allowed_methods=[%s]' % (
self.method, ', '.join(self.allowed_methods))
raise ValueError(msg)
@classmethod
def add_card(cls, card, comment=''):
sid = integer(card, 1, 'sid')
ndt = integer(card, 2, 'ndt')
dt = double(card, 3, 'dt')
no = integer_or_blank(card, 4, 'no', 1)
method = string_or_blank(card, 5, 'method', 'ADAPT')
if method == 'ADAPT':
kstep = integer_or_blank(card, 6, 'kStep', 2)
elif method == 'ITER':
kstep = integer_or_blank(card, 6, 'kStep', 10)
        elif method in ['AUTO', 'TSTEP', 'SEMI']:
            kstep = None
        else:
            msg = 'invalid TSTEPNL Method. method=%r; allowed_methods=[%s]' % (
                method, ', '.join(cls.allowed_methods))
            raise RuntimeError(msg)
max_iter = integer_or_blank(card, 7, 'maxIter', 10)
conv = string_or_blank(card, 8, 'conv', 'PW')
eps_u = double_or_blank(card, 9, 'epsU', 1.E-2)
eps_p = double_or_blank(card, 10, 'epsP', 1.E-3)
eps_w = double_or_blank(card, 11, 'epsW', 1.E-6)
max_div = integer_or_blank(card, 12, 'maxDiv', 2)
max_qn = integer_or_blank(card, 13, 'maxQn', 10)
max_ls = integer_or_blank(card, 14, 'MaxLs', 2)
fstress = double_or_blank(card, 15, 'fStress', 0.2)
max_bisect = integer_or_blank(card, 17, 'maxBisect', 5)
adjust = integer_or_blank(card, 18, 'adjust', 5)
mstep = integer_or_blank(card, 19, 'mStep')
rb = double_or_blank(card, 20, 'rb', 0.6)
max_r = double_or_blank(card, 21, 'maxR', 32.)
utol = double_or_blank(card, 22, 'uTol', 0.1)
rtol_b = double_or_blank(card, 23, 'rTolB', 20.)
min_iter = integer_or_blank(card, 24, 'minIter')
assert len(card) <= 25, 'len(TSTEPNL card) = %i\ncard=%s' % (len(card), card)
return TSTEPNL(
sid, ndt, dt, no, method, kstep, max_iter, conv,
eps_u, eps_p, eps_w, max_div, max_qn, max_ls, fstress,
max_bisect, adjust, mstep, rb, max_r, utol, rtol_b, min_iter,
comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
(sid, ndt, dt, no, method, kstep, max_iter, conv, eps_u, eps_p, eps_w,
max_div, max_qn, max_ls, fstress, max_bisect,
adjust, mstep, rb, max_r, utol, rtol_b) = data
if method == 1:
method = 'AUTO'
elif method == 3:
method = 'ADAPT'
else:
raise NotImplementedError('tstepnl=%s method=%r data=%s' % (sid, method, data))
if conv == 3:
conv = 'PW'
elif conv == 4:
conv = 'U'
else:
raise NotImplementedError('tstepnl=%s conv=%r data=%s' % (sid, conv, data))
min_iter = None
return TSTEPNL(
sid, ndt, dt, no, method, kstep, max_iter, conv,
eps_u, eps_p, eps_w, max_div, max_qn, max_ls, fstress,
max_bisect, adjust, mstep, rb, max_r, utol, rtol_b, min_iter,
comment=comment)
def raw_fields(self):
list_fields = ['TSTEPNL', self.sid, self.ndt, self.dt, self.no,
self.method, self.kstep, self.max_iter, self.conv, self.eps_u,
self.eps_p, self.eps_w, self.max_div, self.max_qn, self.max_ls,
self.fstress, None, self.max_bisect, self.adjust, self.mstep,
self.rb, self.max_r, self.utol, self.rtol_b, self.min_iter]
return list_fields
def repr_fields(self):
no = self.no
method = set_blank_if_default(self.method, 'ADAPT')
kstep = self.kstep
conv = set_blank_if_default(self.conv, 'PW')
eps_u = set_blank_if_default(self.eps_u, 1e-2)
eps_p = set_blank_if_default(self.eps_p, 1e-3)
eps_w = set_blank_if_default(self.eps_w, 1e-6)
max_div = set_blank_if_default(self.max_div, 2)
max_qn = set_blank_if_default(self.max_qn, 10)
max_ls = set_blank_if_default(self.max_ls, 2)
fstress = set_blank_if_default(self.fstress, 0.2)
max_bisect = set_blank_if_default(self.max_bisect, 5)
adjust = set_blank_if_default(self.adjust, 5)
rb = set_blank_if_default(self.rb, 0.6)
max_r = set_blank_if_default(self.max_r, 32.)
utol = set_blank_if_default(self.utol, 0.1)
rtol_b = set_blank_if_default(self.rtol_b, 20.)
list_fields = ['TSTEPNL', self.sid, self.ndt, self.dt, no, method,
kstep, self.max_iter, conv, eps_u, eps_p, eps_w, max_div, max_qn,
max_ls, fstress, None, max_bisect, adjust, self.mstep, rb,
max_r, utol, rtol_b, self.min_iter]
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
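# Hedged usage sketch (not part of the original source): an adaptive nonlinear
# transient setup; validate() rejects methods outside allowed_methods. The
# values are illustrative only (ndt must be >= 3 and dt > 0 per __init__).
def _example_tstepnl():
    tstepnl = TSTEPNL(sid=2, ndt=100, dt=0.001, no=1, method='ADAPT', kstep=2)
    tstepnl.validate()
    return tstepnl.write_card(size=8)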
| true
| true
|
f7198c8a3b00d357347baf407e57a7dd4b984119
| 620
|
py
|
Python
|
polls/admin.py
|
Obsinqsob01/polls
|
52f42029bd76e7a4f1dbdc947c5217ca9e2c0f1d
|
[
"MIT"
] | null | null | null |
polls/admin.py
|
Obsinqsob01/polls
|
52f42029bd76e7a4f1dbdc947c5217ca9e2c0f1d
|
[
"MIT"
] | null | null | null |
polls/admin.py
|
Obsinqsob01/polls
|
52f42029bd76e7a4f1dbdc947c5217ca9e2c0f1d
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Choice, Question
class ChoiceInline(admin.TabularInline):
model = Choice
extra = 3
    list_display = ('question_text', 'pub_date')  # note: list_display is a ModelAdmin option and has no effect on a TabularInline
class QuestionAdmin(admin.ModelAdmin):
list_display = ('question_text', 'pub_date', 'was_published_recently')
fieldsets = [
(None, {'fields': ['question_text']}),
('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
]
inlines = [ChoiceInline]
list_filter = ['pub_date']
search_fields = ['question_text']
admin.site.register(Question, QuestionAdmin)
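# Hedged aside (not part of the original file): Choice is only edited inline
# through QuestionAdmin here; if standalone editing were also wanted it could be
# registered separately, e.g. admin.site.register(Choice).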
| 29.52381
| 80
| 0.659677
|
| true
| true
|
f7198cbf53eb86b681a5ce28880882ab6561e873
| 706
|
py
|
Python
|
2-add-two-numbers/2-add-two-numbers.py
|
Atri10/Leet-code---Atri_Patel
|
49fc59b9147a44ab04a66128fbb2ef259b5f7b7c
|
[
"MIT"
] | 1
|
2021-10-10T20:21:18.000Z
|
2021-10-10T20:21:18.000Z
|
2-add-two-numbers/2-add-two-numbers.py
|
Atri10/Leet-code---Atri_Patel
|
49fc59b9147a44ab04a66128fbb2ef259b5f7b7c
|
[
"MIT"
] | null | null | null |
2-add-two-numbers/2-add-two-numbers.py
|
Atri10/Leet-code---Atri_Patel
|
49fc59b9147a44ab04a66128fbb2ef259b5f7b7c
|
[
"MIT"
] | null | null | null |
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:
n = cur = ListNode(-1)
carry = 0
while l1 or l2 or carry:
if l1:
carry += l1.val
l1 = l1.next
if l2:
carry += l2.val
l2 = l2.next
cur.next = ListNode(carry % 10)
cur = cur.next
carry = carry // 10
return n.next
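# Hedged standalone harness (not part of the LeetCode submission, where ListNode
# and Optional are provided by the judge). In a real script these definitions
# would sit above the Solution class, since its annotations are evaluated at
# class-definition time.
from typing import Optional
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
def _to_pylist(node):
    # collect the linked-list values into a plain Python list for printing
    out = []
    while node:
        out.append(node.val)
        node = node.next
    return out
# 342 + 465 = 807, with digits stored in reverse order
l1 = ListNode(2, ListNode(4, ListNode(3)))
l2 = ListNode(5, ListNode(6, ListNode(4)))
print(_to_pylist(Solution().addTwoNumbers(l1, l2)))  # [7, 0, 8]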
| 27.153846
| 98
| 0.441926
|
| true
| true
|
f7198d790f74aa6993a89e96a1b3903ca05a53bc
| 15,654
|
py
|
Python
|
manim/scene/three_d_scene.py
|
behackl/manim
|
3759b73d555792d077e1d77c854d5dbe88043b98
|
[
"MIT"
] | 2
|
2020-11-17T19:00:44.000Z
|
2021-10-17T16:14:55.000Z
|
manim/scene/three_d_scene.py
|
behackl/manim
|
3759b73d555792d077e1d77c854d5dbe88043b98
|
[
"MIT"
] | null | null | null |
manim/scene/three_d_scene.py
|
behackl/manim
|
3759b73d555792d077e1d77c854d5dbe88043b98
|
[
"MIT"
] | null | null | null |
"""A scene suitable for rendering three-dimensional objects and animations."""
__all__ = ["ThreeDScene", "SpecialThreeDScene"]
from typing import Iterable, Optional, Sequence, Union
import numpy as np
from .. import config
from ..animation.animation import Animation
from ..animation.transform import ApplyMethod
from ..camera.three_d_camera import ThreeDCamera
from ..constants import DEGREES
from ..mobject.coordinate_systems import ThreeDAxes
from ..mobject.geometry import Line
from ..mobject.mobject import Mobject
from ..mobject.three_dimensions import Sphere
from ..mobject.types.vectorized_mobject import VectorizedPoint, VGroup
from ..mobject.value_tracker import ValueTracker
from ..scene.scene import Scene
from ..utils.config_ops import merge_dicts_recursively
class ThreeDScene(Scene):
"""
This is a Scene, with special configurations and properties that
make it suitable for Three Dimensional Scenes.
"""
def __init__(
self,
camera_class=ThreeDCamera,
ambient_camera_rotation=None,
default_angled_camera_orientation_kwargs=None,
**kwargs,
):
self.ambient_camera_rotation = ambient_camera_rotation
if default_angled_camera_orientation_kwargs is None:
default_angled_camera_orientation_kwargs = {
"phi": 70 * DEGREES,
"theta": -135 * DEGREES,
}
self.default_angled_camera_orientation_kwargs = (
default_angled_camera_orientation_kwargs
)
super().__init__(camera_class=camera_class, **kwargs)
def set_camera_orientation(
self,
phi: Optional[float] = None,
theta: Optional[float] = None,
gamma: Optional[float] = None,
distance: Optional[float] = None,
frame_center: Optional[Union["Mobject", Sequence[float]]] = None,
):
"""
This method sets the orientation of the camera in the scene.
Parameters
----------
phi : int or float, optional
The polar angle i.e the angle between Z_AXIS and Camera through ORIGIN in radians.
theta : int or float, optional
The azimuthal angle i.e the angle that spins the camera around the Z_AXIS.
distance : int or float, optional
The radial distance between ORIGIN and Camera.
gamma : int or float, optional
The rotation of the camera about the vector from the ORIGIN to the Camera.
frame_center : list, tuple or np.array, optional
The new center of the camera frame in cartesian coordinates.
"""
if phi is not None:
self.renderer.camera.set_phi(phi)
if theta is not None:
self.renderer.camera.set_theta(theta)
if distance is not None:
self.renderer.camera.set_distance(distance)
if gamma is not None:
self.renderer.camera.set_gamma(gamma)
if frame_center is not None:
self.renderer.camera._frame_center.move_to(frame_center)
def begin_ambient_camera_rotation(self, rate=0.02, about="theta"):
"""
This method begins an ambient rotation of the camera about the Z_AXIS,
in the anticlockwise direction
Parameters
----------
rate : int or float, optional
The rate at which the camera should rotate about the Z_AXIS.
Negative rate means clockwise rotation.
about: (str)
one of 3 options: ["theta", "phi", "gamma"]. defaults to theta.
"""
# TODO, use a ValueTracker for rate, so that it
# can begin and end smoothly
if about.lower() == "phi":
x = self.renderer.camera.phi_tracker
elif about.lower() == "gamma":
x = self.renderer.camera.gamma_tracker
elif about.lower() == "theta":
x = self.renderer.camera.theta_tracker
else:
raise ValueError("Invalid ambient rotation angle.")
x.add_updater(lambda m, dt: m.increment_value(rate * dt))
self.add(x)
def stop_ambient_camera_rotation(self, about="theta"):
"""
This method stops all ambient camera rotation.
"""
if about.lower() == "phi":
x = self.renderer.camera.phi_tracker
elif about.lower() == "gamma":
x = self.renderer.camera.gamma_tracker
elif about.lower() == "theta":
x = self.renderer.camera.theta_tracker
else:
raise ValueError("Invalid ambient rotation angle.")
x.clear_updaters()
self.remove(x)
def begin_3dillusion_camera_rotation(
self, rate=1, origin_theta=-60 * DEGREES, origin_phi=75 * DEGREES
):
val_tracker_theta = ValueTracker(0)
def update_theta(m, dt):
val_tracker_theta.increment_value(dt * rate)
val_for_left_right = 0.2 * np.sin(val_tracker_theta.get_value())
return m.set_value(origin_theta + val_for_left_right)
self.renderer.camera.theta_tracker.add_updater(update_theta)
self.add(self.renderer.camera.theta_tracker)
val_tracker_phi = ValueTracker(0)
def update_phi(m, dt):
val_tracker_phi.increment_value(dt * rate)
val_for_up_down = 0.1 * np.cos(val_tracker_phi.get_value())
return m.set_value(origin_phi + val_for_up_down)
self.renderer.camera.phi_tracker.add_updater(update_phi)
self.add(self.renderer.camera.phi_tracker)
def stop_3dillusion_camera_rotation(self):
"""
This method stops all illusion camera rotations.
"""
self.renderer.camera.theta_tracker.clear_updaters()
self.remove(self.renderer.camera.theta_tracker)
self.renderer.camera.phi_tracker.clear_updaters()
self.remove(self.renderer.camera.phi_tracker)
def move_camera(
self,
phi: Optional[float] = None,
theta: Optional[float] = None,
gamma: Optional[float] = None,
distance: Optional[float] = None,
frame_center: Optional[Union["Mobject", Sequence[float]]] = None,
added_anims: Iterable["Animation"] = [],
**kwargs,
):
"""
This method animates the movement of the camera
to the given spherical coordinates.
Parameters
----------
phi : int or float, optional
The polar angle i.e the angle between Z_AXIS and Camera through ORIGIN in radians.
theta : int or float, optional
The azimuthal angle i.e the angle that spins the camera around the Z_AXIS.
distance : int or float, optional
The radial distance between ORIGIN and Camera.
gamma : int or float, optional
The rotation of the camera about the vector from the ORIGIN to the Camera.
frame_center : list, tuple or np.array, optional
The new center of the camera frame in cartesian coordinates.
added_anims : list, optional
Any other animations to be played at the same time.
"""
anims = []
value_tracker_pairs = [
(phi, self.renderer.camera.phi_tracker),
(theta, self.renderer.camera.theta_tracker),
(distance, self.renderer.camera.distance_tracker),
(gamma, self.renderer.camera.gamma_tracker),
]
for value, tracker in value_tracker_pairs:
if value is not None:
anims.append(ApplyMethod(tracker.set_value, value, **kwargs))
if frame_center is not None:
anims.append(
ApplyMethod(
self.renderer.camera._frame_center.move_to, frame_center, **kwargs
)
)
self.play(*anims + added_anims)
# These lines are added to improve performance. If manim thinks that frame_center is moving,
# it is required to redraw every object. These lines remove frame_center from the Scene once
# its animation is done, ensuring that manim does not think that it is moving. Since the
# frame_center is never actually drawn, this shouldn't break anything.
if frame_center is not None:
self.remove(self.renderer.camera._frame_center)
def get_moving_mobjects(self, *animations):
"""
This method returns a list of all of the Mobjects in the Scene that
are moving, that are also in the animations passed.
Parameters
----------
*animations : Animation
The animations whose mobjects will be checked.
"""
moving_mobjects = Scene.get_moving_mobjects(self, *animations)
camera_mobjects = self.renderer.camera.get_value_trackers() + [
self.renderer.camera._frame_center
]
if any([cm in moving_mobjects for cm in camera_mobjects]):
return self.mobjects
return moving_mobjects
def add_fixed_orientation_mobjects(self, *mobjects, **kwargs):
"""
This method is used to prevent the rotation and tilting
of mobjects as the camera moves around. The mobject can
still move in the x,y,z directions, but will always be
at the angle (relative to the camera) that it was at
        when it was passed through this method.
Parameters
----------
*mobjects : Mobject
The Mobject(s) whose orientation must be fixed.
**kwargs
Some valid kwargs are
use_static_center_func : bool
center_func : function
"""
self.add(*mobjects)
self.renderer.camera.add_fixed_orientation_mobjects(*mobjects, **kwargs)
def add_fixed_in_frame_mobjects(self, *mobjects):
"""
This method is used to prevent the rotation and movement
of mobjects as the camera moves around. The mobject is
essentially overlaid, and is not impacted by the camera's
movement in any way.
Parameters
----------
*mobjects : Mobjects
The Mobjects whose orientation must be fixed.
"""
self.add(*mobjects)
self.renderer.camera.add_fixed_in_frame_mobjects(*mobjects)
def remove_fixed_orientation_mobjects(self, *mobjects):
"""
This method "unfixes" the orientation of the mobjects
passed, meaning they will no longer be at the same angle
relative to the camera. This only makes sense if the
mobject was passed through add_fixed_orientation_mobjects first.
Parameters
----------
*mobjects : Mobjects
The Mobjects whose orientation must be unfixed.
"""
self.renderer.camera.remove_fixed_orientation_mobjects(*mobjects)
def remove_fixed_in_frame_mobjects(self, *mobjects):
"""
This method undoes what add_fixed_in_frame_mobjects does.
It allows the mobject to be affected by the movement of
the camera.
Parameters
----------
*mobjects : Mobjects
The Mobjects whose position and orientation must be unfixed.
"""
self.renderer.camera.remove_fixed_in_frame_mobjects(*mobjects)
##
def set_to_default_angled_camera_orientation(self, **kwargs):
"""
This method sets the default_angled_camera_orientation to the
keyword arguments passed, and sets the camera to that orientation.
Parameters
----------
**kwargs
Some recognised kwargs are phi, theta, distance, gamma,
which have the same meaning as the parameters in set_camera_orientation.
"""
        config = dict(self.default_angled_camera_orientation_kwargs)
config.update(kwargs)
self.set_camera_orientation(**config)
class SpecialThreeDScene(ThreeDScene):
"""An extension of :class:`ThreeDScene` with more settings.
It has some extra configuration for axes, spheres,
and an override for low quality rendering. Further key differences
are:
* The camera shades applicable 3DMobjects by default,
except if rendering in low quality.
* Some default params for Spheres and Axes have been added.
"""
def __init__(
self,
cut_axes_at_radius=True,
camera_config={"should_apply_shading": True, "exponential_projection": True},
three_d_axes_config={
"num_axis_pieces": 1,
"axis_config": {
"unit_size": 2,
"tick_frequency": 1,
"numbers_with_elongated_ticks": [0, 1, 2],
"stroke_width": 2,
},
},
sphere_config={"radius": 2, "resolution": (24, 48)},
default_angled_camera_position={
"phi": 70 * DEGREES,
"theta": -110 * DEGREES,
},
# When scene is extracted with -l flag, this
# configuration will override the above configuration.
low_quality_config={
"camera_config": {"should_apply_shading": False},
"three_d_axes_config": {"num_axis_pieces": 1},
"sphere_config": {"resolution": (12, 24)},
},
**kwargs,
):
self.cut_axes_at_radius = cut_axes_at_radius
self.camera_config = camera_config
self.three_d_axes_config = three_d_axes_config
self.sphere_config = sphere_config
self.default_angled_camera_position = default_angled_camera_position
self.low_quality_config = low_quality_config
if self.renderer.camera_config["pixel_width"] == config["pixel_width"]:
_config = {}
else:
_config = self.low_quality_config
_config = merge_dicts_recursively(_config, kwargs)
ThreeDScene.__init__(self, **_config)
def get_axes(self):
"""Return a set of 3D axes.
Returns
-------
:class:`.ThreeDAxes`
A set of 3D axes.
"""
axes = ThreeDAxes(**self.three_d_axes_config)
for axis in axes:
if self.cut_axes_at_radius:
p0 = axis.get_start()
p1 = axis.number_to_point(-1)
p2 = axis.number_to_point(1)
p3 = axis.get_end()
new_pieces = VGroup(Line(p0, p1), Line(p1, p2), Line(p2, p3))
for piece in new_pieces:
piece.shade_in_3d = True
new_pieces.match_style(axis.pieces)
axis.pieces.submobjects = new_pieces.submobjects
for tick in axis.tick_marks:
tick.add(VectorizedPoint(1.5 * tick.get_center()))
return axes
def get_sphere(self, **kwargs):
"""
Returns a sphere with the passed keyword arguments as properties.
Parameters
----------
**kwargs
Any valid parameter of :class:`~.Sphere` or :class:`~.Surface`.
Returns
-------
:class:`~.Sphere`
The sphere object.
"""
config = merge_dicts_recursively(self.sphere_config, kwargs)
return Sphere(**config)
def get_default_camera_position(self):
"""
Returns the default_angled_camera position.
Returns
-------
dict
Dictionary of phi, theta, distance, and gamma.
"""
return self.default_angled_camera_position
def set_camera_to_default_position(self):
"""
Sets the camera to its default position.
"""
self.set_camera_orientation(**self.default_angled_camera_position)
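# Hedged usage sketch (not part of the original module): a minimal scene built
# on the API above -- fix the camera orientation, add axes and a sphere, then
# let the camera drift about the Z axis. It would be rendered with the usual
# manim CLI (exact flags depend on the installed version).
class CameraDemo(ThreeDScene):
    def construct(self):
        axes = ThreeDAxes()
        sphere = Sphere()
        self.set_camera_orientation(phi=70 * DEGREES, theta=-45 * DEGREES)
        self.add(axes, sphere)
        self.begin_ambient_camera_rotation(rate=0.1)
        self.wait(3)
        self.stop_ambient_camera_rotation()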
| 35.986207
| 100
| 0.622972
|
| true
| true
|
f7198e330d6123f84319f87eb566ae8978c38f58
| 7,124
|
py
|
Python
|
corehq/apps/reports/urls.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 1
|
2020-07-14T13:00:23.000Z
|
2020-07-14T13:00:23.000Z
|
corehq/apps/reports/urls.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 94
|
2020-12-11T06:57:31.000Z
|
2022-03-15T10:24:06.000Z
|
corehq/apps/reports/urls.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
from django.conf.urls import include, url
from django.core.exceptions import ImproperlyConfigured
from corehq.apps.reports.standard.forms.reports import ReprocessXFormErrorView
from corehq.apps.userreports.reports.view import (
ConfigurableReportView,
CustomConfigurableReportDispatcher,
)
from corehq.apps.userreports.views import (
ConfigureReport,
EditReportInBuilder,
ReportBuilderDataSourceSelect,
ReportBuilderPaywallActivatingSubscription,
ReportBuilderPaywallPricing,
ReportPreview,
)
from .dispatcher import (
CustomProjectReportDispatcher,
ProjectReportDispatcher,
)
from .filters import urls as filter_urls
from .util import get_installed_custom_modules
from .views import (
AddSavedReportConfigView,
CaseAttachmentsView,
CaseDataView,
EditFormInstance,
FormDataView,
MySavedReportsView,
ScheduledReportsView,
archive_form,
case_form_data,
case_forms,
case_property_changes,
case_property_names,
case_xml,
close_case_view,
delete_config,
delete_scheduled_report,
download_case_history,
download_form,
edit_case_view,
edit_form,
email_report,
export_case_transactions,
export_report,
project_health_user_details,
rebuild_case_view,
resave_case_view,
resave_form_view,
restore_edit,
send_test_scheduled_report,
unarchive_form,
undo_close_case_view,
view_scheduled_report,
)
custom_report_urls = [
CustomProjectReportDispatcher.url_pattern(),
]
urlpatterns = [
ConfigurableReportView.url_pattern(),
CustomConfigurableReportDispatcher.url_pattern(),
# Report Builder
url(r'^builder/select_source/$', ReportBuilderDataSourceSelect.as_view(),
name=ReportBuilderDataSourceSelect.urlname),
url(r'^builder/configure/$', ConfigureReport.as_view(), name=ConfigureReport.urlname),
url(r'^builder/preview/(?P<data_source>[\w\-]+)/$', ReportPreview.as_view(), name=ReportPreview.urlname),
url(r'^builder/edit/(?P<report_id>[\w\-]+)/$', EditReportInBuilder.as_view(), name='edit_report_in_builder'),
url(r'builder/subscribe/pricing/$', ReportBuilderPaywallPricing.as_view(),
name=ReportBuilderPaywallPricing.urlname),
url(r'builder/subscribe/activating_subscription/$', ReportBuilderPaywallActivatingSubscription.as_view(),
name=ReportBuilderPaywallActivatingSubscription.urlname),
url(r'^$', MySavedReportsView.as_view(), name="reports_home"),
url(r'^saved/', MySavedReportsView.as_view(), name=MySavedReportsView.urlname),
url(r'^saved_reports', MySavedReportsView.as_view(), name="old_saved_reports"),
url(r'^case_data/(?P<case_id>[\w\-]+)/$', CaseDataView.as_view(), name=CaseDataView.urlname),
url(r'^case_data/(?P<case_id>[\w\-]+)/forms/$', case_forms, name="single_case_forms"),
url(r'^case_data/(?P<case_id>[\w\-]+)/attachments/$',
CaseAttachmentsView.as_view(), name=CaseAttachmentsView.urlname),
url(r'^case_data/(?P<case_id>[\w\-]+)/view/xml/$', case_xml, name="single_case_xml"),
url(r'^case_data/(?P<case_id>[\w\-]+)/properties/$', case_property_names, name="case_property_names"),
url(r'^case_data/(?P<case_id>[\w\-]+)/history/$', download_case_history, name="download_case_history"),
url(r'^case_data/(?P<case_id>[\w\-]+)/edit/$', edit_case_view, name="edit_case"),
url(r'^case_data/(?P<case_id>[\w\-]+)/rebuild/$', rebuild_case_view, name="rebuild_case"),
url(r'^case_data/(?P<case_id>[\w\-]+)/resave/$', resave_case_view, name="resave_case"),
url(r'^case_data/(?P<case_id>[\w\-]+)/close/$', close_case_view, name="close_case"),
url(r'^case_data/(?P<case_id>[\w\-]+)/undo-close/(?P<xform_id>[\w\-:]+)/$',
undo_close_case_view, name="undo_close_case"),
url(r'^case_data/(?P<case_id>[\w\-]+)/export_transactions/$',
export_case_transactions, name="export_case_transactions"),
url(r'^case_data/(?P<case_id>[\w\-]+)/(?P<xform_id>[\w\-:]+)/$', case_form_data, name="case_form_data"),
url(r'^case_data/(?P<case_id>[\w\-]+)/case_property/(?P<case_property_name>[\w_\-.]+)/$',
case_property_changes, name="case_property_changes"),
# Download and view form data
url(r'^form_data/(?P<instance_id>[\w\-:]+)/$', FormDataView.as_view(), name=FormDataView.urlname),
url(r'^form_data/(?P<instance_id>[\w\-:]+)/download/$', download_form, name='download_form'),
url(r'^form_data/(?P<instance_id>[\w\-:]+)/edit/$', EditFormInstance.as_view(), name='edit_form_instance'),
url(r'^form_data/(?P<instance_id>[\w\-:]+)/restore_version/$', restore_edit, name='restore_edit'),
url(r'^form_data/(?P<instance_id>[\w\-:]+)/correct_data/$', edit_form, name='edit_form'),
url(r'^form_data/(?P<instance_id>[\w\-:]+)/archive/$', archive_form, name='archive_form'),
url(r'^form_data/(?P<instance_id>[\w\-:]+)/unarchive/$', unarchive_form, name='unarchive_form'),
url(r'^form_data/(?P<instance_id>[\w\-:]+)/rebuild/$', resave_form_view, name='resave_form'),
# project health ajax
url(r'^project_health/ajax/(?P<user_id>[\w\-]+)/$', project_health_user_details,
name='project_health_user_details'),
# Full Excel export
url(r'^full_excel_export/(?P<export_hash>[\w\-]+)/(?P<format>[\w\-]+)$', export_report, name="export_report"),
# once off email
url(r"^email_onceoff/(?P<report_slug>[\w_]+)/$", email_report, kwargs=dict(once=True), name='email_report'),
url(r"^custom/email_onceoff/(?P<report_slug>[\w_]+)/$", email_report,
kwargs=dict(report_type=CustomProjectReportDispatcher.prefix, once=True), name='email_onceoff'),
# Saved reports
url(r"^configs$", AddSavedReportConfigView.as_view(), name=AddSavedReportConfigView.name),
url(r"^configs/(?P<config_id>[\w-]+)$", delete_config,
name='delete_report_config'),
# Scheduled reports
url(r'^scheduled_reports/(?P<scheduled_report_id>[\w-]+)?$',
ScheduledReportsView.as_view(), name=ScheduledReportsView.urlname),
url(r'^scheduled_report/(?P<scheduled_report_id>[\w-]+)/delete$',
delete_scheduled_report, name='delete_scheduled_report'),
url(r'^send_test_scheduled_report/(?P<scheduled_report_id>[\w-]+)/$',
send_test_scheduled_report, name='send_test_scheduled_report'),
url(r'^view_scheduled_report/(?P<scheduled_report_id>[\w_]+)/$',
view_scheduled_report, name='view_scheduled_report'),
# V2 Reports
url(r'^v2/', include('corehq.apps.reports.v2.urls')),
# Internal Use
url(r'^reprocess_error_form/$', ReprocessXFormErrorView.as_view(),
name=ReprocessXFormErrorView.urlname),
url(r'^custom/', include(custom_report_urls)),
url(r'^filters/', include(filter_urls)),
ProjectReportDispatcher.url_pattern(),
]
for module in get_installed_custom_modules():
module_name = module.__name__.split('.')[-1]
try:
custom_report_urls += [
url(r"^%s/" % module_name, include('{0}.urls'.format(module.__name__))),
]
except ImproperlyConfigured:
logging.info("Module %s does not provide urls" % module_name)
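# Hedged sketch (not part of this file): each installed custom module is
# expected to expose a `urls` submodule, which the loop above mounts under
# /<module_name>/; modules without one are skipped via ImproperlyConfigured.
# A minimal hypothetical mymodule/urls.py would look roughly like:
#
#     from django.conf.urls import url
#     from .views import my_report_view
#
#     urlpatterns = [
#         url(r'^$', my_report_view, name='mymodule_report'),
#     ]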
| 44.525
| 114
| 0.701291
|
| true
| true
|
f7198e35f24a43baae21005438b0076176ee416a
| 561
|
py
|
Python
|
oving_8_c.py
|
W3OP/Oving_9_round2
|
090cbc3b135840914659d50c6fa48ab756e5449e
|
[
"MIT"
] | null | null | null |
oving_8_c.py
|
W3OP/Oving_9_round2
|
090cbc3b135840914659d50c6fa48ab756e5449e
|
[
"MIT"
] | null | null | null |
oving_8_c.py
|
W3OP/Oving_9_round2
|
090cbc3b135840914659d50c6fa48ab756e5449e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 22 10:13:37 2021
@author: palme
"""
import oving_8_b as o8b
test = o8b.Quiz("Hvor mange bein har en hest", [1, 2, 3, 4],4)
print(test)
dude = int(input("Svar: "))
svar1 = test.svaret(dude)
if svar1:
print("Svaret er rett")
else:
print("Svaret er feil")
print("\n \n")
test2 = o8b.Quiz("Hvilket land er i i nå?", ["norge", "sverie", "danmark"],1)
print(test2)
dude2 = int(input("Svar: "))
svar2 = test2.svaret(dude2)
if svar2:
print("Svaret er rett!")
else:
print("Svaret er feil")
| 15.162162
| 77
| 0.611408
|
| true
| true
|
f7198ec98548e880b167ef7ccfc9be00d9b58137
| 5,121
|
py
|
Python
|
zipkin/binding/pyramid/pyramidhook.py
|
Themimitoof/python-zipkin
|
f91169d044a49f641930bdfc456f34e497690fe8
|
[
"Apache-2.0"
] | null | null | null |
zipkin/binding/pyramid/pyramidhook.py
|
Themimitoof/python-zipkin
|
f91169d044a49f641930bdfc456f34e497690fe8
|
[
"Apache-2.0"
] | null | null | null |
zipkin/binding/pyramid/pyramidhook.py
|
Themimitoof/python-zipkin
|
f91169d044a49f641930bdfc456f34e497690fe8
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
import time
import logging
from pyramid.tweens import INGRESS
from pyramid.settings import aslist
from zipkin import local
from zipkin.api import stack_trace
from zipkin.models import Trace, Annotation
from zipkin.util import int_or_none
from zipkin.client import log as zipkin_log
from zipkin.config import configure as configure_zk
log = logging.getLogger(__name__)
class AllTraceTweenView(object):
endpoint = None
@classmethod
def configure(cls, settings):
default_name = "Registry" # Keep compat with `registry.__name__` ?
name = settings.get("zipkin.service_name", default_name)
bindings = aslist(settings.get("zipkin.bindings", "requests celery xmlrpclib"))
cls.endpoint = configure_zk(
name,
settings,
use_requests="requests" in bindings,
use_celery="celery" in bindings,
use_xmlrpclib="xmlrpclib" in bindings,
)
def __init__(self, handler, registry):
self.handler = handler
self.trace = None
def track_start_request(self, request):
headers = request.headers
trace_name = request.path_qs
if request.matched_route:
# we only get a matched route if we've gone through the router.
trace_name = request.matched_route.pattern
trace = Trace(
request.method + " " + trace_name,
int_or_none(headers.get("X-B3-TraceId", None)),
int_or_none(headers.get("X-B3-SpanId", None)),
int_or_none(headers.get("X-B3-ParentSpanId", None)),
endpoint=self.endpoint,
)
if "X-B3-TraceId" not in headers:
log.info("no trace info from request: %s", request.path_qs)
if request.matchdict: # matchdict maybe none if no route is registered
for k, v in request.matchdict.items():
trace.record(Annotation.string("route.param.%s" % k, v))
trace.record(Annotation.string("http.path", request.path_qs))
log.info("new trace %r", trace.trace_id)
stack_trace(trace)
trace.record(Annotation.server_recv())
self.trace = trace
def track_end_request(self, request, response):
if self.trace:
self.trace.record(Annotation.server_send())
log.info("reporting trace %s", self.trace.name)
response.headers["Trace-Id"] = str(self.trace.trace_id)
zipkin_log(self.trace)
def __call__(self, request):
self.track_start_request(request)
response = None
try:
response = self.handler(request)
finally:
# request.response in case an exception is raised ?
self.track_end_request(request, response or request.response)
local().reset()
self.trace = None
return response or request.response
class SlowQueryTweenView(AllTraceTweenView):
max_duration = None
@classmethod
def configure(cls, settings):
super(SlowQueryTweenView, cls).configure(settings)
setting = settings.get("zipkin.slow_log_duration_exceed")
if setting is None:
log.error(
"Missing setting 'zipkin.slow_log_duration_exceed' %r",
list(settings.keys()),
)
return
try:
cls.max_duration = float(setting)
except ValueError:
log.error("Invalid setting 'zipkin.slow_log_duration_exceed'")
def __init__(self, handler, registry):
super(SlowQueryTweenView, self).__init__(handler, registry)
self.start = None
def track_start_request(self, request):
self.start = time.time()
super(SlowQueryTweenView, self).track_start_request(request)
def track_end_request(self, request, response):
if self.max_duration is None:
            # unconfigured, we don't care
return
if self.start:
duration = time.time() - self.start
if duration > self.max_duration:
super(SlowQueryTweenView, self).track_end_request(request, response)
def includeme(config):
"""Include the zipkin definitions"""
# Attach the subscriber a couple of times, this allow to start logging as
# early as possible. Later calls on the same request will enhance the more
# we proceed through the stack (after authentication, after router, ...)
settings = config.registry.settings
tween_factory = settings.get("zipkin.tween_factory", "all")
assert tween_factory in ["all", "slow_query"]
if tween_factory == "all":
tween_factory = AllTraceTweenView
elif tween_factory == "slow_query":
tween_factory = SlowQueryTweenView
else:
log.error(
"Invalid value for settings 'zipkin.tween_factory', should be all or slow_query, not %s",
tween_factory,
)
return
tween_factory.configure(settings)
config.add_tween(
"{}.{}".format(tween_factory.__module__, tween_factory.__name__),
under=INGRESS,
)
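# Hedged usage sketch (not part of the original module): wiring the tween into a
# Pyramid app. The setting keys mirror the ones read above; the service name,
# binding list and threshold are illustrative, and the dotted include path is
# taken from this file's location in the package.
def example_main(global_config, **settings):
    from pyramid.config import Configurator
    settings.setdefault("zipkin.service_name", "my-service")
    settings.setdefault("zipkin.bindings", "requests")
    settings.setdefault("zipkin.tween_factory", "slow_query")
    settings.setdefault("zipkin.slow_log_duration_exceed", "0.5")
    config = Configurator(settings=settings)
    config.include("zipkin.binding.pyramid.pyramidhook")
    return config.make_wsgi_app()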
| 32.617834
| 101
| 0.641672
|
from __future__ import absolute_import
import time
import logging
from pyramid.tweens import INGRESS
from pyramid.settings import aslist
from zipkin import local
from zipkin.api import stack_trace
from zipkin.models import Trace, Annotation
from zipkin.util import int_or_none
from zipkin.client import log as zipkin_log
from zipkin.config import configure as configure_zk
log = logging.getLogger(__name__)
class AllTraceTweenView(object):
endpoint = None
@classmethod
def configure(cls, settings):
default_name = "Registry"
name = settings.get("zipkin.service_name", default_name)
bindings = aslist(settings.get("zipkin.bindings", "requests celery xmlrpclib"))
cls.endpoint = configure_zk(
name,
settings,
use_requests="requests" in bindings,
use_celery="celery" in bindings,
use_xmlrpclib="xmlrpclib" in bindings,
)
def __init__(self, handler, registry):
self.handler = handler
self.trace = None
def track_start_request(self, request):
headers = request.headers
trace_name = request.path_qs
if request.matched_route:
trace_name = request.matched_route.pattern
trace = Trace(
request.method + " " + trace_name,
int_or_none(headers.get("X-B3-TraceId", None)),
int_or_none(headers.get("X-B3-SpanId", None)),
int_or_none(headers.get("X-B3-ParentSpanId", None)),
endpoint=self.endpoint,
)
if "X-B3-TraceId" not in headers:
log.info("no trace info from request: %s", request.path_qs)
if request.matchdict: # matchdict maybe none if no route is registered
for k, v in request.matchdict.items():
trace.record(Annotation.string("route.param.%s" % k, v))
trace.record(Annotation.string("http.path", request.path_qs))
log.info("new trace %r", trace.trace_id)
stack_trace(trace)
trace.record(Annotation.server_recv())
self.trace = trace
def track_end_request(self, request, response):
if self.trace:
self.trace.record(Annotation.server_send())
log.info("reporting trace %s", self.trace.name)
response.headers["Trace-Id"] = str(self.trace.trace_id)
zipkin_log(self.trace)
def __call__(self, request):
self.track_start_request(request)
response = None
try:
response = self.handler(request)
finally:
            # fall back to request.response in case the handler raised an exception
self.track_end_request(request, response or request.response)
local().reset()
self.trace = None
return response or request.response
class SlowQueryTweenView(AllTraceTweenView):
max_duration = None
@classmethod
def configure(cls, settings):
super(SlowQueryTweenView, cls).configure(settings)
setting = settings.get("zipkin.slow_log_duration_exceed")
if setting is None:
log.error(
"Missing setting 'zipkin.slow_log_duration_exceed' %r",
list(settings.keys()),
)
return
try:
cls.max_duration = float(setting)
except ValueError:
log.error("Invalid setting 'zipkin.slow_log_duration_exceed'")
def __init__(self, handler, registry):
super(SlowQueryTweenView, self).__init__(handler, registry)
self.start = None
def track_start_request(self, request):
self.start = time.time()
super(SlowQueryTweenView, self).track_start_request(request)
def track_end_request(self, request, response):
if self.max_duration is None:
            # unconfigured, we don't care
return
if self.start:
duration = time.time() - self.start
if duration > self.max_duration:
super(SlowQueryTweenView, self).track_end_request(request, response)
def includeme(config):
settings = config.registry.settings
tween_factory = settings.get("zipkin.tween_factory", "all")
assert tween_factory in ["all", "slow_query"]
if tween_factory == "all":
tween_factory = AllTraceTweenView
elif tween_factory == "slow_query":
tween_factory = SlowQueryTweenView
else:
log.error(
"Invalid value for settings 'zipkin.tween_factory', should be all or slow_query, not %s",
tween_factory,
)
return
tween_factory.configure(settings)
config.add_tween(
"{}.{}".format(tween_factory.__module__, tween_factory.__name__),
under=INGRESS,
)
| true
| true
|
f7198ece6a41b7a5f0f2edead87cf05f2c1c0cd4
| 10,093
|
py
|
Python
|
sdks/python/http_client/v1/polyaxon_sdk/models/v1_bayes.py
|
onilton/polyaxon
|
3b0d7cbeead74e62eb0eedbb2962f605ebb9fa81
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/http_client/v1/polyaxon_sdk/models/v1_bayes.py
|
onilton/polyaxon
|
3b0d7cbeead74e62eb0eedbb2962f605ebb9fa81
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/http_client/v1/polyaxon_sdk/models/v1_bayes.py
|
onilton/polyaxon
|
3b0d7cbeead74e62eb0eedbb2962f605ebb9fa81
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.9.4
Contact: contact@polyaxon.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1Bayes(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'kind': 'str',
'params': 'dict(str, object)',
'num_initial_runs': 'int',
'max_iterations': 'int',
'utility_function': 'object',
'metric': 'V1OptimizationMetric',
'seed': 'int',
'concurrency': 'int',
'tuner': 'V1Tuner',
'early_stopping': 'list[object]'
}
attribute_map = {
'kind': 'kind',
'params': 'params',
'num_initial_runs': 'numInitialRuns',
'max_iterations': 'maxIterations',
'utility_function': 'utilityFunction',
'metric': 'metric',
'seed': 'seed',
'concurrency': 'concurrency',
'tuner': 'tuner',
'early_stopping': 'earlyStopping'
}
def __init__(self, kind='bayes', params=None, num_initial_runs=None, max_iterations=None, utility_function=None, metric=None, seed=None, concurrency=None, tuner=None, early_stopping=None, local_vars_configuration=None): # noqa: E501
"""V1Bayes - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._kind = None
self._params = None
self._num_initial_runs = None
self._max_iterations = None
self._utility_function = None
self._metric = None
self._seed = None
self._concurrency = None
self._tuner = None
self._early_stopping = None
self.discriminator = None
if kind is not None:
self.kind = kind
if params is not None:
self.params = params
if num_initial_runs is not None:
self.num_initial_runs = num_initial_runs
if max_iterations is not None:
self.max_iterations = max_iterations
if utility_function is not None:
self.utility_function = utility_function
if metric is not None:
self.metric = metric
if seed is not None:
self.seed = seed
if concurrency is not None:
self.concurrency = concurrency
if tuner is not None:
self.tuner = tuner
if early_stopping is not None:
self.early_stopping = early_stopping
@property
def kind(self):
"""Gets the kind of this V1Bayes. # noqa: E501
:return: The kind of this V1Bayes. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1Bayes.
:param kind: The kind of this V1Bayes. # noqa: E501
:type: str
"""
self._kind = kind
@property
def params(self):
"""Gets the params of this V1Bayes. # noqa: E501
:return: The params of this V1Bayes. # noqa: E501
:rtype: dict(str, object)
"""
return self._params
@params.setter
def params(self, params):
"""Sets the params of this V1Bayes.
:param params: The params of this V1Bayes. # noqa: E501
:type: dict(str, object)
"""
self._params = params
@property
def num_initial_runs(self):
"""Gets the num_initial_runs of this V1Bayes. # noqa: E501
:return: The num_initial_runs of this V1Bayes. # noqa: E501
:rtype: int
"""
return self._num_initial_runs
@num_initial_runs.setter
def num_initial_runs(self, num_initial_runs):
"""Sets the num_initial_runs of this V1Bayes.
:param num_initial_runs: The num_initial_runs of this V1Bayes. # noqa: E501
:type: int
"""
self._num_initial_runs = num_initial_runs
@property
def max_iterations(self):
"""Gets the max_iterations of this V1Bayes. # noqa: E501
:return: The max_iterations of this V1Bayes. # noqa: E501
:rtype: int
"""
return self._max_iterations
@max_iterations.setter
def max_iterations(self, max_iterations):
"""Sets the max_iterations of this V1Bayes.
:param max_iterations: The max_iterations of this V1Bayes. # noqa: E501
:type: int
"""
self._max_iterations = max_iterations
@property
def utility_function(self):
"""Gets the utility_function of this V1Bayes. # noqa: E501
:return: The utility_function of this V1Bayes. # noqa: E501
:rtype: object
"""
return self._utility_function
@utility_function.setter
def utility_function(self, utility_function):
"""Sets the utility_function of this V1Bayes.
:param utility_function: The utility_function of this V1Bayes. # noqa: E501
:type: object
"""
self._utility_function = utility_function
@property
def metric(self):
"""Gets the metric of this V1Bayes. # noqa: E501
:return: The metric of this V1Bayes. # noqa: E501
:rtype: V1OptimizationMetric
"""
return self._metric
@metric.setter
def metric(self, metric):
"""Sets the metric of this V1Bayes.
:param metric: The metric of this V1Bayes. # noqa: E501
:type: V1OptimizationMetric
"""
self._metric = metric
@property
def seed(self):
"""Gets the seed of this V1Bayes. # noqa: E501
:return: The seed of this V1Bayes. # noqa: E501
:rtype: int
"""
return self._seed
@seed.setter
def seed(self, seed):
"""Sets the seed of this V1Bayes.
:param seed: The seed of this V1Bayes. # noqa: E501
:type: int
"""
self._seed = seed
@property
def concurrency(self):
"""Gets the concurrency of this V1Bayes. # noqa: E501
:return: The concurrency of this V1Bayes. # noqa: E501
:rtype: int
"""
return self._concurrency
@concurrency.setter
def concurrency(self, concurrency):
"""Sets the concurrency of this V1Bayes.
:param concurrency: The concurrency of this V1Bayes. # noqa: E501
:type: int
"""
self._concurrency = concurrency
@property
def tuner(self):
"""Gets the tuner of this V1Bayes. # noqa: E501
:return: The tuner of this V1Bayes. # noqa: E501
:rtype: V1Tuner
"""
return self._tuner
@tuner.setter
def tuner(self, tuner):
"""Sets the tuner of this V1Bayes.
:param tuner: The tuner of this V1Bayes. # noqa: E501
:type: V1Tuner
"""
self._tuner = tuner
@property
def early_stopping(self):
"""Gets the early_stopping of this V1Bayes. # noqa: E501
:return: The early_stopping of this V1Bayes. # noqa: E501
:rtype: list[object]
"""
return self._early_stopping
@early_stopping.setter
def early_stopping(self, early_stopping):
"""Sets the early_stopping of this V1Bayes.
:param early_stopping: The early_stopping of this V1Bayes. # noqa: E501
:type: list[object]
"""
self._early_stopping = early_stopping
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Bayes):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Bayes):
return True
return self.to_dict() != other.to_dict()
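# --- Example sketch (not part of the original generated file) --------------
# Building a V1Bayes config and serializing it; all field values below are
# illustrative, not taken from the Polyaxon documentation.
def _example_v1_bayes():  # hypothetical helper, for illustration only
    bayes = V1Bayes(
        params={"lr": {"uniform": [0.001, 0.1]}},
        num_initial_runs=5,
        max_iterations=20,
        concurrency=2,
    )
    return bayes.to_dict()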
| 27.13172
| 237
| 0.593382
|
import pprint
import re
import six
from polyaxon_sdk.configuration import Configuration
class V1Bayes(object):
openapi_types = {
'kind': 'str',
'params': 'dict(str, object)',
'num_initial_runs': 'int',
'max_iterations': 'int',
'utility_function': 'object',
'metric': 'V1OptimizationMetric',
'seed': 'int',
'concurrency': 'int',
'tuner': 'V1Tuner',
'early_stopping': 'list[object]'
}
attribute_map = {
'kind': 'kind',
'params': 'params',
'num_initial_runs': 'numInitialRuns',
'max_iterations': 'maxIterations',
'utility_function': 'utilityFunction',
'metric': 'metric',
'seed': 'seed',
'concurrency': 'concurrency',
'tuner': 'tuner',
'early_stopping': 'earlyStopping'
}
def __init__(self, kind='bayes', params=None, num_initial_runs=None, max_iterations=None, utility_function=None, metric=None, seed=None, concurrency=None, tuner=None, early_stopping=None, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._kind = None
self._params = None
self._num_initial_runs = None
self._max_iterations = None
self._utility_function = None
self._metric = None
self._seed = None
self._concurrency = None
self._tuner = None
self._early_stopping = None
self.discriminator = None
if kind is not None:
self.kind = kind
if params is not None:
self.params = params
if num_initial_runs is not None:
self.num_initial_runs = num_initial_runs
if max_iterations is not None:
self.max_iterations = max_iterations
if utility_function is not None:
self.utility_function = utility_function
if metric is not None:
self.metric = metric
if seed is not None:
self.seed = seed
if concurrency is not None:
self.concurrency = concurrency
if tuner is not None:
self.tuner = tuner
if early_stopping is not None:
self.early_stopping = early_stopping
@property
def kind(self):
return self._kind
@kind.setter
def kind(self, kind):
self._kind = kind
@property
def params(self):
return self._params
@params.setter
def params(self, params):
self._params = params
@property
def num_initial_runs(self):
return self._num_initial_runs
@num_initial_runs.setter
def num_initial_runs(self, num_initial_runs):
self._num_initial_runs = num_initial_runs
@property
def max_iterations(self):
return self._max_iterations
@max_iterations.setter
def max_iterations(self, max_iterations):
self._max_iterations = max_iterations
@property
def utility_function(self):
return self._utility_function
@utility_function.setter
def utility_function(self, utility_function):
self._utility_function = utility_function
@property
def metric(self):
return self._metric
@metric.setter
def metric(self, metric):
self._metric = metric
@property
def seed(self):
return self._seed
@seed.setter
def seed(self, seed):
self._seed = seed
@property
def concurrency(self):
return self._concurrency
@concurrency.setter
def concurrency(self, concurrency):
self._concurrency = concurrency
@property
def tuner(self):
return self._tuner
@tuner.setter
def tuner(self, tuner):
self._tuner = tuner
@property
def early_stopping(self):
return self._early_stopping
@early_stopping.setter
def early_stopping(self, early_stopping):
self._early_stopping = early_stopping
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, V1Bayes):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, V1Bayes):
return True
return self.to_dict() != other.to_dict()
| true
| true
|
f7198f349b0048d3b6330725d65dfdf36b553ff4
| 1,458
|
py
|
Python
|
soltrannet/__init__.py
|
hengwei-chan/molecular_attention_transformer
|
29193d4155df528e3a6a0c1e0da39111d0b8db93
|
[
"Apache-2.0"
] | 16
|
2021-03-10T17:10:06.000Z
|
2022-03-16T13:07:58.000Z
|
soltrannet/__init__.py
|
hengwei-chan/molecular_attention_transformer
|
29193d4155df528e3a6a0c1e0da39111d0b8db93
|
[
"Apache-2.0"
] | null | null | null |
soltrannet/__init__.py
|
hengwei-chan/molecular_attention_transformer
|
29193d4155df528e3a6a0c1e0da39111d0b8db93
|
[
"Apache-2.0"
] | 10
|
2021-06-01T03:36:08.000Z
|
2022-03-18T16:58:25.000Z
|
from .predict import predict
import argparse
import sys, multiprocessing
import torch
def _parse_args():
parser=argparse.ArgumentParser(description="Run SolTranNet aqueous solubility predictor")
    parser.add_argument('input',nargs='?',type=argparse.FileType('r'),default=sys.stdin,help='PATH to the file containing the SMILES you wish to use. Assumes the content is one SMILES string per line.')
parser.add_argument('output',nargs='?',type=argparse.FileType('w'),default=sys.stdout,help='Name of the output file. Defaults to stdout.')
parser.add_argument('--batchsize',default=32,type=int,help='Batch size for the data loader. Defaults to 32.')
parser.add_argument('--cpus',default=multiprocessing.cpu_count(),type=int,help='Number of CPU cores to use for the data loader. Defaults to use all available cores. Pass 0 to only run on 1 CPU.')
parser.add_argument('--cpu_predict',action='store_true',help='Flag to force the predictions to be made on only the CPU. Default behavior is to use GPU if available.')
args=parser.parse_args()
return args
def _run(args):
smiles=[x.rstrip() for x in args.input]
if args.cpu_predict:
predictions=predict(smiles,batch_size=args.batchsize,num_workers=args.cpus,device=torch.device('cpu'))
else:
predictions=predict(smiles,batch_size=args.batchsize,num_workers=args.cpus)
for pred, smi, warn in predictions:
args.output.write(f'{smi},{pred:.3f},{warn}\n')
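# --- Example sketch (not part of the original file) -------------------------
# How the two helpers above fit together; the real package may expose a
# different console entry point.
def _example_main():  # hypothetical helper, for illustration only
    _run(_parse_args())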
| 52.071429
| 199
| 0.739369
|
from .predict import predict
import argparse
import sys, multiprocessing
import torch
def _parse_args():
parser=argparse.ArgumentParser(description="Run SolTranNet aqueous solubility predictor")
    parser.add_argument('input',nargs='?',type=argparse.FileType('r'),default=sys.stdin,help='PATH to the file containing the SMILES you wish to use. Assumes the content is one SMILES string per line.')
parser.add_argument('output',nargs='?',type=argparse.FileType('w'),default=sys.stdout,help='Name of the output file. Defaults to stdout.')
parser.add_argument('--batchsize',default=32,type=int,help='Batch size for the data loader. Defaults to 32.')
parser.add_argument('--cpus',default=multiprocessing.cpu_count(),type=int,help='Number of CPU cores to use for the data loader. Defaults to use all available cores. Pass 0 to only run on 1 CPU.')
parser.add_argument('--cpu_predict',action='store_true',help='Flag to force the predictions to be made on only the CPU. Default behavior is to use GPU if available.')
args=parser.parse_args()
return args
def _run(args):
smiles=[x.rstrip() for x in args.input]
if args.cpu_predict:
predictions=predict(smiles,batch_size=args.batchsize,num_workers=args.cpus,device=torch.device('cpu'))
else:
predictions=predict(smiles,batch_size=args.batchsize,num_workers=args.cpus)
for pred, smi, warn in predictions:
args.output.write(f'{smi},{pred:.3f},{warn}\n')
| true
| true
|
f7198f927dcfc0aeb6186a86d48263d8c4b1d8eb
| 5,831
|
py
|
Python
|
src/garage/torch/algos/_utils.py
|
adibellathur/garage
|
8394f0cf2b77c0a5b3a7b1ea977fa6cb3f9df0ca
|
[
"MIT"
] | 1
|
2020-02-19T00:01:29.000Z
|
2020-02-19T00:01:29.000Z
|
src/garage/torch/algos/_utils.py
|
Ashutosh-Adhikari/garage
|
482a26a07d46091f878c41b582f1478588e397ff
|
[
"MIT"
] | null | null | null |
src/garage/torch/algos/_utils.py
|
Ashutosh-Adhikari/garage
|
482a26a07d46091f878c41b582f1478588e397ff
|
[
"MIT"
] | 1
|
2020-02-13T12:05:35.000Z
|
2020-02-13T12:05:35.000Z
|
"""Utility functions used by PyTorch algorithms."""
import torch
import torch.nn.functional as F
class _Default: # pylint: disable=too-few-public-methods
"""A wrapper class to represent default arguments.
Args:
val (object): Argument value.
"""
def __init__(self, val):
self.val = val
def make_optimizer(optimizer_type, module, **kwargs):
"""Create an optimizer for PyTorch algos.
Args:
optimizer_type (Union[type, tuple[type, dict]]): Type of optimizer.
This can be an optimizer type such as 'torch.optim.Adam' or a
tuple of type and dictionary, where dictionary contains arguments
            to initialize the optimizer e.g. (torch.optim.Adam, {'lr': 1e-3})
module (torch.nn.Module): The module whose parameters needs to be
optimized.
kwargs (dict): Other keyword arguments to initialize optimizer. This
is not used when `optimizer_type` is tuple.
Returns:
torch.optim.Optimizer: Constructed optimizer.
Raises:
ValueError: Raises value error when `optimizer_type` is tuple, and
non-default argument is passed in `kwargs`.
"""
if isinstance(optimizer_type, tuple):
opt_type, opt_args = optimizer_type
for name, arg in kwargs.items():
if not isinstance(arg, _Default):
raise ValueError('Should not specify {} and explicit \
optimizer args at the same time'.format(name))
return opt_type(module.parameters(), **opt_args)
opt_args = {}
for name, arg in kwargs.items():
if isinstance(arg, _Default):
opt_args[name] = arg.val
else:
opt_args[name] = arg
return optimizer_type(module.parameters(), **opt_args)
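# --- Example sketch (not part of the original file) -------------------------
# Both calling conventions of make_optimizer; torch.nn.Linear is only a
# stand-in module for illustration.
def _example_make_optimizer():  # hypothetical helper, for illustration only
    module = torch.nn.Linear(4, 2)
    # Keyword form: _Default marks values the caller did not explicitly set.
    adam = make_optimizer(torch.optim.Adam, module, lr=_Default(1e-3))
    # (type, args-dict) form: explicit non-default kwargs are not allowed here.
    sgd = make_optimizer((torch.optim.SGD, {'lr': 1e-2}), module)
    return adam, sgd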
def compute_advantages(discount, gae_lambda, max_path_length, baselines,
rewards):
"""Calculate advantages.
Advantages are a discounted cumulative sum.
Calculate advantages using a baseline (value function) according to
Generalized Advantage Estimation (GAE)
The discounted cumulative sum can be computed using conv2d with filter.
filter:
[1, (discount * gae_lambda), (discount * gae_lambda) ^ 2, ...]
    where the length is the same as max_path_length.
    baselines and rewards also have this same shape (N x max_path_length).
baselines:
[ [b_11, b_12, b_13, ... b_1n],
[b_21, b_22, b_23, ... b_2n],
...
[b_m1, b_m2, b_m3, ... b_mn] ]
rewards:
[ [r_11, r_12, r_13, ... r_1n],
[r_21, r_22, r_23, ... r_2n],
...
[r_m1, r_m2, r_m3, ... r_mn] ]
Args:
discount (float): RL discount factor (i.e. gamma).
gae_lambda (float): Lambda, as used for Generalized Advantage
Estimation (GAE).
max_path_length (int): Maximum length of a single rollout.
baselines (torch.Tensor): A 2D vector of value function estimates with
shape (N, T), where N is the batch dimension (number of episodes)
and T is the maximum path length experienced by the agent. If an
episode terminates in fewer than T time steps, the remaining
elements in that episode should be set to 0.
rewards (torch.Tensor): A 2D vector of per-step rewards with shape
(N, T), where N is the batch dimension (number of episodes) and T
is the maximum path length experienced by the agent. If an episode
terminates in fewer than T time steps, the remaining elements in
that episode should be set to 0.
Returns:
torch.Tensor: A 2D vector of calculated advantage values with shape
(N, T), where N is the batch dimension (number of episodes) and T
is the maximum path length experienced by the agent. If an episode
terminates in fewer than T time steps, the remaining values in that
episode should be set to 0.
"""
adv_filter = torch.full((1, 1, 1, max_path_length - 1),
discount * gae_lambda)
adv_filter = torch.cumprod(F.pad(adv_filter, (1, 0), value=1), dim=-1)
deltas = (rewards + discount * F.pad(baselines, (0, 1))[:, 1:] - baselines)
deltas = F.pad(deltas, (0, max_path_length - 1)).unsqueeze(0).unsqueeze(0)
advantages = F.conv2d(deltas, adv_filter, stride=1).squeeze()
return advantages
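# --- Example sketch (not part of the original file) -------------------------
# Tiny sanity check: two episodes (N=2), max_path_length=3. With zero
# baselines and gae_lambda=1 the advantages reduce to the discounted
# reward-to-go, which is easy to verify by hand.
def _example_compute_advantages():  # hypothetical helper, for illustration only
    rewards = torch.tensor([[1., 1., 1.],
                            [1., 0., 0.]])
    baselines = torch.zeros_like(rewards)
    return compute_advantages(discount=0.99, gae_lambda=1.0, max_path_length=3,
                              baselines=baselines, rewards=rewards)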
def pad_to_last(nums, total_length, axis=-1, val=0):
"""Pad val to last in nums in given axis.
    The length of the result along the given axis will be total_length.
Raises:
IndexError: If the input axis value is out of range of the nums array
Args:
nums (numpy.ndarray): The array to pad.
total_length (int): The final width of the Array.
        axis (int): Axis along which the padding is applied.
val (int): The value to set the padded value.
Returns:
torch.Tensor: Padded array
"""
tensor = torch.Tensor(nums)
axis = (axis + len(tensor.shape)) if axis < 0 else axis
if len(tensor.shape) <= axis:
raise IndexError('axis {} is out of range {}'.format(
axis, tensor.shape))
padding_config = [0, 0] * len(tensor.shape)
padding_idx = abs(axis - len(tensor.shape)) * 2 - 1
padding_config[padding_idx] = max(total_length - tensor.shape[axis], val)
return F.pad(tensor, padding_config)
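# --- Example sketch (not part of the original file) -------------------------
# Padding a length-3 array out to length 5 along the last axis.
def _example_pad_to_last():  # hypothetical helper, for illustration only
    return pad_to_last([1, 2, 3], total_length=5)  # tensor([1., 2., 3., 0., 0.])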
def filter_valids(tensor, valids):
"""Filter out tensor using valids (last index of valid tensors).
valids contains last indices of each rows.
Args:
tensor (torch.Tensor): The tensor to filter
valids (list[int]): Array of length of the valid values
Returns:
torch.Tensor: Filtered Tensor
"""
return [tensor[i][:valids[i]] for i in range(len(valids))]
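# --- Example sketch (not part of the original file) -------------------------
# Keeping only the valid prefix of each row; the lengths are illustrative.
def _example_filter_valids():  # hypothetical helper, for illustration only
    tensor = torch.arange(6).reshape(2, 3)          # [[0, 1, 2], [3, 4, 5]]
    return filter_valids(tensor, [2, 1])            # [tensor([0, 1]), tensor([3])]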
| 35.993827
| 79
| 0.630081
|
import torch
import torch.nn.functional as F
class _Default:
def __init__(self, val):
self.val = val
def make_optimizer(optimizer_type, module, **kwargs):
if isinstance(optimizer_type, tuple):
opt_type, opt_args = optimizer_type
for name, arg in kwargs.items():
if not isinstance(arg, _Default):
raise ValueError('Should not specify {} and explicit \
optimizer args at the same time'.format(name))
return opt_type(module.parameters(), **opt_args)
opt_args = {}
for name, arg in kwargs.items():
if isinstance(arg, _Default):
opt_args[name] = arg.val
else:
opt_args[name] = arg
return optimizer_type(module.parameters(), **opt_args)
def compute_advantages(discount, gae_lambda, max_path_length, baselines,
rewards):
adv_filter = torch.full((1, 1, 1, max_path_length - 1),
discount * gae_lambda)
adv_filter = torch.cumprod(F.pad(adv_filter, (1, 0), value=1), dim=-1)
deltas = (rewards + discount * F.pad(baselines, (0, 1))[:, 1:] - baselines)
deltas = F.pad(deltas, (0, max_path_length - 1)).unsqueeze(0).unsqueeze(0)
advantages = F.conv2d(deltas, adv_filter, stride=1).squeeze()
return advantages
def pad_to_last(nums, total_length, axis=-1, val=0):
tensor = torch.Tensor(nums)
axis = (axis + len(tensor.shape)) if axis < 0 else axis
if len(tensor.shape) <= axis:
raise IndexError('axis {} is out of range {}'.format(
axis, tensor.shape))
padding_config = [0, 0] * len(tensor.shape)
padding_idx = abs(axis - len(tensor.shape)) * 2 - 1
padding_config[padding_idx] = max(total_length - tensor.shape[axis], val)
return F.pad(tensor, padding_config)
def filter_valids(tensor, valids):
return [tensor[i][:valids[i]] for i in range(len(valids))]
| true
| true
|
f7198f9535491c7521d5ae47ee77aaa8910d0441
| 801
|
py
|
Python
|
tests/test_export_id.py
|
David-Le-Nir/sphinxcontrib-needs
|
fe809445505fa1e9bf5963eab1d6283dad405e92
|
[
"MIT"
] | null | null | null |
tests/test_export_id.py
|
David-Le-Nir/sphinxcontrib-needs
|
fe809445505fa1e9bf5963eab1d6283dad405e92
|
[
"MIT"
] | 2
|
2022-02-13T19:49:18.000Z
|
2022-02-13T19:49:18.000Z
|
tests/test_export_id.py
|
David-Le-Nir/sphinxcontrib-needs
|
fe809445505fa1e9bf5963eab1d6283dad405e92
|
[
"MIT"
] | null | null | null |
import json
import os
from pathlib import Path
from sphinx_testing import with_app
@with_app(buildername="needs", srcdir="doc_test/doc_export_id")
def test_export_id(app, status, warning):
app.build()
content = Path(app.outdir, "needs.json").read_text()
assert "filters" in content
content_obj = json.loads(content)
assert content_obj is not None
assert "created" in content_obj
assert "FLOW_1" in content_obj["versions"]["1.0"]["filters"]
assert "TABLE_1" in content_obj["versions"]["1.0"]["filters"]
assert "LIST_1" in content_obj["versions"]["1.0"]["filters"]
@with_app(buildername="html", srcdir="doc_test/doc_export_id")
def test_export_id_html(app, status, warning):
app.build()
assert not os.path.exists(os.path.join(app.outdir, "needs.json"))
| 30.807692
| 69
| 0.716604
|
import json
import os
from pathlib import Path
from sphinx_testing import with_app
@with_app(buildername="needs", srcdir="doc_test/doc_export_id")
def test_export_id(app, status, warning):
app.build()
content = Path(app.outdir, "needs.json").read_text()
assert "filters" in content
content_obj = json.loads(content)
assert content_obj is not None
assert "created" in content_obj
assert "FLOW_1" in content_obj["versions"]["1.0"]["filters"]
assert "TABLE_1" in content_obj["versions"]["1.0"]["filters"]
assert "LIST_1" in content_obj["versions"]["1.0"]["filters"]
@with_app(buildername="html", srcdir="doc_test/doc_export_id")
def test_export_id_html(app, status, warning):
app.build()
assert not os.path.exists(os.path.join(app.outdir, "needs.json"))
| true
| true
|
f719907ff48a40bf779cf6020839f0d298c921ad
| 7,308
|
py
|
Python
|
wavedata/tools/core/voxel_grid_2d.py
|
amuamushu/wavedata
|
1745c646ff3a76b38a81c439a0edd900c986c9f7
|
[
"MIT"
] | null | null | null |
wavedata/tools/core/voxel_grid_2d.py
|
amuamushu/wavedata
|
1745c646ff3a76b38a81c439a0edd900c986c9f7
|
[
"MIT"
] | null | null | null |
wavedata/tools/core/voxel_grid_2d.py
|
amuamushu/wavedata
|
1745c646ff3a76b38a81c439a0edd900c986c9f7
|
[
"MIT"
] | null | null | null |
import numpy as np
from wavedata.wavedata.tools.core import geometry_utils
class VoxelGrid2D(object):
"""
Voxel grids represent occupancy info. The voxelize_2d method projects a point cloud
onto a plane, while saving height and point density information for each voxel.
"""
# Class Constants
VOXEL_EMPTY = -1
VOXEL_FILLED = 0
def __init__(self):
# Quantization size of the voxel grid
self.voxel_size = 0.0
# Voxels at the most negative/positive xyz
self.min_voxel_coord = np.array([])
self.max_voxel_coord = np.array([])
# Size of the voxel grid along each axis
self.num_divisions = np.array([0, 0, 0])
# Points in sorted order, to match the order of the voxels
self.points = []
# Indices of filled voxels
self.voxel_indices = []
# Max point height in projected voxel
self.heights = []
# Number of points corresponding to projected voxel
self.num_pts_in_voxel = []
# Full occupancy grid, VOXEL_EMPTY or VOXEL_FILLED
self.leaf_layout_2d = []
def voxelize_2d(self, pts, voxel_size, extents=None,
ground_plane=None, create_leaf_layout=True):
"""Voxelizes the point cloud into a 2D voxel grid by
projecting it down into a flat plane, and stores the maximum
point height, and number of points corresponding to the voxel
:param pts: Point cloud as N x [x, y, z]
:param voxel_size: Quantization size for the grid
:param extents: Optional, specifies the full extents of the point cloud.
Used for creating same sized voxel grids.
:param ground_plane: Plane coefficients (a, b, c, d), xz plane used if
not specified
:param create_leaf_layout: Set this to False to create an empty
leaf_layout, which will save computation
time.
"""
# Check if points are 3D, otherwise early exit
if pts.shape[1] != 3:
raise ValueError("Points have the wrong shape: {}".format(
pts.shape))
self.voxel_size = voxel_size
# Discretize voxel coordinates to given quantization size
discrete_pts = np.floor(pts / voxel_size).astype(np.int32)
        # Use lexsort: sort by x, then z, then y
x_col = discrete_pts[:, 0]
y_col = discrete_pts[:, 1]
z_col = discrete_pts[:, 2]
sorted_order = np.lexsort((y_col, z_col, x_col))
# Save original points in sorted order
self.points = pts[sorted_order]
# Save discrete points in sorted order
discrete_pts = discrete_pts[sorted_order]
# Project all points to a 2D plane
discrete_pts_2d = discrete_pts.copy()
discrete_pts_2d[:, 1] = 0
# Format the array to c-contiguous array for unique function
contiguous_array = np.ascontiguousarray(discrete_pts_2d).view(
np.dtype((np.void, discrete_pts_2d.dtype.itemsize *
discrete_pts_2d.shape[1])))
# The new coordinates are the discretized array with its unique indexes
_, unique_indices = np.unique(contiguous_array, return_index=True)
# Sort unique indices to preserve order
unique_indices.sort()
voxel_coords = discrete_pts_2d[unique_indices]
# Number of points per voxel, last voxel calculated separately
num_points_in_voxel = np.diff(unique_indices)
num_points_in_voxel = np.append(num_points_in_voxel,
discrete_pts_2d.shape[0] -
unique_indices[-1])
if ground_plane is None:
# Use first point in voxel as highest point
height_in_voxel = self.points[unique_indices, 1]
else:
# Ground plane provided
height_in_voxel = geometry_utils.dist_to_plane(
ground_plane, self.points[unique_indices])
# Set the height and number of points for each voxel
self.heights = height_in_voxel
self.num_pts_in_voxel = num_points_in_voxel
# Find the minimum and maximum voxel coordinates
if extents is not None:
# Check provided extents
extents_transpose = np.array(extents).transpose()
if extents_transpose.shape != (2, 3):
raise ValueError("Extents are the wrong shape {}".format(
extents.shape))
# Set voxel grid extents
self.min_voxel_coord = np.floor(extents_transpose[0] / voxel_size)
self.max_voxel_coord = \
np.ceil((extents_transpose[1] / voxel_size) - 1)
self.min_voxel_coord[1] = 0
self.max_voxel_coord[1] = 0
# Check that points are bounded by new extents
if not (self.min_voxel_coord <= np.amin(voxel_coords,
axis=0)).all():
raise ValueError("Extents are smaller than min_voxel_coord")
if not (self.max_voxel_coord >= np.amax(voxel_coords,
axis=0)).all():
raise ValueError("Extents are smaller than max_voxel_coord")
else:
# Automatically calculate extents
self.min_voxel_coord = np.amin(voxel_coords, axis=0)
self.max_voxel_coord = np.amax(voxel_coords, axis=0)
# Get the voxel grid dimensions
self.num_divisions = ((self.max_voxel_coord - self.min_voxel_coord)
+ 1).astype(np.int32)
# Bring the min voxel to the origin
self.voxel_indices = (voxel_coords - self.min_voxel_coord).astype(int)
if create_leaf_layout:
# Create Voxel Object with -1 as empty/occluded, 0 as occupied
self.leaf_layout_2d = self.VOXEL_EMPTY * \
np.ones(self.num_divisions.astype(int))
# Fill out the leaf layout
self.leaf_layout_2d[self.voxel_indices[:, 0], 0,
self.voxel_indices[:, 2]] = \
self.VOXEL_FILLED
def map_to_index(self, map_index):
"""Converts map coordinate values to 1-based discretized grid index
coordinate. Note: Any values outside the extent of the grid will be
forced to be the maximum grid coordinate.
:param map_index: N x 2 points
:return: N x length(dim) (grid coordinate)
[] if min_voxel_coord or voxel_size or grid_index or dim is not set
"""
if self.voxel_size == 0 \
or len(self.min_voxel_coord) == 0 \
or len(map_index) == 0:
return []
num_divisions_2d = self.num_divisions[[0, 2]]
min_voxel_coord_2d = self.min_voxel_coord[[0, 2]]
# Truncate index (same as np.floor for positive values) and clip
# to valid voxel index range
indices = np.int32(map_index / self.voxel_size) - min_voxel_coord_2d
indices[:, 0] = np.clip(indices[:, 0], 0, num_divisions_2d[0])
indices[:, 1] = np.clip(indices[:, 1], 0, num_divisions_2d[1])
return indices
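# --- Example sketch (not part of the original file) -------------------------
# Voxelizing a tiny point cloud; the coordinates and voxel size are only
# illustrative values.
def _example_voxel_grid_2d():  # hypothetical helper, for illustration only
    pts = np.array([[0.1, 1.0, 0.2],
                    [0.3, 2.0, 0.4],
                    [1.2, 0.5, 1.7]])
    grid = VoxelGrid2D()
    grid.voxelize_2d(pts, voxel_size=0.5)
    return grid.leaf_layout_2d, grid.heights, grid.num_pts_in_voxel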
| 39.080214
| 87
| 0.601122
|
import numpy as np
from wavedata.wavedata.tools.core import geometry_utils
class VoxelGrid2D(object):
VOXEL_EMPTY = -1
VOXEL_FILLED = 0
def __init__(self):
self.voxel_size = 0.0
self.min_voxel_coord = np.array([])
self.max_voxel_coord = np.array([])
self.num_divisions = np.array([0, 0, 0])
self.points = []
self.voxel_indices = []
self.heights = []
self.num_pts_in_voxel = []
self.leaf_layout_2d = []
def voxelize_2d(self, pts, voxel_size, extents=None,
ground_plane=None, create_leaf_layout=True):
if pts.shape[1] != 3:
raise ValueError("Points have the wrong shape: {}".format(
pts.shape))
self.voxel_size = voxel_size
discrete_pts = np.floor(pts / voxel_size).astype(np.int32)
x_col = discrete_pts[:, 0]
y_col = discrete_pts[:, 1]
z_col = discrete_pts[:, 2]
sorted_order = np.lexsort((y_col, z_col, x_col))
self.points = pts[sorted_order]
discrete_pts = discrete_pts[sorted_order]
discrete_pts_2d = discrete_pts.copy()
discrete_pts_2d[:, 1] = 0
contiguous_array = np.ascontiguousarray(discrete_pts_2d).view(
np.dtype((np.void, discrete_pts_2d.dtype.itemsize *
discrete_pts_2d.shape[1])))
_, unique_indices = np.unique(contiguous_array, return_index=True)
unique_indices.sort()
voxel_coords = discrete_pts_2d[unique_indices]
num_points_in_voxel = np.diff(unique_indices)
num_points_in_voxel = np.append(num_points_in_voxel,
discrete_pts_2d.shape[0] -
unique_indices[-1])
if ground_plane is None:
height_in_voxel = self.points[unique_indices, 1]
else:
height_in_voxel = geometry_utils.dist_to_plane(
ground_plane, self.points[unique_indices])
self.heights = height_in_voxel
self.num_pts_in_voxel = num_points_in_voxel
if extents is not None:
extents_transpose = np.array(extents).transpose()
if extents_transpose.shape != (2, 3):
raise ValueError("Extents are the wrong shape {}".format(
extents.shape))
self.min_voxel_coord = np.floor(extents_transpose[0] / voxel_size)
self.max_voxel_coord = \
np.ceil((extents_transpose[1] / voxel_size) - 1)
self.min_voxel_coord[1] = 0
self.max_voxel_coord[1] = 0
if not (self.min_voxel_coord <= np.amin(voxel_coords,
axis=0)).all():
raise ValueError("Extents are smaller than min_voxel_coord")
if not (self.max_voxel_coord >= np.amax(voxel_coords,
axis=0)).all():
raise ValueError("Extents are smaller than max_voxel_coord")
else:
self.min_voxel_coord = np.amin(voxel_coords, axis=0)
self.max_voxel_coord = np.amax(voxel_coords, axis=0)
self.num_divisions = ((self.max_voxel_coord - self.min_voxel_coord)
+ 1).astype(np.int32)
self.voxel_indices = (voxel_coords - self.min_voxel_coord).astype(int)
if create_leaf_layout:
self.leaf_layout_2d = self.VOXEL_EMPTY * \
np.ones(self.num_divisions.astype(int))
self.leaf_layout_2d[self.voxel_indices[:, 0], 0,
self.voxel_indices[:, 2]] = \
self.VOXEL_FILLED
def map_to_index(self, map_index):
if self.voxel_size == 0 \
or len(self.min_voxel_coord) == 0 \
or len(map_index) == 0:
return []
num_divisions_2d = self.num_divisions[[0, 2]]
min_voxel_coord_2d = self.min_voxel_coord[[0, 2]]
indices = np.int32(map_index / self.voxel_size) - min_voxel_coord_2d
indices[:, 0] = np.clip(indices[:, 0], 0, num_divisions_2d[0])
indices[:, 1] = np.clip(indices[:, 1], 0, num_divisions_2d[1])
return indices
| true
| true
|
f719919bea61d2bf5cccc3f7d4e1bee9157cfd2e
| 1,230
|
py
|
Python
|
service/scripts/resetadmin.py
|
OA-DeepGreen/jper
|
042719a790a34f877050a32f896b947ce4407b4e
|
[
"Apache-2.0"
] | null | null | null |
service/scripts/resetadmin.py
|
OA-DeepGreen/jper
|
042719a790a34f877050a32f896b947ce4407b4e
|
[
"Apache-2.0"
] | 1
|
2022-02-03T12:35:18.000Z
|
2022-02-03T12:35:18.000Z
|
service/scripts/resetadmin.py
|
OA-DeepGreen/jper
|
042719a790a34f877050a32f896b947ce4407b4e
|
[
"Apache-2.0"
] | 3
|
2016-07-15T07:29:33.000Z
|
2020-02-03T11:20:34.000Z
|
"""
This is a script to reset the admin account in a live system.
On production this should be run once, and never again, as it removes the old
account and builds a new one in its place. This means no historical data will
be kept from before.
"""
from octopus.core import add_configuration, app
from service.models import Account
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
# some general script running features
parser.add_argument("-c", "--config", help="additional configuration to load (e.g. for testing)")
args = parser.parse_args()
if args.config:
add_configuration(app, args.config)
a = Account.pull('admin')
if not a:
a = Account()
username = 'admin'
password = 'D33pGr33n'
params = {
"id": username,
"role": ["admin"],
"email": "green@deepgreen.org",
"api_key": "admin",
"password": password
}
a.add_account(params)
a.save()
print("superuser account reseted for user " + username + " with password " + password)
print("THIS SUPERUSER ACCOUNT IS INSECURE! GENERATE A NEW PASSWORD FOR IT IMMEDIATELY! OR CREATE A NEW ACCOUNT AND DELETE THIS ONE...")
| 31.538462
| 139
| 0.664228
|
from octopus.core import add_configuration, app
from service.models import Account
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", help="additional configuration to load (e.g. for testing)")
args = parser.parse_args()
if args.config:
add_configuration(app, args.config)
a = Account.pull('admin')
if not a:
a = Account()
username = 'admin'
password = 'D33pGr33n'
params = {
"id": username,
"role": ["admin"],
"email": "green@deepgreen.org",
"api_key": "admin",
"password": password
}
a.add_account(params)
a.save()
print("superuser account reseted for user " + username + " with password " + password)
print("THIS SUPERUSER ACCOUNT IS INSECURE! GENERATE A NEW PASSWORD FOR IT IMMEDIATELY! OR CREATE A NEW ACCOUNT AND DELETE THIS ONE...")
| true
| true
|
f719923795059f5abc5f26d2960058e68c7ca4e6
| 539
|
py
|
Python
|
game_data/migrations/0003_auto_20210103_1621.py
|
cmerwin3/Adventure_Project
|
1816978e952f1250049e8d1e7fcf172620903596
|
[
"Apache-2.0"
] | null | null | null |
game_data/migrations/0003_auto_20210103_1621.py
|
cmerwin3/Adventure_Project
|
1816978e952f1250049e8d1e7fcf172620903596
|
[
"Apache-2.0"
] | null | null | null |
game_data/migrations/0003_auto_20210103_1621.py
|
cmerwin3/Adventure_Project
|
1816978e952f1250049e8d1e7fcf172620903596
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.1 on 2021-01-03 22:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('game_data', '0002_auto_20201220_2025'),
]
operations = [
migrations.RemoveField(
model_name='gamedata',
name='pin',
),
migrations.AddField(
model_name='gamedata',
name='password',
field=models.CharField(default=1, max_length=30),
preserve_default=False,
),
]
| 22.458333
| 61
| 0.575139
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('game_data', '0002_auto_20201220_2025'),
]
operations = [
migrations.RemoveField(
model_name='gamedata',
name='pin',
),
migrations.AddField(
model_name='gamedata',
name='password',
field=models.CharField(default=1, max_length=30),
preserve_default=False,
),
]
| true
| true
|
f719927ab980abbbc3d3ffdce109f65dd7ddd35e
| 118
|
py
|
Python
|
framework/conf.py
|
shew91/Retropy
|
9feb34855b997c48d93a5343a9842788d19582e6
|
[
"MIT"
] | 13
|
2018-06-02T09:11:15.000Z
|
2020-08-29T01:01:19.000Z
|
framework/conf.py
|
shew91/Retropy
|
9feb34855b997c48d93a5343a9842788d19582e6
|
[
"MIT"
] | 1
|
2021-01-17T14:03:13.000Z
|
2021-01-17T14:03:13.000Z
|
framework/conf.py
|
shew91/Retropy
|
9feb34855b997c48d93a5343a9842788d19582e6
|
[
"MIT"
] | 6
|
2018-06-02T16:20:47.000Z
|
2021-12-30T22:26:54.000Z
|
# (hack) Global configs
conf_cache_disk = True
conf_cache_memory = True
conf_cache_fails = False
ignoredAssets = []
| 14.75
| 24
| 0.771186
|
conf_cache_disk = True
conf_cache_memory = True
conf_cache_fails = False
ignoredAssets = []
| true
| true
|
f71992c33b60881673856eebed695c0f089619b3
| 8,381
|
py
|
Python
|
adwords_python3_examples_10.1.0/v201802/shopping/add_product_partition_tree.py
|
xyla-io/hazel
|
260ce906761d8b808c21ca61b44cc71ca3329e8c
|
[
"MIT"
] | null | null | null |
adwords_python3_examples_10.1.0/v201802/shopping/add_product_partition_tree.py
|
xyla-io/hazel
|
260ce906761d8b808c21ca61b44cc71ca3329e8c
|
[
"MIT"
] | null | null | null |
adwords_python3_examples_10.1.0/v201802/shopping/add_product_partition_tree.py
|
xyla-io/hazel
|
260ce906761d8b808c21ca61b44cc71ca3329e8c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates a ProductPartition tree.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import adwords
ADGROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
class ProductPartitionHelper(object):
"""A helper for creating ProductPartition trees."""
def __init__(self, adgroup_id):
"""Initializer.
Args:
adgroup_id: The ID of the AdGroup that we wish to attach the partition
tree to.
"""
# The next temporary criterion ID to be used.
# When creating our tree we need to specify the parent-child relationships
# between nodes. However, until a criterion has been created on the server
# we do not have a criterion ID with which to refer to it.
# Instead we can specify temporary IDs that are specific to a single mutate
# request. Once the criteria have been created they are assigned an ID as
# normal and the temporary ID will no longer refer to it.
# A valid temporary ID is any negative integer.
self.next_id = -1
# The set of mutate operations needed to create the current tree.
self.operations = []
self.adgroup_id = adgroup_id
def CreateSubdivision(self, parent=None, value=None):
"""Creates a subdivision node.
Args:
parent: The node that should be this node's parent.
value: The value being partitioned on.
Returns:
A new subdivision node.
"""
division = {
'xsi_type': 'ProductPartition',
'partitionType': 'SUBDIVISION',
'id': str(self.next_id)
}
# The root has neither a parent nor a value.
if parent is not None:
division['parentCriterionId'] = parent['id']
division['caseValue'] = value
adgroup_criterion = {
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': self.adgroup_id,
'criterion': division
}
self.CreateAddOperation(adgroup_criterion)
self.next_id -= 1
return division
def CreateUnit(self, parent=None, value=None, bid_amount=None):
"""Creates a unit node.
Args:
parent: The node that should be this node's parent.
value: The value being partitioned on.
bid_amount: The amount to bid for matching products, in micros.
Returns:
A new unit node.
"""
unit = {
'xsi_type': 'ProductPartition',
'partitionType': 'UNIT'
}
# The root node has neither a parent nor a value.
if parent is not None:
unit['parentCriterionId'] = parent['id']
unit['caseValue'] = value
if bid_amount is not None and bid_amount > 0:
bidding_strategy_configuration = {
'bids': [{
'xsi_type': 'CpcBid',
'bid': {
'xsi_type': 'Money',
'microAmount': str(bid_amount)
}
}]
}
adgroup_criterion = {
'xsi_type': 'BiddableAdGroupCriterion',
'biddingStrategyConfiguration': bidding_strategy_configuration
}
else:
adgroup_criterion = {
'xsi_type': 'NegativeAdGroupCriterion'
}
adgroup_criterion['adGroupId'] = self.adgroup_id
adgroup_criterion['criterion'] = unit
self.CreateAddOperation(adgroup_criterion)
return unit
def GetOperations(self):
"""Returns the set of mutate operations needed to create the current tree.
Returns:
The set of operations
"""
return self.operations
def CreateAddOperation(self, criterion):
"""Creates an AdGroupCriterionOperation for the given criterion.
Args:
criterion: The criterion we want to add.
"""
operation = {
'operator': 'ADD',
'operand': criterion
}
self.operations.append(operation)
def main(client, adgroup_id):
"""Runs the example."""
adgroup_criterion_service = client.GetService(
'AdGroupCriterionService', version='v201802')
helper = ProductPartitionHelper(adgroup_id)
# The most trivial partition tree has only a unit node as the root, e.g.:
# helper.CreateUnit(bid_amount=100000)
root = helper.CreateSubdivision()
new_product_canonical_condition = {
'xsi_type': 'ProductCanonicalCondition',
'condition': 'NEW'
}
used_product_canonical_condition = {
'xsi_type': 'ProductCanonicalCondition',
'condition': 'USED'
}
other_product_canonical_condition = {
'xsi_type': 'ProductCanonicalCondition',
}
helper.CreateUnit(root, new_product_canonical_condition, 200000)
helper.CreateUnit(root, used_product_canonical_condition, 100000)
other_condition = helper.CreateSubdivision(
root, other_product_canonical_condition)
cool_product_brand = {
'xsi_type': 'ProductBrand',
'value': 'CoolBrand'
}
cheap_product_brand = {
'xsi_type': 'ProductBrand',
'value': 'CheapBrand'
}
other_product_brand = {
'xsi_type': 'ProductBrand',
}
helper.CreateUnit(other_condition, cool_product_brand, 900000)
helper.CreateUnit(other_condition, cheap_product_brand, 10000)
other_brand = helper.CreateSubdivision(other_condition, other_product_brand)
# The value for the bidding category is a fixed ID for the 'Luggage & Bags'
# category. You can retrieve IDs for categories from the ConstantDataService.
# See the 'GetProductTaxonomy' example for more details.
luggage_category = {
'xsi_type': 'ProductBiddingCategory',
'type': 'BIDDING_CATEGORY_L1',
'value': '-5914235892932915235'
}
generic_category = {
'xsi_type': 'ProductBiddingCategory',
'type': 'BIDDING_CATEGORY_L1',
}
helper.CreateUnit(other_brand, luggage_category, 750000)
helper.CreateUnit(other_brand, generic_category, 110000)
# Make the mutate request
result = adgroup_criterion_service.mutate(helper.GetOperations())
children = {}
root_node = None
# For each criterion, make an array containing each of its children.
# We always create the parent before the child, so we can rely on that here.
for adgroup_criterion in result['value']:
children[adgroup_criterion['criterion']['id']] = []
if 'parentCriterionId' in adgroup_criterion['criterion']:
children[adgroup_criterion['criterion']['parentCriterionId']].append(
adgroup_criterion['criterion'])
else:
root_node = adgroup_criterion['criterion']
# Show the tree
DisplayTree(root_node, children)
def DisplayTree(node, children, level=0):
"""Recursively display a node and each of its children.
Args:
node: The node we're displaying the children of.
children: Children of the parent node.
level: How deep in the tree we are.
"""
value = ''
node_type = ''
if 'caseValue' in node:
case_value = node['caseValue']
node_type = case_value['ProductDimension.Type']
if node_type == 'ProductCanonicalCondition':
value = (case_value['condition'] if 'condition' in case_value
else 'OTHER')
elif node_type == 'ProductBiddingCategory':
value = '%s(%s)' % (case_value['type'], case_value['value']
if 'value' in case_value else 'OTHER')
else:
value = (case_value['value'] if 'value' in case_value else 'OTHER')
print(('%sid: %s, node_type: %s, value: %s\n'
% (' ' * level, node['id'], node_type, value)))
for child_node in children[node['id']]:
DisplayTree(child_node, children, level + 1)
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, ADGROUP_ID)
| 29.932143
| 79
| 0.681064
|
from googleads import adwords
ADGROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
class ProductPartitionHelper(object):
def __init__(self, adgroup_id):
self.next_id = -1
self.operations = []
self.adgroup_id = adgroup_id
def CreateSubdivision(self, parent=None, value=None):
division = {
'xsi_type': 'ProductPartition',
'partitionType': 'SUBDIVISION',
'id': str(self.next_id)
}
if parent is not None:
division['parentCriterionId'] = parent['id']
division['caseValue'] = value
adgroup_criterion = {
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': self.adgroup_id,
'criterion': division
}
self.CreateAddOperation(adgroup_criterion)
self.next_id -= 1
return division
def CreateUnit(self, parent=None, value=None, bid_amount=None):
unit = {
'xsi_type': 'ProductPartition',
'partitionType': 'UNIT'
}
if parent is not None:
unit['parentCriterionId'] = parent['id']
unit['caseValue'] = value
if bid_amount is not None and bid_amount > 0:
bidding_strategy_configuration = {
'bids': [{
'xsi_type': 'CpcBid',
'bid': {
'xsi_type': 'Money',
'microAmount': str(bid_amount)
}
}]
}
adgroup_criterion = {
'xsi_type': 'BiddableAdGroupCriterion',
'biddingStrategyConfiguration': bidding_strategy_configuration
}
else:
adgroup_criterion = {
'xsi_type': 'NegativeAdGroupCriterion'
}
adgroup_criterion['adGroupId'] = self.adgroup_id
adgroup_criterion['criterion'] = unit
self.CreateAddOperation(adgroup_criterion)
return unit
def GetOperations(self):
return self.operations
def CreateAddOperation(self, criterion):
operation = {
'operator': 'ADD',
'operand': criterion
}
self.operations.append(operation)
def main(client, adgroup_id):
adgroup_criterion_service = client.GetService(
'AdGroupCriterionService', version='v201802')
helper = ProductPartitionHelper(adgroup_id)
root = helper.CreateSubdivision()
new_product_canonical_condition = {
'xsi_type': 'ProductCanonicalCondition',
'condition': 'NEW'
}
used_product_canonical_condition = {
'xsi_type': 'ProductCanonicalCondition',
'condition': 'USED'
}
other_product_canonical_condition = {
'xsi_type': 'ProductCanonicalCondition',
}
helper.CreateUnit(root, new_product_canonical_condition, 200000)
helper.CreateUnit(root, used_product_canonical_condition, 100000)
other_condition = helper.CreateSubdivision(
root, other_product_canonical_condition)
cool_product_brand = {
'xsi_type': 'ProductBrand',
'value': 'CoolBrand'
}
cheap_product_brand = {
'xsi_type': 'ProductBrand',
'value': 'CheapBrand'
}
other_product_brand = {
'xsi_type': 'ProductBrand',
}
helper.CreateUnit(other_condition, cool_product_brand, 900000)
helper.CreateUnit(other_condition, cheap_product_brand, 10000)
other_brand = helper.CreateSubdivision(other_condition, other_product_brand)
luggage_category = {
'xsi_type': 'ProductBiddingCategory',
'type': 'BIDDING_CATEGORY_L1',
'value': '-5914235892932915235'
}
generic_category = {
'xsi_type': 'ProductBiddingCategory',
'type': 'BIDDING_CATEGORY_L1',
}
helper.CreateUnit(other_brand, luggage_category, 750000)
helper.CreateUnit(other_brand, generic_category, 110000)
result = adgroup_criterion_service.mutate(helper.GetOperations())
children = {}
root_node = None
for adgroup_criterion in result['value']:
children[adgroup_criterion['criterion']['id']] = []
if 'parentCriterionId' in adgroup_criterion['criterion']:
children[adgroup_criterion['criterion']['parentCriterionId']].append(
adgroup_criterion['criterion'])
else:
root_node = adgroup_criterion['criterion']
DisplayTree(root_node, children)
def DisplayTree(node, children, level=0):
value = ''
node_type = ''
if 'caseValue' in node:
case_value = node['caseValue']
node_type = case_value['ProductDimension.Type']
if node_type == 'ProductCanonicalCondition':
value = (case_value['condition'] if 'condition' in case_value
else 'OTHER')
elif node_type == 'ProductBiddingCategory':
value = '%s(%s)' % (case_value['type'], case_value['value']
if 'value' in case_value else 'OTHER')
else:
value = (case_value['value'] if 'value' in case_value else 'OTHER')
print(('%sid: %s, node_type: %s, value: %s\n'
% (' ' * level, node['id'], node_type, value)))
for child_node in children[node['id']]:
DisplayTree(child_node, children, level + 1)
if __name__ == '__main__':
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, ADGROUP_ID)
| true
| true
|
f71992ce83b2d3db02c5c551a3d398f75815bd4c
| 1,118
|
py
|
Python
|
tests/test_integration.py
|
vadim2404/pybox
|
3c4686245dca3d58afa5b923bcfede2172436bfd
|
[
"MIT"
] | null | null | null |
tests/test_integration.py
|
vadim2404/pybox
|
3c4686245dca3d58afa5b923bcfede2172436bfd
|
[
"MIT"
] | null | null | null |
tests/test_integration.py
|
vadim2404/pybox
|
3c4686245dca3d58afa5b923bcfede2172436bfd
|
[
"MIT"
] | null | null | null |
from pybox.inject import Inject, InjectLazy
from pybox.service import IService, ServiceMode
class SingletonService(IService):
def who_am_i(self):
print(f'Singleton {id(self)}')
class FactoryService(IService):
singleton = Inject(SingletonService)
@classmethod
def service_mode(self):
return ServiceMode.FACTORY
def who_am_i(self):
print(f'Factory {id(self)}')
class A:
singleton1 = Inject(SingletonService)
singleton2 = InjectLazy(SingletonService)
factory1 = Inject(FactoryService)
factory2 = InjectLazy(FactoryService)
def who_am_i(self):
print(f'A {id(self)}')
if __name__ == '__main__':
a = A()
assert a.singleton1 is a.singleton2
assert isinstance(a.singleton1, SingletonService)
assert isinstance(a.factory1, FactoryService)
assert isinstance(a.factory2, FactoryService)
assert a.factory1 is not a.factory2
a.factory1.who_am_i()
a.factory2.who_am_i()
a.singleton1.who_am_i()
a.singleton2.who_am_i()
a.factory1.singleton.who_am_i()
a.factory2.singleton.who_am_i()
a.who_am_i()
| 23.787234
| 53
| 0.701252
|
from pybox.inject import Inject, InjectLazy
from pybox.service import IService, ServiceMode
class SingletonService(IService):
def who_am_i(self):
print(f'Singleton {id(self)}')
class FactoryService(IService):
singleton = Inject(SingletonService)
@classmethod
def service_mode(self):
return ServiceMode.FACTORY
def who_am_i(self):
print(f'Factory {id(self)}')
class A:
singleton1 = Inject(SingletonService)
singleton2 = InjectLazy(SingletonService)
factory1 = Inject(FactoryService)
factory2 = InjectLazy(FactoryService)
def who_am_i(self):
print(f'A {id(self)}')
if __name__ == '__main__':
a = A()
assert a.singleton1 is a.singleton2
assert isinstance(a.singleton1, SingletonService)
assert isinstance(a.factory1, FactoryService)
assert isinstance(a.factory2, FactoryService)
assert a.factory1 is not a.factory2
a.factory1.who_am_i()
a.factory2.who_am_i()
a.singleton1.who_am_i()
a.singleton2.who_am_i()
a.factory1.singleton.who_am_i()
a.factory2.singleton.who_am_i()
a.who_am_i()
| true
| true
|
f719931b5d6abfb3ad9bbf8bcd7dabd34ac4e957
| 1,023
|
py
|
Python
|
stacked_queue/stack_queue.py
|
steveflys/data-structures-and-algorithms
|
9c89cb24449ca7bc09578408cba3c877fe74e000
|
[
"MIT"
] | null | null | null |
stacked_queue/stack_queue.py
|
steveflys/data-structures-and-algorithms
|
9c89cb24449ca7bc09578408cba3c877fe74e000
|
[
"MIT"
] | 3
|
2018-05-01T18:07:50.000Z
|
2018-05-11T16:52:16.000Z
|
stacked_queue/stack_queue.py
|
steveflys/data-structures-and-algorithms
|
9c89cb24449ca7bc09578408cba3c877fe74e000
|
[
"MIT"
] | null | null | null |
from .node import Node
from .stack import Stack
class Stack_Queue:
def __init__(self):
self.stack_front = Stack()
self.stack_back = Stack()
self._size = 0
def enqueue(self, val):
"""This will add a node the back of the queue and increment the ._size"""
try:
node = Node(val)
except TypeError:
            raise TypeError('Cannot enqueue a value of None')
node._next = self.stack_back.top
self.stack_back.top = node
self._size += 1
return self.stack_back.top
def dequeue(self):
"""remove the node at the front of the queue, decrement the ._size and return the value"""
while self.stack_back.top._next:
self.stack_front.push(self.stack_back.pop())
val = self.stack_back.pop()
while self.stack_front.top._next:
self.stack_back.push(self.stack_front.pop())
self.stack_back.push(self.stack_front.pop())
self._size -= 1
return val
| 25.575
| 98
| 0.605083
|
from .node import Node
from .stack import Stack
class Stack_Queue:
def __init__(self):
self.stack_front = Stack()
self.stack_back = Stack()
self._size = 0
def enqueue(self, val):
try:
node = Node(val)
except TypeError:
raise TypeError('Cannot enqueue a value of none')
node._next = self.stack_back.top
self.stack_back.top = node
self._size += 1
return self.stack_back.top
def dequeue(self):
while self.stack_back.top._next:
self.stack_front.push(self.stack_back.pop())
val = self.stack_back.pop()
while self.stack_front.top._next:
self.stack_back.push(self.stack_front.pop())
self.stack_back.push(self.stack_front.pop())
self._size -= 1
return val
| true
| true
|
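The Stack_Queue record above builds a FIFO queue out of two stacks; note that, depending on how the (unshown) Stack.pop handles an emptied stack, dequeueing the last remaining element reaches `stack_front.top._next` on a None top. A minimal sketch of the same two-stack idea, using plain Python lists as stand-in stacks (illustration only, not the project's Node/Stack classes):
class TwoStackQueue:
    def __init__(self):
        self._in = []   # receives enqueued values
        self._out = []  # serves values in FIFO order
    def enqueue(self, val):
        self._in.append(val)
    def dequeue(self):
        if not self._out:
            # reverse the inbox only when the outbox runs dry
            while self._in:
                self._out.append(self._in.pop())
        if not self._out:
            raise IndexError('dequeue from an empty queue')
        return self._out.pop()

if __name__ == '__main__':
    q = TwoStackQueue()
    for v in (1, 2, 3):
        q.enqueue(v)
    assert [q.dequeue(), q.dequeue(), q.dequeue()] == [1, 2, 3]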
f7199346c4d451ef333dfac98139b138cfe947b2
| 1,924
|
py
|
Python
|
_discord.py
|
blairg23/discord-scheduler-bot
|
bd6bcc25b51b50c9eeca195adefe5cfc2eab4923
|
[
"MIT"
] | null | null | null |
_discord.py
|
blairg23/discord-scheduler-bot
|
bd6bcc25b51b50c9eeca195adefe5cfc2eab4923
|
[
"MIT"
] | null | null | null |
_discord.py
|
blairg23/discord-scheduler-bot
|
bd6bcc25b51b50c9eeca195adefe5cfc2eab4923
|
[
"MIT"
] | null | null | null |
import discord
import asyncio
from datetime import datetime
class Discord:
_instance = None
client = None
def __new__(class_, *args, **kwargs):
if not isinstance(class_._instance, class_):
class_._instance = object.__new__(class_, *args, **kwargs)
return class_._instance
def __init__(self):
if self.client is None:
self.client = discord.Client()
if self.client is not None:
print("Discord bot pooling created successfully")
def get_client(self):
return self.client
async def send_message(self, channel, content="", embed=None):
'''
Just a wrapper for sending messages, so I don't have to deal with exceptions inside code
'''
try:
return await self.client.send_message(channel, content=content, embed=embed)
except Exception as e:
pass
#print("ERROR: cmonBruh (send_message) - "+ str(e) + " " + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
async def get_message(self, channel, id):
'''
Wrapper for getting a message to handle exceptions
'''
msg = None
try:
msg = await self.client.get_message(channel, id)
except Exception as e:
pass
#print("ERROR: SwiftStrike (get_message) - "+ str(e) + " " + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
return msg
async def edit_message(self, message, new_content=None, embed=None):
'''
Wrapper for editing a message to handle exceptions
'''
msg = None
try:
msg = await self.client.edit_message(message, new_content=new_content, embed=embed)
except Exception as e:
pass
#print("ERROR: :rage: (edit_message) - "+ str(e) + " " + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
return msg
| 34.357143
| 118
| 0.576403
|
import discord
import asyncio
from datetime import datetime
class Discord:
_instance = None
client = None
def __new__(class_, *args, **kwargs):
if not isinstance(class_._instance, class_):
class_._instance = object.__new__(class_, *args, **kwargs)
return class_._instance
def __init__(self):
if self.client is None:
self.client = discord.Client()
if self.client is not None:
print("Discord bot pooling created successfully")
def get_client(self):
return self.client
async def send_message(self, channel, content="", embed=None):
try:
return await self.client.send_message(channel, content=content, embed=embed)
except Exception as e:
pass
async def get_message(self, channel, id):
msg = None
try:
msg = await self.client.get_message(channel, id)
except Exception as e:
pass
return msg
async def edit_message(self, message, new_content=None, embed=None):
msg = None
try:
msg = await self.client.edit_message(message, new_content=new_content, embed=embed)
except Exception as e:
pass
return msg
| true
| true
|
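The Discord wrapper above keeps one shared client by overriding __new__. A standalone sketch of that same singleton pattern, free of the discord dependency (illustration only):
class Singleton:
    _instance = None
    def __new__(cls):
        if not isinstance(cls._instance, cls):
            cls._instance = super().__new__(cls)
        return cls._instance

if __name__ == '__main__':
    a = Singleton()
    b = Singleton()
    assert a is b  # repeated construction yields one shared instance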
f719938d7b8a9a714e3e8d344249a6a2588ede43
| 3,085
|
py
|
Python
|
src/app/routes.py
|
taishengG/jama-slack-integration
|
746b7186ceaf955ca81e9e0ad4862141ce35eb8d
|
[
"MIT"
] | null | null | null |
src/app/routes.py
|
taishengG/jama-slack-integration
|
746b7186ceaf955ca81e9e0ad4862141ce35eb8d
|
[
"MIT"
] | null | null | null |
src/app/routes.py
|
taishengG/jama-slack-integration
|
746b7186ceaf955ca81e9e0ad4862141ce35eb8d
|
[
"MIT"
] | null | null | null |
import os
import requests
import json
from flask import request, make_response
from app import app
from app import route_handler as rt_handle
"""
This module handles the "intake" of requests to the server.
The requests are then passed off to route_handler.py where arguments
are parsed and passed off to other packages for the different
functionalities: comment, create, search.
All verification for requests is made at this level.
Attributes:
base_url (String): Module level variable pulls in environment
variable (JAMA_URL). which is the url of the specified Jama
instance.
url_rule (String): Variable uses environment variable which stands
for the main/base url slug.
Example: URL_RULE="/jama"
"""
base_url = os.environ['JAMA_URL']
url_rule = os.environ['URL_RULE']
@app.route(url_rule + "/dialog", methods=['GET', 'PUT', 'POST'])
def jama_dialog():
"""API intake for dialog submissions from Slack.
Passes json payload off to route_handler, otherwise an error is
thrown.
Args:
None
Returns:
Response Class object
"""
if not rt_handle.verify_req(request):
return make_response("", 401)
print("DIALOG")
try:
submit_payload = json.loads(request.form['payload'])
return rt_handle.resolve_dialog_submit(base_url, submit_payload)
except Exception as err:
print(err)
return make_response("", 500)
@app.route(url_rule + '/menu', methods=['GET', 'PUT', 'POST'])
def jama_menu():
"""API intake to pass off dynamic dialog data to Slack.
Passes json payload off to route_handler, otherwise an error is
thrown.
Args:
None
Returns:
Response Class object
"""
if not rt_handle.verify_req(request):
return make_response("", 401)
print("MENU")
try:
submit_payload = json.loads(request.form["payload"])
return rt_handle.resolve_menu_req(base_url, submit_payload)
except Exception as err:
print(err)
return make_response("", 500)
@app.route(url_rule + '/bot', methods=['GET', 'PUT', 'POST'])
def jama_bot():
"""API intake to pass off slackbot data to Slack.
Passes json payload off to route_handler, otherwise an error is
thrown.
Args:
None
Returns:
Response Class object
"""
if not rt_handle.verify_req(request):
return make_response("", 401)
print("BOT")
try:
submit_payload = request.get_json()
return rt_handle.resolve_bot_req(base_url, submit_payload)
except Exception as err:
print(err)
return make_response("", 500)
@app.route(url_rule, methods=['GET', 'PUT', 'POST'])
def jama():
"""API intake to pass off dynamic dialog data to Slack.
Passes json payload off to route_handler, otherwise an error is
thrown.
Args:
None
Returns:
Response Class object
"""
if not rt_handle.verify_req(request):
return make_response("", 401)
return rt_handle.resolve_jama_req(base_url, request)
| 24.879032
| 72
| 0.666775
|
import os
import requests
import json
from flask import request, make_response
from app import app
from app import route_handler as rt_handle
base_url = os.environ['JAMA_URL']
url_rule = os.environ['URL_RULE']
@app.route(url_rule + "/dialog", methods=['GET', 'PUT', 'POST'])
def jama_dialog():
if not rt_handle.verify_req(request):
return make_response("", 401)
print("DIALOG")
try:
submit_payload = json.loads(request.form['payload'])
return rt_handle.resolve_dialog_submit(base_url, submit_payload)
except Exception as err:
print(err)
return make_response("", 500)
@app.route(url_rule + '/menu', methods=['GET', 'PUT', 'POST'])
def jama_menu():
if not rt_handle.verify_req(request):
return make_response("", 401)
print("MENU")
try:
submit_payload = json.loads(request.form["payload"])
return rt_handle.resolve_menu_req(base_url, submit_payload)
except Exception as err:
print(err)
return make_response("", 500)
@app.route(url_rule + '/bot', methods=['GET', 'PUT', 'POST'])
def jama_bot():
if not rt_handle.verify_req(request):
return make_response("", 401)
print("BOT")
try:
submit_payload = request.get_json()
return rt_handle.resolve_bot_req(base_url, submit_payload)
except Exception as err:
print(err)
return make_response("", 500)
@app.route(url_rule, methods=['GET', 'PUT', 'POST'])
def jama():
if not rt_handle.verify_req(request):
return make_response("", 401)
return rt_handle.resolve_jama_req(base_url, request)
| true
| true
|
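routes.py above defers request authentication to rt_handle.verify_req, whose implementation is not shown. One common approach for Slack integrations is the signed-secrets (v0) check; the sketch below assumes that scheme and uses only the standard library — whether verify_req does exactly this is an assumption.
import hashlib
import hmac
import time

def verify_slack_request(signing_secret, timestamp, signature, body):
    # timestamp comes from the X-Slack-Request-Timestamp header,
    # signature from X-Slack-Signature; body is the raw request body.
    if abs(time.time() - int(timestamp)) > 60 * 5:
        return False  # stale request; limits replay attacks
    base_string = "v0:{}:{}".format(timestamp, body)
    expected = "v0=" + hmac.new(signing_secret.encode(),
                                base_string.encode(),
                                hashlib.sha256).hexdigest()
    return hmac.compare_digest(expected, signature)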
f719944e384288656e4d709f07457f69d21c6a92
| 1,473
|
pyw
|
Python
|
Tkinter/tk5.pyw
|
Jav10/Python
|
b419a86825313b8ee537757079c95f3097f4dbad
|
[
"MIT"
] | null | null | null |
Tkinter/tk5.pyw
|
Jav10/Python
|
b419a86825313b8ee537757079c95f3097f4dbad
|
[
"MIT"
] | null | null | null |
Tkinter/tk5.pyw
|
Jav10/Python
|
b419a86825313b8ee537757079c95f3097f4dbad
|
[
"MIT"
] | null | null | null |
#GUI with Tkinter
#Author: Javier Arturo Hernández Sosa
#Date: 20/Sep/2017
#Description: Python course, FES Acatlán
from tkinter import *
#Function definitions
def suma():
r.set(x.get() + y.get())
def multi():
r.set(x.get() * y.get())
def resta():
r.set(x.get() - y.get())
def dividir():
r.set(x.get() / y.get())
#Root window
root = Tk()
#Root window configuration
root.geometry("300x300")
root.title("Botones y funciones")
root.config(bd=15)
#variables for widgets
x = DoubleVar()
y = DoubleVar()
r = StringVar()
#Input fields and result
numero1 = Entry(root,textvariable=x, justify="center")
numero2 =Entry(root,textvariable=y, justify="center")
resultado = Entry(root, textvariable=r, justify="center", state="disabled") #state="disabled" locks the widget
#Packing
numero1.grid(row=0,column=0,padx=5,pady=5)
numero2.grid(row=0,column=1,padx=5,pady=5)
resultado.grid(row=3,column=0, columnspan=2,padx=5,pady=5) #Span columns
#Buttons
sumar = Button(root, text="Sumar", command=suma) #buttons, command passes the callback function
sumar.grid(row=1,column=0,padx=5,pady=5)
multiplicar = Button(root, text="Multiplicar", command=multi)
multiplicar.grid(row=1,column=1,padx=5,pady=5)
restar = Button(root, text="Restar", command=resta)
restar.grid(row=2,column=0,padx=5,pady=5)
dividir = Button(root, text="Dividir", command=dividir)
dividir.grid(row=2,column=1,padx=5,pady=5)
#main loop
root.mainloop()
| 25.396552
| 108
| 0.696538
|
from tkinter import *
def suma():
r.set(x.get() + y.get())
def multi():
r.set(x.get() * y.get())
def resta():
r.set(x.get() - y.get())
def dividir():
r.set(x.get() / y.get())
root = Tk()
root.geometry("300x300")
root.title("Botones y funciones")
root.config(bd=15)
x = DoubleVar()
y = DoubleVar()
r = StringVar()
numero1 = Entry(root,textvariable=x, justify="center")
numero2 =Entry(root,textvariable=y, justify="center")
resultado = Entry(root, textvariable=r, justify="center", state="disabled")
numero1.grid(row=0,column=0,padx=5,pady=5)
numero2.grid(row=0,column=1,padx=5,pady=5)
resultado.grid(row=3,column=0, columnspan=2,padx=5,pady=5)
sumar = Button(root, text="Sumar", command=suma)
sumar.grid(row=1,column=0,padx=5,pady=5)
multiplicar = Button(root, text="Multiplicar", command=multi)
multiplicar.grid(row=1,column=1,padx=5,pady=5)
restar = Button(root, text="Restar", command=resta)
restar.grid(row=2,column=0,padx=5,pady=5)
dividir = Button(root, text="Dividir", command=dividir)
dividir.grid(row=2,column=1,padx=5,pady=5)
root.mainloop()
| true
| true
|
f719946d0d254ecdc9ccfe5fb6f0233c8c62eb2a
| 1,485
|
py
|
Python
|
src/data/dataset.py
|
zmcx16/ReclassifyAnimeCG
|
f5f95b229447564502564d9ffc7edf6215fec83d
|
[
"MIT"
] | 3
|
2021-10-30T10:13:40.000Z
|
2021-12-12T10:26:14.000Z
|
src/data/dataset.py
|
zmcx16/ReclassifyAnimeCG
|
f5f95b229447564502564d9ffc7edf6215fec83d
|
[
"MIT"
] | null | null | null |
src/data/dataset.py
|
zmcx16/ReclassifyAnimeCG
|
f5f95b229447564502564d9ffc7edf6215fec83d
|
[
"MIT"
] | null | null | null |
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
from PIL import Image
Image.MAX_IMAGE_PIXELS = None
from data import get_train_transform, get_test_transform
class CustomDataset(Dataset):
img_aug = True
imgs = []
transform = None
def __init__(self, label_file, image_set, input_size):
with open(label_file, 'r', encoding="utf-8") as f:
self.imgs = list(map(lambda line: line.strip().split('|'), f))
if image_set == 'train':
self.transform = get_train_transform(size=input_size)
else:
self.transform = get_test_transform(size=input_size)
self.input_size = input_size
def __getitem__(self, index):
# print(self.imgs)
# print(index)
# print(len(self.imgs[index]))
img_path, label = self.imgs[index]
# print(img_path)
img = Image.open(img_path).convert('RGB')
if self.img_aug:
img = self.transform(img)
else:
img = np.array(img)
img = torch.from_numpy(img)
return img, torch.from_numpy(np.array(int(label)))
def __len__(self):
return len(self.imgs)
def get_datasets_and_dataloader(label_path, image_set, batch_size, input_size):
_dataset = CustomDataset(label_path, image_set=image_set, input_size=input_size)
_dataloader = DataLoader(_dataset, batch_size=batch_size, shuffle=True, num_workers=2)
return _dataset, _dataloader
| 30.9375
| 90
| 0.658586
|
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
from PIL import Image
Image.MAX_IMAGE_PIXELS = None
from data import get_train_transform, get_test_transform
class CustomDataset(Dataset):
img_aug = True
imgs = []
transform = None
def __init__(self, label_file, image_set, input_size):
with open(label_file, 'r', encoding="utf-8") as f:
self.imgs = list(map(lambda line: line.strip().split('|'), f))
if image_set == 'train':
self.transform = get_train_transform(size=input_size)
else:
self.transform = get_test_transform(size=input_size)
self.input_size = input_size
def __getitem__(self, index):
img_path, label = self.imgs[index]
img = Image.open(img_path).convert('RGB')
if self.img_aug:
img = self.transform(img)
else:
img = np.array(img)
img = torch.from_numpy(img)
return img, torch.from_numpy(np.array(int(label)))
def __len__(self):
return len(self.imgs)
def get_datasets_and_dataloader(label_path, image_set, batch_size, input_size):
_dataset = CustomDataset(label_path, image_set=image_set, input_size=input_size)
_dataloader = DataLoader(_dataset, batch_size=batch_size, shuffle=True, num_workers=2)
return _dataset, _dataloader
| true
| true
|
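CustomDataset above reads one sample per line and splits on '|', so its label file is expected to look like image_path|label. A small sketch that writes such a file (the image paths and labels are hypothetical); the dataloader call is left as a comment because it needs torch, PIL, and the project's data package:
samples = [("images/cat_001.png", 0), ("images/dog_042.png", 1)]  # hypothetical paths and labels
with open("train_labels.txt", "w", encoding="utf-8") as f:
    for path, label in samples:
        f.write("{}|{}\n".format(path, label))
# _dataset, _dataloader = get_datasets_and_dataloader(
#     label_path="train_labels.txt", image_set="train",
#     batch_size=32, input_size=224)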
f71994d1600fc241664b82c32779973864dfe5a1
| 337
|
py
|
Python
|
AHtask2.py
|
Irinakene/AHtask
|
6f776477c6867b8f7650394aac1c3292bced8ca9
|
[
"MIT"
] | null | null | null |
AHtask2.py
|
Irinakene/AHtask
|
6f776477c6867b8f7650394aac1c3292bced8ca9
|
[
"MIT"
] | null | null | null |
AHtask2.py
|
Irinakene/AHtask
|
6f776477c6867b8f7650394aac1c3292bced8ca9
|
[
"MIT"
] | null | null | null |
import csv
name = input('Enter your name: ')
email = input('Enter your email: ')
phone = input('Enter your phone: ')
githublink = input('Enter your githublink: ')
save = input('Save to CSV? ')
if save == 'yes':
file = open('results.csv', 'a')
csv_writer = csv.writer(file)
csv_writer.writerow([name, githublink, email, phone])
| 22.466667
| 54
| 0.664688
|
import csv
name = input('Enter your name: ')
email = input('Enter your email: ')
phone = input('Enter your phone: ')
githublink = input('Enter your githublink: ')
save = input('Save to CSV? ')
if save == 'yes':
file = open('results.csv', 'a')
csv_writer = csv.writer(file)
csv_writer.writerow([name, githublink, email, phone])
| true
| true
|
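The script above opens results.csv without ever closing it, so the row may only be flushed when the interpreter exits. A sketch of the same append using a context manager (same field order and file name as the original):
import csv

def append_result(name, email, phone, githublink, path="results.csv"):
    with open(path, "a", newline="") as f:
        csv.writer(f).writerow([name, githublink, email, phone])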
f719957c1c4356a3f8209af000c59c417741c746
| 8,827
|
py
|
Python
|
parameter_sweep.py
|
yairchn/SCAMPy
|
a204b4220d722cf3dbf4e81997f8d2ed7a7324a9
|
[
"Apache-2.0"
] | 1
|
2018-08-23T21:53:01.000Z
|
2018-08-23T21:53:01.000Z
|
parameter_sweep.py
|
yairchn/SCAMPy
|
a204b4220d722cf3dbf4e81997f8d2ed7a7324a9
|
[
"Apache-2.0"
] | 1
|
2019-09-08T03:32:04.000Z
|
2019-09-08T03:32:04.000Z
|
parameter_sweep.py
|
yairchn/SCAMPy
|
a204b4220d722cf3dbf4e81997f8d2ed7a7324a9
|
[
"Apache-2.0"
] | 1
|
2018-08-23T21:53:14.000Z
|
2018-08-23T21:53:14.000Z
|
import subprocess
import argparse
import json
import pprint
from sys import exit
import uuid
import ast
import numpy as np
import netCDF4 as nc
import os
# python parameter_sweep.py case_name
def main():
parser = argparse.ArgumentParser(prog='Paramlist Generator')
parser.add_argument('case_name')
args = parser.parse_args()
case_name = args.case_name
file_case = open(case_name + '_sweep.in').read()
namelist = json.loads(file_case)
uuid = namelist['meta']['uuid']
print(uuid)
path = namelist['output']['output_root'] + 'Output.' + case_name + '.' + uuid[-5:] + '/stats/Stats.' + case_name + '.nc'
path1 = namelist['output']['output_root'] + 'Output.' + case_name + '.' + uuid[-5:] + '/paramlist_sweep.in'
tmax = namelist['time_stepping']['t_max']
#dt = namelist['time_stepping']['dt']
freq = namelist['stats_io']['frequency']
nz = namelist['grid']['nz']
nt = int(tmax/freq)+1
print nt
II=1
nvar = 11
sweep_var = np.linspace(0.7, 2.2, num=nvar)
#sweep_var = [0.05,0.06,0.07,0.08,0.09,0.1,0.11,0.12,0.13,0.14,0.15,0.16,0.17,0.18]
_z = np.zeros((nz))
_t = np.zeros((nt))
_lwp = np.zeros((nt,nvar))
_cloud_cover = np.zeros((nt,nvar))
_cloud_top = np.zeros((nt,nvar))
_cloud_base = np.zeros((nt,nvar))
_updraft_area = np.zeros((nt,nz,nvar))
_ql_mean = np.zeros((nt,nz,nvar))
_updraft_w = np.zeros((nt,nz,nvar))
_thetal_mean = np.zeros((nt,nz,nvar))
_massflux = np.zeros((nt, nz, nvar))
_buoyancy_mean = np.zeros((nt,nz,nvar))
_env_tke = np.zeros((nt,nz,nvar))
_updraft_thetal_precip = np.zeros((nt,nz,nvar))
_sweep_var = np.zeros(nvar)
for i in range(0,nvar):
sweep_var_i = sweep_var[i]
paramlist = sweep(sweep_var_i)
write_file(paramlist)
file_case = open('paramlist_sweep.in').read()
current = json.loads(file_case)
print('========================')
print('running '+case_name+' var = '+ str(sweep_var_i))
print('========================')
subprocess.call("python main.py " + case_name + "_sweep.in paramlist_sweep.in", shell=True)
data = nc.Dataset(path, 'r')
zz = data.groups['profiles'].variables['z']
tt = data.groups['profiles'].variables['t']
lwp_ = np.multiply(data.groups['timeseries'].variables['lwp'], 1.0)
cloud_cover_ = np.multiply(data.groups['timeseries'].variables['cloud_cover'],1.0)
cloud_top_ = np.multiply(data.groups['timeseries'].variables['cloud_top'],1.0)
cloud_base_ = np.multiply(data.groups['timeseries'].variables['cloud_base'],1.0)
updraft_area_ = np.multiply(data.groups['profiles'].variables['updraft_area'],1.0)
ql_mean_ = np.multiply(data.groups['profiles'].variables['ql_mean'],1.0)
updraft_w_ = np.multiply(data.groups['profiles'].variables['updraft_w'],1.0)
thetal_mean_ = np.multiply(data.groups['profiles'].variables['thetal_mean'],1.0)
massflux_ = np.multiply(data.groups['profiles'].variables['massflux'], 1.0)
buoyancy_mean_ = np.multiply(data.groups['profiles'].variables['buoyancy_mean'],1.0)
env_tke_ = np.multiply(data.groups['profiles'].variables['env_tke'],1.0)
updraft_thetal_precip_ = np.multiply(data.groups['profiles'].variables['updraft_thetal_precip'], 1.0)
print np.shape(lwp_)
try:
_lwp[:, II] = lwp_[0:nt]
_cloud_cover[:,II] = cloud_cover_[0:nt]
_cloud_top[:,II] = cloud_top_[0:nt]
_cloud_base[:,II] = cloud_base_[0:nt]
_t = tt[0:nt]
_z = zz
_updraft_area[:,:,II] = updraft_area_[0:nt,0:nz]
_ql_mean[:,:,II] = ql_mean_[0:nt,0:nz]
_updraft_w[:,:,II] = updraft_w_[0:nt,0:nz]
_thetal_mean[:,:,II] = thetal_mean_[0:nt,0:nz]
_massflux[:, :, II] = massflux_[0:nt, 0:nz]
_buoyancy_mean[:,:,II] = buoyancy_mean_[0:nt,0:nz]
_env_tke[:,:,II] = env_tke_[0:nt,0:nz]
_updraft_thetal_precip[:,:,II] = updraft_thetal_precip_[0:nt,0:nz]
_sweep_var[II] = sweep_var_i
II += 1
except:
pass
os.remove(path)
os.remove(path1)
destination = '/Users/yaircohen/Documents/SCAMPy_out/parameter_sweep/'
out_stats = nc.Dataset(destination + '/Stats.sweep_' + case_name + '.nc', 'w', format='NETCDF4')
grp_stats = out_stats.createGroup('profiles')
grp_stats.createDimension('z', nz)
grp_stats.createDimension('t', nt)
grp_stats.createDimension('var', II)
t = grp_stats.createVariable('t', 'f4', 't')
z = grp_stats.createVariable('z', 'f4', 'z')
var = grp_stats.createVariable('var', 'f4', 'var')
lwp = grp_stats.createVariable('lwp', 'f4', ('t', 'var'))
cloud_cover = grp_stats.createVariable('cloud_cover', 'f4', ('t', 'var'))
cloud_top = grp_stats.createVariable('cloud_top', 'f4', ('t', 'var'))
cloud_base = grp_stats.createVariable('cloud_base', 'f4', ('t', 'var'))
updraft_area = grp_stats.createVariable('updraft_area', 'f4', ('t', 'z','var'))
ql_mean = grp_stats.createVariable('ql_mean', 'f4', ('t', 'z', 'var'))
updraft_w = grp_stats.createVariable('updraft_w', 'f4', ('t', 'z', 'var'))
thetal_mean = grp_stats.createVariable('thetal_mean', 'f4', ('t', 'z', 'var'))
massflux = grp_stats.createVariable('massflux', 'f4', ('t', 'z', 'var'))
buoyancy_mean = grp_stats.createVariable('buoyancy_mean', 'f4', ('t', 'z', 'var'))
env_tke = grp_stats.createVariable('env_tke', 'f4', ('t', 'z', 'var'))
updraft_thetal_precip = grp_stats.createVariable('updraft_thetal_precip', 'f4', ('t', 'z', 'var'))
print '---------------------------------'
print np.shape(var)
print np.shape(_sweep_var)
print II
print '---------------------------------'
var[:] = _sweep_var[0:II]
print np.shape(_t)
print np.shape(t)
#t[:] = _t
#z[:] = _z
print '---------------------------------'
print np.shape(lwp)
print np.shape(_lwp)
print II
print '---------------------------------'
lwp[:,:] = _lwp[:,0:II]
cloud_cover[:,:] = _cloud_cover[:,0:II]
cloud_top[:,:] = _cloud_top[:,0:II]
cloud_base[:,:] = _cloud_base[:,0:II]
updraft_area[:,:,:] = _updraft_area[:,:,0:II]
ql_mean[:,:,:] = _ql_mean[:,:,0:II]
updraft_w[:,:,:] = _updraft_w[:,:,0:II]
massflux[:,:,:] = _massflux[:,:,0:II]
buoyancy_mean[:,:,:] = _buoyancy_mean[:,:,0:II]
env_tke[:,:,:] = _env_tke[:,:,0:II]
updraft_thetal_precip[:, :, :] = _updraft_thetal_precip[:,:,0:II]
out_stats.close()
print('========================')
print('======= SWEEP END ======')
print('========================')
def sweep(sweep_var_i): # vel_pressure_coeff_i
paramlist = {}
paramlist['meta'] = {}
paramlist['meta']['casename'] = 'sweep'
paramlist['turbulence'] = {}
paramlist['turbulence']['prandtl_number'] = 1.0
paramlist['turbulence']['Ri_bulk_crit'] = 0.0
paramlist['turbulence']['EDMF_PrognosticTKE'] = {}
paramlist['turbulence']['EDMF_PrognosticTKE']['surface_area'] = sweep_var_i
#paramlist['turbulence']['EDMF_PrognosticTKE']['surface_scalar_coeff'] = 0.1
paramlist['turbulence']['EDMF_PrognosticTKE']['tke_ed_coeff'] = 0.1
#paramlist['turbulence']['EDMF_PrognosticTKE']['w_entr_coeff'] = 0.5 # "b1"
#paramlist['turbulence']['EDMF_PrognosticTKE']['w_buoy_coeff'] = 0.5 # "b2"
paramlist['turbulence']['EDMF_PrognosticTKE']['tke_diss_coeff'] = 0.3
paramlist['turbulence']['EDMF_PrognosticTKE']['max_area_factor'] = 10.0
paramlist['turbulence']['EDMF_PrognosticTKE']['entrainment_factor'] = 1.0
paramlist['turbulence']['EDMF_PrognosticTKE']['detrainment_factor'] = 1.0
paramlist['turbulence']['EDMF_PrognosticTKE']['vel_pressure_coeff'] = 5e-5
paramlist['turbulence']['EDMF_PrognosticTKE']['vel_buoy_coeff'] = 0.6666666666666666
paramlist['turbulence']['EDMF_BulkSteady'] = {}
paramlist['turbulence']['EDMF_BulkSteady']['surface_area'] = 0.1
paramlist['turbulence']['EDMF_BulkSteady']['w_entr_coeff'] = 2.0
paramlist['turbulence']['EDMF_BulkSteady']['w_buoy_coeff'] = 1.0
paramlist['turbulence']['EDMF_BulkSteady']['max_area_factor'] = 1.0
paramlist['turbulence']['EDMF_BulkSteady']['entrainment_factor'] = 1.0
paramlist['turbulence']['EDMF_BulkSteady']['detrainment_factor'] = 1.0
paramlist['turbulence']['updraft_microphysics'] = {}
paramlist['turbulence']['updraft_microphysics']['max_supersaturation'] = 0.1
return paramlist
def write_file(paramlist):
fh = open('paramlist_'+paramlist['meta']['casename']+ '.in', 'w')
json.dump(paramlist, fh, sort_keys=True, indent=4)
fh.close()
return
if __name__ == '__main__':
main()
| 40.865741
| 124
| 0.60972
|
import subprocess
import argparse
import json
import pprint
from sys import exit
import uuid
import ast
import numpy as np
import netCDF4 as nc
import os
def main():
parser = argparse.ArgumentParser(prog='Paramlist Generator')
parser.add_argument('case_name')
args = parser.parse_args()
case_name = args.case_name
file_case = open(case_name + '_sweep.in').read()
namelist = json.loads(file_case)
uuid = namelist['meta']['uuid']
print(uuid)
path = namelist['output']['output_root'] + 'Output.' + case_name + '.' + uuid[-5:] + '/stats/Stats.' + case_name + '.nc'
path1 = namelist['output']['output_root'] + 'Output.' + case_name + '.' + uuid[-5:] + '/paramlist_sweep.in'
tmax = namelist['time_stepping']['t_max']
freq = namelist['stats_io']['frequency']
nz = namelist['grid']['nz']
nt = int(tmax/freq)+1
print nt
II=1
nvar = 11
sweep_var = np.linspace(0.7, 2.2, num=nvar)
_z = np.zeros((nz))
_t = np.zeros((nt))
_lwp = np.zeros((nt,nvar))
_cloud_cover = np.zeros((nt,nvar))
_cloud_top = np.zeros((nt,nvar))
_cloud_base = np.zeros((nt,nvar))
_updraft_area = np.zeros((nt,nz,nvar))
_ql_mean = np.zeros((nt,nz,nvar))
_updraft_w = np.zeros((nt,nz,nvar))
_thetal_mean = np.zeros((nt,nz,nvar))
_massflux = np.zeros((nt, nz, nvar))
_buoyancy_mean = np.zeros((nt,nz,nvar))
_env_tke = np.zeros((nt,nz,nvar))
_updraft_thetal_precip = np.zeros((nt,nz,nvar))
_sweep_var = np.zeros(nvar)
for i in range(0,nvar):
sweep_var_i = sweep_var[i]
paramlist = sweep(sweep_var_i)
write_file(paramlist)
file_case = open('paramlist_sweep.in').read()
current = json.loads(file_case)
print('========================')
print('running '+case_name+' var = '+ str(sweep_var_i))
print('========================')
subprocess.call("python main.py " + case_name + "_sweep.in paramlist_sweep.in", shell=True)
data = nc.Dataset(path, 'r')
zz = data.groups['profiles'].variables['z']
tt = data.groups['profiles'].variables['t']
lwp_ = np.multiply(data.groups['timeseries'].variables['lwp'], 1.0)
cloud_cover_ = np.multiply(data.groups['timeseries'].variables['cloud_cover'],1.0)
cloud_top_ = np.multiply(data.groups['timeseries'].variables['cloud_top'],1.0)
cloud_base_ = np.multiply(data.groups['timeseries'].variables['cloud_base'],1.0)
updraft_area_ = np.multiply(data.groups['profiles'].variables['updraft_area'],1.0)
ql_mean_ = np.multiply(data.groups['profiles'].variables['ql_mean'],1.0)
updraft_w_ = np.multiply(data.groups['profiles'].variables['updraft_w'],1.0)
thetal_mean_ = np.multiply(data.groups['profiles'].variables['thetal_mean'],1.0)
massflux_ = np.multiply(data.groups['profiles'].variables['massflux'], 1.0)
buoyancy_mean_ = np.multiply(data.groups['profiles'].variables['buoyancy_mean'],1.0)
env_tke_ = np.multiply(data.groups['profiles'].variables['env_tke'],1.0)
updraft_thetal_precip_ = np.multiply(data.groups['profiles'].variables['updraft_thetal_precip'], 1.0)
print np.shape(lwp_)
try:
_lwp[:, II] = lwp_[0:nt]
_cloud_cover[:,II] = cloud_cover_[0:nt]
_cloud_top[:,II] = cloud_top_[0:nt]
_cloud_base[:,II] = cloud_base_[0:nt]
_t = tt[0:nt]
_z = zz
_updraft_area[:,:,II] = updraft_area_[0:nt,0:nz]
_ql_mean[:,:,II] = ql_mean_[0:nt,0:nz]
_updraft_w[:,:,II] = updraft_w_[0:nt,0:nz]
_thetal_mean[:,:,II] = thetal_mean_[0:nt,0:nz]
_massflux[:, :, II] = massflux_[0:nt, 0:nz]
_buoyancy_mean[:,:,II] = buoyancy_mean_[0:nt,0:nz]
_env_tke[:,:,II] = env_tke_[0:nt,0:nz]
_updraft_thetal_precip[:,:,II] = updraft_thetal_precip_[0:nt,0:nz]
_sweep_var[II] = sweep_var_i
II += 1
except:
pass
os.remove(path)
os.remove(path1)
destination = '/Users/yaircohen/Documents/SCAMPy_out/parameter_sweep/'
out_stats = nc.Dataset(destination + '/Stats.sweep_' + case_name + '.nc', 'w', format='NETCDF4')
grp_stats = out_stats.createGroup('profiles')
grp_stats.createDimension('z', nz)
grp_stats.createDimension('t', nt)
grp_stats.createDimension('var', II)
t = grp_stats.createVariable('t', 'f4', 't')
z = grp_stats.createVariable('z', 'f4', 'z')
var = grp_stats.createVariable('var', 'f4', 'var')
lwp = grp_stats.createVariable('lwp', 'f4', ('t', 'var'))
cloud_cover = grp_stats.createVariable('cloud_cover', 'f4', ('t', 'var'))
cloud_top = grp_stats.createVariable('cloud_top', 'f4', ('t', 'var'))
cloud_base = grp_stats.createVariable('cloud_base', 'f4', ('t', 'var'))
updraft_area = grp_stats.createVariable('updraft_area', 'f4', ('t', 'z','var'))
ql_mean = grp_stats.createVariable('ql_mean', 'f4', ('t', 'z', 'var'))
updraft_w = grp_stats.createVariable('updraft_w', 'f4', ('t', 'z', 'var'))
thetal_mean = grp_stats.createVariable('thetal_mean', 'f4', ('t', 'z', 'var'))
massflux = grp_stats.createVariable('massflux', 'f4', ('t', 'z', 'var'))
buoyancy_mean = grp_stats.createVariable('buoyancy_mean', 'f4', ('t', 'z', 'var'))
env_tke = grp_stats.createVariable('env_tke', 'f4', ('t', 'z', 'var'))
updraft_thetal_precip = grp_stats.createVariable('updraft_thetal_precip', 'f4', ('t', 'z', 'var'))
print '---------------------------------'
print np.shape(var)
print np.shape(_sweep_var)
print II
print '---------------------------------'
var[:] = _sweep_var[0:II]
print np.shape(_t)
print np.shape(t)
print '---------------------------------'
print np.shape(lwp)
print np.shape(_lwp)
print II
print '---------------------------------'
lwp[:,:] = _lwp[:,0:II]
cloud_cover[:,:] = _cloud_cover[:,0:II]
cloud_top[:,:] = _cloud_top[:,0:II]
cloud_base[:,:] = _cloud_base[:,0:II]
updraft_area[:,:,:] = _updraft_area[:,:,0:II]
ql_mean[:,:,:] = _ql_mean[:,:,0:II]
updraft_w[:,:,:] = _updraft_w[:,:,0:II]
massflux[:,:,:] = _massflux[:,:,0:II]
buoyancy_mean[:,:,:] = _buoyancy_mean[:,:,0:II]
env_tke[:,:,:] = _env_tke[:,:,0:II]
updraft_thetal_precip[:, :, :] = _updraft_thetal_precip[:,:,0:II]
out_stats.close()
print('========================')
print('======= SWEEP END ======')
print('========================')
def sweep(sweep_var_i):
paramlist = {}
paramlist['meta'] = {}
paramlist['meta']['casename'] = 'sweep'
paramlist['turbulence'] = {}
paramlist['turbulence']['prandtl_number'] = 1.0
paramlist['turbulence']['Ri_bulk_crit'] = 0.0
paramlist['turbulence']['EDMF_PrognosticTKE'] = {}
paramlist['turbulence']['EDMF_PrognosticTKE']['surface_area'] = sweep_var_i
paramlist['turbulence']['EDMF_PrognosticTKE']['tke_ed_coeff'] = 0.1
paramlist['turbulence']['EDMF_PrognosticTKE']['tke_diss_coeff'] = 0.3
paramlist['turbulence']['EDMF_PrognosticTKE']['max_area_factor'] = 10.0
paramlist['turbulence']['EDMF_PrognosticTKE']['entrainment_factor'] = 1.0
paramlist['turbulence']['EDMF_PrognosticTKE']['detrainment_factor'] = 1.0
paramlist['turbulence']['EDMF_PrognosticTKE']['vel_pressure_coeff'] = 5e-5
paramlist['turbulence']['EDMF_PrognosticTKE']['vel_buoy_coeff'] = 0.6666666666666666
paramlist['turbulence']['EDMF_BulkSteady'] = {}
paramlist['turbulence']['EDMF_BulkSteady']['surface_area'] = 0.1
paramlist['turbulence']['EDMF_BulkSteady']['w_entr_coeff'] = 2.0
paramlist['turbulence']['EDMF_BulkSteady']['w_buoy_coeff'] = 1.0
paramlist['turbulence']['EDMF_BulkSteady']['max_area_factor'] = 1.0
paramlist['turbulence']['EDMF_BulkSteady']['entrainment_factor'] = 1.0
paramlist['turbulence']['EDMF_BulkSteady']['detrainment_factor'] = 1.0
paramlist['turbulence']['updraft_microphysics'] = {}
paramlist['turbulence']['updraft_microphysics']['max_supersaturation'] = 0.1
return paramlist
def write_file(paramlist):
fh = open('paramlist_'+paramlist['meta']['casename']+ '.in', 'w')
json.dump(paramlist, fh, sort_keys=True, indent=4)
fh.close()
return
if __name__ == '__main__':
main()
| false
| true
|
f71997563231cf56306173544d65e6f6f5c14345
| 27,571
|
py
|
Python
|
sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_client.py
|
ankitarorabit/azure-sdk-for-python
|
dd90281cbad9400f8080754a5ef2f56791a5a88f
|
[
"MIT"
] | null | null | null |
sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_client.py
|
ankitarorabit/azure-sdk-for-python
|
dd90281cbad9400f8080754a5ef2f56791a5a88f
|
[
"MIT"
] | 1
|
2021-05-31T08:56:01.000Z
|
2021-05-31T08:56:01.000Z
|
sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_client.py
|
ankitarorabit/azure-sdk-for-python
|
dd90281cbad9400f8080754a5ef2f56791a5a88f
|
[
"MIT"
] | null | null | null |
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from functools import partial
from azure.core.tracing.decorator import distributed_trace
from ._shared import KeyVaultClientBase
from ._shared.exceptions import error_map as _error_map
from ._shared._polling import DeleteRecoverPollingMethod, KeyVaultOperationPoller
from ._models import KeyVaultKey, KeyProperties, DeletedKey
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
if TYPE_CHECKING:
# pylint:disable=unused-import
from typing import Any, Optional, Union
from azure.core.paging import ItemPaged
from ._models import JsonWebKey
class KeyClient(KeyVaultClientBase):
"""A high-level interface for managing a vault's keys.
:param str vault_url: URL of the vault the client will access. This is also called the vault's "DNS Name".
:param credential: An object which can provide an access token for the vault, such as a credential from
:mod:`azure.identity`
:keyword api_version: version of the Key Vault API to use. Defaults to the most recent.
:paramtype api_version: ~azure.keyvault.keys.ApiVersion
:keyword transport: transport to use. Defaults to :class:`~azure.core.pipeline.transport.RequestsTransport`.
:paramtype transport: ~azure.core.pipeline.transport.HttpTransport
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START create_key_client]
:end-before: [END create_key_client]
:language: python
:caption: Create a new ``KeyClient``
:dedent: 4
"""
# pylint:disable=protected-access
@distributed_trace
def create_key(self, name, key_type, **kwargs):
# type: (str, Union[str, azure.keyvault.keys.KeyType], **Any) -> KeyVaultKey
"""Create a key or, if `name` is already in use, create a new version of the key.
Requires keys/create permission.
:param str name: The name of the new key.
:param key_type: The type of key to create
:type key_type: ~azure.keyvault.keys.KeyType or str
:keyword int size: Key size in bits. Applies only to RSA and symmetric keys. Consider using
:func:`create_rsa_key` or :func:`create_oct_key` instead.
:keyword curve: Elliptic curve name. Applies only to elliptic curve keys. Defaults to the NIST P-256
elliptic curve. To create an elliptic curve key, consider using :func:`create_ec_key` instead.
:paramtype curve: ~azure.keyvault.keys.KeyCurveName or str
:keyword int public_exponent: The RSA public exponent to use. Applies only to RSA keys created in a Managed HSM.
:keyword key_operations: Allowed key operations
:paramtype key_operations: list[~azure.keyvault.keys.KeyOperation or str]
:keyword bool enabled: Whether the key is enabled for use.
:keyword tags: Application specific metadata in the form of key-value pairs.
:paramtype tags: dict[str, str]
:keyword ~datetime.datetime not_before: Not before date of the key in UTC
:keyword ~datetime.datetime expires_on: Expiry date of the key in UTC
:returns: The created key
:rtype: ~azure.keyvault.keys.KeyVaultKey
:raises: :class:`~azure.core.exceptions.HttpResponseError`
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START create_key]
:end-before: [END create_key]
:language: python
:caption: Create a key
:dedent: 8
"""
enabled = kwargs.pop("enabled", None)
not_before = kwargs.pop("not_before", None)
expires_on = kwargs.pop("expires_on", None)
if enabled is not None or not_before is not None or expires_on is not None:
attributes = self._models.KeyAttributes(enabled=enabled, not_before=not_before, expires=expires_on)
else:
attributes = None
parameters = self._models.KeyCreateParameters(
kty=key_type,
key_size=kwargs.pop("size", None),
key_attributes=attributes,
key_ops=kwargs.pop("key_operations", None),
tags=kwargs.pop("tags", None),
curve=kwargs.pop("curve", None),
public_exponent=kwargs.pop("public_exponent", None)
)
bundle = self._client.create_key(
vault_base_url=self.vault_url,
key_name=name,
parameters=parameters,
error_map=_error_map,
**kwargs
)
return KeyVaultKey._from_key_bundle(bundle)
@distributed_trace
def create_rsa_key(self, name, **kwargs):
# type: (str, **Any) -> KeyVaultKey
"""Create a new RSA key or, if `name` is already in use, create a new version of the key
Requires the keys/create permission.
:param str name: The name for the new key.
:keyword int size: Key size in bits, for example 2048, 3072, or 4096.
:keyword int public_exponent: The RSA public exponent to use. Applies only to RSA keys created in a Managed HSM.
:keyword bool hardware_protected: Whether the key should be created in a hardware security module.
Defaults to ``False``.
:keyword key_operations: Allowed key operations
:paramtype key_operations: list[~azure.keyvault.keys.KeyOperation or str]
:keyword bool enabled: Whether the key is enabled for use.
:keyword tags: Application specific metadata in the form of key-value pairs.
:paramtype tags: dict[str, str]
:keyword ~datetime.datetime not_before: Not before date of the key in UTC
:keyword ~datetime.datetime expires_on: Expiry date of the key in UTC
:returns: The created key
:rtype: ~azure.keyvault.keys.KeyVaultKey
:raises: :class:`~azure.core.exceptions.HttpResponseError`
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START create_rsa_key]
:end-before: [END create_rsa_key]
:language: python
:caption: Create RSA key
:dedent: 8
"""
hsm = kwargs.pop("hardware_protected", False)
return self.create_key(name, key_type="RSA-HSM" if hsm else "RSA", **kwargs)
@distributed_trace
def create_ec_key(self, name, **kwargs):
# type: (str, **Any) -> KeyVaultKey
"""Create a new elliptic curve key or, if `name` is already in use, create a new version of the key.
Requires the keys/create permission.
:param str name: The name for the new key.
:keyword curve: Elliptic curve name. Defaults to the NIST P-256 elliptic curve.
:paramtype curve: ~azure.keyvault.keys.KeyCurveName or str
:keyword key_operations: Allowed key operations
:paramtype key_operations: list[~azure.keyvault.keys.KeyOperation or str]
:keyword bool hardware_protected: Whether the key should be created in a hardware security module.
Defaults to ``False``.
:keyword bool enabled: Whether the key is enabled for use.
:keyword tags: Application specific metadata in the form of key-value pairs.
:paramtype tags: dict[str, str]
:keyword ~datetime.datetime not_before: Not before date of the key in UTC
:keyword ~datetime.datetime expires_on: Expiry date of the key in UTC
:returns: The created key
:rtype: ~azure.keyvault.keys.KeyVaultKey
:raises: :class:`~azure.core.exceptions.HttpResponseError`
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START create_ec_key]
:end-before: [END create_ec_key]
:language: python
:caption: Create an elliptic curve key
:dedent: 8
"""
hsm = kwargs.pop("hardware_protected", False)
return self.create_key(name, key_type="EC-HSM" if hsm else "EC", **kwargs)
@distributed_trace
def create_oct_key(self, name, **kwargs):
# type: (str, **Any) -> KeyVaultKey
"""Create a new octet sequence (symmetric) key or, if `name` is already in use, create a new version of the key.
Requires the keys/create permission.
:param str name: The name for the new key.
:keyword int size: Key size in bits, for example 128, 192, or 256.
:keyword key_operations: Allowed key operations.
:paramtype key_operations: list[~azure.keyvault.keys.KeyOperation or str]
:keyword bool hardware_protected: Whether the key should be created in a hardware security module.
Defaults to ``False``.
:keyword bool enabled: Whether the key is enabled for use.
:keyword tags: Application specific metadata in the form of key-value pairs.
:paramtype tags: dict[str, str]
:keyword ~datetime.datetime not_before: Not before date of the key in UTC
:keyword ~datetime.datetime expires_on: Expiry date of the key in UTC
:returns: The created key
:rtype: ~azure.keyvault.keys.KeyVaultKey
:raises: :class:`~azure.core.exceptions.HttpResponseError`
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START create_oct_key]
:end-before: [END create_oct_key]
:language: python
:caption: Create an octet sequence (symmetric) key
:dedent: 8
"""
hsm = kwargs.pop("hardware_protected", False)
return self.create_key(name, key_type="oct-HSM" if hsm else "oct", **kwargs)
@distributed_trace
def begin_delete_key(self, name, **kwargs):
# type: (str, **Any) -> DeletedKey
"""Delete all versions of a key and its cryptographic material. Requires keys/delete permission.
When this method returns Key Vault has begun deleting the key. Deletion may take several seconds in a vault
with soft-delete enabled. This method therefore returns a poller enabling you to wait for deletion to complete.
:param str name: The name of the key to delete.
:returns: A poller for the delete key operation. The poller's `result` method returns the
:class:`~azure.keyvault.keys.DeletedKey` without waiting for deletion to complete. If the vault has
soft-delete enabled and you want to permanently delete the key with :func:`purge_deleted_key`, call the
poller's `wait` method first. It will block until the deletion is complete. The `wait` method requires
keys/get permission.
:rtype: ~azure.core.polling.LROPoller[~azure.keyvault.keys.DeletedKey]
:raises:
:class:`~azure.core.exceptions.ResourceNotFoundError` if the key doesn't exist,
:class:`~azure.core.exceptions.HttpResponseError` for other errors
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START delete_key]
:end-before: [END delete_key]
:language: python
:caption: Delete a key
:dedent: 8
"""
polling_interval = kwargs.pop("_polling_interval", None)
if polling_interval is None:
polling_interval = 2
deleted_key = DeletedKey._from_deleted_key_bundle(
self._client.delete_key(self.vault_url, name, error_map=_error_map, **kwargs)
)
command = partial(self.get_deleted_key, name=name, **kwargs)
polling_method = DeleteRecoverPollingMethod(
# no recovery ID means soft-delete is disabled, in which case we initialize the poller as finished
finished=deleted_key.recovery_id is None,
command=command,
final_resource=deleted_key,
interval=polling_interval,
)
return KeyVaultOperationPoller(polling_method)
@distributed_trace
def get_key(self, name, version=None, **kwargs):
# type: (str, Optional[str], **Any) -> KeyVaultKey
"""Get a key's attributes and, if it's an asymmetric key, its public material. Requires keys/get permission.
:param str name: The name of the key to get.
:param str version: (optional) A specific version of the key to get. If not specified, gets the latest version
of the key.
:rtype: ~azure.keyvault.keys.KeyVaultKey
:raises:
:class:`~azure.core.exceptions.ResourceNotFoundError` if the key doesn't exist,
:class:`~azure.core.exceptions.HttpResponseError` for other errors
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START get_key]
:end-before: [END get_key]
:language: python
:caption: Get a key
:dedent: 8
"""
bundle = self._client.get_key(self.vault_url, name, key_version=version or "", error_map=_error_map, **kwargs)
return KeyVaultKey._from_key_bundle(bundle)
@distributed_trace
def get_deleted_key(self, name, **kwargs):
# type: (str, **Any) -> DeletedKey
"""Get a deleted key. Possible only in a vault with soft-delete enabled. Requires keys/get permission.
:param str name: The name of the key
:returns: The deleted key
:rtype: ~azure.keyvault.keys.DeletedKey
:raises:
:class:`~azure.core.exceptions.ResourceNotFoundError` if the key doesn't exist,
:class:`~azure.core.exceptions.HttpResponseError` for other errors
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START get_deleted_key]
:end-before: [END get_deleted_key]
:language: python
:caption: Get a deleted key
:dedent: 8
"""
bundle = self._client.get_deleted_key(self.vault_url, name, error_map=_error_map, **kwargs)
return DeletedKey._from_deleted_key_bundle(bundle)
@distributed_trace
def list_deleted_keys(self, **kwargs):
# type: (**Any) -> ItemPaged[DeletedKey]
"""List all deleted keys, including the public part of each. Possible only in a vault with soft-delete enabled.
Requires keys/list permission.
:returns: An iterator of deleted keys
:rtype: ~azure.core.paging.ItemPaged[~azure.keyvault.keys.DeletedKey]
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START list_deleted_keys]
:end-before: [END list_deleted_keys]
:language: python
:caption: List all the deleted keys
:dedent: 8
"""
return self._client.get_deleted_keys(
self._vault_url,
maxresults=kwargs.pop("max_page_size", None),
cls=lambda objs: [DeletedKey._from_deleted_key_item(x) for x in objs],
error_map=_error_map,
**kwargs
)
@distributed_trace
def list_properties_of_keys(self, **kwargs):
# type: (**Any) -> ItemPaged[KeyProperties]
"""List identifiers and properties of all keys in the vault. Requires keys/list permission.
:returns: An iterator of keys without their cryptographic material or version information
:rtype: ~azure.core.paging.ItemPaged[~azure.keyvault.keys.KeyProperties]
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START list_keys]
:end-before: [END list_keys]
:language: python
:caption: List all keys
:dedent: 8
"""
return self._client.get_keys(
self._vault_url,
maxresults=kwargs.pop("max_page_size", None),
cls=lambda objs: [KeyProperties._from_key_item(x) for x in objs],
error_map=_error_map,
**kwargs
)
@distributed_trace
def list_properties_of_key_versions(self, name, **kwargs):
# type: (str, **Any) -> ItemPaged[KeyProperties]
"""List the identifiers and properties of a key's versions. Requires keys/list permission.
:param str name: The name of the key
:returns: An iterator of keys without their cryptographic material
:rtype: ~azure.core.paging.ItemPaged[~azure.keyvault.keys.KeyProperties]
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START list_properties_of_key_versions]
:end-before: [END list_properties_of_key_versions]
:language: python
:caption: List all versions of a key
:dedent: 8
"""
return self._client.get_key_versions(
self._vault_url,
name,
maxresults=kwargs.pop("max_page_size", None),
cls=lambda objs: [KeyProperties._from_key_item(x) for x in objs],
error_map=_error_map,
**kwargs
)
@distributed_trace
def purge_deleted_key(self, name, **kwargs):
# type: (str, **Any) -> None
"""Permanently deletes a deleted key. Only possible in a vault with soft-delete enabled.
Performs an irreversible deletion of the specified key, without
possibility for recovery. The operation is not available if the
:py:attr:`~azure.keyvault.keys.KeyProperties.recovery_level` does not specify 'Purgeable'.
This method is only necessary for purging a key before its
:py:attr:`~azure.keyvault.keys.DeletedKey.scheduled_purge_date`.
Requires keys/purge permission.
:param str name: The name of the deleted key to purge
:returns: None
:raises: :class:`~azure.core.exceptions.HttpResponseError`
Example:
.. code-block:: python
# if the vault has soft-delete enabled, purge permanently deletes a deleted key
# (with soft-delete disabled, begin_delete_key is permanent)
key_client.purge_deleted_key("key-name")
"""
self._client.purge_deleted_key(vault_base_url=self.vault_url, key_name=name, error_map=_error_map, **kwargs)
@distributed_trace
def begin_recover_deleted_key(self, name, **kwargs):
# type: (str, **Any) -> KeyVaultKey
"""Recover a deleted key to its latest version. Possible only in a vault with soft-delete enabled.
Requires keys/recover permission.
When this method returns Key Vault has begun recovering the key. Recovery may take several seconds. This
method therefore returns a poller enabling you to wait for recovery to complete. Waiting is only necessary when
you want to use the recovered key in another operation immediately.
:param str name: The name of the deleted key to recover
:returns: A poller for the recovery operation. The poller's `result` method returns the recovered
:class:`~azure.keyvault.keys.KeyVaultKey` without waiting for recovery to complete. If you want to use the
recovered key immediately, call the poller's `wait` method, which blocks until the key is ready to use. The
`wait` method requires keys/get permission.
:rtype: ~azure.core.polling.LROPoller[~azure.keyvault.keys.KeyVaultKey]
:raises: :class:`~azure.core.exceptions.HttpResponseError`
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START recover_deleted_key]
:end-before: [END recover_deleted_key]
:language: python
:caption: Recover a deleted key
:dedent: 8
"""
polling_interval = kwargs.pop("_polling_interval", None)
if polling_interval is None:
polling_interval = 2
recovered_key = KeyVaultKey._from_key_bundle(
self._client.recover_deleted_key(
vault_base_url=self.vault_url, key_name=name, error_map=_error_map, **kwargs
)
)
command = partial(self.get_key, name=name, **kwargs)
polling_method = DeleteRecoverPollingMethod(
finished=False, command=command, final_resource=recovered_key, interval=polling_interval,
)
return KeyVaultOperationPoller(polling_method)
@distributed_trace
def update_key_properties(self, name, version=None, **kwargs):
# type: (str, Optional[str], **Any) -> KeyVaultKey
"""Change a key's properties (not its cryptographic material). Requires keys/update permission.
:param str name: The name of key to update
:param str version: (optional) The version of the key to update. If unspecified, the latest version is updated.
:keyword key_operations: Allowed key operations
:paramtype key_operations: list[~azure.keyvault.keys.KeyOperation or str]
:keyword bool enabled: Whether the key is enabled for use.
:keyword tags: Application specific metadata in the form of key-value pairs.
:paramtype tags: dict[str, str]
:keyword ~datetime.datetime not_before: Not before date of the key in UTC
:keyword ~datetime.datetime expires_on: Expiry date of the key in UTC
:returns: The updated key
:rtype: ~azure.keyvault.keys.KeyVaultKey
:raises:
:class:`~azure.core.exceptions.ResourceNotFoundError` if the key doesn't exist,
:class:`~azure.core.exceptions.HttpResponseError` for other errors
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START update_key]
:end-before: [END update_key]
:language: python
:caption: Update a key's attributes
:dedent: 8
"""
enabled = kwargs.pop("enabled", None)
not_before = kwargs.pop("not_before", None)
expires_on = kwargs.pop("expires_on", None)
if enabled is not None or not_before is not None or expires_on is not None:
attributes = self._models.KeyAttributes(enabled=enabled, not_before=not_before, expires=expires_on)
else:
attributes = None
parameters = self._models.KeyUpdateParameters(
key_ops=kwargs.pop("key_operations", None),
key_attributes=attributes,
tags=kwargs.pop("tags", None)
)
bundle = self._client.update_key(
self.vault_url,
name,
key_version=version or "",
parameters=parameters,
error_map=_error_map,
**kwargs
)
return KeyVaultKey._from_key_bundle(bundle)
@distributed_trace
def backup_key(self, name, **kwargs):
# type: (str, **Any) -> bytes
"""Back up a key in a protected form useable only by Azure Key Vault. Requires keys/backup permission.
This is intended to allow copying a key from one vault to another. Both vaults must be owned by the same Azure
subscription. Also, backup / restore cannot be performed across geopolitical boundaries. For example, a backup
from a vault in a USA region cannot be restored to a vault in an EU region.
:param str name: The name of the key to back up
:rtype: bytes
:raises:
:class:`~azure.core.exceptions.ResourceNotFoundError` if the key doesn't exist,
:class:`~azure.core.exceptions.HttpResponseError` for other errors
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START backup_key]
:end-before: [END backup_key]
:language: python
:caption: Get a key backup
:dedent: 8
"""
backup_result = self._client.backup_key(self.vault_url, name, error_map=_error_map, **kwargs)
return backup_result.value
@distributed_trace
def restore_key_backup(self, backup, **kwargs):
# type: (bytes, **Any) -> KeyVaultKey
"""Restore a key backup to the vault. Requires keys/restore permission.
This imports all versions of the key, with its name, attributes, and access control policies. If the key's name
is already in use, restoring it will fail. Also, the target vault must be owned by the same Microsoft Azure
subscription as the source vault.
:param bytes backup: A key backup as returned by :func:`backup_key`
:returns: The restored key
:rtype: ~azure.keyvault.keys.KeyVaultKey
:raises:
:class:`~azure.core.exceptions.ResourceExistsError` if the backed up key's name is already in use,
:class:`~azure.core.exceptions.HttpResponseError` for other errors
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START restore_key_backup]
:end-before: [END restore_key_backup]
:language: python
:caption: Restore a key backup
:dedent: 8
"""
bundle = self._client.restore_key(
self.vault_url,
parameters=self._models.KeyRestoreParameters(key_bundle_backup=backup),
error_map=_error_map,
**kwargs
)
return KeyVaultKey._from_key_bundle(bundle)
@distributed_trace
def import_key(self, name, key, **kwargs):
# type: (str, JsonWebKey, **Any) -> KeyVaultKey
"""Import a key created externally. Requires keys/import permission.
If `name` is already in use, the key will be imported as a new version.
:param str name: Name for the imported key
:param key: The JSON web key to import
:type key: ~azure.keyvault.keys.JsonWebKey
:keyword bool hardware_protected: Whether the key should be backed by a hardware security module
:keyword bool enabled: Whether the key is enabled for use.
:keyword tags: Application specific metadata in the form of key-value pairs.
:paramtype tags: dict[str, str]
:keyword ~datetime.datetime not_before: Not before date of the key in UTC
:keyword ~datetime.datetime expires_on: Expiry date of the key in UTC
:returns: The imported key
:rtype: ~azure.keyvault.keys.KeyVaultKey
:raises: :class:`~azure.core.exceptions.HttpResponseError`
"""
enabled = kwargs.pop("enabled", None)
not_before = kwargs.pop("not_before", None)
expires_on = kwargs.pop("expires_on", None)
if enabled is not None or not_before is not None or expires_on is not None:
attributes = self._models.KeyAttributes(enabled=enabled, not_before=not_before, expires=expires_on)
else:
attributes = None
parameters = self._models.KeyImportParameters(
key=key._to_generated_model(),
key_attributes=attributes,
hsm=kwargs.pop("hardware_protected", None),
tags=kwargs.pop("tags", None)
)
bundle = self._client.import_key(
self.vault_url,
name,
parameters=parameters,
error_map=_error_map,
**kwargs
)
return KeyVaultKey._from_key_bundle(bundle)
| 45.875208
| 120
| 0.641507
|
from functools import partial
from azure.core.tracing.decorator import distributed_trace
from ._shared import KeyVaultClientBase
from ._shared.exceptions import error_map as _error_map
from ._shared._polling import DeleteRecoverPollingMethod, KeyVaultOperationPoller
from ._models import KeyVaultKey, KeyProperties, DeletedKey
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
if TYPE_CHECKING:
from typing import Any, Optional, Union
from azure.core.paging import ItemPaged
from ._models import JsonWebKey
class KeyClient(KeyVaultClientBase):
@distributed_trace
def create_key(self, name, key_type, **kwargs):
enabled = kwargs.pop("enabled", None)
not_before = kwargs.pop("not_before", None)
expires_on = kwargs.pop("expires_on", None)
if enabled is not None or not_before is not None or expires_on is not None:
attributes = self._models.KeyAttributes(enabled=enabled, not_before=not_before, expires=expires_on)
else:
attributes = None
parameters = self._models.KeyCreateParameters(
kty=key_type,
key_size=kwargs.pop("size", None),
key_attributes=attributes,
key_ops=kwargs.pop("key_operations", None),
tags=kwargs.pop("tags", None),
curve=kwargs.pop("curve", None),
public_exponent=kwargs.pop("public_exponent", None)
)
bundle = self._client.create_key(
vault_base_url=self.vault_url,
key_name=name,
parameters=parameters,
error_map=_error_map,
**kwargs
)
return KeyVaultKey._from_key_bundle(bundle)
@distributed_trace
def create_rsa_key(self, name, **kwargs):
hsm = kwargs.pop("hardware_protected", False)
return self.create_key(name, key_type="RSA-HSM" if hsm else "RSA", **kwargs)
@distributed_trace
def create_ec_key(self, name, **kwargs):
hsm = kwargs.pop("hardware_protected", False)
return self.create_key(name, key_type="EC-HSM" if hsm else "EC", **kwargs)
@distributed_trace
def create_oct_key(self, name, **kwargs):
hsm = kwargs.pop("hardware_protected", False)
return self.create_key(name, key_type="oct-HSM" if hsm else "oct", **kwargs)
@distributed_trace
def begin_delete_key(self, name, **kwargs):
polling_interval = kwargs.pop("_polling_interval", None)
if polling_interval is None:
polling_interval = 2
deleted_key = DeletedKey._from_deleted_key_bundle(
self._client.delete_key(self.vault_url, name, error_map=_error_map, **kwargs)
)
command = partial(self.get_deleted_key, name=name, **kwargs)
polling_method = DeleteRecoverPollingMethod(
finished=deleted_key.recovery_id is None,
command=command,
final_resource=deleted_key,
interval=polling_interval,
)
return KeyVaultOperationPoller(polling_method)
@distributed_trace
def get_key(self, name, version=None, **kwargs):
bundle = self._client.get_key(self.vault_url, name, key_version=version or "", error_map=_error_map, **kwargs)
return KeyVaultKey._from_key_bundle(bundle)
@distributed_trace
def get_deleted_key(self, name, **kwargs):
bundle = self._client.get_deleted_key(self.vault_url, name, error_map=_error_map, **kwargs)
return DeletedKey._from_deleted_key_bundle(bundle)
@distributed_trace
def list_deleted_keys(self, **kwargs):
return self._client.get_deleted_keys(
self._vault_url,
maxresults=kwargs.pop("max_page_size", None),
cls=lambda objs: [DeletedKey._from_deleted_key_item(x) for x in objs],
error_map=_error_map,
**kwargs
)
@distributed_trace
def list_properties_of_keys(self, **kwargs):
return self._client.get_keys(
self._vault_url,
maxresults=kwargs.pop("max_page_size", None),
cls=lambda objs: [KeyProperties._from_key_item(x) for x in objs],
error_map=_error_map,
**kwargs
)
@distributed_trace
def list_properties_of_key_versions(self, name, **kwargs):
return self._client.get_key_versions(
self._vault_url,
name,
maxresults=kwargs.pop("max_page_size", None),
cls=lambda objs: [KeyProperties._from_key_item(x) for x in objs],
error_map=_error_map,
**kwargs
)
@distributed_trace
def purge_deleted_key(self, name, **kwargs):
self._client.purge_deleted_key(vault_base_url=self.vault_url, key_name=name, error_map=_error_map, **kwargs)
@distributed_trace
def begin_recover_deleted_key(self, name, **kwargs):
polling_interval = kwargs.pop("_polling_interval", None)
if polling_interval is None:
polling_interval = 2
recovered_key = KeyVaultKey._from_key_bundle(
self._client.recover_deleted_key(
vault_base_url=self.vault_url, key_name=name, error_map=_error_map, **kwargs
)
)
command = partial(self.get_key, name=name, **kwargs)
polling_method = DeleteRecoverPollingMethod(
finished=False, command=command, final_resource=recovered_key, interval=polling_interval,
)
return KeyVaultOperationPoller(polling_method)
@distributed_trace
def update_key_properties(self, name, version=None, **kwargs):
enabled = kwargs.pop("enabled", None)
not_before = kwargs.pop("not_before", None)
expires_on = kwargs.pop("expires_on", None)
if enabled is not None or not_before is not None or expires_on is not None:
attributes = self._models.KeyAttributes(enabled=enabled, not_before=not_before, expires=expires_on)
else:
attributes = None
parameters = self._models.KeyUpdateParameters(
key_ops=kwargs.pop("key_operations", None),
key_attributes=attributes,
tags=kwargs.pop("tags", None)
)
bundle = self._client.update_key(
self.vault_url,
name,
key_version=version or "",
parameters=parameters,
error_map=_error_map,
**kwargs
)
return KeyVaultKey._from_key_bundle(bundle)
@distributed_trace
def backup_key(self, name, **kwargs):
backup_result = self._client.backup_key(self.vault_url, name, error_map=_error_map, **kwargs)
return backup_result.value
@distributed_trace
def restore_key_backup(self, backup, **kwargs):
bundle = self._client.restore_key(
self.vault_url,
parameters=self._models.KeyRestoreParameters(key_bundle_backup=backup),
error_map=_error_map,
**kwargs
)
return KeyVaultKey._from_key_bundle(bundle)
@distributed_trace
def import_key(self, name, key, **kwargs):
enabled = kwargs.pop("enabled", None)
not_before = kwargs.pop("not_before", None)
expires_on = kwargs.pop("expires_on", None)
if enabled is not None or not_before is not None or expires_on is not None:
attributes = self._models.KeyAttributes(enabled=enabled, not_before=not_before, expires=expires_on)
else:
attributes = None
parameters = self._models.KeyImportParameters(
key=key._to_generated_model(),
key_attributes=attributes,
hsm=kwargs.pop("hardware_protected", None),
tags=kwargs.pop("tags", None)
)
bundle = self._client.import_key(
self.vault_url,
name,
parameters=parameters,
error_map=_error_map,
**kwargs
)
return KeyVaultKey._from_key_bundle(bundle)
| true
| true
|
f7199764cac1f3e56cc1b5f43ff6f14fb40c8601
| 3,487
|
py
|
Python
|
tests/test_cookies.py
|
tripsolutions/pyramid_jwt
|
320ed080216971467ae5e12b1f9888b50a9a29b7
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_cookies.py
|
tripsolutions/pyramid_jwt
|
320ed080216971467ae5e12b1f9888b50a9a29b7
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_cookies.py
|
tripsolutions/pyramid_jwt
|
320ed080216971467ae5e12b1f9888b50a9a29b7
|
[
"BSD-2-Clause"
] | null | null | null |
import uuid
import pytest
from pyramid.interfaces import IAuthenticationPolicy
from webob import Request
from zope.interface.verify import verifyObject
from pyramid_jwt.policy import JWTCookieAuthenticationPolicy
@pytest.fixture(scope="module")
def principal():
return str(uuid.uuid4())
def test_interface():
verifyObject(IAuthenticationPolicy, JWTCookieAuthenticationPolicy("secret"))
def test_cookie(principal):
dummy_request = Request.blank("/")
policy = JWTCookieAuthenticationPolicy("secret")
token = policy.create_token(principal)
cookie = policy.remember(dummy_request, token).pop()
assert len(cookie) == 2
header, cookie = cookie
assert header == "Set-Cookie"
assert len(cookie) > 0
def test_cookie_name(principal):
dummy_request = Request.blank("/")
policy = JWTCookieAuthenticationPolicy("secret", cookie_name="auth")
token = policy.create_token(principal)
_, cookie = policy.remember(dummy_request, token).pop()
name, value = cookie.split("=", 1)
assert name == "auth"
def test_secure_cookie():
policy = JWTCookieAuthenticationPolicy("secret", https_only=True)
dummy_request = Request.blank("/")
token = policy.create_token(str(uuid.uuid4()))
_, cookie = policy.remember(dummy_request, token).pop()
assert "; secure;" in cookie
assert "; HttpOnly" in cookie
def test_insecure_cookie(principal):
dummy_request = Request.blank("/")
policy = JWTCookieAuthenticationPolicy("secret", https_only=False)
token = policy.create_token(principal)
_, cookie = policy.remember(dummy_request, token).pop()
assert "; secure;" not in cookie
assert "; HttpOnly" in cookie
def test_cookie_decode(principal):
dummy_request = Request.blank("/")
policy = JWTCookieAuthenticationPolicy("secret", https_only=False)
token = policy.create_token(principal)
header, cookie = policy.remember(dummy_request, token).pop()
name, value = cookie.split("=", 1)
value, _ = value.split(";", 1)
dummy_request.cookies = {name: value}
claims = policy.get_claims(dummy_request)
assert claims["sub"] == principal
def test_invalid_cookie_reissue(principal):
dummy_request = Request.blank("/")
policy = JWTCookieAuthenticationPolicy("secret", https_only=False, reissue_time=10)
token = "invalid value"
header, cookie = policy.remember(dummy_request, token).pop()
name, value = cookie.split("=", 1)
value, _ = value.split(";", 1)
dummy_request.cookies = {name: value}
claims = policy.get_claims(dummy_request)
assert not claims
def test_cookie_max_age(principal):
dummy_request = Request.blank("/")
policy = JWTCookieAuthenticationPolicy("secret", cookie_name="auth", expiration=100)
_, cookie = policy.remember(dummy_request, principal).pop()
_, value = cookie.split("=", 1)
_, meta = value.split(";", 1)
assert "Max-Age=100" in meta
assert "expires" in meta
@pytest.mark.freeze_time
def test_expired_token(principal, freezer):
dummy_request = Request.blank("/")
policy = JWTCookieAuthenticationPolicy("secret", cookie_name="auth", expiration=1)
token = policy.create_token(principal)
_, cookie = policy.remember(dummy_request, token).pop()
name, value = cookie.split("=", 1)
freezer.tick(delta=2)
value, _ = value.split(";", 1)
dummy_request.cookies = {name: value}
claims = policy.get_claims(dummy_request)
assert claims == {}
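A small end-to-end sketch of the cookie round trip exercised by these tests, using only the policy API already shown above (create_token, remember, get_claims); the secret and principal values are placeholders:

from webob import Request
from pyramid_jwt.policy import JWTCookieAuthenticationPolicy

policy = JWTCookieAuthenticationPolicy("secret", cookie_name="auth", https_only=False)
request = Request.blank("/")

token = policy.create_token("user-123")             # placeholder principal
_, cookie = policy.remember(request, token).pop()   # ("Set-Cookie", "auth=<jwt>; ...")
name, value = cookie.split("=", 1)
value, _ = value.split(";", 1)

# Simulate the browser sending the cookie back on the next request.
request.cookies = {name: value}
claims = policy.get_claims(request)
print(claims["sub"])  # -> "user-123"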
| 29.058333
| 88
| 0.706051
|
import uuid
import pytest
from pyramid.interfaces import IAuthenticationPolicy
from webob import Request
from zope.interface.verify import verifyObject
from pyramid_jwt.policy import JWTCookieAuthenticationPolicy
@pytest.fixture(scope="module")
def principal():
return str(uuid.uuid4())
def test_interface():
verifyObject(IAuthenticationPolicy, JWTCookieAuthenticationPolicy("secret"))
def test_cookie(principal):
dummy_request = Request.blank("/")
policy = JWTCookieAuthenticationPolicy("secret")
token = policy.create_token(principal)
cookie = policy.remember(dummy_request, token).pop()
assert len(cookie) == 2
header, cookie = cookie
assert header == "Set-Cookie"
assert len(cookie) > 0
def test_cookie_name(principal):
dummy_request = Request.blank("/")
policy = JWTCookieAuthenticationPolicy("secret", cookie_name="auth")
token = policy.create_token(principal)
_, cookie = policy.remember(dummy_request, token).pop()
name, value = cookie.split("=", 1)
assert name == "auth"
def test_secure_cookie():
policy = JWTCookieAuthenticationPolicy("secret", https_only=True)
dummy_request = Request.blank("/")
token = policy.create_token(str(uuid.uuid4()))
_, cookie = policy.remember(dummy_request, token).pop()
assert "; secure;" in cookie
assert "; HttpOnly" in cookie
def test_insecure_cookie(principal):
dummy_request = Request.blank("/")
policy = JWTCookieAuthenticationPolicy("secret", https_only=False)
token = policy.create_token(principal)
_, cookie = policy.remember(dummy_request, token).pop()
assert "; secure;" not in cookie
assert "; HttpOnly" in cookie
def test_cookie_decode(principal):
dummy_request = Request.blank("/")
policy = JWTCookieAuthenticationPolicy("secret", https_only=False)
token = policy.create_token(principal)
header, cookie = policy.remember(dummy_request, token).pop()
name, value = cookie.split("=", 1)
value, _ = value.split(";", 1)
dummy_request.cookies = {name: value}
claims = policy.get_claims(dummy_request)
assert claims["sub"] == principal
def test_invalid_cookie_reissue(principal):
dummy_request = Request.blank("/")
policy = JWTCookieAuthenticationPolicy("secret", https_only=False, reissue_time=10)
token = "invalid value"
header, cookie = policy.remember(dummy_request, token).pop()
name, value = cookie.split("=", 1)
value, _ = value.split(";", 1)
dummy_request.cookies = {name: value}
claims = policy.get_claims(dummy_request)
assert not claims
def test_cookie_max_age(principal):
dummy_request = Request.blank("/")
policy = JWTCookieAuthenticationPolicy("secret", cookie_name="auth", expiration=100)
_, cookie = policy.remember(dummy_request, principal).pop()
_, value = cookie.split("=", 1)
_, meta = value.split(";", 1)
assert "Max-Age=100" in meta
assert "expires" in meta
@pytest.mark.freeze_time
def test_expired_token(principal, freezer):
dummy_request = Request.blank("/")
policy = JWTCookieAuthenticationPolicy("secret", cookie_name="auth", expiration=1)
token = policy.create_token(principal)
_, cookie = policy.remember(dummy_request, token).pop()
name, value = cookie.split("=", 1)
freezer.tick(delta=2)
value, _ = value.split(";", 1)
dummy_request.cookies = {name: value}
claims = policy.get_claims(dummy_request)
assert claims == {}
| true
| true
|
f719982c32746d402b0277ba15a13000bcc77119
| 94
|
py
|
Python
|
my_classes/.history/ModulesPackages_PackageNamespaces/example3b/main_20210726185941.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
my_classes/.history/ModulesPackages_PackageNamespaces/example3b/main_20210726185941.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
my_classes/.history/ModulesPackages_PackageNamespaces/example3b/main_20210726185941.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
import sys
import importer
module1 = importer.import_('module1', 'module1_source.py', '.')
| 13.428571
| 63
| 0.723404
|
import sys
import importer
module1 = importer.import_('module1', 'module1_source.py', '.')
| true
| true
|
f719994b12c769c14062f52ec104eb9f369ef914
| 757
|
py
|
Python
|
Exercicios Loop/exercicio 35 - secao 06.py
|
cristinamais/exercicios_python
|
8a09b0b68ffaa62d13afb952998e890a79667c7e
|
[
"MIT"
] | null | null | null |
Exercicios Loop/exercicio 35 - secao 06.py
|
cristinamais/exercicios_python
|
8a09b0b68ffaa62d13afb952998e890a79667c7e
|
[
"MIT"
] | null | null | null |
Exercicios Loop/exercicio 35 - secao 06.py
|
cristinamais/exercicios_python
|
8a09b0b68ffaa62d13afb952998e890a79667c7e
|
[
"MIT"
] | null | null | null |
"""
35 - Faça um programa que some os números impares contidos em um intervalo definido pelo usuário.
O usuário define o valor inicial do intervalo e o valor final deste intervalo e o programa deve
somar todos os números ímpares contidos neste intervalo (começando por um valor maior que o valor final)
deve ser escrito uma mensagem de erro na tela, "Intervalo de valores inválido" e o programa termina.
Exemplo de tela de saída:
Digite o valor inicial e valor final: 5 10
Soma dos ímpares neste intervalo: 21
"""
impar = 0
inicial, final = [int(x) for x in input("Digite o valor inicial e valor final: ").split()]
for i in list(range(inicial, final)):
if i % 2 != 0:
impar = impar + i
print(f'A soma dos ímpares neste intervalo é {impar}')
| 42.055556
| 104
| 0.73712
|
impar = 0
inicial, final = [int(x) for x in input("Digite o valor inicial e valor final: ").split()]
for i in list(range(inicial, final)):
if i % 2 != 0:
impar = impar + i
print(f'A soma dos ímpares neste intervalo é {impar}')
| true
| true
|
f71999d547a46a0a1493f4a1de55c28d65419f04
| 421
|
py
|
Python
|
strava/cli/activity/commands.py
|
dparret/strava-cli
|
2426ea7f3fe4580aea352476b261cec31d3f0b11
|
[
"MIT"
] | null | null | null |
strava/cli/activity/commands.py
|
dparret/strava-cli
|
2426ea7f3fe4580aea352476b261cec31d3f0b11
|
[
"MIT"
] | null | null | null |
strava/cli/activity/commands.py
|
dparret/strava-cli
|
2426ea7f3fe4580aea352476b261cec31d3f0b11
|
[
"MIT"
] | null | null | null |
import click
from strava.commands import get_activity, get_constrain_activity, get_weekly_activity, get_lap_activity
@click.group(name='activity', help='[GROUP] Get the summary of one or multiple activities.')
def cli_activity():
pass
cli_activity.add_command(get_activity)
cli_activity.add_command(get_constrain_activity)
cli_activity.add_command(get_weekly_activity)
cli_activity.add_command(get_lap_activity)
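A hedged sketch of how a click group like this is typically mounted on a root CLI and exercised; the root group and the use of click's CliRunner are illustrative assumptions, not part of this module:

import click
from click.testing import CliRunner

# Hypothetical root group that the 'activity' group would hang off.
@click.group()
def cli():
    pass

cli.add_command(cli_activity)

runner = CliRunner()
result = runner.invoke(cli, ["activity", "--help"])
print(result.output)  # lists the registered activity subcommands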
| 28.066667
| 103
| 0.83848
|
import click
from strava.commands import get_activity, get_constrain_activity, get_weekly_activity, get_lap_activity
@click.group(name='activity', help='[GROUP] Get the summary of one or multiple activities.')
def cli_activity():
pass
cli_activity.add_command(get_activity)
cli_activity.add_command(get_constrain_activity)
cli_activity.add_command(get_weekly_activity)
cli_activity.add_command(get_lap_activity)
| true
| true
|
f7199aebd95eaaf673576198d3754ac18ebe3786
| 4,928
|
py
|
Python
|
3.Netdata_package/zipcontents/bin/netdata/usr/libexec/netdata/python.d/cpuidle.chart.py
|
NordicID/ar8x_samples
|
2ac78750d6f4ff924628d1e225990f4bfcecfda0
|
[
"MIT"
] | 4
|
2017-10-17T13:28:28.000Z
|
2020-12-23T09:46:10.000Z
|
3.Netdata_package/zipcontents/bin/netdata/usr/libexec/netdata/python.d/cpuidle.chart.py
|
NordicID/ar8x_samples
|
2ac78750d6f4ff924628d1e225990f4bfcecfda0
|
[
"MIT"
] | 8
|
2019-02-09T15:29:12.000Z
|
2021-03-15T17:45:49.000Z
|
3.Netdata_package/zipcontents/bin/netdata/usr/libexec/netdata/python.d/cpuidle.chart.py
|
NordicID/ar8x_samples
|
2ac78750d6f4ff924628d1e225990f4bfcecfda0
|
[
"MIT"
] | 3
|
2018-05-24T16:27:43.000Z
|
2019-08-04T23:39:22.000Z
|
# -*- coding: utf-8 -*-
# Description: cpuidle netdata python.d module
# Author: Steven Noonan (tycho)
import glob
import os
import platform
import time
from base import SimpleService
import ctypes
syscall = ctypes.CDLL('libc.so.6').syscall
# default module values (can be overridden per job in `config`)
# update_every = 2
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
prefix = os.getenv('NETDATA_HOST_PREFIX', "")
if prefix.endswith('/'):
prefix = prefix[:-1]
self.sys_dir = prefix + "/sys/devices/system/cpu"
self.schedstat_path = prefix + "/proc/schedstat"
SimpleService.__init__(self, configuration=configuration, name=name)
self.order = []
self.definitions = {}
self._orig_name = ""
self.assignment = {}
def __gettid(self):
# This is horrendous. We need the *thread id* (not the *process id*),
# but there's no Python standard library way of doing that. If you need
# to enable this module on a non-x86 machine type, you'll have to find
# the Linux syscall number for gettid() and add it to the dictionary
# below.
syscalls = {
'i386': 224,
'x86_64': 186,
}
if platform.machine() not in syscalls:
return None
tid = syscall(syscalls[platform.machine()])
return tid
def __wake_cpus(self):
# Requires Python 3.3+. This will "tickle" each CPU to force it to
# update its idle counters.
if hasattr(os, 'sched_setaffinity'):
pid = self.__gettid()
save_affinity = os.sched_getaffinity(pid)
for idx in range(0, len(self.assignment)):
os.sched_setaffinity(pid, [idx])
os.sched_getaffinity(pid)
os.sched_setaffinity(pid, save_affinity)
def __read_schedstat(self):
cpus = {}
for line in open(self.schedstat_path, 'r'):
if not line.startswith('cpu'):
continue
line = line.rstrip().split()
cpu = line[0]
active_time = line[7]
cpus[cpu] = int(active_time) // 1000
return cpus
def _get_data(self):
results = {}
# This line is critical for the stats to update. If we don't "tickle"
# all the CPUs, then all the counters stop counting.
self.__wake_cpus()
# Use the kernel scheduler stats to determine how much time was spent
# in C0 (active).
schedstat = self.__read_schedstat()
for cpu, metrics in self.assignment.items():
update_time = schedstat[cpu]
results[cpu + '_active_time'] = update_time
for metric, path in metrics.items():
residency = int(open(path, 'r').read())
results[metric] = residency
return results
def check(self):
if self.__gettid() is None:
self.error("Cannot get thread ID. Stats would be completely broken.")
return False
self._orig_name = self.chart_name
for path in sorted(glob.glob(self.sys_dir + '/cpu*/cpuidle/state*/name')):
# ['', 'sys', 'devices', 'system', 'cpu', 'cpu0', 'cpuidle', 'state3', 'name']
path_elem = path.split('/')
cpu = path_elem[-4]
state = path_elem[-2]
statename = open(path, 'rt').read().rstrip()
orderid = '%s_cpuidle' % (cpu,)
if orderid not in self.definitions:
self.order.append(orderid)
active_name = '%s_active_time' % (cpu,)
self.definitions[orderid] = {
'options': [None, 'C-state residency', 'time%', 'cpuidle', None, 'stacked'],
'lines': [
[active_name, 'C0 (active)', 'percentage-of-incremental-row', 1, 1],
],
}
self.assignment[cpu] = {}
defid = '%s_%s_time' % (orderid, state)
self.definitions[orderid]['lines'].append(
[defid, statename, 'percentage-of-incremental-row', 1, 1]
)
self.assignment[cpu][defid] = '/'.join(path_elem[:-1] + ['time'])
# Sort order by kernel-specified CPU index
self.order.sort(key=lambda x: int(x.split('_')[0][3:]))
if len(self.definitions) == 0:
self.error("couldn't find cstate stats")
return False
return True
def create(self):
self.chart_name = "cpu"
status = SimpleService.create(self)
self.chart_name = self._orig_name
return status
def update(self, interval):
self.chart_name = "cpu"
status = SimpleService.update(self, interval=interval)
self.chart_name = self._orig_name
return status
# vim: set ts=4 sts=4 sw=4 et:
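For reference, a standalone sketch of the sysfs reads the collector above performs: each cpuidle state directory exposes a 'name' file and a cumulative 'time' residency counter, at the same paths already globbed in check(); no netdata plumbing is assumed.

import glob
import os

for name_path in sorted(glob.glob('/sys/devices/system/cpu/cpu*/cpuidle/state*/name')):
    state_dir = os.path.dirname(name_path)
    cpu = state_dir.split('/')[-3]            # e.g. 'cpu0'
    state_name = open(name_path).read().strip()
    residency = int(open(os.path.join(state_dir, 'time')).read())
    print(cpu, state_name, residency)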
| 34.222222
| 96
| 0.565544
|
import glob
import os
import platform
import time
from base import SimpleService
import ctypes
syscall = ctypes.CDLL('libc.so.6').syscall
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
prefix = os.getenv('NETDATA_HOST_PREFIX', "")
if prefix.endswith('/'):
prefix = prefix[:-1]
self.sys_dir = prefix + "/sys/devices/system/cpu"
self.schedstat_path = prefix + "/proc/schedstat"
SimpleService.__init__(self, configuration=configuration, name=name)
self.order = []
self.definitions = {}
self._orig_name = ""
self.assignment = {}
def __gettid(self):
# to enable this module on a non-x86 machine type, you'll have to find
syscalls = {
'i386': 224,
'x86_64': 186,
}
if platform.machine() not in syscalls:
return None
tid = syscall(syscalls[platform.machine()])
return tid
def __wake_cpus(self):
if hasattr(os, 'sched_setaffinity'):
pid = self.__gettid()
save_affinity = os.sched_getaffinity(pid)
for idx in range(0, len(self.assignment)):
os.sched_setaffinity(pid, [idx])
os.sched_getaffinity(pid)
os.sched_setaffinity(pid, save_affinity)
def __read_schedstat(self):
cpus = {}
for line in open(self.schedstat_path, 'r'):
if not line.startswith('cpu'):
continue
line = line.rstrip().split()
cpu = line[0]
active_time = line[7]
cpus[cpu] = int(active_time) // 1000
return cpus
def _get_data(self):
results = {}
# all the CPUs, then all the counters stop counting.
self.__wake_cpus()
# Use the kernel scheduler stats to determine how much time was spent
# in C0 (active).
schedstat = self.__read_schedstat()
for cpu, metrics in self.assignment.items():
update_time = schedstat[cpu]
results[cpu + '_active_time'] = update_time
for metric, path in metrics.items():
residency = int(open(path, 'r').read())
results[metric] = residency
return results
def check(self):
if self.__gettid() is None:
self.error("Cannot get thread ID. Stats would be completely broken.")
return False
self._orig_name = self.chart_name
for path in sorted(glob.glob(self.sys_dir + '/cpu*/cpuidle/state*/name')):
# ['', 'sys', 'devices', 'system', 'cpu', 'cpu0', 'cpuidle', 'state3', 'name']
path_elem = path.split('/')
cpu = path_elem[-4]
state = path_elem[-2]
statename = open(path, 'rt').read().rstrip()
orderid = '%s_cpuidle' % (cpu,)
if orderid not in self.definitions:
self.order.append(orderid)
active_name = '%s_active_time' % (cpu,)
self.definitions[orderid] = {
'options': [None, 'C-state residency', 'time%', 'cpuidle', None, 'stacked'],
'lines': [
[active_name, 'C0 (active)', 'percentage-of-incremental-row', 1, 1],
],
}
self.assignment[cpu] = {}
defid = '%s_%s_time' % (orderid, state)
self.definitions[orderid]['lines'].append(
[defid, statename, 'percentage-of-incremental-row', 1, 1]
)
self.assignment[cpu][defid] = '/'.join(path_elem[:-1] + ['time'])
# Sort order by kernel-specified CPU index
self.order.sort(key=lambda x: int(x.split('_')[0][3:]))
if len(self.definitions) == 0:
self.error("couldn't find cstate stats")
return False
return True
def create(self):
self.chart_name = "cpu"
status = SimpleService.create(self)
self.chart_name = self._orig_name
return status
def update(self, interval):
self.chart_name = "cpu"
status = SimpleService.update(self, interval=interval)
self.chart_name = self._orig_name
return status
| true
| true
|
f7199b4c4ff664a5de4259b1a156f514807f75ec
| 358
|
py
|
Python
|
Ch6/picnic_table.py
|
dmdinh22/ATBS
|
3ddd331757cc434faa5f27997b178f8a39e3b5d2
|
[
"MIT"
] | null | null | null |
Ch6/picnic_table.py
|
dmdinh22/ATBS
|
3ddd331757cc434faa5f27997b178f8a39e3b5d2
|
[
"MIT"
] | null | null | null |
Ch6/picnic_table.py
|
dmdinh22/ATBS
|
3ddd331757cc434faa5f27997b178f8a39e3b5d2
|
[
"MIT"
] | null | null | null |
def print_picnic(itemsDict, leftWidth, rightWidth):
print('PICNIC ITEMS'.center(leftWidth + rightWidth, '-'))
for k, v in itemsDict.items():
print(k.ljust(leftWidth, '.') + str(v).rjust(rightWidth))
picnic_items = {'sandwiches': 4, 'apples': 12, 'cups': 4, 'cookies': 8000}
print_picnic(picnic_items, 12, 5)
print_picnic(picnic_items, 20, 6)
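For clarity, the rows produced by the first call, assuming Python 3.7+ dictionary insertion order: each key is left-justified to width 12 with '.' padding and each count right-justified to width 5.

# print_picnic(picnic_items, 12, 5) prints a '-'-centered 'PICNIC ITEMS' title line, then:
# sandwiches..    4
# apples......   12
# cups........    4
# cookies..... 8000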
| 44.75
| 74
| 0.684358
|
def print_picnic(itemsDict, leftWidth, rightWidth):
print('PICNIC ITEMS'.center(leftWidth + rightWidth, '-'))
for k, v in itemsDict.items():
print(k.ljust(leftWidth, '.') + str(v).rjust(rightWidth))
picnic_items = {'sandwiches': 4, 'apples': 12, 'cups': 4, 'cookies': 8000}
print_picnic(picnic_items, 12, 5)
print_picnic(picnic_items, 20, 6)
| true
| true
|
f7199b5ab43ce56280af5d2f042fc3bf18ea33f9
| 241
|
py
|
Python
|
examples/externalpyproc/test.py
|
scala-steward/prox
|
fdcab42cbdbe6a1cf4d9ffde796657d75dac6235
|
[
"Apache-2.0"
] | 95
|
2018-01-19T00:09:22.000Z
|
2022-02-05T15:22:59.000Z
|
examples/externalpyproc/test.py
|
scala-steward/prox
|
fdcab42cbdbe6a1cf4d9ffde796657d75dac6235
|
[
"Apache-2.0"
] | 312
|
2017-11-22T19:41:41.000Z
|
2022-03-30T13:31:06.000Z
|
examples/externalpyproc/test.py
|
scala-steward/prox
|
fdcab42cbdbe6a1cf4d9ffde796657d75dac6235
|
[
"Apache-2.0"
] | 6
|
2018-05-02T10:30:44.000Z
|
2020-10-17T17:06:11.000Z
|
import sys
def run():
stop = False
while not stop:
line = sys.stdin.readline().strip()
if len(line) == 0:
stop = True
else:
print line + "!?!?"
sys.stdout.flush()
run()
| 15.0625
| 43
| 0.452282
|
import sys
def run():
stop = False
while not stop:
line = sys.stdin.readline().strip()
if len(line) == 0:
stop = True
else:
print line + "!?!?"
sys.stdout.flush()
run()
| false
| true
|
f7199b6017b06f096a888ac161723abab17bf6d1
| 80
|
py
|
Python
|
notebooks/_solutions/13-raster-processing42.py
|
jorisvandenbossche/DS-python-geospatial
|
893a12edc5c203a75815f6dcb5f1e18c577c8cd5
|
[
"BSD-3-Clause"
] | 58
|
2020-10-09T10:10:59.000Z
|
2022-03-07T14:58:07.000Z
|
notebooks/_solutions/13-raster-processing42.py
|
jorisvandenbossche/DS-python-geospatial
|
893a12edc5c203a75815f6dcb5f1e18c577c8cd5
|
[
"BSD-3-Clause"
] | 24
|
2020-09-30T19:57:14.000Z
|
2021-10-05T07:21:09.000Z
|
notebooks/_solutions/13-raster-processing42.py
|
jorisvandenbossche/DS-python-geospatial
|
893a12edc5c203a75815f6dcb5f1e18c577c8cd5
|
[
"BSD-3-Clause"
] | 19
|
2020-10-05T09:32:18.000Z
|
2022-03-20T00:09:14.000Z
|
import geopandas

green = geopandas.read_file("data/gent/vector/parken-gent.geojson")
green.head()
| 40
| 67
| 0.7875
|
import geopandas

green = geopandas.read_file("data/gent/vector/parken-gent.geojson")
green.head()
| true
| true
|
f7199bd2f937de5095eb9d5c4cafe386b70039eb
| 1,325
|
py
|
Python
|
kale/util/ints.py
|
inan0812/kale-blockchain
|
1b502fe21a4be10b4db0171c3a7030079dcefa1b
|
[
"Apache-2.0"
] | null | null | null |
kale/util/ints.py
|
inan0812/kale-blockchain
|
1b502fe21a4be10b4db0171c3a7030079dcefa1b
|
[
"Apache-2.0"
] | null | null | null |
kale/util/ints.py
|
inan0812/kale-blockchain
|
1b502fe21a4be10b4db0171c3a7030079dcefa1b
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any, BinaryIO
from kale.util.struct_stream import StructStream
class int8(StructStream):
PACK = "!b"
class uint8(StructStream):
PACK = "!B"
class int16(StructStream):
PACK = "!h"
class uint16(StructStream):
PACK = "!H"
class int32(StructStream):
PACK = "!l"
class uint32(StructStream):
PACK = "!L"
class int64(StructStream):
PACK = "!q"
class uint64(StructStream):
PACK = "!Q"
class uint128(int):
@classmethod
def parse(cls, f: BinaryIO) -> Any:
read_bytes = f.read(16)
assert len(read_bytes) == 16
n = int.from_bytes(read_bytes, "big", signed=False)
assert n <= (2 ** 128) - 1 and n >= 0
return cls(n)
def stream(self, f):
assert self <= (2 ** 128) - 1 and self >= 0
f.write(self.to_bytes(16, "big", signed=False))
class int512(int):
# Uses 65 bytes to fit in the sign bit
@classmethod
def parse(cls, f: BinaryIO) -> Any:
read_bytes = f.read(65)
assert len(read_bytes) == 65
n = int.from_bytes(read_bytes, "big", signed=True)
assert n <= (2 ** 512) - 1 and n >= -(2 ** 512)
return cls(n)
def stream(self, f):
assert self <= (2 ** 512) - 1 and self >= -(2 ** 512)
f.write(self.to_bytes(65, "big", signed=True))
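A quick round-trip sketch using only the parse/stream API defined above; BytesIO stands in for the file or network stream these helpers normally target.

import io

buf = io.BytesIO()
uint128(2 ** 100).stream(buf)          # serialized as 16 big-endian bytes
buf.seek(0)
assert uint128.parse(buf) == 2 ** 100

buf = io.BytesIO()
int512(-(2 ** 300)).stream(buf)        # 65 bytes, sign preserved
buf.seek(0)
assert int512.parse(buf) == -(2 ** 300)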
| 20.384615
| 61
| 0.577358
|
from typing import Any, BinaryIO
from kale.util.struct_stream import StructStream
class int8(StructStream):
PACK = "!b"
class uint8(StructStream):
PACK = "!B"
class int16(StructStream):
PACK = "!h"
class uint16(StructStream):
PACK = "!H"
class int32(StructStream):
PACK = "!l"
class uint32(StructStream):
PACK = "!L"
class int64(StructStream):
PACK = "!q"
class uint64(StructStream):
PACK = "!Q"
class uint128(int):
@classmethod
def parse(cls, f: BinaryIO) -> Any:
read_bytes = f.read(16)
assert len(read_bytes) == 16
n = int.from_bytes(read_bytes, "big", signed=False)
assert n <= (2 ** 128) - 1 and n >= 0
return cls(n)
def stream(self, f):
assert self <= (2 ** 128) - 1 and self >= 0
f.write(self.to_bytes(16, "big", signed=False))
class int512(int):
@classmethod
def parse(cls, f: BinaryIO) -> Any:
read_bytes = f.read(65)
assert len(read_bytes) == 65
n = int.from_bytes(read_bytes, "big", signed=True)
assert n <= (2 ** 512) - 1 and n >= -(2 ** 512)
return cls(n)
def stream(self, f):
assert self <= (2 ** 512) - 1 and self >= -(2 ** 512)
f.write(self.to_bytes(65, "big", signed=True))
| true
| true
|
f7199cc541ada1d15fae75b62fc319d80df9c669
| 428
|
py
|
Python
|
src/upload/admin.py
|
bpilkerton/vendor-upload
|
ba43b620340c9fffd26cf6a8ee5bc9f97ffabda1
|
[
"Unlicense"
] | null | null | null |
src/upload/admin.py
|
bpilkerton/vendor-upload
|
ba43b620340c9fffd26cf6a8ee5bc9f97ffabda1
|
[
"Unlicense"
] | null | null | null |
src/upload/admin.py
|
bpilkerton/vendor-upload
|
ba43b620340c9fffd26cf6a8ee5bc9f97ffabda1
|
[
"Unlicense"
] | null | null | null |
from django.contrib import admin
from .models import Upload,VendorData
class UploadAdmin(admin.ModelAdmin):
list_display = ('id','uploaded_file','uploaded_date')
class VendordataAdmin(admin.ModelAdmin):
list_display = ('id','sub_id','first_name','last_name','status')
admin.site.site_header = "Subscription Fulfillment Upload"
admin.site.register(Upload, UploadAdmin)
admin.site.register(VendorData, VendordataAdmin)
| 32.923077
| 68
| 0.785047
|
from django.contrib import admin
from .models import Upload,VendorData
class UploadAdmin(admin.ModelAdmin):
list_display = ('id','uploaded_file','uploaded_date')
class VendordataAdmin(admin.ModelAdmin):
list_display = ('id','sub_id','first_name','last_name','status')
admin.site.site_header = "Subscription Fulfillment Upload"
admin.site.register(Upload, UploadAdmin)
admin.site.register(VendorData, VendordataAdmin)
| true
| true
|
f7199d3d3a6e51cfe86975c9d26b03a1bb377073
| 228
|
py
|
Python
|
Django Rest Class Based API view/Person/admin.py
|
abhisheksahu92/Django-Rest-Framework
|
45ddafb93ed1f2e232d2f537f144bf79cb30bf3d
|
[
"MIT"
] | null | null | null |
Django Rest Class Based API view/Person/admin.py
|
abhisheksahu92/Django-Rest-Framework
|
45ddafb93ed1f2e232d2f537f144bf79cb30bf3d
|
[
"MIT"
] | null | null | null |
Django Rest Class Based API view/Person/admin.py
|
abhisheksahu92/Django-Rest-Framework
|
45ddafb93ed1f2e232d2f537f144bf79cb30bf3d
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Person
# Register your models here.
@admin.register(Person)
class PersonModel(admin.ModelAdmin):
list_display = ['first_name','last_name','email','phone','date_of_birth']
| 32.571429
| 77
| 0.77193
|
from django.contrib import admin
from .models import Person
@admin.register(Person)
class PersonModel(admin.ModelAdmin):
list_display = ['first_name','last_name','email','phone','date_of_birth']
| true
| true
|
f7199de7d432eb5ce623737f74e8d53b751b22d7
| 9,267
|
py
|
Python
|
ckan/views/admin.py
|
robin-NEC/ckan
|
71a82c4b0bb499fd3a6d1ccfd038b2231f50f92a
|
[
"BSD-3-Clause"
] | 1
|
2021-10-01T12:47:19.000Z
|
2021-10-01T12:47:19.000Z
|
ckan/views/admin.py
|
robin-NEC/ckan
|
71a82c4b0bb499fd3a6d1ccfd038b2231f50f92a
|
[
"BSD-3-Clause"
] | null | null | null |
ckan/views/admin.py
|
robin-NEC/ckan
|
71a82c4b0bb499fd3a6d1ccfd038b2231f50f92a
|
[
"BSD-3-Clause"
] | 2
|
2018-01-21T17:03:08.000Z
|
2019-07-23T08:49:52.000Z
|
# encoding: utf-8
from __future__ import annotations
import logging
from typing import Any, Union, cast, List
from flask import Blueprint
from flask.views import MethodView
from flask.wrappers import Response
import ckan.lib.app_globals as app_globals
import ckan.lib.base as base
import ckan.lib.helpers as h
import ckan.lib.navl.dictization_functions as dict_fns
import ckan.logic as logic
import ckan.model as model
import ckan.logic.schema
from ckan.common import g, _, config, request
from ckan.views.home import CACHE_PARAMETERS
from ckan.types import Context, Query
log = logging.getLogger(__name__)
admin = Blueprint(u'admin', __name__, url_prefix=u'/ckan-admin')
def _get_sysadmins() -> "Query[model.User]":
q = model.Session.query(model.User).filter(
# type_ignore_reason: incomplete SQLAlchemy types
model.User.sysadmin.is_(True), # type: ignore
model.User.state == u'active')
return q
def _get_config_options() -> dict[str, list[dict[str, str]]]:
homepages = [{
u'value': u'1',
u'text': (u'Introductory area, search, featured'
u' group and featured organization')
}, {
u'value': u'2',
u'text': (u'Search, stats, introductory area, '
u'featured organization and featured group')
}, {
u'value': u'3',
u'text': u'Search, introductory area and stats'
}]
return dict(homepages=homepages)
def _get_config_items() -> list[str]:
return [
u'ckan.site_title', u'ckan.main_css', u'ckan.site_description',
u'ckan.site_logo', u'ckan.site_about', u'ckan.site_intro_text',
u'ckan.site_custom_css', u'ckan.homepage_style'
]
@admin.before_request
def before_request() -> None:
try:
context = cast(
Context,
{"model": model, "user": g.user, "auth_user_obj": g.userobj}
)
logic.check_access(u'sysadmin', context)
except logic.NotAuthorized:
base.abort(403, _(u'Need to be system administrator to administer'))
def index() -> str:
data = dict(sysadmins=[a.name for a in _get_sysadmins()])
return base.render(u'admin/index.html', extra_vars=data)
class ResetConfigView(MethodView):
def get(self) -> Union[str, Response]:
if u'cancel' in request.args:
return h.redirect_to(u'admin.config')
return base.render(u'admin/confirm_reset.html', extra_vars={})
def post(self) -> Response:
# remove sys info items
for item in _get_config_items():
model.delete_system_info(item)
# reset to values in config
app_globals.reset()
return h.redirect_to(u'admin.config')
class ConfigView(MethodView):
def get(self) -> str:
items = _get_config_options()
schema = ckan.logic.schema.update_configuration_schema()
data = {}
for key in schema:
data[key] = config.get(key)
vars: dict[str, Any] = dict(data=data, errors={}, **items)
return base.render(u'admin/config.html', extra_vars=vars)
def post(self) -> Union[str, Response]:
try:
req: dict[str, Any] = request.form.copy()
req.update(request.files.to_dict())
data_dict = logic.clean_dict(
dict_fns.unflatten(
logic.tuplize_dict(
logic.parse_params(req,
ignore_keys=CACHE_PARAMETERS))))
del data_dict['save']
data = logic.get_action(u'config_option_update')({
u'user': g.user
}, data_dict)
except logic.ValidationError as e:
items = _get_config_options()
data = request.form
errors = e.error_dict
error_summary = e.error_summary
vars = dict(data=data,
errors=errors,
error_summary=error_summary,
form_items=items,
**items)
return base.render(u'admin/config.html', extra_vars=vars)
return h.redirect_to(u'admin.config')
class TrashView(MethodView):
def __init__(self):
self.deleted_packages = self._get_deleted_datasets()
self.deleted_orgs = model.Session.query(model.Group).filter_by(
state=model.State.DELETED, is_organization=True)
self.deleted_groups = model.Session.query(model.Group).filter_by(
state=model.State.DELETED, is_organization=False)
self.deleted_entities = {
u'package': self.deleted_packages,
u'organization': self.deleted_orgs,
u'group': self.deleted_groups
}
self.messages = {
u'confirm': {
u'all': _(u'Are you sure you want to purge everything?'),
u'package': _(u'Are you sure you want to purge datasets?'),
u'organization':
_(u'Are you sure you want to purge organizations?'),
u'group': _(u'Are you sure you want to purge groups?')
},
u'success': {
u'package': _(u'{number} datasets have been purged'),
u'organization': _(u'{number} organizations have been purged'),
u'group': _(u'{number} groups have been purged')
},
u'empty': {
u'package': _(u'There are no datasets to purge'),
u'organization': _(u'There are no organizations to purge'),
u'group': _(u'There are no groups to purge')
}
}
def _get_deleted_datasets(
self
) -> Union["Query[model.Package]", List[Any]]:
if config.get_value('ckan.search.remove_deleted_packages'):
return self._get_deleted_datasets_from_db()
else:
return self._get_deleted_datasets_from_search_index()
def _get_deleted_datasets_from_db(self) -> "Query[model.Package]":
return model.Session.query(
model.Package
).filter_by(
state=model.State.DELETED
)
def _get_deleted_datasets_from_search_index(self) -> List[Any]:
package_search = logic.get_action('package_search')
search_params = {
'fq': '+state:deleted',
'include_private': True,
}
base_results = package_search(
{'ignore_auth': True},
search_params
)
return base_results['results']
def get(self) -> str:
ent_type = request.args.get(u'name')
if ent_type:
return base.render(u'admin/snippets/confirm_delete.html',
extra_vars={
u'ent_type': ent_type,
u'messages': self.messages})
data = dict(data=self.deleted_entities, messages=self.messages)
return base.render(u'admin/trash.html', extra_vars=data)
def post(self) -> Response:
if u'cancel' in request.form:
return h.redirect_to(u'admin.trash')
req_action = request.form.get(u'action', '')
if req_action == u'all':
self.purge_all()
elif req_action in (u'package', u'organization', u'group'):
self.purge_entity(req_action)
else:
h.flash_error(_(u'Action not implemented.'))
return h.redirect_to(u'admin.trash')
def purge_all(self):
actions = (u'dataset_purge', u'group_purge', u'organization_purge')
entities = (
self.deleted_packages,
self.deleted_groups,
self.deleted_orgs
)
for action, deleted_entities in zip(actions, entities):
for entity in deleted_entities:
ent_id = entity.id if hasattr(entity, 'id') \
else entity['id'] # type: ignore
logic.get_action(action)(
{u'user': g.user}, {u'id': ent_id}
)
model.Session.remove()
h.flash_success(_(u'Massive purge complete'))
def purge_entity(self, ent_type: str):
entities = self.deleted_entities[ent_type]
number = len(entities) if type(entities) == list else entities.count()
for ent in entities:
entity_id = ent.id if hasattr(ent, 'id') else ent['id']
logic.get_action(self._get_purge_action(ent_type))(
{u'user': g.user},
{u'id': entity_id}
)
model.Session.remove()
h.flash_success(self.messages[u'success'][ent_type].format(
number=number
))
@staticmethod
def _get_purge_action(ent_type: str) -> str:
actions = {
"package": "dataset_purge",
"organization": "organization_purge",
"group": "group_purge",
}
return actions[ent_type]
admin.add_url_rule(
u'/', view_func=index, methods=['GET'], strict_slashes=False
)
admin.add_url_rule(u'/reset_config',
view_func=ResetConfigView.as_view(str(u'reset_config')))
admin.add_url_rule(u'/config', view_func=ConfigView.as_view(str(u'config')))
admin.add_url_rule(u'/trash', view_func=TrashView.as_view(str(u'trash')))
| 33.698182
| 79
| 0.589403
|
from __future__ import annotations
import logging
from typing import Any, Union, cast, List
from flask import Blueprint
from flask.views import MethodView
from flask.wrappers import Response
import ckan.lib.app_globals as app_globals
import ckan.lib.base as base
import ckan.lib.helpers as h
import ckan.lib.navl.dictization_functions as dict_fns
import ckan.logic as logic
import ckan.model as model
import ckan.logic.schema
from ckan.common import g, _, config, request
from ckan.views.home import CACHE_PARAMETERS
from ckan.types import Context, Query
log = logging.getLogger(__name__)
admin = Blueprint(u'admin', __name__, url_prefix=u'/ckan-admin')
def _get_sysadmins() -> "Query[model.User]":
q = model.Session.query(model.User).filter(
model.User.sysadmin.is_(True),
model.User.state == u'active')
return q
def _get_config_options() -> dict[str, list[dict[str, str]]]:
homepages = [{
u'value': u'1',
u'text': (u'Introductory area, search, featured'
u' group and featured organization')
}, {
u'value': u'2',
u'text': (u'Search, stats, introductory area, '
u'featured organization and featured group')
}, {
u'value': u'3',
u'text': u'Search, introductory area and stats'
}]
return dict(homepages=homepages)
def _get_config_items() -> list[str]:
return [
u'ckan.site_title', u'ckan.main_css', u'ckan.site_description',
u'ckan.site_logo', u'ckan.site_about', u'ckan.site_intro_text',
u'ckan.site_custom_css', u'ckan.homepage_style'
]
@admin.before_request
def before_request() -> None:
try:
context = cast(
Context,
{"model": model, "user": g.user, "auth_user_obj": g.userobj}
)
logic.check_access(u'sysadmin', context)
except logic.NotAuthorized:
base.abort(403, _(u'Need to be system administrator to administer'))
def index() -> str:
data = dict(sysadmins=[a.name for a in _get_sysadmins()])
return base.render(u'admin/index.html', extra_vars=data)
class ResetConfigView(MethodView):
def get(self) -> Union[str, Response]:
if u'cancel' in request.args:
return h.redirect_to(u'admin.config')
return base.render(u'admin/confirm_reset.html', extra_vars={})
def post(self) -> Response:
for item in _get_config_items():
model.delete_system_info(item)
app_globals.reset()
return h.redirect_to(u'admin.config')
class ConfigView(MethodView):
def get(self) -> str:
items = _get_config_options()
schema = ckan.logic.schema.update_configuration_schema()
data = {}
for key in schema:
data[key] = config.get(key)
vars: dict[str, Any] = dict(data=data, errors={}, **items)
return base.render(u'admin/config.html', extra_vars=vars)
def post(self) -> Union[str, Response]:
try:
req: dict[str, Any] = request.form.copy()
req.update(request.files.to_dict())
data_dict = logic.clean_dict(
dict_fns.unflatten(
logic.tuplize_dict(
logic.parse_params(req,
ignore_keys=CACHE_PARAMETERS))))
del data_dict['save']
data = logic.get_action(u'config_option_update')({
u'user': g.user
}, data_dict)
except logic.ValidationError as e:
items = _get_config_options()
data = request.form
errors = e.error_dict
error_summary = e.error_summary
vars = dict(data=data,
errors=errors,
error_summary=error_summary,
form_items=items,
**items)
return base.render(u'admin/config.html', extra_vars=vars)
return h.redirect_to(u'admin.config')
class TrashView(MethodView):
def __init__(self):
self.deleted_packages = self._get_deleted_datasets()
self.deleted_orgs = model.Session.query(model.Group).filter_by(
state=model.State.DELETED, is_organization=True)
self.deleted_groups = model.Session.query(model.Group).filter_by(
state=model.State.DELETED, is_organization=False)
self.deleted_entities = {
u'package': self.deleted_packages,
u'organization': self.deleted_orgs,
u'group': self.deleted_groups
}
self.messages = {
u'confirm': {
u'all': _(u'Are you sure you want to purge everything?'),
u'package': _(u'Are you sure you want to purge datasets?'),
u'organization':
_(u'Are you sure you want to purge organizations?'),
u'group': _(u'Are you sure you want to purge groups?')
},
u'success': {
u'package': _(u'{number} datasets have been purged'),
u'organization': _(u'{number} organizations have been purged'),
u'group': _(u'{number} groups have been purged')
},
u'empty': {
u'package': _(u'There are no datasets to purge'),
u'organization': _(u'There are no organizations to purge'),
u'group': _(u'There are no groups to purge')
}
}
def _get_deleted_datasets(
self
) -> Union["Query[model.Package]", List[Any]]:
if config.get_value('ckan.search.remove_deleted_packages'):
return self._get_deleted_datasets_from_db()
else:
return self._get_deleted_datasets_from_search_index()
def _get_deleted_datasets_from_db(self) -> "Query[model.Package]":
return model.Session.query(
model.Package
).filter_by(
state=model.State.DELETED
)
def _get_deleted_datasets_from_search_index(self) -> List[Any]:
package_search = logic.get_action('package_search')
search_params = {
'fq': '+state:deleted',
'include_private': True,
}
base_results = package_search(
{'ignore_auth': True},
search_params
)
return base_results['results']
def get(self) -> str:
ent_type = request.args.get(u'name')
if ent_type:
return base.render(u'admin/snippets/confirm_delete.html',
extra_vars={
u'ent_type': ent_type,
u'messages': self.messages})
data = dict(data=self.deleted_entities, messages=self.messages)
return base.render(u'admin/trash.html', extra_vars=data)
def post(self) -> Response:
if u'cancel' in request.form:
return h.redirect_to(u'admin.trash')
req_action = request.form.get(u'action', '')
if req_action == u'all':
self.purge_all()
elif req_action in (u'package', u'organization', u'group'):
self.purge_entity(req_action)
else:
h.flash_error(_(u'Action not implemented.'))
return h.redirect_to(u'admin.trash')
def purge_all(self):
actions = (u'dataset_purge', u'group_purge', u'organization_purge')
entities = (
self.deleted_packages,
self.deleted_groups,
self.deleted_orgs
)
for action, deleted_entities in zip(actions, entities):
for entity in deleted_entities:
ent_id = entity.id if hasattr(entity, 'id') \
else entity['id']
logic.get_action(action)(
{u'user': g.user}, {u'id': ent_id}
)
model.Session.remove()
h.flash_success(_(u'Massive purge complete'))
def purge_entity(self, ent_type: str):
entities = self.deleted_entities[ent_type]
number = len(entities) if type(entities) == list else entities.count()
for ent in entities:
entity_id = ent.id if hasattr(ent, 'id') else ent['id']
logic.get_action(self._get_purge_action(ent_type))(
{u'user': g.user},
{u'id': entity_id}
)
model.Session.remove()
h.flash_success(self.messages[u'success'][ent_type].format(
number=number
))
@staticmethod
def _get_purge_action(ent_type: str) -> str:
actions = {
"package": "dataset_purge",
"organization": "organization_purge",
"group": "group_purge",
}
return actions[ent_type]
admin.add_url_rule(
u'/', view_func=index, methods=['GET'], strict_slashes=False
)
admin.add_url_rule(u'/reset_config',
view_func=ResetConfigView.as_view(str(u'reset_config')))
admin.add_url_rule(u'/config', view_func=ConfigView.as_view(str(u'config')))
admin.add_url_rule(u'/trash', view_func=TrashView.as_view(str(u'trash')))
| true
| true
|
f7199e3af009f350705cd13527301b007761a105
| 2,956
|
py
|
Python
|
examples/interface/CP.py
|
jeffhammond/Elemental
|
a9e6236ce9d92dd56c7d3cd5ffd52f796a35cd0c
|
[
"Apache-2.0"
] | null | null | null |
examples/interface/CP.py
|
jeffhammond/Elemental
|
a9e6236ce9d92dd56c7d3cd5ffd52f796a35cd0c
|
[
"Apache-2.0"
] | null | null | null |
examples/interface/CP.py
|
jeffhammond/Elemental
|
a9e6236ce9d92dd56c7d3cd5ffd52f796a35cd0c
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2009-2016, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El
n0 = 50
n1 = 50
display = False
worldRank = El.mpi.WorldRank()
worldSize = El.mpi.WorldSize()
# Stack two 2D finite-difference matrices on top of each other
# and make the last column dense
def StackedFD2D(N0,N1):
A = El.DistSparseMatrix()
height = 2*N0*N1
width = N0*N1
A.Resize(height,width)
localHeight = A.LocalHeight()
A.Reserve(6*localHeight)
for sLoc in xrange(localHeight):
s = A.GlobalRow(sLoc)
if s < N0*N1:
x0 = s % N0
x1 = s / N0
A.QueueLocalUpdate( sLoc, s, 11 )
if x0 > 0:
A.QueueLocalUpdate( sLoc, s-1, -1 )
if x0+1 < N0:
A.QueueLocalUpdate( sLoc, s+1, 2 )
if x1 > 0:
A.QueueLocalUpdate( sLoc, s-N0, -3 )
if x1+1 < N1:
A.QueueLocalUpdate( sLoc, s+N0, 4 )
else:
sRel = s-N0*N1
x0 = sRel % N0
x1 = sRel / N0
A.QueueLocalUpdate( sLoc, sRel, -2 )
if x0 > 0:
A.QueueLocalUpdate( sLoc, sRel-1, -1 )
if x0+1 < N0:
A.QueueLocalUpdate( sLoc, sRel+1, -2 )
if x1 > 0:
A.QueueLocalUpdate( sLoc, sRel-N0, -3 )
if x1+1 < N1:
A.QueueLocalUpdate( sLoc, sRel+N0, 3 )
# The dense last column
A.QueueLocalUpdate( sLoc, width-1, -10/height );
A.ProcessQueues()
return A
A = StackedFD2D(n0,n1)
b = El.DistMultiVec()
El.Gaussian( b, 2*n0*n1, 1 )
if display:
El.Display( A, "A" )
El.Display( b, "b" )
ctrl = El.LPAffineCtrl_d()
ctrl.mehrotraCtrl.progress = True
ctrl.mehrotraCtrl.solveCtrl.progress = True
startCP = El.mpi.Time()
x = El.CP( A, b, ctrl )
endCP = El.mpi.Time()
if worldRank == 0:
print "CP time:", endCP-startCP, "seconds"
if display:
El.Display( x, "x" )
bTwoNorm = El.Nrm2( b )
bInfNorm = El.MaxNorm( b )
r = El.DistMultiVec()
El.Copy( b, r )
El.Multiply( El.NORMAL, -1., A, x, 1., r )
if display:
El.Display( r, "r" )
rTwoNorm = El.Nrm2( r )
rInfNorm = El.MaxNorm( r )
if worldRank == 0:
print "|| b ||_2 =", bTwoNorm
print "|| b ||_oo =", bInfNorm
print "|| A x - b ||_2 =", rTwoNorm
print "|| A x - b ||_oo =", rInfNorm
startLS = El.mpi.Time()
xLS = El.LeastSquares(A,b)
endLS = El.mpi.Time()
if worldRank == 0:
print "LS time:", endLS-startLS, "seconds"
if display:
El.Display( xLS, "x_{LS}" )
rLS = El.DistMultiVec()
El.Copy( b, rLS )
El.Multiply( El.NORMAL, -1., A, xLS, 1., rLS )
if display:
El.Display( rLS, "A x_{LS} - b" )
rLSTwoNorm = El.Nrm2(rLS)
rLSInfNorm = El.MaxNorm(rLS)
if worldRank == 0:
print "|| A x_{LS} - b ||_2 =", rLSTwoNorm
print "|| A x_{LS} - b ||_oo =", rLSInfNorm
# Require the user to press a button before the figures are closed
El.Finalize()
if worldSize == 1:
raw_input('Press Enter to exit')
| 25.704348
| 73
| 0.609269
|
import El
n0 = 50
n1 = 50
display = False
worldRank = El.mpi.WorldRank()
worldSize = El.mpi.WorldSize()
def StackedFD2D(N0,N1):
A = El.DistSparseMatrix()
height = 2*N0*N1
width = N0*N1
A.Resize(height,width)
localHeight = A.LocalHeight()
A.Reserve(6*localHeight)
for sLoc in xrange(localHeight):
s = A.GlobalRow(sLoc)
if s < N0*N1:
x0 = s % N0
x1 = s / N0
A.QueueLocalUpdate( sLoc, s, 11 )
if x0 > 0:
A.QueueLocalUpdate( sLoc, s-1, -1 )
if x0+1 < N0:
A.QueueLocalUpdate( sLoc, s+1, 2 )
if x1 > 0:
A.QueueLocalUpdate( sLoc, s-N0, -3 )
if x1+1 < N1:
A.QueueLocalUpdate( sLoc, s+N0, 4 )
else:
sRel = s-N0*N1
x0 = sRel % N0
x1 = sRel / N0
A.QueueLocalUpdate( sLoc, sRel, -2 )
if x0 > 0:
A.QueueLocalUpdate( sLoc, sRel-1, -1 )
if x0+1 < N0:
A.QueueLocalUpdate( sLoc, sRel+1, -2 )
if x1 > 0:
A.QueueLocalUpdate( sLoc, sRel-N0, -3 )
if x1+1 < N1:
A.QueueLocalUpdate( sLoc, sRel+N0, 3 )
A.QueueLocalUpdate( sLoc, width-1, -10/height );
A.ProcessQueues()
return A
A = StackedFD2D(n0,n1)
b = El.DistMultiVec()
El.Gaussian( b, 2*n0*n1, 1 )
if display:
El.Display( A, "A" )
El.Display( b, "b" )
ctrl = El.LPAffineCtrl_d()
ctrl.mehrotraCtrl.progress = True
ctrl.mehrotraCtrl.solveCtrl.progress = True
startCP = El.mpi.Time()
x = El.CP( A, b, ctrl )
endCP = El.mpi.Time()
if worldRank == 0:
print "CP time:", endCP-startCP, "seconds"
if display:
El.Display( x, "x" )
bTwoNorm = El.Nrm2( b )
bInfNorm = El.MaxNorm( b )
r = El.DistMultiVec()
El.Copy( b, r )
El.Multiply( El.NORMAL, -1., A, x, 1., r )
if display:
El.Display( r, "r" )
rTwoNorm = El.Nrm2( r )
rInfNorm = El.MaxNorm( r )
if worldRank == 0:
print "|| b ||_2 =", bTwoNorm
print "|| b ||_oo =", bInfNorm
print "|| A x - b ||_2 =", rTwoNorm
print "|| A x - b ||_oo =", rInfNorm
startLS = El.mpi.Time()
xLS = El.LeastSquares(A,b)
endLS = El.mpi.Time()
if worldRank == 0:
print "LS time:", endLS-startLS, "seconds"
if display:
El.Display( xLS, "x_{LS}" )
rLS = El.DistMultiVec()
El.Copy( b, rLS )
El.Multiply( El.NORMAL, -1., A, xLS, 1., rLS )
if display:
El.Display( rLS, "A x_{LS} - b" )
rLSTwoNorm = El.Nrm2(rLS)
rLSInfNorm = El.MaxNorm(rLS)
if worldRank == 0:
print "|| A x_{LS} - b ||_2 =", rLSTwoNorm
print "|| A x_{LS} - b ||_oo =", rLSInfNorm
El.Finalize()
if worldSize == 1:
raw_input('Press Enter to exit')
| false
| true
|
f7199e876ff568e200ceb2dbf17c8e228d670c71
| 1,919
|
py
|
Python
|
test/Entry.py
|
EmanueleCannizzaro/scons
|
6baa4e65cdf4df6951473545b69435711864e509
|
[
"MIT"
] | 1
|
2019-09-18T06:37:02.000Z
|
2019-09-18T06:37:02.000Z
|
test/Entry.py
|
EmanueleCannizzaro/scons
|
6baa4e65cdf4df6951473545b69435711864e509
|
[
"MIT"
] | null | null | null |
test/Entry.py
|
EmanueleCannizzaro/scons
|
6baa4e65cdf4df6951473545b69435711864e509
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Entry.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify that the Entry() global function and environment method work
correctly, and that the former does not try to expand construction
variables.
"""
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """
env = Environment(FOO = 'fff', BAR = 'bbb')
print Entry('ddd')
print Entry('$FOO')
print Entry('${BAR}_$BAR')
print env.Entry('eee')
print env.Entry('$FOO')
print env.Entry('${BAR}_$BAR')
""")
test.run(stdout = test.wrap_stdout(read_str = """\
ddd
$FOO
${BAR}_$BAR
eee
fff
bbb_bbb
""", build_str = """\
scons: `.' is up to date.
"""))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 29.523077
| 87
| 0.738927
|
__revision__ = "test/Entry.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """
env = Environment(FOO = 'fff', BAR = 'bbb')
print Entry('ddd')
print Entry('$FOO')
print Entry('${BAR}_$BAR')
print env.Entry('eee')
print env.Entry('$FOO')
print env.Entry('${BAR}_$BAR')
""")
test.run(stdout = test.wrap_stdout(read_str = """\
ddd
$FOO
${BAR}_$BAR
eee
fff
bbb_bbb
""", build_str = """\
scons: `.' is up to date.
"""))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| true
| true
|
f719a19921e0717fd82f09f6ab40bd54a1718ceb
| 18,564
|
py
|
Python
|
sciencebeam_parser/models/model.py
|
elifesciences/sciencebeam-parser
|
66964f283612b8d6fa8a23ad8790292c1ec07651
|
[
"MIT"
] | 13
|
2021-08-04T12:11:17.000Z
|
2022-03-28T20:41:20.000Z
|
sciencebeam_parser/models/model.py
|
elifesciences/sciencebeam-parser
|
66964f283612b8d6fa8a23ad8790292c1ec07651
|
[
"MIT"
] | 33
|
2021-08-05T08:37:59.000Z
|
2022-03-29T18:42:09.000Z
|
sciencebeam_parser/models/model.py
|
elifesciences/sciencebeam-parser
|
66964f283612b8d6fa8a23ad8790292c1ec07651
|
[
"MIT"
] | 1
|
2022-01-05T14:53:06.000Z
|
2022-01-05T14:53:06.000Z
|
import logging
from abc import ABC, abstractmethod
from collections import defaultdict
from dataclasses import dataclass, field
from typing import (
Callable,
Dict,
Iterable,
List,
NamedTuple,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
Union
)
from sciencebeam_trainer_delft.sequence_labelling.reader import load_data_crf_lines
from sciencebeam_parser.utils.labels import get_split_prefix_label, strip_tag_prefix
from sciencebeam_parser.document.layout_document import (
LayoutToken,
LayoutLine,
LayoutBlock,
LayoutPage,
LayoutDocument
)
from sciencebeam_parser.models.data import (
AppFeaturesContext,
DocumentFeaturesContext,
LabeledLayoutModelData,
LayoutModelData,
ModelDataGenerator
)
from sciencebeam_parser.models.extract import ModelSemanticExtractor
from sciencebeam_parser.models.training_data import TeiTrainingDataGenerator
from sciencebeam_parser.document.semantic_document import SemanticContentWrapper
from sciencebeam_parser.models.model_impl import ModelImpl, T_ModelImplFactory
from sciencebeam_parser.utils.lazy import LazyLoaded, Preloadable
LOGGER = logging.getLogger(__name__)
T = TypeVar('T')
U = TypeVar('U')
@dataclass
class LayoutModelLabel:
label: str
label_token_text: str
layout_line: Optional[LayoutLine] = field(repr=False, default=None)
layout_token: Optional[LayoutToken] = field(repr=False, default=None)
class LabeledLayoutToken(NamedTuple):
label: str
layout_token: LayoutToken
class NewDocumentMarker:
pass
NEW_DOCUMENT_MARKER = NewDocumentMarker()
def iter_entities_including_other(seq: List[str]) -> Iterable[Tuple[str, int, int]]:
"""
Similar to get_entities, but also yields the other (`O`) tag
"""
prev_tag = 'O'
prev_start = 0
for index, prefixed_tag in enumerate(seq):
prefix, tag = get_split_prefix_label(prefixed_tag)
if prefix == 'B' or tag != prev_tag:
if prev_start < index:
yield prev_tag, prev_start, index - 1
prev_tag = tag
prev_start = index
if prev_start < len(seq):
yield prev_tag, prev_start, len(seq) - 1
def get_entities_including_other(seq: List[str]) -> List[Tuple[str, int, int]]:
return list(iter_entities_including_other(seq))
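A worked example of the grouping performed above, assuming BIO-style labels of the form 'B-<tag>' / 'I-<tag>' with 'O' for unlabeled tokens (the exact prefix convention is whatever get_split_prefix_label expects):

# get_entities_including_other(['B-<title>', 'I-<title>', 'O', 'B-<author>'])
# -> [('<title>', 0, 1), ('O', 2, 2), ('<author>', 3, 3)]
# i.e. each contiguous run is reported as (tag, start_index, end_index), inclusive,
# and 'O' spans are kept rather than dropped.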
class LayoutDocumentLabelResult:
def __init__(
self,
layout_document: LayoutDocument,
layout_model_label_iterable: Iterable[LayoutModelLabel]
):
self.layout_document = layout_document
self.layout_model_label_list = list(layout_model_label_iterable)
self.layout_document_labels_by_label: Dict[str, List[LayoutModelLabel]] = (
defaultdict(list)
)
for layout_model_label in self.layout_model_label_list:
tag_without_prefix = strip_tag_prefix(layout_model_label.label)
self.layout_document_labels_by_label[tag_without_prefix].append(
layout_model_label
)
def get_available_labels(self) -> Set[str]:
return set(self.layout_document_labels_by_label.keys())
def get_layout_document_labels_by_labels(self, labels: List[str]) -> List[LayoutModelLabel]:
if not labels:
return []
if len(labels) == 1:
return self.layout_document_labels_by_label.get(labels[0], [])
result: List[LayoutModelLabel] = []
for label in labels:
result.extend(self.layout_document_labels_by_label.get(label, []))
return result
def get_filtered_document_by_label(self, label: str) -> LayoutDocument:
return self.get_filtered_document_by_labels([label])
def get_filtered_document_by_labels(
self,
labels: List[str]
): # pylint: disable=too-many-branches
layout_document = LayoutDocument(pages=[])
layout_document_labels = self.get_layout_document_labels_by_labels(labels)
if not layout_document_labels:
LOGGER.warning(
'no layout_lines_to_include found for: %r, available keys=%r',
labels, self.layout_document_labels_by_label.keys()
)
return layout_document
layout_token_ids_to_include = {
id(layout_document_label.layout_token)
for layout_document_label in layout_document_labels
if layout_document_label.layout_token
}
LOGGER.debug('layout_tokens_to_include: %s', layout_token_ids_to_include)
layout_line_ids_to_include: Set[int] = set()
if not layout_token_ids_to_include:
layout_line_ids_to_include = {
id(layout_document_label.layout_line)
for layout_document_label in layout_document_labels
if layout_document_label.layout_line
}
LOGGER.debug('layout_line_ids_to_include: %s', layout_line_ids_to_include)
result_page: Optional[LayoutPage] = None
for page in self.layout_document.pages: # pylint: disable=too-many-nested-blocks
result_page = None
result_block: Optional[LayoutBlock] = None
for block in page.blocks:
result_block = None
for line in block.lines:
accepted_line: Optional[LayoutLine] = None
if layout_token_ids_to_include:
accepted_tokens: List[LayoutToken] = []
for token in line.tokens:
if id(token) in layout_token_ids_to_include:
accepted_tokens.append(token)
if not accepted_tokens:
continue
                        if len(line.tokens) == len(accepted_tokens):
accepted_line = line
else:
accepted_line = LayoutLine(tokens=accepted_tokens)
else:
if id(line) not in layout_line_ids_to_include:
continue
accepted_line = line
if result_page is None:
result_page = LayoutPage(blocks=[])
layout_document.pages.append(result_page)
if result_block is None:
result_block = LayoutBlock(lines=[])
result_page.blocks.append(result_block)
result_block.lines.append(accepted_line)
return layout_document
def iter_entity_layout_blocks_for_labeled_layout_tokens(
labeled_layout_tokens: Iterable[LabeledLayoutToken]
) -> Iterable[Tuple[str, LayoutBlock]]:
layout_tokens = [result.layout_token for result in labeled_layout_tokens]
labels = [result.label for result in labeled_layout_tokens]
LOGGER.debug('layout_tokens: %s', layout_tokens)
LOGGER.debug('labels: %s', labels)
for tag, start, end in get_entities_including_other(list(labels)):
yield tag, LayoutBlock.for_tokens(layout_tokens[start:end + 1])
def iter_entity_values_predicted_labels(
tag_result: List[Tuple[str, str]]
) -> Iterable[Tuple[str, str]]:
tokens, labels = zip(*tag_result)
LOGGER.debug('tokens: %s', tokens)
LOGGER.debug('labels: %s', labels)
for tag, start, end in get_entities_including_other(list(labels)):
yield tag, ' '.join(tokens[start:end + 1])
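# Illustrative example: for a tag_result such as
#   [('The', 'B-<title>'), ('Title', 'I-<title>'), ('.', 'O')]
# iter_entity_values_predicted_labels would yield
#   ('<title>', 'The Title') and ('O', '.'),
# assuming the same 'B-'/'I-'/'O' label prefix convention as above.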
def iter_labeled_layout_token_for_layout_model_label(
layout_model_label_iterable: Iterable[LayoutModelLabel]
) -> Iterable[LabeledLayoutToken]:
for layout_model_label in layout_model_label_iterable:
layout_token = layout_model_label.layout_token
assert layout_token is not None
yield LabeledLayoutToken(
layout_model_label.label,
layout_token
)
def iter_data_lines_for_model_data_iterables(
model_data_iterables: Iterable[Iterable[LayoutModelData]]
) -> Iterable[str]:
for index, model_data_list in enumerate(model_data_iterables):
if index > 0:
yield ''
for model_data in model_data_list:
yield model_data.data_line
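# Sketch of the expected data layout: each LayoutModelData contributes one
# feature line, and an empty line is emitted between documents, assuming
# load_data_crf_lines treats a blank line as the sequence separator, e.g.:
#   token1 feat1 feat2 ...
#   token2 feat1 feat2 ...
#   <blank line>
#   tokenA feat1 feat2 ...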
class Model(ABC, Preloadable):
def __init__(
self,
model_impl_factory: Optional[T_ModelImplFactory],
model_config: Optional[dict] = None
) -> None:
self._model_impl_factory = model_impl_factory
self._lazy_model_impl = LazyLoaded[ModelImpl](self._load_model_impl)
self.model_config = model_config or {}
def __repr__(self) -> str:
return '%s(model_config=%r, loaded=%r)' % (
type(self).__name__, self.model_config, self._lazy_model_impl.is_loaded
)
@abstractmethod
def get_data_generator(
self,
document_features_context: DocumentFeaturesContext
) -> ModelDataGenerator:
pass
# @abstractmethod
def get_semantic_extractor(self) -> ModelSemanticExtractor:
raise NotImplementedError()
# @abstractmethod
def get_tei_training_data_generator(self) -> TeiTrainingDataGenerator:
raise NotImplementedError()
def _load_model_impl(self) -> ModelImpl:
assert self._model_impl_factory, 'model impl factory required'
LOGGER.info('creating model impl: %r', self._model_impl_factory)
model_impl = self._model_impl_factory()
if not isinstance(model_impl, ModelImpl):
raise TypeError('invalid model impl type: %r' % model_impl)
return model_impl
@property
def model_impl(self) -> ModelImpl:
was_loaded = self._lazy_model_impl.is_loaded
model_impl = self._lazy_model_impl.get()
if was_loaded:
LOGGER.info('model impl already loaded: %r', model_impl)
return model_impl
def preload(self):
model_impl = self.model_impl
model_impl.preload()
def iter_semantic_content_for_entity_blocks(
self,
entity_tokens: Iterable[Tuple[str, LayoutBlock]],
**kwargs
) -> Iterable[SemanticContentWrapper]:
return self.get_semantic_extractor().iter_semantic_content_for_entity_blocks(
entity_tokens,
**kwargs
)
def predict_labels(
self,
texts: List[List[str]],
features: List[List[List[str]]],
output_format: Optional[str] = None
) -> List[List[Tuple[str, str]]]:
return self.model_impl.predict_labels(texts, features, output_format)
def _iter_flat_label_model_data_lists_to( # pylint: disable=too-many-locals
self,
model_data_list_iterable: Iterable[Sequence[LayoutModelData]],
item_factory: Callable[[str, LayoutModelData], T]
) -> Iterable[Union[T, NewDocumentMarker]]:
# Note: currently we do need a list
model_data_lists = list(model_data_list_iterable)
if not model_data_lists:
return
data_lines = list(iter_data_lines_for_model_data_iterables(
model_data_lists
))
texts, features = load_data_crf_lines(data_lines)
texts = texts.tolist()
tag_result = self.predict_labels(
texts=texts, features=features, output_format=None
)
if not tag_result:
return
if len(tag_result) != len(model_data_lists):
raise AssertionError('tag result does not match number of docs: %d != %d' % (
len(tag_result), len(model_data_lists)
))
for index, (doc_tag_result, model_data_list) in enumerate(
zip(tag_result, model_data_lists)
):
if index > 0:
yield NEW_DOCUMENT_MARKER
if len(doc_tag_result) != len(model_data_list):
raise AssertionError('doc tag result does not match data: %d != %d' % (
len(doc_tag_result), len(model_data_list)
))
for token_tag_result, token_model_data in zip(doc_tag_result, model_data_list):
label_token_text, token_label = token_tag_result
if label_token_text != token_model_data.label_token_text:
raise AssertionError(
f'actual: {repr(label_token_text)}'
f', expected: {repr(token_model_data.label_token_text)}'
)
yield item_factory(
token_label,
token_model_data
)
def _iter_stacked_label_model_data_lists_to(
self,
model_data_list_iterable: Iterable[Sequence[LayoutModelData]],
item_factory: Callable[[str, LayoutModelData], T]
) -> Iterable[Sequence[T]]:
# Note: currently we do need a list
model_data_lists = list(model_data_list_iterable)
if not model_data_lists:
return
doc_items: List[T] = []
result_doc_count = 0
for item in self._iter_flat_label_model_data_lists_to(
model_data_lists,
item_factory=item_factory
):
if isinstance(item, NewDocumentMarker):
yield doc_items
doc_items = []
result_doc_count += 1
continue
doc_items.append(item)
if result_doc_count < len(model_data_lists):
yield doc_items
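    # Illustrative example of the regrouping above: if the flat iterator
    # yields item_a, item_b, NEW_DOCUMENT_MARKER, item_c for two documents,
    # the stacked variant yields [item_a, item_b] and then [item_c].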
def iter_label_layout_documents(
self,
layout_documents: List[LayoutDocument],
app_features_context: AppFeaturesContext
) -> Iterable[List[LayoutModelLabel]]:
doc_layout_model_labels: List[LayoutModelLabel] = []
result_doc_count = 0
for layout_model_label in self._iter_label_layout_documents(
layout_documents,
app_features_context=app_features_context
):
if isinstance(layout_model_label, NewDocumentMarker):
yield doc_layout_model_labels
doc_layout_model_labels = []
result_doc_count += 1
continue
doc_layout_model_labels.append(layout_model_label)
if result_doc_count < len(layout_documents):
yield doc_layout_model_labels
def iter_label_layout_document(
self,
layout_document: LayoutDocument,
app_features_context: AppFeaturesContext
) -> Iterable[LayoutModelLabel]:
for layout_model_label in self._iter_label_layout_documents(
[layout_document],
app_features_context=app_features_context
):
assert isinstance(layout_model_label, LayoutModelLabel)
yield layout_model_label
def _iter_label_layout_documents( # pylint: disable=too-many-locals
self,
layout_documents: Iterable[LayoutDocument],
app_features_context: AppFeaturesContext
) -> Iterable[Union[LayoutModelLabel, NewDocumentMarker]]:
data_generator = self.get_data_generator(
document_features_context=DocumentFeaturesContext(
app_features_context=app_features_context
)
)
model_data_lists = [
list(data_generator.iter_model_data_for_layout_document(
layout_document
))
for layout_document in layout_documents
]
return self._iter_flat_label_model_data_lists_to(
model_data_lists,
lambda label, model_data: LayoutModelLabel(
label=label,
label_token_text=model_data.label_token_text,
layout_line=model_data.layout_line,
layout_token=model_data.layout_token
)
)
def iter_labeled_model_data_list_for_model_data_list_iterable(
self,
model_data_list_iterable: Iterable[Sequence[LayoutModelData]]
) -> Iterable[Sequence[LabeledLayoutModelData]]:
return self._iter_stacked_label_model_data_lists_to(
model_data_list_iterable,
lambda label, model_data: LabeledLayoutModelData.from_model_data(
model_data,
label=label
)
)
def get_label_layout_document_result(
self,
layout_document: LayoutDocument,
app_features_context: AppFeaturesContext
) -> LayoutDocumentLabelResult:
return LayoutDocumentLabelResult(
layout_document=layout_document,
layout_model_label_iterable=self.iter_label_layout_document(
layout_document,
app_features_context=app_features_context
)
)
def iter_predict_labels_for_layout_document(
self,
layout_document: LayoutDocument,
app_features_context: AppFeaturesContext
) -> Iterable[LabeledLayoutToken]:
# Note: this should get merged with Model.iter_label_layout_document
yield from iter_labeled_layout_token_for_layout_model_label(
self.iter_label_layout_document(
layout_document,
app_features_context=app_features_context
)
)
def predict_labels_for_layout_document(
self,
layout_document: LayoutDocument,
app_features_context: AppFeaturesContext
) -> List[LabeledLayoutToken]:
return list(self.iter_predict_labels_for_layout_document(
layout_document,
app_features_context=app_features_context
))
def predict_labels_for_layout_documents(
self,
layout_documents: List[LayoutDocument],
app_features_context: AppFeaturesContext
) -> List[List[LabeledLayoutToken]]:
return [
list(iter_labeled_layout_token_for_layout_model_label(
layout_model_labels
))
for layout_model_labels in self.iter_label_layout_documents(
layout_documents,
app_features_context=app_features_context
)
]
def iter_entity_layout_blocks_for_labeled_layout_tokens(
self,
labeled_layout_tokens: Iterable[LabeledLayoutToken]
) -> Iterable[Tuple[str, LayoutBlock]]:
return iter_entity_layout_blocks_for_labeled_layout_tokens(labeled_layout_tokens)
def iter_semantic_content_for_labeled_layout_tokens(
self,
labeled_layout_tokens: Iterable[LabeledLayoutToken],
**kwargs
) -> Iterable[SemanticContentWrapper]:
return self.iter_semantic_content_for_entity_blocks(
self.iter_entity_layout_blocks_for_labeled_layout_tokens(
labeled_layout_tokens
),
**kwargs
)
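# Minimal usage sketch (hypothetical names; a concrete Model subclass and the
# contexts come from the wider sciencebeam_parser setup):
#   labeled_tokens = model.predict_labels_for_layout_document(
#       layout_document, app_features_context=app_features_context
#   )
#   semantic_content = list(
#       model.iter_semantic_content_for_labeled_layout_tokens(labeled_tokens)
#   )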
| 37.053892
| 96
| 0.652823
|
import logging
from abc import ABC, abstractmethod
from collections import defaultdict
from dataclasses import dataclass, field
from typing import (
Callable,
Dict,
Iterable,
List,
NamedTuple,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
Union
)
from sciencebeam_trainer_delft.sequence_labelling.reader import load_data_crf_lines
from sciencebeam_parser.utils.labels import get_split_prefix_label, strip_tag_prefix
from sciencebeam_parser.document.layout_document import (
LayoutToken,
LayoutLine,
LayoutBlock,
LayoutPage,
LayoutDocument
)
from sciencebeam_parser.models.data import (
AppFeaturesContext,
DocumentFeaturesContext,
LabeledLayoutModelData,
LayoutModelData,
ModelDataGenerator
)
from sciencebeam_parser.models.extract import ModelSemanticExtractor
from sciencebeam_parser.models.training_data import TeiTrainingDataGenerator
from sciencebeam_parser.document.semantic_document import SemanticContentWrapper
from sciencebeam_parser.models.model_impl import ModelImpl, T_ModelImplFactory
from sciencebeam_parser.utils.lazy import LazyLoaded, Preloadable
LOGGER = logging.getLogger(__name__)
T = TypeVar('T')
U = TypeVar('U')
@dataclass
class LayoutModelLabel:
label: str
label_token_text: str
layout_line: Optional[LayoutLine] = field(repr=False, default=None)
layout_token: Optional[LayoutToken] = field(repr=False, default=None)
class LabeledLayoutToken(NamedTuple):
label: str
layout_token: LayoutToken
class NewDocumentMarker:
pass
NEW_DOCUMENT_MARKER = NewDocumentMarker()
def iter_entities_including_other(seq: List[str]) -> Iterable[Tuple[str, int, int]]:
prev_tag = 'O'
prev_start = 0
for index, prefixed_tag in enumerate(seq):
prefix, tag = get_split_prefix_label(prefixed_tag)
if prefix == 'B' or tag != prev_tag:
if prev_start < index:
yield prev_tag, prev_start, index - 1
prev_tag = tag
prev_start = index
if prev_start < len(seq):
yield prev_tag, prev_start, len(seq) - 1
def get_entities_including_other(seq: List[str]) -> List[Tuple[str, int, int]]:
return list(iter_entities_including_other(seq))
class LayoutDocumentLabelResult:
def __init__(
self,
layout_document: LayoutDocument,
layout_model_label_iterable: Iterable[LayoutModelLabel]
):
self.layout_document = layout_document
self.layout_model_label_list = list(layout_model_label_iterable)
self.layout_document_labels_by_label: Dict[str, List[LayoutModelLabel]] = (
defaultdict(list)
)
for layout_model_label in self.layout_model_label_list:
tag_without_prefix = strip_tag_prefix(layout_model_label.label)
self.layout_document_labels_by_label[tag_without_prefix].append(
layout_model_label
)
def get_available_labels(self) -> Set[str]:
return set(self.layout_document_labels_by_label.keys())
def get_layout_document_labels_by_labels(self, labels: List[str]) -> List[LayoutModelLabel]:
if not labels:
return []
if len(labels) == 1:
return self.layout_document_labels_by_label.get(labels[0], [])
result: List[LayoutModelLabel] = []
for label in labels:
result.extend(self.layout_document_labels_by_label.get(label, []))
return result
def get_filtered_document_by_label(self, label: str) -> LayoutDocument:
return self.get_filtered_document_by_labels([label])
def get_filtered_document_by_labels(
self,
labels: List[str]
):
layout_document = LayoutDocument(pages=[])
layout_document_labels = self.get_layout_document_labels_by_labels(labels)
if not layout_document_labels:
LOGGER.warning(
'no layout_lines_to_include found for: %r, available keys=%r',
labels, self.layout_document_labels_by_label.keys()
)
return layout_document
layout_token_ids_to_include = {
id(layout_document_label.layout_token)
for layout_document_label in layout_document_labels
if layout_document_label.layout_token
}
LOGGER.debug('layout_tokens_to_include: %s', layout_token_ids_to_include)
layout_line_ids_to_include: Set[int] = set()
if not layout_token_ids_to_include:
layout_line_ids_to_include = {
id(layout_document_label.layout_line)
for layout_document_label in layout_document_labels
if layout_document_label.layout_line
}
LOGGER.debug('layout_line_ids_to_include: %s', layout_line_ids_to_include)
result_page: Optional[LayoutPage] = None
for page in self.layout_document.pages:
result_page = None
result_block: Optional[LayoutBlock] = None
for block in page.blocks:
result_block = None
for line in block.lines:
accepted_line: Optional[LayoutLine] = None
if layout_token_ids_to_include:
accepted_tokens: List[LayoutToken] = []
for token in line.tokens:
if id(token) in layout_token_ids_to_include:
accepted_tokens.append(token)
if not accepted_tokens:
continue
                        if len(line.tokens) == len(accepted_tokens):
accepted_line = line
else:
accepted_line = LayoutLine(tokens=accepted_tokens)
else:
if id(line) not in layout_line_ids_to_include:
continue
accepted_line = line
if result_page is None:
result_page = LayoutPage(blocks=[])
layout_document.pages.append(result_page)
if result_block is None:
result_block = LayoutBlock(lines=[])
result_page.blocks.append(result_block)
result_block.lines.append(accepted_line)
return layout_document
def iter_entity_layout_blocks_for_labeled_layout_tokens(
labeled_layout_tokens: Iterable[LabeledLayoutToken]
) -> Iterable[Tuple[str, LayoutBlock]]:
layout_tokens = [result.layout_token for result in labeled_layout_tokens]
labels = [result.label for result in labeled_layout_tokens]
LOGGER.debug('layout_tokens: %s', layout_tokens)
LOGGER.debug('labels: %s', labels)
for tag, start, end in get_entities_including_other(list(labels)):
yield tag, LayoutBlock.for_tokens(layout_tokens[start:end + 1])
def iter_entity_values_predicted_labels(
tag_result: List[Tuple[str, str]]
) -> Iterable[Tuple[str, str]]:
tokens, labels = zip(*tag_result)
LOGGER.debug('tokens: %s', tokens)
LOGGER.debug('labels: %s', labels)
for tag, start, end in get_entities_including_other(list(labels)):
yield tag, ' '.join(tokens[start:end + 1])
def iter_labeled_layout_token_for_layout_model_label(
layout_model_label_iterable: Iterable[LayoutModelLabel]
) -> Iterable[LabeledLayoutToken]:
for layout_model_label in layout_model_label_iterable:
layout_token = layout_model_label.layout_token
assert layout_token is not None
yield LabeledLayoutToken(
layout_model_label.label,
layout_token
)
def iter_data_lines_for_model_data_iterables(
model_data_iterables: Iterable[Iterable[LayoutModelData]]
) -> Iterable[str]:
for index, model_data_list in enumerate(model_data_iterables):
if index > 0:
yield ''
for model_data in model_data_list:
yield model_data.data_line
class Model(ABC, Preloadable):
def __init__(
self,
model_impl_factory: Optional[T_ModelImplFactory],
model_config: Optional[dict] = None
) -> None:
self._model_impl_factory = model_impl_factory
self._lazy_model_impl = LazyLoaded[ModelImpl](self._load_model_impl)
self.model_config = model_config or {}
def __repr__(self) -> str:
return '%s(model_config=%r, loaded=%r)' % (
type(self).__name__, self.model_config, self._lazy_model_impl.is_loaded
)
@abstractmethod
def get_data_generator(
self,
document_features_context: DocumentFeaturesContext
) -> ModelDataGenerator:
pass
def get_semantic_extractor(self) -> ModelSemanticExtractor:
raise NotImplementedError()
def get_tei_training_data_generator(self) -> TeiTrainingDataGenerator:
raise NotImplementedError()
def _load_model_impl(self) -> ModelImpl:
assert self._model_impl_factory, 'model impl factory required'
LOGGER.info('creating model impl: %r', self._model_impl_factory)
model_impl = self._model_impl_factory()
if not isinstance(model_impl, ModelImpl):
raise TypeError('invalid model impl type: %r' % model_impl)
return model_impl
@property
def model_impl(self) -> ModelImpl:
was_loaded = self._lazy_model_impl.is_loaded
model_impl = self._lazy_model_impl.get()
if was_loaded:
LOGGER.info('model impl already loaded: %r', model_impl)
return model_impl
def preload(self):
model_impl = self.model_impl
model_impl.preload()
def iter_semantic_content_for_entity_blocks(
self,
entity_tokens: Iterable[Tuple[str, LayoutBlock]],
**kwargs
) -> Iterable[SemanticContentWrapper]:
return self.get_semantic_extractor().iter_semantic_content_for_entity_blocks(
entity_tokens,
**kwargs
)
def predict_labels(
self,
texts: List[List[str]],
features: List[List[List[str]]],
output_format: Optional[str] = None
) -> List[List[Tuple[str, str]]]:
return self.model_impl.predict_labels(texts, features, output_format)
def _iter_flat_label_model_data_lists_to(
self,
model_data_list_iterable: Iterable[Sequence[LayoutModelData]],
item_factory: Callable[[str, LayoutModelData], T]
) -> Iterable[Union[T, NewDocumentMarker]]:
model_data_lists = list(model_data_list_iterable)
if not model_data_lists:
return
data_lines = list(iter_data_lines_for_model_data_iterables(
model_data_lists
))
texts, features = load_data_crf_lines(data_lines)
texts = texts.tolist()
tag_result = self.predict_labels(
texts=texts, features=features, output_format=None
)
if not tag_result:
return
if len(tag_result) != len(model_data_lists):
raise AssertionError('tag result does not match number of docs: %d != %d' % (
len(tag_result), len(model_data_lists)
))
for index, (doc_tag_result, model_data_list) in enumerate(
zip(tag_result, model_data_lists)
):
if index > 0:
yield NEW_DOCUMENT_MARKER
if len(doc_tag_result) != len(model_data_list):
raise AssertionError('doc tag result does not match data: %d != %d' % (
len(doc_tag_result), len(model_data_list)
))
for token_tag_result, token_model_data in zip(doc_tag_result, model_data_list):
label_token_text, token_label = token_tag_result
if label_token_text != token_model_data.label_token_text:
raise AssertionError(
f'actual: {repr(label_token_text)}'
f', expected: {repr(token_model_data.label_token_text)}'
)
yield item_factory(
token_label,
token_model_data
)
def _iter_stacked_label_model_data_lists_to(
self,
model_data_list_iterable: Iterable[Sequence[LayoutModelData]],
item_factory: Callable[[str, LayoutModelData], T]
) -> Iterable[Sequence[T]]:
model_data_lists = list(model_data_list_iterable)
if not model_data_lists:
return
doc_items: List[T] = []
result_doc_count = 0
for item in self._iter_flat_label_model_data_lists_to(
model_data_lists,
item_factory=item_factory
):
if isinstance(item, NewDocumentMarker):
yield doc_items
doc_items = []
result_doc_count += 1
continue
doc_items.append(item)
if result_doc_count < len(model_data_lists):
yield doc_items
def iter_label_layout_documents(
self,
layout_documents: List[LayoutDocument],
app_features_context: AppFeaturesContext
) -> Iterable[List[LayoutModelLabel]]:
doc_layout_model_labels: List[LayoutModelLabel] = []
result_doc_count = 0
for layout_model_label in self._iter_label_layout_documents(
layout_documents,
app_features_context=app_features_context
):
if isinstance(layout_model_label, NewDocumentMarker):
yield doc_layout_model_labels
doc_layout_model_labels = []
result_doc_count += 1
continue
doc_layout_model_labels.append(layout_model_label)
if result_doc_count < len(layout_documents):
yield doc_layout_model_labels
def iter_label_layout_document(
self,
layout_document: LayoutDocument,
app_features_context: AppFeaturesContext
) -> Iterable[LayoutModelLabel]:
for layout_model_label in self._iter_label_layout_documents(
[layout_document],
app_features_context=app_features_context
):
assert isinstance(layout_model_label, LayoutModelLabel)
yield layout_model_label
def _iter_label_layout_documents(
self,
layout_documents: Iterable[LayoutDocument],
app_features_context: AppFeaturesContext
) -> Iterable[Union[LayoutModelLabel, NewDocumentMarker]]:
data_generator = self.get_data_generator(
document_features_context=DocumentFeaturesContext(
app_features_context=app_features_context
)
)
model_data_lists = [
list(data_generator.iter_model_data_for_layout_document(
layout_document
))
for layout_document in layout_documents
]
return self._iter_flat_label_model_data_lists_to(
model_data_lists,
lambda label, model_data: LayoutModelLabel(
label=label,
label_token_text=model_data.label_token_text,
layout_line=model_data.layout_line,
layout_token=model_data.layout_token
)
)
def iter_labeled_model_data_list_for_model_data_list_iterable(
self,
model_data_list_iterable: Iterable[Sequence[LayoutModelData]]
) -> Iterable[Sequence[LabeledLayoutModelData]]:
return self._iter_stacked_label_model_data_lists_to(
model_data_list_iterable,
lambda label, model_data: LabeledLayoutModelData.from_model_data(
model_data,
label=label
)
)
def get_label_layout_document_result(
self,
layout_document: LayoutDocument,
app_features_context: AppFeaturesContext
) -> LayoutDocumentLabelResult:
return LayoutDocumentLabelResult(
layout_document=layout_document,
layout_model_label_iterable=self.iter_label_layout_document(
layout_document,
app_features_context=app_features_context
)
)
def iter_predict_labels_for_layout_document(
self,
layout_document: LayoutDocument,
app_features_context: AppFeaturesContext
) -> Iterable[LabeledLayoutToken]:
yield from iter_labeled_layout_token_for_layout_model_label(
self.iter_label_layout_document(
layout_document,
app_features_context=app_features_context
)
)
def predict_labels_for_layout_document(
self,
layout_document: LayoutDocument,
app_features_context: AppFeaturesContext
) -> List[LabeledLayoutToken]:
return list(self.iter_predict_labels_for_layout_document(
layout_document,
app_features_context=app_features_context
))
def predict_labels_for_layout_documents(
self,
layout_documents: List[LayoutDocument],
app_features_context: AppFeaturesContext
) -> List[List[LabeledLayoutToken]]:
return [
list(iter_labeled_layout_token_for_layout_model_label(
layout_model_labels
))
for layout_model_labels in self.iter_label_layout_documents(
layout_documents,
app_features_context=app_features_context
)
]
def iter_entity_layout_blocks_for_labeled_layout_tokens(
self,
labeled_layout_tokens: Iterable[LabeledLayoutToken]
) -> Iterable[Tuple[str, LayoutBlock]]:
return iter_entity_layout_blocks_for_labeled_layout_tokens(labeled_layout_tokens)
def iter_semantic_content_for_labeled_layout_tokens(
self,
labeled_layout_tokens: Iterable[LabeledLayoutToken],
**kwargs
) -> Iterable[SemanticContentWrapper]:
return self.iter_semantic_content_for_entity_blocks(
self.iter_entity_layout_blocks_for_labeled_layout_tokens(
labeled_layout_tokens
),
**kwargs
)
| true
| true
|
f719a28a0f454eca48dc84c19a7a003b8073c988
| 225
|
py
|
Python
|
tests/test_case_files/class_test_1.py
|
calkerns/dyc
|
ddc35e6c183137dc30b2a3a2f481098280167bd1
|
[
"MIT"
] | 100
|
2019-04-04T23:38:20.000Z
|
2022-03-30T18:14:16.000Z
|
tests/test_case_files/class_test_1.py
|
calkerns/dyc
|
ddc35e6c183137dc30b2a3a2f481098280167bd1
|
[
"MIT"
] | 51
|
2019-04-04T20:18:47.000Z
|
2021-10-05T17:17:20.000Z
|
tests/test_case_files/class_test_1.py
|
calkerns/dyc
|
ddc35e6c183137dc30b2a3a2f481098280167bd1
|
[
"MIT"
] | 63
|
2019-04-04T20:38:57.000Z
|
2021-05-25T02:23:16.000Z
|
class MyClass:
x = 1
class MyClass1(Parent1):
y = 1
class MyClass2(Parent1, Parent2):
z = 1
class MyClass3(Parent1):
a = 1
class MyClass4(Parent1, Parent2):
b = 1
| 10.714286
| 41
| 0.515556
|
class MyClass:
x = 1
class MyClass1(Parent1):
y = 1
class MyClass2(Parent1, Parent2):
z = 1
class MyClass3(Parent1):
a = 1
class MyClass4(Parent1, Parent2):
b = 1
| true
| true
|
f719a460ed4a51e9b13467d22b0a48aecf11f8ca
| 346
|
py
|
Python
|
students/k3343/laboratory_works/Rolinskiy_Sergey/Laba_1/project_first_app/urls.py
|
TonikX/ITMO_ICT_-WebProgramming_2020
|
ba566c1b3ab04585665c69860b713741906935a0
|
[
"MIT"
] | 10
|
2020-03-20T09:06:12.000Z
|
2021-07-27T13:06:02.000Z
|
students/k3343/laboratory_works/Rolinskiy_Sergey/Laba_1/project_first_app/urls.py
|
TonikX/ITMO_ICT_-WebProgramming_2020
|
ba566c1b3ab04585665c69860b713741906935a0
|
[
"MIT"
] | 134
|
2020-03-23T09:47:48.000Z
|
2022-03-12T01:05:19.000Z
|
students/k3343/laboratory_works/Rolinskiy_Sergey/Laba_1/project_first_app/urls.py
|
TonikX/ITMO_ICT_-WebProgramming_2020
|
ba566c1b3ab04585665c69860b713741906935a0
|
[
"MIT"
] | 71
|
2020-03-20T12:45:56.000Z
|
2021-10-31T19:22:25.000Z
|
from django.urls import path
from django.conf.urls import url
from project_first_app.views import *
urlpatterns = [
    path('', main, name='main'),
    path('createowner/', createowner, name='createowner'),
    path('login/', log_in, name='login'),
    path(r'<int:ho_id>', review, name='detail')
]
#path(r'getowners/<int:ow_id>',detail,name='detail'),
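# Note: the '<int:ho_id>' converter passes the captured value as a keyword
# argument, so the corresponding view is assumed to look roughly like
#   def review(request, ho_id): ...
# in project_first_app/views.py.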
| 31.454545
| 56
| 0.699422
|
from django.urls import path
from django.conf.urls import url
from project_first_app.views import *
urlpatterns = [
    path('', main, name='main'),
    path('createowner/', createowner, name='createowner'),
    path('login/', log_in, name='login'),
    path(r'<int:ho_id>', review, name='detail')
]
| true
| true
|
f719a465158b15ac1c1bfd62374aefc6ed61f38a
| 36,465
|
py
|
Python
|
owslib/iso.py
|
peterataylor/OWSLib
|
8c15832da0c27dadfb567929ddd52a7570b7c231
|
[
"BSD-3-Clause"
] | 1
|
2015-03-16T05:22:04.000Z
|
2015-03-16T05:22:04.000Z
|
owslib/iso.py
|
peterataylor/OWSLib
|
8c15832da0c27dadfb567929ddd52a7570b7c231
|
[
"BSD-3-Clause"
] | null | null | null |
owslib/iso.py
|
peterataylor/OWSLib
|
8c15832da0c27dadfb567929ddd52a7570b7c231
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2009 Tom Kralidis
#
# Authors : Tom Kralidis <tomkralidis@gmail.com>
# Angelos Tzotsos <tzotsos@gmail.com>
#
# Contact email: tomkralidis@gmail.com
# =============================================================================
""" ISO metadata parser """
from owslib.etree import etree
from owslib import util
from owslib.namespaces import Namespaces
# default variables
def get_namespaces():
n = Namespaces()
ns = n.get_namespaces(["gco","gmd","gml","gml32","gmx","gts","srv","xlink"])
ns[None] = n.get_namespace("gmd")
return ns
namespaces = get_namespaces()
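# For reference, the resulting mapping is expected to contain the usual
# ISO 19139 URIs, e.g. namespaces['gmd'] == 'http://www.isotc211.org/2005/gmd'
# and namespaces['gco'] == 'http://www.isotc211.org/2005/gco'; the None key
# makes 'gmd' the default namespace for unprefixed path elements.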
class MD_Metadata(object):
""" Process gmd:MD_Metadata """
def __init__(self, md=None):
if md is None:
self.xml = None
self.identifier = None
self.parentidentifier = None
self.language = None
self.dataseturi = None
self.languagecode = None
self.datestamp = None
self.charset = None
self.hierarchy = None
self.contact = []
self.datetimestamp = None
self.stdname = None
self.stdver = None
self.referencesystem = None
self.identification = None
self.serviceidentification = None
self.identificationinfo = []
self.distribution = None
self.dataquality = None
else:
if hasattr(md, 'getroot'): # standalone document
self.xml = etree.tostring(md.getroot())
else: # part of a larger document
self.xml = etree.tostring(md)
val = md.find(util.nspath_eval('gmd:fileIdentifier/gco:CharacterString', namespaces))
self.identifier = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:parentIdentifier/gco:CharacterString', namespaces))
self.parentidentifier = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:language/gco:CharacterString', namespaces))
self.language = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:dataSetURI/gco:CharacterString', namespaces))
self.dataseturi = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:language/gmd:LanguageCode', namespaces))
self.languagecode = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:dateStamp/gco:Date', namespaces))
self.datestamp = util.testXMLValue(val)
if not self.datestamp:
val = md.find(util.nspath_eval('gmd:dateStamp/gco:DateTime', namespaces))
self.datestamp = util.testXMLValue(val)
self.charset = _testCodeListValue(md.find(util.nspath_eval('gmd:characterSet/gmd:MD_CharacterSetCode', namespaces)))
self.hierarchy = _testCodeListValue(md.find(util.nspath_eval('gmd:hierarchyLevel/gmd:MD_ScopeCode', namespaces)))
self.contact = []
for i in md.findall(util.nspath_eval('gmd:contact/gmd:CI_ResponsibleParty', namespaces)):
o = CI_ResponsibleParty(i)
self.contact.append(o)
val = md.find(util.nspath_eval('gmd:dateStamp/gco:DateTime', namespaces))
self.datetimestamp = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:metadataStandardName/gco:CharacterString', namespaces))
self.stdname = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:metadataStandardVersion/gco:CharacterString', namespaces))
self.stdver = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:referenceSystemInfo/gmd:MD_ReferenceSystem', namespaces))
if val is not None:
self.referencesystem = MD_ReferenceSystem(val)
else:
self.referencesystem = None
# TODO: merge .identificationinfo into .identification
#warnings.warn(
# 'the .identification and .serviceidentification properties will merge into '
# '.identification being a list of properties. This is currently implemented '
# 'in .identificationinfo. '
# 'Please see https://github.com/geopython/OWSLib/issues/38 for more information',
# FutureWarning)
val = md.find(util.nspath_eval('gmd:identificationInfo/gmd:MD_DataIdentification', namespaces))
val2 = md.find(util.nspath_eval('gmd:identificationInfo/srv:SV_ServiceIdentification', namespaces))
if val is not None:
self.identification = MD_DataIdentification(val, 'dataset')
self.serviceidentification = None
elif val2 is not None:
self.identification = MD_DataIdentification(val2, 'service')
self.serviceidentification = SV_ServiceIdentification(val2)
else:
self.identification = None
self.serviceidentification = None
self.identificationinfo = []
for idinfo in md.findall(util.nspath_eval('gmd:identificationInfo', namespaces)):
val = list(idinfo)[0]
tagval = util.xmltag_split(val.tag)
if tagval == 'MD_DataIdentification':
self.identificationinfo.append(MD_DataIdentification(val, 'dataset'))
elif tagval == 'MD_ServiceIdentification':
self.identificationinfo.append(MD_DataIdentification(val, 'service'))
elif tagval == 'SV_ServiceIdentification':
self.identificationinfo.append(SV_ServiceIdentification(val))
val = md.find(util.nspath_eval('gmd:distributionInfo/gmd:MD_Distribution', namespaces))
if val is not None:
self.distribution = MD_Distribution(val)
else:
self.distribution = None
val = md.find(util.nspath_eval('gmd:dataQualityInfo/gmd:DQ_DataQuality', namespaces))
if val is not None:
self.dataquality = DQ_DataQuality(val)
else:
self.dataquality = None
class CI_Date(object):
""" process CI_Date """
def __init__(self, md=None):
if md is None:
self.date = None
self.type = None
else:
val = md.find(util.nspath_eval('gmd:date/gco:Date', namespaces))
if val is not None:
self.date = util.testXMLValue(val)
else:
val = md.find(util.nspath_eval('gmd:date/gco:DateTime', namespaces))
if val is not None:
self.date = util.testXMLValue(val)
else:
self.date = None
val = md.find(util.nspath_eval('gmd:dateType/gmd:CI_DateTypeCode', namespaces))
self.type = _testCodeListValue(val)
class CI_ResponsibleParty(object):
""" process CI_ResponsibleParty """
def __init__(self, md=None):
if md is None:
self.name = None
self.organization = None
self.position = None
self.phone = None
self.fax = None
self.address = None
self.city = None
self.region = None
self.postcode = None
self.country = None
self.email = None
self.onlineresource = None
self.role = None
else:
val = md.find(util.nspath_eval('gmd:individualName/gco:CharacterString', namespaces))
self.name = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:organisationName/gco:CharacterString', namespaces))
self.organization = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:positionName/gco:CharacterString', namespaces))
self.position = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:phone/gmd:CI_Telephone/gmd:voice/gco:CharacterString', namespaces))
self.phone = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:phone/gmd:CI_Telephone/gmd:facsimile/gco:CharacterString', namespaces))
self.fax = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:deliveryPoint/gco:CharacterString', namespaces))
self.address = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:city/gco:CharacterString', namespaces))
self.city = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:administrativeArea/gco:CharacterString', namespaces))
self.region = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:postalCode/gco:CharacterString', namespaces))
self.postcode = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:country/gco:CharacterString', namespaces))
self.country = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:electronicMailAddress/gco:CharacterString', namespaces))
self.email = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:onlineResource/gmd:CI_OnlineResource', namespaces))
if val is not None:
self.onlineresource = CI_OnlineResource(val)
else:
self.onlineresource = None
self.role = _testCodeListValue(md.find(util.nspath_eval('gmd:role/gmd:CI_RoleCode', namespaces)))
class MD_DataIdentification(object):
""" process MD_DataIdentification """
def __init__(self, md=None, identtype=None):
if md is None:
self.identtype = None
self.title = None
self.alternatetitle = None
self.aggregationinfo = None
self.uricode = []
self.uricodespace = []
self.date = []
self.datetype = []
self.uselimitation = []
self.accessconstraints = []
self.classification = []
self.otherconstraints = []
self.securityconstraints = []
self.useconstraints = []
self.denominators = []
self.distance = []
self.uom = []
self.resourcelanguage = []
self.creator = None
self.publisher = None
self.originator = None
self.edition = None
self.abstract = None
self.purpose = None
self.status = None
self.contact = []
self.keywords = []
self.topiccategory = []
self.supplementalinformation = None
self.extent = None
self.bbox = None
self.temporalextent_start = None
self.temporalextent_end = None
else:
self.identtype = identtype
val = md.find(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
self.title = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:alternateTitle/gco:CharacterString', namespaces))
self.alternatetitle = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:aggregationInfo', namespaces))
self.aggregationinfo = util.testXMLValue(val)
self.uricode = []
for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:RS_Identifier/gmd:code/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.uricode.append(val)
self.uricodespace = []
for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:RS_Identifier/gmd:codeSpace/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.uricodespace.append(val)
self.date = []
self.datetype = []
for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:date/gmd:CI_Date', namespaces)):
self.date.append(CI_Date(i))
self.uselimitation = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_Constraints/gmd:useLimitation/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.uselimitation.append(val)
self.accessconstraints = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_RestrictionCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.accessconstraints.append(val)
self.classification = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_ClassificationCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.classification.append(val)
self.otherconstraints = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:otherConstraints/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.otherconstraints.append(val)
self.securityconstraints = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_SecurityConstraints/gmd:useLimitation', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.securityconstraints.append(val)
self.useconstraints = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:useConstraints/gmd:MD_RestrictionCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.useconstraints.append(val)
self.denominators = []
for i in md.findall(util.nspath_eval('gmd:spatialResolution/gmd:MD_Resolution/gmd:equivalentScale/gmd:MD_RepresentativeFraction/gmd:denominator/gco:Integer', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.denominators.append(val)
self.distance = []
self.uom = []
for i in md.findall(util.nspath_eval('gmd:spatialResolution/gmd:MD_Resolution/gmd:distance/gco:Distance', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.distance.append(val)
self.uom.append(i.get("uom"))
self.resourcelanguage = []
for i in md.findall(util.nspath_eval('gmd:language/gmd:LanguageCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.resourcelanguage.append(val)
val = md.find(util.nspath_eval('gmd:pointOfContact/gmd:CI_ResponsibleParty/gmd:organisationName', namespaces))
if val is not None:
val2 = val.find(util.nspath_eval('gmd:role/gmd:CI_RoleCode', namespaces))
if val2 is not None:
clv = _testCodeListValue(val)
if clv == 'originator':
self.creator = util.testXMLValue(val)
elif clv == 'publisher':
self.publisher = util.testXMLValue(val)
elif clv == 'contributor':
self.originator = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:edition/gco:CharacterString', namespaces))
self.edition = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:abstract/gco:CharacterString', namespaces))
self.abstract = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:purpose/gco:CharacterString', namespaces))
self.purpose = util.testXMLValue(val)
self.status = _testCodeListValue(md.find(util.nspath_eval('gmd:status/gmd:MD_ProgressCode', namespaces)))
self.contact = []
for i in md.findall(util.nspath_eval('gmd:pointOfContact/gmd:CI_ResponsibleParty', namespaces)):
o = CI_ResponsibleParty(i)
self.contact.append(o)
self.keywords = []
for i in md.findall(util.nspath_eval('gmd:descriptiveKeywords', namespaces)):
mdkw = {}
mdkw['type'] = _testCodeListValue(i.find(util.nspath_eval('gmd:MD_Keywords/gmd:type/gmd:MD_KeywordTypeCode', namespaces)))
mdkw['thesaurus'] = {}
val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
mdkw['thesaurus']['title'] = util.testXMLValue(val)
val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:Date', namespaces))
mdkw['thesaurus']['date'] = util.testXMLValue(val)
val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:dateType/gmd:CI_DateTypeCode', namespaces))
mdkw['thesaurus']['datetype'] = util.testXMLValue(val)
mdkw['keywords'] = []
for k in i.findall(util.nspath_eval('gmd:MD_Keywords/gmd:keyword', namespaces)):
val = k.find(util.nspath_eval('gco:CharacterString', namespaces))
if val is not None:
val2 = util.testXMLValue(val)
if val2 is not None:
mdkw['keywords'].append(val2)
self.keywords.append(mdkw)
self.topiccategory = []
for i in md.findall(util.nspath_eval('gmd:topicCategory/gmd:MD_TopicCategoryCode', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.topiccategory.append(val)
val = md.find(util.nspath_eval('gmd:supplementalInformation/gco:CharacterString', namespaces))
self.supplementalinformation = util.testXMLValue(val)
# There may be multiple geographicElement, create an extent
# from the one containing either an EX_GeographicBoundingBox or EX_BoundingPolygon.
# The schema also specifies an EX_GeographicDescription. This is not implemented yet.
val = None
val2 = None
val3 = None
extents = md.findall(util.nspath_eval('gmd:extent', namespaces))
extents.extend(md.findall(util.nspath_eval('srv:extent', namespaces)))
for extent in extents:
if val is None:
for e in extent.findall(util.nspath_eval('gmd:EX_Extent/gmd:geographicElement', namespaces)):
if e.find(util.nspath_eval('gmd:EX_GeographicBoundingBox', namespaces)) is not None or e.find(util.nspath_eval('gmd:EX_BoundingPolygon', namespaces)) is not None:
val = e
break
self.extent = EX_Extent(val)
self.bbox = self.extent.boundingBox # for backwards compatibility
if val2 is None:
val2 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:beginPosition', namespaces))
if val2 is None:
val2 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml32:TimePeriod/gml32:beginPosition', namespaces))
self.temporalextent_start = util.testXMLValue(val2)
if val3 is None:
val3 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:endPosition', namespaces))
if val3 is None:
val3 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml32:TimePeriod/gml32:endPosition', namespaces))
self.temporalextent_end = util.testXMLValue(val3)
class MD_Distributor(object):
""" process MD_Distributor """
def __init__(self, md=None):
if md is None:
self.contact = None
self.online = []
else:
self.contact = None
val = md.find(util.nspath_eval('gmd:MD_Distributor/gmd:distributorContact/gmd:CI_ResponsibleParty', namespaces))
if val is not None:
self.contact = CI_ResponsibleParty(val)
self.online = []
for ol in md.findall(util.nspath_eval('gmd:MD_Distributor/gmd:distributorTransferOptions/gmd:MD_DigitalTransferOptions/gmd:onLine/gmd:CI_OnlineResource', namespaces)):
self.online.append(CI_OnlineResource(ol))
class MD_Distribution(object):
""" process MD_Distribution """
def __init__(self, md=None):
if md is None:
self.format = None
self.version = None
self.distributor = []
self.online = []
pass
else:
val = md.find(util.nspath_eval('gmd:distributionFormat/gmd:MD_Format/gmd:name/gco:CharacterString', namespaces))
self.format = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:distributionFormat/gmd:MD_Format/gmd:version/gco:CharacterString', namespaces))
self.version = util.testXMLValue(val)
self.distributor = []
for dist in md.findall(util.nspath_eval('gmd:distributor', namespaces)):
self.distributor.append(MD_Distributor(dist))
self.online = []
for ol in md.findall(util.nspath_eval('gmd:transferOptions/gmd:MD_DigitalTransferOptions/gmd:onLine/gmd:CI_OnlineResource', namespaces)):
self.online.append(CI_OnlineResource(ol))
class DQ_DataQuality(object):
''' process DQ_DataQuality'''
def __init__(self, md=None):
if md is None:
self.conformancetitle = []
self.conformancedate = []
self.conformancedatetype = []
self.conformancedegree = []
self.lineage = None
self.specificationtitle = None
self.specificationdate = []
else:
self.conformancetitle = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.conformancetitle.append(val)
self.conformancedate = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:Date', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.conformancedate.append(val)
self.conformancedatetype = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:dateType/gmd:CI_DateTypeCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.conformancedatetype.append(val)
self.conformancedegree = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:pass/gco:Boolean', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.conformancedegree.append(val)
val = md.find(util.nspath_eval('gmd:lineage/gmd:LI_Lineage/gmd:statement/gco:CharacterString', namespaces))
self.lineage = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
self.specificationtitle = util.testXMLValue(val)
self.specificationdate = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.specificationdate.append(val)
class SV_ServiceIdentification(object):
""" process SV_ServiceIdentification """
def __init__(self, md=None):
if md is None:
self.identtype = 'service'
self.type = None
self.version = None
self.fees = None
self.bbox = None
self.couplingtype = None
self.operations = []
self.operateson = []
else:
self.identtype = 'service'
val = md.find(util.nspath_eval('srv:serviceType/gco:LocalName', namespaces))
self.type = util.testXMLValue(val)
val = md.find(util.nspath_eval('srv:serviceTypeVersion/gco:CharacterString', namespaces))
self.version = util.testXMLValue(val)
val = md.find(util.nspath_eval('srv:accessProperties/gmd:MD_StandardOrderProcess/gmd:fees/gco:CharacterString', namespaces))
self.fees = util.testXMLValue(val)
val = md.find(util.nspath_eval('srv:extent/gmd:EX_Extent', namespaces))
if val is not None:
self.bbox = EX_Extent(val)
else:
self.bbox = None
self.couplingtype = _testCodeListValue(md.find(util.nspath_eval('gmd:couplingType/gmd:SV_CouplingType', namespaces)))
self.operations = []
for i in md.findall(util.nspath_eval('srv:containsOperations', namespaces)):
tmp = {}
val = i.find(util.nspath_eval('srv:SV_OperationMetadata/srv:operationName/gco:CharacterString', namespaces))
tmp['name'] = util.testXMLValue(val)
tmp['dcplist'] = []
for d in i.findall(util.nspath_eval('srv:SV_OperationMetadata/srv:DCP', namespaces)):
tmp2 = _testCodeListValue(d.find(util.nspath_eval('srv:DCPList', namespaces)))
tmp['dcplist'].append(tmp2)
tmp['connectpoint'] = []
for d in i.findall(util.nspath_eval('srv:SV_OperationMetadata/srv:connectPoint', namespaces)):
tmp3 = d.find(util.nspath_eval('gmd:CI_OnlineResource', namespaces))
tmp['connectpoint'].append(CI_OnlineResource(tmp3))
self.operations.append(tmp)
self.operateson = []
for i in md.findall(util.nspath_eval('srv:operatesOn', namespaces)):
tmp = {}
tmp['uuidref'] = i.attrib.get('uuidref')
tmp['href'] = i.attrib.get(util.nspath_eval('xlink:href', namespaces))
tmp['title'] = i.attrib.get(util.nspath_eval('xlink:title', namespaces))
self.operateson.append(tmp)
class CI_OnlineResource(object):
""" process CI_OnlineResource """
def __init__(self,md=None):
if md is None:
self.url = None
self.protocol = None
self.name = None
self.description = None
self.function = None
else:
val = md.find(util.nspath_eval('gmd:linkage/gmd:URL', namespaces))
self.url = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:protocol/gco:CharacterString', namespaces))
self.protocol = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:name/gco:CharacterString', namespaces))
self.name = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:description/gco:CharacterString', namespaces))
self.description = util.testXMLValue(val)
self.function = _testCodeListValue(md.find(util.nspath_eval('gmd:function/gmd:CI_OnLineFunctionCode', namespaces)))
class EX_GeographicBoundingBox(object):
def __init__(self, md=None):
if md is None:
self.minx = None
self.maxx = None
self.miny = None
self.maxy = None
else:
val = md.find(util.nspath_eval('gmd:westBoundLongitude/gco:Decimal', namespaces))
self.minx = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:eastBoundLongitude/gco:Decimal', namespaces))
self.maxx = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:southBoundLatitude/gco:Decimal', namespaces))
self.miny = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:northBoundLatitude/gco:Decimal', namespaces))
self.maxy = util.testXMLValue(val)
class EX_Polygon(object):
def __init__(self, md=None):
if md is None:
self.exterior_ring = None
self.interior_rings = []
else:
linear_ring = md.find(util.nspath_eval('gml32:Polygon/gml32:exterior/gml32:LinearRing', namespaces))
if linear_ring is not None:
self.exterior_ring = self._coordinates_for_ring(linear_ring)
interior_ring_elements = md.findall(util.nspath_eval('gml32:Polygon/gml32:interior', namespaces))
self.interior_rings = []
for iring_element in interior_ring_elements:
linear_ring = iring_element.find(util.nspath_eval('gml32:LinearRing', namespaces))
self.interior_rings.append(self._coordinates_for_ring(linear_ring))
def _coordinates_for_ring(self, linear_ring):
coordinates = []
positions = linear_ring.findall(util.nspath_eval('gml32:pos', namespaces))
for pos in positions:
tokens = pos.text.split()
coords = tuple([float(t) for t in tokens])
coordinates.append(coords)
return coordinates
class EX_GeographicBoundingPolygon(object):
def __init__(self, md=None):
if md is None:
self.is_extent = None
self.polygons = []
else:
val = md.find(util.nspath_eval('gmd:extentTypeCode', namespaces))
self.is_extent = util.testXMLValue(val)
md_polygons = md.findall(util.nspath_eval('gmd:polygon', namespaces))
self.polygons = []
for val in md_polygons:
self.polygons.append(EX_Polygon(val))
class EX_Extent(object):
""" process EX_Extent """
def __init__(self, md=None):
if md is None:
self.boundingBox = None
self.boundingPolygon = None
self.description_code = None
else:
self.boundingBox = None
self.boundingPolygon = None
if md is not None:
bboxElement = md.find(util.nspath_eval('gmd:EX_GeographicBoundingBox', namespaces))
if bboxElement is not None:
self.boundingBox = EX_GeographicBoundingBox(bboxElement)
polygonElement = md.find(util.nspath_eval('gmd:EX_BoundingPolygon', namespaces))
if polygonElement is not None:
self.boundingPolygon = EX_GeographicBoundingPolygon(polygonElement)
val = md.find(util.nspath_eval('gmd:EX_GeographicDescription/gmd:geographicIdentifier/gmd:MD_Identifier/gmd:code/gco:CharacterString', namespaces))
self.description_code = util.testXMLValue(val)
class MD_ReferenceSystem(object):
""" process MD_ReferenceSystem """
def __init__(self, md):
if md is None:
pass
else:
val = md.find(util.nspath_eval('gmd:referenceSystemIdentifier/gmd:RS_Identifier/gmd:code/gco:CharacterString', namespaces))
self.code = util.testXMLValue(val)
def _testCodeListValue(elpath):
""" get gco:CodeListValue_Type attribute, else get text content """
if elpath is not None: # try to get @codeListValue
val = util.testXMLValue(elpath.attrib.get('codeListValue'), True)
if val is not None:
return val
else: # see if there is element text
return util.testXMLValue(elpath)
else:
return None
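# Illustrative example: for an element such as
#   <gmd:MD_ScopeCode codeListValue="dataset">dataset</gmd:MD_ScopeCode>
# _testCodeListValue returns 'dataset' from the codeListValue attribute,
# and falls back to the element text when the attribute is missing.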
class CodelistCatalogue(object):
""" process CT_CodelistCatalogue """
def __init__(self, ct):
val = ct.find(util.nspath_eval('gmx:name/gco:CharacterString', namespaces))
self.name = util.testXMLValue(val)
val = ct.find(util.nspath_eval('gmx:scope/gco:CharacterString', namespaces))
self.scope = util.testXMLValue(val)
val = ct.find(util.nspath_eval('gmx:fieldOfApplication/gco:CharacterString', namespaces))
self.fieldapp = util.testXMLValue(val)
val = ct.find(util.nspath_eval('gmx:versionNumber/gco:CharacterString', namespaces))
self.version = util.testXMLValue(val)
val = ct.find(util.nspath_eval('gmx:versionDate/gco:Date', namespaces))
self.date = util.testXMLValue(val)
self.dictionaries = {}
for i in ct.findall(util.nspath_eval('gmx:codelistItem/gmx:CodeListDictionary', namespaces)):
id = i.attrib.get(util.nspath_eval('gml32:id', namespaces))
self.dictionaries[id] = {}
val = i.find(util.nspath_eval('gml32:description', namespaces))
self.dictionaries[id]['description'] = util.testXMLValue(val)
val = i.find(util.nspath_eval('gml32:identifier', namespaces))
self.dictionaries[id]['identifier'] = util.testXMLValue(val)
self.dictionaries[id]['entries'] = {}
for j in i.findall(util.nspath_eval('gmx:codeEntry', namespaces)):
id2 = j.find(util.nspath_eval('gmx:CodeDefinition', namespaces)).attrib.get(util.nspath_eval('gml32:id', namespaces))
self.dictionaries[id]['entries'][id2] = {}
val = j.find(util.nspath_eval('gmx:CodeDefinition/gml32:description', namespaces))
self.dictionaries[id]['entries'][id2]['description'] = util.testXMLValue(val)
val = j.find(util.nspath_eval('gmx:CodeDefinition/gml32:identifier', namespaces))
self.dictionaries[id]['entries'][id2]['identifier'] = util.testXMLValue(val)
val = j.find(util.nspath_eval('gmx:CodeDefinition', namespaces)).attrib.get('codeSpace')
self.dictionaries[id]['entries'][id2]['codespace'] = util.testXMLValue(val, True)
def getcodelistdictionaries(self):
return self.dictionaries.keys()
def getcodedefinitionidentifiers(self, cdl):
        if cdl in self.dictionaries:
ids = []
for i in self.dictionaries[cdl]['entries']:
ids.append(self.dictionaries[cdl]['entries'][i]['identifier'])
return ids
else:
return None
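# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Assuming a local copy of an ISO/TS 19139 codelist catalogue (the file name
# 'gmxCodelists.xml' is hypothetical), CodelistCatalogue could be exercised like:
#
#     from owslib.etree import etree
#     cat = CodelistCatalogue(etree.parse('gmxCodelists.xml'))
#     print(cat.getcodelistdictionaries())
#     print(cat.getcodedefinitionidentifiers('MD_ScopeCode'))  # dictionary id is an example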
| 47.480469
| 225
| 0.604854
|
from owslib.etree import etree
from owslib import util
from owslib.namespaces import Namespaces
def get_namespaces():
n = Namespaces()
ns = n.get_namespaces(["gco","gmd","gml","gml32","gmx","gts","srv","xlink"])
ns[None] = n.get_namespace("gmd")
return ns
namespaces = get_namespaces()
class MD_Metadata(object):
def __init__(self, md=None):
if md is None:
self.xml = None
self.identifier = None
self.parentidentifier = None
self.language = None
self.dataseturi = None
self.languagecode = None
self.datestamp = None
self.charset = None
self.hierarchy = None
self.contact = []
self.datetimestamp = None
self.stdname = None
self.stdver = None
self.referencesystem = None
self.identification = None
self.serviceidentification = None
self.identificationinfo = []
self.distribution = None
self.dataquality = None
else:
if hasattr(md, 'getroot'):
self.xml = etree.tostring(md.getroot())
else:
self.xml = etree.tostring(md)
val = md.find(util.nspath_eval('gmd:fileIdentifier/gco:CharacterString', namespaces))
self.identifier = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:parentIdentifier/gco:CharacterString', namespaces))
self.parentidentifier = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:language/gco:CharacterString', namespaces))
self.language = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:dataSetURI/gco:CharacterString', namespaces))
self.dataseturi = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:language/gmd:LanguageCode', namespaces))
self.languagecode = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:dateStamp/gco:Date', namespaces))
self.datestamp = util.testXMLValue(val)
if not self.datestamp:
val = md.find(util.nspath_eval('gmd:dateStamp/gco:DateTime', namespaces))
self.datestamp = util.testXMLValue(val)
self.charset = _testCodeListValue(md.find(util.nspath_eval('gmd:characterSet/gmd:MD_CharacterSetCode', namespaces)))
self.hierarchy = _testCodeListValue(md.find(util.nspath_eval('gmd:hierarchyLevel/gmd:MD_ScopeCode', namespaces)))
self.contact = []
for i in md.findall(util.nspath_eval('gmd:contact/gmd:CI_ResponsibleParty', namespaces)):
o = CI_ResponsibleParty(i)
self.contact.append(o)
val = md.find(util.nspath_eval('gmd:dateStamp/gco:DateTime', namespaces))
self.datetimestamp = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:metadataStandardName/gco:CharacterString', namespaces))
self.stdname = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:metadataStandardVersion/gco:CharacterString', namespaces))
self.stdver = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:referenceSystemInfo/gmd:MD_ReferenceSystem', namespaces))
if val is not None:
self.referencesystem = MD_ReferenceSystem(val)
else:
self.referencesystem = None
val = md.find(util.nspath_eval('gmd:identificationInfo/gmd:MD_DataIdentification', namespaces))
val2 = md.find(util.nspath_eval('gmd:identificationInfo/srv:SV_ServiceIdentification', namespaces))
if val is not None:
self.identification = MD_DataIdentification(val, 'dataset')
self.serviceidentification = None
elif val2 is not None:
self.identification = MD_DataIdentification(val2, 'service')
self.serviceidentification = SV_ServiceIdentification(val2)
else:
self.identification = None
self.serviceidentification = None
self.identificationinfo = []
for idinfo in md.findall(util.nspath_eval('gmd:identificationInfo', namespaces)):
val = list(idinfo)[0]
tagval = util.xmltag_split(val.tag)
if tagval == 'MD_DataIdentification':
self.identificationinfo.append(MD_DataIdentification(val, 'dataset'))
elif tagval == 'MD_ServiceIdentification':
self.identificationinfo.append(MD_DataIdentification(val, 'service'))
elif tagval == 'SV_ServiceIdentification':
self.identificationinfo.append(SV_ServiceIdentification(val))
val = md.find(util.nspath_eval('gmd:distributionInfo/gmd:MD_Distribution', namespaces))
if val is not None:
self.distribution = MD_Distribution(val)
else:
self.distribution = None
val = md.find(util.nspath_eval('gmd:dataQualityInfo/gmd:DQ_DataQuality', namespaces))
if val is not None:
self.dataquality = DQ_DataQuality(val)
else:
self.dataquality = None
class CI_Date(object):
def __init__(self, md=None):
if md is None:
self.date = None
self.type = None
else:
val = md.find(util.nspath_eval('gmd:date/gco:Date', namespaces))
if val is not None:
self.date = util.testXMLValue(val)
else:
val = md.find(util.nspath_eval('gmd:date/gco:DateTime', namespaces))
if val is not None:
self.date = util.testXMLValue(val)
else:
self.date = None
val = md.find(util.nspath_eval('gmd:dateType/gmd:CI_DateTypeCode', namespaces))
self.type = _testCodeListValue(val)
class CI_ResponsibleParty(object):
def __init__(self, md=None):
if md is None:
self.name = None
self.organization = None
self.position = None
self.phone = None
self.fax = None
self.address = None
self.city = None
self.region = None
self.postcode = None
self.country = None
self.email = None
self.onlineresource = None
self.role = None
else:
val = md.find(util.nspath_eval('gmd:individualName/gco:CharacterString', namespaces))
self.name = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:organisationName/gco:CharacterString', namespaces))
self.organization = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:positionName/gco:CharacterString', namespaces))
self.position = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:phone/gmd:CI_Telephone/gmd:voice/gco:CharacterString', namespaces))
self.phone = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:phone/gmd:CI_Telephone/gmd:facsimile/gco:CharacterString', namespaces))
self.fax = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:deliveryPoint/gco:CharacterString', namespaces))
self.address = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:city/gco:CharacterString', namespaces))
self.city = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:administrativeArea/gco:CharacterString', namespaces))
self.region = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:postalCode/gco:CharacterString', namespaces))
self.postcode = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:country/gco:CharacterString', namespaces))
self.country = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:electronicMailAddress/gco:CharacterString', namespaces))
self.email = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:onlineResource/gmd:CI_OnlineResource', namespaces))
if val is not None:
self.onlineresource = CI_OnlineResource(val)
else:
self.onlineresource = None
self.role = _testCodeListValue(md.find(util.nspath_eval('gmd:role/gmd:CI_RoleCode', namespaces)))
class MD_DataIdentification(object):
def __init__(self, md=None, identtype=None):
if md is None:
self.identtype = None
self.title = None
self.alternatetitle = None
self.aggregationinfo = None
self.uricode = []
self.uricodespace = []
self.date = []
self.datetype = []
self.uselimitation = []
self.accessconstraints = []
self.classification = []
self.otherconstraints = []
self.securityconstraints = []
self.useconstraints = []
self.denominators = []
self.distance = []
self.uom = []
self.resourcelanguage = []
self.creator = None
self.publisher = None
self.originator = None
self.edition = None
self.abstract = None
self.purpose = None
self.status = None
self.contact = []
self.keywords = []
self.topiccategory = []
self.supplementalinformation = None
self.extent = None
self.bbox = None
self.temporalextent_start = None
self.temporalextent_end = None
else:
self.identtype = identtype
val = md.find(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
self.title = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:alternateTitle/gco:CharacterString', namespaces))
self.alternatetitle = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:aggregationInfo', namespaces))
self.aggregationinfo = util.testXMLValue(val)
self.uricode = []
for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:RS_Identifier/gmd:code/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.uricode.append(val)
self.uricodespace = []
for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:RS_Identifier/gmd:codeSpace/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.uricodespace.append(val)
self.date = []
self.datetype = []
for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:date/gmd:CI_Date', namespaces)):
self.date.append(CI_Date(i))
self.uselimitation = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_Constraints/gmd:useLimitation/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.uselimitation.append(val)
self.accessconstraints = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_RestrictionCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.accessconstraints.append(val)
self.classification = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_ClassificationCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.classification.append(val)
self.otherconstraints = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:otherConstraints/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.otherconstraints.append(val)
self.securityconstraints = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_SecurityConstraints/gmd:useLimitation', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.securityconstraints.append(val)
self.useconstraints = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:useConstraints/gmd:MD_RestrictionCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.useconstraints.append(val)
self.denominators = []
for i in md.findall(util.nspath_eval('gmd:spatialResolution/gmd:MD_Resolution/gmd:equivalentScale/gmd:MD_RepresentativeFraction/gmd:denominator/gco:Integer', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.denominators.append(val)
self.distance = []
self.uom = []
for i in md.findall(util.nspath_eval('gmd:spatialResolution/gmd:MD_Resolution/gmd:distance/gco:Distance', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.distance.append(val)
self.uom.append(i.get("uom"))
self.resourcelanguage = []
for i in md.findall(util.nspath_eval('gmd:language/gmd:LanguageCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.resourcelanguage.append(val)
val = md.find(util.nspath_eval('gmd:pointOfContact/gmd:CI_ResponsibleParty/gmd:organisationName', namespaces))
if val is not None:
val2 = val.find(util.nspath_eval('gmd:role/gmd:CI_RoleCode', namespaces))
if val2 is not None:
clv = _testCodeListValue(val)
if clv == 'originator':
self.creator = util.testXMLValue(val)
elif clv == 'publisher':
self.publisher = util.testXMLValue(val)
elif clv == 'contributor':
self.originator = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:edition/gco:CharacterString', namespaces))
self.edition = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:abstract/gco:CharacterString', namespaces))
self.abstract = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:purpose/gco:CharacterString', namespaces))
self.purpose = util.testXMLValue(val)
self.status = _testCodeListValue(md.find(util.nspath_eval('gmd:status/gmd:MD_ProgressCode', namespaces)))
self.contact = []
for i in md.findall(util.nspath_eval('gmd:pointOfContact/gmd:CI_ResponsibleParty', namespaces)):
o = CI_ResponsibleParty(i)
self.contact.append(o)
self.keywords = []
for i in md.findall(util.nspath_eval('gmd:descriptiveKeywords', namespaces)):
mdkw = {}
mdkw['type'] = _testCodeListValue(i.find(util.nspath_eval('gmd:MD_Keywords/gmd:type/gmd:MD_KeywordTypeCode', namespaces)))
mdkw['thesaurus'] = {}
val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
mdkw['thesaurus']['title'] = util.testXMLValue(val)
val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:Date', namespaces))
mdkw['thesaurus']['date'] = util.testXMLValue(val)
val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:dateType/gmd:CI_DateTypeCode', namespaces))
mdkw['thesaurus']['datetype'] = util.testXMLValue(val)
mdkw['keywords'] = []
for k in i.findall(util.nspath_eval('gmd:MD_Keywords/gmd:keyword', namespaces)):
val = k.find(util.nspath_eval('gco:CharacterString', namespaces))
if val is not None:
val2 = util.testXMLValue(val)
if val2 is not None:
mdkw['keywords'].append(val2)
self.keywords.append(mdkw)
self.topiccategory = []
for i in md.findall(util.nspath_eval('gmd:topicCategory/gmd:MD_TopicCategoryCode', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.topiccategory.append(val)
val = md.find(util.nspath_eval('gmd:supplementalInformation/gco:CharacterString', namespaces))
self.supplementalinformation = util.testXMLValue(val)
val = None
val2 = None
val3 = None
extents = md.findall(util.nspath_eval('gmd:extent', namespaces))
extents.extend(md.findall(util.nspath_eval('srv:extent', namespaces)))
for extent in extents:
if val is None:
for e in extent.findall(util.nspath_eval('gmd:EX_Extent/gmd:geographicElement', namespaces)):
if e.find(util.nspath_eval('gmd:EX_GeographicBoundingBox', namespaces)) is not None or e.find(util.nspath_eval('gmd:EX_BoundingPolygon', namespaces)) is not None:
val = e
break
self.extent = EX_Extent(val)
self.bbox = self.extent.boundingBox
if val2 is None:
val2 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:beginPosition', namespaces))
if val2 is None:
val2 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml32:TimePeriod/gml32:beginPosition', namespaces))
self.temporalextent_start = util.testXMLValue(val2)
if val3 is None:
val3 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:endPosition', namespaces))
if val3 is None:
val3 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml32:TimePeriod/gml32:endPosition', namespaces))
self.temporalextent_end = util.testXMLValue(val3)
class MD_Distributor(object):
def __init__(self, md=None):
if md is None:
self.contact = None
self.online = []
else:
self.contact = None
val = md.find(util.nspath_eval('gmd:MD_Distributor/gmd:distributorContact/gmd:CI_ResponsibleParty', namespaces))
if val is not None:
self.contact = CI_ResponsibleParty(val)
self.online = []
for ol in md.findall(util.nspath_eval('gmd:MD_Distributor/gmd:distributorTransferOptions/gmd:MD_DigitalTransferOptions/gmd:onLine/gmd:CI_OnlineResource', namespaces)):
self.online.append(CI_OnlineResource(ol))
class MD_Distribution(object):
def __init__(self, md=None):
if md is None:
self.format = None
self.version = None
self.distributor = []
self.online = []
pass
else:
val = md.find(util.nspath_eval('gmd:distributionFormat/gmd:MD_Format/gmd:name/gco:CharacterString', namespaces))
self.format = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:distributionFormat/gmd:MD_Format/gmd:version/gco:CharacterString', namespaces))
self.version = util.testXMLValue(val)
self.distributor = []
for dist in md.findall(util.nspath_eval('gmd:distributor', namespaces)):
self.distributor.append(MD_Distributor(dist))
self.online = []
for ol in md.findall(util.nspath_eval('gmd:transferOptions/gmd:MD_DigitalTransferOptions/gmd:onLine/gmd:CI_OnlineResource', namespaces)):
self.online.append(CI_OnlineResource(ol))
class DQ_DataQuality(object):
def __init__(self, md=None):
if md is None:
self.conformancetitle = []
self.conformancedate = []
self.conformancedatetype = []
self.conformancedegree = []
self.lineage = None
self.specificationtitle = None
self.specificationdate = []
else:
self.conformancetitle = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.conformancetitle.append(val)
self.conformancedate = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:Date', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.conformancedate.append(val)
self.conformancedatetype = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:dateType/gmd:CI_DateTypeCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.conformancedatetype.append(val)
self.conformancedegree = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:pass/gco:Boolean', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.conformancedegree.append(val)
val = md.find(util.nspath_eval('gmd:lineage/gmd:LI_Lineage/gmd:statement/gco:CharacterString', namespaces))
self.lineage = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
self.specificationtitle = util.testXMLValue(val)
self.specificationdate = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.specificationdate.append(val)
class SV_ServiceIdentification(object):
def __init__(self, md=None):
if md is None:
self.identtype = 'service'
self.type = None
self.version = None
self.fees = None
self.bbox = None
self.couplingtype = None
self.operations = []
self.operateson = []
else:
self.identtype = 'service'
val = md.find(util.nspath_eval('srv:serviceType/gco:LocalName', namespaces))
self.type = util.testXMLValue(val)
val = md.find(util.nspath_eval('srv:serviceTypeVersion/gco:CharacterString', namespaces))
self.version = util.testXMLValue(val)
val = md.find(util.nspath_eval('srv:accessProperties/gmd:MD_StandardOrderProcess/gmd:fees/gco:CharacterString', namespaces))
self.fees = util.testXMLValue(val)
val = md.find(util.nspath_eval('srv:extent/gmd:EX_Extent', namespaces))
if val is not None:
self.bbox = EX_Extent(val)
else:
self.bbox = None
self.couplingtype = _testCodeListValue(md.find(util.nspath_eval('gmd:couplingType/gmd:SV_CouplingType', namespaces)))
self.operations = []
for i in md.findall(util.nspath_eval('srv:containsOperations', namespaces)):
tmp = {}
val = i.find(util.nspath_eval('srv:SV_OperationMetadata/srv:operationName/gco:CharacterString', namespaces))
tmp['name'] = util.testXMLValue(val)
tmp['dcplist'] = []
for d in i.findall(util.nspath_eval('srv:SV_OperationMetadata/srv:DCP', namespaces)):
tmp2 = _testCodeListValue(d.find(util.nspath_eval('srv:DCPList', namespaces)))
tmp['dcplist'].append(tmp2)
tmp['connectpoint'] = []
for d in i.findall(util.nspath_eval('srv:SV_OperationMetadata/srv:connectPoint', namespaces)):
tmp3 = d.find(util.nspath_eval('gmd:CI_OnlineResource', namespaces))
tmp['connectpoint'].append(CI_OnlineResource(tmp3))
self.operations.append(tmp)
self.operateson = []
for i in md.findall(util.nspath_eval('srv:operatesOn', namespaces)):
tmp = {}
tmp['uuidref'] = i.attrib.get('uuidref')
tmp['href'] = i.attrib.get(util.nspath_eval('xlink:href', namespaces))
tmp['title'] = i.attrib.get(util.nspath_eval('xlink:title', namespaces))
self.operateson.append(tmp)
class CI_OnlineResource(object):
def __init__(self,md=None):
if md is None:
self.url = None
self.protocol = None
self.name = None
self.description = None
self.function = None
else:
val = md.find(util.nspath_eval('gmd:linkage/gmd:URL', namespaces))
self.url = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:protocol/gco:CharacterString', namespaces))
self.protocol = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:name/gco:CharacterString', namespaces))
self.name = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:description/gco:CharacterString', namespaces))
self.description = util.testXMLValue(val)
self.function = _testCodeListValue(md.find(util.nspath_eval('gmd:function/gmd:CI_OnLineFunctionCode', namespaces)))
class EX_GeographicBoundingBox(object):
def __init__(self, md=None):
if md is None:
self.minx = None
self.maxx = None
self.miny = None
self.maxy = None
else:
val = md.find(util.nspath_eval('gmd:westBoundLongitude/gco:Decimal', namespaces))
self.minx = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:eastBoundLongitude/gco:Decimal', namespaces))
self.maxx = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:southBoundLatitude/gco:Decimal', namespaces))
self.miny = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:northBoundLatitude/gco:Decimal', namespaces))
self.maxy = util.testXMLValue(val)
class EX_Polygon(object):
def __init__(self, md=None):
if md is None:
self.exterior_ring = None
self.interior_rings = []
else:
linear_ring = md.find(util.nspath_eval('gml32:Polygon/gml32:exterior/gml32:LinearRing', namespaces))
if linear_ring is not None:
self.exterior_ring = self._coordinates_for_ring(linear_ring)
interior_ring_elements = md.findall(util.nspath_eval('gml32:Polygon/gml32:interior', namespaces))
self.interior_rings = []
for iring_element in interior_ring_elements:
linear_ring = iring_element.find(util.nspath_eval('gml32:LinearRing', namespaces))
self.interior_rings.append(self._coordinates_for_ring(linear_ring))
def _coordinates_for_ring(self, linear_ring):
coordinates = []
positions = linear_ring.findall(util.nspath_eval('gml32:pos', namespaces))
for pos in positions:
tokens = pos.text.split()
coords = tuple([float(t) for t in tokens])
coordinates.append(coords)
return coordinates
class EX_GeographicBoundingPolygon(object):
def __init__(self, md=None):
if md is None:
self.is_extent = None
self.polygons = []
else:
val = md.find(util.nspath_eval('gmd:extentTypeCode', namespaces))
self.is_extent = util.testXMLValue(val)
md_polygons = md.findall(util.nspath_eval('gmd:polygon', namespaces))
self.polygons = []
for val in md_polygons:
self.polygons.append(EX_Polygon(val))
class EX_Extent(object):
def __init__(self, md=None):
if md is None:
self.boundingBox = None
self.boundingPolygon = None
self.description_code = None
else:
self.boundingBox = None
self.boundingPolygon = None
if md is not None:
bboxElement = md.find(util.nspath_eval('gmd:EX_GeographicBoundingBox', namespaces))
if bboxElement is not None:
self.boundingBox = EX_GeographicBoundingBox(bboxElement)
polygonElement = md.find(util.nspath_eval('gmd:EX_BoundingPolygon', namespaces))
if polygonElement is not None:
self.boundingPolygon = EX_GeographicBoundingPolygon(polygonElement)
val = md.find(util.nspath_eval('gmd:EX_GeographicDescription/gmd:geographicIdentifier/gmd:MD_Identifier/gmd:code/gco:CharacterString', namespaces))
self.description_code = util.testXMLValue(val)
class MD_ReferenceSystem(object):
def __init__(self, md):
if md is None:
pass
else:
val = md.find(util.nspath_eval('gmd:referenceSystemIdentifier/gmd:RS_Identifier/gmd:code/gco:CharacterString', namespaces))
self.code = util.testXMLValue(val)
def _testCodeListValue(elpath):
if elpath is not None:
val = util.testXMLValue(elpath.attrib.get('codeListValue'), True)
if val is not None:
return val
else:
return util.testXMLValue(elpath)
else:
return None
class CodelistCatalogue(object):
def __init__(self, ct):
val = ct.find(util.nspath_eval('gmx:name/gco:CharacterString', namespaces))
self.name = util.testXMLValue(val)
val = ct.find(util.nspath_eval('gmx:scope/gco:CharacterString', namespaces))
self.scope = util.testXMLValue(val)
val = ct.find(util.nspath_eval('gmx:fieldOfApplication/gco:CharacterString', namespaces))
self.fieldapp = util.testXMLValue(val)
val = ct.find(util.nspath_eval('gmx:versionNumber/gco:CharacterString', namespaces))
self.version = util.testXMLValue(val)
val = ct.find(util.nspath_eval('gmx:versionDate/gco:Date', namespaces))
self.date = util.testXMLValue(val)
self.dictionaries = {}
for i in ct.findall(util.nspath_eval('gmx:codelistItem/gmx:CodeListDictionary', namespaces)):
id = i.attrib.get(util.nspath_eval('gml32:id', namespaces))
self.dictionaries[id] = {}
val = i.find(util.nspath_eval('gml32:description', namespaces))
self.dictionaries[id]['description'] = util.testXMLValue(val)
val = i.find(util.nspath_eval('gml32:identifier', namespaces))
self.dictionaries[id]['identifier'] = util.testXMLValue(val)
self.dictionaries[id]['entries'] = {}
for j in i.findall(util.nspath_eval('gmx:codeEntry', namespaces)):
id2 = j.find(util.nspath_eval('gmx:CodeDefinition', namespaces)).attrib.get(util.nspath_eval('gml32:id', namespaces))
self.dictionaries[id]['entries'][id2] = {}
val = j.find(util.nspath_eval('gmx:CodeDefinition/gml32:description', namespaces))
self.dictionaries[id]['entries'][id2]['description'] = util.testXMLValue(val)
val = j.find(util.nspath_eval('gmx:CodeDefinition/gml32:identifier', namespaces))
self.dictionaries[id]['entries'][id2]['identifier'] = util.testXMLValue(val)
val = j.find(util.nspath_eval('gmx:CodeDefinition', namespaces)).attrib.get('codeSpace')
self.dictionaries[id]['entries'][id2]['codespace'] = util.testXMLValue(val, True)
def getcodelistdictionaries(self):
return self.dictionaries.keys()
def getcodedefinitionidentifiers(self, cdl):
        if cdl in self.dictionaries:
ids = []
for i in self.dictionaries[cdl]['entries']:
ids.append(self.dictionaries[cdl]['entries'][i]['identifier'])
return ids
else:
return None
| true
| true
|
f719a47cc5a7d23e73cc98dbe3e60cc827cae0aa
| 3,057
|
py
|
Python
|
fun.py
|
Grymlock/Guardian_Bot
|
0fac4cd37038a46d1d8b6eed3fbb79832bd7abf9
|
[
"MIT"
] | 1
|
2018-06-22T03:52:49.000Z
|
2018-06-22T03:52:49.000Z
|
fun.py
|
Grymlock/Guardian_Bot
|
0fac4cd37038a46d1d8b6eed3fbb79832bd7abf9
|
[
"MIT"
] | null | null | null |
fun.py
|
Grymlock/Guardian_Bot
|
0fac4cd37038a46d1d8b6eed3fbb79832bd7abf9
|
[
"MIT"
] | null | null | null |
import discord
import constants as c
from discord.ext import commands
import random as r
urls=['https://cdn.discordapp.com/attachments/433007901800398858/433047585121501194/maxresdefault.jpg','https://cdn.discordapp.com/attachments/442868510776098818/442879211296915466/9bt3n9w40bp01.jpg','https://cdn.discordapp.com/attachments/442323518860951589/443142715761360915/Dap.PNG',"https://cdn.discordapp.com/attachments/442323518860951589/443250907501821964/IMG_20180222_192827.jpg"]
badWords=["gamer","frick","fudge","heck","bubby"]
class Fun:
def __init__(self,bot):
self.bot=bot
@commands.command()
async def dab(self,ctx, *, member: discord.Member):
"everybody pause at 1:18"
try:
if member.id==426560497781833748 or member.id==c.owner_id:
await ctx.send("haha no")
else:
em=discord.Embed(title="",description='')
rand=r.randint(0,3)
em.set_image(url=str(urls[rand]))
await ctx.send(embed=em)
await ctx.send(str(member.mention))
except:
await ctx.send("Invalid user")
@commands.command()
async def bruhcat(self,ctx):
"bruh"
catembed=discord.Embed()
catembed.set_image(url="https://cdn.discordapp.com/attachments/444325494264037377/445300639631671296/bruh.gif")
await ctx.send(embed=catembed)
@commands.command()
async def blicky(self,ctx):
em=discord.Embed()
em.set_image(url="https://cdn.discordapp.com/attachments/444325494264037377/445407209359409163/27c3yf.png")
await ctx.send(embed=em)
async def on_message(self,message):
if message.author.bot:#prevents the bot from reacting to itself
pass
else:
for word in badWords:
if message.content==(word):
await message.channel.send(f"Please do not use the word '{word}' or I will report you and block you")
ran=r.randint(1,2000)
if ran==1:
await message.channel.send("^Are you listening to this retard lmao")
if message.content==("gm") or message.content==("good morning"):
await message.channel.send("Another day closer to death" + str(message.author.mention))
if message.content==("gn") or message.content==("good night"):
await message.channel.send("sleep tight boyo")
if message.content==("good bye"):
await message.channel.send("bye loser")
if message.content==("what do we want"):
await message.channel.send("Equality for women")
if message.content==("when do we want it"):
await message.channel.send("Now")
async def on_member_ban(self,guild,member):
        if member.id==c.owner_id:
await guild.owner.send("Can y'all stop banning my master")
def setup(bot):
bot.add_cog(Fun(bot))
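# --- Illustrative wiring sketch (added; token and prefix are placeholders) ---
# With the pre-Cog discord.py style used above, this extension would typically be
# loaded from a bot entry point roughly like:
#
#     from discord.ext import commands
#     bot = commands.Bot(command_prefix='!')
#     bot.load_extension('fun')   # imports this module and calls setup(bot)
#     bot.run('YOUR_TOKEN_HERE')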
| 44.955882
| 391
| 0.615309
|
import discord
import constants as c
from discord.ext import commands
import random as r
urls=['https://cdn.discordapp.com/attachments/433007901800398858/433047585121501194/maxresdefault.jpg','https://cdn.discordapp.com/attachments/442868510776098818/442879211296915466/9bt3n9w40bp01.jpg','https://cdn.discordapp.com/attachments/442323518860951589/443142715761360915/Dap.PNG',"https://cdn.discordapp.com/attachments/442323518860951589/443250907501821964/IMG_20180222_192827.jpg"]
badWords=["gamer","frick","fudge","heck","bubby"]
class Fun:
def __init__(self,bot):
self.bot=bot
@commands.command()
async def dab(self,ctx, *, member: discord.Member):
try:
if member.id==426560497781833748 or member.id==c.owner_id:
await ctx.send("haha no")
else:
em=discord.Embed(title="",description='')
rand=r.randint(0,3)
em.set_image(url=str(urls[rand]))
await ctx.send(embed=em)
await ctx.send(str(member.mention))
except:
await ctx.send("Invalid user")
@commands.command()
async def bruhcat(self,ctx):
catembed=discord.Embed()
catembed.set_image(url="https://cdn.discordapp.com/attachments/444325494264037377/445300639631671296/bruh.gif")
await ctx.send(embed=catembed)
@commands.command()
async def blicky(self,ctx):
em=discord.Embed()
em.set_image(url="https://cdn.discordapp.com/attachments/444325494264037377/445407209359409163/27c3yf.png")
await ctx.send(embed=em)
async def on_message(self,message):
if message.author.bot:
pass
else:
for word in badWords:
if message.content==(word):
await message.channel.send(f"Please do not use the word '{word}' or I will report you and block you")
ran=r.randint(1,2000)
if ran==1:
await message.channel.send("^Are you listening to this retard lmao")
if message.content==("gm") or message.content==("good morning"):
await message.channel.send("Another day closer to death" + str(message.author.mention))
if message.content==("gn") or message.content==("good night"):
await message.channel.send("sleep tight boyo")
if message.content==("good bye"):
await message.channel.send("bye loser")
if message.content==("what do we want"):
await message.channel.send("Equality for women")
if message.content==("when do we want it"):
await message.channel.send("Now")
async def on_member_ban(self,guild,member):
        if member.id==c.owner_id:
await guild.owner.send("Can y'all stop banning my master")
def setup(bot):
bot.add_cog(Fun(bot))
| true
| true
|
f719a5d3a4154a174de4fc3bb0bdc9ef6f49b521
| 1,012
|
py
|
Python
|
test/schemes/test_qz.py
|
stormymcstorm/condensa
|
ee3bf993b0032e5d84aeb3cc7f0ddcdb8d846bd9
|
[
"Apache-2.0"
] | 153
|
2019-05-29T15:10:38.000Z
|
2022-03-05T05:20:55.000Z
|
test/schemes/test_qz.py
|
rogerxujiang/condensa
|
c7321e0a362f73eca9349769b341a7dd688ee1b9
|
[
"Apache-2.0"
] | 5
|
2019-07-11T20:56:38.000Z
|
2022-03-14T10:12:15.000Z
|
test/schemes/test_qz.py
|
rogerxujiang/condensa
|
c7321e0a362f73eca9349769b341a7dd688ee1b9
|
[
"Apache-2.0"
] | 21
|
2019-05-30T22:21:54.000Z
|
2022-03-14T07:06:52.000Z
|
# Copyright 2019 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import condensa
from condensa import schemes
def test_float16(device):
scheme = schemes.Quantize(condensa.float16)
fc = torch.nn.Linear(100, 10).float().to(device)
scheme.pi(fc)
assert fc.weight.dtype == torch.float16
scheme.delta(fc)
assert fc.weight.dtype == torch.float32
if __name__ == '__main__':
test_float16('cpu')
if torch.cuda.is_available():
        test_float16('cuda')
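# --- Illustrative extension (added sketch, untested) ---
# Assuming the Quantize scheme treats other layer types the same way, an analogous
# check for a convolutional layer might look like:
#
#     conv = torch.nn.Conv2d(3, 8, 3).float()
#     scheme = schemes.Quantize(condensa.float16)
#     scheme.pi(conv)
#     assert conv.weight.dtype == torch.float16
#     scheme.delta(conv)
#     assert conv.weight.dtype == torch.float32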
| 30.666667
| 74
| 0.733202
|
import torch
import condensa
from condensa import schemes
def test_float16(device):
scheme = schemes.Quantize(condensa.float16)
fc = torch.nn.Linear(100, 10).float().to(device)
scheme.pi(fc)
assert fc.weight.dtype == torch.float16
scheme.delta(fc)
assert fc.weight.dtype == torch.float32
if __name__ == '__main__':
test_float16('cpu')
if torch.cuda.is_available():
        test_float16('cuda')
| true
| true
|
f719a5ec02915c2d40aa2c28ddf93147dd695082
| 6,791
|
py
|
Python
|
objects/CSCG/_3d/forms/standard/base/export/field.py
|
mathischeap/mifem
|
3242e253fb01ca205a76568eaac7bbdb99e3f059
|
[
"MIT"
] | 1
|
2020-10-14T12:48:35.000Z
|
2020-10-14T12:48:35.000Z
|
objects/CSCG/_3d/forms/standard/base/export/field.py
|
mathischeap/mifem
|
3242e253fb01ca205a76568eaac7bbdb99e3f059
|
[
"MIT"
] | null | null | null |
objects/CSCG/_3d/forms/standard/base/export/field.py
|
mathischeap/mifem
|
3242e253fb01ca205a76568eaac7bbdb99e3f059
|
[
"MIT"
] | null | null | null |
"""We want to export the field to some data files.
"""
from root.config.main import *
from screws.freeze.main import FrozenOnly
from screws.miscellaneous.timer import check_filename, check_no_splcharacter
from scipy.io import savemat
class _3dCSC_SF_Export_Field(FrozenOnly):
""""""
def __init__(self, sf):
""""""
assert '3dCSCG_standard_form' in sf.standard_properties.tags
self._sf_ = sf
self._freeze_self_()
def to_file(self, filename, numOfSamples=1e6, regions=None):
""""""
filename, extension = check_filename(filename)
if extension is None: extension = 'txt'
supported_formats = ('txt', 'mat')
assert extension in supported_formats, \
f"format={extension} is not among the supported formats {supported_formats}."
if isinstance(numOfSamples, (int, float)):
assert numOfSamples > 0, f"numOfSamples={numOfSamples} is wrong."
numOfSamples = [numOfSamples, numOfSamples, numOfSamples]
else:
assert isinstance(numOfSamples, (tuple, list)) and len(numOfSamples) == 3, \
f"numOfSamples={numOfSamples} wrong."
for nos in numOfSamples:
assert isinstance(nos, (int, float)) and nos > 0, f"numOfSamples={numOfSamples} wrong."
mesh = self._sf_.mesh
if regions is None:
regions = mesh.domain.regions.names
elif isinstance(regions, str):
regions = [regions,]
else:
pass
assert isinstance(regions, (list, tuple)), f"regions={regions} is wrong."
assert len(set(regions)) == len(regions), f"regions={regions} has repeated regions."
for i, r in enumerate(regions):
assert r in mesh.domain.regions, f"regions[{i}]={r} is wrong."
rst = list()
for i in range(3):
density = int((numOfSamples[i] / mesh.elements.GLOBAL_num) ** (1/3)) + 1
interval = 2 / density
rst.append(np.linspace(-1 + interval/2, 1-interval/2, density))
xyz, v = self._sf_.reconstruct(*rst, regions=regions)
# Now, we gather xyz & v from all cores into Master Core, store in XYZ & V --- BELOW ---
if rAnk == mAster_rank:
X = [None for _ in range(mesh.elements.GLOBAL_num)]
Y = [None for _ in range(mesh.elements.GLOBAL_num)]
Z = [None for _ in range(mesh.elements.GLOBAL_num)]
Vx = [None for _ in range(mesh.elements.GLOBAL_num)]
if self._sf_.k in (1, 2):
Vy = [None for _ in range(mesh.elements.GLOBAL_num)]
Vz = [None for _ in range(mesh.elements.GLOBAL_num)]
for j in mesh.elements.indices:
X[j] = xyz[j][0]
Y[j] = xyz[j][1]
Z[j] = xyz[j][2]
Vx[j] = v[j][0]
if self._sf_.k in (1, 2):
# noinspection PyUnboundLocalVariable
Vy[j] = v[j][1]
# noinspection PyUnboundLocalVariable
Vz[j] = v[j][2]
for i in sLave_ranks:
xyz, v = cOmm.recv(source=i, tag=0)
for j in xyz:
X[j] = xyz[j][0]
Y[j] = xyz[j][1]
Z[j] = xyz[j][2]
Vx[j] = v[j][0]
if self._sf_.k in (1, 2):
Vy[j] = v[j][1]
Vz[j] = v[j][2]
del xyz, v
else:
cOmm.send([xyz, v], dest=mAster_rank, tag=0)
del xyz, v
# Now, we reshape the XYZ and V for export in the master core. -------- BELOW ----------
if rAnk == mAster_rank:
if self._sf_.k in (1, 2):
# noinspection PyUnboundLocalVariable
X, Y, Z, Vx, Vy, Vz = mesh.do.regionwsie_stack(X, Y, Z, Vx, Vy, Vz)
else:
# noinspection PyUnboundLocalVariable
X, Y, Z, V = mesh.do.regionwsie_stack(X, Y, Z, Vx)
for rn in regions:
assert rn in X and rn in Y and rn in Z, "Data not full!"
x, y, z = X[rn], Y[rn], Z[rn]
if self._sf_.k in (1, 2):
vx, vy, vz = Vx[rn], Vy[rn], Vz[rn]
else:
# noinspection PyUnboundLocalVariable
vx = V[rn]
# we take care of the file names ------------------ BELOW -----------------------
                RN = rn[2:]  # region names look like 'R:center'; keep only the part after 'R:'
assert check_no_splcharacter(RN), f"region name={RN} wrong."
FILE_NAME = filename + '__InRegion_' + RN
if self._sf_.k in (1, 2):
FILE_NAME += '__x_y_z_vx_vy_vz'
else:
FILE_NAME += '__x_y_z_v'
FILE_NAME = FILE_NAME + '.' + extension
# It's time to do the save or writing ------------------- BELOW -----------------
if extension == 'txt':
# for .txt, we have to flat the data =====================
x = x.ravel(order='F')[:,np.newaxis]
y = y.ravel(order='F')[:,np.newaxis]
z = z.ravel(order='F')[:,np.newaxis]
if self._sf_.k in (1, 2):
vx = vx.ravel(order='F')[:,np.newaxis]
# noinspection PyUnboundLocalVariable
vy = vy.ravel(order='F')[:,np.newaxis]
# noinspection PyUnboundLocalVariable
vz = vz.ravel(order='F')[:,np.newaxis]
else:
vx = vx.ravel(order='F')[:,np.newaxis]
if self._sf_.k in (1, 2):
# noinspection PyUnboundLocalVariable
TO_BE_WRITTEN = np.hstack((x, y, z, vx, vy, vz))
else:
TO_BE_WRITTEN = np.hstack((x, y, z, vx))
# noinspection PyTypeChecker
np.savetxt(FILE_NAME, TO_BE_WRITTEN)
elif extension == 'mat':
# for .mat, we save 3-d arrays. ==========================
m_dic = dict()
m_dic['x'] = x
m_dic['y'] = y
m_dic['z'] = z
if self._sf_.k in (1, 2):
m_dic['vx'] = vx
m_dic['vy'] = vy
m_dic['vz'] = vz
else:
m_dic['v'] = vx
savemat(FILE_NAME, m_dic)
else:
raise Exception(f"Format=.{extension} is not supported.")
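# --- Illustrative usage sketch (added; file name and sample count are examples) ---
# Assuming `sf` is a 3dCSCG standard form object (its tags must include
# '3dCSCG_standard_form'), the exporter defined above could be driven like:
#
#     exporter = _3dCSC_SF_Export_Field(sf)
#     exporter.to_file('field_data.mat', numOfSamples=1e5, regions=None)  # all regions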
| 41.408537
| 103
| 0.472684
|
from root.config.main import *
from screws.freeze.main import FrozenOnly
from screws.miscellaneous.timer import check_filename, check_no_splcharacter
from scipy.io import savemat
class _3dCSC_SF_Export_Field(FrozenOnly):
def __init__(self, sf):
assert '3dCSCG_standard_form' in sf.standard_properties.tags
self._sf_ = sf
self._freeze_self_()
def to_file(self, filename, numOfSamples=1e6, regions=None):
filename, extension = check_filename(filename)
if extension is None: extension = 'txt'
supported_formats = ('txt', 'mat')
assert extension in supported_formats, \
f"format={extension} is not among the supported formats {supported_formats}."
if isinstance(numOfSamples, (int, float)):
assert numOfSamples > 0, f"numOfSamples={numOfSamples} is wrong."
numOfSamples = [numOfSamples, numOfSamples, numOfSamples]
else:
assert isinstance(numOfSamples, (tuple, list)) and len(numOfSamples) == 3, \
f"numOfSamples={numOfSamples} wrong."
for nos in numOfSamples:
assert isinstance(nos, (int, float)) and nos > 0, f"numOfSamples={numOfSamples} wrong."
mesh = self._sf_.mesh
if regions is None:
regions = mesh.domain.regions.names
elif isinstance(regions, str):
regions = [regions,]
else:
pass
assert isinstance(regions, (list, tuple)), f"regions={regions} is wrong."
assert len(set(regions)) == len(regions), f"regions={regions} has repeated regions."
for i, r in enumerate(regions):
assert r in mesh.domain.regions, f"regions[{i}]={r} is wrong."
rst = list()
for i in range(3):
density = int((numOfSamples[i] / mesh.elements.GLOBAL_num) ** (1/3)) + 1
interval = 2 / density
rst.append(np.linspace(-1 + interval/2, 1-interval/2, density))
xyz, v = self._sf_.reconstruct(*rst, regions=regions)
if rAnk == mAster_rank:
X = [None for _ in range(mesh.elements.GLOBAL_num)]
Y = [None for _ in range(mesh.elements.GLOBAL_num)]
Z = [None for _ in range(mesh.elements.GLOBAL_num)]
Vx = [None for _ in range(mesh.elements.GLOBAL_num)]
if self._sf_.k in (1, 2):
Vy = [None for _ in range(mesh.elements.GLOBAL_num)]
Vz = [None for _ in range(mesh.elements.GLOBAL_num)]
for j in mesh.elements.indices:
X[j] = xyz[j][0]
Y[j] = xyz[j][1]
Z[j] = xyz[j][2]
Vx[j] = v[j][0]
if self._sf_.k in (1, 2):
Vy[j] = v[j][1]
Vz[j] = v[j][2]
for i in sLave_ranks:
xyz, v = cOmm.recv(source=i, tag=0)
for j in xyz:
X[j] = xyz[j][0]
Y[j] = xyz[j][1]
Z[j] = xyz[j][2]
Vx[j] = v[j][0]
if self._sf_.k in (1, 2):
Vy[j] = v[j][1]
Vz[j] = v[j][2]
del xyz, v
else:
cOmm.send([xyz, v], dest=mAster_rank, tag=0)
del xyz, v
if rAnk == mAster_rank:
if self._sf_.k in (1, 2):
X, Y, Z, Vx, Vy, Vz = mesh.do.regionwsie_stack(X, Y, Z, Vx, Vy, Vz)
else:
X, Y, Z, V = mesh.do.regionwsie_stack(X, Y, Z, Vx)
for rn in regions:
assert rn in X and rn in Y and rn in Z, "Data not full!"
x, y, z = X[rn], Y[rn], Z[rn]
if self._sf_.k in (1, 2):
vx, vy, vz = Vx[rn], Vy[rn], Vz[rn]
else:
vx = V[rn]
RN = rn[2:]
assert check_no_splcharacter(RN), f"region name={RN} wrong."
FILE_NAME = filename + '__InRegion_' + RN
if self._sf_.k in (1, 2):
FILE_NAME += '__x_y_z_vx_vy_vz'
else:
FILE_NAME += '__x_y_z_v'
FILE_NAME = FILE_NAME + '.' + extension
if extension == 'txt':
# for .txt, we have to flat the data =====================
x = x.ravel(order='F')[:,np.newaxis]
y = y.ravel(order='F')[:,np.newaxis]
z = z.ravel(order='F')[:,np.newaxis]
if self._sf_.k in (1, 2):
vx = vx.ravel(order='F')[:,np.newaxis]
# noinspection PyUnboundLocalVariable
vy = vy.ravel(order='F')[:,np.newaxis]
# noinspection PyUnboundLocalVariable
vz = vz.ravel(order='F')[:,np.newaxis]
else:
vx = vx.ravel(order='F')[:,np.newaxis]
if self._sf_.k in (1, 2):
# noinspection PyUnboundLocalVariable
TO_BE_WRITTEN = np.hstack((x, y, z, vx, vy, vz))
else:
TO_BE_WRITTEN = np.hstack((x, y, z, vx))
# noinspection PyTypeChecker
np.savetxt(FILE_NAME, TO_BE_WRITTEN)
elif extension == 'mat':
# for .mat, we save 3-d arrays. ==========================
m_dic = dict()
m_dic['x'] = x
m_dic['y'] = y
m_dic['z'] = z
if self._sf_.k in (1, 2):
m_dic['vx'] = vx
m_dic['vy'] = vy
m_dic['vz'] = vz
else:
m_dic['v'] = vx
savemat(FILE_NAME, m_dic)
else:
raise Exception(f"Format=.{extension} is not supported.")
| true
| true
|
f719a60077cb4b23bbe3c54efafc1d30bc3f8163
| 3,252
|
py
|
Python
|
config.py
|
LongKt7/Face_Recognize_Pytorch
|
baa02e633d379abe1001c8b8acb942617177329c
|
[
"MIT"
] | 1
|
2019-03-13T16:05:11.000Z
|
2019-03-13T16:05:11.000Z
|
config.py
|
LongKt7/Face_Recognize_Pytorch
|
baa02e633d379abe1001c8b8acb942617177329c
|
[
"MIT"
] | null | null | null |
config.py
|
LongKt7/Face_Recognize_Pytorch
|
baa02e633d379abe1001c8b8acb942617177329c
|
[
"MIT"
] | 1
|
2019-03-15T09:09:08.000Z
|
2019-03-15T09:09:08.000Z
|
from easydict import EasyDict as edict
# from pathlib import Path
import torch
import os
from torchvision import transforms as trans
from utils.constants import *
list_model = ['wget https://www.dropbox.com/s/akktsgxp0n8cwn2/model_mobilefacenet.pth?dl=0 -O model_mobilefacenet.pth',
'wget https://www.dropbox.com/s/kzo52d9neybjxsb/model_ir_se50.pth?dl=0 -O model_ir_se50.pth',
'wget https://www.dropbox.com/s/rxavczg9dlxy3a8/model_ir50.pth?dl=0 -O model_ir50.pth']
def get_config(mode = 'app', net_size = 'large', net_mode = 'ir_se', use_mtcnn = 1, threshold = 1.25):
conf = edict()
conf.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
conf.input_size = [112, 112]
conf.face_limit = 5
conf.min_face_size = 30
conf.mode = mode
conf.net_size = net_size
if mode =='app':
        assert net_size in ['mobi', 'large', None], 'net_size should be mobi or large, please change in config.py'
conf.use_tensor = True
conf.work_path = WORK_PATH
conf.model_path = '%s/models'%WORK_PATH
conf.log_path = '%s/log'%WORK_PATH
conf.save_path = '%s/save'%WORK_PATH
conf.facebank_path = '%s/Face_bank'%WORK_PATH
conf.threshold = threshold
if use_mtcnn:
conf.use_mtcnn = True
else:
conf.use_mtcnn = False
#when inference, at maximum detect 10 faces in one image, my laptop is slow
conf.test_transform = trans.Compose([
trans.ToTensor(),
trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
if net_size == 'large':
conf.use_mobilfacenet = False
if net_mode == 'ir_se':
conf.net_mode = 'ir_se' # or 'ir'
conf.weight_path = '%s/weights/model_ir_se50.pth'%WORK_PATH
conf.url = list_model[1]
else:
                conf.net_mode = 'ir'
conf.weight_path = '%s/weights/model_ir50.pth'%WORK_PATH
conf.url = list_model[2]
if net_size =='mobi':
conf.use_mobilfacenet = True
conf.weight_path = '%s/weights/model_mobilefacenet.pth'%WORK_PATH
conf.url = list_model[0]
conf.video_source = 0
if mode =='training_eval':
conf.lr = 1e-3
conf.milestones = [18,30,42]
conf.momentum = 0.9
conf.pin_memory = True
# conf.num_workers = 4 # when batchsize is 200
conf.num_workers = 3
conf.train_root = "/mnt/01D4A1D481139570/Dataset/Face/casia"
conf.file_list = '/mnt/01D4A1D481139570/Dataset/Face/casia_train.txt'
conf.batch_size = 4
conf.lfw_root = '/mnt/01D4A1D481139570/Dataset/Face/data/LFW/lfw_align_112'
conf.lfw_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/LFW/pairs.txt'
conf.agedb_root = '/mnt/01D4A1D481139570/Dataset/Face/data/AgeDB-30/agedb30_align_112'
conf.agedb_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/AgeDB-30/agedb_30_pair.txt'
conf.cfp_root = '/mnt/01D4A1D481139570/Dataset/Face/data/CFP-FP/CFP_FP_aligned_112'
conf.cfp_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/CFP-FP/cfp_fp_pair.txt'
return conf
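# --- Illustrative usage (added sketch; argument values are examples only) ---
if __name__ == '__main__':
    # Build an app-mode configuration for the MobileFaceNet backbone and show a few fields.
    demo_conf = get_config(mode='app', net_size='mobi', net_mode='ir_se', use_mtcnn=1, threshold=1.25)
    print(demo_conf.device, demo_conf.net_size, demo_conf.weight_path)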
| 47.823529
| 119
| 0.634071
|
from easydict import EasyDict as edict
import torch
import os
from torchvision import transforms as trans
from utils.constants import *
list_model = ['wget https://www.dropbox.com/s/akktsgxp0n8cwn2/model_mobilefacenet.pth?dl=0 -O model_mobilefacenet.pth',
'wget https://www.dropbox.com/s/kzo52d9neybjxsb/model_ir_se50.pth?dl=0 -O model_ir_se50.pth',
'wget https://www.dropbox.com/s/rxavczg9dlxy3a8/model_ir50.pth?dl=0 -O model_ir50.pth']
def get_config(mode = 'app', net_size = 'large', net_mode = 'ir_se', use_mtcnn = 1, threshold = 1.25):
conf = edict()
conf.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
conf.input_size = [112, 112]
conf.face_limit = 5
conf.min_face_size = 30
conf.mode = mode
conf.net_size = net_size
if mode =='app':
        assert net_size in ['mobi', 'large', None], 'net_size should be mobi or large, please change in config.py'
conf.use_tensor = True
conf.work_path = WORK_PATH
conf.model_path = '%s/models'%WORK_PATH
conf.log_path = '%s/log'%WORK_PATH
conf.save_path = '%s/save'%WORK_PATH
conf.facebank_path = '%s/Face_bank'%WORK_PATH
conf.threshold = threshold
if use_mtcnn:
conf.use_mtcnn = True
else:
conf.use_mtcnn = False
conf.test_transform = trans.Compose([
trans.ToTensor(),
trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
if net_size == 'large':
conf.use_mobilfacenet = False
if net_mode == 'ir_se':
conf.net_mode = 'ir_se'
conf.weight_path = '%s/weights/model_ir_se50.pth'%WORK_PATH
conf.url = list_model[1]
else:
conf.net_mode = 'ir'
conf.weight_path = '%s/weights/model_ir50.pth'%WORK_PATH
conf.url = list_model[2]
if net_size =='mobi':
conf.use_mobilfacenet = True
conf.weight_path = '%s/weights/model_mobilefacenet.pth'%WORK_PATH
conf.url = list_model[0]
conf.video_source = 0
if mode =='training_eval':
conf.lr = 1e-3
conf.milestones = [18,30,42]
conf.momentum = 0.9
conf.pin_memory = True
        conf.num_workers = 3
conf.train_root = "/mnt/01D4A1D481139570/Dataset/Face/casia"
conf.file_list = '/mnt/01D4A1D481139570/Dataset/Face/casia_train.txt'
conf.batch_size = 4
conf.lfw_root = '/mnt/01D4A1D481139570/Dataset/Face/data/LFW/lfw_align_112'
conf.lfw_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/LFW/pairs.txt'
conf.agedb_root = '/mnt/01D4A1D481139570/Dataset/Face/data/AgeDB-30/agedb30_align_112'
conf.agedb_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/AgeDB-30/agedb_30_pair.txt'
conf.cfp_root = '/mnt/01D4A1D481139570/Dataset/Face/data/CFP-FP/CFP_FP_aligned_112'
conf.cfp_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/CFP-FP/cfp_fp_pair.txt'
return conf
| true
| true
|
f719a616152547d0300a25992cdb6dbefb41b0a6
| 16,599
|
py
|
Python
|
utils/tests/test_util.py
|
Splendon/examples
|
ed4a8a01857b6ddca49559141acf5d0986eb01e1
|
[
"MIT"
] | null | null | null |
utils/tests/test_util.py
|
Splendon/examples
|
ed4a8a01857b6ddca49559141acf5d0986eb01e1
|
[
"MIT"
] | null | null | null |
utils/tests/test_util.py
|
Splendon/examples
|
ed4a8a01857b6ddca49559141acf5d0986eb01e1
|
[
"MIT"
] | null | null | null |
# Copyright 2019 Graphcore Ltd.
from statistics import mean
import numpy as np
import os
import re
import subprocess
import sys
import time
"""Library of utility functions common between frameworks"""
def parse_results_for_speed(output, iter_tolerance, speed_tolerance):
"""Look for <iter number> sec/itr. <speed number> {other stuff}"""
found_a_result = False
for line in output.split("\n"):
matches = re.match(r"([\d.]+) +sec/itr. +([\d.]+)", line)
if matches:
found_a_result = True
iterations, speed = matches.groups()
iterations = float(iterations)
speed = float(speed)
_verify_model_numbers(
iter_tolerance, iterations, speed_tolerance, speed, line
)
if not found_a_result:
raise AssertionError("No results detected in this run")
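# --- Illustrative note (added) ---
# A line such as "0.523 sec/itr. 1200.5" satisfies the regex above and would be
# checked as iterations=0.523 sec/itr and speed=1200.5 items/sec.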
def parse_results_for_accuracy(output, expected_accuracies, acc_tolerance):
"""Look for Accuracy=<accuracy>%"""
accuracies = []
for line in output.split("\n"):
if re.match(r" + Accuracy=+([\d.]+)%", line):
accuracy = float(re.match(r" + Accuracy=+([\d.]+)%", line).groups()[0])
accuracies.append(accuracy)
elif re.search(r"Validation accuracy", line):
accuracy_str = re.search(r"accuracy:\s(.*)", line).group(1)
accuracy = float(accuracy_str[:accuracy_str.rfind("%")])
accuracies.append(accuracy)
if len(accuracies) == 0:
raise AssertionError("No results detected in this run")
elif len(accuracies) != len(expected_accuracies):
raise AssertionError("Expected accuracies and parsed accuracies have"
" different lengths")
_verify_model_accuracies(accuracies, expected_accuracies, acc_tolerance)
def _verify_model_numbers(iter_tolerance, iterations,
speed_tolerance, speed, line):
iter_error = ""
speed_error = ""
# Verify iteration speed
if iterations > iter_tolerance[1]:
iter_error = ("The time per iteration has regressed above"
" the tolerance maximum: " +
str(iter_tolerance[1]))
elif iterations < iter_tolerance[0]:
iter_error = ("Time taken to compete an iteration was "
"suspiciously fast. Please verify the model"
" is operating correctly and tune tolerances"
" accordingly.")
# Verify item processing speed
if speed < speed_tolerance[0]:
speed_error = ("The number of items processed per second"
" has regressed below the tolerance: " +
str(speed_tolerance[0]))
elif speed > speed_tolerance[1]:
speed_error = ("The number of items processed per second"
" was suspiciously high. Please verify the"
" model is behaving correctly and tune"
" tolerances accordingly.")
if iter_error and speed_error:
sys.stderr.write("\n".join([line, iter_error, speed_error]))
raise AssertionError("Timings out of tolerance range")
elif iter_error or speed_error:
sys.stderr.write(line)
raise AssertionError(iter_error + speed_error)
def _verify_model_accuracies(accuracies, expected_accuracy, acc_tolerance):
"""Asserts a list of accuracies is within a list of expected accuracies
with a tolerance applied.
Args:
accuracies: A list of floats representing the accuracies (%) produced
by the model at each step.
expected_accuracy: A list of floats representing the expected
accuracies (%) produced by the model at each step.
acc_tolerance: A float representing a percentage tolerance applied on
top of the expected accuracies that the accuracies produced by
the model should sit within.
Raises:
Assertion Error: Accuracy produced by the model are not within
the expected limits.
"""
for iter_num in range(len(accuracies)):
exp_acc = expected_accuracy[iter_num]
exp_acc_str = (
"{0} = {1} +- {2} = [{3:.{5}f}, {4:.{5}f}]".format(
"Expected accuracy (%)".ljust(22),
exp_acc,
acc_tolerance,
exp_acc - acc_tolerance,
exp_acc + acc_tolerance,
2
)
)
acc = accuracies[iter_num]
acc_str = "{} = {:.{}f}".format(
"Accuracy (%)".ljust(22),
acc,
2
)
full_acc_str = "{}\n{}".format(acc_str, exp_acc_str)
if acc < exp_acc - acc_tolerance:
raise AssertionError(
"After iteration {}, the model is less accurate"
" than expected.\n"
"{}".format(iter_num + 1, full_acc_str)
)
elif acc > exp_acc + acc_tolerance:
raise AssertionError(
"After iteration {}, the model is producing an accuracy"
" that is suspiciously high and should be reviewed.\n"
"{}".format(iter_num + 1, full_acc_str)
)
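# --- Illustrative note (added) ---
# For example, with expected_accuracy=[92.0] and acc_tolerance=1.0, any parsed
# accuracy in the closed range [91.0, 93.0] passes; values outside it raise.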
def assert_result_equals_tensor_value(output, tensor):
"""Searches for a single tensor result in the first line of the output
Searches the first line of the string output for a line with format
'[array([3., 8.], dtype=float32)]' and asserts its equal to the numpy
tensor argument
Args:
output: String containing the string representation of a numpy
tensor
tensor: numpy tensor representing the expected result
Returns:
None
Raises:
Assertion Error: Output is not in correct format
Assertion Error: Output does not contain a string representation
of a numpy array
Assertion Error: Output numpy array does not equal the expected
numpy array
"""
# TODO - np representation over multiple lines
# TODO - large np array output
# TODO - multiple dimension np output
list_regex = r"^\[.*?\]$"
np_array_str_regex = r"array\(.*?, dtype=.*?\)$"
first_line = output.split("\n")[0]
if not re.match(list_regex, first_line):
raise AssertionError(
"Result not in expected string format."
" Expecting stringified list "
" eg. [array([3., 8.], dtype=float32)]"
)
contents = first_line[1:-1]
if not re.match(np_array_str_regex, contents):
raise AssertionError(
"Expecting numpy representation "
"array with dtype "
"eg. array([3., 8.], dtype=float32)"
)
assert contents == np.array_repr(tensor), (
"Output value {} does not "
"equal expected value {}".format(np.array_repr(contents), tensor)
)
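# --- Illustrative note (added) ---
# e.g. an output whose first line is "[array([3., 8.], dtype=float32)]" passes
# against np.array([3., 8.], dtype=np.float32), since that line is exactly the
# numpy repr of the expected tensor wrapped in list brackets.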
def parse_results_for_ipus_used(output):
"""Finds the number of IPUs used in the model by looking for
string with format ' On 2 IPUs.' in output"""
shards_regex = r" On ([\d.]+) IPUs."
for line in output.split("\n"):
matches = re.match(shards_regex, line)
if matches:
shards = matches.group(1)
return int(shards)
raise AssertionError("Expecting line detailing IPU usage "
"eg. ' On 2 IPUs.'")
def assert_shards(output, expected_shards):
"""Verify the expected number of shards used were actually
used"""
actual_shards = parse_results_for_ipus_used(output)
assert actual_shards == expected_shards
def get_final_accuracy(output):
"""Find and return the accuracy reported in a test's output."""
result_regex = r"Accuracy=([\d.]+)\%"
result_list = parse_results_with_regex(output, result_regex)
result = result_list[0]
return result[-1]
def get_final_loss(output):
"""Find and return the loss reported in a test's output."""
result_regex = r"Loss=([\d.]+)"
result_list = parse_results_with_regex(output, result_regex)
result = result_list[0]
return result[-1]
def get_average_speeds(output):
"""Finds the average seconds/iteration and tokens/second
Args:
output: String representing the output of a test.
Returns:
A tuple where the first element is a float representing
the average iterations per second and the second the
average tokens processed per second
"""
result_regex = r"([\d.]+) +sec/itr. +([\d.]+)"
results = parse_results_with_regex(output, result_regex)
itr_sec_list = results[0]
tokens_sec_list = results[1]
return mean(itr_sec_list), mean(tokens_sec_list)
def parse_results_with_regex(output, regex):
"""Find and returns the regex matching results in output
Looks through the output line by line looking for a matching regex.
The function assembles a list of lists where each parent list is
the results for that position in the regex string and each item in
the child lists represents an order of the results found in the output
Args:
output: String representing the output of a test.
regex: Regex of result to find.
Returns:
A list of lists of floats. Parent list represents the result at each
position in the regex. Child list contains results received in the
order they were output.
Raises:
AssertionError: a line matching the regex could not be found in the
output
"""
results = []
for line in output.split("\n"):
matches = re.search(regex, line)
if matches:
number_of_results = matches.lastindex
if results == []:
results = [None] * number_of_results
for match_index in range(0, number_of_results):
result = float(matches.group(match_index + 1))
if results[match_index]:
results[match_index].append(result)
continue
results[match_index] = [result]
if results == []:
raise AssertionError("Regex {} not found in result".format(regex))
return results
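# ---------------------------------------------------------------------------
# Illustrative usage sketch added by the editor; it is not part of the
# original helper module. The outer list has one entry per capture group,
# and each inner list holds the values in the order they appeared.
def _example_parse_results_with_regex():
    sample_output = "0.5 sec/itr. 1000.0\n0.6 sec/itr. 900.0"
    results = parse_results_with_regex(sample_output, r"([\d.]+) +sec/itr. +([\d.]+)")
    # results == [[0.5, 0.6], [1000.0, 900.0]]
    return results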
def get_total_epochs(output):
"""Finds the number of epochs model has run through by looking for
string with format 'Epoch #3' in the models raw output"""
epochs = None
for line in output.split("\n"):
epoch_match = re.search(r"Epoch #([\d.]+)", line)
if epoch_match:
epochs = int(epoch_match.group(1))
if not epochs:
raise AssertionError("Epochs not found in output, eg. "
"Epoch #3")
return epochs
def assert_total_run_time(total_time, time_range):
"""Checks total run time is within the required range
Args:
total_time: float representing number of seconds the test took to
run
time_range: a tuple of floats where the first element is the minimum
            time the test should take, in seconds, and the second the
maximum
Raises:
AssertionError: if the total_time is not between the minimum time
and maximum time
"""
minimum_time = time_range[0]
maximum_time = time_range[1]
assert total_time >= minimum_time
assert total_time <= maximum_time
def assert_final_accuracy(output, minimum, maximum):
"""Gets the final accuracy given a raw model output and checks its value
is between the minimum and maximum
Args:
output: String representing the raw output of a model
minimum: a float representing a percentage (between 0.0% and 100%)
that is the minimum accuracy for the model after running
maximum: a float representing a percentage (between 0.0% and 100%)
that is the maximum accuracy for the model after running
Raises:
AssertionError: if the final accuracy is not between the maximum and
minimum percentages
"""
accuracy = get_final_accuracy(output)
assert accuracy >= minimum
assert accuracy <= maximum
def run_python_script_helper(cwd, script, **kwargs):
"""A function that given a path and python script name, runs the script
with kwargs as the command line arguments
Args:
cwd: string representing the directory of the python script
script: string representing the full name of the python script
kwargs: dictionary of string key and values that form the command
line arguments when the script is run.
Returns:
A string representing the raw output of the python script run
Raises:
        subprocess.CalledProcessError: if the script exits with a non-zero
            return code
"""
py_version = "python{}".format(sys.version_info[0])
cmd = [py_version, script]
if kwargs:
args = [
str(item) for sublist in kwargs.items() for item in sublist if item != ""
]
cmd.extend(args)
out = subprocess.check_output(cmd, cwd=cwd, universal_newlines=True)
print(out)
return out
def run_test_helper(subprocess_function, total_run_time=None,
total_run_time_tolerance=0.1, **kwargs):
"""Helper function for running tests
Takes in testable parameters, runs the test and checks the relevant
parameters against test results
Args:
subprocess_function: the function that runs a subprocess of
the model in question
        total_run_time: float representing the expected total time taken
            to run the test in seconds, or None to skip the run time check
        total_run_time_tolerance: float representing a percentage (between
            0.0 and 1.0) applied symmetrically around total_run_time to
            form the accepted upper and lower bounds
Returns:
A String representing the raw output of the models subprocess
Raises:
AssertionError: If the accuracy, time taken etc. are not within
the expected bounds
"""
start_time = time.time()
out = subprocess_function(**kwargs)
total_time = time.time() - start_time
if total_run_time:
total_run_time_range = range_from_tolerances(
total_run_time, total_run_time_tolerance
)
assert_total_run_time(total_time, total_run_time_range)
return out
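# ---------------------------------------------------------------------------
# Illustrative usage sketch added by the editor; it is not part of the
# original helper module. The directory, script name and command line flag
# are hypothetical placeholders for a model under test.
def _example_run_test_helper():
    return run_test_helper(
        run_python_script_helper,
        total_run_time=60.0,
        total_run_time_tolerance=0.1,
        cwd="../examples/example_model",
        script="train.py",
        **{"--epochs": 1}
    )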
def range_from_tolerances(value, tolerance):
"""Helper function that takes a value and applies the tolerance
Args:
value: a float representing the mean value to which the tolerance
will be applied
tolerance: a float representing a percentage (between 0.0 and 1.0)
which is applied symmetrically across the value argument
Returns:
A tuple of floats, the first element representing the tolerance
applied below the value (minimum) and the second above (maximum)
"""
return (
get_minimum_with_tolerance(value, tolerance),
get_maximum_with_tolerance(value, tolerance),
)
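# Worked example added by the editor (not part of the original module):
# range_from_tolerances(50.0, 0.5) returns (25.0, 75.0), i.e. the value
# scaled by (1 - tolerance) and (1 + tolerance) respectively.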
def get_minimum_with_tolerance(value, tolerance):
"""Helper function that takes a value and applies the tolerance
below the value
Args:
value: a float representing the mean value to which the tolerance
will be applied
tolerance: a float representing a percentage (between 0.0 and 1.0)
which is applied to the value argument
Returns:
        A float representing the tolerance applied below the value (minimum)
"""
return value * (1 - tolerance)
def get_maximum_with_tolerance(value, tolerance):
"""Helper function that takes a value and applies the tolerance
above the value
Args:
value: a float representing the mean value to which the tolerance
will be applied
tolerance: a float representing a percentage (between 0.0 and 1.0)
which is applied to the value argument
Returns:
        A float representing the tolerance applied above the value (maximum)
"""
return value * (1 + tolerance)
def check_data_exists(data_path, expected_files_list):
"""Helper function that checks the expected data exists in a directory
Args:
data_path: A string representing the directory of where the
data is expected to be
expected_files_list: a list of strings representing the expected
file names in the data_path directory
Returns:
A boolean which represents whether the expected files are found in
the data_path directory
"""
if os.path.exists(data_path):
for filename in expected_files_list:
if not os.path.isfile(os.path.join(data_path, filename)):
return False
return True
return False
| 34.36646
| 85
| 0.636123
|
from statistics import mean
import numpy as np
import os
import re
import subprocess
import sys
import time
def parse_results_for_speed(output, iter_tolerance, speed_tolerance):
found_a_result = False
for line in output.split("\n"):
matches = re.match(r"([\d.]+) +sec/itr. +([\d.]+)", line)
if matches:
found_a_result = True
iterations, speed = matches.groups()
iterations = float(iterations)
speed = float(speed)
_verify_model_numbers(
iter_tolerance, iterations, speed_tolerance, speed, line
)
if not found_a_result:
raise AssertionError("No results detected in this run")
def parse_results_for_accuracy(output, expected_accuracies, acc_tolerance):
accuracies = []
for line in output.split("\n"):
if re.match(r" + Accuracy=+([\d.]+)%", line):
accuracy = float(re.match(r" + Accuracy=+([\d.]+)%", line).groups()[0])
accuracies.append(accuracy)
elif re.search(r"Validation accuracy", line):
accuracy_str = re.search(r"accuracy:\s(.*)", line).group(1)
accuracy = float(accuracy_str[:accuracy_str.rfind("%")])
accuracies.append(accuracy)
if len(accuracies) == 0:
raise AssertionError("No results detected in this run")
elif len(accuracies) != len(expected_accuracies):
raise AssertionError("Expected accuracies and parsed accuracies have"
" different lengths")
_verify_model_accuracies(accuracies, expected_accuracies, acc_tolerance)
def _verify_model_numbers(iter_tolerance, iterations,
speed_tolerance, speed, line):
iter_error = ""
speed_error = ""
if iterations > iter_tolerance[1]:
iter_error = ("The time per iteration has regressed above"
" the tolerance maximum: " +
str(iter_tolerance[1]))
elif iterations < iter_tolerance[0]:
iter_error = ("Time taken to compete an iteration was "
"suspiciously fast. Please verify the model"
" is operating correctly and tune tolerances"
" accordingly.")
if speed < speed_tolerance[0]:
speed_error = ("The number of items processed per second"
" has regressed below the tolerance: " +
str(speed_tolerance[0]))
elif speed > speed_tolerance[1]:
speed_error = ("The number of items processed per second"
" was suspiciously high. Please verify the"
" model is behaving correctly and tune"
" tolerances accordingly.")
if iter_error and speed_error:
sys.stderr.write("\n".join([line, iter_error, speed_error]))
raise AssertionError("Timings out of tolerance range")
elif iter_error or speed_error:
sys.stderr.write(line)
raise AssertionError(iter_error + speed_error)
def _verify_model_accuracies(accuracies, expected_accuracy, acc_tolerance):
for iter_num in range(len(accuracies)):
exp_acc = expected_accuracy[iter_num]
exp_acc_str = (
"{0} = {1} +- {2} = [{3:.{5}f}, {4:.{5}f}]".format(
"Expected accuracy (%)".ljust(22),
exp_acc,
acc_tolerance,
exp_acc - acc_tolerance,
exp_acc + acc_tolerance,
2
)
)
acc = accuracies[iter_num]
acc_str = "{} = {:.{}f}".format(
"Accuracy (%)".ljust(22),
acc,
2
)
full_acc_str = "{}\n{}".format(acc_str, exp_acc_str)
if acc < exp_acc - acc_tolerance:
raise AssertionError(
"After iteration {}, the model is less accurate"
" than expected.\n"
"{}".format(iter_num + 1, full_acc_str)
)
elif acc > exp_acc + acc_tolerance:
raise AssertionError(
"After iteration {}, the model is producing an accuracy"
" that is suspiciously high and should be reviewed.\n"
"{}".format(iter_num + 1, full_acc_str)
)
def assert_result_equals_tensor_value(output, tensor):
list_regex = r"^\[.*?\]$"
np_array_str_regex = r"array\(.*?, dtype=.*?\)$"
first_line = output.split("\n")[0]
if not re.match(list_regex, first_line):
raise AssertionError(
"Result not in expected string format."
" Expecting stringified list "
" eg. [array([3., 8.], dtype=float32)]"
)
contents = first_line[1:-1]
if not re.match(np_array_str_regex, contents):
raise AssertionError(
"Expecting numpy representation "
"array with dtype "
"eg. array([3., 8.], dtype=float32)"
)
assert contents == np.array_repr(tensor), (
"Output value {} does not "
"equal expected value {}".format(np.array_repr(contents), tensor)
)
def parse_results_for_ipus_used(output):
shards_regex = r" On ([\d.]+) IPUs."
for line in output.split("\n"):
matches = re.match(shards_regex, line)
if matches:
shards = matches.group(1)
return int(shards)
raise AssertionError("Expecting line detailing IPU usage "
"eg. ' On 2 IPUs.'")
def assert_shards(output, expected_shards):
actual_shards = parse_results_for_ipus_used(output)
assert actual_shards == expected_shards
def get_final_accuracy(output):
result_regex = r"Accuracy=([\d.]+)\%"
result_list = parse_results_with_regex(output, result_regex)
result = result_list[0]
return result[-1]
def get_final_loss(output):
result_regex = r"Loss=([\d.]+)"
result_list = parse_results_with_regex(output, result_regex)
result = result_list[0]
return result[-1]
def get_average_speeds(output):
result_regex = r"([\d.]+) +sec/itr. +([\d.]+)"
results = parse_results_with_regex(output, result_regex)
itr_sec_list = results[0]
tokens_sec_list = results[1]
return mean(itr_sec_list), mean(tokens_sec_list)
def parse_results_with_regex(output, regex):
results = []
for line in output.split("\n"):
matches = re.search(regex, line)
if matches:
number_of_results = matches.lastindex
if results == []:
results = [None] * number_of_results
for match_index in range(0, number_of_results):
result = float(matches.group(match_index + 1))
if results[match_index]:
results[match_index].append(result)
continue
results[match_index] = [result]
if results == []:
raise AssertionError("Regex {} not found in result".format(regex))
return results
def get_total_epochs(output):
epochs = None
for line in output.split("\n"):
epoch_match = re.search(r"Epoch #([\d.]+)", line)
if epoch_match:
epochs = int(epoch_match.group(1))
if not epochs:
raise AssertionError("Epochs not found in output, eg. "
"Epoch #3")
return epochs
def assert_total_run_time(total_time, time_range):
minimum_time = time_range[0]
maximum_time = time_range[1]
assert total_time >= minimum_time
assert total_time <= maximum_time
def assert_final_accuracy(output, minimum, maximum):
accuracy = get_final_accuracy(output)
assert accuracy >= minimum
assert accuracy <= maximum
def run_python_script_helper(cwd, script, **kwargs):
py_version = "python{}".format(sys.version_info[0])
cmd = [py_version, script]
if kwargs:
args = [
str(item) for sublist in kwargs.items() for item in sublist if item != ""
]
cmd.extend(args)
out = subprocess.check_output(cmd, cwd=cwd, universal_newlines=True)
print(out)
return out
def run_test_helper(subprocess_function, total_run_time=None,
total_run_time_tolerance=0.1, **kwargs):
start_time = time.time()
out = subprocess_function(**kwargs)
total_time = time.time() - start_time
if total_run_time:
total_run_time_range = range_from_tolerances(
total_run_time, total_run_time_tolerance
)
assert_total_run_time(total_time, total_run_time_range)
return out
def range_from_tolerances(value, tolerance):
return (
get_minimum_with_tolerance(value, tolerance),
get_maximum_with_tolerance(value, tolerance),
)
def get_minimum_with_tolerance(value, tolerance):
return value * (1 - tolerance)
def get_maximum_with_tolerance(value, tolerance):
return value * (1 + tolerance)
def check_data_exists(data_path, expected_files_list):
if os.path.exists(data_path):
for filename in expected_files_list:
if not os.path.isfile(os.path.join(data_path, filename)):
return False
return True
return False
| true
| true
|
f719a788aa6769dc9f43b9f60b9a57cc0504643a
| 1,535
|
py
|
Python
|
code/clients/requests.py
|
lpmatos/gitlab-analytics
|
47a220bb54efa473f01bf033291f65b38accdbca
|
[
"MIT"
] | 2
|
2020-09-16T11:03:01.000Z
|
2021-07-30T07:05:58.000Z
|
code/clients/requests.py
|
lpmatos/gitlab-analytics
|
47a220bb54efa473f01bf033291f65b38accdbca
|
[
"MIT"
] | null | null | null |
code/clients/requests.py
|
lpmatos/gitlab-analytics
|
47a220bb54efa473f01bf033291f65b38accdbca
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import annotations
import requests
from validators.url import URL
from abc import ABC, abstractmethod
from requests.adapters import HTTPAdapter
from typing import Text, NoReturn, Callable, Dict
from requests.packages.urllib3.util.retry import Retry
class RequestResponse:
def __init__(self, response: Text) -> NoReturn:
self.status = response.status_code
self.reason = response.reason
self.json = response.json()
def get_json(self) -> Dict:
return self.json
class RequestsImplementation(ABC):
def __init__(self, url: Text, *args, **kwargs) -> NoReturn:
if URL.url_validator(url):
if not kwargs["is_secure"]:
url = url.replace("https", "http")
self.url = url
self._logger = kwargs["logger"]
if kwargs["retry"]:
            self.session = self.requests_retry_session(session=kwargs["session"])
else:
self.session = requests.Session()
@staticmethod
    def requests_retry_session(retries=3, backoff_factor=0.3, status_forcelist=(500, 502, 504), session=None) -> requests.Session:
session = session or requests.Session()
retry = Retry(total=retries, read=retries,
connect=retries, backoff_factor=backoff_factor,
status_forcelist=status_forcelist,)
adapter = HTTPAdapter(max_retries=retry)
session.mount("http://", adapter)
session.mount("https://", adapter)
return session
@abstractmethod
def get(self) -> NoReturn:
pass
@property
def logger(self) -> Callable:
return self._logger
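# ---------------------------------------------------------------------------
# Illustrative sketch added by the editor; it is not part of the original
# module. It shows a minimal concrete client assuming the keyword arguments
# used by RequestsImplementation.__init__ above ("is_secure", "logger",
# "retry" and "session"). The class name is a placeholder.
class ExampleClient(RequestsImplementation):
    def get(self) -> Dict:
        # Reuse the (optionally retry-enabled) session prepared by the base
        # class and wrap the response in RequestResponse.
        response = self.session.get(self.url)
        return RequestResponse(response).get_json()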
| 28.962264
| 130
| 0.704235
|
from __future__ import annotations
import requests
from validators.url import URL
from abc import ABC, abstractmethod
from requests.adapters import HTTPAdapter
from typing import Text, NoReturn, Callable, Dict
from requests.packages.urllib3.util.retry import Retry
class RequestResponse:
def __init__(self, response: Text) -> NoReturn:
self.status = response.status_code
self.reason = response.reason
self.json = response.json()
def get_json(self) -> Dict:
return self.json
class RequestsImplementation(ABC):
def __init__(self, url: Text, *args, **kwargs) -> NoReturn:
if URL.url_validator(url):
if not kwargs["is_secure"]:
url = url.replace("https", "http")
self.url = url
self._logger = kwargs["logger"]
if kwargs["retry"]:
            self.session = self.requests_retry_session(session=kwargs["session"])
else:
self.session = requests.Session()
@staticmethod
    def requests_retry_session(retries=3, backoff_factor=0.3, status_forcelist=(500, 502, 504), session=None) -> requests.Session:
session = session or requests.Session()
retry = Retry(total=retries, read=retries,
connect=retries, backoff_factor=backoff_factor,
status_forcelist=status_forcelist,)
adapter = HTTPAdapter(max_retries=retry)
session.mount("http://", adapter)
session.mount("https://", adapter)
return session
@abstractmethod
def get(self) -> NoReturn:
pass
@property
def logger(self) -> Callable:
return self._logger
| true
| true
|
f719a9168a4d3106600fffcc47c14cc90f3cadc7
| 6,299
|
py
|
Python
|
official/vision/detection/dataloader/tf_example_decoder.py
|
gujralsanyam22/models
|
d96f8f043dbe2b5ca8ea1785f57df8faf68d8875
|
[
"Apache-2.0"
] | 153
|
2020-10-25T13:58:04.000Z
|
2022-03-07T06:01:54.000Z
|
official/vision/detection/dataloader/tf_example_decoder.py
|
yangxl-2014-fe/models
|
11ea5237818e791a5717716d5413977f4c4db1e3
|
[
"Apache-2.0"
] | 11
|
2020-07-13T08:29:00.000Z
|
2022-03-24T07:21:09.000Z
|
official/vision/detection/dataloader/tf_example_decoder.py
|
yangxl-2014-fe/models
|
11ea5237818e791a5717716d5413977f4c4db1e3
|
[
"Apache-2.0"
] | 23
|
2020-10-25T14:44:47.000Z
|
2021-03-31T02:12:13.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow Example proto decoder for object detection.
A decoder to decode string tensors containing serialized tensorflow.Example
protos for object detection.
"""
import tensorflow as tf
class TfExampleDecoder(object):
"""Tensorflow Example proto decoder."""
def __init__(self, include_mask=False):
self._include_mask = include_mask
self._keys_to_features = {
'image/encoded':
tf.io.FixedLenFeature((), tf.string),
'image/source_id':
tf.io.FixedLenFeature((), tf.string),
'image/height':
tf.io.FixedLenFeature((), tf.int64),
'image/width':
tf.io.FixedLenFeature((), tf.int64),
'image/object/bbox/xmin':
tf.io.VarLenFeature(tf.float32),
'image/object/bbox/xmax':
tf.io.VarLenFeature(tf.float32),
'image/object/bbox/ymin':
tf.io.VarLenFeature(tf.float32),
'image/object/bbox/ymax':
tf.io.VarLenFeature(tf.float32),
'image/object/class/label':
tf.io.VarLenFeature(tf.int64),
'image/object/area':
tf.io.VarLenFeature(tf.float32),
'image/object/is_crowd':
tf.io.VarLenFeature(tf.int64),
}
if include_mask:
self._keys_to_features.update({
'image/object/mask':
tf.io.VarLenFeature(tf.string),
})
def _decode_image(self, parsed_tensors):
"""Decodes the image and set its static shape."""
image = tf.io.decode_image(parsed_tensors['image/encoded'], channels=3)
image.set_shape([None, None, 3])
return image
def _decode_boxes(self, parsed_tensors):
"""Concat box coordinates in the format of [ymin, xmin, ymax, xmax]."""
xmin = parsed_tensors['image/object/bbox/xmin']
xmax = parsed_tensors['image/object/bbox/xmax']
ymin = parsed_tensors['image/object/bbox/ymin']
ymax = parsed_tensors['image/object/bbox/ymax']
return tf.stack([ymin, xmin, ymax, xmax], axis=-1)
def _decode_masks(self, parsed_tensors):
"""Decode a set of PNG masks to the tf.float32 tensors."""
def _decode_png_mask(png_bytes):
mask = tf.squeeze(
tf.io.decode_png(png_bytes, channels=1, dtype=tf.uint8), axis=-1)
mask = tf.cast(mask, dtype=tf.float32)
mask.set_shape([None, None])
return mask
height = parsed_tensors['image/height']
width = parsed_tensors['image/width']
masks = parsed_tensors['image/object/mask']
return tf.cond(
pred=tf.greater(tf.size(input=masks), 0),
true_fn=lambda: tf.map_fn(_decode_png_mask, masks, dtype=tf.float32),
false_fn=lambda: tf.zeros([0, height, width], dtype=tf.float32))
def _decode_areas(self, parsed_tensors):
xmin = parsed_tensors['image/object/bbox/xmin']
xmax = parsed_tensors['image/object/bbox/xmax']
ymin = parsed_tensors['image/object/bbox/ymin']
ymax = parsed_tensors['image/object/bbox/ymax']
return tf.cond(
tf.greater(tf.shape(parsed_tensors['image/object/area'])[0], 0),
lambda: parsed_tensors['image/object/area'],
lambda: (xmax - xmin) * (ymax - ymin))
def decode(self, serialized_example):
"""Decode the serialized example.
Args:
serialized_example: a single serialized tf.Example string.
Returns:
decoded_tensors: a dictionary of tensors with the following fields:
- image: a uint8 tensor of shape [None, None, 3].
- source_id: a string scalar tensor.
- height: an integer scalar tensor.
- width: an integer scalar tensor.
- groundtruth_classes: a int64 tensor of shape [None].
- groundtruth_is_crowd: a bool tensor of shape [None].
- groundtruth_area: a float32 tensor of shape [None].
- groundtruth_boxes: a float32 tensor of shape [None, 4].
- groundtruth_instance_masks: a float32 tensor of shape
[None, None, None].
- groundtruth_instance_masks_png: a string tensor of shape [None].
"""
parsed_tensors = tf.io.parse_single_example(
serialized=serialized_example, features=self._keys_to_features)
for k in parsed_tensors:
if isinstance(parsed_tensors[k], tf.SparseTensor):
if parsed_tensors[k].dtype == tf.string:
parsed_tensors[k] = tf.sparse.to_dense(
parsed_tensors[k], default_value='')
else:
parsed_tensors[k] = tf.sparse.to_dense(
parsed_tensors[k], default_value=0)
image = self._decode_image(parsed_tensors)
boxes = self._decode_boxes(parsed_tensors)
areas = self._decode_areas(parsed_tensors)
is_crowds = tf.cond(
tf.greater(tf.shape(parsed_tensors['image/object/is_crowd'])[0], 0),
lambda: tf.cast(parsed_tensors['image/object/is_crowd'], dtype=tf.bool),
lambda: tf.zeros_like(parsed_tensors['image/object/class/label'], dtype=tf.bool)) # pylint: disable=line-too-long
if self._include_mask:
masks = self._decode_masks(parsed_tensors)
decoded_tensors = {
'image': image,
'source_id': parsed_tensors['image/source_id'],
'height': parsed_tensors['image/height'],
'width': parsed_tensors['image/width'],
'groundtruth_classes': parsed_tensors['image/object/class/label'],
'groundtruth_is_crowd': is_crowds,
'groundtruth_area': areas,
'groundtruth_boxes': boxes,
}
if self._include_mask:
decoded_tensors.update({
'groundtruth_instance_masks': masks,
'groundtruth_instance_masks_png': parsed_tensors['image/object/mask'],
})
return decoded_tensors
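# Illustrative usage sketch added by the editor; it is not part of the
# original module. The TFRecord file name is a hypothetical placeholder.
def _example_decode_pipeline(tfrecord_path='train.tfrecord'):
  decoder = TfExampleDecoder(include_mask=False)
  dataset = tf.data.TFRecordDataset([tfrecord_path])
  # Each serialized tf.Example becomes the dictionary documented in
  # TfExampleDecoder.decode.
  return dataset.map(decoder.decode)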
| 40.121019
| 122
| 0.657247
|
import tensorflow as tf
class TfExampleDecoder(object):
def __init__(self, include_mask=False):
self._include_mask = include_mask
self._keys_to_features = {
'image/encoded':
tf.io.FixedLenFeature((), tf.string),
'image/source_id':
tf.io.FixedLenFeature((), tf.string),
'image/height':
tf.io.FixedLenFeature((), tf.int64),
'image/width':
tf.io.FixedLenFeature((), tf.int64),
'image/object/bbox/xmin':
tf.io.VarLenFeature(tf.float32),
'image/object/bbox/xmax':
tf.io.VarLenFeature(tf.float32),
'image/object/bbox/ymin':
tf.io.VarLenFeature(tf.float32),
'image/object/bbox/ymax':
tf.io.VarLenFeature(tf.float32),
'image/object/class/label':
tf.io.VarLenFeature(tf.int64),
'image/object/area':
tf.io.VarLenFeature(tf.float32),
'image/object/is_crowd':
tf.io.VarLenFeature(tf.int64),
}
if include_mask:
self._keys_to_features.update({
'image/object/mask':
tf.io.VarLenFeature(tf.string),
})
def _decode_image(self, parsed_tensors):
image = tf.io.decode_image(parsed_tensors['image/encoded'], channels=3)
image.set_shape([None, None, 3])
return image
def _decode_boxes(self, parsed_tensors):
xmin = parsed_tensors['image/object/bbox/xmin']
xmax = parsed_tensors['image/object/bbox/xmax']
ymin = parsed_tensors['image/object/bbox/ymin']
ymax = parsed_tensors['image/object/bbox/ymax']
return tf.stack([ymin, xmin, ymax, xmax], axis=-1)
def _decode_masks(self, parsed_tensors):
def _decode_png_mask(png_bytes):
mask = tf.squeeze(
tf.io.decode_png(png_bytes, channels=1, dtype=tf.uint8), axis=-1)
mask = tf.cast(mask, dtype=tf.float32)
mask.set_shape([None, None])
return mask
height = parsed_tensors['image/height']
width = parsed_tensors['image/width']
masks = parsed_tensors['image/object/mask']
return tf.cond(
pred=tf.greater(tf.size(input=masks), 0),
true_fn=lambda: tf.map_fn(_decode_png_mask, masks, dtype=tf.float32),
false_fn=lambda: tf.zeros([0, height, width], dtype=tf.float32))
def _decode_areas(self, parsed_tensors):
xmin = parsed_tensors['image/object/bbox/xmin']
xmax = parsed_tensors['image/object/bbox/xmax']
ymin = parsed_tensors['image/object/bbox/ymin']
ymax = parsed_tensors['image/object/bbox/ymax']
return tf.cond(
tf.greater(tf.shape(parsed_tensors['image/object/area'])[0], 0),
lambda: parsed_tensors['image/object/area'],
lambda: (xmax - xmin) * (ymax - ymin))
def decode(self, serialized_example):
parsed_tensors = tf.io.parse_single_example(
serialized=serialized_example, features=self._keys_to_features)
for k in parsed_tensors:
if isinstance(parsed_tensors[k], tf.SparseTensor):
if parsed_tensors[k].dtype == tf.string:
parsed_tensors[k] = tf.sparse.to_dense(
parsed_tensors[k], default_value='')
else:
parsed_tensors[k] = tf.sparse.to_dense(
parsed_tensors[k], default_value=0)
image = self._decode_image(parsed_tensors)
boxes = self._decode_boxes(parsed_tensors)
areas = self._decode_areas(parsed_tensors)
is_crowds = tf.cond(
tf.greater(tf.shape(parsed_tensors['image/object/is_crowd'])[0], 0),
lambda: tf.cast(parsed_tensors['image/object/is_crowd'], dtype=tf.bool),
lambda: tf.zeros_like(parsed_tensors['image/object/class/label'], dtype=tf.bool))
if self._include_mask:
masks = self._decode_masks(parsed_tensors)
decoded_tensors = {
'image': image,
'source_id': parsed_tensors['image/source_id'],
'height': parsed_tensors['image/height'],
'width': parsed_tensors['image/width'],
'groundtruth_classes': parsed_tensors['image/object/class/label'],
'groundtruth_is_crowd': is_crowds,
'groundtruth_area': areas,
'groundtruth_boxes': boxes,
}
if self._include_mask:
decoded_tensors.update({
'groundtruth_instance_masks': masks,
'groundtruth_instance_masks_png': parsed_tensors['image/object/mask'],
})
return decoded_tensors
| true
| true
|
f719a9bfc05dbb1ca8c4fffbbf92b7f387621266
| 859
|
py
|
Python
|
taskobra/orm/components/cpu.py
|
Vipyr/taskobra
|
d9884f006ef9c735852075912d5a945543de52f5
|
[
"MIT"
] | null | null | null |
taskobra/orm/components/cpu.py
|
Vipyr/taskobra
|
d9884f006ef9c735852075912d5a945543de52f5
|
[
"MIT"
] | 43
|
2020-02-06T22:23:42.000Z
|
2020-04-29T23:56:43.000Z
|
taskobra/orm/components/cpu.py
|
Vipyr/taskobra
|
d9884f006ef9c735852075912d5a945543de52f5
|
[
"MIT"
] | 2
|
2020-02-06T21:01:42.000Z
|
2020-02-06T23:43:11.000Z
|
# Libraries
from sqlalchemy import Column, Float, ForeignKey, Integer, String
# Taskobra
from taskobra.orm.components import Component
class CPU(Component):
__tablename__ = "CPU"
unique_id = Column(Integer, ForeignKey("Component.unique_id"), primary_key=True)
manufacturer = Column(String)
model = Column(String)
isa = Column(String)
tdp = Column(Integer)
core_count = Column(Integer)
threads_per_core = Column(Integer)
nominal_frequency = Column(Float)
maximum_frequency = Column(Float)
__mapper_args__ = {
"polymorphic_identity": __tablename__,
}
@property
def threads(self):
return self.core_count * self.threads_per_core
def __repr__(self):
return f"<CPU({self.manufacturer} {self.model} ({self.core_count}/{self.threads}x{self.nominal_frequency} GHz {self.isa}))>"
| 29.62069
| 132
| 0.705471
|
from sqlalchemy import Column, Float, ForeignKey, Integer, String
from taskobra.orm.components import Component
class CPU(Component):
__tablename__ = "CPU"
unique_id = Column(Integer, ForeignKey("Component.unique_id"), primary_key=True)
manufacturer = Column(String)
model = Column(String)
isa = Column(String)
tdp = Column(Integer)
core_count = Column(Integer)
threads_per_core = Column(Integer)
nominal_frequency = Column(Float)
maximum_frequency = Column(Float)
__mapper_args__ = {
"polymorphic_identity": __tablename__,
}
@property
def threads(self):
return self.core_count * self.threads_per_core
def __repr__(self):
return f"<CPU({self.manufacturer} {self.model} ({self.core_count}/{self.threads}x{self.nominal_frequency} GHz {self.isa}))>"
| true
| true
|
f719a9d668b8a403e901541f650b87db1bf30dbc
| 1,112
|
py
|
Python
|
music/migrations/0010_auto_20150427_2304.py
|
Amoki/Amoki-Music
|
77b0e426fe9cc6c9cd12346a5e5e81a62362bb83
|
[
"MIT"
] | 3
|
2015-06-16T11:12:29.000Z
|
2019-05-03T09:09:21.000Z
|
music/migrations/0010_auto_20150427_2304.py
|
Amoki/Amoki-Music
|
77b0e426fe9cc6c9cd12346a5e5e81a62362bb83
|
[
"MIT"
] | 16
|
2015-08-18T14:35:55.000Z
|
2021-06-10T17:31:04.000Z
|
music/migrations/0010_auto_20150427_2304.py
|
Amoki/Amoki-Music
|
77b0e426fe9cc6c9cd12346a5e5e81a62362bb83
|
[
"MIT"
] | 1
|
2016-10-19T14:48:52.000Z
|
2016-10-19T14:48:52.000Z
|
from __future__ import unicode_literals
from django.db import models, migrations
def set_sources(apps, schema_editor):
    # We can't import the Source model directly as it may be a newer
# version than this migration expects. We use the historical version.
Source = apps.get_model("music", "Source")
TemporaryMusic = apps.get_model("music", "TemporaryMusic")
youtube = Source.objects.get(name="Youtube")
for tempMusic in TemporaryMusic.objects.all():
tempMusic.source = youtube
tempMusic.save()
class Migration(migrations.Migration):
dependencies = [
('music', '0009_auto_20150427_2038'),
]
operations = [
migrations.AddField(
model_name='temporarymusic',
name='source',
field=models.ForeignKey(to='music.Source', null=True, on_delete=models.CASCADE),
),
migrations.RunPython(set_sources),
migrations.AlterField(
model_name='temporarymusic',
name='source',
field=models.ForeignKey(to='music.Source', on_delete=models.CASCADE),
),
]
| 30.888889
| 92
| 0.654676
|
from __future__ import unicode_literals
from django.db import models, migrations
def set_sources(apps, schema_editor):
# version than this migration expects. We use the historical version.
Source = apps.get_model("music", "Source")
TemporaryMusic = apps.get_model("music", "TemporaryMusic")
youtube = Source.objects.get(name="Youtube")
for tempMusic in TemporaryMusic.objects.all():
tempMusic.source = youtube
tempMusic.save()
class Migration(migrations.Migration):
dependencies = [
('music', '0009_auto_20150427_2038'),
]
operations = [
migrations.AddField(
model_name='temporarymusic',
name='source',
field=models.ForeignKey(to='music.Source', null=True, on_delete=models.CASCADE),
),
migrations.RunPython(set_sources),
migrations.AlterField(
model_name='temporarymusic',
name='source',
field=models.ForeignKey(to='music.Source', on_delete=models.CASCADE),
),
]
| true
| true
|
f719aae1c7a532a452c6a6c2a3522f59f033bbfa
| 1,533
|
py
|
Python
|
tests/test_fieldtype_model.py
|
MasterScott/Formasaurus
|
d7d916237a6d2ca4c80c4c8ae5d66999c8beebed
|
[
"MIT"
] | 132
|
2015-04-18T01:53:52.000Z
|
2022-03-31T08:33:26.000Z
|
tests/test_fieldtype_model.py
|
Eglet27/Formasaurus
|
d7d916237a6d2ca4c80c4c8ae5d66999c8beebed
|
[
"MIT"
] | 26
|
2015-07-08T20:09:26.000Z
|
2022-03-03T16:50:08.000Z
|
tests/test_fieldtype_model.py
|
Eglet27/Formasaurus
|
d7d916237a6d2ca4c80c4c8ae5d66999c8beebed
|
[
"MIT"
] | 63
|
2015-02-17T08:41:00.000Z
|
2022-03-31T08:58:18.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division
import itertools
import numpy as np
from sklearn_crfsuite.metrics import flat_accuracy_score
from formasaurus.fieldtype_model import (
train,
_PRECISE_C1_C2,
_REALISTIC_C1_C2,
get_Xy,
)
def test_training(storage, capsys):
annotations = (a for a in storage.iter_annotations(
simplify_form_types=True,
simplify_field_types=True,
) if a.fields_annotated)
annotations = list(itertools.islice(annotations, 0, 300))
crf = train(
annotations=annotations,
use_precise_form_types=False,
optimize_hyperparameters_iters=2,
optimize_hyperparameters_folds=2,
optimize_hyperparameters_jobs=-1,
full_form_type_names=False,
full_field_type_names=False
)
out, err = capsys.readouterr()
assert 'Training on 300 forms' in out
assert 'realistic form types' in out
assert 'Best hyperparameters' in out
assert 0.0 < crf.c1 < 2.5
assert 0.0 < crf.c2 < 0.9
    assert (crf.c1, crf.c2) != _REALISTIC_C1_C2
    assert (crf.c1, crf.c2) != _PRECISE_C1_C2
form_types = np.asarray([a.type for a in annotations])
X, y = get_Xy(annotations, form_types, full_type_names=False)
y_pred = crf.predict(X)
score = flat_accuracy_score(y, y_pred)
assert 0.9 < score < 1.0 # overfitting FTW!
field_schema = storage.get_field_schema()
short_names = set(field_schema.types_inv.keys())
assert set(crf.classes_).issubset(short_names)
| 28.924528
| 65
| 0.701239
|
from __future__ import absolute_import, division
import itertools
import numpy as np
from sklearn_crfsuite.metrics import flat_accuracy_score
from formasaurus.fieldtype_model import (
train,
_PRECISE_C1_C2,
_REALISTIC_C1_C2,
get_Xy,
)
def test_training(storage, capsys):
annotations = (a for a in storage.iter_annotations(
simplify_form_types=True,
simplify_field_types=True,
) if a.fields_annotated)
annotations = list(itertools.islice(annotations, 0, 300))
crf = train(
annotations=annotations,
use_precise_form_types=False,
optimize_hyperparameters_iters=2,
optimize_hyperparameters_folds=2,
optimize_hyperparameters_jobs=-1,
full_form_type_names=False,
full_field_type_names=False
)
out, err = capsys.readouterr()
assert 'Training on 300 forms' in out
assert 'realistic form types' in out
assert 'Best hyperparameters' in out
assert 0.0 < crf.c1 < 2.5
assert 0.0 < crf.c2 < 0.9
    assert (crf.c1, crf.c2) != _REALISTIC_C1_C2
    assert (crf.c1, crf.c2) != _PRECISE_C1_C2
form_types = np.asarray([a.type for a in annotations])
X, y = get_Xy(annotations, form_types, full_type_names=False)
y_pred = crf.predict(X)
score = flat_accuracy_score(y, y_pred)
assert 0.9 < score < 1.0
field_schema = storage.get_field_schema()
short_names = set(field_schema.types_inv.keys())
assert set(crf.classes_).issubset(short_names)
| true
| true
|
f719ac12ab39a81ed2df4d9c929c5f6b2e9f5724
| 2,399
|
py
|
Python
|
Lib/glyphsLib/__main__.py
|
silnrsi/glyphsLib
|
fc9ac286874e30130679430b028a173062c311a0
|
[
"Apache-2.0"
] | 1
|
2019-01-19T05:50:30.000Z
|
2019-01-19T05:50:30.000Z
|
Lib/glyphsLib/__main__.py
|
DalavanCloud/glyphsLib
|
fc9ac286874e30130679430b028a173062c311a0
|
[
"Apache-2.0"
] | null | null | null |
Lib/glyphsLib/__main__.py
|
DalavanCloud/glyphsLib
|
fc9ac286874e30130679430b028a173062c311a0
|
[
"Apache-2.0"
] | 1
|
2019-01-19T05:50:14.000Z
|
2019-01-19T05:50:14.000Z
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import, unicode_literals
import sys
import argparse
import glyphsLib
description = """\n
Converts a Glyphs.app source file into UFO masters
or UFO instances and MutatorMath designspace.
"""
def parse_options(args):
parser = argparse.ArgumentParser(description=description)
parser.add_argument("--version", action="version",
version='glyphsLib %s' % (glyphsLib.__version__))
parser.add_argument("-g", "--glyphs", metavar="GLYPHS", required=True,
help="Glyphs file to convert.")
parser.add_argument("-m", "--masters", metavar="MASTERS",
default="master_ufo",
help="Ouput masters UFO to folder MASTERS. "
"(default: %(default)s)")
parser.add_argument("-n", "--instances", metavar="INSTANCES", nargs="?",
const="instance_ufo", default=None,
help="Output and generate interpolated instances UFO "
"to folder INSTANCES. "
"(default: %(const)s)")
parser.add_argument("-r", "--round-instances", action="store_true",
help="Apply integer rounding to all geometry when "
"interpolating")
options = parser.parse_args(args)
return options
def main(args=None):
opt = parse_options(args)
if opt.glyphs is not None:
if opt.instances is None:
glyphsLib.build_masters(opt.glyphs, opt.masters)
else:
glyphsLib.build_instances(opt.glyphs, opt.masters, opt.instances,
round_geometry=opt.round_instances)
if __name__ == '__main__':
main(sys.argv[1:])
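# Example invocation added by the editor (file and folder names are
# placeholders): convert a Glyphs source into master UFOs plus rounded
# interpolated instances.
#
#   python -m glyphsLib -g MyFont.glyphs -m master_ufo -n instance_ufo -r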
| 38.693548
| 82
| 0.631513
|
from __future__ import print_function, division, absolute_import, unicode_literals
import sys
import argparse
import glyphsLib
description = """\n
Converts a Glyphs.app source file into UFO masters
or UFO instances and MutatorMath designspace.
"""
def parse_options(args):
parser = argparse.ArgumentParser(description=description)
parser.add_argument("--version", action="version",
version='glyphsLib %s' % (glyphsLib.__version__))
parser.add_argument("-g", "--glyphs", metavar="GLYPHS", required=True,
help="Glyphs file to convert.")
parser.add_argument("-m", "--masters", metavar="MASTERS",
default="master_ufo",
help="Ouput masters UFO to folder MASTERS. "
"(default: %(default)s)")
parser.add_argument("-n", "--instances", metavar="INSTANCES", nargs="?",
const="instance_ufo", default=None,
help="Output and generate interpolated instances UFO "
"to folder INSTANCES. "
"(default: %(const)s)")
parser.add_argument("-r", "--round-instances", action="store_true",
help="Apply integer rounding to all geometry when "
"interpolating")
options = parser.parse_args(args)
return options
def main(args=None):
opt = parse_options(args)
if opt.glyphs is not None:
if opt.instances is None:
glyphsLib.build_masters(opt.glyphs, opt.masters)
else:
glyphsLib.build_instances(opt.glyphs, opt.masters, opt.instances,
round_geometry=opt.round_instances)
if __name__ == '__main__':
main(sys.argv[1:])
| true
| true
|
f719ac201c882a4f33c304211ff792834b6fe5b0
| 640
|
py
|
Python
|
fm2o2.py
|
dumpydog212/fm2o2
|
b5e173735bb08466d6c20f7868725e627260dd88
|
[
"MIT"
] | null | null | null |
fm2o2.py
|
dumpydog212/fm2o2
|
b5e173735bb08466d6c20f7868725e627260dd88
|
[
"MIT"
] | null | null | null |
fm2o2.py
|
dumpydog212/fm2o2
|
b5e173735bb08466d6c20f7868725e627260dd88
|
[
"MIT"
] | null | null | null |
import glob
import os
from xml.dom import minidom
import xml.etree.ElementTree as ET
path = r"C:\Users\shamb\Desktop\dita_demo"
valid_path = r"C:\Users\shamb\Desktop\dita_demo_scrubbed"
wildcard = "*.xml"
full_path = os.path.join(path, wildcard)
os.makedirs(valid_path, exist_ok=True)
file_list = glob.glob(full_path)
print("The file set includes:")
for this_file in file_list:
print(this_file)
# mydoc = minidom.parse(this_file)
# print(type(mydoc))
tree = ET.parse(this_file)
root = tree.getroot()
print('\nAll item data:')
for elem in root:
for subelem in elem:
print(subelem.text)
| 22.068966
| 57
| 0.696875
|
import glob
import os
from xml.dom import minidom
import xml.etree.ElementTree as ET
path = r"C:\Users\shamb\Desktop\dita_demo"
valid_path = r"C:\Users\shamb\Desktop\dita_demo_scrubbed"
wildcard = "*.xml"
full_path = os.path.join(path, wildcard)
os.makedirs(valid_path, exist_ok=True)
file_list = glob.glob(full_path)
print("The file set includes:")
for this_file in file_list:
print(this_file)
tree = ET.parse(this_file)
root = tree.getroot()
print('\nAll item data:')
for elem in root:
for subelem in elem:
print(subelem.text)
| true
| true
|
f719acd0bf5519f70da4e2324dadedc8b1906093
| 12,049
|
py
|
Python
|
gooddata-afm-client/gooddata_afm_client/model/included_dimension_props.py
|
gooddata/gooddata-python-sdk
|
df4d4a4d730ab376960ae2ed01e7d86498e85c6a
|
[
"MIT"
] | 7
|
2022-01-24T16:27:06.000Z
|
2022-02-25T10:18:49.000Z
|
gooddata-afm-client/gooddata_afm_client/model/included_dimension_props.py
|
gooddata/gooddata-python-sdk
|
df4d4a4d730ab376960ae2ed01e7d86498e85c6a
|
[
"MIT"
] | 29
|
2022-01-20T15:45:38.000Z
|
2022-03-31T09:39:25.000Z
|
gooddata-afm-client/gooddata_afm_client/model/included_dimension_props.py
|
gooddata/gooddata-python-sdk
|
df4d4a4d730ab376960ae2ed01e7d86498e85c6a
|
[
"MIT"
] | 7
|
2022-01-20T07:11:15.000Z
|
2022-03-09T14:50:17.000Z
|
"""
OpenAPI definition
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v0
Contact: support@gooddata.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from gooddata_afm_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from gooddata_afm_client.exceptions import ApiAttributeError
class IncludedDimensionProps(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = True
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'dimension_attributes_values': ({str: ([str],)},), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'dimension_attributes_values': 'dimensionAttributesValues', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, dimension_attributes_values, *args, **kwargs): # noqa: E501
"""IncludedDimensionProps - a model defined in OpenAPI
Args:
dimension_attributes_values ({str: ([str],)}): Allows to customize for which attribute values the grand total will be computed. If the values for particular attribute are not specified then the totals for all values are computed. Note that this also covers the case of individual metrics (treated as values of the \"measureGroup\" pseudo attribute).
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.dimension_attributes_values = dimension_attributes_values
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, dimension_attributes_values, *args, **kwargs): # noqa: E501
"""IncludedDimensionProps - a model defined in OpenAPI
Args:
dimension_attributes_values ({str: ([str],)}): Allows to customize for which attribute values the grand total will be computed. If the values for particular attribute are not specified then the totals for all values are computed. Note that this also covers the case of individual metrics (treated as values of the \"measureGroup\" pseudo attribute).
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.dimension_attributes_values = dimension_attributes_values
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 45.813688
| 361
| 0.590256
|
import re
import sys
from gooddata_afm_client.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from gooddata_afm_client.exceptions import ApiAttributeError
class IncludedDimensionProps(ModelNormal):
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
return (bool, date, datetime, dict, float, int, list, str, none_type,)
_nullable = True
@cached_property
def openapi_types():
return {
'dimension_attributes_values': ({str: ([str],)},),
}
@cached_property
def discriminator():
return None
attribute_map = {
'dimension_attributes_values': 'dimensionAttributesValues',
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, dimension_attributes_values, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.dimension_attributes_values = dimension_attributes_values
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, dimension_attributes_values, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.dimension_attributes_values = dimension_attributes_values
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| true
| true
|
f719ad57e58a44fc929ef55ed10a1ee635466eb2
| 326
|
py
|
Python
|
setup.py
|
droberin/cyberdyne-dyndns
|
7d495390413cff2829f6b00a482f7b9dff3dcb5a
|
[
"MIT"
] | null | null | null |
setup.py
|
droberin/cyberdyne-dyndns
|
7d495390413cff2829f6b00a482f7b9dff3dcb5a
|
[
"MIT"
] | null | null | null |
setup.py
|
droberin/cyberdyne-dyndns
|
7d495390413cff2829f6b00a482f7b9dff3dcb5a
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
setup(
name='cyberdynedyndnscli',
version='0.1.0',
packages=['cyberdynedyndnscli'],
url='https://github.com/droberin/cyberdynedyndnscli',
license='MIT',
author='DRoBeR',
author_email='drober+software@gmail.com',
description='Cyberdyne.es Dynamic DNS client'
)
| 25.076923
| 57
| 0.699387
|
from distutils.core import setup
setup(
name='cyberdynedyndnscli',
version='0.1.0',
packages=['cyberdynedyndnscli'],
url='https://github.com/droberin/cyberdynedyndnscli',
license='MIT',
author='DRoBeR',
author_email='drober+software@gmail.com',
description='Cyberdyne.es Dynamic DNS client'
)
| true
| true
|
f719ae112f660d822e36dfe8386ebed7cf3c5760
| 13,464
|
py
|
Python
|
Doc/tools/extensions/pyspecific.py
|
deadsnakes/python3.4
|
e8ac58ab083b57aa04b46c79f764c68bdab607a0
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
Doc/tools/extensions/pyspecific.py
|
deadsnakes/python3.4
|
e8ac58ab083b57aa04b46c79f764c68bdab607a0
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
Doc/tools/extensions/pyspecific.py
|
deadsnakes/python3.4
|
e8ac58ab083b57aa04b46c79f764c68bdab607a0
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
pyspecific.py
~~~~~~~~~~~~~
Sphinx extension with Python doc-specific markup.
:copyright: 2008-2014 by Georg Brandl.
:license: Python license.
"""
import re
import codecs
from os import path
from time import asctime
from pprint import pformat
from docutils.io import StringOutput
from docutils.parsers.rst import Directive
from docutils.utils import new_document
from docutils import nodes, utils
from sphinx import addnodes
from sphinx.builders import Builder
from sphinx.util.nodes import split_explicit_title
from sphinx.writers.html import HTMLTranslator
from sphinx.writers.text import TextWriter
from sphinx.writers.latex import LaTeXTranslator
from sphinx.domains.python import PyModulelevel, PyClassmember
# Support for checking for suspicious markup
import suspicious
ISSUE_URI = 'https://bugs.python.org/issue%s'
SOURCE_URI = 'https://github.com/python/cpython/tree/3.4/%s'
# monkey-patch reST parser to disable alphabetic and roman enumerated lists
from docutils.parsers.rst.states import Body
Body.enum.converters['loweralpha'] = \
Body.enum.converters['upperalpha'] = \
Body.enum.converters['lowerroman'] = \
Body.enum.converters['upperroman'] = lambda x: None
# monkey-patch HTML and LaTeX translators to keep doctest blocks in the
# doctest docs themselves
orig_visit_literal_block = HTMLTranslator.visit_literal_block
orig_depart_literal_block = LaTeXTranslator.depart_literal_block
def new_visit_literal_block(self, node):
meta = self.builder.env.metadata[self.builder.current_docname]
old_trim_doctest_flags = self.highlighter.trim_doctest_flags
if 'keepdoctest' in meta:
self.highlighter.trim_doctest_flags = False
try:
orig_visit_literal_block(self, node)
finally:
self.highlighter.trim_doctest_flags = old_trim_doctest_flags
def new_depart_literal_block(self, node):
meta = self.builder.env.metadata[self.curfilestack[-1]]
old_trim_doctest_flags = self.highlighter.trim_doctest_flags
if 'keepdoctest' in meta:
self.highlighter.trim_doctest_flags = False
try:
orig_depart_literal_block(self, node)
finally:
self.highlighter.trim_doctest_flags = old_trim_doctest_flags
HTMLTranslator.visit_literal_block = new_visit_literal_block
LaTeXTranslator.depart_literal_block = new_depart_literal_block
# Support for marking up and linking to bugs.python.org issues
def issue_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
issue = utils.unescape(text)
text = 'issue ' + issue
refnode = nodes.reference(text, text, refuri=ISSUE_URI % issue)
return [refnode], []
# Support for linking to Python source files easily
def source_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
has_t, title, target = split_explicit_title(text)
title = utils.unescape(title)
target = utils.unescape(target)
refnode = nodes.reference(title, title, refuri=SOURCE_URI % target)
return [refnode], []
# Support for marking up implementation details
class ImplementationDetail(Directive):
has_content = True
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
def run(self):
pnode = nodes.compound(classes=['impl-detail'])
content = self.content
add_text = nodes.strong('CPython implementation detail:',
'CPython implementation detail:')
if self.arguments:
n, m = self.state.inline_text(self.arguments[0], self.lineno)
pnode.append(nodes.paragraph('', '', *(n + m)))
self.state.nested_parse(content, self.content_offset, pnode)
if pnode.children and isinstance(pnode[0], nodes.paragraph):
pnode[0].insert(0, add_text)
pnode[0].insert(1, nodes.Text(' '))
else:
pnode.insert(0, nodes.paragraph('', '', add_text))
return [pnode]
# Support for documenting decorators
class PyDecoratorMixin(object):
def handle_signature(self, sig, signode):
ret = super(PyDecoratorMixin, self).handle_signature(sig, signode)
signode.insert(0, addnodes.desc_addname('@', '@'))
return ret
def needs_arglist(self):
return False
class PyDecoratorFunction(PyDecoratorMixin, PyModulelevel):
def run(self):
# a decorator function is a function after all
self.name = 'py:function'
return PyModulelevel.run(self)
class PyDecoratorMethod(PyDecoratorMixin, PyClassmember):
def run(self):
self.name = 'py:method'
return PyClassmember.run(self)
class PyCoroutineMixin(object):
def handle_signature(self, sig, signode):
ret = super(PyCoroutineMixin, self).handle_signature(sig, signode)
signode.insert(0, addnodes.desc_annotation('coroutine ', 'coroutine '))
return ret
class PyCoroutineFunction(PyCoroutineMixin, PyModulelevel):
def run(self):
self.name = 'py:function'
return PyModulelevel.run(self)
class PyCoroutineMethod(PyCoroutineMixin, PyClassmember):
def run(self):
self.name = 'py:method'
return PyClassmember.run(self)
# Support for documenting version of removal in deprecations
class DeprecatedRemoved(Directive):
has_content = True
required_arguments = 2
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
_label = 'Deprecated since version %s, will be removed in version %s'
def run(self):
node = addnodes.versionmodified()
node.document = self.state.document
node['type'] = 'deprecated-removed'
version = (self.arguments[0], self.arguments[1])
node['version'] = version
text = self._label % version
if len(self.arguments) == 3:
inodes, messages = self.state.inline_text(self.arguments[2],
self.lineno+1)
para = nodes.paragraph(self.arguments[2], '', *inodes)
node.append(para)
else:
messages = []
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
if len(node):
if isinstance(node[0], nodes.paragraph) and node[0].rawsource:
content = nodes.inline(node[0].rawsource, translatable=True)
content.source = node[0].source
content.line = node[0].line
content += node[0].children
node[0].replace_self(nodes.paragraph('', '', content))
node[0].insert(0, nodes.inline('', '%s: ' % text,
classes=['versionmodified']))
else:
para = nodes.paragraph('', '',
nodes.inline('', '%s.' % text,
classes=['versionmodified']))
node.append(para)
env = self.state.document.settings.env
env.note_versionchange('deprecated', version[0], node, self.lineno)
return [node] + messages
# Support for including Misc/NEWS
issue_re = re.compile('([Ii])ssue #([0-9]+)')
whatsnew_re = re.compile(r"(?im)^what's new in (.*?)\??$")
class MiscNews(Directive):
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {}
def run(self):
fname = self.arguments[0]
source = self.state_machine.input_lines.source(
self.lineno - self.state_machine.input_offset - 1)
source_dir = path.dirname(path.abspath(source))
fpath = path.join(source_dir, fname)
self.state.document.settings.record_dependencies.add(fpath)
try:
fp = codecs.open(fpath, encoding='utf-8')
try:
content = fp.read()
finally:
fp.close()
except Exception:
text = 'The NEWS file is not available.'
node = nodes.strong(text, text)
return [node]
content = issue_re.sub(r'`\1ssue #\2 <https://bugs.python.org/\2>`__',
content)
content = whatsnew_re.sub(r'\1', content)
# remove first 3 lines as they are the main heading
lines = ['.. default-role:: obj', ''] + content.splitlines()[3:]
self.state_machine.insert_input(lines, fname)
return []
# Support for building "topic help" for pydoc
pydoc_topic_labels = [
'assert', 'assignment', 'atom-identifiers', 'atom-literals',
'attribute-access', 'attribute-references', 'augassign', 'binary',
'bitwise', 'bltin-code-objects', 'bltin-ellipsis-object',
'bltin-null-object', 'bltin-type-objects', 'booleans',
'break', 'callable-types', 'calls', 'class', 'comparisons', 'compound',
'context-managers', 'continue', 'conversions', 'customization', 'debugger',
'del', 'dict', 'dynamic-features', 'else', 'exceptions', 'execmodel',
'exprlists', 'floating', 'for', 'formatstrings', 'function', 'global',
'id-classes', 'identifiers', 'if', 'imaginary', 'import', 'in', 'integers',
'lambda', 'lists', 'naming', 'nonlocal', 'numbers', 'numeric-types',
'objects', 'operator-summary', 'pass', 'power', 'raise', 'return',
'sequence-types', 'shifting', 'slicings', 'specialattrs', 'specialnames',
'string-methods', 'strings', 'subscriptions', 'truth', 'try', 'types',
'typesfunctions', 'typesmapping', 'typesmethods', 'typesmodules',
'typesseq', 'typesseq-mutable', 'unary', 'while', 'with', 'yield'
]
class PydocTopicsBuilder(Builder):
name = 'pydoc-topics'
def init(self):
self.topics = {}
def get_outdated_docs(self):
return 'all pydoc topics'
def get_target_uri(self, docname, typ=None):
return '' # no URIs
def write(self, *ignored):
writer = TextWriter(self)
for label in self.status_iterator(pydoc_topic_labels,
'building topics... ',
length=len(pydoc_topic_labels)):
if label not in self.env.domaindata['std']['labels']:
self.warn('label %r not in documentation' % label)
continue
docname, labelid, sectname = self.env.domaindata['std']['labels'][label]
doctree = self.env.get_and_resolve_doctree(docname, self)
document = new_document('<section node>')
document.append(doctree.ids[labelid])
destination = StringOutput(encoding='utf-8')
writer.write(document, destination)
self.topics[label] = writer.output
def finish(self):
f = open(path.join(self.outdir, 'topics.py'), 'wb')
try:
f.write('# -*- coding: utf-8 -*-\n'.encode('utf-8'))
f.write(('# Autogenerated by Sphinx on %s\n' % asctime()).encode('utf-8'))
f.write(('topics = ' + pformat(self.topics) + '\n').encode('utf-8'))
finally:
f.close()
# Support for documenting Opcodes
opcode_sig_re = re.compile(r'(\w+(?:\+\d)?)(?:\s*\((.*)\))?')
def parse_opcode_signature(env, sig, signode):
"""Transform an opcode signature into RST nodes."""
m = opcode_sig_re.match(sig)
if m is None:
raise ValueError
opname, arglist = m.groups()
signode += addnodes.desc_name(opname, opname)
if arglist is not None:
paramlist = addnodes.desc_parameterlist()
signode += paramlist
paramlist += addnodes.desc_parameter(arglist, arglist)
return opname.strip()
# Support for documenting pdb commands
pdbcmd_sig_re = re.compile(r'([a-z()!]+)\s*(.*)')
# later...
# pdbargs_tokens_re = re.compile(r'''[a-zA-Z]+ | # identifiers
# [.,:]+ | # punctuation
# [\[\]()] | # parens
# \s+ # whitespace
# ''', re.X)
def parse_pdb_command(env, sig, signode):
"""Transform a pdb command signature into RST nodes."""
m = pdbcmd_sig_re.match(sig)
if m is None:
raise ValueError
name, args = m.groups()
fullname = name.replace('(', '').replace(')', '')
signode += addnodes.desc_name(name, name)
if args:
signode += addnodes.desc_addname(' '+args, ' '+args)
return fullname
def setup(app):
app.add_role('issue', issue_role)
app.add_role('source', source_role)
app.add_directive('impl-detail', ImplementationDetail)
app.add_directive('deprecated-removed', DeprecatedRemoved)
app.add_builder(PydocTopicsBuilder)
app.add_builder(suspicious.CheckSuspiciousMarkupBuilder)
app.add_description_unit('opcode', 'opcode', '%s (opcode)',
parse_opcode_signature)
app.add_description_unit('pdbcommand', 'pdbcmd', '%s (pdb command)',
parse_pdb_command)
app.add_description_unit('2to3fixer', '2to3fixer', '%s (2to3 fixer)')
app.add_directive_to_domain('py', 'decorator', PyDecoratorFunction)
app.add_directive_to_domain('py', 'decoratormethod', PyDecoratorMethod)
app.add_directive_to_domain('py', 'coroutinefunction', PyCoroutineFunction)
app.add_directive_to_domain('py', 'coroutinemethod', PyCoroutineMethod)
app.add_directive('miscnews', MiscNews)
return {'version': '1.0', 'parallel_read_safe': True}
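Editor's note: the lines below are not part of pyspecific.py; they are a minimal, hedged sketch of the substitution that issue_role() performs, with "12345" as a made-up issue number. It only illustrates the URI formatting, not the docutils node construction.

# Mirrors issue_role(): the role text is the bare issue number.
ISSUE_URI = 'https://bugs.python.org/issue%s'

def issue_uri(text):
    return ISSUE_URI % text

assert issue_uri('12345') == 'https://bugs.python.org/issue12345'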
| 36.096515
| 86
| 0.635844
|
import re
import codecs
from os import path
from time import asctime
from pprint import pformat
from docutils.io import StringOutput
from docutils.parsers.rst import Directive
from docutils.utils import new_document
from docutils import nodes, utils
from sphinx import addnodes
from sphinx.builders import Builder
from sphinx.util.nodes import split_explicit_title
from sphinx.writers.html import HTMLTranslator
from sphinx.writers.text import TextWriter
from sphinx.writers.latex import LaTeXTranslator
from sphinx.domains.python import PyModulelevel, PyClassmember
import suspicious
ISSUE_URI = 'https://bugs.python.org/issue%s'
SOURCE_URI = 'https://github.com/python/cpython/tree/3.4/%s'
from docutils.parsers.rst.states import Body
Body.enum.converters['loweralpha'] = \
Body.enum.converters['upperalpha'] = \
Body.enum.converters['lowerroman'] = \
Body.enum.converters['upperroman'] = lambda x: None
orig_visit_literal_block = HTMLTranslator.visit_literal_block
orig_depart_literal_block = LaTeXTranslator.depart_literal_block
def new_visit_literal_block(self, node):
meta = self.builder.env.metadata[self.builder.current_docname]
old_trim_doctest_flags = self.highlighter.trim_doctest_flags
if 'keepdoctest' in meta:
self.highlighter.trim_doctest_flags = False
try:
orig_visit_literal_block(self, node)
finally:
self.highlighter.trim_doctest_flags = old_trim_doctest_flags
def new_depart_literal_block(self, node):
meta = self.builder.env.metadata[self.curfilestack[-1]]
old_trim_doctest_flags = self.highlighter.trim_doctest_flags
if 'keepdoctest' in meta:
self.highlighter.trim_doctest_flags = False
try:
orig_depart_literal_block(self, node)
finally:
self.highlighter.trim_doctest_flags = old_trim_doctest_flags
HTMLTranslator.visit_literal_block = new_visit_literal_block
LaTeXTranslator.depart_literal_block = new_depart_literal_block
def issue_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
issue = utils.unescape(text)
text = 'issue ' + issue
refnode = nodes.reference(text, text, refuri=ISSUE_URI % issue)
return [refnode], []
def source_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
has_t, title, target = split_explicit_title(text)
title = utils.unescape(title)
target = utils.unescape(target)
refnode = nodes.reference(title, title, refuri=SOURCE_URI % target)
return [refnode], []
class ImplementationDetail(Directive):
has_content = True
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
def run(self):
pnode = nodes.compound(classes=['impl-detail'])
content = self.content
add_text = nodes.strong('CPython implementation detail:',
'CPython implementation detail:')
if self.arguments:
n, m = self.state.inline_text(self.arguments[0], self.lineno)
pnode.append(nodes.paragraph('', '', *(n + m)))
self.state.nested_parse(content, self.content_offset, pnode)
if pnode.children and isinstance(pnode[0], nodes.paragraph):
pnode[0].insert(0, add_text)
pnode[0].insert(1, nodes.Text(' '))
else:
pnode.insert(0, nodes.paragraph('', '', add_text))
return [pnode]
class PyDecoratorMixin(object):
def handle_signature(self, sig, signode):
ret = super(PyDecoratorMixin, self).handle_signature(sig, signode)
signode.insert(0, addnodes.desc_addname('@', '@'))
return ret
def needs_arglist(self):
return False
class PyDecoratorFunction(PyDecoratorMixin, PyModulelevel):
def run(self):
self.name = 'py:function'
return PyModulelevel.run(self)
class PyDecoratorMethod(PyDecoratorMixin, PyClassmember):
def run(self):
self.name = 'py:method'
return PyClassmember.run(self)
class PyCoroutineMixin(object):
def handle_signature(self, sig, signode):
ret = super(PyCoroutineMixin, self).handle_signature(sig, signode)
signode.insert(0, addnodes.desc_annotation('coroutine ', 'coroutine '))
return ret
class PyCoroutineFunction(PyCoroutineMixin, PyModulelevel):
def run(self):
self.name = 'py:function'
return PyModulelevel.run(self)
class PyCoroutineMethod(PyCoroutineMixin, PyClassmember):
def run(self):
self.name = 'py:method'
return PyClassmember.run(self)
class DeprecatedRemoved(Directive):
has_content = True
required_arguments = 2
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
_label = 'Deprecated since version %s, will be removed in version %s'
def run(self):
node = addnodes.versionmodified()
node.document = self.state.document
node['type'] = 'deprecated-removed'
version = (self.arguments[0], self.arguments[1])
node['version'] = version
text = self._label % version
if len(self.arguments) == 3:
inodes, messages = self.state.inline_text(self.arguments[2],
self.lineno+1)
para = nodes.paragraph(self.arguments[2], '', *inodes)
node.append(para)
else:
messages = []
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
if len(node):
if isinstance(node[0], nodes.paragraph) and node[0].rawsource:
content = nodes.inline(node[0].rawsource, translatable=True)
content.source = node[0].source
content.line = node[0].line
content += node[0].children
node[0].replace_self(nodes.paragraph('', '', content))
node[0].insert(0, nodes.inline('', '%s: ' % text,
classes=['versionmodified']))
else:
para = nodes.paragraph('', '',
nodes.inline('', '%s.' % text,
classes=['versionmodified']))
node.append(para)
env = self.state.document.settings.env
env.note_versionchange('deprecated', version[0], node, self.lineno)
return [node] + messages
issue_re = re.compile('([Ii])ssue #([0-9]+)')
whatsnew_re = re.compile(r"(?im)^what's new in (.*?)\??$")
class MiscNews(Directive):
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {}
def run(self):
fname = self.arguments[0]
source = self.state_machine.input_lines.source(
self.lineno - self.state_machine.input_offset - 1)
source_dir = path.dirname(path.abspath(source))
fpath = path.join(source_dir, fname)
self.state.document.settings.record_dependencies.add(fpath)
try:
fp = codecs.open(fpath, encoding='utf-8')
try:
content = fp.read()
finally:
fp.close()
except Exception:
text = 'The NEWS file is not available.'
node = nodes.strong(text, text)
return [node]
        content = issue_re.sub(r'`\1ssue #\2 <https://bugs.python.org/\2>`__',
                               content)
content = whatsnew_re.sub(r'\1', content)
# remove first 3 lines as they are the main heading
lines = ['.. default-role:: obj', ''] + content.splitlines()[3:]
self.state_machine.insert_input(lines, fname)
return []
# Support for building "topic help" for pydoc
pydoc_topic_labels = [
'assert', 'assignment', 'atom-identifiers', 'atom-literals',
'attribute-access', 'attribute-references', 'augassign', 'binary',
'bitwise', 'bltin-code-objects', 'bltin-ellipsis-object',
'bltin-null-object', 'bltin-type-objects', 'booleans',
'break', 'callable-types', 'calls', 'class', 'comparisons', 'compound',
'context-managers', 'continue', 'conversions', 'customization', 'debugger',
'del', 'dict', 'dynamic-features', 'else', 'exceptions', 'execmodel',
'exprlists', 'floating', 'for', 'formatstrings', 'function', 'global',
'id-classes', 'identifiers', 'if', 'imaginary', 'import', 'in', 'integers',
'lambda', 'lists', 'naming', 'nonlocal', 'numbers', 'numeric-types',
'objects', 'operator-summary', 'pass', 'power', 'raise', 'return',
'sequence-types', 'shifting', 'slicings', 'specialattrs', 'specialnames',
'string-methods', 'strings', 'subscriptions', 'truth', 'try', 'types',
'typesfunctions', 'typesmapping', 'typesmethods', 'typesmodules',
'typesseq', 'typesseq-mutable', 'unary', 'while', 'with', 'yield'
]
class PydocTopicsBuilder(Builder):
name = 'pydoc-topics'
def init(self):
self.topics = {}
def get_outdated_docs(self):
return 'all pydoc topics'
def get_target_uri(self, docname, typ=None):
return '' # no URIs
def write(self, *ignored):
writer = TextWriter(self)
for label in self.status_iterator(pydoc_topic_labels,
'building topics... ',
length=len(pydoc_topic_labels)):
if label not in self.env.domaindata['std']['labels']:
self.warn('label %r not in documentation' % label)
continue
docname, labelid, sectname = self.env.domaindata['std']['labels'][label]
doctree = self.env.get_and_resolve_doctree(docname, self)
document = new_document('<section node>')
document.append(doctree.ids[labelid])
destination = StringOutput(encoding='utf-8')
writer.write(document, destination)
self.topics[label] = writer.output
def finish(self):
f = open(path.join(self.outdir, 'topics.py'), 'wb')
try:
            f.write('# -*- coding: utf-8 -*-\n'.encode('utf-8'))
            f.write(('# Autogenerated by Sphinx on %s\n' % asctime()).encode('utf-8'))
f.write(('topics = ' + pformat(self.topics) + '\n').encode('utf-8'))
finally:
f.close()
# Support for documenting Opcodes
opcode_sig_re = re.compile(r'(\w+(?:\+\d)?)(?:\s*\((.*)\))?')
def parse_opcode_signature(env, sig, signode):
m = opcode_sig_re.match(sig)
if m is None:
raise ValueError
opname, arglist = m.groups()
signode += addnodes.desc_name(opname, opname)
if arglist is not None:
paramlist = addnodes.desc_parameterlist()
signode += paramlist
paramlist += addnodes.desc_parameter(arglist, arglist)
return opname.strip()
# Support for documenting pdb commands
pdbcmd_sig_re = re.compile(r'([a-z()!]+)\s*(.*)')
# later...
# pdbargs_tokens_re = re.compile(r'''[a-zA-Z]+ | # identifiers
# [.,:]+ | # punctuation
# [\[\]()] | # parens
# \s+ # whitespace
# ''', re.X)
def parse_pdb_command(env, sig, signode):
m = pdbcmd_sig_re.match(sig)
if m is None:
raise ValueError
name, args = m.groups()
fullname = name.replace('(', '').replace(')', '')
signode += addnodes.desc_name(name, name)
if args:
signode += addnodes.desc_addname(' '+args, ' '+args)
return fullname
def setup(app):
app.add_role('issue', issue_role)
app.add_role('source', source_role)
app.add_directive('impl-detail', ImplementationDetail)
app.add_directive('deprecated-removed', DeprecatedRemoved)
app.add_builder(PydocTopicsBuilder)
app.add_builder(suspicious.CheckSuspiciousMarkupBuilder)
app.add_description_unit('opcode', 'opcode', '%s (opcode)',
parse_opcode_signature)
app.add_description_unit('pdbcommand', 'pdbcmd', '%s (pdb command)',
parse_pdb_command)
app.add_description_unit('2to3fixer', '2to3fixer', '%s (2to3 fixer)')
app.add_directive_to_domain('py', 'decorator', PyDecoratorFunction)
app.add_directive_to_domain('py', 'decoratormethod', PyDecoratorMethod)
app.add_directive_to_domain('py', 'coroutinefunction', PyCoroutineFunction)
app.add_directive_to_domain('py', 'coroutinemethod', PyCoroutineMethod)
app.add_directive('miscnews', MiscNews)
return {'version': '1.0', 'parallel_read_safe': True}
| true
| true
|
f719ae360e05e3d0b1462b0875f0af93d02276fd
| 5,643
|
py
|
Python
|
airflow/executors/debug_executor.py
|
IGIT-CN/airflow
|
a6e5bcd59198afe5716813e84ebc4c59eade532c
|
[
"Apache-2.0"
] | 3
|
2019-12-11T15:54:13.000Z
|
2021-05-24T20:21:08.000Z
|
airflow/executors/debug_executor.py
|
IGIT-CN/airflow
|
a6e5bcd59198afe5716813e84ebc4c59eade532c
|
[
"Apache-2.0"
] | 8
|
2021-02-08T20:40:47.000Z
|
2022-03-29T22:27:53.000Z
|
airflow/executors/debug_executor.py
|
IGIT-CN/airflow
|
a6e5bcd59198afe5716813e84ebc4c59eade532c
|
[
"Apache-2.0"
] | 2
|
2021-01-11T13:53:03.000Z
|
2021-10-02T05:06:34.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains DebugExecutor that is a single
process executor meaning it does not use multiprocessing.
"""
import threading
from typing import Any, Dict, List, Optional
from airflow.configuration import conf
from airflow.executors.base_executor import BaseExecutor
from airflow.models.taskinstance import TaskInstance, TaskInstanceKeyType
from airflow.utils.state import State
class DebugExecutor(BaseExecutor):
"""
This executor is meant for debugging purposes. It can be used with SQLite.
It executes one task instance at time. Additionally to support working
with sensors, all sensors ``mode`` will be automatically set to "reschedule".
"""
_terminated = threading.Event()
def __init__(self):
super().__init__()
self.tasks_to_run: List[TaskInstance] = []
# Place where we keep information for task instance raw run
self.tasks_params: Dict[TaskInstanceKeyType, Dict[str, Any]] = {}
self.fail_fast = conf.getboolean("debug", "fail_fast")
def execute_async(self, *args, **kwargs) -> None:
"""
The method is replaced by custom trigger_task implementation.
"""
def sync(self) -> None:
task_succeeded = True
while self.tasks_to_run:
ti = self.tasks_to_run.pop(0)
if self.fail_fast and not task_succeeded:
self.log.info("Setting %s to %s", ti.key, State.UPSTREAM_FAILED)
ti.set_state(State.UPSTREAM_FAILED)
self.change_state(ti.key, State.UPSTREAM_FAILED)
continue
if self._terminated.is_set():
self.log.info(
"Executor is terminated! Stopping %s to %s", ti.key, State.FAILED
)
ti.set_state(State.FAILED)
self.change_state(ti.key, State.FAILED)
continue
task_succeeded = self._run_task(ti)
def _run_task(self, ti: TaskInstance) -> bool:
self.log.debug("Executing task: %s", ti)
key = ti.key
try:
params = self.tasks_params.pop(ti.key, {})
ti._run_raw_task( # pylint: disable=protected-access
job_id=ti.job_id, **params
)
self.change_state(key, State.SUCCESS)
return True
except Exception as e: # pylint: disable=broad-except
self.change_state(key, State.FAILED)
self.log.exception("Failed to execute task: %s.", str(e))
return False
def queue_task_instance(
self,
task_instance: TaskInstance,
mark_success: bool = False,
pickle_id: Optional[str] = None,
ignore_all_deps: bool = False,
ignore_depends_on_past: bool = False,
ignore_task_deps: bool = False,
ignore_ti_state: bool = False,
pool: Optional[str] = None,
cfg_path: Optional[str] = None,
) -> None:
"""
Queues task instance with empty command because we do not need it.
"""
self.queue_command(
task_instance,
[str(task_instance)], # Just for better logging, it's not used anywhere
priority=task_instance.task.priority_weight_total,
queue=task_instance.task.queue,
)
# Save params for TaskInstance._run_raw_task
self.tasks_params[task_instance.key] = {
"mark_success": mark_success,
"pool": pool,
}
def trigger_tasks(self, open_slots: int) -> None:
"""
Triggers tasks. Instead of calling exec_async we just
add task instance to tasks_to_run queue.
:param open_slots: Number of open slots
"""
sorted_queue = sorted(
[(k, v) for k, v in self.queued_tasks.items()], # pylint: disable=unnecessary-comprehension
key=lambda x: x[1][1],
reverse=True,
)
for _ in range(min((open_slots, len(self.queued_tasks)))):
key, (_, _, _, ti) = sorted_queue.pop(0)
self.queued_tasks.pop(key)
self.running.add(key)
self.tasks_to_run.append(ti) # type: ignore
def end(self) -> None:
"""
When the method is called we just set states of queued tasks
to UPSTREAM_FAILED marking them as not executed.
"""
for ti in self.tasks_to_run:
self.log.info("Setting %s to %s", ti.key, State.UPSTREAM_FAILED)
ti.set_state(State.UPSTREAM_FAILED)
self.change_state(ti.key, State.UPSTREAM_FAILED)
def terminate(self) -> None:
self._terminated.set()
def change_state(self, key: TaskInstanceKeyType, state: str) -> None:
self.log.debug("Popping %s from executor task queue.", key)
self.running.remove(key)
self.event_buffer[key] = state
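Editor's note: the lines below are not part of debug_executor.py; they are a dependency-free, hedged sketch of the ordering used in trigger_tasks() above. queued_tasks maps a key to a tuple whose second element is the priority weight; the keys and numbers are made up.

queued_tasks = {
    "task_a": ("cmd", 1, None, "ti_a"),
    "task_b": ("cmd", 5, None, "ti_b"),
    "task_c": ("cmd", 3, None, "ti_c"),
}
# Same key function as trigger_tasks(): sort by priority, highest first.
sorted_queue = sorted(queued_tasks.items(), key=lambda x: x[1][1], reverse=True)
print([key for key, _ in sorted_queue])  # ['task_b', 'task_c', 'task_a']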
| 37.370861
| 104
| 0.633174
|
import threading
from typing import Any, Dict, List, Optional
from airflow.configuration import conf
from airflow.executors.base_executor import BaseExecutor
from airflow.models.taskinstance import TaskInstance, TaskInstanceKeyType
from airflow.utils.state import State
class DebugExecutor(BaseExecutor):
_terminated = threading.Event()
def __init__(self):
super().__init__()
self.tasks_to_run: List[TaskInstance] = []
self.tasks_params: Dict[TaskInstanceKeyType, Dict[str, Any]] = {}
self.fail_fast = conf.getboolean("debug", "fail_fast")
def execute_async(self, *args, **kwargs) -> None:
def sync(self) -> None:
task_succeeded = True
while self.tasks_to_run:
ti = self.tasks_to_run.pop(0)
if self.fail_fast and not task_succeeded:
self.log.info("Setting %s to %s", ti.key, State.UPSTREAM_FAILED)
ti.set_state(State.UPSTREAM_FAILED)
self.change_state(ti.key, State.UPSTREAM_FAILED)
continue
if self._terminated.is_set():
self.log.info(
"Executor is terminated! Stopping %s to %s", ti.key, State.FAILED
)
ti.set_state(State.FAILED)
self.change_state(ti.key, State.FAILED)
continue
task_succeeded = self._run_task(ti)
def _run_task(self, ti: TaskInstance) -> bool:
self.log.debug("Executing task: %s", ti)
key = ti.key
try:
params = self.tasks_params.pop(ti.key, {})
ti._run_raw_task(
job_id=ti.job_id, **params
)
self.change_state(key, State.SUCCESS)
return True
except Exception as e:
self.change_state(key, State.FAILED)
self.log.exception("Failed to execute task: %s.", str(e))
return False
def queue_task_instance(
self,
task_instance: TaskInstance,
mark_success: bool = False,
pickle_id: Optional[str] = None,
ignore_all_deps: bool = False,
ignore_depends_on_past: bool = False,
ignore_task_deps: bool = False,
ignore_ti_state: bool = False,
pool: Optional[str] = None,
cfg_path: Optional[str] = None,
) -> None:
self.queue_command(
task_instance,
[str(task_instance)],
priority=task_instance.task.priority_weight_total,
queue=task_instance.task.queue,
)
# Save params for TaskInstance._run_raw_task
self.tasks_params[task_instance.key] = {
"mark_success": mark_success,
"pool": pool,
}
def trigger_tasks(self, open_slots: int) -> None:
sorted_queue = sorted(
[(k, v) for k, v in self.queued_tasks.items()], # pylint: disable=unnecessary-comprehension
key=lambda x: x[1][1],
reverse=True,
)
for _ in range(min((open_slots, len(self.queued_tasks)))):
key, (_, _, _, ti) = sorted_queue.pop(0)
self.queued_tasks.pop(key)
self.running.add(key)
self.tasks_to_run.append(ti) # type: ignore
def end(self) -> None:
for ti in self.tasks_to_run:
self.log.info("Setting %s to %s", ti.key, State.UPSTREAM_FAILED)
ti.set_state(State.UPSTREAM_FAILED)
self.change_state(ti.key, State.UPSTREAM_FAILED)
def terminate(self) -> None:
self._terminated.set()
def change_state(self, key: TaskInstanceKeyType, state: str) -> None:
self.log.debug("Popping %s from executor task queue.", key)
self.running.remove(key)
self.event_buffer[key] = state
| true
| true
|
f719af5392c1befb33e7fc5a3df49b8e3154b0ce
| 2,063
|
py
|
Python
|
aliyun-python-sdk-eas/aliyunsdkeas/request/v20210701/ListServicesRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-eas/aliyunsdkeas/request/v20210701/ListServicesRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-eas/aliyunsdkeas/request/v20210701/ListServicesRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkeas.endpoint import endpoint_data
class ListServicesRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'eas', '2021-07-01', 'ListServices','eas')
self.set_uri_pattern('/api/v2/services')
self.set_method('GET')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Filter(self):
return self.get_query_params().get('Filter')
def set_Filter(self,Filter):
self.add_query_param('Filter',Filter)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_Sort(self):
return self.get_query_params().get('Sort')
def set_Sort(self,Sort):
self.add_query_param('Sort',Sort)
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def get_Order(self):
return self.get_query_params().get('Order')
def set_Order(self,Order):
self.add_query_param('Order',Order)
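Editor's note: the lines below are not part of the SDK file above; they are a hedged, dependency-free stand-in for the query-parameter pattern its getters/setters rely on (the real class stores the parameters on a RoaRequest and sends them through an AcsClient, which is not shown here).

class FakeListServicesRequest:
    """Made-up stand-in used only to illustrate the add/get query-param flow."""
    def __init__(self):
        self._query_params = {}

    def add_query_param(self, key, value):
        self._query_params[key] = value

    def get_query_params(self):
        return self._query_params

req = FakeListServicesRequest()
req.add_query_param('PageNumber', 1)
req.add_query_param('PageSize', 20)
print(req.get_query_params())  # {'PageNumber': 1, 'PageSize': 20}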
| 32.746032
| 74
| 0.750848
|
from aliyunsdkcore.request import RoaRequest
from aliyunsdkeas.endpoint import endpoint_data
class ListServicesRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'eas', '2021-07-01', 'ListServices','eas')
self.set_uri_pattern('/api/v2/services')
self.set_method('GET')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Filter(self):
return self.get_query_params().get('Filter')
def set_Filter(self,Filter):
self.add_query_param('Filter',Filter)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_Sort(self):
return self.get_query_params().get('Sort')
def set_Sort(self,Sort):
self.add_query_param('Sort',Sort)
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def get_Order(self):
return self.get_query_params().get('Order')
def set_Order(self,Order):
self.add_query_param('Order',Order)
| true
| true
|
f719af5c196d30f0eb97eff99d60406c1d503639
| 1,912
|
py
|
Python
|
tests/unit/recommenders/models/test_newsrec_utils.py
|
enowy/Recommenders
|
60033231b9167438032843c23158c0c776856e0e
|
[
"MIT"
] | 10
|
2019-05-06T21:57:10.000Z
|
2019-05-07T06:15:39.000Z
|
tests/unit/recommenders/models/test_newsrec_utils.py
|
enowy/Recommenders
|
60033231b9167438032843c23158c0c776856e0e
|
[
"MIT"
] | 2
|
2022-01-19T20:24:51.000Z
|
2022-02-18T20:25:24.000Z
|
tests/unit/recommenders/models/test_newsrec_utils.py
|
enowy/Recommenders
|
60033231b9167438032843c23158c0c776856e0e
|
[
"MIT"
] | 3
|
2019-05-06T22:24:21.000Z
|
2019-05-07T02:50:46.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import pytest
try:
from recommenders.models.deeprec.deeprec_utils import download_deeprec_resources
from recommenders.models.newsrec.newsrec_utils import prepare_hparams, load_yaml
except ImportError:
pass # skip this import if we are in cpu environment
@pytest.mark.parametrize(
"must_exist_attributes", ["wordEmb_file", "wordDict_file", "userDict_file"]
)
@pytest.mark.gpu
def test_prepare_hparams(must_exist_attributes, deeprec_resource_path):
wordEmb_file = os.path.join(deeprec_resource_path, "mind", "utils", "embedding.npy")
userDict_file = os.path.join(
deeprec_resource_path, "mind", "utils", "uid2index.pkl"
)
wordDict_file = os.path.join(
deeprec_resource_path, "mind", "utils", "word_dict.pkl"
)
yaml_file = os.path.join(deeprec_resource_path, "mind", "utils", r"nrms.yaml")
if not os.path.exists(yaml_file):
download_deeprec_resources(
r"https://recodatasets.z20.web.core.windows.net/newsrec/",
os.path.join(deeprec_resource_path, "mind", "utils"),
"MINDdemo_utils.zip",
)
hparams = prepare_hparams(
yaml_file,
wordEmb_file=wordEmb_file,
wordDict_file=wordDict_file,
userDict_file=userDict_file,
epochs=1,
)
assert hasattr(hparams, must_exist_attributes)
@pytest.mark.gpu
def test_load_yaml_file(deeprec_resource_path):
yaml_file = os.path.join(deeprec_resource_path, "mind", "utils", r"nrms.yaml")
if not os.path.exists(yaml_file):
download_deeprec_resources(
"https://recodatasets.z20.web.core.windows.net/newsrec/",
os.path.join(deeprec_resource_path, "mind", "utils"),
"MINDdemo_utils.zip",
)
config = load_yaml(yaml_file)
assert config is not None
| 33.54386
| 88
| 0.69613
|
import os
import pytest
try:
from recommenders.models.deeprec.deeprec_utils import download_deeprec_resources
from recommenders.models.newsrec.newsrec_utils import prepare_hparams, load_yaml
except ImportError:
pass
@pytest.mark.parametrize(
"must_exist_attributes", ["wordEmb_file", "wordDict_file", "userDict_file"]
)
@pytest.mark.gpu
def test_prepare_hparams(must_exist_attributes, deeprec_resource_path):
wordEmb_file = os.path.join(deeprec_resource_path, "mind", "utils", "embedding.npy")
userDict_file = os.path.join(
deeprec_resource_path, "mind", "utils", "uid2index.pkl"
)
wordDict_file = os.path.join(
deeprec_resource_path, "mind", "utils", "word_dict.pkl"
)
yaml_file = os.path.join(deeprec_resource_path, "mind", "utils", r"nrms.yaml")
if not os.path.exists(yaml_file):
download_deeprec_resources(
r"https://recodatasets.z20.web.core.windows.net/newsrec/",
os.path.join(deeprec_resource_path, "mind", "utils"),
"MINDdemo_utils.zip",
)
hparams = prepare_hparams(
yaml_file,
wordEmb_file=wordEmb_file,
wordDict_file=wordDict_file,
userDict_file=userDict_file,
epochs=1,
)
assert hasattr(hparams, must_exist_attributes)
@pytest.mark.gpu
def test_load_yaml_file(deeprec_resource_path):
yaml_file = os.path.join(deeprec_resource_path, "mind", "utils", r"nrms.yaml")
if not os.path.exists(yaml_file):
download_deeprec_resources(
"https://recodatasets.z20.web.core.windows.net/newsrec/",
os.path.join(deeprec_resource_path, "mind", "utils"),
"MINDdemo_utils.zip",
)
config = load_yaml(yaml_file)
assert config is not None
| true
| true
|
f719af7723defb10087e667c5753c6f31f956520
| 12,081
|
py
|
Python
|
Self_Driving_Car/P1/LaneLines-P1/P1.py
|
Wentaobi/Udacity
|
00af9c36b42d6bca5f2d42d2744efed2ddb51587
|
[
"Apache-2.0"
] | null | null | null |
Self_Driving_Car/P1/LaneLines-P1/P1.py
|
Wentaobi/Udacity
|
00af9c36b42d6bca5f2d42d2744efed2ddb51587
|
[
"Apache-2.0"
] | null | null | null |
Self_Driving_Car/P1/LaneLines-P1/P1.py
|
Wentaobi/Udacity
|
00af9c36b42d6bca5f2d42d2744efed2ddb51587
|
[
"Apache-2.0"
] | null | null | null |
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
#reading in an image
image = mpimg.imread('test_images/solidWhiteRight.jpg');
#printing out some stats and plotting
print('This image is:', type(image), 'with dimesions:', image.shape)
plt.imshow(image); #call as plt.imshow(gray, cmap='gray') to show a grayscaled image
import math
def grayscale(img):
"""Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
you should call plt.imshow(gray, cmap='gray')"""
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread()
# return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def hsv(img):
return cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
"""
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def draw_lines(img, lines, color=[255, 0, 0], thickness=13):
"""
NOTE: this is the function you might want to use as a starting point once you want to
average/extrapolate the line segments you detect to map out the full
extent of the lane (going from the result shown in raw-lines-example.mp4
to that shown in P1_example.mp4).
Think about things like separating line segments by their
slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
line vs. the right line. Then, you can average the position of each of
the lines and extrapolate to the top and bottom of the lane.
This function draws `lines` with `color` and `thickness`.
Lines are drawn on the image inplace (mutates the image).
If you want to make the lines semi-transparent, think about combining
this function with the weighted_img() function below
"""
x_size = img.shape[1]
y_size = img.shape[0]
lines_slope_intercept = np.zeros(shape=(len(lines),2))
for index,line in enumerate(lines):
for x1,y1,x2,y2 in line:
slope = (y2-y1)/(x2-x1)
intercept = y1 - x1 * slope
lines_slope_intercept[index]=[slope,intercept]
max_slope_line = lines_slope_intercept[lines_slope_intercept.argmax(axis=0)[0]]
min_slope_line = lines_slope_intercept[lines_slope_intercept.argmin(axis=0)[0]]
left_slopes = []
left_intercepts = []
right_slopes = []
right_intercepts = []
# this gets slopes and intercepts of lines similar to the lines with the max (immediate left) and min
# (immediate right) slopes (i.e. slope and intercept within x%)
for line in lines_slope_intercept:
if abs(line[0] - max_slope_line[0]) < 0.15 and abs(line[1] - max_slope_line[1]) < (0.15 * x_size):
left_slopes.append(line[0])
left_intercepts.append(line[1])
elif abs(line[0] - min_slope_line[0]) < 0.15 and abs(line[1] - min_slope_line[1]) < (0.15 * x_size):
right_slopes.append(line[0])
right_intercepts.append(line[1])
# left and right lines are averages of these slopes and intercepts, extrapolate lines to edges and center*
# *roughly
new_lines = np.zeros(shape=(1,2,4), dtype=np.int32)
if len(left_slopes) > 0:
left_line = [sum(left_slopes)/len(left_slopes),sum(left_intercepts)/len(left_intercepts)]
left_bottom_x = (y_size - left_line[1])/left_line[0]
left_top_x = (y_size*.575 - left_line[1])/left_line[0]
if (left_bottom_x >= 0):
new_lines[0][0] =[left_bottom_x,y_size,left_top_x,y_size*.575]
if len(right_slopes) > 0:
right_line = [sum(right_slopes)/len(right_slopes),sum(right_intercepts)/len(right_intercepts)]
right_bottom_x = (y_size - right_line[1])/right_line[0]
right_top_x = (y_size*.575 - right_line[1])/right_line[0]
if (right_bottom_x <= x_size):
new_lines[0][1]=[right_bottom_x,y_size,right_top_x,y_size*.575]
for line in new_lines:
for x1,y1,x2,y2 in line:
cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
"""
`img` should be the output of a Canny transform.
Returns an image with hough lines drawn.
"""
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
draw_lines(line_img, lines)
return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., λ=0.):
"""
`img` is the output of the hough_lines(), An image with lines drawn on it.
Should be a blank image (all black) with lines drawn on it.
`initial_img` should be the image before any processing.
The result image is computed as follows:
initial_img * α + img * β + λ
NOTE: initial_img and img must be the same shape!
"""
return cv2.addWeighted(initial_img, α, img, β, λ)
import os
os.listdir("test_images/")
#reading in an image
for index, img in enumerate(os.listdir("test_images/")):
image = mpimg.imread('test_images/' + img)
gray_img = grayscale(image)
hsv_img = hsv(image)
# define range of color in HSV
lower_yel = np.array([20,100,100])
upper_yel = np.array([30,255,255])
lower_wht = np.array([0,0,235])
upper_wht = np.array([255,255,255])
# Threshold the HSV image to get only yellow/white
yellow_mask = cv2.inRange(hsv_img, lower_yel, upper_yel)
white_mask = cv2.inRange(hsv_img, lower_wht, upper_wht)
# Bitwise-AND mask and original image
full_mask = cv2.bitwise_or(yellow_mask, white_mask)
subdued_gray = (gray_img / 2).astype('uint8')
boosted_lanes = cv2.bitwise_or(subdued_gray, full_mask)
kernel_size = 5
blurred_img = gaussian_blur(boosted_lanes,kernel_size)
canny_low_threshold = 60
canny_high_threshold = 150
edges_img = canny(blurred_img,canny_low_threshold,canny_high_threshold)
x = edges_img.shape[1]
y = edges_img.shape[0]
vertices = np.array([[(x*0.,y),(x*.475, y*.575), (x*.525, y*.575), (x,y)]], dtype=np.int32)
masked_img = region_of_interest(edges_img, vertices)
hough_rho = 3
hough_theta = np.pi/180
hough_threshold = 70
hough_min_line_length = 70
hough_max_line_gap = 250
hough_img = hough_lines(masked_img,hough_rho,hough_theta,hough_threshold,hough_min_line_length,hough_max_line_gap)
result = weighted_img(hough_img,image)
fig = plt.figure(figsize=(6,10))
plt.imshow(result, cmap="gray") #call as plt.imshow(gray, cmap='gray') to show a grayscaled image
#reading in an image
for index, img in enumerate(os.listdir("test_images2/")):
image = mpimg.imread('test_images2/' + img)
gray_img = grayscale(image)
hsv_img = hsv(image)
# define range of color in HSV
lower_yel = np.array([20,100,100])
upper_yel = np.array([30,255,255])
lower_wht = np.array([0,0,235])
upper_wht = np.array([255,255,255])
# Threshold the HSV image to get only yellow/white
yellow_mask = cv2.inRange(hsv_img, lower_yel, upper_yel)
white_mask = cv2.inRange(hsv_img, lower_wht, upper_wht)
# Bitwise-AND mask and original image
full_mask = cv2.bitwise_or(yellow_mask, white_mask)
subdued_gray = (gray_img / 2).astype('uint8')
boosted_lanes = cv2.bitwise_or(subdued_gray, full_mask)
kernel_size = 5
blurred_img = gaussian_blur(boosted_lanes,kernel_size)
canny_low_threshold = 60
canny_high_threshold = 150
edges_img = canny(blurred_img,canny_low_threshold,canny_high_threshold)
x = edges_img.shape[1]
y = edges_img.shape[0]
vertices = np.array([[(x*0.,y),(x*.475, y*.575), (x*.525, y*.575), (x,y)]], dtype=np.int32)
masked_img = region_of_interest(edges_img, vertices)
hough_rho = 3
hough_theta = np.pi/180
hough_threshold = 70
hough_min_line_length = 70
hough_max_line_gap = 250
hough_img = hough_lines(masked_img,hough_rho,hough_theta,hough_threshold,hough_min_line_length,hough_max_line_gap)
result = weighted_img(hough_img,image)
fig = plt.figure(figsize=(8,10))
plt.imshow(result, cmap="gray") #call as plt.imshow(gray, cmap='gray') to show a grayscaled image
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
# from IPython.display import HTML
def process_image(image):
# NOTE: The output you return should be a color image (3 channel) for processing video below
# TODO: put your pipeline here,
# you should return the final output (image with lines are drawn on lanes)
gray_img = grayscale(image)
hsv_img = hsv(image)
# define range of color in HSV
lower_yel = np.array([20,100,100])
upper_yel = np.array([30,255,255])
lower_wht = np.array([0,0,235])
upper_wht = np.array([255,255,255])
# Threshold the HSV image to get only yellow/white
yellow_mask = cv2.inRange(hsv_img, lower_yel, upper_yel)
white_mask = cv2.inRange(hsv_img, lower_wht, upper_wht)
# Bitwise-AND mask and original image
full_mask = cv2.bitwise_or(yellow_mask, white_mask)
subdued_gray = (gray_img / 2).astype('uint8')
boosted_lanes = cv2.bitwise_or(subdued_gray, full_mask)
kernel_size = 5
blurred_img = gaussian_blur(boosted_lanes,kernel_size)
canny_low_threshold = 60
canny_high_threshold = 150
edges_img = canny(blurred_img,canny_low_threshold,canny_high_threshold)
x = edges_img.shape[1]
y = edges_img.shape[0]
vertices = np.array([[(x*0.,y),(x*.475, y*.575), (x*.525, y*.575), (x,y)]], dtype=np.int32)
masked_img = region_of_interest(edges_img, vertices)
hough_rho = 3
hough_theta = np.pi/180
hough_threshold = 70
hough_min_line_length = 70
hough_max_line_gap = 250
hough_img = hough_lines(masked_img,hough_rho,hough_theta,hough_threshold,hough_min_line_length,hough_max_line_gap)
result = weighted_img(hough_img,image)
#return cv2.cvtColor(masked_img, cv2.COLOR_GRAY2RGB)
return result
white_output = 'white.mp4'
clip1 = VideoFileClip("solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
white_clip.write_videofile(white_output, audio=False)
# HTML("""
# <video width="960" height="540" controls>
# <source src="{0}">
# </video>
# """.format(white_output))
yellow_output = 'yellow.mp4'
clip2 = VideoFileClip('solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
yellow_clip.write_videofile(yellow_output, audio=False)
# HTML("""
# <video width="960" height="540" controls>
# <source src="{0}">
# </video>
# """.format(yellow_output))
challenge_output = 'extra.mp4'
clip2 = VideoFileClip('challenge.mp4')
challenge_clip = clip2.fl_image(process_image)
challenge_clip.write_videofile(challenge_output, audio=False)
#
# HTML("""
# <video width="960" height="540" controls>
# <source src="{0}">
# </video>
# """.format(challenge_output))
| 35.848665
| 122
| 0.698121
|
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
image = mpimg.imread('test_images/solidWhiteRight.jpg');
print('This image is:', type(image), 'with dimesions:', image.shape)
plt.imshow(image);
import math
def grayscale(img):
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
def hsv(img):
return cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
def canny(img, low_threshold, high_threshold):
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
mask = np.zeros_like(img)
if len(img.shape) > 2:
channel_count = img.shape[2]
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
cv2.fillPoly(mask, vertices, ignore_mask_color)
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def draw_lines(img, lines, color=[255, 0, 0], thickness=13):
x_size = img.shape[1]
y_size = img.shape[0]
lines_slope_intercept = np.zeros(shape=(len(lines),2))
for index,line in enumerate(lines):
for x1,y1,x2,y2 in line:
slope = (y2-y1)/(x2-x1)
intercept = y1 - x1 * slope
lines_slope_intercept[index]=[slope,intercept]
max_slope_line = lines_slope_intercept[lines_slope_intercept.argmax(axis=0)[0]]
min_slope_line = lines_slope_intercept[lines_slope_intercept.argmin(axis=0)[0]]
left_slopes = []
left_intercepts = []
right_slopes = []
right_intercepts = []
for line in lines_slope_intercept:
if abs(line[0] - max_slope_line[0]) < 0.15 and abs(line[1] - max_slope_line[1]) < (0.15 * x_size):
left_slopes.append(line[0])
left_intercepts.append(line[1])
elif abs(line[0] - min_slope_line[0]) < 0.15 and abs(line[1] - min_slope_line[1]) < (0.15 * x_size):
right_slopes.append(line[0])
right_intercepts.append(line[1])
new_lines = np.zeros(shape=(1,2,4), dtype=np.int32)
if len(left_slopes) > 0:
left_line = [sum(left_slopes)/len(left_slopes),sum(left_intercepts)/len(left_intercepts)]
left_bottom_x = (y_size - left_line[1])/left_line[0]
left_top_x = (y_size*.575 - left_line[1])/left_line[0]
if (left_bottom_x >= 0):
new_lines[0][0] =[left_bottom_x,y_size,left_top_x,y_size*.575]
if len(right_slopes) > 0:
right_line = [sum(right_slopes)/len(right_slopes),sum(right_intercepts)/len(right_intercepts)]
right_bottom_x = (y_size - right_line[1])/right_line[0]
right_top_x = (y_size*.575 - right_line[1])/right_line[0]
if (right_bottom_x <= x_size):
new_lines[0][1]=[right_bottom_x,y_size,right_top_x,y_size*.575]
for line in new_lines:
for x1,y1,x2,y2 in line:
cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
draw_lines(line_img, lines)
return line_img
def weighted_img(img, initial_img, α=0.8, β=1., λ=0.):
return cv2.addWeighted(initial_img, α, img, β, λ)
import os
os.listdir("test_images/")
for index, img in enumerate(os.listdir("test_images/")):
image = mpimg.imread('test_images/' + img)
gray_img = grayscale(image)
hsv_img = hsv(image)
lower_yel = np.array([20,100,100])
upper_yel = np.array([30,255,255])
lower_wht = np.array([0,0,235])
upper_wht = np.array([255,255,255])
yellow_mask = cv2.inRange(hsv_img, lower_yel, upper_yel)
white_mask = cv2.inRange(hsv_img, lower_wht, upper_wht)
full_mask = cv2.bitwise_or(yellow_mask, white_mask)
subdued_gray = (gray_img / 2).astype('uint8')
boosted_lanes = cv2.bitwise_or(subdued_gray, full_mask)
kernel_size = 5
blurred_img = gaussian_blur(boosted_lanes,kernel_size)
canny_low_threshold = 60
canny_high_threshold = 150
edges_img = canny(blurred_img,canny_low_threshold,canny_high_threshold)
x = edges_img.shape[1]
y = edges_img.shape[0]
vertices = np.array([[(x*0.,y),(x*.475, y*.575), (x*.525, y*.575), (x,y)]], dtype=np.int32)
masked_img = region_of_interest(edges_img, vertices)
hough_rho = 3
hough_theta = np.pi/180
hough_threshold = 70
hough_min_line_length = 70
hough_max_line_gap = 250
hough_img = hough_lines(masked_img,hough_rho,hough_theta,hough_threshold,hough_min_line_length,hough_max_line_gap)
result = weighted_img(hough_img,image)
fig = plt.figure(figsize=(6,10))
plt.imshow(result, cmap="gray")
for index, img in enumerate(os.listdir("test_images2/")):
image = mpimg.imread('test_images2/' + img)
gray_img = grayscale(image)
hsv_img = hsv(image)
lower_yel = np.array([20,100,100])
upper_yel = np.array([30,255,255])
lower_wht = np.array([0,0,235])
upper_wht = np.array([255,255,255])
yellow_mask = cv2.inRange(hsv_img, lower_yel, upper_yel)
white_mask = cv2.inRange(hsv_img, lower_wht, upper_wht)
full_mask = cv2.bitwise_or(yellow_mask, white_mask)
subdued_gray = (gray_img / 2).astype('uint8')
boosted_lanes = cv2.bitwise_or(subdued_gray, full_mask)
kernel_size = 5
blurred_img = gaussian_blur(boosted_lanes,kernel_size)
canny_low_threshold = 60
canny_high_threshold = 150
edges_img = canny(blurred_img,canny_low_threshold,canny_high_threshold)
x = edges_img.shape[1]
y = edges_img.shape[0]
vertices = np.array([[(x*0.,y),(x*.475, y*.575), (x*.525, y*.575), (x,y)]], dtype=np.int32)
masked_img = region_of_interest(edges_img, vertices)
hough_rho = 3
hough_theta = np.pi/180
hough_threshold = 70
hough_min_line_length = 70
hough_max_line_gap = 250
hough_img = hough_lines(masked_img,hough_rho,hough_theta,hough_threshold,hough_min_line_length,hough_max_line_gap)
result = weighted_img(hough_img,image)
fig = plt.figure(figsize=(8,10))
plt.imshow(result, cmap="gray")
from moviepy.editor import VideoFileClip
def process_image(image):
gray_img = grayscale(image)
hsv_img = hsv(image)
lower_yel = np.array([20,100,100])
upper_yel = np.array([30,255,255])
lower_wht = np.array([0,0,235])
upper_wht = np.array([255,255,255])
yellow_mask = cv2.inRange(hsv_img, lower_yel, upper_yel)
white_mask = cv2.inRange(hsv_img, lower_wht, upper_wht)
full_mask = cv2.bitwise_or(yellow_mask, white_mask)
subdued_gray = (gray_img / 2).astype('uint8')
boosted_lanes = cv2.bitwise_or(subdued_gray, full_mask)
kernel_size = 5
blurred_img = gaussian_blur(boosted_lanes,kernel_size)
canny_low_threshold = 60
canny_high_threshold = 150
edges_img = canny(blurred_img,canny_low_threshold,canny_high_threshold)
x = edges_img.shape[1]
y = edges_img.shape[0]
vertices = np.array([[(x*0.,y),(x*.475, y*.575), (x*.525, y*.575), (x,y)]], dtype=np.int32)
masked_img = region_of_interest(edges_img, vertices)
hough_rho = 3
hough_theta = np.pi/180
hough_threshold = 70
hough_min_line_length = 70
hough_max_line_gap = 250
hough_img = hough_lines(masked_img,hough_rho,hough_theta,hough_threshold,hough_min_line_length,hough_max_line_gap)
result = weighted_img(hough_img,image)
return result
white_output = 'white.mp4'
clip1 = VideoFileClip("solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image)
white_clip.write_videofile(white_output, audio=False)
# <video width="960" height="540" controls>
# <source src="{0}">
# </video>
# """.format(white_output))
yellow_output = 'yellow.mp4'
clip2 = VideoFileClip('solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
yellow_clip.write_videofile(yellow_output, audio=False)
# <video width="960" height="540" controls>
# <source src="{0}">
# </video>
# """.format(yellow_output))
challenge_output = 'extra.mp4'
clip2 = VideoFileClip('challenge.mp4')
challenge_clip = clip2.fl_image(process_image)
challenge_clip.write_videofile(challenge_output, audio=False)
# <video width="960" height="540" controls>
# <source src="{0}">
# </video>
# """.format(challenge_output))
| true
| true
|
f719afb71003662d81876c64edd582861d9f11a6
| 1,088
|
py
|
Python
|
exercicios-Python/desaf045.py
|
marcelo-py/Exercicios-Python
|
d654d54821983897dbc377a2d3db97671dd75b5b
|
[
"MIT"
] | null | null | null |
exercicios-Python/desaf045.py
|
marcelo-py/Exercicios-Python
|
d654d54821983897dbc377a2d3db97671dd75b5b
|
[
"MIT"
] | null | null | null |
exercicios-Python/desaf045.py
|
marcelo-py/Exercicios-Python
|
d654d54821983897dbc377a2d3db97671dd75b5b
|
[
"MIT"
] | null | null | null |
import random
from emoji import emojize
from time import sleep
itens = ('PEDRA', 'PAPEL', 'TESOURA')
print (emojize('''Suas opções:
[0] PEDRA :punch:
[1] PAPEL :hand:
[2] TESOURA :v:''',use_aliases=True))
escolha = int(input('Qual sua escolha? '))
computador = random.randint(0,2)
print('JO')
sleep(1)
print('KEN')
sleep(1)
print('PO!!!')
print('-='*20)
print('O computador escolheu {}'.format(itens[computador]))
if escolha == 0:
print('Você escolheu PEDRA')
if computador == 1:
print('Você perdeu')
elif escolha == computador:
print('EMPATE')
elif computador == 2:
print('Você ganhou!!!')
elif escolha == 1:
print('Você escolheu PAPEL')
if computador == 2:
print('Você perdeu')
elif escolha == computador:
print('EMPATE')
elif computador == 0 :
print('Você ganhou!!!')
elif escolha == 2:
print('Você escolheu TESOURA')
if computador == 0:
print('Você perdeu')
elif escolha == computador:
print('EMPATE')
elif computador == 1 :
print('Você ganhou!!!')
print('=-'*20)
| 25.302326
| 59
| 0.607537
|
import random
from emoji import emojize
from time import sleep
itens = ('PEDRA', 'PAPEL', 'TESOURA')
print (emojize('''Suas opções:
[0] PEDRA :punch:
[1] PAPEL :hand:
[2] TESOURA :v:''',use_aliases=True))
escolha = int(input('Qual sua escolha? '))
computador = random.randint(0,2)
print('JO')
sleep(1)
print('KEN')
sleep(1)
print('PO!!!')
print('-='*20)
print('O computador escolheu {}'.format(itens[computador]))
if escolha == 0:
print('Você escolheu PEDRA')
if computador == 1:
print('Você perdeu')
elif escolha == computador:
print('EMPATE')
elif computador == 2:
print('Você ganhou!!!')
elif escolha == 1:
print('Você escolheu PAPEL')
if computador == 2:
print('Você perdeu')
elif escolha == computador:
print('EMPATE')
elif computador == 0 :
print('Você ganhou!!!')
elif escolha == 2:
print('Você escolheu TESOURA')
if computador == 0:
print('Você perdeu')
elif escolha == computador:
print('EMPATE')
elif computador == 1 :
print('Você ganhou!!!')
print('=-'*20)
| true
| true
|
f719afef6ce3f033481568e9522937db2bfbd069
| 86
|
py
|
Python
|
my_exceptions.py
|
robert-dzikowski/api-smoke-test
|
64394049ce82a0cf80fc128587a4a83e491725b7
|
[
"MIT"
] | 1
|
2021-01-30T23:01:00.000Z
|
2021-01-30T23:01:00.000Z
|
my_exceptions.py
|
robert-dzikowski/api-smoke-test
|
64394049ce82a0cf80fc128587a4a83e491725b7
|
[
"MIT"
] | null | null | null |
my_exceptions.py
|
robert-dzikowski/api-smoke-test
|
64394049ce82a0cf80fc128587a4a83e491725b7
|
[
"MIT"
] | null | null | null |
class TestFail(Exception):
"""
    Exception raised when a test has failed.
"""
| 17.2
| 42
| 0.627907
|
class TestFail(Exception):
| true
| true
|
f719b0534049d456a9239569b20111fc6dcfa5fb
| 292
|
py
|
Python
|
esphome/components/json/__init__.py
|
TheEggi/esphomeyaml
|
98e8cc1edc7b29891e8100eb484922e5c2d4fc33
|
[
"MIT"
] | null | null | null |
esphome/components/json/__init__.py
|
TheEggi/esphomeyaml
|
98e8cc1edc7b29891e8100eb484922e5c2d4fc33
|
[
"MIT"
] | null | null | null |
esphome/components/json/__init__.py
|
TheEggi/esphomeyaml
|
98e8cc1edc7b29891e8100eb484922e5c2d4fc33
|
[
"MIT"
] | null | null | null |
import esphome.codegen as cg
from esphome.core import coroutine_with_priority
json_ns = cg.esphome_ns.namespace('json')
@coroutine_with_priority(1.0)
def to_code(config):
cg.add_library('ArduinoJson-esphomelib', '5.13.3')
cg.add_define('USE_JSON')
cg.add_global(json_ns.using)
| 24.333333
| 54
| 0.763699
|
import esphome.codegen as cg
from esphome.core import coroutine_with_priority
json_ns = cg.esphome_ns.namespace('json')
@coroutine_with_priority(1.0)
def to_code(config):
cg.add_library('ArduinoJson-esphomelib', '5.13.3')
cg.add_define('USE_JSON')
cg.add_global(json_ns.using)
| true
| true
|
f719b0960e13ee24f7ce64d60d298220d2513dc0
| 53
|
py
|
Python
|
shiftscheduler/gui/constants.py
|
c-rainbow/nurse-scheduling
|
8537c875e46772700499a89dec3a30a796434fe0
|
[
"MIT"
] | 2
|
2020-04-16T17:03:56.000Z
|
2021-04-08T17:23:21.000Z
|
shiftscheduler/gui/constants.py
|
c-rainbow/nurse-scheduling
|
8537c875e46772700499a89dec3a30a796434fe0
|
[
"MIT"
] | null | null | null |
shiftscheduler/gui/constants.py
|
c-rainbow/nurse-scheduling
|
8537c875e46772700499a89dec3a30a796434fe0
|
[
"MIT"
] | 1
|
2020-05-04T18:03:59.000Z
|
2020-05-04T18:03:59.000Z
|
EXCEL_FILE_TYPE = (("Excel 2007 files","*.xlsx"),)
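# A minimal sketch of how a tkinter file dialog typically consumes a filetypes
# tuple such as EXCEL_FILE_TYPE; the dialog call below is an illustration, not
# part of this constants module.
import tkinter as tk
from tkinter import filedialog

root = tk.Tk()
root.withdraw()  # hide the empty root window; only the file dialog is needed
selected_path = filedialog.askopenfilename(filetypes=EXCEL_FILE_TYPE)
print(selected_path)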
| 13.25
| 50
| 0.622642
|
EXCEL_FILE_TYPE = (("Excel 2007 files","*.xlsx"),)
| true
| true
|
f719b09aaa3ce37ed804af7fc5327f4ef6a12908
| 645
|
py
|
Python
|
noxfile.py
|
HarshNarayanJha/diddi-and-the-bugs
|
82af417a2ab324de7bde38736bfc42430b6b46fa
|
[
"MIT"
] | null | null | null |
noxfile.py
|
HarshNarayanJha/diddi-and-the-bugs
|
82af417a2ab324de7bde38736bfc42430b6b46fa
|
[
"MIT"
] | null | null | null |
noxfile.py
|
HarshNarayanJha/diddi-and-the-bugs
|
82af417a2ab324de7bde38736bfc42430b6b46fa
|
[
"MIT"
] | null | null | null |
"""
I use Nox here to reformat the code.
"""
import nox
files = ["noxfile.py", "main.py", "setup.py"]
@nox.session(name="keep-codebase-clean")
def keep_codebase_clean(session):
"Run formatters."
session.install("-r", "test-requirements.txt")
session.run("isort", *files)
session.run("black", *files)
@nox.session(name="check-quality")
def check_quality(session):
"Check the style and quality."
session.install("-r", "test-requirements.txt")
session.run("flake8", *files, "--max-line-length=127")
session.run("isort", "--check-only", *files)
session.run("black", "--check", *files)
| 26.875
| 59
| 0.632558
|
import nox
files = ["noxfile.py", "main.py", "setup.py"]
@nox.session(name="keep-codebase-clean")
def keep_codebase_clean(session):
session.install("-r", "test-requirements.txt")
session.run("isort", *files)
session.run("black", *files)
@nox.session(name="check-quality")
def check_quality(session):
session.install("-r", "test-requirements.txt")
session.run("flake8", *files, "--max-line-length=127")
session.run("isort", "--check-only", *files)
session.run("black", "--check", *files)
| true
| true
|