index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
989,300 | c10ed0ee80fcc7b056890ee41d789618a96ea91a | """
Secret Tasks are a special kind of Prefect Task used to represent the retrieval of sensitive data.
The base implementation uses Prefect Cloud secrets, but users are encouraged to subclass the `Secret` task
class for interacting with other secret providers. Secrets always use a special kind of result handler that
prevents the persistence of sensitive information.
"""
from .base import SecretBase, PrefectSecret
from .env_var import EnvVarSecret
__all__ = ["EnvVarSecret", "PrefectSecret", "SecretBase"]
|
989,301 | 045fa3b5dd35f001adf6c035868a6f4ec9fd43bf |
#calss header
class _CABARETS():
def __init__(self,):
self.name = "CABARETS"
self.definitions = cabaret
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['cabaret']
|
989,302 | 5dc6cb5fe394f94c29b5793890cec1b75ca51fa4 | # coding=utf-8
"""内控易校内项目自动审批"""
import json
import time
from service.logger import Log
import pymysql
import requests
service = [
{"service": "demo4.neikongyi.com", "round": "14936",
"password": "nga+eNSuUrhHx/K9W1C/a/qtWqsV30AHQjjm0tWToik=,1ihNzkC+zh+TKlMqP4Jz2jjyq6xf35sX,otJ+EuH/6L3TW51gTEKaLULuik2L+KbvrunnwS/G0P1VMKGG9F6JvGfWHn+NinGF3cFdVhnnDDAjDDBJbeBvhBvVcmRp03iAP7eSpZh1Zz4="},
{"service": "39.107.221.188", "round": "14604",
"password": "mYy8+QClL3k7B0tw7hxatyKuki1JJ7pyE++6JQKu+yw=,YeWqwTvWP32OHuttwv+j+NLT3vQrQ6tT,uXjqZj8MFez3rBlFL7xptS++0+MljjyVjBGd/VnNHBpstxmVgpvJoYcT9B5Jr9sOQktSrnMAgBG8IRRG0x51HYda/zeNBz4qQeqQA8lj6lw="}
]
log = Log()
# demo4_ip = 'demo4.neikongyi.com'
# demo4_user = 'admin1'
# demo4_password = ''
# demo4_round = ''
# demo4_headers={
# 'Accept': 'application/json',
# 'Accept-Encoding': 'gzip, deflate',
# 'Accept-Language': 'zh-CN,zh;q=0.9',
# 'Connection': 'keep-alive'
# }
# ip = demo4_ip
# user = demo4_user
# password = demo4_password
# round=demo4_round
class workSpace():
def __init__(self):
self.user = "admin1"
useService = service[1]
self.ip = useService['service']
self.cookie = self.need_Verify_Code()
self.password = useService['password']
self.round = useService["round"]
self.userId = '1'
self.headers = {
"Accept": "application/json",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Connection": "keep-alive",
# 'content-length': "240",
'content-type': "application/json; charset=utf-8",
'Cookie': self.cookie,
"Host": self.ip,
'origin': "http://" + self.ip,
"Referer": "http://" + self.ip + "/nky",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/80.0.3987.122 Safari/537.36",
"x-Current-User-Id": self.userId,
}
def need_Verify_Code(self): # 获取cookie
url = "http://" + self.ip + "/nky/service/session/needVerifyCode"
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Connection": "keep-alive",
"Host": self.ip,
"Referer": "http://" + self.ip + "/nky",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36",
}
_params = {'orgnizationId': 200}
response = requests.request("GET", url, params=_params, headers=headers)
header = eval(str(response.headers)) # 将请求后的字符类型先简单转成str,再换成dict
set_cookie = header['Set-Cookie']
cookie = set_cookie[0:43]
log.info("cookie:" + str(cookie))
return cookie
def login(self):
url = "http://" + self.ip + "/nky/service/session/login"
_payload = {
'orgnizationId': '200',
"userName": self.user,
"password": self.password,
"round": self.round,
}
_headers = self.headers
del _headers['x-Current-User-Id']
_response = requests.request("POST", url, data=json.dumps(_payload), headers=_headers)
if '200' in str(_response):
log.info('登录成功')
return self.headers
else:
log.info("登录失败,错误码" + str(_response))
return "登录失败,错误码" + str(_response)
def standard_approve_log(self, log_id):
_url = "http://" + self.ip + "/nky/service/ApprovalLog/" + str(log_id)
_payload = {
'approvalDate': "2021-11-15T02:06:02.998Z",
'approvalStatusId': 502,
'description': "自动同意",
'id': log_id,
'additionalValues': {}
}
_header = self.headers
response = requests.request("PUT", _url, data=json.dumps(_payload), headers=_header)
if '200' in str(response):
return response.text
else:
return "审批失败,审批节点为" + str(log_id)
def pmZjlz(self, _log_id):
# 先不管这个节点
_url = "http://" + self.ip + "/nky/service/ApprovalLog/" + str(_log_id)
_payload = {
'approvalDate': "2021-11-15T02:06:02.998Z",
'approvalStatusId': 502,
'description': "自动同意",
'id': _log_id,
'additionalValues': {},
'approTempProperties': '{\"items\":[{\"title\":\"论证金额(元)\",\"value\":\"1,146,200.00\"}]}'
}
_header = self.headers
response = requests.request("PUT", _url, data=json.dumps(_payload), headers=_header)
if '200' in str(response):
return response.text
else:
return "审批失败,审批节点为" + str(_log_id)
class DB(object):
def __init__(self, host=service[1]['service'], port=3306, db='nky', user='nky2018', passwd='Neikongyi201*',
charset='utf8'):
# 建立连接
self.conn = pymysql.connect(host=host, port=port, db=db, user=user, passwd=passwd, charset=charset)
# 创建游标,操作设置为字典类型
self.cur = self.conn.cursor(cursor=pymysql.cursors.DictCursor)
def __enter__(self):
# 返回游标
return self.cur
def __exit__(self, exc_type, exc_val, exc_tb):
# 提交数据库并执行
self.conn.commit()
# 关闭游标
self.cur.close()
# 关闭数据库连接
self.conn.close()
def sql_project():
# 查询待审项目
with DB() as db_1:
sql_1 = "SELECT id,`name`,status_id,activity_id,xm_total_amount FROM pm_special_project WHERE status_id=2 AND " \
"orgnization_id=200 AND activity_id !='mergeProject' AND project_term=1; "
db_1.execute(sql_1)
return list(db_1)
def sql_approve_log(bill_id, activity):
# 查询下一审批人
try:
with DB() as db_2:
sql_2 = "SELECT id,activity_id FROM approval_log WHERE orgnization_id=200 AND " \
"bill_type='pmSpecialProject' AND activity_id !='" + str(
activity) + "' AND approval_status_id=501 AND " \
"bill_id=" + str(bill_id) + "; "
db_2.execute(sql_2)
return list(db_2)
except:
with DB() as db_2:
sql_2 = "SELECT id,activity_id FROM approval_log WHERE orgnization_id=200 AND " \
"bill_type='pmSpecialProject' AND activity_id !=" + str(
activity) + " AND approval_status_id=501 AND " \
"bill_id=" + str(bill_id) + "; "
db_2.execute(sql_2)
return list(db_2)
def sql_budget_item(bill_id):
# 数据库插本年测算数据
with DB() as db_3:
sql_0 = "SELECT pm_special_project_id,years,group_name,group_name_des,`name` ,standard_control_id, financial_account_id,amount,description FROM pm_project_budget_item WHERE years =2022 AND pm_special_project_id=" + str(
bill_id) + ";"
db_3.execute(sql_0)
list_0 = list(db_3)
if len(list_0) == 0:
sql_3 = "SELECT pm_special_project_id,years,group_name,`name` ,standard_control_id, financial_account_id,amount,description FROM pm_project_budget_item WHERE pm_special_project_id=" + str(
bill_id) + ";"
db_3.execute(sql_3)
listdata = list(db_3)
for i in listdata:
i['years'] = 2022
data_str = str(i['pm_special_project_id']) + "," + str(i['years']) + ",'" + str(
i['group_name']) + "','" + str(
i['name']) + "'," + str(i['standard_control_id']) + "," + str(
i['financial_account_id']) + "," + str(
i['amount']) + ",'" + str(i['description']) + "'"
sql_4 = "INSERT INTO `pm_project_budget_item` (`pm_special_project_id`, `years`, `group_name`, " \
" `name`, `standard_control_id`, `financial_account_id`, `amount`, `description`) " \
"VALUES (%s); " % data_str
db_3.execute(sql_4)
else:
log.info("项目%s已有本年测算,无需二次生成" % (str(bill_id)))
def sql_indicator_item(bill_id):
# 数据库插本年绩效目标
with DB() as db_4:
sql_0 = "SELECT pm_special_project_id,years,indicator_id,operation_type_id,target,budget_rate FROM " \
"pm_project_indicator_item WHERE years = 2022 AND pm_special_project_id=" + str(
bill_id) + ";"
db_4.execute(sql_0)
list_0 = list(db_4)
if len(list_0) == 0:
sql_3 = "SELECT pm_special_project_id,years,indicator_id,operation_type_id,target,budget_rate FROM " \
"pm_project_indicator_item WHERE pm_special_project_id=" + str(
bill_id) + ";"
db_4.execute(sql_3)
listdata = list(db_4)
for i in listdata:
i['years'] = 2022
data_str_1 = str(i['pm_special_project_id']) + "," + str(i['years']) + "," + str(
i['indicator_id']) + "," + str(i['operation_type_id']) + ",'" + str(i['target']) + "'," + str(
i['budget_rate'])
sql_4 = "INSERT INTO `pm_project_indicator_item` (`pm_special_project_id`, `years`, `indicator_id`, " \
"`operation_type_id`, `target`, `budget_rate`) VALUES (%s);" % data_str_1
db_4.execute(sql_4)
else:
log.info("项目%s已有本年指标,无需二次生成" % (str(bill_id)))
def sql_year_budget_item(bill_id):
# 根据测算明细插入部门经济科目
with DB() as db_5:
sql_0 = "SELECT * FROM pm_project_year_budget_item WHERE years=2022 AND pm_special_project_id=" + str(
bill_id) + ";"
db_5.execute(sql_0)
list_0 = list(db_5)
if len(list_0) == 0:
sql_3 = "SELECT pm_special_project_id,years,financial_account_id,SUM(amount) as amount FROM " \
"pm_project_budget_item WHERE years=2022 AND pm_special_project_id=%s GROUP BY " \
"financial_account_id;" % (str(bill_id))
db_5.execute(sql_3)
listdata = list(db_5)
for i in listdata:
i['years'] = 2022
data_str = str(i['pm_special_project_id']) + "," + str(i['years']) + ",111,10001," + str(
i['financial_account_id']) + "," + str(i['amount'])
sql_4 = "INSERT INTO `pm_project_year_budget_item` (`pm_special_project_id`, `years`, " \
"`pm_capital_property_id`, `functional_account_id`, `financial_account_id`, " \
"`ben_one_up_amount`) VALUES (%s);" % data_str
db_5.execute(sql_4)
else:
log.info("项目%s已有本年年度预算,无需二次生成" % (str(bill_id)))
def sql_px(bill_id):
# 排序入库
with DB() as db_7:
sql = " UPDATE pm_special_project SET is_sort=1 AND special_project_phases_id=5251 WHERE id=%s;" % str(bill_id)
db_7.execute(sql)
def main(activity1='mergeProject'):
while True:
_number = 0
a = workSpace()
time.sleep(1)
l = a.login()
project_list = sql_project()
if '错误码' in l:
log.info("正在重试")
requests.session().close()
continue
elif len(project_list) == 0:
requests.session().close()
break
elif _number == 500:
requests.session().close()
break
else:
for i in project_list:
project_id = i['id']
project_activity = i['activity_id']
while project_activity != activity1:
approve_log_id = sql_approve_log(project_id, activity1)
if len(approve_log_id) != 0:
approve_log_id = approve_log_id[0]
if activity1 in approve_log_id['activity_id']:
break
else:
if 'pmYsbz' in approve_log_id['activity_id']:
sql_budget_item(project_id)
sql_indicator_item(project_id)
sql_year_budget_item(project_id)
elif 'workgroupSorting' in approve_log_id['activity_id']:
sql_px(project_id)
a.standard_approve_log(approve_log_id['id'])
else:
requests.session().close()
break
_number += 1
log.info("项目" + str(project_id) + "已审批至%s,请检查" % activity1)
requests.session().close()
requests.session().close()
def test():
a = workSpace()
a.login()
if __name__ == '__main__':
# sql_budget_item(10126)
# sql_indicator_item(10126)
# sql_year_budget_item(10126)
# test()
for ab in range(0,50):
log.info("第%s轮"%str(ab))
time.sleep(10)
try:
main()
except:
continue
|
989,303 | 41a5f13026b54f9fda7f8ccb5d4ac92f4ebd6bbe | import RPi.GPIO as gpio
gpio.setmode(gpio.BCM)
MAG_PIN = 9
class IRListener(object):
def __init__(self, pin=MAG_PIN):
self.pin = pin
gpio.setup(MAG_PIN, gpio.IN)
def __del__(self):
gpio.cleanup(self.pin)
def is_ir_present(self):
if gpio.input(self.pin) == 1:
return False
else:
return True
if __name__ == "__main__":
import time
ir = IRListener()
while True:
if ir.is_ir_present():
print "Yup"
else:
print "Nah"
time.sleep(0.05)
|
989,304 | 6b57b84456366b547a4ccd9fea25cdf914835a84 |
Python solution using set (36ms)
https://leetcode.com/problems/missing-ranges/discuss/50623
* Lang: python3
* Author: LordCHTsai
* Votes: 3
class Solution(object):
def findMissingRanges(self, nums, lower, upper):
"""
:type nums: List[int]
:type lower: int
:type upper: int
:rtype: List[str]
"""
start = [lower] + sorted(set(map(lambda x: x+1, nums)) - set(nums))
end = sorted(set(map(lambda x: x-1, nums)) - set(nums)) + [upper]
ret = []
for s, e in zip(start, end):
if s < e:
ret.append('{}->{}'.format(s, e))
elif s == e:
ret.append('{}'.format(s))
return ret
Increase numbers by 1 to find all the starts and decrease numbers by 1 to find all the ends. Then add lower and upper boundaries to starts and ends. If start < end then add as 'start->end', and if start == end then add as 'start(or end)'.
|
989,305 | 96421681dbcbb5d8481630d0f16e8c301d285efa | a=[-30, 8, 23 ,6 ,10, 9, 31, 7, 19, 20, 1, 33, 21, 27, 28, 3, 25, 26]
n=len(a)
print(n)
c=0
x=86
a.sort()
for i in range(n-2):
l=i+1
r=n-1
while(l<n):
if l<r:
su=a[i]+a[l]+a[r]
if su <x:
print(a[i],a[l],a[r])
c+=1
r-=1
else:
r-=1
else:
l+=1
r=n-1
print(c)
|
989,306 | 473de0be48df9b9ee672737b3c08476691ef4186 | class Phone(object):
def __init__(self, carrier, change_left = 50):
# There are attributes that a phone has
# There should all be relevant to our program
self.screen = True
self.camera = 2
self.mircophone = True
self.carrier = carrier
self.battery_left = change_left
def charge(self, time):
self.battery_left += time
if self.battery_left > 100:
self.battery_left = 100
def makecall(self, duration):
if not self.screen:
print("You can't make a phone call.")
print("Your screen is broken.")
return
battery_loss_per_minute = 1
self.battery_left -= duration * battery_loss_per_minute
if self.battery_left < 0:
self.battery_left = 0
print("Your phone dies during the conversation")
elif self.battery_left == 0:
print("Your phone dies at the end of the conversation")
else:
print("You successfully make the phone call")
print("Your phone is now at %s" % self.battery_left)
def smash_phone(self):
print("SMASH!!!!!!!!!!!!!!")
print("It broke.")
self.screen = False
my_phone = Phone ("ATT", 100)
your_phone = Phone("Bell")
default_phone = Phone ("Verizon")
my_phone.makecall(10)
my_phone.makecall(10)
my_phone.charge(100)
my_phone.makecall(10)
your_phone.smash_phone()
your_phone.makecall(1)
print(Special_Random.RandomCaleb.myrandom()) |
989,307 | 6ded825d190093395bab1a63867b9219fe8f4557 | """dropbox quota fields
Revision ID: 4d4ffe9376ad
Revises: 509229b69f0a
Create Date: 2013-09-22 14:15:53.636905
"""
# revision identifiers, used by Alembic.
revision = '4d4ffe9376ad'
down_revision = '509229b69f0a'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('users', sa.Column('dropbox_data_quota', sa.BigInteger(), nullable=True))
op.add_column('users', sa.Column('dropbox_data_normal', sa.BigInteger(), nullable=True))
op.add_column('users', sa.Column('dropbox_data_shared', sa.BigInteger(), nullable=True))
op.add_column('users', sa.Column('dropbox_data_datastores', sa.BigInteger(), nullable=True))
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'dropbox_data_quota')
op.drop_column('users', 'dropbox_data_shared')
op.drop_column('users', 'dropbox_data_normal')
op.drop_column('users', 'dropbox_data_datastores')
### end Alembic commands ###
|
989,308 | 96270028ee376e1a91cde0a17a8f572286c6801a | #########################################
#Download and unzip US EPA Fuel economy data from fueleconomy.gov
#########################################
import urllib.request
import urllib.parse
import io
import os
import zipfile
#import shutil
import datetime
#####################################
# downloadAndExtract - Downloads a ZIP archive and extracts it to the specified folder
#####################################
def downloadAndExtract(url, dest) : #Download and extract ZIP archive
#Request the URL
response = urllib.request.urlopen(url)
print("\tExtracting data from archive...")
#Convert the response to a file-like object
zipFile = io.BytesIO()
zipFile.write(response.read())
#Convert zipFile to a ZipFile object
archive = zipfile.ZipFile(zipFile, 'r')
#Get list of zipFile contents
archiveNameList = archive.namelist()
#Extract all of the files in archive
for fileName in archiveNameList :
print("\tArchive File:", fileName)
archive.extract(fileName, path=dest)
#Clean up
archive.close()
zipFile.close()
response.close()
print("\tExtraction complete")
########################################################
##MAIN PROGRAM
########################################################
#Write data into a subfolder of the source data folder stamped with today's date
dataFolder = os.path.join("..", "data", "source", datetime.date.today().strftime("%Y%m%d"))
print("Creating data folder")
#Create the source data subfolder if it doesn't exist
if not os.path.isdir(dataFolder) :
os.makedirs(dataFolder)
print("Downloading Fuel Economy Data")
#Download fuel economy data
downloadAndExtract("https://www.fueleconomy.gov/feg/epadata/vehicles.csv.zip", dataFolder)
print("Downloading Emissions Data")
#Download Emissions data
downloadAndExtract("https://www.fueleconomy.gov/feg/epadata/emissions.csv.zip", dataFolder)
|
989,309 | 8d463ac1fb23db71370526f6598782f9221e8e92 | #!/bin/python
xmax = 2.115 # max length of the car
ymax = 0.912 # max y width without mirrors
zmax = 1.431 # max height
wcrearz = 0.313 # z value of the rear wheelbase center / SLR
cPillary = 0.624 # y position of the c-pillar/flaps
cPillarz = 1.150 # z position of the c-pillar/flaps
wakeClosure = 2.88 # closing point of the wake in X direction
floor = 0 # z position of the floor
#--------------------------------------------------------------
print("Using values for car size...\n xmax: {}\n ymax: {}\n zmax: {}\nas reference points...\n wcrearz: {}\n cPillary: {}\n cPillarz: {}\nfor the wake and floor...\n wakeClosure: {}\n floor: {}".format(xmax, ymax, zmax,wcrearz,cPillary,cPillarz,wakeClosure,floor))
#--------------------------------------------------------------
# plane positions
x1 = float(xmax + ((wakeClosure - xmax)/2)) # between wake closure point and rear end
x2 = float(wakeClosure) # wake closure point
x3 = float(wakeClosure + 1.00) # 1 m offset behind wake closure point
y0 = 0 # symmetry plane
y1 = float(cPillary) # c pillar
z1 = float(wcrearz) # wheel center rear
z2 = float(zmax/2) # car height / 2
z3 = float(cPillarz) # c pillar
#--------------------------------------------------------------
# space between points and offsets
di = 0.02 # homogen increment between points
offsetX = 2.5 # plane length in for y and z planes
offsetY = 0.2 # +-0.2 m to the sides of the car (without mirrors!)
offsetZ = 0.4 # +0.4 m higher than the car height
#--------------------------------------------------------------
x = x1 # fixed value
z = float(floor) # Start value
data = []
while z <= float(round(zmax,1)+offsetZ+di):
y = int(round(-ymax))-offsetY
while y <= float(int(round(ymax))+offsetY+di):
data.append([x,y,z])
y = y + di
z = z + di
with open("plane_x1.txt", "w") as g:
for line in data:
g.write("({:.2f} {:.2f} {:.2f})\n".format(line[0],line[1],line[2]))
x = x2 # fixed value
z = float(floor) # Start value
data = []
while z <= float(round(zmax,1)+offsetZ+di):
y = int(round(-ymax))-offsetY
while y <= float(int(round(ymax))+offsetY+di):
data.append([x,y,z])
y = y + di
z = z + di
with open("plane_x2.txt", "w") as g:
for line in data:
g.write("({:.2f} {:.2f} {:.2f})\n".format(line[0],line[1],line[2]))
x = x3 # fixed value
z = float(floor) # Start value
data = []
while z <= float(round(zmax,1)+offsetZ+di):
y = int(round(-ymax))-offsetY
while y <= float(int(round(ymax))+offsetY+di):
data.append([x,y,z])
y = y + di
z = z + di
with open("plane_x3.txt", "w") as g:
for line in data:
g.write("({:.2f} {:.2f} {:.2f})\n".format(line[0],line[1],line[2]))
#---------------------------------------------------------------------------
y = y0 # fixed value
z = float(floor) # Start value
data = []
while z <= float(round(zmax,1)+offsetZ+di):
x = float(x3-offsetX)
while x <= float(x3+di):
data.append([x,y,z])
x = x + di
z = z + di
with open("plane_y0.txt", "w") as g:
for line in data:
g.write("({:.2f} {:.2f} {:.2f})\n".format(line[0],line[1],line[2]))
y = y1 # Start value
z = float(floor) # Start value
data = []
while z <= float(round(zmax,1)+offsetZ+di):
x = float(x3-offsetX)
while x <= float(x3+di):
data.append([x,y,z])
x = x + di
z = z + di
with open("plane_y1.txt", "w") as g:
for line in data:
g.write("({:.2f} {:.2f} {:.2f})\n".format(line[0],line[1],line[2]))
#----------------------------------------------------------------------------
y = int(round(-ymax))-offsetY
z = z1 # Start value
data = []
while y <= float(int(round(ymax))+offsetY+di):
x = float(x3-offsetX)
while x <= float(x3+di):
data.append([x,y,z])
x = x + di
y = y + di
with open("plane_z1.txt", "w") as g:
for line in data:
g.write("({:.2f} {:.2f} {:.2f})\n".format(line[0],line[1],line[2]))
y = int(round(-ymax))-offsetY
z = z2 # Start value
data = []
while y <= float(int(round(ymax))+offsetY+di):
x = float(x3-offsetX)
while x <= float(x3+di):
data.append([x,y,z])
x = x + di
y = y + di
with open("plane_z2.txt", "w") as g:
for line in data:
g.write("({:.2f} {:.2f} {:.2f})\n".format(line[0],line[1],line[2]))
y = int(round(-ymax))-offsetY
z = z3 # Start value
data = []
while y <= float(int(round(ymax))+offsetY+di):
x = float(x3-offsetX)
while x <= float(x3+di):
data.append([x,y,z])
x = x + di
y = y + di
with open("plane_z3.txt", "w") as g:
for line in data:
g.write("({:.2f} {:.2f} {:.2f})\n".format(line[0],line[1],line[2]))
|
989,310 | de937259129b4e0345c86dd6f7d8fa3b8087894b | from operator import itemgetter
import sys
total_count=0
cword=None
for line in sys.stdin:
line=line.strip()
word,count = line.split('\t',1)
try:
count=int(count)
if cword==word:
total_count +=count
else:
if cword:
print(cword, "\t" ,str(total_count))
total_count=count
cword=word
except ValueError:
continue
if cword==word:
print(cword+"\t"+str(total_count))
|
989,311 | 9fe5abce3d2b302986c7522e527399f2d72f2c1e | #!/bin/env python
from DIRAC.Core.Base import Script
from DIRAC import S_OK, S_ERROR, exit as dexit
import pprint, types
class Params(object):
def __init__(self):
self.file = ""
self.prodid = 0
def setFile(self, opt):
self.file = opt
return S_OK()
def setProdID(self, opt):
try:
self.prodid = int(opt)
except ValueError:
return S_ERROR('Prod ID must be integer')
return S_OK()
def registerSwitch(self):
Script.registerSwitch('p:', "ProductionID=", "Production ID", self.setProdID)
Script.registerSwitch('f:', "File=", "File name", self.setFile)
Script.setUsageMessage("%s -p 12345" % Script.scriptName)
def createTransfoInfo(trans):
info = []
info.append(" - It's a %s production" % trans['Type'])
info.append(" - It's described as %s" % trans["LongDescription"])
info.append(" - It's part of the %s group" % trans['TransformationGroup'])
info.append(" - Its status is currently %s" % trans["Status"])
info.append(" - It uses the %s plugin"%trans['Plugin'])
info.append(" - Its name is %s" % trans['TransformationName'])
info.append(" - It was created by %s" % trans['AuthorDN'])
if 'InputDataQuery' in trans:
info.append(' - Was input with %s ' % str(trans['InputDataQuery']))
if 'AddParams' in trans:
for key, val in trans['AddParams'].items():
if key == 'SWPackages':
info.append(" - Uses the software %s" % trans['AddParams']['SWPackages'].replace(";", ", "))
if key.lower().count("steeringfile"):
info.append(" - The steering file used for %s is %s" % (key.split("_")[0], trans['AddParams'][key]))
if key.lower().count("detectormodel"):
info.append(" - Detector model %s" % trans['AddParams'][key])
if key.lower().count('trackingstra'):
info.append(" - Tracking strategy %s" % trans['AddParams'][key])
if key.count('whizardparams'):
pp = pprint.PrettyPrinter(indent=4)
whizp = pp.pformat(eval(trans['AddParams'][key]))
info.append(" - Uses the following whizard parameters:")
info.append(" %s" % whizp)
info.append('')
return info
def createFileInfo(fmeta):
from DIRAC.Core.Utilities import DEncode
if 'ProdID' in fmeta:
del fmeta['ProdID']
info = []
info.append(" - Machine %s" % fmeta['Machine'])
del fmeta['Machine']
info.append(" - Energy %sGeV"% fmeta['Energy'])
del fmeta['Energy']
if 'MachineParams' in fmeta:
info.append(' - The machine parameters are %s' % fmeta['MachineParams'])
del fmeta['MachineParams']
if 'EvtClass' in fmeta:
info.append(' - Is among the %s event class' % fmeta['EvtClass'])
del fmeta['EvtClass']
if 'ProcessID' in fmeta:
info.append(' - Is the ProcessID %s' % str(fmeta['ProcessID']))
del fmeta['ProcessID']
elif 'GenProcessID' in fmeta:
info.append(' - Is the GenProcessID %s' % str(fmeta['GenProcessID']))
del fmeta['GenProcessID']
info.append(" - Is the %s event type" % fmeta["EvtType"])
del fmeta["EvtType"]
if 'Polarisation' in fmeta:
info.append(" - Has %s polarisation" % fmeta['Polarisation'])
del fmeta["Polarisation"]
if 'BeamParticle1' in fmeta:
info.append(" - Beam 1 particle is %s" % fmeta['BeamParticle1'])
info.append(" - Beam 2 particle is %s" % fmeta['BeamParticle2'])
del fmeta['BeamParticle1']
del fmeta['BeamParticle2']
if 'PolarizationB1' in fmeta:
info.append(' - Has %s polarization for Beam 1 and %s for beam 2' %
(fmeta['PolarizationB1'], fmeta['PolarizationB2']))
del fmeta['PolarizationB1']
del fmeta["PolarizationB2"]
if 'Datatype' in fmeta:
if fmeta['Datatype'] == 'gen':
info.append(' - This is a generator level sample')
elif fmeta["Datatype"] == 'SIM':
info.append(" - This is a simulated sample")
elif fmeta['Datatype'] in ['REC', 'DST']:
info.append(' - This is a reconstructed sample')
else:
info.append(' - The datatype is unknown: %s' % fmeta['Datatype'])
del fmeta['Datatype']
if "SWPackages" in fmeta:
info.append(" - Was produced with %s" % ", ".join(fmeta["SWPackages"].split(';')))
del fmeta["SWPackages"]
if "SoftwareTag" in fmeta:
info.append(' - Was produced with %s' % fmeta['SoftwareTag'])
del fmeta['SoftwareTag']
if 'ILDConfig' in fmeta:
info.append(' - Used the %s ILDConfig package' % fmeta["ILDConfig"])
del fmeta["ILDConfig"]
if 'DetectorModel' in fmeta:
info.append(" - Using the %s detector model" % fmeta['DetectorModel'])
del fmeta['DetectorModel']
if 'NumberOfEvents' in fmeta:
info.append(' - Has %s events or less per file' % fmeta['NumberOfEvents'])
del fmeta['NumberOfEvents']
if "CrossSection" in fmeta:
xsec = str(fmeta["CrossSection"])
del fmeta["CrossSection"]
if 'CrossSectionError' in fmeta:
xsec += " +/- "+str(fmeta["CrossSectionError"])
del fmeta["CrossSectionError"]
xsec += " fb"
info.append(" - Cross section %s" % xsec)
if "AdditionalInfo" in fmeta:
try:
dinfo = DEncode.decode(fmeta["AdditionalInfo"])
except:
dinfo = eval(fmeta["AdditionalInfo"])
info.append(" - There is some additional info:")
if type(dinfo) == types.TupleType:
dinfo = dinfo[0]
if type(dinfo) == types.DictType:
dictinfo = dinfo
if 'xsection' in dictinfo:
if 'sum' in dictinfo['xsection']:
if 'xsection' in dictinfo['xsection']['sum']:
xsec= str(dictinfo['xsection']['sum']['xsection'])
if 'err_xsection' in dictinfo['xsection']['sum']:
xsec += ' +/- %s' % dictinfo['xsection']['sum']['err_xsection']
xsec += "fb"
info.append(' Cross section %s' % xsec)
else:
info.append(' %s' % dinfo)
del fmeta["AdditionalInfo"]
if 'Luminosity' in fmeta:
info.append(' - Sample corresponds to a luminosity of %sfb'%fmeta["Luminosity"])
del fmeta['Luminosity']
if 'Ancestors' in fmeta:
if len(fmeta["Ancestors"]):
info.append(" - Was produced from:")
for anc in fmeta["Ancestors"]:
info.append(' %s' % anc)
del fmeta["Ancestors"]
if 'Descendants' in fmeta:
if len(fmeta["Descendants"]):
info.append(" - Gave the following files:")
for des in fmeta["Descendants"]:
info.append(' %s' % des)
del fmeta["Descendants"]
if 'DetectorType' in fmeta:
#We don't need this here
del fmeta['DetectorType']
if fmeta:
info.append('Remaining metadata: %s' % str(fmeta))
return info
if __name__ == "__main__":
clip = Params()
clip.registerSwitch()
Script.parseCommandLine()
if not clip.prodid and not clip.file:
Script.showHelp()
dexit(1)
from DIRAC import gLogger
import os
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
tc = TransformationClient()
from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient
fc = FileCatalogClient()
fmeta = {}
trans = None
info = []
if clip.prodid:
res = tc.getTransformation(clip.prodid)
if not res['OK']:
gLogger.error(res['Message'])
dexit(1)
trans = res['Value']
res = tc.getTransformationInputDataQuery( clip.prodid )
if res['OK']:
trans['InputDataQuery'] = res['Value']
res = tc.getAdditionalParameters ( clip.prodid )
if res['OK']:
trans['AddParams'] = res['Value']
#do something with transf
res = fc.findDirectoriesByMetadata({'ProdID':clip.prodid})
if res['OK']:
if len(res['Value'].values()):
gLogger.verbose("Found some directory matching the metadata")
for dirs in res['Value'].values():
res = fc.getDirectoryMetadata(dirs)
if res['OK']:
fmeta.update(res['Value'])
else:
gLogger.warn("Failed to get dir metadata")
res = fc.listDirectory(dirs)
if not res['OK']:
continue
content = res['Value']['Successful'][dirs]
if content["Files"]:
for f_ex in content["Files"].keys():
res = fc.getFileUserMetadata(f_ex)
if res['OK']:
fmeta.update(res['Value'])
break
#here we have trans and fmeta
info.append("")
info.append("Production %s has the following parameters:" % trans['TransformationID'])
info.extend(createTransfoInfo(trans))
if fmeta:
info.append('The files created by this production have the following metadata:')
info.extend(createFileInfo(fmeta))
info.append("It's possible that some meta data was not brought back,")
info.append("in particular file level metadata, so check some individual files")
if clip.file:
f = clip.file
pid = ""
if f.count("/"):
fpath = os.path.dirname(f)
res = fc.getDirectoryMetadata(fpath)
if not res['OK']:
gLogger.error(res['Message'])
dexit(0)
fmeta.update(res['Value'])
res = fc.getFileUserMetadata(f)
if not res['OK']:
gLogger.error(res['Message'])
dexit(1)
fmeta.update(res['Value'])
if 'ProdID' in fmeta:
pid = str(fmeta['ProdID'])
res = fc.getFileAncestors([f], 1)
if res["OK"]:
for lfn,ancestorsDict in res['Value']['Successful'].items():
if ancestorsDict.keys():
fmeta["Ancestors"] = ancestorsDict.keys()
res = fc.getFileDescendents([f], 1)
if res["OK"]:
for lfn,descendDict in res['Value']['Successful'].items():
if descendDict.keys():
fmeta['Descendants'] = descendDict.keys()
else:
ext = f.split(".")[-1]
fitems = []
[fitems.extend(i.split('_')) for i in f.split('.')[:-1]]
pid = ''
if ext == 'stdhep':
pid = fitems[fitems.index('gen')+1]
if ext == 'slcio':
if 'rec' in fitems:
pid = fitems[fitems.index('rec')+1]
elif 'dst' in fitems:
pid = fitems[fitems.index('dst')+1]
elif 'sim' in fitems:
pid = fitems[fitems.index('sim')+1]
else:
gLogger.error("This file does not follow the ILCDIRAC production conventions!")
gLogger.error("Please specify a prod ID directly or check the file.")
dexit(0)
#as task follows the prod id, to get it we need
tid = fitems[fitems.index(pid)+1]
last_folder = str(int(tid)/1000).zfill(3)
res = fc.findDirectoriesByMetadata({'ProdID':int(pid)})
if not res['OK']:
gLogger.error(res['Message'])
dexit(1)
dir_ex = res['Value'].values()[0]
fpath = ""
if int(dir_ex.split("/")[-1]) == int(pid):
fpath = dir_ex+last_folder+"/"
elif int(dir_ex.split("/")[-2]) == int(pid):
fpath = "/".join(dir_ex.split('/')[:-2])+"/"+pid.zfill(8)+"/"+last_folder+"/"
else:
gLogger.error('Path does not follow conventions, will not get file family')
if fpath:
fpath += f
res = fc.getFileAncestors([fpath], 1)
if res["OK"]:
for lfn,ancestorsDict in res['Value']['Successful'].items():
fmeta["Ancestors"] = ancestorsDict.keys()
res = fc.getFileDescendents([fpath], 1)
if res["OK"]:
for lfn,descendDict in res['Value']['Successful'].items():
fmeta['Descendants'] = descendDict.keys()
res = fc.getDirectoryMetadata(dir_ex)
if not res['OK']:
gLogger.error(res['Message'])
else:
fmeta.update(res['Value'])
res = tc.getTransformation(pid)
if not res['OK']:
gLogger.error(res['Message'])
gLogger.error('Will proceed anyway')
else:
trans = res['Value']
res = tc.getTransformationInputDataQuery( pid )
if res['OK']:
trans['InputDataQuery'] = res['Value']
res = tc.getAdditionalParameters ( pid )
if res['OK']:
trans['AddParams'] = res['Value']
info.append("")
info.append("Input file has the following properties:")
info.extend(createFileInfo(fmeta))
info.append("")
info.append('It was created with the production %s:' % pid)
if trans:
info.extend(createTransfoInfo(trans))
gLogger.notice("\n".join(info))
dexit(0) |
989,312 | 083c2416f7fc38a3253523f08f602af882f25f62 | from os.path import dirname, basename, isfile
import glob
modules = glob.glob(dirname(__file__)+"/*.py")
__all__ = [ basename(f)[:-3] for f in modules if isfile(f) and 'dedupe' not in f]
#__all__ = ['gui','passchats','sparklog','users','utils','hgfix',
# 'dbsearch','domain']
|
989,313 | a7d77275ea7a7f23c14cc9b661bc2195673f05f4 | from django.contrib import admin
from sms.models import Message, Template, Config, SavedMessage, DelayedCommand
admin.site.register(Message)
admin.site.register(SavedMessage)
admin.site.register(Template)
admin.site.register(Config)
admin.site.register(DelayedCommand)
|
989,314 | edfd686291bc72c9f00dd5c7c4d2c1ec27560d2c | from rest_framework import serializers, viewsets
from ..core.models import Furniture
class FurnitureSerializer(serializers.ModelSerializer):
room = serializers.StringRelatedField()
class Meta:
model = Furniture
fields = '__all__'
class FurnitureViewSet(viewsets.ModelViewSet):
serializer_class = FurnitureSerializer
queryset = Furniture.objects.select_related('room__apartment__building')
|
989,315 | e9629a1899541d462d50aa45d5eead21fa095db0 | n = int(input())
A = list(map(int,input().split()))
B = list(map(int,input().split()))
need1 = 0
need2 = 0
for i in range(n):
s = B[i]-A[i]
if s < 0:
need1 -= s
else:
need2 += s//2
if need1 > need2:
print("No")
else:
print("Yes") |
989,316 | be6aa7d11ac4c9eac6ebf134ca1f7c21d65f63de | """ Module for iterator implementation """
import re
class SentenceIterator:
"""Iterator for Sentence"""
def __init__(self, words):
self.words = words
self.counter = 0
self.end = len(self.words)
def __iter__(self):
return self
def __next__(self):
if self.counter < self.end:
result = self.words[self.counter]
self.counter += 1
return result
raise StopIteration
class Sentence:
"""Sentence that generates words form text dynamically"""
def __init__(self, text: str):
if not isinstance(text, str):
raise TypeError("The type of variable is not str")
# The sentence is finished
if text[-1] not in ('!', '.', '?'):
raise ValueError("Please, end your sentence")
self.text = text
def __getitem__(self, items):
"""Return items from sentence """
return list(self._words())[items]
def __repr__(self):
return f"<Sentence(words={len(self.words)}, other_chars={len(self.other_chars)})>"
def __iter__(self):
return SentenceIterator(self.words)
def _words(self):
"""Generate sequence of words from text"""
regex = r'\b\w+\b'
for word in re.findall(regex, self.text):
yield word
@property
def words(self):
"""Create list of words from _words generator"""
return list(self._words())
@property
def other_chars(self):
"""Create list of other characters in text"""
return [sign for sign in re.findall(r'[^\w\s]', self.text)]
if __name__ == '__main__':
# Accepts only string
print("*" * 50)
try:
Sentence(23534534)
except TypeError:
print("Calling Sentence(23534534)...")
print("Raised TypeError")
print("*" * 50)
# Accepts only finished strings
try:
print("Calling Sentence('sdsd dfdfdf')")
Sentence("sdsd dfdfdf")
except ValueError:
print("Raised ValueError")
print("*" * 50)
# Check __repr__
print("String representation of 'lazy !!! fox d*6 &&& dog.'")
print(Sentence("lazy !!! fox d*6 &&& dog."))
print("*" * 50)
# Lazy iterator
print("Calling Sentence('lazy fox jumps over the brown dog.')._words()")
print(Sentence("lazy fox jumps over the brown dog.")._words())
print("*" * 50)
# List of all words
print("Calling Sentence('lazy !!! fox d*6 &&& dog.').words")
print(Sentence("lazy !!! fox d*6 &&& dog.").words)
print("*" * 50)
# List of not words
print("Calling Sentence('lazy !!! fox d*6 &&& dog.').other_chars")
print(Sentence("lazy !!! fox d*6 &&& dog.").other_chars)
print("*" * 50)
# Index
print("Calling Sentence('lazy !!! fox d*6 &&& dog.')[:]")
print(Sentence('lazy !!! fox d*6 &&& dog.')[:])
print("*" * 50)
# Slice
print("Calling Sentence('lazy fox jumps over the brown dog.')[0:2]")
print(Sentence('lazy fox jumps over the brown dog.')[0:2])
print("*" * 50)
# Iterator
print("Returning iterator from Sentence")
print(type(iter(Sentence("lazy fox jumps over the brown dog."))))
print("*" * 50)
# For
print("Calling Sentence('lazy fox !!! jumps .... over the brown dog.')")
for item in Sentence("lazy fox !!! jumps .... over the brown dog."):
print(item)
print("*" * 50)
|
989,317 | 047800bb98fd87439df2c9ebcd0c669377d0bcd1 | from app import app, db
import os
import sys
from flask import render_template
import flask
from app.models import User
@app.route('/')
@app.route('/index')
def index():
return render_template('index.html', title='Flask Tutorial', flask_version=flask.__version__, flask_app=os.environ['FLASK_APP'], db_connection=app.config.get('SQLALCHEMY_DATABASE_URI'), environment=os.environ['APP_SETTINGS'], python_version=sys.version)
@app.route('/db')
def dbTest():
users = User.query.all()
if users:
return render_template('db.html', users=users)
elif any(us.name == 'daoud' for us in users):
u = User(username='audry', email='audry@shamanic.io')
db.session.add(u)
db.session.commit()
return render_template('db.html', users=users)
else:
u = User(username='daoud', email='daoud@shamanic.io')
db.session.add(u)
db.session.commit()
return render_template('db.html', users=users)
@app.route('/react/index')
def react():
return render_template('react/index.html') |
989,318 | 3a21c213fc0844a912e07b72b26bdbcfd8461904 | import random
class Vector:
def __init__(self, x, y):
self.x = x
self.y = y
def __add__(self, other):
return Vector(self.x+other.x, self.y+other.y)
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def __hash__(self):
return hash((self.x, self.y))
def copy(self):
return Vector(self.x, self.y)
def __str__(self):
return "<{}, {}>".format(self.x, self.y)
def __repr__(self):
return str(self)
def rand_vec(w, h):
x = random.randint(0, w-1)
y = random.randint(0, h-1)
return Vector(x, y) |
989,319 | 5f1d24ace98f9c2b5cc91ac39bf8513827b74ff7 | {'_data': [[u'Unknown',
[['Immune system', u'Systemisk allergisk reaktion'],
['Psychiatric', u'Oro, s\xf6mnl\xf6shet'],
['Nervous system', u'Huvudv\xe4rk, yrsel'],
['Eye', u'\xd6verg\xe5ende synst\xf6rningar'],
['Cardiac', u'Hj\xe4rtklappning'],
['Vascular', u'H\xf6gt blodtryck'],
['Respiratory',
<<<<<<< HEAD
u'Stickande eller br\xe4nnande k\xe4nsla i n\xe4sa och svalg, torr']]]],
=======
u'Stickande eller br\xe4nnande k\xe4nsla i n\xe4sa och svalg, torr']]]],
>>>>>>> eb0dbf7cfbd3e1c8a568eedcf6ca5658233104cc
'_pages': [2, 3],
u'_rank': 7,
u'_type': u'LFSU'} |
989,320 | fb2e554d0667c00d16cacf2f75c8c724a365d25a | #Création des listes contenant les mots
unites = ['one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen']
dizaines = ['twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy', 'eighty', 'ninety']
autre = ['hundred', 'thousand', 'and']
resultat = 0
#Calcul du nombre de lettres à utiliser pour écrire les nombres de 1 à 1000
for i in range(1, 1001):
#Stockage du chiffre des unités, dizaines, centaines, milliers dans des variables
unite = i%10
dizaine = (i//10)%10
centaine = (i//100)%10
millier = (i//1000)%10
#Testes pour savoir à quelles conditions le nombre répond
if millier != 0:
resultat += len(unites[0]) + len(autre[1])
if i%1000 != 0:
if centaine != 0:
resultat += len(unites[centaine-1]) + len(autre[0])
if i%100 != 0:
resultat += len(autre[2])
if i%100 != 0:
if dizaine < 2:
resultat += len(unites[i%100-1])
else:
resultat += len(dizaines[dizaine-2])
if unite != 0:
resultat += len(unites[unite-1])
print(resultat)
|
989,321 | 5431876f465bb405c36c03bd5742c0940b6c72aa | # Display various stats about the charger on the PYBOARD-UNO-R3 board
#
from unoextra import *
from time import sleep
ch = Charger()
CHARGING_TEXT = { CHARGING_NOT_CHARGING : "Not charging",
CHARGING_PRE_CHARGE : "< V BATLOWV",
CHARGING_FAST_CHARGE : "Fast Charging",
CHARGING_DONE : "Charge Termination Done" }
VBUS_TEXT = { VBUS_NO_INPUT : "No input",
VBUS_USB_SDP : "USB Host SDP",
VBUS_USB_CDP : "USB CDP (1.5A)",
VBUS_USB_DCP : "USB DCP (3.25A)",
VBUS_USB_DCP_MAX : "Adjustable High Voltage DCP (MaxCharge) (1.5A)",
VBUS_USB_UNKNOW : "Unknown Adapter (500mA)",
VBUS_NOT_STD : "Non-Standard Adapter (1A/2A/2.1A/2.4A)",
VBUS_OTG : "USB OTG" }
CHARGING_FAULT_TEXT = { CHARGING_FAULT_NORMAL : "Normal",
CHARGING_FAULT_INPUT : "Input fault. VBUS > V ACOV or VBAT < VBUS < V VBUSMIN (typical 3.8V)",
CHARGING_FAULT_THERMAL: "Thermal shutdown",
CHARGING_FAULT_TIMER : "Charge Safety Timer Expiration" }
NTC_FAULT_TEXT = { NTC_FAULT_NORMAL : "Normal",
NTC_FAULT_BUCK_COLD : "TS Cold in Buck mode",
NTC_FAULT_BUCK_HOT : "TS Hot in Buck mode",
NTC_FAULT_BOOST_COLD: "TS Cold in Boost mode",
NTC_FAULT_BOOST_HOT : "TS Hot in Boost mode" }
# Activate ADC conversion rate (1 sec)
ch.config( conv_rate=True )
while True:
# Display last know status
print( "-"*40 )
print( "USB Input Status : %s" % ("USB500" if ch.usb_input_status == USB500 else "USB100") )
print( "VSYS regulation status: %s" % ("BAT < VSYSMIN" if ch.vsys_regulation else "BAT > VSYSMIN") )
print( "Power Good : %s" % ch.power_good )
print( "CHARGING : %s" % CHARGING_TEXT[ch.charging_status] )
print( "VBUS Status : %s" % VBUS_TEXT[ch.vbus_status] )
print( "Watchdog fault : %s" % ("Watchdog timer expiration" if ch.watchdog_fault else "Normal") )
print( "Boost fault : %s" % ("VBUS overloaded in OTG, or VBUS OVP, or battery is too low in boost mode" if ch.boost_fault else "Normal") )
print( "Charging fault : %s" % CHARGING_FAULT_TEXT[ch.charging_fault] )
print( "Battery Fault : %s" % ("BATOVP (VBAT > V BATOVP)" if ch.battery_fault else "Normal") )
print( "NTC Fault : %s" % NTC_FAULT_TEXT[ch.ntc_fault] )
print( "Battery Voltage : %s" % ch.vbat )
print( "SYS Voltage : %s" % ch.vsys )
print( "BUS Voltage : %s" % ch.vbus )
sleep(1)
# Request status update
ch.update_status()
# Request Fault update
ch.update_fault()
|
989,322 | f8a040ba5ae7c1f9754ad300e9298198681fe272 | strRepeats = nuke.getInput('Enter number of copies:', '10')
intRepeats = int(strRepeats)
bFirstLoop = True
#nukescripts.misc.clear_selection_recursive()
nRecGroup = nuke.nodes.Group()
nRecGroup.begin()
kX_Trans = nuke.Double_Knob('x_trans', 'Translate X:')
kX_Trans.setRange(-50., 50.)
kX_Trans.setValue(20.)
nRecGroup.addKnob(kX_Trans)
kY_Trans = nuke.Double_Knob('y_trans', 'Translate Y:')
kY_Trans.setRange(-50., 50.)
kY_Trans.setValue(20.)
nRecGroup.addKnob(kY_Trans)
kX_Rot = nuke.Double_Knob('rot', 'Rotate:')
kX_Rot.setRange(-20., 20.)
kX_Rot.setValue(0.)
nRecGroup.addKnob(kX_Rot)
kX_Scale = nuke.Double_Knob('scale', 'Scale:')
kX_Scale.setRange(-1., 3.)
kX_Scale.setValue(1.)
nRecGroup.addKnob(kX_Scale)
nInput = nuke.nodes.Input()
nDot = nuke.nodes.Dot()
nDot.setInput(0, nInput)
for i in range(iRepeats):
nTrans = nuke.nodes.Transform(name = 'tform' + str(i), translate = 'parent.x_trans parent.y_trans', rotate = 'parent.rot', scale = 'parent.scale', center = '960 540')
nMerge = nuke.nodes.Merge2(name = 'mer' + str(i))
nMerge.setInput(1,nTrans
if bFirstLoop:
bFirstLoop = False
nTrans.setInput(0, nDot)
nMerge.setInput(0, nDot)
else:
nTrans.setInput(0, nPrevMerge)
nMerge.setInput(0, nPrevMerge)
nPrevMerge = nMerge
nOutput = nuke.nodes.Output()
nOutput.setInput(0, nMerge)
nRecGroup.end() |
989,323 | 453dfc772ebd91eb20521fb50f9dfa9dfa0f1ccb |
import os
import regex
# ------------------------------------------------------------------------------
#
def collapse_ranges (ranges):
"""
Given be a set of ranges (as a set of pairs of floats [start, end] with
'start <= end'. This algorithm will then collapse that set into the
smallest possible set of ranges which cover the same, but not more nor
less, of the domain (floats).
We first sort the ranges by their starting point. We then start with the
range with the smallest starting point [start_1, end_1], and compare to
the next following range [start_2, end_2], where we now know that
start_1 <= start_2. We have now two cases:
a) when start_2 <= end_1, then the ranges overlap, and we collapse them into
range_1: range_1 = [start_1, max[end_1, end_2]
b) when start_2 > end_1, then ranges don't overlap. Importantly, none of
the other later ranges can ever overlap range_1, because there start points
are even larger. So we move range_1 to the set of final ranges, and restart
the algorithm with range_2 being the smallest one.
Termination condition is if only one range is left -- it is also moved to
the list of final ranges then, and that list is returned.
"""
# FIXME: does tuple and set conversion really add anything?
# Ranges must be unique: we do not count timings when they start and end at
# exactly the same time. By using a set, we do not repeat ranges.
# we convert to a list before return.
final = set()
# return empty list if given an empty list
if not ranges:
return final
START = 0
END = 1
# sort ranges into a copy list, by their start time
_ranges = sorted(ranges, key=lambda x: x[START])
# sat 'base' to the earliest range (smallest starting point)
base = _ranges[0]
for _range in _ranges[1:]:
# if range is empty, skip it
if _range[START] == _range[END]:
continue
if _range[START] <= base[END]:
# ranges overlap -- extend the base
base[END] = max(base[END], _range[END])
else:
# ranges don't overlap -- move base to final, and current _range
# becomes the new base
final.add(tuple(base))
base = _range
# termination: push last base to final
final.add(tuple(base))
# Return final as list of list in case a mutable type is needed.
return [list(b) for b in final]
# ------------------------------------------------------------------------------
#
def partition(space, nparts):
'''
create balanced partitions from an iterable space. This method preserves
contiguous order.
kudos:
http://code.activestate.com/recipes/425397-split-a-list-into-roughly-equal-sized-pieces/
'''
n = len(space)
b = 0
ret = list()
for k in range(nparts):
q, r = divmod(n-k, nparts)
a, b = b, b + q + (r!=0)
ret.append(space[a:b])
return ret
# ------------------------------------------------------------------------------
#
def in_range(value, ranges):
"""
checks if a float value `value` is in any of the given `ranges`, which are
assumed to be a list of tuples (or a single tuple) of start end end points
(floats).
Returns `True` or `False`.
"""
# is there anythin to check?
if not ranges or not len(ranges):
return False
if not isinstance(ranges[0], list):
ranges = [ranges]
for r in ranges:
if value >= r[0] and value <= r[1]:
return True
return False
# ------------------------------------------------------------------------------
#
if __name__ == '__main__':
test = [ [ 0, 10],
[20, 30],
[40, 50],
[60, 70],
[80, 90],
[ 5, 15],
[35, 55] ]
import pprint
pprint.pprint (test)
pprint.pprint (collapse_ranges (test))
space = range(75)
parts = partition(space, 8)
for part in parts:
print "%3d: %s" % (len(part), part)
# ------------------------------------------------------------------------------
|
989,324 | d294907f2b61cefabd9ce804e2de72659c678473 | def mysolve(vals):
print vals
print "Len is :" , len(vals)
mylist = []
for i in range(10):
mylist.append(vals[i])
print "mylist is : " , mylist
return mylist
|
989,325 | f33cb807c33acf4ea7b5bc9c9d4b3f6e48d7ead8 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='KDaily',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('code', models.CharField(max_length=8)),
('market', models.CharField(max_length=8)),
('p', models.DecimalField(max_digits=8, decimal_places=5)),
('o', models.DecimalField(max_digits=8, decimal_places=5)),
('h', models.DecimalField(max_digits=8, decimal_places=5)),
('l', models.DecimalField(max_digits=8, decimal_places=5)),
('c', models.DecimalField(max_digits=8, decimal_places=5)),
('amt', models.DecimalField(max_digits=16, decimal_places=2)),
('vol', models.DecimalField(max_digits=8, decimal_places=0)),
('date', models.DateField()),
],
),
migrations.CreateModel(
name='Market',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=8)),
('currency', models.CharField(max_length=3)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('code', models.CharField(max_length=8)),
('market', models.CharField(max_length=8)),
('companyName', models.CharField(max_length=32)),
],
),
migrations.CreateModel(
name='WatchList',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('code', models.CharField(max_length=8)),
('market', models.CharField(max_length=8)),
('watchReason', models.CharField(max_length=256)),
],
),
]
|
989,326 | 32fe7427463bbee03a7d66046c45636583cae666 |
x = 9
y = 7
if x == y:
print 'equal'
else:
if x > y:
print x, 'is greater than', y
else:
print x, 'is less than', y
|
989,327 | 3b5167b0fafaabb1356185b432e905df20503cf5 | from collections import deque
def reverseString(stringToReverse):
if not isinstance(stringToReverse, str) or len(stringToReverse) < 2:
return False
reversedString = deque()
for letter in stringToReverse:
reversedString.appendleft(letter)
print (reversedString)
return ''.join(reversedString)
def reverseString2(stringToReverse): #solution with deque, makes code smaller
reversedString = deque()
for index in range(len(stringToReverse)-1, -1, -1):
reversedString.append(stringToReverse[index])
print (reversedString)
return ''.join(reversedString)
def reverseString3(stringToReverse): #less lines solution, hard to understand f you dont know python
return stringToReverse[::-1] |
989,328 | 45c0f3ca6e3be465866ab8ee873549f56ea44a61 | # Dictionary
dict1 = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4,
'f': 5, 'g': 6, 'h': 7, 'i': 8, 'j': 9,
'k': 10, 'l': 11, 'm': 12, 'n': 13, 'o': 14,
'p': 15, 'q': 16, 'r': 17, 's': 18, 't': 19,
'u': 20, 'v': 21, 'w': 22, 'x': 23, 'y': 24, 'z': 25}
dict2 = {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e',
5: 'f', 6: 'g', 7: 'h', 8: 'i', 9: 'j',
10: 'k', 11: 'l', 12: 'm', 13: 'n', 14: 'o',
15: 'p', 16: 'q', 17: 'r', 18: 's', 19: 't',
20: 'u', 21: 'v', 22: 'w', 23: 'x', 24: 'y', 25: 'z'}
def remove(string):
return string.replace(" ", "")
#C = (a*P + b) % 26
def affine_encrypt(msg, a, b):
cipher = ''
for letter in msg:
if letter == ' ':
cipher += ' '
else:
z = (a*dict1[letter] + b) % 26
cipher += dict2[z]
return cipher
#P = (a^-1 * (C - b)) % 26
def affine_decrypt(cipher, a, b):
message = ''
a_inv = 0
flag = 0
for i in range(26):
flag = (a*i) % 26
if flag == 1:
a_inv = i
break
for letter in cipher:
if letter == ' ':
message += ' '
else:
z = (a_inv*(dict1[letter]-b)) % 26
message += dict2[z]
return message
def main():
mode = int(input("1. Type Text\n2. Input File\nChoose(1,2): "))
if mode == 1:
msg = input("Enter Message: ")
msg = remove(msg).lower()
keycode = []
for i in range(0, 2):
if (i == 0):
print("Enter a key")
else:
print("Enter b key")
ele = int(input())
keycode.append(ele)
if (i == 0):
print()
print(keycode)
choice = int(input("1. Encryption\n2. Decryption\nChoose(1,2): "))
if choice == 1:
space = int(input("1. No space\n2. Space every 5 char\nChoose(1,2):"))
if space == 1:
print("--Encryption--")
encrypted_text = affine_encrypt(msg, keycode[0], keycode[1])
print('Encrypted Text: {}'.format( encrypted_text ).lower())
elif space == 2:
print("--Encryption--")
encrypted_text = affine_encrypt(msg, keycode[0], keycode[1])
encrypted_text = " ".join(encrypted_text[i:i + 5] for i in range(0, len(encrypted_text), 5))
print('Encrypted Text: {}'.format( encrypted_text ).lower())
else:
print("Invalid Input")
elif choice == 2:
space = int(input("1. No space\n2. Space every 5 char\nChoose(1,2):"))
if space == 1:
print("--Decryption--")
decrypted_text = affine_decrypt(msg, keycode[0], keycode[1])
print('Decrypted Text: {}'.format( decrypted_text).lower())
elif space == 2:
print("--Decryption--")
decrypted_text = affine_decrypt(msg, keycode[0], keycode[1])
decrypted_text = " ".join(decrypted_text[i:i + 5] for i in range(0, len(decrypted_text), 5))
print('Decrypted Text: {}'.format( decrypted_text).lower())
else:
print("Invalid Input")
elif mode == 2:
infile_name = input("Enter input file name: ")
infile = open(infile_name, 'r+')
msg = infile.read()
msg = remove(msg).lower()
keycode = []
for i in range(0, 2):
if (i == 0):
print("Enter a key")
else:
print("Enter b key")
ele = int(input())
keycode.append(ele)
if (i == 0):
print()
print(keycode)
choice = int(input("1. Encryption\n2. Decryption\nChoose(1,2): "))
if choice == 1:
space = int(input("1. No space\n2. Space every 5 char\nChoose(1,2):"))
if space == 1:
print("--Encryption--")
encrypted_text = affine_encrypt(msg, keycode[0], keycode[1])
print('Encrypted Text: {}'.format( encrypted_text ).lower())
save = int(input("1. Save message to file\n2. Dont Save\nChoose(1,2):"))
if save == 1:
infile.write(encrypted_text)
else:
pass
elif space == 2:
print("--Encryption--")
encrypted_text = affine_encrypt(msg, keycode[0], keycode[1])
encrypted_text = " ".join(encrypted_text[i:i + 5] for i in range(0, len(encrypted_text), 5))
print('Encrypted Text: {}'.format( encrypted_text ).lower())
save = int(input("1. Save message to file\n2. Dont Save\nChoose(1,2):"))
if save == 1:
infile.write(encrypted_text)
else:
pass
else:
print("Invalid Input")
elif choice == 2:
space = int(input("1. No space\n2. Space every 5 char\nChoose(1,2):"))
if space == 1:
print("--Decryption--")
decrypted_text = affine_decrypt(msg, keycode[0], keycode[1])
print('Decrypted Text: {}'.format( decrypted_text).lower())
elif space == 2:
print("--Decryption--")
decrypted_text = affine_decrypt(msg, keycode[0], keycode[1])
decrypted_text = " ".join(decrypted_text[i:i + 5] for i in range(0, len(decrypted_text), 5))
print('Decrypted Text: {}'.format( decrypted_text).lower())
else:
print("Invalid Input")
else:
print("Invalid input")
else:
print("Invalid Input")
main()
|
989,329 | 08a49d8456b50031e74a075c8e172813766956d6 | # Generated by Django 3.2.5 on 2021-07-31 21:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main', '0010_remove_recurring_purchase_type'),
]
operations = [
migrations.CreateModel(
name='InterPayment',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, unique=True)),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('payment_for', models.CharField(max_length=200)),
('from_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='inter_payments_sent', to=settings.AUTH_USER_MODEL)),
('to_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='inter_payments_received', to=settings.AUTH_USER_MODEL)),
],
),
]
|
989,330 | 7169618326bc8acaa8cd96f3165b9c8423a04728 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#字典练习题
dic = { "k1":"v1","k2":"v2","k3":"v3" }
#1.循环遍历出所有的key
for key in dic.keys():
print(key)
#2.循环遍历出所有的value
for value in dic.values():
print(value)
#3.循环遍历出所有的key和value
for k,v in dic.items():
print(k,v)
#4.在字典中添加"k4":"v4",并输出添加后的字典
dic["k4"] = "v4"
print(dic)
#5.删除字典中"k1","v1",并输出删除后的字典
dic.pop("k1")
print(dic)
#6.删除"k5"对应的键值对,如果不存在则不报错,并让其返回None
if "k5" in dic.keys():
dic.pop("k5")
print(dic)
else:
print("None")
#7.获取"k2"对应的值
print(dic.get("k2"))
#8.获取"k6"对应的键值对,如果不存在则不报错,并让其返回None
if "k6" in dic.keys():
print(dic.get("k6"))
else:
print("None")
#9.合并两个字典
dic1 = { "k1":"v1","k2":"v2","k3":"v3" }
dic2 = {"k1":"v111","a":"b"}
dic2.update(dic1)
print(dic2)
#10.
lis = [["k",["qwe",20,{"k1":["tt",3,"1"]},89],"ab"]]
#10.1 将lis中的"tt"变成大写(2种方法)
###### 方法一
# print(type(lis[0][1][2]["k1"][0]))
lis[0][1][2]["k1"][0] = "TT"
# print(lis[0][1][2]["k1"][0])
print(lis)
###### 方法二
dic3 = {"k1":["TT",3,"1"]}
lis[0][1][2].update(dic3)
print(lis)
#10.2将列表中的数字3,变成字符串100(两种方式)
###### 方法二
lis[0][1][2]["k1"][1] = "100"
print(lis)
###### 方法二
dic4 = {"k1":["TT","100","1"]}
lis[0][1][2].update(dic4)
print(lis)
#10.3将列表中的字符串“1”,变成数字101,(两种方法)
###### 方法一
lis[0][1][2]["k1"][2] = 101
print(lis)
###### 方法二
dic5 = {"k1":["TT",3,101]}
lis[0][1][2].update(dic5)
print(lis)
#11.
li = [1,2,3,"a","b",4,"c"]
dic = {}
#如果字典中没有"k1"这个键,则添加一个"k1":[],并将li中索引位为奇数的元素添加到"k1"对应的空列表中。
#如果字典中有"k1"这个键,且"k1"对应的value是列表类型,将li中索引位为奇数的元素添加到"k1"对应的空列表中。
if "k1" not in dic.keys():
dic["k1"] = []
for k,v in enumerate(li):
if k / 2 != 0:
dic["k1"].append(v)
else:
if isinstance(dic["k1"],list):
for k, v in enumerate(li):
if k / 2 != 0:
dic["k1"].append(v)
print(dic) |
989,331 | 333e76df2aff8105a45f3ddb43968e8339148e30 | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 6 12:43:00 2019
@author: FWass
"""
import numpy as np
import os
from functions.fg_norm import fg_norm
def write_data_lex(blade_name, data):
data_keys = []
check = False
counter = 0
eigen = {}
for key in data.keys():
data_keys.append(key)
data_formated = np.empty(np.shape(data[key])).T
for column in range(0, len(data[key])):
data_formated[:, column] = data[key][column]
eigen[key] = fg_norm(data[key][0], data[key][2])[0]
filepath = os.getcwd()
# Loop over cwd to check if already existing
for (dirpath, dirs, files) in os.walk(filepath + '/data_lex'): #Schleife über Ordner und eventuelle Unterverzeichnisse
for file in files: #Schleife über Messdateien
if blade_name in file:
check = True
counter += 1 # Counts preexisting files of same name
if not check:
with open('data_lex/' + blade_name, 'a+') as data_lex_file:
np.savetxt(data_lex_file, data_formated, fmt = '%s', delimiter = ' ', header = 'Frequency Voltage Deflection')
else:
with open('data_lex/' + blade_name + '_{}'.format(counter), 'a+') as data_lex_file:
np.savetxt(data_lex_file, data_formated, fmt = '%s', delimiter = ' ', header = 'Frequency Voltage Deflection')
# Close file to be sure
data_lex_file.close()
print(eigen)
return data_formated |
989,332 | 69c9f055310e3d0c54bc9c4797e58e9939108a1d | #
# @lc app=leetcode.cn id=821 lang=python
#
# [821] 字符的最短距离
#
# @lc code=start
class Solution(object):
def shortestToChar(self, S, C):
"""
:type S: str
:type C: str
:rtype: List[int]
"""
# @lc code=end
|
989,333 | c00a255906e7011dd007c801774c3dad2faa9c8e | #!/usr/bin/env python3
#-----------------------------------------------------------------------------
# This file is part of the rogue software platform. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the rogue software platform, including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
# Comment added by rherbst for demonstration purposes.
import pyrogue as pr
import pyrogue.interfaces.simulation
import rogue.interfaces.memory
import numpy as np
import random
#rogue.Logging.setLevel(rogue.Logging.Warning)
#import logging
#logger = logging.getLogger('pyrogue')
#logger.setLevel(logging.DEBUG)
class ListDevice(pr.Device):
# Last comment added by rherbst for demonstration.
def __init__(
self,
name = 'ListDevice',
description = 'List Device Test',
**kwargs):
super().__init__(
name = name,
description = description,
**kwargs)
##############################
# Variables
##############################
self.add(pr.RemoteVariable(
name = 'UInt32List',
offset = 0x0000,
bitSize = 32 * 32,
bitOffset = 0x0000,
base = pr.UInt,
mode = 'RW',
disp = '{}',
numValues = 32,
valueBits = 32,
valueStride = 32
))
self.add(pr.RemoteVariable(
name = 'Int32List',
offset = 0x1000,
bitSize = 32 * 32,
bitOffset = 0x0000,
base = pr.Int,
mode = 'RW',
disp = '{}',
numValues = 32,
valueBits = 32,
valueStride = 32
))
self.add(pr.RemoteVariable(
name = 'UInt48List',
offset = 0x2000,
bitSize = 48 * 32,
bitOffset = 0x0000,
base = pr.UInt,
mode = 'RW',
disp = '{}',
numValues = 32,
valueBits = 48,
valueStride = 48
))
self.add(pr.RemoteVariable(
name = 'FloatList',
offset = 0x3000,
bitSize = 32 * 32,
bitOffset = 0x0000,
base = pr.Float,
mode = 'RW',
disp = '{}',
numValues = 32,
valueBits = 32,
valueStride = 32
))
self.add(pr.RemoteVariable(
name = 'DoubleList',
offset = 0x4000,
bitSize = 64 * 32,
bitOffset = 0x0000,
base = pr.Double,
mode = 'RW',
disp = '{}',
numValues = 32,
valueBits = 64,
valueStride = 64
))
self.add(pr.RemoteVariable(
name = 'UInt16List',
offset = 0x5000,
bitSize = 16 * 32,
bitOffset = 0x0000,
base = pr.UInt,
mode = 'RW',
disp = '{}',
numValues = 32,
valueBits = 16,
valueStride = 16
))
self.add(pr.RemoteVariable(
name = 'UInt21List',
offset = 0x6000,
bitSize = 32 * 32,
bitOffset = 0x0000,
base = pr.UInt,
mode = 'RW',
disp = '{}',
numValues = 32,
valueBits = 21,
valueStride = 32
))
self.add(pr.RemoteVariable(
name = 'BoolList',
offset = 0x7000,
bitSize = 32,
bitOffset = 0x0000,
base = pr.Bool,
mode = 'RW',
disp = '{}',
numValues = 32,
valueBits = 1,
valueStride = 1
))
class DummyTree(pr.Root):
def __init__(self):
pr.Root.__init__(self,
name='dummyTree',
description="Dummy tree for example",
timeout=2.0,
pollEn=False)
# Use a memory space emulator
sim = rogue.interfaces.memory.Emulate(4,0x1000)
self.addInterface(sim)
self.add(ListDevice(
offset = 0,
memBase = sim
))
def test_memory():
UInt32ListARaw = [int(random.random()*1000) for i in range(32)]
Int32ListARaw = [int(random.random()*1000) for i in range(32)]
UInt48ListARaw = [int(random.random()*1000) for i in range(32)]
FloatListARaw = [random.random()*1000 for i in range(32)]
DoubleListARaw = [random.random()*1000 for i in range(32)]
UInt16ListARaw = [int(random.random()*1000) for i in range(32)]
UInt21ListARaw = [int(random.random()*1000) for i in range(32)]
BoolListARaw = [int(random.random()*1000)%2==0 for i in range(32)]
UInt32ListA = np.array(UInt32ListARaw,np.uint32)
Int32ListA = np.array(Int32ListARaw,np.int32)
UInt48ListA = np.array(UInt48ListARaw,np.uint64)
FloatListA = np.array(FloatListARaw,np.float32)
DoubleListA = np.array(DoubleListARaw,np.float64)
UInt16ListA = np.array(UInt16ListARaw,np.uint32)
UInt21ListA = np.array(UInt21ListARaw,np.uint32)
BoolListA = np.array(BoolListARaw,bool)
UInt32ListB = [int(random.random()*1000) for i in range(32)]
Int32ListB = [int(random.random()*1000) for i in range(32)]
UInt48ListB = [int(random.random()*1000) for i in range(32)]
FloatListB = [random.random()*1000 for i in range(32)]
DoubleListB = [random.random()*1000 for i in range(32)]
UInt16ListB = [int(random.random()*1000) for i in range(32)]
UInt21ListB = [int(random.random()*1000) for i in range(32)]
BoolListB = [int(random.random()*1000)%2==0 for i in range(32)]
with DummyTree() as root:
with root.updateGroup():
root.ListDevice.UInt32List.set(UInt32ListARaw)
root.ListDevice.Int32List.set(Int32ListARaw)
root.ListDevice.UInt48List.set(UInt48ListARaw)
root.ListDevice.FloatList.set(FloatListARaw)
root.ListDevice.DoubleList.set(DoubleListARaw)
root.ListDevice.UInt16List.set(UInt16ListARaw)
root.ListDevice.UInt21List.set(UInt21ListARaw)
root.ListDevice.BoolList.set(BoolListARaw)
UInt32ListAA = root.ListDevice.UInt32List.get()
Int32ListAA = root.ListDevice.Int32List.get()
UInt48ListAA = root.ListDevice.UInt48List.get()
FloatListAA = root.ListDevice.FloatList.get()
DoubleListAA = root.ListDevice.DoubleList.get()
UInt16ListAA = root.ListDevice.UInt16List.get()
UInt21ListAA = root.ListDevice.UInt21List.get()
BoolListAA = root.ListDevice.BoolList.get()
UInt32ListAB = np.array([0] * 32,np.uint32)
Int32ListAB = np.array([0] * 32,np.int32)
UInt48ListAB = np.array([0] * 32,np.uint64)
FloatListAB = np.array([0] * 32,np.float32)
DoubleListAB = np.array([0] * 32,np.float64)
UInt16ListAB = np.array([0] * 32,np.uint32)
UInt21ListAB = np.array([0] * 32,np.uint32)
BoolListAB = np.array([0] * 32,bool)
for i in range(32):
UInt32ListAB[i] = root.ListDevice.UInt32List.get(index=i)
Int32ListAB[i] = root.ListDevice.Int32List.get(index=i)
UInt48ListAB[i] = root.ListDevice.UInt48List.get(index=i)
FloatListAB[i] = root.ListDevice.FloatList.get(index=i)
DoubleListAB[i] = root.ListDevice.DoubleList.get(index=i)
UInt16ListAB[i] = root.ListDevice.UInt16List.get(index=i)
UInt21ListAB[i] = root.ListDevice.UInt21List.get(index=i)
BoolListAB[i] = root.ListDevice.BoolList.get(index=i)
for i in range(32):
if UInt32ListAA[i] != UInt32ListA[i]:
raise AssertionError(f'Verification Failure for UInt32ListAA at position {i}')
if Int32ListAA[i] != Int32ListA[i]:
raise AssertionError(f'Verification Failure for Int32ListAA at position {i}')
if UInt48ListAA[i] != UInt48ListA[i]:
raise AssertionError(f'Verification Failure for UInt48ListAA at position {i}')
if abs(FloatListAA[i] - FloatListA[i]) > 0.001:
raise AssertionError(f'Verification Failure for FloatListAA at position {i}')
if abs(DoubleListAA[i] - DoubleListA[i]) > 0.001:
raise AssertionError(f'Verification Failure for DoubleListAA at position {i}')
if UInt16ListAA[i] != UInt16ListA[i]:
raise AssertionError(f'Verification Failure for UInt16ListAA at position {i}')
if UInt21ListAA[i] != UInt21ListA[i]:
raise AssertionError(f'Verification Failure for UInt21ListAA at position {i}')
if BoolListAA[i] != BoolListA[i]:
raise AssertionError(f'Verification Failure for BoolListAA at position {i}')
if UInt32ListAB[i] != UInt32ListA[i]:
raise AssertionError(f'Verification Failure for UInt32ListAB at position {i}')
if UInt48ListAB[i] != UInt48ListA[i]:
raise AssertionError(f'Verification Failure for UInt48ListAB at position {i}')
if abs(FloatListAB[i] - FloatListA[i]) > 0.001:
raise AssertionError(f'Verification Failure for FloatListAB at position {i}')
if abs(DoubleListAB[i] - DoubleListA[i]) > 0.001:
raise AssertionError(f'Verification Failure for DoubleListAB at position {i}')
if UInt16ListAB[i] != UInt16ListA[i]:
raise AssertionError(f'Verification Failure for UInt16ListAB at position {i}')
if UInt21ListAB[i] != UInt21ListA[i]:
raise AssertionError(f'Verification Failure for UInt21ListAB at position {i}')
if BoolListAB[i] != BoolListA[i]:
raise AssertionError(f'Verification Failure for BoolListAB at position {i}')
for i in range(32):
root.ListDevice.UInt32List.set(UInt32ListB[i],index=i)
root.ListDevice.Int32List.set(Int32ListB[i],index=i)
root.ListDevice.UInt48List.set(UInt48ListB[i],index=i)
root.ListDevice.FloatList.set(FloatListB[i],index=i)
root.ListDevice.DoubleList.set(DoubleListB[i],index=i)
root.ListDevice.UInt16List.set(UInt16ListB[i],index=i)
root.ListDevice.UInt21List.set(UInt21ListB[i],index=i)
root.ListDevice.BoolList.set(BoolListB[i],index=i)
UInt32ListBA = root.ListDevice.UInt32List.get()
Int32ListBA = root.ListDevice.Int32List.get()
UInt48ListBA = root.ListDevice.UInt48List.get()
FloatListBA = root.ListDevice.FloatList.get()
DoubleListBA = root.ListDevice.DoubleList.get()
UInt16ListBA = root.ListDevice.UInt16List.get()
UInt21ListBA = root.ListDevice.UInt21List.get()
BoolListBA = root.ListDevice.BoolList.get()
UInt32ListBB = np.array([0] * 32,np.uint32)
Int32ListBB = np.array([0] * 32,np.int32)
UInt48ListBB = np.array([0] * 32,np.uint64)
FloatListBB = np.array([0] * 32,np.float32)
DoubleListBB = np.array([0] * 32,np.float64)
UInt16ListBB = np.array([0] * 32,np.uint32)
UInt21ListBB = np.array([0] * 32,np.uint32)
BoolListBB = np.array([0] * 32,bool)
for i in range(32):
UInt32ListBB[i] = root.ListDevice.UInt32List.get(index=i)
Int32ListBB[i] = root.ListDevice.Int32List.get(index=i)
UInt48ListBB[i] = root.ListDevice.UInt48List.get(index=i)
FloatListBB[i] = root.ListDevice.FloatList.get(index=i)
DoubleListBB[i] = root.ListDevice.DoubleList.get(index=i)
UInt16ListBB[i] = root.ListDevice.UInt16List.get(index=i)
UInt21ListBB[i] = root.ListDevice.UInt21List.get(index=i)
BoolListBB[i] = root.ListDevice.BoolList.get(index=i)
for i in range(32):
if UInt32ListBA[i] != UInt32ListB[i]:
raise AssertionError(f'Verification Failure for UInt32ListBA at position {i}')
if Int32ListBA[i] != Int32ListB[i]:
raise AssertionError(f'Verification Failure for Int32ListBA at position {i}')
if UInt48ListBA[i] != UInt48ListB[i]:
raise AssertionError(f'Verification Failure for UInt48ListBA at position {i}')
if abs(FloatListBA[i] - FloatListB[i]) > 0.001:
raise AssertionError(f'Verification Failure for FloatListBA at position {i}')
if abs(DoubleListBA[i] != DoubleListB[i]) > 0.001:
raise AssertionError(f'Verification Failure for DoubleListBA at position {i}')
if UInt16ListBA[i] != UInt16ListB[i]:
raise AssertionError(f'Verification Failure for UInt16ListBA at position {i}')
if UInt21ListBA[i] != UInt21ListB[i]:
raise AssertionError(f'Verification Failure for UInt21ListBA at position {i}')
if BoolListBA[i] != BoolListB[i]:
raise AssertionError(f'Verification Failure for BoolListBA at position {i}')
if UInt32ListBB[i] != UInt32ListB[i]:
raise AssertionError(f'Verification Failure for UInt32ListBB at position {i}')
if abs(FloatListBB[i] - FloatListB[i]) > 0.001:
raise AssertionError(f'Verification Failure for FloatListBB at position {i}')
if abs(DoubleListBB[i] - DoubleListB[i]) > 0.001:
raise AssertionError(f'Verification Failure for DoubleListBB at position {i}')
if UInt16ListBB[i] != UInt16ListB[i]:
raise AssertionError(f'Verification Failure for UInt16ListBB at position {i}')
if UInt21ListBB[i] != UInt21ListB[i]:
raise AssertionError(f'Verification Failure for UInt21ListBB at position {i}')
if BoolListBB[i] != BoolListB[i]:
raise AssertionError(f'Verification Failure for BoolListBB at position {i}')
root.ListDevice.UInt32List.set(UInt32ListA)
root.ListDevice.Int32List.set(Int32ListA)
root.ListDevice.UInt32List.set(np.array([1,2,3],np.uint32),index=7)
root.ListDevice.Int32List.set([1,-22,-33],index=5)
resA = root.ListDevice.UInt32List.get()
resB = root.ListDevice.Int32List.get()
UInt32ListA[7:10] = [1,2,3]
Int32ListA[5:8] = [1,-22,-33]
# Verify update
for i in range(32):
if resA[i] != UInt32ListA[i]:
raise AssertionError(f'Stripe Verification Failure for UInt32ListA at position {i}')
if resB[i] != Int32ListA[i]:
raise AssertionError(f'Stripe Verification Failure for Int32ListA at position {i}')
# Test value shift
_ = resA[0] >> 5
def run_gui():
import pyrogue.pydm
with DummyTree() as root:
pyrogue.pydm.runPyDM(root=root,title='test123',sizeX=1000,sizeY=500)
if __name__ == "__main__":
test_memory()
#run_gui()
|
989,334 | 02ec48d0a34dc5c2a8900e7be68161dbcd6a8ba0 | """
Crawler de Informacion academica. Detecta las siguientes irregularidades:
- Materias obligatorias que no se dictan en el cuatrimestre.
- Materias del mismo cuatrimestre (segun programa) que no tienen horarios
compatibles entre si.
- Materias que no tienen horarios compatibles con jornada laboral.
Utiliza las librerias pyquery y requests.
Modo de uso:
>> verificar_programa(path)
path es la ruta a un archivo json con el programa que se quiere verificar.
"""
import json
from pyquery import PyQuery as pq
import requests
""" CRAWLER """
URL_MATERIA = 'http://intra.fi.uba.ar/insc/consultas/consulta_cursos.jsp?\
materia={materia}'
TIPOS_CURSO = ['CP', 'CPO', 'DC', 'EP', 'EPO', 'LO', 'P', 'PO', 'T', 'TO',
'TP', 'TPO', 'VT', 'SP']
def _get_clases(pq_curso):
clases = pq_curso('.tablaitem:eq(4)').text().split()
resultado = []
# la separacion de los campos no es consistente, hay que chequear cada fila
for i in range(len(clases)):
if clases[i] in TIPOS_CURSO:
resultado.append({'dia': clases[i + 1],
'comienzo': clases[i + 2],
'fin': clases[i + 3]})
return resultado
def get_info_materia(codigo):
"""
Extrae de info academica los horarios de los cursos de la materia
especificada.
"""
materia = {}
codigo = codigo.replace('.', '')
d = pq(requests.get(URL_MATERIA.format(materia=codigo)).text)
materia['nombre'] = d('#principal h3').text().replace(
'Cursos de la materia ', '')
materia['cursos'] = []
for tr in d('#principal tr'):
pq_curso = pq(tr)
profesor = pq_curso('.tablaitem:eq(2)').text()
if profesor:
materia['cursos'].append({'profesor': profesor,
'clases': _get_clases(pq_curso)})
return materia
""" ANALIZADOR """
# Chequeos:
def se_dicta(materia):
""" Devuelve True si hay al menos un curso publicado de la materia. """
return len(materia['cursos']) > 0
def horario_laboral(materia):
"""
Devuelve True si hay al menos un curso publicado con horario para alumnos
que trabajan (lunes a viernes a partir de las 18:00 y sabados).
"""
for curso in materia['cursos']:
compatible = True
for clase in curso['clases']:
if clase['dia'] != 'sabado' and clase['comienzo'] < '18:00':
compatible = False
break
if compatible:
return True
return False
def _clases_compatibles(c1, c2):
if c1['dia'] == c2['dia']:
return c1['fin'] <= c2['comienzo'] or c1['comienzo'] >= c2['fin']
return True
def _cursos_compatibles(cursos, nuevo_curso):
"""
Devuelve True si se puede cursar el curso con la lista de cursos anterior.
"""
if not cursos:
return True
for curso in cursos:
for clase in curso['clases']:
for nueva_clase in nuevo_curso['clases']:
if not _clases_compatibles(clase, nueva_clase):
return False
return True
def superposiciones(materias):
"""
Devuelve True si hay al menos una combinacion de cursos disponible para
cursar simultaneamente la lista de materias indicada.
"""
combinaciones = []
for materia in materias:
nuevas = []
for curso in materia['cursos']:
for combinacion in combinaciones:
if _cursos_compatibles(combinacion, curso):
nuevas.append(combinacion + [curso])
# cuando no hay combinaciones agregar el curso directamente
if not combinaciones:
nuevas.append([curso])
combinaciones = nuevas
return combinaciones
def verificar_programa(path='informatica.json'):
archivo = open(path)
programa = json.load(archivo)
archivo.close()
for cuatrimestre in programa:
materias = []
for materia in programa[cuatrimestre]:
materia = get_info_materia(materia.replace('.', ''))
if not se_dicta(materia):
print "La materia {m} de {c} no tiene cursos publicados."\
.format(m=materia['nombre'], c=cuatrimestre)
else:
materias.append(materia)
if not horario_laboral(materia):
print ("La materia {m} de {c} no tiene horarios " +
"compatibles con jornada laboral.").format(
m=materia['nombre'], c=cuatrimestre)
if len(materias) and not superposiciones(materias):
print "Las materias de {c} no se pueden hacer simultaneamente."\
.format(c=cuatrimestre)
|
989,335 | dbee80e6d74633d873b96726fac160260fb0e17c | import poly
class GameMap(object):
def __init__(
self,
width,
height,
color_dark_wall,
color_dark_ground,
color_light_wall,
color_light_ground,
color_bg,
room_max_size=10,
room_min_size=6,
max_rooms=30,
):
self.width = width
self.height = height
self.color_dark_ground = color_dark_ground
self.color_dark_wall = color_dark_wall
self.color_light_ground = color_light_ground
self.color_light_wall = color_light_wall
self.color_bg = color_bg
self.room_max_size = room_max_size
self.room_min_size = room_min_size
self.max_rooms = max_rooms
self.tiles = [[Tile(False)
for y in range(self.height)]
for x in range(width)]
def __getitem__(self, i):
return self.tiles[i]
def generate(self, randlib):
rooms = []
for r in range(self.max_rooms):
w = randlib.random_get_int(
0,
self.room_min_size,
self.room_max_size,
)
h = randlib.random_get_int(
0,
self.room_min_size,
self.room_max_size,
)
x = randlib.random_get_int(
0,
0,
self.width - w - 1,
)
y = randlib.random_get_int(
0,
0,
self.height - h - 1,
)
new_room = poly.Rect(x, y, w, h)
new_x, new_y = new_room.center()
failed = False
for other_room in rooms:
if new_room.intersects(other_room):
failed = True
break
if not failed:
create_room(self, new_room)
if len(rooms) > 0:
(prev_x, prev_y) = rooms[-1].center()
if randlib.random_get_int(0, 0, 1) == 1:
create_h_tunnel(self, prev_x, new_x, prev_y)
create_v_tunnel(self, prev_y, new_y, new_x)
else:
create_h_tunnel(self, prev_x, new_x, new_y)
create_v_tunnel(self, prev_y, new_y, prev_x)
rooms.append(new_room)
return rooms
class Tile(object):
def __init__(
self,
blocked,
block_sight=None,
air=False,
):
self.blocked = blocked
if block_sight is None:
self.block_sight = blocked
else:
self.block_sight = block_sight
self.explored = False
self.air=air
def create_room(the_map, room):
for x in range(room.x1, room.x2 + 1):
for y in range(room.y1, room.y2 + 1):
if not the_map[x][y].air:
the_map[x][y].blocked = True
the_map[x][y].block_sight = True
for x in range(room.x1 + 1, room.x2):
for y in range(room.y1 + 1, room.y2):
the_map[x][y].blocked = False
the_map[x][y].block_sight = False
the_map[x][y].air = True
def create_h_tunnel(the_map, x1, x2, y):
for x in range(min(x1, x2), max(x1, x2) + 1):
the_map[x][y].blocked = False
the_map[x][y].block_sight = False
the_map[x][y].air = True
if not the_map[x][y - 1].air:
the_map[x][y - 1].blocked = True
the_map[x][y - 1].block_sight = True
if not the_map[x][y + 1].air:
the_map[x][y + 1].blocked = True
the_map[x][y + 1].block_sight = True
for y in range(y-1, y + 2):
x = min(x1, x2) - 1
if not the_map[x][y].air:
the_map[x][y].blocked = True
the_map[x][y].block_sight = True
x = max(x1, x2) + 1
if not the_map[x][y].air:
the_map[x][y].blocked = True
the_map[x][y].block_sight = True
def create_v_tunnel(the_map, y1, y2, x):
for y in range(min(y1, y2), max(y1, y2) + 1):
the_map[x][y].blocked = False
the_map[x][y].block_sight = False
the_map[x][y].air = True
if not the_map[x + 1][y].air:
the_map[x + 1][y].blocked = True
the_map[x + 1][y].block_sight = True
if not the_map[x - 1][y].air:
the_map[x - 1][y].blocked = True
the_map[x - 1][y].block_sight = True
for x in range(x-1, x + 2):
y = min(y1, y2) - 1
if not the_map[x][y].air:
the_map[x][y].blocked = True
the_map[x][y].block_sight = True
y = max(y1, y2) + 1
if not the_map[x][y].air:
the_map[x][y].blocked = True
the_map[x][y].block_sight = True
|
989,336 | 8b45e373e2729117ebb407c99453a555711eeec4 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-06-22 18:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bunker', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='photo',
field=models.ImageField(default=b'/Users/tarun/code/jitterbunk/bunker/media/profilePics/default-profile-picture.png', upload_to=b'profilePics/'),
),
]
|
989,337 | 92efe1406506a6d294645429661b3db85a57b88b | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p
from OpenGL.constant import Constant as _C
# Code generation uses this
# End users want this...
from OpenGL.raw.GL import _errors
_EXTENSION_NAME = 'GL_ARB_texture_float'
def _f(function):
return _p.createFunction(function, _p.PLATFORM.GL, 'GL_ARB_texture_float', error_checker=_errors._error_checker)
GL_ALPHA16F_ARB = _C('GL_ALPHA16F_ARB', 0x881C)
GL_ALPHA32F_ARB = _C('GL_ALPHA32F_ARB', 0x8816)
GL_INTENSITY16F_ARB = _C('GL_INTENSITY16F_ARB', 0x881D)
GL_INTENSITY32F_ARB=_C('GL_INTENSITY32F_ARB',0x8817)
GL_LUMINANCE16F_ARB=_C('GL_LUMINANCE16F_ARB',0x881E)
GL_LUMINANCE32F_ARB=_C('GL_LUMINANCE32F_ARB',0x8818)
GL_LUMINANCE_ALPHA16F_ARB=_C('GL_LUMINANCE_ALPHA16F_ARB',0x881F)
GL_LUMINANCE_ALPHA32F_ARB=_C('GL_LUMINANCE_ALPHA32F_ARB',0x8819)
GL_RGB16F_ARB=_C('GL_RGB16F_ARB',0x881B)
GL_RGB32F_ARB=_C('GL_RGB32F_ARB',0x8815)
GL_RGBA16F_ARB=_C('GL_RGBA16F_ARB',0x881A)
GL_RGBA32F_ARB=_C('GL_RGBA32F_ARB',0x8814)
GL_TEXTURE_ALPHA_TYPE_ARB=_C('GL_TEXTURE_ALPHA_TYPE_ARB',0x8C13)
GL_TEXTURE_BLUE_TYPE_ARB=_C('GL_TEXTURE_BLUE_TYPE_ARB',0x8C12)
GL_TEXTURE_DEPTH_TYPE_ARB=_C('GL_TEXTURE_DEPTH_TYPE_ARB',0x8C16)
GL_TEXTURE_GREEN_TYPE_ARB=_C('GL_TEXTURE_GREEN_TYPE_ARB',0x8C11)
GL_TEXTURE_INTENSITY_TYPE_ARB=_C('GL_TEXTURE_INTENSITY_TYPE_ARB',0x8C15)
GL_TEXTURE_LUMINANCE_TYPE_ARB=_C('GL_TEXTURE_LUMINANCE_TYPE_ARB',0x8C14)
GL_TEXTURE_RED_TYPE_ARB=_C('GL_TEXTURE_RED_TYPE_ARB',0x8C10)
GL_UNSIGNED_NORMALIZED_ARB=_C('GL_UNSIGNED_NORMALIZED_ARB',0x8C17)
|
989,338 | 659e7c482020505613b0f4fd899fec51b6f622e5 | # -*- coding: utf-8 -*-
'''
This is a simple resolution of given Sudoku.
Some Sudoku would have multiple resolutions, but we'll just figure out one of them.
You may be interested in the process how this program solve these, then make sure that the variable DEBUG == True.
'''
'''
TODO:
- [Done] Check the 3*3 cuber.
- Do more complete check() in Cuber9.check()
- Consider the line/row/Cuber3 which only has one EmptyValue first.
- Consider the line/row/Cuber3 which the last filled pos is in.
- Consider cases which have more than one solution.
'''
import random
import copy
import time
# For DEBUG
DEBUG = False
EmptyValue = 0
spac = [' ']
class Cuber(object):
'''
Base-class Cuber.
lineno/rowno should start with 0.
'''
def __init__(self, values):
# THe inner data stands for the puzzle.
self.values = values
def getLine(self, lineno):
return self.values[lineno]
def getRow(self, rowno):
return [line[rowno] for line in self.values]
def getValue(self, pos):
lineno, rowno = pos
return self.values[lineno][rowno]
def setValue(self, pos, value):
lineno, rowno = pos
if DEBUG:
global spac
if value == EmptyValue:
spac.pop()
print ''.join(spac), pos, '->', value,
# Roll back
if value == EmptyValue:
print '...'
else:
print
self.values[lineno][rowno] = value
if DEBUG:
if value != EmptyValue:
spac.extend(' ')
def lines(self):
# Iterator over all lines
return self.values
def rows(self):
# Iterator over all lines
length = len(self.values)
_rows = [None] * length
for i in range(length):
_rows[i] = self.getRow(i)
return _rows
def check(self):
pass
def __eq__(self, obj):
return self.values == obj.values
def __str__(self):
_str = []
for i in range(len(self.values)):
_str.append(' ')
for j in range(len(self.values[i])):
_str.append('%d ' % self.values[i][j])
if i < len(self.values[j]) - 1:
_str.append('\n\n')
return ''.join(_str)
class Cuber3(Cuber):
def __init__(self, values = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]):
self.values = values
def inCuber3(self, value):
if value != 0:
# We use '0' as an empty value
for line in self.values:
if value in line:
return True
return False
def getPos(self, value):
if value != 0:
# We use '0' as an empty value
for (i, v) in enumerate(self.values):
for (j, x) in enumerate(v):
if x == value:
return (i, j)
return None
def setValueAndCheck(self, pos, value):
if self.inCuber3(value):
return False
self.setValue(pos, value)
return True
def check(self):
_alllines = []
for line in values:
if 0 in line:
# There should be one more '0' element in some lines
return False
_alllines.append(line)
for i in range(1, 10):
if _alllines.count(i) != 1:
return False
return True
class Cuber9(Cuber):
def __init__(self, values):
self.values = values
def getSubCubers(self):
_SubCubers = {}
i = 0
while i < len(self.values):
j = 0
while j < len(self.values[i]):
_SubCubers[(i/3, j/3)] = Cuber3([
self.getLine(i)[j:j+3],
self.getLine(i+1)[j:j+3],
self.getLine(i+2)[j:j+3]
])
j += 3
i += 3
return _SubCubers
def getPosSubCuber(self, pos):
i, j = pos
return self.getSubCubers()[(i/3, j/3)]
def setValueAndCheck(self, pos, value):
# A roll-back operation
if value == EmptyValue:
self.setValue(pos, value)
else:
lineno, rowno = pos
if (value in self.getLine(lineno)) \
or (value in self.getRow(rowno)) \
or self.getPosSubCuber(pos).inCuber3(value):
return False
self.setValue(pos, value)
return True
def getAllEmptyPos(self):
'''Get all empty pos, return a pos list'''
_leftEmpty = []
for (l, line) in enumerate(self.values):
for (r, v) in enumerate(line):
if v == EmptyValue:
_leftEmpty.append((l, r))
return _leftEmpty
def check(self):
'''
Check the whole Cuber9 to see if it' ok.
Do something more.
'''
_alllines = []
for line in self.values:
if EmptyValue in line:
# There should be one more '0' element in some lines
return False
_alllines.extend(line)
for i in range(1, 10):
if _alllines.count(i) != 9:
return False
return True
# Used to get the subset of each line/row/Cuber3
_Values = set(range(0, 10))
def Boom(case):
'''
The main algorithm of this program.
I use recursion function to implement the backtrace.
When it finds that the current assumed value has conflict with others,
go back to the last assumed pos to try another possible value.
'''
leftEmpty = case.getAllEmptyPos()
# All empty places filled, so we give it a check.
if not leftEmpty:
if case.check():
return case
# Iterate over each empty pos
for pos in leftEmpty:
l, r = pos
linel = case.getLine(l)
subset = _Values - set(linel)
if EmptyValue in subset:
subset.remove(EmptyValue)
# Iterate over each valid values
for each in subset:
if case.setValueAndCheck(pos, each):
# Fill in one value, so pass it to next level.
if Boom(case):
return case
else:
# Roll back
case.setValueAndCheck(pos, EmptyValue)
# !!! Since the whole subset doesn't fit this line, the upper level must be wrong.
return None
def main():
_testcases = (
# Easy
([[0, 0, 0, 3, 0, 5, 0, 0, 4],
[3, 1, 0, 4, 0, 0, 0, 2, 0],
[9, 6, 4, 1, 7, 2, 0, 0, 0],
[0, 0, 0, 5, 6, 1, 2, 3, 9],
[0, 3, 6, 0, 0, 0, 4, 8, 0],
[5, 2, 9, 8, 3, 4, 0, 0, 0],
[0, 0, 0, 9, 1, 3, 6, 7, 2],
[0, 7, 0, 0, 0, 8, 0, 9, 3],
[2, 0, 0, 6, 0, 7, 0, 0, 0],
],
[[7, 8, 2, 3, 9, 5, 1, 6, 4],
[3, 1, 5, 4, 8, 6, 9, 2, 7],
[9, 6, 4, 1, 7, 2, 3, 5, 8],
[8, 4, 7, 5, 6, 1, 2, 3, 9],
[1, 3, 6, 7, 2, 9, 4, 8, 5],
[5, 2, 9, 8, 3, 4, 7, 1, 6],
[4, 5, 8, 9, 1, 3, 6, 7, 2],
[6, 7, 1, 2, 4, 8, 5, 9, 3],
[2, 9, 3, 6, 5, 7, 8, 4, 1],
]),
# Hard
([[0, 0, 3, 0, 8, 0, 0, 9, 0],
[8, 0, 0, 0, 0, 0, 6, 0, 1],
[0, 0, 4, 0, 0, 9, 2, 0, 0],
[0, 4, 0, 0, 6, 7, 9, 0, 0],
[0, 0, 8, 3, 0, 2, 7, 0, 0],
[0, 0, 7, 9, 5, 0, 0, 2, 0],
[0, 0, 6, 5, 0, 0, 3, 0, 0],
[4, 0, 2, 0, 0, 0, 0, 0, 7],
[0, 8, 0, 0, 7, 0, 4, 0, 0],
],
[[1, 2, 3, 7, 8, 6, 5, 9, 4],
[8, 7, 9, 4, 2, 5, 6, 3, 1],
[6, 5, 4, 1, 3, 9, 2, 7, 8],
[2, 4, 5, 8, 6, 7, 9, 1, 3],
[9, 6, 8, 3, 1, 2, 7, 4, 5],
[3, 1, 7, 9, 5, 4, 8, 2, 6],
[7, 9, 6, 5, 4, 1, 3, 8, 2],
[4, 3, 2, 6, 9, 8, 1, 5, 7],
[5, 8, 1, 2, 7, 3, 4, 6, 9],
]),
# Diabolical
([[8, 0, 7, 0, 0, 0, 0, 0, 0],
[3, 0, 0, 5, 0, 0, 0, 0, 0],
[0, 0, 9, 0, 8, 0, 0, 5, 0],
[1, 0, 0, 0, 5, 4, 9, 0, 6],
[0, 0, 4, 0, 1, 0, 3, 0, 0],
[9, 0, 3, 6, 2, 0, 0, 0, 1],
[0, 9, 0, 0, 6, 0, 2, 0, 0],
[0, 0, 0, 0, 0, 8, 0, 0, 9],
[0, 0, 0, 0, 0, 0, 6, 0, 8],
],
[[8, 5, 7, 9, 4, 3, 1, 6, 2],
[3, 1, 6, 5, 7, 2, 8, 9, 4],
[2, 4, 9, 1, 8, 6, 7, 5, 3],
[1, 7, 2, 3, 5, 4, 9, 8, 6],
[5, 6, 4, 8, 1, 9, 3, 2, 7],
[9, 8, 3, 6, 2, 7, 5, 4, 1],
[7, 9, 8, 4, 6, 1, 2, 3, 5],
[6, 2, 5, 7, 3, 8, 4, 1, 9],
[4, 3, 1, 2, 9, 5, 6, 7, 8],
]),
)
# x = Cuber9(copy.deepcopy(testcase_r))
# for i in range(0, 20):
# l = random.randrange(0, 9)
# r = random.randrange(0, 9)
# x.setValue((l, r), EmptyValue)
for (p, s) in _testcases:
print '*' * 80
if DEBUG:
global spac
spac = [' ']
case = Cuber9(p)
case_solution = Cuber9(s)
start = time.time()
Boom(case)
end = time.time()
if case == case_solution:
print 'Case: '
print case
print 'Solution: '
print case_solution
print 'Congratulations, seconds elapsed: ', end - start
else:
print 'Case FAILED: '
print case
if __name__ == '__main__':
main()
|
989,339 | 602e4ba606868fb5837a7643626b36c1c54bd565 | from __future__ import unicode_literals, print_function
from jinja2 import FileSystemLoader, StrictUndefined
from jinja2.environment import Environment
#env = Environment(undefined=StrictUndefined)
env = Environment()
env.loader = FileSystemLoader([".", "./templates/"])
vrf_var = {
"vrf_name" : "blue",
"Route_Distinguisher" : "100:1" ,
"ipv4_af" : False,
"ipv6_af" : False,
}
template_filename = "vrf.j2"
j2_template = env.get_template(template_filename)
output = j2_template.render(**vrf_var)
#output = j2_template.render(local_as = 10, peer1_ip = "10.1.20.2" , peer1_as= 20, peer2_ip = "10.1.30.2", peer2_as = 30)
print(output)
|
989,340 | 2c81741209aaa002d533f83d8bb93ffa2afff038 | import psycopg2
from sql_queries import create_table_queries, drop_table_queries
def create_database():
"""
Connects to the database, generates a cursor, drops existing db,
creates a new db, disconnects, reconnects and generates a new cursor.
Arguments: None
Returns: cur=cursor object, conn=connection object
"""
# connect to default database
conn = psycopg2.connect("host=127.0.0.1 dbname=test user=postgres password=password1")
conn.set_session(autocommit=True)
cur = conn.cursor()
# create sparkify database with UTF8 encoding
cur.execute("DROP DATABASE IF EXISTS sparkifydb")
cur.execute("CREATE DATABASE sparkifydb WITH ENCODING 'utf8' TEMPLATE template0")
# close connection to default database
conn.close()
# connect to sparkify database
conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=postgres password=password1")
cur = conn.cursor()
return cur, conn
def drop_tables(cur, conn):
"""
Executes the set of queries stored in drop_table_queries to drop existing tables.
Arguments: cur=cursor object, conn=connection object
Returns: None
"""
for query in drop_table_queries:
cur.execute(query)
conn.commit()
def create_tables(cur, conn):
"""
Executes the set of queries stored in create_table_queries to create new tables.
Arguments: cur=cursor object, conn=connection object
Returns: None
"""
for query in create_table_queries:
cur.execute(query)
conn.commit()
def main():
"""
When the script is directly called, the main method runs the create_database() function,
drop_tables() function, and create_tables() function, and then disconnects.
Arguments: None
Returns: None
"""
cur, conn = create_database()
drop_tables(cur, conn)
create_tables(cur, conn)
conn.close()
if __name__ == "__main__":
main() |
989,341 | b393c462723c91b99a04993c04166041e3113baa | # set QT_API environment variable
import os
os.environ["QT_API"] = "pyqt5"
import qtpy
# qt libraries
from qtpy.QtCore import *
from qtpy.QtWidgets import *
from qtpy.QtGui import *
import control.utils as utils
from control._def import *
import control.tracking as tracking
from queue import Queue
from threading import Thread, Lock
import time
import numpy as np
import pyqtgraph as pg
import cv2
from datetime import datetime
class PDAFController(QObject):
def __init__(self):
QObject.__init__(self)
self.image1_received = False
self.image2_received = False
def register_image_from_camera_1(self,image):
self.image1 = image
self.image1_received = True
if(self.image2_received):
self.compute_defocus()
def register_image_from_camera_2(self,image):
self.image2 = image
self.image2_received = True
def compute_defocus(self):
print('computing defocus')
I1 = np.array(self.image1,dtype=np.int)
I2 = np.array(self.image2,dtype=np.int)
I1 = I1 - np.mean(I1)
I2 = I2 - np.mean(I2)
xcorr = cv2.filter2D(I1,cv2.CV_32F,I2)
# cv2.imshow('xcorr',np.array(255*xcorr/np.max(xcorr), dtype = np.uint8 ))
# cv2.imshow('xcorr',self.image2)
cv2.imshow('xcorr',np.array(255*xcorr/np.max(xcorr),dtype=np.uint8))
print(np.max(xcorr))
cv2.waitKey(15)
pass
def close(self):
pass |
989,342 | 7a7d9828d65a44e2ecdfa865879cc833f8d6f34a | # -*- coding: UTF-8 -*-
# 作者:hao.ren3
# 时间:2020/9/22 17:44
# IDE:PyCharm
from flask import Blueprint
bp = Blueprint('main', __name__)
from app.main.routes import index
|
989,343 | 18e47b55e18fa638317f41c70037a4b14c158ef5 | import os
import requests
from clint.textui import progress
class IsoDownloader(object):
def start(self, jobs, path_format):
total_jobs = len(jobs)
job_number = 1
for job in jobs:
path = job.get_path(path_format)
os.makedirs(os.path.dirname(path), exist_ok=True)
print("[{}/{}] Starting to download {} to {}".format(job_number, total_jobs, job.label, path))
job_number += 1
r = requests.get(job.url, stream=True)
with open(path, 'wb') as f:
total_length = int(r.headers.get('content-length'))
for chunk in progress.bar(r.iter_content(chunk_size=1024), expected_size=(total_length / 1024) + 1, hide=False):
if chunk:
f.write(chunk)
f.flush() |
989,344 | e20e39bdb32936788f6e1908d73ecca9f5fee053 | #!/usr/bin/env python3
import math
import time
import json
import random
import yfinance as yf
import pandas as pd
import sys
import random
from datetime import date, timedelta
from pandas_datareader import data as pdr
# override yfinance with pandas – seems to be a common step
def ectract_data():
yf.pdr_override()
# Get stock data from Yahoo Finance – here, asking for about 10 years of Amazon
today = date.today()
decadeAgo = today - timedelta(days=3652)
data = pdr.get_data_yahoo('INTC', start=decadeAgo, end=today).reset_index()
# Other symbols: CSCO – Cisco, NFLX – Netflix, INTC – Intel, TSLA - Tesla
data["Date"] = data["Date"].apply(lambda x: pd.Timestamp(x).date().strftime('%m/%d/%Y'))
data['Buy']=0
data['Sell']=0
for i in range(len(data)):
# Hammer
realbody=math.fabs(data.Open[i]-data.Close[i])
bodyprojection=0.1*math.fabs(data.Close[i]-data.Open[i])
if data.High[i] >= data.Close[i] and data.High[i]-bodyprojection <= data.Close[i] and data.Close[i] > data.Open[i] and data.Open[i] > data.Low[i] and data.Open[i]-data.Low[i] > realbody:
data.at[data.index[i], 'Buy'] = 1
#print("H", data.Open[i], data.High[i], data.Low[i], data.Close[i])
# Inverted Hammer
if data.High[i] > data.Close[i] and data.High[i]-data.Close[i] > realbody and data.Close[i] > data.Open[i] and data.Open[i] >= data.Low[i] and data.Open[i] <= data.Low[i]+bodyprojection:
data.at[data.index[i], 'Buy'] = 1
#print("I", data.Open[i], data.High[i], data.Low[i], data.Close[i])
# Hanging Man
if data.High[i] >= data.Open[i] and data.High[i]-bodyprojection <= data.Open[i] and data.Open[i] > data.Close[i] and data.Close[i] > data.Low[i] and data.Close[i]-data.Low[i] > realbody:
data.at[data.index[i], 'Sell'] = 1
#print("M", data.Open[i], data.High[i], data.Low[i], data.Close[i])
# Shooting Star
if data.High[i] > data.Open[i] and data.High[i]-data.Open[i] > realbody and data.Open[i] > data.Close[i] and data.Close[i] >= data.Low[i] and data.Close[i] <= data.Low[i]+bodyprojection:
data.at[data.index[i], 'Sell'] = 1
#print("S", data.Open[i], data.High[i], data.Low[i], data.Close[i])
return data
def calculation_EC2(history,shots,sigal):
val=[]
elp=time.time()
exct_data=ectract_data()
minhistory = history
shots = shots
for i in range(minhistory, len(exct_data)):
if exct_data.Buy[i]==1: # if we’re interested in Buy signals
mean=exct_data.Close[i-minhistory:i].pct_change(1).mean()
std=exct_data.Close[i-minhistory:i].pct_change(1).std()
# generate much larger random number series with same broad characteristics
simulated = [random.gauss(mean,std) for x in range(shots)]
# sort and pick 95% and 99% - not distinguishing long/short here
simulated.sort(reverse=True)
var95 = simulated[int(len(simulated)*0.95)]
var99 = simulated[int(len(simulated)*0.99)]
print(var95, var99) # so you can see what is being produced
val.append([str(exct_data['Date'][i]),var95, var99])
elp_time=str(time.time() - elp)
return json.dumps({
"val_risk":val,
"Elp_time": elp_time,
})
sys.stdout.write(calculation_EC2(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])))
|
989,345 | c3f52e68417e6a4ae4effae7f4ac8735cc86e2c2 | import sys
import easy_alert.util.util
from datetime import datetime, timedelta
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
class TestUtil(unittest.TestCase):
def test_with_retry(self):
self.assertEqual(easy_alert.util.util.with_retry(3, 1)(lambda: 123), 123)
def test_with_retry_error(self):
def f():
raise Exception('xxx')
t = datetime.now()
with self.assertRaises(Exception) as cm:
easy_alert.util.util.with_retry(2, 1)(f)
s = datetime.now()
self.assertEqual(cm.exception.args[0], 'xxx')
self.assertTrue(timedelta(seconds=2) < s - t)
|
989,346 | 823947f0f1d95ffa6f521a3837144759fe85fbdf | #program to check a number is prime or not
num=int(input("enter a number"))
flg=0
for i in range(2,num):
if(num%i==0):
flg=1
break
else:
flg=0
if(flg>0):
print("not prime")
else:
print("prime") |
989,347 | 4849bccef0784879457189e798a32f8114301303 | import quandl, math
import numpy as np
import pandas as pd
from sklearn import preprocessing, cross_validation, svm
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
from matplotlib import style
import datetime
import pickle
style.use('ggplot')
#Get Acxiom stock data
df = quandl.get("WIKI/ACXM")
#Trim down to relevant variables
df = df[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume']]
#Create new informative measures
df['HL_PCT'] = (df['Adj. High'] - df['Adj. Low'])/df['Adj. Low'] * 100.0
df['PCT_change'] = (df['Adj. Close'] - df['Adj. Open'])/ df['Adj. Open'] * 100.0
#Final dataset with relevant + new variables.
df = df[['Adj. Close', 'HL_PCT', 'PCT_change', 'Adj. Volume']]
print df.tail()
forecast_col = 'Adj. Close'
#Set missing values to -99999, cannot enter NaN values into a machine learning classifier. Most classivier will recodnise such an obtuse number as an outlier, however.
df.fillna(value =-99999, inplace = True)
#Create Timespan to predict ahead (in this instance, 1% of total records into the future)
forecast_out = int(math.ceil(0.01 *len(df)))
#Create output column, this is the price on the date of the decided timespan into the future (1%)
df['label'] = df[forecast_col].shift(-forecast_out)
#Create input and output numpy arrays
X = np.array(df.drop(['label'], 1))
X = preprocessing.scale(X)
X_lately = X[-forecast_out:]
X = X[:-forecast_out]
df.dropna(inplace=True)
#Insert forecast column in dataframe
df['Forecast'] = np.nan
y = np.array(df['label'])
#Train, test and CV
X_Train, X_Test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2)
#Classifier
clf = LinearRegression(n_jobs = -1)
#Fit the classifier
clf.fit(X_Train, y_train)
confidence = clf.score(X_Test, y_test)
print(confidence)
forcast_set = clf.predict(X_lately)
print(forcast_set, confidence, forecast_out)
#Create forecast column
df['Forecast'] = np.nan
last_date = df.iloc[-1].name
last_unix = last_date.timestamp()
one_day = 86400
next_unix = last_unix + one_day
for i in forcast_set:
next_date = datetime.datetime.fromtimestamp(next_unix)
next_unix += 86400
df.loc[next_date] = [np.nan for _ in range(len(df.columns)-1)]+[i]
df['Adj. Close'].plot()
df['Forecast'].plot()
plt.legend(loc=4)
plt.xlabel('Date')
plt.ylabel('Price')
plt.show()
#pickle the classifier
with open('linearregression.pickle', 'wb') as f:
pickle.dump(clf, f)
#open pickled classifier and use:
pickle_in = open('linearregression.pickle', 'rb')
clf = pickle.load(pickle_in)
prediction = clf.predict(X_lately)
print(prediction)
|
989,348 | 35ab4b024bd64dd6b4f91f1394ab206caf67a25d | import logging
import requests
from Test_api.BASE_API.member import Member
class Test_api():
def setup_class(self):
self.member = Member()
def test_get(self):
logging.info('开始执行获取成员信息用例')
r = self.member.test_get()
logging.info('断言返回码')
assert r.json().get('errcode') == 0
def test_add(self):
logging.info('开始执行创建成员信息用例')
r = self.member.test_add()
logging.info('断言返回码')
assert r.json().get('errcode') == 0
def test_update(self):
logging.info('开始执行更新成员信息用例')
r = self.member.test_update()
logging.info('断言返回码')
assert r.json().get('errcode') == 0
def test_delete(self):
logging.info('开始执行删除成员信息用例')
r = self.member.test_delete()
logging.info('断言返回码')
assert r.json().get('errcode') == 0
|
989,349 | 473dfda760e67107893f254f080254c9264b4c3a | import cv2
import numpy as np
from scipy import ndimage
from scipy.misc import imresize
from PIL import Image, ImageStat
from imageio import imread, imsave
def detect_color_image(file, thumb_size=40, MSE_cutoff=22, adjust_color_bias=True):
pil_img = Image.open(file)
bands = pil_img.getbands()
if(bands == ('R','G','B') or bands== ('R','G','B','A')):
thumb = pil_img.resize((thumb_size,thumb_size))
SSE, bias = 0, [0,0,0]
if adjust_color_bias:
bias = ImageStat.Stat(thumb).mean[:3]
bias = [b - sum(bias)/3 for b in bias ]
for pixel in thumb.getdata():
mu = sum(pixel)/3
SSE += sum((pixel[i] - mu - bias[i])*(pixel[i] - mu - bias[i]) for i in [0,1,2])
MSE = float(SSE)/(thumb_size*thumb_size)
if MSE <= MSE_cutoff:
return False
else:
return True
elif len(bands)==1:
return False
else:
return False
def remove_background(img):
""" Remove noise using OTSU's method.
:param img: The image to be processed
:return: The normalized image
"""
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = img.astype(np.uint8)
# Binarize the image using OTSU's algorithm. This is used to find the center
# of mass of the image, and find the threshold to remove background noise
threshold, _ = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# Remove noise - anything higher than the threshold. Note that the image is still grayscale
img[img > threshold] = 255
return img
def remove_background1(img):
""" Remove noise using OTSU's method.
:param img: The image to be processed
:return: The normalized image
"""
#img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = img.astype(np.uint8)
# Binarize the image using OTSU's algorithm. This is used to find the center
# of mass of the image, and find the threshold to remove background noise
threshold, _ = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# Remove noise - anything higher than the threshold. Note that the image is still grayscale
img[img > threshold] = 255
return img |
989,350 | a7c7ea42cb1f5ee5e8ba09dec5d86eb1a53d28db | # -*- coding:utf-8 -*-
import sys
import json
listStr = [{"city": "北京"}, {"name": "大刘"}]
json.dump(listStr, open("listStr.json", "w"), ensure_ascii=False)
dictStr = {"city": "北京", "name": "大刘"}
json.dump(dictStr, open("dictStr.json", "w"), ensure_ascii=False)
# json_load.py
import json
strList = json.load(open("listStr.json"))
print(strList)
# [{u'city': u'\u5317\u4eac'}, {u'name': u'\u5927\u5218'}]
strDict = json.load(open("dictStr.json"))
print(strDict)
# {u'city': u'\u5317\u4eac', u'name': u'\u5927\u5218'}
|
989,351 | 017b7bde18931e35cd3199605009d110fe909614 | from app.models import db, User, Note, Power, Role
def initializer_database():
tables = [
User,
Note,
Power,
Role
]
try:
db.drop_tables(models=tables)
db.create_tables(models=tables)
except Exception as e:
raise e
if __name__ == '__main__':
initializer_database() |
989,352 | e5529dc1bdee3637cf9f1f111a1523c479bd2f0e | import numpy as np
import collections
import matplotlib.pyplot as plt
import argparse
import sys
import os
#data:(nboxes,channel,width,height)
def vis_square(data,title):
data_box=data[0] #box0:(channel,height,width)
#normalize data for display
data_box=(data_box-data_box.min())/(data_box.max()-data_box.min())
n=int(np.ceil(np.sqrt(data_box.shape[0])))
padding=(((0,n**2-data_box.shape[0]),(0,1),(0,1))+((0,0),)*(data_box.ndim-3))
data_box=np.pad(data_box,padding,mode='constant',constant_values=1)
data_box=data_box.reshape((n,n)+data_box.shape[1:]).transpose((0,2,1,3)+tuple(range(4,data_box.ndim+1)))
data_box=data_box.reshape((n*data_box.shape[1],n*data_box.shape[3])+data_box.shape[4:])
plt.imshow(data_box)
plt.title(title+"gt_box")
plt.axis('off')
plt.show()
data_box=data[1] #box0:(channel,height,width)
#normalize data for display
data_box=(data_box-data_box.min())/(data_box.max()-data_box.min())
n=int(np.ceil(np.sqrt(data_box.shape[0])))
padding=(((0,n**2-data_box.shape[0]),(0,1),(0,1))+((0,0),)*(data_box.ndim-3))
data_box=np.pad(data_box,padding,mode='constant',constant_values=1)
data_box=data_box.reshape((n,n)+data_box.shape[1:]).transpose((0,2,1,3)+tuple(range(4,data_box.ndim+1)))
data_box=data_box.reshape((n*data_box.shape[1],n*data_box.shape[3])+data_box.shape[4:])
plt.imshow(data_box)
plt.title(title+"gt_box-50")
plt.axis('off')
plt.show()
data_box=data[2] #box0:(channel,height,width)
#normalize data for display
data_box=(data_box-data_box.min())/(data_box.max()-data_box.min())
n=int(np.ceil(np.sqrt(data_box.shape[0])))
padding=(((0,n**2-data_box.shape[0]),(0,1),(0,1))+((0,0),)*(data_box.ndim-3))
data_box=np.pad(data_box,padding,mode='constant',constant_values=1)
data_box=data_box.reshape((n,n)+data_box.shape[1:]).transpose((0,2,1,3)+tuple(range(4,data_box.ndim+1)))
data_box=data_box.reshape((n*data_box.shape[1],n*data_box.shape[3])+data_box.shape[4:])
plt.imshow(data_box)
plt.title(title+"gt_box+50")
plt.axis('off')
plt.show()
if __name__=='__main__':
parser=argparse.ArgumentParser()
parser.add_argument("--dir",default="features",type=str)
parser.add_argument("--id",default=[0],type=int,nargs='+')
parser.add_argument("--seq",default="basketball",type=str)
args=parser.parse_args()
root_dir="/home/ccjiang/Documents/py-faster-rcnn/caffe-fast-rcnn/examples/tracker"
for id in args.id:
npy_path=os.path.join(root_dir,args.dir,"%s_features_%d.npy"%(args.seq,id))
features_raw=np.load(npy_path)
features_dict=collections.OrderedDict()
for f in features_raw:
features_dict.update(f)
#{'feature4': ,'feature5': ,'feature3': ,'im_info': }
for k,v in features_dict.items():
if k=='im_info': continue
vis_square(v,k) #v:(nboxes=3, channel=256/512, 7,7) |
989,353 | 04466b351a94d0d2035eb2fc19596c0523b793c5 | import os
import functools
import itertools
import pytest
import rlp
from ethereum import blocks
from web3.utils.encoding import (
decode_hex,
)
from alarm_client.contracts.transaction_request import TransactionRequestFactory
from testrpc import testrpc
NULL_ADDRESS = '0x0000000000000000000000000000000000000000'
@pytest.fixture()
def request_tracker(unmigrated_chain, web3):
chain = unmigrated_chain
tracker = chain.get_contract('RequestTracker')
return tracker
@pytest.fixture()
def request_factory(chain, web3, request_tracker):
factory = chain.get_contract('RequestFactory', deploy_args=[request_tracker.address])
chain_code = web3.eth.getCode(factory.address)
assert len(chain_code) > 10
return factory
@pytest.fixture()
def request_lib(chain):
return chain.get_contract('RequestLib')
@pytest.fixture()
def RequestLib(request_lib):
return type(request_lib)
@pytest.fixture()
def execution_lib(chain):
return chain.get_contract('ExecutionLib')
@pytest.fixture()
def ExecutionLib(execution_lib):
return type(execution_lib)
@pytest.fixture()
def payment_lib(chain):
return chain.get_contract('PaymentLib')
@pytest.fixture()
def PaymentLib(payment_lib):
return type(payment_lib)
@pytest.fixture()
def TransactionRequest(chain):
# force lazy deployment of the dependencies for the TransactionRequest
# contract.
chain.get_contract('RequestLib')
BaseTransactionRequest = chain.get_contract_factory('TransactionRequest')
return type(
'TransactionRequest',
(BaseTransactionRequest, TransactionRequestFactory),
{},
)
@pytest.fixture()
def RequestFactory(chain, request_factory):
return type(request_factory)
@pytest.fixture()
def denoms():
from web3.utils.currency import units
int_units = {
key: int(value)
for key, value in units.items()
}
return type('denoms', (object,), int_units)
MINUTE = 60
@pytest.fixture()
def RequestData(chain,
web3,
request_factory,
get_txn_request,
denoms,
txn_recorder,
TransactionRequest):
class _RequestData(object):
_contract = None
def __init__(self,
# claim
claimedBy=NULL_ADDRESS,
claimDeposit=0,
paymentModifier=0,
# meta
createdBy=web3.eth.coinbase,
owner=web3.eth.coinbase,
isCancelled=False,
wasCalled=False,
wasSuccessful=False,
# payment
anchorGasPrice=web3.eth.gasPrice,
donation=12345,
donationBenefactor='0xd3cda913deb6f67967b99d67acdfa1712c293601',
donationOwed=0,
payment=54321,
paymentBenefactor=NULL_ADDRESS,
paymentOwed=0,
# txnData
callData="",
toAddress=txn_recorder.address,
callGas=1000000,
callValue=0,
requiredStackDepth=10,
# schedule
claimWindowSize=None,
freezePeriod=None,
windowSize=None,
windowStart=None,
reservedWindowSize=None,
temporalUnit=1):
if freezePeriod is None:
if temporalUnit == 2:
freezePeriod = 3 * MINUTE
else:
freezePeriod = 10
if windowSize is None:
if temporalUnit == 2:
windowSize = 60 * MINUTE
else:
windowSize = 255
if windowStart is None:
if temporalUnit == 2:
windowStart = web3.eth.getBlock('latest')['timestamp'] + freezePeriod
else:
windowStart = web3.eth.blockNumber + freezePeriod
if reservedWindowSize is None:
if temporalUnit == 2:
reservedWindowSize = 4 * MINUTE
else:
reservedWindowSize = 16
if claimWindowSize is None:
if temporalUnit == 2:
claimWindowSize = 60 * MINUTE
else:
claimWindowSize = 255
self.claimData = type('claimData', (object,), {
'claimedBy': claimedBy,
'claimDeposit': claimDeposit,
'paymentModifier': paymentModifier,
})
self.meta = type('meta', (object,), {
'createdBy': createdBy,
'owner': owner,
'isCancelled': isCancelled,
'wasCalled': wasCalled,
'wasSuccessful': wasSuccessful,
})
self.paymentData = type('paymentData', (object,), {
'anchorGasPrice': anchorGasPrice,
'donation': donation,
'donationBenefactor': donationBenefactor,
'donationOwed': donationOwed,
'payment': payment,
'paymentBenefactor': paymentBenefactor,
'paymentOwed': paymentOwed,
})
self.txnData = type('txnData', (object,), {
'callData': callData,
'toAddress': toAddress,
'callGas': callGas,
'callValue': callValue,
'requiredStackDepth': requiredStackDepth,
})
self.schedule = type('schedule', (object,), {
'claimWindowSize': claimWindowSize,
'freezePeriod': freezePeriod,
'reservedWindowSize': reservedWindowSize,
'temporalUnit': temporalUnit,
'windowStart': windowStart,
'windowSize': windowSize,
})
def to_factory_kwargs(self):
return {
'addressArgs': [
self.meta.owner,
self.paymentData.donationBenefactor,
self.txnData.toAddress,
],
'uintArgs': [
self.paymentData.donation,
self.paymentData.payment,
self.schedule.claimWindowSize,
self.schedule.freezePeriod,
self.schedule.reservedWindowSize,
self.schedule.temporalUnit,
self.schedule.windowSize,
self.schedule.windowStart,
self.txnData.callGas,
self.txnData.callValue,
self.txnData.requiredStackDepth,
],
'callData': self.txnData.callData,
}
def deploy_via_factory(self, deploy_txn=None):
if deploy_txn is None:
deploy_txn = {'value': 10 * denoms.ether}
create_txn_hash = request_factory.transact(
deploy_txn,
).createRequest(
**self.to_factory_kwargs()
)
txn_request = get_txn_request(create_txn_hash)
return txn_request
def to_init_kwargs(self):
return {
'addressArgs': [
self.meta.createdBy,
self.meta.owner,
self.paymentData.donationBenefactor,
self.txnData.toAddress,
],
'uintArgs': [
self.paymentData.donation,
self.paymentData.payment,
self.schedule.claimWindowSize,
self.schedule.freezePeriod,
self.schedule.reservedWindowSize,
self.schedule.temporalUnit,
self.schedule.windowSize,
self.schedule.windowStart,
self.txnData.callGas,
self.txnData.callValue,
self.txnData.requiredStackDepth,
],
'callData': self.txnData.callData,
}
def direct_deploy(self, deploy_txn=None):
if deploy_txn is None:
deploy_txn = {'value': 10 * denoms.ether}
deploy_txn_hash = TransactionRequest.deploy(
transaction=deploy_txn,
kwargs=self.to_init_kwargs(),
)
txn_request_address = chain.wait.for_contract_address(deploy_txn_hash)
return TransactionRequest(address=txn_request_address)
def refresh(self):
if not self._contract:
raise ValueError("No contract set")
self.__dict__.update(self.from_contract(self._contract).__dict__)
@classmethod
def from_contract(cls, txn_request):
address_args, bool_args, uint_args, uint8_args = txn_request.call().requestData()
call_data = txn_request.call().callData()
instance = cls.from_deserialize(
address_args, bool_args, uint_args, uint8_args, call_data,
)
instance._contract = txn_request
return instance
@classmethod
def from_deserialize(cls, address_args, bool_args, uint_args, uint8_args, call_data):
init_kwargs = {
'claimedBy': address_args[0],
'createdBy': address_args[1],
'owner': address_args[2],
'donationBenefactor': address_args[3],
'paymentBenefactor': address_args[4],
'toAddress': address_args[5],
'wasCalled': bool_args[1],
'wasSuccessful': bool_args[2],
'isCancelled': bool_args[0],
'paymentModifier': uint8_args[0],
'claimDeposit': uint_args[0],
'anchorGasPrice': uint_args[1],
'donation': uint_args[2],
'donationOwed': uint_args[3],
'payment': uint_args[4],
'paymentOwed': uint_args[5],
'claimWindowSize': uint_args[6],
'freezePeriod': uint_args[7],
'reservedWindowSize': uint_args[8],
'temporalUnit': uint_args[9],
'windowSize': uint_args[10],
'windowStart': uint_args[11],
'callGas': uint_args[12],
'callValue': uint_args[13],
'requiredStackDepth': uint_args[14],
'callData': call_data,
}
return cls(**init_kwargs)
return _RequestData
@pytest.fixture()
def ValidationErrors():
return (
'InsufficientEndowment',
'ReservedWindowBiggerThanExecutionWindow',
'InvalidTemporalUnit',
'ExecutionWindowTooSoon',
'InvalidRequiredStackDepth',
'CallGasTooHigh',
'EmptyToAddress',
)
@pytest.fixture()
def extract_event_logs(chain, web3, get_all_event_data):
def _extract_event_logs(event_name, contract, txn_hash, return_single=True):
txn_receipt = chain.wait.for_receipt(txn_hash)
filter = contract.pastEvents(event_name, {
'fromBlock': txn_receipt['blockNumber'],
'toBlock': txn_receipt['blockNumber'],
})
log_entries = filter.get()
if len(log_entries) == 0:
all_event_logs = get_all_event_data(txn_receipt['logs'])
if all_event_logs:
raise AssertionError(
"Something went wrong. The following events were found in"
"the logs for the given transaction hash:\n"
"{0}".format('\n'.join([
event_log['event'] for event_log in all_event_logs
]))
)
raise AssertionError(
"Something went wrong. No '{0}' log entries found".format(event_name)
)
if return_single:
event_data = log_entries[0]
return event_data
else:
return log_entries
return _extract_event_logs
@pytest.fixture()
def get_txn_request(chain,
web3,
extract_event_logs,
RequestFactory,
TransactionRequest,
ValidationErrors):
def _get_txn_request(txn_hash):
try:
request_created_data = extract_event_logs('RequestCreated', RequestFactory, txn_hash)
except AssertionError:
validation_error_data = extract_event_logs('ValidationError', RequestFactory, txn_hash, return_single=False)
if validation_error_data:
errors = [
ValidationErrors[entry['args']['error']]
for entry in validation_error_data
]
raise AssertionError("ValidationError: {0}".format(', '.join(errors)))
raise
request_address = request_created_data['args']['request']
txn_request = TransactionRequest(address=request_address)
return txn_request
return _get_txn_request
@pytest.fixture
def ABORT_REASONS_ENUM_KEYS():
return (
'WasCancelled',
'AlreadyCalled',
'BeforeCallWindow',
'AfterCallWindow',
'ReservedForClaimer',
'StackTooDeep',
'InsufficientGas',
)
@pytest.fixture()
def AbortReasons(ABORT_REASONS_ENUM_KEYS):
return type('AbortReasons', (object,), {
name: idx for idx, name in enumerate(ABORT_REASONS_ENUM_KEYS)
})
@pytest.fixture()
def get_abort_data(chain, web3, RequestLib, extract_event_logs):
def _get_abort_data(txn_hash, return_single=False):
return extract_event_logs('Aborted', RequestLib, txn_hash, return_single=return_single)
return _get_abort_data
@pytest.fixture()
def get_execute_data(chain,
web3,
RequestLib,
extract_event_logs,
get_abort_data,
ABORT_REASONS_ENUM_KEYS):
def _get_execute_data(txn_hash):
try:
return extract_event_logs('Executed', RequestLib, txn_hash)
except AssertionError:
abort_data = get_abort_data(txn_hash, return_single=False)
if abort_data:
errors = [
ABORT_REASONS_ENUM_KEYS[entry['args']['reason']]
for entry in abort_data
]
raise AssertionError("Aborted: {0}".format(', '.join(errors)))
raise
return _get_execute_data
@pytest.fixture()
def get_claim_data(chain, web3, RequestLib, extract_event_logs):
return functools.partial(extract_event_logs, 'Claimed', RequestLib)
@pytest.fixture()
def get_cancel_data(chain, web3, RequestLib, extract_event_logs):
return functools.partial(extract_event_logs, 'Cancelled', RequestLib)
@pytest.fixture()
def get_all_event_data(topics_to_abi):
from web3.utils.events import (
get_event_data,
)
def _get_all_event_data(log_entries):
all_event_data = [
get_event_data(topics_to_abi[log_entry['topics'][0]], log_entry)
for log_entry in log_entries
if log_entry['topics'] and log_entry['topics'][0] in topics_to_abi
]
return all_event_data
return _get_all_event_data
@pytest.fixture()
def topics_to_abi(project):
from web3.utils.abi import (
filter_by_type,
event_abi_to_log_topic,
)
all_events_abi = filter_by_type('event', itertools.chain.from_iterable(
contract['abi'] for contract in project.compiled_contracts.values()
))
_topic_to_abi = {
event_abi_to_log_topic(abi): abi
for abi in all_events_abi
}
return _topic_to_abi
@pytest.fixture()
def test_contract_factories(web3):
from solc import compile_files
from populus.utils.filesystem import recursive_find_files
from populus.utils.contracts import (
package_contracts,
construct_contract_factories,
)
base_tests_dir = os.path.dirname(__file__)
solidity_source_files = recursive_find_files(base_tests_dir, '*.sol')
compiled_contracts = compile_files(solidity_source_files)
test_contract_factories = construct_contract_factories(web3, compiled_contracts)
return package_contracts(test_contract_factories)
@pytest.fixture()
def ErrorGenerator(test_contract_factories):
return test_contract_factories.ErrorGenerator
@pytest.fixture()
def error_generator(chain, ErrorGenerator):
chain.contract_factories['ErrorGenerator'] = ErrorGenerator
return chain.get_contract('ErrorGenerator')
@pytest.fixture()
def TransactionRecorder(test_contract_factories):
return test_contract_factories.TransactionRecorder
@pytest.fixture()
def txn_recorder(chain, TransactionRecorder):
chain.contract_factories['TransactionRecorder'] = TransactionRecorder
return chain.get_contract('TransactionRecorder')
@pytest.fixture()
def Proxy(test_contract_factories):
return test_contract_factories.Proxy
@pytest.fixture()
def proxy(chain, Proxy):
chain.contract_factories['Proxy'] = Proxy
return chain.get_contract('Proxy')
@pytest.fixture()
def DiggerProxy(test_contract_factories):
return test_contract_factories.DiggerProxy
@pytest.fixture()
def digger_proxy(chain, DiggerProxy):
chain.contract_factories['DiggerProxy'] = DiggerProxy
return chain.get_contract('DiggerProxy')
@pytest.fixture()
def evm(web3):
tester_client = testrpc.tester_client
assert web3.eth.blockNumber == len(tester_client.evm.blocks) - 1
return tester_client.evm
@pytest.fixture()
def set_timestamp(web3, evm):
def _set_timestamp(timestamp):
evm.block.finalize()
evm.block.commit_state()
evm.db.put(evm.block.hash, rlp.encode(evm.block))
block = blocks.Block.init_from_parent(
evm.block,
decode_hex(web3.eth.coinbase),
timestamp=timestamp,
)
evm.block = block
evm.blocks.append(evm.block)
return timestamp
return _set_timestamp
|
989,354 | 931e41317fa74e2dd1cac32a2e14ac3148890880 | from .base import (Drug, ObservationalStudy, PharmaCompany, # noqa
PaymentRecipient, PharmaPayment) # noqa
from .zerodocs import ZeroDoctor, ZeroDocSubmission # noqa
|
989,355 | 977fecfadde758549d6bd1c8963ef769273562d0 | # Copyright (c) 2018, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Author: Nathan Embery nembery@paloaltonetworks.com
# This module holds custom exceptions for Pan-CNC
class SnippetRequiredException(Exception):
pass
class LoginRequired(Exception):
pass
class TargetCommitException(Exception):
pass
class TargetConnectionException(Exception):
pass
class TargetLoginException(Exception):
pass
class TargetGenericException(Exception):
pass
class CCFParserError(Exception):
pass
class SnippetNotFoundException(Exception):
pass
class ImportRepositoryException(Exception):
pass
class DuplicateSkilletException(ImportRepositoryException):
pass
class RepositoryPermissionsException(ImportRepositoryException):
pass
class DockerExecutionException(Exception):
pass
|
989,356 | e60da5ed1c4294bf548a7b5d2bbc8def04f9bc46 | class Solution:
def romanToInt(self, s):
dict = {'I':1,'V':5,'X':10,'L':50,'C':100,'D':500,'M':1000}
int = 0
for i in range(len(s)-1):
if dict[s[i]] >= dict[s[i+1]]:
int += dict[s[i]]
else:
int -= dict[s[i]]
int += dict[s[-1]]
return int
"""
:type s: str
:rtype: int
"""
|
989,357 | d8e8c15645ed18202c3e40a1d930e0dfa8944101 | import os
INPUT_PATH = os.path.realpath("{0}/../input".format(__loader__.path))
def exists(expenses, expense):
try:
return expenses.index(expense) and True
except:
return False
def get_complement_product(expenses, expected_sum):
for expense in expenses:
complement = expected_sum - expense
if exists(expenses, complement):
return expense * complement
with open(INPUT_PATH) as data:
expenses = [int(line) for line in data.readlines()]
print("solution 1:", get_complement_product(expenses, 2020))
for expense in expenses:
rest_product = get_complement_product(expenses, 2020 - expense)
if rest_product:
print("solution 2:", expense * rest_product)
break
|
989,358 | 42ea3c848e26ca013823d6067f926319c1bd06a9 | import debug_toolbar
from django.conf.urls import include, url
from django.urls import path
from django.contrib import admin
from rest_framework.routers import DefaultRouter
from brands.views import BrandViewSet
from elements.views import ElementViewSet
from projects.views import ProjectViewSet
from account.views import UserViewSet
from materials.views import MaterialViewSet
# Create a router and register our viewsets with it.
router = DefaultRouter()
router.register('brands', BrandViewSet)
router.register(r'elements',ElementViewSet)
router.register(r'projects', ProjectViewSet)
router.register(r'users', UserViewSet)
router.register('materials', MaterialViewSet)
urlpatterns = router.urls
urlpatterns = [
# path('api/brand/', include('brands.urls')),
path('api-auth/', include('rest_framework.urls')),
path('api-token/', include('djoser.urls.authtoken')),
path('api-userinfo/', include('account.urls')),
path('api/', include(router.urls)),
path('__debug__/', include(debug_toolbar.urls)),
path('admin/', admin.site.urls),
path('grappelli/', include('grappelli.urls')), # grappelli URLS
# url(r'vue-element-admin/user/', include('account.urls')),
# path('materials/', include('materials.urls')),
# path('projects/', include('projects.urls')),
# path('excel/', include('excel.urls')),
# path('modules/', include('modules.urls')),
# # path('', ProjectList.as_view(), name='index'),
# path('brands/', include('brands.urls'))
# path('vue-element-admin/user/', include('user.urls'))
]
|
989,359 | 84bffb71c1fa0d7664d3e8734b76b8f30e9674e7 | from django import forms
class LoginForm(forms.Form):
user = forms.CharField(
required=True,
widget= forms.TextInput(
attrs={'class': 'input', 'placeholder': 'enter name', 'name': 'user' })
)
password = forms.CharField(
required=True,
widget=forms.PasswordInput(attrs={'class': 'input', 'placeholder': 'enter password', 'name': 'password' })
) |
989,360 | 3c9a21161561f6781c4f29f15bcea0ef7fc84399 | def updateHand(hand,word):
newHand = hand.copy()
for l in word:
if l in newHand:
newHand[l] = newHand[l]-1
for k, v in newHand.items():
if v == 0:
del newHand[k]
return newHand
updateHand({'a': 1, 'r': 1, 'e': 3, 'd': 2},'red')
|
989,361 | 60420fae630717bfc43dde84feaa96bdf6f105e2 | import sys
import time
import os
import re
import shutil
import time
import json
import html
wpull_hook = globals().get('wpull_hook') # silence code checkers
counter = 0
firsturl = ''
ia_metadata = {'identifier': '', 'files': [], 'title': '', 'description': '', 'mediatype': 'movies', 'collection': 'archiveteam_videobot', 'date': '', 'original_url': '', 'creator': '', 'subject': ''}
video_id = ''
video_title = ''
added_to_list = []
vmap = ''
tempfiles = {}
tries = {}
ignored_urls = []
with open(os.path.join('..', '..', 'services', 'dl__ignores__twitter_com'), 'r') as f:
for line in f:
ignored_urls.append(line.strip())
def accept_url(url_info, record_info, verdict, reasons):
global added_to_list
if (firsturl == '' or url_info["url"] in added_to_list) and not ('?lang=' in url_info["url"] or '\\' in url_info["url"]):
return True
return False
def get_urls(filename, url_info, document_info):
global counter
global firsturl
global ia_metadata
global video_id
global added_to_list
global vmap
global tempfiles
global video_title
newurls = []
def url_allowed(url, parent_url=None):
if re.search(r'^https?://(?:www\.)?twitter\.com/[^/]+/status/' + video_id, url):
return True
elif not re.search(r'^https?://(?:www\.)?twitter\.com/[^/]+/status/[0-9]+', url):
video_user = re.search(r'^https?://(?:www\.)?twitter\.com/([^/]+)/status/[0-9]+', firsturl).group(1)
if re.search(r'^https?://(?:www\.)?twitter\.com/[^/]+(?:/status/)?$', url) and not video_user in url:
return False
return True
return False
def add_url(url, parent_url=None):
if url in added_to_list or url in ignored_urls:
return None
if url_allowed(url, parent_url):
added_to_list.append(url)
newurls.append({'url': url})
if re.search(r'^https?://video\.twimg\.com.+/[0-9a-zA-Z_-]+\.mp4', url_info["url"]):
if re.search(r'^https?://video\.twimg\.com.+/[0-9]+x[0-9]+/[0-9a-zA-Z_-]+\.mp4', url_info["url"]):
filename_new = re.search(r'^https?://video\.twimg\.com.+/([0-9]+x[0-9]+)/[0-9a-zA-Z_-]+\.mp4', url_info["url"]).group(1) + '.mp4'
else:
filename_new = re.search(r'^https?://video\.twimg\.com.+/([0-9a-zA-Z_-]+\.mp4)', url_info["url"]).group(1)
if not os.path.isdir('../ia_item'):
os.makedirs('../ia_item')
if not os.path.isfile('../ia_item/' + filename_new):
shutil.copyfile(filename, '../ia_item/' + filename_new)
ia_metadata['files'].append(filename_new)
# Rename .mp4 video from akamaihd.
elif re.search(r'^https?://(?:snappytv[^\.]+\.akamaihd\.net|amp\.twimg\.com).+/[^/]+\.mp4', url_info["url"]):
filename_new = re.search(r'^https?://(?:snappytv[^\.]+\.akamaihd\.net|amp\.twimg\.com).+/([^/]+\.mp4)', url_info["url"]).group(1)
if not os.path.isdir('../ia_item'):
os.makedirs('../ia_item')
if not os.path.isfile('../ia_item/' + filename_new):
shutil.copyfile(filename, '../ia_item/' + filename_new)
ia_metadata['files'].append(filename_new)
# Queue videos from .m3u8 playlists.
elif re.search(r'^https?://video\.twimg\.com/(?:ext_tw_video|amplify_video).+/[0-9a-zA-Z_-]+\.m3u8', url_info["url"]):
with open(filename, 'r', encoding='utf-8') as file:
for line in file:
part = re.search(r'^(https://video\.twimg\.com)', url_info["url"]).group(1)
if (line.startswith('/ext_tw_video') or line.startswith('/amplify_video')) and line.strip().endswith('.m3u8'):
print(part + line.strip())
newurls.append({'url': part + line.strip()})
elif (line.startswith('/ext_tw_video') or line.startswith('/amplify_video')) and line.strip().endswith('.ts'):
if re.search(r'/[0-9]+x[0-9]+/[0-9a-zA-Z_-]+\.ts', line):
newurl = part + line.strip()
size = re.search(r'/([0-9]+x[0-9]+)/[0-9a-zA-Z_-]+\.ts', line).group(1)
if not size in tempfiles:
tempfiles[size] = []
tempfiles[size].append(re.search(r'/([0-9a-zA-Z_-]+\.ts)', line).group(1))
newurls.append({'url': newurl})
# Prepare .ts videos from .m3u8 playlist for merging.
elif re.search(r'^https://video\.twimg\.com/(?:ext_tw_video|amplify_video).+/[0-9]+x[0-9]+/[0-9a-zA-Z_-]+\.ts', url_info["url"]):
filename_new = re.search(r'/([0-9a-zA-Z_-]+\.ts)', url_info["url"]).group(1)
if not os.path.isdir('../ia_item'):
os.makedirs('../ia_item')
if not os.path.isfile('../ia_item/' + filename_new):
shutil.copyfile(filename, '../ia_item/' + filename_new)
# The vmap URL contains videos.
elif url_info["url"] == vmap:
with open(filename, 'r', encoding='utf-8') as file:
content = file.read()
newurls += [{'url': url} for url in extract_urls(content, url_info["url"]) if not url in added_to_list]
# Prepare the metadata and queue new URLs.
elif re.search('^https://twitter\.com/i/videos/tweet/[0-9]+\?embed_source=clientlib&player_id=0&rpc_init=1', url_info["url"]):
with open(filename, 'r', encoding='utf-8') as file:
months = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6, 'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}
content = file.read()
content_json = html.unescape(re.search(r'data-config="([^"]+)"', content).group(1))
json_ = json.loads(content_json)
if 'vmap_url' in json_:
vmap = json_['vmap_url']
item_description = video_title
if json_['videoInfo']['title']:
item_description += '\n\n' + str(json_['videoInfo']['title'])
if json_['videoInfo']['description']:
item_description += '\n\n' + str(json_['videoInfo']['description'])
item_id = json_['status']['id_str']
item_name = json_['videoInfo']['publisher']['name']
item_url_t_co = json_['cardUrl']
item_date_ = json_['status']['created_at'].replace('T', ' ')
item_date = item_date_[-4:] + '-' + str(months[item_date_[4:7]]).zfill(2) + '-' + item_date_[8:10] + ' ' + item_date_[11:19]
ia_metadata['identifier'] = 'archiveteam_videobot_twitter_com_' + item_id
ia_metadata['title'] = video_title
ia_metadata['description'] = item_description
ia_metadata['date'] = item_date
ia_metadata['original_url'] = firsturl
ia_metadata['url_t_co'] = item_url_t_co
ia_metadata['user_name'] = json_['user']['name']
ia_metadata['user_screen_name'] = json_['user']['screen_name']
ia_metadata['creator'] = item_name
ia_metadata['tweet_id'] = video_id
ia_metadata['subject'] = ';'.join(['videobot', 'archiveteam', 'twitter', 'twitter.com', item_id, item_name]
+ re.findall(r'(#[^#\s]+)', ia_metadata['title'])
+ re.findall(r'#([^#\s]+)', ia_metadata['title']))
if ia_metadata['user_name'] != ia_metadata['creator']:
ia_metadata['creator'] = [item_name, ia_metadata['user_name']]
if not os.path.isdir('../ia_item'):
os.makedirs('../ia_item')
json.dump(json_, open('../ia_item/data_video.json', 'w'), indent = 4, ensure_ascii = False)
ia_metadata['files'].append('data_video.json')
for url in extract_urls(' '.join([content, content_json]), url_info["url"]):
add_url(url)
# Queue first-URL new urls.
if re.search('^https?://(?:www\.)?twitter\.com/[^/]+/status/[0-9]+', url_info["url"]) and video_id == '':
with open(filename, 'r', encoding='utf-8') as file:
content = file.read()
if not os.path.isdir('../ia_item'):
os.makedirs('../ia_item')
json.dump(json.loads(html.unescape(re.search(r'class="json-data"\s+value="([^"]+)"', content).group(1))), open('../ia_item/data.json', 'w'), indent = 4, ensure_ascii = False)
ia_metadata['files'].append('data.json')
video_title = html.unescape(re.search(r'<meta\s+property="og:description"\s+content=".([^"]+).">', content).group(1))
video_id = re.search('^https?://(?:www\.)?twitter\.com/[^/]+/status/([0-9]+)', url_info["url"]).group(1)
if not 'https://twitter.com/i/videos/tweet/' + video_id + '?embed_source=clientlib&player_id=0&rpc_init=1' in added_to_list:
newurls.append({'url': 'https://twitter.com/i/videos/tweet/' + video_id + '?embed_source=clientlib&player_id=0&rpc_init=1'})
if not 'https://twitter.com/i/videos/' + video_id in added_to_list:
newurls.append({'url': 'https://twitter.com/i/videos/' + video_id})
if not 'https://twitter.com/i/videos/' + video_id + '?embed_source=facebook' in added_to_list:
newurls.append({'url': 'https://twitter.com/i/videos/' + video_id + '?embed_source=facebook'})
if firsturl == '':
with open(filename, 'r', encoding='utf-8') as file:
content = file.read()
firsturl = url_info["url"]
for url in extract_urls(content, url_info["url"]):
add_url(url)
for newurl in newurls:
added_to_list.append(newurl["url"])
return [newurl for newurl in newurls if not '?lang=' in newurl['url']]
def exit_status(exit_code):
global ia_metadata
global tempfiles
if os.path.isdir('../ia_item'):
item_identifier = ia_metadata['identifier']
print(tempfiles)
if len(tempfiles) > 0:
for size, files in tempfiles.items():
os.system('ffmpeg -i "concat:' + '|'.join(['../ia_item/' + file for file in files]) + '" -c copy ../ia_item/' + size + '.ts')
ia_metadata['files'].append(size + '.ts')
for file in ['../ia_item/' + file for file in files]:
os.remove(file)
for a, b in ia_metadata.items():
with open('../ia_item/ia_metadata.py', 'a') as file:
if type(b) is list:
content_string = str(b)
else:
content_string = '\'' + str(b).replace('\'', '\\\'').replace('\n', '\\n').replace('\r', '\\r') + '\''
file.write(str(a) + ' = ' + content_string + '\n')
if len(os.listdir('../ia_item')) > 3:
print(ia_metadata['files'])
os.rename('../ia_item', '../../to_be_uploaded/ia_items/ia_item_' + item_identifier + '_' + str(int(time.time())))
return exit_code
handle_response_grabsite = wpull_hook.callbacks.handle_response
def handle_response(url_info, record_info, response_info):
global tries
if not url_info["url"] in tries:
tries[url_info["url"]] = 0
elif tries[url_info["url"]] > 5:
return wpull_hook.actions.FINISH
tries[url_info["url"]] += 1
return handle_response_grabsite(url_info, record_info, response_info)
wpull_hook.callbacks.get_urls = get_urls
wpull_hook.callbacks.exit_status = exit_status
wpull_hook.callbacks.accept_url = accept_url
wpull_hook.callbacks.handle_response = handle_response
def extract_urls(file, url):
extractedurls = []
for extractedurl in re.findall('((?:....=)?(?P<quote>[\'"]).*?(?P=quote))', file, re.I):
extractedstart = ''
if re.search('^....=[\'"](.*?)[\'"]$', extractedurl[0], re.I):
extractedstart = re.search(r'^(....)', extractedurl[0], re.I).group(1)
extractedurl = re.search('^....=[\'"](.*?)[\'"]$', extractedurl[0], re.I).group(1)
else:
extractedurl = extractedurl[0][1:-1]
extractedurl = re.search(r'^([^#]*)', extractedurl, re.I).group(1)
extractedurl = extractedurl.replace('%3A', ':').replace('%2F', '/')
if extractedurl.startswith('http:\/\/') or extractedurl.startswith('https:\/\/') or extractedurl.startswith('HTTP:\/\/') or extractedurl.startswith('HTTPS:\/\/'):
extractedurl = extractedurl.replace('\/', '/')
if extractedurl.startswith('//'):
extractedurls.append("http:" + extractedurl)
elif extractedurl.startswith('/'):
extractedurls.append(re.search(r'^(https?:\/\/[^\/]+)', url, re.I).group(1) + extractedurl)
elif re.search(r'^https?:?\/\/?', extractedurl, re.I):
extractedurls.append(extractedurl.replace(re.search(r'^(https?:?\/\/?)', extractedurl, re.I).group(1), re.search(r'^(https?)', extractedurl, re.I).group(1) + '://'))
elif extractedurl.startswith('?'):
extractedurls.append(re.search(r'^(https?:\/\/[^\?]+)', url, re.I).group(1) + extractedurl)
elif extractedurl.startswith('./'):
if re.search(r'^https?:\/\/.*\/', url, re.I):
extractedurls.append(re.search(r'^(https?:\/\/.*)\/', url, re.I).group(1) + '/' + re.search(r'^\.\/(.*)', extractedurl, re.I).group(1))
else:
extractedurls.append(re.search(r'^(https?:\/\/.*)', url, re.I).group(1) + '/' + re.search(r'^\.\/(.*)', extractedurl, re.I).group(1))
elif extractedurl.startswith('../'):
tempurl = url
tempextractedurl = extractedurl
while tempextractedurl.startswith('../'):
if not re.search(r'^https?://[^\/]+\/$', tempurl, re.I):
tempurl = re.search(r'^(.*\/)[^\/]*\/', tempurl, re.I).group(1)
tempextractedurl = re.search(r'^\.\.\/(.*)', tempextractedurl).group(1)
extractedurls.append(tempurl + tempextractedurl)
elif extractedstart == 'href':
if re.search(r'^https?:\/\/.*\/', url, re.I):
extractedurls.append(re.search(r'^(https?:\/\/.*)\/', url, re.I).group(1) + '/' + extractedurl)
else:
extractedurls.append(re.search(r'^(https?:\/\/.*)', url, re.I).group(1) + '/' + extractedurl)
for extractedurl in re.findall(r'>[^<a-zA-Z0-9]*(https?:?//?[^<]+)<', file, re.I):
extractedurl = re.search(r'^([^#]*)', extractedurl, re.I).group(1)
extractedurls.append(extractedurl.replace(re.search(r'^(https?:?\/\/?)', extractedurl, re.I).group(1), re.search(r'^(https?)', extractedurl, re.I).group(1) + '://'))
for extractedurl in re.findall(r'\[[^<a-zA-Z0-9]*(https?:?//?[^\]]+)\]', file, re.I):
extractedurl = re.search(r'^([^#]*)', extractedurl, re.I).group(1)
extractedurls.append(extractedurl.replace(re.search(r'^(https?:?\/\/?)', extractedurl, re.I).group(1), re.search(r'^(https?)', extractedurl, re.I).group(1) + '://'))
return [extractedurl.replace('&', '&').replace('&', '&') for extractedurl in extractedurls] |
989,362 | 9bd80ba5cefbe133fcaee14ae556f63434f637e7 | import numpy as np
def task1_1(position):
return position[0]**2 + position[1]**2 + 1
def task1_3(n_particles, particle_position_vector):
velocity_vector = ([np.array([0, 0]) for _ in range(n_particles)])
iteration = 0
while iteration < n_iterations:
for i in range(n_particles):
fitness_cadidate = task1_1(particle_position_vector[i])
print(fitness_cadidate, ' ', particle_position_vector[i])
if(pbest_fitness_value[i] > fitness_cadidate):
pbest_fitness_value[i] = fitness_cadidate
pbest_position[i] = particle_position_vector[i]
if(gbest_fitness_value > fitness_cadidate):
gbest_fitness_value = fitness_cadidates
gbest_position = particle_position_vector[i]
if(abs(gbest_fitness_value - target) < target_error):
break
for i in range(n_particles):
new_velocity = (W*velocity_vector[i]) + (c1*random.random()) * (pbest_position[i] - particle_position_vector[i]) + (c2*random.random()) * (gbest_position-particle_position_vector[i])
new_position = new_velocity + particle_position_vector[i]
particle_position_vector[i] = new_position
iteration = iteration + 1
def task1_5():
"""
The choice of population size is related to the complexity of the problem. As the complexity of the problem
increases, the population size also grows
The inertia weight ω affects the particle’s global and local search ability.
Along with the increase of the maximum velocity, mean fitness value is decreasing and success rate is increasing, which means
the convergence and stability of the algorithm is becoming stronger and stronger.
"""
def task2_1(parents, n_offspring):
n_parents = len(parents)
offspring = []
for i in range(n_offspring):
random_dad = parents[np.random.randit(low - 0, high = n_parents - 1)]
random_mom = parents[np.random.randit(low - 0, high = n_parents - 1)]
mask_dad = np.random.randit(0, 2, size = np.array(random_dad).shape)
mask_mom = np.logical_not(mask_dad)
child = np.add(np.multiply(random_dad, mask_dad), np.multiply(random_mom, mask_mom))
offspring.append(child)
def task2_3():
"""
population size says how many chromosomes are in population
crossover probabibility says how often crossover will be performed
mutation probabibility says how often mutation will occur
"""
|
989,363 | 47c124bdc57cb05d910187f9eb6e9bafbf711173 | # -*- coding: utf-8 -*-
# @Author: liusongwei
# @Date: 2019-05-16 13:54:38
# @Last Modified by: liusongwei
# @Last Modified time: 2019-05-18 14:24:35
import numpy as np
import tensorflow as tf
# with tf.Graph().as_default() as g1:
# base64_str = tf.placeholder(tf.string, name='input_string')
# input_str = tf.decode_base64(base64_str)
# decoded_image = tf.image.decode_png(input_str, channels=1)
# # Convert from full range of uint8 to range [0,1] of float32.
# decoded_image_as_float = tf.image.convert_image_dtype(decoded_image,
# tf.float32)
# decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
# resize_shape = tf.stack([28, 28])
# resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)
# resized_image = tf.image.resize_bilinear(decoded_image_4d,
# resize_shape_as_int)
# # 展开为1维数组
# resized_image_1d = tf.reshape(resized_image, (-1, 28 * 28))
# print(resized_image_1d.shape)
# tf.identity(resized_image_1d, name="DecodeJPGOutput")
# with tf.Graph().as_default() as g_combined:
# with tf.Session(graph=g_combined) as sess:
# x = tf.placeholder(tf.string, name="base64_input")
# y, = tf.import_graph_def(g1def, input_map={"input_string:0": x}, return_elements=["DecodeJPGOutput:0"])
# z, = tf.import_graph_def(g2def, input_map={"myInput:0": y}, return_elements=["myOutput:0"])
# tf.identity(z, "myOutput")
# tf.saved_model.simple_save(sess,
# "./modelbase64",
# inputs={"base64_input": x},
# outputs={"myOutput": z})
def load_graph(model_path):
graph_def = tf.GraphDef()
with open(model_path, "rb") as f:
graph_def.ParseFromString(f.read())
return graph_def
def clip_graph_(model_path):
with tf.Graph().as_default() as graph:
with tf.Session(graph=graph) as sess:
new_input1=tf.placeholder(dtype=tf.float32,name="images")
graph1=load_graph(model_path)
z,=tf.import_graph_def(graph1,input_map={"skip_layer_sum_0_1/add:0":new_input},
return_elements=["ground_truth/Reshape:0"])
output=tf.identity(z,"new_ground_truth/Reshape")
const_graph=tf.graph_util.convert_variables_to_constants(sess,sess.graph_def,
output_node_names=['new_ground_truth/Reshape'])
# 训练完成之后保存模型为.pb文件
with tf.gfile.FastGFile('./model.pb',mode='wb') as f:
f.write(const_graph.SerializeToString())
def clip_graph_2(model_path):
with tf.Graph().as_default() as graph:
with tf.Session(graph=graph) as sess:
new_input1=tf.placeholder(dtype=tf.float32,name="images")
new_input2=tf.placeholder(dtype=tf.float32,name="pool5_out")
graph1=load_graph(model_path)
tf.import_graph_def(graph1,input_map={
"smallVGG16_alpha0.25_conv1_1_pool5/pool5/MaxPool:0":new_input2,
"images:0":new_input1},name='')
out_origin=graph.get_tensor_by_name('action/Reshape_1:0')
print(out_origin)
output=tf.identity(out_origin,"new_output")
print(output)
const_graph=tf.graph_util.convert_variables_to_constants(sess,sess.graph_def,
output_node_names=['new_output'])
# 训练完成之后保存模型为.pb文件
with tf.gfile.FastGFile('./sub_model.pb',mode='wb') as f:
f.write(const_graph.SerializeToString())
def show_all_node():
# review the graph node
tensor_node_list = [tensor for tensor in tf.get_default_graph().as_graph_def().node]
for tensor in tensor_node_list:
print('{}'.format(tensor.name), '\n')
def split_box(pb_file):
name=os.path.split(pb_file)
output_graph_path=os.path.join(name[0],'graph\\')
return output_graph_path
def load_and_read_graph(model_path,new_model_path):
test_data=np.zeros((1,10,120,80,3))
with tf.Graph().as_default() as graph:
image=tf.placeholder(dtype=tf.float32,name='image_input')
graph_def1=load_graph(model_path)
tf.import_graph_def(graph_def1,input_map={'images:0':image},name='')
shape=graph.get_tensor_by_name('smallVGG16_alpha0.25_conv1_1_pool5/Shape:0')
print(shape)
reshape1=graph.get_tensor_by_name('smallVGG16_alpha0.25_conv1_1_pool5/Reshape:0')
print(reshape1)
poolingout=graph.get_tensor_by_name('smallVGG16_alpha0.25_conv1_1_pool5/pool5/MaxPool:0')
print(poolingout)
packs=graph.get_tensor_by_name('smallVGG16_alpha0.25_conv1_1_pool5/Reshape_1/shape:0')
print(packs)
reshape_node=graph.get_tensor_by_name('smallVGG16_alpha0.25_conv1_1_pool5/Reshape_1:0')
print(reshape_node)
output_put1=graph.get_tensor_by_name('action/Reshape_1:0')
print(output_put1)
graph_def2=load_graph(new_model_path)
tf.import_graph_def(graph_def2,input_map={'pool5_out:0':poolingout,
'images:0':image},name='')
output_put2=tf.get_default_graph().get_tensor_by_name('new_output:0')
output_put1=tf.identity(output_put2,name="fusion_output")
print(output_put1)
with tf.Session(graph=graph) as sess:
results2=sess.run(output_put1,feed_dict={image:test_data})
print(results2)
print(results2.shape)
const_graph=tf.graph_util.convert_variables_to_constants(sess,sess.graph_def,
output_node_names=['fusion_output'])
# 训练完成之后保存模型为.pb文件
with tf.gfile.FastGFile('./fuse_model_.pb',mode='wb') as f:
f.write(const_graph.SerializeToString())
def run_clip_graph(model_path):
test_data=np.zeros((10,136,240,9))
with tf.Graph().as_default() as graph:
with tf.Session(graph=graph) as sess:
image=tf.placeholder(dtype=tf.float32,shape=[None,136,240,9])
graph_def=load_graph(model_path)
tf.import_graph_def(graph_def,input_map={'new_skip_layer_sum_0_1:0':image},name='')
output_put=tf.get_default_graph().get_tensor_by_name('ground_truth/Reshape:0')
print(output_put)
result=sess.run(output_put,feed_dict={image:test_data})
print(result)
print(result.shape)
def load_whole_graph(pb_file):
with tf.gfile.GFile(pb_file, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def, name="")
return graph
def read_node_shape(model_path):
graph=load_whole_graph(model_path)
imges=graph.get_tensor_by_name('images:0')
print(imges)
shape=graph.get_tensor_by_name('smallVGG16_alpha0.25_conv1_1_pool5/Shape:0')
print(shape)
reshape1=graph.get_tensor_by_name('smallVGG16_alpha0.25_conv1_1_pool5/Reshape:0')
print(reshape1)
poolingout=graph.get_tensor_by_name('smallVGG16_alpha0.25_conv1_1_pool5/pool5/MaxPool:0')
print(poolingout)
packs=graph.get_tensor_by_name('smallVGG16_alpha0.25_conv1_1_pool5/Reshape_1/shape:0')
print(packs)
rshape2=graph.get_tensor_by_name('smallVGG16_alpha0.25_conv1_1_pool5/Reshape_1:0')
print(rshape2)
out=graph.get_tensor_by_name('action/Reshape_1:0')
print(out)
if __name__=="__main__":
model_path=r"./conv_lstm_benchmark_tf_graph.pb"
new_model_path=r"./model.pb"
# read_node_shape(model_path)
# clip_graph_2(model_path)
load_and_read_graph(model_path,r'./sub_model.pb')
# run_clip_graph(new_model_path)
# load_and_read_graph(model_path,new_model_path)
|
989,364 | 91d4c2dfcdcd9c24faed4dca8b482e34fe76472d | from rest_framework import serializers, viewsets, pagination
from images.models import Image
class ImagesSerializer(serializers.ModelSerializer):
class Meta:
model = Image
fields = ('id', 'image', 'width', 'height')
|
989,365 | 61281760144b056e25119030ad9cd95e6be9e031 | from smoothies import smoothie_ingredients
from soupingredientsstandard import soup_ingredients
from saladtoppings import salad_ingredients
from dangerdanger import danger_ingredients
from random import choice, randint
food_type_databases = [smoothie_ingredients, soup_ingredients, salad_ingredients]
class DangerFactor(object):
"""Creates new_ingredient list which ends up replacing the toppings attribute on RandomRecipe.
The severity of the changes depends on the danger level which is passed in from the RandomRecipe object"""
def __init__(self, current_recipe):
"""Initializes and selects the correct the danger level to run"""
self.current_recipe = current_recipe
self.danger_level = int(current_recipe.danger)
#self.danger_level is passed in from RandomRecipe
if self.danger_level == 0:
self.level_zero()
elif self.danger_level == 1:
self.level_one()
elif self.danger_level == 2:
self.level_two()
elif self.danger_level == 3:
self.level_three()
elif self.danger_level == 4:
self.level_four()
elif self.danger_level == 5:
self.level_five()
def level_zero(self):
"""Just sets the new_ingredients to the old because there should be no change"""
self.new_ingredients = self.current_recipe.toppings
def level_one(self):
"""Swaps one ingredient with a random choice within food_type"""
swap_spot = randint(0,len(self.current_recipe.toppings)-1)
#if statements are to stay within food type of original recipe
if self.current_recipe.recipe_type == 'soup':
new_ingredient = choice(soup_ingredients)
self.current_recipe.toppings[swap_spot] = new_ingredient
if self.current_recipe.recipe_type == 'smoothie':
new_ingredient = choice(smoothie_ingredients)
self.current_recipe.toppings[swap_spot] = new_ingredient
if self.current_recipe.recipe_type == 'salad':
new_ingredient = choice(salad_ingredients)
self.current_recipe.toppings[swap_spot] = new_ingredient
if self.current_recipe.recipe_type == 'sandwich':
new_ingredient = choice(sandwich_ingredients)
self.current_recipe.toppings[swap_spot] = new_ingredient
self.new_ingredients = self.current_recipe.toppings
def level_two(self):
"""Replace an ingredient with a random ingredient without
regarding food_type"""
#swaps a random ingredient by choosing a random place
swap_spot = randint(0,len(self.current_recipe.toppings)-1)
#chooses which food type to choose from
extra_ingredient_type = choice(food_type_databases)
new_ingredient = choice(extra_ingredient_type)
self.current_recipe.toppings[swap_spot] = new_ingredient
self.new_ingredients = self.current_recipe.toppings
def level_three(self):
"""Replaces 2 ingredients in the recipe: one being in the food type
and one danger ingredient"""
swap_spot_1 = randint(0,len(self.current_recipe.toppings)-1)
swap_spot_2 = randint(0,len(self.current_recipe.toppings)-1)
while swap_spot_1 == swap_spot_2:
#so it doesn't replace the same spot twice
swap_spot_2 = randint(0,len(self.current_recipe.toppings)-1)
if self.current_recipe.recipe_type == 'soup':
new_ingredient = choice(soup_ingredients)
self.current_recipe.toppings[swap_spot_1] = new_ingredient
if self.current_recipe.recipe_type == 'smoothie':
new_ingredient = choice(smoothie_ingredients)
self.current_recipe.toppings[swap_spot_1] = new_ingredient
if self.current_recipe.recipe_type == 'salad':
new_ingredient = choice(salad_ingredients)
self.current_recipe.toppings[swap_spot_1] = new_ingredient
if self.current_recipe.recipe_type == 'sandwich':
new_ingredient = choice(sandwich_ingredients)
self.current_recipe.toppings[swap_spot_1] = new_ingredient
self.current_recipe.toppings[swap_spot_2] = choice(danger_ingredients)
self.new_ingredients = self.current_recipe.toppings
def level_four(self):
"""Replaces 2 ingredients in the recipe: one without regarding
food type, and one danger ingredient"""
swap_spot_1 = randint(0,len(self.current_recipe.toppings)-1)
swap_spot_2 = randint(0,len(self.current_recipe.toppings)-1)
while swap_spot_1 == swap_spot_2:
swap_spot_2 = randint(0,len(self.current_recipe.toppings)-1)
extra_ingredient_type = choice(food_type_databases)
new_ingredient = choice(extra_ingredient_type)
self.current_recipe.toppings[swap_spot_1] = new_ingredient #for ingredient replaced without food type
self.current_recipe.toppings[swap_spot_2] = choice(danger_ingredients) #for ingredient replaced with danger ingredient
self.new_ingredients = self.current_recipe.toppings
def level_five(self):
"""Replaces 1-2 ingredients with danger ingredients"""
swap_spot_1 = randint(0,len(self.current_recipe.toppings)-1)
swap_spot_2 = randint(0,len(self.current_recipe.toppings)-1)
while swap_spot_1 == swap_spot_2:
swap_spot_2 = randint(0,len(self.current_recipe.toppings)-1)
#yes it can be the same danger ingredient, twice. it's dangerous
self.current_recipe.toppings[swap_spot_1] = choice(danger_ingredients)
self.current_recipe.toppings[swap_spot_2] = choice(danger_ingredients)
self.new_ingredients = self.current_recipe.toppings
def remix_to_danger(current_recipe):
"""Runs danger factor"""
danger_recipe = DangerFactor(current_recipe)
return danger_recipe.new_ingredients |
989,366 | a0b406f88ad24bddecb633441a605649f0602771 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
data = pd.read_csv('data_linear.csv').values
x = data[:, 0].reshape(-1, 1)
y = data[:, 1].reshape(-1, 1)
plt.scatter(x, y)
plt.xlabel('mét vuông')
plt.ylabel('giá')
lrg = LinearRegression()
# Train mô hình với data giá đất
lrg.fit(x, y)
# Đoán giá nhà đất
y_pred = lrg.predict(x)
plt.plot((x[0], x[-1]),(y_pred[0], y_pred[-1]), 'r')
plt.show()
# Lưu nhiều tham số với numpy.savez(), định dạng '.npz'
np.savez('w2.npz', a=lrg.intercept_, b=lrg.coef_)
# Lấy lại các tham số trong file .npz
k = np.load('w2.npz')
lrg.intercept_ = k['a']
lrg.coef_ = k['b'] |
989,367 | 3929a8383c75d132a6dba889cd6c4e004261375d |
import MySQLdb
import struct, os
db=MySQLdb.connect(host="localhost",user="root", passwd="root",db="unweb_iub")
cursor=db.cursor()
sql_ses="""select DISTINCT SES_ID FROM hde"""
cursor.execute(sql_ses)
ses_rows=cursor.fetchall()
for srows in ses_rows:
print srows[0]
sid=input("PICK A SESSION ID FROM THE LIST: ")
sql="""select DISTINCT id, url_crawler.url FROM url_crawler , hde WHERE url_crawler.url= hde.URL AND hde.SES_ID=%d AND hde.INDEXED=1; """ %sid
x=[]
m=[]
c=0
cursor.execute(sql)
rows=cursor.fetchall()
print "these URLs are indexed and crawled"
for row in rows:
print row
a=row[0]
b=row[1]
x.append([a,b])
c+=1
print "lists of childs in first level and their information (only unindexed ones)"
for l in range(0,c):
rw="""SELECT id_1 FROM Link WHERE Link.id_2=%d""" % x[l][0]
cursor.execute(rw)
childs=cursor.fetchall()
print "----set-----"
print childs
print "----info-----"
for c in childs :
info="""SELECT url, secured, tld, indexed, http_access FROM url_crawler, Link WHERE id_1 = %d AND url_crawler.id=Link.id_1 AND url_crawler.indexed=0 """ % c[0]
cursor.execute(info)
inform=cursor.fetchall()
for inn in inform:
print inn
cursor.close()
db.close()
|
989,368 | 59ece7605884a0f0ef8bd57fc154d03c30971d33 | import boto3
INSTANCEID = ''
ec2 = boto3.resource('ec2')
instance = ec2.Instance(INSTANCEID)
print(instance.volumes.all())
|
989,369 | 67ba2c99e1dd6edb40f62bb088c74fb30aba1868 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import pms.supplier.models.category
class Migration(migrations.Migration):
dependencies = [
('supplier', '0019_add_field_salecategory_cid'),
]
operations = [
migrations.AlterField(
model_name='salecategory',
name='cid',
field=models.IntegerField(default=pms.supplier.models.category.default_salecategory_cid, unique=True, verbose_name='\u7c7b\u76eeID'),
),
]
|
989,370 | e014f9879692335d0236fb446187318cd1b58d1d | # =============================================================================
# MIT License
#
# Copyright (c) 2021 luckytyphlosion
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
from guietta import Gui, QFileDialog, _, Exceptions, QLineEdit, QMessageBox, QPushButton, QIcon
import pathlib
import configparser
import subprocess
import time
import platform
import ctypes
import os
# filename = QFileDialog.getOpenFileName(None, "Open File",
# "/home",
# "Images (*.png *.xpm *.jpg *.jpeg)")
#
# print(filename)
def read_config():
config_filepath = pathlib.Path("config.ini")
if config_filepath.is_file():
with open(config_filepath, "r") as f:
config = configparser.ConfigParser(allow_no_value=True)
config.read_file(f)
else:
config = configparser.ConfigParser(allow_no_value=True)
config["Directories"] = {
"InputAVIDirectory": "",
"OutputMOVDirectory": ""
}
serialize_config(config)
return config
def serialize_config(config):
with open("config.ini", "w+") as f:
config.write(f)
def write_and_serialize_config(config, section, key, value):
config[section][key] = value
serialize_config(config)
def get_file(config, config_key, editline, q_file_dialog_func, caption, file_type):
filename_tuple = q_file_dialog_func(None, caption,
config["Directories"][config_key],
file_type
)
#print(f"filename_tuple: {filename_tuple}")
avi_filename = filename_tuple[0]
if avi_filename != "":
avi_filepath = pathlib.Path(avi_filename)
avi_folder_name = str(avi_filepath.parent.resolve())
write_and_serialize_config(config, "Directories", config_key, avi_folder_name)
#print(avi_filename)
editline.setText(avi_filename)
def wait_ffmpeg_finish(ffmpeg_process):
while True:
return_code = ffmpeg_process.poll()
if return_code is not None:
break
else:
time.sleep(0.25)
if return_code == 0:
return True
else:
error_output = ""
error_output += f"return code: {return_code}\n"
error_output += "=== stderr below ===\n"
try:
stdout, stderr = ffmpeg_process.communicate(timeout=5)
error_output += f"{stderr}\n"
except TimeoutExpired:
ffmpeg_process.kill()
error_output += "Something went wrong while trying to retrieve error information\n"
with open("error.log", "w+") as f:
f.write(error_output)
return False
class Gui2(Gui):
def _close_handler(self, event):
if self.converting_avi and self.ffmpeg_process.poll() is None:
self.ffmpeg_process.terminate()
while self.ffmpeg_process.poll() is None:
time.sleep(0.1)
pathlib.Path(self.output_filename).unlink(missing_ok=True)
super()._close_handler(event)
def set_icon(self, filename):
app_icon = QIcon()
app_icon.addFile(filename)
self._app.setWindowIcon(app_icon)
if platform.system() == "Windows":
app_id = 'taslabz.ffv1_to_vegas_friendly.qm.1' # arbitrary string
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(app_id)
def main():
config = read_config()
input_avi_filename_editline = QLineEdit("")
output_mov_filename_editline = QLineEdit("")
convert_button = QPushButton("Convert")
converting_avi = False
gui = Gui2(
["Input AVI", input_avi_filename_editline, ["Open"]],
["Output MOV", output_mov_filename_editline, ["Save"]],
[_, convert_button, _],
exceptions = Exceptions.OFF
)
gui.converting_avi = False
gui.ffmpeg_process = None
gui.set_icon("gui/i_love_ffmpeg.png")
def get_input_avi(gui, *args):
get_file(config, "InputAVIDirectory", input_avi_filename_editline, QFileDialog.getOpenFileName, "Open File", "AVI files (*.avi)")
def get_output_mov(gui, *args):
get_file(config, "OutputMOVDirectory", output_mov_filename_editline, QFileDialog.getSaveFileName, "Save File", "MOV files (*.mov)")
def convert_avi(gui, *args):
if gui.converting_avi:
return
input_filename = input_avi_filename_editline.text()
input_filepath = pathlib.Path(input_filename)
output_filename = output_mov_filename_editline.text()
error_msg = ""
if input_filename == "":
error_msg += "- Input AVI is not specified!\n"
elif not input_filepath.is_file():
error_msg += "- Input AVI does not exist or is not a file!\n"
if output_filename == "":
error_msg += "- Output MOV is not specified!\n"
if error_msg != "":
QMessageBox.critical(None, "Error", f"Error occurred!\n{error_msg}")
return
#with open("log.txt", "w+") as f:
# f.write(f"os.getcwd(): {os.getcwd()}")
convert_button.setEnabled(False)
convert_button.setText("Converting...")
gui.converting_avi = True
gui.ffmpeg_process = subprocess.Popen(
("ffmpeg/ffmpeg.exe", "-y", "-i", input_filename, "-c:v", "png", output_filename),
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding="ascii"
)
gui.output_filename = output_filename
gui.execute_in_background(wait_ffmpeg_finish, args=(gui.ffmpeg_process,), callback=after_ffmpeg_finish)
def after_ffmpeg_finish(gui, ret_value):
if ret_value:
QMessageBox.information(None, "Information", "Converted!")
else:
QMessageBox.critical(None, "Error", "Something went wrong. Details can be found in error.log")
convert_button.setEnabled(True)
convert_button.setText("Convert")
gui.converting_avi = False
gui.events(
[_, _, get_input_avi],
[_, _, get_output_mov],
[_, convert_avi, _]
)
#print("Running!")
try:
gui.run()
except Exception as e:
exception_output = f"Exception occurred: {e}\n{''.join(traceback.format_tb(e.__traceback__))}\n"
with open("exception.log", "w+") as f:
f.write(exception_output)
raise RuntimeError(e)
if __name__ == "__main__":
main()
|
989,371 | 3cf11133c33b84d9d802ab37962ed0a48db0047b | import pytest
from pytest_lazyfixture import lazy_fixture
from .hexagonal import * # noqa
# Fixtures need to be visible for lazy_fixture() calls.
from .honeycomb import * # noqa
from .kagome import * # noqa
from .linkerless_honeycomb import * # noqa
from .periodic_hexagonal import * # noqa
from .periodic_honeycomb import * # noqa
from .periodic_kagome import * # noqa
from .periodic_linkerless_honeycomb import * # noqa
from .periodic_square import * # noqa
from .square import * # noqa
@pytest.fixture(
params=(
lazy_fixture("cof_honeycomb"),
lazy_fixture("cof_kagome"),
lazy_fixture("cof_square"),
lazy_fixture("cof_hexagonal"),
lazy_fixture("cof_linkerless_honeycomb"),
lazy_fixture("cof_periodic_honeycomb"),
lazy_fixture("cof_periodic_kagome"),
lazy_fixture("cof_periodic_square"),
lazy_fixture("cof_periodic_hexagonal"),
lazy_fixture("cof_periodic_linkerless_honeycomb"),
),
)
def cof(request):
return request.param
|
989,372 | a86194d923edab80a6edac6d074c02b20389cb6e | def solution(brown, yellow):
answer = list()
for i in range(3, brown-2):
find = False
for j in range(3, brown-2):
if i < j:
break
if ((2 * i) + (2 * j)) - 4 == brown and (i-2)*(j-2) == yellow:
answer = [i, j]
find = True
break
if find:
break
return answer
'''
제곱근을 이용해 둘레 확인 -> 속도차 발생
def solution(brown, red):
for i in range(1, int(red**(1/2))+1):
if red % i == 0:
if 2*(i + red//i) == brown-4:
return [red//i+2, i+2]
'''
|
989,373 | 81e6c05f8bdc9246eedbd2b3176ed781bf45dcb2 | """
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from decimal import Decimal
from django.core.urlresolvers import reverse
from django.db.models.loading import get_model
from django.test import TestCase
from django.test.client import Client
import mock
import getpaid.backends.payu
import getpaid.backends.transferuj
from getpaid_test_project.orders.models import Order
class OrderTest(TestCase):
def setUp(self):
self.client = Client()
def test_successful_create_payment_dummy_eur(self):
"""
Tests if payment is successfully created
"""
order = Order(name='Test EUR order', total=100, currency='EUR')
order.save()
response = self.client.post(reverse('getpaid-new-payment', kwargs={'currency' : 'EUR'}),
{'order': order.pk,
'backend': 'getpaid.backends.dummy'}
)
self.assertEqual(response.status_code, 302)
Payment = get_model('getpaid', 'Payment')
payment = Payment.objects.get(order=order.pk)
self.assertEqual(payment.backend, 'getpaid.backends.dummy')
self.assertEqual(payment.amount, order.total)
self.assertEqual(payment.currency, order.currency)
self.assertEqual(payment.status, 'in_progress')
self.assertEqual(payment.paid_on, None)
self.assertEqual(payment.amount_paid, 0)
def test_successful_create_payment_payu_pln(self):
"""
Tests if payment is successfully created
"""
order = Order(name='Test PLN order', total=100, currency='PLN')
order.save()
response = self.client.post(reverse('getpaid-new-payment', kwargs={'currency' : 'PLN'}),
{'order': order.pk,
'backend': 'getpaid.backends.payu'}
)
self.assertEqual(response.status_code, 302)
Payment = get_model('getpaid', 'Payment')
payment = Payment.objects.get(order=order.pk)
self.assertEqual(payment.backend, 'getpaid.backends.payu')
self.assertEqual(payment.amount, order.total)
self.assertEqual(payment.currency, order.currency)
self.assertEqual(payment.status, 'in_progress')
self.assertEqual(payment.paid_on, None)
self.assertEqual(payment.amount_paid, 0)
def test_failure_create_payment_eur(self):
"""
Tests if payment fails when wrong currency for backend.
PayU accepts only PLN currency payments.
"""
order = Order(name='Test EUR order', total=100, currency='EUR')
order.save()
response = self.client.post(reverse('getpaid-new-payment', kwargs={'currency' : 'EUR'}),
{'order': order.pk,
'backend': 'getpaid.backends.payu'}
)
self.assertEqual(response.status_code, 404)
def fake_payment_get_response_success(request):
class fake_response:
def read(self):
return """<?xml version="1.0" encoding="UTF-8"?>
<response>
<status>OK</status>
<trans>
<id>234748067</id>
<pos_id>123456789</pos_id>
<session_id>99:1342616247.41</session_id>
<order_id>99</order_id>
<amount>12345</amount>
<status>99</status>
<pay_type>t</pay_type>
<pay_gw_name>pt</pay_gw_name>
<desc>Test 2</desc>
<desc2></desc2>
<create>2012-07-18 14:57:28</create>
<init></init>
<sent></sent>
<recv></recv>
<cancel>2012-07-18 14:57:30</cancel>
<auth_fraud>0</auth_fraud>
<ts>1342616255805</ts>
<sig>4d4df5557b89a4e2d8c48436b1dd3fef</sig> </trans>
</response>"""
return fake_response()
def fake_payment_get_response_failure(request):
class fake_response:
def read(self):
return """<?xml version="1.0" encoding="UTF-8"?>
<response>
<status>OK</status>
<trans>
<id>234748067</id>
<pos_id>123456789</pos_id>
<session_id>98:1342616247.41</session_id>
<order_id>98</order_id>
<amount>12345</amount>
<status>2</status>
<pay_type>t</pay_type>
<pay_gw_name>pt</pay_gw_name>
<desc>Test 2</desc>
<desc2></desc2>
<create>2012-07-18 14:57:28</create>
<init></init>
<sent></sent>
<recv></recv>
<cancel>2012-07-18 14:57:30</cancel>
<auth_fraud>0</auth_fraud>
<ts>1342616255805</ts>
<sig>ee77e9515599e3fd2b3721dff50111dd</sig> </trans>
</response>"""
return fake_response()
class PayUBackendTest(TestCase):
def setUp(self):
self.client = Client()
def test_online_malformed(self):
response = self.client.post(reverse('getpaid-payu-online'), {})
self.assertEqual(response.content, 'MALFORMED')
def test_online_sig_err(self):
response = self.client.post(reverse('getpaid-payu-online'), {
'pos_id' : 'wrong',
'session_id': '10:11111',
'ts' : '1111',
'sig' : 'wrong sig',
})
self.assertEqual(response.content, 'SIG ERR')
def test_online_wrong_pos_id_err(self):
response = self.client.post(reverse('getpaid-payu-online'), {
'pos_id' : '12345',
'session_id': '10:11111',
'ts' : '1111',
'sig' : '0d6129738c0aee9d4eb56f2a1db75ab4',
})
self.assertEqual(response.content, 'POS_ID ERR')
def test_online_wrong_session_id_err(self):
response = self.client.post(reverse('getpaid-payu-online'), {
'pos_id' : '123456789',
'session_id': '111111',
'ts' : '1111',
'sig' : 'fcf3db081d5085b45fe86ed0c6a9aa5e',
})
self.assertEqual(response.content, 'SESSION_ID ERR')
def test_online_ok(self):
response = self.client.post(reverse('getpaid-payu-online'), {
'pos_id' : '123456789',
'session_id': '1:11111',
'ts' : '1111',
'sig' : '2a78322c06522613cbd7447983570188',
})
self.assertEqual(response.content, 'OK')
@mock.patch("urllib2.urlopen", fake_payment_get_response_success)
def test_payment_get_paid(self):
Payment = get_model('getpaid', 'Payment')
order = Order(name='Test EUR order', total='123.45', currency='PLN')
order.save()
payment = Payment(pk=99, order=order, amount=order.total, currency=order.currency, backend='getpaid.backends.payu')
payment.save(force_insert=True)
payment = Payment.objects.get(pk=99) # this line is because django bug https://code.djangoproject.com/ticket/5903
processor = getpaid.backends.payu.PaymentProcessor(payment)
processor.get_payment_status('99:1342616247.41')
self.assertEqual(payment.status, 'paid')
self.assertNotEqual(payment.paid_on, None)
self.assertNotEqual(payment.amount_paid, Decimal('0'))
@mock.patch("urllib2.urlopen", fake_payment_get_response_failure)
def test_payment_get_failed(self):
Payment = get_model('getpaid', 'Payment')
order = Order(name='Test EUR order', total='123.45', currency='PLN')
order.save()
payment = Payment(pk=98, order=order, amount=order.total, currency=order.currency, backend='getpaid.backends.payu')
payment.save(force_insert=True)
payment = Payment.objects.get(pk=98) # this line is because django bug https://code.djangoproject.com/ticket/5903
processor = getpaid.backends.payu.PaymentProcessor(payment)
processor.get_payment_status('98:1342616247.41')
self.assertEqual(payment.status, 'failed')
self.assertEqual(payment.paid_on, None)
self.assertEqual(payment.amount_paid, Decimal('0'))
class TransferujBackendTest(TestCase):
def test_online_not_allowed_ip(self):
self.assertEqual('IP ERR', getpaid.backends.transferuj.PaymentProcessor.online('0.0.0.0', None, None, None, None, None, None, None, None, None, None, None))
#Tests allowing IP given in settings
with self.settings(GETPAID_BACKENDS_SETTINGS={
'getpaid.backends.transferuj' : {'allowed_ip': ('1.1.1.1', '1.2.3.4'), 'key': ''},
}):
self.assertEqual('IP ERR', getpaid.backends.transferuj.PaymentProcessor.online('0.0.0.0', None, None, None, None, None, None, None, None, None, None, None))
self.assertNotEqual('IP ERR', getpaid.backends.transferuj.PaymentProcessor.online('1.1.1.1', None, None, None, None, None, None, None, None, None, None, None))
self.assertNotEqual('IP ERR', getpaid.backends.transferuj.PaymentProcessor.online('1.2.3.4', None, None, None, None, None, None, None, None, None, None, None))
#Tests allowing all IP
with self.settings(GETPAID_BACKENDS_SETTINGS={
'getpaid.backends.transferuj' : {'allowed_ip': [], 'key': ''},
}):
self.assertNotEqual('IP ERR', getpaid.backends.transferuj.PaymentProcessor.online('0.0.0.0', None, None, None, None, None, None, None, None, None, None, None))
self.assertNotEqual('IP ERR', getpaid.backends.transferuj.PaymentProcessor.online('1.1.1.1', None, None, None, None, None, None, None, None, None, None, None))
self.assertNotEqual('IP ERR', getpaid.backends.transferuj.PaymentProcessor.online('1.2.3.4', None, None, None, None, None, None, None, None, None, None, None))
def test_online_wrong_sig(self):
self.assertEqual('SIG ERR', getpaid.backends.transferuj.PaymentProcessor.online('195.149.229.109', '1234', '1', '', '1', '123.45', None, None, None, None, None, 'xxx'))
self.assertNotEqual('SIG ERR', getpaid.backends.transferuj.PaymentProcessor.online('195.149.229.109', '1234', '1', '', '1', '123.45', None, None, None, None, None, '21b028c2dbdcb9ca272d1cc67ed0574e'))
def test_online_wrong_id(self):
self.assertEqual('ID ERR', getpaid.backends.transferuj.PaymentProcessor.online('195.149.229.109', '1111', '1', '', '1', '123.45', None, None, None, None, None, '15bb75707d4374bc6e578c0cbf5a7fc7'))
self.assertNotEqual('ID ERR', getpaid.backends.transferuj.PaymentProcessor.online('195.149.229.109', '1234', '1', '', '1', '123.45', None, None, None, None, None, 'f5f8276fbaa98a6e05b1056ab7c3a589'))
def test_online_crc_error(self):
self.assertEqual('CRC ERR', getpaid.backends.transferuj.PaymentProcessor.online('195.149.229.109', '1234', '1', '', '99999', '123.45', None, None, None, None, None, 'f5f8276fbaa98a6e05b1056ab7c3a589'))
self.assertEqual('CRC ERR', getpaid.backends.transferuj.PaymentProcessor.online('195.149.229.109', '1234', '1', '', 'GRRGRRG', '123.45', None, None, None, None, None, '6a9e045010c27dfed24774b0afa37d0b'))
def test_online_payment_ok(self):
Payment = get_model('getpaid', 'Payment')
order = Order(name='Test EUR order', total='123.45', currency='PLN')
order.save()
payment = Payment(order=order, amount=order.total, currency=order.currency, backend='getpaid.backends.payu')
payment.save(force_insert=True)
self.assertEqual('TRUE', getpaid.backends.transferuj.PaymentProcessor.online('195.149.229.109', '1234', '1', '', payment.pk, '123.45', '123.45', '', 'TRUE', 0, '', '21b028c2dbdcb9ca272d1cc67ed0574e'))
payment = Payment.objects.get(pk=payment.pk)
self.assertEqual(payment.status, 'paid')
self.assertNotEqual(payment.paid_on, None)
self.assertEqual(payment.amount_paid, Decimal('123.45'))
def test_online_payment_ok_over(self):
Payment = get_model('getpaid', 'Payment')
order = Order(name='Test EUR order', total='123.45', currency='PLN')
order.save()
payment = Payment(order=order, amount=order.total, currency=order.currency, backend='getpaid.backends.payu')
payment.save(force_insert=True)
self.assertEqual('TRUE', getpaid.backends.transferuj.PaymentProcessor.online('195.149.229.109', '1234', '1', '', payment.pk, '123.45', '223.45', '', 'TRUE', 0, '', '21b028c2dbdcb9ca272d1cc67ed0574e'))
payment = Payment.objects.get(pk=payment.pk)
self.assertEqual(payment.status, 'paid')
self.assertNotEqual(payment.paid_on, None)
self.assertEqual(payment.amount_paid, Decimal('223.45'))
def test_online_payment_partial(self):
Payment = get_model('getpaid', 'Payment')
order = Order(name='Test EUR order', total='123.45', currency='PLN')
order.save()
payment = Payment(order=order, amount=order.total, currency=order.currency, backend='getpaid.backends.payu')
payment.save(force_insert=True)
self.assertEqual('TRUE', getpaid.backends.transferuj.PaymentProcessor.online('195.149.229.109', '1234', '1', '', payment.pk, '123.45', '23.45', '', 'TRUE', 0, '', '21b028c2dbdcb9ca272d1cc67ed0574e'))
payment = Payment.objects.get(pk=payment.pk)
self.assertEqual(payment.status, 'partially_paid')
self.assertNotEqual(payment.paid_on, None)
self.assertEqual(payment.amount_paid, Decimal('23.45'))
def test_online_payment_failure(self):
Payment = get_model('getpaid', 'Payment')
order = Order(name='Test EUR order', total='123.45', currency='PLN')
order.save()
payment = Payment(order=order, amount=order.total, currency=order.currency, backend='getpaid.backends.payu')
payment.save(force_insert=True)
self.assertEqual('TRUE', getpaid.backends.transferuj.PaymentProcessor.online('195.149.229.109', '1234', '1', '', payment.pk, '123.45', '23.45', '', False, 0, '', '21b028c2dbdcb9ca272d1cc67ed0574e'))
payment = Payment.objects.get(pk=payment.pk)
self.assertEqual(payment.status, 'failed')
|
989,374 | 28c00c6b7f56572f690b06864e871c39eca95b42 | import math
def fuel_until_zero(mass, sum):
fuel = math.floor(mass / 3) - 2
if fuel <= 0:
return sum
else:
sum += fuel
return fuel_until_zero(fuel, sum)
f = open("inputp2.txt", "r")
lines = f.readlines()
sum = 0
for line in lines:
sum += fuel_until_zero(int(line), 0)
print(sum)
|
989,375 | 0216dfca668d112b2fed6213a45a82cc8438bbaa | #!/usr/bin/env python3
import argparse
import os
import glob
import csv
import sys
import re
from shutil import which
import datetime
def is_tool(name):
return which(name) is not None
def check_path(path):
paths = glob.glob(path)
if len(paths) == 0:
exit("file not found: %s" % path)
if len(paths) > 1:
print("warning: glob pattern found too many files, using first one: %s" % paths[0])
return paths[0]
def openlane_date_sort(e):
datestamp = os.path.basename(e)
if re.match(r'^\d+\-\d+\_\d+\-\d+$',datestamp):
timestamp = datetime.datetime.strptime(datestamp, '%d-%m_%H-%M')
return timestamp.timestamp()
return datestamp
def summary_report(summary_file):
# print short summary of the csv file
status = None
with open(summary_file) as fh:
summary = csv.DictReader(fh)
for row in summary:
for key, value in row.items():
if "violation" in key or "error" in key:
print("%30s : %20s" % (key, value))
if "AREA" in key:
area = float(value)
if "flow_status" in key:
status = value
print("area %d um^2" % (1e6 * area))
if status is not None: # newer OpenLANE has status, older ones don't
print("flow status: %s" % status)
def full_summary_report(summary_file):
# print short summary of the csv file
with open(summary_file) as fh:
summary = csv.DictReader(fh)
for row in summary:
for key, value in row.items():
print("%30s : %20s" % (key, value))
def drc_report(drc_file):
last_drc = None
drc_count = 0
with open(drc_file) as drc:
for line in drc.readlines():
drc_count += 1
if '(' in line:
if last_drc is not None:
print("* %s (%d)" % (last_drc, drc_count/4))
last_drc = line.strip()
drc_count = 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="OpenLANE summary tool")
group = parser.add_mutually_exclusive_group(required=True)
# either choose the design and interation
group.add_argument('--design', help="only run checks on specific design", action='store')
# or show standard cells
group.add_argument('--show-sky130', help='show all standard cells', action='store_const', const=True)
# optionally choose different name for top module and which run to use (default latest)
parser.add_argument('--top', help="name of top module if not same as design", action='store')
parser.add_argument('--run', help="choose a specific run. If not given use latest. If not arg, show a menu", action='store', default=-1, nargs='?', type=int)
# what to show
parser.add_argument('--drc', help='show DRC report', action='store_const', const=True)
parser.add_argument('--summary', help='show violations, area & status from summary report', action='store_const', const=True)
parser.add_argument('--full-summary', help='show the full summary report csv file', action='store_const', const=True)
parser.add_argument('--synth', help='show post techmap synth', action='store_const', const=True)
parser.add_argument('--yosys-report', help='show cell usage after yosys synth', action='store_const', const=True)
# klayout for intermediate files
parser.add_argument('--floorplan', help='show floorplan', action='store_const', const=True)
parser.add_argument('--pdn', help='show PDN', action='store_const', const=True)
parser.add_argument('--global-placement', help='show global placement PDN', action='store_const', const=True)
parser.add_argument('--detailed-placement', help='show detailed placement', action='store_const', const=True)
parser.add_argument('--gds', help='show final GDS', action='store_const', const=True)
# GDS3D for 3d view
parser.add_argument('--gds-3d', help='show final GDS in 3D', action='store_const', const=True)
parser.add_argument('--caravel', help='use caravel directory structure instead of standard openlane', action='store_const', const=True)
args = parser.parse_args()
if not args.top:
args.top = args.design
if not 'OPENLANE_ROOT' in os.environ:
exit("pls set OPENLANE_ROOT to where your OpenLANE is installed")
klayout_def = os.path.join(os.path.dirname(sys.argv[0]), 'klayout_def.xml')
klayout_gds = os.path.join(os.path.dirname(sys.argv[0]), 'klayout_gds.xml')
gds3d_tech = os.path.join(os.path.dirname(sys.argv[0]), 'sky130.txt')
# if showing off the sky130 cells
if args.show_sky130:
if not os.environ['PDK_ROOT']:
exit("pls set PDK_ROOT to where your PDK is installed")
path = check_path(os.path.join(os.environ['PDK_ROOT'], "sky130A", "libs.ref", "sky130_fd_sc_hd", "gds", "sky130_fd_sc_hd.gds"))
os.system("klayout -l %s %s" % (klayout_gds, path))
exit()
# otherwise need to know where openlane and the designs are
openlane_designs = ''
if args.caravel:
if os.path.exists('openlane'):
openlane_designs = 'openlane'
else:
openlane_designs = '.'
run_dir = os.path.join(openlane_designs, args.design, 'runs/*')
else:
openlane_designs = os.path.join(os.environ['OPENLANE_ROOT'], 'designs')
run_dir = os.path.join(openlane_designs, args.design, 'runs/*-*')
list_of_files = glob.glob(run_dir)
if len(list_of_files) == 0:
exit("couldn't find that design")
list_of_files.sort(key=openlane_date_sort)
# what run to show?
if args.run == -1:
# default is to use the latest
print("using latest run:")
run_path = max(list_of_files, key=os.path.getctime)
elif args.run is None:
# UI for asking for which run to use
for run_index, run in enumerate(list_of_files):
print("\n%2d: %s" % (run_index, os.path.basename(run)), end='')
print(" <default>\n")
n = input("which run? <enter for default>: ") or run_index
run_path = list_of_files[int(n)]
else:
# use the given run
print("using run %d:" % args.run)
run_path = list_of_files[args.run]
print(run_path)
if args.summary:
path = check_path(os.path.join(run_path, 'reports', 'final_summary_report.csv'))
summary_report(path)
if args.full_summary:
path = check_path(os.path.join(run_path, 'reports', 'final_summary_report.csv'))
full_summary_report(path)
if args.drc:
path = os.path.join(run_path, 'logs', 'magic', 'magic.drc') # don't check path because if DRC is clean, don't get the file
if os.path.exists(path):
drc_report(path)
else:
print("no DRC file, DRC clean?")
if args.synth:
path = check_path(os.path.join(run_path, "tmp", "synthesis", "post_techmap.dot")) # post_techmap is created by https://github.com/efabless/openlane/pull/282
os.system("xdot %s" % path)
if args.yosys_report:
filename = "*yosys_*.stat.rpt"
path = check_path(os.path.join(run_path, "reports", "synthesis", filename))
os.system("cat %s" % path)
if args.floorplan:
path = check_path(os.path.join(run_path, "results", "floorplan", args.top + ".floorplan.def"))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.pdn:
filename = "*pdn.def"
path = check_path(os.path.join(run_path, "tmp", "floorplan", filename))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.global_placement:
filename = "*replace.def"
path = check_path(os.path.join(run_path, "tmp", "placement", filename))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.detailed_placement:
path = check_path(os.path.join(run_path, "results", "placement", args.top + ".placement.def"))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.gds:
path = check_path(os.path.join(run_path, "results", "magic", args.top + ".gds"))
os.system("klayout -l %s %s" % (klayout_gds, path))
if args.gds_3d:
if not is_tool('GDS3D'):
exit("pls install GDS3D from https://github.com/trilomix/GDS3D")
path = check_path(os.path.join(run_path, "results", "magic", args.top + ".gds"))
os.system("GDS3D -p %s -i %s" % (gds3d_tech, path))
|
989,376 | a743bac43902af5309937415a26151e28d84367e | ''' Thermodynamic Parameter Routines '''
from __future__ import division
import numpy as np
import numpy.ma as ma
from sharppy.sharptab import interp, utils, thermo, winds
from sharppy.sharptab.constants import *
'''
This file contains various functions to perform the calculation of various convection indices.
Because of this, parcel lifting routines are also found in this file.
Functions denoted with a (*) in the docstring refer to functions that were added to the SHARPpy package that
were not ported from the Storm Prediction Center. They have been included as they have been used by the
community in an effort to expand SHARPpy to support the many parameters used in atmospheric science.
While the logic for these functions are based in the scientific literature, validation
of the output from these functions is occasionally difficult to perform. Although we have made an effort
to resolve code issues when they arise, values from these functions may be erronious and may require additional
inspection by the user. We appreciate any contributions by the meteorological community that can
help better validate these SHARPpy functions!
'''
__all__ = ['DefineParcel', 'Parcel', 'inferred_temp_adv']
__all__ += ['k_index', 't_totals', 'c_totals', 'v_totals', 'precip_water']
__all__ += ['inversion', 'temp_lvl', 'max_temp']
__all__ += ['mean_omega', 'mean_mixratio', 'mean_dewpoint', 'mean_wetbulb', 'mean_theta', 'mean_thetae', 'mean_thetaes', 'mean_thetaw', 'mean_thetaws', 'mean_thetawv', 'mean_relh']
__all__ += ['lapse_rate', 'most_unstable_level', 'parcelx', 'bulk_rich']
__all__ += ['bunkers_storm_motion', 'effective_inflow_layer']
__all__ += ['convective_temp', 'esp', 'pbl_top', 'precip_eff', 'dcape', 'sig_severe']
__all__ += ['dgz', 'ship', 'stp_cin', 'stp_fixed', 'scp', 'mmp', 'wndg', 'sherbs3_v1', 'sherbs3_v2', 'sherbe_v1', 'sherbe_v2', 'tei', 'tei_sfc', 'cape']
__all__ += ['mburst', 'dcp', 'ehi', 'sweat', 'hgz', 'lhp']
__all__ += ['alt_stg', 'spot', 'wbz', 'thomp', 'tq', 's_index', 'boyden', 'dci', 'pii', 'ko', 'brad', 'rack', 'jeff', 'sc_totals']
__all__ += ['esi', 'vgp', 'aded_v1', 'aded_v2', 'ei', 'eehi', 'strong_tor', 'vtp']
__all__ += ['snsq', 'snow']
__all__ += ['windex_v1', 'windex_v2', 'gustex_v1', 'gustex_v2', 'gustex_v3', 'gustex_v4', 'wmsi', 'dmpi_v1', 'dmpi_v2', 'hmi', 'mwpi']
__all__ += ['hi', 'ulii', 'ssi850', 'fmwi', 'fmdi', 'martin', 'csv', 'z_index', 'swiss00', 'swiss12', 'fin', 'yon_v1', 'yon_v2']
__all__ += ['fsi', 'fog_point', 'fog_threat']
__all__ += ['mvv', 'jli', 'gdi', 'cs_index', 'wmaxshear', 'ncape', 'ncinh', 'lsi', 'mcsi_v1', 'mcsi_v2', 'mosh', 'moshe', 'cii_v1', 'cii_v2', 'brooks_b']
__all__ += ['cpst_v1', 'cpst_v2', 'cpst_v3']
__all__ += ['tie']
__all__ += ['t1_gust', 't2_gust']
__all__ += ['tsi', 'hsev', 'hsiz']
__all__ += ['k_high_v1', 'k_high_v2', 'hltt', 'ssi700', 'khltt', 'kti', 'waci']
class DefineParcel(object):
'''
Create a parcel from a supplied profile object.
Parameters
----------
prof : profile object
Profile object
Optional Keywords
flag : int (default = 1)
Parcel Selection
1: Observed Surface Parcel
2: Forecast Surface Parcel
3: Most Unstable Parcel
4: Mean Mixed Layer Parcel
5: User Defined Parcel
6: Mean Effective Layer Parcel
7: Convective Temperature Parcel
Optional Keywords (Depending on Parcel Selected)
Parcel (flag) == 1: Observed Surface Parcel
None
Parcel (flag) == 2: Forecast Surface Parcel
pres : number (default = 100 hPa)
Depth over which to mix the boundary layer; only changes
temperature; does not affect moisture
Parcel (flag) == 3: Most Unstable Parcel
pres : number (default = 400 hPa)
Depth over which to look for the the most unstable parcel
starting from the surface pressure
Parcel (flag) == 4: Mixed Layer Parcel
pres : number (default = 100 hPa)
Depth over which to mix the surface parcel
Parcel (flag) == 5: User Defined Parcel
pres : number (default = SFC - 100 hPa)
Pressure of the parcel to lift
tmpc : number (default = Temperature at the provided pressure)
Temperature of the parcel to lift
dwpc : number (default = Dew Point at the provided pressure)
Dew Point of the parcel to lift
Parcel (flag) == 6: Effective Inflow Layer
ecape : number (default = 100)
The minimum amount of CAPE a parcel needs to be considered
part of the inflow layer
ecinh : number (default = -250)
The maximum amount of CINH allowed for a parcel to be
considered as part of the inflow layer
Parcel (flag) == 7: Convective Temperature Parcel
pres : number (default = 100 hPa)
Depth over which to mix the boundary layer; only changes
temperature; does not affect moisture
'''
def __init__(self, prof, flag, **kwargs):
self.flag = flag
if flag == 1:
self.presval = prof.pres[prof.sfc]
self.__sfc(prof)
elif flag == 2:
self.presval = kwargs.get('pres', 100)
self.__fcst(prof, **kwargs)
elif flag == 3:
self.presval = kwargs.get('pres', 300)
self.__mu(prof, **kwargs)
elif flag == 4:
self.presval = kwargs.get('pres', 100)
self.__ml(prof, **kwargs)
elif flag == 5:
self.presval = kwargs.get('pres', prof.pres[prof.sfc])
self.__user(prof, **kwargs)
elif flag == 6:
self.presval = kwargs.get('pres', 100)
self.__effective(prof, **kwargs)
elif flag == 7:
self.presval = kwargs.get('pres', 100)
self.__convective(prof, **kwargs)
else:
#print 'Defaulting to Surface Parcel'
self.presval = kwargs.get('pres', prof.gSndg[prof.sfc])
self.__sfc(prof)
def __sfc(self, prof):
'''
Create a parcel using surface conditions
'''
self.desc = 'Surface Parcel'
self.pres = prof.pres[prof.sfc]
self.tmpc = prof.tmpc[prof.sfc]
self.dwpc = prof.dwpc[prof.sfc]
def __fcst(self, prof, **kwargs):
'''
Create a parcel using forecast conditions.
'''
self.desc = 'Forecast Surface Parcel'
self.tmpc = max_temp(prof)
self.pres = prof.pres[prof.sfc]
pbot = self.pres; ptop = self.pres - 100.
self.dwpc = thermo.temp_at_mixrat(mean_mixratio(prof, pbot, ptop, exact=True), self.pres)
def __mu(self, prof, **kwargs):
'''
Create the most unstable parcel within the lowest XXX hPa, where
XXX is supplied. Default XXX is 400 hPa.
'''
self.desc = 'Most Unstable Parcel in Lowest %.2f hPa' % self.presval
pbot = prof.pres[prof.sfc]
ptop = pbot - self.presval
self.pres = most_unstable_level(prof, pbot=pbot, ptop=ptop)
self.tmpc = interp.temp(prof, self.pres)
self.dwpc = interp.dwpt(prof, self.pres)
def __ml(self, prof, **kwargs):
'''
Create a mixed-layer parcel with mixing within the lowest XXX hPa,
where XXX is supplied. Default is 100 hPa.
If
'''
self.desc = '%.2f hPa Mixed Layer Parcel' % self.presval
pbot = kwargs.get('pbot', prof.pres[prof.sfc])
ptop = pbot - self.presval
self.pres = pbot
mtheta = mean_theta(prof, pbot, ptop, exact=True)
self.tmpc = thermo.theta(1000., mtheta, self.pres)
mmr = mean_mixratio(prof, pbot, ptop, exact=True)
self.dwpc = thermo.temp_at_mixrat(mmr, self.pres)
def __user(self, prof, **kwargs):
'''
Create a user defined parcel.
'''
self.desc = '%.2f hPa Parcel' % self.presval
self.pres = self.presval
self.tmpc = kwargs.get('tmpc', interp.temp(prof, self.pres))
self.dwpc = kwargs.get('dwpc', interp.dwpt(prof, self.pres))
def __effective(self, prof, **kwargs):
'''
Create the mean-effective layer parcel.
'''
ecape = kwargs.get('ecape', 100)
ecinh = kwargs.get('ecinh', -250)
pbot, ptop = effective_inflow_layer(prof, ecape, ecinh)
if utils.QC(pbot) and pbot > 0:
self.desc = '%.2f hPa Mean Effective Layer Centered at %.2f' % ( pbot-ptop, (pbot+ptop)/2.)
mtha = mean_theta(prof, pbot, ptop)
mmr = mean_mixratio(prof, pbot, ptop)
self.pres = (pbot+ptop)/2.
self.tmpc = thermo.theta(1000., mtha, self.pres)
self.dwpc = thermo.temp_at_mixrat(mmr, self.pres)
else:
self.desc = 'Defaulting to Surface Layer'
self.pres = prof.pres[prof.sfc]
self.tmpc = prof.tmpc[prof.sfc]
self.dwpc = prof.dwpc[prof.sfc]
if utils.QC(pbot): self.pbot = pbot
else: self.pbot = ma.masked
if utils.QC(ptop): self.ptop = ptop
else: self.pbot = ma.masked
def __convective(self, prof, **kwargs):
'''
Create the convective temperature parcel.
'''
self.desc = 'Convective Temperature Parcel'
self.tmpc = convective_temp(prof, **kwargs)
self.pres = prof.pres[prof.sfc]
pbot = self.pres; ptop = self.pres - 100.
self.dwpc = thermo.temp_at_mixrat(mean_mixratio(prof, pbot, ptop, exact=True), self.pres)
class Parcel(object):
'''
Initialize the parcel variables
Parameters
----------
pbot : number
Lower-bound (pressure; hPa) that the parcel is lifted
ptop : number
Upper-bound (pressure; hPa) that the parcel is lifted
pres : number
Pressure of the parcel to lift (hPa)
tmpc : number
Temperature of the parcel to lift (C)
dwpc : number
Dew Point of the parcel to lift (C)
'''
def __init__(self, **kwargs):
self.pres = ma.masked # Parcel beginning pressure (mb)
self.tmpc = ma.masked # Parcel beginning temperature (C)
self.dwpc = ma.masked # Parcel beginning dewpoint (C)
self.ptrace = ma.masked # Parcel trace pressure (mb)
self.ttrace = ma.masked # Parcel trace temperature (C)
self.blayer = ma.masked # Pressure of the bottom of the layer the parcel is lifted (mb)
self.tlayer = ma.masked # Pressure of the top of the layer the parcel is lifted (mb)
self.entrain = 0. # A parcel entrainment setting (not yet implemented)
self.lclpres = ma.masked # Parcel LCL (lifted condensation level) pressure (mb)
self.lclhght = ma.masked # Parcel LCL height (m AGL)
self.lfcpres = ma.masked # Parcel LFC (level of free convection) pressure (mb)
self.lfchght = ma.masked # Parcel LFC height (m AGL)
self.elpres = ma.masked # Parcel EL (equilibrium level) pressure (mb)
self.elhght = ma.masked # Parcel EL height (m AGL)
self.mplpres = ma.masked # Maximum Parcel Level (mb)
self.mplhght = ma.masked # Maximum Parcel Level (m AGL)
self.bplus = ma.masked # Parcel CAPE (J/kg)
self.bminus = ma.masked # Parcel CIN (J/kg)
self.bfzl = ma.masked # Parcel CAPE up to freezing level (J/kg)
self.b3km = ma.masked # Parcel CAPE up to 3 km (J/kg)
self.b4km = ma.masked # Parcel CAPE up to 4 km (J/kg)
self.b6km = ma.masked # Parcel CAPE up to 6 km (J/kg)
self.p0c = ma.masked # Pressure value at 0 C (mb)
self.pm10c = ma.masked # Pressure value at -10 C (mb)
self.pm20c = ma.masked # Pressure value at -20 C (mb)
self.pm30c = ma.masked # Pressure value at -30 C (mb)
self.hght0c = ma.masked # Height value at 0 C (m AGL)
self.hghtm10c = ma.masked # Height value at -10 C (m AGL)
self.hghtm20c = ma.masked # Height value at -20 C (m AGL)
self.hghtm30c = ma.masked # Height value at -30 C (m AGL)
self.wm10c = ma.masked # w velocity at -10 C ?
self.wm20c = ma.masked # w velocity at -20 C ?
self.wm30c = ma.masked # Wet bulb at -30 C ?
self.li5 = ma.masked # Lifted Index at 500 mb (C)
self.li3 = ma.masked # Lifted Index at 300 mb (C)
self.brnshear = ma.masked # Bulk Richardson Number Shear
self.brnu = ma.masked # Bulk Richardson Number U (kts)
self.brnv = ma.masked # Bulk Richardson Number V (kts)
self.brn = ma.masked # Bulk Richardson Number (unitless)
self.limax = ma.masked # Maximum Lifted Index (C)
self.limaxpres = ma.masked # Pressure at Maximum Lifted Index (mb)
self.cap = ma.masked # Cap Strength (C)
self.cappres = ma.masked # Cap strength pressure (mb)
self.bmin = ma.masked # Buoyancy minimum in profile (C)
self.bminpres = ma.masked # Buoyancy minimum pressure (mb)
for kw in kwargs: setattr(self, kw, kwargs.get(kw))
def hgz(prof):
'''
Hail Growth Zone Levels
This function finds the pressure levels for the dendritic
growth zone (from -10 C to -30 C). If either temperature cannot be found,
it is set to be the surface pressure.
Parameters
----------
prof : profile object
Profile Object
Returns
-------
pbot : number
Pressure of the bottom level (mb)
ptop : number
Pressure of the top level (mb)
'''
pbot = temp_lvl(prof, -10)
ptop = temp_lvl(prof, -30)
if not utils.QC(pbot):
pbot = prof.pres[prof.sfc]
if not utils.QC(ptop):
ptop = prof.pres[prof.sfc]
return pbot, ptop
def dgz(prof):
'''
Dendritic Growth Zone Levels
This function finds the pressure levels for the dendritic
growth zone (from -12 C to -17 C). If either temperature cannot be found,
it is set to be the surface pressure.
Parameters
----------
prof : profile object
Profile Object
Returns
-------
pbot : number
Pressure of the bottom level (mb)
ptop : number
Pressure of the top level (mb)
'''
pbot = temp_lvl(prof, -12)
ptop = temp_lvl(prof, -17)
if not utils.QC(pbot):
pbot = prof.pres[prof.sfc]
if not utils.QC(ptop):
ptop = prof.pres[prof.sfc]
return pbot, ptop
def lhp(prof):
'''
Large Hail Parameter (*)
From Johnson and Sugden (2014), EJSSM
Parameters
----------
prof : profile object
ConvectiveProfile object
Returns
-------
lhp : number
large hail parameter (unitless)
'''
mag06_shr = utils.KTS2MS(utils.mag(*prof.sfc_6km_shear))
if prof.mupcl.bplus >= 400 and mag06_shr >= 14:
lr75 = prof.lapserate_700_500
zbot, ztop = interp.hght(prof, hgz(prof))
thk_hgz = ztop - zbot
term_a = (((prof.mupcl.bplus - 2000.)/1000.) +\
((3200 - thk_hgz)/500.) +\
((lr75 - 6.5)/2.))
if term_a < 0:
term_a = 0
p_1km, p_3km, p_6km = interp.pres(prof, interp.to_msl(prof, [1000, 3000, 6000]))
shear_el = utils.KTS2MS(utils.mag(*winds.wind_shear(prof, pbot=prof.pres[prof.sfc], ptop=prof.mupcl.elpres)))
grw_el_dir = interp.vec(prof, prof.mupcl.elpres)[0]
grw_36_dir = utils.comp2vec(*winds.mean_wind(prof, pbot=p_3km, ptop=p_6km))[0]
grw_alpha_el = grw_el_dir - grw_36_dir
if grw_alpha_el > 180:
grw_alpha_el = -10
srw_01_dir = utils.comp2vec(*winds.sr_wind(prof, pbot=prof.pres[prof.sfc], ptop=p_1km, stu=prof.srwind[0], stv=prof.srwind[1]))[0]
srw_36_dir = utils.comp2vec(*winds.sr_wind(prof, pbot=p_3km, ptop=p_6km, stu=prof.srwind[0], stv=prof.srwind[1]))[0]
srw_alpha_mid = srw_36_dir - srw_01_dir
term_b = (((shear_el - 25.)/5.) +\
((grw_alpha_el + 5.)/20.) +\
((srw_alpha_mid - 80.)/10.))
if term_b < 0:
term_b = 0
lhp = term_a * term_b + 5
else:
lhp = 0
return lhp
def ship(prof, **kwargs):
'''
Calculate the Sig Hail Parameter (SHIP)
Parameters
----------
prof : Profile object
mupcl : (optional) Most-Unstable Parcel
lr75 : (optional) 700 - 500 mb lapse rate (C/km)
h5_temp : (optional) 500 mb temperature (C)
shr06 : (optional) 0-6 km shear (m/s)
frz_lvl : (optional) freezing level (m)
Returns
-------
ship : number
significant hail parameter (unitless)
Ryan Jewell (SPC) helped in correcting this equation as the SPC
sounding help page version did not have the correct information
of how SHIP was calculated.
The significant hail parameter (SHIP; SPC 2014) is
an index developed in-house at the SPC. (Johnson and Sugden 2014)
'''
mupcl = kwargs.get('mupcl', None)
sfc6shr = kwargs.get('sfc6shr', None)
frz_lvl = kwargs.get('frz_lvl', None)
h5_temp = kwargs.get('h5_temp', None)
lr75 = kwargs.get('lr75', None)
if not mupcl:
try:
mupcl = prof.mupcl
except:
mulplvals = DefineParcel(prof, flag=3, pres=300)
mupcl = cape(prof, lplvals=mulplvals)
mucape = mupcl.bplus
mumr = thermo.mixratio(mupcl.pres, mupcl.dwpc)
if not frz_lvl:
frz_lvl = interp.hght(prof, temp_lvl(prof, 0))
if not h5_temp:
h5_temp = interp.temp(prof, 500.)
if not lr75:
lr75 = lapse_rate(prof, 700., 500., pres=True)
if not sfc6shr:
try:
sfc_6km_shear = prof.sfc_6km_shear
except:
sfc = prof.pres[prof.sfc]
p6km = interp.pres(prof, interp.to_msl(prof, 6000.))
sfc_6km_shear = winds.wind_shear(prof, pbot=sfc, ptop=p6km)
sfc_6km_shear = utils.mag(sfc_6km_shear[0], sfc_6km_shear[1])
shr06 = utils.KTS2MS(sfc_6km_shear)
if shr06 > 27:
shr06 = 27.
elif shr06 < 7:
shr06 = 7.
if mumr > 13.6:
mumr = 13.6
elif mumr < 11.:
mumr = 11.
if h5_temp > -5.5:
h5_temp = -5.5
ship = -1. * (mucape * mumr * lr75 * h5_temp * shr06) / 42000000.
if mucape < 1300:
ship = ship*(mucape/1300.)
if lr75 < 5.8:
ship = ship*(lr75/5.8)
if frz_lvl < 2400:
ship = ship * (frz_lvl/2400.)
return ship
def stp_cin(mlcape, esrh, ebwd, mllcl, mlcinh):
'''
Significant Tornado Parameter (w/CIN)
From Thompson et al. 2012 WAF, page 1139
Parameters
----------
mlcape : Mixed-layer CAPE from the parcel class (J/kg)
esrh : effective storm relative helicity (m2/s2)
ebwd : effective bulk wind difference (m/s)
mllcl : mixed-layer lifted condensation level (m)
mlcinh : mixed-layer convective inhibition (J/kg)
Returns
-------
stp_cin : number
significant tornado parameter (unitless)
'''
cape_term = mlcape / 1500.
eshr_term = esrh / 150.
if ebwd < 12.5:
ebwd_term = 0.
elif ebwd > 30.:
ebwd_term = 1.5
else:
ebwd_term = ebwd / 20.
if mllcl < 1000.:
lcl_term = 1.0
elif mllcl > 2000.:
lcl_term = 0.0
else:
lcl_term = ((2000. - mllcl) / 1000.)
if mlcinh > -50:
cinh_term = 1.0
elif mlcinh < -200:
cinh_term = 0
else:
cinh_term = ((mlcinh + 200.) / 150.)
stp_cin = np.maximum(cape_term * eshr_term * ebwd_term * lcl_term * cinh_term, 0)
return stp_cin
def stp_fixed(sbcape, sblcl, srh01, bwd6):
'''
Significant Tornado Parameter (fixed layer)
From Thompson et al. 2003
Parameters
----------
sbcape : Surface based CAPE from the parcel class (J/kg)
sblcl : Surface based lifted condensation level (LCL) (m)
srh01 : Surface to 1 km storm relative helicity (m2/s2)
bwd6 : Bulk wind difference between 0 to 6 km (m/s)
Returns
-------
stp_fixed : number
signifcant tornado parameter (fixed-layer)
'''
# Calculate SBLCL term
if sblcl < 1000.: # less than 1000. meters
lcl_term = 1.0
elif sblcl > 2000.: # greater than 2000. meters
lcl_term = 0.0
else:
lcl_term = ((2000.-sblcl)/1000.)
# Calculate 6BWD term
if bwd6 > 30.: # greater than 30 m/s
bwd6 = 30
elif bwd6 < 12.5:
bwd6 = 0.0
bwd6_term = bwd6 / 20.
cape_term = sbcape / 1500.
srh_term = srh01 / 150.
stp_fixed = cape_term * lcl_term * srh_term * bwd6_term
return stp_fixed
def scp(mucape, srh, ebwd):
'''
Supercell Composite Parameter
From Thompson et al. 2004
Parameters
----------
prof : Profile object
mucape : Most Unstable CAPE from the parcel class (J/kg) (optional)
srh : the effective SRH from the winds.helicity function (m2/s2)
ebwd : effective bulk wind difference (m/s)
Returns
-------
scp : number
supercell composite parameter
'''
if ebwd > 20:
ebwd = 20.
elif ebwd < 10:
ebwd = 0.
muCAPE_term = mucape / 1000.
esrh_term = srh / 50.
ebwd_term = ebwd / 20.
scp = muCAPE_term * esrh_term * ebwd_term
return scp
def k_index(prof):
'''
Calculates the K-Index from a profile object
Parameters
----------
prof : profile object
Profile Object
Returns
-------
kind : number
K-Index
'''
t8 = interp.temp(prof, 850.)
t7 = interp.temp(prof, 700.)
t5 = interp.temp(prof, 500.)
td7 = interp.dwpt(prof, 700.)
td8 = interp.dwpt(prof, 850.)
return t8 - t5 + td8 - (t7 - td7)
def t_totals(prof):
'''
Calculates the Total Totals Index from a profile object
Parameters
----------
prof : profile object
Profile Object
Returns
-------
t_totals : number
Total Totals Index
'''
return c_totals(prof) + v_totals(prof)
def c_totals(prof):
'''
Calculates the Cross Totals Index from a profile object
Parameters
----------
prof : profile object
Profile Object
Returns
-------
c_totals : number
Cross Totals Index
'''
return interp.dwpt(prof, 850.) - interp.temp(prof, 500.)
def v_totals(prof):
'''
Calculates the Vertical Totals Index from a profile object
Parameters
----------
prof : profile object
Profile Object
Returns
-------
v_totals : number
Vertical Totals Index
'''
return interp.temp(prof, 850.) - interp.temp(prof, 500.)
def precip_water(prof, pbot=None, ptop=400, dp=-1, exact=False):
'''
Calculates the precipitable water from a profile object within the
specified layer. The default layer (lower=-1 & upper=-1) is defined to
be surface to 400 hPa.
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
ptop : number (optional; default 400 hPa)
Pressure of the top level (hPa).
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
Returns
-------
pwat : number,
Precipitable Water (in)
'''
if not pbot: pbot = prof.pres[prof.sfc]
if prof.pres[-1] > ptop:
ptop = prof.pres[-1]
if exact:
ind1 = np.where(pbot > prof.pres)[0].min()
ind2 = np.where(ptop < prof.pres)[0].max()
dwpt1 = interp.dwpt(prof, pbot)
dwpt2 = interp.dwpt(prof, ptop)
mask = ~prof.dwpc.mask[ind1:ind2+1] * ~prof.pres.mask[ind1:ind2+1]
dwpt = np.concatenate([[dwpt1], prof.dwpc[ind1:ind2+1][mask], [dwpt2]])
p = np.concatenate([[pbot], prof.pres[ind1:ind2+1][mask], [ptop]])
else:
dp = -1
p = np.arange(pbot, ptop+dp, dp, dtype=type(pbot))
dwpt = interp.dwpt(prof, p)
w = thermo.mixratio(p, dwpt)
return (((w[:-1]+w[1:])/2 * (p[:-1]-p[1:])) * 0.00040173).sum()
def inferred_temp_adv(prof, lat=35):
'''
Inferred Temperature Advection
SHARP code deduced by Greg Blumberg. Not based on actual SPC code.
Calculates the inferred temperature advection from the surface pressure
and up every 100 mb assuming all winds are geostrophic. The units returned are
in C/hr. If no latitude is specified the function defaults to 35 degrees North.
This function doesn't compare well to SPC in terms of magnitude of the results. The direction
and relative magnitude I think I've got right. My calculations seem to be consistently less
than those seen on the SPC website. Although, this function may be right as the SPC values seem
a little high for typical synoptic scale geostrophic temperature advection (10 Kelvin/day is typical).
This code uses Equation 4.1.139 from Bluestein's "Synoptic-Dynamic Meteorology in Midlatitudes (Volume I)"
Parameters
----------
prof : Profile object
lat : latitude in decimal degrees (optional)
Returns
-------
temp_adv : an array of temperature advection values in C/hr
pressure_bounds: a 2D array indicating the top and bottom bounds of the temperature advection layers.
'''
omega = (2. * np.pi) / (86164.)
dp = -100
pres_idx = np.where(prof.pres >= 100.)[0]
pressures = np.arange(prof.pres[prof.get_sfc()], prof.pres[pres_idx][-1], dp, dtype=type(prof.pres[prof.get_sfc()])) # Units: mb
temps = thermo.ctok(interp.temp(prof, pressures))
heights = interp.hght(prof, pressures)
temp_adv = np.empty(len(pressures) - 1)
dirs = interp.vec(prof, pressures)[0]
pressure_bounds = np.empty((len(pressures) - 1, 2))
if utils.QC(lat):
f = 2. * omega * np.sin(np.radians(lat)) # Units: (s**-1)
else:
temp_adv[:] = np.nan
return temp_adv, pressure_bounds
multiplier = (f / G) * (np.pi / 180.) # Units: (s**-1 / (m/s**2)) * (radians/degrees)
for i in xrange(1, len(pressures)):
bottom_pres = pressures[i-1]
top_pres = pressures[i]
# Get the temperatures from both levels (in Kelvin)
btemp = temps[i-1]
ttemp = temps[i]
# Get the two heights of the top and bottom layer
bhght = heights[i-1] # Units: meters
thght = heights[i] # Units: meters
bottom_wdir = dirs[i-1] # Meteorological degrees (degrees from north)
top_wdir = dirs[i] # same units as top_wdir
# Calculate the average temperature
avg_temp = (ttemp + btemp) * 2.
# Calculate the mean wind between the two levels (this is assumed to be geostrophic)
mean_u, mean_v = winds.mean_wind(prof, pbot=bottom_pres, ptop=top_pres)
mean_wdir, mean_wspd = utils.comp2vec(mean_u, mean_v) # Wind speed is in knots here
mean_wspd = utils.KTS2MS(mean_wspd) # Convert this geostrophic wind speed to m/s
# Here we calculate the change in wind direction with height (thanks to Andrew Mackenzie for help with this)
# The sign of d_theta will dictate whether or not it is warm or cold advection
mod = 180 - bottom_wdir
top_wdir = top_wdir + mod
if top_wdir < 0:
top_wdir = top_wdir + 360
elif top_wdir >= 360:
top_wdir = top_wdir - 360
d_theta = top_wdir - 180.
# Here we calculate t_adv (which is -V_g * del(T) or the local change in temperature term)
# K/s s * rad/m * deg m^2/s^2 K degrees / m
t_adv = multiplier * np.power(mean_wspd,2) * avg_temp * (d_theta / (thght - bhght)) # Units: Kelvin / seconds
# Append the pressure bounds so the person knows the pressure
pressure_bounds[i-1, :] = bottom_pres, top_pres
temp_adv[i-1] = t_adv*60.*60. # Converts Kelvin/seconds to Kelvin/hour (or Celsius/hour)
return temp_adv, pressure_bounds
def inversion(prof, pbot=None, ptop=None):
'''
Finds the layers where temperature inversions are occurring.
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
ptop : number (optional; default top of sounding)
Pressure of the top level (hPa).
Returns
-------
inv_bot : An array of bases of inversion layers
inv_top : An array of tops of inversion layers
'''
if not pbot: pbot = prof.pres[prof.sfc]
if not ptop: ptop = prof.pres[prof.top]
if not utils.QC(interp.vtmp(prof, pbot)): pbot = prof.pres[prof.sfc]
if not utils.QC(interp.vtmp(prof, ptop)): return ma.masked
ind1 = np.where(pbot > prof.pres)[0].min()
ind2 = np.where(ptop < prof.pres)[0].max()
vtmp1 = interp.vtmp(prof, pbot)
vtmp2 = interp.vtmp(prof, ptop)
hght1 = interp.hght(prof, pbot)
hght2 = interp.hght(prof, ptop)
mask = ~prof.vtmp.mask[ind1:ind2+1] * ~prof.hght.mask[ind1:ind2+1]
vtmp = np.concatenate([[vtmp1], prof.vtmp[ind1:ind2+1][mask], [vtmp2]])
hght = np.concatenate([[hght1], prof.hght[ind1:ind2+1][mask], [hght2]])
lr = ((vtmp[1:] - vtmp[:-1]) / (hght[1:] - hght[:-1])) * -1000
ind3 = ma.where(lr < 0)[0]
ind4bot = ind3 + ind1 - 1
ind4top = ind3 + ind1
inv_bot = prof.pres[ind4bot]
inv_top = prof.pres[ind4top]
return inv_bot, inv_top
def temp_lvl(prof, temp):
'''
Calculates the level (hPa) of the first occurrence of the specified
temperature.
Parameters
----------
prof : profile object
Profile Object
temp : number
Temperature being searched (C)
Returns
-------
First Level of the temperature (hPa)
'''
difft = prof.tmpc - temp
ind1 = ma.where(difft >= 0)[0]
ind2 = ma.where(difft <= 0)[0]
if len(ind1) == 0 or len(ind2) == 0:
return ma.masked
inds = np.intersect1d(ind1, ind2)
if len(inds) > 0:
return prof.pres[inds][0]
diff1 = ind1[1:] - ind1[:-1]
ind = np.where(diff1 > 1)[0] + 1
try:
ind = ind.min()
except:
ind = ind1[-1]
return np.power(10, np.interp(temp, [prof.tmpc[ind+1], prof.tmpc[ind]],
[prof.logp[ind+1], prof.logp[ind]]))
def max_temp(prof, mixlayer=100):
'''
Calculates a maximum temperature forecast based on the depth of the mixing
layer and low-level temperatures
Parameters
----------
prof : profile object
Profile Object
mixlayer : number (optional; default = 100)
Top of layer over which to "mix" (hPa)
Returns
-------
mtemp : number
Forecast Maximum Temperature
'''
mixlayer = prof.pres[prof.sfc] - mixlayer
temp = thermo.ctok(interp.temp(prof, mixlayer)) + 2
return thermo.ktoc(temp * (prof.pres[prof.sfc] / mixlayer)**ROCP)
def mean_relh(prof, pbot=None, ptop=None, dp=-1, exact=False):
'''
Calculates the mean relative humidity from a profile object within the
specified layer.
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
ptop : number (optional; default 400 hPa)
Pressure of the top level (hPa)
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
Returns
-------
Mean Relative Humidity
'''
if not pbot: pbot = prof.pres[prof.sfc]
if not ptop: ptop = prof.pres[prof.sfc] - 100.
if not utils.QC(interp.temp(prof, pbot)): pbot = prof.pres[prof.sfc]
if not utils.QC(interp.temp(prof, ptop)): return ma.masked
if exact:
ind1 = np.where(pbot > prof.pres)[0].min()
ind2 = np.where(ptop < prof.pres)[0].max()
dwpt1 = interp.dwpt(prof, pbot)
dwpt2 = interp.dwpt(prof, ptop)
mask = ~prof.dwpc.mask[ind1:ind2+1] * ~prof.pres.mask[ind1:ind2+1]
dwpt = np.concatenate([[dwpt1], prof.dwpc[ind1:ind2+1][mask],
[dwpt2]])
p = np.concatenate([[pbot], prof.pres[ind1:ind2+1][mask], [ptop]])
else:
dp = -1
p = np.arange(pbot, ptop+dp, dp, dtype=type(pbot))
tmp = interp.temp(prof, p)
dwpt = interp.dwpt(prof, p)
rh = thermo.relh(p, tmp, dwpt)
return ma.average(rh, weights=p)
def mean_omega(prof, pbot=None, ptop=None, dp=-1, exact=False):
'''
Calculates the mean omega from a profile object within the
specified layer.
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
ptop : number (optional; default 400 hPa)
Pressure of the top level (hPa)
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
Returns
-------
Mean Omega
'''
if hasattr(prof, 'omeg'):
if prof.omeg.all() is np.ma.masked:
return prof.missing
else:
return prof.missing
if not pbot: pbot = prof.pres[prof.sfc]
if not ptop: ptop = prof.pres[prof.sfc] - 100.
if not utils.QC(interp.omeg(prof, pbot)): pbot = prof.pres[prof.sfc]
if not utils.QC(interp.omeg(prof, ptop)): return ma.masked
if exact:
# This condition of the if statement is not tested
omeg = prof.omeg
ind1 = np.where(pbot > prof.pres)[0].min()
ind2 = np.where(ptop < prof.pres)[0].max()
omeg1 = interp.omeg(prof, pbot)
omeg2 = interp.omeg(prof, ptop)
omeg = omeg[ind1:ind2+1]
mask = ~omeg.mask
omeg = np.concatenate([[omeg1], omeg[mask], omeg[mask], [omeg2]])
tott = omeg.sum() / 2.
num = float(len(omeg)) / 2.
omeg = tott / num
else:
dp = -1
p = np.arange(pbot, ptop+dp, dp, dtype=type(pbot))
omeg = interp.omeg(prof, p)
omeg = ma.average(omeg, weights=p)
return omeg
def mean_mixratio(prof, pbot=None, ptop=None, dp=-1, exact=False):
'''
Calculates the mean mixing ratio from a profile object within the
specified layer.
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
ptop : number (optional; default 400 hPa)
Pressure of the top level (hPa)
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
Returns
-------
Mean Mixing Ratio
'''
if not pbot: pbot = prof.pres[prof.sfc]
if not ptop: ptop = prof.pres[prof.sfc] - 100.
if not utils.QC(interp.temp(prof, pbot)): pbot = prof.pres[prof.sfc]
if not utils.QC(interp.temp(prof, ptop)): return ma.masked
if exact:
ind1 = np.where(pbot > prof.pres)[0].min()
ind2 = np.where(ptop < prof.pres)[0].max()
dwpt1 = interp.dwpt(prof, pbot)
dwpt2 = interp.dwpt(prof, ptop)
mask = ~prof.dwpc.mask[ind1:ind2+1] * ~prof.pres.mask[ind1:ind2+1]
dwpt = np.concatenate([[dwpt1], prof.dwpc[ind1:ind2+1][mask], prof.dwpc[ind1:ind2+1][mask], [dwpt2]])
p = np.concatenate([[pbot], prof.pres[ind1:ind2+1][mask],prof.pres[ind1:ind2+1][mask], [ptop]])
totd = dwpt.sum() / 2.
totp = p.sum() / 2.
num = float(len(dwpt)) / 2.
w = thermo.mixratio(totp/num, totd/num)
else:
dp = -1
p = np.arange(pbot, ptop+dp, dp, dtype=type(pbot))
dwpt = interp.dwpt(prof, p)
w = ma.average(thermo.mixratio(p, dwpt))
return w
def mean_dewpoint(prof, pbot=None, ptop=None, dp=-1, exact=False):
'''
Calculates the mean dewpoint temperature from a profile object within the
specified layer.
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
ptop : number (optional; default 400 hPa)
Pressure of the top level (hPa)
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
Returns
-------
Mean Dewpoint temperature
'''
if not pbot: pbot = prof.pres[prof.sfc]
if not ptop: ptop = prof.pres[prof.sfc] - 100.
if not utils.QC(interp.temp(prof, pbot)): pbot = prof.pres[prof.sfc]
if not utils.QC(interp.temp(prof, ptop)): return ma.masked
if exact:
ind1 = np.where(pbot > prof.pres)[0].min()
ind2 = np.where(ptop < prof.pres)[0].max()
dewpoint1 = interp.dwpt(prof, pbot)
dewpoint2 = interp.dwpt(prof, ptop)
dewpoint = np.ma.empty(prof.pres[ind1:ind2+1].shape)
for i in np.arange(0, len(dewpoint), 1):
dewpoint[i] = prof.dwpc[ind1:ind2+1][i]
mask = ~dewpoint.mask
dewpoint = np.concatenate([[dewpoint1], dewpoint[mask], dewpoint[mask], [dewpoint2]])
tott = dewpoint.sum() / 2.
num = float(len(dewpoint)) / 2.
dpt = tott / num
else:
dp = -1
p = np.arange(pbot, ptop+dp, dp, dtype=type(pbot))
dewpoint = interp.dwpt(prof, p)
dpt = ma.average(dewpoint, weights=p)
return dpt
def mean_wetbulb(prof, pbot=None, ptop=None, dp=-1, exact=False):
'''
Calculates the mean wetbulb temperature from a profile object within the
specified layer.
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
ptop : number (optional; default 400 hPa)
Pressure of the top level (hPa)
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
Returns
-------
Mean Wetbulb temperature
'''
if not pbot: pbot = prof.pres[prof.sfc]
if not ptop: ptop = prof.pres[prof.sfc] - 100.
if not utils.QC(interp.temp(prof, pbot)): pbot = prof.pres[prof.sfc]
if not utils.QC(interp.temp(prof, ptop)): return ma.masked
if exact:
ind1 = np.where(pbot > prof.pres)[0].min()
ind2 = np.where(ptop < prof.pres)[0].max()
wetbulb1 = thermo.wetbulb(pbot, interp.temp(prof, pbot), interp.dwpt(prof, pbot))
wetbulb2 = thermo.wetbulb(ptop, interp.temp(prof, ptop), interp.dwpt(prof, ptop))
wetbulb = np.ma.empty(prof.pres[ind1:ind2+1].shape)
for i in np.arange(0, len(wetbulb), 1):
wetbulb[i] = thermo.wetbulb(prof.pres[ind1:ind2+1][i], prof.tmpc[ind1:ind2+1][i], prof.dwpc[ind1:ind2+1][i])
mask = ~wetbulb.mask
wetbulb = np.concatenate([[wetbulb1], wetbulb[mask], wetbulb[mask], [wetbulb2]])
tott = wetbulb.sum() / 2.
num = float(len(wetbulb)) / 2.
wtb = tott / num
else:
dp = -1
p = np.arange(pbot, ptop+dp, dp, dtype=type(pbot))
temp = interp.temp(prof, p)
dwpt = interp.dwpt(prof, p)
wetbulb = np.empty(p.shape)
for i in np.arange(0, len(wetbulb), 1):
wetbulb[i] = thermo.wetbulb(p[i], temp[i], dwpt[i])
wtb = ma.average(wetbulb, weights=p)
return wtb
def mean_thetae(prof, pbot=None, ptop=None, dp=-1, exact=False):
'''
Calculates the mean theta-e from a profile object within the
specified layer.
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
ptop : number (optional; default 400 hPa)
Pressure of the top level (hPa)
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
Returns
-------
Mean Theta-E
'''
if not pbot: pbot = prof.pres[prof.sfc]
if not ptop: ptop = prof.pres[prof.sfc] - 100.
if not utils.QC(interp.temp(prof, pbot)): pbot = prof.pres[prof.sfc]
if not utils.QC(interp.temp(prof, ptop)): return ma.masked
if exact:
ind1 = np.where(pbot > prof.pres)[0].min()
ind2 = np.where(ptop < prof.pres)[0].max()
thetae1 = thermo.thetae(pbot, interp.temp(prof, pbot), interp.dwpt(prof, pbot))
thetae2 = thermo.thetae(ptop, interp.temp(prof, ptop), interp.dwpt(prof, ptop))
thetae = np.ma.empty(prof.pres[ind1:ind2+1].shape)
for i in np.arange(0, len(thetae), 1):
thetae[i] = thermo.thetae(prof.pres[ind1:ind2+1][i], prof.tmpc[ind1:ind2+1][i], prof.dwpc[ind1:ind2+1][i])
mask = ~thetae.mask
thetae = np.concatenate([[thetae1], thetae[mask], thetae[mask], [thetae2]])
tott = thetae.sum() / 2.
num = float(len(thetae)) / 2.
thtae = tott / num
else:
dp = -1
p = np.arange(pbot, ptop+dp, dp, dtype=type(pbot))
#temp = interp.temp(prof, p)
#dwpt = interp.dwpt(prof, p)
#thetae = np.empty(p.shape)
#for i in np.arange(0, len(thetae), 1):
# thetae[i] = thermo.thetae(p[i], temp[i], dwpt[i])
thetae = interp.thetae(prof, p)
thtae = ma.average(thetae, weights=p)
return thtae
def mean_thetaes(prof, pbot=None, ptop=None, dp=-1, exact=False):
'''
Calculates the mean theta-es from a profile object within the
specified layer.
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
ptop : number (optional; default 400 hPa)
Pressure of the top level (hPa)
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
Returns
-------
Mean Theta-ES
'''
if not pbot: pbot = prof.pres[prof.sfc]
if not ptop: ptop = prof.pres[prof.sfc] - 100.
if not utils.QC(interp.temp(prof, pbot)): pbot = prof.pres[prof.sfc]
if not utils.QC(interp.temp(prof, ptop)): return ma.masked
if exact:
ind1 = np.where(pbot > prof.pres)[0].min()
ind2 = np.where(ptop < prof.pres)[0].max()
thetaes1 = thermo.thetaes(pbot, interp.temp(prof, pbot))
thetaes2 = thermo.thetaes(ptop, interp.temp(prof, ptop))
thetaes = np.ma.empty(prof.pres[ind1:ind2+1].shape)
for i in np.arange(0, len(thetaes), 1):
thetaes[i] = thermo.thetaes(prof.pres[ind1:ind2+1][i], prof.tmpc[ind1:ind2+1][i])
mask = ~thetaes.mask
thetaes = np.concatenate([[thetaes1], thetaes[mask], thetaes[mask], [thetaes2]])
tott = thetaes.sum() / 2.
num = float(len(thetaes)) / 2.
thtaes = tott / num
else:
dp = -1
p = np.arange(pbot, ptop+dp, dp, dtype=type(pbot))
temp = interp.temp(prof, p)
thetaes = np.empty(p.shape)
for i in np.arange(0, len(thetaes), 1):
thetaes[i] = thermo.thetaes(p[i], temp[i])
thtaes = ma.average(thetaes, weights=p)
return thtaes
def mean_theta(prof, pbot=None, ptop=None, dp=-1, exact=False):
'''
Calculates the mean theta from a profile object within the
specified layer.
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
ptop : number (optional; default 400 hPa)
Pressure of the top level (hPa)
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
Returns
-------
Mean Theta
'''
if not pbot: pbot = prof.pres[prof.sfc]
if not ptop: ptop = prof.pres[prof.sfc] - 100.
if not utils.QC(interp.temp(prof, pbot)): pbot = prof.pres[prof.sfc]
if not utils.QC(interp.temp(prof, ptop)): return ma.masked
if exact:
ind1 = np.where(pbot > prof.pres)[0].min()
ind2 = np.where(ptop < prof.pres)[0].max()
theta1 = thermo.theta(pbot, interp.temp(prof, pbot))
theta2 = thermo.theta(ptop, interp.temp(prof, ptop))
theta = thermo.theta(prof.pres[ind1:ind2+1], prof.tmpc[ind1:ind2+1])
mask = ~theta.mask
theta = np.concatenate([[theta1], theta[mask], theta[mask], [theta2]])
tott = theta.sum() / 2.
num = float(len(theta)) / 2.
thta = tott / num
else:
dp = -1
p = np.arange(pbot, ptop+dp, dp, dtype=type(pbot))
temp = interp.temp(prof, p)
theta = thermo.theta(p, temp)
thta = ma.average(theta, weights=p)
return thta
def mean_thetaw(prof, pbot=None, ptop=None, dp=-1, exact=False):
'''
Calculates the mean theta-w from a profile object within the
specified layer.
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
ptop : number (optional; default 400 hPa)
Pressure of the top level (hPa)
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
Returns
-------
Mean Theta-W
'''
if not pbot: pbot = prof.pres[prof.sfc]
if not ptop: ptop = prof.pres[prof.sfc] - 100.
if not utils.QC(interp.temp(prof, pbot)): pbot = prof.pres[prof.sfc]
if not utils.QC(interp.temp(prof, ptop)): return ma.masked
if exact:
ind1 = np.where(pbot > prof.pres)[0].min()
ind2 = np.where(ptop < prof.pres)[0].max()
thetaw1 = thermo.thetaw(pbot, interp.temp(prof, pbot), interp.dwpt(prof, pbot))
thetaw2 = thermo.thetaw(ptop, interp.temp(prof, ptop), interp.dwpt(prof, ptop))
thetaw = np.ma.empty(prof.pres[ind1:ind2+1].shape)
for i in np.arange(0, len(thetaw), 1):
thetaw[i] = thermo.thetaw(prof.pres[ind1:ind2+1][i], prof.tmpc[ind1:ind2+1][i], prof.dwpc[ind1:ind2+1][i])
mask = ~thetaw.mask
thetaw = np.concatenate([[thetaw1], thetaw[mask], thetaw[mask], [thetaw2]])
tott = thetaw.sum() / 2.
num = float(len(thetaw)) / 2.
thtaw = tott / num
else:
dp = -1
p = np.arange(pbot, ptop+dp, dp, dtype=type(pbot))
temp = interp.temp(prof, p)
dwpt = interp.dwpt(prof, p)
thetaw = thermo.thetaw(p, temp, dwpt)
thtaw = ma.average(thetaw, weights=p)
return thtaw
def mean_thetaws(prof, pbot=None, ptop=None, dp=-1, exact=False):
'''
Calculates the mean theta-ws from a profile object within the
specified layer.
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
ptop : number (optional; default 400 hPa)
Pressure of the top level (hPa)
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
Returns
-------
Mean Theta-WS
'''
if not pbot: pbot = prof.pres[prof.sfc]
if not ptop: ptop = prof.pres[prof.sfc] - 100.
if not utils.QC(interp.temp(prof, pbot)): pbot = prof.pres[prof.sfc]
if not utils.QC(interp.temp(prof, ptop)): return ma.masked
if exact:
ind1 = np.where(pbot > prof.pres)[0].min()
ind2 = np.where(ptop < prof.pres)[0].max()
thetaws1 = thermo.thetaws(pbot, interp.temp(prof, pbot))
thetaws2 = thermo.thetaws(ptop, interp.temp(prof, ptop))
thetaws = np.ma.empty(prof.pres[ind1:ind2+1].shape)
for i in np.arange(0, len(thetaws), 1):
thetaws[i] = thermo.thetaws(prof.pres[ind1:ind2+1][i], prof.tmpc[ind1:ind2+1][i])
mask = ~thetaws.mask
thetaws = np.concatenate([[thetaws1], thetaws[mask], thetaws[mask], [thetaws2]])
tott = thetaws.sum() / 2.
num = float(len(thetaws)) / 2.
thtaws = tott / num
else:
dp = -1
p = np.arange(pbot, ptop+dp, dp, dtype=type(pbot))
temp = interp.temp(prof, p)
thetaws = np.empty(p.shape)
for i in np.arange(0, len(thetaws), 1):
thetaws[i] = thermo.thetaws(p[i], temp[i])
thtaws = ma.average(thetaws, weights=p)
return thtaws
def mean_thetawv(prof, pbot=None, ptop=None, dp=-1, exact=False):
'''
Calculates the mean virtual wetbulb potential temperature (theta-wv)
from a profile object within the specified layer.
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
ptop : number (optional; default 400 hPa)
Pressure of the top level (hPa)
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
Returns
-------
Mean Theta-WV
'''
if not pbot: pbot = prof.pres[prof.sfc]
if not ptop: ptop = prof.pres[prof.sfc] - 100.
if not utils.QC(interp.vtmp(prof, pbot)): pbot = prof.pres[prof.sfc]
if not utils.QC(interp.vtmp(prof, ptop)): return ma.masked
if exact:
ind1 = np.where(pbot > prof.pres)[0].min()
ind2 = np.where(ptop < prof.pres)[0].max()
thetawv1 = thermo.thetaws(pbot, interp.vtmp(prof, pbot))
thetawv2 = thermo.thetaws(ptop, interp.vtmp(prof, ptop))
thetawv = np.ma.empty(prof.pres[ind1:ind2+1].shape)
for i in np.arange(0, len(thetawv), 1):
thetawv[i] = thermo.thetaws(prof.pres[ind1:ind2+1][i], prof.vtmp[ind1:ind2+1][i])
mask = ~thetawv.mask
thetawv = np.concatenate([[thetawv1], thetawv[mask], thetawv[mask], [thetawv2]])
tott = thetawv.sum() / 2.
num = float(len(thetawv)) / 2.
thtawv = tott / num
else:
dp = -1
p = np.arange(pbot, ptop+dp, dp, dtype=type(pbot))
vtmp = interp.vtmp(prof, p)
thetawv = np.empty(p.shape)
for i in np.arange(0, len(thetawv), 1):
thetawv[i] = thermo.thetaws(p[i], vtmp[i])
thtawv = ma.average(thetawv, weights=p)
return thtawv
def lapse_rate(prof, lower, upper, pres=True):
'''
Calculates the lapse rate (C/km) from a profile object
Parameters
----------
prof : profile object
Profile Object
lower : number
Lower Bound of lapse rate
upper : number
Upper Bound of lapse rate
pres : bool (optional; default = True)
Flag to determine if lower/upper are pressure [True]
or height [False]
Returns
-------
lapse rate (float [C/km])
'''
if pres:
if (prof.pres[-1] > upper): return ma.masked
p1 = lower
p2 = upper
z1 = interp.hght(prof, lower)
z2 = interp.hght(prof, upper)
else:
z1 = interp.to_msl(prof, lower)
z2 = interp.to_msl(prof, upper)
p1 = interp.pres(prof, z1)
p2 = interp.pres(prof, z2)
tv1 = interp.vtmp(prof, p1)
tv2 = interp.vtmp(prof, p2)
return (tv2 - tv1) / (z2 - z1) * -1000.
def most_unstable_level(prof, pbot=None, ptop=None, dp=-1, exact=False):
'''
Finds the most unstable level between the lower and upper levels.
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
ptop : number (optional; default 400 hPa)
Pressure of the top level (hPa)
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
Returns
-------
Pressure level of most unstable level (hPa)
'''
if not pbot: pbot = prof.pres[prof.sfc]
if not ptop: ptop = prof.pres[prof.sfc] - 400
if not utils.QC(interp.temp(prof, pbot)): pbot = prof.pres[prof.sfc]
if not utils.QC(interp.temp(prof, ptop)): return ma.masked
if exact:
ind1 = np.where(pbot > prof.pres)[0].min()
ind2 = np.where(ptop < prof.pres)[0].max()
t1 = interp.temp(prof, pbot)
t2 = interp.temp(prof, ptop)
d1 = interp.dwpt(prof, pbot)
d2 = interp.dwpt(prof, ptop)
t = prof.tmpc[ind1:ind2+1]
d = prof.dwpc[ind1:ind2+1]
p = prof.pres[ind1:ind2+1]
mask = ~t.mask * ~d.mask * ~p.mask
t = np.concatenate([[t1], t[mask], [t2]])
d = np.concatenate([[d1], d[mask], [d2]])
p = np.concatenate([[pbot], p[mask], [ptop]])
else:
dp = -1
p = np.arange(pbot, ptop+dp, dp, dtype=type(pbot))
t = interp.temp(prof, p)
d = interp.dwpt(prof, p)
p2, t2 = thermo.drylift(p, t, d)
mt = thermo.wetlift(p2, t2, 1000.) # Essentially this is making the Theta-E profile, which we are already doing in the Profile object!
ind = np.where(np.fabs(mt - np.nanmax(mt)) < TOL)[0]
return p[ind[0]]
def parcelTraj(prof, parcel, smu=None, smv=None):
'''
Parcel Trajectory Routine (Storm Slinky)
Coded by Greg Blumberg
This routine is a simple 3D thermodynamic parcel trajectory model that
takes a thermodynamic profile and a parcel trace and computes the
trajectory of a parcel that is lifted to its LFC, then given a 5 m/s
nudge upwards, and then left to accelerate up to the EL. (Based on a description
in the AWIPS 2 Online Training.)
This parcel is assumed to be moving horizontally via the storm motion vector, which
if not supplied is taken to be the Bunkers Right Mover storm motion vector.
As the parcel accelerates upwards, it is advected by the storm relative winds.
The environmental winds are assumed to be steady-state.
This simulates the path a parcel in a storm updraft would take using pure parcel theory.
Parameters
----------
prof : Profile object
parcel : parcel object
smu: optional (storm motion vector u)
smv: optional (storm motion vector v)
Returns
-------
pos_vector : a list of tuples, where each element of the list is a location of the parcel in time
theta : the tilt of the updraft measured by the angle of the updraft with respect to the horizon
'''
t_parcel = parcel.ttrace # temperature
p_parcel = parcel.ptrace # mb
elhght = parcel.elhght # meter
y_0 = 0 # meter
x_0 = 0 # meter
z_0 = parcel.lfchght # meter
p_0 = parcel.lfcpres # meter
g = 9.8 # m/s**2
t_0 = 0 # seconds
w_0 = 5 # m/s (the initial parcel nudge)
u_0 = 0 # m/s
v_0 = 0 # m/s (initial parcel location, which is storm motion relative)
delta_t = 25 # the trajectory increment
pos_vector = [(x_0,y_0,z_0)]
speed_vector = [(u_0, v_0, w_0)]
if smu==None or smv==None:
smu = prof.srwind[0] # Expected to be in knots
smv = prof.srwind[1] # Is expected to be in knots
if parcel.bplus < 1e-3:
# The parcel doesn't have any positively buoyant areas.
return np.ma.masked, np.nan
if not utils.QC(elhght):
elhght = prof.hght[-1]
while z_0 < elhght:
t_1 = delta_t + t_0 # the time step increment
# Compute the vertical acceleration
env_tempv = interp.vtmp(prof, p_0) + 273.15
pcl_tempv = interp.generic_interp_pres(np.log10(p_0), np.log10(p_parcel.copy())[::-1], t_parcel[::-1]) + 273.15
accel = g * ((pcl_tempv - env_tempv) / env_tempv)
# Compute the vertical displacement
z_1 = (.5 * accel * np.power(t_1 - t_0, 2)) + (w_0 * (t_1 - t_0)) + z_0
w_1 = accel * (t_1 - t_0) + w_0
# Compute the parcel-relative winds
u, v = interp.components(prof, p_0)
u_0 = utils.KTS2MS(u - smu)
v_0 = utils.KTS2MS(v - smv)
# Compute the horizontal displacements
x_1 = u_0 * (t_1 - t_0) + x_0
y_1 = v_0 * (t_1 - t_0) + y_0
pos_vector.append((x_1, y_1, z_1))
speed_vector.append((u_0, v_0, w_1))
# Update parcel position
z_0 = z_1
y_0 = y_1
x_0 = x_1
t_0 = t_1
p_0 = interp.pres(prof, interp.to_msl(prof, z_1))
if ma.is_masked(p_0):
print "p_0 is masked. Can't continue slinky"
break
# Update parcel vertical velocity
w_0 = w_1
# Compute the angle tilt of the updraft
r = np.sqrt(np.power(pos_vector[-1][0], 2) + np.power(pos_vector[-1][1], 2))
theta = np.degrees(np.arctan2(pos_vector[-1][2],r))
return pos_vector, theta
def cape(prof, pbot=None, ptop=None, dp=-1, **kwargs):
'''
Lifts the specified parcel, calculates various levels and parameters from
the profile object. Only B+/B- are calculated based on the specified layer.
This is a convenience function for effective_inflow_layer and convective_temp,
as well as any function that needs to lift a parcel in an iterative process.
This function is a stripped back version of the parcelx function, that only
handles bplus and bminus. The intention is to reduce the computation time in
the iterative functions by reducing the calculations needed.
This method of creating a stripped down parcelx function for CAPE/CIN calculations
was developed by Greg Blumberg and Kelton Halbert and later implemented in
SPC's version of SHARP to speed up their program.
For full parcel objects, use the parcelx function.
!! All calculations use the virtual temperature correction unless noted. !!
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
ptop : number (optional; default 400 hPa)
Pressure of the top level (hPa)
pres : number (optional)
Pressure of parcel to lift (hPa)
tmpc : number (optional)
Temperature of parcel to lift (C)
dwpc : number (optional)
Dew Point of parcel to lift (C)
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
flag : number (optional; default = 5)
Flag to determine what kind of parcel to create; See DefineParcel for
flag values
lplvals : lifting parcel layer object (optional)
Contains the necessary parameters to describe a lifting parcel
Returns
-------
pcl : parcel object
Parcel Object
'''
flag = kwargs.get('flag', 5)
pcl = Parcel(pbot=pbot, ptop=ptop)
pcl.lplvals = kwargs.get('lplvals', DefineParcel(prof, flag))
if prof.pres.compressed().shape[0] < 1: return pcl
# Variables
pres = kwargs.get('pres', pcl.lplvals.pres)
tmpc = kwargs.get('tmpc', pcl.lplvals.tmpc)
dwpc = kwargs.get('dwpc', pcl.lplvals.dwpc)
pcl.pres = pres
pcl.tmpc = tmpc
pcl.dwpc = dwpc
totp = 0.
totn = 0.
tote = 0.
cinh_old = 0.
# See if default layer is specified
if not pbot:
pbot = prof.pres[prof.sfc]
pcl.blayer = pbot
pcl.pbot = pbot
if not ptop:
ptop = prof.pres[prof.pres.shape[0]-1]
pcl.tlayer = ptop
pcl.ptop = ptop
# Make sure this is a valid layer
if pbot > pres:
pbot = pres
pcl.blayer = pbot
if type(interp.vtmp(prof, pbot)) == type(ma.masked): return ma.masked
if type(interp.vtmp(prof, ptop)) == type(ma.masked): return ma.masked
# Begin with the Mixing Layer
pe1 = pbot
h1 = interp.hght(prof, pe1)
tp1 = thermo.virtemp(pres, tmpc, dwpc)
# Lift parcel and return LCL pres (hPa) and LCL temp (C)
pe2, tp2 = thermo.drylift(pres, tmpc, dwpc)
blupper = pe2
h2 = interp.hght(prof, pe2)
te2 = interp.vtmp(prof, pe2)
# Calculate lifted parcel theta for use in iterative CINH loop below
# RECALL: lifted parcel theta is CONSTANT from LPL to LCL
theta_parcel = thermo.theta(pe2, tp2, 1000.)
# Environmental theta and mixing ratio at LPL
bltheta = thermo.theta(pres, interp.temp(prof, pres), 1000.)
blmr = thermo.mixratio(pres, dwpc)
# ACCUMULATED CINH IN THE MIXING LAYER BELOW THE LCL
# This will be done in 'dp' increments and will use the virtual
# temperature correction where possible
pp = np.arange(pbot, blupper+dp, dp, dtype=type(pbot))
hh = interp.hght(prof, pp)
tmp_env_theta = thermo.theta(pp, interp.temp(prof, pp), 1000.)
tmp_env_dwpt = interp.dwpt(prof, pp)
tv_env = thermo.virtemp(pp, tmp_env_theta, tmp_env_dwpt)
tmp1 = thermo.virtemp(pp, theta_parcel, thermo.temp_at_mixrat(blmr, pp))
tdef = (tmp1 - tv_env) / thermo.ctok(tv_env)
tidx1 = np.arange(0, len(tdef)-1, 1)
tidx2 = np.arange(1, len(tdef), 1)
lyre = G * (tdef[tidx1]+tdef[tidx2]) / 2 * (hh[tidx2]-hh[tidx1])
totn = lyre[lyre < 0].sum()
if not totn: totn = 0.
# Move the bottom layer to the top of the boundary layer
if pbot > pe2:
pbot = pe2
pcl.blayer = pbot
if pbot < prof.pres[-1]:
# Check for the case where the LCL is above the
# upper boundary of the data (e.g. a dropsonde)
return pcl
# Find lowest observation in layer
lptr = ma.where(pbot > prof.pres)[0].min()
uptr = ma.where(ptop < prof.pres)[0].max()
# START WITH INTERPOLATED BOTTOM LAYER
# Begin moist ascent from lifted parcel LCL (pe2, tp2)
pe1 = pbot
h1 = interp.hght(prof, pe1)
te1 = interp.vtmp(prof, pe1)
tp1 = thermo.wetlift(pe2, tp2, pe1)
lyre = 0
lyrlast = 0
for i in xrange(lptr, prof.pres.shape[0]):
if not utils.QC(prof.tmpc[i]): continue
pe2 = prof.pres[i]
h2 = prof.hght[i]
te2 = prof.vtmp[i]
tp2 = thermo.wetlift(pe1, tp1, pe2)
tdef1 = (thermo.virtemp(pe1, tp1, tp1) - te1) / thermo.ctok(te1)
tdef2 = (thermo.virtemp(pe2, tp2, tp2) - te2) / thermo.ctok(te2)
lyrlast = lyre
lyre = G * (tdef1 + tdef2) / 2. * (h2 - h1)
# Add layer energy to total positive if lyre > 0
if lyre > 0: totp += lyre
# Add layer energy to total negative if lyre < 0, only up to EL
else:
if pe2 > 500.: totn += lyre
tote += lyre
pelast = pe1
pe1 = pe2
h1 = h2
te1 = te2
tp1 = tp2
# Is this the top of the specified layer
if i >= uptr and not utils.QC(pcl.bplus):
pe3 = pe1
h3 = h1
te3 = te1
tp3 = tp1
lyrf = lyre
if lyrf > 0:
pcl.bplus = totp - lyrf
pcl.bminus = totn
else:
pcl.bplus = totp
if pe2 > 500.: pcl.bminus = totn + lyrf
else: pcl.bminus = totn
pe2 = ptop
h2 = interp.hght(prof, pe2)
te2 = interp.vtmp(prof, pe2)
tp2 = thermo.wetlift(pe3, tp3, pe2)
tdef3 = (thermo.virtemp(pe3, tp3, tp3) - te3) / thermo.ctok(te3)
tdef2 = (thermo.virtemp(pe2, tp2, tp2) - te2) / thermo.ctok(te2)
lyrf = G * (tdef3 + tdef2) / 2. * (h2 - h3)
if lyrf > 0: pcl.bplus += lyrf
else:
if pe2 > 500.: pcl.bminus += lyrf
if pcl.bplus == 0: pcl.bminus = 0.
return pcl
def parcelx(prof, pbot=None, ptop=None, dp=-1, **kwargs):
'''
Lifts the specified parcel, calculated various levels and parameters from
the profile object. B+/B- are calculated based on the specified layer.
!! All calculations use the virtual temperature correction unless noted. !!
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
ptop : number (optional; default 400 hPa)
Pressure of the top level (hPa)
pres : number (optional)
Pressure of parcel to lift (hPa)
tmpc : number (optional)
Temperature of parcel to lift (C)
dwpc : number (optional)
Dew Point of parcel to lift (C)
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
flag : number (optional; default = 5)
Flag to determine what kind of parcel to create; See DefineParcel for
flag values
lplvals : lifting parcel layer object (optional)
Contains the necessary parameters to describe a lifting parcel
Returns
-------
pcl : parcel object
Parcel Object
'''
flag = kwargs.get('flag', 5)
pcl = Parcel(pbot=pbot, ptop=ptop)
pcl.lplvals = kwargs.get('lplvals', DefineParcel(prof, flag))
if prof.pres.compressed().shape[0] < 1: return pcl
# Variables
pres = kwargs.get('pres', pcl.lplvals.pres)
tmpc = kwargs.get('tmpc', pcl.lplvals.tmpc)
dwpc = kwargs.get('dwpc', pcl.lplvals.dwpc)
pcl.pres = pres
pcl.tmpc = tmpc
pcl.dwpc = dwpc
cap_strength = -9999.
cap_strengthpres = -9999.
li_max = -9999.
li_maxpres = -9999.
totp = 0.
totn = 0.
tote = 0.
cinh_old = 0.
# See if default layer is specified
if not pbot:
pbot = prof.pres[prof.sfc]
pcl.blayer = pbot
pcl.pbot = pbot
if not ptop:
ptop = prof.pres[prof.pres.shape[0]-1]
pcl.tlayer = ptop
pcl.ptop = ptop
# Make sure this is a valid layer
if pbot > pres:
pbot = pres
pcl.blayer = pbot
if type(interp.vtmp(prof, pbot)) == type(ma.masked): return ma.masked
if type(interp.vtmp(prof, ptop)) == type(ma.masked): return ma.masked
# Begin with the Mixing Layer
pe1 = pbot
h1 = interp.hght(prof, pe1)
tp1 = thermo.virtemp(pres, tmpc, dwpc)
ttrace = [tp1]
ptrace = [pe1]
# Lift parcel and return LCL pres (hPa) and LCL temp (C)
pe2, tp2 = thermo.drylift(pres, tmpc, dwpc)
blupper = pe2
h2 = interp.hght(prof, pe2)
te2 = interp.vtmp(prof, pe2)
pcl.lclpres = min(pe2, prof.pres[prof.sfc]) # Make sure the LCL pressure is
# never below the surface
pcl.lclhght = interp.to_agl(prof, h2)
ptrace.append(pe2)
ttrace.append(thermo.virtemp(pe2, tp2, tp2))
# Calculate lifted parcel theta for use in iterative CINH loop below
# RECALL: lifted parcel theta is CONSTANT from LPL to LCL
theta_parcel = thermo.theta(pe2, tp2, 1000.)
# Environmental theta and mixing ratio at LPL
bltheta = thermo.theta(pres, interp.temp(prof, pres), 1000.)
blmr = thermo.mixratio(pres, dwpc)
# ACCUMULATED CINH IN THE MIXING LAYER BELOW THE LCL
# This will be done in 'dp' increments and will use the virtual
# temperature correction where possible
pp = np.arange(pbot, blupper+dp, dp, dtype=type(pbot))
hh = interp.hght(prof, pp)
tmp_env_theta = thermo.theta(pp, interp.temp(prof, pp), 1000.)
tmp_env_dwpt = interp.dwpt(prof, pp)
tv_env = thermo.virtemp(pp, tmp_env_theta, tmp_env_dwpt)
tmp1 = thermo.virtemp(pp, theta_parcel, thermo.temp_at_mixrat(blmr, pp))
tdef = (tmp1 - tv_env) / thermo.ctok(tv_env)
tidx1 = np.arange(0, len(tdef)-1, 1)
tidx2 = np.arange(1, len(tdef), 1)
lyre = G * (tdef[tidx1]+tdef[tidx2]) / 2 * (hh[tidx2]-hh[tidx1])
totn = lyre[lyre < 0].sum()
if not totn: totn = 0.
# Move the bottom layer to the top of the boundary layer
if pbot > pe2:
pbot = pe2
pcl.blayer = pbot
# Calculate height of various temperature levels
p0c = temp_lvl(prof, 0.)
pm10c = temp_lvl(prof, -10.)
pm20c = temp_lvl(prof, -20.)
pm30c = temp_lvl(prof, -30.)
hgt0c = interp.hght(prof, p0c)
hgtm10c = interp.hght(prof, pm10c)
hgtm20c = interp.hght(prof, pm20c)
hgtm30c = interp.hght(prof, pm30c)
pcl.p0c = p0c
pcl.pm10c = pm10c
pcl.pm20c = pm20c
pcl.pm30c = pm30c
pcl.hght0c = hgt0c
pcl.hghtm10c = hgtm10c
pcl.hghtm20c = hgtm20c
pcl.hghtm30c = hgtm30c
if pbot < prof.pres[-1]:
# Check for the case where the LCL is above the
# upper boundary of the data (e.g. a dropsonde)
return pcl
# Find lowest observation in layer
lptr = ma.where(pbot >= prof.pres)[0].min()
uptr = ma.where(ptop <= prof.pres)[0].max()
# START WITH INTERPOLATED BOTTOM LAYER
# Begin moist ascent from lifted parcel LCL (pe2, tp2)
pe1 = pbot
h1 = interp.hght(prof, pe1)
te1 = interp.vtmp(prof, pe1)
tp1 = thermo.wetlift(pe2, tp2, pe1)
lyre = 0
lyrlast = 0
iter_ranges = np.arange(lptr, prof.pres.shape[0])
ttraces = ma.zeros(len(iter_ranges))
ptraces = ma.zeros(len(iter_ranges))
ttraces[:] = ptraces[:] = ma.masked
for i in iter_ranges:
if not utils.QC(prof.tmpc[i]): continue
pe2 = prof.pres[i]
h2 = prof.hght[i]
te2 = prof.vtmp[i]
#te2 = thermo.virtemp(prof.pres[i], prof.tmpc[i], prof.dwpc[i])
tp2 = thermo.wetlift(pe1, tp1, pe2)
tdef1 = (thermo.virtemp(pe1, tp1, tp1) - te1) / thermo.ctok(te1)
tdef2 = (thermo.virtemp(pe2, tp2, tp2) - te2) / thermo.ctok(te2)
ptraces[i-iter_ranges[0]] = pe2
ttraces[i-iter_ranges[0]] = thermo.virtemp(pe2, tp2, tp2)
lyrlast = lyre
lyre = G * (tdef1 + tdef2) / 2. * (h2 - h1)
# Add layer energy to total positive if lyre > 0
if lyre > 0: totp += lyre
# Add layer energy to total negative if lyre < 0, only up to EL
else:
if pe2 > 500.: totn += lyre
# Check for Max LI
mli = thermo.virtemp(pe2, tp2, tp2) - te2
if mli > li_max:
li_max = mli
li_maxpres = pe2
# Check for Max Cap Strength
mcap = te2 - mli
if mcap > cap_strength:
cap_strength = mcap
cap_strengthpres = pe2
tote += lyre
pelast = pe1
pe1 = pe2
te1 = te2
tp1 = tp2
# Is this the top of the specified layer
if i >= uptr and not utils.QC(pcl.bplus):
pe3 = pe1
h3 = h2
te3 = te1
tp3 = tp1
lyrf = lyre
if lyrf > 0:
pcl.bplus = totp - lyrf
pcl.bminus = totn
else:
pcl.bplus = totp
if pe2 > 500.: pcl.bminus = totn + lyrf
else: pcl.bminus = totn
pe2 = ptop
h2 = interp.hght(prof, pe2)
te2 = interp.vtmp(prof, pe2)
tp2 = thermo.wetlift(pe3, tp3, pe2)
tdef3 = (thermo.virtemp(pe3, tp3, tp3) - te3) / thermo.ctok(te3)
tdef2 = (thermo.virtemp(pe2, tp2, tp2) - te2) / thermo.ctok(te2)
lyrf = G * (tdef3 + tdef2) / 2. * (h2 - h3)
if lyrf > 0: pcl.bplus += lyrf
else:
if pe2 > 500.: pcl.bminus += lyrf
if pcl.bplus == 0: pcl.bminus = 0.
# Is this the freezing level
if te2 < 0. and not utils.QC(pcl.bfzl):
pe3 = pelast
h3 = interp.hght(prof, pe3)
te3 = interp.vtmp(prof, pe3)
tp3 = thermo.wetlift(pe1, tp1, pe3)
lyrf = lyre
if lyrf > 0.: pcl.bfzl = totp - lyrf
else: pcl.bfzl = totp
if not utils.QC(p0c) or p0c > pe3:
pcl.bfzl = 0
elif utils.QC(pe2):
te2 = interp.vtmp(prof, pe2)
tp2 = thermo.wetlift(pe3, tp3, pe2)
tdef3 = (thermo.virtemp(pe3, tp3, tp3) - te3) / \
thermo.ctok(te3)
tdef2 = (thermo.virtemp(pe2, tp2, tp2) - te2) / \
thermo.ctok(te2)
lyrf = G * (tdef3 + tdef2) / 2. * (hgt0c - h3)
if lyrf > 0: pcl.bfzl += lyrf
# Is this the -10C level
if te2 < -10. and not utils.QC(pcl.wm10c):
pe3 = pelast
h3 = interp.hght(prof, pe3)
te3 = interp.vtmp(prof, pe3)
tp3 = thermo.wetlift(pe1, tp1, pe3)
lyrf = lyre
if lyrf > 0.: pcl.wm10c = totp - lyrf
else: pcl.wm10c = totp
if not utils.QC(pm10c) or pm10c > pcl.lclpres:
pcl.wm10c = 0
elif utils.QC(pe2):
te2 = interp.vtmp(prof, pe2)
tp2 = thermo.wetlift(pe3, tp3, pe2)
tdef3 = (thermo.virtemp(pe3, tp3, tp3) - te3) / \
thermo.ctok(te3)
tdef2 = (thermo.virtemp(pe2, tp2, tp2) - te2) / \
thermo.ctok(te2)
lyrf = G * (tdef3 + tdef2) / 2. * (hgtm10c - h3)
if lyrf > 0: pcl.wm10c += lyrf
# Is this the -20C level
if te2 < -20. and not utils.QC(pcl.wm20c):
pe3 = pelast
h3 = interp.hght(prof, pe3)
te3 = interp.vtmp(prof, pe3)
tp3 = thermo.wetlift(pe1, tp1, pe3)
lyrf = lyre
if lyrf > 0.: pcl.wm20c = totp - lyrf
else: pcl.wm20c = totp
if not utils.QC(pm20c) or pm20c > pcl.lclpres:
pcl.wm20c = 0
elif utils.QC(pe2):
te2 = interp.vtmp(prof, pe2)
tp2 = thermo.wetlift(pe3, tp3, pe2)
tdef3 = (thermo.virtemp(pe3, tp3, tp3) - te3) / \
thermo.ctok(te3)
tdef2 = (thermo.virtemp(pe2, tp2, tp2) - te2) / \
thermo.ctok(te2)
lyrf = G * (tdef3 + tdef2) / 2. * (hgtm20c - h3)
if lyrf > 0: pcl.wm20c += lyrf
# Is this the -30C level
if te2 < -30. and not utils.QC(pcl.wm30c):
pe3 = pelast
h3 = interp.hght(prof, pe3)
te3 = interp.vtmp(prof, pe3)
tp3 = thermo.wetlift(pe1, tp1, pe3)
lyrf = lyre
if lyrf > 0.: pcl.wm30c = totp - lyrf
else: pcl.wm30c = totp
if not utils.QC(pm30c) or pm30c > pcl.lclpres:
pcl.wm30c = 0
elif utils.QC(pe2):
te2 = interp.vtmp(prof, pe2)
tp2 = thermo.wetlift(pe3, tp3, pe2)
tdef3 = (thermo.virtemp(pe3, tp3, tp3) - te3) / \
thermo.ctok(te3)
tdef2 = (thermo.virtemp(pe2, tp2, tp2) - te2) / \
thermo.ctok(te2)
lyrf = G * (tdef3 + tdef2) / 2. * (hgtm30c - h3)
if lyrf > 0: pcl.wm30c += lyrf
# Is this the 3km level
if pcl.lclhght < 3000.:
if interp.to_agl(prof, h1) <= 3000. and interp.to_agl(prof, h2) >= 3000. and not utils.QC(pcl.b3km):
pe3 = pelast
h3 = interp.hght(prof, pe3)
te3 = interp.vtmp(prof, pe3)
tp3 = thermo.wetlift(pe1, tp1, pe3)
lyrf = lyre
if lyrf > 0: pcl.b3km = totp - lyrf
else: pcl.b3km = totp
h4 = interp.to_msl(prof, 3000.)
pe4 = interp.pres(prof, h4)
if utils.QC(pe2):
te2 = interp.vtmp(prof, pe4)
tp2 = thermo.wetlift(pe3, tp3, pe4)
tdef3 = (thermo.virtemp(pe3, tp3, tp3) - te3) / \
thermo.ctok(te3)
tdef2 = (thermo.virtemp(pe4, tp2, tp2) - te2) / \
thermo.ctok(te2)
lyrf = G * (tdef3 + tdef2) / 2. * (h4 - h3)
if lyrf > 0: pcl.b3km += lyrf
else: pcl.b3km = 0.
# Is this the 4km level
if pcl.lclhght < 4000.:
if interp.to_agl(prof, h1) <= 4000. and interp.to_agl(prof, h2) >= 4000. and not utils.QC(pcl.b4km):
pe3 = pelast
h3 = interp.hght(prof, pe3)
te3 = interp.vtmp(prof, pe3)
tp3 = thermo.wetlift(pe1, tp1, pe3)
lyrf = lyre
if lyrf > 0: pcl.b4km = totp - lyrf
else: pcl.b4km = totp
h4 = interp.to_msl(prof, 4000.)
pe4 = interp.pres(prof, h4)
if utils.QC(pe2):
te2 = interp.vtmp(prof, pe4)
tp2 = thermo.wetlift(pe3, tp3, pe4)
tdef3 = (thermo.virtemp(pe3, tp3, tp3) - te3) / \
thermo.ctok(te3)
tdef2 = (thermo.virtemp(pe4, tp2, tp2) - te2) / \
thermo.ctok(te2)
lyrf = G * (tdef3 + tdef2) / 2. * (h4 - h3)
if lyrf > 0: pcl.b4km += lyrf
else: pcl.b4km = 0.
# Is this the 6km level
if pcl.lclhght < 6000.:
if interp.to_agl(prof, h1) <= 6000. and interp.to_agl(prof, h2) >= 6000. and not utils.QC(pcl.b6km):
pe3 = pelast
h3 = interp.hght(prof, pe3)
te3 = interp.vtmp(prof, pe3)
tp3 = thermo.wetlift(pe1, tp1, pe3)
lyrf = lyre
if lyrf > 0: pcl.b6km = totp - lyrf
else: pcl.b6km = totp
h4 = interp.to_msl(prof, 6000.)
pe4 = interp.pres(prof, h4)
if utils.QC(pe2):
te2 = interp.vtmp(prof, pe4)
tp2 = thermo.wetlift(pe3, tp3, pe4)
tdef3 = (thermo.virtemp(pe3, tp3, tp3) - te3) / \
thermo.ctok(te3)
tdef2 = (thermo.virtemp(pe4, tp2, tp2) - te2) / \
thermo.ctok(te2)
lyrf = G * (tdef3 + tdef2) / 2. * (h4 - h3)
if lyrf > 0: pcl.b6km += lyrf
else: pcl.b6km = 0.
h1 = h2
# LFC Possibility
if lyre >= 0. and lyrlast <= 0.:
tp3 = tp1
#te3 = te1
pe2 = pe1
pe3 = pelast
if interp.vtmp(prof, pe3) < thermo.virtemp(pe3, thermo.wetlift(pe2, tp3, pe3), thermo.wetlift(pe2, tp3, pe3)):
# Found an LFC, store height/pres and reset EL/MPL
pcl.lfcpres = pe3
pcl.lfchght = interp.to_agl(prof, interp.hght(prof, pe3))
pcl.elpres = ma.masked
pcl.elhght = ma.masked
pcl.mplpres = ma.masked
else:
while interp.vtmp(prof, pe3) > thermo.virtemp(pe3, thermo.wetlift(pe2, tp3, pe3), thermo.wetlift(pe2, tp3, pe3)) and pe3 > 0:
pe3 -= 5
if pe3 > 0:
# Found a LFC, store height/pres and reset EL/MPL
pcl.lfcpres = pe3
pcl.lfchght = interp.to_agl(prof, interp.hght(prof, pe3))
cinh_old = totn
tote = 0.
li_max = -9999.
if cap_strength < 0.: cap_strength = 0.
pcl.cap = cap_strength
pcl.cappres = cap_strengthpres
pcl.elpres = ma.masked
pcl.elhght = ma.masked
pcl.mplpres = ma.masked
# Hack to force LFC to be at least at the LCL
if pcl.lfcpres >= pcl.lclpres:
pcl.lfcpres = pcl.lclpres
pcl.lfchght = pcl.lclhght
# EL Possibility
if lyre <= 0. and lyrlast >= 0.:
tp3 = tp1
#te3 = te1
pe2 = pe1
pe3 = pelast
while interp.vtmp(prof, pe3) < thermo.virtemp(pe3, thermo.wetlift(pe2, tp3, pe3), thermo.wetlift(pe2, tp3, pe3)):
pe3 -= 5
pcl.elpres = pe3
pcl.elhght = interp.to_agl(prof, interp.hght(prof, pcl.elpres))
pcl.mplpres = ma.masked
pcl.limax = -li_max
pcl.limaxpres = li_maxpres
# MPL Possibility
if tote < 0. and not utils.QC(pcl.mplpres) and utils.QC(pcl.elpres):
pe3 = pelast
h3 = interp.hght(prof, pe3)
te3 = interp.vtmp(prof, pe3)
tp3 = thermo.wetlift(pe1, tp1, pe3)
totx = tote - lyre
pe2 = pelast
while totx > 0:
pe2 -= 1
te2 = interp.vtmp(prof, pe2)
tp2 = thermo.wetlift(pe3, tp3, pe2)
h2 = interp.hght(prof, pe2)
tdef3 = (thermo.virtemp(pe3, tp3, tp3) - te3) / \
thermo.ctok(te3)
tdef2 = (thermo.virtemp(pe2, tp2, tp2) - te2) / \
thermo.ctok(te2)
lyrf = G * (tdef3 + tdef2) / 2. * (h2 - h3)
totx += lyrf
tp3 = tp2
te3 = te2
pe3 = pe2
pcl.mplpres = pe2
pcl.mplhght = interp.to_agl(prof, interp.hght(prof, pe2))
# 500 hPa Lifted Index
if prof.pres[i] <= 500. and not utils.QC(pcl.li5):
a = interp.vtmp(prof, 500.)
b = thermo.wetlift(pe1, tp1, 500.)
pcl.li5 = a - thermo.virtemp(500, b, b)
# 300 hPa Lifted Index
if prof.pres[i] <= 300. and not utils.QC(pcl.li3):
a = interp.vtmp(prof, 300.)
b = thermo.wetlift(pe1, tp1, 300.)
pcl.li3 = a - thermo.virtemp(300, b, b)
# pcl.bminus = cinh_old
if not utils.QC(pcl.bplus): pcl.bplus = totp
# Calculate BRN if available
bulk_rich(prof, pcl)
# Save params
if np.floor(pcl.bplus) == 0: pcl.bminus = 0.
pcl.ptrace = ma.concatenate((ptrace, ptraces))
pcl.ttrace = ma.concatenate((ttrace, ttraces))
# Find minimum buoyancy from Trier et al. 2014, Part 1
idx = np.ma.where(pcl.ptrace >= 500.)[0]
if len(idx) != 0:
b = pcl.ttrace[idx] - interp.vtmp(prof, pcl.ptrace[idx])
idx2 = np.ma.argmin(b)
pcl.bmin = b[idx2]
pcl.bminpres = pcl.ptrace[idx][idx2]
return pcl
def bulk_rich(prof, pcl):
'''
Calculates the Bulk Richardson Number for a given parcel.
Parameters
----------
prof : profile object
Profile object
pcl : parcel object
Parcel object
Returns
-------
Bulk Richardson Number
'''
# Make sure parcel is initialized
if not utils.QC(pcl.lplvals):
pbot = ma.masked
elif pcl.lplvals.flag > 0 and pcl.lplvals.flag < 5 or pcl.lplvals.flag == 7:
ptop = interp.pres(prof, interp.to_msl(prof, 6000.))
pbot = prof.pres[prof.sfc]
else:
h0 = interp.hght(prof, pcl.pres)
try:
pbot = interp.pres(prof, h0-500.)
except:
pbot = ma.masked
if utils.QC(pbot): pbot = prof.pres[prof.sfc]
h1 = interp.hght(prof, pbot)
ptop = interp.pres(prof, h1+6000.)
if not utils.QC(pbot) or not utils.QC(ptop):
pcl.brnshear = ma.masked
pcl.brn = ma.masked
pcl.brnu = ma.masked
pcl.brnv = ma.masked
pcl.brnshear = ma.masked
pcl.brnu = ma.masked
pcl.brnv = ma.masked
pcl.brn = ma.masked
return pcl
# Calculate the lowest 500m mean wind
p = interp.pres(prof, interp.hght(prof, pbot)+500.)
mnlu, mnlv = winds.mean_wind(prof, pbot, p)
# Calculate the 6000m mean wind
mnuu, mnuv = winds.mean_wind(prof, pbot, ptop)
# Make sure CAPE and Shear are available
if not utils.QC(pcl.bplus) or not utils.QC(mnlu) or not utils.QC(mnuu):
pcl.brnshear = ma.masked
pcl.brnu = ma.masked
pcl.brnv = ma.masked
pcl.brn = ma.masked
return pcl
# Calculate shear between levels
dx = mnuu - mnlu
dy = mnuv - mnlv
pcl.brnu = dx
pcl.brnv = dy
pcl.brnshear = utils.KTS2MS(utils.mag(dx, dy))
pcl.brnshear = pcl.brnshear**2 / 2.
pcl.brn = pcl.bplus / pcl.brnshear
return pcl
def effective_inflow_layer(prof, ecape=100, ecinh=-250, **kwargs):
'''
Calculates the top and bottom of the effective inflow layer based on
research by Thompson et al. (2004).
Parameters
----------
prof : profile object
Profile object
ecape : number (optional; default=100)
Minimum amount of CAPE in the layer to be considered part of the
effective inflow layer.
echine : number (optional; default=250)
Maximum amount of CINH in the layer to be considered part of the
effective inflow layer
mupcl : parcel object
Most Unstable Layer parcel
Returns
-------
pbot : number
Pressure at the bottom of the layer (hPa)
ptop : number
Pressure at the top of the layer (hPa)
'''
mupcl = kwargs.get('mupcl', None)
if not mupcl:
try:
mupcl = prof.mupcl
except:
mulplvals = DefineParcel(prof, flag=3, pres=300)
mupcl = cape(prof, lplvals=mulplvals)
mucape = mupcl.bplus
mucinh = mupcl.bminus
pbot = ma.masked
ptop = ma.masked
if mucape != 0:
if mucape >= ecape and mucinh > ecinh:
# Begin at surface and search upward for effective surface
for i in xrange(prof.sfc, prof.top):
pcl = cape(prof, pres=prof.pres[i], tmpc=prof.tmpc[i], dwpc=prof.dwpc[i])
if pcl.bplus >= ecape and pcl.bminus > ecinh:
pbot = prof.pres[i]
break
if not utils.QC(pbot):
return ma.masked, ma.masked
bptr = i
# Keep searching upward for the effective top
for i in xrange(bptr+1, prof.top):
if not prof.dwpc[i] or not prof.tmpc[i]:
continue
pcl = cape(prof, pres=prof.pres[i], tmpc=prof.tmpc[i], dwpc=prof.dwpc[i])
if pcl.bplus < ecape or pcl.bminus <= ecinh: #Is this a potential "top"?
j = 1
while not utils.QC(prof.dwpc[i-j]) and not utils.QC(prof.tmpc[i-j]):
j += 1
ptop = prof.pres[i-j]
if ptop > pbot: ptop = pbot
break
return pbot, ptop
def bunkers_storm_motion(prof, **kwargs):
'''
Compute the Bunkers Storm Motion for a right moving supercell using a
parcel based approach. This code is consistent with the findings in
Bunkers et. al 2014, using the Effective Inflow Base as the base, and
65% of the most unstable parcel equilibrium level height using the
pressure weighted mean wind.
Parameters
----------
prof : profile object
Profile Object
pbot : float (optional)
Base of effective-inflow layer (hPa)
mupcl : parcel object (optional)
Most Unstable Layer parcel
Returns
-------
rstu : number
Right Storm Motion U-component
rstv : number
Right Storm Motion V-component
lstu : number
Left Storm Motion U-component
lstv : number
Left Storm Motion V-component
'''
d = utils.MS2KTS(7.5) # Deviation value emperically derived at 7.5 m/s
mupcl = kwargs.get('mupcl', None)
pbot = kwargs.get('pbot', None)
if not mupcl:
try:
mupcl = prof.mupcl
except:
mulplvals = DefineParcel(prof, flag=3, pres=400)
mupcl = parcelx(prof, lplvals=mulplvals)
mucape = mupcl.bplus
mucinh = mupcl.bminus
muel = mupcl.elhght
if not pbot:
pbot, ptop = effective_inflow_layer(prof, 100, -250, mupcl=mupcl)
if pbot and mucape > 100. and utils.QC(muel):
base = interp.to_agl(prof, interp.hght(prof, pbot))
depth = muel - base
htop = base + ( depth * (65./100.) )
ptop = interp.pres(prof, interp.to_msl(prof, htop))
mnu, mnv = winds.mean_wind(prof, pbot, ptop)
sru, srv = winds.wind_shear(prof, pbot, ptop)
srmag = utils.mag(sru, srv)
uchg = d / srmag * srv
vchg = d / srmag * sru
rstu = mnu + uchg
rstv = mnv - vchg
lstu = mnu - uchg
lstv = mnv + vchg
else:
rstu, rstv, lstu, lstv = winds.non_parcel_bunkers_motion(prof)
return rstu, rstv, lstu, lstv
def convective_temp(prof, **kwargs):
'''
Computes the convective temperature, assuming no change in the moisture
profile. Parcels are iteratively lifted until only mincinh is left as a
cap. The first guess is the observed surface temperature.
Parameters
----------
prof : profile object
Profile Object
mincinh : parcel object (optional; default -1)
Amount of CINH left at CI
pres : number (optional)
Pressure of parcel to lift (hPa)
tmpc : number (optional)
Temperature of parcel to lift (C)
dwpc : number (optional)
Dew Point of parcel to lift (C)
Returns
-------
Convective Temperature (float) in degrees C
'''
mincinh = kwargs.get('mincinh', 0.)
mmr = mean_mixratio(prof)
pres = kwargs.get('pres', prof.pres[prof.sfc])
tmpc = kwargs.get('tmpc', prof.tmpc[prof.sfc])
dwpc = kwargs.get('dwpc', thermo.temp_at_mixrat(mmr, pres))
# Do a quick search to fine whether to continue. If you need to heat
# up more than 25C, don't compute.
pcl = cape(prof, flag=5, pres=pres, tmpc=tmpc+25., dwpc=dwpc)
if pcl.bplus == 0. or pcl.bminus < mincinh: return ma.masked
excess = dwpc - tmpc
if excess > 0: tmpc = tmpc + excess + 4.
pcl = cape(prof, flag=5, pres=pres, tmpc=tmpc, dwpc=dwpc)
if pcl.bplus == 0.: pcl.bminus = ma.masked
while pcl.bminus < mincinh:
if pcl.bminus < -100: tmpc += 2.
else: tmpc += 0.5
pcl = cape(prof, flag=5, pres=pres, tmpc=tmpc, dwpc=dwpc)
if pcl.bplus == 0.: pcl.bminus = ma.masked
return tmpc
def tei(prof):
'''
Theta-E Index (TEI)
TEI is the difference between the surface theta-e and the minimum theta-e value
in the lowest 400 mb AGL
Note: This is the definition of TEI on the SPC help page,
but these calculations do not match up with the TEI values on the SPC Online Soundings.
The TEI values online are more consistent with the max Theta-E
minus the minimum Theta-E found in the lowest 400 mb AGL.
This is what our TEI calculation shall be for the time being.
Parameters
----------
prof : Profile object
Returns
-------
tei : theta-e index
'''
sfc_pres = prof.pres[prof.sfc]
top_pres = sfc_pres - 400.
layer_idxs = ma.where(prof.pres >= top_pres)[0]
min_thetae = ma.min(prof.thetae[layer_idxs])
max_thetae = ma.max(prof.thetae[layer_idxs])
tei = max_thetae - min_thetae
return tei
def tei_sfc(prof):
'''
Theta-E Index (TEI) (*)
TEI is the difference between the surface theta-e and the minimum theta-e value
in the lowest 400 mb AGL
Note: This is the definition of TEI on the SPC help page,
but these calculations do not match up with the TEI values on the SPC Online Soundings.
The TEI values online are more consistent with the max Theta-E
minus the minimum Theta-E found in the lowest 400 mb AGL.
This is the original formulation of TEI.
Parameters
----------
prof : Profile object
Returns
-------
tei : theta-e index
'''
sfc_theta = prof.thetae[prof.sfc]
sfc_pres = prof.pres[prof.sfc]
top_pres = sfc_pres - 400.
layer_idxs = ma.where(prof.pres >= top_pres)[0]
min_thetae = ma.min(prof.thetae[layer_idxs])
tei = sfc_theta - min_thetae
return tei
def esp(prof, **kwargs):
'''
Enhanced Stretching Potential (ESP)
This composite parameter identifies areas where low-level buoyancy
and steep low-level lapse rates are co-located, which may
favor low-level vortex stretching and tornado potential.
REQUIRES: 0-3 km MLCAPE (from MLPCL)
Parameters
----------
prof : Profile object
mlpcl : Mixed-Layer Parcel object (optional)
Returns
-------
esp : ESP index
'''
mlpcl = kwargs.get('mlpcl', None)
if not mlpcl:
try:
mlpcl = prof.mlpcl
except:
mlpcl = parcelx(prof, flag=4)
mlcape = mlpcl.b3km
lr03 = prof.lapserate_3km # C/km
if lr03 < 7. or mlpcl.bplus < 250.:
return 0
esp = (mlcape / 50.) * ((lr03 - 7.0) / (1.0))
return esp
def sherbs3_v1(prof, **kwargs):
'''
Severe Hazards In Environments with Reduced Buoyancy (SHERB) Parameter, 0-3km AGL shear (SHERBS3), version 1 (*)
A composite parameter designed to assist forecasters in the High-Shear
Low CAPE (HSLC) environment. This allows better discrimination
between significant severe and non-severe convection in HSLC enviroments.
It can detect significant tornadoes and significant winds. Values above
1 are more likely associated with significant severe.
See Sherburn et. al. 2014, WAF v.29 pgs. 854-877 for more information.
There are two versions: Version 1 is the original computation, which uses
the 700-500 mb lapse rate as part of its computation. Sherburn et. al.
2016, WAF v.31 pgs. 1899-1927 created a new version (Version 2) that
replaces the 700-500 mb lapse rate with the 3-5 km AGL lapse rate, and
recommends using this instead.
REQUIRES (if effective==True): The effective inflow layer be defined
Parameters
----------
prof : Profile object
Returns
-------
sherbs3_v1 : an integer for the SHERB parameter
'''
lr03 = lapse_rate(prof, 0, 3000, pres=False)
lr75 = lapse_rate(prof, 700, 500, pres=True)
p3km = interp.pres(prof, interp.to_msl(prof, 3000))
sfc_pres = prof.pres[prof.get_sfc()]
shear = utils.KTS2MS(utils.mag(*winds.wind_shear(prof, pbot=sfc_pres, ptop=p3km)))
sherbs3_v1 = ( shear / 26. ) * ( lr03 / 5.2 ) * ( lr75 / 5.6 )
return sherbs3_v1
def sherbs3_v2(prof, **kwargs):
'''
Severe Hazards In Environments with Reduced Buoyancy (SHERB) Parameter, 0-3km AGL shear (SHERBS3), version 2 (*)
A composite parameter designed to assist forecasters in the High-Shear
Low CAPE (HSLC) environment. This allows better discrimination
between significant severe and non-severe convection in HSLC enviroments.
It can detect significant tornadoes and significant winds. Values above
1 are more likely associated with significant severe.
See Sherburn et. al. 2014, WAF v.29 pgs. 854-877 for more information.
There are two versions: Version 1 is the original computation, which uses
the 700-500 mb lapse rate as part of its computation. Sherburn et. al.
2016, WAF v.31 pgs. 1899-1927 created a new version (Version 2) that
replaces the 700-500 mb lapse rate with the 3-5 km AGL lapse rate, and
recommends using this instead.
Parameters
----------
prof : Profile object
Returns
-------
sherbs3_v2 : an integer for the SHERB parameter
'''
lr03 = lapse_rate(prof, 0, 3000, pres=False)
lr35k = lapse_rate(prof, 3000, 5000, pres=False)
p3km = interp.pres(prof, interp.to_msl(prof, 3000))
sfc_pres = prof.pres[prof.get_sfc()]
shear = utils.KTS2MS(utils.mag(*winds.wind_shear(prof, pbot=sfc_pres, ptop=p3km)))
sherbs3_v2 = ( shear / 26. ) * ( lr03 / 5.2 ) * ( lr35k / 5.6 )
return sherbs3_v2
def sherbe_v1(prof, **kwargs):
'''
Severe Hazards In Environments with Reduced Buoyancy (SHERB) Parameter, Effective shear (SHERBE), version 1 (*)
A composite parameter designed to assist forecasters in the High-Shear
Low CAPE (HSLC) environment. This allows better discrimination
between significant severe and non-severe convection in HSLC enviroments.
It can detect significant tornadoes and significant winds. Values above
1 are more likely associated with significant severe.
See Sherburn et. al. 2014, WAF v.29 pgs. 854-877 for more information.
There are two versions: Version 1 is the original computation, which uses
the 700-500 mb lapse rate as part of its computation. Sherburn et. al.
2016, WAF v.31 pgs. 1899-1927 created a new version (Version 2) that
replaces the 700-500 mb lapse rate with the 3-5 km AGL lapse rate, and
recommends using this instead.
REQUIRES: The effective inflow layer be defined
Parameters
----------
prof : Profile object
Returns
-------
sherbe_v1 : an integer for the SHERB parameter
'''
lr03 = lapse_rate(prof, 0, 3000, pres=False)
lr75 = lapse_rate(prof, 700, 500, pres=True)
mupcl = getattr(prof, 'mupcl', parcelx(prof, flag=3))
# Calculate the effective inflow layer
ebottom, etop = effective_inflow_layer( prof, mupcl=mupcl )
if ebottom is ma.masked or etop is ma.masked:
# If the inflow layer doesn't exist, return missing
return prof.missing
else:
# Calculate the Effective Bulk Wind Difference
ebotm = interp.to_agl(prof, interp.hght(prof, ebottom))
depth = ( mupcl.elhght - ebotm ) / 2
elh = interp.pres(prof, interp.to_msl(prof, ebotm + depth))
ebwd = winds.wind_shear(prof, pbot=ebottom, ptop=elh)
shear = utils.KTS2MS(utils.mag( ebwd[0], ebwd[1] ))
sherbe_v1 = ( shear / 27. ) * ( lr03 / 5.2 ) * ( lr75 / 5.6 )
return sherbe_v1
def sherbe_v2(prof, **kwargs):
'''
Severe Hazards In Environments with Reduced Buoyancy (SHERB) Parameter, Effective shear (SHERBE), version 2 (*)
A composite parameter designed to assist forecasters in the High-Shear
Low CAPE (HSLC) environment. This allows better discrimination
between significant severe and non-severe convection in HSLC enviroments.
It can detect significant tornadoes and significant winds. Values above
1 are more likely associated with significant severe.
See Sherburn et. al. 2014, WAF v.29 pgs. 854-877 for more information.
There are two versions: Version 1 is the original computation, which uses
the 700-500 mb lapse rate as part of its computation. Sherburn et. al.
2016, WAF v.31 pgs. 1899-1927 created a new version (Version 2) that
replaces the 700-500 mb lapse rate with the 3-5 km AGL lapse rate, and
recommends using this instead.
REQUIRES: The effective inflow layer be defined
Parameters
----------
prof : Profile object
Returns
-------
sherbe_v2 : an integer for the SHERB parameter
'''
lr03 = lapse_rate(prof, 0, 3000, pres=False)
lr35k = lapse_rate(prof, 3000, 5000, pres=False)
mupcl = getattr(prof, 'mupcl', parcelx(prof, flag=3))
# Calculate the effective inflow layer
ebottom, etop = effective_inflow_layer( prof, mupcl=mupcl )
if ebottom is ma.masked or etop is ma.masked:
# If the inflow layer doesn't exist, return missing
return prof.missing
else:
# Calculate the Effective Bulk Wind Difference
ebotm = interp.to_agl(prof, interp.hght(prof, ebottom))
depth = ( mupcl.elhght - ebotm ) / 2
elh = interp.pres(prof, interp.to_msl(prof, ebotm + depth))
ebwd = winds.wind_shear(prof, pbot=ebottom, ptop=elh)
shear = utils.KTS2MS(utils.mag( ebwd[0], ebwd[1] ))
sherbe_v2 = ( shear / 27. ) * ( lr03 / 5.2 ) * ( lr35k / 5.6 )
return sherbe_v2
def mmp(prof, **kwargs):
"""
MCS Maintenance Probability (MMP)
The probability that a mature MCS will maintain peak intensity
for the next hour.
This equation was developed using proximity soundings and a regression equation
Uses MUCAPE, 3-8 km lapse rate, maximum bulk shear, 3-12 km mean wind speed
From Coniglio et. al. 2006 WAF
REQUIRES: MUCAPE (J/kg)
Parameters
----------
prof : Profile object
mupcl : Most-Unstable Parcel object (optional)
Returns
-------
mmp : MMP index (%)
Note:
Per Mike Coniglio (personal comm.), the maximum deep shear value is computed by
computing the shear vector between all the wind vectors
in the lowest 1 km and all the wind vectors in the 6-10 km layer.
The maximum speed shear from this is the max_bulk_shear value (m/s).
"""
mupcl = kwargs.get('mupcl', None)
if not mupcl:
try:
mupcl = prof.mupcl
except:
mulplvals = DefineParcel(prof, flag=3, pres=300)
mupcl = cape(prof, lplvals=mulplvals)
mucape = mupcl.bplus
if mucape < 100.:
return 0.
agl_hght = interp.to_agl(prof, prof.hght)
lowest_idx = np.where(agl_hght <= 1000)[0]
highest_idx = np.where((agl_hght >= 6000) & (agl_hght < 10000))[0]
if len(lowest_idx) == 0 or len(highest_idx) == 0:
return ma.masked
possible_shears = np.empty((len(lowest_idx),len(highest_idx)))
pbots = interp.pres(prof, prof.hght[lowest_idx])
ptops = interp.pres(prof, prof.hght[highest_idx])
for b in xrange(len(pbots)):
for t in xrange(len(ptops)):
if b < t: continue
u_shear, v_shear = winds.wind_shear(prof, pbot=pbots[b], ptop=ptops[t])
possible_shears[b,t] = utils.mag(u_shear, v_shear)
max_bulk_shear = utils.KTS2MS(np.nanmax(possible_shears.ravel()))
lr38 = lapse_rate(prof, 3000., 8000., pres=False)
plower = interp.pres(prof, interp.to_msl(prof, 3000.))
pupper = interp.pres(prof, interp.to_msl(prof, 12000.))
mean_wind_3t12 = winds.mean_wind( prof, pbot=plower, ptop=pupper)
mean_wind_3t12 = utils.KTS2MS(utils.mag(mean_wind_3t12[0], mean_wind_3t12[1]))
a_0 = 13.0 # unitless
a_1 = -4.59*10**-2 # m**-1 * s
a_2 = -1.16 # K**-1 * km
a_3 = -6.17*10**-4 # J**-1 * kg
a_4 = -0.17 # m**-1 * s
mmp = 1. / (1. + np.exp(a_0 + (a_1 * max_bulk_shear) + (a_2 * lr38) + (a_3 * mucape) + (a_4 * mean_wind_3t12)))
return mmp
def wndg(prof, **kwargs):
'''
Wind Damage Parameter (WNDG)
A non-dimensional composite parameter that identifies areas
where large CAPE, steep low-level lapse rates,
enhanced flow in the low-mid levels, and minimal convective
inhibition are co-located.
WNDG values > 1 favor an enhanced risk for scattered damaging
outflow gusts with multicell thunderstorm clusters, primarily
during the afternoon in the summer.
REQUIRES: MLCAPE (J/kg), MLCIN (J/kg)
Parameters
----------
prof : Profile object
mlpcl : Mixed-Layer Parcel object (optional)
Returns
-------
wndg : WNDG index
'''
mlpcl = kwargs.get('mlpcl', None)
if not mlpcl:
try:
mlpcl = prof.mlpcl
except:
mllplvals = DefineParcel(prof, flag=4)
mlpcl = cape(prof, lplvals=mllplvals)
mlcape = mlpcl.bplus
lr03 = lapse_rate( prof, 0, 3000., pres=False ) # C/km
bot = interp.pres( prof, interp.to_msl( prof, 1000. ) )
top = interp.pres( prof, interp.to_msl( prof, 3500. ) )
mean_wind = winds.mean_wind(prof, pbot=bot, ptop=top) # needs to be in m/s
mean_wind = utils.KTS2MS(utils.mag(mean_wind[0], mean_wind[1]))
mlcin = mlpcl.bminus # J/kg
if lr03 < 7:
lr03 = 0.
if mlcin < -50:
mlcin = -50.
wndg = (mlcape / 2000.) * (lr03 / 9.) * (mean_wind / 15.) * ((50. + mlcin)/40.)
return wndg
def sig_severe(prof, **kwargs):
'''
Significant Severe (SigSevere)
Craven and Brooks, 2004
REQUIRES: MLCAPE (J/kg), 0-6km Shear (kts)
Parameters
----------
prof : Profile object
mlpcl : Mixed-Layer Parcel object (optional)
Returns
-------
sigsevere : significant severe parameter (m3/s3)
'''
mlpcl = kwargs.get('mlpcl', None)
sfc6shr = kwargs.get('sfc6shr', None)
if not mlpcl:
try:
mlpcl = prof.mlpcl
except:
mllplvals = DefineParcel(prof, flag=4)
mlpcl = cape(prof, lplvals=mllplvals)
mlcape = mlpcl.bplus
if not sfc6shr:
try:
sfc_6km_shear = prof.sfc_6km_shear
except:
sfc = prof.pres[prof.sfc]
p6km = interp.pres(prof, interp.to_msl(prof, 6000.))
sfc_6km_shear = winds.wind_shear(prof, pbot=sfc, ptop=p6km)
sfc_6km_shear = utils.mag(sfc_6km_shear[0], sfc_6km_shear[1])
shr06 = utils.KTS2MS(sfc_6km_shear)
sigsevere = mlcape * shr06
return sigsevere
def dcape(prof):
'''
Downdraft CAPE (DCAPE)
Adapted from John Hart's (SPC) DCAPE code in NSHARP donated by Rich Thompson (SPC)
Calculates the downdraft CAPE value using the downdraft parcel source found in the lowest
400 mb of the sounding. This downdraft parcel is found by identifying the minimum 100 mb layer
averaged Theta-E.
Afterwards, this parcel is lowered to the surface moist adiabatically (w/o virtual temperature
correction) and the energy accumulated is called the DCAPE.
Future adaptations of this function may utilize the Parcel/DefineParcel object.
Parameters
----------
prof : Profile object
Returns
-------
dcape : downdraft CAPE (J/kg)
ttrace : downdraft parcel trace temperature (C)
ptrace : downdraft parcel trace pressure (mb)
'''
sfc_pres = prof.pres[prof.sfc]
prof_thetae = prof.thetae
prof_wetbulb = prof.wetbulb
mask1 = prof_thetae.mask
mask2 = prof.pres.mask
mask = np.maximum( mask1, mask2 )
prof_thetae = prof_thetae[~mask]
prof_wetbulb = prof_wetbulb[~mask]
pres = prof.pres[~mask]
hght = prof.hght[~mask]
dwpc = prof.dwpc[~mask]
tmpc = prof.tmpc[~mask]
idx = np.where(pres >= sfc_pres - 400.)[0]
# Find the minimum average theta-e in a 100 mb layer
mine = 1000.0
minp = -999.0
for i in idx:
thta_e_mean = mean_thetae(prof, pbot=pres[i], ptop=pres[i]-100.)
if utils.QC(thta_e_mean) and thta_e_mean < mine:
minp = pres[i] - 50.
mine = thta_e_mean
upper = minp
uptr = np.where(pres >= upper)[0]
uptr = uptr[-1]
# Define parcel starting point
tp1 = thermo.wetbulb(upper, interp.temp(prof, upper), interp.dwpt(prof, upper))
pe1 = upper
te1 = interp.temp(prof, pe1)
h1 = interp.hght(prof, pe1)
tote = 0
lyre = 0
# To keep track of the parcel trace from the downdraft
ttrace = [tp1]
ptrace = [upper]
# Lower the parcel to the surface moist adiabatically and compute
# total energy (DCAPE)
iter_ranges = xrange(uptr, -1, -1)
ttraces = ma.zeros(len(iter_ranges))
ptraces = ma.zeros(len(iter_ranges))
ttraces[:] = ptraces[:] = ma.masked
for i in iter_ranges:
pe2 = pres[i]
te2 = tmpc[i]
h2 = hght[i]
tp2 = thermo.wetlift(pe1, tp1, pe2)
if utils.QC(te1) and utils.QC(te2):
tdef1 = (tp1 - te1) / (thermo.ctok(te1))
tdef2 = (tp2 - te2) / (thermo.ctok(te2))
lyrlast = lyre
lyre = 9.8 * (tdef1 + tdef2) / 2.0 * (h2 - h1)
tote += lyre
ttraces[i] = tp2
ptraces[i] = pe2
pe1 = pe2
te1 = te2
h1 = h2
tp1 = tp2
drtemp = tp2 # Downrush temp in Celsius
return tote, ma.concatenate((ttrace, ttraces[::-1])), ma.concatenate((ptrace, ptraces[::-1]))
def precip_eff(prof, **kwargs):
'''
Precipitation Efficiency (*)
This calculation comes from Noel and Dobur 2002, published
in NWA Digest Vol 26, No 34.
The calculation multiplies the PW from the whole atmosphere
by the 1000 - 700 mb mean relative humidity (in decimal form)
Values on the SPC Mesoanalysis range from 0 to 2.6.
Larger values means that the precipitation is more efficient.
Parameters
----------
prof : Profile object
if the Profile object does not have a pwat attribute
this function will perform the calculation.
pwat : (optional) precomputed precipitable water vapor (inch)
pbot : (optional) the bottom pressure of the RH layer (mb)
ptop : (optional) the top pressure of the RH layer (mb)
Returns
-------
precip_efficency : the PE value (units inches)
'''
pw = kwargs.get('pwat', None)
pbot = kwargs.get('pbot', 1000)
ptop = kwargs.get('ptop', 700)
if pw is None or not hasattr(prof, 'pwat'):
pw = precip_water(prof)
else:
pw = prof.pwat
mean_rh = mean_relh(prof, pbot=pbot, ptop=ptop) / 100.
return pw*mean_rh
def pbl_top(prof):
'''
Planetary Boundary Layer Depth
Adapted from NSHARP code donated by Rich Thompson (SPC)
Calculates the planetary boundary layer depth by calculating the
virtual potential temperature of the surface parcel + .5 K, and then searching
for the location above the surface where the virtual potential temperature of the profile
is greater than the surface virtual potential temperature.
While this routine suggests a parcel lift, this Python adaptation does not use loop
like parcelx().
Parameters
----------
prof : Profile object
Returns
-------
ppbl_top : the pressure that corresponds to the top of the PBL
'''
thetav = thermo.theta(prof.pres, thermo.virtemp(prof.pres, prof.tmpc, prof.dwpc))
try:
level = np.where(thetav[prof.sfc]+.5 < thetav)[0][0]
except IndexError:
print "Warning: PBL top could not be found."
level = thetav.shape[0] - 1
return prof.pres[level]
def dcp(prof):
'''
Derecho Composite Parameter (*)
This parameter is based on a data set of 113 derecho events compiled by Evans and Doswell (2001).
The DCP was developed to identify environments considered favorable for cold pool "driven" wind
events through four primary mechanisms:
1) Cold pool production [DCAPE]
2) Ability to sustain strong storms along the leading edge of a gust front [MUCAPE]
3) Organization potential for any ensuing convection [0-6 km shear]
4) Sufficient flow within the ambient environment to favor development along downstream portion of the
gust front [0-6 km mean wind].
This index is fomulated as follows:
DCP = (DCAPE/980)*(MUCAPE/2000)*(0-6 km shear/20 kt)*(0-6 km mean wind/16 kt)
Reference:
Evans, J.S., and C.A. Doswell, 2001: Examination of derecho environments using proximity soundings. Wea. Forecasting, 16, 329-342.
Parameters
----------
prof : Profile object
Returns
-------
dcp : number
Derecho Composite Parameter (unitless)
'''
sfc = prof.pres[prof.sfc]
p6km = interp.pres(prof, interp.to_msl(prof, 6000.))
dcape_val = getattr(prof, 'dcape', dcape( prof )[0])
mupcl = getattr(prof, 'mupcl', parcelx(prof, flag=3))
sfc_6km_shear = getattr(prof, 'sfc_6km_shear', winds.wind_shear(prof, pbot=sfc, ptop=p6km))
mean_6km = getattr(prof, 'mean_6km', utils.comp2vec(*winds.mean_wind(prof, pbot=sfc, ptop=p6km)))
mag_shear = utils.mag(sfc_6km_shear[0], sfc_6km_shear[1])
mag_mean_wind = mean_6km[1]
dcp = (dcape_val/980.) * (mupcl.bplus/2000.) * (mag_shear / 20. ) * (mag_mean_wind / 16.)
return dcp
def mburst(prof):
'''
Microburst Composite Index
Formulated by Chad Entremont NWS JAN 12/7/2014
Code donated by Rich Thompson (SPC)
Below is taken from the SPC Mesoanalysis:
The Microburst Composite is a weighted sum of the following individual parameters: SBCAPE, SBLI,
lapse rates, vertical totals (850-500 mb temperature difference), DCAPE, and precipitable water.
All of the terms are summed to arrive at the final microburst composite value.
The values can be interpreted in the following manner: 3-4 infers a "slight chance" of a microburst;
5-8 infers a "chance" of a microburst; >= 9 infers that microbursts are "likely".
These values can also be viewed as conditional upon the existence of a storm.
This code was updated on 9/11/2018 - TT was being used in the function instead of VT.
The original SPC code was checked to confirm this was the problem.
This error was not identified during the testing phase for some reason.
Parameters
----------
prof : Profile object
Returns
-------
mburst : number
Microburst Composite (unitless)
'''
sbpcl = getattr(prof, 'sfcpcl', parcelx(prof, flag=1))
lr03 = getattr(prof, 'lapserate_3km', lapse_rate( prof, 0., 3000., pres=False ))
vt = getattr(prof, 'vertical_totals', v_totals(prof))
dcape_val = getattr(prof, 'dcape', dcape( prof )[0])
pwat = getattr(prof, 'pwat', precip_water( prof ))
tei_val = thetae_diff(prof)
sfc_thetae = thermo.thetae(sbpcl.lplvals.pres, sbpcl.lplvals.tmpc, sbpcl.lplvals.dwpc)
# SFC Theta-E term
if thermo.ctok(sfc_thetae) >= 355:
te = 1
else:
te = 0
# Surface-based CAPE Term
if not utils.QC(sbpcl.bplus):
sbcape_term = np.nan
else:
if sbpcl.bplus < 2000:
sbcape_term = -5
if sbpcl.bplus >= 2000:
sbcape_term = 0
if sbpcl.bplus >= 3300:
sbcape_term = 1
if sbpcl.bplus >= 3700:
sbcape_term = 2
if sbpcl.bplus >= 4300:
sbcape_term = 4
# Surface based LI term
if not utils.QC(sbpcl.li5):
sbli_term = np.nan
else:
if sbpcl.li5 > -7.5:
sbli_term = 0
if sbpcl.li5 <= -7.5:
sbli_term = 1
if sbpcl.li5 <= -9.0:
sbli_term = 2
if sbpcl.li5 <= -10.0:
sbli_term = 3
# PWAT Term
if not utils.QC(pwat):
pwat_term = np.nan
else:
if pwat < 1.5:
pwat_term = -3
else:
pwat_term = 0
# DCAPE Term
if not utils.QC(dcape_val):
dcape_term = np.nan
else:
if pwat > 1.70:
if dcape_val > 900:
dcape_term = 1
else:
dcape_term = 0
else:
dcape_term = 0
# Lapse Rate Term
if not utils.QC(lr03):
lr03_term = np.nan
else:
if lr03 <= 8.4:
lr03_term = 0
else:
lr03_term = 1
# Vertical Totals term
if not utils.QC(vt):
vt_term = np.nan
else:
if vt < 27:
vt_term = 0
elif vt >= 27 and vt < 28:
vt_term = 1
elif vt >= 28 and vt < 29:
vt_term = 2
else:
vt_term = 3
# TEI term?
if not utils.QC(tei_val):
ted = np.nan
else:
if tei_val >= 35:
ted = 1
else:
ted = 0
mburst = te + sbcape_term + sbli_term + pwat_term + dcape_term + lr03_term + vt_term + ted
if mburst < 0:
mburst = 0
if np.isnan(mburst):
mburst = np.ma.masked
return mburst
def ehi(prof, pcl, hbot, htop, stu=0, stv=0):
'''
Energy-Helicity Index
Computes the energy helicity index (EHI) using a parcel
object and a profile object.
The equation is EHI = (CAPE * HELICITY) / 160000.
Parameters
----------
prof : Profile object
pcl : Parcel object
hbot : number
Height of the bottom of the helicity layer [m]
htop : number
Height of the top of the helicity layer [m]
stu : number
Storm-relative wind U component [kts]
(optional; default=0)
stv : number
Storm-relative wind V component [kts]
(optional; default=0)
Returns
-------
ehi : number
Energy Helicity Index (unitless)
'''
helicity = winds.helicity(prof, hbot, htop, stu = stu, stv = stv)[0]
ehi = (helicity * pcl.bplus) / 160000.
return ehi
def sweat(prof):
'''
SWEAT Index (*)
Computes the SWEAT (Severe Weather Threat Index) using the following numbers:
1.) 850 Dewpoint
2.) Total Totals Index
3.) 850 mb wind speed
4.) 500 mb wind speed
5.) Direction of wind at 500
6.) Direction of wind at 850
Formulation taken from
Notes on Analysis and Severe-Storm Forecasting Procedures of the Air Force Global Weather Central, 1972
by RC Miller.
Parameters
----------
prof : Profile object
Returns
-------
sweat : number
SWEAT Index (number)
'''
td850 = interp.dwpt(prof, 850)
vec850 = interp.vec(prof, 850)
vec500 = interp.vec(prof, 500)
tt = getattr(prof, 'totals_totals', t_totals( prof ))
if td850 > 0:
term1 = 12. * td850
else:
term1 = 0
if tt < 49:
term2 = 0
else:
term2 = 20. * (tt - 49)
term3 = 2 * vec850[1]
term4 = vec500[1]
if 130 <= vec850[0] and 250 >= vec850[0] and 210 <= vec500[0] and 310 >= vec500[0] and vec500[0] - vec850[0] > 0 and vec850[1] >= 15 and vec500[1] >= 15:
term5 = 125 * (np.sin( np.radians(vec500[0] - vec850[0])) + 0.2)
else:
term5 = 0
sweat = term1 + term2 + term3 + term4 + term5
return sweat
def thetae_diff(prof):
'''
thetae_diff()
Adapted from code for thetae_diff2() provided by Rich Thompson (SPC)
Find the maximum and minimum Theta-E values in the lowest 3000 m of
the sounding and returns the difference. Only positive difference values
(where the minimum Theta-E is above the maximum) are returned.
Parameters
----------
prof : Profile object
Returns
-------
thetae_diff : the Theta-E difference between the max and min values (K)
'''
thetae = getattr(prof, 'thetae', prof.get_thetae_profile())
idx = np.where(interp.to_agl(prof, prof.hght) <= 3000)[0]
maxe_idx = np.ma.argmax(thetae[idx])
mine_idx = np.ma.argmin(thetae[idx])
maxe_pres = prof.pres[idx][maxe_idx]
mine_pres = prof.pres[idx][mine_idx]
thetae_diff = thetae[idx][maxe_idx] - thetae[idx][mine_idx]
if maxe_pres < mine_pres:
return 0
else:
return thetae_diff
def alt_stg(prof, units='mb'):
'''
Altimeter Setting (*)
Computes the altimeter setting of the surface level.
The altimeter setting can optionally be displayed by setting the "units" value
to the corresponding unit shorthand in the following list:
1.) 'mb' or 'hPa' : Millibars (mb) / hectopascals (hPa) (default)
2.) 'mmHg' or 'torr' : Millimeters of mercury (mmHg) / torr (torr)
3.) 'inHg' : Inches of mercury (inHg)
Parameters
----------
prof : Profile object
Returns
-------
alt_stg : number
Altimeter setting (number)
'''
sfc_pres = prof.pres[prof.sfc]
sfc_hght = prof.hght[prof.sfc]
asm = sfc_pres * (( 1 + ((( 1013.25 / sfc_pres ) ** ( 501800000. / 2637400451. )) * (( 0.0065 * sfc_hght ) / 288.15 ))) ** ( 2637400451. / 501800000. ))
if units == 'mb' or units == 'hPa':
alt_stg = asm
elif units == 'mmHg' or units == 'torr':
alt_stg = utils.MB2MMHG(asm)
elif units == 'inHg':
alt_stg = utils.MB2INHG(asm)
return alt_stg
def spot(prof):
'''
SPOT Index (*)
The Surface Potential (SPOT) Index, unlike most other forecasting indices,
uses only data collected from the surface level. As such, it has the
advantage of being able to use surface plots, which usually update hourly
instead of every 12 to 24 hours as with upper-air observations.
Using the the SWEAT and SPOT values together tends to offer more skill
than using either index by itself.
The SPOT Index is computed using the following variables:
1.) Surface ambient temperature (in degrees Fahrenheit)
2.) Surface dewpoint temperature (in degrees Fahrenheit)
3.) Altimeter setting (in inches of mercury (inHg))
4.) Wind direction (in degrees)
5.) Wind speed (in knots)
Parameters
----------
prof : Profile object
Returns
-------
spot : number
SPOT Index (number)
'''
tmpf_sfc = thermo.ctof(prof.tmpc[prof.sfc])
dwpf_sfc = thermo.ctof(prof.dwpc[prof.sfc])
alt_stg_inHg = alt_stg(prof, units='inHg')
wdir_sfc = prof.wdir[prof.sfc]
wspd_sfc = prof.wspd[prof.sfc]
# Ambient temperature factor
taf = tmpf_sfc - 60
# Dewpoint temperature factor
tdf = dwpf_sfc - 55
# Altimeter setting factor
if tmpf_sfc < 50 and alt_stg_inHg < 29.50:
asf = 50 * ( 30 - alt_stg_inHg )
else:
asf = 100 * ( 30 - alt_stg_inHg )
# Wind vector factor
if 0 <= wdir_sfc and wdir_sfc < 40:
if dwpf_sfc < 55:
wvf = -2 * wspd_sfc
else:
wvf = -1 * wspd_sfc
elif 40 <= wdir_sfc and wdir_sfc < 70:
wvf = 0
elif 70 <= wdir_sfc and wdir_sfc < 130:
if dwpf_sfc < 55:
wvf = wspd_sfc / 2
else:
wvf = wspd_sfc
elif 130 <= wdir_sfc and wdir_sfc <= 210:
if dwpf_sfc < 55:
wvf = wspd_sfc
else:
wvf = 2 * wspd_sfc
elif 210 < wdir_sfc and wdir_sfc <= 230:
if dwpf_sfc < 55:
wvf = 0
elif 55 <= dwpf_sfc and dwpf_sfc <= 60:
wvf = wspd_sfc / 2
else:
wvf = wspd_sfc
elif 230 < wdir_sfc and wdir_sfc <= 250:
if dwpf_sfc < 55:
wvf = -2 * wspd_sfc
elif 55 <= dwpf_sfc and dwpf_sfc <= 60:
wvf = -1 * wspd_sfc
else:
wvf = wspd_sfc
else:
wvf = -2 * wspd_sfc
spot = taf + tdf + asf + wvf
return spot
def wbz(prof):
'''
Wetbulb Zero height
The wetbulb zero (WBZ) height identifies the height in feet AGL at which the
wetbulb temperature equals 0 degrees C. It is assumed that hailstones above
this level do not melt, as even if the ambient temperature is above freezing,
evaporation would absorb latent heat, chilling the hailstones. However, if
the wetbulb temperature is above 0, hailstones will melt as they fall.
A WBZ height of less than 6,000 feet is usually associated with a relatively
cool airmass with low CAPE, making it unlikely for large hail to form
(although there have been occasional instances of large hail falling in areas
of low WBZ). A WBZ height of over 12,000 feet suggests that a hailstone will
fall through a very deep column of warm air over an extended period of time,
and will likely melt substantially or even completely by the time it reaches
the ground.
WBZ heights of between 6,000 and 12,000 feet are usually considered to be a
"sweet spot" of sorts for hail to form, with heights in the 7,000-9,000 foot
range being most associated with large hail.
Parameters
----------
prof : Profile object
Returns
-------
wbzp : mb
Wetbulb Zero (mb)
wbzh : feet
Wetbulb Zero (feet AGL)
'''
dp = -1
sfc_pres = prof.pres[prof.sfc]
ps = np.arange(sfc_pres, 499, dp)
plog = np.log10(ps)
temp = interp.temp(prof, ps)
dwpt = interp.dwpt(prof, ps)
hght = interp.hght(prof, ps)
wetbulb = np.empty(ps.shape)
for i in np.arange(0, len(ps), 1):
wetbulb[i] = thermo.wetbulb(ps[i], temp[i], dwpt[i])
ind1 = ma.where(wetbulb >= 0)[0]
ind2 = ma.where(wetbulb <= 0)[0]
if len(ind1) == 0 or len(ind2) == 0:
wbzp = ma.masked
else:
inds = np.intersect1d(ind1, ind2)
if len(inds) > 0:
wbzp = prof.pres[inds][0]
else:
diff1 = ind1[1:] - ind1[:-1]
ind = np.where(diff1 > 1)[0] + 1
try:
ind = ind.min()
except:
ind = ind1[-1]
wtblr = ( ( wetbulb[ind+1] - wetbulb[ind] ) / ( hght[ind+1] - hght[ind] ) ) * -1000
if wtblr > 0:
wbzp = np.power(10, np.interp(0, [wetbulb[ind+1], wetbulb[ind]],
[plog[ind+1], plog[ind]]))
else:
wbzp = np.power(10, np.interp(0, [wetbulb[ind], wetbulb[ind+1]],
[plog[ind], plog[ind+1]]))
wbzh = utils.M2FT(interp.to_agl(prof, interp.hght(prof, wbzp)))
return wbzp, wbzh
def thomp(prof, pcl):
'''
Thompson Index (*)
The Thompson Index is a combination of the K Index and Lifted Index.
It attempts to integrate elevated moisture into the index, using the
850 mb dewpoint and 700 mb humidity. Accordingly, it works best in
tropical and mountainous locations.
Parameters
----------
prof : Profile object
Returns
-------
thomp : number
Thompson Index (number)
'''
ki = getattr(prof, 'k_index', k_index(prof))
thomp = ki - pcl.li5
return thomp
def tq(prof):
'''
TQ Index (*)
The TQ index is used for assessing the probability of low-topped
convection. Values of more than 12 indicate an unstable lower
troposphere with thunderstorms possible outside of stratiform clouds.
Values of of more than 17 indicate an unstable lower troposphere
with thunderstorms possible when stratiform clouds are present.
Parameters
----------
prof : Profile object
Returns
-------
tq : number
TQ Index (number)
'''
tmp850 = interp.temp(prof, 850)
dpt850 = interp.dwpt(prof, 850)
tmp700 = interp.temp(prof, 700)
tq = tmp850 + dpt850 - ( 1.7 * tmp700 )
return tq
def s_index(prof):
'''
S-Index (*)
This European index is a mix of the K Index and Vertical Totals Index.
The S-Index was developed by the German Military Geophysical Office.
Parameters
----------
prof : Profile object
Returns
-------
s_index : number
S-Index (number)
'''
ki = getattr(prof, 'k_index', k_index(prof))
vt = getattr(prof, 'vertical_totals', v_totals(prof))
tmp5 = interp.temp(prof, 500)
if vt < 22:
af = 6
elif 22 <= vt and vt <= 25:
af = 2
else:
af = 0
s_index = ki - ( tmp5 + af )
return s_index
def boyden(prof):
'''
Boyden Index (*)
This index, used in Europe, does not factor in moisture.
It evaluates thickness and mid-level warmth. It was defined
in 1963 by C. J. Boyden.
Parameters
----------
prof : Profile object
Returns
-------
boyden : number
Boyden Index (number)
'''
# Height in decameters (dam)
h700 = interp.hght(prof, 700) / 10
h1000 = interp.hght(prof, 1000) / 10
tmp700 = interp.temp(prof, 700)
boyden = h700 - h1000 - tmp700 - 200
return boyden
def dci(prof, pcl):
'''
Deep Convective Index (*)
This index is a combination of parcel theta-e at 850 mb and
Lifted Index. This attempts to further improve the Lifted Index.
It was defined by W. R. Barlow in 1993.
Parameters
----------
prof : Profile object
Returns
-------
dci : number
Deep Convective Index (number)
'''
tmp850 = interp.temp(prof, 850)
dpt850 = interp.dwpt(prof, 850)
dci = tmp850 + dpt850 - pcl.li5
return dci
def pii(prof):
'''
Potential Instability Index (*)
This index relates potential instability in the middle atmosphere with
thickness. It was proposed by A. J. Van Delden in 2001. Positive values
indicate increased potential for convective weather.
The units in the original formulation are in degrees Kelvin per meter (K/m);
however, this formulation will use degrees Kelvin per kilometer (K/km) so as
to make the values easier to read.
Parameters
----------
prof : Profile object
Returns
-------
pii : number
Potential Instability Index (number)
'''
te925 = thermo.thetae(925, interp.temp(prof, 925), interp.dwpt(prof, 925))
te500 = thermo.thetae(500, interp.temp(prof, 500), interp.dwpt(prof, 500))
z500 = interp.hght(prof, 500)
z925 = interp.hght(prof, 925)
th95 = ( z500 - z925 ) / 1000
pii = ( te925 - te500 ) / th95
return pii
def ko(prof):
'''
KO Index (*)
This index was developed by Swedish meteorologists and used heavily by the
Deutsche Wetterdienst. It compares values of equivalent potential
temperature at different levels. It was developed by T. Andersson, M.
Andersson, C. Jacobsson, and S. Nilsson.
Parameters
----------
prof : Profile object
Returns
-------
ko : number
KO Index (number)
'''
te500 = thermo.thetae(500, interp.temp(prof, 500), interp.dwpt(prof, 500))
te700 = thermo.thetae(700, interp.temp(prof, 700), interp.dwpt(prof, 700))
te850 = thermo.thetae(850, interp.temp(prof, 850), interp.dwpt(prof, 850))
sfc_pres = prof.pres[prof.sfc]
if sfc_pres < 1000:
pr1s = sfc_pres
else:
pr1s = 1000
te1s = thermo.thetae(pr1s, interp.temp(prof, pr1s), interp.dwpt(prof, pr1s))
ko = ( 0.5 * ( te500 + te700 ) ) - ( 0.5 * ( te850 + te1s ) )
return ko
def brad(prof):
'''
Bradbury Index (*)
Also known as the Potential Wet-Bulb Index, this index is used in Europe.
It is a measure of the potential instability between 850 and 500 mb. It
was defined in 1977 by T. A. M. Bradbury.
Parameters
----------
prof : Profile object
Returns
-------
brad : number
Bradbury Index (number)
'''
qw500 = thermo.thetaw(500, interp.temp(prof, 500), interp.dwpt(prof, 500))
qw850 = thermo.thetaw(850, interp.temp(prof, 850), interp.dwpt(prof, 850))
brad = qw500 - qw850
return brad
def rack(prof):
'''
Rackliff Index (*)
This index, used primarily in Europe during the 1950s, is a simple comparison
of the 900 mb wet bulb temperature with the 500 mb dry bulb temperature.
It is believed to have been developed by Peter Rackliff during the 1940s.
Parameters
----------
prof : Profile object
Returns
-------
rack : number
Rackliff Index (number)
'''
qw900 = thermo.thetaw(900, interp.temp(prof, 900), interp.dwpt(prof, 900))
tmp500 = interp.temp(prof, 500)
rack = qw900 - tmp500
return rack
def jeff(prof):
'''
Jefferson Index (*)
A European stability index, the Jefferson Index was intended to be an improvement of the
Rackliff Index. The change would make it less dependent on temperature. The version used
since the 1960s is a slight modification of G. J. Jefferson's 1963 definition.
Parameters
----------
prof : Profile object
Returns
-------
jeff : number
Jefferson Index (number)
'''
qw850 = thermo.thetaw(850, interp.temp(prof, 850), interp.dwpt(prof, 850))
tmp500 = interp.temp(prof, 500)
tdd700 = interp.tdd(prof, 700)
jeff = ( 1.6 * qw850 ) - tmp500 - ( 0.5 * tdd700 ) - 8
return jeff
def sc_totals(prof):
'''
Surface-based Cross Totals (*)
This index, developed by J. Davies in 1988, is a modification of the Cross Totals index that
replaces the 850 mb dewpoint with the surface dewpoint. As such, this index will usually
give a higher value than the original Cross Totals.
Parameters
----------
prof : Profile object
Returns
-------
sc_totals : number
Surface-based Cross Totals (number)
'''
sc_totals = prof.dwpc[prof.sfc] - interp.temp(prof, 500)
return sc_totals
def esi(prof, sbcape):
'''
Energy Shear Index (*)
This index, proposed as a way of parameterizing updraft duration, multiplies SBCAPE by
850 mb-6 km AGL mean vertical shear magnitude in m/s. A 2002 study by Brimelow and Reuter
indicated considerable success with using this index to forecast large hail. An ESI
value approaching 5 is considered favorable for large hail, with values above 5 not
having much further significance.
Parameters
----------
prof : Profile object
sbcape : Surface-based Convective Available Potential Energy (J/kg)
Returns
-------
esi : number
Energy Shear Index (unitless)
'''
p6km = interp.pres(prof, interp.to_msl(prof, 6000))
shr_850mb_6km = winds.wind_shear(prof, pbot=850, ptop=p6km)
shr_850mb_6km = utils.KTS2MS(utils.mag(*shr_850mb_6km)) / ( 6000 - interp.to_agl(prof, interp.hght(prof, 850)) )
esi = shr_850mb_6km * sbcape
return esi
def vgp(prof, pcl):
'''
Vorticity Generation Potential (*)
The Vorticity Generation Potential index was developed by Erik Rasmussen and
David Blanchard in 1998. It assesses the possibility for vorticity being
tilted into the vertical to create rotating updrafts.
The formula is:
VGP = sqrt(CAPE) * U03
Where U03 is the normalized total shear between the surface and 3 km AGL.
Parameters
----------
prof : Profile object
pcl : Parcel object
Returns
-------
vgp : number
Vorticity Generation Potential (number)
'''
psfc = prof.pres[prof.sfc]
p3km = interp.pres(prof, interp.to_msl(prof, 3000))
sfc3shr = winds.norm_total_shear(prof, pbot=psfc, ptop=p3km)[-1]
vgp = sfc3shr * ( pcl.bplus ** 0.5 )
return vgp
def aded_v1(prof):
'''
Adedokun Index, version 1 (*)
The Adedokun Index (created by J. A. Adedokun in 1981 and 1982) was developed
in two versions for forecasting precipitation in west Africa. The Index
lowers a 500 mb parcel moist adiabatically to 1000 mb, then compares it to
the wet bulb potential temperature (theta-w) of a specified level.
Version 1 subtracts the parcel's temperature from the theta-w of the 850 mb
level. This version has been found to be better for forecasting non-
occurrence of precipitation.
For both versions, values >= -1 were defined to be indicative of precipitation
occurrence while values < -1 were defined to be indicative of precipitation
non-occurrence.
Parameters
----------
prof : Profile object
Returns
-------
aded_v1 : number
Adedokun Index, version 1 (number)
'''
pclm500 = thermo.thetaws(500, interp.temp(prof, 500))
thtw850 = thermo.thetaw(850, interp.temp(prof, 850), interp.dwpt(prof, 850))
aded_v1 = thtw850 - pclm500
return aded_v1
def aded_v2(prof):
'''
Adedokun Index, version 2 (*)
The Adedokun Index (created by J. A. Adedokun in 1981 and 1982) was developed
in two versions for forecasting precipitation in west Africa. The Index
lowers a 500 mb parcel moist adiabatically to 1000 mb, then compares it to
the wet bulb potential temperature (theta-w) of a specified level.
Version 2 subtracts the parcel's temperature from the theta-w of the surface
level. This version has been found to be better for forecasting occurrence
of precipitation.
For both versions, values >= -1 were defined to be indicative of precipitation
occurrence while values < -1 were defined to be indicative of precipitation
non-occurrence.
Parameters
----------
prof : Profile object
Returns
-------
aded_v2 : number
Adedokun Index, version 2 (number)
'''
pclm500 = thermo.thetaws(500, interp.temp(prof, 500))
thtw_sfc = thermo.thetaw(prof.pres[prof.sfc], prof.tmpc[prof.sfc], prof.dwpc[prof.sfc])
aded_v2 = thtw_sfc - pclm500
return aded_v2
def ei(prof):
'''
Energy Index (*)
The Energy Index (also known as the Total Energy Index) was developed by G. L.
Darkow in 1968. It calculates the moist static energy at the 500 and 850 mb levels
and then subtracts the latter from the former. The energy is calculated in units
of cal/gm. Negative values indicate instability.
Parameters
----------
prof : Profile object
Returns
-------
ei : number
Energy Index (number)
'''
tmp500 = thermo.ctok(interp.temp(prof, 500)) # Temperature in degrees Kelvin
hght500 = interp.hght(prof, 500)
mxr500 = thermo.mixratio(500, interp.dwpt(prof,500))
tmp850 = thermo.ctok(interp.temp(prof, 850)) # Temperature in degrees Kelvin
hght850 = interp.hght(prof, 850)
mxr850 = thermo.mixratio(850, interp.dwpt(prof, 850))
# Calculate moist static energy in joules/kilogram
mse5_j = ( 1004.6851 * tmp500 ) + ( G * hght500 ) + ( 2500 * mxr500 )
mse8_j = ( 1004.6851 * tmp850 ) + ( G * hght850 ) + ( 2500 * mxr850 )
# Convert moist static energy to calories/gram
mse5_c = mse5_j / 4186.8
mse8_c = mse8_j / 4186.8
ei = mse5_c - mse8_c
return ei
def eehi(prof, sbcape, mlcape, sblcl, mllcl, srh01, bwd6, **kwargs):
'''
Enhanced Energy Helicity Index (*)
The original 0-1 km EHI presented a normalized product of 0-1 km storm-relative helicity
(SRH) and 100 mb mean parcel (ML) CAPE. This modified version more closely mimics the
fixed-layer significant tornado parameter with its inclusion of the same fixed-layer (0-6 km)
bulk wind difference term (SHR6), and the addition of a 4 km AGL max vertical velocity term (WMAX4).
If surface-based (SB) CAPE exceeds the MLCAPE, the ML lifting condensation level (LCL) in less than
1000 m AGL, and the surface temperature - dewpoint depression is no more than 10 F, then the SB
parcel is used in the EEHI calculation. Otherwise, the calculation defaults to the ML parcel.
The index is formulated as follows:
EEHI = ((CAPE * 0-1 km SRH)/ 160000) * SRH6 * WMAX4
The 0-6 km bulk wind difference term is capped at a value of 1.5 for SRH6 greater than 30 m/s,
(SHR6 / 20 m/s) for values from 12.5-30 m/s, and set to 0.0 when SHR6 is less than 12.5 m/s.
The WMAX4 term is capped at 1.5 for WMAX4 greater than 30 m/s, (WMAX4 / 20 m/s) for values
from 10-30 m/s, and set to 0.0 when WMAX4 is less than 10 m/s. Lastly, the entire index is
set to 0.0 if the average of the SBLCL and MLLCL is greater than 2000 m AGL.
This enhanced EHI is meant to highlight tornadic supercell potential into a lower range of buoyancy,
compared to the fixed-layer significant tornado parameter, with decreased false alarms compared to
the original 0-1 km EHI. The WMAX4 term reflects the thermodynamic potential for low-level vortex
stretching, while the SB parcel is used for CAPE calculations in relatively moist environments more
typical of the cool season or tropical cyclones. Values greater than 1 are associated with greater
probabilities of tornadic supercells.
Parameters
----------
prof : Profile object
pcl : Parcel object
mlcape : Mixed-layer CAPE from the parcel class (J/kg)
sbcape : Surface based CAPE from the parcel class (J/kg)
sblcl : Surface based lifted condensation level (m)
mllcl : Mixed-layer lifted condensation level (m)
srh01 : 0-1 km storm-relative helicity (m2/s2)
bwd6 : Bulk wind difference between 0 to 6 km (m/s)
Returns
-------
eehi : number
Enhanced Energy Helicity Index (unitless)
'''
tddsfc = thermo.ctof(prof.tmpc[prof.sfc]) - thermo.ctof(prof.dwpc[prof.sfc])
mupcl = getattr(prof, 'mupcl', parcelx(prof, flag=3))
mlpcl = kwargs.get('mlpcl', None)
if not mlpcl:
try:
mlpcl = prof.mlpcl
except:
mlpcl = parcelx(prof, flag=4)
if sbcape > mlcape and mllcl < 1000 and tddsfc <= 10:
capef = sbcape
else:
capef = mlcape
cape4 = mupcl.b4km
wmax4 = ( 2 * cape4 ) ** 0.5
if bwd6 > 30:
srh6f = 1.5
elif bwd6 < 12.5:
srh6f = 0
else:
srh6f = bwd6 / 20
if wmax4 > 30:
wmax4f = 1.5
elif wmax4 < 10:
wmax4f = 0
else:
wmax4f = wmax4 / 20
if ( sblcl + mllcl ) / 2 > 2000:
eehi = 0
else:
eehi = (( capef * srh01 ) / 160000 ) * srh6f * wmax4f
return eehi
def strong_tor(mlcape, bwd1, bwd6, mllcl, dcape):
'''
Strong Tornado Parameter (Strong-Tor) (*)
Formulation taken from Craven and Brooks 2004, NWD v.28 pg. 20.
This index was inspired by the original (fixed-layer) version of the Significant Tornado Parameter
(STP-Fixed (q.v.)), but is not to be confused with it. It makes use of some of the same parameters
as STP-Fixed; however, it replaces storm-relative helicity with 0-1 km AGL bulk shear (meaning neither
observed nor estimated storm motion is required for calculation) and adds in downdraft CAPE (DCAPE).
The source paper notes that well over 50% of the significant tornado cases it studied occurred with
values over 0.25, while over 75% of the cases that didn't involve significant tornadoes occurred with
values under 0.25.
Parameters
----------
prof : Profile object
mlcape : Mixed-layer CAPE from the parcel class (J/kg)
bwd1 : 0-1 km AGL bulk wind difference (m/s)
bwd6 : 0-6 km AGL bulk wind difference (m/s)
mllcl : mixed-layer lifted condensation level (m)
dcape : downdraft CAPE (J/kg)
'''
dcape_t = dcape[0]
strong_tor = ( ( mlcape * bwd1 * bwd6 ) / ( mllcl * dcape_t) )
return strong_tor
def vtp(prof, mlcape, esrh, ebwd, mllcl, mlcinh, **kwargs):
'''
Violent Tornado Parameter (*)
From Hampshire et. al. 2017, JOM page 8.
Research using observed soundings found that 0-3 km CAPE and 0-3 km lapse rate were notable
discriminators of violent tornado environments (versus weak and/or significant tornado environments).
These parameters were combined into the effective layer version of the Significant Tornado Parameter
(STP) to create the Violent Tornado Parameter (VTP).
Parameters
----------
prof : Profile object
mlcape : Mixed-layer CAPE from the parcel class (J/kg)
esrh : effective storm relative helicity (m2/s2)
ebwd : effective bulk wind difference (m/s)
mllcl : mixed-layer lifted condensation level (m)
mlcinh : mixed-layer convective inhibition (J/kg)
Returns
-------
vtp : number
Violent Tornado Parameter (unitless)
'''
cape_term = mlcape / 1500.
eshr_term = esrh / 150.
lr03_term = lapse_rate(prof, 0, 3000, pres=False) / 6.5
mlpcl = kwargs.get('mlpcl', None)
if not mlpcl:
try:
mlpcl = prof.mlpcl
except:
mlpcl = parcelx(prof, flag=4)
if ebwd < 12.5:
ebwd_term = 0.
elif ebwd > 30.:
ebwd_term = 1.5
else:
ebwd_term = ebwd / 20.
if mllcl < 1000.:
lcl_term = 1.0
elif mllcl > 2000.:
lcl_term = 0.0
else:
lcl_term = ((2000. - mllcl) / 1000.)
if mlcinh > -50:
cinh_term = 1.0
elif mlcinh < -200:
cinh_term = 0
else:
cinh_term = ((mlcinh + 200.) / 150.)
if mlpcl.b3km > 100:
cape3_term = 2
else:
cape3_term = mlpcl.b3km / 50
vtp = np.maximum(cape_term * eshr_term * ebwd_term * lcl_term * cinh_term * cape3_term * lr03_term, 0)
return vtp
def snsq(prof):
'''
Snow Squall Parameter (*)
From Banacos et. al. 2014, JOM page 142.
A non-dimensional composite parameter that combines 0-2 km AGL relative humidity, 0-2 km AGL
potential instability (theta-e decreases with height), and 0-2 km AGL mean wind speed (m/s).
The intent of the parameter is to identify areas with low-level potential instability, sufficient
moisture, and strong winds to support snow squall development. Surface potential temperatures
(theta) and MSL pressure are also plotted to identify strong baroclinic zones which often
provide the focused low-level ascent in cases of narrow snow bands.
Parameters
----------
prof : Profile object
Returns
-------
snsq : number
Snow Squall Parameter (unitless)
'''
sfc_pres = prof.pres[prof.sfc]
pres_2km = interp.pres(prof, interp.to_msl(prof, 2000))
relh02 = mean_relh(prof, pbot=sfc_pres, ptop=pres_2km)
sfc_thetae = prof.thetae[prof.sfc]
thetae_2km = interp.thetae(prof, pres_2km)
thetae_d02 = thetae_2km - sfc_thetae
mw02 = utils.KTS2MS(utils.mag(*winds.mean_wind_npw(prof, pbot=sfc_pres, ptop=pres_2km)))
sfc_wtb = prof.wetbulb[prof.sfc]
if relh02 < 60:
relhf = 0
else:
relhf = ( relh02 - 60 ) / 15
if thetae_d02 > 4:
thetaef = 0
else:
thetaef = ( 4 - thetae_d02 ) / 4
mwf = mw02 / 9
if sfc_wtb >= 1:
snsq = 0
else:
snsq = relhf * thetaef * mwf
return snsq
def snow(prof):
'''
Snow Index (*)
This index uses two thickness layers: the 850-700 mb thickness layer and the 1000-850 mb thickness
layer. A value of greater than 4179 indicates liquid precipitation; a value of 4179 indicates
mixed precipitation; and a value of less than 4179 indicates solid precipitation.
Parameters
----------
prof : Profile object
Returns
-------
snow : number
Snow Index (number)
'''
hght1000 = interp.hght(prof, 1000)
hght850 = interp.hght(prof, 850)
hght700 = interp.hght(prof, 700)
thick78 = hght700 - hght850
thick18 = hght1000 - hght850
snow = thick78 + ( 2 * thick18 )
return snow
def windex_v1(prof, **kwargs):
'''
Wind Index, version 1 (*)
This index, a measure of microburst potential and downdraft instability, estimates maximum
convective wind gust speeds. Created by Donald McCann in 1994, the index is displayed in knots.
There are two main versiona available. Version 1 uses the lapse rate from the observed surface
to the freezing level. Version 2 uses the lapse rate from the maximum predicted surface
temperature to the freezing level.
Parameters
----------
prof : Profile object
Returns
-------
windex_v1 : knots
WINDEX, version 1 (knots)
'''
frz_lvl = kwargs.get('frz_lvl', None)
sfc_pres = prof.pres[prof.sfc]
pres_1km = interp.pres(prof, interp.to_msl(prof, 1000))
mxr01 = mean_mixratio(prof, pbot=sfc_pres, ptop=pres_1km)
if not frz_lvl:
frz_lvl = interp.hght(prof, temp_lvl(prof, 0))
frz_pres = interp.pres(prof, frz_lvl)
frz_dwpt = interp.dwpt(prof, frz_pres)
mxr_frz = thermo.mixratio(frz_pres, frz_dwpt)
hm_m = interp.to_agl(prof, frz_lvl)
hm_km = hm_m / 1000
lr_frz = lapse_rate(prof, 0, hm_m, pres=False)
if mxr01 > 12:
rq = 1
else:
rq = mxr01 / 12
windex_v1 = 5 * ( ( hm_km * rq * ((lr_frz ** 2 ) - 30 + mxr01 - ( 2 * mxr_frz )) ) ** 0.5 )
return windex_v1
def windex_v2(prof, **kwargs):
'''
Wind Index, version 2 (*)
This index, a measure of microburst potential and downdraft instability, estimates maximum
convective wind gust speeds. Created by Donald McCann in 1994, the index is displayed in knots.
There are two main versiona available. Version 1 uses the lapse rate from the observed surface
to the freezing level. Version 2 uses the lapse rate from the maximum predicted surface
temperature to the freezing level.
Parameters
----------
prof : Profile object
Returns
-------
windex_v2 : knots
WINDEX, version 2 (knots)
'''
frz_lvl = kwargs.get('frz_lvl', None)
sfc_pres = prof.pres[prof.sfc]
max_tmp = getattr(prof, 'max_temp', max_temp(prof))
max_dpt = thermo.temp_at_mixrat(mean_mixratio(prof, sfc_pres, sfc_pres - 100, exact=True), sfc_pres)
max_vtp = thermo.virtemp(sfc_pres, max_tmp, max_dpt)
pres_1km = interp.pres(prof, interp.to_msl(prof, 1000))
mxr01 = mean_mixratio(prof, pbot=sfc_pres, ptop=pres_1km)
if not frz_lvl:
frz_lvl = interp.hght(prof, temp_lvl(prof, 0))
frz_pres = interp.pres(prof, frz_lvl)
frz_dwpt = interp.dwpt(prof, frz_pres)
mxr_frz = thermo.mixratio(frz_pres, frz_dwpt)
hm_m = interp.to_agl(prof, frz_lvl)
hm_km = hm_m / 1000
frz_vtp = interp.vtmp(prof, frz_pres)
lr_frz = ( frz_vtp - max_vtp ) / -hm_km
if mxr01 > 12:
rq = 1
else:
rq = mxr01 / 12
windex_v2 = 5 * ( ( hm_km * rq * ((lr_frz ** 2 ) - 30 + mxr01 - ( 2 * mxr_frz )) ) ** 0.5 )
return windex_v2
def gustex_v1(prof):
'''
Gust Index, version 1 (*)
Formulation taken from Greer 2001, WAF v.16 pg. 266.
This index attempts to improve the WINDEX (q.v.) by multiplying it by an emperically
derived constant between 0 and 1, then adding a wind speed factor.
There are four versions known. Versions 1 and 2 add half the 500 mb wind speed to
the multiple of the WINDEX and the constant. Version 1 uses WINDEX version 1.
Version 2 uses WINDEX version 2.
Versions 3 and 4 add the density-weighted mean wind speed between 1 and 4 km AGL.
Version 3 uses Windex version 1. Version 4 uses WINDEX version 2.
Parameters
----------
prof : Profile object
Returns
-------
gustex_v1 : knots
GUSTEX, version 1 (knots)
'''
windex1 = getattr(prof, 'windex_v1', windex_v1(prof))
mag500 = interp.vec(prof, 500)[1]
# The original paper derived a value of 0.6 for the constant, so that's what will be used here.
const = 0.6
gustex_v1 = ( const * windex1 ) + ( mag500 / 2 )
return gustex_v1
def gustex_v2(prof):
'''
Gust Index, version 2 (*)
Formulation taken from Greer 2001, WAF v.16 pg. 266.
This index attempts to improve the WINDEX (q.v.) by multiplying it by an emperically
derived constant between 0 and 1, then adding a wind speed factor.
There are four versions known. Versions 1 and 2 add half the 500 mb wind speed to
the multiple of the WINDEX and the constant. Version 1 uses WINDEX version 1.
Version 2 uses WINDEX version 2.
Versions 3 and 4 add the density-weighted mean wind speed between 1 and 4 km AGL.
Version 3 uses Windex version 1. Version 4 uses WINDEX version 2.
Parameters
----------
prof : Profile object
Returns
-------
gustex_v2 : knots
GUSTEX, version 2 (knots)
'''
windex2 = getattr(prof, 'windex_v2', windex_v2(prof))
mag500 = interp.vec(prof, 500)[1]
# The original paper derived a value of 0.6 for the constant, so that's what will be used here.
const = 0.6
gustex_v2 = ( const * windex2 ) + ( mag500 / 2 )
return gustex_v2
def gustex_v3(prof):
'''
Gust Index, version 3 (*)
Formulation taken from Greer 2001, WAF v.16 pg. 266.
This index attempts to improve the WINDEX (q.v.) by multiplying it by an emperically
derived constant between 0 and 1, then adding a wind speed factor.
There are four versions known. Versions 1 and 2 add half the 500 mb wind speed to
the multiple of the WINDEX and the constant. Version 1 uses WINDEX version 1.
Version 2 uses WINDEX version 2.
Versions 3 and 4 add the density-weighted mean wind speed between 1 and 4 km AGL.
Version 3 uses Windex version 1. Version 4 uses WINDEX version 2.
Parameters
----------
prof : Profile object
Returns
-------
gustex_v3 : knots
GUSTEX, version 3 (knots)
'''
windex1 = getattr(prof, 'windex_v1', windex_v1(prof))
pres1k = interp.pres(prof, interp.to_msl(prof, 1000))
pres4k = interp.pres(prof, interp.to_msl(prof, 4000))
mn_wd_1k_4k = utils.mag(*winds.mean_wind(prof, pbot=pres1k, ptop=pres4k))
# The original paper derived a value of 0.6 for the constant, so that's what will be used here.
const = 0.6
gustex_v3 = ( const * windex1 ) + ( mn_wd_1k_4k / 2 )
return gustex_v3
def gustex_v4(prof):
'''
Gust Index, version 4 (*)
Formulation taken from Greer 2001, WAF v.16 pg. 266.
This index attempts to improve the WINDEX (q.v.) by multiplying it by an emperically
derived constant between 0 and 1, then adding a wind speed factor.
There are four versions known. Versions 1 and 2 add half the 500 mb wind speed to
the multiple of the WINDEX and the constant. Version 1 uses WINDEX version 1.
Version 2 uses WINDEX version 2.
Versions 3 and 4 add the density-weighted mean wind speed between 1 and 4 km AGL.
Version 3 uses Windex version 1. Version 4 uses WINDEX version 2.
Parameters
----------
prof : Profile object
Returns
-------
gustex_v4 : knots
GUSTEX, version 4 (knots)
'''
windex2 = getattr(prof, 'windex_v2', windex_v2(prof))
pres1k = interp.pres(prof, interp.to_msl(prof, 1000))
pres4k = interp.pres(prof, interp.to_msl(prof, 4000))
mn_wd_1k_4k = utils.mag(*winds.mean_wind(prof, pbot=pres1k, ptop=pres4k))
# The original paper derived a value of 0.6 for the constant, so that's what will be used here.
const = 0.6
gustex_v4 = ( const * windex2 ) + ( mn_wd_1k_4k / 2 )
return gustex_v4
def wmsi(prof, **kwargs):
'''
Wet Microburst Severity Index (*)
This index, developed by K. L. Pryor and G. P. Ellrod in 2003, was developed to better
assess the potential deverity of wet microbursts. WSMI is a product of CAPE
(specifically from the most unstable parcel) and delta-Theta-E (see the Theta-E
Index parameter).
Parameters
----------
prof : Profile object
Returns
-------
wmsi : number
Wet Microburst Severity Index (number)
'''
mupcl = kwargs.get('mupcl', None)
tei_s = getattr(prof, 'tei_sfc', tei_sfc(prof))
if not mupcl:
try:
mupcl = prof.mupcl
except:
mulplvals = DefineParcel(prof, flag=3, pres=300)
mupcl = cape(prof, lplvals=mulplvals)
mucape = mupcl.bplus
wmsi = ( mucape * tei_s ) / 1000
return wmsi
def dmpi_v1(prof):
'''
Dry Microburst Potential Index, version 1 (*)
This index was primarily derived by R. Wakimoto in 1985 to forecast potential for
dry microbursts.
The original index, calculated using soundings in the region of Denver, CO, used
the 700 and 500 mb layers for its calculations. However, the RAOB Program manual
recommends the use of the 5,000 and 13,000-ft AGL layers so that the results can
be consistently used for any worldwide sounding, regardless of station elevation.
The decision was made to split the index into two versions, the original (version
1) and the RAOB (version 2).
Parameters
----------
prof : Profile object
Returns
-------
dmpi_v1 : number
Dry Microburst Potential Index, version 1 (number)
'''
tdd500 = interp.tdd(prof, 500)
tdd700 = interp.tdd(prof, 700)
lr75 = lapse_rate(prof, 700, 500, pres=True)
dmpi_v1 = lr75 + tdd700 - tdd500
return dmpi_v1
def dmpi_v2(prof):
'''
Dry Microburst Potential Index, version 2 (*)
This index was primarily derived by R. Wakimoto in 1985 to forecast potential for
dry microbursts.
The original index, calculated using soundings in the region of Denver, CO, used
the 700 and 500 mb layers for its calculations. However, the RAOB Program manual
recommends the use of the 5,000 and 13,000-ft AGL layers so that the results can
be consistently used for any worldwide sounding, regardless of station elevation.
The decision was made to split the index into two versions, the original (version
1) and the RAOB (version 2).
Parameters
----------
prof : Profile object
Returns
-------
dmpi_v2 : number
Dry Microburst Potential Index, version 2 (number)
'''
lvl5k = interp.to_msl(prof, utils.FT2M(5000))
lvl13k = interp.to_msl(prof, utils.FT2M(13000))
pres5k = interp.pres(prof, lvl5k)
pres13k = interp.pres(prof, lvl13k)
tdd5k = interp.tdd(prof, pres5k)
tdd13k = interp.tdd(prof, pres13k)
lr_5k_13k = lapse_rate(prof, lvl5k, lvl13k, pres=False)
dmpi_v2 = lr_5k_13k + tdd5k - tdd13k
return dmpi_v2
def hmi(prof):
'''
Hybrid Microburst Index (*)
This index, created by K. L. Pryor in 2006, is designed to detect conditions
favorable for both wet and dry microbursts.
Parameters
----------
prof : Profile object
Returns
-------
hmi : number
Hybrid Microburst Index
'''
tdd850 = interp.tdd(prof, 850)
tdd670 = interp.tdd(prof, 670)
lr_86 = lapse_rate(prof, 850, 670, pres=True)
hmi = lr_86 + tdd850 - tdd670
return hmi
def mwpi(prof, sbcape):
'''
Microburst Windspeed Potential Index (*)
This index is designed to improve the Hybrid Microburst Index by adding a
term related to surface-based CAPE values.
Parameters
----------
prof : Profile object
pcl : Parcel object
Returns
-------
mwpi : number
Microburst Windspeed Potential Index (number)
'''
hmi_t = getattr(prof, 'hmi', hmi(prof))
mwpi = ( sbcape / 1000 ) + ( hmi_t / 5 )
return mwpi
def mdpi(prof):
'''
Microburst Day Potential Index (*)
This index, developed jointly by the USAFs 45th Weather Squadronm and NASA's Applied
Meteorology Uint (AMU) in 1995, calculates the risk of a microburst based on the maximum
Theta-E temperature difference at two levels: the lowest 150 mb and the 650-500 mb levels.
If the MDPI value is at least 1, microbursts are likely.
Parameters
----------
prof : Profile object
Returns
-------
mdpi : number
Microburst Day Potential Index (number)
'''
sfc_pres = prof.pres[prof.sfc]
upr_pres = sfc_pres - 150.
layer_idxs_low = ma.where(prof.pres >= upr_pres)[0]
layer_idxs_high = np.logical_and(650 >= prof.pres, prof.pres >= 500)
min_thetae = ma.min(prof.thetae[layer_idxs_high])
max_thetae = ma.max(prof.thetae[layer_idxs_low])
mdpi = ( max_thetae - min_thetae ) / 30
return mdpi
def hi(prof):
'''
Humidity Index (*)
This index, derived by Z. Litynska in 1976, calculates moisture and instability using the dewpoint
depressions of several levels. It has proven to be fairly reliable, especially in the
Mediterranean regions of the world. Lower values indicate higher moisture content and greater
instability potential.
Parameters
----------
prof : Profile object
Returns
-------
hi :number
Humidity Index (number)
'''
tdd850 = interp.tdd(prof, 850)
tdd700 = interp.tdd(prof, 700)
tdd500 = interp.tdd(prof, 500)
hi = tdd850 + tdd700 + tdd500
return hi
def ulii(prof):
'''
Upper Level Instability Index (*)
This index was developed as part of a method for computing wind gusts produced by
high-based thunderstorms, typically in the Rocky Mountains region. It makes use of
the 400-mb ambient temperature, the 300-mb ambient temperature, and a parcel lifted
from the 500 mb level to both the 400 and 300-mb levels.
Parameters
----------
prof : Profile object
Returns
-------
ulii : number
Upper Level Instability Index (number)
'''
tmp500 = interp.temp(prof, 500)
dpt500 = interp.dwpt(prof, 500)
vtp400 = interp.vtmp(prof, 400)
vtp300 = interp.vtmp(prof, 300)
t_pcl54 = thermo.lifted(500, tmp500, dpt500, 400)
t_pcl53 = thermo.lifted(500, tmp500, dpt500, 300)
vt_pcl54 = thermo.virtemp(400, t_pcl54, t_pcl54)
vt_pcl53 = thermo.virtemp(300, t_pcl53, t_pcl53)
ulii = ( vtp400 - vt_pcl54 ) + ( vtp300 - vt_pcl53 )
return ulii
def ssi850(prof):
'''
Showalter Stability Index, 850 mb version (*)
This index, one of the first forecasting indices ever constructed, lifts a parcel
from 850 mb to 500 mb, then compares it with the ambient temperature (similar to
the lifted index). It does not work well in mountainous areas, and cannot be used
when the 850 mb level is below ground. The SSI was devised by Albert Showalter in
1947.
The version used here makes use of the virtual temperature correction, much like the
lifted indices in this program.
Parameters
----------
prof : Profile object
Returns
-------
ssi850 : number
Showalter Stability Index, 850 mb version (number)
'''
tmp850 = interp.temp(prof, 850)
dpt850 = interp.dwpt(prof, 850)
vtp500 = interp.vtmp(prof, 500)
t_pcl85 = thermo.lifted(850, tmp850, dpt850, 500)
vt_pcl85 = thermo.virtemp(500, t_pcl85, t_pcl85)
ssi850 = vtp500 - vt_pcl85
return ssi850
def fmwi(prof):
'''
Fawbush-Miller Wetbulb Index (*)
Formulation taken from Fawbush and Miller 1954, BAMS v.35 pgs. 154-165.
This index (referred to in the source paper as the Stability Index and in most other
sources as the Fawbush-Miller Index) is roughly similar to the Lifted Index; however,
it uses the mean wetbulb temperature in the moist layer, which is defined as the
lowest layer in which the relative humidity is at or above 65 percent. As such, the
bottom of this layer is defined as either the surface (if the surface relative humidity
is less than 65 percent) or the layer in which the relative humidity rises to 65
percent. The top of this layer is defined as the height at which the relative humidity
decreases to 65 percent. If the layer top is over 150 mb above the layer bottom, then
the height of the top of the moist layer is arbitrarly set to 150 mb above the bottom
level.
Negative values indicate increasing chances for convective and even severe weather.
The version used here makes use of the virtual temperature correction.
Parameters
----------
prof : Profile object
Returns
-------
fmwi : number
Fawbush-Miller Wetbulb Index (number)
'''
# Find moist layer thickness
dp = -1
psfc = prof.pres[prof.sfc]
ps = np.arange(psfc, 499, dp)
plog = np.log10(ps)
temp = interp.temp(prof, ps)
dwpt = interp.dwpt(prof, ps)
hght = interp.hght(prof, ps)
wetbulb = np.empty(ps.shape)
relh = np.empty(ps.shape)
for i in np.arange(0, len(ps), 1):
wetbulb[i] = thermo.wetbulb(ps[i], temp[i], dwpt[i])
relh[i] = thermo.relh(ps[i], temp[i], dwpt[i])
ind1 = ma.where(relh >= 65)[0]
ind2 = ma.where(relh <= 65)[0]
if len(ind1) == 0 or len(ind2) == 0:
relhp0 = ma.masked
relhp1 = ma.masked
else:
inds1 = np.intersect1d(ind1, ind2)
if len(inds1) == 1:
relhp0 = prof.pres[inds1][0]
elif len(inds1) == 2:
relhp0 = prof.pres[inds1][0]
relhp1 = prof.pres[inds1][1]
else:
diff1 = ind1[1:] - ind1[:-1]
diff2 = ind2[1:] - ind2[:-1]
inda = np.where(diff1 > 1)[0]
indb = np.where(diff2 > 1)[0] + inda + 1
if not utils.QC(inda) or not utils.QC(indb):
ind_x = ind1[-1]
else:
ind_x = ma.append(inda, indb)
#Identify layers that either increase or decrease in RH, then arrange interpolation settings accordingly
rhlr = ( ( relh[ind_x+1] - relh[ind_x] ) / ( hght[ind_x+1] - hght[ind_x] ) ) * -100
if rhlr[0] > 0:
relhp0 = np.power(10, np.interp(65, [relh[ind_x+1][0], relh[ind_x][0]],
[plog[ind_x+1][0], plog[ind_x][0]]))
lyr_bot = psfc
lyr_top = relhp0
else:
relhp0 = np.power(10, np.interp(65, [relh[ind_x][0], relh[ind_x+1][0]],
[plog[ind_x][0], plog[ind_x+1][0]]))
lyr_bot = relhp0
if not utils.QC(rhlr[1]):
relhp1 = ma.masked
try:
lyr_top = lyr_bot - 150
except:
rhlr[0] > 0
else:
if rhlr[1] > 0:
relhp1 = np.power(10, np.interp(65, [relh[ind_x+1][1], relh[ind_x][1]],
[plog[ind_x+1][1], plog[ind_x][1]]))
lyr_top = relhp1
else:
relhp1 = np.power(10, np.interp(65, [relh[ind_x][1], relh[ind_x+1][1]],
[plog[ind_x][1], plog[ind_x+1][1]]))
# Determine whether the moist layer's thickness is greater than 150 mb;
# if so, then reduce it down to 150 mb above the bottom layer
if lyr_bot - lyr_top <= 150:
lyr_thk = lyr_bot - lyr_top
else:
lyr_thk = 150
lyr_top = lyr_bot - 150
# Find mean wetbulb temperature, then lift from the middle of the moist layer
mn_wtb = mean_wetbulb(prof, pbot=lyr_bot, ptop=lyr_top)
mid_lyr_pr = lyr_bot - ( lyr_thk / 2 )
vt500 = interp.vtmp(prof, 500)
lift_mn_wtb = thermo.wetlift(mid_lyr_pr, mn_wtb, 500)
vt_pcl500 = thermo.virtemp(500, lift_mn_wtb, lift_mn_wtb)
fmwi = vt500 - vt_pcl500
return fmwi
def fmdi(prof):
'''
Fawbush-Miller Dewpoint Index (*)
Formulation taken from Fawbush and Miller 1954, BAMS v.35 pgs. 154-165.
This index (referred to in the source paper as the Dew-Point Index) is roughly similar
to the Lifted Index; however, it uses the mean dewpoint temperature in the moist layer,
which is defined as the lowest layer in which the relative humidity is at or above 65
percent. As such, the bottom of this layer is defined as either the surface (if the
surface relative humidity is less than 65 percent) or the layer in which the relative
humidity rises to 65 percent. The top of this layer is defined as the height at which
the relative humidity decreases to 65 percent. If the layer top is over 150 mb above the
layer bottom, then the height of the top of the moist layer is arbitrarly set to 150 mb
above the bottom level.
Values that are only slightly positive (+2 or below) indicate a slight chance of convection.
Negative values indicate increasing chances for convective and even severe weather.
The version used here makes use of the virtual temperature correction.
Parameters
----------
prof : Profile object
Returns
-------
fmdi : number
Fawbush-Miller Dewpoint Index (number)
'''
# Find moist layer thickness
dp = -1
psfc = prof.pres[prof.sfc]
ps = np.arange(psfc, 499, dp)
plog = np.log10(ps)
temp = interp.temp(prof, ps)
dwpt = interp.dwpt(prof, ps)
hght = interp.hght(prof, ps)
wetbulb = np.empty(ps.shape)
relh = np.empty(ps.shape)
for i in np.arange(0, len(ps), 1):
wetbulb[i] = thermo.wetbulb(ps[i], temp[i], dwpt[i])
relh[i] = thermo.relh(ps[i], temp[i], dwpt[i])
ind1 = ma.where(relh >= 65)[0]
ind2 = ma.where(relh <= 65)[0]
if len(ind1) == 0 or len(ind2) == 0:
relhp0 = ma.masked
relhp1 = ma.masked
else:
inds1 = np.intersect1d(ind1, ind2)
if len(inds1) == 1:
relhp0 = prof.pres[inds1][0]
elif len(inds1) == 2:
relhp0 = prof.pres[inds1][0]
relhp1 = prof.pres[inds1][1]
else:
diff1 = ind1[1:] - ind1[:-1]
diff2 = ind2[1:] - ind2[:-1]
inda = np.where(diff1 > 1)[0]
indb = np.where(diff2 > 1)[0] + inda + 1
if not utils.QC(inda) or not utils.QC(indb):
ind_x = ind1[-1]
else:
ind_x = ma.append(inda, indb)
#Identify layers that either increase or decrease in RH, then arrange interpolation settings accordingly
rhlr = ( ( relh[ind_x+1] - relh[ind_x] ) / ( hght[ind_x+1] - hght[ind_x] ) ) * -100
if rhlr[0] > 0:
relhp0 = np.power(10, np.interp(65, [relh[ind_x+1][0], relh[ind_x][0]],
[plog[ind_x+1][0], plog[ind_x][0]]))
lyr_bot = psfc
lyr_top = relhp0
else:
relhp0 = np.power(10, np.interp(65, [relh[ind_x][0], relh[ind_x+1][0]],
[plog[ind_x][0], plog[ind_x+1][0]]))
lyr_bot = relhp0
if not utils.QC(rhlr[1]):
relhp1 = ma.masked
try:
lyr_top = lyr_bot - 150
except:
rhlr[0] > 0
else:
if rhlr[1] > 0:
relhp1 = np.power(10, np.interp(65, [relh[ind_x+1][1], relh[ind_x][1]],
[plog[ind_x+1][1], plog[ind_x][1]]))
lyr_top = relhp1
else:
relhp1 = np.power(10, np.interp(65, [relh[ind_x][1], relh[ind_x+1][1]],
[plog[ind_x][1], plog[ind_x+1][1]]))
# Determine whether the moist layer's thickness is greater than 150 mb;
# if so, then reduce it down to 150 mb above the bottom layer
if lyr_bot - lyr_top <= 150:
lyr_thk = lyr_bot - lyr_top
else:
lyr_thk = 150
lyr_top = lyr_bot - 150
# Find mean dewpoint temperature, then lift from the middle of the moist layer
mn_dpt = mean_dewpoint(prof, pbot=lyr_bot, ptop=lyr_top)
mid_lyr_pr = lyr_bot - ( lyr_thk / 2 )
vt500 = interp.vtmp(prof, 500)
lift_mn_dpt = thermo.wetlift(mid_lyr_pr, mn_dpt, 500)
vt_pcl500 = thermo.virtemp(500, lift_mn_dpt, lift_mn_dpt)
fmdi = vt500 - vt_pcl500
return fmdi
def martin(prof):
'''
Martin Index (*)
Formulation taken from
AWS/TR-79/006, The Use of the Skew T, Log P Diagram in Analysis and Forecasting
December 1979 (Revised March 1990), pg. 5-37.
Unlike most thermodynamic indices (e.g. Lifted Index), which lift a parcel upwards from a lower level
to a higher level, the Martin Index works in reverse: it lowers a parcel moist-adiabatically from 500
mb down to where the moist adiabat crosses the highest measured mixing ratio in the profile. From
there, it is lowered down dry adiabatically to a particular level depending on the following circumstances:
If there is an inversion present in the profile and the base of the inversion is below 850 mb, then the
parcel is lowered to the level of the inversion base. If the inversion base is at or above 850 mb, or
there is no inversion present in the profile, then the parcel is lowered to 850 mb.
Upon reaching the selected level, the parcel's ambient temperature is compared with the profile's
ambient temperature. Negative numbers indicate instability, with increasingly negative values
suggesting increasing instability.
The version used here makes use of the virtual temperature correction. This required some rewriting
of the equation (particularly regarding the 500 mb parcel's ambient temperature).
Parameters
----------
prof : Profile object
Returns
-------
martin : number
Martin Index (number)
'''
# Find 500 mb parcel's saturation ambient temperature given its virtual temperature
vtp500 = interp.vtmp(prof, 500)
pcl_tmp500 = thermo.sat_temp(500, vtp500)
# Find maximum mixing ratio
mxr_prof = thermo.mixratio(prof.pres, prof.dwpc)
mxr_max = ma.max(mxr_prof)
# Find if an inversion exists; if so, find if bottom of lowest inversion is below 850 mb
inv_bot = getattr(prof, 'inversion', inversion(prof)[0][0])
if not utils.QC(inv_bot) or inv_bot <= 850:
bot_lvl = 850
else:
bot_lvl = inv_bot
bot_lvl_vtp = interp.vtmp(prof, bot_lvl)
# Find where 500 mb parcel's moist adiabat intersects the maximum mixing ratio; if parcel's
# moist adiabat's mixing ratio at the base level is lower than or equal to the maximum
# mixing ratio, use the parcel's moist adiabat temperature at the base level
sfc_pres = prof.pres[prof.sfc]
dp = -1
p_wtb = np.arange(sfc_pres, 500+dp, dp)
plog = np.log10(p_wtb)
pcl_wtb = np.empty(p_wtb.shape)
pcl_mxr = np.empty(p_wtb.shape)
for i in np.arange(0, len(p_wtb), 1):
pcl_wtb[i] = thermo.wetlift(500, pcl_tmp500, p_wtb[i])
pcl_mxr[i] = thermo.mixratio(p_wtb[i], pcl_wtb[i])
ind0 = ma.where(p_wtb == bot_lvl)[0]
ind1 = ma.where(pcl_mxr >= mxr_max)[0]
ind2 = ma.where(pcl_mxr <= mxr_max)[0]
if len(ind1) == 0:
pcl_bot_tmp = pcl_wtb[ind0][0]
pcl_bot_vtp = thermo.virtemp(bot_lvl, pcl_bot_tmp, pcl_bot_tmp)
elif len(ind2) == 0:
martin = ma.masked
else:
inds = np.intersect1d(ind1, ind2)
if len(inds) > 1:
pcl_lcl_p = p_wtb[inds][0]
else:
diff1 = ind1[1:] - ind1[:-1]
ind = ma.where(diff1 > 1)[0] + 1
try:
ind = ind.min()
except:
ind = ind1[-1]
pcl_lcl_p = np.power(10, np.interp(mxr_max, [pcl_mxr[ind+1], pcl_mxr[ind]],
[plog[ind+1], plog[ind]]))
pcl_lcl_tmp = thermo.wetlift(500, pcl_tmp500, pcl_lcl_p)
pcl_bot_tmp = thermo.theta(pcl_lcl_p, pcl_lcl_tmp, bot_lvl)
pcl_bot_dpt = thermo.temp_at_mixrat(bot_lvl, mxr_max)
pcl_bot_vtp = thermo.virtemp(bot_lvl, pcl_bot_tmp, pcl_bot_dpt)
martin = pcl_bot_vtp - bot_lvl_vtp
return martin
def csv(prof):
'''
"C" Stability Value (*)
Formulation taken from Cox 1961, BAMS v.42 pg. 770.
This index was originally derived in an effort to forecast thunderstorm potential. It is found by
raising the potential temperature at the 850 mb level moist adiabatically up to 600 mb, then subtracting
the 600 mb ambient temperature from it.
Parameters
----------
prof : Profile object
Returns
-------
csv : number
"C" Stability Value (number)
'''
thetv850 = thermo.theta(850, interp.vtmp(prof, 850))
wtlft86 = thermo.wetlift(1000, thetv850, 600)
vtp600 = interp.vtmp(prof, 600)
csv = wtlft86 - vtp600
return csv
def z_index(prof):
'''
Z-Index (*)
Formulation taken from Randerson 1977, MWR v.105 pg. 711.
This index was developed by D. Randerson in 1977 in an effort to forecast thunderstorms over Nevada.
It makes use of a regression equation that uses multiple variables: surface pressure (mb), surface
temperature (degrees C), surface dewpoint depression (degrees C), 850 mb ambient temperature (degrees C),
850 mb dewpoint depression (degrees C), 700 mb height (m), 500 mb ambient temperature (degrees C), the
U-component of the 500 mb wind (kts), and 500 mb dewpoint temperature (degrees C).
If the Z value is 0, then the probability of a thunderstorm is about 50%. If the Z value is positive,
then the probability is less than 50%. If the Z value is negative, then the probability is over 50%.
Parameters
----------
prof : Profile object
Returns
-------
z_index : number
Z-Index (number)
'''
pres_sfc = prof.pres[prof.sfc]
tmp_sfc = prof.tmpc[prof.sfc]
tdd_sfc = tmp_sfc - prof.dwpc[prof.sfc]
tmp850 = interp.temp(prof, 850)
tdd850 = interp.tdd(prof, 850)
hght700 = interp.hght(prof, 700)
tmp500 = interp.temp(prof, 500)
dpt500 = interp.dwpt(prof, 500)
vec500 = interp.vec(prof, 500)
u500 = utils.vec2comp(vec500[0], vec500[1])[0]
z_d = ( 165.19 * pres_sfc ) - ( 14.63 * tmp_sfc ) + ( 11.73 * tdd_sfc ) + ( 31.52 * tmp850 ) + ( 38.22 * tdd850 ) - ( 17.30 * hght700 ) + ( 85.89 * tmp500 ) + ( 12.69 * u500 ) - ( 12.85 * dpt500 )
z_index = 0.01 * ( z_d - 93200 )
return z_index
def swiss00(prof):
'''
Stability and Wind Shear index for thunderstorms in Switzerland, 00z version (SWISS00) (*)
This index is one of two versions of a forecasting index that was developed for use in forecasting
thunderstorms in Switzerland (see Huntrieser et. al., WAF v.12 pgs. 108-125). This version was
developed for forecasting nocturnal thunderstorms using soundings taken around 00z. It makes use
of the Showalter Index, the 3-6 km AGL wind shear, and the dewpoint depression at the 600 mb level.
Parameters
----------
prof : Profile object
Returns
-------
swiss00 : number
Stability and Wind Shear index for thunderstorms in Switzerland, 00z version (number)
'''
si850 = getattr(prof, 'ssi850', ssi850(prof))
p3km, p6km = interp.pres(prof, interp.to_msl(prof, np.array([3000., 6000.])))
ws36 = utils.KTS2MS(utils.mag(*winds.wind_shear(prof, pbot=p3km, ptop=p6km)))
tdd600 = interp.tdd(prof, 600)
swiss00 = si850 + ( 0.4 * ws36 ) + ( tdd600 / 10 )
return swiss00
def swiss12(prof):
'''
Stability and Wind Shear index for thunderstorms in Switzerland, 12z version (SWISS00) (*)
This index is one of two versions of a forecasting index that was developed for use in forecasting
thunderstorms in Switzerland (see Huntrieser et. al., WAF v.12 pgs. 108-125). This version was
developed for forecasting nocturnal thunderstorms using soundings taken around 12z. It makes use
of the Surface-based Lifted Index, the 0-3 km AGL wind shear, and the dewpoint depression at the
650 mb level.
Parameters
----------
prof : Profile object
Returns
-------
swiss12 : number
Stability and Wind Shear index for thunderstorms in Switzerland, 12z version (number)
'''
sbpcl = getattr(prof, 'sfcpcl', parcelx(prof, flag=1))
sli = sbpcl.li5
p_sfc = prof.pres[prof.sfc]
p3km = interp.pres(prof, interp.to_msl(prof, 3000))
ws03 = utils.KTS2MS(utils.mag(*winds.wind_shear(prof, pbot=p_sfc, ptop=p3km)))
tdd650 = interp.tdd(prof, 650)
swiss12 = sli - ( 0.3 * ws03 ) + ( 0.3 * tdd650 )
return swiss12
def fin(prof):
'''
FIN Index (*)
Formulation taken from Ukkonen et. al. 2017, JAMC 56 pg. 2349
This index is a modified version of the SWISS12 Index (q.v.) that makes use of the Most Unstable
Lifted Index, the 700 mb dewpoint depression, and the wind shear between the surface and 750 mb.
Negative values indicate favorable instability and shear for thunderstorm development.
Parameters
----------
prof : Profile object
Returns
-------
fin : number
FIN Index (number)
'''
# Calculate MULI
mupcl = getattr(prof, 'mupcl', parcelx(prof, flag=3))
muli = mupcl.li5
# Calculate 700 mb dewpoint depression
tdd700 = interp.tdd(prof, 700)
# Calculate surface-750 mb shear
sfc_pres = prof.pres[prof.sfc]
ws_sfc_750mb = utils.KTS2MS(utils.mag(*winds.wind_shear(prof, pbot=sfc_pres, ptop=750)))
fin = muli + ( tdd700 / 10 ) - ( ws_sfc_750mb / 10 )
return fin
def yon_v1(prof):
'''
Yonetani Index, version 1 (*)
This index, derived by T. Yonetani in 1979, was developed to help forecast thunderstorms
over the Kanto Plains region of Japan. It makes use of the environmental lapse rates at
the 900-850 mb and 850-500 mb levels, the average relative humidity of the 900-850 mb
level, and the moist adiabatic lapse rate of the ambient air temperature at 850 mb.
Positive values indicate a likely chance for thunderstorms.
Parameters
----------
prof : Profile object
Returns
-------
yon_v1 : number
Yonetani Index, version 1 (number)
'''
lr98 = lapse_rate(prof, 900, 850, pres=True)
lr85 = lapse_rate(prof, 850, 500, pres=True)
rh98 = mean_relh(prof, 900, 850) / 100
# Calculate moist adiabatic lapse rate at the ambient temperature at 850 mb
tmp850c = interp.temp(prof, 850)
tmp850k = thermo.ctok(tmp850c)
mxr850 = thermo.mixratio(850, tmp850c) / 1000
cp_m = 1.0046851 * ( 1 + ( 1.84 * mxr850 ) )
gocp = G / cp_m
lvocp = 2.5e6 / cp_m
lvord = 1680875 / 193
eps = 0.62197
num = 1 + ( lvord * ( mxr850 / tmp850k ) )
denom = 1 + ( lvocp * lvord * ( ( mxr850 * eps ) / ( tmp850k ** 2 ) ) )
malr850 = gocp * ( num / denom )
if rh98 > 0.57:
final_term = 15
else:
final_term = 16.5
yon_v1 = ( 0.966 * lr98 ) + ( 2.41 * ( lr85 - malr850 ) ) + ( 9.66 * rh98 ) - final_term
return yon_v1
def yon_v2(prof):
'''
Yonetani Index, version 2 (*)
This index is a modification of the original Yonetani Index that was developed in an effort
to better predict thunderstorms over the island of Cyprus (see Jacovides and Yonetani 1990,
WAF v.5 pgs. 559-569). It makes use of the same variables as the original index, but the
weighing factors are rearrainged. Positive values indicate a likely chance for
thunderstorms.
Parameters
----------
prof : Profile object
Returns
-------
yon_v2 : number
Yonetani Index, version 2 (number)
'''
lr98 = lapse_rate(prof, 900, 850, pres=True)
lr85 = lapse_rate(prof, 850, 500, pres=True)
rh98 = mean_relh(prof, 900, 850) / 100
# Calculate moist adiabatic lapse rate at the ambient temperature at 850 mb
tmp850c = interp.temp(prof, 850)
tmp850k = thermo.ctok(tmp850c)
mxr850 = thermo.mixratio(850, tmp850c) / 1000
cp_m = 1.0046851 * ( 1 + ( 1.84 * mxr850 ) )
gocp = G / cp_m
lvocp = 2.5e6 / cp_m
lvord = 1680875 / 193
eps = 0.62197
num = 1 + ( lvord * ( mxr850 / tmp850k ) )
denom = 1 + ( lvocp * lvord * ( ( mxr850 * eps ) / ( tmp850k ** 2 ) ) )
malr850 = gocp * ( num / denom )
if rh98 > 0.50:
final_term = 13
else:
final_term = 14.5
yon_v2 = ( 0.964 * lr98 ) + ( 2.46 * ( lr85 - malr850 ) ) + ( 9.64 * rh98 ) - final_term
return yon_v2
def fsi(prof):
'''
Fog Stability Index
Although this index was developed by USAF meteorologists for use in Germany, it can
also be applied to similar climates. The index is designed to indicate the
potential for radiation fog. Lower values indicate higher chances of radiation fog.
Parameters
----------
prof : Profile object
Returns
-------
fsi : number
Fog Stability Index (number)
'''
tmp_sfc = prof.tmpc[prof.sfc]
dpt_sfc = prof.dwpc[prof.sfc]
tmp850 = interp.temp(prof, 850)
vec850 = interp.vec(prof, 850)
fsi = ( 4 * tmp_sfc ) - ( 2 * ( tmp850 - dpt_sfc ) ) + vec850[1]
return fsi
def fog_point(prof, pcl):
'''
Fog Point (*)
This value indicates the temperature at which radiation fog will form. It is
determined by following the saturation mixing ratio line from the dew point curve
at the LCL pressure level to the surface.
Parameters
----------
prof : Profile object
pcl : Parcel object
Returns
-------
fog_point : (float [C])
Fog Point (Celsuis)
'''
dpt_lcl = interp.dwpt(prof, pcl.lclpres)
mxr_lcl = thermo.mixratio(pcl.lclpres, dpt_lcl)
sfc_pres = prof.pres[prof.sfc]
fog_point = thermo.temp_at_mixrat(mxr_lcl, sfc_pres)
return fog_point
def fog_threat(prof, pcl):
'''
Fog Threat (*)
This value indicates the potential for radiation fog. It is calculated by
subtracting the fog point from the 850 mb wet-bulb potential temperature.
Lower values indicate a higher likelihood for radiation fog.
Parameters
----------
prof : Profile object
pcl : Parcel object
Returns
-------
fog_threat : number
Fog Threat (number)
'''
fp = getattr(prof, 'fog_point', fog_point(prof, pcl))
thtw850 = thermo.thetaw(850, interp.temp(prof, 850), interp.dwpt(prof, 850))
fog_threat = thtw850 - fp
return fog_threat
def mvv(prof, pcl):
'''
Maximum Vertical Velocity (*)
This is the maximum vertical velocity of the potential convective updraft.
MVV is a function of CAPE.
Parameters
----------
prof : Profile object
pcl : Parcel object
Returns
-------
mvv : (float [m/s])
Maximum Vertical Velocity (meters/second)
'''
mvv = ( 2 * pcl.bplus ) ** 0.5
return mvv
def jli(prof):
'''
Johnson Lag Index (*)
Developed by D. L. Johnson in 1982, this index was based on a series of soundings
made during experiments in the 1970s. It is a parametric index that takes into
account temperature and moisture differences at several layers, and is used to
predict the likelihood of convective weather within several hours of the original
sounding. Negative values indicate increasing chances for convective weather.
Parameters
----------
prof : Profile object
Returns
-------
jli : number
Johnson Lag Index (number)
'''
tmp800 = interp.temp(prof, 800)
tmp650 = interp.temp(prof, 650)
tmp500 = interp.temp(prof, 500)
thetae900 = thermo.thetae(900, interp.temp(prof, 900), interp.dwpt(prof, 900))
thetae800 = thermo.thetae(800, interp.temp(prof, 800), interp.dwpt(prof, 800))
thetae750 = thermo.thetae(750, interp.temp(prof, 750), interp.dwpt(prof, 750))
thetae700 = thermo.thetae(700, interp.temp(prof, 700), interp.dwpt(prof, 700))
dt68 = tmp650 - tmp800
dt56 = tmp500 - tmp650
dte89 = thetae800 - thetae900
dte75 = thetae700 - thetae750
jli = ( -11.5 - dt68 ) + ( 2 * ( dt56 + 14.9 ) ) + (2 * ( dte89 + 3.5 ) ) - ( ( 3.0 + dte75 ) / 3 )
return jli
def gdi(prof, exact=False):
'''
Galvez-Davison Index (*)
Formulation takem from:
The Galvez-Davison Index for Tropical Convection
Galvez and Davison, 2016
(Available at http://wpc.ncep.noaa.gov/international/gdi/)
This index was developed in an effort to improve forecasting of convection in tropical
climates. It is an almagamation of four separate sub-indices, each of which measures an
important parameter for tropical convection:
Column Buoyancy Index (CBI) : Describes the availability of heat and moisture in a column
of air. This index is the only sub-index to produce positive values, and as such can be
considered the enhancement sub-index.
Mid-tropospheric Warming Index (MWI) : Accounts for stabilization/destabilization in
association with warm ridges/cool troughs in the mid-troposphere. It is an inhibition sub-
index, meaning it only produces negative numbers.
Inversion Index (II) : Designed to capture the effects of trade wind inversions, specifically
two processes that can inhibit convection: stability across the inversion and dry air
entrainment. Since it is an inhibition sub-index, it only produces negative numbers.
Terrain Correction (TC) : While the GDI should, strictly speaking, only be applicable in
places that are located below the 950 hPa level, numerical models usually interpolate data so
as to fill in layers that are below ground level in reality. The TC sub-index is intended to
be a correction factor to keep model-derived GDI values in high-altitude regions from becoming
unrealistically high.
One note to be aware of: The index makes use of the equivalent potential temperature (theta-e)
of several layers. The source paper suggests using a proxy equation to estimate the theta-e
values. However, anyone who desires more accuracy in the calculations should use SHARPpy's
built-in theta-e formula to calculate theta-e. This should be done by setting the "exact"
parameter to "True". One must, however, be aware that the use of the proxy formula could end
up producing a GDI value that is noticeably different from a value produced from the SHARPpy
formula (though some attempt has been made to balance the SHARPpy-based values so as to be
closer to those produced with the proxy equations.).
Parameters
----------
prof : Profile object
exact : bool (optional; default = False)
Switch between using SHARPpy's built-in theta-e formula (slower) or using the source paper's
recommended proxy formula (faster)
Returns
-------
gdi : number
Galvez-Davison Index (number)
'''
psfc = prof.pres[prof.sfc]
tmp950 = interp.temp(prof, 950)
dpt950 = interp.dwpt(prof, 950)
tmp850 = interp.temp(prof, 850)
dpt850 = interp.dwpt(prof, 850)
tmp700 = interp.temp(prof, 700)
dpt700 = interp.dwpt(prof, 700)
tmp500 = interp.temp(prof, 500)
dpt500 = interp.dwpt(prof, 500)
if exact:
thte950 = interp.thetae(prof, 950)
thte857 = ( ( interp.thetae(prof, 850) + interp.thetae(prof, 700) ) / 2 ) - 11.89
thte500 = interp.thetae(prof, 500) - 11.9
else:
tht950 = thermo.ctok(thermo.theta(950, tmp950))
tht857 = thermo.ctok( ( thermo.theta(850, tmp850) + thermo.theta(700, tmp700) ) / 2 )
tht500 = thermo.ctok(thermo.theta(500, tmp500))
mxr950 = thermo.mixratio(950, dpt950) / 1000
mxr857 = ( ( thermo.mixratio(850, dpt850) + thermo.mixratio(700, dpt700) ) / 2 ) / 1000
mxr500 = thermo.mixratio(500, dpt500) / 1000
thte950 = tht950 * np.exp( ( 2.69e6 * mxr950 ) / ( 1005.7 * thermo.ctok(tmp850) ) )
thte857 = tht857 * np.exp( ( 2.69e6 * mxr857 ) / ( 1005.7 * thermo.ctok(tmp850) ) ) - 10
thte500 = tht500 * np.exp( ( 2.69e6 * mxr500 ) / ( 1005.7 * thermo.ctok(tmp850) ) ) - 10
me = thte500 - 303
le = thte950 - 303
if le > 0:
cbi = 6.5e-2 * me * le
else:
cbi = 0
if tmp500 + 10 > 0:
mwi = -7 * ( tmp500 + 10 )
else:
mwi = 0
lr_97 = tmp950 - tmp700
lr_thte879 = thte857 - thte950
if lr_97 + lr_thte879 > 0:
ii = 0
else:
ii = 1.5 * ( lr_97 + lr_thte879 )
tc = 18 - ( 9000 / ( psfc - 500 ) )
gdi = cbi + mwi + ii + tc
return gdi
def cs_index(prof):
'''
CS Index (*)
Formulation taken from Huntrieser et. al. 1997, WAF v.12 pg. 119.
This index is a multiple of two parameters. The first parameter is the CAPE produced
by a parcel that is lifted from the convective temperature (labeled in the source paper
as "CAPE_CCL"). The second parameter is the shear used in the calculation of the Bulk
Richardson Number (BRN) shear term (labeled in the source paper as simply "S")..
The source paper notes that values of over 2700 indicate increased likelihood of widespread
thunderstorms.
Parameters
----------
prof : Profile object
Returns
-------
cs_index : number
CS Index (number)
'''
cnvcpcl = getattr(prof, 'cnvcpcl', parcelx(prof, flag=7))
cnvc_cape = cnvcpcl.bplus
ptop = interp.pres(prof, interp.to_msl(prof, 6000.))
pbot = prof.pres[prof.sfc]
p = interp.pres(prof, interp.hght(prof, pbot)+500.)
mnlu, mnlv = winds.mean_wind(prof, pbot, p)
mnuu, mnuv = winds.mean_wind(prof, pbot, ptop)
dx = mnuu - mnlu
dy = mnuv - mnlv
shr = utils.KTS2MS(utils.mag(dx, dy))
cs_index = cnvc_cape * shr
return cs_index
def wmaxshear(prof):
'''
WMAXSHEAR Parameter (*)
This parameter was derived in Taszarek et. al. 2017, MWR v.145 pg. 1519, as part of a study
on European convective weather climatology. It multiplies the maximum vertical velocity
(WMAX) of a mixed-layer parcel (derived from the mixed-layer CAPE (MLCAPE)) by the 0-6 km
AGL bulk shear (SHEAR). The source paper notes that, of all the parameters it tested, this
particular parameter best discriminated among severe and non-severe convection, as well as
among the various categories of severe convection.
Higher values generally indicate higher chances of severe weather, and increasing severity
of convective weather.
Parameters
----------
prof : Profile object
Returns
-------
wmaxshear : m**2 / s**2
WMAXSHEAR (meters**2 / second**2)
'''
mlpcl = getattr(prof, 'mlpcl', parcelx(prof, flag=4))
pres_sfc = prof.pres[prof.sfc]
p6k = interp.pres(prof, interp.to_msl(prof, 6000))
wmax = mvv(prof, mlpcl)
shear = utils.KTS2MS(utils.mag(*winds.wind_shear(prof, pbot=pres_sfc, ptop=p6k)))
wmaxshear = wmax * shear
return wmaxshear
def ncape(prof, pcl):
'''
Normalized CAPE (*)
NCAPE is CAPE that is divided by the depth of the positive-buoyancy layer. Values
around or less than 0.1 suggest a relatively "skinny" CAPE profile with relatively
weak parcel acceleration. Values around 0.3 or above suggest a relatively "fat"
CAPE profile with large parcel accelerations possible. Larger parcel accelerations
can likely lead to stronger, more sustained updrafts.
Parameters
----------
prof : Profile object
pcl : Parcel object
Returns
-------
ncape : number
NCAPE (number)
'''
p_buoy_depth = pcl.elhght - pcl.lfchght
ncape = pcl.bplus / p_buoy_depth
return ncape
def ncinh(prof, pcl):
'''
Normalized CINH (*)
NCINH is CINH that is divided by the depth of the negative-buoyancy layer. Values
around or greater than -0.01 suggest a relatively "skinny" CINH profile that only
requires relatively weak parcel acceleration to overcome the cap. Values around
-0.03 or below suggest a relatively "fat" CINH profile with large parcel accelerations
required to overcome the cap.
Parameters
----------
prof : Profile object
pcl : Parcel object
Returns
-------
ncinh : number
NCINH (number)
'''
n_buoy_depth = pcl.lfchght
ncinh = pcl.bminus / n_buoy_depth
return ncinh
def lsi(prof):
'''
Lid Strength Index (*)
Formulation taken from Carson et. al. 1980, BAMS v.61 pg. 1022.
The Lid Strength Index was originally derived as an analogue for the Lifted Index,
but as a way to measure the strength of the cap rather than stability. It uses the
mean theta-w of the lowest 100 mb, the maximum theta-ws in the atmosphere below 500
mb, and the average theta-ws between the maximum theta-ws layer and 500 mb.
Values below 1 indicate a very weak cap that would be easy to break; values between
1 and 2 indicate a cap that is just strong enough to suppress convection while still
being eventually breakable; and values above 2 indicate a very strong cap that is
unlikely to be broken.
Parameters
----------
prof : Profile object
Returns
-------
lsi : number
Lid Strength Index (number)
'''
sfc_pres = prof.pres[prof.sfc]
pres_100 = sfc_pres - 100
thetawv = getattr(prof, 'thetawv', prof.get_thetawv_profile())
ml_thtw = mean_thetaw(prof, sfc_pres, pres_100)
ml_pcl500 = thermo.wetlift(1000, ml_thtw, 500)
vt_pcl500 = thermo.virtemp(500, ml_pcl500, ml_pcl500)
thtw_vt500 = thermo.thetaws(500, vt_pcl500)
idx = ma.where(prof.pres >= 500)[0]
max_idx = np.ma.argmax(thetawv[idx])
max_pres = prof.pres[idx][max_idx]
ml_pcl_max = thermo.wetlift(1000, ml_thtw, max_pres)
vt_pcl_max = thermo.virtemp(max_pres, ml_pcl_max, ml_pcl_max)
thtw_vt_max = thermo.thetaws(max_pres, vt_pcl_max)
if max_pres < sfc_pres:
max_thetawv = thetawv[idx][max_idx]
else:
max_thetawv = thtw_vt_max
thtwv_up = mean_thetawv(prof, max_pres, 500)
lsi = ( thtw_vt500 - thtwv_up ) - ( max_thetawv - thtw_vt_max )
return lsi
def mcsi_v1(prof, lat=35):
'''
MCS Index, version 1 (*)
Formulation taken from Jirak and Cotton 2007, WAF v.22 pg. 825.
The MCS Index was originally derived by I. Jirak and W. Cotton in 2007 as an attempt
to determine the likelihood that convection will develop into a mesoscale convective
system (MCS). It makes use of the most-unstable Lifted Index, 0-3 km AGL bulk shear,
and temperature advection at the 700 mb level.
In WAF 24 pages 351-355, Bunkers warned that the results produced by the original
equation (version 1) could be strongly biased in gridded datasets by the temperature
advection term. In response, in WAF v.24 pgs. 356-360, Jirak and Cotton created a
second version (version 2) that rebalanced the equation so as to reduce the biasing.
MCSI values on below -1.5 are considered unfavorable for MCS development; between -1.5
and 0 are considered marginal; between 0 and 3 are considered favorable; and values
exceeding 3 are considered very favorable.
Parameters
----------
prof : Profile object
Returns
-------
mcsi_v1 : number
MCS Index, version 1 (number)
'''
# Calculate LI
mupcl = getattr(prof, 'mupcl', parcelx(prof, flag=3))
muli = mupcl.li5
# Calculate shear
p3km = interp.pres(prof, interp.to_msl(prof, 3000))
sfc_pres = prof.pres[prof.sfc]
mag03_shr = utils.KTS2MS(utils.mag(*winds.wind_shear(prof, pbot=sfc_pres, ptop=p3km)))
# Calculate 700 mb temperature advection
omega = (2. * np.pi) / (86164.)
b_pr = 750 # Pressure of bottom of layer
t_pr = 650 # Pressure of top of layer
b_tmp = thermo.ctok(interp.temp(prof, b_pr)) # Temperature of bottom of layer (Kelvin)
t_tmp = thermo.ctok(interp.temp(prof, t_pr)) # Temperature of top of layer (Kelvin)
b_ht = interp.hght(prof, b_pr) # Height ASL of bottom of layer (meters)
t_ht = interp.hght(prof, t_pr) # Height ASL of top of layer (meters)
b_wdir = interp.vec(prof, b_pr)[0] # Wind direction at bottom of layer (degrees from north)
t_wdir = interp.vec(prof, t_pr)[0] # Wind direction at top of layer (degrees from north)
# Calculate the average temperature
avg_tmp = (t_tmp + b_tmp) / 2.
# Calculate the mean wind between the two levels (this is assumed to be geostrophic)
mean_u, mean_v = winds.mean_wind(prof, pbot=b_pr, ptop=t_pr)
mean_wdir, mean_wspd = utils.comp2vec(mean_u, mean_v) # Wind speed is in knots here
mean_wspd = utils.KTS2MS(mean_wspd) # Convert this geostrophic wind speed to m/s
if utils.QC(lat):
f = 2. * omega * np.sin(np.radians(lat)) # Units: (s**-1)
else:
t7_adv = np.nan
return mcsi_v1
multiplier = (f / G) * (np.pi / 180.) # Units: (s**-1 / (m/s**2)) * (radians/degrees)
# Calculate change in wind direction with height; this will help determine whether advection is warm or cold
mod = 180 - b_wdir
t_wdir = t_wdir + mod
if t_wdir < 0:
t_wdir = t_wdir + 360
elif t_wdir >= 360:
t_wdir = t_wdir - 360
d_theta = t_wdir - 180.
# Here we calculate t_adv (which is -V_g * del(T) or the local change in temperature term)
# K/s s * rad/m * deg m^2/s^2 K degrees / m
t7_adv = multiplier * np.power(mean_wspd,2) * avg_tmp * (d_theta / (t_ht - b_ht)) # Units: Kelvin / seconds
# Calculate LI term
li_term = -( muli + 4.4 ) / 3.3
# Calculate shear term
shr_term = ( mag03_shr - 11.5 ) / 5
# Calculate advection term
adv_term = ( t7_adv - 4.5e-5 ) / 7.3e-5
# Calculate equation
mcsi_v1 = li_term + shr_term + adv_term
return mcsi_v1
def mcsi_v2(prof, lat=35):
'''
MCS Index, version 2 (*)
Formulation taken from Jirak and Cotton 2009, WAF v.24 pg. 359.
The MCS Index was originally derived by I. Jirak and W. Cotton in 2007 as an attempt
to determine the likelihood that convection will develop into a mesoscale convective
system (MCS). It makes use of the most-unstable Lifted Index, 0-3 km AGL bulk shear,
and temperature advection at the 700 mb level.
In WAF 24 pages 351-355, Bunkers warned that the results produced by the original
equation (version 1) could be strongly biased in gridded datasets by the temperature
advection term. In response, in WAF v.24 pgs. 356-360, Jirak and Cotton created a
second version (version 2) that rebalanced the equation so as to reduce the biasing.
MCSI values on below -1.5 are considered unfavorable for MCS development; between -1.5
and 0 are considered marginal; between 0 and 3 are considered favorable; and values
exceeding 3 are considered very favorable.
Parameters
----------
prof : Profile object
Returns
-------
mcsi_v2 : number
MCS Index, version 2 (number)
'''
# Calculate LI
mupcl = getattr(prof, 'mupcl', parcelx(prof, flag=3))
muli = mupcl.li5
# Calculate shear
p3km = interp.pres(prof, interp.to_msl(prof, 3000))
sfc_pres = prof.pres[prof.sfc]
mag03_shr = utils.KTS2MS(utils.mag(*winds.wind_shear(prof, pbot=sfc_pres, ptop=p3km)))
# Calculate 700 mb temperature advection
omega = (2. * np.pi) / (86164.)
b_pr = 750 # Pressure of bottom of layer
t_pr = 650 # Pressure of top of layer
b_tmp = interp.temp(prof, b_pr) # Temperature of bottom of layer (Celsius)
t_tmp = interp.temp(prof, t_pr) # Temperature of top of layer (Celsius)
b_ht = interp.hght(prof, b_pr) # Height ASL of bottom of layer (meters)
t_ht = interp.hght(prof, t_pr) # Height ASL of top of layer (meters)
b_wdir = interp.vec(prof, b_pr)[0] # Wind direction at bottom of layer (degrees from north)
t_wdir = interp.vec(prof, t_pr)[0] # Wind direction at top of layer (degrees from north)
# Calculate the average temperature
avg_tmp = (t_tmp + b_tmp) / 2.
# Calculate the mean wind between the two levels (this is assumed to be geostrophic)
mean_u, mean_v = winds.mean_wind(prof, pbot=b_pr, ptop=t_pr)
mean_wdir, mean_wspd = utils.comp2vec(mean_u, mean_v) # Wind speed is in knots here
mean_wspd = utils.KTS2MS(mean_wspd) # Convert this geostrophic wind speed to m/s
if utils.QC(lat):
f = 2. * omega * np.sin(np.radians(lat)) # Units: (s**-1)
else:
t7_adv = np.nan
return mcsi_v2
multiplier = (f / G) * (np.pi / 180.) # Units: (s**-1 / (m/s**2)) * (radians/degrees)
# Calculate change in wind direction with height; this will help determine whether advection is warm or cold
mod = 180 - b_wdir
t_wdir = t_wdir + mod
if t_wdir < 0:
t_wdir = t_wdir + 360
elif t_wdir >= 360:
t_wdir = t_wdir - 360
d_theta = t_wdir - 180.
# Here we calculate t_adv (which is -V_g * del(T) or the local change in temperature term)
# K/s s * rad/m * deg m^2/s^2 K degrees / m
t7_adv = multiplier * np.power(mean_wspd,2) * avg_tmp * (d_theta / (t_ht - b_ht)) # Units: Kelvin / seconds
# Calculate LI term
li_term = -( muli + 4.4 ) / 3.3
# Calculate shear term
shr_term = ( mag03_shr - 11.5 ) / 4.1
# Calculate advection term
adv_term = ( t7_adv - 4.5e-5 ) / 1.6e-4
# Calculate equation
mcsi_v2 = li_term + shr_term + adv_term
return mcsi_v2
def mosh(prof):
'''
Modified SHERB Parameter, standard version (MOSH) (*)
Formulation taken from Sherburn et. al. 2016, WAF v.31 pg. 1918.
In their 2016 followup to their 2014 paper that produced the SHERB parameter (q.v.), Sherburn
et. al. noted that while said parameter offered a means of identifying high-shear low-CAPE (HSLC)
environments, a thorough study of the synoptic factors prevalent in such environments offered
several parameters that, in combination, offered improved discrimination among severe weather in
HSLC environments. The Modified SHERB (MOSH) parameters were created as a result.
The standard version (simply referred to as "MOSH") makes use of the 0-3 km AGL lapse rate, the
0-1.5 km AGL bulk shear vector magnitude (in meters per second), and the maximum product of the
theta-e lapse rate and omega calculated from the 0-2 km AGL layer to the 0-6 km AGL layer at 0.5
km intervals. The enhanced version (called the Modified SHERB, Effective version or "MOSHE")
multiplies the basic MOSH by a factor involving the effective bulk wind difference.
Since both versions of the MOSH make use of omega, which is only available on model-derived
soundings, these parameters cannot be used on observed soundings.
Increasing values indicate increasing likelihood for HSLC severe weather.
Parameters
----------
prof : Profile object
Returns
-------
mosh : number
Modified SHERB Parameter, standard version (number)
'''
lr03k = lapse_rate(prof, 0, 3000, pres=False)
pbot = prof.pres[prof.sfc]
ptop = interp.pres(prof, interp.to_msl(prof, 1500))
shr015 = utils.KTS2MS(utils.mag(*winds.wind_shear(prof, pbot=pbot, ptop=ptop)))
hghts = np.arange(2000, 6500, 500)
prs = interp.pres(prof, interp.to_msl(prof, hghts))
thetae_lr = ( interp.thetae(prof, prs) - prof.thetae[prof.sfc] ) / hghts * 1000
max_thetae_lr = ma.max(thetae_lr)
idx = ma.where(prof.pres > prs[-1])[0]
max_omega = ma.min(prof.omeg[idx])
maxtevv = max_thetae_lr * max_omega
if not utils.QC(prof.omeg):
return ma.masked
else:
if lr03k < 4:
lllr = 0
else:
lllr = ( ( lr03k - 4 ) ** 2 ) / 4
if shr015 < 8:
shr = 0
else:
shr = ( shr015 - 8 ) / 10
if maxtevv < -10:
mxtv = 0
else:
mxtv = ( maxtevv + 10 ) / 9
mosh = lllr * shr * mxtv
return mosh
def moshe(prof, **kwargs):
'''
Modified SHERB Parameter, Enhanced version (MOSHE) (*)
Formulation taken from Sherburn et. al. 2016, WAF v.31 pg. 1918.
In their 2016 followup to their 2014 paper that produced the SHERB parameter (q.v.), Sherburn
et. al. noted that while said parameter offered a means of identifying high-shear low-CAPE (HSLC)
environments, a thorough study of the synoptic factors prevalent in such environments offered
several parameters that, in combination, offered improved discrimination among severe weather in
HSLC environments. The Modified SHERB (MOSH) parameters were created as a result.
The standard version (simply referred to as "MOSH") makes use of the 0-3 km AGL lapse rate, the
0-1.5 km AGL bulk shear vector magnitude (in meters per second), and the maximum product of the
theta-e lapse rate and omega calculated from the 0-2 km AGL layer to the 0-6 km AGL layer at 0.5
km intervals. The enhanced version (called the Modified SHERB, Effective version or "MOSHE")
multiplies the basic MOSH by a factor involving the effective bulk wind difference.
Since both versions of the MOSH make use of omega, which is only available on model-derived
soundings, these parameters cannot be used on observed soundings.
Increasing values indicate increasing likelihood for HSLC severe weather.
Parameters
----------
prof : Profile object
Returns
-------
mosh : number
Modified SHERB Parameter, Enhanced version (number)
'''
mosh_s = getattr(prof, 'mosh', mosh(prof))
mupcl = getattr(prof, 'mupcl', parcelx(prof, flag=3))
# Calculate the effective inflow layer
ebottom, etop = effective_inflow_layer( prof, mupcl=mupcl )
if ebottom is ma.masked or etop is ma.masked:
# If the inflow layer doesn't exist, return missing
return prof.missing
else:
# Calculate the Effective Bulk Wind Difference
ebotm = interp.to_agl(prof, interp.hght(prof, ebottom))
depth = ( mupcl.elhght - ebotm ) / 2
elh = interp.pres(prof, interp.to_msl(prof, ebotm + depth))
ebwd = winds.wind_shear(prof, pbot=ebottom, ptop=elh)
shear = utils.KTS2MS(utils.mag( ebwd[0], ebwd[1] ))
if shear < 8:
eshr = 0
else:
eshr = ( shear - 8 ) / 10
moshe = mosh_s * eshr
return moshe
def cii_v1(prof):
'''
Convective Instability Index, version 1 (*)
This index was developed by W. D. Bonner, R. M. Reap, and J. E. Kemper in 1971. It uses
the equivalent potential temperature (Theta-e) at the surface, 850 mb, and 700 mb levels.
Values <= 0 are indicative of convective instability and perhaps potential storm development.
Parameters
----------
prof : Profile object
Returns
-------
cii_v1 : number
Convective Instability Index, version 1 (number)
'''
te_sfc = prof.thetae[prof.sfc]
te850 = interp.thetae(prof, 850)
te700 = interp.thetae(prof, 700)
te_s8 = ( te_sfc + te850 ) / 2
cii_v1 = te700 - te_s8
return cii_v1
def cii_v2(prof):
'''
Convective Instability Index, version 2 (*)
This index was derived by D. A. Barber in 1975. It subtracts the average Theta-e value in
the 600-500 mb layer from the average Theta-e value in the lowest 100 mb. Values >= 0
indicate likely convective instability.
Parameters
----------
prof : Profile object
Returns
-------
cii_v2 : number
Convective Instability Index, version 2 (number)
'''
sfc_pres = prof.pres[prof.sfc]
top_pres = sfc_pres - 100
te_low100 = mean_thetae(prof, pbot=sfc_pres, ptop=top_pres)
te65 = mean_thetae(prof, pbot=600, ptop=500)
cii_v2 = te_low100 - te65
return cii_v2
def brooks_b(prof):
'''
Brooks B Parameter (*)
Formulation taken from Rasmussen and Blanchard 1998, WAF v.13 pg. 1158.
This equation was originally derived in Brooks et. al. 1994, WAF v.9 pgs. 606-618, as
part of a study on the relationship between low-level helicity, mid-level storm-
relative wind flow, and low-level moisture. The version used here was modified by
Rasmussen and Blanchard for their tornado climatology study. Higher values indicate
a greater chance of severe weather and possibly tornadoes.
Parameters
----------
prof : Profile object
Returns
-------
brooks_b : number
Brooks B Parameter (number)
'''
srwind = bunkers_storm_motion(prof)
srh3km = winds.helicity(prof, 0, 3000, srwind[0], srwind[1])[0]
p1k = interp.pres(prof, interp.to_msl(prof, 1000))
p2k = interp.pres(prof, interp.to_msl(prof, 2000))
p9k = interp.pres(prof, interp.to_msl(prof, 9000))
ind1 = np.where((p2k > prof.pres) | (np.isclose(p2k, prof.pres)))[0][0]
ind2 = np.where((p9k < prof.pres) | (np.isclose(p9k, prof.pres)))[0][-1]
gru, grv = utils.vec2comp(prof.wdir, prof.wspd)
sru, srv = gru - srwind[0], grv - srwind[1]
srwspd = utils.comp2vec(sru, srv)[1]
if len(srwspd[ind1:ind2+1]) == 0 or ind1 == ind2:
minu, minv = sru[ind1], srv[ind1]
return minu, minv, prof.pres[ind1]
arr = srwspd[ind1:ind2+1]
inds = np.ma.argsort(arr)
inds = inds[~arr[inds].mask][0::]
minu, minv = sru[ind1:ind2+1][inds], srv[ind1:ind2+1][inds]
vmin = utils.KTS2MS(utils.comp2vec(minu[0], minv[0])[1])
mn_mxr = mean_mixratio(prof, pbot=None, ptop=p1k)
brooks_b = mn_mxr + ( 11.5 * np.log10(srh3km / vmin) )
return brooks_b
def cpst_v1(mlcape, bwd6, srh03, mlcinh):
'''
Conditional Probability of a Significant Tornado, version 1 (*)
This equation is one of three that were derived in Togstead et. al., Weather and
Forecasting 2011 p. 729-743, as part of an effort to develop logistic regression
equations that could help assess the probability of the occurrence of significant
tornadoes (i.e. tornadoes rated EF2 or higher on the Enhanced Fujita (EF) scale).
This equation makes use of mixed-layer CAPE, 0-6 km bulk shear, 0-3 km storm-relative
helicity, and mixed-layer CIN.
Parameters
----------
mlcape : Mixed-layer CAPE from the parcel class (J/kg)
bwd6 : 0-6 km bulk shear (m/s)
srh03 : 0-3 km storm-relative helicity (m2/s2)
mlcinh : mixed-layer convective inhibition (J/kg)
Returns
-------
cpst_v1 : percent
Conditional Probability of a Significant Tornado, version 1 (percent)
'''
# Normalization values taken from the original paper.
mlcape_n = 40.7
bwd6_n = 23.4
srh03_n = 164.8
mlcinh_n = 58.1
# f(x) in the original paper.
reg = -4.69 + ( 2.98 * ( ( ( mlcape ** 0.5 ) / mlcape_n ) * ( bwd6 / bwd6_n ) ) ) + ( 1.67 * ( srh03 / srh03_n ) ) + ( 1.82 * ( mlcinh / mlcinh_n ) )
# P in the original paper.
cpst_v1 = 100 / ( 1 + np.exp(-reg) )
return cpst_v1
def cpst_v2(mlcape, bwd6, bwd1, mlcinh):
'''
Conditional Probability of a Significant Tornado, version 2 (*)
This equation is one of three that were derived in Togstead et. al., Weather and
Forecasting 2011 p. 729-743, as part of an effort to develop logistic regression
equations that could help assess the probability of the occurrence of significant
tornadoes (i.e. tornadoes rated EF2 or higher on the Enhanced Fujita (EF) scale).
This equation makes use of mixed-layer CAPE, 0-6 km bulk shear, 0-1 km bulk shear,
and mixed-layer CIN.
Parameters
----------
mlcape : Mixed-layer CAPE from the parcel class (J/kg)
bwd6 : 0-6 km bulk wind difference (m/s)
bwd1 : 0-1 km bulk wind difference (m/s)
mlcinh : mixed-layer convective inhibition (J/kg)
Returns
-------
cpst_v2 : percent
Conditional Probability of a Significant Tornado, version 2 (percent)
'''
# Normalization values taken from the original paper.
mlcape_n = 40.7
bwd6_n = 23.4
bwd1_n = 11.0
mlcinh_n = 58.1
# f(x) in the original paper.
reg = -5.67 + ( 3.11 * ( ( ( mlcape ** 0.5 ) / mlcape_n ) * ( bwd6 / bwd6_n ) ) ) + ( 2.23 * ( bwd1 / bwd1_n ) ) + ( 1.38 * ( mlcinh / mlcinh_n ) )
# P in the original paper.
cpst_v2 = 100 / ( 1 + np.exp(-reg) )
return cpst_v2
def cpst_v3(mlcape, bwd6, bwd1, mllcl, mlcinh):
'''
Conditional Probability of a Significant Tornado, version 3 (*)
This equation is one of three that were derived in Togstead et. al., Weather and
Forecasting 2011 p. 729-743, as part of an effort to develop logistic regression
equations that could help assess the probability of the occurrence of significant
tornadoes (i.e. tornadoes rated EF2 or higher on the Enhanced Fujita (EF) scale).
This equation makes use of mixed-layer CAPE, 0-6 km bulk shear, 0-1 km bulk shear,
mixed-layer LCL, and mixed-layer CIN.
The original paper states that, out the three conditional probability equations
derived, this one has the lowest chi square score. This is due to the fact that,
outside of very high LCL heights (i.e. near or above 2000 m AGL), LCL is less
discriminatory of tornadic vs. nontornadic environments than the other components
that were used in this and the other equations.
Parameters
----------
mlcape : Mixed-layer CAPE from the parcel class (J/kg)
bwd6 : 0-6 km bulk wind difference (m/s)
bwd1 : 0-1 km bulk wind difference (m/s)
mllcl : mixed-layer lifted condensation level (m)
mlcinh : mixed-layer convective inhibition (J/kg)
Returns
-------
cpst_v3 : percent
Conditional Probability of a Significant Tornado, version 3 (percent)
'''
# Normalization values taken from the original paper.
mlcape_n = 40.7
bwd6_n = 23.4
bwd1_n = 11.0
mllcl_n = 1170.0
mlcinh_n = 58.1
# f(x) in the original paper.
reg = -4.73 + ( 3.21 * ( ( ( mlcape ** 0.5 ) / mlcape_n ) * ( bwd6 / bwd6_n ) ) ) + ( 0.78 * ( ( bwd1 / bwd1_n ) / ( mllcl / mllcl_n ) ) ) + ( 1.06 * ( mlcinh / mlcinh_n ) )
# P in the original paper.
cpst_v3 = 100 / ( 1 + np.exp(-reg) )
return cpst_v3
def tie(prof):
'''
Tornado Intensity Equation (*)
Formulation taken from Colquhoun and Riley 1996, WAF v.11 pg. 367.
This equation is a regression equation designed to help predict the likely intensity
of a tornado forming within the proximity of a sounding station, given surface-based
Lifted Index and surface to 500 mb wind shear.
Parameters
----------
prof : Profile object
Returns
-------
tie : number
Tornado Intensity Equation (number)
'''
sbpcl = getattr(prof, 'sfcpcl', parcelx(prof, flag=1))
sli = sbpcl.li5
p_sfc = prof.pres[prof.sfc]
sfc_600mb_shr = utils.KTS2MS(utils.mag(*winds.wind_shear(prof, pbot=p_sfc, ptop=600)))
tie = ( -0.145 * sli ) + ( 0.136 * sfc_600mb_shr ) - 1.5
return tie
def t1_gust(prof):
'''
T1 Gust (*)
Formulation taken from
Notes on Analysis and Severe-Storm Forecasting Procedures of the Air Force Global Weather Central, 1972
by RC Miller.
This parameter estimates the maximum average wind gusts. If the sounding has an inversion
layer with a top less than 200 mb above the ground, then the maximum temperature in the
inversion is moist adiabatically lifted to 600 mb; if no inversion is present or if the top
of the inversion is above 200 mb above the ground, then the maximum forecast surface
temperature is moist adiabatically lifted to 600 mb. In either case, the lifted temperature
is subrtacted from the 600 mb ambient temperature; the square root of the difference is then
multiplied by 13 to get the likely T1 Average Gust.
The maximum peak gust is calculated by adding one third (1/3) of the mean wind in the lower
5,000 feet AGL to the T1 Average Gust value.
For the direction of the gusts, the mean wind direction in the level from 10,000 to 14,000
feet AGL is used.
Parameters
----------
prof : Profile object
Returns
-------
t1_avg : knots
T1 Average Gust (knots)
t1_peak : knots
T1 Peak Gust (knots)
t1_dir : degrees
T1 Gust Direction (degrees)
'''
inv_top = getattr(prof, 'inversion', inversion(prof, pbot=None, ptop=600)[1][0])
sfc_pres = prof.pres[prof.sfc]
if not utils.QC(inv_top) or inv_top < sfc_pres - 200:
max_tmp = getattr(prof, 'max_temp', max_temp(prof))
max_dpt = thermo.temp_at_mixrat(mean_mixratio(prof, sfc_pres, sfc_pres - 100, exact=True), sfc_pres)
max_vtp = thermo.virtemp(sfc_pres, max_tmp, max_dpt)
max_vtp_pcl = thermo.wetlift(sfc_pres, max_vtp, 600)
else:
idx = np.logical_and(inv_top >= prof.pres, prof.pres >= 600)
max_idx = np.ma.argmax(prof.tmpc[idx])
max_pres = prof.pres[idx][max_idx]
max_vtp = prof.vtmp[idx][max_idx]
max_vtp_pcl = thermo.wetlift(max_pres, max_vtp, 600)
vtp600 = interp.vtmp(prof, 600)
t1_diff = max_vtp_pcl - vtp600
t1_avg = 13 * (t1_diff ** 0.5)
pres5k = interp.pres(prof, interp.to_msl(prof, utils.FT2M(5000)))
mn_wd_sfc_5k = utils.mag(*winds.mean_wind(prof, pbot=sfc_pres, ptop=pres5k))
# If low-level wind speed data is unavailable, return only the average gust value.
if not utils.QC(mn_wd_sfc_5k):
t1_peak = t1_avg
else:
t1_peak = t1_avg + ( mn_wd_sfc_5k / 3 )
pres10k = interp.pres(prof, interp.to_msl(prof, utils.FT2M(10000)))
pres14k = interp.pres(prof, interp.to_msl(prof, utils.FT2M(14000)))
mn_wd_10_14 = winds.mean_wind(prof, pbot=pres10k, ptop=pres14k)
# If mid-level wind direction data is unavailable, return a value of 0 to represent variable (VRB) wind direction.
if not utils.QC(mn_wd_10_14):
t1_dir = 0
else:
t1_dir = utils.comp2vec(mn_wd_10_14[0], mn_wd_10_14[1])[0]
return t1_avg, t1_peak, t1_dir
def t2_gust(prof):
'''
T2 Gust (*)
Formulation taken from Fawbus and Miller 1954, BAMS v.35 pg. 14.
This parameter, which estimates maximum probable gusts, is most useful for isolated air-mass
thunderstorms and/or squall-line gust potential. The moist adiabat at the Wetbulb Zero height
(q.v.) is followed down to the surface level, and the temperature read off from there. It is
then subtracted from the surface temperature, and this difference is run through a non-linear
formula to calculate the probable average gust speed. The minimum and maximum probable gusts
are derived by, respectively, subtracting and adding eight knots to the average gust speed.
Note that, unlike the T1 Gust (q.v.), the T2 Gust does not make use of the low-level wind speed
data; however, it still makes use of the mid-level wind direction data.
Parameters
----------
prof : Profile object
Returns
-------
t2_min : knots
T2 Minimum Gust (knots)
t2_avg : knots
T2 Agerage Gust (knots)
t2_max : knots
T2 Maximum Gust (knots)
t2_dir : degrees
T2 Gust Direction (degrees)
'''
wbzp = getattr(prof, 'wbz', wbz(prof)[0])
sfc_pres = prof.pres[prof.sfc]
sfc_vtp = prof.vtmp[prof.sfc]
sfc_wtb_pot = thermo.wetlift(wbzp, 0, sfc_pres)
tmp_diff = sfc_vtp - sfc_wtb_pot
peak_gust_avg = 7 + ( 3.06 * tmp_diff ) - ( 0.0073 * np.power(tmp_diff, 2) ) - ( 0.000284 * np.power(tmp_diff, 3) )
peak_gust_min = peak_gust_avg - 8
peak_gust_max = peak_gust_avg + 8
if peak_gust_min < 0:
t2_min = 0
else:
t2_min = peak_gust_min
if peak_gust_avg < 0:
t2_avg = 0
else:
t2_avg = peak_gust_avg
if peak_gust_max < 0:
t2_max = 0
else:
t2_max = peak_gust_max
pres10k = interp.pres(prof, interp.to_msl(prof, utils.FT2M(10000)))
pres14k = interp.pres(prof, interp.to_msl(prof, utils.FT2M(14000)))
mn_wd_10_14 = winds.mean_wind(prof, pbot=pres10k, ptop=pres14k)
# If mid-level wind direction data is unavailable, return a value of 0 to represent variable (VRB) wind direction.
if not utils.QC(mn_wd_10_14):
t2_dir = 0
else:
t2_dir = utils.comp2vec(mn_wd_10_14[0], mn_wd_10_14[1])[0]
return t2_min, t2_avg, t2_max, t2_dir
def tsi(prof):
'''
Thunderstorm Severity Index (*)
This index is used to help measure and predict the severity of
thunderstorm events, using a regression equation based around instability,
shear, helicity, and storm motion. Lower values indicate higher
potential severity of a thunderstorm event; however, this is all
contigent on whether or not thunderstorms actually do occur.
Parameters
----------
prof : Profile object
Returns
-------
tsi : number
Thunderstorm Severity Index (number)
'''
sbpcl = getattr(prof, 'sfcpcl', parcelx(prof, flag=1))
hght_t = interp.to_agl(prof, prof.hght[prof.top])
wmax_c = winds.max_wind(prof, 0, hght_t, all=False)
wmax = utils.mag(wmax_c[0], wmax_c[1], missing=MISSING)
srwind = bunkers_storm_motion(prof)
srh03 = winds.helicity(prof, 0, 3000, stu = srwind[0], stv = srwind[1])[0]
ehi03 = ehi(prof, sbpcl, 0, 3000, stu = srwind[0], stv = srwind[1])
sspd = utils.mag(srwind[0], srwind[1], missing=MISSING)
tsi = 4.943709 - ( 0.000777 * sbpcl.bplus ) - ( 0.004005 * wmax ) + ( 0.181217 * ehi03 ) - ( 0.026867 * sspd ) - (0.006479 * srh03 )
return tsi
def hsev(prof):
'''
Hail Severity Equation (*)
Formulation taken from LaPenta et. al. 2000, NWD v.24 pg. 55.
This index is a regression equation (labeled as "CAT" in the source paper) that is intended to help
predict the possible severity of a hail event (which the source paper defines as being a function of
both reported hail size and the number of hail reports). It makes use of six parameters: most unstable
CAPE, most unstable equilibrium level (in thousands of feet), the value of the Total Totals index, 0-3
km AGL storm-relative helicity, 850 mb temperature, and deviation of the wetbulb zero (WBZ) altitude
from 10,000 feet AGL (measured as a set of categories each representing a range of WBZ values).
However, this index is not intended to forecast thunderstorms in general, and as such is congruent
upon thunderstorm development.
The source paper establishes a set of threshold values for hailstorm severity:
HSEV < 3.5 : no severe hail
3.5 <= HSEV < 5.5 : minor severe hail
5.5 <= HSEV < 7.5 : major severe hail
7.5 <= HSEV : extreme severe hail
Note, however, that this index was originally constructed for use in the general region of New York state.
Threshold values may have to be adjusted for other regions.
Parameters
----------
prof : Profile object
Returns
-------
hsev : number
Hail Severity Equation (number)
'''
mupcl = getattr(prof, 'mupcl', parcelx(prof, flag=3))
tt = getattr(prof, 'total_totals', t_totals(prof))
wbzh = wbz(prof)[1]
eqlv = utils.M2FT(mupcl.elhght) / 1000
mucp = mupcl.bplus
srwinds = bunkers_storm_motion(prof)
srh03 = winds.helicity(prof, 0, 3000, stu = srwinds[0], stv = srwinds[1])[0]
tmp850 = interp.temp(prof, 850)
if wbzh < 8000 or ( 12000 < wbzh and wbzh <= 13000 ):
wbzcat = 2
elif ( 8000 <= wbzh and wbzh < 9000 ) or ( 11000 < wbzh and wbzh <= 12000 ):
wbzcat = 1
elif 9000 <= wbzh and wbzh <= 11000:
wbzcat = 0
elif 13000 < wbzh and wbzh <= 14000:
wbzcat = 3
else:
wbzcat = 4
hsev = ( 0.144 * eqlv ) - ( 0.502 * wbzcat ) + ( 0.00182 * mucp ) + ( 0.0804 * tt ) + ( 0.00605 * srh03 ) + ( 0.203 * tmp850 ) + 0.153
return hsev
def hsiz(prof):
'''
Hail Size Equation (*)
Formulation taken from LaPenta et. al. 2000, NWD v.24 pg. 55.
This index is a regression equation (labeled as "SIZE" in the source paper) that is intended to help
predict the possible size (in inches) of hail produced by a hailstorm. It makes use of six parameters:
most unstable CAPE, most unstable equilibrium level (in thousands of feet), the value of the Total
Totals index, 0-3 km AGL storm-relative helicity, 850 mb temperature, and deviation of the wetbulb
zero (WBZ) altitude from 10,000 feet AGL (measured as a set of categories each representing a range
of WBZ values). However, this index is not intended to forecast thunderstorms in general, and as
such is congruent upon thunderstorm development.
Parameters
----------
prof : Profile object
Returns
-------
hsiz : inches
Hail Size Equation (inches)
'''
mupcl = getattr(prof, 'mupcl', parcelx(prof, flag=3))
tt = getattr(prof, 'total_totals', t_totals(prof))
wbzh = wbz(prof)[1]
eqlv = utils.M2FT(mupcl.elhght) / 1000
mucp = mupcl.bplus
srwinds = bunkers_storm_motion(prof)
srh03 = winds.helicity(prof, 0, 3000, stu = srwinds[0], stv = srwinds[1])[0]
tmp850 = interp.temp(prof, 850)
if wbzh < 8000 or ( 12000 < wbzh and wbzh <= 13000 ):
wbzcat = 2
elif ( 8000 <= wbzh and wbzh < 9000 ) or ( 11000 < wbzh and wbzh <= 12000 ):
wbzcat = 1
elif 9000 <= wbzh and wbzh <= 11000:
wbzcat = 0
elif 13000 < wbzh and wbzh <= 14000:
wbzcat = 3
else:
wbzcat = 4
hsiz = ( -0.0318 * eqlv ) + ( 0.000483 * mucp ) + ( 0.0235 * tt ) + ( 0.00233 * srh03 ) - ( 0.124 * wbzcat ) + ( 0.0548 * tmp850 ) - 0.772
return hsiz
def k_high_v1(prof):
'''
K-Index, high altitude version 1 (*)
Formulation taken from Modahl 1979, JAM v.18 pg. 675.
This index was derived by A. Modahl as a variant of the K-Index to be used in high-altitude areas.
However, testing of the initial modified version (version 1) suggested that omitting the
temperature lapse rate term and leaving just the 850 mb dewpoint temperature and 500 mb dewpoint
depression terms (version 2) would give results similar to the initial version.
Parameters
----------
prof : Profile object
Returns
-------
k_high_v1 : number
K-Index, high altitude version 1
'''
tmp700 = interp.temp(prof, 700)
tmp300 = interp.temp(prof, 300)
dpt850 = interp.dwpt(prof, 850)
tdd500 = interp.tdd(prof, 500)
k_high_v1 = ( tmp700 - tmp300 ) + dpt850 - tdd500
return k_high_v1
def k_high_v2(prof):
'''
K-Index, high altitude version 2 (*)
Formulation taken from Modahl 1979, JAM v.18 pg. 675.
This index was derived by A. Modahl as a variant of the K-Index to be used in high-altitude areas.
However, testing of the initial modified version (version 1) suggested that omitting the
temperature lapse rate term and leaving just the 850 mb dewpoint temperature and 500 mb dewpoint
depression terms (version 2) would give results similar to the initial version.
Parameters
----------
prof : Profile object
Returns
-------
k_high_v2 : number
K-Index, high altitude version 2
'''
dpt850 = interp.dwpt(prof, 850)
tdd500 = interp.tdd(prof, 500)
k_high_v2 = dpt850 - tdd500
return k_high_v2
def hltt(prof):
'''
High-Level Total Totals (HLTT) (*)
Formulation taken from:
A Modified Total Totals Index for Thunderstorm Potential Over the Intermountain West
Milne, 2004
(Available at https://www.weather.gov/media/wrh/online_publications/TAs/ta0404.pdf)
This index is a modification of the Total Totals index (q.v.) that is modified for use in high-altitude
terrain (e.g. the Intermountain West of the United States). It replaces the 850 mb temperature and
dewpoint variables with 700 mb temperature and dewpoint, since the 850 mb level will usually be
underneath ground level. Threshold values for the HLTT are lower than their equivalents for the
original Total Totals, as demonstrated below:
28 - 29 : Isolated thunderstorms possible.
29 - 30 : Isolated thunderstorms
31 - 32 : Isolated to scattered thunderstorms
Above 32 : Scattered to numerous thunderstorms
This index should be used with caution, particularly in the winter months, as high HLTT values can still
be achieved even when the lower level temperature and dewpoint are below freezing. It is best used in
the summer months, especially when the 500 mb temperature is below -15 degrees Celsius.
Parameters
----------
prof : Profile object
Returns
-------
hltt : number
High Level Total Totals (number)
'''
tmp700 = interp.temp(prof, 700)
dpt700 = interp.dwpt(prof, 700)
tmp500 = interp.temp(prof, 500)
hltt = tmp700 + dpt700 - ( 2 * tmp500 )
return hltt
def ssi700(prof):
'''
Showalter Stability Index, 700 mb version (*)
This index is a modification of the Showalter Stability Index (q.v.) which raises a parcel from the 700 mb
level instead of the 850 mb level. This is intended to make it useable for predicting convective weather
over high-altitude terrain. As such, threshold values for this index should be assumed to generally be
higher than the original index.
The version implemented here uses the virtual temperature correction.
Parameters
----------
prof : Profile object
Returns
-------
ssi700 : number
Showalter Stability Index, 700 mb version (number)
'''
tmp700 = interp.temp(prof, 700)
dpt700 = interp.dwpt(prof, 700)
vtp500 = interp.vtmp(prof, 500)
t_pcl75 = thermo.lifted(850, tmp700, dpt700, 500)
vt_pcl75 = thermo.virtemp(500, t_pcl75, t_pcl75)
ssi700 = vtp500 - vt_pcl75
return ssi700
def khltt(prof):
'''
Kabul High Level Total Totals (*)
Formulation taken from:
Climate and Weather Analysis of Afghan Thunderstorms
Geis, 2011
(Available at https://apps.dtic.mil/dtic/tr/fulltext/u2/a551911.pdf)
This index was derived in an effort to improve the forecasting of convective weather over the elevated
desert terrain of Afghanistan. It is based on the High Level Total Totals (q.v.), but uses the 800 mb
temperature and dewpoint and the 700 mb temperature in lieu of (respectively) the 700 mb temperature
and dewpoint and the 500 mb temperature. This modification was made in an effort to reduce false alarm
rates.
The source paper notes that thunderstorms are more (less) likely when the values are positive (negative);
however, verification statistics still show some overlap between the thunderstorm and non-thunderstorm
categories in the value range of -5 to 5.
Parameters
----------
prof : Profile object
Returns
-------
khltt : number
Kabul High Level Total Totals (number)
'''
tmp800 = interp.temp(prof, 800)
dpt800 = interp.dwpt(prof, 800)
tmp700 = interp.temp(prof, 700)
khltt = tmp800 + dpt800 - ( 2 * tmp700 )
return khltt
def kti(prof):
'''
Kabul Thunderstorm Index (KTI) (*)
Formulation taken from:
Climate and Weather Analysis of Afghan Thunderstorms
Geis, 2011
(Available at https://apps.dtic.mil/dtic/tr/fulltext/u2/a551911.pdf)
This index was derived in an effort to improve the forecasting of convective weather over the elevated
desert terrain of Afghanistan. The source paper's formula takes the 800 mb temperature and subtracts
it by twice the 800 mb dewpoint. The implementation used here subtracts the 800 mb dewpoint depression
by the 800 mb dewpoint, effectively giving the same result.
The source paper's verification statistics suggest that values below 25 should be considered a good
indicator of increased chances for thunderstorms, though there is some overlap between the thunderstorm
and non-thunderstorm categories in the value range of 17 to 25. The paper also notes that the false alarm
rate for the KTI is even lower still than the Kabul High Level Total Totals (q.v.).
Parameters
----------
prof : Profile object
Returns
-------
kti : number
Kabul Thunderstorm Index (number)
'''
tdd800 = interp.tdd(prof, 800)
dpt800 = interp.dwpt(prof, 800)
kti = tdd800 - dpt800
return kti
def waci(prof):
'''
Wind Adjusted Convective Index (WACI) (*)
Formulation taken from:
Severe Weather as Seen Via a Preliminary Sounding Climatology and a Wind Adjusted Convective Index (WACI)
Small, 2004
(Available at http://ams.confex.com/ams/pdfpapers/72344.pdf)
This index was derived in an effort to improve forecasting of severe weather and flooding conditions over
southern California. It makes use of a lifted index derived from a 750 mb parcel (which is assumed to be
saturated at the start), a moisture modifier based on the 600 mb and 750 mb dewpoint depressions, a wind
adjustment term based on the 500 mb wind speed, and a constant intended to make sure that the values of
the WACI resemble those of the Total Totals index (q.v.)
Parameters
----------
prof : Profile object
Returns
-------
waci : number
Wind Adjusted Convective Index (number)
'''
tmp750 = interp.temp(prof, 750)
tdd750 = interp.tdd(prof, 750)
tdd600 = interp.tdd(prof, 600)
vtp500 = interp.vtmp(prof, 500)
spd500 = interp.vec(prof, 500)[1]
waci_const = 30.
# Calculate the 750 mb saturated lifted index, with vitrual temperature correction
pcl_tmp500 = thermo.wetlift(750, tmp750, 500)
pcl_vtp500 = thermo.virtemp(500, pcl_tmp500, pcl_tmp500)
sat_li = vtp500 - pcl_vtp500
if sat_li < -8.:
sat_li_code = -8.
elif sat_li > -1.:
sat_li_code = -1.
else:
sat_li_code = sat_li
# Calculate the moisture modifier
if tdd600 < 4.:
tdd600_code = 10. + ( tdd600 - 4. )
elif tdd600 == 4.:
tdd600_code = 10.
elif tdd600 > 4. and tdd600 <= 8.:
tdd600_code = 10. - ( 2. * ( tdd600 - 4. ) )
elif tdd600 > 8. and tdd600 <= 9.5:
tdd600_code = 2. - ( tdd600 - 8. )
elif tdd600 > 9.5 and tdd600 <= 15:
tdd600_code = 0.5
else:
tdd600_code = 0.
if tdd750 < 4.:
tdd750_code = 10. + ( tdd750 - 4. )
elif tdd750 == 4.:
tdd750_code = 10.
elif tdd750 > 4. and tdd750 <= 8.:
tdd750_code = 10. - ( 2. * ( tdd750 - 4. ) )
elif tdd750 > 8. and tdd750 <= 9.5:
tdd750_code = 2. - ( tdd750 - 8. )
elif tdd750 > 9.5 and tdd750 <= 15:
tdd750_code = 0.5
else:
tdd750_code = 0.
moist_mod = ( tdd600_code + ( 2. * tdd750_code ) ) / 3.
# Calculate the wind adjustment
spd500_code = spd500 / 2.
# Calculate WACI
waci = ( -1. * sat_li_code * moist_mod ) - spd500_code + waci_const
return waci
|
989,377 | afa5b1c76f155900b8834a2ad6c4903c6ed7479f | number = input(
"Enter a number, and I'll tell you if it is a multiple of ten or not: ")
number = int(number)
if number % 10 == 0:
print("The number " + str(number) + " is a multiple of ten.")
else:
print("The number " + str(number) + " is not a multiple of ten.")
|
989,378 | 00a6c0387f98bd2a806e8953fd8387f18ecad504 | import json
context_path = "/metabolights/ws"
class TestIsaStudy(object):
def test_get_study_contacts_01(self, flask_app, sensitive_data):
study_id = "MTBLS1"
with flask_app.test_client() as c:
headers = {"user_token": sensitive_data.super_user_token_001}
result = c.get(f"{context_path}/studies/{study_id}/contacts", headers=headers, json={})
assert result is not None
contacts = json.loads(result.data)
assert contacts is not None
def test_post_study_contacts_01(self, flask_app, sensitive_data, mocker):
"""
Verifies WsClient reindex_study method update
"""
study_id = "MTBLS1"
json_data = {'contacts': [{'comments': [], 'firstName': 'Reza', 'lastName': 'Salek', 'email': 'rms72@cam.ac.uk',
'affiliation': 'University of Cambridge',
'address': 'The Department of Biochemistry, The Sanger Building, 80 Tennis Court Road, Cambridge, CB2 1GA, UK.',
'fax': '', 'midInitials': 'M', 'phone': '',
'roles': [{'annotationValue': 'principal investigator role'}]}]}
with flask_app.test_client() as c:
headers = {"user_token": sensitive_data.super_user_token_001, "save_audit_copy": True}
mock_elastic = mocker.Mock()
mocker.patch("app.ws.elasticsearch.elastic_service.ElasticsearchService.client", mock_elastic)
mock_elastic.index.return_value = ""
result = c.post(f"{context_path}/studies/{study_id}/contacts", headers=headers, json=json_data)
assert result is not None
assert result.status_code in (200, 201)
mock_elastic.assert_called()
contacts = json.loads(result.data)
assert contacts is not None
|
989,379 | 7d4b477ff1e7e3b7f1b2bd1e6427f1682393abdc | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Import built-in packages
import argparse, os, pickle, time
# Import external packages
import numpy as np
import matplotlib.pyplot as plt
import PIL.Image as Image
def generate_area_crop(img):
"""
args:
img : PIL.Image
return:
crop_area : tuple, of length = 4
"""
size_img = img.size
min_side = min(size_img)
padding_h, padding_v = (size_img[0] - min_side)/2, (size_img[1] - min_side)/2
crop_area = (padding_h, padding_v, size_img[0] - padding_h, size_img[1] - padding_v)
return crop_area
# from params import params
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dir", type=str, help="not yet written")
args = parser.parse_args()
arr_fpath = os.listdir(args.dir)
data = np.zeros(shape=(len(arr_fpath), 448, 448, 3))
for i, fpath in enumerate(arr_fpath):
print(fpath)
img = Image.open(os.path.join(args.dir, fpath)).convert('RGB')
# area_crop = generate_area_crop(img)
# img_crop = img.crop(area_crop)
# img_resize = img_crop.resize((448, 448))
img_np = np.asarray(img)
print(img_np)
data[i] = img_np
avr = np.sum(data, axis=0)/data.shape[0]
stack_avr = np.stack([avr]*data.shape[0])
var = np.sum((data - stack_avr)**2, axis=0)/data.shape[0]
print(avr.shape)
print(var.shape)
plt.imshow(var)
plt.show() |
989,380 | f187ee11e1f91100d8faaf9706e58f6f2130c2a0 | import numpy as np
def binary_classifications(predictions: np.ndarray) -> np.ndarray:
assert predictions.shape[1] == 2, \
f'Predictions are not binary, {predictions.shape=}'
return (predictions[:, 1] > 0.5).astype(int)
def classifications(predictions: np.ndarray) -> np.ndarray:
return np.argmax(predictions, axis=1)
def multilabel_classifications(predictions: np.ndarray) -> np.ndarray:
return (predictions > 0.5).astype(int)
def instance_wise(predictions: np.ndarray) -> np.ndarray:
"""
Converts predictions to be instance wise ordered as opposed
to model ordered.
Done by inverting the 1st axis (models) and 2nd axis (instances)
"""
axes = (1, 0, *range(2, predictions.ndim))
return np.transpose(predictions, axes=axes)
|
989,381 | 7eee9db0aab66a0270527a8e34c60cc4378a41ef | import xml.etree.ElementTree as ET
import os
oname = ['person', 'car', 'bicycle', 'motorbike', 'dog']
def parse_rec(filename):
tree = ET.parse(filename)
root = tree.getroot()
rmId = []
for i, obj in enumerate(tree.findall('object')):
if obj.find('name').text not in oname:
rmId.append(i)
a = len(rmId)-1
while(a>=0):
if (filename.split('/')[-1]).split('_')[0] in ['2007', '2008']:
root.remove(root[rmId[a]+5])
else:
root.remove(root[rmId[a]+2])
a =a -1
objs = tree.findall('object')
if objs != []:
tree.write('VOC2012/Annotations/'+filename.split('/')[-1])
file1.write((filename.split('/')[-1]).split('.')[0]+'\n')
with open('VOC2012/ImageSets/Main/trainval.txt', 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
file1 = open('trainvalremain.txt', 'w')
for imagename in imagenames:
parse_rec('VOC2012/Annotations1/'+imagename + '.xml')
file1.close()
|
989,382 | e9161e807dee353ee302711d4eda79f7b6862ed2 | # -*- coding: utf-8 -*-
"""
Created on Sat May 30 22:31:10 2020
@author: Mouiad
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import os
import re
import getLane
# Code to generate lane coordinates from probablity maps.
# Experiment name
exp = "vgg_SCNN_DULR_w9"
# Data root
data = "../data/CULane"
# Directory where prob imgs generated by CNN are saved.
probRoot = "../experiments/predicts/" + exp
# Directory to save fitted lanes.
output = "../prob2lines/output/" + exp
testList = data + "/list/test.txt" # TODO edited the text file
show = True # set to true to visualize
with open(testList) as f:
image_list = f.readlines()
image_list = [x.strip() for x in image_list]
#print(image_list)
num = len(image_list)
pts = 18
for i in range(num):
if np.mod(i + 1, 100) == 0:
print(('Processing the %d th image...\\n'), i)
imname = image_list[i]
existPath = probRoot + imname[:-3] + "exist.txt"
with open(existPath) as f:
exist = f.readlines()
exist = [y for x in exist for y in x if y == '1' or y == '0']
# TODO better to show with opencv
coordinates = np.zeros((4, pts))
for j in range(4):
if exist[j] == '1':
scorePath = probRoot + imname[:-4] + "_" + str(j + 1) + "_avg.png"
scoreMap = mpimg.imread(scorePath) * 255.0
coordinate = getLane.getLane(scoreMap)
coordinates[j, :] = coordinate
if show:
img = mpimg.imread(data + imname)
probMaps = np.uint8(np.zeros((288, 800, 3))) # TODO this needs to change for the right size
plt.imshow(img)
for k in range(4):
color = ['g', 'b', 'r', 'y']
if exist[k] == '1':
for m in range(pts): # The devil is in the details m and k and -1
if coordinates[k, m] > 0: # plotting!
plt.plot(np.uint16(np.round(coordinates[k, m] * 1640.0 / 800.0)) - 1, np.uint16(np.round(590 - m * 20)) - 1, "." + color[k], "markersize", 30)
probPath = probRoot + imname[:-4] + "_" + str(k + 1) + "_avg.png"
probMap = mpimg.imread(probPath) * 255 # The mod
probMaps[:, :, np.mod(k + 1, 3)] = probMaps[:, :, np.mod(k + 1, 3)] + probMap
#plt.show()
#plt.imshow(probMaps)
plt.show()
else:
save_name = output + imname[:- 3] + "lines.txt"
position = [m.start() for m in re.finditer('/', save_name)]
prefix = ''
if position:
prefix = save_name[:position[-1]]
if not os.path.exists(prefix) and prefix != ' ':
os.makedirs(prefix)
fp = open(save_name, "w")
for k in range(4):
if exist[k] == '1' and np.sum(coordinates[k, :] > 0) > 1:
for m in range(pts):
if coordinates[k, m] > 0:
fp.write(str(np.uint16(np.round(coordinates[k, m] * 1640.0 / 800.0)) - 1) + ' ' + str(np.uint16(np.round(590 - m * 20)) - 1) + ' ')
#print(np.uint16(np.round(coordinates[k, m] * 1640.0 / 800.0)) - 1, np.uint16(np.round(590 - m * 20)) - 1)
fp.write('\n')
fp.close()
|
989,383 | fcc3327637da097f1d7e19c2ea685b5c4e39ff48 | # -*- coding: utf-8 -*-
# @Author: LC
# @Date: 2016-04-08 15:02:26
# @Last modified by: LC
# @Last Modified time: 2016-04-10 16:22:30
# @Email: liangchaowu5@gmail.com
# 找到与target相同的数,从这个位置往左右递归找
class Solution(object):
def searchRange(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
if len(nums) == 0:
return [-1,-1]
posi = [-1,-1]
left = 0
right = len(nums)-1
self.find(nums,target,left,right,posi)
return posi
def find(self,nums,target,left,right,position):
while left < right:
mid = (left + right)/2
if nums[mid] < target:
left = mid + 1
elif nums[mid]> target:
right = mid - 1
else:
if position[0] == -1 or position[0]>mid:
position[0] = mid
if position[1] == -1 or position[1]<mid:
position[1] = mid
self.find(nums,target,left,mid-1,position)
self.find(nums,target,mid+1,right,position)
return # 这里需要有返回,否则程序回到上一次找到这个值的地方会一直死循环递归下去
if nums[left]==target:
if position[0] == -1 or position[0]>left:
position[0] = left
if position[1] == -1 or position[1]<right:
position[1] = right
## recursive
class Solution(object):
def searchRange(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
result = [-1, -1]
self.helper(0, len(nums) - 1, nums, target, result)
return result
def helper(self, left, right, nums, target, result):
if left > right:
return
mid = left + ((right-left)>>1)
if nums[mid] < target:
self.helper(mid+1, right, nums, target, result)
elif nums[mid] > target:
self.helper(left, mid - 1, nums, target, result)
else:
result[0] = mid if result[0] == -1 else min(result[0], mid)
result[1] = max(result[1], mid)
self.helper(left, mid-1, nums, target, result)
self.helper(mid+1, right, nums, target, result)
|
989,384 | 7fb9ef06d147de2a718e78ebaf0f66666fd0493a | # Installing Python Packages
# Get Package Import Manager "pip":
# http://pip.readthedocs.io/en/stable/installing/#installing-with-get-pip-py
# Upgrade pip
# python -m pip install -U pip
# Install some packages
# pip3 install numpy
# pip3 install pandas
# First NumPy Array
# Create list baseball
baseball = [180, 215, 210, 210, 188, 176, 209, 200]
# Import the numpy package as np
import numpy as np
# Create a numpy array from baseball: np_baseball
np_baseball = np.array(baseball)
# Print out type of np_baseball
print(np_baseball)
|
989,385 | e0481a81f8aab91275d5f005d3c8b59712fe261c | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
import pandas as pd
import os
class CsvFile():
path = ''
reader = None
def __init__(self,path):
self.path = path
self.reader = pd.read_table(self.getListFiles(path)[0], sep=',', chunksize=1)
def getListFiles(self):
path = self.path
assert os.path.isdir(path), '%s not exist.' % path
ret = []
for root, dirs, files in os.walk(path):
print
'%s, %s, %s' % (root, dirs, files)
for filespath in files:
ret.append(os.path.join(root, filespath))
return ret |
989,386 | e1643a273e238ad3fa61e43af774185ce702c43d | import os
import matplotlib.pyplot as plt
from keras.models import load_model
from keras import backend as K
import numpy as np
import pandas as pd
def load_data():
x_train = []
temp = pd.read_csv("train.csv", skiprows = 0)
y_train = np.array(temp.ix[:, 0])
for i in range(temp.shape[0]):
x_train.append(temp.ix[i, 1].split(' '))
x_train = np.asarray(x_train)
x_train = x_train.astype('float32')
x_train = x_train / 255
y_train = y_train.astype('int')
return (x_train, y_train)
def main():
base_path = './'
store_path = './'
model_path = os.path.join(base_path,'model.h5')
emotion_classifier = load_model(model_path)
layer_dict = dict([layer.name, layer] for layer in emotion_classifier.layers[1:])
input_img = emotion_classifier.input
name_ls = ['conv1_1']
collect_layers = [ K.function([input_img, K.learning_phase()], [layer_dict[name].output]) for name in name_ls ]
(x_train, y_train) = load_data()
x_train = x_train[25800:28709]
y_train = y_train[25800:28709]
private_pixels = x_train
private_pixels = [ private_pixels[i].reshape((1, 48, 48, 1))
for i in range(len(private_pixels)) ]
choose_id = 0
photo = private_pixels[choose_id]
for cnt, fn in enumerate(collect_layers):
im = fn([photo, 0]) #get the output of that layer
fig = plt.figure(figsize=(14, 8))
nb_filter = im[0].shape[3]
for i in range(nb_filter):
ax = fig.add_subplot(nb_filter/16, 16, i+1)
ax.imshow(im[0][0, :, :, i], cmap='BuGn')
plt.xticks(np.array([]))
plt.yticks(np.array([]))
plt.tight_layout()
fig.suptitle('Output of layer{} (Given image{})'.format(cnt, choose_id))
img_path = os.path.join(store_path, 'vis')
if not os.path.isdir(img_path):
os.mkdir(img_path)
fig.savefig(os.path.join(img_path,'layer{}'.format(cnt)))
if __name__ == "__main__":
main() |
989,387 | b90dea0ddc8cc2da632fbc9bd3f037de71b2a91f | from django.apps import AppConfig
class MencrepairlogConfig(AppConfig):
name = 'mencrepairlog'
|
989,388 | c11b4c1c535c54c9d7e8bd0956e2168b2d70c75f | def exchange(a, b):
temp = a
a = b
b = temp
return a, b
a = 10
b = 20
a, b = exchange(a, b)
print(a, b)
a, b = b, a
print(a, b) |
989,389 | 04d17dbfafd817027a4fe9e64f10b529b58d7dc4 | VENV_FOLDER = '~/.virtualenvs'
VENV_NAME = 'cms'
APT_GET_DELAY = 24*60*60 # in seconds
package_info = [
('apt-get-update',
{
'exists': [
('[ $(($(date +%s) - $(stat -c %Y /var/lib/apt/periodic/update-success-stamp) - {})) -le 0 ]'.format(APT_GET_DELAY), 0),
],
'install': [
'sudo apt-get -y update',
'sudo apt-get -y upgrade',
],
}),
('sublime',
{
'exists': [
('cat ~/.bashrc | grep -q sublime_text', 0),
('ls /usr/local/bin | grep -q sublime', 0)
],
'install': [
'rm -rf /tmp/sublime',
'sudo rm -rf /usr/local/bin/sublime',
'wget http://c758482.r82.cf2.rackcdn.com/Sublime%20Text%202.0.2%20x64.tar.bz2 -O /tmp/sublime.tar.bz2',
'cd /tmp; tar -xvjf sublime.tar.bz2',
'mv /tmp/Sublime\ Text\ 2 /tmp/sublime',
'sudo mv /tmp/sublime /usr/local/bin/sublime',
'''
if grep -q "alias sublime" ~/.bashrc; then
echo "alias sublime already exists"
else
echo "alias sublime=/usr/local/bin/sublime/sublime_text" >> ~/.bashrc
fi''',
],
}),
('chrome',
{
'options':{
'ignore_result': True,
'verify_install': 'which google-chrome',
},
'exists': [
('which google-chrome', 0),
],
'install': [
'wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb -O /tmp/google-chrome-stable_current_amd64.deb',
'sudo dpkg -i /tmp/google-chrome-stable_current_amd64.deb',
'sudo apt-get -fy install',
],
}),
('wingide', {
'exists': [
('which wing4.1', 0)
],
'install': [
'sudo apt-get -y install enscript',
'wget http://wingware.com/pub/wingide/4.1.14/wingide4.1_4.1.14-1_amd64.deb -O /tmp/wingide.deb',
'sudo dpkg -i /tmp/wingide.deb',
'sudo apt-get -fy install',
],
}),
('fix-scrollbar-menubar', {
'exists': [
('dpkg -s overlay-scrollbar', 256),
('dpkg -s liboverlay-scrollbar-0.2-0', 256),
('dpkg -s liboverlay-scrollbar3-0.2-0', 256),
('dpkg -s appmenu-gtk', 256),
('dpkg -s appmenu-gtk3', 256),
('dpkg -s appmenu-qt', 256),
],
'install': [
'sudo apt-get -y purge "scrollbar*"',
'sudo apt-get -y purge appmenu-gtk appmenu-gtk3 appmenu-qt',
],
}),
('geany', {
'exists': [
('dpkg -s geany', 0),
],
'install': [
'sudo apt-get install -y geany',
],
}),
('restricted-extras (fonts)', {
'options': {
'stdout_redirect': False,
},
'exists': [
('dpkg -s ubuntu-restricted-extras', 0)
],
'install': [
'sudo apt-get install -y ubuntu-restricted-extras'
],
}),
('adobe reader', {
'options': {
'stdout_redirect': False,
},
'exists': [
('dpkg -s acroread', 0),
],
'install': [
'sudo apt-add-repository -y "deb http://archive.canonical.com/ $(lsb_release -sc) partner"',
'sudo apt-get update',
'sudo apt-get install -y acroread',
],
}),
('ccsm',
{
'exists': [
('dpkg -s compizconfig-settings-manager', 0),
],
'install': [
'sudo apt-get install -y compizconfig-settings-manager',
],
}),
('diffuse', {
'exists': [
('dpkg -s diffuse', 0),
],
'install': [
'sudo apt-get install -y diffuse',
'''
if grep -q "alias diffuse" ~/.bashrc; then
echo "alias diffuse already exists"
else
echo "alias diffuse='/usr/bin/python /usr/bin/diffuse'" >> ~/.bashrc
fi''',
],
}),
('git', {
'exists': [
('dpkg -s git', 0),
],
'install': [
'sudo apt-get install -y git',
],
}),
('git-configuration', {
'exists': [
('cat ~/.gitconfig | grep motleytech', 0),
('cat ~/.gitconfig | grep motleytechnet', 0),
],
'install': [
'git config --global user.name motleytech',
'git config --global user.email motleytechnet@gmail.com',
'git config --global alias.st "status"',
'git config --global alias.stat "status"',
'git config --global alias.wdiff "diff --color-words"',
'git config --global merge.tool diffuse',
'git config --global merge.summary true',
'git config --global difftool.prompt false',
'git config --global diff.tool diffuse',
'git config --global color.ui true',
],
}),
('build-essential', {
'exists': [
('dpkg -s build-essential', 0),
],
'install': [
'sudo apt-get install -y build-essential',
],
}),
('python-dev', {
'exists': [
('dpkg -s python-dev', 0),
],
'install': [
'sudo apt-get install -y python-dev',
],
}),
('python-yaml', {
'exists': [
('dpkg -s python-yaml', 0),
],
'install': [
'sudo apt-get install -y python-yaml',
],
}),
('python-setuptools', {
'exists': [
('dpkg -s python-setuptools', 0),
],
'install': [
'sudo apt-get install -y python-setuptools',
],
}),
('ipython', {
'exists': [
('dpkg -s ipython', 0),
],
'install': [
'sudo apt-get install -y ipython',
],
}),
('pylint', {
'exists': [
('dpkg -s pylint', 0),
],
'install': [
'sudo apt-get install -y pylint',
],
}),
('python-pip', {
'exists': [
('dpkg -s python-pip', 0),
],
'install': [
'sudo apt-get install -y python-pip',
],
}),
('python-imaging', {
'exists': [
('dpkg -s python-imaging', 0),
],
'install': [
'sudo apt-get install -y python-imaging',
],
}),
('virtualenvwrapper', {
'exists': [
('dpkg -s virtualenvwrapper', 0),
],
'install': [
'sudo apt-get install -y virtualenvwrapper',
],
}),
('vim', {
'exists': [
('dpkg -s vim', 0),
],
'install': [
'sudo apt-get install -y vim',
],
}),
('openssh-server', {
'exists': [
('dpkg -s openssh-server', 0),
],
'install': [
'sudo apt-get install -y openssh-server',
],
}),
('postgresql', {
'exists': [
('dpkg -s postgresql', 0),
],
'install': [
'sudo apt-get install -y postgresql',
],
}),
('pgadmin3', {
'exists': [
('dpkg -s pgadmin3', 0),
],
'install': [
'sudo apt-get install -y pgadmin3',
],
}),
('pillow-libs', {
'exists': [
('dpkg -s libjpeg8-dev', 0),
('dpkg -s zlibc', 0),
('dpkg -s libtiff4-dev', 0),
],
'install': [
'sudo apt-get install -y zlibc',
'sudo apt-get install -y libjpeg8-dev',
'sudo apt-get install -y libtiff4-dev',
],
}),
('apt-cleanup', {
'install': [
'sudo apt-get -fy install',
'sudo apt-get -y autoclean',
'sudo apt-get -y autoremove',
],
}),
('create-virt-env', {
'exists': [
('[ -d %s/%s ]' % (VENV_FOLDER, VENV_NAME), 0)
],
'install': [
'mkdir -p %s; cd %s; virtualenv %s' % (VENV_FOLDER, VENV_FOLDER, VENV_NAME),
],
}),
('install-virt-pkgs', {
'install': [
'/bin/bash -c "source %s/%s/bin/activate; pip install -r requirements_cms.txt"' % (VENV_FOLDER, VENV_NAME),
],
}),
]
|
989,390 | 2ba2f3a6cdd5ec29a261c599691150e8d4532b0c | #!/usr/bin/env python
from json_base import json_base
from tools.user_management import user_pack
class invalidation(json_base):
_json_base__status = ['new', "hold", 'announced', 'acknowledged']
_json_base__schema = {
'_id': '',
'prepid': '',
'object': '',
'status': '',
'type': ''
}
def __init__(self, json_input=None):
json_input = json_input if json_input else {}
# set invalidation status
self._json_base__schema['status'] = self._json_base__status[0]
# update self according to json_input
self.update(json_input)
self.validate()
user_p = user_pack()
self.current_user_email = user_p.get_email()
def set_announced(self):
self.set_attribute('status', 'announced') |
989,391 | 7062241f343e8ea2495894f7fc245c5a6d66dc21 | import json
import os
def get_data(path=None):
"""
get config data from file
Args:
path(string): path to config file
Return:
dict {"login":value", "password":value}
"""
# use default path
if not path:
path = os.path.relpath("config.json")
try:
with open(path, mode="r") as f:
data = json.load(f)
return data
except Exception as e:
print(e)
if __name__ == "__main__":
pass
|
989,392 | 85c5507aa7f00b862e447d3440d1ffe51f258c99 | from ForwardAnalysis.Utilities.etaMinCandViewSelector_cfi import etaMinCandViewSelector as etaMinPFCands
etaMinPFCands.src = "particleFlow"
|
989,393 | 08d99fa97a58cb5115aeb04265ab5f97367818e4 |
from flask import Blueprint, request, jsonify
http_methods = Blueprint("http_methods", __name__)
# getting data from different type requests
@http_methods.route("/", methods=["GET"])
def get_method(**request_variables):
return jsonify({
"request_method": request.method,
"request_variables": request_variables,
"request_args": request.args.to_dict(),
"request_data": request.form.to_dict()
})
@http_methods.route("/", methods=["POST"])
def post_method(**request_variables):
return jsonify({
"request_method": request.method,
"request_variables": request_variables,
"request_args": request.args.to_dict(),
"request_data": request.form.to_dict()
}) |
989,394 | 34fc556dc40f3a84e04620fd858332f64050a502 | if __name__ == "__main__":
fp = open('hello.txt','rt',encoding='utf-8')
line = fp.readline()
print(line.strip())
line = fp.readline()
print(line.strip())
line = fp.readline()
print(line.strip())
fp.close()
|
989,395 | 8fc1812550898d9ae6d156ce3a75ef86d41c6eba | """initiate strain advisor app"""
from app import create_app
APP = create_app() |
989,396 | 90cf841f5af6fccd7150892bb76bae9cdac48edc | class JointProbabilityTable:
def __init__(self, columns, data):
self._columns = columns
self._probability_index = len(columns)
self._data = self._normalize(data)
def _normalize(self, data):
probability_sum = 0
for row in data:
probability_sum += row[-1]
for row in data:
if probability_sum != 0:
row[-1] = row[-1]/probability_sum
else:
row[-1] = 0
return data
def given(self, event_name, event_happened_value):
contextual_columns = [entry for entry in self._columns]
contextual_data = []
event_column_index = self._columns.index(event_name)
probability_sum = 0
for row in self._data:
if row[event_column_index] == event_happened_value:
new_row = [entry for i, entry in enumerate(row)]
probability_sum += new_row[-1]
contextual_data.append(new_row)
else:
new_row = [entry for i, entry in enumerate(row)]
new_row[-1] = 0
contextual_data.append(new_row)
for row in contextual_data:
if probability_sum != 0:
row[-1] = row[-1]/probability_sum
else:
row[-1] = 0
return JointProbabilityTable(contextual_columns, contextual_data)
def _get_matching_probability(self, new_beliefs, event_value):
for new_belief in new_beliefs:
if new_belief[0] == event_value:
return new_belief[1]
def _clone_data(self):
return [list(row) for row in self._data]
def _add_to_current_beliefs(self, current_beliefs, event_value, probability):
if not event_value in current_beliefs:
current_beliefs[event_value] = 0
current_beliefs[event_value] += probability
def _get_current_beliefs_for_event(self, event_name):
current_beliefs = {}
event_column_index = self._columns.index(event_name)
for row in self._data:
row_event_name = row[event_column_index]
row_event_probability = row[self._probability_index]
self._add_to_current_beliefs(current_beliefs, row_event_name, row_event_probability)
return current_beliefs
def _get_belief_shifts(self, current_beliefs, new_beliefs):
belief_shifts = {}
for event_value in new_beliefs:
updated_probability = new_beliefs[event_value]
current_probability = current_beliefs[event_value]
if current_probability != 0:
probability_shift = updated_probability / current_probability
else:
probability_shift = 0
belief_shifts[event_value] = probability_shift
return belief_shifts
def update_belief(self, event_name, new_beliefs):
current_beliefs = self._get_current_beliefs_for_event(event_name)
belief_shifts = self._get_belief_shifts(current_beliefs, new_beliefs)
event_column_index = self._columns.index(event_name)
new_table = self._clone_data()
for row in new_table:
row[-1] = row[-1] * belief_shifts[row[event_column_index]]
return JointProbabilityTable(self._columns, new_table)
def probability(self, event_name):
beliefs = {}
event_column_index = self._columns.index(event_name)
for row in self._data:
event_value = row[event_column_index]
if not (event_value in beliefs):
beliefs[event_value] = 0
beliefs[event_value] += row[-1]
return beliefs
def update_applicable_beliefs(self, node_name, joint_probability_table):
for event_name in joint_probability_table._columns:
if event_name in self._columns:
event_beliefs = joint_probability_table.probability(event_name)
self._data = self.update_belief(event_name, event_beliefs)._data
def clone(self):
return JointProbabilityTable(self._columns, self._clone_data())
def __str__(self):
return str([self._columns, self._data])
class BayesianNode:
def __init__(self, name, joint_probability_table):
self._name = name
self._original_joint_probability_table = joint_probability_table
self._joint_probability_table = joint_probability_table
self._affects_nodes = []
self._affected_by = []
self._known = False
def affected_by(self, other_node):
self._affected_by.append(other_node)
def affects(self, node):
self._affects_nodes.append(node)
node.affected_by(self)
def _forward_propagate(self, joint_probability_table):
self._joint_probability_table.update_applicable_beliefs(self._name, joint_probability_table)
for affected_node in self._affects_nodes:
affected_node._forward_propagate(self._joint_probability_table)
def _backward_propagate(self, joint_probability_table):
self._joint_probability_table.update_applicable_beliefs(self._name, joint_probability_table)
for affected_node in self._affected_by:
affected_node._backward_propagate(self._joint_probability_table)
def given(self, value):
if not self._known:
self._joint_probability_table = self._joint_probability_table.given(self._name, value)
self._known = True
jpt = self._joint_probability_table.clone()
for affected_node in self._affects_nodes:
affected_node._forward_propagate(jpt)
for affected_node in self._affected_by:
affected_node._backward_propagate(jpt)
for affected_node in self._affects_nodes:
affected_node._backward_propagate(jpt)
for affected_node in self._affected_by:
affected_node._forward_propagate(jpt)
def probability(self):
return self._joint_probability_table.probability(self._name)
def __str__(self):
return str(self._joint_probability_table)
door_picked_table = JointProbabilityTable(
columns=['door_picked'],
data = [
['red', 0.3333],
['blue', 0.3333],
['green', 0.3334],
])
prize_behind_door_table = JointProbabilityTable(
columns=['prize_behind'],
data = [
['red', 0.3333],
['blue', 0.3333],
['green', 0.3334],
])
shown_empty_table = JointProbabilityTable(
columns=['door_picked', 'prize_behind', 'shown_empty'],
data = [
['red', 'red', 'red', 0.0],
['red', 'red', 'green', 0.5],
['red', 'red', 'blue', 0.5],
['red', 'green', 'red', 0.0],
['red', 'green', 'green', 0.0],
['red', 'green', 'blue', 1.0],
['red', 'blue', 'red', 0.0],
['red', 'blue', 'green', 1.0],
['red', 'blue', 'blue', 0.0],
['green', 'red', 'red', 0.0],
['green', 'red', 'green', 0.0],
['green', 'red', 'blue', 1.0],
['green', 'green', 'red', 0.5],
['green', 'green', 'green', 0.0],
['green', 'green', 'blue', 0.5],
['green', 'blue', 'red', 1.0],
['green', 'blue', 'green', 0.0],
['green', 'blue', 'blue', 0.0],
['blue', 'red', 'red', 0.0],
['blue', 'red', 'green', 1.0],
['blue', 'red', 'blue', 0.0],
['blue', 'green', 'red', 1.0],
['blue', 'green', 'green', 0.0],
['blue', 'green', 'blue', 0.0],
['blue', 'blue', 'red', 0.5],
['blue', 'blue', 'green', 0.5],
['blue', 'blue', 'blue', 0.0],
])
switch_table = JointProbabilityTable(
columns=['switch_or_stay'],
data=[
['switch', 0.5],
['stay', 0.5],
])
door_after_choice_table = JointProbabilityTable(
columns=['door_picked', 'shown_empty', 'switch_or_stay', 'door_after_choice'],
data=[
['red', 'red', 'switch', 'red', 0.0],
['red', 'red', 'switch', 'green', 0.0],
['red', 'red', 'switch', 'blue', 0.0],
['red', 'red', 'stay', 'red', 0.0],
['red', 'red', 'stay', 'green', 0.0],
['red', 'red', 'stay', 'blue', 0.0],
['red', 'blue', 'switch', 'red', 0.0],
['red', 'blue', 'switch', 'blue', 0.0],
['red', 'blue', 'switch', 'green', 1.0],
['red', 'blue', 'stay', 'red', 1.0],
['red', 'blue', 'stay', 'blue', 0.0],
['red', 'blue', 'stay', 'green', 0.0],
['red', 'green', 'switch', 'red', 0.0],
['red', 'green', 'switch', 'blue', 1.0],
['red', 'green', 'switch', 'green', 0.0],
['red', 'green', 'stay', 'red', 1.0],
['red', 'green', 'stay', 'blue', 0.0],
['red', 'green', 'stay', 'green', 0.0],
#~~~~~~~~
['blue', 'red', 'switch', 'red', 0.0],
['blue', 'red', 'switch', 'green', 1.0],
['blue', 'red', 'switch', 'blue', 0.0],
['blue', 'red', 'stay', 'red', 0.0],
['blue', 'red', 'stay', 'green', 0.0],
['blue', 'red', 'stay', 'blue', 1.0],
['blue', 'blue', 'switch', 'red', 0.0],
['blue', 'blue', 'switch', 'blue', 0.0],
['blue', 'blue', 'switch', 'green', 0.0],
['blue', 'blue', 'stay', 'red', 0.0],
['blue', 'blue', 'stay', 'blue', 0.0],
['blue', 'blue', 'stay', 'green', 0.0],
['blue', 'green', 'switch', 'red', 1.0],
['blue', 'green', 'switch', 'blue', 0.0],
['blue', 'green', 'switch', 'green', 0.0],
['blue', 'green', 'stay', 'red', 0.0],
['blue', 'green', 'stay', 'blue', 0.0],
['blue', 'green', 'stay', 'green', 1.0],
#~~~~~~~~
['green', 'red', 'switch', 'red', 0.0],
['green', 'red', 'switch', 'green', 0.0],
['green', 'red', 'switch', 'blue', 1.0],
['green', 'red', 'stay', 'red', 0.0],
['green', 'red', 'stay', 'green', 1.0],
['green', 'red', 'stay', 'blue', 0.0],
['green', 'blue', 'switch', 'red', 1.0],
['green', 'blue', 'switch', 'blue', 0.0],
['green', 'blue', 'switch', 'green', 0.0],
['green', 'blue', 'stay', 'red', 0.0],
['green', 'blue', 'stay', 'blue', 1.0],
['green', 'blue', 'stay', 'green', 0.0],
['green', 'green', 'switch', 'red', 0.0],
['green', 'green', 'switch', 'blue', 0.0],
['green', 'green', 'switch', 'green', 0.0],
['green', 'green', 'stay', 'red', 0.0],
['green', 'green', 'stay', 'blue', 0.0],
['green', 'green', 'stay', 'green', 0.0],
])
win_prize_table = JointProbabilityTable(
columns=['prize_behind', 'door_after_choice', 'win_prize'],
data = [
['red', 'red', True, 1.0],
['red', 'red', False, 0.0],
['red', 'green', True, 0.0],
['red', 'green', False, 1.0],
['red', 'blue', True, 0.0],
['red', 'blue', False, 1.0],
['green', 'red', True, 0.0],
['green', 'red', False, 1.0],
['green', 'green',True, 1.0],
['green', 'green',False, 0.0],
['green', 'blue', True, 0.0],
['green', 'blue', False, 1.0],
['blue', 'red', True, 0.0],
['blue', 'red', False, 1.0],
['blue', 'green', True, 0.0],
['blue', 'green', False, 1.0],
['blue', 'blue', True, 1.0],
['blue', 'blue', False, 0.0],
])
informed_agent_table = JointProbabilityTable(
columns=['informed_agent'],
data=[
['informed', 0.2],
['uninformed', 0.8],
])
door_picked_node = BayesianNode('door_picked', door_picked_table)
prize_behind_node = BayesianNode('prize_behind', prize_behind_door_table)
shown_empty_node = BayesianNode('shown_empty', shown_empty_table)
win_prize_node = BayesianNode('win_prize', win_prize_table)
door_after_choice_node = BayesianNode('door_after_choice', door_after_choice_table)
switch_node = BayesianNode('switch_or_stay', switch_table)
informed_agent_node = BayesianNode('informed_agent', informed_agent_table)
print "Win prize original: " + str(win_prize_table.probability('win_prize'))
# Original
door_picked_node.affects(shown_empty_node)
prize_behind_node.affects(shown_empty_node)
# New
shown_empty_node.affects(door_after_choice_node)
door_picked_node.affects(door_after_choice_node)
door_after_choice_node.affects(win_prize_node)
# For fun (need to update joint probability tables as well)
# informed_agent_node.affects(switch_node)
# informed_agent_node.affects(win_prize_node)
prize_behind_node.affects(win_prize_node)
switch_node.affects(door_after_choice_node)
def print_all_nodes():
print ""
print "Door picked: " + str(door_picked_node.probability())
print "Prize behind door: " + str(prize_behind_node.probability())
print "Door shown empty: " + str(shown_empty_node.probability())
print "Win prize: " + str(win_prize_node.probability())
print "Updated door choice: " + str(door_after_choice_node.probability())
print "Switch or stay: " + str(switch_node.probability())
print "~~~~~"
print "Before doing anything..."
print_all_nodes()
door_picked_node.given('red')
print "After initially picking the red door..."
print_all_nodes()
shown_empty_node.given('green')
print "After being shown the green door..."
print_all_nodes()
switch_node.given('switch')
print "After switching doors..."
print_all_nodes()
print "After choosing another color door..."
door_after_choice_node.given('blue')
print_all_nodes() |
989,397 | 844aa040e6f099c42330543d6555d49f3e6e49d3 | '''
You are given a string of length 5 called time, representing the current time on a digital clock in the format "hh:mm". The earliest possible time is "00:00" and the latest possible time is "23:59".
In the string time, the digits represented by the ? symbol are unknown, and must be replaced with a digit from 0 to 9.
Return an integer answer, the number of valid clock times that can be created by replacing every ? with a digit from 0 to 9.
Example 1:
Input: time = "?5:00"
Output: 2
Explanation: We can replace the ? with either a 0 or 1, producing "05:00" or "15:00". Note that we cannot replace it with a 2, since the time "25:00" is invalid. In total, we have two choices.
Example 2:
Input: time = "0?:0?"
Output: 100
Explanation: Each ? can be replaced by any digit from 0 to 9, so we have 100 total choices.
Example 3:
Input: time = "??:??"
Output: 1440
Explanation: There are 24 possible choices for the hours, and 60 possible choices for the minutes. In total, we have 24 * 60 = 1440 choices.
Constraints:
time is a valid string of length 5 in the format "hh:mm".
"00" <= hh <= "23"
"00" <= mm <= "59"
Some of the digits might be replaced with '?' and need to be replaced with digits from 0 to 9.
'''
class Solution:
def countTime(self, time: str) -> int:
ans = 1;
if time[0] == '?':
if time[1] == '?':
ans *= 24
elif time[1] <= '3':
ans *= 3
else:
ans *= 2;
elif time[1] == '?':
ans *= 10 if time[0] < '2' else 4
if time[3] == '?':
ans *= 6
if time[4] == '?':
ans *= 10
return ans
|
989,398 | d1e21490055489024d5371eb25eef2e5f5b415d1 | from itertools import permutations
from src.bst import Bst
import pytest
THREE_NODE_TREE = list(map(list, permutations(range(3))))
FOUR_NODE_TREE = list(map(list, permutations(range(4))))
FIVE_NODE_TREE = list(map(list, permutations(range(5))))
SIX_NODE_TREE = list(map(list, permutations(range(6))))
SEVEN_NODE_TREE = list(map(list, permutations(range(7))))
@pytest.mark.parametrize("tree_list", THREE_NODE_TREE)
def test_balance_3_nodes_insert(tree_list):
tree = Bst()
for i in tree_list:
tree.insert(i)
assert list(tree.breadth_first()) == [1, 0, 2]
@pytest.mark.parametrize("tree_list", THREE_NODE_TREE)
def test_balance_3_nodes_delete(tree_list):
tree = Bst()
for i in tree_list:
tree.insert(i)
for i in range(3):
tree.delete(i)
assert tree.balance() in [1, 0, -1]
@pytest.mark.parametrize("tree_list", FOUR_NODE_TREE)
def test_balance_4_nodes_insert(tree_list):
tree = Bst()
for i in tree_list:
tree.insert(i)
assert tree.balance() in [-1, 0, 1]
@pytest.mark.parametrize("tree_list", FOUR_NODE_TREE)
def test_balance_4_nodes_insert(tree_list):
tree = Bst()
for i in tree_list:
tree.insert(i)
assert tree.balance() in [-1, 0, 1]
@pytest.mark.parametrize("tree_list", FIVE_NODE_TREE)
def test_balance_5_nodes_insert(tree_list):
tree = Bst()
for i in tree_list:
tree.insert(i)
assert tree.balance() in [-1, 0, 1]
@pytest.mark.parametrize("tree_list", SIX_NODE_TREE)
def test_balance_6_nodes_insert(tree_list):
tree = Bst()
for i in tree_list:
tree.insert(i)
assert tree.balance() in [-1, 0, 1]
def test_balance_4_nodes_delete():
tree = Bst()
for i in [3, 1, 0, 2]:
tree.insert(i)
assert tree.balance() in [-1, 0, 1]
tree.delete(0)
assert tree.balance() in [-1, 0, 1]
def test_delete_root(bst_root_fifty):
bst_root_fifty.delete(50)
assert not bst_root_fifty.contains(50)
assert bst_root_fifty.balance() in [-1, 0, 1]
assert list(bst_root_fifty.breadth_first()) == [75, 25, 100, 12, 35, 150]
def test_delete_root2(bst_root_fifty):
bst_root_fifty.delete(25)
bst_root_fifty.delete(35)
assert bst_root_fifty.balance() in [-1, 0, 1]
bst_root_fifty.delete(50)
assert bst_root_fifty.balance() in [-1, 0, 1]
def test_three():
tree = Bst()
tree.insert(3)
tree.insert(2)
tree.insert(4)
tree.insert(4.5)
tree.insert(5)
tree.delete(2)
assert not tree.contains(2)
def test_balance1():
tree = Bst()
lst = [5, 3, 2, 4, 1, 0]
for i in lst:
tree.insert(i)
assert list(tree.breadth_first()) == [3, 1, 5, 0, 2, 4]
tree.delete(0)
assert list(tree.breadth_first()) == [3, 1, 5, 2, 4]
tree.delete(1)
assert list(tree.breadth_first()) == [3, 2, 5, 4]
tree.delete(2)
assert not tree.contains(2)
@pytest.mark.parametrize("tree_list", SIX_NODE_TREE)
def test_balance_4_nodes_insert(tree_list):
tree = Bst()
for i in tree_list:
tree.insert(i)
tree.delete(0)
tree.delete(1)
tree.delete(2)
assert not tree.contains(2)
@pytest.mark.parametrize("tree_list", SIX_NODE_TREE)
def test_balance_6_nodes_insert(tree_list):
tree = Bst()
for i in tree_list:
tree.insert(i)
assert tree.balance() in [-1, 0, 1]
for i in range(6):
tree.delete(i)
assert not tree.contains(i)
assert tree.balance() in [-1, 0, 1]
@pytest.mark.parametrize("tree_list", FIVE_NODE_TREE)
def test_balance_6_nodes_insert(tree_list):
tree = Bst()
for i in tree_list:
tree.insert(i)
assert tree.balance() in [-1, 0, 1]
for i in range(5):
tree.delete(i)
assert not tree.contains(i)
assert tree.balance() in [-1, 0, 1]
|
989,399 | 84daa781023ce420144cee4d18aaa8e09c737d49 | #Gear Selection for Max Acceleration on a Bicycle
import numpy
import matplotlib.pyplot as plt
import math
#Bicycle Parameters
M = 70 #[kg]
It = 0.2 #[kg*m^2]
Rt = 0.4 #[m]
grats = [32/53, 20/53, 11/53] #Defined as R2/R1 (same as cars)
thetapdot = numpy.linspace(0,250,501) #[rpm]
thetapdot2 = thetapdot*2*math.pi/60 #[rad/s]
#thetapdot = numpy.ndarray.tolist(thetapdot)
#thetapdot2 = numpy.ndarray.tolist(thetapdot2)
#numpy.multiply(5.73,thetapdot2)
xdot_g1 = (Rt/grats[0])*thetapdot2 #bike speed [m/s]
xdot_g2 = (Rt/grats[1])*thetapdot2 #bike speed [m/s]
xdot_g3 = (Rt/grats[2])*thetapdot2 #bike speed [m/s]
xddot_g1 = ((grats[0]/Rt)*(150-5.73*thetapdot2)) / (M + (2*It)/(Rt**2))
xddot_g2 = ((grats[1]/Rt)*(150-5.73*thetapdot2)) / (M + (2*It)/(Rt**2))
xddot_g3 = ((grats[2]/Rt)*(150-5.73*thetapdot2)) / (M + (2*It)/(Rt**2))
plt.plot(thetapdot, xddot_g1,'r:',
thetapdot, xddot_g2,'b:',
thetapdot, xddot_g3,'g:')
plt.xlabel('Pedaling Rate [rpm]')
plt.ylabel('Bike Acceleration [m/s^2]')
plt.legend(['Gear Ratio: 32/53','Gear Ratio: 20/53','Gear Ratio: 11/53'])
plt.title('Acceleration as a Function of Pedaling Rate')
plt.grid(True)
plt.figure()
plt.plot(xdot_g1*2.23694, xddot_g1,'r:',
xdot_g2*2.23694, xddot_g2,'b:',
xdot_g3*2.23694, xddot_g3,'g:')
plt.xlabel('Bike Speed [mph]')
plt.ylabel('Bike Acceleration [m/s^2]')
plt.legend(['Gear Ratio: 32/53','Gear Ratio: 20/53','Gear Ratio: 11/53'])
plt.title('Acceleration as a Function of Bike Speed')
plt.grid(True)
#Simulating the Race using Euler Forward Method
N = 501
dt = 0.5 #Sampling interval [s]
#t = list(range(0,(N-1)*dt,dt))
t = numpy.ndarray.tolist(numpy.arange(0,N*dt,dt))
x = [0] #Initial condition
xdot = [0] #Initial condition
xddot = []
for k in list(range(len(t)-1)): #Because k must be an integer; ie the index of t rather than the value of t
#len(t) - 1 because if not xdot.- and x.append will predict an extra value
if xdot[k] < 24/2.23694:
#Pedaling Rate and Acceleration at the Current Time
print('gear 1')
thetapdot2[k] = (1/Rt)*grats[0]*xdot[k]
xddot.append(((grats[0]/Rt)*(150-5.73*thetapdot2[k])) / (M + ((2*It)/(Rt**2))))
elif xdot[k] < 40/2.23694:
print('gear 2')
thetapdot2[k] = (1/Rt)*grats[1]*xdot[k]
xddot.append(((grats[1]/Rt)*(150-5.73*thetapdot2[k])) / (M + ((2*It)/(Rt**2))))
else:
print('gear 3')
thetapdot2[k] = (1/Rt)*grats[2]*xdot[k]
xddot.append(((grats[2]/Rt)*(150-5.73*thetapdot2[k])) / (M + ((2*It)/(Rt**2))))
#Velocity and Position at the Next Time Step
xdot.append(xdot[k] + xddot[k]*dt)
x.append(x[k] + xdot[k]*dt)
[item*2.23694 for item in xdot] #Convert to mph
plt.figure()
plt.plot(t,xdot)
plt.ylabel('Velocity [mph]')
plt.xlabel('Time [s]')
plt.grid(True) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.